From 0dd42d16520d25e11f9f061c38fea8f8f4f66e6f Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 12:39:13 +0200 Subject: [PATCH 01/55] make python3 compatible --- PicoProducer/python/analysis/Cutflow.py | 16 +++--- .../analysis/ETauFakeRate/ModuleETau.py | 12 ++--- PicoProducer/python/analysis/GenDumper.py | 18 +++---- .../python/analysis/GenFilterMuTau.py | 44 ++++++++-------- PicoProducer/python/analysis/GenMatcher.py | 34 ++++++------- .../python/analysis/HighPT/ModuleDiJet.py | 6 +-- .../python/analysis/HighPT/ModuleTauNu.py | 12 ++--- .../python/analysis/HighPT/ModuleWJ.py | 14 +++--- .../ModuleCharmTauFakeSimple.py | 2 +- PicoProducer/python/analysis/ModuleEMu.py | 12 ++--- PicoProducer/python/analysis/ModuleETau.py | 10 ++-- PicoProducer/python/analysis/ModuleHighPT.py | 50 +++++++++---------- PicoProducer/python/analysis/ModuleMuMu.py | 12 ++--- PicoProducer/python/analysis/ModuleMuTau.py | 10 ++-- .../ModuleMuTau_nanoV10_DeepTau2p1.py | 10 ++-- .../ModuleMuTau_nanoV10_DeepTau2p5.py | 10 ++-- PicoProducer/python/analysis/ModuleTauPair.py | 50 +++++++++---------- PicoProducer/python/analysis/ModuleTauTau.py | 8 +-- .../analysis/MuTauFakeRate/ModuleMuTau.py | 10 ++-- .../analysis/MuTauFakeRate/ModuleTauPair.py | 50 +++++++++---------- PicoProducer/python/analysis/StitchEffs.py | 18 +++---- .../python/analysis/TauES/ModuleMuTau.py | 10 ++-- PicoProducer/python/analysis/TestModule.py | 4 +- PicoProducer/python/analysis/TreeProducer.py | 20 ++++---- .../python/analysis/TreeProducerDiJet.py | 4 +- .../python/analysis/TreeProducerEMu.py | 4 +- .../python/analysis/TreeProducerETau.py | 4 +- .../python/analysis/TreeProducerHighPT.py | 2 +- .../python/analysis/TreeProducerMuMu.py | 4 +- .../python/analysis/TreeProducerMuNu.py | 4 +- .../python/analysis/TreeProducerMuTau.py | 4 +- .../python/analysis/TreeProducerTauNu.py | 4 +- .../python/analysis/TreeProducerTauPair.py | 2 +- .../python/analysis/TreeProducerTauTau.py | 4 +- 
.../python/analysis/TreeProducerWJ.py | 4 +- PicoProducer/python/analysis/utils.py | 4 +- 36 files changed, 243 insertions(+), 243 deletions(-) diff --git a/PicoProducer/python/analysis/Cutflow.py b/PicoProducer/python/analysis/Cutflow.py index 3396f9e31..7d3385819 100644 --- a/PicoProducer/python/analysis/Cutflow.py +++ b/PicoProducer/python/analysis/Cutflow.py @@ -18,7 +18,7 @@ def addcut(self, name, title, index=None): if index==None: index = self.nextidx self.nextidx += 1 - assert all(index!=i for n,i in self.cuts.iteritems()), "Index %d for %r already in use! Taken: %s"%(index,name,self.cuts) + assert all(index!=i for n,i in self.cuts.items()), "Index %d for %r already in use! Taken: %s"%(index,name,self.cuts) #assert not hasattr(self,name), "%s already has attribute '%s'!"%(self,name) #setattr(self,name,index) bin = 1+index # range 0-ncuts, bin numbers 1-(ncuts+1) @@ -42,15 +42,15 @@ def display(self,itot=1,nfinal=None,final="Final selection"): ntot = self.hist.GetBinContent(itot) # number of events before any cuts nlast = (-99,ntot) #padcut = 3+max(len(c) for c in self.cuts) # padding - values = [self.hist.GetBinContent(1+i) for k, i in self.cuts.items() if self.hist.GetBinContent(1+i)>0] # all values > 0 + values = [self.hist.GetBinContent(1+i) for k, i in list(self.cuts.items()) if self.hist.GetBinContent(1+i)>0] # all values > 0 maxval = max(abs(x) for x in values) padevt = 4+(int(floor(log10(maxval))) if maxval>0 else 0) # pad all numbers of events padtot = 3+(int(floor(log10(ntot))) if ntot>0 else 0) # pad total number of events denstr = str(ntot).rjust(padtot) if ntot else " 0" - print underline("Cutflow:"+' '*(46+padevt+padtot),pre=">>> ") - print underline("%5s %5s / %5s = %-8s %-8s %-23s"%( # header - '','npass'.rjust(padevt),'ntot'.rjust(padtot),'abseff','releff','cut'),pre=">>> ") - for cut, index in sorted(self.cuts.items(),key=lambda x: x[1]): + print(underline("Cutflow:"+' '*(46+padevt+padtot),pre=">>> ")) + print(underline("%5s %5s / %5s = 
%-8s %-8s %-23s"%( # header + '','npass'.rjust(padevt),'ntot'.rjust(padtot),'abseff','releff','cut'),pre=">>> ")) + for cut, index in sorted(list(self.cuts.items()),key=lambda x: x[1]): nevts = self.hist.GetBinContent(1+index) title = self.hist.GetXaxis().GetBinLabel(1+index) or cut frac = " " @@ -60,11 +60,11 @@ def display(self,itot=1,nfinal=None,final="Final selection"): frac2 = "%4.2f%%"%(100.0*(nevts/nlast[1])) if nlast[1] and index==nlast[0]+1 else ' ' # relative efficiency w.r.t. last cut nomstr = ("%.1f"%nevts).rjust(padevt) #print ">>> %4d: %s / %s %s %s"%(index,nomstr,denstr,frac,title) # without rel. eff. - print ">>> %4d: %5s / %5s %s %8s %s"%(index,nomstr,denstr,frac,frac2,title) # with rel. eff. + print(">>> %4d: %5s / %5s %s %8s %s"%(index,nomstr,denstr,frac,frac2,title)) # with rel. eff. nlast = (index,nevts) # for next iteration if nfinal!=None: frac = "= %6.2f%%"%(100.0*nfinal/ntot) if ntot else ' ' nomstr = str(float(nfinal)).rjust(padevt) - print underline("%5s %5s / %5s %s %8s %-23s"%('',nomstr,denstr,frac,'',final),pre=">>> ") + print(underline("%5s %5s / %5s %s %8s %-23s"%('',nomstr,denstr,frac,'',final),pre=">>> ")) \ No newline at end of file diff --git a/PicoProducer/python/analysis/ETauFakeRate/ModuleETau.py b/PicoProducer/python/analysis/ETauFakeRate/ModuleETau.py index c583bf48e..e3acb3aff 100644 --- a/PicoProducer/python/analysis/ETauFakeRate/ModuleETau.py +++ b/PicoProducer/python/analysis/ETauFakeRate/ModuleETau.py @@ -53,12 +53,12 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" super(ModuleETau,self).beginJob() - print ">>> %-12s = %s"%('tauwp', self.tauwp) - print ">>> %-12s = %s"%('eleCutPt', self.eleCutPt) - print ">>> %-12s = %s"%('eleCutEta', self.eleCutEta) - print ">>> %-12s = %s"%('tauCutPt', self.tauCutPt) - print ">>> %-12s = %s"%('tauCutEta', self.tauCutEta) - print ">>> %-12s = %s"%('ZpeekReso', self.resoScale) + print(">>> %-12s = %s"%('tauwp', self.tauwp)) + 
print(">>> %-12s = %s"%('eleCutPt', self.eleCutPt)) + print(">>> %-12s = %s"%('eleCutEta', self.eleCutEta)) + print(">>> %-12s = %s"%('tauCutPt', self.tauCutPt)) + print(">>> %-12s = %s"%('tauCutEta', self.tauCutEta)) + print(">>> %-12s = %s"%('ZpeekReso', self.resoScale)) pass diff --git a/PicoProducer/python/analysis/GenDumper.py b/PicoProducer/python/analysis/GenDumper.py index df5e6dae1..8ca73514a 100755 --- a/PicoProducer/python/analysis/GenDumper.py +++ b/PicoProducer/python/analysis/GenDumper.py @@ -45,15 +45,15 @@ def __init__(self,*args,**kwargs): def analyze(self,event): """Dump gen information for each gen particle in given event.""" - print "\n%s event %s %s"%('-'*10,event.event,'-'*68) + print("\n%s event %s %s"%('-'*10,event.event,'-'*68)) self.nevents += 1 leptonic = False particles = Collection(event,'GenPart') #particles = Collection(event,'LHEPart') seeds = [ ] # seeds for decay chain chain = { } # decay chain - print " \033[4m%7s %8s %8s %8s %8s %8s %8s %8s %9s %10s \033[0m"%( - "index","pdgId","moth","mothid","dR","pt","eta","status","prompt","last copy") + print(" \033[4m%7s %8s %8s %8s %8s %8s %8s %8s %9s %10s \033[0m"%( + "index","pdgId","moth","mothid","dR","pt","eta","status","prompt","last copy")) for i, particle in enumerate(particles): mothidx = particle.genPartIdxMother if 0<=mothidx0: - print " %-10s %4d / %-4d (%.1f%%)"%('leptonic:',self.nleptons,self.nevents,100.0*self.nleptons/self.nevents) - print "%s done %s\n"%('-'*10,'-'*64) + print(" %-10s %4d / %-4d (%.1f%%)"%('leptonic:',self.nleptons,self.nevents,100.0*self.nleptons/self.nevents)) + print("%s done %s\n"%('-'*10,'-'*64)) diff --git a/PicoProducer/python/analysis/GenFilterMuTau.py b/PicoProducer/python/analysis/GenFilterMuTau.py index 6cf9986ea..56769fde7 100755 --- a/PicoProducer/python/analysis/GenFilterMuTau.py +++ b/PicoProducer/python/analysis/GenFilterMuTau.py @@ -10,7 +10,7 @@ import ROOT; ROOT.PyConfig.IgnoreCommandLineOptions = True import re from ROOT import 
TLorentzVector, TH1D, TH2D, gStyle, kRed -from TreeProducer import TreeProducer +from .TreeProducer import TreeProducer from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Event from TauFW.PicoProducer.analysis.utils import hasbit, filtermutau, statusflags_dict, dumpgenpart, getdecaychain, getmother, deltaPhi @@ -25,7 +25,7 @@ class GenFilterMuTau(Module): def __init__(self,fname,**kwargs): self.out = TreeProducerGenFilterMuTau(fname,self) self.verb = kwargs.get('verb',0) - print ">>> verb = %r"%(self.verb) + print(">>> verb = %r"%(self.verb)) def endJob(self): """Wrap up after running on all events and files""" @@ -203,10 +203,10 @@ def analyze(self, event): # CHECK FOR MISSING DECAYS if self.verb>=2 and len(taus_hard)>=2 and len(elecs_tau)+len(muons_tau)+ntauhs<2: - print ">>> MISSING DITAU DECAY? electrons=%d (%d), muons=%d (%d), tauh=%d"%( - len(elecs_tau),len(elecs),len(muons_tau),len(muons),ntauhs) + print(">>> MISSING DITAU DECAY? electrons=%d (%d), muons=%d (%d), tauh=%d"%( + len(elecs_tau),len(elecs),len(muons_tau),len(muons),ntauhs)) for tau in taus_hard: - print getdecaychain(tau,particles) + print(getdecaychain(tau,particles)) for particle in particles: pid = abs(particle.pdgId) if pid in [11,13,15,16]: @@ -226,7 +226,7 @@ def analyze(self, event): if len(taus_hard)>=2: moth_pid2 = abs(getmother(taus_hard[1],particles)) if moth_pid!=moth_pid2: - print ">>> Mother of ditau does not match! %s vs %s"%(moth_pid,moth_pid2) + print(">>> Mother of ditau does not match! 
%s vs %s"%(moth_pid,moth_pid2)) self.out.h_mothpid.Fill(moth_pid) # FILL TREE BRANCHES @@ -394,14 +394,14 @@ def endJob(self): if ntau>0: for tag, title in [('',''),('_hard',' from hard process with tau pT > 18 GeV')]: ntau_decay = sum(self.cutflow.getbincontent(b+tag) for b, e in bins) - print ">>> Ditau decays%s:"%(title) + print(">>> Ditau decays%s:"%(title)) for bin, exp in bins: nbin = self.cutflow.getbincontent(bin+tag) - print ">>> %8d / %5d = %5.2f%% %6d / %d = %5.2f%% %s, expect %s%%"%( - nbin,ntau,100.0*nbin/ntau,nbin,ntau_decay,100.0*nbin/ntau_decay,bin,exp) - print ">>> %8d / %5d = %5.2f%% found ditau decays / all tau pairs"%(ntau_decay,ntau,100.0*ntau_decay/ntau) + print(">>> %8d / %5d = %5.2f%% %6d / %d = %5.2f%% %s, expect %s%%"%( + nbin,ntau,100.0*nbin/ntau,nbin,ntau_decay,100.0*nbin/ntau_decay,bin,exp)) + print(">>> %8d / %5d = %5.2f%% found ditau decays / all tau pairs"%(ntau_decay,ntau,100.0*ntau_decay/ntau)) else: - print ">>> No ditau..." + print(">>> No ditau...") # MUTAU FILTERS getfiltereff(self.h_mutaufilter) @@ -429,9 +429,9 @@ def endJob(self): super(TreeProducerGenFilterMuTau,self).endJob() def fillStatusFlag(self,particle): - for xbit in xrange(0,15): + for xbit in range(0,15): if not hasbit(particle.statusFlags,xbit): continue - for ybit in xrange(0,15): + for ybit in range(0,15): if not hasbit(particle.statusFlags,ybit): continue self.h_statusflags.Fill(xbit,ybit) @@ -444,10 +444,10 @@ def getfiltereff(hist): if ntot>0: # https://cms-pdmv.cern.ch/mcm/edit?db_name=requests&prepid=TAU-RunIISummer19UL18wmLHEGEN-00007&page=0 eff = npass/ntot - print ">>> Efficiency of custom mutau gen-filter (pT>18, |eta|<2.5):" - print ">>> %8d / %5d = %5.3f%%"%(npass,ntot,100.0*npass/ntot) - print ">>> Expect ~ 0.888 % = B(ll->tautau) * eff(Z -> mutau) for DYJetsToLL_M-50 (pT>16, muon |eta|<2.5, tau |eta|<2.7)" # = 1.815e+03 / 5343.0 * 0.02615 - print ">>> Expect ~ 0.519 % = B(ll->tautau) * eff(Z -> mutau) for DYJetsToLL_M-50 (pT>18, |eta|<2.5, no FSR)" 
# = 1.815e+03 / 5343.0 * 0.02615 * 0.5841 + print(">>> Efficiency of custom mutau gen-filter (pT>18, |eta|<2.5):") + print(">>> %8d / %5d = %5.3f%%"%(npass,ntot,100.0*npass/ntot)) + print(">>> Expect ~ 0.888 % = B(ll->tautau) * eff(Z -> mutau) for DYJetsToLL_M-50 (pT>16, muon |eta|<2.5, tau |eta|<2.7)") # = 1.815e+03 / 5343.0 * 0.02615 + print(">>> Expect ~ 0.519 % = B(ll->tautau) * eff(Z -> mutau) for DYJetsToLL_M-50 (pT>18, |eta|<2.5, no FSR)") # = 1.815e+03 / 5343.0 * 0.02615 * 0.5841 return eff @@ -472,7 +472,7 @@ def getfiltereff(hist): title, fname = fname.split('=') else: title = fname.replace('.root','') - print ">>> Opening %s (%s)"%(fname,title) + print(">>> Opening %s (%s)"%(fname,title)) file = TFile.Open(fname,'READ') tree = file.Get('tree') tree.title = title @@ -513,7 +513,7 @@ def getfiltereff(hist): ] for stitle, sstring in selections: sname = stitle.replace(' ','').replace(',','-').replace('>','gt').replace('#','').replace('GeV','').replace('fromhardprocess','_hard') - print ">>> Drawing %r..."%(stitle) #,sstring) + print(">>> Drawing %r..."%(stitle)) #,sstring) for xvar, nbins, xmin, xmax in vars: xtitle = trees[0].GetBranch(xvar).GetTitle() #xvar pname = "%s_%s%s"%(xvar,sname,args.tag) @@ -524,14 +524,14 @@ def getfiltereff(hist): dcmd = "%s >> %s"%(xvar,hname) hist = TH1D(hname,title,nbins,xmin,xmax) out = tree.Draw(dcmd,sstring,'gOff') - print ">>> %8s = tree.Draw(%r,%r,'gOff')"%(out,dcmd,sstring) + print(">>> %8s = tree.Draw(%r,%r,'gOff')"%(out,dcmd,sstring)) nevts = hist.Integral() if nevts: hist.Scale(1./nevts) hists.append(hist) # PLOT HISTOGRAMS - print ">>> Plotting..." + print(">>> Plotting...") plot = Plot(xtitle,hists,clone=True) plot.draw(ratio=True,lstyle=1) plot.drawlegend() @@ -540,5 +540,5 @@ def getfiltereff(hist): plot.close() for tree in trees: tree.file.Close() - print ">>> Done." 
+ print(">>> Done.") diff --git a/PicoProducer/python/analysis/GenMatcher.py b/PicoProducer/python/analysis/GenMatcher.py index d4f166061..6afc2782f 100755 --- a/PicoProducer/python/analysis/GenMatcher.py +++ b/PicoProducer/python/analysis/GenMatcher.py @@ -26,7 +26,7 @@ import ROOT; ROOT.PyConfig.IgnoreCommandLineOptions = True import re from ROOT import TH2D, gStyle, kRed -from TreeProducer import TreeProducer +from .TreeProducer import TreeProducer from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Event from TauFW.PicoProducer.analysis.utils import hasbit @@ -165,9 +165,9 @@ def __init__(self, filename, module, **kwargs): # ONLY JOB (no hadd) self.localjob = bool(re.search(r"(-\d+|[^\d])$",filename.replace(".root",""))) self.onlyjob = self.localjob or bool(re.search(r"_0$",filename.replace(".root",""))) - print ">>> filename = %r"%(filename) - print ">>> localjob = %r"%(self.localjob) - print ">>> onlyjob = %r"%(self.onlyjob) + print(">>> filename = %r"%(filename)) + print(">>> localjob = %r"%(self.localjob)) + print(">>> onlyjob = %r"%(self.onlyjob)) # CUTFLOW self.cutflow.addcut('none', "no cut" ) @@ -256,15 +256,15 @@ def setbinlabels(hist): for ix in range(1,nxbins+1): # loop over columns hist.GetXaxis().SetBinLabel(ix,str(ix-1)) # set alphanumerical bin label if hist.GetBinContent(ix,0)!=0: # check underflow - print ">>> WARNING!!! Underflow in (ix,0)=(%s,0) of %r"%(ix,hist.GetName()) + print(">>> WARNING!!! Underflow in (ix,0)=(%s,0) of %r"%(ix,hist.GetName())) if hist.GetBinContent(ix,nybins+1)!=0: # check overflow - print ">>> WARNING!!! Overflow in (ix,nybins+1)=(%s,%s) of %r"%(ix,nybins+1,hist.GetName()) + print(">>> WARNING!!! 
Overflow in (ix,nybins+1)=(%s,%s) of %r"%(ix,nybins+1,hist.GetName())) for iy in range(1,nybins+1): # loop over rows hist.GetYaxis().SetBinLabel(iy,str(iy-1)) # set alphanumerical bin label if hist.GetBinContent(0,iy)!=0: # check underflow - print ">>> WARNING!!! Underflow in (0,iy)=(0,%s) of %r"%(iy,hist.GetName()) + print(">>> WARNING!!! Underflow in (0,iy)=(0,%s) of %r"%(iy,hist.GetName())) if hist.GetBinContent(nxbins+1,iy)!=0: # check overflow - print ">>> WARNING!!! Overflow in (nxbins+1,iy)=(%s,%s) of %r"%(nxbins+1,iy,hist.GetName()) + print(">>> WARNING!!! Overflow in (nxbins+1,iy)=(%s,%s) of %r"%(nxbins+1,iy,hist.GetName())) hist.GetXaxis().SetNdivisions(10) hist.GetYaxis().SetNdivisions(10) return hist @@ -288,7 +288,7 @@ def normalize(hist,hname=None,direction=None): frac = 100.0*hist.GetBinContent(ix,iy)/ntot # fraction of all entries hist.SetBinContent(ix,iy,frac) # overwrite number of entries with fraction else: - print ">>> normalize: Cannot normalize: ntot=%s"%(ntot) + print(">>> normalize: Cannot normalize: ntot=%s"%(ntot)) elif 'row' in direction: hist.GetZaxis().SetTitle("Row fraction [%]") for iy in range(1,nybins+1): # loop over rows @@ -339,14 +339,14 @@ def formatvar(var): #'h_gm_HTT_vs_nano','h_gm_HTT_nopt_vs_nano', 'h_gm_HTT_stat_vs_nano', #'h_gm_HTT_vs_HTT_nopt', 'h_gm_HTT_vs_HTT_stat' ] - print ">>> Retrieve histograms..." + print(">>> Retrieve histograms...") for hname in hnames: hist = file.Get(hname) hist.SetTitle("pt>20 GeV, VVVLoose VSjet, VVVLoose VSe, VLoose VSmu") if hist: hists.append(hist) else: - print ">>> WARNING! Could not find histogram %r! Ignoring..."%(hname) + print(">>> WARNING! Could not find histogram %r! 
Ignoring..."%(hname)) # DRAW NEW HISTOGRAMS selections = [ @@ -362,7 +362,7 @@ def formatvar(var): tree = file.Get('tree') for stitle, sstring in selections: sname = stitle.replace(' ','').replace(',','-').replace('>','gt').replace('#','').replace('GeV','') - print ">>> Drawing %r..."%(stitle) #,sstring) + print(">>> Drawing %r..."%(stitle)) #,sstring) vars = [ # (xvar, yvar) ('genmatch', 'genmatch_HTT'), ('genmatch', 'genmatch_HTT_nopt'), @@ -377,16 +377,16 @@ def formatvar(var): dcmd = "%s:%s >> %s"%(yvar,xvar,hname) hist = TH2D(hname,title,7,0,7,7,0,7) #hist.SetDirectory(0) - print ">>> tree.Draw(%r,%r,'gOff')"%(dcmd,sstring) + print(">>> tree.Draw(%r,%r,'gOff')"%(dcmd,sstring)) out = tree.Draw(dcmd,sstring,'gOff') - print ">>> %10s taus passed"%(out) + print(">>> %10s taus passed"%(out)) hists.append(hist) # PLOT HISTOGRAMS - print ">>> Plotting..." + print(">>> Plotting...") for hist in hists: if not hist: - print ">>> WARNING!!! Empty hist %r! Ignoring..."%(hist) + print(">>> WARNING!!! Empty hist %r! Ignoring..."%(hist)) continue hname = hist.GetName() htitle = hist.GetTitle() @@ -423,5 +423,5 @@ def formatvar(var): canvas.SaveAs(pname+".pdf") canvas.Close() file.Close() - print ">>> Done." 
+ print(">>> Done.") diff --git a/PicoProducer/python/analysis/HighPT/ModuleDiJet.py b/PicoProducer/python/analysis/HighPT/ModuleDiJet.py index d85e6e018..722e1ae1d 100644 --- a/PicoProducer/python/analysis/HighPT/ModuleDiJet.py +++ b/PicoProducer/python/analysis/HighPT/ModuleDiJet.py @@ -36,9 +36,9 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" super(ModuleDiJet,self).beginJob() - print ">>> %-12s = %s"%('tauPtCut', self.tauPtCut) - print ">>> %-12s = %s"%('tauEtaCut', self.tauEtaCut) - print ">>> %-12s = %s"%('dphiCut', self.dphiCut) + print(">>> %-12s = %s"%('tauPtCut', self.tauPtCut)) + print(">>> %-12s = %s"%('tauEtaCut', self.tauEtaCut)) + print(">>> %-12s = %s"%('dphiCut', self.dphiCut)) pass diff --git a/PicoProducer/python/analysis/HighPT/ModuleTauNu.py b/PicoProducer/python/analysis/HighPT/ModuleTauNu.py index 4fd6c8e7a..4dfe3f5b3 100644 --- a/PicoProducer/python/analysis/HighPT/ModuleTauNu.py +++ b/PicoProducer/python/analysis/HighPT/ModuleTauNu.py @@ -59,12 +59,12 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" super(ModuleTauNu,self).beginJob() - print ">>> %-12s = %s"%('tauPtCut', self.tauPtCut) - print ">>> %-12s = %s"%('tauEtaCut', self.tauEtaCut) - print ">>> %-12s = %s"%('metCut', self.metCut) - print ">>> %-12s = %s"%('mtCut', self.mtCut) - print ">>> %-12s = %s"%('dphiCut', self.dphiCut) - print ">>> %-12s = %s"%('tes_shift', self.tes_shift) + print(">>> %-12s = %s"%('tauPtCut', self.tauPtCut)) + print(">>> %-12s = %s"%('tauEtaCut', self.tauEtaCut)) + print(">>> %-12s = %s"%('metCut', self.metCut)) + print(">>> %-12s = %s"%('mtCut', self.mtCut)) + print(">>> %-12s = %s"%('dphiCut', self.dphiCut)) + print(">>> %-12s = %s"%('tes_shift', self.tes_shift)) # should be run after filling met and tau branches def FillTESshifts(self): diff --git a/PicoProducer/python/analysis/HighPT/ModuleWJ.py 
b/PicoProducer/python/analysis/HighPT/ModuleWJ.py index 7b4a4828b..40168e3b6 100644 --- a/PicoProducer/python/analysis/HighPT/ModuleWJ.py +++ b/PicoProducer/python/analysis/HighPT/ModuleWJ.py @@ -59,13 +59,13 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" super(ModuleWJ,self).beginJob() - print ">>> %-12s = %s"%('muonPtCut', self.muonPtCut) - print ">>> %-12s = %s"%('muonEtaCut', self.muonEtaCut) - print ">>> %-12s = %s"%('tauPtCut', self.tauPtCut) - print ">>> %-12s = %s"%('tauEtaCut', self.tauEtaCut) - print ">>> %-12s = %s"%('metCut', self.metCut) - print ">>> %-12s = %s"%('mtCut', self.mtCut) - print ">>> %-12s = %s"%('dphiCut', self.dphiCut) + print(">>> %-12s = %s"%('muonPtCut', self.muonPtCut)) + print(">>> %-12s = %s"%('muonEtaCut', self.muonEtaCut)) + print(">>> %-12s = %s"%('tauPtCut', self.tauPtCut)) + print(">>> %-12s = %s"%('tauEtaCut', self.tauEtaCut)) + print(">>> %-12s = %s"%('metCut', self.metCut)) + print(">>> %-12s = %s"%('mtCut', self.mtCut)) + print(">>> %-12s = %s"%('dphiCut', self.dphiCut)) pass diff --git a/PicoProducer/python/analysis/JetTauFakeRate/ModuleCharmTauFakeSimple.py b/PicoProducer/python/analysis/JetTauFakeRate/ModuleCharmTauFakeSimple.py index 078017efa..e6fd0ad2a 100644 --- a/PicoProducer/python/analysis/JetTauFakeRate/ModuleCharmTauFakeSimple.py +++ b/PicoProducer/python/analysis/JetTauFakeRate/ModuleCharmTauFakeSimple.py @@ -95,7 +95,7 @@ def analyze(self, event): # MATCH # https://pdg.lbl.gov/2019/reviews/rpp2019-rev-monte-carlo-numbering.pdf - print '-'*80 + print('-'*80) D_pids = [4,411,421,413,423,415,425,431,433,435,10411,10421,10413,10423,] Dparts = [ ] for genpart in genparts: diff --git a/PicoProducer/python/analysis/ModuleEMu.py b/PicoProducer/python/analysis/ModuleEMu.py index 5b560963b..db6048618 100644 --- a/PicoProducer/python/analysis/ModuleEMu.py +++ b/PicoProducer/python/analysis/ModuleEMu.py @@ -54,12 +54,12 @@ def __init__(self, fname, **kwargs): def 
beginJob(self): """Before processing any events or files.""" super(ModuleEMu,self).beginJob() - print ">>> %-12s = %s"%('muonCutPt', self.muonCutPt) - print ">>> %-12s = %s"%('muonCutEta', self.muonCutEta) - print ">>> %-12s = %s"%('eleCutPt', self.eleCutPt) - print ">>> %-12s = %s"%('eleCutEta', self.eleCutEta) - print ">>> %-12s = %s"%('tauCutPt', self.tauCutPt) - print ">>> %-12s = %s"%('tauCutEta', self.tauCutEta) + print(">>> %-12s = %s"%('muonCutPt', self.muonCutPt)) + print(">>> %-12s = %s"%('muonCutEta', self.muonCutEta)) + print(">>> %-12s = %s"%('eleCutPt', self.eleCutPt)) + print(">>> %-12s = %s"%('eleCutEta', self.eleCutEta)) + print(">>> %-12s = %s"%('tauCutPt', self.tauCutPt)) + print(">>> %-12s = %s"%('tauCutEta', self.tauCutEta)) pass diff --git a/PicoProducer/python/analysis/ModuleETau.py b/PicoProducer/python/analysis/ModuleETau.py index c8b1d4211..1cccd99c6 100644 --- a/PicoProducer/python/analysis/ModuleETau.py +++ b/PicoProducer/python/analysis/ModuleETau.py @@ -48,11 +48,11 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" super(ModuleETau,self).beginJob() - print ">>> %-12s = %s"%('tauwp', self.tauwp) - print ">>> %-12s = %s"%('eleCutPt', self.eleCutPt) - print ">>> %-12s = %s"%('eleCutEta', self.eleCutEta) - print ">>> %-12s = %s"%('tauCutPt', self.tauCutPt) - print ">>> %-12s = %s"%('tauCutEta', self.tauCutEta) + print(">>> %-12s = %s"%('tauwp', self.tauwp)) + print(">>> %-12s = %s"%('eleCutPt', self.eleCutPt)) + print(">>> %-12s = %s"%('eleCutEta', self.eleCutEta)) + print(">>> %-12s = %s"%('tauCutPt', self.tauCutPt)) + print(">>> %-12s = %s"%('tauCutEta', self.tauCutEta)) pass diff --git a/PicoProducer/python/analysis/ModuleHighPT.py b/PicoProducer/python/analysis/ModuleHighPT.py index 7701fcc75..580b6f0d0 100644 --- a/PicoProducer/python/analysis/ModuleHighPT.py +++ b/PicoProducer/python/analysis/ModuleHighPT.py @@ -23,7 +23,7 @@ class ModuleHighPT(Module): """Base class the channels 
of an analysis with muon and neutrino: for munu, taunu, jetjet.""" def __init__(self, fname, **kwargs): - print header(self.__class__.__name__) + print(header(self.__class__.__name__)) # SETTINGS self.filename = fname # output file name @@ -80,33 +80,33 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" - print '-'*80 - print ">>> %-12s = %r"%('filename', self.filename) - print ">>> %-12s = %s"%('year', self.year) - print ">>> %-12s = %r"%('dtype', self.dtype) - print ">>> %-12s = %r"%('channel', self.channel) - print ">>> %-12s = %s"%('ismc', self.ismc) - print ">>> %-12s = %s"%('isdata', self.isdata) - print ">>> %-12s = %s"%('isembed', self.isembed) + print('-'*80) + print(">>> %-12s = %r"%('filename', self.filename)) + print(">>> %-12s = %s"%('year', self.year)) + print(">>> %-12s = %r"%('dtype', self.dtype)) + print(">>> %-12s = %r"%('channel', self.channel)) + print(">>> %-12s = %s"%('ismc', self.ismc)) + print(">>> %-12s = %s"%('isdata', self.isdata)) + print(">>> %-12s = %s"%('isembed', self.isembed)) if self.channel.count('tau')>0: - print ">>> %-12s = %s"%('tes', self.tes) - print ">>> %-12s = %r"%('tessys', self.tessys) - print ">>> %-12s = %r"%('fes', self.fes) - print ">>> %-12s = %s"%('ltf', self.ltf) - print ">>> %-12s = %s"%('jtf', self.jtf) + print(">>> %-12s = %s"%('tes', self.tes)) + print(">>> %-12s = %r"%('tessys', self.tessys)) + print(">>> %-12s = %r"%('fes', self.fes)) + print(">>> %-12s = %s"%('ltf', self.ltf)) + print(">>> %-12s = %s"%('jtf', self.jtf)) #if self.channel.count('ele')>0: # print ">>> %-12s = %s"%('ees', self.ees) - print ">>> %-12s = %s"%('dotoppt', self.dotoppt) - print ">>> %-12s = %s"%('dopdf', self.dopdf) - print ">>> %-12s = %s"%('dozpt', self.dozpt) + print(">>> %-12s = %s"%('dotoppt', self.dotoppt)) + print(">>> %-12s = %s"%('dopdf', self.dopdf)) + print(">>> %-12s = %s"%('dozpt', self.dozpt)) #print ">>> %-12s = %s"%('dorecoil', self.dorecoil) - print ">>> %-12s = 
%s"%('dojec', self.dojec) - print ">>> %-12s = %s"%('dojecsys', self.dojecsys) - print ">>> %-12s = %s"%('dosys', self.dosys) - print ">>> %-12s = %s"%('dotight', self.dotight) - print ">>> %-12s = %s"%('useT1', self.useT1) - print ">>> %-12s = %s"%('jetCutPt', self.jetCutPt) - print ">>> %-12s = %s"%('dowmasswgt',self.dowmasswgt) + print(">>> %-12s = %s"%('dojec', self.dojec)) + print(">>> %-12s = %s"%('dojecsys', self.dojecsys)) + print(">>> %-12s = %s"%('dosys', self.dosys)) + print(">>> %-12s = %s"%('dotight', self.dotight)) + print(">>> %-12s = %s"%('useT1', self.useT1)) + print(">>> %-12s = %s"%('jetCutPt', self.jetCutPt)) + print(">>> %-12s = %s"%('dowmasswgt',self.dowmasswgt)) def endJob(self): @@ -279,7 +279,7 @@ def fillJetMETBranches(self,event,leptons,lep1): self.out.metnomu[0] = metNoMu.Pt() # MET SYSTEMATICS - for unc, met_var in met_vars.iteritems(): + for unc, met_var in met_vars.items(): getattr(self.out,"met_"+unc)[0] = met_var.Pt() getattr(self.out,"metphi_"+unc)[0] = met_var.Phi() dphi = abs(lep1.DeltaPhi(met_var)) diff --git a/PicoProducer/python/analysis/ModuleMuMu.py b/PicoProducer/python/analysis/ModuleMuMu.py index bed363d7d..b05cad314 100644 --- a/PicoProducer/python/analysis/ModuleMuMu.py +++ b/PicoProducer/python/analysis/ModuleMuMu.py @@ -57,12 +57,12 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" super(ModuleMuMu,self).beginJob() - print ">>> %-12s = %s"%('muon1CutPt', self.muon1CutPt) - print ">>> %-12s = %s"%('muon2CutPt', self.muon2CutPt) - print ">>> %-12s = %s"%('muonCutEta', self.muonCutEta) - print ">>> %-12s = %s"%('tauCutPt', self.tauCutPt) - print ">>> %-12s = %s"%('tauCutEta', self.tauCutEta) - print ">>> %-12s = %s"%('zwindow', self.zwindow) + print(">>> %-12s = %s"%('muon1CutPt', self.muon1CutPt)) + print(">>> %-12s = %s"%('muon2CutPt', self.muon2CutPt)) + print(">>> %-12s = %s"%('muonCutEta', self.muonCutEta)) + print(">>> %-12s = %s"%('tauCutPt', self.tauCutPt)) + 
print(">>> %-12s = %s"%('tauCutEta', self.tauCutEta)) + print(">>> %-12s = %s"%('zwindow', self.zwindow)) pass diff --git a/PicoProducer/python/analysis/ModuleMuTau.py b/PicoProducer/python/analysis/ModuleMuTau.py index 9bf888b5b..857010c5b 100644 --- a/PicoProducer/python/analysis/ModuleMuTau.py +++ b/PicoProducer/python/analysis/ModuleMuTau.py @@ -58,11 +58,11 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" super(ModuleMuTau,self).beginJob() - print ">>> %-12s = %s"%('tauwp', self.tauwp) - print ">>> %-12s = %s"%('muonCutPt', self.muonCutPt) - print ">>> %-12s = %s"%('muonCutEta', self.muonCutEta) - print ">>> %-12s = %s"%('tauCutPt', self.tauCutPt) - print ">>> %-12s = %s"%('tauCutEta', self.tauCutEta) + print(">>> %-12s = %s"%('tauwp', self.tauwp)) + print(">>> %-12s = %s"%('muonCutPt', self.muonCutPt)) + print(">>> %-12s = %s"%('muonCutEta', self.muonCutEta)) + print(">>> %-12s = %s"%('tauCutPt', self.tauCutPt)) + print(">>> %-12s = %s"%('tauCutEta', self.tauCutEta)) pass diff --git a/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p1.py b/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p1.py index b89f89c0e..99757e5a7 100644 --- a/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p1.py +++ b/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p1.py @@ -62,11 +62,11 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" super(ModuleMuTau_nanoV10_DeepTau2p1,self).beginJob() - print ">>> %-12s = %s"%('tauwp', self.tauwp) - print ">>> %-12s = %s"%('muonCutPt', self.muonCutPt) - print ">>> %-12s = %s"%('muonCutEta', self.muonCutEta) - print ">>> %-12s = %s"%('tauCutPt', self.tauCutPt) - print ">>> %-12s = %s"%('tauCutEta', self.tauCutEta) + print(">>> %-12s = %s"%('tauwp', self.tauwp)) + print(">>> %-12s = %s"%('muonCutPt', self.muonCutPt)) + print(">>> %-12s = %s"%('muonCutEta', self.muonCutEta)) + print(">>> %-12s = 
%s"%('tauCutPt', self.tauCutPt)) + print(">>> %-12s = %s"%('tauCutEta', self.tauCutEta)) pass diff --git a/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p5.py b/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p5.py index a1e131f8a..77797ad43 100644 --- a/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p5.py +++ b/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p5.py @@ -62,11 +62,11 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" super(ModuleMuTau_nanoV10_DeepTau2p5,self).beginJob() - print ">>> %-12s = %s"%('tauwp', self.tauwp) - print ">>> %-12s = %s"%('muonCutPt', self.muonCutPt) - print ">>> %-12s = %s"%('muonCutEta', self.muonCutEta) - print ">>> %-12s = %s"%('tauCutPt', self.tauCutPt) - print ">>> %-12s = %s"%('tauCutEta', self.tauCutEta) + print(">>> %-12s = %s"%('tauwp', self.tauwp)) + print(">>> %-12s = %s"%('muonCutPt', self.muonCutPt)) + print(">>> %-12s = %s"%('muonCutEta', self.muonCutEta)) + print(">>> %-12s = %s"%('tauCutPt', self.tauCutPt)) + print(">>> %-12s = %s"%('tauCutEta', self.tauCutEta)) pass diff --git a/PicoProducer/python/analysis/ModuleTauPair.py b/PicoProducer/python/analysis/ModuleTauPair.py index 3f87139ed..23108f270 100644 --- a/PicoProducer/python/analysis/ModuleTauPair.py +++ b/PicoProducer/python/analysis/ModuleTauPair.py @@ -21,7 +21,7 @@ class ModuleTauPair(Module): """Base class the channels of an analysis with two tau leptons: for mutau, etau, tautau, emu, mumu, ee.""" def __init__(self, fname, **kwargs): - print header(self.__class__.__name__) + print(header(self.__class__.__name__)) # SETTINGS self.filename = fname # output file name @@ -89,33 +89,33 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" - print '-'*80 - print ">>> %-12s = %r"%('filename', self.filename) - print ">>> %-12s = %s"%('year', self.year) - print ">>> %-12s = %r"%('dtype', self.dtype) - print ">>> %-12s = 
%r"%('channel', self.channel) - print ">>> %-12s = %s"%('ismc', self.ismc) - print ">>> %-12s = %s"%('isdata', self.isdata) - print ">>> %-12s = %s"%('isembed', self.isembed) + print('-'*80) + print(">>> %-12s = %r"%('filename', self.filename)) + print(">>> %-12s = %s"%('year', self.year)) + print(">>> %-12s = %r"%('dtype', self.dtype)) + print(">>> %-12s = %r"%('channel', self.channel)) + print(">>> %-12s = %s"%('ismc', self.ismc)) + print(">>> %-12s = %s"%('isdata', self.isdata)) + print(">>> %-12s = %s"%('isembed', self.isembed)) if self.channel.count('tau')>0: - print ">>> %-12s = %s"%('tes', self.tes) - print ">>> %-12s = %r"%('tessys', self.tessys) - print ">>> %-12s = %r"%('fes', self.fes) - print ">>> %-12s = %s"%('ltf', self.ltf) - print ">>> %-12s = %s"%('jtf', self.jtf) + print(">>> %-12s = %s"%('tes', self.tes)) + print(">>> %-12s = %r"%('tessys', self.tessys)) + print(">>> %-12s = %r"%('fes', self.fes)) + print(">>> %-12s = %s"%('ltf', self.ltf)) + print(">>> %-12s = %s"%('jtf', self.jtf)) #if self.channel.count('ele')>0: # print ">>> %-12s = %s"%('ees', self.ees) - print ">>> %-12s = %s"%('dotoppt', self.dotoppt) - print ">>> %-12s = %s"%('dopdf', self.dopdf) - print ">>> %-12s = %s"%('dozpt', self.dozpt) + print(">>> %-12s = %s"%('dotoppt', self.dotoppt)) + print(">>> %-12s = %s"%('dopdf', self.dopdf)) + print(">>> %-12s = %s"%('dozpt', self.dozpt)) #print ">>> %-12s = %s"%('dorecoil', self.dorecoil) - print ">>> %-12s = %s"%('dojec', self.dojec) - print ">>> %-12s = %s"%('dojecsys', self.dojecsys) - print ">>> %-12s = %s"%('dosys', self.dosys) - print ">>> %-12s = %s"%('dotight', self.dotight) - print ">>> %-12s = %s"%('useT1', self.useT1) - print ">>> %-12s = %s"%('jetCutPt', self.jetCutPt) - print ">>> %-12s = %s"%('bjetCutEta',self.bjetCutEta) + print(">>> %-12s = %s"%('dojec', self.dojec)) + print(">>> %-12s = %s"%('dojecsys', self.dojecsys)) + print(">>> %-12s = %s"%('dosys', self.dosys)) + print(">>> %-12s = %s"%('dotight', self.dotight)) + 
print(">>> %-12s = %s"%('useT1', self.useT1)) + print(">>> %-12s = %s"%('jetCutPt', self.jetCutPt)) + print(">>> %-12s = %s"%('bjetCutEta',self.bjetCutEta)) def endJob(self): @@ -449,7 +449,7 @@ def fillMETAndDiLeptonBranches(self, event, tau1, tau2, met, met_vars): self.out.dzeta[0] = pzetamiss - 0.85*pzetavis # MET SYSTEMATICS - for unc, met_var in met_vars.iteritems(): + for unc, met_var in met_vars.items(): getattr(self.out,"met_"+unc)[0] = met_var.Pt() getattr(self.out,"metphi_"+unc)[0] = met_var.Phi() getattr(self.out,"mt_1_"+unc)[0] = sqrt( 2 * self.out.pt_1[0] * met_var.Pt() * ( 1 - cos(deltaPhi(self.out.phi_1[0],met_var.Phi())) )) diff --git a/PicoProducer/python/analysis/ModuleTauTau.py b/PicoProducer/python/analysis/ModuleTauTau.py index deac090c6..bc35ad502 100644 --- a/PicoProducer/python/analysis/ModuleTauTau.py +++ b/PicoProducer/python/analysis/ModuleTauTau.py @@ -47,10 +47,10 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" super(ModuleTauTau,self).beginJob() - print ">>> %-12s = %s"%('tauwp', self.tauwp) - print ">>> %-12s = %s"%('tauCutPt', self.tauCutPt) - print ">>> %-12s = %s"%('tauCutEta', self.tauCutEta) - print ">>> %-12s = '%s'"%('triggers',self.trigger.path.replace("||","\n>>> %s||"%(' '*16))) + print(">>> %-12s = %s"%('tauwp', self.tauwp)) + print(">>> %-12s = %s"%('tauCutPt', self.tauCutPt)) + print(">>> %-12s = %s"%('tauCutEta', self.tauCutEta)) + print(">>> %-12s = '%s'"%('triggers',self.trigger.path.replace("||","\n>>> %s||"%(' '*16)))) def analyze(self, event): diff --git a/PicoProducer/python/analysis/MuTauFakeRate/ModuleMuTau.py b/PicoProducer/python/analysis/MuTauFakeRate/ModuleMuTau.py index 7eff7347f..1d0d66292 100644 --- a/PicoProducer/python/analysis/MuTauFakeRate/ModuleMuTau.py +++ b/PicoProducer/python/analysis/MuTauFakeRate/ModuleMuTau.py @@ -55,11 +55,11 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" 
super(ModuleMuTau,self).beginJob() - print ">>> %-12s = %s"%('tauwp', self.tauwp) - print ">>> %-12s = %s"%('muonCutPt', self.muonCutPt) - print ">>> %-12s = %s"%('muonCutEta', self.muonCutEta) - print ">>> %-12s = %s"%('tauCutPt', self.tauCutPt) - print ">>> %-12s = %s"%('tauCutEta', self.tauCutEta) + print(">>> %-12s = %s"%('tauwp', self.tauwp)) + print(">>> %-12s = %s"%('muonCutPt', self.muonCutPt)) + print(">>> %-12s = %s"%('muonCutEta', self.muonCutEta)) + print(">>> %-12s = %s"%('tauCutPt', self.tauCutPt)) + print(">>> %-12s = %s"%('tauCutEta', self.tauCutEta)) pass diff --git a/PicoProducer/python/analysis/MuTauFakeRate/ModuleTauPair.py b/PicoProducer/python/analysis/MuTauFakeRate/ModuleTauPair.py index 516e28277..a486e86db 100644 --- a/PicoProducer/python/analysis/MuTauFakeRate/ModuleTauPair.py +++ b/PicoProducer/python/analysis/MuTauFakeRate/ModuleTauPair.py @@ -21,7 +21,7 @@ class ModuleTauPair(Module): """Base class the channels of an analysis with two tau leptons: for mutau, etau, tautau, emu, mumu, ee.""" def __init__(self, fname, **kwargs): - print header(self.__class__.__name__) + print(header(self.__class__.__name__)) # SETTINGS self.filename = fname # output file name @@ -88,33 +88,33 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" - print '-'*80 - print ">>> %-12s = %r"%('filename', self.filename) - print ">>> %-12s = %s"%('year', self.year) - print ">>> %-12s = %r"%('dtype', self.dtype) - print ">>> %-12s = %r"%('channel', self.channel) - print ">>> %-12s = %s"%('ismc', self.ismc) - print ">>> %-12s = %s"%('isdata', self.isdata) - print ">>> %-12s = %s"%('isembed', self.isembed) + print('-'*80) + print(">>> %-12s = %r"%('filename', self.filename)) + print(">>> %-12s = %s"%('year', self.year)) + print(">>> %-12s = %r"%('dtype', self.dtype)) + print(">>> %-12s = %r"%('channel', self.channel)) + print(">>> %-12s = %s"%('ismc', self.ismc)) + print(">>> %-12s = %s"%('isdata', self.isdata)) + 
print(">>> %-12s = %s"%('isembed', self.isembed)) if self.channel.count('tau')>0: - print ">>> %-12s = %s"%('tes', self.tes) - print ">>> %-12s = %r"%('tessys', self.tessys) - print ">>> %-12s = %r"%('fes', self.fes) - print ">>> %-12s = %r"%('res', self.res) - print ">>> %-12s = %s"%('ltf', self.ltf) - print ">>> %-12s = %s"%('jtf', self.jtf) + print(">>> %-12s = %s"%('tes', self.tes)) + print(">>> %-12s = %r"%('tessys', self.tessys)) + print(">>> %-12s = %r"%('fes', self.fes)) + print(">>> %-12s = %r"%('res', self.res)) + print(">>> %-12s = %s"%('ltf', self.ltf)) + print(">>> %-12s = %s"%('jtf', self.jtf)) #if self.channel.count('ele')>0: # print ">>> %-12s = %s"%('ees', self.ees) - print ">>> %-12s = %s"%('dotoppt', self.dotoppt) - print ">>> %-12s = %s"%('dopdf', self.dopdf) - print ">>> %-12s = %s"%('dozpt', self.dozpt) + print(">>> %-12s = %s"%('dotoppt', self.dotoppt)) + print(">>> %-12s = %s"%('dopdf', self.dopdf)) + print(">>> %-12s = %s"%('dozpt', self.dozpt)) #print ">>> %-12s = %s"%('dorecoil', self.dorecoil) - print ">>> %-12s = %s"%('dojec', self.dojec) - print ">>> %-12s = %s"%('dojecsys', self.dojecsys) - print ">>> %-12s = %s"%('dotight', self.dotight) - print ">>> %-12s = %s"%('useT1', self.useT1) - print ">>> %-12s = %s"%('jetCutPt', self.jetCutPt) - print ">>> %-12s = %s"%('bjetCutEta',self.bjetCutEta) + print(">>> %-12s = %s"%('dojec', self.dojec)) + print(">>> %-12s = %s"%('dojecsys', self.dojecsys)) + print(">>> %-12s = %s"%('dotight', self.dotight)) + print(">>> %-12s = %s"%('useT1', self.useT1)) + print(">>> %-12s = %s"%('jetCutPt', self.jetCutPt)) + print(">>> %-12s = %s"%('bjetCutEta',self.bjetCutEta)) def endJob(self): @@ -384,7 +384,7 @@ def fillMETAndDiLeptonBranches(self, event, tau1, tau2, met, met_vars): self.out.dzeta[0] = pzetamiss - 0.85*pzetavis # MET SYSTEMATICS - for unc, met_var in met_vars.iteritems(): + for unc, met_var in met_vars.items(): getattr(self.out,"met_"+unc)[0] = met_var.Pt() getattr(self.out,"metphi_"+unc)[0] = 
met_var.Phi() getattr(self.out,"mt_1_"+unc)[0] = sqrt( 2 * self.out.pt_1[0] * met_var.Pt() * ( 1 - cos(deltaPhi(self.out.phi_1[0],met_var.Phi())) )) diff --git a/PicoProducer/python/analysis/StitchEffs.py b/PicoProducer/python/analysis/StitchEffs.py index e65a6fe5e..787a15e2a 100755 --- a/PicoProducer/python/analysis/StitchEffs.py +++ b/PicoProducer/python/analysis/StitchEffs.py @@ -28,9 +28,9 @@ def __init__(self,fname,**kwargs): self.outfile = TFile(fname,'RECREATE') # make custom file with only few histograms self.verb = kwargs.get('verb', 0 ) self.domutau = kwargs.get('mutau',False) - print ">>> fname = %r"%(self.fname) - print ">>> domutau = %r"%(self.domutau) - print ">>> verb = %r"%(self.verb) + print(">>> fname = %r"%(self.fname)) + print(">>> domutau = %r"%(self.domutau)) + print(">>> verb = %r"%(self.verb)) # HISTOGRAMS self.outfile.cd() @@ -88,7 +88,7 @@ def printtable(hist,norm=False): else: xstr = "%.4g-%.4g"%(hist.GetXaxis().GetBinLowEdge(xbin),hist.GetXaxis().GetBinUpEdge(xbin)) header += " %10s"%(xstr) - print header + print(header) for ybin in range(1,nybins+1): if yint: ystr = "%5.2g"%(hist.GetYaxis().GetBinLowEdge(ybin)) @@ -100,7 +100,7 @@ def printtable(hist,norm=False): if norm and nevts>0: zval /= nevts row += " %10.5f"%(zval) - print row + print(row) # QUICK PLOTTING SCRIPT @@ -133,7 +133,7 @@ def printtable(hist,norm=False): title, fname = fname.split('=') else: title = fname.split('/')[-1].replace('.root','') - print ">>> Opening %s (%s)"%(fname,title) + print(">>> Opening %s (%s)"%(fname,title)) file = TFile.Open(fname,'READ') files.append((title,file)) @@ -145,7 +145,7 @@ def printtable(hist,norm=False): nevts = hist.Integral() if nevts>0: # normalize hist.Scale(100./nevts) - print ">>> Efficiencies for %s in %s:"%(hname[2:],title) + print(">>> Efficiencies for %s in %s:"%(hname[2:],title)) printtable(hist,norm=True) xtitle = formattitle(hist.GetXaxis().GetTitle()) ytitle = formattitle(hist.GetYaxis().GetTitle()) @@ -165,7 +165,7 @@ def 
printtable(hist,norm=False): plot.close() # PLOT HISTOGRAM COMPARISONS - print ">>> Plot comparisons..." + print(">>> Plot comparisons...") for hname in hnames: hists = [ ] for title, file in files: @@ -199,5 +199,5 @@ def printtable(hist,norm=False): for _, file in files: file.Close() - print ">>> Done." + print(">>> Done.") diff --git a/PicoProducer/python/analysis/TauES/ModuleMuTau.py b/PicoProducer/python/analysis/TauES/ModuleMuTau.py index b91f3c1e6..1a139f78e 100644 --- a/PicoProducer/python/analysis/TauES/ModuleMuTau.py +++ b/PicoProducer/python/analysis/TauES/ModuleMuTau.py @@ -59,11 +59,11 @@ def __init__(self, fname, **kwargs): def beginJob(self): """Before processing any events or files.""" super(ModuleMuTau,self).beginJob() - print ">>> %-12s = %s"%('tauwp', self.tauwp) - print ">>> %-12s = %s"%('muonCutPt', self.muonCutPt) - print ">>> %-12s = %s"%('muonCutEta', self.muonCutEta) - print ">>> %-12s = %s"%('tauCutPt', self.tauCutPt) - print ">>> %-12s = %s"%('tauCutEta', self.tauCutEta) + print(">>> %-12s = %s"%('tauwp', self.tauwp)) + print(">>> %-12s = %s"%('muonCutPt', self.muonCutPt)) + print(">>> %-12s = %s"%('muonCutEta', self.muonCutEta)) + print(">>> %-12s = %s"%('tauCutPt', self.tauCutPt)) + print(">>> %-12s = %s"%('tauCutEta', self.tauCutEta)) pass diff --git a/PicoProducer/python/analysis/TestModule.py b/PicoProducer/python/analysis/TestModule.py index 3d398674f..0956a53b7 100755 --- a/PicoProducer/python/analysis/TestModule.py +++ b/PicoProducer/python/analysis/TestModule.py @@ -21,7 +21,7 @@ def beginJob(self): self.time0 = time.time() def endJob(self): - print ">>> endJob: done after %.1f seconds"%(time.time()-self.time0) + print(">>> endJob: done after %.1f seconds"%(time.time()-self.time0)) def analyze(self, event): """Process event, return True (pass, go to next module) or False (fail, go to next event).""" @@ -84,5 +84,5 @@ def analyze(self, event): p = PostProcessor(outdir,infiles,cut=None,branchsel=None,maxEntries=maxevts, 
modules=modules,postfix=postfix,noOut=False) p.run() - print(">>> TestModule.py done after %.1f seconds"%(time.time()-time0)) + print((">>> TestModule.py done after %.1f seconds"%(time.time()-time0))) diff --git a/PicoProducer/python/analysis/TreeProducer.py b/PicoProducer/python/analysis/TreeProducer.py index 947cbca97..62869f82f 100644 --- a/PicoProducer/python/analysis/TreeProducer.py +++ b/PicoProducer/python/analysis/TreeProducer.py @@ -37,7 +37,7 @@ class TreeProducer(object): def __init__(self, filename, module, **kwargs): self.verbosity = kwargs.get('verb',getattr(module,'verbosity',False) or getattr(module,'verb',False)) if self.verbosity>=1: - print ">>> TreeProducer.__init__: %r, %r, kwargs=%s..."%(filename,module,kwargs) + print(">>> TreeProducer.__init__: %r, %r, kwargs=%s..."%(filename,module,kwargs)) self.filename = filename self.module = module self.outfile = TFile(filename,'RECREATE') @@ -81,13 +81,13 @@ def addHist(self,name,*args): else: raise IOError("TreeProducer.addHist: Could not parse histogram arguments: %r, args=%r"%(name,args)) if self.verbosity+2>=1: - print ">>> TreeProducer.addHist: Adding TH1D %r with bins %r..."%(hname,bins) + print(">>> TreeProducer.addHist: Adding TH1D %r with bins %r..."%(hname,bins)) self.hists[name] = hist if dname: # make subdirectory subdir = self.outfile.GetDirectory(dname) if not subdir: # create directory for the first time if self.verbosity+2>=1: - print ">>> TreeProducer.addHist: Creating subdirectory %s..."%(dname) + print(">>> TreeProducer.addHist: Creating subdirectory %s..."%(dname)) subdir = self.outfile.mkdir(dname) #,'',True) hist.SetDirectory(subdir) return hist @@ -110,16 +110,16 @@ def addBranch(self, name, dtype='f', default=None, title=None, arrname=None, **k arrstr = "" if isinstance(dtype,str): # Set correct data type for numpy: if dtype=='F': # 'F' = 'complex64', which do not work for filling float branches - print ">>> TreeProducer.addBranch: Warning! 
Converting numpy data type 'F' (complex64) to 'f' (float32, Float_t)" + print(">>> TreeProducer.addBranch: Warning! Converting numpy data type 'F' (complex64) to 'f' (float32, Float_t)") dtype = 'float32' # 'f' = 'float32' -> 'F' -> Float_t elif dtype=='D': # 'D' = 'complex128', which do not work for filling float branches - print ">>> TreeProducer.addBranch: Warning! Converting numpy data type 'D' (complex128) to 'd' (float64, Double_t)" + print(">>> TreeProducer.addBranch: Warning! Converting numpy data type 'D' (complex128) to 'd' (float64, Double_t)") dtype = 'float64' # 'd' = 'float64' -> 'D' -> Double_t address = np.zeros(maxlen,dtype=dtype) # array address to be filled during event loop setattr(self,arrname,address) leaflist = "%s%s/%s"%(name,arrstr,root_dtype[dtype]) if self.verbosity>=1: - print ">>> TreeProducer.addBranch: tree.Branch(%r,%s,%r), %s=%r, maxlen=%s, default=%s"%(name,arrname,leaflist,arrname,address,maxlen,default) + print(">>> TreeProducer.addBranch: tree.Branch(%r,%s,%r), %s=%r, maxlen=%s, default=%s"%(name,arrname,leaflist,arrname,address,maxlen,default)) branch = self.tree.Branch(name,address,leaflist) if default!=None: if hasattr(default,'__getitem__'): # vector/array/list/tuple @@ -129,19 +129,19 @@ def addBranch(self, name, dtype='f', default=None, title=None, arrname=None, **k for i in range(len(default)): address[i] = default[i] if self.verbosity>=2: - print ">>> TreeProducer.addBranch: Set default value %s to list %r: %r"%(arrname,default,address) + print(">>> TreeProducer.addBranch: Set default value %s to list %r: %r"%(arrname,default,address)) else: # single value, like float or int for i in range(len(address)): address[i] = default if self.verbosity>=2: - print ">>> TreeProducer.addBranch: Set default value %s to single value %r: %r"%(arrname,default,address) + print(">>> TreeProducer.addBranch: Set default value %s to single value %r: %r"%(arrname,default,address)) if title: branch.SetTitle(title) return branch def 
setAlias(self,newbranch,oldbranch): if self.verbosity>=1: - print ">>> TreeProducer.setAlias: %r -> %r..."%(oldbranch,newbranch) + print(">>> TreeProducer.setAlias: %r -> %r..."%(oldbranch,newbranch)) self.tree.SetAlias(newbranch,oldbranch) return newbranch @@ -157,7 +157,7 @@ def endJob(self): if self.cutflow and self.display: nfinal = self.tree.GetEntries() if self.tree else None self.cutflow.display(nfinal=nfinal,final="stored in tree") - print ">>> Write %s..."%(self.outfile.GetName()) + print(">>> Write %s..."%(self.outfile.GetName())) self.outfile.Write() self.outfile.Close() diff --git a/PicoProducer/python/analysis/TreeProducerDiJet.py b/PicoProducer/python/analysis/TreeProducerDiJet.py index de3a881b8..2706910a7 100644 --- a/PicoProducer/python/analysis/TreeProducerDiJet.py +++ b/PicoProducer/python/analysis/TreeProducerDiJet.py @@ -2,14 +2,14 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from TreeProducerHighPT import TreeProducerHighPT +from .TreeProducerHighPT import TreeProducerHighPT class TreeProducerDiJet(TreeProducerHighPT): """Class to create and prepare a custom output file & tree.""" def __init__(self, filename, module, **kwargs): - print "Loading TreeProducerDiJet for %r"%(filename) + print("Loading TreeProducerDiJet for %r"%(filename)) super(TreeProducerDiJet,self).__init__(filename,module,**kwargs) ###################################### diff --git a/PicoProducer/python/analysis/TreeProducerEMu.py b/PicoProducer/python/analysis/TreeProducerEMu.py index 1dc591857..cc541ba33 100644 --- a/PicoProducer/python/analysis/TreeProducerEMu.py +++ b/PicoProducer/python/analysis/TreeProducerEMu.py @@ -2,14 +2,14 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from 
TreeProducerTauPair import TreeProducerTauPair +from .TreeProducerTauPair import TreeProducerTauPair class TreeProducerEMu(TreeProducerTauPair): """Class to create and prepare a custom output file & tree.""" def __init__(self, filename, module, **kwargs): - print "Loading TreeProducerEMu for %r"%(filename) + print("Loading TreeProducerEMu for %r"%(filename)) super(TreeProducerEMu,self).__init__(filename,module,**kwargs) diff --git a/PicoProducer/python/analysis/TreeProducerETau.py b/PicoProducer/python/analysis/TreeProducerETau.py index 2e93bf50c..bde82c86f 100644 --- a/PicoProducer/python/analysis/TreeProducerETau.py +++ b/PicoProducer/python/analysis/TreeProducerETau.py @@ -2,14 +2,14 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from TreeProducerTauPair import TreeProducerTauPair +from .TreeProducerTauPair import TreeProducerTauPair class TreeProducerETau(TreeProducerTauPair): """Class to create and prepare a custom output file & tree.""" def __init__(self, filename, module, **kwargs): - print "Loading TreeProducerETau for %r"%(filename) + print("Loading TreeProducerETau for %r"%(filename)) super(TreeProducerETau,self).__init__(filename,module,**kwargs) diff --git a/PicoProducer/python/analysis/TreeProducerHighPT.py b/PicoProducer/python/analysis/TreeProducerHighPT.py index 4859e0a05..f01b875fb 100644 --- a/PicoProducer/python/analysis/TreeProducerHighPT.py +++ b/PicoProducer/python/analysis/TreeProducerHighPT.py @@ -4,7 +4,7 @@ # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html from ROOT import TH1D -from TreeProducer import TreeProducer +from .TreeProducer import TreeProducer class TreeProducerHighPT(TreeProducer): diff --git a/PicoProducer/python/analysis/TreeProducerMuMu.py 
b/PicoProducer/python/analysis/TreeProducerMuMu.py index fb7c1fcf2..362c83102 100644 --- a/PicoProducer/python/analysis/TreeProducerMuMu.py +++ b/PicoProducer/python/analysis/TreeProducerMuMu.py @@ -2,14 +2,14 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from TreeProducerTauPair import TreeProducerTauPair +from .TreeProducerTauPair import TreeProducerTauPair class TreeProducerMuMu(TreeProducerTauPair): """Class to create and prepare a custom output file & tree.""" def __init__(self, filename, module, **kwargs): - print "Loading TreeProducerMuMu for %r"%(filename) + print("Loading TreeProducerMuMu for %r"%(filename)) super(TreeProducerMuMu,self).__init__(filename,module,**kwargs) diff --git a/PicoProducer/python/analysis/TreeProducerMuNu.py b/PicoProducer/python/analysis/TreeProducerMuNu.py index 1542d91cc..06efa9957 100644 --- a/PicoProducer/python/analysis/TreeProducerMuNu.py +++ b/PicoProducer/python/analysis/TreeProducerMuNu.py @@ -2,14 +2,14 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from TreeProducerHighPT import TreeProducerHighPT +from .TreeProducerHighPT import TreeProducerHighPT class TreeProducerMuNu(TreeProducerHighPT): """Class to create and prepare a custom output file & tree.""" def __init__(self, filename, module, **kwargs): - print "Loading TreeProducerMuNu for %r"%(filename) + print("Loading TreeProducerMuNu for %r"%(filename)) super(TreeProducerMuNu,self).__init__(filename,module,**kwargs) diff --git a/PicoProducer/python/analysis/TreeProducerMuTau.py b/PicoProducer/python/analysis/TreeProducerMuTau.py index 5092025d5..4ae1f6a43 100644 --- a/PicoProducer/python/analysis/TreeProducerMuTau.py +++ b/PicoProducer/python/analysis/TreeProducerMuTau.py @@ -2,14 +2,14 @@ 
# Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from TreeProducerTauPair import TreeProducerTauPair +from .TreeProducerTauPair import TreeProducerTauPair class TreeProducerMuTau(TreeProducerTauPair): """Class to create and prepare a custom output file & tree.""" def __init__(self, filename, module, **kwargs): - print "Loading TreeProducerMuTau for %r"%(filename) + print("Loading TreeProducerMuTau for %r"%(filename)) super(TreeProducerMuTau,self).__init__(filename,module,**kwargs) diff --git a/PicoProducer/python/analysis/TreeProducerTauNu.py b/PicoProducer/python/analysis/TreeProducerTauNu.py index 92c2ce1b8..4ddb79d4f 100644 --- a/PicoProducer/python/analysis/TreeProducerTauNu.py +++ b/PicoProducer/python/analysis/TreeProducerTauNu.py @@ -2,14 +2,14 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from TreeProducerHighPT import TreeProducerHighPT +from .TreeProducerHighPT import TreeProducerHighPT class TreeProducerTauNu(TreeProducerHighPT): """Class to create and prepare a custom output file & tree.""" def __init__(self, filename, module, **kwargs): - print "Loading TreeProducerTauNu for %r"%(filename) + print("Loading TreeProducerTauNu for %r"%(filename)) super(TreeProducerTauNu,self).__init__(filename,module,**kwargs) diff --git a/PicoProducer/python/analysis/TreeProducerTauPair.py b/PicoProducer/python/analysis/TreeProducerTauPair.py index 5e65c5196..bbe50d443 100644 --- a/PicoProducer/python/analysis/TreeProducerTauPair.py +++ b/PicoProducer/python/analysis/TreeProducerTauPair.py @@ -4,7 +4,7 @@ # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html from ROOT import 
TH1D -from TreeProducer import TreeProducer +from .TreeProducer import TreeProducer class TreeProducerTauPair(TreeProducer): diff --git a/PicoProducer/python/analysis/TreeProducerTauTau.py b/PicoProducer/python/analysis/TreeProducerTauTau.py index 8cae72983..6df9b3bd8 100644 --- a/PicoProducer/python/analysis/TreeProducerTauTau.py +++ b/PicoProducer/python/analysis/TreeProducerTauTau.py @@ -2,14 +2,14 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from TreeProducerTauPair import TreeProducerTauPair +from .TreeProducerTauPair import TreeProducerTauPair class TreeProducerTauTau(TreeProducerTauPair): """Class to create and prepare a custom output file & tree.""" def __init__(self, filename, module, **kwargs): - print "Loading TreeProducerTauTau for %r"%(filename) + print("Loading TreeProducerTauTau for %r"%(filename)) super(TreeProducerTauTau,self).__init__(filename,module,**kwargs) diff --git a/PicoProducer/python/analysis/TreeProducerWJ.py b/PicoProducer/python/analysis/TreeProducerWJ.py index f76d3d17b..6a42e547d 100644 --- a/PicoProducer/python/analysis/TreeProducerWJ.py +++ b/PicoProducer/python/analysis/TreeProducerWJ.py @@ -2,14 +2,14 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from TreeProducerHighPT import TreeProducerHighPT +from .TreeProducerHighPT import TreeProducerHighPT class TreeProducerWJ(TreeProducerHighPT): """Class to create and prepare a custom output file & tree.""" def __init__(self, filename, module, **kwargs): - print "Loading TreeProducerWJ for %r"%(filename) + print("Loading TreeProducerWJ for %r"%(filename)) super(TreeProducerWJ,self).__init__(filename,module,**kwargs) ############## diff --git a/PicoProducer/python/analysis/utils.py 
b/PicoProducer/python/analysis/utils.py index d31a8db6c..1e92e6032 100644 --- a/PicoProducer/python/analysis/utils.py +++ b/PicoProducer/python/analysis/utils.py @@ -58,10 +58,10 @@ def ensurebranches(tree,branches): def redirectbranch(oldbranch,newbranch): """Redirect some branch names. newbranch -> oldbranch""" if isinstance(oldbranch,str): # rename - print("redirectbranch: directing %r -> %r"%(newbranch,oldbranch)) + print(("redirectbranch: directing %r -> %r"%(newbranch,oldbranch))) exec("setattr(Event,newbranch,property(lambda self: self._tree.readBranch(%r)))"%(oldbranch)) else: # set default value - print("redirectbranch: directing %r -> %r"%(newbranch,oldbranch)) + print(("redirectbranch: directing %r -> %r"%(newbranch,oldbranch))) exec("setattr(Event,newbranch,%s)"%(oldbranch)) From cd1eb851f1c939f806e4de2e1a5937033853ffff Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 12:40:42 +0200 Subject: [PATCH 02/55] make python3 compatible (common) --- common/python/tools/Table.py | 11 ++++++----- common/python/tools/file.py | 23 ++++++++++++----------- common/python/tools/log.py | 20 ++++++++++++-------- common/python/tools/math.py | 2 +- common/python/tools/utils.py | 17 +++++++++-------- 5 files changed, 40 insertions(+), 33 deletions(-) diff --git a/common/python/tools/Table.py b/common/python/tools/Table.py index 177521d34..1ba62f26a 100644 --- a/common/python/tools/Table.py +++ b/common/python/tools/Table.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Author: Izaak Neutelings (2017) +from __future__ import print_function # for python3 compatibility import re formatrexp = re.compile(r"(%(-?\d*)\.?\d*[sfgdir])") @@ -59,8 +60,8 @@ def __init__(self, *args, **kwargs): self.columnformats.insert(0,self.rowformat[icol:ilast]) ilast = icol if self.verbosity>=self.level+3: - print ">>> headerformat=%r, rowformat=%r"%(self.headerformat,self.rowformat) - print ">>> columnformats=%s, ncols=%r"%(self.columnformats,self.ncols) + print(">>> headerformat=%r, 
rowformat=%r"%(self.headerformat,self.rowformat)) + print(">>> columnformats=%s, ncols=%r"%(self.columnformats,self.ncols)) def __str__(self): return '\n'.join(self.rows) @@ -68,17 +69,17 @@ def __str__(self): def printtable(self): """Print full table.""" for r in self.rows: - print r + print(r) def printheader(self,*args,**kwargs): """Print row.""" if self.verbosity>=self.level: - print self.header(*args,**kwargs) + print(self.header(*args,**kwargs)) def printrow(self,*args,**kwargs): """Print row.""" if self.verbosity>=self.level: - print self.row(*args,**kwargs) + print(self.row(*args,**kwargs)) def header(self,*args,**kwargs): """Header for table which is assumed to be all strings.""" diff --git a/common/python/tools/file.py b/common/python/tools/file.py index 0eac56920..af52b7087 100644 --- a/common/python/tools/file.py +++ b/common/python/tools/file.py @@ -1,4 +1,5 @@ # Author: Izaak Neutelings (May 2020) +from __future__ import print_function # for python3 compatibility import os, re, shutil, glob import importlib, traceback import ROOT; ROOT.PyConfig.IgnoreCommandLineOptions = True @@ -37,9 +38,9 @@ def ensuredir(*dirnames,**kwargs): elif not os.path.exists(dirname): os.makedirs(dirname) if verbosity>=1: - print '>>> Made directory "%s"'%(dirname) + print(">>> Made directory %r"%(dirname)) if not os.path.exists(dirname): - print '>>> Failed to make directory "%s"'%(dirname) + print(">>> Failed to make directory %r"%(dirname)) elif empty: for filename in os.listdir(dirname): filepath = os.path.join(dirname,filename) @@ -59,7 +60,7 @@ def ensurefile(*paths,**kwargs): if fatal: raise IOError("Did not find file %s."%(path)) else: - print ">>> Warning! Did not find file %s."%(path) + print(">>> Warning! 
Did not find file %s."%(path)) #path = None return path @@ -88,7 +89,7 @@ def expandfiles(files,verb=0): if isglob(fname): fnames = glob.glob(fname) # expand glob pattern if verb>=1: - print ">>> expandfiles: %r -> %s"%(fname,fnames) + print(">>> expandfiles: %r -> %s"%(fname,fnames)) index = files.index(fname) files = files[:index] + fnames + files[index+1:] # insert expanded list return files @@ -106,10 +107,10 @@ def rmfile(filepaths,verb=0): for filepath in filepaths: if os.path.isfile(filepath): if verb>=2: - print ">>> rmfile: Removing %r..."%(filepath) + print(">>> rmfile: Removing %r..."%(filepath)) os.unlink(filepath) elif verb>=2: - print ">>> rmfile: Did not find %r..."%(filepath) + print(">>> rmfile: Did not find %r..."%(filepath)) def getline(fname,iline): @@ -145,7 +146,7 @@ def ensureTDirectory(file,dirname,cd=True,verb=0): if not directory: directory = file.mkdir(dirname) if verb>=1: - print ">>> created directory %s in %s"%(dirname,file.GetPath()) + print(">>> Created directory %s in %s"%(dirname,file.GetPath())) if cd: directory.cd() return directory @@ -156,7 +157,7 @@ def ensureinit(*paths,**kwargs): init = os.path.join(os.path.join(*paths),'__init__.py') script = kwargs.get('by',"") if not os.path.isfile(init): - print ">>> Creating '%s' to allow import of module..."%(init) + print(">>> Creating '%s' to allow import of module..."%(init)) with open(init,'w') as file: if script: script = "by "+script @@ -170,13 +171,13 @@ def gethist(file,histname,setdir=True,close=None,retfile=False,fatal=True,warn=T if close==None: close = not retfile if not file or file.IsZombie(): - LOG.throw(IOError,'Could not open file by name "%s"'%(filename)) + LOG.throw(IOError,"Could not open file by name %r"%(filename)) hist = file.Get(histname) if not hist: if fatal: - LOG.throw(IOError,'Did not find histogram %r in file %s!'%(histname,file.GetPath())) + LOG.throw(IOError,"Did not find histogram %r in file %r!"%(histname,file.GetPath())) elif warn: - LOG.warning('Did 
not find histogram %r in file %s!'%(histname,file.GetPath())) + LOG.warn("Did not find histogram %r in file %r!"%(histname,file.GetPath())) if (close or setdir) and isinstance(hist,ROOT.TH1): hist.SetDirectory(0) if close: # close TFile diff --git a/common/python/tools/log.py b/common/python/tools/log.py index 34d42106c..166a22329 100644 --- a/common/python/tools/log.py +++ b/common/python/tools/log.py @@ -1,11 +1,15 @@ # Author: Izaak Neutelings (May 2020) +from __future__ import print_function # for python3 compatibility tcol_dict = { 'black': 30, 'red': 31, 'green': 32, 'yellow': 33, 'orange': 33, 'blue': 34, 'purple': 35, 'magenta': 36, 'white': 37, 'grey': 90, 'none': 0 } bcol_dict = {k: (10+v if v else v) for k,v in tcol_dict.iteritems()} + + def color(string,c='green',b=False,ul=False,**kwargs): + """Give string color in shell print out.""" tcol_key = kwargs.get('color', c ) bcol_key = kwargs.get('bg', None ) bcol_key = kwargs.get('background',bcol_key) @@ -84,7 +88,7 @@ def setverbosity(self,*args): def info(self,string,**kwargs): """Info""" - print self.pre+string + print(self.pre+string) def verbose(self,string,verb=None,level=1,**kwargs): """Check verbosity and print if verbosity level is matched.""" @@ -101,7 +105,7 @@ def verbose(self,string,verb=None,level=1,**kwargs): string = color(string,col) if isinstance(col,str) else color(string) if ul: string = underline(string) - print pre+string + print(pre+string) return True return False @@ -115,11 +119,11 @@ def getcolor(self,*args,**kwargs): def color(self,*args,**kwargs): """Print color.""" - print self.pre+color(*args,**kwargs) + print(self.pre+color(*args,**kwargs)) def underline(self,*args,**kwargs): """Print underline.""" - print self.pre+underline(*args,**kwargs) + print(self.pre+underline(*args,**kwargs)) def ul(self,*args,**kwargs): """Print underline.""" @@ -130,24 +134,24 @@ def warning(self,string,trigger=True,**kwargs): if trigger: exclam = color(kwargs.get('exclam',"Warning! 
"),'yellow',b=True,pre=self.pre+kwargs.get('pre',"")) message = color(string,'yellow',pre="") - print exclam+message + print(exclam+message) def warn(self,*args,**kwargs): """Alias for Logger.warn.""" return self.warning(*args,**kwargs) def title(self,*args,**kwargs): - print header(*args,**kwargs) + print(header(*args,**kwargs)) def header(self,*args,**kwargs): - print header(*args,**kwargs) + print(header(*args,**kwargs)) def error(self,string,trigger=True,**kwargs): """Print error if triggered without throwing an exception.""" if trigger: exclam = color(kwargs.get('exclam',"ERROR! "),'red',b=True,pre=self.pre+kwargs.get('pre',"")) message = color(string,'red',pre="") - print exclam+message + print(exclam+message) return trigger def fatal(self,string,trigger=True,**kwargs): diff --git a/common/python/tools/math.py b/common/python/tools/math.py index 44c8c21f9..da034c430 100644 --- a/common/python/tools/math.py +++ b/common/python/tools/math.py @@ -72,7 +72,7 @@ def partition(mylist,nparts): nleft -= nnew divider -= 1 findex = lindex - #print nnew + #print("partition: nnew=%r"%(nnew)) return parts diff --git a/common/python/tools/utils.py b/common/python/tools/utils.py index e2bb97b3c..dcade373d 100644 --- a/common/python/tools/utils.py +++ b/common/python/tools/utils.py @@ -1,4 +1,5 @@ # Author: Izaak Neutelings (May 2020) +from __future__ import print_function # for python3 compatibility import os, sys, re from itertools import islice from subprocess import Popen, PIPE, STDOUT, CalledProcessError @@ -10,32 +11,32 @@ def execute(command,dry=False,fatal=True,verb=0): command = str(command) out = "" if dry: - print ">>> Dry run: %r"%(command) + print(">>> Dry run: %r"%(command)) else: if verb>=1: - print ">>> Executing: %r"%(command) + print(">>> Executing: %r"%(command)) try: #process = Popen(command.split(),stdout=PIPE,stderr=STDOUT) #,shell=True) process = Popen(command,stdout=PIPE,stderr=STDOUT,bufsize=1,shell=True) #,universal_newlines=True for line in 
iter(process.stdout.readline,""): if verb>=1: # real time print out (does not work for python scripts without flush) - print line.rstrip() + print(line.rstrip()) out += line process.stdout.close() retcode = process.wait() - ##print 0, process.communicate() + ##print(0, process.communicate()) ##out = process.stdout.read() ##err = process.stderr.read() - ##print out + ##print(out) out = out.strip() except Exception as e: if verb<1: - print out #">>> Output: %s"%(out) - print ">>> Failed: %r"%(command) + print(out #">>> Output: %s"%(out)) + print(">>> Failed: %r"%(command)) raise e if retcode and fatal: if verb<1: - print out + print(out) raise CalledProcessError(retcode,command) #raise Exception("Command '%s' ended with return code %s"%(command,retcode)) #,err) return out From 3b7cbd662f3569cec2198bca4726b06364b7f6be Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 12:46:03 +0200 Subject: [PATCH 03/55] remove unneeded list() --- PicoProducer/python/analysis/Cutflow.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/PicoProducer/python/analysis/Cutflow.py b/PicoProducer/python/analysis/Cutflow.py index 7d3385819..61c21f671 100644 --- a/PicoProducer/python/analysis/Cutflow.py +++ b/PicoProducer/python/analysis/Cutflow.py @@ -42,7 +42,7 @@ def display(self,itot=1,nfinal=None,final="Final selection"): ntot = self.hist.GetBinContent(itot) # number of events before any cuts nlast = (-99,ntot) #padcut = 3+max(len(c) for c in self.cuts) # padding - values = [self.hist.GetBinContent(1+i) for k, i in list(self.cuts.items()) if self.hist.GetBinContent(1+i)>0] # all values > 0 + values = [self.hist.GetBinContent(1+i) for k, i in self.cuts.items() if self.hist.GetBinContent(1+i)>0] # all values > 0 maxval = max(abs(x) for x in values) padevt = 4+(int(floor(log10(maxval))) if maxval>0 else 0) # pad all numbers of events padtot = 3+(int(floor(log10(ntot))) if ntot>0 else 0) # pad total number of events @@ -50,7 +50,7 @@ def 
display(self,itot=1,nfinal=None,final="Final selection"): print(underline("Cutflow:"+' '*(46+padevt+padtot),pre=">>> ")) print(underline("%5s %5s / %5s = %-8s %-8s %-23s"%( # header '','npass'.rjust(padevt),'ntot'.rjust(padtot),'abseff','releff','cut'),pre=">>> ")) - for cut, index in sorted(list(self.cuts.items()),key=lambda x: x[1]): + for cut, index in sorted(self.cuts.items(),key=lambda x: x[1]): nevts = self.hist.GetBinContent(1+index) title = self.hist.GetXaxis().GetBinLabel(1+index) or cut frac = " " @@ -67,4 +67,4 @@ def display(self,itot=1,nfinal=None,final="Final selection"): nomstr = str(float(nfinal)).rjust(padevt) print(underline("%5s %5s / %5s %s %8s %-23s"%('',nomstr,denstr,frac,'',final),pre=">>> ")) - \ No newline at end of file + From 1ad317bba483a0da19ffc272d546c966c50590b1 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 13:00:32 +0200 Subject: [PATCH 04/55] make python3 compatible (batch) --- PicoProducer/python/batch/BatchSystem.py | 9 +++++---- PicoProducer/python/batch/HTCondor.py | 4 ++-- PicoProducer/python/batch/SGE.py | 10 +++++----- PicoProducer/python/batch/SLURM.py | 4 ++-- PicoProducer/python/batch/utils.py | 23 ++++++++++++----------- 5 files changed, 26 insertions(+), 24 deletions(-) diff --git a/PicoProducer/python/batch/BatchSystem.py b/PicoProducer/python/batch/BatchSystem.py index 93f692094..e5d0930c9 100644 --- a/PicoProducer/python/batch/BatchSystem.py +++ b/PicoProducer/python/batch/BatchSystem.py @@ -1,4 +1,5 @@ # Author: Izaak Neutelings (May 2020) +from past.builtins import basestring # for python2 compatibility #import os, re, shutil import os, re import importlib @@ -24,7 +25,7 @@ def __repr__(self): def statuscode(self,code): status = '?' 
- for skey, codelist in self.statusdict.iteritems(): + for skey, codelist in self.statusdict.items(): if code in codelist: status = skey return status @@ -41,7 +42,7 @@ def parsejobs(self,rows,**kwargs): jobs = JobList([]) rows = rows.split('\n') if len(rows)>0 and self.verbosity>=1: - print ">>> %10s %10s %8s %8s %s"%('user','jobid','taskid','status','args') + print(">>> %10s %10s %8s %8s %s"%('user','jobid','taskid','status','args')) for row in rows: values = row.split() if len(values)<5 or not values[1].isdigit() or not values[2].isdigit(): @@ -52,12 +53,12 @@ def parsejobs(self,rows,**kwargs): status = self.statuscode(values[3]) args = ' '.join(values[4:]) if self.verbosity>=1: - print ">>> %10s %10s %8s %8s %s"%(user,jobid,taskid,status,args) + print(">>> %10s %10s %8s %8s %s"%(user,jobid,taskid,status,args)) job = Job(self,jobid,taskid=taskid,args=args,status=status) jobs.append(job) if verbosity>=3: for job in jobs: - print repr(job) + print(repr(job)) return jobs @abstractmethod diff --git a/PicoProducer/python/batch/HTCondor.py b/PicoProducer/python/batch/HTCondor.py index 6c8f7e319..094130527 100644 --- a/PicoProducer/python/batch/HTCondor.py +++ b/PicoProducer/python/batch/HTCondor.py @@ -65,8 +65,8 @@ def submit(self,script,taskfile=None,**kwargs): for match in matches: jobids.append(int(match)) if fail: - print ">>> Warning! Submission failed!" - print out + print(">>> Warning! Submission failed!") + print(out) jobid = jobids[0] if len(jobids)==1 else jobids if len(jobids)>1 else 0 return jobid diff --git a/PicoProducer/python/batch/SGE.py b/PicoProducer/python/batch/SGE.py index 1f503c5e0..89523b0ef 100644 --- a/PicoProducer/python/batch/SGE.py +++ b/PicoProducer/python/batch/SGE.py @@ -48,8 +48,8 @@ def submit(self,script,taskfile=None,**kwargs): out = self.execute(subcmd,dry=dry,verb=verbosity) for line in out.split(os.linesep): if any(f in line for f in failflags): - print ">>> Warning! Submission failed!" - print out + print(">>> Warning! 
Submission failed!") + print(out) matches = jobidrexp.findall(line) for match in matches: jobids.append(int(match)) @@ -80,20 +80,20 @@ def jobs(self,jobids,**kwargs): rows = self.execute(subcmd,verb=verbosity) jobs = JobList() if rows and self.verbosity>=1: - print ">>> %10s %10s %8s %8s %s"%('user','jobid','taskid','status','args') + print((">>> %10s %10s %8s %8s %s"%('user','jobid','taskid','status','args'))) for row in rows.split('\n'): values = row.split() if len(values)<5: continue if verbosity>=3: - print ">>> job row: %s"%(row) + print((">>> job row: %s"%(row))) user = values[0] jobid = values[1] taskid = values[2] status = self.statusdict.get(int(values[3]),'?') args = ' '.join(values[4:]) if self.verbosity>=1: - print ">>> %10s %10s %8s %8s %s"%(user,jobid,taskid,status,args) + print((">>> %10s %10s %8s %8s %s"%(user,jobid,taskid,status,args))) job = Job(self,jobid,taskid=taskid,args=args,status=status) jobs.append(job) return jobs diff --git a/PicoProducer/python/batch/SLURM.py b/PicoProducer/python/batch/SLURM.py index 5bda25db4..fa8d73bff 100644 --- a/PicoProducer/python/batch/SLURM.py +++ b/PicoProducer/python/batch/SLURM.py @@ -73,8 +73,8 @@ def submit(self,script,taskfile=None,**kwargs): for match in matches: jobids.append(int(match)) if fail: - print ">>> Warning! Submission failed!" - print out + print(">>> Warning! 
Submission failed!") + print(out) jobid = jobids[0] if len(jobids)==1 else jobids if len(jobids)>1 else 0 return jobid diff --git a/PicoProducer/python/batch/utils.py b/PicoProducer/python/batch/utils.py index 92d83aa7f..9e93e382a 100644 --- a/PicoProducer/python/batch/utils.py +++ b/PicoProducer/python/batch/utils.py @@ -1,4 +1,5 @@ # Author: Izaak Neutelings (May 2020) +from past.builtins import basestring # for python2 compatibility import os, re, glob import importlib from TauFW.common.tools.file import ensureTFile @@ -29,7 +30,7 @@ def chunkify_by_evts(fnames,maxevts,evenly=True,evtdict=None,verb=0): if verb<=0 and len(fnames)>=5: bar = LoadingBar(len(fnames),width=20,pre=">>> Checking number of events: ",counter=True,remove=True) elif verb>=4: - print ">>> chunkify_by_evts: events per file:" + print(">>> chunkify_by_evts: events per file:") for fname in fnames[:]: if evtsplitexp.match(fname): # already split; cannot be split again # TODO: add maxevts to ntot ? @@ -38,7 +39,7 @@ def chunkify_by_evts(fnames,maxevts,evenly=True,evtdict=None,verb=0): if evtdict and fname in evtdict: # get number of events from sample's dictionary to speed up nevts = evtdict[fname] if verb>=4: - print ">>> %10d %s (dict)"%(nevts,fname) + print(">>> %10d %s (dict)"%(nevts,fname)) else: # get number of events from file file = ensureTFile(fname,'READ') nevts = file.Get('Events').GetEntries() @@ -46,7 +47,7 @@ def chunkify_by_evts(fnames,maxevts,evenly=True,evtdict=None,verb=0): if isinstance(evtdict,dict): evtdict[fname] = nevts # store for possible later reuse (if same sample is submitted multiple times) if verb>=4: - print ">>> %10d %s"%(nevts,fname) + print(">>> %10d %s"%(nevts,fname)) if nevts=1: - print ">>> chunkify_by_evts: %d small files (<%d events) and %d large files (>=%d events)"%( + print(">>> chunkify_by_evts: %d small files (<%d events) and %d large files (>=%d events)"%() len(nsmall),maxevts,len(nlarge),maxevts) for nevts in nlarge: for fname in nlarge[nevts]: # split 
large files into several chunks @@ -68,7 +69,7 @@ def chunkify_by_evts(fnames,maxevts,evenly=True,evtdict=None,verb=0): nchunks = ceil(float(nevts)/maxevts) maxevts_ = int(ceil(nevts/nchunks)) # new maxevts per chunk if verb>=3: - print ">>> nevts/maxevts = %d/%d = %.2f => make %d chunks with max. %d events"%( + print(">>> nevts/maxevts = %d/%d = %.2f => make %d chunks with max. %d events"%() nevts,maxevts,nevts/float(maxevts),nchunks,maxevts_) ifirst = 0 # first event to process in first chunk while ifirst=4: - print ">>> chunkify_by_evts: chunks = [" + print(">>> chunkify_by_evts: chunks = [") for chunk in result: - print ">>> %s"%(chunk) - print ">>> ]" + print(">>> %s"%(chunk)) + print(">>> ]") return ntot, result @@ -125,12 +126,12 @@ def getcfgsamples(jobcfgnames,filter=[ ],veto=[ ],dtype=[ ],verb=0): samples = [ ] if verb>=2: if jobcfgs: - print ">>> getcfgsamples: Found job config:" + print(">>> getcfgsamples: Found job config:") else: - print ">>> getcfgsamples: Found NO job configs %s"%(jobcfgnames) + print(">>> getcfgsamples: Found NO job configs %s"%(jobcfgnames)) for cfgname in sorted(jobcfgs): if verb>=2: - print ">>> %s"%(cfgname) + print(">>> %s"%(cfgname)) sample = Sample.loadjson(cfgname) if filters and not sample.match(filters,verb): continue if vetoes and sample.match(vetoes,verb): continue From 25f7b2114a22b3c455362094ce6d9313bd78707a Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 13:34:32 +0200 Subject: [PATCH 05/55] make python3 compatible (pico) --- PicoProducer/python/pico/config.py | 183 +++++++++---------- PicoProducer/python/pico/job.py | 274 ++++++++++++++--------------- PicoProducer/python/pico/run.py | 62 +++---- PicoProducer/scripts/pico.py | 6 +- 4 files changed, 263 insertions(+), 262 deletions(-) diff --git a/PicoProducer/python/pico/config.py b/PicoProducer/python/pico/config.py index 0d7a5d370..40fba6f1d 100755 --- a/PicoProducer/python/pico/config.py +++ b/PicoProducer/python/pico/config.py @@ -1,4 +1,5 @@ # 
Author: Izaak Neutelings (February 2022) +from past.builtins import basestring # for python2 compatibility import os, glob, json from TauFW.common.tools.file import ensurefile, ensureinit from TauFW.common.tools.string import repkey, rreplace @@ -15,26 +16,26 @@ def main_list(args): """List contents of configuration for those lazy to do 'cat config/config.json'.""" if args.verbosity>=1: - print ">>> main_list", args + print(">>> main_list", args) verbosity = args.verbosity cfgname = CONFIG._path if verbosity>=1: - print '-'*80 - print ">>> %-14s = %s"%('cfgname',cfgname) - print ">>> %-14s = %s"%('config',CONFIG) - print '-'*80 + print('-'*80) + print(">>> %-14s = %s"%('cfgname',cfgname)) + print(">>> %-14s = %s"%('config',CONFIG)) + print('-'*80) - print ">>> Configuration %s:"%(cfgname) - for variable, value in CONFIG.iteritems(): + print(">>> Configuration %s:"%(cfgname)) + for variable, value in CONFIG.items(): variable = "'"+color(variable)+"'" if isinstance(value,dict): - print ">>> %s:"%(variable) - for key, item in value.iteritems(): + print(">>> %s:"%(variable)) + for key, item in value.items(): if isinstance(item,basestring): item = str(item) - print ">>> %-12r -> %r"%(str(key),str(item)) + print(">>> %-12r -> %r"%(str(key),str(item))) else: if isinstance(value,basestring): value = str(value) - print ">>> %-24s = %r"%(variable,value) + print(">>> %-24s = %r"%(variable,value)) @@ -45,7 +46,7 @@ def main_list(args): def main_get(args): """Get information of given variable in configuration or samples.""" if args.verbosity>=1: - print ">>> main_get", args + print(">>> main_get", args) variable = args.variable eras = args.eras # eras to loop over and run channels = args.channels or [""] # channels to loop over and run @@ -66,19 +67,19 @@ def main_get(args): getnevts = variable in ['nevents','nevts'] cfgname = CONFIG._path if verbosity>=1: - print '-'*80 - print ">>> %-14s = %s"%('variable',variable) - print ">>> %-14s = %s"%('eras',eras) - print ">>> %-14s = 
%s"%('channels',channels) - print ">>> %-14s = %s"%('cfgname',cfgname) - print ">>> %-14s = %s"%('config',CONFIG) - print ">>> %-14s = %s"%('checkdas',checkdas) - print ">>> %-14s = %s"%('checklocal',checklocal) - print ">>> %-14s = %s"%('split',split) - print ">>> %-14s = %s"%('limit',limit) - print ">>> %-14s = %s"%('writedir',writedir) - print ">>> %-14s = %s"%('ncores',ncores) - print '-'*80 + print('-'*80) + print(">>> %-14s = %s"%('variable',variable)) + print(">>> %-14s = %s"%('eras',eras)) + print(">>> %-14s = %s"%('channels',channels)) + print(">>> %-14s = %s"%('cfgname',cfgname)) + print(">>> %-14s = %s"%('config',CONFIG)) + print(">>> %-14s = %s"%('checkdas',checkdas)) + print(">>> %-14s = %s"%('checklocal',checklocal)) + print(">>> %-14s = %s"%('split',split)) + print(">>> %-14s = %s"%('limit',limit)) + print(">>> %-14s = %s"%('writedir',writedir)) + print(">>> %-14s = %s"%('ncores',ncores)) + print('-'*80) # LIST SAMPLES if variable=='samples': @@ -87,17 +88,17 @@ def main_get(args): for era in eras: for channel in channels: if channel: - print ">>> Getting file list for era %r, channel %r"%(era,channel) + print(">>> Getting file list for era %r, channel %r"%(era,channel)) else: - print ">>> Getting file list for era %r"%(era) + print(">>> Getting file list for era %r"%(era)) samples = getsamples(era,channel=channel,dtype=dtypes,filter=filters,veto=vetoes, dasfilter=dasfilters,dasveto=dasvetoes,verb=verbosity) if not samples: LOG.warning("No samples found for era %r."%(era)) for sample in samples: - print ">>> %s"%(bold(sample.name)) + print(">>> %s"%(bold(sample.name))) for path in sample.paths: - print ">>> %s"%(path) + print(">>> %s"%(path)) # LIST SAMPLE FILES elif variable in ['files','nevents','nevts']: @@ -109,10 +110,10 @@ def main_get(args): for channel in channels: target = "file list" if variable=='files' else "nevents" if channel: - print ">>> Getting %s for era %r, channel %r"%(target,era,channel) + print(">>> Getting %s for era %r, 
channel %r"%(target,era,channel)) else: - print ">>> Getting %s for era %r"%(target,era) - print ">>> " + print(">>> Getting %s for era %r"%(target,era)) + print(">>> ") # GET SAMPLES LOG.insist(era in CONFIG.eras,"Era '%s' not found in the configuration file. Available: %s"%(era,CONFIG.eras)) @@ -121,38 +122,38 @@ def main_get(args): # LOOP over SAMPLES for sample in samples: - print ">>> %s"%(bold(sample.name)) + print(">>> %s"%(bold(sample.name))) for path in sample.paths: - print ">>> %s"%(bold(path)) + print(">>> %s"%(bold(path))) if getnevts or checkdas or checklocal: das = checkdas and not checklocal # checklocal overrides checkdas refresh = das # (not sample.storage and all('/store' in f for f in sample.files) nevents = sample.getnevents(das=das,refresh=refresh,verb=verbosity+1) storage = "(%s)"%sample.storage.__class__.__name__ if checklocal else "(DAS)" if checkdas else "" - print ">>> %-7s = %s %s"%('nevents',nevents,storage) + print(">>> %-7s = %s %s"%('nevents',nevents,storage)) if variable=='files': infiles = sample.getfiles(das=checkdas,url=inclurl,limit=limit,verb=verbosity+1) - print ">>> %-7s = %r"%('channel',channel) - print ">>> %-7s = %r"%('url',sample.url) - print ">>> %-7s = %r"%('postfix',sample.postfix) - print ">>> %-7s = %s"%('nfiles',len(infiles)) - print ">>> %-7s = [ "%('infiles') + print(">>> %-7s = %r"%('channel',channel)) + print(">>> %-7s = %r"%('url',sample.url)) + print(">>> %-7s = %r"%('postfix',sample.postfix)) + print(">>> %-7s = %s"%('nfiles',len(infiles))) + print(">>> %-7s = [ "%('infiles')) for file in infiles: - print ">>> %r"%file - print ">>> ]" - print ">>> " + print(">>> %r"%file) + print(">>> ]") + print(">>> ") if writedir: # write files to text files sample.filelist = None # do not load from existing text file; overwrite existing ones flistname = repkey(writedir,ERA=era,GROUP=sample.group,SAMPLE=sample.name,TAG=tag) - print ">>> Write list to %r..."%(flistname) + print(">>> Write list to %r..."%(flistname)) 
sample.writefiles(flistname,nevts=getnevts,das=checkdas,refresh=checkdas,ncores=ncores,verb=verbosity) # CONFIGURATION else: if variable in CONFIG: - print ">>> Configuration of %r: %s"%(variable,color(CONFIG[variable])) + print(">>> Configuration of %r: %s"%(variable,color(CONFIG[variable]))) else: - print ">>> Did not find %r in the configuration"%(variable) + print(">>> Did not find %r in the configuration"%(variable)) @@ -163,7 +164,7 @@ def main_get(args): def main_write(args): """Get information of given variable in configuration or samples.""" if args.verbosity>=1: - print ">>> main_write", args + print(">>> main_write", args) listname = args.listname # write sample file list to text file eras = args.eras # eras to loop over and run channels = args.channels or [""] # channels to loop over and run @@ -181,14 +182,14 @@ def main_write(args): verbosity = args.verbosity cfgname = CONFIG._path if verbosity>=1: - print '-'*80 - print ">>> %-14s = %s"%('listname',listname) - print ">>> %-14s = %s"%('getnevts',getnevts) - print ">>> %-14s = %s"%('eras',eras) - print ">>> %-14s = %s"%('channels',channels) - print ">>> %-14s = %s"%('cfgname',cfgname) - print ">>> %-14s = %s"%('config',CONFIG) - print '-'*80 + print('-'*80) + print(">>> %-14s = %s"%('listname',listname)) + print(">>> %-14s = %s"%('getnevts',getnevts)) + print(">>> %-14s = %s"%('eras',eras)) + print(">>> %-14s = %s"%('channels',channels)) + print(">>> %-14s = %s"%('cfgname',cfgname)) + print(">>> %-14s = %s"%('config',CONFIG)) + print('-'*80) # LOOP over ERAS & CHANNELS if not eras: @@ -198,8 +199,8 @@ def main_write(args): info = ">>> Getting file list for era %r"%(era) if channel: info += ", channel %r"%(channel) - print info - print ">>> " + print(info) + print(">>> ") LOG.insist(era in CONFIG.eras,"Era '%s' not found in the configuration file. 
Available: %s"%(era,CONFIG.eras)) samples0 = getsamples(era,channel=channel,dtype=dtypes,filter=filters,veto=vetoes, @@ -217,24 +218,24 @@ def main_write(args): break if retry>0 and len(samples0)>1: if retries>=2: - print ">>> Retry %d/%d: %d/%d samples...\n>>>"%(retry,retries,len(samples),len(samples0)) + print(">>> Retry %d/%d: %d/%d samples...\n>>>"%(retry,retries,len(samples),len(samples0))) else: - print ">>> Trying again %d/%d samples...\n>>>"%(len(samples),len(samples0)) + print(">>> Trying again %d/%d samples...\n>>>"%(len(samples),len(samples0))) for sample in samples: - print ">>> %s"%(bold(sample.name)) + print(">>> %s"%(bold(sample.name))) sample.filelist = None # do not load from existing text file; overwrite existing ones for path in sample.paths: - print ">>> %s"%(bold(path)) + print(">>> %s"%(bold(path))) #infiles = sample.getfiles(das=checkdas,url=inclurl,limit=limit,verb=verbosity+1) flistname = repkey(listname,ERA=era,GROUP=sample.group,SAMPLE=sample.name) #,TAG=tag try: sample.writefiles(flistname,nevts=getnevts,skipempty=skipempty,das=checkdas,refresh=checkdas,ncores=ncores,verb=verbosity) except IOError as err: # one of the ROOT file could not be opened - print "IOError: "+err.message + print("IOError: "+err.message) if retry>> Will try again..." 
+ print(">>> Will try again...") sampleset[retry+1].append(sample) - print ">>> " + print(">>> ") @@ -245,7 +246,7 @@ def main_write(args): def main_set(args): """Set variables in the config file.""" if args.verbosity>=1: - print ">>> main_set", args + print(">>> main_set", args) variable = args.variable key = args.key # 'channel' or 'era' value = args.value @@ -257,12 +258,12 @@ def main_set(args): elif variable in ['channel','era']: LOG.throw(IOError,"Variable '%s' is reserved for dictionaries!"%(variable)) if verbosity>=1: - print '-'*80 - print ">>> Setting variable '%s' to '%s' config"%(color(variable),value) + print('-'*80) + print(">>> Setting variable '%s' to '%s' config"%(color(variable),value)) if verbosity>=1: - print ">>> %-14s = %s"%('cfgname',cfgname) - print ">>> %-14s = %s"%('config',CONFIG) - print '-'*80 + print(">>> %-14s = %s"%('cfgname',cfgname)) + print(">>> %-14s = %s"%('config',CONFIG)) + print('-'*80) if variable=='all': if 'default' in value: GLOB.setdefaultconfig(verb=verb) @@ -284,7 +285,7 @@ def main_set(args): def main_link(args): """Link channels or eras in the config file.""" if args.verbosity>=1: - print ">>> main_link", args + print(">>> main_link", args) variable = args.subcommand varkey = variable+'s' key = args.key @@ -292,14 +293,14 @@ def main_link(args): verbosity = args.verbosity cfgname = CONFIG._path if verbosity>=1: - print '-'*80 - print ">>> Linking %s '%s' to '%s' in the configuration..."%(variable,color(key),value) + print('-'*80) + print(">>> Linking %s '%s' to '%s' in the configuration..."%(variable,color(key),value)) if verbosity>=1: - print ">>> %-14s = %s"%('cfgname',cfgname) - print ">>> %-14s = %s"%('key',key) - print ">>> %-14s = %s"%('value',value) - print ">>> %-14s = %s"%('config',CONFIG) - print '-'*80 + print(">>> %-14s = %s"%('cfgname',cfgname)) + print(">>> %-14s = %s"%('key',key)) + print(">>> %-14s = %s"%('value',value)) + print(">>> %-14s = %s"%('config',CONFIG)) + print('-'*80) # SANITY CHECKS if 
varkey not in CONFIG: @@ -333,7 +334,7 @@ def main_link(args): LOG.insist(glob.glob(path),"Did not find any sample lists '%s'"%(path)) ensureinit(os.path.dirname(path),by="pico.py") if value!=oldval: - print ">>> Converted '%s' to '%s'"%(oldval,value) + print(">>> Converted '%s' to '%s'"%(oldval,value)) CONFIG[varkey][key] = value CONFIG.write(backup=True) @@ -346,23 +347,23 @@ def main_link(args): def main_rm(args): """Remove variable from the config file.""" if args.verbosity>=1: - print ">>> main_rm", args + print(">>> main_rm", args) variable = args.variable key = args.key # 'channel' or 'era' verbosity = args.verbosity cfgname = CONFIG._path if verbosity>=1: - print '-'*80 + print('-'*80) if key: - print ">>> Removing %s '%s' from the configuration..."%(variable,color(key)) + print(">>> Removing %s '%s' from the configuration..."%(variable,color(key))) else: - print ">>> Removing variable '%s' from the configuration..."%(color(variable)) + print(">>> Removing variable '%s' from the configuration..."%(color(variable))) if verbosity>=1: - print ">>> %-14s = %s"%('variable',variable) - print ">>> %-14s = %s"%('key',key) - print ">>> %-14s = %s"%('cfgname',cfgname) - print ">>> %-14s = %s"%('config',CONFIG) - print '-'*80 + print(">>> %-14s = %s"%('variable',variable)) + print(">>> %-14s = %s"%('key',key)) + print(">>> %-14s = %s"%('cfgname',cfgname)) + print(">>> %-14s = %s"%('config',CONFIG)) + print('-'*80) if key: # redirect 'channel' and 'era' keys to main_link variable = variable+'s' if variable in CONFIG: @@ -370,13 +371,13 @@ def main_rm(args): CONFIG[variable].pop(key,None) CONFIG.write(backup=True) else: - print ">>> %s '%s' not in the configuration. Nothing to remove..."%(variable.capitalize(),key) + print(">>> %s '%s' not in the configuration. Nothing to remove..."%(variable.capitalize(),key)) else: - print ">>> Variable '%s' not in the configuration. Nothing to remove..."%(variable) + print(">>> Variable '%s' not in the configuration. 
Nothing to remove..."%(variable)) else: if variable in CONFIG: CONFIG.pop(variable) CONFIG.write(backup=True) else: - print ">>> Variable '%s' not in the configuration. Nothing to remove..."%(variable) + print(">>> Variable '%s' not in the configuration. Nothing to remove..."%(variable)) diff --git a/PicoProducer/python/pico/job.py b/PicoProducer/python/pico/job.py index 2e444bbf3..108628566 100755 --- a/PicoProducer/python/pico/job.py +++ b/PicoProducer/python/pico/job.py @@ -22,7 +22,7 @@ def preparejobs(args): """Help function for (re)submission to iterate over samples per given channel and era and prepare job config and list.""" if args.verbosity>=1: - print ">>> preparejobs", args + print(">>> preparejobs", args) resubmit = args.subcommand=='resubmit' eras = args.eras @@ -65,15 +65,15 @@ def preparejobs(args): skim = 'skim' in channel.lower() module, processor, procopts, extrachopts = getmodule(channel,extraopts) if verbosity>=1: - print '-'*80 - print ">>> %-12s = %r"%('channel',channel) - print ">>> %-12s = %r"%('processor',processor) - print ">>> %-12s = %r"%('module',module) - print ">>> %-12s = %r"%('procopts',procopts) - print ">>> %-12s = %r"%('extrachopts',extrachopts) - print ">>> %-12s = %s"%('filters',filters) - print ">>> %-12s = %s"%('vetoes',vetoes) - print ">>> %-12s = %r"%('dtypes',dtypes) + print('-'*80) + print(">>> %-12s = %r"%('channel',channel)) + print(">>> %-12s = %r"%('processor',processor)) + print(">>> %-12s = %r"%('module',module)) + print(">>> %-12s = %r"%('procopts',procopts)) + print(">>> %-12s = %r"%('extrachopts',extrachopts)) + print(">>> %-12s = %s"%('filters',filters)) + print(">>> %-12s = %s"%('vetoes',vetoes)) + print(">>> %-12s = %r"%('dtypes',dtypes)) # GET SAMPLES jobdirformat = CONFIG.jobdir # for job config & log files @@ -86,15 +86,15 @@ def preparejobs(args): jobcfgs = repkey(os.path.join(jobdir_,"config/jobconfig_$SAMPLE$TAG_try[0-9]*.json"), ERA=era,SAMPLE='*',CHANNEL=channel,TAG=tag) if verbosity>=2: - print ">>> 
%-12s = %s"%('cwd',os.getcwd()) - print ">>> %-12s = %s"%('jobcfgs',jobcfgs) + print(">>> %-12s = %s"%('cwd',os.getcwd())) + print(">>> %-12s = %s"%('jobcfgs',jobcfgs)) samples = getcfgsamples(jobcfgs,filter=filters,veto=vetoes,dtype=dtypes,verb=verbosity) else: # get samples from sample list LOG.insist(era in CONFIG.eras,"Era '%s' not found in the configuration file. Available: %s"%(era,CONFIG.eras)) samples = getsamples(era,channel=channel,tag=tag,dtype=dtypes,filter=filters,veto=vetoes, dasfilter=dasfilters,dasveto=dasvetoes,moddict=moddict,verb=verbosity) if verbosity>=2: - print ">>> Found samples: "+", ".join(repr(s.name) for s in samples) + print(">>> Found samples: "+", ".join(repr(s.name) for s in samples)) if testrun: samples = samples[:2] # run at most two samples @@ -102,9 +102,9 @@ def preparejobs(args): found = len(samples)>=0 failed = [ ] # failed samples for sample in samples: - print ">>> %s"%(bold(sample.name)) + print(">>> %s"%(bold(sample.name))) for path in sample.paths: - print ">>> %s"%(bold(path)) + print(">>> %s"%(bold(path))) # DIRECTORIES subtry = sample.subtry+1 if resubmit else 1 @@ -136,28 +136,28 @@ def preparejobs(args): cfgname = "%s/jobconfig%s.json"%(cfgdir,jobtag) joblist = '%s/jobarglist%s.txt'%(cfgdir,jobtag) if verbosity==1: - print ">>> %-12s = %s"%('cfgname',cfgname) - print ">>> %-12s = %s"%('joblist',joblist) + print(">>> %-12s = %s"%('cfgname',cfgname)) + print(">>> %-12s = %s"%('joblist',joblist)) elif verbosity>=2: - print '-'*80 - print ">>> Preparing job %ssubmission for '%s'"%("re" if resubmit else "",sample.name) - print ">>> %-12s = %r"%('processor',processor) - print ">>> %-12s = %r"%('dtype',dtype) - print ">>> %-12s = %r"%('jobname',jobname) - print ">>> %-12s = %r"%('jobtag',jobtag) - print ">>> %-12s = %r"%('postfix',postfix) - print ">>> %-12s = %r"%('outdir',outdir) - print ">>> %-12s = %r"%('extraopts',extraopts_) - print ">>> %-12s = %r"%('prefetch',prefetch_) - print ">>> %-12s = 
%r"%('preselect',preselect) - print ">>> %-12s = %r"%('cfgdir',cfgdir) - print ">>> %-12s = %r"%('logdir',logdir) - print ">>> %-12s = %r"%('tmpdir',tmpdir) - print ">>> %-12s = %r"%('cfgname',cfgname) - print ">>> %-12s = %r"%('joblist',joblist) - print ">>> %-12s = %s"%('try',subtry) - print ">>> %-12s = %r"%('jobids',jobids) - print ">>> %-12s = %r"%('queue',queue_) + print('-'*80) + print(">>> Preparing job %ssubmission for '%s'"%("re" if resubmit else "",sample.name)) + print(">>> %-12s = %r"%('processor',processor)) + print(">>> %-12s = %r"%('dtype',dtype)) + print(">>> %-12s = %r"%('jobname',jobname)) + print(">>> %-12s = %r"%('jobtag',jobtag)) + print(">>> %-12s = %r"%('postfix',postfix)) + print(">>> %-12s = %r"%('outdir',outdir)) + print(">>> %-12s = %r"%('extraopts',extraopts_)) + print(">>> %-12s = %r"%('prefetch',prefetch_)) + print(">>> %-12s = %r"%('preselect',preselect)) + print(">>> %-12s = %r"%('cfgdir',cfgdir)) + print(">>> %-12s = %r"%('logdir',logdir)) + print(">>> %-12s = %r"%('tmpdir',tmpdir)) + print(">>> %-12s = %r"%('cfgname',cfgname)) + print(">>> %-12s = %r"%('joblist',joblist)) + print(">>> %-12s = %s"%('try',subtry)) + print(">>> %-12s = %r"%('jobids',jobids)) + print(">>> %-12s = %r"%('queue',queue_)) # CHECKS if os.path.isfile(cfgname): @@ -171,23 +171,23 @@ def preparejobs(args): while True: submit = raw_input(">>> Submit anyway? [y/n] "%(nchunks)) if 'f' in submit.lower(): # submit this job, and stop asking - print ">>> Force all." + print(">>> Force all.") force = True; skip = True; break elif 'y' in submit.lower(): # submit this job - print ">>> Continue submission..." + print(">>> Continue submission...") skip = True; break elif 'n' in submit.lower(): # do not submit this job - print ">>> Not submitting." 
+ print(">>> Not submitting.") break else: - print ">>> '%s' is not a valid answer, please choose y/n."%submit + print(">>> '%s' is not a valid answer, please choose y/n."%submit) else: skip = True LOG.warning("Job configuration %r already exists and might cause conflicting job output! "%(cfgname)+ "To submit anyway, please use the --force flag") if skip: # do not submit this job failed.append(sample) - print "" + print("") continue if not resubmit: # check for existing jobss cfgpattern = re.sub(r"(?<=try)\d+(?=.json(?:\.gz)?$)",r"*",cfgname) @@ -214,51 +214,51 @@ def preparejobs(args): if testrun: infiles = infiles[:4] # only run four files per sample if verbosity==1: - print ">>> %-12s = %s"%('maxevts',maxevts_) - print ">>> %-12s = %s"%('nfilesperjob',nfilesperjob_) - print ">>> %-12s = %s"%('nfiles',len(infiles)) + print(">>> %-12s = %s"%('maxevts',maxevts_)) + print(">>> %-12s = %s"%('nfilesperjob',nfilesperjob_)) + print(">>> %-12s = %s"%('nfiles',len(infiles))) elif verbosity>=2: - print ">>> %-12s = %s"%('maxevts',maxevts_) - print ">>> %-12s = %s"%('nfilesperjob',nfilesperjob_) - print ">>> %-12s = %s"%('nfiles',len(infiles)) - print ">>> %-12s = [ "%('infiles') + print(">>> %-12s = %s"%('maxevts',maxevts_)) + print(">>> %-12s = %s"%('nfilesperjob',nfilesperjob_)) + print(">>> %-12s = %s"%('nfiles',len(infiles))) + print(">>> %-12s = [ "%('infiles')) for file in infiles: - print ">>> %r"%file - print ">>> ]" + print(">>> %r"%file) + print(">>> ]") # CHUNKS - partition/split list infiles.sort() # to have consistent order with resubmission chunks = [ ] # chunk indices if maxevts_>1: if verbosity>=1: - print ">>> Preparing jobs with chunks split by number of events..." 
+ print(">>> Preparing jobs with chunks split by number of events...") try: ntot, fchunks = chunkify_by_evts(infiles,maxevts_,evtdict=sample.filenevts,verb=verbosity) # list of file chunks split by events if nevents<=0 and not resubmit: nevents = ntot except IOError as err: # capture if opening files fail - print "IOError: "+err.message + print("IOError: "+err.message) LOG.warning("Skipping submission...") failed.append(sample) - print "" + print("") continue # ignore this submission if testrun: fchunks = fchunks[:4] else: if verbosity>=1: - print ">>> Preparing jobs with chunks split by number of files..." + print(">>> Preparing jobs with chunks split by number of files...") fchunks = chunkify(infiles,nfilesperjob_) # list of file chunks split by number of files nfiles = len(infiles) nchunks = len(fchunks) if verbosity>=1: - print ">>> %-12s = %s"%('nchunks',nchunks) + print(">>> %-12s = %s"%('nchunks',nchunks)) if verbosity>=2: - print ">>> %-12s = %s"%('nevents',nevents) - print '-'*80 + print(">>> %-12s = %s"%('nevents',nevents)) + print('-'*80) # WRITE JOB LIST with arguments per job if args.verbosity>=1: - print ">>> Creating job list %s..."%(joblist) + print(">>> Creating job list %s..."%(joblist)) if fchunks: with open(joblist,'w') as listfile: ichunk = 0 @@ -306,7 +306,7 @@ def preparejobs(args): jobcmd += " --opt '%s'"%("' '".join(extraopts_)) jobcmd += " -i %s"%(jobfiles) # add last if args.verbosity>=1: - print ">>> chunk=%d, jobcmd=%r"%(ichunk,jobcmd) + print(">>> chunk=%d, jobcmd=%r"%(ichunk,jobcmd)) listfile.write(jobcmd+'\n') chunkdict[ichunk] = fchunk chunks.append(ichunk) @@ -332,7 +332,7 @@ def preparejobs(args): if not found: print_no_samples(dtypes,filters,vetoes,[channel],jobdir_,jobcfgs) elif failed and len(failed)!=len(samples): - print ">>> %d/%d samples failed: %s\n"%(len(failed),len(samples),', '.join(s.name for s in failed)) + print(">>> %d/%d samples failed: %s\n"%(len(failed),len(samples),', '.join(s.name for s in failed))) @@ -389,9 
+389,9 @@ def checkchunks(sample,**kwargs): ndasevents = sample.getnevents(das=True) oldjobcfg['nevents'] = ndasevents elif verbosity>=2: - print ">>> %-12s = %s"%('ndasevents',ndasevents) + print(">>> %-12s = %s"%('ndasevents',ndasevents)) if verbosity>=3: - print ">>> %-12s = %s"%('chunkdict',chunkdict) + print(">>> %-12s = %s"%('chunkdict',chunkdict)) # CHECK PENDING JOBS if checkqueue<0 or pendjobs: @@ -411,20 +411,20 @@ def checkchunks(sample,**kwargs): if evtsplit: fpatterns.append("*%s_[0-9]*.root"%(postfix)) if verbosity>=2: - print ">>> %-12s = %r"%('flagexp',flagexp.pattern) - print ">>> %-12s = %r"%('flagexp2',flagexp2.pattern) - print ">>> %-12s = %r"%('fpatterns',fpatterns) - print ">>> %-12s = %r"%('chunkexp',chunkexp.pattern) - print ">>> %-12s = %s"%('checkqueue',checkqueue) - print ">>> %-12s = %s"%('pendjobs',pendjobs) - print ">>> %-12s = %s"%('jobids',jobids) + print(">>> %-12s = %r"%('flagexp',flagexp.pattern)) + print(">>> %-12s = %r"%('flagexp2',flagexp2.pattern)) + print(">>> %-12s = %r"%('fpatterns',fpatterns)) + print(">>> %-12s = %r"%('chunkexp',chunkexp.pattern)) + print(">>> %-12s = %s"%('checkqueue',checkqueue)) + print(">>> %-12s = %s"%('pendjobs',pendjobs)) + print(">>> %-12s = %s"%('jobids',jobids)) # CHECK PENDING JOBS pendfiles = [ ] if pendjobs: for job in pendjobs: if verbosity>=3: - print ">>> Found job %r, status=%r, args=%r"%(job,job.getstatus(),job.args.rstrip()) + print(">>> Found job %r, status=%r, args=%r"%(job,job.getstatus(),job.args.rstrip())) if job.getstatus() in ['q','r']: if 'HTCondor' in CONFIG.batch: jobarg = str(job.args) @@ -435,9 +435,9 @@ def checkchunks(sample,**kwargs): matches = flagexp.findall(jobarg) matches2 = flagexp2.findall(jobarg) if verbosity>=3: - print ">>> jobarg =",jobarg.replace('\n','') - print ">>> matches =",matches - print ">>> matches2 =",matches2 + print(">>> jobarg =",jobarg.replace('\n','')) + print(">>> matches =",matches) + print(">>> matches2 =",matches2) if not matches: continue 
infiles = [ ] @@ -470,12 +470,12 @@ def checkchunks(sample,**kwargs): bar = LoadingBar(len(outfiles),width=20,pre=">>> Checking output files: ", message="files, 0/%d (0%%)"%(ndasevents),counter=True,remove=True) elif verbosity>=2: - print ">>> %-12s = %s"%('pendchunks',pendchunks) - print ">>> %-12s = %s"%('outfiles',outfiles) + print(">>> %-12s = %s"%('pendchunks',pendchunks)) + print(">>> %-12s = %s"%('outfiles',outfiles)) validated = itervalid(outfiles,checkevts=checkevts,ncores=ncores,verb=verbosity) # get number of events processed & check for corruption for nevents, fname in validated: # use validator for parallel processing if verbosity>=2: - print ">>> Checking job output '%s'..."%(fname) + print(">>> Checking job output '%s'..."%(fname)) basename = os.path.basename(fname) infile = chunkexp.sub(r"\1.root",basename) # reconstruct input file without path or postfix outmatch = chunkexp.match(basename) @@ -495,7 +495,7 @@ def checkchunks(sample,**kwargs): maxevts = int(inmatch.group(3)) if firstevt/maxevts!=ipart: # right file, wrong chunk if verbosity>=3: - print ">>> Not in chunk %d, %r"%(i,chunkfile) + print(">>> Not in chunk %d, %r"%(i,chunkfile)) continue if checkexpevts or verbosity>=2: filentot = filenevts.get(inmatch.group(1),-1) @@ -508,11 +508,11 @@ def checkchunks(sample,**kwargs): ichunk = i if ichunk in pendchunks: if verbosity>=2: - print ">>> => Pending..." 
+ print(">>> => Pending...") continue if nevents<0: if verbosity>=2: - print ">>> => Bad nevents=%s..."%(nevents) + print(">>> => Bad nevents=%s..."%(nevents)) badfiles.append(chunkfile) else: if checkexpevts and nevtsexp>-1 and nevents!=nevtsexp: @@ -520,17 +520,17 @@ def checkchunks(sample,**kwargs): if verbosity>=2: if nevtsexp>0: frac = "%.1f%%"%(100.0*nevents/nevtsexp) if nevtsexp!=0 else "" - print ">>> => Good, nevents=%s/%s %s"%(nevents,nevtsexp,frac) + print(">>> => Good, nevents=%s/%s %s"%(nevents,nevtsexp,frac)) else: - print ">>> => Good, nevents=%s"%(nevents) + print(">>> => Good, nevents=%s"%(nevents)) nprocevents += nevents goodfiles.append(chunkfile) if verbosity>=2: if ichunk<0: if matches: - print ">>> => No match with input file (ipart=%d, but found in %d chunks; %s)..."%(ipart,len(matches),matches) + print(">>> => No match with input file (ipart=%d, but found in %d chunks; %s)..."%(ipart,len(matches),matches)) else: - print ">>> => No match with input file..." + print(">>> => No match with input file...") #LOG.warning("Did not recognize output file '%s'!"%(fname)) continue if bar: @@ -572,18 +572,18 @@ def checkchunks(sample,**kwargs): fpattern = "*%s_[0-9]*.root"%(postfix) # _$postfix_$chunk chunkexp = re.compile(r".+%s_(\d+)\.root"%(postfix)) if verbosity>=2: - print ">>> %-12s = %r"%('flagexp',flagexp.pattern) - print ">>> %-12s = %r"%('fpattern',fpattern) - print ">>> %-12s = %r"%('chunkexp',chunkexp.pattern) - print ">>> %-12s = %s"%('checkqueue',checkqueue) - print ">>> %-12s = %s"%('pendjobs',pendjobs) - print ">>> %-12s = %s"%('jobids',jobids) + print(">>> %-12s = %r"%('flagexp',flagexp.pattern)) + print(">>> %-12s = %r"%('fpattern',fpattern)) + print(">>> %-12s = %r"%('chunkexp',chunkexp.pattern)) + print(">>> %-12s = %s"%('checkqueue',checkqueue)) + print(">>> %-12s = %s"%('pendjobs',pendjobs)) + print(">>> %-12s = %s"%('jobids',jobids)) # CHECK PENDING JOBS if pendjobs: for job in pendjobs: if verbosity>=3: - print ">>> Found job %r, 
status=%r, args=%r"%(job,job.getstatus(),job.args.rstrip()) + print(">>> Found job %r, status=%r, args=%r"%(job,job.getstatus(),job.args.rstrip())) if job.getstatus() in ['q','r']: if 'HTCondor' in CONFIG.batch: jobarg = str(job.args) @@ -592,8 +592,8 @@ def checkchunks(sample,**kwargs): jobarg = getline(joblist,job.taskid-1) matches = flagexp.findall(jobarg) if verbosity>=3: - print ">>> jobarg = %r"%(jobarg) - print ">>> matches = %s"%(matches) + print(">>> jobarg = %r"%(jobarg)) + print(">>> matches = %s"%(matches)) if not matches: continue ichunk = int(matches[0]) @@ -608,11 +608,11 @@ def checkchunks(sample,**kwargs): bar = LoadingBar(len(outfiles),width=20,pre=">>> Checking output files: ", message="files, 0/%d (0%%)"%(ndasevents),counter=True,remove=True) elif verbosity>=2: - print ">>> %-12s = %s"%('pendchunks',pendchunks) - print ">>> %-12s = %s"%('outfiles',outfiles) + print(">>> %-12s = %s"%('pendchunks',pendchunks)) + print(">>> %-12s = %s"%('outfiles',outfiles)) for fname in outfiles: if verbosity>=2: - print ">>> Checking job output '%s'..."%(fname) + print(">>> Checking job output '%s'..."%(fname)) match = chunkexp.search(fname) if match: ichunk = int(match.group(1)) @@ -626,7 +626,7 @@ def checkchunks(sample,**kwargs): nevents = isvalid(fname) if checkevts else 0 # get number of processed events & check for corruption if nevents<0: if verbosity>=2: - print ">>> => Bad, nevents=%s"%(nevents) + print(">>> => Bad, nevents=%s"%(nevents)) badchunks.append(ichunk) # TODO: remove file from outdir to avoid conflicting output ? 
else: @@ -649,9 +649,9 @@ def checkchunks(sample,**kwargs): if verbosity>=2: if nevtsexp>0: frac = "%.1f%%"%(100.0*nevents/nevtsexp) if nevtsexp!=0 else "" - print ">>> => Good, nevents=%d/%d %s"%(nevents,nevtsexp,frac) + print(">>> => Good, nevents=%d/%d %s"%(nevents,nevtsexp,frac)) else: - print ">>> => Good, nevents=%s"%(nevents) + print(">>> => Good, nevents=%s"%(nevents)) nprocevents += nevents goodchunks.append(ichunk) if bar: @@ -660,7 +660,7 @@ def checkchunks(sample,**kwargs): # GET FILES for RESUBMISSION + sanity checks if verbosity>=2: - print ">>> %-12s = %s"%('nprocevents',nprocevents) + print(">>> %-12s = %s"%('nprocevents',nprocevents)) for ichunk in chunkdict.keys(): count = goodchunks.count(ichunk)+pendchunks.count(ichunk)+badchunks.count(ichunk) LOG.insist(count in [0,1],"Found %d times chunk '%d' (good=%d, pending=%d, bad=%d). "%( @@ -679,7 +679,7 @@ def checkchunks(sample,**kwargs): ########################################################################### - # PRINT CHUNKS + # PRINT CHUNKS goodchunks.sort() pendchunks.sort() badchunks.sort() @@ -695,9 +695,9 @@ def printchunks(jobden,label,text,col,show=False): jstr = ": all %s-%s"%(jobden[0],jobden[-1]) else: # list pending/failed/missing chunks jstr = (": "+', '.join(str(j) for j in jobden)) - print ">>> %s %s - %s%s"%(ratio,label,text,jstr) + print(">>> %s %s - %s%s"%(ratio,label,text,jstr)) #else: - # print ">>> %2d/%d %s - %s"%(len(jobden),len(jobs),label,text) + # print(">>> %2d/%d %s - %s"%(len(jobden),len(jobs),label,text)) rtext = "" if ndasevents>0: # report number of processed events if checkevts: @@ -711,7 +711,7 @@ def printchunks(jobden,label,text,col,show=False): printchunks(badchunks, 'FAIL', "Chunks with corrupted output in outdir",'red',True) printchunks(misschunks,'MISS',"Chunks with no output in outdir",'red',True) - # PRINT LOG FILES for debugging + # PRINT LOG FILES for debugging if showlogs!=0 and (badchunks or misschunks): logglob = os.path.join(logdir,"*.*.*") #.log 
lognames = sorted(glob.glob(logglob),key=alphanum_key,reverse=True) @@ -738,7 +738,7 @@ def printchunks(jobden,label,text,col,show=False): #logexp = re.compile(".*\.\d{3,}\.%d(?:\.log)?$"%(taskid)) #$JOBNAME.$JOBID.$TASKID.log matches = [f for f in lognames if f.endswith(logexp)] if matches: - print ">>> %s"%(matches[0]) + print(">>> %s"%(matches[0])) lognames.remove(matches[0]) break else: @@ -755,7 +755,7 @@ def printchunks(jobden,label,text,col,show=False): def main_submit(args): """Submit or resubmit jobs to the batch system.""" if args.verbosity>=1: - print ">>> main_submit", args + print(">>> main_submit", args) verbosity = args.verbosity resubmit = args.subcommand=='resubmit' @@ -782,7 +782,7 @@ def main_submit(args): 'short': (testrun>0), 'queue':queue, 'time':time } if nchunks<=0: - print ">>> Nothing to %ssubmit!"%('re' if resubmit else '') + print(">>> Nothing to %ssubmit!"%('re' if resubmit else '')) continue if batch.system=='HTCondor': # use specific settings for KIT condor @@ -807,20 +807,20 @@ def main_submit(args): while True: submit = raw_input(">>> Do you want to submit %d jobs to the batch system? [y/n] "%(nchunks)) if any(s in submit.lower() for s in ['q','exit']): # quit this script - print ">>> Quitting..." + print(">>> Quitting...") exit(0) elif any(s in submit.lower() for s in ['f','all']): - print ">>> Force submission..." + print(">>> Force submission...") submit = 'y' args.prompt = False # stop asking for next samples if 'y' in submit.lower(): # submit this job jobid = batch.submit(script,joblist,**jkwargs) break elif 'n' in submit.lower(): # do not submit this job - print ">>> Not submitting." 
+ print(">>> Not submitting.") break else: - print ">>> '%s' is not a valid answer, please choose y/n."%submit + print(">>> '%s' is not a valid answer, please choose y/n."%submit) else: jobid = batch.submit(script,joblist,**jkwargs) @@ -828,7 +828,7 @@ def main_submit(args): if jobid!=None: jobcfg['jobids'].append(jobid) if verbosity>=1: - print ">>> Creating config file '%s'..."%(cfgname) + print(">>> Creating config file '%s'..."%(cfgname)) if cfgname.endswith(".json.gz"): with gzip.open(cfgname,'wt') as file: file.write(json.dump(jobcfg),indent=2) @@ -845,7 +845,7 @@ def main_submit(args): def main_status(args): """Check status of jobs (succesful/pending/failed/missing), or hadd job output.""" if args.verbosity>=1: - print ">>> main_status", args + print(">>> main_status", args) # SETTING eras = args.eras # eras to loop over and run @@ -890,15 +890,15 @@ def main_status(args): jobcfgs = repkey(os.path.join(jobdir_,"config/jobconfig_$CHANNEL$TAG_try[0-9]*.json"), ERA=era,SAMPLE='*',GROUP='*',CHANNEL=channel,TAG=tag) # get ALL job configs if verbosity>=1: - print ">>> %-12s = %s"%('cwd',os.getcwd()) - print ">>> %-12s = %s"%('jobdir',jobdir_) - print ">>> %-12s = %s"%('jobcfgs',jobcfgs) - print ">>> %-12s = %s"%('filters',filters) - print ">>> %-12s = %s"%('vetoes',vetoes) - print ">>> %-12s = %s"%('dtypes',dtypes) + print(">>> %-12s = %s"%('cwd',os.getcwd())) + print(">>> %-12s = %s"%('jobdir',jobdir_)) + print(">>> %-12s = %s"%('jobcfgs',jobcfgs)) + print(">>> %-12s = %s"%('filters',filters)) + print(">>> %-12s = %s"%('vetoes',vetoes)) + print(">>> %-12s = %s"%('dtypes',dtypes)) samples = getcfgsamples(jobcfgs,filter=filters,veto=vetoes,dtype=dtypes,verb=verbosity) if verbosity>=2: - print ">>> Found samples: "+", ".join(repr(s.name) for s in samples) + print(">>> Found samples: "+", ".join(repr(s.name) for s in samples)) if subcmd in ['hadd','haddclean'] and 'skim' in channel.lower(): LOG.warning("Hadding into one file not available for skimming...") print @@ 
-915,9 +915,9 @@ def main_status(args): elif sample.channels and channel_ not in sample.channels: continue found = True - print ">>> %s"%(bold(sample.name)) + print(">>> %s"%(bold(sample.name))) for path in sample.paths: - print ">>> %s"%(bold(path)) + print(">>> %s"%(bold(path))) # CHECK JOBS ONLY ONCE if checkqueue==1 and jobs!=None: @@ -940,15 +940,15 @@ def main_status(args): cfgfiles = os.path.join(cfgdir,'job*%s_try[0-9]*.*'%(postfix)) logfiles = os.path.join(logdir,'*%s*.*.log'%(postfix)) if verbosity>=1: - print ">>> %sing job output for '%s'"%(subcmd.capitalize(),sample.name) - print ">>> %-12s = %r"%('cfgname',cfgname) - print ">>> %-12s = %r"%('jobdir',jobdir) - print ">>> %-12s = %r"%('cfgdir',cfgdir) - print ">>> %-12s = %r"%('outdir',outdir) - print ">>> %-12s = %r"%('storedir',storedir) - print ">>> %-12s = %s"%('infiles',infiles) + print(">>> %sing job output for '%s'"%(subcmd.capitalize(),sample.name)) + print(">>> %-12s = %r"%('cfgname',cfgname)) + print(">>> %-12s = %r"%('jobdir',jobdir)) + print(">>> %-12s = %r"%('cfgdir',cfgdir)) + print(">>> %-12s = %r"%('outdir',outdir)) + print(">>> %-12s = %r"%('storedir',storedir)) + print(">>> %-12s = %s"%('infiles',infiles)) if subcmd in ['hadd','haddclean']: - print ">>> %-12s = %r"%('outfile',outfile) + print(">>> %-12s = %r"%('outfile',outfile)) resubfiles, chunkdict, npend = checkchunks(sample,channel=channel_,tag=tag,jobs=jobs,checkqueue=checkqueue,checkevts=checkevts, das=checkdas,checkexpevts=checkexpevts,ncores=ncores,verb=verbosity) if (len(resubfiles)>0 or npend>0) and not force: # only clean or hadd if all jobs were successful @@ -970,7 +970,7 @@ def main_status(args): rmcmd = None if len(glob.glob(allcfgs))==len(glob.glob(cfgfiles)): # check for other jobs if verbosity>=2: - print ">>> %-12s = %s"%('cfgfiles',cfgfiles) + print(">>> %-12s = %s"%('cfgfiles',cfgfiles)) rmcmd = "rm -r %s"%(jobdir) # remove whole job directory else: # only remove files related to this job (era/channel/sample) 
rmfiles = [ ] @@ -979,8 +979,8 @@ def main_status(args): if len(glob.glob(files))>0: rmfiles.append(files) if verbosity>=2: - print ">>> %-12s = %s"%('cfgfiles',cfgfiles) - print ">>> %-12s = %s"%('rmfileset',rmfileset) + print(">>> %-12s = %s"%('cfgfiles',cfgfiles)) + print(">>> %-12s = %s"%('rmfileset',rmfileset)) if rmfiles: rmcmd = "rm %s"%(' '.join(rmfiles)) if rmcmd: @@ -996,11 +996,11 @@ def main_status(args): outdir = sample.jobcfg['outdir'] logdir = sample.jobcfg['logdir'] if verbosity>=1: - print ">>> Checking job status for '%s'"%(sample.name) - print ">>> %-12s = %r"%('cfgname',cfgname) - print ">>> %-12s = %r"%('jobdir',jobdir) - print ">>> %-12s = %r"%('outdir',outdir) - print ">>> %-12s = %r"%('logdir',logdir) + print(">>> Checking job status for '%s'"%(sample.name)) + print(">>> %-12s = %r"%('cfgname',cfgname)) + print(">>> %-12s = %r"%('jobdir',jobdir)) + print(">>> %-12s = %r"%('outdir',outdir)) + print(">>> %-12s = %r"%('logdir',logdir)) checkchunks(sample,channel=channel_,tag=tag,jobs=jobs,showlogs=showlogs,checkqueue=checkqueue, checkevts=checkevts,das=checkdas,ncores=ncores,verb=verbosity) diff --git a/PicoProducer/python/pico/run.py b/PicoProducer/python/pico/run.py index 55af46ce0..dd63ba764 100755 --- a/PicoProducer/python/pico/run.py +++ b/PicoProducer/python/pico/run.py @@ -14,7 +14,7 @@ def main_run(args): """Run given module locally.""" if args.verbosity>=1: - print ">>> main_run", args + print(">>> main_run", args) eras = args.eras # eras to loop over and run channels = args.channels # channels to loop over and run tag = args.tag # extra tag for output file @@ -39,9 +39,9 @@ def main_run(args): # LOOP over ERAS if not eras: - print ">>> Please specify a valid era (-y)." + print(">>> Please specify a valid era (-y).") if not channels: - print ">>> Please specify a valid channel (-c)." 
+ print(">>> Please specify a valid channel (-c).") for era in eras: moddict = { } # save time by loading samples and get their files only once @@ -55,27 +55,27 @@ def main_run(args): # VERBOSE if verbosity>=1: - print '-'*80 - print ">>> Running %r"%(channel) - print ">>> %-12s = %r"%('channel',channel) - print ">>> %-12s = %r"%('module',module) - print ">>> %-12s = %r"%('processor',processor) - print ">>> %-12s = %r"%('procopts',procopts) - print ">>> %-12s = %r"%('extrachopts',extrachopts) - print ">>> %-12s = %r"%('prefetch',prefetch) - print ">>> %-12s = %r"%('preselect',preselect) - print ">>> %-12s = %s"%('filters',filters) - print ">>> %-12s = %s"%('vetoes',vetoes) - print ">>> %-12s = %r"%('dtypes',dtypes) - print ">>> %-12s = %r"%('userfiles',userfiles) - print ">>> %-12s = %r"%('outdir',outdir) + print('-'*80) + print(">>> Running %r"%(channel)) + print(">>> %-12s = %r"%('channel',channel)) + print(">>> %-12s = %r"%('module',module)) + print(">>> %-12s = %r"%('processor',processor)) + print(">>> %-12s = %r"%('procopts',procopts)) + print(">>> %-12s = %r"%('extrachopts',extrachopts)) + print(">>> %-12s = %r"%('prefetch',prefetch)) + print(">>> %-12s = %r"%('preselect',preselect)) + print(">>> %-12s = %s"%('filters',filters)) + print(">>> %-12s = %s"%('vetoes',vetoes)) + print(">>> %-12s = %r"%('dtypes',dtypes)) + print(">>> %-12s = %r"%('userfiles',userfiles)) + print(">>> %-12s = %r"%('outdir',outdir)) # LOOP over FILTERS samples = [ ] for filter in filters: filters_ = [filter] if filter else [ ] if verbosity>=2: - print ">>> Checking filters=%s, vetoes=%s, dtypes=%s..."%(filters_,vetoes,dtypes) + print(">>> Checking filters=%s, vetoes=%s, dtypes=%s..."%(filters_,vetoes,dtypes)) # GET SAMPLES if not userfiles and (filters_ or vetoes or dtypes): @@ -93,16 +93,16 @@ def main_run(args): if not userfiles and (filters_ or vetoes or dtypes): print_no_samples(dtypes,filters_,vetoes,[channel]) if verbosity>=1: - print ">>> %-12s = %r"%('samples',samples) - print 
'-'*80 + print(">>> %-12s = %r"%('samples',samples)) + print('-'*80) # LOOP over SAMPLES for sample in samples: if sample: - print ">>> %s"%(bold(sample.name)) + print(">>> %s"%(bold(sample.name))) if verbosity>=1: for path in sample.paths: - print ">>> %s"%(bold(path)) + print(">>> %s"%(bold(path))) # SETTINGS dtype = None @@ -114,9 +114,9 @@ def main_run(args): else: filetag = "_%s_%s%s"%(channel,era,tag) if verbosity>=1: - print ">>> %-12s = %s"%('sample',sample) - print ">>> %-12s = %r"%('filetag',filetag) # postfix - print ">>> %-12s = %s"%('extraopts',extraopts_) + print(">>> %-12s = %s"%('sample',sample)) + print(">>> %-12s = %r"%('filetag',filetag)) # postfix + print(">>> %-12s = %s"%('extraopts',extraopts_)) # GET FILES infiles = [ ] @@ -129,12 +129,12 @@ def main_run(args): if nfiles>0: infiles = infiles[:nfiles] if verbosity==1: - print ">>> %-12s = %r"%('dtype',dtype) - print ">>> %-12s = %s"%('nfiles',len(infiles)) - print ">>> %-12s = [ "%('infiles') + print(">>> %-12s = %r"%('dtype',dtype)) + print(">>> %-12s = %s"%('nfiles',len(infiles))) + print(">>> %-12s = [ "%('infiles')) for file in infiles: - print ">>> %r"%file - print ">>> ]" + print(">>> %r"%file) + print(">>> ]") # RUN runcmd = processor @@ -162,7 +162,7 @@ def main_run(args): runcmd += " --opt '%s'"%("' '".join(extraopts_)) #elif nfiles: # runcmd += " --nfiles %s"%(nfiles) - print ">>> Executing: "+bold(runcmd) + print(">>> Executing: "+bold(runcmd)) if not dryrun: #execute(runcmd,dry=dryrun,verb=verbosity+1) # real-time print out does not work well with python script os.system(runcmd) diff --git a/PicoProducer/scripts/pico.py b/PicoProducer/scripts/pico.py index c51a01c0f..1a5816876 100755 --- a/PicoProducer/scripts/pico.py +++ b/PicoProducer/scripts/pico.py @@ -21,7 +21,7 @@ def main_install(args): # - set defaults of config file # - outside CMSSW: create symlinks for standalone if args.verbosity>=1: - print ">>> main_install", args + print(">>> main_install", args) verbosity = 
args.verbosity @@ -241,7 +241,7 @@ def main_install(args): from TauFW.PicoProducer.pico.job import main_status main_status(args) else: - print ">>> subcommand '%s' not implemented!"%(args.subcommand) + print(">>> subcommand '%s' not implemented!"%(args.subcommand)) - print ">>> Done!" + print(">>> Done!") From 57fcc85cefd663f69b3aa046ea8bb4fd476a8ec6 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 13:38:18 +0200 Subject: [PATCH 06/55] fix typo --- PicoProducer/python/batch/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/PicoProducer/python/batch/utils.py b/PicoProducer/python/batch/utils.py index 9e93e382a..7b5d972f1 100644 --- a/PicoProducer/python/batch/utils.py +++ b/PicoProducer/python/batch/utils.py @@ -60,7 +60,7 @@ def chunkify_by_evts(fnames,maxevts,evenly=True,evtdict=None,verb=0): if bar: bar.count("files") if verb>=1: - print(">>> chunkify_by_evts: %d small files (<%d events) and %d large files (>=%d events)"%() + print(">>> chunkify_by_evts: %d small files (<%d events) and %d large files (>=%d events)"%( len(nsmall),maxevts,len(nlarge),maxevts) for nevts in nlarge: for fname in nlarge[nevts]: # split large files into several chunks @@ -69,7 +69,7 @@ def chunkify_by_evts(fnames,maxevts,evenly=True,evtdict=None,verb=0): nchunks = ceil(float(nevts)/maxevts) maxevts_ = int(ceil(nevts/nchunks)) # new maxevts per chunk if verb>=3: - print(">>> nevts/maxevts = %d/%d = %.2f => make %d chunks with max. %d events"%() + print(">>> nevts/maxevts = %d/%d = %.2f => make %d chunks with max. 
%d events"%( nevts,maxevts,nevts/float(maxevts),nchunks,maxevts_) ifirst = 0 # first event to process in first chunk while ifirst Date: Sun, 28 May 2023 13:47:57 +0200 Subject: [PATCH 07/55] make python3 compatible (storage) --- PicoProducer/python/storage/GridKA_NRG.py | 11 ++--- PicoProducer/python/storage/Sample.py | 42 ++++++++++---------- PicoProducer/python/storage/StorageSystem.py | 33 +++++++-------- PicoProducer/python/storage/T2_DESY.py | 17 ++++---- PicoProducer/python/storage/das.py | 4 +- PicoProducer/python/storage/utils.py | 25 ++++++------ 6 files changed, 68 insertions(+), 64 deletions(-) diff --git a/PicoProducer/python/storage/GridKA_NRG.py b/PicoProducer/python/storage/GridKA_NRG.py index 7e9b391f2..2bef15019 100644 --- a/PicoProducer/python/storage/GridKA_NRG.py +++ b/PicoProducer/python/storage/GridKA_NRG.py @@ -1,5 +1,6 @@ #! /usr/bin/env python # Author: Sebastian Brommer (July 2020) +from past.builtins import basestring # for python2 compatibility import os from TauFW.common.tools.utils import execute from TauFW.PicoProducer.storage.StorageSystem import StorageSystem @@ -36,7 +37,7 @@ def ensure_local_temp_dir(self, tmpdir, verb): def remove_local_temp_dir(self, tmpdir, verb): """Remove local tempdir.""" if verb >= 2: - print ">>> removing temp directory {}/* ".format(tmpdir) + print(">>> removing temp directory {}/* ".format(tmpdir)) for root, dirs, files in os.walk(tmpdir, topdown=False): for name in files: os.remove(os.path.join(root, name)) @@ -67,10 +68,10 @@ def hadd(self, sources, target, **kwargs): os.path.relpath(file, '/store/user/')) source = source.strip() if verb >= 2: - print ">>> %-10s = %r" % ('sources', sources) - print ">>> %-10s = %r" % ('source', source) - print ">>> %-10s = %r" % ('target', target) - print ">>> %-10s = %r" % ('htarget', htarget) + print(">>> %-10s = %r" % ('sources', sources)) + print(">>> %-10s = %r" % ('source', source)) + print(">>> %-10s = %r" % ('target', target)) + print(">>> %-10s = %r" % 
('htarget', htarget)) out = self.execute("%s %s %s" % (self.haddcmd, htarget, source), verb=verb) if tmpdir: diff --git a/PicoProducer/python/storage/Sample.py b/PicoProducer/python/storage/Sample.py index 9e48436f7..c49309e7d 100644 --- a/PicoProducer/python/storage/Sample.py +++ b/PicoProducer/python/storage/Sample.py @@ -119,14 +119,14 @@ def __init__(self,group,name,*paths,**kwargs): # VERBOSITY if self.verbosity>=3: - print ">>> Sample.__init__: %r from group %r and type %r"%(self.name,self.group,self.dtype) - print ">>> %-11s = %s"%('paths',self.paths) - print ">>> %-11s = %r"%('storage',self.storage) - print ">>> %-11s = %r, %r"%('url, dasurl',self.url,self.dasurl) - print ">>> %-11s = %r"%('filelist',self.filelist) - print ">>> %-11s = %s"%('filenevts',self.filenevts) - print ">>> %-11s = %s"%('nevents',self.nevents) - print ">>> %-11s = %r"%('extraopts',self.extraopts) + print(">>> Sample.__init__: %r from group %r and type %r"%(self.name,self.group,self.dtype)) + print(">>> %-11s = %s"%('paths',self.paths)) + print(">>> %-11s = %r"%('storage',self.storage)) + print(">>> %-11s = %r, %r"%('url, dasurl',self.url,self.dasurl)) + print(">>> %-11s = %r"%('filelist',self.filelist)) + print(">>> %-11s = %s"%('filenevts',self.filenevts)) + print(">>> %-11s = %s"%('nevents',self.nevents)) + print(">>> %-11s = %r"%('extraopts',self.extraopts)) def __str__(self): return self.name @@ -150,7 +150,7 @@ def loadjson(cfgname): for key in ['group','name','paths','try','channel','chunkdict','dtype','extraopts']: LOG.insist(key in jobcfg,"Did not find key '%s' in job configuration %s"%(key,cfgname)) jobcfg['config'] = str(cfgname) - jobcfg['chunkdict'] = { int(k): v for k, v in jobcfg['chunkdict'].iteritems() } + jobcfg['chunkdict'] = { int(k): v for k, v in jobcfg['chunkdict'].items() } nfilesperjob = int(jobcfg['nfilesperjob']) filenevts = jobcfg.get('filenevts',{ }) dtype = jobcfg['dtype'] @@ -193,9 +193,9 @@ def match(self,patterns,verb=0): break if verb>=2: if match_: - 
print ">>> Sample.match: '%s' match to '%s'!"%(sample,pattern) + print(">>> Sample.match: '%s' match to '%s'!"%(sample,pattern)) else: - print ">>> Sample.match: NO '%s' match to '%s'!"%(sample,pattern) + print(">>> Sample.match: NO '%s' match to '%s'!"%(sample,pattern)) return match_ def filterpath(self,filter=[],veto=[],copy=False,verb=0): @@ -209,7 +209,7 @@ def filterpath(self,filter=[],veto=[],copy=False,verb=0): if keep: paths.append(path) if verb>=1: - print ">>> Sample.filterpath: filters=%s, vetoes=%s, %s -> %s"%(filter,veto,self.paths,paths) + print(">>> Sample.filterpath: filters=%s, vetoes=%s, %s -> %s"%(filter,veto,self.paths,paths)) if len(paths)!=len(self.paths): if copy: sample = deepcopy(self) @@ -345,7 +345,7 @@ def _writefile(ofile,fname,prefix=""): nevts = getnevents(fname,treename) # get nevents from file if nevts<1: if skipempty: - print ">>> Sample.writefiles: Skip %s with %s events..."%(fname,nevts) + print(">>> Sample.writefiles: Skip %s with %s events..."%(fname,nevts)) return else: self.nempty += 1 @@ -356,24 +356,24 @@ def _writefile(ofile,fname,prefix=""): listname_ = repkey(listname,PATH=path.strip('/').replace('/','__')) with open(listname_,'w+') as lfile: if '$PATH' in listname: # write only the file list of this path to this text file - print ">>> Write %s files to list %r..."%(len(self.pathfiles[path]),listname_) + print(">>> Write %s files to list %r..."%(len(self.pathfiles[path]),listname_)) for infile in self.pathfiles[path]: _writefile(lfile,infile) elif len(self.paths)<=1: # write file list for the only path if self.nevents>0: - print ">>> Write %s files to list %r..."%(len(files),listname_) + print(">>> Write %s files to list %r..."%(len(files),listname_)) else: - print ">>> Write %s files (%d events) to list %r..."%(len(files),self.nevents,listname_) + print(">>> Write %s files (%d events) to list %r..."%(len(files),self.nevents,listname_)) for infile in files: _writefile(lfile,infile) else: # divide up list per DAS dataset 
path if self.nevents>0: - print ">>> Write %s files (%d events) to list %r..."%(len(files),self.nevents,listname_) + print(">>> Write %s files (%d events) to list %r..."%(len(files),self.nevents,listname_)) else: - print ">>> Write %s files to list %r..."%(len(files),listname_) + print(">>> Write %s files to list %r..."%(len(files),listname_)) for i, path in enumerate(self.paths): assert path in self.pathfiles, "self.pathfiles.keys()=%s"%(self.pathfiles.keys()) - print ">>> %3s files for %s..."%(len(self.pathfiles[path]),path) + print(">>> %3s files for %s..."%(len(self.pathfiles[path]),path)) lfile.write("DASPATH=%s\n"%(path)) # write special line to text file, which loadfiles() can parse for infile in self.pathfiles[path]: # loop over this list (general list is sorted) LOG.insist(infile in files,"Did not find file %s in general list! %s"%(infile,files)) @@ -397,7 +397,7 @@ def loadfiles(self,listname_,**kwargs): for path in paths: listname_ = repkey(listname,PATH=path.strip('/').replace('/','__')) if self.verbosity>=1: - print ">>> Sample.loadfiles: Loading sample files from %r..."%(listname_) + print(">>> Sample.loadfiles: Loading sample files from %r..."%(listname_)) self.pathfiles[path] = [ ] if os.path.isfile(listname_): skip = False @@ -429,7 +429,7 @@ def loadfiles(self,listname_,**kwargs): filenevts[infile] = nevts # store/cache in dictionary nevents += nevts if self.verbosity>=3: - print ">>> %7d events for %s"%(nevts,infile) + print(">>> %7d events for %s"%(nevts,infile)) filelist.append(infile) self.pathfiles[path].append(infile) if not filelist: diff --git a/PicoProducer/python/storage/StorageSystem.py b/PicoProducer/python/storage/StorageSystem.py index 39acf34e0..aadb949d1 100644 --- a/PicoProducer/python/storage/StorageSystem.py +++ b/PicoProducer/python/storage/StorageSystem.py @@ -1,6 +1,7 @@ # Author: Izaak Neutelings (May 2020) # Description: Superclass of a generic storage system with common operations like # ls, cp, rm, mkdir, etc. 
to allow for easy implementation of storage system plug-ins. +from past.builtins import basestring # for python2 compatibility import os from fnmatch import fnmatch # for glob pattern from TauFW.common.tools.utils import execute, ensurelist @@ -37,17 +38,17 @@ def __init__(self,path,verb=0,ensure=False): self.parent = '/'+'/'.join(path.split('/')[:3]) self.mounted = os.path.exists(self.parent) if verb>=3: - print ">>> EOS.__init__:" - print ">>> %-10s = %r"%('path',self.path) - print ">>> %-10s = %r"%('parent',self.parent) - print ">>> %-10s = %r"%('tmpdir',self.tmpdir) - print ">>> %-10s = %r"%('mounted',self.mounted) - print ">>> %-10s = %r"%('lscmd',self.lscmd) - print ">>> %-10s = %r"%('lsurl',self.lsurl) - print ">>> %-10s = %r"%('rmcmd',self.rmcmd) - print ">>> %-10s = %r"%('rmurl',self.rmurl) - print ">>> %-10s = %r"%('mkdrcmd',self.mkdrcmd) - print ">>> %-10s = %r"%('mkdrurl',self.mkdrurl) + print(">>> EOS.__init__:") + print(">>> %-10s = %r"%('path',self.path)) + print(">>> %-10s = %r"%('parent',self.parent)) + print(">>> %-10s = %r"%('tmpdir',self.tmpdir)) + print(">>> %-10s = %r"%('mounted',self.mounted)) + print(">>> %-10s = %r"%('lscmd',self.lscmd)) + print(">>> %-10s = %r"%('lsurl',self.lsurl)) + print(">>> %-10s = %r"%('rmcmd',self.rmcmd)) + print(">>> %-10s = %r"%('rmurl',self.rmurl)) + print(">>> %-10s = %r"%('mkdrcmd',self.mkdrcmd)) + print(">>> %-10s = %r"%('mkdrurl',self.mkdrurl)) def __str__(self): return self.path @@ -189,11 +190,11 @@ def hadd(self,sources,target,**kwargs): source += self.expandpath(file,url=fileurl)+' ' source = source.strip() if verb>=2: - print ">>> %-10s = %r"%('sources',sources) - print ">>> %-10s = %r"%('source',source) - print ">>> %-10s = %r"%('target',target) - print ">>> %-10s = %r"%('htarget',htarget) - print ">>> %-10s = %r"%('maxopen',maxopen) + print(">>> %-10s = %r"%('sources',sources)) + print(">>> %-10s = %r"%('source',source)) + print(">>> %-10s = %r"%('target',target)) + print(">>> %-10s = 
%r"%('htarget',htarget)) + print(">>> %-10s = %r"%('maxopen',maxopen)) haddcmd = self.haddcmd if maxopen>=1: haddcmd += " -n %s"%(maxopen) diff --git a/PicoProducer/python/storage/T2_DESY.py b/PicoProducer/python/storage/T2_DESY.py index c152e140e..7b7bdb728 100644 --- a/PicoProducer/python/storage/T2_DESY.py +++ b/PicoProducer/python/storage/T2_DESY.py @@ -1,5 +1,6 @@ #! /usr/bin/env python # Author: Andrea Cardini (September 2020) +from past.builtins import basestring # for python2 compatibility import os import glob from TauFW.common.tools.utils import execute @@ -51,18 +52,18 @@ def cp(self,source,target=None,**kwargs): source=glob.glob(source) dryrun = kwargs.get('dry', False) verb = kwargs.get('verb',self.verbosity) - print source - print target + print(source) + print(target) if isinstance(source,list): for source_ in source: - print source_ + print(source_) source_ = self.expandpath(source_,url=self.cpurl) target = self.expandpath(target,url=self.cpurl) self.rm('%s/%s'%(os.path.abspath(target),source_)) self.execute('srmcp -2 "file:%s srm://dcache-se-cms.desy.de:8443/%s/%s"'%(os.path.abspath(source_),os.path.abspath(target),source_),dry=dryrun,verb=verb) return else: - print source + print(source) source = self.expandpath(source,url=self.cpurl) target = self.expandpath(target,url=self.cpurl) self.rm('%s/%s'%(os.path.abspath(target),source)) @@ -89,10 +90,10 @@ def hadd(self,sources,target,**kwargs): source += self.expandpath(file,url=fileurl)+' ' source = source.strip() if verb>=2: - print ">>> %-10s = %r"%('sources',sources) - print ">>> %-10s = %r"%('source',source) - print ">>> %-10s = %r"%('target',target) - print ">>> %-10s = %r"%('htarget',htarget) + print(">>> %-10s = %r"%('sources',sources)) + print(">>> %-10s = %r"%('source',source)) + print(">>> %-10s = %r"%('target',target)) + print(">>> %-10s = %r"%('htarget',htarget)) out = self.execute("%s %s %s"%(self.haddcmd,htarget,source),dry=dryrun,verb=verb) cpout = 
self.cp(htarget,os.path.dirname(target),dry=dryrun,verb=verb) if not dryrun: diff --git a/PicoProducer/python/storage/das.py b/PicoProducer/python/storage/das.py index 5fbcca31c..829f4819c 100644 --- a/PicoProducer/python/storage/das.py +++ b/PicoProducer/python/storage/das.py @@ -54,7 +54,7 @@ def expanddas(*datasets,**kwargs): """Get full list of datasets for a list of DAS dataset patterns.""" verbosity = kwargs.get('verb', 0) if verbosity>=1: - print ">>> expanddas(%r)"%(datasets) + print(">>> expanddas(%r)"%(datasets)) datasets = unwraplistargs(datasets) for dataset in datasets[:]: if '*' not in dataset: continue @@ -73,7 +73,7 @@ def expanddas(*datasets,**kwargs): def getparent(dataset,depth=0,verb=0): """Recursively get full ancestory of DAS dataset.""" if verb>=1: - print ">>> getparent(%r)"%(dataset) + print(">>> getparent(%r)"%(dataset)) query = "parent dataset=%s"%(dataset) if dataset.endswith('USER'): query += " instance=prod/phys03" diff --git a/PicoProducer/python/storage/utils.py b/PicoProducer/python/storage/utils.py index 55b72ca41..681204b8b 100644 --- a/PicoProducer/python/storage/utils.py +++ b/PicoProducer/python/storage/utils.py @@ -1,4 +1,5 @@ # Author: Izaak Neutelings (May 2020) +from past.builtins import basestring # for python2 compatibility import os, glob import getpass, platform import importlib @@ -68,7 +69,7 @@ def getstorage(path,verb=0,ensure=False): "If it is a special system, you need to subclass StorageSystem, see " "https://github.com/cms-tau-pog/TauFW/tree/master/PicoProducer#Storage-system") if verb>=2: - print ">>> storage.utils.getstorage(%r), %r"%(path,storage) + print(">>> storage.utils.getstorage(%r), %r"%(path,storage)) return storage @@ -115,7 +116,7 @@ def getsamples(era,channel="",tag="",dtype=[],filter=[],veto=[],dasfilter=[],das def getnevents(fname,treename='Events',verb=0): """Help function to get number of entries in a tree.""" if verb>=3: - print ">>> storage.utils.getnevents: opening %s:%r"%(fname,treename) + 
print(">>> storage.utils.getnevents: opening %s:%r"%(fname,treename)) file = ensureTFile(fname) tree = file.Get(treename) if not tree: @@ -160,21 +161,21 @@ def loopvalid(fnames_,**kwargs): if nchunks>=len(fnames): nchunks = len(fnames)-1 if verb>=2: - print ">>> storage.utils.itervalid: partitioning %d files into %d chunks for ncores=%d"%(len(fnames),nchunks,ncores) + print(">>> storage.utils.itervalid: partitioning %d files into %d chunks for ncores=%d"%(len(fnames),nchunks,ncores)) for i, subset in enumerate(partition(fnames,nchunks)): # process in ncores chunks if not subset: break name = "itervalid_%d"%(i) processor.start(loopvalid,subset,kwargs,name=name) for process in processor: if verb>=2: - print ">>> storage.utils.itervalid: joining process %r..."%(process.name) + print(">>> storage.utils.itervalid: joining process %r..."%(process.name)) nevtfiles = process.join() for nevts, fname in nevtfiles: yield nevts, fname else: # run validation in series for fname in fnames: if verb>=2: - print ">>> storage.utils.itervalid: Validating job output '%s'..."%(fname) + print(">>> storage.utils.itervalid: Validating job output '%s'..."%(fname)) nevts = isvalid(fname) yield nevts, fname @@ -194,7 +195,7 @@ def loopevts(fnames_): if nchunks>=len(fnames): nchunks = len(fnames)-1 if verb>=2: - print ">>> storage.utils.iterevts: partitioning %d files into %d chunks for ncores=%d..."%(len(fnames),nchunks,ncores) + print(">>> storage.utils.iterevts: partitioning %d files into %d chunks for ncores=%d..."%(len(fnames),nchunks,ncores)) for i, subset in enumerate(partition(fnames,nchunks)): # process in ncores chunks for fname in subset[:]: # check cache if not refresh and fname in filenevts: @@ -207,13 +208,13 @@ def loopevts(fnames_): processor.start(loopevts,subset,name=name) for process in processor: # collect output from parallel processes if verb>=2: - print ">>> storage.utils.iterevts: joining process %r..."%(process.name) + print(">>> storage.utils.iterevts: joining 
process %r..."%(process.name)) nevtfiles = process.join() for nevts, fname in nevtfiles: yield nevts, fname else: # run events check in SERIES if verb>=2: - print ">>> storage.utils.iterevts: retrieving number of events for %d files (in series)..."%(len(fnames)) + print(">>> storage.utils.iterevts: retrieving number of events for %d files (in series)..."%(len(fnames))) for fname in fnames: if refresh or fname not in filenevts: nevts = getnevents(fname,tree,verb=verb) @@ -225,11 +226,11 @@ def loopevts(fnames_): def print_no_samples(dtype=[],filter=[],veto=[],channel=[],jobdir="",jobcfgs=""): """Help function to print that no samples were found.""" if jobdir and not glob.glob(jobdir): #os.path.exists(jobdir): - print ">>> Job output directory %s does not exist!"%(jobdir) + print(">>> Job output directory %s does not exist!"%(jobdir)) elif jobcfgs and not glob.glob(jobcfgs): - print ">>> Did not find any job config files %s!"%(jobcfgs) + print(">>> Did not find any job config files %s!"%(jobcfgs)) else: - string = ">>> Did not find any samples" + string = ">>> Did not find any samples" if filter or veto or (dtype and len(dtype)<3): strings = [ ] if filter: @@ -241,6 +242,6 @@ def print_no_samples(dtype=[],filter=[],veto=[],channel=[],jobdir="",jobcfgs="") if channel: strings.append("channel%s %s"%('s' if len(channel)>1 else "",quotestrs(channel))) string += " with "+', '.join(strings) - print string + print(string) print From 308d48f40f0414899fe561c9444c9550d4a5123f Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 13:56:18 +0200 Subject: [PATCH 08/55] make python3 compatible (tool) --- PicoProducer/python/tools/config.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/PicoProducer/python/tools/config.py b/PicoProducer/python/tools/config.py index 5a453a24a..bf6f1cc24 100644 --- a/PicoProducer/python/tools/config.py +++ b/PicoProducer/python/tools/config.py @@ -1,5 +1,6 @@ #! 
/usr/bin/env python # Author: Izaak Neutelings (May 2020) +from past.builtins import basestring # for python2 compatibility import os, sys, re, glob, json from datetime import datetime import importlib @@ -67,8 +68,8 @@ def getconfig(verb=0,refresh=False): cfgname = os.path.join(cfgdir,"config.json") bkpname = os.path.join(cfgdir,"config.json.bkp") # back up to recover config if reset cfgdict = _cfgdefaults.copy() - rqdstrs = [k for k,v in _cfgdefaults.iteritems() if isinstance(v,basestring)] - rqddicts = [k for k,v in _cfgdefaults.iteritems() if isinstance(v,dict)] + rqdstrs = [k for k,v in _cfgdefaults.items() if isinstance(v,basestring)] + rqddicts = [k for k,v in _cfgdefaults.items() if isinstance(v,dict)] # GET CONFIG if os.path.isfile(cfgname): @@ -76,7 +77,7 @@ def getconfig(verb=0,refresh=False): cfgdict = json.load(file,object_pairs_hook=OrderedDict) nmiss = len([0 for k in _cfgdefaults.keys() if k not in cfgdict]) # count missing keys if nmiss>=5 and os.path.isfile(bkpname): # recover reset config file - print ">>> Config file may have been reset. Opening backup %s..."%(bkpname) + print(">>> Config file may have been reset. Opening backup %s..."%(bkpname)) with open(bkpname,'r') as file: bkpcfgdict = json.load(file,object_pairs_hook=OrderedDict) for key in bkpcfgdict.keys(): # check for missing keys @@ -90,7 +91,7 @@ def getconfig(verb=0,refresh=False): LOG.warning("Key '%s' not set in config file %s. Setting to default %r"%(key,os.path.relpath(cfgname),_cfgdefaults[key])) cfgdict[key] = _cfgdefaults[key] nmiss += 1 - print ">>> Saving updated keys..." 
+ print(">>> Saving updated keys...") with open(cfgname,'w') as file: json.dump(cfgdict,file,indent=2) else: @@ -110,11 +111,11 @@ def getconfig(verb=0,refresh=False): # RETURN if verb>=1: - print '-'*80 - print ">>> Reading config JSON file '%s'"%cfgname - for key, value in cfgdict.iteritems(): - print ">>> %-13s = %s"%(key,value) - print '-'*80 + print('-'*80) + print(">>> Reading config JSON file '%s'"%cfgname) + for key, value in cfgdict.items(): + print(">>> %-13s = %s"%(key,value)) + print('-'*80) CONFIG = Config(cfgdict,cfgname) return CONFIG @@ -190,7 +191,11 @@ def __iter__(self): return iter(self._dict) def iteritems(self): - for x in self._dict.iteritems(): + for x in self._dict.items(): + yield x + + def items(self): + for x in self._dict.items(): yield x def __len__(self): From 5e659f2643cfa89c48f36534f2836f1bff6cb881 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 13:56:40 +0200 Subject: [PATCH 09/55] make python3 compatible (analysis fix) --- PicoProducer/python/analysis/ModuleHighPT.py | 2 +- PicoProducer/python/analysis/ModuleTauPair.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/PicoProducer/python/analysis/ModuleHighPT.py b/PicoProducer/python/analysis/ModuleHighPT.py index 580b6f0d0..3668621f7 100644 --- a/PicoProducer/python/analysis/ModuleHighPT.py +++ b/PicoProducer/python/analysis/ModuleHighPT.py @@ -336,7 +336,7 @@ def fillJetMETBranches(self,event,leptons,lep1): ## FILL JET VARIATION BRANCHES (Not available in NanoAOD v10) # if self.dojecsys: - # for unc, jets_var in jets_vars.iteritems(): + # for unc, jets_var in jets_vars.items(): # getattr(self.out,"njets_"+unc)[0] = len(jets_var) # getattr(self.out,"ncjets_"+unc)[0] = ncjets_var # getattr(self.out,"nfjets_"+unc)[0] = nfjets_var diff --git a/PicoProducer/python/analysis/ModuleTauPair.py b/PicoProducer/python/analysis/ModuleTauPair.py index 23108f270..017da44d0 100644 --- a/PicoProducer/python/analysis/ModuleTauPair.py +++ 
b/PicoProducer/python/analysis/ModuleTauPair.py @@ -347,7 +347,7 @@ def fillJetBranches(self,event,tau1,tau2): ## FILL JET VARIATION BRANCHES #if self.dojecsys: - # for unc, jets_var in jets_vars.iteritems(): + # for unc, jets_var in jets_vars.items(): # ptvar = 'pt_'+unc # jets_var.sort(key=lambda j: getattr(j,ptvar),reverse=True) # njets_vars[unc] = len(jets_var) From 833e6e7b476312bc3854d6f34550a6b4ff2a8a35 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 14:31:13 +0200 Subject: [PATCH 10/55] fix parentheses typo --- PicoProducer/python/batch/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/PicoProducer/python/batch/utils.py b/PicoProducer/python/batch/utils.py index 7b5d972f1..cd4e82252 100644 --- a/PicoProducer/python/batch/utils.py +++ b/PicoProducer/python/batch/utils.py @@ -61,7 +61,7 @@ def chunkify_by_evts(fnames,maxevts,evenly=True,evtdict=None,verb=0): bar.count("files") if verb>=1: print(">>> chunkify_by_evts: %d small files (<%d events) and %d large files (>=%d events)"%( - len(nsmall),maxevts,len(nlarge),maxevts) + len(nsmall),maxevts,len(nlarge),maxevts)) for nevts in nlarge: for fname in nlarge[nevts]: # split large files into several chunks maxevts_ = maxevts @@ -70,7 +70,7 @@ def chunkify_by_evts(fnames,maxevts,evenly=True,evtdict=None,verb=0): maxevts_ = int(ceil(nevts/nchunks)) # new maxevts per chunk if verb>=3: print(">>> nevts/maxevts = %d/%d = %.2f => make %d chunks with max. 
%d events"%( - nevts,maxevts,nevts/float(maxevts),nchunks,maxevts_) + nevts,maxevts,nevts/float(maxevts),nchunks,maxevts_)) ifirst = 0 # first event to process in first chunk while ifirst=nevts: # if nevts%maxevts_!=0; index starts counting from 0 From 7f8e34e83cd484fabdc4749b8e08c5c8cde86dd5 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 14:34:58 +0200 Subject: [PATCH 11/55] make python3 compatible (processors) --- PicoProducer/python/processors/Bookkeeper.py | 1 + PicoProducer/python/processors/dumpGen.py | 1 + PicoProducer/python/processors/test.py | 19 ++++++++++--------- PicoProducer/python/storage/GridKA_NRG.py | 2 +- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/PicoProducer/python/processors/Bookkeeper.py b/PicoProducer/python/processors/Bookkeeper.py index 9f76d97e7..c1437acf5 100644 --- a/PicoProducer/python/processors/Bookkeeper.py +++ b/PicoProducer/python/processors/Bookkeeper.py @@ -1,5 +1,6 @@ # Author: Izaak Neutelings (July 2022) # Description: Keep track of number of events and sum of weights before and after skimming +from __future__ import print_function # for python3 compatibility import time import ROOT from ROOT import TH1D diff --git a/PicoProducer/python/processors/dumpGen.py b/PicoProducer/python/processors/dumpGen.py index 832a08506..bde7136e0 100755 --- a/PicoProducer/python/processors/dumpGen.py +++ b/PicoProducer/python/processors/dumpGen.py @@ -8,6 +8,7 @@ # https://github.com/cms-nanoAOD/nanoAOD-tools/tree/master/python/postprocessing/examples # https://github.com/cms-sw/cmssw/blob/master/PhysicsTools/NanoAOD/python/genparticles_cff.py # https://github.com/cms-sw/cmssw/blob/master/PhysicsTools/NanoAOD/plugins/LHETablesProducer.cc +from __future__ import print_function # for python3 compatibility from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import PostProcessor from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module from 
PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection diff --git a/PicoProducer/python/processors/test.py b/PicoProducer/python/processors/test.py index df233f595..0fb1c67f8 100755 --- a/PicoProducer/python/processors/test.py +++ b/PicoProducer/python/processors/test.py @@ -1,6 +1,7 @@ #! /usr/bin/env python # Author: Izaak Neutelings (April 2020) # Description: Speed test of nanoAOD postprocessing +from __future__ import print_function # for python3 compatibility import os, sys import time; time0 = time.time() from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import PostProcessor @@ -35,7 +36,7 @@ def beginJob(self): self.time0 = time.time() def endJob(self): - print ">>> endJob: done after %.1f seconds"%(time.time()-self.time0) + print(">>> endJob: done after %.1f seconds"%(time.time()-self.time0)) def analyze(self, event): """Process event, return True (pass, go to next module) or False (fail, go to next event).""" @@ -105,17 +106,17 @@ def analyze(self, event): if nfiles>0: infiles = infiles[:nfiles] # PRINT -print '-'*80 -print ">>> %-10s = %s"%('maxevts',maxevts) -print ">>> %-10s = %r"%('outdir',outdir) -print ">>> %-10s = %r"%('postfix',postfix) -print ">>> %-10s = %s"%('infiles',infiles) -print ">>> %-10s = %r"%('branchsel',branchsel) -print '-'*80 +print('-'*80) +print(">>> %-10s = %s"%('maxevts',maxevts)) +print(">>> %-10s = %r"%('outdir',outdir)) +print(">>> %-10s = %r"%('postfix',postfix)) +print(">>> %-10s = %s"%('infiles',infiles)) +print(">>> %-10s = %r"%('branchsel',branchsel)) +print('-'*80) # RUN module = TestModule() p = PostProcessor(outdir,infiles,cut=None,branchsel=branchsel,outputbranchsel=branchsel,noOut=False, modules=[module],provenance=False,postfix=postfix,maxEntries=maxevts) p.run() -print ">>> Done after %.1f seconds"%(time.time()-time0) +print(">>> Done after %.1f seconds"%(time.time()-time0)) diff --git a/PicoProducer/python/storage/GridKA_NRG.py 
b/PicoProducer/python/storage/GridKA_NRG.py index 2bef15019..817e92bb2 100644 --- a/PicoProducer/python/storage/GridKA_NRG.py +++ b/PicoProducer/python/storage/GridKA_NRG.py @@ -28,7 +28,7 @@ def __init__(self, path, verb=0, ensure=False): def ensure_local_temp_dir(self, tmpdir, verb): """Ensure local tempdir exists.""" if verb >= 2: - print ">>> Creating temp directory {} if not existent yet".format( + print(">>> Creating temp directory {} if not existent yet".format() tmpdir) if not os.path.exists(tmpdir): os.mkdir(tmpdir) From ec45db8d3816816b1543b0f4ae9fcd340ef8b40d Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 14:51:38 +0200 Subject: [PATCH 12/55] make python3 compatible (corrections) --- PicoProducer/python/corrections/BTagTool.py | 16 ++++----- .../python/corrections/ElectronSFs.py | 2 +- .../python/corrections/METCorrectionTool.py | 4 +-- .../python/corrections/MetTriggerSF.py | 2 +- PicoProducer/python/corrections/MuonSFs.py | 4 +-- PicoProducer/python/corrections/PileupTool.py | 2 +- .../corrections/RecoilCorrectionTool.py | 4 +-- .../python/corrections/TauTriggerSFs.py | 6 ++-- .../python/corrections/TrigObjMatcher.py | 34 +++++++++---------- PicoProducer/python/corrections/era_config.py | 6 ++-- 10 files changed, 40 insertions(+), 40 deletions(-) diff --git a/PicoProducer/python/corrections/BTagTool.py b/PicoProducer/python/corrections/BTagTool.py index a5b5a34d9..0a40dbf2f 100644 --- a/PicoProducer/python/corrections/BTagTool.py +++ b/PicoProducer/python/corrections/BTagTool.py @@ -190,14 +190,14 @@ def __init__(self,tagger,wp,era,channel,maxeta=None,loadsys=False,type_bc='comb' raise IOError("Did not recognize %r tagger..."%(tagger)) # LOAD CALIBRATION TOOL - print "Loading BTagWeightTool for %s (%s WP) %s..."%(tagger,wp,csvname) #,(", "+sigma) if sigma!='central' else "" + print("Loading BTagWeightTool for %s (%s WP) %s..."%(tagger,wp,csvname) #,(", "+sigma) if sigma!='central' else "") calib = BTagCalibration(tagger,csvname) if 
csvname_bc and csvname_bc!=csvname: - print " and from %s..."%(csvname_bc) + print(" and from %s..."%(csvname_bc)) calib_bc = BTagCalibration(tagger,csvname_bc) else: calib_bc = calib # use same calibrator - print " with efficiencies from %s..."%(effname) + print(" with efficiencies from %s..."%(effname)) # CSV READER readers = { } @@ -265,9 +265,9 @@ def getWeight(self,jets,unc='Nom'): for jet in jets: if abs(jet.eta)>> BTagWeightTool.getWeight: sf=%8.5f pt=%8.3f eta=%6.3f flavor=%2d tagged=%5r score=%8.5f wp=%8.4f"%( + ###print(">>> BTagWeightTool.getWeight: sf=%8.5f pt=%8.3f eta=%6.3f flavor=%2d tagged=%5r score=%8.5f wp=%8.4f"%() ### sf,jet.pt,jet.eta,jet.partonFlavour,self.tagged(jet),jet.btagDeepFlavB,self.wp) - ###print ">>> BTagWeightTool.getWeight: weight=%.6f"%(weight) + ###print(">>> BTagWeightTool.getWeight: weight=%.6f"%(weight)) return weight def getHeavyFlavorWeight(self,jets,unc='Nom'): @@ -325,7 +325,7 @@ def getEff(self,pt,eta,flavor): elif ybin>hist.GetYaxis().GetNbins(): ybin -= 1 eff = hist.GetBinContent(xbin,ybin) ###if eff==1: - ### print "Warning! BTagWeightTool.getEff: MC efficiency is 1 for pt=%s, eta=%s, flavor=%s, sf=%s"%(pt,eta,flavor,sf) + ### print("Warning! 
BTagWeightTool.getEff: MC efficiency is 1 for pt=%s, eta=%s, flavor=%s, sf=%s"%(pt,eta,flavor,sf)) return eff def fillEffMaps(self,jets,usejec=False,tag=""): @@ -381,8 +381,8 @@ def getDefaultEffMap(hname,flavor,wp='medium'): else: eff = 0.60 if flavor=='b' else 0.05 if flavor=='c' else 0.001 hname = hname.split('/')[-1] + "_default" hist = getJetMap(hname) - for xbin in xrange(0,hist.GetXaxis().GetNbins()+2): - for ybin in xrange(0,hist.GetYaxis().GetNbins()+2): + for xbin in range(0,hist.GetXaxis().GetNbins()+2): + for ybin in range(0,hist.GetYaxis().GetNbins()+2): hist.SetBinContent(xbin,ybin,eff) return hist diff --git a/PicoProducer/python/corrections/ElectronSFs.py b/PicoProducer/python/corrections/ElectronSFs.py index 485ef486c..707ef0df4 100644 --- a/PicoProducer/python/corrections/ElectronSFs.py +++ b/PicoProducer/python/corrections/ElectronSFs.py @@ -5,7 +5,7 @@ # 2018: https://hypernews.cern.ch/HyperNews/CMS/get/higgstautau/1132.html import os from TauFW.PicoProducer import datadir -from ScaleFactorTool import ScaleFactor, ScaleFactorHTT +from .ScaleFactorTool import ScaleFactor, ScaleFactorHTT pathPOG = os.path.join(datadir,"lepton/EGammaPOG/") pathHTT = os.path.join(datadir,"lepton/HTT/Electron/") "UL2017/egammaEffi.txt_EGM2D_MVA90noIso_UL17.root", diff --git a/PicoProducer/python/corrections/METCorrectionTool.py b/PicoProducer/python/corrections/METCorrectionTool.py index d14ed65ca..22810c5e7 100644 --- a/PicoProducer/python/corrections/METCorrectionTool.py +++ b/PicoProducer/python/corrections/METCorrectionTool.py @@ -67,13 +67,13 @@ def correct(self,oldmet,oldmetphi,npv,run=-1): xcorr = self.corrs[0]*npv + self.corrs[1] ycorr = self.corrs[2]*npv + self.corrs[3] else: - for (runa,runb), corrs in self.corrs.iteritems(): + for (runa,runb), corrs in self.corrs.items(): if runa<=run<=runb: xcorr = corrs[0]*npv + corrs[1] ycorr = corrs[2]*npv + corrs[3] break else: - print ">>> METCorrectionTool.correct: Could not find run %d in 
%s"%(run,self.corrs.keys()) + print(">>> METCorrectionTool.correct: Could not find run %d in %s"%(run,list(self.corrs.keys()))) return met, metphi metx = oldmet*TMath.Cos(oldmetphi) - xcorr diff --git a/PicoProducer/python/corrections/MetTriggerSF.py b/PicoProducer/python/corrections/MetTriggerSF.py index d34e232bf..9775d4905 100644 --- a/PicoProducer/python/corrections/MetTriggerSF.py +++ b/PicoProducer/python/corrections/MetTriggerSF.py @@ -13,7 +13,7 @@ def __init__(self,filename): self.histo = { } for label in self.labels: self.histo[label] = self.rootfile.Get(label) - print(label,self.histo[label]) + print((label,self.histo[label])) def getWeight(self,metnomu,mhtnomu): diff --git a/PicoProducer/python/corrections/MuonSFs.py b/PicoProducer/python/corrections/MuonSFs.py index e31cfae30..6279874c8 100644 --- a/PicoProducer/python/corrections/MuonSFs.py +++ b/PicoProducer/python/corrections/MuonSFs.py @@ -5,7 +5,7 @@ # https://twiki.cern.ch/twiki/bin/view/CMS/MuonLegacy2016 import os from TauFW.PicoProducer import datadir -from ScaleFactorTool import ScaleFactor, ScaleFactorHTT +from .ScaleFactorTool import ScaleFactor, ScaleFactorHTT pathPOG = os.path.join(datadir,"lepton/MuonPOG/") pathHTT = os.path.join(datadir,"lepton/HTT/Muon/") @@ -74,7 +74,7 @@ def __init__(self, era='2017', verb=0): ###sftool_iso = ScaleFactor(pathPOG+"Run2018/RunABCD_SF_ISO.root","NUM_TightRelIso_DEN_MediumID_pt_abseta",'mu_iso',ptvseta=False) ###self.sftool_idiso = sftool_id*sftool_iso assert self.sftool_trig!=None and self.sftool_idiso!=None, "MuonSFs.__init__: Did not find muon SF tool for %r"%(era) - print "Loading MuonSF for %s, %s"%(self.sftool_trig.filename,self.sftool_idiso.filename) + print("Loading MuonSF for %s, %s"%(self.sftool_trig.filename,self.sftool_idiso.filename)) def getTriggerSF(self, pt, eta): """Get SF for single muon trigger.""" diff --git a/PicoProducer/python/corrections/PileupTool.py b/PicoProducer/python/corrections/PileupTool.py index 6392782c8..ffd7b6e8d 
100644 --- a/PicoProducer/python/corrections/PileupTool.py +++ b/PicoProducer/python/corrections/PileupTool.py @@ -56,7 +56,7 @@ def __init__(self, era, sigma='central', sample=None, buggy=False, flat=False, m if flat or (sample and hasFlatPU(sample)): mcfilename = os.path.join(datadir,"MC_PileUp_%d_FlatPU0to75.root"%year) - print "Loading PileupWeightTool for %s and %s"%(datafilename,mcfilename) + print("Loading PileupWeightTool for %s and %s"%(datafilename,mcfilename)) self.datafile = ensureTFile(datafilename, 'READ') self.mcfile = ensureTFile(mcfilename, 'READ') self.datahist = self.datafile.Get('pileup') diff --git a/PicoProducer/python/corrections/RecoilCorrectionTool.py b/PicoProducer/python/corrections/RecoilCorrectionTool.py index f5384cc7b..8b094931b 100644 --- a/PicoProducer/python/corrections/RecoilCorrectionTool.py +++ b/PicoProducer/python/corrections/RecoilCorrectionTool.py @@ -27,7 +27,7 @@ def __init__(self, era, filename=None, histname='zptmass_weight'): histname = "zptmass_histo" filename = zptpath+"zpt_reweighting_LO.root" ## Test with Danny's weights #zptmass_weights_UL2018.root" assert filename, "ZptCorrectionTool.__init__: Did not find filename for %r"%(era) - print "Loading ZptCorrectionTool for %s:%r..."%(filename,histname) + print("Loading ZptCorrectionTool for %s:%r..."%(filename,histname)) file = ensureTFile(filename,'READ') hist = file.Get(histname) hist.SetDirectory(0) @@ -61,7 +61,7 @@ def __init__(self, year=2017, dozpt=True): filename = rcpath+"Type1_PFMET_2017.root" else: filename = rcpath+"TypeI-PFMet_Run2018.root" - print "Loading RecoilCorrectionTool for %s..."%filename + print("Loading RecoilCorrectionTool for %s..."%filename) CMSSW_BASE = os.environ.get("CMSSW_BASE",None) recoil_h = "%s/src/HTT-utilities/RecoilCorrections/interface/RecoilCorrector.h"%(CMSSW_BASE) assert CMSSW_BASE, "RecoilCorrectionTool: Did not find $CMSSW_BASE" diff --git a/PicoProducer/python/corrections/TauTriggerSFs.py 
b/PicoProducer/python/corrections/TauTriggerSFs.py index 6b4a11b09..59ec4f8e6 100644 --- a/PicoProducer/python/corrections/TauTriggerSFs.py +++ b/PicoProducer/python/corrections/TauTriggerSFs.py @@ -12,7 +12,7 @@ class TauTriggerSFs(object): def __init__(self, trigger, wp='Medium', id='DeepTau2017v2p1', year=2016): """Load tau trigger histograms from files.""" - print "Loading %s trigger SFs for %s WP of %s ID for year %d..."%(trigger,wp,id,year) + print("Loading %s trigger SFs for %s WP of %s ID for year %d..."%(trigger,wp,id,year)) # CHECKS dms = [0,1,10,11] @@ -83,12 +83,12 @@ def getSF(self, pt, dm, unc=None): if unc=='All': for eff in eff_mc: if eff < 1e-5: - print "MC eff. is suspiciously low! MC eff=%s, trigger=%s, ID=%s, WP=%s, pt=%s"%(eff,self.trigger,self.id,self.wp,pt) + print("MC eff. is suspiciously low! MC eff=%s, trigger=%s, ID=%s, WP=%s, pt=%s"%(eff,self.trigger,self.id,self.wp,pt)) return 0.0, 0.0, 0.0 return eff_data[0]/eff_mc[0], eff_data[1]/eff_mc[1], eff_data[2]/eff_mc[2] else: if eff_mc < 1e-5: - print "MC eff. is suspiciously low! MC eff=%s, trigger=%s, ID=%s, WP=%s, pt=%s"%(eff_mc,self.trigger,self.id,self.wp,pt) + print("MC eff. is suspiciously low! 
MC eff=%s, trigger=%s, ID=%s, WP=%s, pt=%s"%(eff_mc,self.trigger,self.id,self.wp,pt)) return 0.0 sf = eff_data / eff_mc return sf diff --git a/PicoProducer/python/corrections/TrigObjMatcher.py b/PicoProducer/python/corrections/TrigObjMatcher.py index cba8de1dd..51e849869 100644 --- a/PicoProducer/python/corrections/TrigObjMatcher.py +++ b/PicoProducer/python/corrections/TrigObjMatcher.py @@ -10,7 +10,7 @@ TriggerData = namedtuple('TriggerData',['trigdict','combdict']) # simple container class objectTypes = { 1: 'Jet', 6: 'FatJet', 2: 'MET', 3: 'HT', 4: 'MHT', 11: 'Electron', 13: 'Muon', 15: 'Tau', 22: 'Photon', } -objectIds = { t: i for i,t in objectTypes.iteritems() } +objectIds = { t: i for i,t in objectTypes.items() } objects = [ 'Electron', 'Muon', 'Tau', 'Photon', 'Jet', 'FatJet', 'MET', 'HT', 'MHT' ] @@ -42,7 +42,7 @@ def loadTriggerDataFromJSON(filename,channel=None,isdata=True,verbose=False): combdict = dict of channel -> list of combined triggers ('Trigger' object) """ if verbose: - print ">>> loadTriggerDataFromJSON: loading '%s'"%(filename) + print(">>> loadTriggerDataFromJSON: loading '%s'"%(filename)) datatype = 'data' if isdata else 'mc' channel_ = channel triggers = [ ] @@ -59,7 +59,7 @@ def loadTriggerDataFromJSON(filename,channel=None,isdata=True,verbose=False): bitdict = data['filterbits'] # HLT PATHS with corresponding filter bits, pt, eta cut - for path, trigobjdict in data['hltpaths'].iteritems(): + for path, trigobjdict in data['hltpaths'].items(): runrange = trigobjdict.get('runrange',None) if isdata else None filters = [ ] for obj in objects: # ensure order @@ -81,8 +81,8 @@ def loadTriggerDataFromJSON(filename,channel=None,isdata=True,verbose=False): # COMBINATIONS OF HLT PATHS if 'hltcombs' in data: if channel_: - assert channel_ in data['hltcombs'][datatype], "Did not find channel '%s' in JSON file! 
Available: '%s'"%(channel_,"', '".join(data['hltcombs'][datatype].keys())) - for channel, paths in data['hltcombs'][datatype].iteritems(): + assert channel_ in data['hltcombs'][datatype], "Did not find channel '%s' in JSON file! Available: '%s'"%(channel_,"', '".join(list(data['hltcombs'][datatype].keys()))) + for channel, paths in data['hltcombs'][datatype].items(): if channel_ and channel!=channel_: continue #combtrigs = [trigdict[p] for p in paths] combdict[channel] = [trigdict[p] for p in paths] #TriggerCombination(combtrigs) @@ -90,21 +90,21 @@ def loadTriggerDataFromJSON(filename,channel=None,isdata=True,verbose=False): # PRINT triggers.sort(key=lambda t: t.path) if verbose: - print ">>> triggers & filters:" + print(">>> triggers & filters:") for trigger in triggers: if channel_ and trigger not in combdict[channel_]: continue - print ">>> %s"%(trigger.path) + print(">>> %s"%(trigger.path)) for filter in trigger.filters: - print ">>> %-9s %r"%(filter.type+':',filter.name) #,"bits=%s"%filter.bits - print ">>> trigger combinations for %s:"%datatype - for channel, triglist in combdict.iteritems(): + print(">>> %-9s %r"%(filter.type+':',filter.name)) #,"bits=%s"%filter.bits + print(">>> trigger combinations for %s:"%datatype) + for channel, triglist in combdict.items(): if channel_ and channel!=channel_: continue - print ">>> %s"%(channel) + print(">>> %s"%(channel)) for trigger in triglist: path = "'%s'"%trigger.path if trigger.runrange: path += ", %d <= run <= %d"%(trigger.runrange[0],trigger.runrange[1]) - print ">>> "+path + print(">>> "+path) return TriggerData(trigdict,combdict) @@ -128,7 +128,7 @@ def __init__(self,path,filters,runrange=None,**kwargs): self.path = path # human readable trigger combination self.patheval = patheval # trigger evaluation per event 'e' self.fireddef = "self.fired = lambda e: "+patheval # exact definition of 'fired' function - exec self.fireddef in locals() # method to check if trigger was fired for a given event + 
exec(self.fireddef, locals()) # method to check if trigger was fired for a given event #self.fired = lambda e: any(e.p for p in self.paths) def __repr__(self): @@ -140,7 +140,7 @@ def __str__(self): trigstr = "'%s'"%self.path if self.runrange: trigstr += ", %d <= run <= %d"%(self.runrange[0],self.runrange[1]) - print trigstr + print(trigstr) ###class TriggerCombination: @@ -276,7 +276,7 @@ def __init__(self,triggers,**kwargs): self.path = path # human readable trigger combination self.patheval = patheval # trigger evaluation per event 'e' self.fireddef = firedef # exact definition of 'fired' function - exec self.fireddef in locals() # method to check if any of the triggers was fired for a given event + exec(self.fireddef, locals()) # method to check if any of the triggers was fired for a given event def __repr__(self): """Returns string representation of TriggerFilter object.""" @@ -288,9 +288,9 @@ def printTriggersAndFilters(self,indent=">>> "): trigstr = indent + "'%s'"%trigger.path if trigger.runrange: trigstr += ", %d <= run <= %d"%(trigger.runrange[0],trigger.runrange[1]) - print trigstr + print(trigstr) for i, filter in enumerate(trigger.filters,1): - print "%s leg %d: %s, %r"%(indent,i,filter.type,filter.name) + print("%s leg %d: %s, %r"%(indent,i,filter.type,filter.name)) def match(self,event,recoObj,leg=1,dR=0.2): """Match given reconstructed object to trigger objects.""" diff --git a/PicoProducer/python/corrections/era_config.py b/PicoProducer/python/corrections/era_config.py index 8601ce894..0841acb4a 100644 --- a/PicoProducer/python/corrections/era_config.py +++ b/PicoProducer/python/corrections/era_config.py @@ -27,7 +27,7 @@ def getjson(era,dtype='data'): json = 'Cert_294927-306462_13TeV_PromptReco_Collisions17_JSON.txt' elif year==2018: json = 'Cert_314472-325175_13TeV_PromptReco_Collisions18_JSON.txt' - print "Warning! Using outdated certified run JSON file %s for era %s... Please move to UltraLegacy (UL)!"%(json,era) + print("Warning! 
Using outdated certified run JSON file %s for era %s... Please move to UltraLegacy (UL)!"%(json,era)) assert json!=None, "getjson: Did not find certified run JSON for era %r, year %r"%(era,year) json = ensurefile(datadir,'json',str(year),json) return json @@ -42,9 +42,9 @@ def getperiod(filename,year=None,dtype='data'): filename = filename[0] matches = re.findall(r"Run(20[0-4][0-9])([A-Z]+)",filename) if not matches: - print "Warning! Could not find an era in %s"%filename + print("Warning! Could not find an era in %s"%filename) elif year and str(year)!=matches[0][0]: - print "Warning! Given year (%r) does not match the data file %s (%r)"%(year,filename,''.join(matches[0])) + print("Warning! Given year (%r) does not match the data file %s (%r)"%(year,filename,''.join(matches[0]))) else: period = matches[0][1] return period From db2bb4d96da112731e19ea5259409fa65767212b Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 14:54:27 +0200 Subject: [PATCH 13/55] fix parentheses typo --- PicoProducer/python/storage/GridKA_NRG.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/PicoProducer/python/storage/GridKA_NRG.py b/PicoProducer/python/storage/GridKA_NRG.py index 817e92bb2..240f71be4 100644 --- a/PicoProducer/python/storage/GridKA_NRG.py +++ b/PicoProducer/python/storage/GridKA_NRG.py @@ -28,8 +28,7 @@ def __init__(self, path, verb=0, ensure=False): def ensure_local_temp_dir(self, tmpdir, verb): """Ensure local tempdir exists.""" if verb >= 2: - print(">>> Creating temp directory {} if not existent yet".format() - tmpdir) + print(">>> Creating temp directory {} if not existent yet".format(tmpdir)) if not os.path.exists(tmpdir): os.mkdir(tmpdir) return True From e2c82532345204488d1a00a7482269be47e8b761 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 14:55:53 +0200 Subject: [PATCH 14/55] fix parentheses typo --- PicoProducer/python/corrections/BTagTool.py | 2 +- PicoProducer/python/pico/run.py | 2 +- 
PicoProducer/python/processors/dumpGen.py | 16 ++++++++-------- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/PicoProducer/python/corrections/BTagTool.py b/PicoProducer/python/corrections/BTagTool.py index 0a40dbf2f..f58c4fb54 100644 --- a/PicoProducer/python/corrections/BTagTool.py +++ b/PicoProducer/python/corrections/BTagTool.py @@ -190,7 +190,7 @@ def __init__(self,tagger,wp,era,channel,maxeta=None,loadsys=False,type_bc='comb' raise IOError("Did not recognize %r tagger..."%(tagger)) # LOAD CALIBRATION TOOL - print("Loading BTagWeightTool for %s (%s WP) %s..."%(tagger,wp,csvname) #,(", "+sigma) if sigma!='central' else "") + print("Loading BTagWeightTool for %s (%s WP) %s..."%(tagger,wp,csvname)) #,(", "+sigma) if sigma!='central' else "" calib = BTagCalibration(tagger,csvname) if csvname_bc and csvname_bc!=csvname: print(" and from %s..."%(csvname_bc)) diff --git a/PicoProducer/python/pico/run.py b/PicoProducer/python/pico/run.py index dd63ba764..25f322363 100755 --- a/PicoProducer/python/pico/run.py +++ b/PicoProducer/python/pico/run.py @@ -115,7 +115,7 @@ def main_run(args): filetag = "_%s_%s%s"%(channel,era,tag) if verbosity>=1: print(">>> %-12s = %s"%('sample',sample)) - print(">>> %-12s = %r"%('filetag',filetag) # postfix) + print(">>> %-12s = %r"%('filetag',filetag)) # postfix print(">>> %-12s = %s"%('extraopts',extraopts_)) # GET FILES diff --git a/PicoProducer/python/processors/dumpGen.py b/PicoProducer/python/processors/dumpGen.py index bde7136e0..16687da93 100755 --- a/PicoProducer/python/processors/dumpGen.py +++ b/PicoProducer/python/processors/dumpGen.py @@ -60,13 +60,13 @@ def __init__(self): def analyze(self,event): """Dump gen information for each gen particle in given event.""" - print "\n%s event %s %s"%('-'*10,event.event,'-'*60) + print("\n%s event %s %s"%('-'*10,event.event,'-'*60)) self.nevents += 1 leptonic = False particles = Collection(event,'GenPart') #particles = Collection(event,'LHEPart') - print " \033[4m%7s %8s 
%8s %8s %8s %8s %8s %8s %9s %10s \033[0m"%( - "index","pdgId","moth","mothid","dR","pt","eta","status","prompt","last copy") + print(" \033[4m%7s %8s %8s %8s %8s %8s %8s %8s %9s %10s \033[0m"%( + "index","pdgId","moth","mothid","dR","pt","eta","status","prompt","last copy")) for i, particle in enumerate(particles): mothidx = particle.genPartIdxMother if 0<=mothidx0: - print " %-10s %4d / %-4d (%.1f%%)"%('leptonic:',self.nleptons,self.nevents,100.0*self.nleptons/self.nevents) - print "%s done %s\n"%('-'*10,'-'*64) + print(" %-10s %4d / %-4d (%.1f%%)"%('leptonic:',self.nleptons,self.nevents,100.0*self.nleptons/self.nevents)) + print("%s done %s\n"%('-'*10,'-'*64)) # PROCESS NANOAOD processor = PostProcessor(outdir,infiles,noOut=True,modules=[LHEDumper()],maxEntries=maxEvts) From ed4ea596f9ac244534c45e5cf4ea9fe63edccf59 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 17:15:09 +0200 Subject: [PATCH 15/55] make python3 compatible (Plotter, except JetToTauFR) --- Plotter/python/methods/ScaleVariations.py | 2 +- Plotter/python/plot/Context.py | 6 +- Plotter/python/plot/MultiDraw.py | 2 +- Plotter/python/plot/MultiThread.py | 19 +++--- Plotter/python/plot/Plot.py | 64 ++++++++++---------- Plotter/python/plot/Plot2D.py | 18 +++--- Plotter/python/plot/Ratio.py | 4 +- Plotter/python/plot/Selection.py | 2 +- Plotter/python/plot/Stack.py | 4 +- Plotter/python/plot/Variable.py | 36 ++++++------ Plotter/python/plot/string.py | 30 +++++----- Plotter/python/plot/utils.py | 72 +++++++++++------------ Plotter/python/sample/HistSet.py | 28 ++++----- Plotter/python/sample/MergedSample.py | 14 ++--- Plotter/python/sample/Sample.py | 70 +++++++++++----------- Plotter/python/sample/SampleSet.py | 23 ++++---- Plotter/python/sample/utils.py | 36 ++++++------ 17 files changed, 215 insertions(+), 215 deletions(-) diff --git a/Plotter/python/methods/ScaleVariations.py b/Plotter/python/methods/ScaleVariations.py index af6db42f2..2cfd742c9 100644 --- 
a/Plotter/python/methods/ScaleVariations.py +++ b/Plotter/python/methods/ScaleVariations.py @@ -16,7 +16,7 @@ def getenvelope_scalevars(self,variables,selection,**kwargs): parallel = kwargs.get('parallel', False ) if verbosity>=2: LOG.header("Varying scale weights for variables %s (%s)"%(self.name,name)) - print ">>> selection=%r"%(selection.selection) + print(">>> selection=%r"%(selection.selection)) wtags = [ "0p5_0p5", "0p5_1p0", "1p0_0p5", "1p0_2p0", "2p0_1p0", "2p0_2p0", ] # GET NORMALIZATION SFs from 'qweight' histogram diff --git a/Plotter/python/plot/Context.py b/Plotter/python/plot/Context.py index 05e48c7ff..eaec87c02 100644 --- a/Plotter/python/plot/Context.py +++ b/Plotter/python/plot/Context.py @@ -14,7 +14,7 @@ class Context(object): def __init__(self, context_dict, *args, **kwargs): if not isinstance(context_dict,dict): - LOG.warning("Context.init: No dictionary given!") + LOG.warn("Context.init: No dictionary given!") self.context = context_dict self.default = args[0] if len(args)>0 else context_dict.get('default',None) self.regex = kwargs.get('regex',False) @@ -37,10 +37,10 @@ def getcontext(self,*args,**kwargs): # CHECK if len(args)==0: - LOG.warning("Context.getcontext: No arguments given!") + LOG.warn("Context.getcontext: No arguments given!") return self.default if not self.context: - LOG.warning("Context.getcontext: No context dictionary!") + LOG.warn("Context.getcontext: No context dictionary!") return None # MATCH diff --git a/Plotter/python/plot/MultiDraw.py b/Plotter/python/plot/MultiDraw.py index ebc4611f8..bfaa543f3 100644 --- a/Plotter/python/plot/MultiDraw.py +++ b/Plotter/python/plot/MultiDraw.py @@ -3,7 +3,7 @@ # Description: Efficiently draw multiple histograms with one loop over all events in a TTree # This script injects a MultiDraw method into TTree when it is imported. 
# Source: https://github.com/pwaller/minty/blob/master/minty/junk/MultiDraw.py -from __future__ import print_function +from __future__ import print_function # for python3 compatibility import os, re, traceback from ROOT import gROOT, gDirectory, TObject, TTree, TObjArray, TTreeFormula,\ TH1D, TH2D, TH2, SetOwnership, TTreeFormulaManager diff --git a/Plotter/python/plot/MultiThread.py b/Plotter/python/plot/MultiThread.py index 33dad01dd..f423fd870 100644 --- a/Plotter/python/plot/MultiThread.py +++ b/Plotter/python/plot/MultiThread.py @@ -2,6 +2,7 @@ # Author: Izaak Neutelings (2017) # Source: https://stackoverflow.com/questions/6893968/how-to-get-the-return-value-from-a-thread-in-python # https://stackoverflow.com/questions/10415028/how-can-i-recover-the-return-value-of-a-function-passed-to-multiprocessing-proce/28799109 +from __future__ import print_function # for python3 compatibility #from threading import Thread as _Thread from multiprocessing import Process, Pipe, Manager manager = Manager() @@ -15,12 +16,12 @@ def __init__(self, target, args=(), kwargs={}, group=None, name=None, verbose=No def mytarget(self,result,*args,**kwargs): result.append(self._target(*self._args,**self._kwargs)) - print result + print(result) def run(self): """Override run method to save result.""" self.mytarget(self._return,*self._args,**self._kwargs) - print self._return + print(self._return) def join(self): """Override join method to return result.""" @@ -43,7 +44,7 @@ def __iter__(self): """To loop over processes, and do process.join().""" for i, (process, endin, endout) in enumerate(self.procs): if self.verbose: - print ">>> MultiProcessor.__iter__: i=%s, process=%r, endin=%r, endout=%s, "%(i,process,endin,endout) + print(">>> MultiProcessor.__iter__: i=%s, process=%r, endin=%r, endout=%s, "%(i,process,endin,endout)) yield ReturnProcess(process,endin,endout,verbose=self.verbose) if self.max>=1 and self.waiting: #print "MultiProcessor.__iter__: starting new process (i=%d, max=%d, 
waiting=%d)"%(i,self.max,len(self.waiting)) @@ -67,8 +68,8 @@ def start(self, target, args=(), kwargs={}, group=None, name=None, verbose=False process = Process(group,mptarget,name,newargs,kwargs) process.kwret = kwret if verbose: - print ">>> MultiProcessor.start: endin=%r, target=%r, args=%r, kwargs=%r, kwret=%r, max=%s"%( - endin,target,args,kwargs,kwret,self.max) + print(">>> MultiProcessor.start: endin=%r, target=%r, args=%r, kwargs=%r, kwret=%r, max=%s"%( + endin,target,args,kwargs,kwret,self.max)) if self.max<1 or len(self.procs)>> ReturnProcess.join: name=%r, args=%s"%(self.name,args) + print(">>> ReturnProcess.join: name=%r, args=%s"%(self.name,args)) self.process.join(*args) # wait for process to finish #if self.endin: # self.endin.close() @@ -132,12 +133,12 @@ def join(self,*args,**kwargs): elif isinstance(kwretval,list): kwargs[kwret].extend(kwretval) # list only else: - print "Warning! MultiThread.ReturnProcess.join: No implementation for keyword return value '%s' of type %s..."%(kwret,type(kwretval)) + print("Warning! MultiThread.ReturnProcess.join: No implementation for keyword return value '%s' of type %s..."%(kwret,type(kwretval))) return out return self.endout.recv() else: # serial process if verbose: - print ">>> ReturnProcess.join: name=%r (serial), args=%s"%(self.name,args) + print(">>> ReturnProcess.join: name=%r (serial), args=%s"%(self.name,args)) if kwret in kwargs: kwretval = self.process.kwargs[kwret] if isinstance(kwretval,dict): @@ -145,7 +146,7 @@ def join(self,*args,**kwargs): elif isinstance(kwretval,list): kwargs[kwret].extend(kwretval) # list only else: - print "Warning! MultiThread.ReturnProcess.join: No implementation for keyword return value '%s' of type %s..."%(kwret,type(kwretval)) + print("Warning! 
MultiThread.ReturnProcess.join: No implementation for keyword return value '%s' of type %s..."%(kwret,type(kwretval))) return self.endout diff --git a/Plotter/python/plot/Plot.py b/Plotter/python/plot/Plot.py index 7ff3b22e6..eb2222733 100644 --- a/Plotter/python/plot/Plot.py +++ b/Plotter/python/plot/Plot.py @@ -222,10 +222,10 @@ def draw(self,*args,**kwargs): for hist in hists: if hist: hist.SetBins(*xbins) # set binning with xmin>0 if verbosity>=1: - print ">>> Plot.draw: hists=%s, norm=%r, dividebins=%r"%(self.hists,norm,dividebins) - print ">>> Plot.draw: xtitle=%r, ytitle=%r"%(xtitle,ytitle) + print(">>> Plot.draw: hists=%s, norm=%r, dividebins=%r"%(self.hists,norm,dividebins)) + print(">>> Plot.draw: xtitle=%r, ytitle=%r"%(xtitle,ytitle)) if verbosity>=2: - print ">>> Plot.draw: xmin=%s, xmax=%s, ymin=%s, ymax=%s, rmin=%s, rmax=%s"%(xmin,xmax,ymin,ymax,rmin,rmax) + print(">>> Plot.draw: xmin=%s, xmax=%s, ymin=%s, ymax=%s, rmin=%s, rmax=%s"%(xmin,xmax,ymin,ymax,rmin,rmax)) # NORMALIZE if norm: @@ -375,7 +375,7 @@ def close(self,keep=False,**kwargs): if not keep: # do not keep histograms if verbosity>=3: hlist = ', '.join(repr(h) for h in self.hists) - print ">>> Plot.close: Deleting histograms: %s..."%(hlist) + print(">>> Plot.close: Deleting histograms: %s..."%(hlist)) for hist in self.hists: deletehist(hist,**kwargs) if self.errband: @@ -413,9 +413,9 @@ def setcanvas(self,**kwargs): tmargin *= 0.90 #rmargin *= 3.6 if verbosity>=2: - print ">>> Plot.setcanvas: square=%r, lower=%r, split=%r"%(square,lower,split) - print ">>> Plot.setcanvas: width=%s, height=%s"%(width,height) - print ">>> Plot.setcanvas: lmargin=%.5g, rmargin=%.5g, tmargin=%.5g, bmargin=%.5g"%(lmargin,rmargin,tmargin,bmargin) + print(">>> Plot.setcanvas: square=%r, lower=%r, split=%r"%(square,lower,split)) + print(">>> Plot.setcanvas: width=%s, height=%s"%(width,height)) + print(">>> Plot.setcanvas: lmargin=%.5g, rmargin=%.5g, tmargin=%.5g, bmargin=%.5g"%(lmargin,rmargin,tmargin,bmargin)) 
canvas = TCanvas('canvas','canvas',100,100,int(width),int(height)) canvas.SetFillColor(0) #canvas.SetFillStyle(0) @@ -460,7 +460,7 @@ def setaxes(self, *args, **kwargs): elif isnumber(arg): binning.append(arg) if not hists: - LOG.warning("Plot.setaxes: No objects (TH1, TGraph, ...) given in args %s to set axis..."%(args)) + LOG.warn("Plot.setaxes: No objects (TH1, TGraph, ...) given in args %s to set axis..."%(args)) return 0, 0, 100, 100 frame = hists[0] if len(binning)>=2: @@ -509,9 +509,9 @@ def setaxes(self, *args, **kwargs): xtitle = makelatex(xtitle) LOG.verb("Plot.setaxes: Binning (%s,%.1f,%.1f), frame=%s"%(nbins,xmin,xmax,frame),verbosity,2) if verbosity>=3: - print ">>> Plot.setaxes: main=%r, lower=%r, grid=%r, latex=%r"%(main,lower,grid,latex) - print ">>> Plot.setaxes: logx=%r, logy=%r, ycenter=%r, intbins=%r, nxdiv=%s, nydiv=%s"%(logx,logy,ycenter,intbins,nxdivisions,nydivisions) - print ">>> Plot.setaxes: lmargin=%.5g, _lmargin=%.5g, scale=%s, yscale=%s"%(gPad.GetLeftMargin(),_lmargin,scale,yscale) + print(">>> Plot.setaxes: main=%r, lower=%r, grid=%r, latex=%r"%(main,lower,grid,latex)) + print(">>> Plot.setaxes: logx=%r, logy=%r, ycenter=%r, intbins=%r, nxdiv=%s, nydiv=%s"%(logx,logy,ycenter,intbins,nxdivisions,nydivisions)) + print(">>> Plot.setaxes: lmargin=%.5g, _lmargin=%.5g, scale=%s, yscale=%s"%(gPad.GetLeftMargin(),_lmargin,scale,yscale)) if ratiorange: ymin, ymax = 1.-ratiorange, 1.+ratiorange @@ -530,8 +530,8 @@ def setaxes(self, *args, **kwargs): if binlabels: nxdivisions = 15 if verbosity>=3: - print ">>> Plot.setaxes: xtitlesize=%.5g, ytitlesize=%.5g, xlabelsize=%.5g, ylabelsize=%.5g"%(xtitlesize,ytitlesize,xlabelsize,ylabelsize) - print ">>> Plot.setaxes: xtitleoffset=%.5g, ytitleoffset=%.5g, xlabeloffset=%.5g"%(xtitleoffset,ytitleoffset,xlabeloffset) + print(">>> Plot.setaxes: xtitlesize=%.5g, ytitlesize=%.5g, xlabelsize=%.5g, ylabelsize=%.5g"%(xtitlesize,ytitlesize,xlabelsize,ylabelsize)) + print(">>> Plot.setaxes: xtitleoffset=%.5g, 
ytitleoffset=%.5g, xlabeloffset=%.5g"%(xtitleoffset,ytitleoffset,xlabeloffset)) # GET HIST MAX hmaxs = [ ] @@ -616,7 +616,7 @@ def setaxes(self, *args, **kwargs): # alphanumerical bin labels if binlabels: if len(binlabels)=1: - print ">>> Plot.setaxes: xtitle=%r, [hmin,hmax] = [%.6g,%.6g], [xmin,xmax] = [%.6g,%.6g], [ymin,ymax] = [%.6g,%.6g]"%( - xtitle,hmin,hmax,xmin,xmax,ymin,ymax) + print(">>> Plot.setaxes: xtitle=%r, [hmin,hmax] = [%.6g,%.6g], [xmin,xmax] = [%.6g,%.6g], [ymin,ymax] = [%.6g,%.6g]"%( + xtitle,hmin,hmax,xmin,xmax,ymin,ymax)) elif verbosity>=2: - print ">>> Plot.setaxes: frame=%s"%(frame) - print ">>> Plot.setaxes: hists=%s"%(hists) - print ">>> Plot.setaxes: [hmin,hmax] = [%.6g,%.6g], [xmin,xmax] = [%.6g,%.6g], [ymin,ymax] = [%.6g,%.6g]"%(hmin,hmax,xmin,xmax,ymin,ymax) - print ">>> Plot.setaxes: xtitlesize=%4.4g, xlabelsize=%4.4g, xtitleoffset=%4.4g, xtitle=%r"%(xtitlesize,xlabelsize,xtitleoffset,xtitle) - print ">>> Plot.setaxes: ytitlesize=%4.4g, ylabelsize=%4.4g, ytitleoffset=%4.4g, ytitle=%r"%(ytitlesize,ylabelsize,ytitleoffset,ytitle) - print ">>> Plot.setaxes: scale=%4.4g, nxdivisions=%s, nydivisions=%s, ymargin=%.3f, logyrange=%.3f"%(scale,nxdivisions,nydivisions,ymargin,logyrange) + print(">>> Plot.setaxes: frame=%s"%(frame)) + print(">>> Plot.setaxes: hists=%s"%(hists)) + print(">>> Plot.setaxes: [hmin,hmax] = [%.6g,%.6g], [xmin,xmax] = [%.6g,%.6g], [ymin,ymax] = [%.6g,%.6g]"%(hmin,hmax,xmin,xmax,ymin,ymax)) + print(">>> Plot.setaxes: xtitlesize=%4.4g, xlabelsize=%4.4g, xtitleoffset=%4.4g, xtitle=%r"%(xtitlesize,xlabelsize,xtitleoffset,xtitle)) + print(">>> Plot.setaxes: ytitlesize=%4.4g, ylabelsize=%4.4g, ytitleoffset=%4.4g, ytitle=%r"%(ytitlesize,ylabelsize,ytitleoffset,ytitle)) + print(">>> Plot.setaxes: scale=%4.4g, nxdivisions=%s, nydivisions=%s, ymargin=%.3f, logyrange=%.3f"%(scale,nxdivisions,nydivisions,ymargin,logyrange)) if main or not lower: #if any(a!=None and a!=b for a, b in [(self.xmin,xmin),(self.xmax,xmax)]): - # 
LOG.warning("Plot.setaxes: x axis range changed: [xmin,xmax] = [%6.6g,%6.6g] -> [%6.6g,%6.6g]"%( + # LOG.warn("Plot.setaxes: x axis range changed: [xmin,xmax] = [%6.6g,%6.6g] -> [%6.6g,%6.6g]"%( # self.xmin,self.xmax,xmin,xmax)) #if any(a!=None and a!=b for a, b in [(self.ymin,ymin),(self.ymax,ymax)]): - # LOG.warning("Plot.setaxes: y axis range changed: [ymin,ymax] = [%6.6g,%6.6g] -> [%6.6g,%6.6g]"%( + # LOG.warn("Plot.setaxes: y axis range changed: [ymin,ymax] = [%6.6g,%6.6g] -> [%6.6g,%6.6g]"%( # self.ymin,self.ymax,ymin,ymax)) self.xmin, self.xmax = xmin, xmax self.ymin, self.ymax = ymin, ymax @@ -889,12 +889,12 @@ def drawlegend(self,position=None,**kwargs): legend.AddEntry(0,line,'') if verbosity>=2: - print ">>> Plot.drawlegend: title=%r, texts=%s, latex=%s"%(title,texts,latex) - print ">>> Plot.drawlegend: hists=%s"%(hists) - print ">>> Plot.drawlegend: entries=%s"%(entries) - print ">>> Plot.drawlegend: styles=%s, style=%s, errstyle=%s"%(styles,style,errstyle) - print ">>> Plot.drawlegend: nlines=%s, len(hists)=%s, len(texts)=%s, ncols=%s, margin=%s, xscale=%s"%( - nlines,len(hists),len(texts),ncols,margin,xscale) + print(">>> Plot.drawlegend: title=%r, texts=%s, latex=%s"%(title,texts,latex)) + print(">>> Plot.drawlegend: hists=%s"%(hists)) + print(">>> Plot.drawlegend: entries=%s"%(entries)) + print(">>> Plot.drawlegend: styles=%s, style=%s, errstyle=%s"%(styles,style,errstyle)) + print(">>> Plot.drawlegend: nlines=%s, len(hists)=%s, len(texts)=%s, ncols=%s, margin=%s, xscale=%s"%( + nlines,len(hists),len(texts),ncols,margin,xscale)) legend.Draw(option) self.legends.append(legend) @@ -1149,7 +1149,7 @@ def setfillstyle(self, *hists, **kwargs): fcolors = kwargs.get('colors', None ) or self.fcolors icol = 0 for hist in hists: - #print hist.GetFillColor() + #print(hist.GetFillColor()) if not reset and hist.GetFillColor() not in [kBlack,kWhite]: continue #color = getColor(hist.GetName() ) @@ -1161,5 +1161,5 @@ def setfillstyle(self, *hists, **kwargs): if 
line: hist.SetLineColor(kBlack) if verbosity>=2: - print ">>> Plot.setfillstyle: hist=%r, icol=%s, color=%s"%(hist, icol, color) + print(">>> Plot.setfillstyle: hist=%r, icol=%s, color=%s"%(hist, icol, color)) diff --git a/Plotter/python/plot/Plot2D.py b/Plotter/python/plot/Plot2D.py index 2c4a98f4d..20c2b660c 100644 --- a/Plotter/python/plot/Plot2D.py +++ b/Plotter/python/plot/Plot2D.py @@ -27,9 +27,9 @@ def __init__(self, *args, **kwargs): if kwargs.get('clone',False): hist = hist.Clone(hist.GetName()+"_clone_Plot2D") else: - LOG.warning("Plot2D.__init__: Did not recognize input: %s"%(args,)) + LOG.warn("Plot2D.__init__: Did not recognize input: %s"%(args,)) if len(vars)==1: - LOG.warning("Plot2D.__init__: Need one more variable!") + LOG.warn("Plot2D.__init__: Need one more variable!") if len(vars)>=2: xvariable = vars[0] yvariable = vars[1] @@ -140,11 +140,11 @@ def draw(self,*args,**kwargs): resetx = logx and hist.GetXaxis().GetBinLowEdge(1)<=0 and xmax>0 resety = logy and hist.GetYaxis().GetBinLowEdge(1)<=0 and ymax>0 if verbosity>=2: - print ">>> Plot2D.draw: xmin=%s, xmax=%s, ymin=%s, ymax=%s, zmin=%s, zmax=%s"%(xmin,xmax,ymin,ymax,zmin,zmax) - print ">>> Plot2D.draw: logx=%s, logy=%s, logz=%s, resetx=%r, resety=%r"%(logx,logy,logz,resetx,resety) - print ">>> Plot2D.draw: tmargin=%s, bmargin=%s, lmargin=%s, rmargin=%s"%(tmargin,bmargin,lmargin,rmargin) - print ">>> Plot2D.draw: xoffset=%s, yoffset=%s, zoffset=%s"%(xoffset,yoffset,zoffset) - print ">>> Plot2D.draw: xlabeloffset=%s, xlabeloffset=%s, xlabeloffset=%s"%(xlabeloffset,ylabeloffset,zlabeloffset) + print(">>> Plot2D.draw: xmin=%s, xmax=%s, ymin=%s, ymax=%s, zmin=%s, zmax=%s"%(xmin,xmax,ymin,ymax,zmin,zmax)) + print(">>> Plot2D.draw: logx=%s, logy=%s, logz=%s, resetx=%r, resety=%r"%(logx,logy,logz,resetx,resety)) + print(">>> Plot2D.draw: tmargin=%s, bmargin=%s, lmargin=%s, rmargin=%s"%(tmargin,bmargin,lmargin,rmargin)) + print(">>> Plot2D.draw: xoffset=%s, yoffset=%s, 
zoffset=%s"%(xoffset,yoffset,zoffset)) + print(">>> Plot2D.draw: xlabeloffset=%s, xlabeloffset=%s, xlabeloffset=%s"%(xlabeloffset,ylabeloffset,zlabeloffset)) # CANVAS canvas = TCanvas("canvas","canvas",100,100,int(cwidth),int(cheight)) @@ -221,13 +221,13 @@ def draw(self,*args,**kwargs): if xbinlabels: nxbins = hist.GetXaxis().GetNbins() if len(xbinlabels)1: @@ -62,7 +62,7 @@ def __init__(self, histden, *histnums, **kwargs): if isinstance(histnum,TH1) or isinstance(histnum,THStack): ratio = gethistratio(histnum,histden,tag=tag,drawzero=self.drawzero,errorX=errorX) elif isinstance(histnum,TGraph): - LOG.warning("Ratio.init: TGraph ratio not validated! Please check verbose output...") + LOG.warn("Ratio.init: TGraph ratio not validated! Please check verbose output...") ratio = getgraphratio(histnum,histden,tag=tag,drawzero=self.drawzero,errorX=errorX) #elif isinstance(hist,TProfile): # histtemp = hist.ProjectionX(hist.GetName()+"_projx",'E') diff --git a/Plotter/python/plot/Selection.py b/Plotter/python/plot/Selection.py index ddc0b9e18..a7b9531ec 100644 --- a/Plotter/python/plot/Selection.py +++ b/Plotter/python/plot/Selection.py @@ -53,7 +53,7 @@ def __init__(self, *args, **kwargs): self.filename = kwargs.get('filename', self.filename ) # name for files, histograms self.weight = kwargs.get('weight', self.weight ) #if self.selection=="": - # LOG.warning('Selection::Selection - No selection string given for %r!'%(self.name)) + # LOG.warn('Selection::Selection - No selection string given for %r!'%(self.name)) self.context = getcontext(kwargs,self.selection) # context-dependent channel selections self.only = kwargs.get('only', [ ] ) self.veto = kwargs.get('veto', [ ] ) diff --git a/Plotter/python/plot/Stack.py b/Plotter/python/plot/Stack.py index e66901c82..b60f9b671 100644 --- a/Plotter/python/plot/Stack.py +++ b/Plotter/python/plot/Stack.py @@ -129,8 +129,8 @@ def draw(self,*args,**kwargs): for hist in self.exphists+[self.datahist]: if hist: hist.SetBins(*xbins) # set 
binning with xmin>0 if verbosity>=2: - print ">>> Stack.draw: xtitle=%r, ytitle=%r"%(xtitle,ytitle) - print ">>> Stack.draw: xmin=%s, xmax=%s, ymin=%s, ymax=%s, rmin=%s, rmax=%s"%(xmin,xmax,ymin,ymax,rmin,rmax) + print(">>> Stack.draw: xtitle=%r, ytitle=%r"%(xtitle,ytitle)) + print(">>> Stack.draw: xmin=%s, xmax=%s, ymin=%s, ymax=%s, rmin=%s, rmax=%s"%(xmin,xmax,ymin,ymax,rmin,rmax)) # DIVIDE BY BINSIZE if dividebins: diff --git a/Plotter/python/plot/Variable.py b/Plotter/python/plot/Variable.py index 08a00241d..6403217bd 100644 --- a/Plotter/python/plot/Variable.py +++ b/Plotter/python/plot/Variable.py @@ -78,14 +78,14 @@ def __init__(self, name, *args, **kwargs): if self.latex: self.title = makelatex(self.title,units=self.units) if 'ctitle' in kwargs: - for ckey, title in kwargs['ctitle'].iteritems(): + for ckey, title in kwargs['ctitle'].items(): kwargs['ctitle'][ckey] = makelatex(title,units=self.units) if self.only: self.only = ensurelist(self.only) if self.veto: self.veto = ensurelist(self.veto) if self.binlabels and len(self.binlabels)=2: - print ">>> Variable.clone: Old strargs=%r, binargs=%r, kwargs=%r"%(strargs,binargs,kwargs) + print(">>> Variable.clone: Old strargs=%r, binargs=%r, kwargs=%r"%(strargs,binargs,kwargs)) if not strargs: strargs = (kwargs.pop('name',self.name),) # default name if not binargs: # get binning @@ -144,7 +144,7 @@ def clone(self,*args,**kwargs): if cut and self.ctxbins: # change context based on extra cut bins = self.ctxbins.getcontext(cut) # get bins in this context if binargs!=bins and verbosity>=2: - print ">>> Variable.clone: Changing binning %r -> %r because of context %r"%(binargs,bins,cut) + print(">>> Variable.clone: Changing binning %r -> %r because of context %r"%(binargs,bins,cut)) binargs = bins if isinstance(binargs,list): # assume list is bin edges binargs = (binargs,) # force list in tuple @@ -166,11 +166,11 @@ def clone(self,*args,**kwargs): newdict['ctxbins'].default = binargs # change default context newargs = 
strargs+binargs if verbosity>=2: - print ">>> Variable.clone: New args=%r, kwargs=%r"%(newargs,kwargs) + print(">>> Variable.clone: New args=%r, kwargs=%r"%(newargs,kwargs)) newvar = Variable(*newargs,**kwargs) newvar.__dict__.update(newdict) if verbosity>=2: - print ">>> Variable.clone: Cloned %r -> %r"%(self,newvar) + print(">>> Variable.clone: Cloned %r -> %r"%(self,newvar)) return newvar def issame(self,ovar,**kwargs): @@ -208,7 +208,7 @@ def getbins(self,full=False): if self.hasvariablebins(): return self.bins elif full: # get binedges - return [self.min+i*(self.max-self.min)/self.nbins for i in xrange(self.nbins+1)] + return [self.min+i*(self.max-self.min)/self.nbins for i in range(self.nbins+1)] else: return (self.nbins,self.min,self.max) @@ -240,21 +240,21 @@ def changecontext(self,*args,**kwargs): title = self.ctxtitle.getcontext(*args) if title!=None: if verbosity>=3: - print ">>> Variable.changecontext: ctxtitle=%s, args=%r"%(self.ctxtitle.context,args) - print ">>> Variable.changecontext: title=%r -> %r"%(self.title,title) + print(">>> Variable.changecontext: ctxtitle=%s, args=%r"%(self.ctxtitle.context,args)) + print(">>> Variable.changecontext: title=%r -> %r"%(self.title,title)) self.title = title elif verbosity>=3: - print ">>> Variable.changecontext: ctxtitle=%s, args=%r, title=%r (no change)"%(self.ctxtitle.context,args,self.title) + print(">>> Variable.changecontext: ctxtitle=%s, args=%r, title=%r (no change)"%(self.ctxtitle.context,args,self.title)) if self.ctxbins: bins = self.ctxbins.getcontext(*args) if isinstance(bins,list): bins = (bins,) if bins!=None: if verbosity>=3: - print ">>> Variable.changecontext: ctxbins=%s, args=%r"%(self.ctxbins.context,args) - print ">>> Variable.changecontext: bins=%r -> %r"%(self.bins,bins) + print(">>> Variable.changecontext: ctxbins=%s, args=%r"%(self.ctxbins.context,args)) + print(">>> Variable.changecontext: bins=%r -> %r"%(self.bins,bins)) elif verbosity>=3: - print ">>> Variable.changecontext: 
ctxbins=%s, args=%r, bins=%r (no change)"%(self.ctxbins.context,args,self.bins) + print(">>> Variable.changecontext: ctxbins=%s, args=%r, bins=%r (no change)"%(self.ctxbins.context,args,self.bins)) self.setbins(*bins) if self._addoverflow: self.addoverflow() # in case the last bin changed @@ -275,11 +275,11 @@ def changecontext(self,*args,**kwargs): cut = self.ctxcut.getcontext(*args) if cut!=None: if verbosity>=3: - print ">>> Variable.changecontext: ctxcut=%s, args=%r"%(self.ctxcut.context,args) - print ">>> Variable.changecontext: cut=%r -> %r"%(self.cut,cut) + print(">>> Variable.changecontext: ctxcut=%s, args=%r"%(self.ctxcut.context,args)) + print(">>> Variable.changecontext: cut=%r -> %r"%(self.cut,cut)) self.cut = cut elif verbosity>=3: - print ">>> Variable.changecontext: ctxcut=%s, args=%r, cut=%r (no change)"%(self.ctxcut.context,args,self.cut) + print(">>> Variable.changecontext: ctxcut=%s, args=%r, cut=%r (no change)"%(self.ctxcut.context,args,self.cut)) if self.ctxweight: weight = self.ctxweight.getcontext(*args) if weight!=None: @@ -427,7 +427,7 @@ def shiftjme(self,jshift,title=None,**kwargs): newvar.cut = shiftjme(newvar.cut,jshift,**kwargs) LOG.verb("Variable.shiftjme: extra cut = %r -> %r"%(self.cut,newvar.cut),verbosity,2) if newvar.ctxcut: - for key, cut in newvar.ctxcut.context.iteritems(): + for key, cut in newvar.ctxcut.context.items(): newvar.ctxcut.context[key] = shiftjme(cut,jshift,**kwargs) newvar.ctxcut.default = shiftjme(newvar.ctxcut.default,jshift,**kwargs) return newvar @@ -492,7 +492,7 @@ def wrapvariable(*args,**kwargs): return Variable(args) # (xvar,nxbins,xmin,xmax) elif len(args)==1 and isinstance(args[0],Variable): return args[0] - LOG.warning('wrapvariable: Could not unwrap arguments "%s" to a Variable object. Returning None.'%args) + LOG.warn('wrapvariable: Could not unwrap arguments "%s" to a Variable object. 
Returning None.'%args) return None diff --git a/Plotter/python/plot/string.py b/Plotter/python/plot/string.py index f35014114..a026d66af 100644 --- a/Plotter/python/plot/string.py +++ b/Plotter/python/plot/string.py @@ -185,11 +185,11 @@ def makelatex(string,**kwargs): if "GeV" not in string: string += " [GeV]" if cm: - LOG.warning("makelatex: Flagged units are both GeV and cm!") + LOG.warn("makelatex: Flagged units are both GeV and cm!") elif cm: #or 'd_' in string string += " [cm]" if (verbosity>=2 and string!=oldstr) or verbosity>=3: - print ">>> makelatex: %r -> %r"%(oldstr,string) + print(">>> makelatex: %r -> %r"%(oldstr,string)) return string @@ -340,7 +340,7 @@ def joincuts(*cuts,**kwargs): cuts = [c for c in cuts if c and isinstance(c,str)] weight = kwargs.get('weight', False) if any('||' in c and not ('(' in c and ')' in c) for c in cuts): - LOG.warning('joincuts: Be careful with those "or" statements in %s! Not sure how to join...'%(cuts,)) + LOG.warn('joincuts: Be careful with those "or" statements in %s! 
Not sure how to join...'%(cuts,)) for i, cut in enumerate(cuts): if '||' in cut and not ('(' in cut and ')' in cut): cuts[i] = "(%s)"%(cut) @@ -383,7 +383,7 @@ def shift(oldstr,shifttag,vars=["\w+"],**kwargs): newstr = oldstr vars = ensurelist(vars) if re.search(r"(Up|Down)",oldstr): - print "shift: already shifts in %r"%(oldstr) + print("shift: already shifts in %r"%(oldstr)) if kwargs.get('us',True) and len(shifttag)>0 and shifttag[0]!='_': # ensure underscore in front shifttag = '_'+shifttag for oldvar in vars: # shift each jet/MET variable @@ -398,7 +398,7 @@ def shift(oldstr,shifttag,vars=["\w+"],**kwargs): verbstr += "\n>>> %r -> %r"%(oldstr,newstr) else: verbstr += "\n>>> %r\n>>> -> %r"%(oldstr,newstr) - print verbstr + print(verbstr) return newstr @@ -425,7 +425,7 @@ def invertcharge(oldcuts,target='SS',**kwargs): LOG.verbose("invertcharge: oldcuts=%r"%(oldcuts),verbosity,2) LOG.verbose("invertcharge: matchOS=%r, matchSS=%r"%(matchOS,matchSS),verbosity,2) if (len(matchOS)+len(matchSS))>1: - LOG.warning('invertcharge: more than one charge match (%d OS, %d SS) in %r'%(len(matchOS),len(matchSS),oldcuts)) + LOG.warn('invertcharge: more than one charge match (%d OS, %d SS) in %r'%(len(matchOS),len(matchSS),oldcuts)) if target=='OS': for match in matchSS: newcuts = oldcuts.replace(match,"q_1*q_2<0") # invert SS to OS elif target=='SS': @@ -456,15 +456,15 @@ def invertcharge(oldcuts,target='SS',**kwargs): ### ### # REPLACE ### if match_iso_1 and match_iso_2: -### if len(match_iso_1)>1: LOG.warning("invertIsolationNanoAOD: More than one iso_1 match! cuts=%s"%cuts) -### if len(match_iso_2)>1: LOG.warning("invertIsolationNanoAOD: More than one iso_2 match! cuts=%s"%cuts) +### if len(match_iso_1)>1: LOG.warn("invertIsolationNanoAOD: More than one iso_1 match! cuts=%s"%cuts) +### if len(match_iso_2)>1: LOG.warn("invertIsolationNanoAOD: More than one iso_2 match! 
cuts=%s"%cuts) ### if remove1: ### cuts = cuts.replace(match_iso_1[0],'') ### cuts = cuts.replace(match_iso_2[0],'') ### if iso_relaxed: ### cuts = combineCuts(cuts,iso_relaxed) ### elif cuts and match_iso_1 or match_iso_2: -### LOG.warning('invertIsolationNanoAOD: %d iso_1 and %d iso_2 matches! cuts=%r'%(len(match_iso_1),len(match_iso_2),cuts)) +### LOG.warn('invertIsolationNanoAOD: %d iso_1 and %d iso_2 matches! cuts=%r'%(len(match_iso_1),len(match_iso_2),cuts)) ### cuts = cleanBooleans(cuts) ### ### LOG.verbose(' %r\n>>> -> %r\n>>>'%(cuts0,cuts),verbosity,level=2) @@ -488,8 +488,8 @@ def invertcharge(oldcuts,target='SS',**kwargs): ### cjets = re.findall(r"&* *ncjets(?:20)? *[<=>]=? *\d+ *",cuts) ### cjets += re.findall(r"&* *nc?btag(?:20)? *[<=>]=? *ncjets(?:20)? *",cuts) ### LOG.verbose('relaxJetSelection:\n>>> btags = %s\n>>> cjets = %r' % (btags,cjets),verbosity,level=2) -### if len(btags)>1: LOG.warning('relaxJetSelection: More than one btags match! Only using first instance in cuts %r'%cuts) -### if len(cjets)>1: LOG.warning('relaxJetSelection: More than one cjets match! Only using first instance in cuts %r'%cuts) +### if len(btags)>1: LOG.warn('relaxJetSelection: More than one btags match! Only using first instance in cuts %r'%cuts) +### if len(cjets)>1: LOG.warn('relaxJetSelection: More than one cjets match! Only using first instance in cuts %r'%cuts) ### ### # REPLACE ### #if len(btags): @@ -504,7 +504,7 @@ def invertcharge(oldcuts,target='SS',**kwargs): ### if btags_relaxed: cuts = "%s && %s && %s" % (cuts,btags_relaxed,cjets_relaxed) ### else: cuts = "%s && %s" % (cuts, cjets_relaxed) ### #elif len(btags) or len(cjets): -### # LOG.warning("relaxJetSelection: %d btags and %d cjets matches! cuts=%s"%(len(btags),len(cjets),cuts)) +### # LOG.warn("relaxJetSelection: %d btags and %d cjets matches! 
cuts=%s"%(len(btags),len(cjets),cuts)) ### cuts = cuts.lstrip(' ').lstrip('&').lstrip(' ') ### ### LOG.verbose(' %r\n>>> -> %r\n>>>'%(cuts0,cuts),verbosity,level=2) @@ -567,9 +567,9 @@ def invertcharge(oldcuts,target='SS',**kwargs): ### elif len(match)>1: ### for match, genmatch in match: ### if '>0' in genmatch.replace(' ','') or '!=0' in genmatch.replace(' ',''): -### LOG.warning('vetojtf: more than one "genmatch" match (%d) in %r, ignoring...'%(len(match),cuts)) +### LOG.warn('vetojtf: more than one "genmatch" match (%d) in %r, ignoring...'%(len(match),cuts)) ### return cuts -### LOG.warning('vetojtf: more than one "genmatch" match (%d) in %r, only looking at first match...'%(len(match),cuts)) +### LOG.warn('vetojtf: more than one "genmatch" match (%d) in %r, only looking at first match...'%(len(match),cuts)) ### match, genmatch = match[0] ### genmatch = genmatch.replace(' ','') ### subcuts0 = stripWeights(cuts) @@ -581,7 +581,7 @@ def invertcharge(oldcuts,target='SS',**kwargs): ### elif not '0' in genmatch: # "genmatch_2!=*" ### subcuts1 = combineCuts(subcuts0,"genmatch_2>0") ### elif "=0" in genmatch: # "genmatch_2*=6" -### LOG.warning('vetojtf: selection %r with %r set to "0"!'%(cuts,genmatch)) +### LOG.warn('vetojtf: selection %r with %r set to "0"!'%(cuts,genmatch)) ### subcuts1 = "0" ### elif '<' in genmatch: # "genmatch_2>*" ### subcuts1 = combineCuts(subcuts0,"genmatch_2>0") diff --git a/Plotter/python/plot/utils.py b/Plotter/python/plot/utils.py index 6075f28b3..899c6a2cc 100644 --- a/Plotter/python/plot/utils.py +++ b/Plotter/python/plot/utils.py @@ -28,7 +28,7 @@ def normalize(*hists,**kwargs): if integral: hist.Scale(scale/integral) else: - LOG.warning("norm: Could not normalize; integral = 0!") + LOG.warn("norm: Could not normalize; integral = 0!") def getframe(pad,hist,xmin=None,xmax=None,**kwargs): @@ -60,7 +60,7 @@ def close(*hists,**kwargs): for hist in hists: if isinstance(hist,THStack): if verbosity>1: - print '>>> close: Deleting histograms 
from stack "%s"...'%(hist.GetName()) + print('>>> close: Deleting histograms from stack "%s"...'%(hist.GetName())) for subhist in hist.GetStack(): deletehist(subhist,**kwargs) deletehist(hist,**kwargs) @@ -79,10 +79,10 @@ def gethist(hists,*searchterms,**kwargs): if match(searchterms,hist.GetName(),**kwargs): matches.append(hist) if not matches and warning: - LOG.warning("gethist: Did not find a historgram with searchterms %s..."%(quotestrs(searchterms))) + LOG.warn("gethist: Did not find a historgram with searchterms %s..."%(quotestrs(searchterms))) elif unique: if len(matches)>1: - LOG.warning("gethist: Found more than one match to %s. Using first match only: %s"%( + LOG.warn("gethist: Found more than one match to %s. Using first match only: %s"%( quotestrs(searchterms),quotestrs(h.GetName() for h in matches))) return matches[0] return matches @@ -104,9 +104,9 @@ def deletehist(*hists,**kwargs): elif hname: gDirectory.Delete(hist.GetName()) else: - LOG.warning("deletehist: %s %s has no name!"%(hclass,hist)) + LOG.warn("deletehist: %s %s has no name!"%(hclass,hist)) elif warn: - LOG.warning("deletehist: %s is already %s"%(hclass,hist)) + LOG.warn("deletehist: %s is already %s"%(hclass,hist)) #except AttributeError: # print ">>> AttributeError: " # raise AttributeError @@ -158,7 +158,7 @@ def grouphists(hists,searchterms,name=None,title=None,color=None,**kwargs): if close: deletehist(hist) else: - LOG.warning("grouphists: Did not find a histogram with searchterms %s..."%(quotestrs(searchterms))) + LOG.warn("grouphists: Did not find a histogram with searchterms %s..."%(quotestrs(searchterms))) return histsum @@ -179,7 +179,7 @@ def getTGraphRange(graphs,min=+10e10,max=-10e10,margin=0.0,axis='y'): for graph in graphs: npoints = graph.GetN() x, y = Double(), Double() - for i in xrange(0,npoints): + for i in range(0,npoints): graph.GetPoint(i,x,y) vup = getUp(graph,i) vlow = getLow(graph,i) @@ -235,12 +235,12 @@ def getbinedges(hist,**kwargs): verbosity = 
LOG.getverbosity(kwargs) bins = [ ] if isinstance(hist,TH1): - for i in xrange(1,hist.GetXaxis().GetNbins()+1): + for i in range(1,hist.GetXaxis().GetNbins()+1): low = round(hist.GetXaxis().GetBinLowEdge(i),9) up = round(hist.GetXaxis().GetBinUpEdge(i),9) bins.append((low,up)) else: - for i in xrange(0,hist.GetN()): + for i in range(0,hist.GetN()): x, y = Double(), Double() hist.GetPoint(i,x,y) low = round(x-hist.GetErrorXlow(i),9) @@ -260,7 +260,7 @@ def havesamebins(hist1,hist2,**kwargs): xbins2 = hist2.GetXaxis().GetXbins() if xbins1.GetSize()!=xbins2.GetSize(): return False - for i in xrange(xbins1.GetSize()): + for i in range(xbins1.GetSize()): #print xbins1[i] if xbins1[i]!=xbins2[i]: return False @@ -276,8 +276,8 @@ def havesamebins(hist1,hist2,**kwargs): bins1 = [ (a+b)/2 for a,b in bins1] bins2 = [ (a+b)/2 for a,b in bins2] if bins1!=bins2: - print "bins1 =",bins1 - print "bins2 =",bins2 + print("bins1 =",bins1) + print("bins2 =",bins2) return bins1==bins2 @@ -303,7 +303,7 @@ def gethistratio(histnum,histden,**kwargs): #rhist.Divide(histden) TAB = LOG.table("%5d %9.3f %9.3f %9.3f %9.3f +- %7.3f",verb=verbosity,level=3) TAB.printheader("ibin","xval","yden","ynum","ratio","error") - for ibin in xrange(0,ncells+1): + for ibin in range(0,ncells+1): yden = histden.GetBinContent(ibin) ynum = histnum.GetBinContent(ibin) enum = histnum.GetBinError(ibin) #max(histnum.GetBinErrorLow(ibin),histnum.GetBinErrorUp(ibin)) @@ -319,7 +319,7 @@ def gethistratio(histnum,histden,**kwargs): rhist.SetBinContent(ibin,ratio) rhist.SetBinError(ibin,erat) else: # works only for TH1 - LOG.warning("gethistratio: %r and %r do not have the same bins..."%(histnum,histden)) + LOG.warn("gethistratio: %r and %r do not have the same bins..."%(histnum,histden)) TAB = LOG.table("%5d %9.3f %9.3f %5d %9.3f %9.3f %5d %8.3f +- %7.3f",verb=verbosity,level=3) TAB.printheader("iden","xval","yden","inum","xval","ynum","ratio","error") for iden in range(0,ncells+1): @@ -453,7 +453,7 @@ def 
geterrorband(*hists,**kwargs): hist0 = hists[0] nbins = hist0.GetNbinsX() if sysvars and isinstance(sysvars,dict): - sysvars = [v for k, v in sysvars.iteritems()] + sysvars = [v for k, v in sysvars.items()] error = TGraphAsymmErrors() error.SetName(name) error.SetTitle(title) @@ -512,7 +512,7 @@ def dividebybinsize(hist,**kwargs): graph.SetTitle(hist.GetTitle()) copystyle(graph,hist) ip = 0 # skip zero bins if not zero - for ibin in xrange(1,nbins+1): + for ibin in range(1,nbins+1): xval = hist.GetXaxis().GetBinCenter(ibin) width = hist.GetXaxis().GetBinWidth(ibin) xerr = width/2 if errorX else 0 @@ -532,7 +532,7 @@ def dividebybinsize(hist,**kwargs): ip += 1 return graph else: - for ibin in xrange(0,nbins+2): + for ibin in range(0,nbins+2): xval = hist.GetXaxis().GetBinCenter(ibin) width = hist.GetXaxis().GetBinWidth(ibin) yval = hist.GetBinContent(ibin) @@ -555,7 +555,7 @@ def normalizebins(oldstack,**kwargs): for oldhist in oldstack.GetHists(): hname = "%s_%s"%(oldhist.GetName(),tag) newhist = oldhist.Clone(hname) - for ibin in xrange(0,nbins+2): + for ibin in range(0,nbins+2): ytot = oldstack.GetStack().Last().GetBinContent(ibin) if ytot>0: newhist.SetBinContent(ibin,newhist.GetBinContent(ibin)/ytot) @@ -573,7 +573,7 @@ def getconstanthist(oldhist,yval=1,**kwargs): LOG.verbose('getconstanthist: %r -> %r'%(oldhist.GetName(),hname),verbosity,2) newhist = oldhist.Clone(hname) newhist.Reset() - for ibin in xrange(0,oldhist.GetXaxis().GetNbins()+2): + for ibin in range(0,oldhist.GetXaxis().GetNbins()+2): newhist.SetBinContent(ibin,yval) # set to same value newhist.SetBinError(ibin,yerr) # set to same value return newhist @@ -656,7 +656,7 @@ def capoff(hist,ymin=None,ymax=None,verb=0): hist.SetBinContent(i,yval) hist.SetBinError(i,yerr) if verb>=2: - print ">>> capoff: Found %d/%d values < %s, and %d/%d values > %s for histogram %s"%(nmin,ntot,ymin,nmax,ntot,ymax,hist) + print(">>> capoff: Found %d/%d values < %s, and %d/%d values > %s for histogram 
%s"%(nmin,ntot,ymin,nmax,ntot,ymax,hist)) return nmin+nmax # number of reset bins @@ -691,30 +691,30 @@ def reducehist2d(oldhist,**kwargs): # FIND INDICES TO KEEP if xlabels: if verb>=1: - print ">>> reducehist2d: Filter xlabels..." - for ix in xrange(1,oldnxbins+1): # x axis + print(">>> reducehist2d: Filter xlabels...") + for ix in range(1,oldnxbins+1): # x axis if ix in ix_rm: continue label = oldhist.GetXaxis().GetBinLabel(ix) if any(s.match(label) for s in xlabels): ix_keep.add(ix) elif not ix_keep: # remove bins - ix_keep = set(i for i in xrange(1,oldnxbins+1) if i not in ix_rm) + ix_keep = set(i for i in range(1,oldnxbins+1) if i not in ix_rm) if ylabels: if verb>=1: - print ">>> reducehist2d: Filter ylabels..." - for iy in xrange(1,oldnybins+1): # y axis + print(">>> reducehist2d: Filter ylabels...") + for iy in range(1,oldnybins+1): # y axis if iy in iy_rm: continue label = oldhist.GetYaxis().GetBinLabel(iy) if any(s.match(label) for s in ylabels): iy_keep.add(iy) elif not iy_keep: # remove bins - iy_keep = set(i for i in xrange(1,oldnybins+1) if i not in iy_rm) + iy_keep = set(i for i in range(1,oldnybins+1) if i not in iy_rm) # SANITY CHECK newnxbins = len(ix_keep) newnybins = len(iy_keep) if newnxbins==0 or newnybins==0: - print ">>> reducehist2d: Cannot create new histogram with %d x %d dimensions! Ignoring..."%(newnxbins,newnybins) + print(">>> reducehist2d: Cannot create new histogram with %d x %d dimensions! 
Ignoring..."%(newnxbins,newnybins)) return None # SORT @@ -727,12 +727,12 @@ def reducehist2d(oldhist,**kwargs): # CREATE & FILL REDUCED HISTOGRAM if verb>=1: - print ">>> reducehist2d: Reduce %dx%d to %dx%d histogram %r..."%(oldnxbins,oldnybins,newnxbins,newnybins,newhname) + print(">>> reducehist2d: Reduce %dx%d to %dx%d histogram %r..."%(oldnxbins,oldnybins,newnxbins,newnybins,newhname)) newhist = TH2F(newhname,newhname,newnxbins,0,newnxbins,newnybins,0,newnybins) for ix_new, ix_old in enumerate(ix_keep,1): # x axis label = oldhist.GetXaxis().GetBinLabel(ix_old) if verb>=2: - print ">>> reducehist2d: ix=%d -> %d, %r"%(ix_old,ix_new,label) + print(">>> reducehist2d: ix=%d -> %d, %r"%(ix_old,ix_new,label)) newhist.GetXaxis().SetBinLabel(ix_new,label) for iy_new, iy_old in enumerate(iy_keep,1): # y axis zval = oldhist.GetBinContent(ix_old,iy_old) @@ -740,7 +740,7 @@ def reducehist2d(oldhist,**kwargs): for iy_new, iy_old in enumerate(iy_keep,1): # y axis label = oldhist.GetYaxis().GetBinLabel(iy_old) if verb>=2: - print ">>> reducehist2d: iy=%d -> %d, %r"%(iy_old,iy_new,label) + print(">>> reducehist2d: iy=%d -> %d, %r"%(iy_old,iy_new,label)) newhist.GetYaxis().SetBinLabel(iy_new,label) return newhist @@ -822,15 +822,15 @@ def getbincenter(axis,ibin,amin=None,amax=None): try: newhist = TH2D(hname,hname,*bins) except TypeError as err: - print ">>> resetrange: hname=%r bins=%r"%(hname,bins) + print(">>> resetrange: hname=%r bins=%r"%(hname,bins)) raise err if verbosity>=1: - print ">>> resetrange: (nxbins,xmin,xmax) = (%s,%s,%s) -> (%s,%s,%s)"%( + print(">>> resetrange: (nxbins,xmin,xmax) = (%s,%s,%s) -> (%s,%s,%s)"%( oldhist.GetXaxis().GetNbins(),oldhist.GetXaxis().GetXmin(),oldhist.GetXaxis().GetXmax(), - newhist.GetXaxis().GetNbins(),newhist.GetXaxis().GetXmin(),newhist.GetXaxis().GetXmax()) - print ">>> resetrange: (nybins,ymin,ymax) = (%s,%s,%s) -> (%s,%s,%s)"%( + newhist.GetXaxis().GetNbins(),newhist.GetXaxis().GetXmin(),newhist.GetXaxis().GetXmax())) + 
print(">>> resetrange: (nybins,ymin,ymax) = (%s,%s,%s) -> (%s,%s,%s)"%( oldhist.GetYaxis().GetNbins(),oldhist.GetYaxis().GetXmin(),oldhist.GetYaxis().GetXmax(), - newhist.GetYaxis().GetNbins(),newhist.GetYaxis().GetXmin(),newhist.GetYaxis().GetXmax()) + newhist.GetYaxis().GetNbins(),newhist.GetYaxis().GetXmin(),newhist.GetYaxis().GetXmax())) for iyold in range(0,oldhist.GetYaxis().GetNbins()+2): for ixold in range(0,oldhist.GetXaxis().GetNbins()+2): xval = getbincenter(oldhist.GetXaxis(),ixold,xmin,xmax) diff --git a/Plotter/python/sample/HistSet.py b/Plotter/python/sample/HistSet.py index 439824859..a7a94873f 100644 --- a/Plotter/python/sample/HistSet.py +++ b/Plotter/python/sample/HistSet.py @@ -100,30 +100,30 @@ def printall(self,full=False): """Print for debugging purposes.""" nvars = len(self.vars) if islist(self.vars) else 1 ndata = len(self.vars) if isinstance(self.vars,dict) else 1 - print ">>> HistSet: nvars=%d, ndata=%d, nexp=%d, nsig=%d"%( - nvars,ndata,len(self.exp),len(self.signal)) + print(">>> HistSet: nvars=%d, ndata=%d, nexp=%d, nsig=%d"%( + nvars,ndata,len(self.exp),len(self.signal))) if full: - print ">>> vars=%s"%(self.vars) - print ">>> data=%s"%(self.data) - print ">>> exp=%s"%(self.exp) - print ">>> sig=%s"%(self.signal) + print(">>> vars=%s"%(self.vars)) + print(">>> data=%s"%(self.data)) + print(">>> exp=%s"%(self.exp)) + print(">>> sig=%s"%(self.signal)) else: - print ">>> vars = %s"%((', '.join(repr(str(v)) for v in self.vars) if isinstance(self.vars,list) else self.vars)) + print(">>> vars = %s"%((', '.join(repr(str(v)) for v in self.vars) if isinstance(self.vars,list) else self.vars))) if self.data and isinstance(self.data,dict): - print ">>> data = { %s }"%(', '.join(repr(h.GetName()) for v,h in self.data.iteritems())) + print(">>> data = { %s }"%(', '.join(repr(h.GetName()) for v,h in self.data.items()))) elif isinstance(self.data,TH1): - print ">>> data = %r"%(self.data.GetName()) + print(">>> data = %r"%(self.data.GetName())) else: 
- print ">>> data = %s"%(self.data) + print(">>> data = %s"%(self.data)) def printset(dtype,set): """Help function to print exp and signal histogram sets (dictionaries or lists).""" if isinstance(set,dict): - for var, hlist in set.iteritems(): - print ">>> %s['%s'] = [ %s ]"%(dtype,var,', '.join(repr(h.GetName()) for h in hlist)) + for var, hlist in set.items(): + print(">>> %s['%s'] = [ %s ]"%(dtype,var,', '.join(repr(h.GetName()) for h in hlist))) elif isinstance(set,list) and set: - print ">>> %-4s = [ %s ]"%(dtype,', '.join(repr(h.GetName()) for h in set)) + print(">>> %-4s = [ %s ]"%(dtype,', '.join(repr(h.GetName()) for h in set))) else: - print ">>> %-4s = %s"%(dtype,set) + print(">>> %-4s = %s"%(dtype,set)) printset('exp',self.exp) printset('sig',self.signal) diff --git a/Plotter/python/sample/MergedSample.py b/Plotter/python/sample/MergedSample.py index e0fb8435f..6908c98bc 100644 --- a/Plotter/python/sample/MergedSample.py +++ b/Plotter/python/sample/MergedSample.py @@ -167,10 +167,10 @@ def getentries(self, selection, **kwargs): # PRINT if verbosity>=3: - print ">>>\n>>> MergedSample.getentries: %s"%(color(self.name,color="grey")) - print ">>> entries: %d"%(nevents) - print ">>> scale: %.6g (scale=%.6g, norm=%.6g)"%(scale,self.scale,self.norm) - print ">>> %r"%(cuts) + print(">>>\n>>> MergedSample.getentries: %s"%(color(self.name,color="grey"))) + print(">>> entries: %d"%(nevents)) + print(">>> scale: %.6g (scale=%.6g, norm=%.6g)"%(scale,self.scale,self.norm)) + print(">>> %r"%(cuts)) return nevents @@ -238,8 +238,8 @@ def gethist(self, *args, **kwargs): if sumhist.GetEntries()>nentries: nentries = sumhist.GetEntries() integral = sumhist.Integral() - print ">>>\n>>> MergedSample.gethist - %s"%(color(name,color="grey")) - print ">>> entries: %d (%.2f integral)"%(nentries,integral) + print(">>>\n>>> MergedSample.gethist - %s"%(color(name,color="grey"))) + print(">>> entries: %d (%.2f integral)"%(nentries,integral)) if issingle: return sumhists[0] @@ -257,7 
+257,7 @@ def gethist2D(self, *args, **kwargs): kwargs['weight'] = joinweights(kwargs.get('weight', ""), self.weight ) # pass scale down kwargs['scale'] = kwargs.get('scale', 1.0) * self.scale * self.norm # pass scale down if verbosity>=2: - print ">>>\n>>> MergedSample.gethist2D: %s: %s"%(color(name,color="grey"), self.fnameshort) + print(">>>\n>>> MergedSample.gethist2D: %s: %s"%(color(name,color="grey"), self.fnameshort)) #print ">>> norm=%.4f, scale=%.4f, total %.4f"%(self.norm,kwargs['scale'],self.scale) # HISTOGRAMS diff --git a/Plotter/python/sample/Sample.py b/Plotter/python/sample/Sample.py index 7c936efdd..0178140b8 100644 --- a/Plotter/python/sample/Sample.py +++ b/Plotter/python/sample/Sample.py @@ -110,18 +110,18 @@ def __repr__(self): @staticmethod def printheader(title=None,merged=True,justname=25,justtitle=25): if title!=None: - print ">>> %s"%(title) + print(">>> %s"%(title)) name = "Sample name".ljust(justname) title = "title".ljust(justtitle) if merged: - print ">>> \033[4m%s %s %10s %12s %17s %9s %s\033[0m"%( - name,title,"xsec [pb]","nevents","sumweights","norm","weight"+' '*8) + print(">>> \033[4m%s %s %10s %12s %17s %9s %s\033[0m"%( + name,title,"xsec [pb]","nevents","sumweights","norm","weight"+' '*8)) else: - print ">>> \033[4m%s %s %s\033[0m"%(name,title+' '*5,"Extra cut"+' '*18) + print(">>> \033[4m%s %s %s\033[0m"%(name,title+' '*5,"Extra cut"+' '*18)) def printrow(self,**kwargs): - print self.row(**kwargs) + print(self.row(**kwargs)) def row(self,pre="",indent=0,justname=25,justtitle=25,merged=True,split=True,colpass=False): """Returns string that can be used as a row in a samples summary table""" @@ -148,15 +148,15 @@ def splitrows(self,indent=0,justname=25,justtitle=25): def printobjs(self,title="",file=False): """Print all sample objects recursively.""" if isinstance(self,MergedSample): - print ">>> %s%r"%(title,self) + print(">>> %s%r"%(title,self)) for sample in self.samples: sample.printobjs(title+" ",file=file) elif file: - print 
">>> %s%r %s"%(title,self,self.filename) + print(">>> %s%r %s"%(title,self,self.filename)) else: - print ">>> %s%r"%(title,self) + print(">>> %s%r"%(title,self)) if self.splitsamples: - print ">>> %s Split samples:"%(title) + print(">>> %s Split samples:"%(title)) for sample in self.splitsamples: sample.printobjs(title+" ",file=file) @@ -214,12 +214,12 @@ def gethist_from_file(self,hname,tag="",close=True,**kwargs): mode = kwargs.get('mode', None ) # if mode=='sumw': add samples together with normalized weights hist = None if verbosity>=2: - print ">>> Sample.gethist_from_file: %s%r, %r, mode=%r, incl=%r"%(indent,self.name,hname,mode,incl) + print(">>> Sample.gethist_from_file: %s%r, %r, mode=%r, incl=%r"%(indent,self.name,hname,mode,incl)) if isinstance(self,MergedSample): - #print ">>> Sample.getHistFromFile: %sincl=%r, sample_incl=%r"%(indent,incl,self.sample_incl) + #print(">>> Sample.getHistFromFile: %sincl=%r, sample_incl=%r"%(indent,incl,self.sample_incl)) if incl and self.sample_incl: # only get histogram from inclusive sample if verbosity>=2: - print ">>> Sample.gethist_from_file: %sOnly use inclusive sample %r!"%(indent,self.sample_incl.name) + print(">>> Sample.gethist_from_file: %sOnly use inclusive sample %r!"%(indent,self.sample_incl.name)) hist = self.sample_incl.gethist_from_file(hname,tag=tag,indent=indent+" ",incl=False,verb=verbosity) hist.SetDirectory(0) else: # add histograms from each sub sample @@ -249,17 +249,17 @@ def gethist_from_file(self,hname,tag="",close=True,**kwargs): hist.SetName(hname+tag) hist.SetDirectory(0) else: - #print file, hist + #print(file, hist) LOG.warning("Sample.gethist_from_file: Could not find %r in %s!"%(hname,self.filename)) if file and close: file.Close() if hist and mode=='sumw' and norm>0: if verbosity>=2: - print ">>> Sample.gethist_from_file: %s%r: scale=%s, norm=%s, %s"%(indent,self.name,scale,norm,fname) #,hist) - print ">>> Sample.gethist_from_file: %sbin 5 = %s, xsec=%s, nevts=%s, 
sumw=%s"%(indent,hist.GetBinContent(5),self.xsec,self.nevents,self.sumweights) + print(">>> Sample.gethist_from_file: %s%r: scale=%s, norm=%s, %s"%(indent,self.name,scale,norm,fname)) #,hist) + print(">>> Sample.gethist_from_file: %sbin 5 = %s, xsec=%s, nevts=%s, sumw=%s"%(indent,hist.GetBinContent(5),self.xsec,self.nevents,self.sumweights)) hist.Scale(scale*norm) if verbosity>=2: - print ">>> Sample.gethist_from_file: %sbin 5 = %s after normalization"%(indent,hist.GetBinContent(5)) + print(">>> Sample.gethist_from_file: %sbin 5 = %s after normalization"%(indent,hist.GetBinContent(5))) return hist @property @@ -376,7 +376,7 @@ def reload(self,**kwargs): verbosity = LOG.getverbosity(kwargs) if self.file: if verbosity>=4: - print "Sample.reload: closing and deleting %s with content:"%(self.file.GetName()) + print("Sample.reload: closing and deleting %s with content:"%(self.file.GetName())) self.file.ls() self.file.Close() del self._file @@ -399,7 +399,7 @@ def close(self,**kwargs): verbosity = LOG.getverbosity(kwargs) if self._file: if verbosity>=4: - print "Sample.close: closing and deleting %s with content:"%(self._file.GetName()) + print("Sample.close: closing and deleting %s with content:"%(self._file.GetName())) self._file.ls() self._file.Close() del self._file @@ -675,10 +675,10 @@ def getentries(self, selection, **kwargs): # PRINT if verbosity>=3: - print ">>>\n>>> Sample.getentries: %s, %s"%(color(self.name,color="grey"),self.fnameshort) - print ">>> entries: %d"%(nevents) - print ">>> scale: %.6g (scale=%.6g, norm=%.6g)"%(scale,self.scale,self.norm) - print ">>> %r"%(cuts) + print(">>>\n>>> Sample.getentries: %s, %s"%(color(self.name,color="grey"),self.fnameshort)) + print(">>> entries: %d"%(nevents)) + print(">>> scale: %.6g (scale=%.6g, norm=%.6g)"%(scale,self.scale,self.norm)) + print(">>> %r"%(cuts)) return nevents @@ -785,16 +785,16 @@ def gethist(self, *args, **kwargs): # PRINT if verbosity>=3: - print ">>>\n>>> Sample.gethist: %s, 
%s"%(color(self.name,color="grey"),self.fnameshort) - print ">>> entries: %d (%.2f integral)"%(nentries,integral) - print ">>> scale: %.6g (scale=%.6g, norm=%.6g, xsec=%.6g, nevents=%.6g, sumw=%.6g)"%(scale,self.scale,self.norm,self.xsec,self.nevents,self.sumweights) + print(">>>\n>>> Sample.gethist: %s, %s"%(color(self.name,color="grey"),self.fnameshort)) + print(">>> entries: %d (%.2f integral)"%(nentries,integral)) + print(">>> scale: %.6g (scale=%.6g, norm=%.6g, xsec=%.6g, nevents=%.6g, sumw=%.6g)"%(scale,self.scale,self.norm,self.xsec,self.nevents,self.sumweights)) if verbosity>=4: - print ">>> self.weight=%r, self.extraweight=%r, kwargs.get('weight','')=%r, "%(self.weight,self.extraweight,kwargs.get('weight',"")) - print ">>> %r"%(cuts) + print(">>> self.weight=%r, self.extraweight=%r, kwargs.get('weight','')=%r, "%(self.weight,self.extraweight,kwargs.get('weight',""))) + print(">>> %r"%(cuts)) if verbosity>=4: for var, varexp, hist in zip(variables,varexps,hists): - print '>>> entries=%d (%.1f integral) for variable %r, varexp=%r'%(hist.GetEntries(),hist.Integral(),var.name,varexp) - #print '>>> Variable %r: cut=%r, weight=%r, varexp=%r'%(var.name,var.cut,var.weight,varexp) + print('>>> entries=%d (%.1f integral) for variable %r, varexp=%r'%(hist.GetEntries(),hist.Integral(),var.name,varexp)) + #print('>>> Variable %r: cut=%r, weight=%r, varexp=%r'%(var.name,var.cut,var.weight,varexp)) if verbosity>=5: printhist(hist,pre=">>> ") @@ -868,14 +868,14 @@ def gethist2D(self, *args, **kwargs): # PRINT if verbosity>=2: - print ">>>\n>>> Sample.gethist2D - %s: %s"%(color(name,color="grey"),self.fnameshort) - print ">>> scale: %.6g (scale=%.6g, norm=%.6g)"%(scale,self.scale,self.norm) - print ">>> entries: %d (%.2f integral)"%(nentries,integral) - print ">>> %s"%cuts + print(">>>\n>>> Sample.gethist2D - %s: %s"%(color(name,color="grey"),self.fnameshort)) + print(">>> scale: %.6g (scale=%.6g, norm=%.6g)"%(scale,self.scale,self.norm)) + print(">>> entries: %d (%.2f 
integral)"%(nentries,integral)) + print(">>> %s"%cuts) if verbosity>=4: for var, varexp, hist in zip(variables,varexps,hists): - print '>>> Variables (%r,%r): varexp=%r, entries=%d, integral=%d'%(var[0].name,var[1].name,varexp,hist.GetEntries(),hist.Integral()) - #print '>>> Variable %r: cut=%r, weight=%r, varexp=%r'%(var.name,var.cut,var.weight,varexp) + print('>>> Variables (%r,%r): varexp=%r, entries=%d, integral=%d'%(var[0].name,var[1].name,varexp,hist.GetEntries(),hist.Integral())) + #print('>>> Variable %r: cut=%r, weight=%r, varexp=%r'%(var.name,var.cut,var.weight,varexp)) if verbosity>=5: printhist(hist,pre=">>> ") diff --git a/Plotter/python/sample/SampleSet.py b/Plotter/python/sample/SampleSet.py index 157d7069e..5e9b7f456 100644 --- a/Plotter/python/sample/SampleSet.py +++ b/Plotter/python/sample/SampleSet.py @@ -12,7 +12,6 @@ from TauFW.common.tools.LoadingBar import LoadingBar - class SampleSet(object): """Collect samples into one set to draw histgrams from trees for data/MC comparisons, and allow data-driven background estimations. 
@@ -99,13 +98,13 @@ def printtable(self,title=None,merged=True,split=True): import TauFW.Plotter.sample.utils as GLOB if not title: name = self.name+" samples" if self.name else "Samples" - print ">>>\n>>> %s with integrated luminosity L = %s / fb at sqrt(s) = 13 TeV"%(name,GLOB.lumi) + print(">>>\n>>> %s with integrated luminosity L = %s / fb at sqrt(s) = 13 TeV"%(name,GLOB.lumi)) justname = 2+max(s.get_max_name_len() for s in self.samples) justtitle = 2+max(s.get_max_title_len() for s in self.samples) Sample.printheader(title,justname=justname,justtitle=justtitle,merged=merged) for sample in self.samples: sample.printrow(justname=justname,justtitle=justtitle,merged=merged,split=split) - print ">>> " + print(">>> ") def __iter__(self): """Start iteration over samples.""" @@ -343,9 +342,9 @@ def gethists(self, *args, **kwargs): """Create and fill histograms for all samples and return lists of histograms.""" verbosity = LOG.getverbosity(kwargs,self) if verbosity>=1: - print ">>> gethists" + print(">>> gethists") variables, selection, issingle = unwrap_gethist_args(*args) - datavars = filter(lambda v: v.data,variables) # filter out gen-level variables + datavars = [v for v in variables if v.data] # filter out gen-level variables dodata = kwargs.get('data', True ) # create data hists domc = kwargs.get('mc', True ) # create expected (SM background) hists doexp = kwargs.get('exp', domc ) # create expected (SM background) hists @@ -413,9 +412,9 @@ def gethists(self, *args, **kwargs): if verbosity>=2: if not ('QCD' in task or 'JFR' in task): LOG.header("Creating histograms for %s"%selection) #.title - print ">>> variables: %s"%(quotestrs([v.filename for v in variables])) - #print ">>> split=%s, makeQCD=%s, makeJTF=%s, nojtf=%s, keepWJ=%s"%(split,makeQCD,makeJTF,nojtf,keepWJ) - print '>>> with extra weights "%s" for MC and "%s" for data'%(weight,dataweight) + print(">>> variables: %s"%(quotestrs([v.filename for v in variables]))) + #print(">>> split=%s, makeQCD=%s, 
makeJTF=%s, nojtf=%s, keepWJ=%s"%(split,makeQCD,makeJTF,nojtf,keepWJ)) + print('>>> with extra weights "%s" for MC and "%s" for data'%(weight,dataweight)) elif self.loadingbar and verbosity<=1: bar = LoadingBar(len(samples),width=16,pre=">>> %s: "%(task),counter=True,remove=True) # %s: selection.title @@ -485,9 +484,9 @@ def gethists(self, *args, **kwargs): # YIELDS if verbosity>=2 and len(variables)>0: var = variables[0] - print ">>> selection:" - print ">>> %r"%(selection.selection) - print ">>> yields: " + print(">>> selection:") + print(">>> %r"%(selection.selection)) + print(">>> yields: ") TAB = LOG.table("%11.1f %11.2f %r") TAB.printheader("entries","integral","hist name") totint = 0 @@ -568,7 +567,7 @@ def gethists2D(self, *args, **kwargs): ## ADD JTF #if makeJTF: - # print "CHECK IMPLEMENTATION!" + # print("CHECK IMPLEMENTATION!") # hists = self.jetFakeRate2D(*args,tag=tag,weight=weight,verbosity=verbosity) # for variable, hist in zip(variables,hists): # histsB[variable].insert(0,hist) diff --git a/Plotter/python/sample/utils.py b/Plotter/python/sample/utils.py index d5ad2ed21..6b4ef7b5d 100644 --- a/Plotter/python/sample/utils.py +++ b/Plotter/python/sample/utils.py @@ -54,7 +54,7 @@ def getsampleset(datasample,expsamples,sigsamples=[ ],**kwargs): else: LOG.throw(IOError,"Did not recognize mc row %s"%(info)) fname = repkey(fpattern,ERA=era,GROUP=group,SAMPLE=name,CHANNEL=channel,TAG=tag) - #print fname + #print(fname) sample = MC(name,title,fname,xsec,**expkwargs) expsamples[i] = sample @@ -86,7 +86,7 @@ def getsampleset(datasample,expsamples,sigsamples=[ ],**kwargs): datakwargs.update(newkwargs) elif datasample: LOG.throw(IOError,"Did not recognize data row %s"%(datasample)) - #print fnames + #print(fnames) if datasample: fpattern = repkey(fpattern,ERA=era,GROUP=group,SAMPLE=dname,CHANNEL=channel,TAG=tag) fnames = glob.glob(fpattern) @@ -98,7 +98,7 @@ def getsampleset(datasample,expsamples,sigsamples=[ ],**kwargs): datasample = 
MergedSample(dname,'Observed',data=True) for fname in fnames: setname = namerexp.findall(fname)[0] - #print setname + #print(setname) datasample.add(Data(setname,'Observed',fname,**datakwargs)) else: LOG.throw(IOError,"Did not find data file %r"%(fpattern)) @@ -125,10 +125,10 @@ def getmcsample(group,sample,title,xsec,channel,era,tag="",verb=0,**kwargs): picodir = CONFIG['picodir'] fname_ = repkey(fname,PICODIR=picodir,USER=user,ERA=era,GROUP=group,SAMPLE=sample,CHANNEL=channel,TAG=tag) if not os.path.isfile(fname_): - print ">>> Did not find %r"%(fname_) + print(">>> Did not find %r"%(fname_)) name = sample+tag if verb>=1: - print ">>> getmcsample: %s, %s, %s"%(name,sample,fname_) + print(">>> getmcsample: %s, %s, %s"%(name,sample,fname_)) sample = MC(name,title,fname_,xsec,**kwargs) return sample @@ -408,7 +408,7 @@ def stitch(samplelist,*searchterms,**kwargs): name,name,len(stitchlist),"', '".join(searchterms))) return samplelist for s in stitchlist: - print ">>> %s"%s.name + print(">>> %s"%s.name) sample_incl = None sample_mutau = None #"DYJetsToMuTauh_M-50" @@ -419,7 +419,7 @@ def stitch(samplelist,*searchterms,**kwargs): else: sample_incl = sample if not sample_incl: - print "No inclusive sample to stitch... abort" + print("No inclusive sample to stitch... 
abort") return samplelist # Compute k-factor for NLO cross section normalisation @@ -443,36 +443,36 @@ def stitch(samplelist,*searchterms,**kwargs): if sample in samples_jetIncl: continue else: - print sample.name + print(sample.name) njets = int(sample.name[int(sample.name.find("Jets")-1)]) - print "...jet multiplcity: %i"%njets + print("...jet multiplcity: %i"%njets) sample_njet[njets] = sample - print "Lumi = %.6g, kfactor = %.6g, xsec = %.6g, sumw = %.6g"%(sample_incl.lumi, kfactor, sample_incl.xsec, sample_incl.sumweights) - print "Sample_incl.norm = %.6g"%sample_incl.norm + print("Lumi = %.6g, kfactor = %.6g, xsec = %.6g, sumw = %.6g"%(sample_incl.lumi, kfactor, sample_incl.xsec, sample_incl.sumweights)) + print("Sample_incl.norm = %.6g"%sample_incl.norm) wIncl = sample_incl.lumi * kfactor * sample_incl.xsec * 1000. / sample_incl.sumweights - print "Inclusive weight = %.6g"%wIncl + print("Inclusive weight = %.6g"%wIncl) effIncl_njet = dict() wIncl_njet = dict() for njets in sample_njet: sample = sample_njet[njets] effIncl_njet[njets] = sample.xsec/sample_incl.xsec - print "%i-jet efficiency in inclusive sample = %.6g"%(njets,effIncl_njet[njets]) + print("%i-jet efficiency in inclusive sample = %.6g"%(njets,effIncl_njet[njets])) wIncl_njet[njets] = sample.lumi * kfactor * sample.xsec * 1000. / (sample.sumweights + effIncl_njet[njets]*sample_incl.sumweights) - print "Lumi = %.6g, kfactor = %.6g, xsec = %.6g, sumw = %.6g"%(sample.lumi, kfactor, sample.xsec, sample.sumweights) - print "Sample.norm = %.6g"%sample.norm - print "Inclusive %i jets weight = %.6g"%(njets,wIncl_njet[njets]) + print("Lumi = %.6g, kfactor = %.6g, xsec = %.6g, sumw = %.6g"%(sample.lumi, kfactor, sample.xsec, sample.sumweights)) + print("Sample.norm = %.6g"%sample.norm) + print("Inclusive %i jets weight = %.6g"%(njets,wIncl_njet[njets])) wIncl_mutau = "" wMuTau_njet = dict() if sample_mutau: wIncl_mutau = sample_incl.lumi * kfactor * sample_incl.xsec * 1000. 
* effMuTau_incl / ( effMuTau_incl*sample_incl.sumweights + effMuTau_excl*sample_mutau.sumweights ) - print "Inclusive mutau weight = %.6g"%wIncl_mutau + print("Inclusive mutau weight = %.6g"%wIncl_mutau) for njets in sample_njet: sample = sample_njet[njets] wMuTau_njet[njets] = sample.lumi * kfactor * sample.xsec * 1000. * effMuTau_njet[njets] / ( effMuTau_njet[njets]*sample.sumweights + effMuTauNjet_excl[njets]*sample_mutau.sumweights + effMuTauNjet_incl[njets]*sample_incl.sumweights ) - print "Inclusive mutau %i jets weight = %.6g"%(njets,wMuTau_njet[njets]) + print("Inclusive mutau %i jets weight = %.6g"%(njets,wMuTau_njet[njets])) conditionalWeight_incl = "" if sample_mutau: From 9368629f90adc570fda47a9d86d957c5e5fd9a7a Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 17:15:28 +0200 Subject: [PATCH 16/55] make python3 compatible (Plotter/JetToTauFR) --- .../plotJetToTauFRvariables_writeJson.py | 24 +- ...etToTauFRvariables_writeJson_FRinPtBins.py | 52 ++-- .../corrections/JetToTauFR/plot_forTauFR.py | 8 +- .../corrections/JetToTauFR/readAndPlotFR.py | 14 +- .../corrections/JetToTauFR/tools/dataset.py | 230 +++++++++--------- .../JetToTauFR/tools/fakeFactors.py | 22 +- .../JetToTauFR/tools/histograms.py | 65 ++--- .../JetToTauFR/tools/jsonWriter.py | 8 +- .../corrections/JetToTauFR/tools/plots.py | 49 ++-- .../corrections/JetToTauFR/tools/styles.py | 4 +- 10 files changed, 240 insertions(+), 236 deletions(-) diff --git a/Plotter/python/corrections/JetToTauFR/plotJetToTauFRvariables_writeJson.py b/Plotter/python/corrections/JetToTauFR/plotJetToTauFRvariables_writeJson.py index 3ed31af5e..d5fb12d96 100755 --- a/Plotter/python/corrections/JetToTauFR/plotJetToTauFRvariables_writeJson.py +++ b/Plotter/python/corrections/JetToTauFR/plotJetToTauFRvariables_writeJson.py @@ -118,7 +118,7 @@ def plot(sampleset,channel,parallel=True,tag="",extratext="",outdir="plots",era= if extratext: text += ("" if '\n' in extratext[:3] else ", ") + extratext - for stack, 
variable in stacks.iteritems(): + for stack, variable in stacks.items(): for h in stack.hists: if "_pt" in selection.filename : continue # make sure you choose only the files related to the FR measurement #if "eta" not in selection.filename : continue # make sure you choose only the files related to the FR measurement @@ -150,7 +150,7 @@ def plot(sampleset,channel,parallel=True,tag="",extratext="",outdir="plots",era= stack.saveas(fname,ext=exts,tag=tag) stack.close() - print len(LooseDataHists), len(LooseMCHists), len(TightDataHists), len(TightMCHists) + print(len(LooseDataHists), len(LooseMCHists), len(TightDataHists), len(TightMCHists)) # calcuate jet->tau FR jsonList = [] @@ -376,9 +376,9 @@ def WriteJsonInfoToFile(jsonFileInfoDict,outdir,era,channel,verbose): jsonWr = jsonWriter.JsonWriter(outdir, verbose) usedKeys = [] - for i, key in enumerate(jsonFileInfoDict.keys(), 1): + for i, key in enumerate(jsonFileInfoDict.keys(),1): infoList = jsonFileInfoDict[key] - if verbose : print infoList + if verbose : print(infoList) usedKey = "-".join(key.split("-")[:-1]) # For-loop: All info rows @@ -403,12 +403,12 @@ def _ConvertInfoToJsonParameter(keyString, infoString, verbose, firstIndex=False jsonInfoDelimiter = " & " #self.Verbose("keyString = %s" % (keyString), True) - if verbose: print "keystring = ", keyString + if verbose: print("keystring = ", keyString) inputDir = keyString.split(":")[0] - if verbose: print "inputDir = ", inputDir + if verbose: print("inputDir = ", inputDir) - if verbose: print " keyString.split(:)[-1].split(-)" ,keyString.split(":")[-1].split("-") - if verbose: print " len keyString.split(:)[-1].split(-)" ,len( keyString.split(":")[-1].split("-") ) + if verbose: print(" keyString.split(:)[-1].split(-)" ,keyString.split(":")[-1].split("-")) + if verbose: print(" len keyString.split(:)[-1].split(-)" ,len( keyString.split(":")[-1].split("-") )) if len(keyString.split(":")[-1].split("-")) > 1: #if verbose: print " keyString.split(:)[-1].split(-)" 
,keyString.split(":")[-1].split("-") @@ -480,10 +480,10 @@ def ConvertToFriendlySavename(text): def Print(msg, printHeader=False): if printHeader==True: #print "=== ", _GetFName() - print "=== ", __file__.split("/")[-1].replace("pyc", "py") - print "\t", msg + print("=== ", __file__.split("/")[-1].replace("pyc", "py")) + print("\t", msg) else: - print "\t", msg + print("\t", msg) return ################# @@ -549,4 +549,4 @@ def main(args): LOG.verbosity = args.verbosity PLOG.verbosity = args.verbosity main(args) - print "\n>>> Done." + print("\n>>> Done.") diff --git a/Plotter/python/corrections/JetToTauFR/plotJetToTauFRvariables_writeJson_FRinPtBins.py b/Plotter/python/corrections/JetToTauFR/plotJetToTauFRvariables_writeJson_FRinPtBins.py index f44d0b1ab..2840067b1 100755 --- a/Plotter/python/corrections/JetToTauFR/plotJetToTauFRvariables_writeJson_FRinPtBins.py +++ b/Plotter/python/corrections/JetToTauFR/plotJetToTauFRvariables_writeJson_FRinPtBins.py @@ -93,14 +93,14 @@ def plot(sampleset,channel,parallel=True,tag="",extratext="",outdir="plots",era= TightMCGenuineHists = [] for selection in selections: - print ">>> Selection %r: %r"%(selection.title,selection.selection) + print(">>> Selection %r: %r"%(selection.title,selection.selection)) stacks = sampleset.getstack(variables,selection,method='',parallel=parallel) fname = "%s/$VAR_%s-%s-%s$TAG"%(outdir,channel.replace('mu','m').replace('tau','t'),selection.filename,era) text = "%s: %s"%(channel.replace('mu',"#mu").replace('tau',"#tau_{h}"),selection.title) if extratext: text += ("" if '\n' in extratext[:3] else ", ") + extratext - for stack, variable in stacks.iteritems(): + for stack, variable in stacks.items(): for h in stack.hists: if "_pt" in selection.filename : continue # make sure you choose only the files related to the FR measurement if("SingleMuon" not in h.GetName() and "SingleElectron" not in h.GetName() and "EGamma" not in h.GetName() and "Simulation" not in h.GetName()) : continue @@ -219,14 
+219,14 @@ def GetEfficiencyHisto(numHisto, denHisto, dsetType, outdir, era, channel, ptLis # First determine the bin number [Bin# 0 contains the underflow. The Last bin (bin# nbins+1) contains the overflow.] #for j in range(1, nBinsX+1): for j in range(1, nBinsX+1): - print "nBinsX is = ", nBinsX - print "and j now is =", j + print("nBinsX is = ", nBinsX) + print("and j now is =", j) # Get the denominator denValue = ctypes.c_double(0.0) denError_ = ctypes.c_double(0.0) denValue = denHisto.IntegralAndError(j, j, denError_) - print "HELOOOOOOOOOOOOOOOOOOOOOO, HERE IS YOUR DENVALUE" - print denValue + print("HELOOOOOOOOOOOOOOOOOOOOOO, HERE IS YOUR DENVALUE") + print(denValue) denError = denError_.value # Get the denominator @@ -234,8 +234,8 @@ def GetEfficiencyHisto(numHisto, denHisto, dsetType, outdir, era, channel, ptLis numError_ = ctypes.c_double(0.0) numValue = numHisto.IntegralAndError(j, j, numError_) numError = numError_.value - print "HELOOOOOOOOOOOOOOOOOOOOOO, HERE IS YOUR NUMVALUE" - print numValue + print("HELOOOOOOOOOOOOOOOOOOOOOO, HERE IS YOUR NUMVALUE") + print(numValue) effValue = 0.0 effError = 0.0 histoName = numHisto.GetName() @@ -269,8 +269,8 @@ def GetEfficiencyHisto(numHisto, denHisto, dsetType, outdir, era, channel, ptLis hDen.SetBinContent(1, denValue) hDen.SetBinError(1, denError) - #print hNum.GetNbinsX() - #print hDen.GetNbinsX() + #print(hNum.GetNbinsX()) + #print(hDen.GetNbinsX()) # Sanity check if not (ROOT.TEfficiency.CheckConsistency(hNum, hDen)): @@ -326,7 +326,7 @@ def GetEfficiencyHisto(numHisto, denHisto, dsetType, outdir, era, channel, ptLis # For json file json = "%s__%.1f__%.1f__%.5f__%.5f__%.5f__%.5f__%.5f__%.5f" % (ptRange, numValue, denValue, effValue, effError, effErrorUp, effErrorLow, numValue, denValue) - #print "json = ", json.replace("__", jsonInfoDelimiter) + #print("json = ", json.replace("__", jsonInfoDelimiter)) jsonList.append(json.replace("__", jsonInfoDelimiter)) # Reset histos @@ -355,7 +355,7 @@ def 
WriteJsonInfoToFile(jsonFileInfoDict,outdir,era,channel,verbose): usedKeys = [] for i, key in enumerate(jsonFileInfoDict.keys(), 1): infoList = jsonFileInfoDict[key] - if verbose : print infoList + if verbose : print(infoList) usedKey = "-".join(key.split("-")[:-1]) # For-loop: All info rows @@ -380,24 +380,24 @@ def _ConvertInfoToJsonParameter(keyString, infoString, verbose, firstIndex=False jsonInfoDelimiter = " & " #self.Verbose("keyString = %s" % (keyString), True) - if verbose: print "keystring = ", keyString + if verbose: print("keystring = ", keyString) inputDir = keyString.split(":")[0] - if verbose: print "inputDir = ", inputDir + if verbose: print("inputDir = ", inputDir) - if verbose: print " keyString.split(:)[-1].split(-)" ,keyString.split(":")[-1].split("-") - if verbose: print " len keyString.split(:)[-1].split(-)" ,len( keyString.split(":")[-1].split("-") ) + if verbose: print(" keyString.split(:)[-1].split(-)" ,keyString.split(":")[-1].split("-")) + if verbose: print(" len keyString.split(:)[-1].split(-)" ,len( keyString.split(":")[-1].split("-") )) if len(keyString.split(":")[-1].split("-")) > 1: - #if verbose: print " keyString.split(:)[-1].split(-)" ,keyString.split(":")[-1].split("-") + #if verbose: print(" keyString.split(:)[-1].split(-)" ,keyString.split(":")[-1].split("-")) ptRatio = keyString.split("-")[0] #it was 2 - #if verbose: print "ptRatio = " , ptRatio + #if verbose: print("ptRatio = " , ptRatio) dataType = keyString.split(":")[-1].split("-")[1] # it was 3 - #if verbose: print "dataType = ", dataType + #if verbose: print("dataType = ", dataType) else: ptRatio = "N/A" - #if verbose: print "ptRatio = " , ptRatio + #if verbose: print("ptRatio = " , ptRatio) dataType = keyString.split(":")[-1].split("-")[1] # it was 2 - #if verbose: print "dataType = ", dataType + #if verbose: print("dataType = ", dataType) #self.Verbose("keyString.split(\"-\") = %s" % (keyString.split("-")), True) #self.Verbose("InputDir = %s, DM = %s, etaRegion = %s, 
ptRatio = %s, dataType = %s" % (inputDir, decayMode, etaRegion, ptRatio, dataType), True) @@ -456,11 +456,11 @@ def ConvertToFriendlySavename(text): # return fName def Print(msg, printHeader=False): if printHeader==True: - #print "=== ", _GetFName() - print "=== ", __file__.split("/")[-1].replace("pyc", "py") - print "\t", msg + #print("=== ", _GetFName()) + print("=== ", __file__.split("/")[-1].replace("pyc", "py") ) + print("\t", msg) else: - print "\t", msg + print("\t", msg) return ################# @@ -526,4 +526,4 @@ def main(args): LOG.verbosity = args.verbosity PLOG.verbosity = args.verbosity main(args) - print "\n>>> Done." + print("\n>>> Done.") diff --git a/Plotter/python/corrections/JetToTauFR/plot_forTauFR.py b/Plotter/python/corrections/JetToTauFR/plot_forTauFR.py index 49c612432..0818d1464 100755 --- a/Plotter/python/corrections/JetToTauFR/plot_forTauFR.py +++ b/Plotter/python/corrections/JetToTauFR/plot_forTauFR.py @@ -125,7 +125,7 @@ def plot(sampleset,channel,parallel=True,tag="",extratext="",outdir="plots",era= ## ccccccccccccccccccccccccccccccccccccc for selection in selections: - print ">>> Selection %r: %r"%(selection.title,selection.selection) + print(">>> Selection %r: %r"%(selection.title,selection.selection)) ## ccccccccccccccccccccccccccccccccccccc if closure: #stacks = sampleset.getstack(variables,selection,method='JetToTau_MisID',parallel=parallel) @@ -138,11 +138,11 @@ def plot(sampleset,channel,parallel=True,tag="",extratext="",outdir="plots",era= text = "%s: %s"%(channel.replace('mu',"#mu").replace('tau',"#tau_{h}"),selection.title) if extratext: text += ("" if '\n' in extratext[:3] else ", ") + extratext - for stack, variable in stacks.iteritems(): + for stack, variable in stacks.items(): ## ccccccccccccccccccccccccccccccccccccc if closure: stack.datahist.Reset() - for stack_Fake, variable_Fake in stacks_Fake.iteritems(): + for stack_Fake, variable_Fake in stacks_Fake.items(): if variable_Fake == variable: 
stack.datahist.Add(stack_Fake.datahist) ## ccccccccccccccccccccccccccccccccccccc @@ -217,4 +217,4 @@ def main(args): LOG.verbosity = args.verbosity PLOG.verbosity = args.verbosity main(args) - print "\n>>> Done." + print("\n>>> Done.") diff --git a/Plotter/python/corrections/JetToTauFR/readAndPlotFR.py b/Plotter/python/corrections/JetToTauFR/readAndPlotFR.py index 1e614942b..c3aa6f833 100755 --- a/Plotter/python/corrections/JetToTauFR/readAndPlotFR.py +++ b/Plotter/python/corrections/JetToTauFR/readAndPlotFR.py @@ -79,8 +79,8 @@ def Print(msg, printHeader=True): ''' fName = __file__.split("/")[-1] if printHeader: - print "=== ", fName - print "\t", msg + print("=== ", fName) + print("\t", msg) return def getLumiByYear(year): @@ -118,10 +118,10 @@ def doCompareJson(dirList, name, **kwargs): myFakeFactorsData = fakeFactors.FakeFactors(dirList, "Data" , nProng, eta, None, opts.analysisType, opts.verbose) myFakeFactorsSimulation = fakeFactors.FakeFactors(dirList, "Simulation", nProng, eta, None, opts.analysisType, opts.verbose) #myFakeFactors = fakeFactors.FakeFactors(dirList, opts.dataType, eta, nProng, None, "mumutau", opts.verbose) - #print "nProng = ", nProng - #print "eta = ", eta - #print "myFakeFactorsData =" , myFakeFactorsData - #print "myFakeFactorsSimulation =", myFakeFactorsSimulation + #print("nProng = ", nProng) + #print("eta = ", eta) + #print("myFakeFactorsData =" , myFakeFactorsData) + #print("myFakeFactorsSimulation =", myFakeFactorsSimulation) Verbose("Creating the comparison plot (%s, %s)" % (nProng, eta), True) jsonToGraphDict = doPlots(myFakeFactorsData, myFakeFactorsSimulation, nProng, eta, opts.name, opts) @@ -527,7 +527,7 @@ def main(): # Sanity check #if len(opts.dirList) < 2: - # print opts.dirList + # print(opts.dirList) # msg = "Only %d directories provided. 
Need at least 2 for comparison plots:\n\t%s" % (len(opts.dirList), "\n\t".join(opts.dirList)) # raise Exception(es + msg + ns) diff --git a/Plotter/python/corrections/JetToTauFR/tools/dataset.py b/Plotter/python/corrections/JetToTauFR/tools/dataset.py index 140a62fd6..e29bec65d 100755 --- a/Plotter/python/corrections/JetToTauFR/tools/dataset.py +++ b/Plotter/python/corrections/JetToTauFR/tools/dataset.py @@ -11,6 +11,8 @@ #================================================================================================ # Import modules #================================================================================================ +from __future__ import print_function # for python3 compatibility +from past.builtins import basestring # for python2 compatibility import glob, os, sys, re import math import copy @@ -89,10 +91,10 @@ def Print(msg, printHeader=True): ''' fName = __file__.split("/")[-1] if printHeader==True: - print "=== ", fName - print "\t", msg + print("=== ", fName) + print("\t", msg) else: - print "\t", msg + print("\t", msg) return @@ -103,7 +105,7 @@ def PrintFlushed(msg, printHeader=True): msg = "\r\t" + msg ERASE_LINE = '\x1b[2K' if printHeader: - print "=== " + GetSelfName() + print("=== " + GetSelfName()) sys.stdout.write(ERASE_LINE) sys.stdout.write(msg) sys.stdout.flush() @@ -305,7 +307,7 @@ def readFromCrabDirs(taskdirs, emptyDatasetsAsNone=False, **kwargs): files = glob.glob(os.path.join(d, "results", inputFile)) name = name.replace("crab_", "") if len(files) == 0: - print >> sys.stderr, "Ignoring dataset %s: no files matched to '%s' in task directory %s" % (d, inputFile, os.path.join(d, "res")) + print("Ignoring dataset %s: no files matched to '%s' in task directory %s"%(d, inputFile, os.path.join(d, "res")),file=sys.stderr) noFiles = True continue @@ -314,9 +316,9 @@ def readFromCrabDirs(taskdirs, emptyDatasetsAsNone=False, **kwargs): dlist.append( (name+postfix, files) ) if noFiles: - print >> sys.stderr, "" - print >> sys.stderr, " 
There were datasets without files. Have you merged the files with hplusMergeHistograms.py?" - print >> sys.stderr, "" + print("",file=sys.stderr) + print(" There were datasets without files. Have you merged the files with hplusMergeHistograms.py?",file=sys.stderr) + print("",file=sys.stderr) if len(dlist) == 0: raise Exception("No datasets. Have you merged the files with hplusMergeHistograms.py?") @@ -441,13 +443,13 @@ def __init__(self, **defaults): self.data = copy.deepcopy(defaults) def set(self, **kwargs): - for key, value in kwargs.iteritems(): + for key, value in kwargs.items(): if not key in self.data: raise Exception("Not allowed to insert '%s', available settings: %s" % (key, ", ".join(self.data.keys()))) self.data[key] = value def append(self, **kwargs): - for key, value in kwargs.iteritems(): + for key, value in kwargs.items(): if not key in self.data: raise Exception("Not allowed to insert '%s', available settings: %s" % (key, ", ".join(self.data.keys()))) try: @@ -595,7 +597,7 @@ def divideBinomial(countPassed, countTotal): def _histoToCounter(histo): ret = [] - for bin in xrange(1, histo.GetNbinsX()+1): + for bin in range(1, histo.GetNbinsX()+1): ret.append( (histo.GetXaxis().GetBinLabel(bin), Count(float(histo.GetBinContent(bin)), float(histo.GetBinError(bin)))) ) @@ -618,7 +620,7 @@ def _counterToHisto(name, counter): ## Transform histogram (TH1) to a list of values def histoToList(histo): - return [histo.GetBinContent(bin) for bin in xrange(1, histo.GetNbinsX()+1)] + return [histo.GetBinContent(bin) for bin in range(1, histo.GetNbinsX()+1)] ## Transform histogram (TH1) to a dictionary. 
@@ -628,7 +630,7 @@ def histoToList(histo): def _histoToDict(histo): ret = {} - for bin in xrange(1, histo.GetNbinsX()+1): + for bin in range(1, histo.GetNbinsX()+1): ret[histo.GetXaxis().GetBinLabel(bin)] = histo.GetBinContent(bin) return ret @@ -639,7 +641,7 @@ def histoIntegrateToCount(histo): if histo is None: return count - for bin in xrange(0, histo.GetNbinsX()+2): + for bin in range(0, histo.GetNbinsX()+2): count.add(Count(histo.GetBinContent(bin), histo.GetBinError(bin))) return count @@ -661,7 +663,7 @@ def _rescaleInfo(d): factor = 1/d["control"] ret = {} - for k, v in d.iteritems(): + for k, v in d.items(): if k in ["isPileupReweighted","isTopPtReweighted"]: ret[k] = v else: @@ -763,7 +765,7 @@ def _mergeStackHelper(datasetList, nameList, task, allowMissingDatasets=False): del dlist[ind] message = "Tried to %s '"%task + ", ".join(dlist) +"' which don't exist" if allowMissingDatasets: - print >> sys.stderr, "WARNING: "+message + print("WARNING: "+message,file=sys.stderr) else: raise Exception(message) @@ -893,7 +895,7 @@ def draw(self, dataset): raise Exception("Error when calling TTree.Draw with the following parameters for dataset %s, nentries=%d\ntree: %s\nvarexp: %s\nselection: %s\noption: %s" % (dataset.getName(), nentries, treeName, varexp, selection, option)) h = tree.GetHistogram() if h == None: # need '==' to compare null TH1 - print >>sys.stderr, "WARNING: TTree.Draw with the following parameters returned null histogram for dataset %s (%d entries)\ntree: %s\nvarexp: %s\nselection: %s\noption: %s" % (dataset.getName(), nentries, treeName, varexp, selection, option) + print("WARNING: TTree.Draw with the following parameters returned null histogram for dataset %s (%d entries)\ntree: %s\nvarexp: %s\nselection: %s\noption: %s" % (dataset.getName(), nentries, treeName, varexp, selection, option),file=sys.stderr) return None h.SetName(dataset.getName()+"_"+h.GetName()) @@ -970,7 +972,7 @@ def draw(self, dataset): tree.Draw(">>elist", self.selection) 
elist = ROOT.gDirectory.Get("elist") - for ientry in xrange(elist.GetN()): + for ientry in range(elist.GetN()): tree.GetEntry(elist.GetEntry(ientry)) self.function(tree) @@ -1020,10 +1022,10 @@ def draw(self, dataset): h = None datasetName = dataset.getName() if datasetName in self.datasetMap: - #print "Dataset %s in datasetMap" % datasetName, self.datasetMap[datasetName].selection + #print("Dataset %s in datasetMap" % datasetName, self.datasetMap[datasetName].selection) h = self.datasetMap[datasetName].draw(dataset) else: - #print "Dataset %s with default" % datasetName, self.default.selection + #print("Dataset %s with default" % datasetName, self.default.selection) h = self.default.draw(dataset) return h @@ -1035,7 +1037,7 @@ def draw(self, dataset): # are cloned with the given keyword arguments. def clone(self, **kwargs): ret = TreeDrawCompound(self.default.clone(**kwargs)) - for name, td in self.datasetMap.iteritems(): + for name, td in self.datasetMap.items(): ret.datasetMap[name] = td.clone(**kwargs) return ret @@ -1061,7 +1063,7 @@ def _treeDrawToNumEntriesSingle(treeDraw): def treeDrawToNumEntries(treeDraw): if isinstance(treeDraw, TreeDrawCompound): td = TreeDrawCompound(_treeDrawToNumEntriesSingle(treeDraw.default)) - for name, td2 in treeDraw.datasetMap.iteritems(): + for name, td2 in treeDraw.datasetMap.items(): td.add(name, _treeDrawToNumEntriesSingle(td2)) return td else: @@ -1203,23 +1205,23 @@ def draw(self, dset): def addUncertainties(self, dset, rootHistoWithUncertainties, modify=None): verbose = self._settings.get("verbose") if verbose: - print "Adding uncertainties to histogram '%s' of dataset '%s'" % (self._histoName, dset.getName()) + print("Adding uncertainties to histogram '%s' of dataset '%s'" % (self._histoName, dset.getName())) onlyFor = self._settings.get("applyToDatasets") if dset.isMC(): if onlyFor is Systematics.OnlyForPseudo: if verbose: - print " Dataset is MC, no systematics considered (Systematics(..., onlyForMC=%s))" % 
onlyFor.__name__ + print(" Dataset is MC, no systematics considered (Systematics(..., onlyForMC=%s))" % onlyFor.__name__) return elif dset.isPseudo(): if onlyFor is Systematics.OnlyForMC: if verbose: - print " Dataset is pseudo, no systematics considered (Systematics(..., onlyForMC=%s))" % onlyFor.__name__ + print(" Dataset is pseudo, no systematics considered (Systematics(..., onlyForMC=%s))" % onlyFor.__name__) return elif dset.isData(): if onlyFor is not Systematics.All: if verbose: - print " Dataset is data, no systematics considered (Systematics(..., onlyForMC=%s))" % onlyFor.__name__ + print(" Dataset is data, no systematics considered (Systematics(..., onlyForMC=%s))" % onlyFor.__name__) return else: raise Exception("Internal error (unknown dataset type)") @@ -1230,14 +1232,14 @@ def addUncertainties(self, dset, rootHistoWithUncertainties, modify=None): if self._settings.get("allShapes"): shapes = allShapes if verbose: - print " Using all available shape variations (%s)" % ",".join(shapes) + print(" Using all available shape variations (%s)" % ",".join(shapes)) else: shapes = self._settings.get("shapes") #for s in self._settings.get("shapes"): # if s in allShapes: # shapes.append(s) if verbose: - print " Using explicitly specified shape variations (%s)" % ",".join(shapes) + print(" Using explicitly specified shape variations (%s)" % ",".join(shapes)) for source in shapes: plus = None minus = None @@ -1254,8 +1256,8 @@ def addUncertainties(self, dset, rootHistoWithUncertainties, modify=None): additShapes = self._settings.get("additionalShapes") if len(additShapes) > 0: if verbose: - print " Adding additional shape variations %s" % ",".join(additShapes.keys()) - for source, (th1plus, th1minus) in additShapes.iteritems(): + print(" Adding additional shape variations %s" % ",".join(additShapes.keys())) + for source, (th1plus, th1minus) in additShapes.items(): hp = th1plus hm = th1minus if not isinstance(th1plus, ROOT.TH1): @@ -1272,36 +1274,36 @@ def 
addUncertainties(self, dset, rootHistoWithUncertainties, modify=None): relShapes = self._settings.get("additionalShapesRelative") if len(relShapes) > 0: if verbose: - print " Adding additional bin-wise relative uncertainties %s" % ",".join(relShapes.keys()) - for source, th1 in relShapes.iteritems(): + print(" Adding additional bin-wise relative uncertainties %s" % ",".join(relShapes.keys())) + for source, th1 in relShapes.items(): rootHistoWithUncertainties.addShapeUncertaintyFromVariationRelative(source, th1) # Add normalization uncertainties given the selection step normSel = self._settings.get("normalizationSelections") if len(normSel) > 0: if verbose: - print " Adding normalization uncertainties after selections %s" % ",".join(normSel) + print(" Adding normalization uncertainties after selections %s" % ",".join(normSel)) raise Exception("Normalization uncertainties given the set of selections is not supported yet") # Add any user-supplied normalization uncertainties additNorm = self._settings.get("additionalNormalizations") if len(additNorm) > 0: if verbose: - print " Adding additional relative normalization uncertainties %s" % ",".join(additNorm.keys()) - for source, value in additNorm.iteritems(): + print(" Adding additional relative normalization uncertainties %s" % ",".join(additNorm.keys())) + for source, value in additNorm.items(): rootHistoWithUncertainties.addNormalizationUncertaintyRelative(source, value) # Add any user-supplied dataset-specific normalization uncertainties additNorm = self._settings.get("additionalDatasetNormalizations") if len(additNorm) > 0: if verbose: - print " Adding additional dataset-specific (for %s) relative normalization uncertainties %s" % (dset.getName(), ",".join(additNorm.keys())) - for source, func in additNorm.iteritems(): + print(" Adding additional dataset-specific (for %s) relative normalization uncertainties %s" % (dset.getName(), ",".join(additNorm.keys()))) + for source, func in additNorm.items(): value = 
func(dset.getName()) rootHistoWithUncertainties.addNormalizationUncertaintyRelative(source, value) if verbose: - print " Below is the final set of uncertainties for this histogram" + print(" Below is the final set of uncertainties for this histogram") rootHistoWithUncertainties.printUncertainties() @@ -1344,8 +1346,8 @@ def Print(self, msg, printHeader=True): fName = __file__.split("/")[-1] fName = fName.replace(".pyc", ".py") if printHeader: - print "=== ", fName - print "\t", msg + print("=== ", fName) + print("\t", msg) return def _checkConsistency(self, name, th1): @@ -1396,7 +1398,7 @@ def getRateStatUncertainty(self): if not self._flowBinsVisibleStatus: raise Exception("getRate(): The under/overflow bins might not be not empty! Did you forget to call makeFlowBinsVisible() before getRate()?") if len(self._treatShapesAsStat) > 0: - print "WARNING: some shapes are treated as statistical uncertainty, but they have not been implemented yet to getRateStatUncertainty()!" + print("WARNING: some shapes are treated as statistical uncertainty, but they have not been implemented yet to getRateStatUncertainty()!") mySum = 0.0 if isinstance(self._rootHisto, ROOT.TH2): raise Exception("getRateStatUncertainty() supported currently only for TH1!") @@ -1425,7 +1427,7 @@ def makeFlowBinsVisible(self): # return self._flowBinsVisibleStatus = True # Update systematics histograms first - for key, (hPlus, hMinus) in self._shapeUncertainties.iteritems(): + for key, (hPlus, hMinus) in self._shapeUncertainties.items(): histogramsExtras.makeFlowBinsVisible(hPlus) histogramsExtras.makeFlowBinsVisible(hMinus) # Update nominal histogram @@ -1443,7 +1445,7 @@ def treatBins(h): if self._rootHisto.GetBinError(i+1) < minimumStatUncertainty: self._rootHisto.SetBinError(i+1, minimumStatUncertainty) # Treat negative bins in variations - for key, (hPlus, hMinus) in self._shapeUncertainties.iteritems(): + for key, (hPlus, hMinus) in self._shapeUncertainties.items(): treatBins(hPlus) 
treatBins(hMinus) @@ -1540,7 +1542,7 @@ def addShapeUncertaintyRelative(self, name, th1Plus, th1Minus=None): hminus = aux.Clone(th1Plus) hminus.Scale(-1) - for bin in xrange(1, self._rootHisto.GetNbinsX()+1): + for bin in range(1, self._rootHisto.GetNbinsX()+1): myRate = self._rootHisto.GetBinContent(bin) hplus.SetBinContent(bin, myRate * hplus.GetBinContent(bin)) hminus.SetBinContent(bin, myRate * hminus.GetBinContent(bin)) @@ -1562,7 +1564,7 @@ def addNormalizationUncertaintyRelative(self, name, uncertaintyPlus, uncertainty hplus = aux.Clone(self._rootHisto) hplus.Reset() hminus = aux.Clone(hplus) - for bin in xrange(1, self._rootHisto.GetNbinsX()+1): + for bin in range(1, self._rootHisto.GetNbinsX()+1): myRate = self._rootHisto.GetBinContent(bin) hplus.SetBinContent(bin, myRate * uncertaintyPlus) if uncertaintyMinus == None: @@ -1674,9 +1676,9 @@ def statErrors(self, i): shapes = self._shapeUncertainties.values() #shapes.sort() - #print addStatistical, addSystematic, "\n ".join([x[0].GetName() for x in shapes]) + #print(addStatistical, addSystematic, "\n ".join([x[0].GetName() for x in shapes])) - for i in xrange(wrapper.begin(), wrapper.end()): + for i in range(wrapper.begin(), wrapper.end()): (xval, xlow, xhigh) = wrapper.xvalues(i) yval = wrapper.y(i) @@ -1711,11 +1713,11 @@ def printUncertainties(self): ''' Print associated systematic uncertainties ''' - print "Shape uncertainties (%d):" % len(self._shapeUncertainties) + print("Shape uncertainties (%d):" % len(self._shapeUncertainties)) keys = self._shapeUncertainties.keys() keys.sort() for key in keys: - print " %s" % key + print(" %s" % key) #### Below are methods for "better" implementation for some ROOT TH1 methods def integral(self): @@ -1795,7 +1797,7 @@ def getBinWidths(self): ''' if self._rootHisto is None: return None - return [self._rootHisto.GetBinWidth(i) for i in xrange(1, self._rootHisto.GetNbinsX()+1)] + return [self._rootHisto.GetBinWidth(i) for i in range(1, self._rootHisto.GetNbinsX()+1)] 
#### Below are methods for ROOT TH1 compatibility (only selected methods are implemented) def GetNbinsX(self): @@ -1964,12 +1966,12 @@ def ScaleVariationUncertainty(self, name, value): (plus, minus) = self._shapeUncertainties[name] plus.Scale(value) minus.Scale(value) - #for i in xrange(0, self._rootHisto.GetNbinsX()+2): + #for i in range(0, self._rootHisto.GetNbinsX()+2): #if abs(self._rootHisto.GetBinContent(i)) > 0.0: #oldValue = (plus.GetBinContent(i),self._rootHisto.GetBinContent(i)) #plus.SetBinContent(i, plus.GetBinContent(i)*value) #minus.SetBinContent(i, minus.GetBinContent(i)*value) - #print oldValue, plus.GetBinContent(i) + #print(oldValue, plus.GetBinContent(i)) self._shapeUncertainties[name] = (plus, minus) def Clone(self): @@ -1980,7 +1982,7 @@ def Clone(self): histograms, th1.SetDirectory(0) is called. ''' clone = RootHistoWithUncertainties(aux.Clone(self._rootHisto)) - for key, value in self._shapeUncertainties.iteritems(): + for key, value in self._shapeUncertainties.items(): (plus, minus) = (aux.Clone(value[0]), aux.Clone(value[1])) clone._shapeUncertainties[key] = (plus, minus) clone._treatShapesAsStat = set(self._treatShapesAsStat) @@ -2083,8 +2085,8 @@ def histoErrors(h): rows.append( align.format("Rate_syst_uncert_up" , graphErrors(self.getSystematicUncertaintyGraph(), True)) ) rows.append( align.format("Rate_syst_uncert_down", graphErrors(self.getSystematicUncertaintyGraph(), False)) ) - #print ",%s"%sUp - #print "rate_syst_uncert_down,%s\n"%sDown + #print(",%s"%sUp) + #print("rate_syst_uncert_down,%s\n"%sDown) # Print all rows in a loop to get the table rows.append(hLine) @@ -2812,7 +2814,7 @@ def __init__(self, name, tfiles, analysisName, self.info = None self.dataVersion = None def assertInfo(refInfo, newInfo, refFile, newFile, name): - for key, value in refInfo.iteritems(): + for key, value in refInfo.items(): valnew = info[key] if isinstance(value, basestring): if value == valnew: @@ -2916,7 +2918,7 @@ def 
addDirContentsToDict(tdirectory, dictionary): assertInfo(updateInfo, info, self.files[0], f, realDirName+"/configinfo") if "energy" in updateInfo: #raise Exception("You may not set 'energy' in analysis directory specific configinfo histogram. Please fix %s." % realName) - print "WARNING: 'energy' has been set in analysis directory specific configinfo histogram (%s), it will be ignored. Please fix your pseudomulticrab code." % (realName+"/configinfo") + print("WARNING: 'energy' has been set in analysis directory specific configinfo histogram (%s), it will be ignored. Please fix your pseudomulticrab code." % (realName+"/configinfo")) del updateInfo["energy"] self.info.update(updateInfo) @@ -2927,7 +2929,7 @@ def addDirContentsToDict(tdirectory, dictionary): # Update Nallevents to weighted one if "isPileupReweighted" in self.info and self.info["isPileupReweighted"]: - #print "%s: is pileup-reweighted, calling updateNAllEventsToPUWeighted()" % self.name + #print("%s: is pileup-reweighted, calling updateNAllEventsToPUWeighted()" % self.name) self.updateNAllEventsToPUWeighted() # Set cross section, if MC and we know the energy @@ -2935,7 +2937,7 @@ def addDirContentsToDict(tdirectory, dictionary): if "energy" in self.info: crosssection.setBackgroundCrossSectionForDataset(self, quietMode=(self._systematicVariation != None or self._optimizationMode != None)) else: - print "%s is MC but has no energy set in configInfo/configinfo, not setting its cross section automatically" % self.name + print("%s is MC but has no energy set in configInfo/configinfo, not setting its cross section automatically" % self.name) # For some reason clearing the in-memory representations of # the files increases the reading (object lookup?) 
performance @@ -2944,7 +2946,7 @@ def addDirContentsToDict(tdirectory, dictionary): for f in self.files: # Skip this step if on OS X (crashes) if _platform == "darwin": - #print "=== dataset.py: Skip the clearing the in-memory representations of the files in macOS (causes crash)" + #print("=== dataset.py: Skip the clearing the in-memory representations of the files in macOS (causes crash)") continue # else: # f.Clear() # uncommented by Santeri in order to avoid segmentation faults @@ -3053,7 +3055,7 @@ def getAvailableSystematicVariationSources(self): def getRootHisto(self, name, **kwargs): if hasattr(name, "draw"): if len(kwargs) > 0: - print >>sys.stderr, "WARNING: You gave keyword arguments to getDatasetRootHisto() together with object with draw() method. The keyword arguments are not passed to the draw() call. This may or may not be what you want." + print("WARNING: You gave keyword arguments to getDatasetRootHisto() together with object with draw() method. The keyword arguments are not passed to the draw() call. This may or may not be what you want.",file=sys.stderr) h = name.draw(self) realName = None else: @@ -3150,12 +3152,12 @@ def _readCounters(self): ctr = _histoToCounter(counter) self.nAllEventsUnweighted = ctr[0][1].value() # first counter, second element of the tuple, corresponds to ttree: skimCounterAll if _debugCounters: # Debug print - print "DEBUG: Unweighted counters, Dataset name: "+self.getName() + print("DEBUG: Unweighted counters, Dataset name: "+self.getName()) for i in range(counter.GetNbinsX()+1): if counter.GetXaxis().GetBinLabel(i) == "Base::AllEvents": allEventsBin = i - print "bin %d, label: %s, content: = %s"%(i, counter.GetXaxis().GetBinLabel(i), counter.GetBinContent(i)) - print "\n\n" + print("bin %d, label: %s, content: = %s"%(i, counter.GetXaxis().GetBinLabel(i), counter.GetBinContent(i))) + print("\n\n") # Normalization check (to spot e.g. 
PROOF problems), based on weighted counters (counter, realName) = self.getRootHisto(self.counterDir+"/weighted/counter") # weighted allEventsBin = None @@ -3168,9 +3170,9 @@ def _readCounters(self): nPUReEvts = counter.GetBinContent(allEventsBin+1) normalizationCheckStatus = False if _debugNAllEvents: # Debug print - print "DEBUG: self.nAllEventsUnweighted = "+str(self.nAllEventsUnweighted) + print("DEBUG: self.nAllEventsUnweighted = "+str(self.nAllEventsUnweighted)) # If reading unweighted counters fail - except HistogramNotFoundException, e: + except HistogramNotFoundException as e: if not self._weightedCounters: raise Exception("Could not find counter histogram, message: %s" % str(e)) self.nAllEventsUnweighted = -1 @@ -3188,17 +3190,17 @@ def _readCounters(self): (counter, realName) = self.getRootHisto(self.counterDir+"/counter") # weighted ctr = _histoToCounter(counter) self.nAllEventsWeighted = ctr[0][1].value() # first counter, second element of the tuple, corresponds to ttree: skimCounterAll - except HistogramNotFoundException, e: + except HistogramNotFoundException as e: raise Exception("Could not find counter histogram, message: %s" % str(e)) if _debugNAllEvents: # Debug print - print "DEBUG: self.nAllEventsWeighted = "+str(self.nAllEventsWeighted) + print("DEBUG: self.nAllEventsWeighted = "+str(self.nAllEventsWeighted)) # Set nAllEvents to unweighted value (corresponding to ttree: skimCounterAll) # The corresponding value in the weighted counter is 0, so we don't want to use that self.nAllEvents = self.nAllEventsUnweighted if _debugNAllEvents: # Debug print - print "DEBUG: self.nAllEvents = "+str(self.nAllEvents) + print("DEBUG: self.nAllEvents = "+str(self.nAllEvents)) def getName(self): return self.name @@ -3325,7 +3327,7 @@ def updateNAllEventsToPUWeighted(self, era=None, **kwargs): delta = (self.info["isPileupReweighted"] - self.nAllEvents) / self.nAllEvents ratio = ratio * self.info["isPileupReweighted"] / self.nAllEvents if _debugNAllEvents and 
abs(delta) > 0.00001: - print "dataset (%s): Updated NAllEvents to pileUpReweighted NAllEvents, change: %0.6f %%"%(self.getName(), delta*100.0) + print("dataset (%s): Updated NAllEvents to pileUpReweighted NAllEvents, change: %0.6f %%"%(self.getName(), delta*100.0)) if not "isTopPtReweighted" in self.info.keys(): raise Exception("Key 'isTopPtReweighted' missing in configinfo histogram!") if self.info["isTopPtReweighted"] > 0.0: @@ -3334,7 +3336,7 @@ def updateNAllEventsToPUWeighted(self, era=None, **kwargs): delta = (self.info["isTopPtReweighted"] - self.nAllEvents) / self.nAllEvents ratio = ratio * self.info["isTopPtReweighted"] / self.nAllEvents if _debugNAllEvents and abs(delta) > 0.00001: - print "dataset (%s): Updated NAllEvents to isTopPtReweighted NAllEvents, change: %0.6f %%"%(self.getName(), delta*100.0) + print("dataset (%s): Updated NAllEvents to isTopPtReweighted NAllEvents, change: %0.6f %%"%(self.getName(), delta*100.0)) self.nAllEvents = ratio * self.nAllEvents def getNAllEvents(self): @@ -3410,7 +3412,7 @@ def getDatasetRootHisto(self, name, modify=None, **kwargs): #h = None # if hasattr(name, "draw"): # if len(kwargs) > 0: - # print >>sys.stderr, "WARNING: You gave keyword arguments to getDatasetRootHisto() together with object with draw() method. The keyword arguments are not passed to the draw() call. This may or may not be what you want." + # print("WARNING: You gave keyword arguments to getDatasetRootHisto() together with object with draw() method. The keyword arguments are not passed to the draw() call. 
This may or may not be what you want.",file=sys.stderr) # h = name.draw(self) # else: (h, realName) = self.getRootHisto(name, **kwargs) @@ -4057,7 +4059,7 @@ def selectAndReorder(self, nameList): try: selected.append(self.datasetMap[name]) except KeyError: - print >> sys.stderr, "WARNING: Dataset selectAndReorder: dataset %s doesn't exist" % name + print("WARNING: Dataset selectAndReorder: dataset %s doesn't exist" % name,file=sys.stderr) self.datasets = selected self._populateMap() @@ -4099,7 +4101,7 @@ def rename(self, oldName, newName): # # \see rename() def renameMany(self, nameMap, silent=False): - for oldName, newName in nameMap.iteritems(): + for oldName, newName in nameMap.items(): if oldName == newName: continue @@ -4108,7 +4110,7 @@ def renameMany(self, nameMap, silent=False): try: self.datasetMap[oldName].setName(newName) - except KeyError, e: + except KeyError as e: if not silent: raise Exception("Trying to rename dataset '%s' to '%s', but '%s' doesn't exist!" % (oldName, newName, oldName)) self._populateMap() @@ -4162,7 +4164,7 @@ def mergeMany(self, mapping, *args, **kwargs): else: toMerge[newName] = [d.getName()] - for newName, nameList in toMerge.iteritems(): + for newName, nameList in toMerge.items(): self.merge(newName, nameList, *args, **kwargs) return @@ -4193,7 +4195,7 @@ def merge(self, newName, nameList, keepSources=False, addition=False, silent=Tru if allowMissingDatasets: if not silent: - print >> sys.stderr, message + print(message,file=sys.stderr) #Print(message, True) else: raise Exception(message) @@ -4201,7 +4203,7 @@ def merge(self, newName, nameList, keepSources=False, addition=False, silent=Tru elif len(selected) == 1 and not keepSources: if not silent: message = "Dataset merge, one dataset '" + selected[0].getName() + "' found from list '" + ", ".join(nameList)+"'. 
Renaming it to '%s'" % newName - print >> sys.stderr, message + print(message,file=sys.stderr) #Print(message, True) self.rename(selected[0].getName(), newName) return @@ -4256,7 +4258,7 @@ def loadLuminosities(self, fname="lumi.json"): ### Alexandros: Needs to be nested? # # For-loop: All Dataset-Lumi pairs in dictionary -# for name, value in data.iteritems(): +# for name, value in data.items(): # Print("%s has %s pb" % (name, value), False) # if self.hasDataset(name): # Print("%s, setting lumi to %s" % (name, value), False) @@ -4266,7 +4268,7 @@ def loadLuminosities(self, fname="lumi.json"): ### Alexandros: Needs to be nested? # For-loop: All Dataset-Lumi pairs in dictionary - for name, value in data.iteritems(): + for name, value in data.items(): Verbose("%s has %s pb" % (name, value), False) if self.hasDataset(name): Verbose("%s, setting lumi to %s" % (name, value), False) @@ -4278,10 +4280,10 @@ def loadLuminosities(self, fname="lumi.json"): #### fname = os.path.join(self.basedir, fname) #### #### if not os.path.exists(fname): -#### print >> sys.stderr, "WARNING: luminosity json file '%s' doesn't exist!" % fname +#### print("WARNING: luminosity json file '%s' doesn't exist!" % fname,file=sys.stderr) #### #### data = json.load(open(fname)) -#### for name, value in data.iteritems(): +#### for name, value in data.items(): #### if self.hasDataset(name): #### self.getDataset(name).setLuminosity(value) return @@ -4468,7 +4470,7 @@ def PrintCrossSections(self): myDatasets[d.getName()] = d.getCrossSection() else: pass - # print d.getLuminosity() + # print(d.getLuminosity()) index = 0 # For-loop: All keys in dataset-xsection map (sorted by descending xsection value) @@ -4526,7 +4528,7 @@ def formatInfo(self): ## Print dataset information. 
def printInfo(self): - print self.formatInfo() + print(self.formatInfo()) def formatDatasetTree(self): @@ -4538,7 +4540,7 @@ def formatDatasetTree(self): def printDatasetTree(self): - print self.formatDatasetTree() + print(self.formatDatasetTree()) ## Prints the parameterSet of some Dataset # @@ -4546,12 +4548,12 @@ def printDatasetTree(self): # from will not be given. def printSelections(self): namePSets = self.datasets[0].forEach(lambda d: (d.getName(), d.getParameterSet())) - print "ParameterSet for dataset", namePSets[0][0] - print namePSets[0][1] + print("ParameterSet for dataset", namePSets[0][0]) + print(namePSets[0][1]) def getSelections(self): namePSets = self.datasets[0].forEach(lambda d: (d.getName(), d.getParameterSet())) - #print "ParameterSet for dataset", namePSets[0][0] + #print("ParameterSet for dataset", namePSets[0][0]) return namePSets[0][1] ## \var datasets @@ -4620,7 +4622,7 @@ def __init__(self, name, filenames): # Get the data version (e.g. 80Xdata or 80Xmc) dv = aux.Get(rf, "configInfo/dataVersion") if dv == None: - print "Unable to find 'configInfo/dataVersion' from ROOT file '%s', I have no idea if this file is data, MC, or pseudo" % name + print("Unable to find 'configInfo/dataVersion' from ROOT file '%s', I have no idea if this file is data, MC, or pseudo" % name) continue if self._dataVersion is None: self._dataVersion = dv.GetTitle() @@ -4653,7 +4655,7 @@ def __init__(self, name, filenames): # pileup (up) pileup_up = aux.Get(rf, "configInfo/pileup_up") if pileup_up == None: - print "Unable to find 'configInfo/pileup_up' from ROOT file '%s'" % name + print("Unable to find 'configInfo/pileup_up' from ROOT file '%s'" % name) if self._pileup_up is None: if pileup_up != None: self._pileup_up = pileup_up @@ -4662,7 +4664,7 @@ def __init__(self, name, filenames): # pileup (down) pileup_down = aux.Get(rf, "configInfo/pileup_down") if pileup_down == None: - print "Unable to find 'configInfo/pileup_down' from ROOT file '%s'" % name + 
print("Unable to find 'configInfo/pileup_down' from ROOT file '%s'" % name) if self._pileup_down is None: if pileup_down != None: self._pileup_down = pileup_down @@ -4678,7 +4680,7 @@ def __init__(self, name, filenames): raise Exception("Error: The first bin of the counters histogram should be the all events bin!") self._nAllEvents += counters.GetBinContent(1) if self._nAllEvents == 0.0: - print "Warning (DatasetPrecursor): N(allEvents) = 0 !!!" + print("Warning (DatasetPrecursor): N(allEvents) = 0 !!!") # rf.Close() @@ -4968,7 +4970,7 @@ def isInEra(eras, precursor): dset = Dataset(precursor.getName(), precursor.getFiles(), **_args) else: dset = Dataset(precursor.getName(), precursor.getFiles(), availableSystematicVariationSources=self._systematicVariationSources, **_args) - except AnalysisNotFoundException, e: + except AnalysisNotFoundException as e: msg = str(e)+"\n" helpFound = False for arg, attr in [("analysisName", "getAnalyses"), @@ -5036,49 +5038,49 @@ def getSystematicVariationSources(self): return self._systematicVariationSources def printAnalyses(self): - print "Analyses (analysisName):" + print("Analyses (analysisName):") for a in self._analyses: - print " "+a + print(" "+a) print if len(self._searchModes) == 0: - print "No search modes" + print("No search modes") else: - print "Search modes (searchMode):" + print("Search modes (searchMode):") for s in self._searchModes: - print " "+s + print(" "+s) print if len(self._mcDataEras) == 0: - print "No data eras in MC" + print("No data eras in MC") else: - print "Data eras (in MC) (dataEra):" + print("Data eras (in MC) (dataEra):") for d in self._mcDataEras: - print " "+d + print(" "+d) print if len(self._dataDataEras) == 0: - print "No data eras in data" + print("No data eras in data") else: - print "Data eras (in data, the letters can be combined in almost any way) (dataEra):" + print("Data eras (in data, the letters can be combined in almost any way) (dataEra):") for d in self._dataDataEras: - print " 
"+d + print(" "+d) print if len(self._optimizationModes) == 0: - print "No optimization modes" + print("No optimization modes") else: - print "Optimization modes (optimizationMode):" + print("Optimization modes (optimizationMode):") for o in self._optimizationModes: - print " "+o + print(" "+o) print if len(self._systematicVariations) == 0: - print "No systematic variations" + print("No systematic variations") else: - print "Systematic variations (systematicVariation):" + print("Systematic variations (systematicVariation):") for s in self._systematicVariations: - print " "+s + print(" "+s) print ## Close the ROOT files @@ -5253,7 +5255,7 @@ def getSelectorDir(name_): selector.setPrintStatus(self.printStatus) directories = [directory] - for name, (selecName, selecArgs) in self.additionalSelectors.iteritems(): + for name, (selecName, selecArgs) in self.additionalSelectors.items(): directory = getSelectorDir(name).mkdir(datasetName) directory.cd() argsNamed = ROOT.TNamed("selectorArgs", str(selectorArgs)) @@ -5262,7 +5264,7 @@ def getSelectorDir(name_): selector.addSelector(name, getattr(ROOT, selecName)(*selectorArgs), directory) directories.append(directory) - print "Processing dataset", datasetName + print("Processing dataset", datasetName) # Setup cache useCache = True @@ -5287,7 +5289,7 @@ def getSelectorDir(name_): cpuTime = clockStop-clockStart realTime = timeStop-timeStart readMbytes = float(readBytesStop-readBytesStart)/1024/1024 - print "Real time %.2f, CPU time %.2f (%.1f %%), read %.2f MB (%d calls), read speed %.2f MB/s" % (realTime, cpuTime, cpuTime/realTime*100, readMbytes, readCallsStop-readCallsStart, readMbytes/realTime) + print("Real time %.2f, CPU time %.2f (%.1f %%), read %.2f MB (%d calls), read speed %.2f MB/s" % (realTime, cpuTime, cpuTime/realTime*100, readMbytes, readCallsStop-readCallsStart, readMbytes/realTime)) for d in directories: d.Write() @@ -5342,7 +5344,7 @@ def clone(self, **kwargs): return c def set(self, **kwargs): - for key, 
value in kwargs.iteritems(): + for key, value in kwargs.items(): if not hasattr(self, key): raise Exception("This SelectorArgs does not have property %s" % key) setattr(self, key, value) diff --git a/Plotter/python/corrections/JetToTauFR/tools/fakeFactors.py b/Plotter/python/corrections/JetToTauFR/tools/fakeFactors.py index 6ee30055d..47a26665b 100755 --- a/Plotter/python/corrections/JetToTauFR/tools/fakeFactors.py +++ b/Plotter/python/corrections/JetToTauFR/tools/fakeFactors.py @@ -59,8 +59,8 @@ def Print(msg, printHeader=True): ''' fName = __file__.split("/")[-1] if printHeader: - print "=== ", fName - print "\t", msg + print("=== ", fName) + print("\t", msg) return def setStyle(graph): @@ -121,16 +121,16 @@ def Verbose(self, msg, printHeader=True): def Print(self, msg, printHeader=False): if printHeader==True: - print "=== ", self._GetFName() - print "\t", msg + print("=== ", self._GetFName()) + print("\t", msg) else: - print "\t", msg + print("\t", msg) return def PrintFlushed(self, msg, printHeader=True): msg = "\r\t" + msg if printHeader: - print "=== ", self._GetFName() + print("=== ", self._GetFName()) sys.stdout.write(msg) sys.stdout.flush() sys.stdout.write("\033[K") #clear line @@ -398,7 +398,7 @@ def getTable(self, nDigits=5): table.append(hLine) table.append(header) table.append(hLine) - for i in xrange(len(self.mass_string)): + for i in range(len(self.mass_string)): mass = self.mass_string[i] observed = precision % (self.observed_string[i]) median = precision % (self.expectedMedian_string[i]) @@ -467,7 +467,7 @@ def saveAsLatexTable(self, unblindedStatus=False, nDigits=3, savePath=None, HToT s += "\\hline \n" # Get the limit values - for i in xrange(len(self.mass_string)): + for i in range(len(self.mass_string)): mass = self.mass_string[i] eMinus2 = float( precision % (self.expectedMinus2_string[i]) ) eMinus1 = float( precision % (self.expectedMinus1_string[i]) ) @@ -520,7 +520,7 @@ def GetPtRangeAsFloat(self, ptRange): for pt in ptRange: ptLow, ptAv, 
ptHigh = self.GetPtAsFloat(pt) - # print "%s, %s, %s" % (ptLow, ptAv, ptHigh) + # print("%s, %s, %s" % (ptLow, ptAv, ptHigh)) ptLowList.append(ptAv-ptLow) ptAvList.append(ptAv) ptHighList.append(ptHigh-ptAv) @@ -593,7 +593,7 @@ def divideGraph(num, denom): \return new TGraph as the ratio of the two TGraphs ''' gr = ROOT.TGraph(num) - for i in xrange(gr.GetN()): + for i in range(gr.GetN()): y = denom.GetY()[i] val = 0 if y != 0: @@ -611,7 +611,7 @@ def subtractGraph(minuend, subtrahend): \return new TGraph as the difference of the two TGraphs ''' gr = ROOT.TGraph(minuend) - for i in xrange(gr.GetN()): + for i in range(gr.GetN()): val = gr.GetY() - subtrahend.GetY()[i] gr.SetPoint(i, gr.GetX()[i], val) return diff --git a/Plotter/python/corrections/JetToTauFR/tools/histograms.py b/Plotter/python/corrections/JetToTauFR/tools/histograms.py index 593e2cf7f..a31c0c029 100755 --- a/Plotter/python/corrections/JetToTauFR/tools/histograms.py +++ b/Plotter/python/corrections/JetToTauFR/tools/histograms.py @@ -8,6 +8,7 @@ #================================================================================================ # Import modules #================================================================================================ +from past.builtins import basestring # for python2 compatibility import os, sys import glob import array @@ -129,7 +130,7 @@ def _setDefaults(self, name, **kwargs): \li \a y Y coordinate \li \a size Font size ''' - for x, value in kwargs.iteritems(): + for x, value in kwargs.items(): setattr(self, name+"_"+x, value) ## Modify the default position of "CMS Preliminary" text @@ -349,13 +350,13 @@ def Draw(self, options=""): def _printTextDeprecationWarning(oldFunctionName, newFunctionName="histograms.addStandardTexts()"): import traceback - print "#################### WARNING ####################" + print("#################### WARNING ####################") print - print "%s is deprecated, please use %s instead" % (oldFunctionName, newFunctionName) - 
print "Traceback (most recent call last):" + print("%s is deprecated, please use %s instead" % (oldFunctionName, newFunctionName)) + print("Traceback (most recent call last):") stack = traceback.extract_stack()[:-2] # take out calls to this and the caller - print "".join(traceback.format_list(stack)) - print "#################################################" + print("".join(traceback.format_list(stack))) + print("#################################################") return def addCmsPreliminaryText(x=None, y=None, text=None): @@ -643,7 +644,7 @@ def copy(self): # \li \a fillStyle Fill style # \li \a fillColor Fill color def setDefaults(self, **kwargs): - for x, value in kwargs.iteritems(): + for x, value in kwargs.items(): setattr(self, x, value) ## Move the default position/width/height @@ -809,7 +810,7 @@ def drawNonVisibleErrorsTH1(th1): pad_ymax = ROOT.gPad.GetUymax() ret = [] - for i in xrange(1, th1.GetNbinsX()+1): + for i in range(1, th1.GetNbinsX()+1): x = th1.GetBinCenter(i) y = th1.GetBinContent(i) ymin = y - th1.GetBinError(i) @@ -832,11 +833,11 @@ def drawNonVisibleErrorsTGraph(tgraph): # Get the Y-axis min/max from pad pad_ymin = ROOT.gPad.GetYmin() pad_ymax = ROOT.gPad.GetYmax() -# print "FOO", ROOT.gPad.GetUxmin(), ROOT.gPad.GetUxmax(),ROOT.gPad.GetUymin(), ROOT.gPad.GetUymax() - for i in xrange(0, tgraph.GetN()): +# print("FOO", ROOT.gPad.GetUxmin(), ROOT.gPad.GetUxmax(),ROOT.gPad.GetUymin(), ROOT.gPad.GetUymax()) + for i in range(0, tgraph.GetN()): ymin = tgraph.GetY()[i]-tgraph.GetErrorYhigh(i) ymax = tgraph.GetErrorYhigh(i)+tgraph.GetY()[i] - print ymin, tgraph.GetY()[i], ymax + print(ymin, tgraph.GetY()[i], ymax) raise Exception("This function is not finished because of lack of need") ## Helper function for lessThan/greaterThan argument handling @@ -884,7 +885,7 @@ def dist2pass(hdist, **kwargs): # Here we assume that all the bins in hdist have equal widths. 
If # this doesn't hold, the output must be TGraph bw = hdist.GetBinWidth(1); - for bin in xrange(2, hdist.GetNbinsX()+1): + for bin in range(2, hdist.GetNbinsX()+1): if abs(bw - hdist.GetBinWidth(bin))/bw > 0.01: raise Exception("Input histogram with variable bin width is not supported (yet). The bin width of bin1 was %f, and bin width of bin %d was %f" % (bw, bin, hdist.GetBinWidth(bin))) @@ -904,7 +905,7 @@ def dist2pass(hdist, **kwargs): if lessThan: passedCumulative = 0 passedCumulativeErrSq = 0 - for bin in xrange(0, hdist.GetNbinsX()+2): + for bin in range(0, hdist.GetNbinsX()+2): passedCumulative += hdist.GetBinContent(bin) err = hdist.GetBinError(bin) passedCumulativeErrSq += err*err @@ -914,7 +915,7 @@ def dist2pass(hdist, **kwargs): else: passedCumulative = 0 passedCumulativeErrSq = 0 - for bin in xrange(hdist.GetNbinsX()+1, -1, -1): + for bin in range(hdist.GetNbinsX()+1, -1, -1): passedCumulative += hdist.GetBinContent(bin) err = hdist.GetBinError(bin) passedCumulativeErrSq += err*err @@ -929,11 +930,11 @@ def dist2pass(hdist, **kwargs): # \param th1 TH1 object # \param function Function taking a number as an input, and returning a number def th1ApplyBin(th1, function): - for bin in xrange(0, th1.GetNbinsX()+2): + for bin in range(0, th1.GetNbinsX()+2): th1.SetBinContent(bin, function(th1.GetBinContent(bin))) def th1ApplyBinError(th1, function): - for bin in xrange(0, th1.GetNbinsX()+2): + for bin in range(0, th1.GetNbinsX()+2): th1.SetBinError(bin, function(th1.GetBinError(bin))) ## Convert TH1 distribution to TH1 of efficiency as a function of cut value @@ -1150,7 +1151,7 @@ def __init__(self, histoManager, name, canvasOpts={}, **kwargs): opts["nbinsx"] = int((opts["xmax"]-opts["xmin"])/binWidth +0.5) except: # Added to allow for use with TH2 (for some reason 'hasBinLabels' does not get the instancec right - # print type(rootHisto) + # print(type(rootHisto)) binWidth = None opts["nbinsx"] = None @@ -1162,11 +1163,11 @@ def __init__(self, histoManager, 
name, canvasOpts={}, **kwargs): if hasBinLabels: try: firstBin = rootHisto.FindFixBin(opts["xmin"]) - for i in xrange(0, opts["nbinsx"]): + for i in range(0, opts["nbinsx"]): self.frame.GetXaxis().SetBinLabel(i+1, rootHisto.GetXaxis().GetBinLabel(firstBin+i)) except: # Added to allow for use with TH2 (for some reason 'hasBinLabels' does not get the instancec right - # print type(rootHisto) + # print(type(rootHisto)) pass return @@ -1349,7 +1350,7 @@ def getYmax(self): # Copy the bin labels if hasBinLabels: firstBin = rootHisto.FindFixBin(opts1["xmin"]) - for i in xrange(0, opts1["nbinsx"]): + for i in range(0, opts1["nbinsx"]): self.frame.GetXaxis().SetBinLabel(i+1, rootHisto.GetXaxis().GetBinLabel(firstBin+i)) ## \var frame1 @@ -1575,7 +1576,7 @@ def _addToLegendHisto(self, legend): return h = self.getRootHisto() if h is None: - print >>sys.stderr, "WARNING: Trying to add Histo %s to the legend, but rootHisto is None" % self.getName() + print("WARNING: Trying to add Histo %s to the legend, but rootHisto is None" % self.getName(),file=sys.stderr) return cloned = False @@ -1591,7 +1592,7 @@ def _addToLegendHisto(self, legend): h.SetFillStyle(3345) else: info = inspect.getframeinfo(inspect.currentframe()) - print >>sys.stderr, 'WARNING: encountered fill styles %d and %d for stat and syst uncertainties, and there is no support yet for "combining" them for stat. Consider adding your case to %s near line %d' % (fillStyles[0], fillStyles[1], info.filename, info.lineno) + print('WARNING: encountered fill styles %d and %d for stat and syst uncertainties, and there is no support yet for "combining" them for stat. 
Consider adding your case to %s near line %d' % (fillStyles[0], fillStyles[1], info.filename, info.lineno),file=sys.stderr) # Keep reference to avoid segfault self.rootHistoForLegend = addToLegend(legend, h, self.legendLabel, self.legendStyle, canModify=cloned) @@ -1626,7 +1627,7 @@ def call(self, func): def draw(self, opt): h = self.getRootHisto() if h is None: - print >>sys.stderr, "WARNING: Trying to draw Histo %s, but rootHisto is None" % self.getName() + print("WARNING: Trying to draw Histo %s, but rootHisto is None" % self.getName(),file=sys.stderr) return if self._uncertaintyDrawStyle is not None: unc = self.getSystematicUncertaintyGraph() @@ -1878,7 +1879,7 @@ def setRootGraph(self, rootGraph): self.setRootHisto(rootGraph) def _values(self, values, func): - return [func(values[i], i) for i in xrange(0, self.getRootGraph().GetN())] + return [func(values[i], i) for i in range(0, self.getRootGraph().GetN())] def getXmin(self): if isinstance(self.getRootGraph(), ROOT.TGraphErrors) or isinstance(self.getRootGraph(), ROOT.TGraphAsymmErrors): @@ -1986,7 +1987,7 @@ def __init__(self, rootEfficiency, name, legendStyle="l", drawStyle="L", **kwarg def _values(self, function): ret = [] - for bin in xrange(1, self.getRootPassedHisto().GetNbinsX()+1): + for bin in range(1, self.getRootPassedHisto().GetNbinsX()+1): ret.append(function(self.getRootEfficiency(), bin)) return ret @@ -2199,7 +2200,7 @@ def forHisto(self, name, func): try: self.nameHistoMap[name].call(func) except KeyError: - print >> sys.stderr, "WARNING: Tried to call a function for histogram '%s', which doesn't exist." % name + print("WARNING: Tried to call a function for histogram '%s', which doesn't exist." % name,file=sys.stderr) ## Call each MC histograms.Histo with a function. 
# @@ -2255,11 +2256,11 @@ def getHistos(self): # # \param nameMap Dictionary with name->label mappings def setHistoLegendLabelMany(self, nameMap): - for name, label in nameMap.iteritems(): + for name, label in nameMap.items(): try: self.nameHistoMap[name].setLegendLabel(label) except KeyError: - print >> sys.stderr, "WARNING: Tried to set legend label for histogram '%s', which doesn't exist." % name + print("WARNING: Tried to set legend label for histogram '%s', which doesn't exist." % name,file=sys.stderr) ## Set the legend style for a given histogram. # @@ -2269,7 +2270,7 @@ def setHistoLegendStyle(self, name, style): try: self.nameHistoMap[name].setLegendStyle(style) except KeyError: - print >> sys.stderr, "WARNING: Tried to set legend style for histogram '%s', which doesn't exist." % name + print("WARNING: Tried to set legend style for histogram '%s', which doesn't exist." % name,file=sys.stderr) ## Set the legend style for all histograms. # @@ -2286,7 +2287,7 @@ def setHistoDrawStyle(self, name, style): try: self.nameHistoMap[name].setDrawStyle(style) except KeyError: - print >> sys.stderr, "WARNING: Tried to set draw style for histogram '%s', which doesn't exist." % name + print("WARNING: Tried to set draw style for histogram '%s', which doesn't exist." % name,file=sys.stderr) ## Set the histogram drawing style for all histograms. # @@ -2355,13 +2356,13 @@ def stackHistograms(self, newName, nameList): def addMCUncertainty(self, style, name="MCuncertainty", legendLabel="Sim. stat. unc.", uncertaintyLegendLabel=None, nameList=None): mcHistos = filter(lambda x: x.isMC(), self.drawList) if len(mcHistos) == 0: - print >> sys.stderr, "WARNING: Tried to create MC uncertainty histogram, but there are not MC histograms!" 
+ print("WARNING: Tried to create MC uncertainty histogram, but there are not MC histograms!",file=sys.stderr) return if nameList != None: mcHistos = filter(lambda x: x.getName() in nameList, mcHistos) if len(mcHistos) == 0: - print >>sys.stderr, "WARNING: No MC histograms to use for uncertainty band" + print("WARNING: No MC histograms to use for uncertainty band",file=sys.stderr) return hse = HistoTotalUncertainty(mcHistos, name) diff --git a/Plotter/python/corrections/JetToTauFR/tools/jsonWriter.py b/Plotter/python/corrections/JetToTauFR/tools/jsonWriter.py index de768c082..6809bc62d 100755 --- a/Plotter/python/corrections/JetToTauFR/tools/jsonWriter.py +++ b/Plotter/python/corrections/JetToTauFR/tools/jsonWriter.py @@ -36,10 +36,10 @@ def __init__(self, saveDir="", verbose=False): def Print(self, msg, printHeader=False): fName = __file__.split("/")[-1] if printHeader==True: - print "=== ", fName - print "\t", msg + print("=== ", fName) + print("\t", msg) else: - print "\t", msg + print("\t", msg) return def Verbose(self, msg, printHeader=True, verbose=False): @@ -116,7 +116,7 @@ def writeAllGraphs(self): x = self.graphs[key].GetX() y = self.graphs[key].GetY() if 0: - print "\t x = %s, y = %s" % (x[i], y[i]) + print("\t x = %s, y = %s" % (x[i], y[i])) comma = "," if i == self.graphs[key].GetN() - 1: comma = "" diff --git a/Plotter/python/corrections/JetToTauFR/tools/plots.py b/Plotter/python/corrections/JetToTauFR/tools/plots.py index 25d245643..3fdbd2167 100755 --- a/Plotter/python/corrections/JetToTauFR/tools/plots.py +++ b/Plotter/python/corrections/JetToTauFR/tools/plots.py @@ -15,6 +15,7 @@ #================================================================================================ # Import Modules #================================================================================================ +from past.builtins import basestring # for python2 compatibility import sys import array import math @@ -220,7 +221,7 @@ def 
_createRatioErrorPropagation(histo1, histo2, ytitle, returnHisto=False): ratio.SetDirectory(0) ratio.Divide(histo2) if histograms.uncertaintyMode.equal(histograms.Uncertainty.SystOnly): - for i in xrange(0, ratio.GetNbinsX()+2): + for i in range(0, ratio.GetNbinsX()+2): ratio.SetBinError(i, 0) _plotStyles["Ratio"].apply(ratio) @@ -237,7 +238,7 @@ def _createRatioErrorPropagation(histo1, histo2, ytitle, returnHisto=False): xvalues = [] yvalues = [] yerrs = [] - for i in xrange(0, histo1.GetN()): + for i in range(0, histo1.GetN()): yval = histo2.GetY()[i] if yval == 0: continue @@ -276,7 +277,7 @@ def _createRatioErrorPropagation(histo1, histo2, ytitle, returnHisto=False): unc1 = histo1.getSystematicUncertaintyGraph(addStatistical=addStat) unc2 = histo2.getSystematicUncertaintyGraph(addStatistical=addStat) - for i in xrange(0, unc1.GetN()): + for i in range(0, unc1.GetN()): yval1 = unc1.GetY()[i] yval2 = unc2.GetY()[i] if yval2 == 0.0: @@ -370,7 +371,7 @@ def _createRatioBinomial(histo1, histo2, ytitle): ''' if isinstance(histo1, ROOT.TH1) and isinstance(histo2, ROOT.TH1): if histograms.uncertaintyNode != histograms.Uncertainty.StatOnly: - print >>sys.stderr, "Warning: uncertainty mode is not 'StatOnly' (but %s). Nevertheless, the binomial uncertainty is calculated incorporating the uncertainty from the number of events in the input histograms" % (histograms.uncertaintyMode.getName()) + print("Warning: uncertainty mode is not 'StatOnly' (but %s). Nevertheless, the binomial uncertainty is calculated incorporating the uncertainty from the number of events in the input histograms" % (histograms.uncertaintyMode.getName()),file=sys.stderr) eff = ROOT.TGraphAsymmErrors(rootHisto1, rootHisto2) styles.getDataStyle().apply(eff) @@ -529,7 +530,7 @@ def getRatio(self): yerrhigh = [] yerrlow = [] # statistical uncertainty (stat. 
unc.), normally represented by shaded gray area in ratio pad - for bin in xrange(h2.begin(), h2.end()): # important to use h2 because of TGraph logic + for bin in range(h2.begin(), h2.end()): # important to use h2 because of TGraph logic (scale, ylow, yhigh) = h2.yvalues(bin) (xval, xlow, xhigh) = h2.xvalues(bin) ratioWrapped.divide(bin, scale, xval) @@ -582,7 +583,7 @@ def getRatio(self): ratioSyst1 = histo1.getSystematicUncertaintyGraph(addStatistical=histograms.uncertaintyMode.addStatToSyst()) ratioSyst2 = histo2.getSystematicUncertaintyGraph(addStatistical=histograms.uncertaintyMode.addStatToSyst()) removes = [] - for i in xrange(0, ratioSyst2.GetN()): + for i in range(0, ratioSyst2.GetN()): yval = ratioSyst2.GetY()[i] if yval == 0.0: removes.append(i) @@ -595,7 +596,7 @@ def getRatio(self): ratioSyst2.SetPoint(i, ratioSyst2.GetX()[i], 1) ratioSyst2.SetPointEYhigh(i, ratioSyst2.GetErrorYhigh(i)/yval) ratioSyst2.SetPointEYlow(i, ratioSyst2.GetErrorYlow(i)/yval) -# print i, ratioSyst2.GetX()[i], ratioSyst2.GetErrorXlow(i), ratioSyst2.GetErrorXhigh(i), yval, ratioSyst2.GetY()[i], ratioSyst2.GetErrorYhigh(i), ratioSyst2.GetErrorYlow(i) +# print(i, ratioSyst2.GetX()[i], ratioSyst2.GetErrorXlow(i), ratioSyst2.GetErrorXhigh(i), yval, ratioSyst2.GetY()[i], ratioSyst2.GetErrorYhigh(i), ratioSyst2.GetErrorYlow(i)) removes.reverse() for i in removes: ratioSyst1.RemovePoint(i) @@ -1339,7 +1340,7 @@ def _createFrameRatio(self, filename, numerator, denominator, ytitle, invertRati if ratioIsBinomial: if ratioType is not None: raise Exception("You should not set (deprecated) ratioIsBinomial=True, and give ratioType (%s)." 
% ratioType) - print "WARNING: ratioIsBinomial is deprepcated, please yse ratioType='binomial' instead" + print("WARNING: ratioIsBinomial is deprecated, please use ratioType='binomial' instead") ratioType = "binomial" ratioHistos = _createRatioHistos(num, denom, ytitle, ratioType=ratioType, ratioErrorOptions=ratioErrorOptions) @@ -1393,7 +1394,7 @@ def _createFrameRatioMany(self, filename, numerators, denominator, invertRatio=F if ratioIsBinomial: if ratioType is not None: raise Exception("You should not set (deprecated) ratioIsBinomial=True, and give ratioType (%s)." % ratioType) - print "WARNING: ratioIsBinomial is deprepcated, please yse ratioType='binomial' instead" + print("WARNING: ratioIsBinomial is deprecated, please use ratioType='binomial' instead") ratioType = "binomial" self.ratioHistoMgr.removeAllHistos() @@ -1766,7 +1767,7 @@ def __init__(self, datasetMgr, name, normalizeToLumi=None, **kwargs): # \param kwargs Keyword arguments, forwarded to PlotSameBase.createFrame() or PlotRatioBase._createFrameRatio() def createFrame(self, filename, createRatio=False, **kwargs): if createRatio and not self.histoMgr.hasHisto("Data"): - print >> sys.stderr, "Warning: Trying to create data/MC ratio, but there is no 'Data' histogram."
+ print("Warning: Trying to create data/MC ratio, but there is no 'Data' histogram.",file=sys.stderr) createRatio = False if not createRatio: @@ -1791,7 +1792,7 @@ def createFrame(self, filename, createRatio=False, **kwargs): # \param filename Name for TCanvas (becomes the file name) # \param kwargs Keyword arguments, forwarded to createFrame() def createFrameFraction(self, filename, **kwargs): - print "Please move to use createFrame(..., createRatio=True) instead of createFrameFraction()" + print("Please move to use createFrame(..., createRatio=True) instead of createFrameFraction()") self.createFrame(filename, createRatio=True, **kwargs) ## Add cut box and/or line @@ -2277,7 +2278,7 @@ def __init__(self, # # \param kwargs Keyword arguments (same arguments as for __init__()) def setDefaults(self, **kwargs): - for name, value in kwargs.iteritems(): + for name, value in kwargs.items(): if not hasattr(self, name+"Default"): raise Exception("No default value for '%s'"%name) setattr(self, name+"Default", value) @@ -2383,9 +2384,9 @@ def rebin(self, p, name, **kwargs): if rebinX is not None and rebinToWidthX is not None: raise Exception("Only one of 'rebinX' and 'rebinToWidthX' may be given as an argument.") if rebinX is not None: - print "Plot '%s', argument 'rebinX=%s' overrides the default 'rebinToWidthX=%s'" % (name, str(rebinX), str(self.rebinToWidthXDefault)) + print("Plot '%s', argument 'rebinX=%s' overrides the default 'rebinToWidthX=%s'" % (name, str(rebinX), str(self.rebinToWidthXDefault))) if rebinToWidthX is not None: - print "Plot '%s', argument 'rebinToWidthX=%s' overrides the default 'rebinX=%s'" % (name, str(rebinToWidthX), str(self.rebinXDefault)) + print("Plot '%s', argument 'rebinToWidthX=%s' overrides the default 'rebinX=%s'" % (name, str(rebinToWidthX), str(self.rebinXDefault))) if rebinY is not None and rebinToWidthY is not None: if "rebinY" in kwargs: rebinToWidthY = None @@ -2395,9 +2396,9 @@ def rebin(self, p, name, **kwargs): if rebinY is not 
None and rebinToWidthY is not None: raise Exception("Only one of 'rebinY' and 'rebinToWidthY' may be given as an argument.") if rebinY is not None: - print "Plot '%s', argument 'rebinY=%s' overrides the default 'rebinToWidthY=%s'" % (name, str(rebinY), str(self.rebinToWidthYDefault)) + print("Plot '%s', argument 'rebinY=%s' overrides the default 'rebinToWidthY=%s'" % (name, str(rebinY), str(self.rebinToWidthYDefault))) if rebinToWidthY is not None: - print "Plot '%s', argument 'rebinToWidthY=%s' overrides the default 'rebinY=%s'" % (name, str(rebinToWidthY), str(self.rebinYDefault)) + print("Plot '%s', argument 'rebinToWidthY=%s' overrides the default 'rebinY=%s'" % (name, str(rebinToWidthY), str(self.rebinYDefault))) rebinFunction = None @@ -2408,7 +2409,7 @@ def rebin(self, p, name, **kwargs): def rebinList(h): rhwu = h.getRootHistoWithUncertainties() if hasattr(rhwu.getRootHisto(), "Rebin2D"): - print >>sys.stderr, "WARNING: Plot '%s', trying to rebin TH2 histogram '%s' with nonequal bin sizes" % (name, h.getName()) + print("WARNING: Plot '%s', trying to rebin TH2 histogram '%s' with nonequal bin sizes" % (name, h.getName()),file=sys.stderr) return rhwu.Rebin(n, rhwu.GetName(), array.array("d", rebinX)) @@ -2426,7 +2427,7 @@ def rebinToWidthTH1(h): # Check that the number of bins is integer diff = abs(intbins - nbins) if diff > 1e-3: - print >>sys.stderr, "WARNING: Trying to rebin histogram '%s' of plot '%s' for bin width %g, the X axis minimum is %g, maximum %g => number of bins would be %g, which is not integer (diff is %g)" % (h.getName(), name, rebinToWidthX, xmin, xmax, nbins, diff) + print("WARNING: Trying to rebin histogram '%s' of plot '%s' for bin width %g, the X axis minimum is %g, maximum %g => number of bins would be %g, which is not integer (diff is %g)" % (h.getName(), name, rebinToWidthX, xmin, xmax, nbins, diff),file=sys.stderr) return nbins = intbins @@ -2461,7 +2462,7 @@ def rebinToWidth(h): # Check that the requested binning makes sense 
remainderX = th.GetNbinsX() % intbinsx if remainderX != 0: - print >>sys.stderr, "WARNING: Trying to rebin histogram '%s' of plot '%s' for X bin width %g, the X axis minimum is %g, maximum %g => number of bins would be %g, which is not divisor of the number of bins %d, remainder is %d" % (h.getName(), name, rebinToWidthX, xmin, xmax, nbinsx, th.GetNbinsX(), remainderX) + print("WARNING: Trying to rebin histogram '%s' of plot '%s' for X bin width %g, the X axis minimum is %g, maximum %g => number of bins would be %g, which is not divisor of the number of bins %d, remainder is %d" % (h.getName(), name, rebinToWidthX, xmin, xmax, nbinsx, th.GetNbinsX(), remainderX),file=sys.stderr) return rex = th.GetNbinsX()/intbinsx if rebinToWidthY is not None: @@ -2473,7 +2474,7 @@ def rebinToWidth(h): # Check that the requested binning makes sense remainderY = th.GetNbinsY() % intbinsy if remainderY != 0: - print >>sys.stderr, "WARNING: Trying to rebin histogram '%s' of plot '%s' for Y bin width %g, the Y axis minimum is %g, maximum %g => number of bins would be %g, which is not divisor of the number of bins %d, remainder is %d" % (h.getName(), name, rebinToWidthY, ymin, ymax, nbinsy, th.GetNbinsY(), remainderY) + print("WARNING: Trying to rebin histogram '%s' of plot '%s' for Y bin width %g, the Y axis minimum is %g, maximum %g => number of bins would be %g, which is not divisor of the number of bins %d, remainder is %d" % (h.getName(), name, rebinToWidthY, ymin, ymax, nbinsy, th.GetNbinsY(), remainderY),file=sys.stderr) return rey = th.GetNbinsY()/intbinsy @@ -2492,7 +2493,7 @@ def rebinToWidth(h): msg = "=== plots.py\n\tWARNING! Tried to \"DivideByBinWidth\" but failed." msg += "\n\tLikely reason is that one of the RooHistoWithUncertainties is a THGraphAsymmErrors instead of TH1." msg += "\n\tThis is a workaround to that problem. 
Ugly but ~works" - #print msg + #print(msg) types = ["TH1F", "TGraphAsymmErrors", "THStack"] p.histoMgr.forEachHisto(lambda h: h.getRootHistoWithUncertainties().Scale(1, "width") if type(h.getRootHisto()).__name__ in ["TH1F", "THStack"] else True) @@ -2509,15 +2510,15 @@ def rebinToWidth(h): raise Exception("Unsupported type(rh) = %s. EXIT" % (str(type(rh))), True) if rh_type == "TGraphAsymmErrors": - for i in xrange(0, rh.GetN()): # start from 0 bin, not 1! + for i in range(0, rh.GetN()): # start from 0 bin, not 1! # Get bin-width and divide all values # https://root-forum.cern.ch/t/tgraph-and-scale/6255/3 # https://github.com/root-project/root/pull/8143/files dx = rh.GetErrorX(i)*2 # same as: rh.GetEXlow()[i]*2 # Debugging if 0: - #print "name = %s, type = %s" % (rh_name, rh_type) - print "bin#%d: x = %s +/- %s, y = %s + %s - %s" % (i, rh.GetX()[i], dx, rh.GetY()[i], rh.GetEYlow()[i], rh.GetEYhigh()[i]) + #print("name = %s, type = %s" % (rh_name, rh_type)) + print("bin#%d: x = %s +/- %s, y = %s + %s - %s" % (i, rh.GetX()[i], dx, rh.GetY()[i], rh.GetEYlow()[i], rh.GetEYhigh()[i])) # Do the division by the bin width rh.GetY()[i] *= 1/dx diff --git a/Plotter/python/corrections/JetToTauFR/tools/styles.py b/Plotter/python/corrections/JetToTauFR/tools/styles.py index 89550bf37..f2d35f373 100755 --- a/Plotter/python/corrections/JetToTauFR/tools/styles.py +++ b/Plotter/python/corrections/JetToTauFR/tools/styles.py @@ -731,7 +731,7 @@ def getABCDStyle(region): elif region == "CR4" or region == "CRfour": return FakeBStyle6 else: - print "Invalid region \"%s\". Returning qcd style" % (region) + print("Invalid region \"%s\". Returning qcd style" % (region)) return qcdStyle def getBaselineStyle(): @@ -813,7 +813,7 @@ def getSignalStyleHToTB_M(myMass): elif mass == "10000": return signalStyleHToTB3000 else: - print "Invalid mass point \"%s\". Returning default style" % (mass) + print("Invalid mass point \"%s\". 
Returning default style" % (mass)) return signalStyleHToTB500 def getErrorStyle(): From 50da437b7af5079eadaff2ca70104dc7f68dd41b Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 17:40:49 +0200 Subject: [PATCH 17/55] make python3 compatible (Fitter) --- Fitter/python/models/TagAndProbeModel.py | 10 ++++----- Fitter/python/plot/correlation.py | 26 ++++++++++++------------ Fitter/python/plot/datacard.py | 12 +++++------ Fitter/python/plot/postfit.py | 2 +- 4 files changed, 25 insertions(+), 25 deletions(-) diff --git a/Fitter/python/models/TagAndProbeModel.py b/Fitter/python/models/TagAndProbeModel.py index f96bba184..256a43136 100644 --- a/Fitter/python/models/TagAndProbeModel.py +++ b/Fitter/python/models/TagAndProbeModel.py @@ -64,11 +64,11 @@ def doParametersOfInterest(self): elif self.passbin in bin: exp_pass += self.DC.exp[bin][param] if self.verbose>=3: - print ">>> Expected number of %s events in %s bin: %9.1f"%(param,bin,self.DC.exp[bin][param]) + print(">>> Expected number of %s events in %s bin: %9.1f"%(param,bin,self.DC.exp[bin][param])) if self.verbose>=2: - print ">>> Expected number of signal events in pass region: %9.1f"%(exp_pass) - print ">>> Expected number of signal events in fail region: %9.1f"%(exp_fail) - print ">>> SF_fail = (%.2f+(1.-SF)*%.2f)/%.2f"%(exp_fail,exp_pass,exp_fail) + print(">>> Expected number of signal events in pass region: %9.1f"%(exp_pass)) + print(">>> Expected number of signal events in fail region: %9.1f"%(exp_fail)) + print(">>> SF_fail = (%.2f+(1.-SF)*%.2f)/%.2f"%(exp_fail,exp_pass,exp_fail)) assert exp_fail, "Fail region has no signal events!" 
# DEFINE PARAMETERS @@ -88,7 +88,7 @@ def getYieldScale(self,bin,process): elif self.passbin in bin: scale = 'SF' if self.verbose>=1: - print ">>> Scaling %3s in %s bin by '%s'"%(process,bin,scale) + print(">>> Scaling %3s in %s bin by '%s'"%(process,bin,scale)) return scale return 1 diff --git a/Fitter/python/plot/correlation.py b/Fitter/python/plot/correlation.py index 04f60219d..36b6256ad 100644 --- a/Fitter/python/plot/correlation.py +++ b/Fitter/python/plot/correlation.py @@ -22,7 +22,7 @@ def plotCorrelationHist(fname,**kwargs): """Draw correlation of nuisance parameters from FitDiagnostics output.""" - print green("\n>>> plotCorrelationHist") + print(green("\n>>> plotCorrelationHist")) pois = kwargs.get('poi', "r" ) filters = kwargs.get('filter', [ ] ) nbins = kwargs.get('nbins', 40 ) @@ -56,7 +56,7 @@ def plotCorrelationHist(fname,**kwargs): cuts = { c: 0 for c in [0.10,0.25,0.5,0.75] } pcut = 0.15 # for printing for fitname, fittitle in fits: - print ">>> File %s:%s"%(fname,fitname) + print(">>> File %s:%s"%(fname,fitname)) fit = file.Get(fitname) # fit results if not fit: warning(">>> Could not get %r!"%(fit)) @@ -68,7 +68,7 @@ def plotCorrelationHist(fname,**kwargs): rpars = [ ] # list of POIs for correlation computation for poi in pois: npars_ = 0 - print ">>> Looking for %r..."%(poi) + print(">>> Looking for %r..."%(poi)) if any(c in poi for c in '.*+()[]'): # assume regexp exp_poi = re.compile(poi) for i in range(npars): # find all matches to POI regexp @@ -77,11 +77,11 @@ def plotCorrelationHist(fname,**kwargs): if not exp_poi.search(rpar.GetName()): continue # no match rpars.append(rpar) npars_ += 1 - print ">>> Found %d parameters for %r"%(npars_,poi) + print(">>> Found %d parameters for %r"%(npars_,poi)) else: # find by exact name rpar = fpars.find(poi) rpars.append(rpar) - print ">>> Number of POIs for correlation: %d"%(len(rpars)) + print(">>> Number of POIs for correlation: %d"%(len(rpars))) #corrs = fit.correlation(rpar) #corrs = 
fit.correlation(poi) for i in range(npars): @@ -90,7 +90,7 @@ def plotCorrelationHist(fname,**kwargs): npar = fpars.at(i) if filters and not any(f.search(npar.GetName()) for f in filters): continue ridx = rpars.index(npar)+1 if npar in rpars else 0 # avoid double counting multiple POIs (assume same order) - #print len(rpars[ridx:]), ridx, npar.GetName() + #print(len(rpars[ridx:]), ridx, npar.GetName()) for rpar in rpars[ridx:]: # get correlation to each POI #if npar==rpar: continue # redundant with ridx corr = fit.correlation(rpar,npar) @@ -98,9 +98,9 @@ def plotCorrelationHist(fname,**kwargs): if abs(corr)>cut: cuts[cut] += 1 if abs(corr)>pcut: # print - print ">>> %+6.3f > %.2f: %s to %s"%(corr,pcut,npar.GetName(),rpar.GetName()) + print(">>> %+6.3f > %.2f: %s to %s"%(corr,pcut,npar.GetName(),rpar.GetName())) #corr = corrs.at(i) - #print i, corr, rpar, npar + #print(i, corr, rpar, npar) hist.Fill(corr) ymargin = 1.15 @@ -207,7 +207,7 @@ def plotCorrelationHist(fname,**kwargs): def drawCorrelationMatrix(hist,pname,**kwargs): """Help function to plot correlation matrix.""" - print ">>> drawCorrelationMatrix(%r)"%(pname) + print(">>> drawCorrelationMatrix(%r)"%(pname)) zmin = kwargs.get('zmin', -100.0 ) zmax = kwargs.get('zmax', 100.0 ) title = kwargs.get('title', "" ) @@ -240,8 +240,8 @@ def drawCorrelationMatrix(hist,pname,**kwargs): xlsize = min(0.050*scaleF,0.03) # x label size ylsize = min(0.056*scaleF,0.03) # x label size dsize = min(0.15*scaleF,0.03) # divider size - print ">>> canvas %d x %d"%(canvasW,canvasH) - print ">>> scaleF=%.5g, scaleH=%.5g, scaleW=%.5g"%(scaleF,scaleH,scaleW) + print(">>> canvas %d x %d"%(canvasW,canvasH)) + print(">>> scaleF=%.5g, scaleH=%.5g, scaleW=%.5g"%(scaleF,scaleH,scaleW)) canvas = TCanvas('canvas','canvas',100,100,canvasW,canvasH) canvas.SetFillColor(0) @@ -291,8 +291,8 @@ def drawCorrelationMatrix(hist,pname,**kwargs): ybody = 1.-tmargin-bmargin dx = (4.9*xlsize+0.6*dsize)*nxbins/xbody dy = 
(4.2*ylsize+0.7*dsize)*nybins/ybody - #print ">>> dsize=%s, xlsize=%s, nxbins=%s, xbody=%s => dx=%s"%(dsize,xlsize,nxbins,xbody,dx) - #print ">>> dsize=%s, ylsize=%s, nybins=%s, ybody=%s => dy=%s"%(dsize,ylsize,nybins,ybody,dy) + #print(">>> dsize=%s, xlsize=%s, nxbins=%s, xbody=%s => dx=%s"%(dsize,xlsize,nxbins,xbody,dx)) + #print(">>> dsize=%s, ylsize=%s, nybins=%s, ybody=%s => dy=%s"%(dsize,ylsize,nybins,ybody,dy)) if xcats: latex = TLatex() latex.SetTextSize(dsize) diff --git a/Fitter/python/plot/datacard.py b/Fitter/python/plot/datacard.py index 9484ef7cc..2811205b9 100644 --- a/Fitter/python/plot/datacard.py +++ b/Fitter/python/plot/datacard.py @@ -85,7 +85,7 @@ def createinputs(fname,sampleset,obsset,bins,syst="",**kwargs): fname_ = repkey(fname,OBS=obsname,TAG=tag) # replace keys file = TFile.Open(fname_,option) if recreate: - print ">>> created file %s"%(fname_) + print(">>> created file %s"%(fname_)) for selection in bins: if not obs.plotfor(selection): continue obs.changecontext(selection) # update contextual cuts, binning, name, title, ... @@ -100,11 +100,11 @@ def createinputs(fname,sampleset,obsset,bins,syst="",**kwargs): # GET HISTS for selection in bins: bin = selection.filename # bin name - print ">>>\n>>> "+color(" %s "%(bin),'magenta',bold=True,ul=True) + print(">>>\n>>> "+color(" %s "%(bin),'magenta',bold=True,ul=True)) if htag: # hist tag for systematic - print ">>> systematic uncertainty: %s"%(color(htag.lstrip('_'),'grey')) + print(">>> systematic uncertainty: %s"%(color(htag.lstrip('_'),'grey'))) if recreate or verbosity>=1: - print ">>> %r"%(selection.selection) + print(">>> %r"%(selection.selection)) for obs in obsset: # update contextual cuts, binning, name, title, ... 
obs.changecontext(selection) hists = sampleset.gethists(obsset,selection,method=method,split=True, @@ -132,7 +132,7 @@ def createinputs(fname,sampleset,obsset,bins,syst="",**kwargs): hist.GetXaxis().SetTitle(obs.title) for i, yval in enumerate(hist): if yval<0: - print ">>> replace bin %d (%.3f<0) of %r"%(i,yval,hist.GetName()) + print(">>> replace bin %d (%.3f<0) of %r"%(i,yval,hist.GetName())) hist.SetBinContent(i,0) if files[obs].cd(bin): # $FILE:$BIN/$PROCESS_$SYSTEMATC hist.Write(name,TH1.kOverwrite) @@ -162,7 +162,7 @@ def plotinputs(fname,varprocs,obsset,bins,**kwargs): groups = kwargs.get('group', [ ] ) # add processes together into one histogram verbosity = kwargs.get('verb', 0 ) ensuredir(outdir) - print ">>>\n>>> "+color(" plotting... ",'magenta',bold=True,ul=True) + print(">>>\n>>> "+color(" plotting... ",'magenta',bold=True,ul=True)) if 'Nom' not in varprocs: LOG.warning("plotinputs: Cannot make plots because did not find nominal process templates 'Nom'.") return diff --git a/Fitter/python/plot/postfit.py b/Fitter/python/plot/postfit.py index dc17bf8a1..acf445257 100644 --- a/Fitter/python/plot/postfit.py +++ b/Fitter/python/plot/postfit.py @@ -12,7 +12,7 @@ def drawpostfit(fname,bin,procs,**kwargs): """Plot pre- and post-fit plots PostFitShapesFromWorkspace.""" - print '>>>\n>>> drawpostfit("%s","%s")'%(fname,bin) + print(">>>\n>>> drawpostfit(%r,%r)"%(fname,bin)) outdir = kwargs.get('outdir', "" ) pname = kwargs.get('pname', "$FIT.png" ) # replace $FIT = 'prefit', 'postfit' ratio = kwargs.get('ratio', True ) From f92f7e7fc1f3e9e808e6f3b000a2adb0df73e152 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Sun, 28 May 2023 17:41:07 +0200 Subject: [PATCH 18/55] fix typos (common/tools) --- common/python/tools/file.py | 2 +- common/python/tools/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/common/python/tools/file.py b/common/python/tools/file.py index af52b7087..9d376c2e8 100644 --- a/common/python/tools/file.py +++ 
b/common/python/tools/file.py @@ -75,7 +75,7 @@ def ensuremodule(modname,package): try: module = importlib.import_module(modpath) except Exception as err: - print traceback.format_exc() + print(traceback.format_exc()) LOG.throw(ImportError,"Importing module '%s' failed. Please check %s! cwd=%r"%(modpath,modfile,os.getcwd())) if not hasattr(module,modclass): LOG.throw(IOError,"Module '%s' in %s does not have a module named '%s'!"%(module,modfile,modname)) diff --git a/common/python/tools/utils.py b/common/python/tools/utils.py index dcade373d..1824666fd 100644 --- a/common/python/tools/utils.py +++ b/common/python/tools/utils.py @@ -31,7 +31,7 @@ def execute(command,dry=False,fatal=True,verb=0): out = out.strip() except Exception as e: if verb<1: - print(out #">>> Output: %s"%(out)) + print(out) #">>> Output: %s"%(out) print(">>> Failed: %r"%(command)) raise e if retcode and fatal: From 1988e1e31ad7caf8b3f87e58b8b4291bef87cc4c Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Tue, 30 May 2023 14:01:20 +0200 Subject: [PATCH 19/55] rename fillCommonCorrBraches -> fillCommonCorrBranches (since we're anyway touching most of these files) --- PicoProducer/python/analysis/ETauFakeRate/ModuleETau.py | 2 +- PicoProducer/python/analysis/HighPT/ModuleDiJet.py | 2 +- PicoProducer/python/analysis/HighPT/ModuleMuNu.py | 2 +- PicoProducer/python/analysis/HighPT/ModuleTauNu.py | 2 +- PicoProducer/python/analysis/HighPT/ModuleWJ.py | 2 +- PicoProducer/python/analysis/ModuleEMu.py | 2 +- PicoProducer/python/analysis/ModuleETau.py | 2 +- PicoProducer/python/analysis/ModuleHighPT.py | 2 +- PicoProducer/python/analysis/ModuleMuMu.py | 2 +- PicoProducer/python/analysis/ModuleMuTau.py | 2 +- PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p1.py | 2 +- PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p5.py | 2 +- PicoProducer/python/analysis/ModuleTauPair.py | 2 +- PicoProducer/python/analysis/ModuleTauTau.py | 2 +- PicoProducer/python/analysis/MuTauFakeRate/ModuleMuTau.py | 2 
+- PicoProducer/python/analysis/MuTauFakeRate/ModuleTauPair.py | 2 +- PicoProducer/python/analysis/TauES/ModuleMuTau.py | 2 +- 17 files changed, 17 insertions(+), 17 deletions(-) diff --git a/PicoProducer/python/analysis/ETauFakeRate/ModuleETau.py b/PicoProducer/python/analysis/ETauFakeRate/ModuleETau.py index e3acb3aff..0888782a0 100644 --- a/PicoProducer/python/analysis/ETauFakeRate/ModuleETau.py +++ b/PicoProducer/python/analysis/ETauFakeRate/ModuleETau.py @@ -273,7 +273,7 @@ def analyze(self, event): # WEIGHTS if self.ismc: - self.fillCommonCorrBraches(event,jets,met,njets_vars,met_vars) + self.fillCommonCorrBranches(event,jets,met,njets_vars,met_vars) if electron.pfRelIso03_all<0.50 and tau.idDeepTau2017v2p1VSjet>=2: self.btagTool.fillEffMaps(jets,usejec=self.dojec) diff --git a/PicoProducer/python/analysis/HighPT/ModuleDiJet.py b/PicoProducer/python/analysis/HighPT/ModuleDiJet.py index 722e1ae1d..1d7080215 100644 --- a/PicoProducer/python/analysis/HighPT/ModuleDiJet.py +++ b/PicoProducer/python/analysis/HighPT/ModuleDiJet.py @@ -139,7 +139,7 @@ def analyze(self, event): # WEIGHTS self.out.weight[0] = 1.0 # for data if self.ismc: - self.fillCommonCorrBraches(event) + self.fillCommonCorrBranches(event) self.out.trigweight[0] = 1.0 self.out.weight[0] = self.out.trigweight[0]*self.out.puweight[0]*self.out.genweight[0]*self.out.idisoweight_1[0] if self.dozpt: diff --git a/PicoProducer/python/analysis/HighPT/ModuleMuNu.py b/PicoProducer/python/analysis/HighPT/ModuleMuNu.py index ce37b6db3..2291da8d1 100644 --- a/PicoProducer/python/analysis/HighPT/ModuleMuNu.py +++ b/PicoProducer/python/analysis/HighPT/ModuleMuNu.py @@ -127,7 +127,7 @@ def analyze(self, event): # WEIGHTS self.out.weight[0] = 1.0 # for data if self.ismc: - self.fillCommonCorrBraches(event) + self.fillCommonCorrBranches(event) # MUON WEIGHTS self.out.trigweight[0] = self.muSFs.getTriggerSF(muon1.pt,muon1.eta) # muon trigger self.out.idisoweight_1[0] = self.muSFs.getIdIsoSF(muon1.pt,muon1.eta) # muon 
id and isolation SF diff --git a/PicoProducer/python/analysis/HighPT/ModuleTauNu.py b/PicoProducer/python/analysis/HighPT/ModuleTauNu.py index 4dfe3f5b3..2d1d46442 100644 --- a/PicoProducer/python/analysis/HighPT/ModuleTauNu.py +++ b/PicoProducer/python/analysis/HighPT/ModuleTauNu.py @@ -229,7 +229,7 @@ def analyze(self, event): # WEIGHTS self.out.weight[0] = 1.0 # for data if self.ismc: - self.fillCommonCorrBraches(event) + self.fillCommonCorrBranches(event) self.out.trigweight[0] = self.trig_corr.getWeight(self.out.metnomu[0],self.out.mhtnomu[0]) self.out.idisoweight_1[0] = 1.0 diff --git a/PicoProducer/python/analysis/HighPT/ModuleWJ.py b/PicoProducer/python/analysis/HighPT/ModuleWJ.py index 40168e3b6..b844bf5a8 100644 --- a/PicoProducer/python/analysis/HighPT/ModuleWJ.py +++ b/PicoProducer/python/analysis/HighPT/ModuleWJ.py @@ -189,7 +189,7 @@ def analyze(self, event): # WEIGHTS self.out.weight[0] = 1.0 # for data if self.ismc: - self.fillCommonCorrBraches(event) + self.fillCommonCorrBranches(event) # MUON WEIGHTS self.out.trigweight[0] = self.muSFs.getTriggerSF(muon1.pt,muon1.eta) # muon trigger SF self.out.idisoweight_1[0] = self.muSFs.getIdIsoSF(muon1.pt,muon1.eta) # muon Id/iso SF diff --git a/PicoProducer/python/analysis/ModuleEMu.py b/PicoProducer/python/analysis/ModuleEMu.py index db6048618..edb7de9c3 100644 --- a/PicoProducer/python/analysis/ModuleEMu.py +++ b/PicoProducer/python/analysis/ModuleEMu.py @@ -240,7 +240,7 @@ def analyze(self, event): # WEIGHTS if self.ismc: - self.fillCommonCorrBraches(event,jets,met,njets_vars,met_vars) + self.fillCommonCorrBranches(event,jets,met,njets_vars,met_vars) if electron.pfRelIso03_all<0.50 and muon.pfRelIso04_all<0.50: self.btagTool.fillEffMaps(jets,usejec=self.dojec) diff --git a/PicoProducer/python/analysis/ModuleETau.py b/PicoProducer/python/analysis/ModuleETau.py index 1cccd99c6..fcfa59372 100644 --- a/PicoProducer/python/analysis/ModuleETau.py +++ b/PicoProducer/python/analysis/ModuleETau.py @@ -258,7 +258,7 
@@ def analyze(self, event): # WEIGHTS if self.ismc: - self.fillCommonCorrBraches(event,jets,met,njets_vars,met_vars) + self.fillCommonCorrBranches(event,jets,met,njets_vars,met_vars) if electron.pfRelIso03_all<0.50 and tau.idDeepTau2017v2p1VSjet>=2: self.btagTool.fillEffMaps(jets,usejec=self.dojec) diff --git a/PicoProducer/python/analysis/ModuleHighPT.py b/PicoProducer/python/analysis/ModuleHighPT.py index 3668621f7..8bb01de16 100644 --- a/PicoProducer/python/analysis/ModuleHighPT.py +++ b/PicoProducer/python/analysis/ModuleHighPT.py @@ -344,7 +344,7 @@ def fillJetMETBranches(self,event,leptons,lep1): return jets, ht_muons, met, met_vars - def fillCommonCorrBraches(self, event): + def fillCommonCorrBranches(self, event): """Help function to apply common corrections, and fill weight branches.""" #if self.dorecoil: diff --git a/PicoProducer/python/analysis/ModuleMuMu.py b/PicoProducer/python/analysis/ModuleMuMu.py index b05cad314..16b7f829c 100644 --- a/PicoProducer/python/analysis/ModuleMuMu.py +++ b/PicoProducer/python/analysis/ModuleMuMu.py @@ -238,7 +238,7 @@ def analyze(self, event): # WEIGHTS if self.ismc: - self.fillCommonCorrBraches(event,jets,met,njets_vars,met_vars) + self.fillCommonCorrBranches(event,jets,met,njets_vars,met_vars) if muon1.pfRelIso04_all<0.50 and muon2.pfRelIso04_all<0.50: self.btagTool.fillEffMaps(jets,usejec=self.dojec) diff --git a/PicoProducer/python/analysis/ModuleMuTau.py b/PicoProducer/python/analysis/ModuleMuTau.py index 857010c5b..4d9e5e172 100644 --- a/PicoProducer/python/analysis/ModuleMuTau.py +++ b/PicoProducer/python/analysis/ModuleMuTau.py @@ -250,7 +250,7 @@ def analyze(self, event): # WEIGHTS if self.ismc: - self.fillCommonCorrBraches(event,jets,met,njets_vars,met_vars) + self.fillCommonCorrBranches(event,jets,met,njets_vars,met_vars) if muon.pfRelIso04_all<0.50 and tau.idDeepTau2017v2p1VSjet>=2: self.btagTool.fillEffMaps(jets,usejec=self.dojec) diff --git a/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p1.py 
b/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p1.py index 99757e5a7..5fa61f19f 100644 --- a/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p1.py +++ b/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p1.py @@ -275,7 +275,7 @@ def analyze(self, event): # WEIGHTS if self.ismc: - self.fillCommonCorrBraches(event,jets,met,njets_vars,met_vars) + self.fillCommonCorrBranches(event,jets,met,njets_vars,met_vars) if muon.pfRelIso04_all<0.50 and tau.idDeepTau2017v2p1VSjet>=2: #if muon.pfRelIso04_all<0.50 and tau.idDeepTau2018v2p5VSjet>=2: self.btagTool.fillEffMaps(jets,usejec=self.dojec) diff --git a/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p5.py b/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p5.py index 77797ad43..7e9a21ea6 100644 --- a/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p5.py +++ b/PicoProducer/python/analysis/ModuleMuTau_nanoV10_DeepTau2p5.py @@ -275,7 +275,7 @@ def analyze(self, event): # WEIGHTS if self.ismc: - self.fillCommonCorrBraches(event,jets,met,njets_vars,met_vars) + self.fillCommonCorrBranches(event,jets,met,njets_vars,met_vars) #if muon.pfRelIso04_all<0.50 and tau.idDeepTau2017v2p1VSjet>=2: if muon.pfRelIso04_all<0.50 and tau.idDeepTau2018v2p5VSjet>=2: self.btagTool.fillEffMaps(jets,usejec=self.dojec) diff --git a/PicoProducer/python/analysis/ModuleTauPair.py b/PicoProducer/python/analysis/ModuleTauPair.py index 017da44d0..057d969b1 100644 --- a/PicoProducer/python/analysis/ModuleTauPair.py +++ b/PicoProducer/python/analysis/ModuleTauPair.py @@ -360,7 +360,7 @@ def fillJetBranches(self,event,tau1,tau2): return jets, metnom, njets_vars, met_vars - def fillCommonCorrBraches(self, event, jets, met, njets_vars, met_vars): + def fillCommonCorrBranches(self, event, jets, met, njets_vars, met_vars): """Help function to apply common corrections, and fill weight branches.""" #if self.dorecoil: diff --git a/PicoProducer/python/analysis/ModuleTauTau.py 
b/PicoProducer/python/analysis/ModuleTauTau.py index bc35ad502..e8b171106 100644 --- a/PicoProducer/python/analysis/ModuleTauTau.py +++ b/PicoProducer/python/analysis/ModuleTauTau.py @@ -224,7 +224,7 @@ def analyze(self, event): # WEIGHTS if self.ismc: - self.fillCommonCorrBraches(event,jets,met,njets_vars,met_vars) + self.fillCommonCorrBranches(event,jets,met,njets_vars,met_vars) if tau1.idDeepTau2017v2p1VSjet>=2 and tau2.idDeepTau2017v2p1VSjet>=2: self.btagTool.fillEffMaps(jets,usejec=self.dojec) self.out.trigweight[0] = self.trigTool.getSFPair(tau1,tau2) diff --git a/PicoProducer/python/analysis/MuTauFakeRate/ModuleMuTau.py b/PicoProducer/python/analysis/MuTauFakeRate/ModuleMuTau.py index 1d0d66292..7efa43697 100644 --- a/PicoProducer/python/analysis/MuTauFakeRate/ModuleMuTau.py +++ b/PicoProducer/python/analysis/MuTauFakeRate/ModuleMuTau.py @@ -251,7 +251,7 @@ def analyze(self, event): # WEIGHTS if self.ismc: - self.fillCommonCorrBraches(event,jets,met,njets_vars,met_vars) + self.fillCommonCorrBranches(event,jets,met,njets_vars,met_vars) if muon.pfRelIso04_all<0.50 and tau.idDeepTau2017v2p1VSjet>=2: self.btagTool.fillEffMaps(jets,usejec=self.dojec) diff --git a/PicoProducer/python/analysis/MuTauFakeRate/ModuleTauPair.py b/PicoProducer/python/analysis/MuTauFakeRate/ModuleTauPair.py index a486e86db..fb0f8c863 100644 --- a/PicoProducer/python/analysis/MuTauFakeRate/ModuleTauPair.py +++ b/PicoProducer/python/analysis/MuTauFakeRate/ModuleTauPair.py @@ -297,7 +297,7 @@ def fillJetBranches(self,event,tau1,tau2): return jets, metnom, njets_vars, met_vars - def fillCommonCorrBraches(self, event, jets, met, njets_vars, met_vars): + def fillCommonCorrBranches(self, event, jets, met, njets_vars, met_vars): """Help function to apply common corrections, and fill weight branches.""" #if self.dorecoil: diff --git a/PicoProducer/python/analysis/TauES/ModuleMuTau.py b/PicoProducer/python/analysis/TauES/ModuleMuTau.py index 1a139f78e..ace76ac98 100644 --- 
a/PicoProducer/python/analysis/TauES/ModuleMuTau.py +++ b/PicoProducer/python/analysis/TauES/ModuleMuTau.py @@ -252,7 +252,7 @@ def analyze(self, event): # WEIGHTS if self.ismc: - self.fillCommonCorrBraches(event,jets,met,njets_vars,met_vars) + self.fillCommonCorrBranches(event,jets,met,njets_vars,met_vars) if muon.pfRelIso04_all<0.50 and tau.idDeepTau2017v2p1VSjet>=2: self.btagTool.fillEffMaps(jets,usejec=self.dojec) From f8f16699ec612175f87861db6e055185bdcc83d8 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Tue, 30 May 2023 14:08:23 +0200 Subject: [PATCH 20/55] add instructions --- README.md | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b1df4a571..83be8c0e4 100644 --- a/README.md +++ b/README.md @@ -8,13 +8,34 @@ Framework for tau analysis using NanoAOD at CMS. Three main packages are ## Installation First, setup a CMSSW release, for example, + + + + + + + + +
nanoAODv10 older versions
+ +```bash +export CMSSW=CMSSW_12_4_8 +export SCRAM_ARCH=el8_amd64_gcc10 +cmsrel $CMSSW +cd $CMSSW/src +cmsenv ``` + + +```bash export CMSSW=CMSSW_10_6_13 export SCRAM_ARCH=slc7_amd64_gcc700 cmsrel $CMSSW cd $CMSSW/src cmsenv ``` +
+ Which CMSSW version should matter for post-processing of nanoAOD, but if you like to use Combine in the same repository, it is better to use at least the [recommended version](https://cms-analysis.github.io/HiggsAnalysis-CombinedLimit/#setting-up-the-environment-and-installation). @@ -54,13 +75,15 @@ cd $CMSSW_BASE/src git clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit cd HiggsAnalysis/CombinedLimit git fetch origin -git checkout v8.1.0 +git checkout v8.1.0 # for CMSSW_10_X +git checkout v9.1.0 # for CMSSW_11_X ``` and then [`CombineHarvester`](https://github.com/cms-analysis/CombineHarvester), ``` cd $CMSSW_BASE/src git clone https://github.com/cms-analysis/CombineHarvester.git CombineHarvester scramv1 b clean; scramv1 b +git checkout v2.0.0 # for CMSSW_11_X only ``` ### TauID Scale Factor (SF) JSON and ROOT files creation From f9c322d63c91269640c51d9b6987a618fd984ad4 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Tue, 30 May 2023 14:49:36 +0200 Subject: [PATCH 21/55] update instruction for correctionlib/JSONs --- README.md | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 83be8c0e4..7fdb7aa28 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,6 @@ Framework for tau analysis using NanoAOD at CMS. Three main packages are 3. [`Fitter`](Fitter): Tools for measurements and fits in combine. [Under development.] ## Installation - First, setup a CMSSW release, for example, @@ -87,22 +86,12 @@ git checkout v2.0.0 # for CMSSW_11_X only ``` ### TauID Scale Factor (SF) JSON and ROOT files creation +To create JSON files for +[`correctionlib`](https://github.com/cms-nanoAOD/correctionlib), +please follow the instructions +[here](https://gitlab.cern.ch/cms-tau-pog/jsonpog-integration/-/blob/TauPOG_v2/POG/TAU/README4UPDATES.md). 
To create ROOT files including the measured SFs please install [`TauIDSFs` tool](https://github.com/cms-tau-pog/TauFW/#picoproducer) as illustrated above. Modify the `TauIDSFs/utils/createSFFiles.py` script to include your measured SFs into the script. Finally, run the `TauFW/scripts/tau_createROOT.sh` to generate your ROOT files. They will be created into `TauFW/scripts/data/` IMPORTANT: please comment and do not delete older SFs - -To create JSON files with SFs values, please install [`correctionlib`](https://github.com/cms-tau-pog/correctionlib) into the same `$CMSSW_BASE/src` as `TauFW`. -To install `correctionlib`: -``` -#Change directory to the same $CMSSW_BASE/src as TauFW -cd $CMSSW_BASE/src -source /cvmfs/sft.cern.ch/lcg/views/LCG_99/x86_64-centos7-gcc8-opt/setup.sh -git clone --recursive https://github.com/cms-tau-pog/correctionlib.git -cd correctionlib -python3 -m pip install --user . - -``` -Your SFs should be included in `correctionlib/scripts/tau_createJSONs.py`. -Finally, run the `TauFW/scripts/tau_createJSONs.sh` script. JSON files will be produced in the `TauFW/scripts/data/tau/new` folder. From 5950a6a3422892f7d79f4d77a5e9370577eb5b98 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Tue, 30 May 2023 14:52:05 +0200 Subject: [PATCH 22/55] update instructions --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 7fdb7aa28..5fb012967 100644 --- a/README.md +++ b/README.md @@ -90,6 +90,8 @@ To create JSON files for [`correctionlib`](https://github.com/cms-nanoAOD/correctionlib), please follow the instructions [here](https://gitlab.cern.ch/cms-tau-pog/jsonpog-integration/-/blob/TauPOG_v2/POG/TAU/README4UPDATES.md). +From at least `CMSSW_11_3_X`, `correctionlib` should be pre-installed. + To create ROOT files including the measured SFs please install [`TauIDSFs` tool](https://github.com/cms-tau-pog/TauFW/#picoproducer) as illustrated above. 
Modify the `TauIDSFs/utils/createSFFiles.py` script to include your measured SFs into the script. From 59de4321b03c5362c5fe11c2a1bf6e12a520d8c2 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Tue, 30 May 2023 15:50:36 +0200 Subject: [PATCH 23/55] fix typos --- PicoProducer/python/analysis/StitchEffs.py | 4 ++-- common/python/tools/log.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/PicoProducer/python/analysis/StitchEffs.py b/PicoProducer/python/analysis/StitchEffs.py index 787a15e2a..e59c57a2c 100755 --- a/PicoProducer/python/analysis/StitchEffs.py +++ b/PicoProducer/python/analysis/StitchEffs.py @@ -140,7 +140,7 @@ def printtable(hist,norm=False): # PLOT 2D HISTOGRAMS for hname in hnames_2d: hist = file.Get(hname) - assert hist, "Did not find %s:%s"(file.GetName(),hname) + assert hist, "Did not find %s:%s"%(file.GetName(),hname) hist.SetTitle(title) nevts = hist.Integral() if nevts>0: # normalize @@ -170,7 +170,7 @@ def printtable(hist,norm=False): hists = [ ] for title, file in files: hist = file.Get(hname) - assert hist, "Did not find %s:%s"(file.GetName(),hname) + assert hist, "Did not find %s:%s"%(file.GetName(),hname) hist.SetTitle(title) nevts = hist.Integral() if nevts>0: # normalize diff --git a/common/python/tools/log.py b/common/python/tools/log.py index 166a22329..f98f0942e 100644 --- a/common/python/tools/log.py +++ b/common/python/tools/log.py @@ -5,7 +5,7 @@ 'yellow': 33, 'orange': 33, 'blue': 34, 'purple': 35, 'magenta': 36, 'white': 37, 'grey': 90, 'none': 0 } -bcol_dict = {k: (10+v if v else v) for k,v in tcol_dict.iteritems()} +bcol_dict = {k: (10+v if v else v) for k,v in tcol_dict.items()} def color(string,c='green',b=False,ul=False,**kwargs): From e12c4a881469cd416930ada02e396f2e36b9fdf0 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Tue, 30 May 2023 17:40:06 +0200 Subject: [PATCH 24/55] import unicode from past; fix keys() -> list(keys()) --- PicoProducer/python/pico/job.py | 6 +++--- PicoProducer/python/storage/Sample.py 
| 5 +++-- PicoProducer/python/tools/config.py | 6 +++--- PicoProducer/scripts/pico.py | 25 +++++++++++++++++-------- 4 files changed, 26 insertions(+), 16 deletions(-) diff --git a/PicoProducer/python/pico/job.py b/PicoProducer/python/pico/job.py index 108628566..ee4f02d2e 100755 --- a/PicoProducer/python/pico/job.py +++ b/PicoProducer/python/pico/job.py @@ -538,7 +538,7 @@ def checkchunks(sample,**kwargs): bar.count(status) # GET FILES for RESUBMISSION + sanity checks - for ichunk in chunkdict.keys(): # chuckdict length might be changed (popped) + for ichunk in list(chunkdict.keys()): # chuckdict length might be changed (popped) if ichunk in pendchunks: # output still pending continue chunkfiles = chunkdict[ichunk] @@ -616,7 +616,7 @@ def checkchunks(sample,**kwargs): match = chunkexp.search(fname) if match: ichunk = int(match.group(1)) - LOG.insist(ichunk in chunkdict,"Found an impossible chunk %d for file %s! Chunkdict has %s"%(ichunk,fname,sorted(chunkdict.keys()))+ + LOG.insist(ichunk in chunkdict,"Found an impossible chunk %d for file %s! Chunkdict has %s"%(ichunk,fname,list(sorted(chunkdict.keys())))+ " Possible overcounting or conflicting job output file format! Check %s"%(oldcfgname)) if ichunk in pendchunks: continue @@ -661,7 +661,7 @@ def checkchunks(sample,**kwargs): # GET FILES for RESUBMISSION + sanity checks if verbosity>=2: print(">>> %-12s = %s"%('nprocevents',nprocevents)) - for ichunk in chunkdict.keys(): + for ichunk in list(chunkdict.keys()): count = goodchunks.count(ichunk)+pendchunks.count(ichunk)+badchunks.count(ichunk) LOG.insist(count in [0,1],"Found %d times chunk '%d' (good=%d, pending=%d, bad=%d). 
"%( count,ichunk,goodchunks.count(ichunk),pendchunks.count(ichunk),badchunks.count(ichunk))+ diff --git a/PicoProducer/python/storage/Sample.py b/PicoProducer/python/storage/Sample.py index c49309e7d..fef4da941 100644 --- a/PicoProducer/python/storage/Sample.py +++ b/PicoProducer/python/storage/Sample.py @@ -6,6 +6,7 @@ # root://t3dcachedb.psi.ch:1094/ # PSI T3 # root://storage01.lcg.cscs.ch/ # PSI T2 # root://cmseos.fnal.gov/ # Fermi lab +from past.builtins import basestring, unicode # for python2 compatibility import os, re, json import gzip import importlib @@ -214,7 +215,7 @@ def filterpath(self,filter=[],veto=[],copy=False,verb=0): if copy: sample = deepcopy(self) sample.paths = paths - for path in sample.pathfiles.keys(): + for path in list(sample.pathfiles.keys()): if path not in paths: sample.pathfiles.pop(path) return sample @@ -372,7 +373,7 @@ def _writefile(ofile,fname,prefix=""): else: print(">>> Write %s files to list %r..."%(len(files),listname_)) for i, path in enumerate(self.paths): - assert path in self.pathfiles, "self.pathfiles.keys()=%s"%(self.pathfiles.keys()) + assert path in self.pathfiles, "self.pathfiles.keys()=%s"%(list(self.pathfiles.keys())) print(">>> %3s files for %s..."%(len(self.pathfiles[path]),path)) lfile.write("DASPATH=%s\n"%(path)) # write special line to text file, which loadfiles() can parse for infile in self.pathfiles[path]: # loop over this list (general list is sorted) diff --git a/PicoProducer/python/tools/config.py b/PicoProducer/python/tools/config.py index bf6f1cc24..2465081cd 100644 --- a/PicoProducer/python/tools/config.py +++ b/PicoProducer/python/tools/config.py @@ -1,6 +1,6 @@ #! 
/usr/bin/env python # Author: Izaak Neutelings (May 2020) -from past.builtins import basestring # for python2 compatibility +from past.builtins import basestring, unicode # for python2 compatibility import os, sys, re, glob, json from datetime import datetime import importlib @@ -142,11 +142,11 @@ def __init__(self,cfgdict={ },path="config.json"): """Container class for a global configuration.""" self._dict = cfgdict self._path = path - for key in self._dict.keys(): + for key in list(self._dict.keys()): if isinstance(self._dict[key],unicode): self._dict[str(key)] = str(self._dict[key]) # convert unicode to str elif isinstance(self._dict[key],dict): - for subkey in self._dict[key].keys(): + for subkey in list(self._dict[key].keys()): item = self._dict[key][subkey] if isinstance(item,unicode): item = str(item) diff --git a/PicoProducer/scripts/pico.py b/PicoProducer/scripts/pico.py index 1a5816876..399656c08 100755 --- a/PicoProducer/scripts/pico.py +++ b/PicoProducer/scripts/pico.py @@ -2,13 +2,19 @@ # Author: Izaak Neutelings (April 2020) import os, sys, glob, json #import ROOT; ROOT.PyConfig.IgnoreCommandLineOptions = True -from TauFW.common.tools.file import ensurefile, ensureinit -from TauFW.common.tools.string import repkey, rreplace -from TauFW.PicoProducer.analysis.utils import ensuremodule -from TauFW.PicoProducer.storage.utils import getsamples -from TauFW.PicoProducer.pico.common import * - - +try: + from TauFW.common.tools.file import ensurefile, ensureinit + from TauFW.common.tools.string import repkey, rreplace + from TauFW.PicoProducer.analysis.utils import ensuremodule + from TauFW.PicoProducer.storage.utils import getsamples + from TauFW.PicoProducer.pico.common import * +except ImportError as err: + print("\033[1m\033[31mImportError for TauFW modules: Please check if you compiled with `scram b`.") + if sys.version_info[0]<3: + print("For CMSSW_12_X and higher, please check if you have the correct python version by default,\n" + "for which you can 
try this hack: `python() { python3 $@; }; export -f python`.\033[0m") + raise err + ############### # INSTALL # @@ -137,7 +143,7 @@ def main_install(args): parser_hdc = subparsers.add_parser('haddclean',parents=[parser_hdd_], help=help_hdc, description=help_hdc) parser_cln = subparsers.add_parser('clean', parents=[parser_chk], help=help_cln, description=help_cln) #parser_get.add_argument('variable', help='variable to change in the config file') - parser_get.add_argument('variable', help="variable to get information on",choices=['samples','files','nevents','nevts',]+CONFIG.keys()) + parser_get.add_argument('variable', help="variable to get information on",choices=['samples','files','nevents','nevts',]+list(CONFIG.keys())) parser_set.add_argument('variable', help="variable to set or change in the config file") parser_set.add_argument('key', help="channel or era key name", nargs='?', default=None) parser_set.add_argument('value', help="value for given value") @@ -204,6 +210,9 @@ def main_install(args): args = parser.parse_args(args) if hasattr(args,'tag') and len(args.tag)>=1 and args.tag[0]!='_': args.tag = '_'+args.tag + if args.subcommand==None: + parser.print_help() + exit(0) # VERBOSITY if args.verbosity>=2: From b86ecc37e2c23b6b0076e278fd447cfb56421c31 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 31 May 2023 13:48:05 +0200 Subject: [PATCH 25/55] replace exec() with eval() --- PicoProducer/python/analysis/utils.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/PicoProducer/python/analysis/utils.py b/PicoProducer/python/analysis/utils.py index 1e92e6032..0dcd1b496 100644 --- a/PicoProducer/python/analysis/utils.py +++ b/PicoProducer/python/analysis/utils.py @@ -248,16 +248,15 @@ def getmet(era,var="",useT1=False,verb=0): branch += "_T1" if var=='nom': var = "" - pt = '%s_pt'%(branch) - phi = '%s_phi'%(branch) + pt = '%s_pt'%(branch) + phi = '%s_phi'%(branch) if var: - pt += '_'+var - phi += '_'+var - funcstr = "func = 
lambda e: TLorentzVector(e.%s*cos(e.%s),e.%s*sin(e.%s),0,e.%s)"%(pt,phi,pt,phi,pt) - if verb>=1: + pt += '_'+var + phi += '_'+var + funcstr = "lambda e: TLorentzVector(e.%s*cos(e.%s),e.%s*sin(e.%s),0,e.%s)"%(pt,phi,pt,phi,pt) + if verb+2>=1: LOG.verb(">>> getmet: %r"%(funcstr)) - exec(funcstr) #in locals() - return func + return eval(funcstr) def correctmet(met,dp): @@ -296,11 +295,10 @@ def getmetfilters(era,isdata,verb=0): filters.extend(['Flag_eeBadScFilter']) # eeBadScFilter "not suggested" for MC if ('2017' in era or '2018' in era) and ('UL' not in era): filters.extend(['Flag_ecalBadCalibFilterV2']) # under review for change in Ultra Legacy - funcstr = "func = lambda e: e."+' and e.'.join(filters) + funcstr = "lambda e: e."+' and e.'.join(filters) if verb>=1: LOG.verb(">>> getmetfilters: %r"%(funcstr)) - exec(funcstr) #in locals() - return func + return eval(funcstr) def loosestIso(tau): From b60f88269bb34fd08d9c83a6aa576ef7f7d13f19 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 31 May 2023 13:49:06 +0200 Subject: [PATCH 26/55] remove unneeded import that does not work in python3 --- PicoProducer/python/corrections/RecoilCorrectionTool.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PicoProducer/python/corrections/RecoilCorrectionTool.py b/PicoProducer/python/corrections/RecoilCorrectionTool.py index 8b094931b..38d843c99 100644 --- a/PicoProducer/python/corrections/RecoilCorrectionTool.py +++ b/PicoProducer/python/corrections/RecoilCorrectionTool.py @@ -12,7 +12,7 @@ from TauFW.PicoProducer.analysis.utils import hasbit from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection import ROOT -from ROOT import TLorentzVector, gROOT, gSystem, gInterpreter, Double +from ROOT import TLorentzVector, gROOT, gSystem, gInterpreter #, Double rcpath = "HTT-utilities/RecoilCorrections/data/" zptpath = os.path.join(datadir,"zpt/") From 40f04ee93e24877df6f01534225c6b3debf04f5e Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 
31 May 2023 13:50:04 +0200 Subject: [PATCH 27/55] change default --- PicoProducer/python/processors/picojob.py | 2 +- PicoProducer/scripts/pico.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/PicoProducer/python/processors/picojob.py b/PicoProducer/python/processors/picojob.py index 3bfd436bd..28aff1611 100755 --- a/PicoProducer/python/processors/picojob.py +++ b/PicoProducer/python/processors/picojob.py @@ -15,7 +15,7 @@ parser.add_argument('-o', '--outdir', dest='outdir', type=str, default='.') parser.add_argument('-C', '--copydir', dest='copydir', type=str, default=None) parser.add_argument('-s', '--firstevt', dest='firstevt', type=int, default=0) -parser.add_argument('-m', '--maxevts', dest='maxevts', type=int, default=None) +parser.add_argument('-m', '--maxevts', dest='maxevts', type=int, default=-1) parser.add_argument('-t', '--tag', dest='tag', type=str, default="") parser.add_argument('-d', '--dtype', dest='dtype', choices=['data','mc','embed'], default=None) parser.add_argument('-y','-e','--era', dest='era', type=str, default='2018') diff --git a/PicoProducer/scripts/pico.py b/PicoProducer/scripts/pico.py index 399656c08..0e5a3a1f1 100755 --- a/PicoProducer/scripts/pico.py +++ b/PicoProducer/scripts/pico.py @@ -213,7 +213,7 @@ def main_install(args): if args.subcommand==None: parser.print_help() exit(0) - + # VERBOSITY if args.verbosity>=2: SLOG.setverbosity(args.verbosity-1) From a57063420e3443740b8150c7a2583a6c2ea247a8 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 31 May 2023 13:50:39 +0200 Subject: [PATCH 28/55] add 'validate' argument for CMSSW_12_X --- PicoProducer/python/corrections/BTagTool.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/PicoProducer/python/corrections/BTagTool.py b/PicoProducer/python/corrections/BTagTool.py index f58c4fb54..eb11cf3d7 100644 --- a/PicoProducer/python/corrections/BTagTool.py +++ b/PicoProducer/python/corrections/BTagTool.py @@ -191,7 +191,10 @@ def 
__init__(self,tagger,wp,era,channel,maxeta=None,loadsys=False,type_bc='comb' # LOAD CALIBRATION TOOL print("Loading BTagWeightTool for %s (%s WP) %s..."%(tagger,wp,csvname)) #,(", "+sigma) if sigma!='central' else "" - calib = BTagCalibration(tagger,csvname) + if 'validate' in BTagCalibration.__init__.__doc__: # for CMSSW_12_X + calib = BTagCalibration(tagger,csvname,False) # validate=False to speed up + else: # for older than CMSSW_12_X + calib = BTagCalibration(tagger,csvname) if csvname_bc and csvname_bc!=csvname: print(" and from %s..."%(csvname_bc)) calib_bc = BTagCalibration(tagger,csvname_bc) From 9107eb0e2a773cc4b209e467db673a6b5978101a Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 31 May 2023 15:44:21 +0200 Subject: [PATCH 29/55] add python2 version --- PicoProducer/README.md | 3 ++- PicoProducer/python/pico/job.py | 9 +++++++-- PicoProducer/python/pico/run.py | 3 ++- PicoProducer/scripts/pico.py | 5 ++--- PicoProducer/scripts/pico2.py | 4 ++++ 5 files changed, 17 insertions(+), 7 deletions(-) create mode 100755 PicoProducer/scripts/pico2.py diff --git a/PicoProducer/README.md b/PicoProducer/README.md index ed29ef8a5..013ba8301 100644 --- a/PicoProducer/README.md +++ b/PicoProducer/README.md @@ -12,7 +12,7 @@ There are two modes: A central script called [`pico.py`](scripts/pico.py) allows you to run both modes of nanoAOD processing, either locally or on a batch system. -You can link several skimming or analysis codes to _channels_. +You can link several skimming or analysis modules to so-called _channels_. TauFW PicoProducer workflow @@ -48,6 +48,7 @@ pico.py --help If CMSSW is compiled correctly with `scram b`, then the `pico.py` script should have been automatically copied from `scripts/` to `$CMSSW_BASE/bin/$SCRAM_ARCH`, and should be available as a command via `$PATH`. +To run with `python2`, please use `pico2.py` instead. 
If you need to access DAS for getting file lists of nanoAOD samples, make sure you have a GRID certificate installed, and a VOMS proxy setup diff --git a/PicoProducer/python/pico/job.py b/PicoProducer/python/pico/job.py index ee4f02d2e..d8c15c2b1 100755 --- a/PicoProducer/python/pico/job.py +++ b/PicoProducer/python/pico/job.py @@ -226,7 +226,7 @@ def preparejobs(args): print(">>> %r"%file) print(">>> ]") - # CHUNKS - partition/split list + # CHUNKS - partition/split list of input files infiles.sort() # to have consistent order with resubmission chunks = [ ] # chunk indices if maxevts_>1: @@ -262,6 +262,8 @@ def preparejobs(args): if fchunks: with open(joblist,'w') as listfile: ichunk = 0 + + # PREPARE JOB per input chunk for fchunk in fchunks: while ichunk in chunkdict: ichunk += 1 # allows for different nfilesperjob on resubmission @@ -281,7 +283,10 @@ def preparejobs(args): filetag += "_%d"%(ichunk) elif firstevt>=0: filetag += "_%d"%(firstevt/maxevts__) - jobcmd = processor + + # BUILD COMMAND + python = "python2" if sys.version<3 else "python3" + runcmd = "%s %s"%(python,processor) if procopts: jobcmd += " %s"%(procopts) if skim: diff --git a/PicoProducer/python/pico/run.py b/PicoProducer/python/pico/run.py index 25f322363..0017ad703 100755 --- a/PicoProducer/python/pico/run.py +++ b/PicoProducer/python/pico/run.py @@ -137,7 +137,8 @@ def main_run(args): print(">>> ]") # RUN - runcmd = processor + python = "python2" if sys.version<3 else "python3" + runcmd = "%s %s"%(python,processor) if procopts: runcmd += " %s"%(procopts) if skim: diff --git a/PicoProducer/scripts/pico.py b/PicoProducer/scripts/pico.py index 0e5a3a1f1..1c2c49b1a 100755 --- a/PicoProducer/scripts/pico.py +++ b/PicoProducer/scripts/pico.py @@ -1,4 +1,4 @@ -#! /usr/bin/env python +#! 
/usr/bin/env python3 # Author: Izaak Neutelings (April 2020) import os, sys, glob, json #import ROOT; ROOT.PyConfig.IgnoreCommandLineOptions = True @@ -11,8 +11,7 @@ except ImportError as err: print("\033[1m\033[31mImportError for TauFW modules: Please check if you compiled with `scram b`.") if sys.version_info[0]<3: - print("For CMSSW_12_X and higher, please check if you have the correct python version by default,\n" - "for which you can try this hack: `python() { python3 $@; }; export -f python`.\033[0m") + print("For releases older than CMSSW_12_X, please use pico2.py with python2.\033[0m") raise err diff --git a/PicoProducer/scripts/pico2.py b/PicoProducer/scripts/pico2.py new file mode 100755 index 000000000..508e892d8 --- /dev/null +++ b/PicoProducer/scripts/pico2.py @@ -0,0 +1,4 @@ +#! /bin/sh +# Author: Izaak Neutelings (May 2023) +# Description: Run pico.py with python2 for backward compatibility +python2 $(dirname $BASH_SOURCE)/pico.py $@ \ No newline at end of file From 8d52a9a3e3c9234bb4fb8af1f2f2c305bac1b328 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 31 May 2023 16:07:04 +0200 Subject: [PATCH 30/55] make python2 compatible with 'from __future__ import print_function' --- PicoProducer/python/pico/job.py | 4 ++-- PicoProducer/python/pico/run.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/PicoProducer/python/pico/job.py b/PicoProducer/python/pico/job.py index d8c15c2b1..4de0dfb49 100755 --- a/PicoProducer/python/pico/job.py +++ b/PicoProducer/python/pico/job.py @@ -1,6 +1,6 @@ #! 
/usr/bin/env python # Author: Izaak Neutelings (April 2020) -import os, re, glob, json +import os, sys, re, glob, json from datetime import datetime from collections import OrderedDict from TauFW.common.tools.file import ensuredir, getline @@ -285,7 +285,7 @@ def preparejobs(args): filetag += "_%d"%(firstevt/maxevts__) # BUILD COMMAND - python = "python2" if sys.version<3 else "python3" + python = "python2" if sys.version_info.major<3 else "python3" runcmd = "%s %s"%(python,processor) if procopts: jobcmd += " %s"%(procopts) diff --git a/PicoProducer/python/pico/run.py b/PicoProducer/python/pico/run.py index 0017ad703..7cb55c21a 100755 --- a/PicoProducer/python/pico/run.py +++ b/PicoProducer/python/pico/run.py @@ -1,5 +1,5 @@ # Author: Izaak Neutelings (April 2020) -import os +import os, sys from TauFW.common.tools.utils import execute from TauFW.PicoProducer.analysis.utils import ensuremodule from TauFW.PicoProducer.storage.utils import getsamples, print_no_samples @@ -137,7 +137,7 @@ def main_run(args): print(">>> ]") # RUN - python = "python2" if sys.version<3 else "python3" + python = "python2" if sys.version_info.major<3 else "python3" runcmd = "%s %s"%(python,processor) if procopts: runcmd += " %s"%(procopts) From 2049a80e03d7210e6d53ffc53fa54d4c3e8d14a5 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 31 May 2023 16:10:05 +0200 Subject: [PATCH 31/55] make python2 compatible with 'from __future__ import print_function' --- Plotter/python/corrections/JetToTauFR/tools/histograms.py | 1 + Plotter/python/corrections/JetToTauFR/tools/plots.py | 1 + common/python/tools/file.py | 1 + 3 files changed, 3 insertions(+) diff --git a/Plotter/python/corrections/JetToTauFR/tools/histograms.py b/Plotter/python/corrections/JetToTauFR/tools/histograms.py index a31c0c029..9a215a73c 100755 --- a/Plotter/python/corrections/JetToTauFR/tools/histograms.py +++ b/Plotter/python/corrections/JetToTauFR/tools/histograms.py @@ -8,6 +8,7 @@ 
#================================================================================================ # Import modules #================================================================================================ +from __future__ import print_function # for python3 compatibility from past.builtins import basestring # for python2 compatibility import os, sys import glob diff --git a/Plotter/python/corrections/JetToTauFR/tools/plots.py b/Plotter/python/corrections/JetToTauFR/tools/plots.py index 3fdbd2167..3b763c818 100755 --- a/Plotter/python/corrections/JetToTauFR/tools/plots.py +++ b/Plotter/python/corrections/JetToTauFR/tools/plots.py @@ -15,6 +15,7 @@ #================================================================================================ # Import Modules #================================================================================================ +from __future__ import print_function # for python3 compatibility from past.builtins import basestring # for python2 compatibility import sys import array diff --git a/common/python/tools/file.py b/common/python/tools/file.py index 9d376c2e8..be4f4f1f3 100644 --- a/common/python/tools/file.py +++ b/common/python/tools/file.py @@ -1,5 +1,6 @@ # Author: Izaak Neutelings (May 2020) from __future__ import print_function # for python3 compatibility +from past.builtins import basestring # for python2 compatibility import os, re, shutil, glob import importlib, traceback import ROOT; ROOT.PyConfig.IgnoreCommandLineOptions = True From aa77a52ec1987fbec6409bb7eebdfe075e554cbe Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 31 May 2023 17:52:59 +0200 Subject: [PATCH 32/55] remove unneeded TauPOG.TauIDSFs import --- PicoProducer/python/analysis/ModuleMuMu.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/PicoProducer/python/analysis/ModuleMuMu.py b/PicoProducer/python/analysis/ModuleMuMu.py index 16b7f829c..b1063b12f 100644 --- a/PicoProducer/python/analysis/ModuleMuMu.py +++ 
b/PicoProducer/python/analysis/ModuleMuMu.py @@ -7,7 +7,7 @@ from TauFW.PicoProducer.analysis.utils import LeptonPair, idIso, matchtaujet from TauFW.PicoProducer.corrections.MuonSFs import * from TauFW.PicoProducer.corrections.TrigObjMatcher import loadTriggerDataFromJSON, TrigObjMatcher -from TauPOG.TauIDSFs.TauIDSFTool import TauIDSFTool, TauESTool +#from TauPOG.TauIDSFs.TauIDSFTool import TauIDSFTool, TauESTool class ModuleMuMu(ModuleTauPair): From 53693ffda26ba2ab93acb065d28d93bb40df714b Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 31 May 2023 17:56:35 +0200 Subject: [PATCH 33/55] fix bug --- PicoProducer/python/pico/job.py | 2 +- PicoProducer/scripts/pico.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/PicoProducer/python/pico/job.py b/PicoProducer/python/pico/job.py index 4de0dfb49..1a59441f4 100755 --- a/PicoProducer/python/pico/job.py +++ b/PicoProducer/python/pico/job.py @@ -286,7 +286,7 @@ def preparejobs(args): # BUILD COMMAND python = "python2" if sys.version_info.major<3 else "python3" - runcmd = "%s %s"%(python,processor) + jobcmd = "%s %s"%(python,processor) if procopts: jobcmd += " %s"%(procopts) if skim: diff --git a/PicoProducer/scripts/pico.py b/PicoProducer/scripts/pico.py index 1c2c49b1a..200509e2f 100755 --- a/PicoProducer/scripts/pico.py +++ b/PicoProducer/scripts/pico.py @@ -10,8 +10,7 @@ from TauFW.PicoProducer.pico.common import * except ImportError as err: print("\033[1m\033[31mImportError for TauFW modules: Please check if you compiled with `scram b`.") - if sys.version_info[0]<3: - print("For releases older than CMSSW_12_X, please use pico2.py with python2.\033[0m") + "For releases older than CMSSW_12_X, please use pico2.py with python2.\033[0m") raise err From 1867a62096ffbf4666a115efda27eb578ee5b126 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 31 May 2023 17:58:29 +0200 Subject: [PATCH 34/55] fix bug --- PicoProducer/scripts/pico.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/PicoProducer/scripts/pico.py b/PicoProducer/scripts/pico.py index 200509e2f..a9aecd387 100755 --- a/PicoProducer/scripts/pico.py +++ b/PicoProducer/scripts/pico.py @@ -9,7 +9,7 @@ from TauFW.PicoProducer.storage.utils import getsamples from TauFW.PicoProducer.pico.common import * except ImportError as err: - print("\033[1m\033[31mImportError for TauFW modules: Please check if you compiled with `scram b`.") + print("\033[1m\033[31mImportError for TauFW modules: Please check if you compiled with `scram b`." "For releases older than CMSSW_12_X, please use pico2.py with python2.\033[0m") raise err From 319a85d567506f6af7e338a5e84f84a1dd59f843 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 31 May 2023 19:31:59 +0200 Subject: [PATCH 35/55] make python2-python3 compatible: command line (buffsize=1 -> 0); long --- PicoProducer/python/storage/Sample.py | 2 ++ common/python/tools/log.py | 1 + common/python/tools/utils.py | 10 +++------- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/PicoProducer/python/storage/Sample.py b/PicoProducer/python/storage/Sample.py index fef4da941..355d1cde9 100644 --- a/PicoProducer/python/storage/Sample.py +++ b/PicoProducer/python/storage/Sample.py @@ -238,12 +238,14 @@ def getfiles(self,das=False,refresh=False,url=True,limit=-1,verb=0): for daspath in self.paths: # loop over DAS dataset paths pathfiles[daspath] = [ ] if (self.storage and not das) or (not self.instance): # get files from storage system + LOG.verb("Sample.getfiles: Retrieving files from storage system %r..."%(self.storage),verb,2) postfix = self.postfix+'.root' sepath = repkey(self.storepath,PATH=daspath,DAS=daspath).replace('//','/') outlist = self.storage.getfiles(sepath,url=url,verb=verb-1) else: # get files from DAS postfix = '.root' outlist = getdasfiles(daspath,instance=self.instance,limit=limit,verb=verb-1) + LOG.verb("Sample.getfiles: outlist=%r"%(outlist),verb,4) for line in outlist: # filter ROOT files line = line.strip() if line.endswith(postfix) 
and not any(f.endswith(line) for f in self.blacklist): diff --git a/common/python/tools/log.py b/common/python/tools/log.py index f98f0942e..b950e671b 100644 --- a/common/python/tools/log.py +++ b/common/python/tools/log.py @@ -1,5 +1,6 @@ # Author: Izaak Neutelings (May 2020) from __future__ import print_function # for python3 compatibility +from past.builtins import long # for python2 compatibility tcol_dict = { 'black': 30, 'red': 31, 'green': 32, 'yellow': 33, 'orange': 33, 'blue': 34, diff --git a/common/python/tools/utils.py b/common/python/tools/utils.py index 1824666fd..1ebbf5d0c 100644 --- a/common/python/tools/utils.py +++ b/common/python/tools/utils.py @@ -16,18 +16,14 @@ def execute(command,dry=False,fatal=True,verb=0): if verb>=1: print(">>> Executing: %r"%(command)) try: - #process = Popen(command.split(),stdout=PIPE,stderr=STDOUT) #,shell=True) - process = Popen(command,stdout=PIPE,stderr=STDOUT,bufsize=1,shell=True) #,universal_newlines=True - for line in iter(process.stdout.readline,""): + process = Popen(command,stdout=PIPE,stderr=STDOUT,bufsize=0,shell=True) #,universal_newlines=True + for line in iter(process.stdout.readline,b''): # read line by line + line = str(line.decode('utf-8')) # decode/convert binary to str if verb>=1: # real time print out (does not work for python scripts without flush) print(line.rstrip()) out += line process.stdout.close() retcode = process.wait() - ##print(0, process.communicate()) - ##out = process.stdout.read() - ##err = process.stderr.read() - ##print(out) out = out.strip() except Exception as e: if verb<1: From f77603e723dad23566ced699c2354446ff9e97b1 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 7 Jun 2023 10:26:45 +0200 Subject: [PATCH 36/55] change relative import to absolute: .TreeProducer -> TauFW.PicoProducer.analysis.TreeProducer --- PicoProducer/python/analysis/GenFilterMuTau.py | 2 +- PicoProducer/python/analysis/GenMatcher.py | 2 +- PicoProducer/python/analysis/TreeProducerDiJet.py | 2 +- 
PicoProducer/python/analysis/TreeProducerEMu.py | 2 +- PicoProducer/python/analysis/TreeProducerETau.py | 2 +- PicoProducer/python/analysis/TreeProducerHighPT.py | 2 +- PicoProducer/python/analysis/TreeProducerMuMu.py | 2 +- PicoProducer/python/analysis/TreeProducerMuNu.py | 2 +- PicoProducer/python/analysis/TreeProducerMuTau.py | 2 +- PicoProducer/python/analysis/TreeProducerTauNu.py | 2 +- PicoProducer/python/analysis/TreeProducerTauPair.py | 2 +- PicoProducer/python/analysis/TreeProducerTauTau.py | 2 +- PicoProducer/python/analysis/TreeProducerWJ.py | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/PicoProducer/python/analysis/GenFilterMuTau.py b/PicoProducer/python/analysis/GenFilterMuTau.py index 56769fde7..a21b0876e 100755 --- a/PicoProducer/python/analysis/GenFilterMuTau.py +++ b/PicoProducer/python/analysis/GenFilterMuTau.py @@ -10,7 +10,7 @@ import ROOT; ROOT.PyConfig.IgnoreCommandLineOptions = True import re from ROOT import TLorentzVector, TH1D, TH2D, gStyle, kRed -from .TreeProducer import TreeProducer +from TauFW.PicoProducer.analysis.TreeProducer import TreeProducer from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module from PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Event from TauFW.PicoProducer.analysis.utils import hasbit, filtermutau, statusflags_dict, dumpgenpart, getdecaychain, getmother, deltaPhi diff --git a/PicoProducer/python/analysis/GenMatcher.py b/PicoProducer/python/analysis/GenMatcher.py index 6afc2782f..88687228c 100755 --- a/PicoProducer/python/analysis/GenMatcher.py +++ b/PicoProducer/python/analysis/GenMatcher.py @@ -26,7 +26,7 @@ import ROOT; ROOT.PyConfig.IgnoreCommandLineOptions = True import re from ROOT import TH2D, gStyle, kRed -from .TreeProducer import TreeProducer +from TauFW.PicoProducer.analysis.TreeProducer import TreeProducer from PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module from 
PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Event from TauFW.PicoProducer.analysis.utils import hasbit diff --git a/PicoProducer/python/analysis/TreeProducerDiJet.py b/PicoProducer/python/analysis/TreeProducerDiJet.py index 2706910a7..2f3477286 100644 --- a/PicoProducer/python/analysis/TreeProducerDiJet.py +++ b/PicoProducer/python/analysis/TreeProducerDiJet.py @@ -2,7 +2,7 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from .TreeProducerHighPT import TreeProducerHighPT +from TauFW.PicoProducer.analysis.TreeProducerHighPT import TreeProducerHighPT class TreeProducerDiJet(TreeProducerHighPT): diff --git a/PicoProducer/python/analysis/TreeProducerEMu.py b/PicoProducer/python/analysis/TreeProducerEMu.py index cc541ba33..975bbc772 100644 --- a/PicoProducer/python/analysis/TreeProducerEMu.py +++ b/PicoProducer/python/analysis/TreeProducerEMu.py @@ -2,7 +2,7 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from .TreeProducerTauPair import TreeProducerTauPair +from TauFW.PicoProducer.analysis.TreeProducerTauPair import TreeProducerTauPair class TreeProducerEMu(TreeProducerTauPair): diff --git a/PicoProducer/python/analysis/TreeProducerETau.py b/PicoProducer/python/analysis/TreeProducerETau.py index bde82c86f..1b51174d7 100644 --- a/PicoProducer/python/analysis/TreeProducerETau.py +++ b/PicoProducer/python/analysis/TreeProducerETau.py @@ -2,7 +2,7 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from .TreeProducerTauPair import TreeProducerTauPair +from TauFW.PicoProducer.analysis.TreeProducerTauPair import 
TreeProducerTauPair class TreeProducerETau(TreeProducerTauPair): diff --git a/PicoProducer/python/analysis/TreeProducerHighPT.py b/PicoProducer/python/analysis/TreeProducerHighPT.py index f01b875fb..4189d11f4 100644 --- a/PicoProducer/python/analysis/TreeProducerHighPT.py +++ b/PicoProducer/python/analysis/TreeProducerHighPT.py @@ -4,7 +4,7 @@ # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html from ROOT import TH1D -from .TreeProducer import TreeProducer +from TauFW.PicoProducer.analysis.TreeProducer import TreeProducer class TreeProducerHighPT(TreeProducer): diff --git a/PicoProducer/python/analysis/TreeProducerMuMu.py b/PicoProducer/python/analysis/TreeProducerMuMu.py index 362c83102..d030097ce 100644 --- a/PicoProducer/python/analysis/TreeProducerMuMu.py +++ b/PicoProducer/python/analysis/TreeProducerMuMu.py @@ -2,7 +2,7 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from .TreeProducerTauPair import TreeProducerTauPair +from TauFW.PicoProducer.analysis.TreeProducerTauPair import TreeProducerTauPair class TreeProducerMuMu(TreeProducerTauPair): diff --git a/PicoProducer/python/analysis/TreeProducerMuNu.py b/PicoProducer/python/analysis/TreeProducerMuNu.py index 06efa9957..91c9e7334 100644 --- a/PicoProducer/python/analysis/TreeProducerMuNu.py +++ b/PicoProducer/python/analysis/TreeProducerMuNu.py @@ -2,7 +2,7 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from .TreeProducerHighPT import TreeProducerHighPT +from TauFW.PicoProducer.analysis.TreeProducerHighPT import TreeProducerHighPT class TreeProducerMuNu(TreeProducerHighPT): diff --git 
a/PicoProducer/python/analysis/TreeProducerMuTau.py b/PicoProducer/python/analysis/TreeProducerMuTau.py index 4ae1f6a43..721db5cf0 100644 --- a/PicoProducer/python/analysis/TreeProducerMuTau.py +++ b/PicoProducer/python/analysis/TreeProducerMuTau.py @@ -2,7 +2,7 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from .TreeProducerTauPair import TreeProducerTauPair +from TauFW.PicoProducer.analysis.TreeProducerTauPair import TreeProducerTauPair class TreeProducerMuTau(TreeProducerTauPair): diff --git a/PicoProducer/python/analysis/TreeProducerTauNu.py b/PicoProducer/python/analysis/TreeProducerTauNu.py index 4ddb79d4f..aa8054cc3 100644 --- a/PicoProducer/python/analysis/TreeProducerTauNu.py +++ b/PicoProducer/python/analysis/TreeProducerTauNu.py @@ -2,7 +2,7 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from .TreeProducerHighPT import TreeProducerHighPT +from TauFW.PicoProducer.analysis.TreeProducerHighPT import TreeProducerHighPT class TreeProducerTauNu(TreeProducerHighPT): diff --git a/PicoProducer/python/analysis/TreeProducerTauPair.py b/PicoProducer/python/analysis/TreeProducerTauPair.py index bbe50d443..ad3a3efb7 100644 --- a/PicoProducer/python/analysis/TreeProducerTauPair.py +++ b/PicoProducer/python/analysis/TreeProducerTauPair.py @@ -4,7 +4,7 @@ # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html from ROOT import TH1D -from .TreeProducer import TreeProducer +from TauFW.PicoProducer.analysis.TreeProducer import TreeProducer class TreeProducerTauPair(TreeProducer): diff --git a/PicoProducer/python/analysis/TreeProducerTauTau.py 
b/PicoProducer/python/analysis/TreeProducerTauTau.py index 6df9b3bd8..673f7a59d 100644 --- a/PicoProducer/python/analysis/TreeProducerTauTau.py +++ b/PicoProducer/python/analysis/TreeProducerTauTau.py @@ -2,7 +2,7 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from .TreeProducerTauPair import TreeProducerTauPair +from TauFW.PicoProducer.analysis.TreeProducerTauPair import TreeProducerTauPair class TreeProducerTauTau(TreeProducerTauPair): diff --git a/PicoProducer/python/analysis/TreeProducerWJ.py b/PicoProducer/python/analysis/TreeProducerWJ.py index 6a42e547d..1f26d9c36 100644 --- a/PicoProducer/python/analysis/TreeProducerWJ.py +++ b/PicoProducer/python/analysis/TreeProducerWJ.py @@ -2,7 +2,7 @@ # Sources: # https://twiki.cern.ch/twiki/bin/viewauth/CMS/HiggsToTauTauWorking2016#Synchronisation # https://cms-nanoaod-integration.web.cern.ch/integration/master-102X/mc102X_doc.html -from .TreeProducerHighPT import TreeProducerHighPT +from TauFW.PicoProducer.analysis.TreeProducerHighPT import TreeProducerHighPT class TreeProducerWJ(TreeProducerHighPT): From 360fddae025dc827f3a3965c1bec946f440b9ab9 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 7 Jun 2023 10:27:41 +0200 Subject: [PATCH 37/55] debug --- PicoProducer/python/processors/picojob.py | 4 ++-- PicoProducer/python/processors/skimjob.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/PicoProducer/python/processors/picojob.py b/PicoProducer/python/processors/picojob.py index 28aff1611..d1bb24857 100755 --- a/PicoProducer/python/processors/picojob.py +++ b/PicoProducer/python/processors/picojob.py @@ -15,7 +15,7 @@ parser.add_argument('-o', '--outdir', dest='outdir', type=str, default='.') parser.add_argument('-C', '--copydir', dest='copydir', type=str, default=None) parser.add_argument('-s', '--firstevt', dest='firstevt', type=int, default=0) 
-parser.add_argument('-m', '--maxevts', dest='maxevts', type=int, default=-1) +parser.add_argument('-m', '--maxevts', dest='maxevts', type=int, default=None) parser.add_argument('-t', '--tag', dest='tag', type=str, default="") parser.add_argument('-d', '--dtype', dest='dtype', choices=['data','mc','embed'], default=None) parser.add_argument('-y','-e','--era', dest='era', type=str, default='2018') @@ -47,7 +47,7 @@ copydir = args.copydir # directory to copy output to at end firstevt = args.firstevt # index of first event to run maxevts = args.maxevts # maximum number of events to run -nfiles = 1 if maxevts>0 else -1 # maximum number of files to run +nfiles = 1 if (maxevts!=None and maxevts>0) else -1 # maximum number of files to run tag = args.tag # postfix tag of job output file if tag: tag = ('' if tag.startswith('_') else '_') + tag diff --git a/PicoProducer/python/processors/skimjob.py b/PicoProducer/python/processors/skimjob.py index 0b061f048..c3048a101 100755 --- a/PicoProducer/python/processors/skimjob.py +++ b/PicoProducer/python/processors/skimjob.py @@ -39,7 +39,7 @@ copydir = args.copydir # directory to copy output to at end firstevt = args.firstevt # index of first event to run maxevts = args.maxevts # maximum number of events to run -nfiles = -1 if maxevts>0 else -1 # maximum number of files to run +nfiles = 1 if (maxevts!=None and maxevts>0) else -1 # maximum number of files to run tag = args.tag # postfix tag of job output file tag = ('' if not tag or tag.startswith('_') else '_') + tag postfix = tag From e0be74dc5f22d048a7207cfc10ec8681e45cae20 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 7 Jun 2023 10:29:09 +0200 Subject: [PATCH 38/55] add comments; improve error handling --- .gitignore | 1 + PicoProducer/python/batch/utils.py | 2 +- PicoProducer/python/corrections/BTagTool.py | 2 +- PicoProducer/python/pico/job.py | 10 ++++++---- PicoProducer/python/storage/Sample.py | 2 +- PicoProducer/python/storage/utils.py | 16 ++++++++++++---- 6 files 
changed, 22 insertions(+), 11 deletions(-) diff --git a/.gitignore b/.gitignore index 1bc385e81..c4c99e14a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ *.pyc +*.bak *.d *.so *.pcm diff --git a/PicoProducer/python/batch/utils.py b/PicoProducer/python/batch/utils.py index cd4e82252..7f7b1a920 100644 --- a/PicoProducer/python/batch/utils.py +++ b/PicoProducer/python/batch/utils.py @@ -20,7 +20,7 @@ def chunkify_by_evts(fnames,maxevts,evenly=True,evtdict=None,verb=0): and update input fnames to bookkeep first event and maximum events. E.g. ['nano_1.root','nano_2.root','nano_3.root','nano_4.root'] -> [ ['nano_1.root:0:1000'], ['nano_1.root:1000:1000'], # 'fname:firstevt:maxevts' - ['nano_2.root','nano_3.root','nano_4.root'] ] + ['nano_2.root','nano_3.root','nano_4.root'] ] # group files with SAMPLE > CONFIG if split_nfpj>1: # divide nfilesperjob by split_nfpj nfilesperjob_ = int(max(1,nfilesperjob_/float(split_nfpj))) - elif resubmit and maxevts<=0: # reuse previous maxevts settings if maxevts not set by user + elif resubmit and (maxevts==None or maxevts<=0): # reuse previous maxevts settings if maxevts not set by user maxevts_ = sample.jobcfg.get('maxevts',maxevts_) if nfilesperjob<=0: # reuse previous nfilesperjob settings if nfilesperjob not set by user nfilesperjob_ = sample.jobcfg.get('nfilesperjob',nfilesperjob_) @@ -397,6 +397,7 @@ def checkchunks(sample,**kwargs): print(">>> %-12s = %s"%('ndasevents',ndasevents)) if verbosity>=3: print(">>> %-12s = %s"%('chunkdict',chunkdict)) + print(">>> %-12s = %s"%('ncores',ncores)) # CHECK PENDING JOBS if checkqueue<0 or pendjobs: @@ -472,8 +473,9 @@ def checkchunks(sample,**kwargs): goodfiles = [ ] bar = None # loading bar if verbosity<=1 and len(outfiles)>=15: + status = "files, 0/%d events (0%%)"%(ndasevents) if ndasevents>0 else "files" bar = LoadingBar(len(outfiles),width=20,pre=">>> Checking output files: ", - message="files, 0/%d (0%%)"%(ndasevents),counter=True,remove=True) + 
message=status,counter=True,remove=True) elif verbosity>=2: print(">>> %-12s = %s"%('pendchunks',pendchunks)) print(">>> %-12s = %s"%('outfiles',outfiles)) @@ -539,7 +541,7 @@ def checkchunks(sample,**kwargs): #LOG.warning("Did not recognize output file '%s'!"%(fname)) continue if bar: - status = "files, %s/%s events (%d%%)"%(nprocevents,ndasevents,100.0*nprocevents/ndasevents) if ndasevents>0 else "" + status = "files, %s/%s events (%d%%)"%(nprocevents,ndasevents,100.0*nprocevents/ndasevents) if ndasevents>0 else "files" bar.count(status) # GET FILES for RESUBMISSION + sanity checks @@ -708,7 +710,7 @@ def printchunks(jobden,label,text,col,show=False): if checkevts: ratio = 100.0*nprocevents/ndasevents rcol = 'green' if ratio>90. else 'yellow' if ratio>80. else 'red' - rtext = ": "+color("%d/%d (%d%%)"%(nprocevents,ndasevents,ratio),rcol,bold=True) + rtext = ": "+color("%d/%d events (%d%%)"%(nprocevents,ndasevents,ratio),rcol,bold=True) else: rtext = ": expect %d events"%(ndasevents) printchunks(goodchunks,'SUCCESS', "Chunks with output in outdir"+rtext,'green') diff --git a/PicoProducer/python/storage/Sample.py b/PicoProducer/python/storage/Sample.py index 355d1cde9..44557cc33 100644 --- a/PicoProducer/python/storage/Sample.py +++ b/PicoProducer/python/storage/Sample.py @@ -89,7 +89,7 @@ def __init__(self,group,name,*paths,**kwargs): self.refreshable = not self.files # allow refresh of file list in getfiles() # CHECK PATH FORMAT - if not self.storepath and not self.files: + if not self.storepath: #and not self.files: for path in self.paths: if path.count('/')<3 or not path.startswith('/'): LOG.warn("DAS path %r has wrong format. 
Need /SAMPLE/CAMPAIGN/TIER."%(path)) diff --git a/PicoProducer/python/storage/utils.py b/PicoProducer/python/storage/utils.py index 681204b8b..9a766b447 100644 --- a/PicoProducer/python/storage/utils.py +++ b/PicoProducer/python/storage/utils.py @@ -130,16 +130,24 @@ def getnevents(fname,treename='Events',verb=0): def isvalid(fname,hname='cutflow',bin=1): """Check if a given file is valid, or corrupt.""" nevts = -1 - file = TFile.Open(fname,'READ') + try: + file = TFile.Open(fname,'READ') + except OSError as err: + print(err) + file = None if file and not file.IsZombie(): if file.GetListOfKeys().Contains('Events'): # NANOAOD nevts = file.Get('Events').GetEntries() if nevts<=0: LOG.warning("'Events' tree of file %r has nevts=%s<=0..."%(fname,nevts)) elif file.GetListOfKeys().Contains('tree') and file.GetListOfKeys().Contains(hname): # pico - nevts = file.Get(hname).GetBinContent(bin) - if nevts<=0: - LOG.warning("Cutflow of file %r has nevts=%s<=0..."%(fname,nevts)) + hist = file.Get(hname) + if hist: + nevts = hist.GetBinContent(bin) + if nevts<=0: + LOG.warning("Cutflow of file %r has nevts=%s<=0..."%(fname,nevts)) + else: # corrupted ? 
+ LOG.warning("Could not open cutflow %s:%s..."%(fname,hname)) return nevts From a51833b0d9d03f747b83da634337f7985ffb35d5 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 7 Jun 2023 18:10:48 +0200 Subject: [PATCH 39/55] add haddcmd option for haddnano.py --- PicoProducer/python/pico/job.py | 6 ++-- PicoProducer/python/storage/GridKA_NRG.py | 30 ++++++++++---------- PicoProducer/python/storage/StorageSystem.py | 6 ++-- PicoProducer/python/tools/config.py | 4 +-- PicoProducer/scripts/pico.py | 4 ++- 5 files changed, 25 insertions(+), 25 deletions(-) diff --git a/PicoProducer/python/pico/job.py b/PicoProducer/python/pico/job.py index d4ac9887b..502cac13d 100755 --- a/PicoProducer/python/pico/job.py +++ b/PicoProducer/python/pico/job.py @@ -867,6 +867,7 @@ def main_status(args): dasfilters = args.dasfilters # filter (only include) these das paths (glob patterns) vetoes = args.vetoes # exclude these sample (glob patterns) dasvetoes = args.dasvetoes # exclude these DAS paths (glob patterns) + haddcmd = args.haddcmd # alternative hadd command, e.g. 
haddnano.py force = args.force subcmd = args.subcommand cleanup = subcmd=='clean' or (subcmd=='hadd' and args.cleanup) or subcmd=='haddclean' @@ -941,7 +942,7 @@ def main_status(args): postfix = sample.jobcfg['postfix'] storedir = repkey(storedirformat,ERA=era,CHANNEL=channel_,TAG=tag,SAMPLE=sample.name, DAS=sample.paths[0].strip('/'),GROUP=sample.group) - storage = getstorage(storedir,ensure=True,verb=verbosity) + storage = getstorage(storedir,ensure=True,haddcmd=haddcmd,verb=verbosity) outfile = '%s_%s%s.root'%(sample.name,channel_,tag) infiles = os.path.join(outdir,'*%s_[0-9]*.root'%(postfix)) cfgfiles = os.path.join(cfgdir,'job*%s_try[0-9]*.*'%(postfix)) @@ -964,11 +965,8 @@ def main_status(args): continue if subcmd in ['hadd','haddclean']: - #haddcmd = 'hadd -f %s %s'%(outfile,infiles) - #haddout = execute(haddcmd,dry=dryrun,verb=max(1,verbosity)) haddout = storage.hadd(infiles,outfile,dry=dryrun,verb=cmdverb,maxopenfiles=maxopenfiles) # TODO: add option to print out cutflow for outfile - #os.system(haddcmd) # CLEAN UP # TODO: check if hadd was succesful with isvalid diff --git a/PicoProducer/python/storage/GridKA_NRG.py b/PicoProducer/python/storage/GridKA_NRG.py index 240f71be4..57a27d964 100644 --- a/PicoProducer/python/storage/GridKA_NRG.py +++ b/PicoProducer/python/storage/GridKA_NRG.py @@ -11,16 +11,16 @@ class GridKA_NRG(StorageSystem): def __init__(self, path, verb=0, ensure=False): """NRG Storage at GridKA""" super(GridKA_NRG, self).__init__(path, verb=verb, ensure=False) - self.lscmd = "xrdfs" - self.lsurl = "root://cmsxrootd-kit.gridka.de/ ls " - self.rmcmd = "xrdfs" - self.rmurl = "root://cmsxrootd-kit.gridka.de/ ls " - self.mkdrcmd = "xrdfs" - self.mkdrurl = 'root://cmsxrootd-kit.gridka.de/ mkdir -p ' - self.cpcmd = 'xrdcp -f' - self.cpurl = "root://cmsxrootd-kit.gridka.de/" - self.tmpdir = '/tmp/' - self.fileurl = "root://cmsxrootd-kit.gridka.de/" + self.lscmd = "xrdfs" + self.lsurl = "root://cmsxrootd-kit.gridka.de/ ls " + self.rmcmd = "xrdfs" + 
self.rmurl = "root://cmsxrootd-kit.gridka.de/ ls " + self.mkdrcmd = "xrdfs" + self.mkdrurl = 'root://cmsxrootd-kit.gridka.de/ mkdir -p ' + self.cpcmd = 'xrdcp -f' + self.cpurl = "root://cmsxrootd-kit.gridka.de/" + self.tmpdir = '/tmp/' + self.fileurl = "root://cmsxrootd-kit.gridka.de/" self.localmount = "/storage/gridka-nrg/" if ensure: self.ensuredir(self.path) @@ -46,11 +46,11 @@ def remove_local_temp_dir(self, tmpdir, verb): def hadd(self, sources, target, **kwargs): """ - Hadd files. For NRG we use the local mountpoint at - /storage/gridka-nrg for this, merge locally - and then move the file back to NRG. - os.path.relpath(old_path, '/abc/dfg/') - """ + Hadd files. For NRG we use the local mountpoint at + /storage/gridka-nrg for this, merge locally + and then move the file back to NRG. + os.path.relpath(old_path, '/abc/dfg/') + """ target = self.expandpath(target, here=True) verb = kwargs.get('verb', self.verbosity) tmpdir = kwargs.get('tmpdir', target.startswith(self.parent)) diff --git a/PicoProducer/python/storage/StorageSystem.py b/PicoProducer/python/storage/StorageSystem.py index aadb949d1..392ffc7c9 100644 --- a/PicoProducer/python/storage/StorageSystem.py +++ b/PicoProducer/python/storage/StorageSystem.py @@ -12,7 +12,7 @@ class StorageSystem(object): - def __init__(self,path,verb=0,ensure=False): + def __init__(self,path,ensure=False,verb=0,**kwargs): self.path = path.rstrip('/') self.lscmd = 'ls' self.lsurl = '' @@ -28,7 +28,7 @@ def __init__(self,path,verb=0,ensure=False): self.chmdprm = '777' self.chmdcmd = 'chmod' self.chmdurl = '' - self.haddcmd = 'hadd -f' + self.haddcmd = kwargs.get('haddcmd',None) or 'hadd -f' self.tmpdir = '/tmp/$USER/' # $TMPDIR # mounted temporary directory self.fileurl = "" self.verbosity = verb @@ -196,7 +196,7 @@ def hadd(self,sources,target,**kwargs): print(">>> %-10s = %r"%('htarget',htarget)) print(">>> %-10s = %r"%('maxopen',maxopen)) haddcmd = self.haddcmd - if maxopen>=1: + if maxopen>=1 and 'haddnano.py' not in 
haddcmd: haddcmd += " -n %s"%(maxopen) out = self.execute("%s %s %s"%(haddcmd,htarget,source),dry=dryrun,verb=verb) if tmpdir: # copy hadd target and remove temporary file diff --git a/PicoProducer/python/tools/config.py b/PicoProducer/python/tools/config.py index 2465081cd..b7409bb02 100644 --- a/PicoProducer/python/tools/config.py +++ b/PicoProducer/python/tools/config.py @@ -51,7 +51,7 @@ ('batch',_batchsystem), ('queue',_queue), ('nfilesperjob',_nfilesperjob), ('maxevtsperjob',_maxevtsperjob), ('filelistdir',_filelistdir), - ('maxopenfiles',_maxopenfiles), + ('maxopenfiles',_maxopenfiles), ('haddcmd', "" ), # for pico.py hadd ('ncores',_ncores), ]) sys.path.append(basedir) @@ -119,7 +119,7 @@ def getconfig(verb=0,refresh=False): CONFIG = Config(cfgdict,cfgname) return CONFIG - + def setdefaultconfig(verb=0): """Set configuration to default values.""" diff --git a/PicoProducer/scripts/pico.py b/PicoProducer/scripts/pico.py index a9aecd387..1355d9c18 100755 --- a/PicoProducer/scripts/pico.py +++ b/PicoProducer/scripts/pico.py @@ -106,7 +106,9 @@ def main_install(args): parser_job.add_argument('--tmpdir', dest='tmpdir', type=str, default=None, help="for skimming only: temporary output directory befor copying to outdir") parser_hdd_.add_argument('-m','--maxopenfiles',dest='maxopenfiles', type=int, default=CONFIG.maxopenfiles, - metavar='NFILES', help="maximum numbers to be opened during hadd, default=%(default)d") + metavar='NFILES', help="maximum numbers to be opened during hadd, default=%(default)d") + parser_hdd_.add_argument('--haddcmd', dest='haddcmd', default=CONFIG.haddcmd, + help="alternative hadd command, e.g. 
haddnano.py") # SUBCOMMANDS subparsers = parser.add_subparsers(title="sub-commands",dest='subcommand',help="sub-command help") From 510f23f6b89c01024ffe46490d6c162a9f046541 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 7 Jun 2023 18:20:52 +0200 Subject: [PATCH 40/55] debug --- PicoProducer/python/pico/job.py | 7 +++++-- PicoProducer/python/storage/EOS.py | 4 ++-- PicoProducer/python/storage/GridKA_NRG.py | 4 ++-- PicoProducer/python/storage/StorageSystem.py | 2 +- PicoProducer/python/storage/T2_DESY.py | 4 ++-- PicoProducer/python/storage/T2_PSI.py | 4 ++-- PicoProducer/python/storage/T3_PSI.py | 4 ++-- PicoProducer/python/storage/utils.py | 19 ++++++++++--------- PicoProducer/scripts/pico.py | 4 ++-- 9 files changed, 28 insertions(+), 24 deletions(-) diff --git a/PicoProducer/python/pico/job.py b/PicoProducer/python/pico/job.py index 502cac13d..cdd323409 100755 --- a/PicoProducer/python/pico/job.py +++ b/PicoProducer/python/pico/job.py @@ -867,11 +867,14 @@ def main_status(args): dasfilters = args.dasfilters # filter (only include) these das paths (glob patterns) vetoes = args.vetoes # exclude these sample (glob patterns) dasvetoes = args.dasvetoes # exclude these DAS paths (glob patterns) - haddcmd = args.haddcmd # alternative hadd command, e.g. haddnano.py force = args.force subcmd = args.subcommand cleanup = subcmd=='clean' or (subcmd=='hadd' and args.cleanup) or subcmd=='haddclean' - maxopenfiles = args.maxopenfiles if subcmd=='hadd' else 0 # maximum number of files opened during hadd, via -n option + haddcmd = "" + maxopenfiles = 0 + if 'hadd' in subcmd: + haddcmd = args.haddcmd # alternative hadd command, e.g. haddnano.py + maxopenfiles = args.maxopenfiles # maximum number of files opened during hadd, via -n option dryrun = args.dryrun # run through routine without actually executing hadd, rm, ... 
ncores = args.ncores # number of cores; validate output files in parallel verbosity = args.verbosity diff --git a/PicoProducer/python/storage/EOS.py b/PicoProducer/python/storage/EOS.py index 9d3bfd117..78514a159 100644 --- a/PicoProducer/python/storage/EOS.py +++ b/PicoProducer/python/storage/EOS.py @@ -9,9 +9,9 @@ class EOS(StorageSystem): - def __init__(self,path,verb=0,ensure=False,eos=False): + def __init__(self,path,verb=0,ensure=False,eos=False,**kwargs): """EOS is mounted on lxplus, so no special override are necessary.""" - super(EOS,self).__init__(path,verb=verb,ensure=False) + super(EOS,self).__init__(path,verb=verb,ensure=False,**kwargs) if not self.mounted: # EOS is mounted on lxplus if eos: # use EOS command # https://cern.service-now.com/service-portal?id=kb_article&n=KB0001998 diff --git a/PicoProducer/python/storage/GridKA_NRG.py b/PicoProducer/python/storage/GridKA_NRG.py index 57a27d964..520fede3b 100644 --- a/PicoProducer/python/storage/GridKA_NRG.py +++ b/PicoProducer/python/storage/GridKA_NRG.py @@ -8,9 +8,9 @@ class GridKA_NRG(StorageSystem): - def __init__(self, path, verb=0, ensure=False): + def __init__(self,path,verb=0,ensure=False,**kwargs): """NRG Storage at GridKA""" - super(GridKA_NRG, self).__init__(path, verb=verb, ensure=False) + super(GridKA_NRG, self).__init__(path,verb=verb,ensure=False,**kwargs) self.lscmd = "xrdfs" self.lsurl = "root://cmsxrootd-kit.gridka.de/ ls " self.rmcmd = "xrdfs" diff --git a/PicoProducer/python/storage/StorageSystem.py b/PicoProducer/python/storage/StorageSystem.py index 392ffc7c9..83b063cc2 100644 --- a/PicoProducer/python/storage/StorageSystem.py +++ b/PicoProducer/python/storage/StorageSystem.py @@ -12,7 +12,7 @@ class StorageSystem(object): - def __init__(self,path,ensure=False,verb=0,**kwargs): + def __init__(self,path,verb=0,**kwargs): self.path = path.rstrip('/') self.lscmd = 'ls' self.lsurl = '' diff --git a/PicoProducer/python/storage/T2_DESY.py b/PicoProducer/python/storage/T2_DESY.py index 
7b7bdb728..d607109db 100644 --- a/PicoProducer/python/storage/T2_DESY.py +++ b/PicoProducer/python/storage/T2_DESY.py @@ -9,9 +9,9 @@ class T2_DESY(StorageSystem): - def __init__(self,path,verb=0,ensure=False): + def __init__(self,path,verb=0,ensure=False,**kwargs): """dCache at DESY""" - super(T2_DESY,self).__init__(path,verb=verb,ensure=False) + super(T2_DESY,self).__init__(path,verb=verb,ensure=False,**kwargs) self.lscmd = "ls" self.lsurl = "" self.rmcmd = "srmrm" diff --git a/PicoProducer/python/storage/T2_PSI.py b/PicoProducer/python/storage/T2_PSI.py index b70e83c39..f1cda2471 100644 --- a/PicoProducer/python/storage/T2_PSI.py +++ b/PicoProducer/python/storage/T2_PSI.py @@ -6,9 +6,9 @@ class T2_PSI(StorageSystem): - def __init__(self,path,verb=0,ensure=False): + def __init__(self,path,verb=0,ensure=False,**kwargs): """T2 SE on PSI.""" - super(T2_PSI,self).__init__(path,verb=verb,ensure=False) + super(T2_PSI,self).__init__(path,verb=verb,ensure=False,**kwargs) self.lscmd = "uberftp -ls" self.lsurl = "gsiftp://storage01.lcg.cscs.ch/" #self.rmcmd = "uberftp -rm" diff --git a/PicoProducer/python/storage/T3_PSI.py b/PicoProducer/python/storage/T3_PSI.py index 70f8ba393..8452f067f 100644 --- a/PicoProducer/python/storage/T3_PSI.py +++ b/PicoProducer/python/storage/T3_PSI.py @@ -5,9 +5,9 @@ class T3_PSI(StorageSystem): - def __init__(self,path,verb=0,ensure=False): + def __init__(self,path,verb=0,ensure=False,**kwargs): """T3 SE on PSI.""" - super(T3_PSI,self).__init__(path,verb=verb,ensure=False) + super(T3_PSI,self).__init__(path,verb=verb,ensure=False,**kwargs) self.rmcmd = 'uberftp -rm' self.rmurl = 'gsiftp://t3se01.psi.ch/' self.mkdrcmd = "LD_LIBRARY_PATH='' PYTHONPATH='' gfal-mkdir -p" diff --git a/PicoProducer/python/storage/utils.py b/PicoProducer/python/storage/utils.py index 9a766b447..f36f664e3 100644 --- a/PicoProducer/python/storage/utils.py +++ b/PicoProducer/python/storage/utils.py @@ -40,30 +40,31 @@ def gettmpdirs(): return tmpskimdir, tmphadddir 
-def getstorage(path,verb=0,ensure=False): +def getstorage(path,**kwargs): """Guess the storage system based on the path.""" + verb = kwargs.get('verb',0) if path.startswith('/eos/'): from TauFW.PicoProducer.storage.EOS import EOS - storage = EOS(path,ensure=ensure,verb=verb) + storage = EOS(path,**kwargs) #elif path.startswith('/castor/'): - # storage = Castor(path,verb=verb) + # storage = Castor(path,**kwargs) elif path.startswith('/pnfs/psi.ch/'): from TauFW.PicoProducer.storage.T3_PSI import T3_PSI - storage = T3_PSI(path,ensure=ensure,verb=verb) + storage = T3_PSI(path,**kwargs) elif path.startswith('/pnfs/desy.de/'): from TauFW.PicoProducer.storage.T2_DESY import T2_DESY - storage = T2_DESY(path,ensure=ensure,verb=verb) + storage = T2_DESY(path,**kwargs) elif path.startswith("/store/user") and ("etp" in host and "ekp" in host): from TauFW.PicoProducer.storage.GridKA_NRG import GridKA_NRG - storage = GridKA_NRG(path,ensure=ensure,verb=verb) + storage = GridKA_NRG(path,**kwargs) elif path.startswith('/pnfs/lcg.cscs.ch/'): from TauFW.PicoProducer.storage.T2_PSI import T2_PSI - storage = T2_PSI(path,ensure=ensure,verb=verb) + storage = T2_PSI(path,**kwargs) #elif path.startswith('/pnfs/iihe/'): - # return T2_IIHE(path,verb=verb) + # return T2_IIHE(path,**kwargs) else: from TauFW.PicoProducer.storage.StorageSystem import Local - storage = Local(path,ensure=ensure,verb=verb) + storage = Local(path,**kwargs) if not os.path.exists(path): LOG.warning("Could not find storage directory %r. Make sure it exists and is mounted. 
"%(path)+\ "If it is a special system, you need to subclass StorageSystem, see " diff --git a/PicoProducer/scripts/pico.py b/PicoProducer/scripts/pico.py index 1355d9c18..a37a2975d 100755 --- a/PicoProducer/scripts/pico.py +++ b/PicoProducer/scripts/pico.py @@ -105,10 +105,10 @@ def main_install(args): metavar='NFILES', help="divide default number of files per job, default=%(const)d") parser_job.add_argument('--tmpdir', dest='tmpdir', type=str, default=None, help="for skimming only: temporary output directory befor copying to outdir") - parser_hdd_.add_argument('-m','--maxopenfiles',dest='maxopenfiles', type=int, default=CONFIG.maxopenfiles, - metavar='NFILES', help="maximum numbers to be opened during hadd, default=%(default)d") parser_hdd_.add_argument('--haddcmd', dest='haddcmd', default=CONFIG.haddcmd, help="alternative hadd command, e.g. haddnano.py") + parser_hdd_.add_argument('-m','--maxopenfiles',dest='maxopenfiles', type=int, default=CONFIG.maxopenfiles, + metavar='NFILES', help="maximum numbers to be opened during hadd, default=%(default)d") # SUBCOMMANDS subparsers = parser.add_subparsers(title="sub-commands",dest='subcommand',help="sub-command help") From bce39acd7e25d164685394f4323d072639a0d913 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Thu, 8 Jun 2023 17:00:04 +0200 Subject: [PATCH 41/55] raw_input -> input; split HTCondor batch systems; guess functions --- PicoProducer/python/batch/BatchSystem.py | 3 +- PicoProducer/python/batch/HTCondor.py | 4 ++- PicoProducer/python/batch/HTCondor_DESY.py | 8 +++++ PicoProducer/python/batch/HTCondor_KIT.py | 8 +++++ PicoProducer/python/batch/HTCondor_NAF.py | 8 +++++ PicoProducer/python/batch/SLURM.py | 4 ++- PicoProducer/python/batch/utils.py | 18 ++++++++++ PicoProducer/python/pico/job.py | 24 ++++++------- PicoProducer/python/storage/StorageSystem.py | 4 +-- PicoProducer/python/storage/utils.py | 6 ++-- PicoProducer/python/tools/config.py | 13 +++---- PicoProducer/test/testBatch.py | 38 ++++++++++---------- 12 
files changed, 91 insertions(+), 47 deletions(-) create mode 100644 PicoProducer/python/batch/HTCondor_DESY.py create mode 100644 PicoProducer/python/batch/HTCondor_KIT.py create mode 100644 PicoProducer/python/batch/HTCondor_NAF.py diff --git a/PicoProducer/python/batch/BatchSystem.py b/PicoProducer/python/batch/BatchSystem.py index e5d0930c9..e799bce15 100644 --- a/PicoProducer/python/batch/BatchSystem.py +++ b/PicoProducer/python/batch/BatchSystem.py @@ -16,6 +16,7 @@ def __init__(self,verb=1): self.verbosity = verb self.statusdict = { } self.system = self.__class__.__name__ + self.script = "python/batch/submit_%s.sub"%(self.system) def __str__(self): return self.system @@ -62,7 +63,7 @@ def parsejobs(self,rows,**kwargs): return jobs @abstractmethod - def submit(self,script,taskfile=None,**kwargs): + def submit(self,script=None,taskfile=None,**kwargs): """Submit a script with some optional parameters.""" raise NotImplementedError("BatchSystem.submit is an abstract method. Please implement in a subclass.") diff --git a/PicoProducer/python/batch/HTCondor.py b/PicoProducer/python/batch/HTCondor.py index 094130527..edf140720 100644 --- a/PicoProducer/python/batch/HTCondor.py +++ b/PicoProducer/python/batch/HTCondor.py @@ -14,7 +14,7 @@ def __init__(self,verb=False): self.statusdict = { 'q': ['1'], 'r': ['2','3'], 'f': ['4','5','6'], 'c': ['5'] } self.jobidrexp = re.compile("submitted to cluster (\d+).") - def submit(self,script,taskfile=None,**kwargs): + def submit(self,script=None,taskfile=None,**kwargs): """Submit a script with some optional parameters.""" #jobname = kwargs.get('name', 'job' ) #queue = kwargs.get('queue', 'microcentury' ) @@ -30,6 +30,8 @@ def submit(self,script,taskfile=None,**kwargs): failflags = ["no jobs queued"] jobids = [ ] subcmd = "condor_submit" + if script==None: + script = self.script if not isinstance(appcmds,list): appcmds = [appcmds] if name: diff --git a/PicoProducer/python/batch/HTCondor_DESY.py 
b/PicoProducer/python/batch/HTCondor_DESY.py new file mode 100644 index 000000000..4bd0127f6 --- /dev/null +++ b/PicoProducer/python/batch/HTCondor_DESY.py @@ -0,0 +1,8 @@ +# Author: Izaak Neutelings (June 2023) +from TauFW.PicoProducer.batch.HTCondor import HTCondor + +class HTCondor_DESY(HTCondor): + """Subclass to load different .sub script.""" + def __init__(self,verb=False): + super(HTCondor_DESY,self).__init__(verb=verb) + \ No newline at end of file diff --git a/PicoProducer/python/batch/HTCondor_KIT.py new file mode 100644 index 000000000..464946f2b --- /dev/null +++ b/PicoProducer/python/batch/HTCondor_KIT.py @@ -0,0 +1,8 @@ +# Author: Izaak Neutelings (June 2023) +from TauFW.PicoProducer.batch.HTCondor import HTCondor + +class HTCondor_KIT(HTCondor): + """Subclass to load different .sub script.""" + def __init__(self,verb=False): + super(HTCondor_KIT,self).__init__(verb=verb) + \ No newline at end of file diff --git a/PicoProducer/python/batch/HTCondor_NAF.py new file mode 100644 index 000000000..2b7e252c6 --- /dev/null +++ b/PicoProducer/python/batch/HTCondor_NAF.py @@ -0,0 +1,8 @@ +# Author: Izaak Neutelings (June 2023) +from TauFW.PicoProducer.batch.HTCondor import HTCondor + +class HTCondor_NAF(HTCondor): + """Subclass to load different .sub script.""" + def __init__(self,verb=False): + super(HTCondor_NAF,self).__init__(verb=verb) + \ No newline at end of file diff --git a/PicoProducer/python/batch/SLURM.py b/PicoProducer/python/batch/SLURM.py index fa8d73bff..48403e134 100644 --- a/PicoProducer/python/batch/SLURM.py +++ b/PicoProducer/python/batch/SLURM.py @@ -14,7 +14,7 @@ def __init__(self,verb=False): self.jobidrexp = re.compile("Submitted batch job (\d+)") self.user = getpass.getuser() - def submit(self,script,taskfile=None,**kwargs): + def submit(self,script=None,taskfile=None,**kwargs): """Submit a script with some optional parameters.""" name = kwargs.get('name', None )
array = kwargs.get('array', None ) @@ -30,6 +30,8 @@ def submit(self,script=None,taskfile=None,**kwargs): failflags = ["error"] jobids = [ ] subcmd = "sbatch" + if script==None: + script = self.script if name: subcmd += " -J %s"%(name) if array: diff --git a/PicoProducer/python/batch/utils.py b/PicoProducer/python/batch/utils.py index 7f7b1a920..8d46429c7 100644 --- a/PicoProducer/python/batch/utils.py +++ b/PicoProducer/python/batch/utils.py @@ -2,6 +2,7 @@ from past.builtins import basestring # for python2 compatibility import os, re, glob import importlib +import platform from TauFW.common.tools.file import ensureTFile import TauFW.PicoProducer.tools.config as GLOB from TauFW.PicoProducer.batch import moddir @@ -15,6 +16,23 @@ evtsplitexp = re.compile(r"(.+\.root):(\d+):(\d+)$") # input file split by events +def guess_batch(): + """Guess the batch system for a host.""" + host = platform.node() + batch = "HTCondor" + if 'lxplus' in host: + batch = "HTCondor" + elif "etp" in host: + batch = "HTCondor_KIT" + ###elif "etp" in host: + ### batch = "HTCondor_DESY" + ###elif "etp" in host: + ### batch = "HTCondor_NAF" + elif "t3" in host and "psi.ch" in host: + batch = "SLURM" + return batch + + def chunkify_by_evts(fnames,maxevts,evenly=True,evtdict=None,verb=0): """Split list of files into chunks with total events per chunks less than given maximum, and update input fnames to bookkeep first event and maximum events.
diff --git a/PicoProducer/python/pico/job.py index cdd323409..64056d42a 100755 --- a/PicoProducer/python/pico/job.py +++ b/PicoProducer/python/pico/job.py @@ -11,7 +11,8 @@ from TauFW.PicoProducer.storage.utils import getstorage, getsamples, isvalid, itervalid, print_no_samples from TauFW.PicoProducer.pico.run import getmodule from TauFW.PicoProducer.pico.common import * - +if sys.version_info.major<3: # for compatibility with python2 + input = raw_input #################### @@ -169,7 +170,7 @@ def preparejobs(args): elif args.prompt: LOG.warning("Job configuration %r already exists and might cause conflicting job output!"%(cfgname)) while True: - submit = raw_input(">>> Submit anyway? [y/n] "%(nchunks)) + submit = input(">>> Submit anyway? [y/n] ") if 'f' in submit.lower(): # submit this job, and stop asking print(">>> Force all.") force = True; skip = True; break @@ -332,7 +333,7 @@ def preparejobs(args): # YIELD yield jobcfg - print + print() if not found: print_no_samples(dtypes,filters,vetoes,[channel],jobdir_,jobcfgs)
[y/n] "%(nchunks)) + submit = input(">>> Do you want to submit %d jobs to the batch system? [y/n] "%(nchunks)) if any(s in submit.lower() for s in ['q','exit']): # quit this script print(">>> Quitting...") exit(0) @@ -821,7 +817,7 @@ def main_submit(args): submit = 'y' args.prompt = False # stop asking for next samples if 'y' in submit.lower(): # submit this job - jobid = batch.submit(script,joblist,**jkwargs) + jobid = batch.submit(script,taskfile=joblist,**jkwargs) break elif 'n' in submit.lower(): # do not submit this job print(">>> Not submitting.") @@ -829,7 +825,7 @@ def main_submit(args): else: print(">>> '%s' is not a valid answer, please choose y/n."%submit) else: - jobid = batch.submit(script,joblist,**jkwargs) + jobid = batch.submit(script,taskfile=joblist,**jkwargs) # WRITE JOBCONFIG if jobid!=None: @@ -912,7 +908,7 @@ def main_status(args): print(">>> Found samples: "+", ".join(repr(s.name) for s in samples)) if subcmd in ['hadd','haddclean'] and 'skim' in channel.lower(): LOG.warning("Hadding into one file not available for skimming...") - print + print() continue # SAMPLE over SAMPLES @@ -1012,7 +1008,7 @@ def main_status(args): checkchunks(sample,channel=channel_,tag=tag,jobs=jobs,showlogs=showlogs,checkqueue=checkqueue, checkevts=checkevts,das=checkdas,ncores=ncores,verb=verbosity) - print + print() if not found: print_no_samples(dtypes,filters,vetoes,[channel],jobdir_,jobcfgs) diff --git a/PicoProducer/python/storage/StorageSystem.py b/PicoProducer/python/storage/StorageSystem.py index 83b063cc2..1c4373517 100644 --- a/PicoProducer/python/storage/StorageSystem.py +++ b/PicoProducer/python/storage/StorageSystem.py @@ -224,8 +224,8 @@ def chmod(self,file,perm=None,**kwargs): class Local(StorageSystem): - def __init__(self,path,verb=0,ensure=False): - super(Local,self).__init__(path,verb=verb,ensure=ensure) + def __init__(self,path,verb=0,ensure=False,**kwargs): + super(Local,self).__init__(path,verb=verb,ensure=ensure,**kwargs) if ensure: 
self.ensuredir(self.path,verb=verb) diff --git a/PicoProducer/python/storage/utils.py b/PicoProducer/python/storage/utils.py index f36f664e3..995b6268a 100644 --- a/PicoProducer/python/storage/utils.py +++ b/PicoProducer/python/storage/utils.py @@ -13,7 +13,7 @@ host = platform.node() -def getsedir(): +def guess_sedir(): """Guess the storage element path for a given user and host.""" user = getpass.getuser() sedir = "" @@ -26,7 +26,7 @@ def getsedir(): return sedir -def gettmpdirs(): +def guess_tmpdirs(): """Guess the temporary directory for a given user and host.""" user = getpass.getuser() tmphadddir = "/tmp/%s/"%(user) # temporary dir for creating intermediate hadd files @@ -252,5 +252,5 @@ def print_no_samples(dtype=[],filter=[],veto=[],channel=[],jobdir="",jobcfgs="") strings.append("channel%s %s"%('s' if len(channel)>1 else "",quotestrs(channel))) string += " with "+', '.join(strings) print(string) - print + print() diff --git a/PicoProducer/python/tools/config.py b/PicoProducer/python/tools/config.py index b7409bb02..cca9367f6 100644 --- a/PicoProducer/python/tools/config.py +++ b/PicoProducer/python/tools/config.py @@ -9,7 +9,8 @@ from TauFW.PicoProducer import basedir from TauFW.common.tools.file import ensuredir, ensurefile from TauFW.common.tools.log import Logger, color, bold, header -from TauFW.PicoProducer.storage.utils import getsedir, gettmpdirs +from TauFW.PicoProducer.storage.utils import guess_sedir, guess_tmpdirs +from TauFW.PicoProducer.batch.utils import guess_batch # DEFAULTS @@ -29,15 +30,15 @@ ('test','test.py'), ('mutau','ModuleMuTauSimple') ]) -_sedir = getsedir() # guess storage element on current host -_tmpskimdir, _tmphadddir = gettmpdirs() # _tmphadddir: temporary dir for creating intermediate hadd files +_sedir = guess_sedir() # guess storage element on current host +_tmpskimdir, _tmphadddir = guess_tmpdirs() # _tmphadddir: temporary dir for creating intermediate hadd files # _tmpskimdir: temporary dir for creating skimmed file 
before copying to outdir _jobdir = "output/$ERA/$CHANNEL/$SAMPLE" # for job config and log files _outdir = _tmphadddir+_jobdir # for job output _picodir = _sedir+"analysis/$ERA/$GROUP" # for storage of analysis ("pico") tuples after hadd _nanodir = _sedir+"samples/nano/$ERA/$DAS" # for storage of (skimmed) nanoAOD _filelistdir = "samples/files/$ERA/$SAMPLE.txt" # location to save list of files -_batchsystem = 'HTCondor' # batch system (HTCondor, SLURM, ...) +_batchsystem = guess_batch() # batch system (HTCondor, SLURM, ...) _queue = "" # batch queue / job flavor _nfilesperjob = 1 # group files per job _maxevtsperjob = -1 # maximum number of events per job (split large files) @@ -68,8 +69,8 @@ def getconfig(verb=0,refresh=False): cfgname = os.path.join(cfgdir,"config.json") bkpname = os.path.join(cfgdir,"config.json.bkp") # back up to recover config if reset cfgdict = _cfgdefaults.copy() - rqdstrs = [k for k,v in _cfgdefaults.items() if isinstance(v,basestring)] - rqddicts = [k for k,v in _cfgdefaults.items() if isinstance(v,dict)] + rqdstrs = [k for k,v in _cfgdefaults.items() if isinstance(v,basestring)] # required string type + rqddicts = [k for k,v in _cfgdefaults.items() if isinstance(v,dict)] # required dictionary type # GET CONFIG if os.path.isfile(cfgname): diff --git a/PicoProducer/test/testBatch.py b/PicoProducer/test/testBatch.py index 52f1ba77d..4490292a2 100755 --- a/PicoProducer/test/testBatch.py +++ b/PicoProducer/test/testBatch.py @@ -10,7 +10,7 @@ def createtasks(fname,ntasks=2,pause=10): with open(fname,'w') as file: - for i in xrange(ntasks): + for i in range(ntasks): file.write("echo 'This is task number %d with environment:'; sleep %d; env\n"%(i,pause)) return fname @@ -35,18 +35,18 @@ def testBatch(path,verb=0): LOG.header("__init__") #batch = ensuremodule(system,"PicoProducer.batch."+batch) batch = getbatch(args.batch,verb=verbosity+1) - print ">>> %r"%(batch) - print ">>> %-10s = %s"%('jobname',jobname) - print ">>> %-10s = 
%s"%('ntasks',ntasks) - print ">>> %-10s = %s"%('nchecks',nchecks) - print ">>> %-10s = %s"%('outdir',outdir) - print ">>> %-10s = %s"%('logdir',logdir) - print ">>> %-10s = %s"%('dryrun',dryrun) - print ">>> %-10s = %s"%('queue',queue) - print ">>> %-10s = %s"%('time',time) - print ">>> %-10s = %s"%('batchopts',batchopts) - print ">>> %-10s = %s"%('verbosity',verbosity) - print ">>> " + print(">>> %r"%(batch)) + print(">>> %-10s = %s"%('jobname',jobname)) + print(">>> %-10s = %s"%('ntasks',ntasks)) + print(">>> %-10s = %s"%('nchecks',nchecks)) + print(">>> %-10s = %s"%('outdir',outdir)) + print(">>> %-10s = %s"%('logdir',logdir)) + print(">>> %-10s = %s"%('dryrun',dryrun)) + print(">>> %-10s = %s"%('queue',queue)) + print(">>> %-10s = %s"%('time',time)) + print(">>> %-10s = %s"%('batchopts',batchopts)) + print(">>> %-10s = %s"%('verbosity',verbosity)) + print(">>> ") # PREPARE JOBS createtasks(tasklist,ntasks) @@ -75,17 +75,17 @@ def testBatch(path,verb=0): else: LOG.throw(NotImplementedError,"Submission for batch system '%s' has not been implemented (yet)..."%(batch.system)) jobid = batch.submit(script,tasklist,**jkwargs) - print ">>> jobid: %s"%(jobid) + print(">>> jobid: %s"%(jobid)) # CHECK JOBS LOG.header("Check jobs") - for i in xrange(nchecks): + for i in range(nchecks): jobs = batch.jobs(jobid,verb=verbosity-1) # get refreshed job list #jobs = batch.jobs(verb=verbosity-1) # get refreshed job list - print ">>> job objects: %r"%(jobs) - print ">>> " + print(">>> job objects: %r"%(jobs)) + print(">>> ") #for job in jobs: - # print ">>> Found job %r, status=%r, args=%r"%(job,job.getstatus(),job.args.rstrip()) + # print(">>> Found job %r, status=%r, args=%r"%(job,job.getstatus(),job.args.rstrip())) if i>> Done." 
+ print("\n>>> Done.") From e97544463cd665d2df9a2c27ff2b04a814df8b3a Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Thu, 8 Jun 2023 17:17:01 +0200 Subject: [PATCH 42/55] make python3 compatible (test/, missed 'print' statements without arguments) --- Fitter/HighPT/DatacardsWToMuNu.py | 8 +- Fitter/HighPT/DatacardsWToTauNu.py | 22 ++--- Fitter/HighPT/FakeFactorHighPt.py | 18 ++-- Fitter/HighPT/MetTrigger.py | 6 +- Fitter/HighPT/PlotWToMuNu.py | 2 +- Fitter/HighPT/PlotWToTauNu.py | 2 +- Fitter/paper/writecards.py | 2 +- Fitter/python/HighPT/utilsHighPT.py | 6 +- PicoProducer/data/btag/getBTagEfficiencies.py | 8 +- PicoProducer/data/pileup/getPileupProfiles.py | 6 +- PicoProducer/data/pileup/pileupCalc.py | 2 +- .../python/analysis/GenFilterMuTau.py | 2 +- PicoProducer/python/pico/run.py | 2 +- PicoProducer/python/storage/das.py | 2 +- PicoProducer/test/testEventBased.py | 28 +++--- PicoProducer/test/testSFs.py | 92 +++++++++---------- PicoProducer/test/testSample.py | 58 ++++++------ PicoProducer/test/testStorage.py | 64 ++++++------- PicoProducer/utils/compareNano.py | 4 +- PicoProducer/utils/comparePico.py | 2 +- PicoProducer/utils/getDASParents.py | 2 +- PicoProducer/utils/pickEvents.py | 4 +- Plotter/python/plot/Selection.py | 6 +- Plotter/python/sample/Sample.py | 21 +---- Plotter/test/WstarplotPico.py | 4 +- Plotter/test/plotPico.py | 4 +- Plotter/test/pseudoSamples.py | 6 +- Plotter/test/testColors.py | 4 +- Plotter/test/testLatex.py | 6 +- Plotter/test/testLegend.py | 2 +- Plotter/test/testMacros.py | 68 +++++++------- Plotter/test/testMethods.py | 4 +- Plotter/test/testMultiDraw.py | 36 ++++---- Plotter/test/testMultiThread.py | 92 +++++++++---------- Plotter/test/testPanels.py | 28 +++--- Plotter/test/testPlot.py | 6 +- Plotter/test/testSampleSet.py | 46 +++++----- Plotter/test/testSamples.py | 12 +-- Plotter/test/testSelection.py | 18 ++-- Plotter/test/testStack.py | 8 +- Plotter/test/testStitching.py | 10 +- Plotter/test/testStyle.py | 18 ++-- 
Plotter/test/testUnroll.py | 22 ++--- Plotter/test/testUnwrapping.py | 12 +-- Plotter/test/testVariables.py | 22 ++--- common/test/testLoadingBar.py | 4 +- common/test/testTable.py | 14 +-- 47 files changed, 399 insertions(+), 416 deletions(-) diff --git a/Fitter/HighPT/DatacardsWToMuNu.py b/Fitter/HighPT/DatacardsWToMuNu.py index 10ee8ede3..e00f3ff06 100755 --- a/Fitter/HighPT/DatacardsWToMuNu.py +++ b/Fitter/HighPT/DatacardsWToMuNu.py @@ -123,7 +123,7 @@ def PlotWToMuNu(h_data_input,h_bkg_input,h_sig_input,era,var): canvas.cd() canvas.SetSelected(canvas) canvas.Update() - print + print() print('Creating control plot') canvas.Print(utils.figuresFolderWMuNu+"/wmunu_"+era+".png") @@ -176,7 +176,7 @@ def CreateCardsWToMuNu(fileName,h_data,h_bkg,h_sig,uncs): basefolder = utils.picoFolder var = args.variable - print + print() print('initializing SingleMuon samples >>>') singlemuSamples = {} # data samples dictionary singlemuNames = utils.singlemu[args.era] @@ -184,14 +184,14 @@ def CreateCardsWToMuNu(fileName,h_data,h_bkg,h_sig,uncs): singlemuSamples[singlemuName] = utils.sampleHighPt(basefolder,args.era, "munu",singlemuName,True) - print + print() print('initializing background samples >>>') bkgSamples = {} # MC bkg samples dictionary for bkgSampleName in bkgSampleNames: bkgSamples[bkgSampleName] = utils.sampleHighPt(basefolder,args.era, "munu",bkgSampleName,False) - print + print() print('initializing signal samples >>>') sigSamples = {} # MC signal samples dictionary for sigSampleName in sigSampleNames: diff --git a/Fitter/HighPT/DatacardsWToTauNu.py b/Fitter/HighPT/DatacardsWToTauNu.py index 3ae40c36a..2e7e28fd6 100755 --- a/Fitter/HighPT/DatacardsWToTauNu.py +++ b/Fitter/HighPT/DatacardsWToTauNu.py @@ -25,7 +25,7 @@ def FitConst(x,par): def ComputeFake(h_wjets,h_dijets,h_fraction,name): nbins = h_wjets.GetNbinsX() hist = h_wjets.Clone(name) - print + print() print('Computing fake histogram ->',name) for i in range(1,nbins+1): x_wjets = h_wjets.GetBinContent(i) @@ 
-53,7 +53,7 @@ def ComputeFake(h_wjets,h_dijets,h_fraction,name): ################################## def ComputeEWKFraction(h_data,h_mc): - print + print() print('Computing EWK fraction') nbins = h_data.GetNbinsX() h_fraction = h_data.Clone('fraction') @@ -85,7 +85,7 @@ def ComputeEWKFraction(h_data,h_mc): ############################ def PlotClosure(h_data,h_models,wp,era,var): - print + print() print("Plotting closure") h_model = h_models["bkg_fake_mc_wjets"] @@ -305,7 +305,7 @@ def PlotWToTauNu(h_data_input,h_fake_input,h_bkg_input,h_sig_input,wp,era,var): canvas.cd() canvas.SetSelected(canvas) canvas.Update() - print + print() print('Creating control plot') canvas.Print(utils.figuresFolderWTauNu+"/wtaunu_"+wp+"_"+era+".png") @@ -382,7 +382,7 @@ def CreateCardsWToTauNu(fileName,h_data,h_fake,h_bkg,h_sig,uncs_fake,uncs_sig): uncert_names = ["JES","Unclustered","taues_1pr","taues_1pr1pi0","taues_3pr","taues_3pr1pi0"] # uncert_names = ["JES","Unclustered","taues"] - print + print() print('initializing data samples >>>') metSamples = {} # data samples dictionary metNames = utils.met[args.era] @@ -391,7 +391,7 @@ def CreateCardsWToTauNu(fileName,h_data,h_fake,h_bkg,h_sig,uncs_fake,uncs_sig): "taunu",metName,True) metSamples[metName].SetTauNuConfig(fakeFactor,args.wp,wtaunuCuts) - print + print() print('initializing background samples >>>') bkgSamples = {} # MC bkg samples dictionary for bkgSampleName in bkgSampleNames: @@ -399,7 +399,7 @@ def CreateCardsWToTauNu(fileName,h_data,h_fake,h_bkg,h_sig,uncs_fake,uncs_sig): "taunu",bkgSampleName,False) bkgSamples[bkgSampleName].SetTauNuConfig(fakeFactor,args.wp,wtaunuCuts) - print + print() print('initializing signal samples >>>') sigSamples = {} # MC signal samples dictionary for sigSampleName in sigSampleNames: @@ -411,7 +411,7 @@ def CreateCardsWToTauNu(fileName,h_data,h_fake,h_bkg,h_sig,uncs_fake,uncs_sig): hists_sig_notFake = utils.RunSamplesTauNu(sigSamples,var,"",xbins,"_notFake","sig") # running on signal samples 
(central template and unceertainties) - print + print() print('Running on signal samples >>>') hists_sig_shape = {} commonCut = "metfilter>0.5&&mettrigger>0.5&&extraelec_veto<0.5&&extramuon_veto<0.5&&extratau_veto<0.5&&njets==0&&idDeepTau2017v2p1VSmu_1>=1&&idDeepTau2017v2p1VSe_1>=4&&genmatch_1==5&&idDeepTau2017v2p1VSjet_1>=" + utils.tauWPs[args.wp] @@ -450,12 +450,12 @@ def CreateCardsWToTauNu(fileName,h_data,h_fake,h_bkg,h_sig,uncs_fake,uncs_sig): # running selection on data - print + print() print('Running on data samples >>>') hists_data = utils.RunSamplesTauNu(metSamples,var,"",xbins,"","data") # running selection on bkgd samples - print + print() print('Running on background samples >>>') hists_bkg_fake = utils.RunSamplesTauNu(bkgSamples,var,"",xbins,"_fake","bkg") hists_bkg_notFake = utils.RunSamplesTauNu(bkgSamples,var,"",xbins,"_notFake","bkg") @@ -524,7 +524,7 @@ def CreateCardsWToTauNu(fileName,h_data,h_fake,h_bkg,h_sig,uncs_fake,uncs_sig): # saving histograms to datacard file datacards outputFileName = utils.datacardsFolder + "/taunu_" + args.wp + "_" + args.era - print + print() print("Saving histograms to RooT file",outputFileName+".root") fileOutput = ROOT.TFile(outputFileName+".root","recreate") fileOutput.mkdir("taunu") diff --git a/Fitter/HighPT/FakeFactorHighPt.py b/Fitter/HighPT/FakeFactorHighPt.py index 819a478e0..42e184df9 100755 --- a/Fitter/HighPT/FakeFactorHighPt.py +++ b/Fitter/HighPT/FakeFactorHighPt.py @@ -50,7 +50,7 @@ def FitMass(x,par): ########################### def DrawFF(hist,era,channel,label,WP,**kwargs): - print + print() print('fitting FF histo >>>',era,channel,label) isdata = kwargs.get('isdata',True) @@ -139,16 +139,16 @@ def main(outputfile,dataSamples,mcSamples,sigSamples,**kwargs): channel = kwargs.get("channel","wjets") mode = kwargs.get("mode",0) - print + print() print("+++++++++++++++++++++++++++++++++++++++++++") - print + print() if mode==0: print('Computing FF as a function of tau pT in bins of 
pt_tau/pt_jet',era,wp,channel) elif mode==1: print('Computing FF as a function of tau mass in bins of DM',era,wp,channel) else: print('Computing FF as a function of tau pT in bins of (pt_tau/pt_jet,DM)',era,wp,channel) - print + print() cutTauDen = "idDeepTau2017v2p1VSjet_2<4" cutTauNum = "idDeepTau2017v2p1VSjet_2>=" + utils.tauWPs[wp] @@ -179,10 +179,10 @@ def main(outputfile,dataSamples,mcSamples,sigSamples,**kwargs): histssig = {} for label in binCuts: xbins = [] - print + print() print('***************************') print('Running over',label) - print + print() if mode==0: xbins=utils.xbinsPt elif mode==1: xbins=utils.xbinsMass[label] @@ -256,7 +256,7 @@ def main(outputfile,dataSamples,mcSamples,sigSamples,**kwargs): singlemuSamples[singlemuName] = utils.sampleHighPt(basefolder,args.era, "wjets",singlemuName,True) - print + print() print('initializing JetHT samples >>>') jethtSamples = {} # data samples disctionary jethtNames = utils.jetht[args.era] @@ -264,7 +264,7 @@ def main(outputfile,dataSamples,mcSamples,sigSamples,**kwargs): jethtSamples[jethtName] = utils.sampleHighPt(basefolder,args.era, "dijets",jethtName,True) - print + print() print('initializing MC samples >>>') mcSamples = {} # mc samples dictionary for mcSampleName in mcSampleNames: @@ -274,7 +274,7 @@ def main(outputfile,dataSamples,mcSamples,sigSamples,**kwargs): else: mcSamples[mcSampleName] = utils.sampleHighPt(basefolder,args.era,"wjets",mcSampleName, False) - print + print() print('initializing W+Jets samples >>>') sigSamples = {} # wjets samples dictionary for sigSampleName in sigSampleNames: diff --git a/Fitter/HighPT/MetTrigger.py b/Fitter/HighPT/MetTrigger.py index c121fe6d9..94c354e62 100755 --- a/Fitter/HighPT/MetTrigger.py +++ b/Fitter/HighPT/MetTrigger.py @@ -47,7 +47,7 @@ def DrawEfficiency(histdata,histmc,era,legend): def main(args): era = args.era - print + print() channel = "munu" basefolder = utils.picoFolder @@ -71,7 +71,7 @@ def main(args): dataSamples[dataSampleName] = 
utils.sampleHighPt(basefolder,era,channel, dataSampleName,True) - print + print() mcSamples = {} # mc samples dictionary print('Initializing W+Jets MC samples>>>') for mcSampleName in mcSampleNames: @@ -81,7 +81,7 @@ def main(args): else: mcSamples[mcSampleName] = utils.sampleHighPt(basefolder,era,channel, mcSampleName,False) - print + print() histsdata = {} # data histo dictionary histsmc = {} # mc histo dictionary diff --git a/Fitter/HighPT/PlotWToMuNu.py b/Fitter/HighPT/PlotWToMuNu.py index 212746540..1311b0e4f 100755 --- a/Fitter/HighPT/PlotWToMuNu.py +++ b/Fitter/HighPT/PlotWToMuNu.py @@ -98,7 +98,7 @@ def Plot(h_data_input,h_tot_input,h_bkg_input,h_sig_input,era,var,postFit): canvas.cd() canvas.SetSelected(canvas) canvas.Update() - print + print() print('Creating control plot') if postFit: canvas.Print(utils.figuresFolderWMuNu+"/wmunu_"+era+"_postFit.png") diff --git a/Fitter/HighPT/PlotWToTauNu.py b/Fitter/HighPT/PlotWToTauNu.py index 68724c631..e79f6419f 100755 --- a/Fitter/HighPT/PlotWToTauNu.py +++ b/Fitter/HighPT/PlotWToTauNu.py @@ -112,7 +112,7 @@ def Plot(h_data_input,h_tot_input,h_fake_input,h_bkg_input,h_sig_input,wp,era,va canvas.cd() canvas.SetSelected(canvas) canvas.Update() - print + print() print('Creating control plot') if postFit: canvas.Print(utils.figuresFolderWTauNu+"/wtaunu_"+wp+"_"+era+"_postFit.png") diff --git a/Fitter/paper/writecards.py b/Fitter/paper/writecards.py index c771349ee..60be4ab58 100755 --- a/Fitter/paper/writecards.py +++ b/Fitter/paper/writecards.py @@ -240,7 +240,7 @@ def main(): if __name__ == '__main__': - print + print() main() print ">>>\n>>> Done harvesting\n" diff --git a/Fitter/python/HighPT/utilsHighPT.py b/Fitter/python/HighPT/utilsHighPT.py index 4247a5efc..5fa2c8c45 100644 --- a/Fitter/python/HighPT/utilsHighPT.py +++ b/Fitter/python/HighPT/utilsHighPT.py @@ -276,7 +276,7 @@ def extractBinLabels(pt,ptratio): # Run over set of samples and create histogram def RunSamples(samples,var,weight,cut,xbins,name): - 
print + print() print("Running",name,var,weight,cut) nbins = len(xbins)-1 hist = ROOT.TH1D(name,"",nbins,array('d',list(xbins))) @@ -289,7 +289,7 @@ def RunSamples(samples,var,weight,cut,xbins,name): # Run over set of samples and create histograms for W*->tau+v channel # for each sample loop over Tree entries is performed def RunSamplesTauNu(samples,var,unc,xbins,selection,name): - print + print() print("Running",name,var,unc,selection) nbins = len(xbins)-1 hists = {} # discionary of histograms @@ -418,7 +418,7 @@ def __init__(self,**kwargs): class FakeFactorHighPt: def __init__(self,filename): - print + print() print('Loading fake factors from file',filename," >>>>>") self.fileName = filename self.fileFF = ROOT.TFile(self.fileName,"READ") diff --git a/PicoProducer/data/btag/getBTagEfficiencies.py b/PicoProducer/data/btag/getBTagEfficiencies.py index 64f46ffa1..0adba478e 100755 --- a/PicoProducer/data/btag/getBTagEfficiencies.py +++ b/PicoProducer/data/btag/getBTagEfficiencies.py @@ -87,7 +87,7 @@ def getBTagEfficiencies(tagger,wp,outfname,samples,era,channel,tag="",effdir=Non print ">>> writing to %s..."%(outfname) file = TFile(outfname,'UPDATE') #RECREATE ensureTDirectory(file,channel) - for hname, hist in hists.iteritems(): + for hname, hist in hists.items(): if 'all' in hname: continue hname_all = hname+'_all' @@ -264,7 +264,7 @@ def createEff1D(histnum2D,histden2D): "1.5 < |#eta| < 2.5": [(1,1),(4,4)], } hists = [ ] - for etatitle, bins in etabins.iteritems(): + for etatitle, bins in etabins.items(): histnum = None histden = None for bin1, bin2 in bins: @@ -279,7 +279,7 @@ def createEff1D(histnum2D,histden2D): histnum.SetTitle(etatitle) hists.append(histnum) gDirectory.Delete(histden.GetName()) - #for i in xrange(0,histnum.GetXaxis().GetNbins()+1): + #for i in range(0,histnum.GetXaxis().GetNbins()+1): # print i, histnum.GetBinContent(i) return hists @@ -432,7 +432,7 @@ def main(): if __name__ == '__main__': - print + print() main() print ">>> done\n" diff 
--git a/PicoProducer/data/pileup/getPileupProfiles.py b/PicoProducer/data/pileup/getPileupProfiles.py index b48fa1aa4..4741cc5c4 100755 --- a/PicoProducer/data/pileup/getPileupProfiles.py +++ b/PicoProducer/data/pileup/getPileupProfiles.py @@ -223,7 +223,7 @@ def getFlatProfile(outfname,max=75,nbins=100,xmin=0,xmax=100): hist = TH1F('pileup','pileup',nbins,xmin,xmax) hist.Sumw2() binc = 1./max - for i in xrange(1,max+1): + for i in range(1,max+1): hist.SetBinContent(i,binc) hist.Scale(1./hist.Integral()) file = TFile(outfname,'RECREATE') @@ -521,7 +521,7 @@ def main(args): # DATA datahists = { period: [ ] for period in jsons } if 'data' in types: #and False: - for period, json in jsons.iteritems(): + for period, json in jsons.items(): for minbias in minbiases: filename = "Data_PileUp_%s_%s.root"%(period,str(minbias).replace('.','p')) datahist = getDataProfile(filename,json,pileup,100,era,minbias) @@ -590,7 +590,7 @@ def main(args): parser.add_argument('-v', '--verbose', dest='verbosity', type=int, nargs='?', const=1, default=0, help="set verbosity" ) args = parser.parse_args() - print + print() main(args) print ">>> Done!\n" diff --git a/PicoProducer/data/pileup/pileupCalc.py b/PicoProducer/data/pileup/pileupCalc.py index 54190f4a4..962d63e7f 100755 --- a/PicoProducer/data/pileup/pileupCalc.py +++ b/PicoProducer/data/pileup/pileupCalc.py @@ -251,7 +251,7 @@ def fillPileupHistogram (lumiInfo, calcOption, hist, minbXsec, Nbins): # now, we have to find the information for the input runs and LumiSections # in the Lumi/Pileup list. 
First, loop over inputs - for (run, lslist) in sorted(six.iteritems(inputRange)): + for (run, lslist) in sorted(inputRange.items()): # now, look for matching run, then match lumi sections if options.verbose: print("Searching for run %d..."%(run)) diff --git a/PicoProducer/python/analysis/GenFilterMuTau.py b/PicoProducer/python/analysis/GenFilterMuTau.py index a21b0876e..575d633a6 100755 --- a/PicoProducer/python/analysis/GenFilterMuTau.py +++ b/PicoProducer/python/analysis/GenFilterMuTau.py @@ -418,7 +418,7 @@ def endJob(self): ###gStyle.SetPaintTextFormat(".0f") ###for ix in range(1,nxbins+1): # loop over columns ### ntot = hist.GetBinContent(ix,ix) - ### key = [k for k, v in statusflags_dict.iteritems() if v==ix-1][0] + ### key = [k for k, v in statusflags_dict.items() if v==ix-1][0] ### hist.GetXaxis().SetBinLabel(ix,key) ### hist.GetYaxis().SetBinLabel(ix,key) ### for iy in range(1,nybins+1): # normalize rows diff --git a/PicoProducer/python/pico/run.py b/PicoProducer/python/pico/run.py index 7cb55c21a..c4eec0450 100755 --- a/PicoProducer/python/pico/run.py +++ b/PicoProducer/python/pico/run.py @@ -167,7 +167,7 @@ def main_run(args): if not dryrun: #execute(runcmd,dry=dryrun,verb=verbosity+1) # real-time print out does not work well with python script os.system(runcmd) - print + print() diff --git a/PicoProducer/python/storage/das.py b/PicoProducer/python/storage/das.py index 829f4819c..91fc3ff40 100644 --- a/PicoProducer/python/storage/das.py +++ b/PicoProducer/python/storage/das.py @@ -21,7 +21,7 @@ def dasgoclient(query,**kwargs): LOG.verb(repr(dascmd),verbosity) cmdout = execute(dascmd,verb=verbosity-1) except CalledProcessError as e: - print + print() LOG.error("Failed to call 'dasgoclient' command. Please make sure:\n" " 1) 'dasgoclient' command exists.\n" " 2) You have a valid VOMS proxy.
Use 'voms-proxy-init -voms cms -valid 200:0' or 'source utils/setupVOMS.sh'.\n" diff --git a/PicoProducer/test/testEventBased.py b/PicoProducer/test/testEventBased.py index 4fa69cdc7..560e86787 100755 --- a/PicoProducer/test/testEventBased.py +++ b/PicoProducer/test/testEventBased.py @@ -28,7 +28,7 @@ def chunkify_by_evts(fnames,nmax,evenly=True): file = ensureTFile(fname,'READ') nevts = file.Get('Events').GetEntries() file.Close() - print "%10d %s"%(nevts,fname) + print("%10d %s"%(nevts,fname)) if nevts>> Compare job output..." + print(">>> Compare job output...") nbins = 100000 fname1 = "/scratch/ineuteli/analysis/2016/DY/DYJetsToLL_M-2000to3000_tautau.root" # file-based split fname2 = "/scratch/ineuteli/analysis/2016/DY/DYJetsToLL_M-2000to3000_tautau_test.root" # event-based split @@ -121,24 +121,24 @@ def compare_output(args,verb=0): fname1, fname2 = args.infiles[:2] tname = args.tree ename = args.evt - print ">>> %s"%(fname1) - print ">>> %s"%(fname2) + print(">>> %s"%(fname1)) + print(">>> %s"%(fname2)) file1, tree1 = gettree(fname1,tname) file2, tree2 = gettree(fname2,tname) hist1 = TH1F('h1','h1',nbins,0,1000000) hist2 = TH1F('h2','h2',nbins,0,1000000) tree1.Draw("%s >> h1"%(ename),"","gOff") tree2.Draw("%s >> h2"%(ename),"","gOff") - print ">>> tree1: %9d, hist1: %9d"%(tree1.GetEntries(),hist1.GetEntries()) - print ">>> tree2: %9d, hist2: %9d"%(tree2.GetEntries(),hist2.GetEntries()) + print(">>> tree1: %9d, hist1: %9d"%(tree1.GetEntries(),hist1.GetEntries())) + print(">>> tree2: %9d, hist2: %9d"%(tree2.GetEntries(),hist2.GetEntries())) hist1.Add(hist2,-1) nfound = 0 for i in range(0,nbins+2): if nfound==20: - print ">>> BREAK! Already found 20 different bins" + print(">>> BREAK! 
Already found 20 different bins") break if hist1.GetBinContent(i)!=0.0: - print ">>> difference %3d in bin %3d, [%3d,%3d]!"%(i,hist1.GetBinContent(i),hist1.GetXaxis().GetBinLowEdge(i),hist1.GetXaxis().GetBinUpEdge(i)) + print(">>> difference %3d in bin %3d, [%3d,%3d]!"%(i,hist1.GetBinContent(i),hist1.GetXaxis().GetBinLowEdge(i),hist1.GetXaxis().GetBinUpEdge(i))) nfound += 1 if file1: file1.Close() @@ -170,5 +170,5 @@ def main(args): args = parser.parse_args() LOG.verbosity = args.verbosity main(args) - print "\n>>> Done." + print("\n>>> Done.") diff --git a/PicoProducer/test/testSFs.py b/PicoProducer/test/testSFs.py index b92db627c..aafaa8f70 100755 --- a/PicoProducer/test/testSFs.py +++ b/PicoProducer/test/testSFs.py @@ -2,40 +2,40 @@ # Author: Izaak Neutelings (December 2018) import time start0 = time.time() -print -print ">>> importing modules..." +print() +print(">>> importing modules...") from TauFW.common.tools.log import Logger LOG = Logger("testSF") start1 = time.time() from ROOT import TFile -print ">>> imported ROOT classes after %.1f seconds"%(time.time()-start1) +print(">>> imported ROOT classes after %.1f seconds"%(time.time()-start1)) start1 = time.time() from TauFW.PicoProducer.corrections.ScaleFactorTool import * -print ">>> imported ScaleFactorTool classes after %.1f seconds"%(time.time()-start1) +print(">>> imported ScaleFactorTool classes after %.1f seconds"%(time.time()-start1)) start1 = time.time() from TauFW.PicoProducer.corrections.MuonSFs import * -print ">>> imported MuonSFs classes after %.1f seconds"%(time.time()-start1) +print(">>> imported MuonSFs classes after %.1f seconds"%(time.time()-start1)) start1 = time.time() from TauFW.PicoProducer.corrections.ElectronSFs import * -print ">>> imported ElectronSFs classes after %.1f seconds"%(time.time()-start1) +print(">>> imported ElectronSFs classes after %.1f seconds"%(time.time()-start1)) start1 = time.time() from TauFW.PicoProducer.corrections.TauTriggerSFs import * -print ">>> imported 
TauTriggerSFs classes after %.1f seconds"%(time.time()-start1) +print(">>> imported TauTriggerSFs classes after %.1f seconds"%(time.time()-start1)) start1 = time.time() from TauFW.PicoProducer.corrections.BTagTool import * -print ">>> imported BTagTool classes after %.1f seconds"%(time.time()-start1) +print(">>> imported BTagTool classes after %.1f seconds"%(time.time()-start1)) start1 = time.time() from TauFW.PicoProducer.corrections.PileupTool import * -print ">>> imported PileupTool classes after %.1f seconds"%(time.time()-start1) -print ">>> imported everything after %.1f seconds"%(time.time()-start0) -print ">>> " +print(">>> imported PileupTool classes after %.1f seconds"%(time.time()-start1)) +print(">>> imported everything after %.1f seconds"%(time.time()-start0)) +print(">>> ") # PATHS path = 'data/lepton/' @@ -50,15 +50,15 @@ def printtable(name,method,ptvals=None,etavals=None): if etavals==None: etavals = [ 0.0, 0.5, 1.1, 1.9, 2.3, 2.4, 2.8, 3.4 ] etavals = [-eta for eta in reversed(etavals)]+etavals - print ">>> %s:"%name + print(">>> %s:"%name) TAB = LOG.table("%9.2f"+" %9.2f"*len(etavals)+" ") - #print ">>> %10s"%('pt\eta')+' '.join('%10.2f'%eta for eta in etavals) + #print(">>> %10s"%('pt\eta')+' '.join('%10.2f'%eta for eta in etavals)) TAB.printheader("pt\eta",*[str(eta) for eta in etavals]) for pt in ptvals: - #print ">>> %10.2f"%(pt)+' '.join('%10.3f'%method(pt,eta) for eta in etavals) + #print(">>> %10.2f"%(pt)+' '.join('%10.3f'%method(pt,eta) for eta in etavals)) TAB.printrow(pt,*[method(pt,eta) for eta in etavals]) - print ">>> got %d SFs in %.3f seconds"%(len(ptvals)*len(etavals),time.time()-start2) - print ">>> " + print(">>> got %d SFs in %.3f seconds"%(len(ptvals)*len(etavals),time.time()-start2)) + print(">>> ") def muonPOG(): @@ -66,29 +66,29 @@ def muonPOG(): # TRIGGER (Muon POG) start1 = time.time() - print ">>> initializing trigger SFs from Muon POG..." 
+ print(">>> initializing trigger SFs from Muon POG...") sftool_trig = ScaleFactor(path+"MuonPOG/Run2017/EfficienciesAndSF_RunBtoF_Nov17Nov2017.root","IsoMu27_PtEtaBins/abseta_pt_ratio",'mu_trig',ptvseta=True) - print ">>> initialized in %.1f seconds"%(time.time()-start1) + print(">>> initialized in %.1f seconds"%(time.time()-start1)) printtable('trigger POG',sftool_trig.getSF) # ID (Muon POG) start1 = time.time() sftool_id = ScaleFactor(path+"MuonPOG/Run2018/RunABCD_SF_ID.root","NUM_MediumID_DEN_genTracks_pt_abseta",'mu_id',ptvseta=False) - print ">>> initialized in %.1f seconds"%(time.time()-start1) + print(">>> initialized in %.1f seconds"%(time.time()-start1)) printtable('id POG',sftool_id.getSF) # ISO (Muon POG) start1 = time.time() sftool_iso = ScaleFactor(path+"MuonPOG/Run2018/RunABCD_SF_ISO.root","NUM_TightRelIso_DEN_MediumID_pt_abseta",'mu_iso',ptvseta=False) - print ">>> initialized in %.1f seconds"%(time.time()-start1) + print(">>> initialized in %.1f seconds"%(time.time()-start1)) printtable('iso POG',sftool_iso.getSF) # ID/ISO (Muon POG) start1 = time.time() sftool_idiso = sftool_id*sftool_iso - print ">>> initialized in %.1f seconds"%(time.time()-start1) + print(">>> initialized in %.1f seconds"%(time.time()-start1)) printtable('idiso POG',sftool_idiso.getSF) - print ">>> " + print(">>> ") def muonHTT(): @@ -96,18 +96,18 @@ def muonHTT(): # TRIGGER (HTT) start1 = time.time() - print ">>> initializing trigger SFs from HTT..." + print(">>> initializing trigger SFs from HTT...") sftool_mu_trig_HTT = ScaleFactorHTT(pathHTT_mu+"Muon_IsoMu24orIsoMu27.root","ZMass",'mu_idiso') - print ">>> initialized in %.1f seconds"%(time.time()-start1) + print(">>> initialized in %.1f seconds"%(time.time()-start1)) printtable('trigger HTT',sftool_mu_trig_HTT.getSF) ## ID ISO (HTT) #start1 = time.time() - #print ">>> initializing idiso SFs from HTT..." 
+ #print(">>> initializing idiso SFs from HTT...") #sftool_mu_idiso_HTT = ScaleFactorHTT(pathHTT_mu+"Muon_IdIso_IsoLt0p15_eff_RerecoFall17.root","ZMass",'mu_idiso') - #print ">>> initialized in %.1f seconds"%(time.time()-start1) + #print(">>> initialized in %.1f seconds"%(time.time()-start1)) #printtable('idiso HTT',sftool_mu_idiso_HTT.getSF) - #print ">>> " + #print(">>> ") def electronHTT(): @@ -127,36 +127,36 @@ def muonSFs(): # MUON SFs start1 = time.time() - print ">>> initializing MuonSF object..." + print(">>> initializing MuonSF object...") muSFs = MuonSFs() - print ">>> initialized in %.1f seconds"%(time.time()-start1) + print(">>> initialized in %.1f seconds"%(time.time()-start1)) # GET SFs printtable('trigger',muSFs.getTriggerSF) printtable('idiso',muSFs.getIdIsoSF) - print ">>> " + print(">>> ") # MUON 2018 SFs start1 = time.time() - print ">>> initializing MuonSF(2018) object..." + print(">>> initializing MuonSF(2018) object...") muSFs = MuonSFs(year=2018) - print ">>> initialized in %.1f seconds"%(time.time()-start1) + print(">>> initialized in %.1f seconds"%(time.time()-start1)) # GET SFs printtable('trigger',muSFs.getTriggerSF) printtable('idiso',muSFs.getIdIsoSF) - print ">>> " + print(">>> ") def electronSFs(): LOG.header("electronSFs") # ELECTRON SFs - print ">>> " + print(">>> ") start1 = time.time() - print ">>> initializing ElectronSFs object..." 
+ print(">>> initializing ElectronSFs object...") eleSFs = ElectronSFs() - print ">>> initialized in %.1f seconds"%(time.time()-start1) + print(">>> initialized in %.1f seconds"%(time.time()-start1)) # GET SFs printtable('trigger',eleSFs.getTriggerSF) @@ -181,11 +181,11 @@ def btagSFs(tagger='CSVv2'): LOG.header("btagSFs") # BTAG SFs - print ">>> " + print(">>> ") start1 = time.time() - print ">>> initializing BTagWeightTool(%r) object..."%tagger + print(">>> initializing BTagWeightTool(%r) object..."%tagger) btagSFs = BTagWeightTool(tagger) - print ">>> initialized in %.1f seconds"%(time.time()-start1) + print(">>> initialized in %.1f seconds"%(time.time()-start1)) # GET SFs printtable('%s g'%tagger,lambda p,e: btagSFs.getSF(p,e,0,True)) @@ -200,11 +200,11 @@ def pileupSFs(era='UL2017'): LOG.header("pileupSFs") # PILE UP TOOL - print ">>> " + print(">>> ") start1 = time.time() - print ">>> initializing PileupTool(%r) object..."%era + print(">>> initializing PileupTool(%r) object..."%era) puTool = PileupWeightTool(era) - print ">>> initialized in %.1f seconds"%(time.time()-start1) + print(">>> initialized in %.1f seconds"%(time.time()-start1)) ## GET SFs start2 = time.time() @@ -213,7 +213,7 @@ def pileupSFs(era='UL2017'): npus = [0,1,2,5,10,15,18,20,24,25,26,28,30,35,40,50,60,70,80,100] for npu in npus: TAB.printrow(npu,puTool.getWeight(npu)) - print ">>> got %d SFs in %.3f seconds"%(len(npus),time.time()-start2) + print(">>> got %d SFs in %.3f seconds"%(len(npus),time.time()-start2)) if __name__ == "__main__": @@ -228,6 +228,6 @@ def pileupSFs(era='UL2017'): btagSFs('DeepCSV') pileupSFs('2017') pileupSFs('UL2017') - print ">>> " - print ">>> done after %.1f seconds"%(time.time()-start0) - print + print(">>> ") + print(">>> done after %.1f seconds"%(time.time()-start0)) + print() diff --git a/PicoProducer/test/testSample.py b/PicoProducer/test/testSample.py index 97092ba36..3769081ae 100755 --- a/PicoProducer/test/testSample.py +++ 
b/PicoProducer/test/testSample.py @@ -47,50 +47,50 @@ def testSample(args): listname = "test/files/$ERA/$SAMPLE.txt" for sample in samples: LOG.header(sample.name) - print ">>> %-14s = %r"%("group",sample.group) - print ">>> %-14s = %r"%("name",sample.name) - print ">>> %-14s = %r"%("paths",sample.paths) - print ">>> %-14s = %r"%("url",sample.url) - print ">>> %-14s = %r"%("era",sample.era) - print ">>> %-14s = %r"%("channels",sample.channels) - print ">>> %-14s = %r"%("storage",sample.storage) - print ">>> %-14s = %r"%("extraopts",sample.extraopts) - print ">>> %-14s = %r"%("nfilesperjob",sample.nfilesperjob) - print ">>> %-14s = %r"%("files",sample.files) - print ">>> %-14s = %r"%("nevents",sample.nevents) + print(">>> %-14s = %r"%("group",sample.group)) + print(">>> %-14s = %r"%("name",sample.name)) + print(">>> %-14s = %r"%("paths",sample.paths)) + print(">>> %-14s = %r"%("url",sample.url)) + print(">>> %-14s = %r"%("era",sample.era)) + print(">>> %-14s = %r"%("channels",sample.channels)) + print(">>> %-14s = %r"%("storage",sample.storage)) + print(">>> %-14s = %r"%("extraopts",sample.extraopts)) + print(">>> %-14s = %r"%("nfilesperjob",sample.nfilesperjob)) + print(">>> %-14s = %r"%("files",sample.files)) + print(">>> %-14s = %r"%("nevents",sample.nevents)) # MATCH - print ">>> Testing matching:" + print(">>> Testing matching:") for term in terms: match = sample.match(term,verb=verbosity) match = color('YES','green',b=True) if match else color('NO','red',b=True) - print ">>> %r matches to %r: %s"%(sample.name,term,match) + print(">>> %r matches to %r: %s"%(sample.name,term,match)) if filters and not all(sample.match(f,verb=verbosity) for f in filters): continue # WRITE fname = repkey(listname,ERA=era) - print ">>>\n>>> Write..." 
+ print(">>>\n>>> Write...") sample.writefiles(fname,nevts=True) # write Sample.files to txt file - print ">>> %-14s = %r"%("listname",fname) - #print ">>> %-14s = %r"%("files",sample.files) - print ">>> %-14s = %r"%("nfiles",len(sample.files)) - print ">>> %-14s = %r"%("nevents",sample.nevents) + print(">>> %-14s = %r"%("listname",fname)) + #print(">>> %-14s = %r"%("files",sample.files)) + print(">>> %-14s = %r"%("nfiles",len(sample.files))) + print(">>> %-14s = %r"%("nevents",sample.nevents)) # LOAD - print ">>>\n>>> Reset..." + print(">>>\n>>> Reset...") newsample = Sample(sample.group,sample.name,*sample.paths, store=storage,url=url,files=fname,opts=sample.extraopts) - print ">>> %-14s = %r"%("listname",fname) - print ">>> %-14s = %r"%("files",newsample.files) - print ">>> %-14s = %r"%("nfiles",len(newsample.files)) - print ">>> %-14s = %r"%("nevents",newsample.nevents) - print ">>> Call Sample.getfiles..." # load Sample.files from txt file + print(">>> %-14s = %r"%("listname",fname)) + print(">>> %-14s = %r"%("files",newsample.files)) + print(">>> %-14s = %r"%("nfiles",len(newsample.files))) + print(">>> %-14s = %r"%("nevents",newsample.nevents)) + print(">>> Call Sample.getfiles...") # load Sample.files from txt file files = newsample.getfiles() - print ">>> %-14s = %r"%("files",newsample.files) - print ">>> %-14s = %r"%("nfiles",len(newsample.files)) - print ">>> %-14s = %r"%("nevents",newsample.nevents) - print ">>> %-14s = %r"%("filenevts",newsample.filenevts) + print(">>> %-14s = %r"%("files",newsample.files)) + print(">>> %-14s = %r"%("nfiles",len(newsample.files))) + print(">>> %-14s = %r"%("nevents",newsample.nevents)) + print(">>> %-14s = %r"%("filenevts",newsample.filenevts)) def testModule(era): @@ -117,5 +117,5 @@ def main(args): args = parser.parse_args() LOG.verbosity = args.verbosity main(args) - print "\n>>> Done."
+ print("\n>>> Done.") diff --git a/PicoProducer/test/testStorage.py b/PicoProducer/test/testStorage.py index e5ddc4cd3..790913919 100755 --- a/PicoProducer/test/testStorage.py +++ b/PicoProducer/test/testStorage.py @@ -12,7 +12,7 @@ def createdummy(fname): - print ">>> Creating dummy file %r..."%(fname) + print(">>> Creating dummy file %r..."%(fname)) with open(fname,'w') as file: file.write("# This is a dummy file for TauFW/PicoProducer/test/testStorage.py") file.write("# If you read this, it has probably not been removed correctly.") @@ -21,7 +21,7 @@ def createdummy(fname): def createdummyroot(fname,nevts=10000): - print ">>> Creating dummy ROOT file %r..."%(fname) + print(">>> Creating dummy ROOT file %r..."%(fname)) file = TFile(fname,'RECREATE') tree = TTree('tree','tree') hist = TH1F('hist','hist',50,-2,2) @@ -29,7 +29,7 @@ def createdummyroot(fname,nevts=10000): phi = array('d',[0]) tree.Branch("pt", pt, 'normal/D') tree.Branch("phi", phi, 'uniform/D') - for i in xrange(nevts): + for i in range(nevts): pt[0] = gRandom.Landau(40,20) phi[0] = gRandom.Uniform(-1.57,1.57) hist.Fill(gRandom.Landau(0,1)) @@ -45,14 +45,14 @@ def testStorage(path,readonly=False,hadd=True,verb=0): LOG.header("__init__") #storage = ensuremodule(system,"PicoProducer.storage" storage = getstorage(path,ensure=True,verb=verb) - print ">>> %r"%(storage) - print ">>> %-10s = %s"%('path',storage.path) - print ">>> %-10s = %s"%('rmcmd',storage.rmcmd) - print ">>> %-10s = %s"%('lscmd',storage.lscmd) - print ">>> %-10s = %s"%('mkdrcmd',storage.mkdrcmd) - print ">>> %-10s = %s"%('cpcmd',storage.cpcmd) - print ">>> %-10s = %s"%('tmpdir',storage.tmpdir) - print ">>> " + print(">>> %r"%(storage)) + print(">>> %-10s = %s"%('path',storage.path)) + print(">>> %-10s = %s"%('rmcmd',storage.rmcmd)) + print(">>> %-10s = %s"%('lscmd',storage.lscmd)) + print(">>> %-10s = %s"%('mkdrcmd',storage.mkdrcmd)) + print(">>> %-10s = %s"%('cpcmd',storage.cpcmd)) + print(">>> %-10s = %s"%('tmpdir',storage.tmpdir)) + 
print(">>> ") # EXPAND PATH LOG.header("expandpath") @@ -67,36 +67,36 @@ def testStorage(path,readonly=False,hadd=True,verb=0): ] for patharg in pathargs: for pathkwarg in pathkwargs: - LOG.color("storage.expandpath(%s,%s)"%(','.join(repr(a) for a in patharg),','.join("%s=%r"%(k,v) for k,v in pathkwarg.iteritems()))) + LOG.color("storage.expandpath(%s,%s)"%(','.join(repr(a) for a in patharg),','.join("%s=%r"%(k,v) for k,v in pathkwarg.items()))) result = storage.expandpath(*patharg,**pathkwarg) - print ">>> %r"%(result) + print(">>> %r"%(result)) # LS LOG.header("ls") LOG.color("storage.ls(verb=%d)"%(verb)) contents = storage.ls(verb=verb) - print ">>> Found %d items"%(len(contents)) - print ">>> Contents: %s"%(contents) + print(">>> Found %d items"%(len(contents))) + print(">>> Contents: %s"%(contents)) # FILES LOG.header("getfiles") LOG.color("storage.getfiles(verb=%d)"%(verb)) contents = storage.getfiles(verb=verb) - print ">>> Found %d items"%(len(contents)) - print ">>> Contents: %s"%(contents) - print ">>> " + print(">>> Found %d items"%(len(contents))) + print(">>> Contents: %s"%(contents)) + print(">>> ") LOG.color("storage.getfiles(filter='*.*',verb=%d)"%(verb)) contents = storage.getfiles(filter='*.*',verb=verb) - print ">>> Found %d files"%(len(contents)) - print ">>> Contents: %s"%(contents) - print ">>> " + print(">>> Found %d files"%(len(contents))) + print(">>> Contents: %s"%(contents)) + print(">>> ") LOG.color("storage.getfiles(filter='*.*',url=None,verb=%d)"%(verb)) contents = storage.getfiles(filter='*.*',url=None,verb=verb) - print ">>> Found %d files"%(len(contents)) - print ">>> Contents: %s"%(contents) + print(">>> Found %d files"%(len(contents))) + print(">>> Contents: %s"%(contents)) if readonly: - print ">>> Read only. Skip test for cp, rm, mkdir, hadd..." + print(">>> Read only. 
Skip test for cp, rm, mkdir, hadd...") return # CP @@ -110,7 +110,7 @@ def testStorage(path,readonly=False,hadd=True,verb=0): LOG.header("exists") LOG.color("storage.exists(%r,verb=%d)"%(fname,verb)) result = storage.exists(fname,verb=verb) - print ">>> Exists: %r"%(result) + print(">>> Exists: %r"%(result)) storage.ls(verb=verb) # RM @@ -119,7 +119,7 @@ def testStorage(path,readonly=False,hadd=True,verb=0): try: storage.rm(fname,verb=verb) except Exception as error: - print error + print(error) storage.ls(verb=verb) # MKDIR @@ -131,20 +131,20 @@ def testStorage(path,readonly=False,hadd=True,verb=0): storage.ls(verb=verb) storage.ls(dirname,here=True,verb=verb) result = storage.exists(dirname,verb=verb) - print ">>> Exists: %r"%(result) + print(">>> Exists: %r"%(result)) except Exception as error: - print error + print(error) # RM DIRECTORY LOG.header("rm directory") - submit = raw_input(">>> Careful! Do you really want to remove %r? [y/n] "%(storage.expandpath(dirname,here=True))) + submit = input(">>> Careful! Do you really want to remove %r? [y/n] "%(storage.expandpath(dirname,here=True))) if submit=='y': LOG.color("storage.rm(%r,verb=%d)"%(dirname,verb)) try: storage.rm(dirname,verb=verb) storage.ls(verb=verb) except Exception as error: - print error + print(error) # HADD if hadd: @@ -158,7 +158,7 @@ def testStorage(path,readonly=False,hadd=True,verb=0): storage.ls(verb=verb) storage.rm(outfile,verb=verb) except Exception as error: - print error + print(error) def main(args): @@ -177,5 +177,5 @@ def main(args): args = parser.parse_args() LOG.verbosity = args.verbosity main(args) - print "\n>>> Done." 
+ print("\n>>> Done.") diff --git a/PicoProducer/utils/compareNano.py b/PicoProducer/utils/compareNano.py index 2f49a6422..bcd7794b2 100755 --- a/PicoProducer/utils/compareNano.py +++ b/PicoProducer/utils/compareNano.py @@ -74,7 +74,7 @@ def comparefiles(fnamesets,vars,**kwargs): plot.drawtext(text) plot.saveas(fname) plot.close() - print + print() #for tree in trees: # tree.Close() @@ -141,7 +141,7 @@ def comparevars(fnames,varsets,**kwargs): plot.drawtext(text) plot.saveas(fname) plot.close() - print + print() def main(args): diff --git a/PicoProducer/utils/comparePico.py b/PicoProducer/utils/comparePico.py index 366d744a2..c11d642e9 100755 --- a/PicoProducer/utils/comparePico.py +++ b/PicoProducer/utils/comparePico.py @@ -76,7 +76,7 @@ def compare(fnames,variables,**kwargs): plot.drawtext(text) plot.saveas(fname) plot.close() - print + print() def main(args): diff --git a/PicoProducer/utils/getDASParents.py b/PicoProducer/utils/getDASParents.py index 58471c29c..0ef49a8bf 100755 --- a/PicoProducer/utils/getDASParents.py +++ b/PicoProducer/utils/getDASParents.py @@ -86,7 +86,7 @@ def main(args): # if i0: print ">>> %s"%(fname) - for i in xrange(nevts): + for i in range(nevts): run = int(vec_run[i]) lumi = int(vec_lumi[i]) evt = int(vec_evt[i]) diff --git a/Plotter/python/plot/Selection.py b/Plotter/python/plot/Selection.py index a7b9531ec..e2df92e71 100644 --- a/Plotter/python/plot/Selection.py +++ b/Plotter/python/plot/Selection.py @@ -89,9 +89,9 @@ def __mul__(self, weight): result = None weight = joinweights(self.weight,weight) if isinstance(weight,str): - result = Selection("%s (%s)"(self.name,weight),joincuts(self.selection,weight=weight)) + result = Selection("%s (%s)"%(self.name,weight),joincuts(self.selection,weight=weight)) else: - result = Selection("%s (%s)"(self.name,weight.title),joincuts(self.selection,weight=weight)) + result = Selection("%s (%s)"%(self.name,weight.title),joincuts(self.selection,weight=weight)) return result def contains(self, 
string, **kwargs): @@ -201,4 +201,4 @@ def match(self, *terms, **kwargs): return match(terms,[self.name,self.title,self.selection]) Sel = Selection # short alias -from TauFW.Plotter.plot.string import * \ No newline at end of file +from TauFW.Plotter.plot.string import * diff --git a/Plotter/python/sample/Sample.py b/Plotter/python/sample/Sample.py index 0178140b8..fbad18174 100644 --- a/Plotter/python/sample/Sample.py +++ b/Plotter/python/sample/Sample.py @@ -262,23 +262,6 @@ def gethist_from_file(self,hname,tag="",close=True,**kwargs): print(">>> Sample.gethist_from_file: %sbin 5 = %s after normalization"%(indent,hist.GetBinContent(5))) return hist - @property - def tree(self): - if not self.file: # file does not exist: open & get tree - LOG.verb("Sample.tree: Opening file %s to get tree %r..."%(self.filename,self.treename),level=3) - self._tree = self.getfile() - elif self._tree and isinstance(self._tree,TTree): # file & tree do exist - LOG.verb("Sample.tree: Getting existing tree %s..."%(self._tree),level=3) - else: # file does exist, but tree does not: get tree - LOG.verb("Sample.tree: No valid tree (%s). 
Retrieving tree %r from file %s..."%(self._tree,self.treename,self.filename),level=3) - self._tree = self.file.Get(self.treename) - setaliases(self._tree,self.aliases) - return self._tree - - @tree.setter - def tree(self, value): - self._tree = value - def __copy__(self): cls = self.__class__ result = cls.__new__(cls) @@ -669,9 +652,9 @@ def getentries(self, selection, **kwargs): # GET NUMBER OF EVENTS file, tree = self.get_newfile_and_tree() # create new file and tree for thread safety nevents = tree.GetEntries(cuts) - if scale: - nevents *= scale file.Close() + if scale: + nevents *= scale # PRINT if verbosity>=3: diff --git a/Plotter/test/WstarplotPico.py b/Plotter/test/WstarplotPico.py index af028f627..d96293882 100755 --- a/Plotter/test/WstarplotPico.py +++ b/Plotter/test/WstarplotPico.py @@ -60,7 +60,7 @@ def plotSampleSet(channel,sampleset,tag="",outdir="plots"): fname = "%s/plotPico_$VAR%s"%(outdir,tag) for selection in selections: stacks = sampleset.getstack(variables,selection, parallel=parallel) #method='QCD_OSSS' - for stack, variable in stacks.iteritems(): + for stack, variable in stacks.items(): #position = "" #variable.position or 'topright' stack.draw() stack.drawlegend() #position) @@ -86,5 +86,5 @@ def main(): args = parser.parse_args() LOG.verbosity = args.verbosity main() - print "\n>>> Done." 
+ print("\n>>> Done.") diff --git a/Plotter/test/plotPico.py b/Plotter/test/plotPico.py index bea410eec..db8ee46df 100755 --- a/Plotter/test/plotPico.py +++ b/Plotter/test/plotPico.py @@ -77,7 +77,7 @@ def plotSampleSet(channel,sampleset,tag="",outdir="plots"): fname = "%s/plotPico_$VAR%s"%(outdir,tag) for selection in selections: stacks = sampleset.getstack(variables,selection,method='QCD_OSSS',parallel=parallel) - for stack, variable in stacks.iteritems(): + for stack, variable in stacks.items(): #position = "" #variable.position or 'topright' stack.draw() stack.drawlegend() #position) @@ -104,5 +104,5 @@ def main(): args = parser.parse_args() LOG.verbosity = args.verbosity main() - print "\n>>> Done." + print("\n>>> Done.") diff --git a/Plotter/test/pseudoSamples.py b/Plotter/test/pseudoSamples.py index 82e8c283b..5904ba951 100755 --- a/Plotter/test/pseudoSamples.py +++ b/Plotter/test/pseudoSamples.py @@ -150,7 +150,7 @@ def getgenerator(var,sample): # get random generator from dictionary if sample in vardict[var]: return vardict[var][sample] else: return vardict[var]['*'] def fill(sample,tree,nevts): # help function to fill trees - for i in xrange(nevts): + for i in range(nevts): genmatch_2[0] = 5 if gRandom.Uniform(1.)>> Generating pseudo MC..." + print(">>> Generating pseudo MC...") #time0 = time.time() for sample in samples: if sample=='Data': continue @@ -180,7 +180,7 @@ def fill(sample,tree,nevts): # help function to fill trees # PSEUDO DATA if 'Data' in samples: - print ">>> Generating pseudo data..." 
+ print(">>> Generating pseudo data...") file, tree = filedict['Data'] file.cd() #time0 = time.time() diff --git a/Plotter/test/testColors.py b/Plotter/test/testColors.py index 2c8e376b3..d8c6db53d 100755 --- a/Plotter/test/testColors.py +++ b/Plotter/test/testColors.py @@ -30,7 +30,7 @@ def makeTColorWheel(): canvas.SaveAs("TColorWheel.pdf") -def drawColorTable(clist=range(0,50),nrow=None,ncol=None,cmax=10,tag="",label=False,RBG=False,newRBG=True,div=2): +def drawColorTable(clist=list(range(0,50)),nrow=None,ncol=None,cmax=10,tag="",label=False,RBG=False,newRBG=True,div=2): # https://root.cern.ch/doc/master/src_2TPad_8cxx_source.html#l01611 if not ncol: @@ -139,7 +139,7 @@ def findClosestExistingColor(*args,**kwargs): return imin, cmin, dmin def findNonWhiteColors(N=924): - for i in xrange(0,N): + for i in range(0,N): color = gROOT.GetColor(i) if i==kWhite: yield kWhite, color diff --git a/Plotter/test/testLatex.py b/Plotter/test/testLatex.py index 1f86ed606..f26ed7885 100755 --- a/Plotter/test/testLatex.py +++ b/Plotter/test/testLatex.py @@ -15,7 +15,7 @@ def checklatex(texts,tag=""): fname = "%s/testLatex%s"%(output,tag) xdim = 500 ydim = 50*(len(texts)+2.5) - print ">>> Canvas: %sx%s (nlines=%d)"%(xdim,ydim,len(texts)) + print(">>> Canvas: %sx%s (nlines=%d)"%(xdim,ydim,len(texts))) canvas = TCanvas('canvas','canvas',xdim,int(ydim)) #pave1 = TPaveText(0.0,0,0.5,1,'ARC') #,'BR') pave2 = TPaveText(0.04,0.04,0.96,0.96) #'ARC') #,'BR') @@ -34,7 +34,7 @@ def checklatex(texts,tag=""): #pave2.Copy(pave1) for line in texts: latex = makelatex(line) - print ">>> %r -> %r"%(line,latex) + print(">>> %r -> %r"%(line,latex)) #pave1.AddText(line) pave2.AddText(latex) #pave1.Draw() @@ -105,5 +105,5 @@ def main(): args = parser.parse_args() LOG.verbosity = args.verbosity main() - print ">>>\n>>> Done." 
+ print(">>>\n>>> Done.") diff --git a/Plotter/test/testLegend.py b/Plotter/test/testLegend.py index 6aca1b89c..bed046d4b 100755 --- a/Plotter/test/testLegend.py +++ b/Plotter/test/testLegend.py @@ -37,7 +37,7 @@ def plothist(xtitle,hists,ratio=False,logy=False,norm=False,tag="",**kwargs): plot.saveas(fname+".png") #plot.saveas(fname+".pdf") plot.close(keep=False) - print + print() def testposition(): diff --git a/Plotter/test/testMacros.py b/Plotter/test/testMacros.py index aedc0e61d..56a8ce3bf 100755 --- a/Plotter/test/testMacros.py +++ b/Plotter/test/testMacros.py @@ -11,7 +11,7 @@ def testUtils(): """Test utils.C macro.""" - print header("testUtils") + print(header("testUtils")) # LOAD MACRO gROOT.ProcessLine(".L python/macros/utils.C+O") @@ -25,8 +25,8 @@ def testUtils(): phivals = [ 0.0, 1.5, ] #3.0 ] etavals = [ 0.0, 1.5, ] #3.0 ] tlvs = [(pt,e,p,m) for pt in ptvals for m in mvals for e in etavals for p in phivals if pt>m] - print ">>> %7s %5s %5s %6s %8s %8s %8s | %6s %5s %5s %6s %8s %8s %8s | %10s %10s %10s %9s"%( - 'pt1','eta1','phi1','m1','p1','tlv1.P','tlv1.E','pt2','eta2','phi2','m2','p2','tlv2.P','tlv2.E','InvMass','tlv.M','diff [%]','tlv.E') + print(">>> %7s %5s %5s %6s %8s %8s %8s | %6s %5s %5s %6s %8s %8s %8s | %10s %10s %10s %9s"%( + 'pt1','eta1','phi1','m1','p1','tlv1.P','tlv1.E','pt2','eta2','phi2','m2','p2','tlv2.P','tlv2.E','InvMass','tlv.M','diff [%]','tlv.E')) for pt1, eta1, phi1, m1 in tlvs: for pt2, eta2, phi2, m2 in tlvs: tlv1 = TLorentzVector() @@ -39,16 +39,16 @@ def testUtils(): mdiff = 100.0*(invm1-invm2) p1 = Mom(pt1,eta1) p2 = Mom(pt2,eta2) - print ">>> %7.2f %5.2f %5.2f %6.2f %8.2f %8.2f %8.2f | %6.2f %5.2f %5.2f %6.2f %8.2f %8.2f %8.2f | %10.5f %10.5f %10.7f %9.3f"%( - pt1,eta1,phi1,m1,p1,tlv1.P(),tlv1.E(),pt2,eta2,phi2,m2,p2,tlv2.P(),tlv2.E(),invm1,tlv.M(),mdiff,tlv.E()) - print ">>> " + print(">>> %7.2f %5.2f %5.2f %6.2f %8.2f %8.2f %8.2f | %6.2f %5.2f %5.2f %6.2f %8.2f %8.2f %8.2f | %10.5f %10.5f %10.7f %9.3f"%( + 
pt1,eta1,phi1,m1,p1,tlv1.P(),tlv1.E(),pt2,eta2,phi2,m2,p2,tlv2.P(),tlv2.E(),invm1,tlv.M(),mdiff,tlv.E())) + print(">>> ") # CHECK DeltaPhi, DeltaR phivals = [ 0.0, 1.5, 2.5, ] #3.0 ] etavals = [ 0.0, 1.5, 2.5, ] #3.0 ] vecs = [(e,p) for e in etavals for p in phivals] - print ">>> %7s %5s %5s %5s | %8s %11s %11s | %8s %9s"%( - 'eta1','phi1','eta2','phi2','DeltaPhi','V3.DeltaPhi','DeltaPhi2Pi','DeltaR','V3.DeltaR') + print(">>> %7s %5s %5s %5s | %8s %11s %11s | %8s %9s"%( + 'eta1','phi1','eta2','phi2','DeltaPhi','V3.DeltaPhi','DeltaPhi2Pi','DeltaR','V3.DeltaR')) for eta1, phi1 in vecs: for eta2, phi2 in vecs: dphi1 = DeltaPhi(phi1,phi2) @@ -58,14 +58,14 @@ def testUtils(): vec2 = TVector3() vec1.SetPtEtaPhi(10.,eta1,phi1) vec2.SetPtEtaPhi(10.,eta2,phi2) - print ">>> %7.2f %5.2f %5.2f %5.2f | %8.4f %11.4f %11.4f | %8.4f %9.4f"%( - eta1,phi1,eta2,phi2,dphi1,vec1.DeltaPhi(vec2),dphi2,dR,vec1.DeltaR(vec2)) - print ">>> " + print(">>> %7.2f %5.2f %5.2f %5.2f | %8.4f %11.4f %11.4f | %8.4f %9.4f"%( + eta1,phi1,eta2,phi2,dphi1,vec1.DeltaPhi(vec2),dphi2,dR,vec1.DeltaR(vec2))) + print(">>> ") def testPileup(): """Test pileup.C macro.""" - print header("testPileup") + print(header("testPileup")) # LOAD MACRO gROOT.ProcessLine(".L python/macros/pileup.C+O") @@ -86,17 +86,17 @@ def testPileup(): loadPU(fname_data,fname_mc) else: loadPU() - print ">>> data=%r, mc=%r"%(fname_data,fname_mc) - print ">>> %7s %9s"%('npv','data/mc') + print(">>> data=%r, mc=%r"%(fname_data,fname_mc)) + print(">>> %7s %9s"%('npv','data/mc')) for npv in npvs: weight = getPUWeight(npv) - print ">>> %7d %9.4f"%(npv,weight) - print ">>> " + print(">>> %7d %9.4f"%(npv,weight)) + print(">>> ") def testTopPtWeight(): """Test topptweight.C macro.""" - print header("testTopPtWeight") + print(header("testTopPtWeight")) # LOAD MACRO gROOT.ProcessLine(".L python/macros/topptweight.C+O") @@ -116,8 +116,8 @@ def testTopPtWeight(): ('NNLO/NLO',getTopPtSF_NNLO, getTopPtWeight_NNLO), ] for title, sffunc, weightfunc in 
funcs: - print ">>> "+title - print ">>> %11s %9s %9s %9s %15s %10s"%('pt1','pt2','sf(pt1)','sf(pt2)','sf(pt1)*sf(pt2)','weight') + print(">>> "+title) + print(">>> %11s %9s %9s %9s %15s %10s"%('pt1','pt2','sf(pt1)','sf(pt2)','sf(pt1)*sf(pt2)','weight')) for pt1 in pts1: sf1 = sffunc(pt1) for pt2 in pts2: @@ -125,13 +125,13 @@ def testTopPtWeight(): sf2 = sffunc(pt2) weight1 = sf1*sf2 weight2 = weightfunc(pt1,pt2) - print ">>> %11.1f %9.1f %9.4f %9.4f %15.4f %10.4f"%(pt1,pt2,sf1,sf2,weight1,weight2) - print ">>> " + print(">>> %11.1f %9.1f %9.4f %9.4f %15.4f %10.4f"%(pt1,pt2,sf1,sf2,weight1,weight2)) + print(">>> ") def testTauIDSF(): """Test tauIDSF.C macro.""" - print header("testTauIDSF") + print(header("testTauIDSF")) # LOAD MACRO gROOT.ProcessLine(".L python/macros/tauIDSF.C+O") @@ -143,7 +143,7 @@ def testTauIDSF(): sffile = "/t3home/ineuteli/eos/public/forTAU/TauID_SF_dm_DeepTau2017v2p1VSjet_2016Legacy_ptgt20.root" hname = "Medium" loadTauIDSF(sffile,hname) - print ">>> %4s %9s %9s %9s %9s"%('dm','sf(dm)','sf(dm,0)','sf(dm,-1)','sf(dm,+1)') #,'sf(dm,Up)','sf(dm,Down)') + print(">>> %4s %9s %9s %9s %9s"%('dm','sf(dm)','sf(dm,0)','sf(dm,-1)','sf(dm,+1)')) #,'sf(dm,Up)','sf(dm,Down)') for dm in range(0,14): sf = getTauIDSF(dm,5) sf0 = getTauIDSF(dm,5,0) @@ -152,7 +152,7 @@ def testTauIDSF(): sfdn2 = getTauIDSF(dm,5,-1) #sfup2 = getTauIDSF(dm,TIDUp) #sfdn2 = getTauIDSF(dm,TIDDown) - print ">>> %4s %9.4f %9.4f %9.4f %9.4f"%(dm,sf,sf0,sfdn,sfup) #,sfdn2,sfup2) + print(">>> %4s %9.4f %9.4f %9.4f %9.4f"%(dm,sf,sf0,sfdn,sfup)) #,sfdn2,sfup2) # LOAD TREE setera(2016) @@ -171,7 +171,7 @@ def testTauIDSF(): ("sf_dmDown","getTauIDSF(dm_2,genmatch_2,-1)"), ] for alias, expr in aliases: - print '>>> tree.SetAlias("%s","%s")'%(alias,expr) + print('>>> tree.SetAlias("%s","%s")'%(alias,expr)) tree.SetAlias(alias,expr) # DRAW HISTOGRAMS @@ -192,9 +192,9 @@ def testTauIDSF(): hname = makehistname("testhist_",sf) hist = TH1D(hname,sf,25,50,150) dcmd = "m_vis >> %s"%(hname) - 
print '>>> tree.Draw(%s,"(%s)*%s")'%(dcmd,cut,sf) + print('>>> tree.Draw(%s,"(%s)*%s")'%(dcmd,cut,sf)) out = tree.Draw(dcmd,"(%s)*%s"%(cut,sf),'gOff') - print out + print(out) hists.append(hist) # PLOT HISTOGRAMS @@ -207,7 +207,7 @@ def testTauIDSF(): def testLoadHist(): """Test loadHist.C macro.""" - print header("testLoadHist") + print(header("testLoadHist")) # LOAD MACRO gROOT.ProcessLine(".L python/macros/loadHist.C+O") @@ -218,14 +218,14 @@ def testLoadHist(): sffile = "/t3home/ineuteli/eos/public/forTAU/TauID_SF_dm_DeepTau2017v2p1VSjet_2016Legacy_ptgt20.root" hname = "Medium" loadHist(sffile,hname) - print ">>> %4s %9s %9s %9s %9s"%('dm','sf(dm)','sf(dm,0)','sf(dm,-1)','sf(dm,+1)') + print(">>> %4s %9s %9s %9s %9s"%('dm','sf(dm)','sf(dm,0)','sf(dm,-1)','sf(dm,+1)')) for dm in range(0,14): sf = getBin(dm) sf0 = getBin(dm,0) sfdn = getBin(dm,-1) sfup = getBin(dm,+1) sfdn2 = getBin(dm,-1) - print ">>> %4s %9.4f %9.4f %9.4f %9.4f"%(dm,sf,sf0,sfdn,sfup) + print(">>> %4s %9.4f %9.4f %9.4f %9.4f"%(dm,sf,sf0,sfdn,sfup)) # LOAD TREE setera(2016) @@ -242,7 +242,7 @@ def testLoadHist(): ("sf_dmDown","getBin(dm_2,-1)"), ] for alias, expr in aliases: - print '>>> tree.SetAlias("%s","%s")'%(alias,expr) + print('>>> tree.SetAlias("%s","%s")'%(alias,expr)) tree.SetAlias(alias,expr) # DRAW HISTOGRAMS @@ -261,9 +261,9 @@ def testLoadHist(): hname = makehistname("testhist_",sf) hist = TH1D(hname,sf,25,50,150) dcmd = "m_vis >> %s"%(hname) - print '>>> tree.Draw(%s,"(%s)*%s")'%(dcmd,cut,sf) + print('>>> tree.Draw(%s,"(%s)*%s")'%(dcmd,cut,sf)) out = tree.Draw(dcmd,"(%s)*%s"%(cut,sf),'gOff') - print out + print(out) hists.append(hist) # PLOT HISTOGRAMS @@ -284,5 +284,5 @@ def main(): if __name__ == '__main__': main() - print ">>> Done" + print(">>> Done") diff --git a/Plotter/test/testMethods.py b/Plotter/test/testMethods.py index 29672c487..231205016 100755 --- a/Plotter/test/testMethods.py +++ b/Plotter/test/testMethods.py @@ -9,7 +9,7 @@ def main(): methods = [ 'QCD_OSSS' ] for 
method in methods: methodmod = ensuremodule(method,'Plotter.methods') - print ">>> hasattr(SampleSet,%r) = %s, module=%s"%(method,hasattr(SampleSet,method),methodmod) + print(">>> hasattr(SampleSet,%r) = %s, module=%s"%(method,hasattr(SampleSet,method),methodmod)) if __name__ == "__main__": @@ -21,5 +21,5 @@ def main(): args = parser.parse_args() LOG.verbosity = args.verbosity main() - print "\n>>> Done." + print("\n>>> Done.") diff --git a/Plotter/test/testMultiDraw.py b/Plotter/test/testMultiDraw.py index 4d18b8194..39bf6f2ed 100755 --- a/Plotter/test/testMultiDraw.py +++ b/Plotter/test/testMultiDraw.py @@ -21,11 +21,11 @@ def singledraw(tree,variables,selections,predefine=False,outdir='plots'): fname = "%s/testMultiDraw.root"%(outdir) file = TFile(fname,'RECREATE') hists = [ ] - print ">>> singledraw: Filling histograms with TTree::Draw for %s..."%(fname) + print(">>> singledraw: Filling histograms with TTree::Draw for %s..."%(fname)) for i, (selection, weight) in enumerate(selections): cut = "(%s)*%s"%(selection,weight) - print ">>> cut=%r"%(cut) - print ">>> \033[4m %-18s %10s %12s %10s %12s %s\033[0m"%("varname","mean","std. dev.","entries","integral","draw command"+' '*16) + print(">>> cut=%r"%(cut)) + print(">>> \033[4m %-18s %10s %12s %10s %12s %s\033[0m"%("varname","mean","std. 
dev.","entries","integral","draw command"+' '*16)) for variable in variables: varname = variable[0] binning = variable[1:] @@ -51,12 +51,12 @@ def singledraw(tree,variables,selections,predefine=False,outdir='plots'): tree.Draw(dcmd,cut,'gOff') hist = gDirectory.Get(hname) hists.append(hist) - print ">>> %-18r %10.2f %12.2f %10d %12.1f %r"%(varname,hist.GetMean(),hist.GetStdDev(),hist.GetEntries(),hist.Integral(),dcmd) + print(">>> %-18r %10.2f %12.2f %10d %12.1f %r"%(varname,hist.GetMean(),hist.GetStdDev(),hist.GetEntries(),hist.Integral(),dcmd)) for hist in hists: hist.Write(hist.GetName(),TH1D.kOverwrite) file.Close() dtime = time()-start - print ">>> Took %.2fs"%(dtime) + print(">>> Took %.2fs"%(dtime)) return dtime @@ -66,10 +66,10 @@ def multidraw(tree,variables,selections,predefine=False,outdir='plots'): fname = "%s/testMultiDraw.root"%(outdir) file = TFile(fname,'RECREATE') hists = [ ] - print ">>> multidraw: Filling histograms with MultiDraw for %s..."%(fname) + print(">>> multidraw: Filling histograms with MultiDraw for %s..."%(fname)) for i, (selection, weight) in enumerate(selections): cut = "(%s)*%s"%(selection,weight) - print ">>> cut=%r"%(cut) + print(">>> cut=%r"%(cut)) varexps = [ ] for variable in variables: varname = variable[0] @@ -96,15 +96,15 @@ def multidraw(tree,variables,selections,predefine=False,outdir='plots'): varexps.append(dcmd) results = tree.MultiDraw(varexps,cut,hists=hists) assert len(varexps)==len(results), "Mismatch between histograms (%s) and draw commands (%s)!"%(results,varexps) - print ">>> \033[4m %-18s %10s %12s %10s %12s %s\033[0m"%("varname","mean","std. dev.","entries","integral","draw command"+' '*16) + print(">>> \033[4m %-18s %10s %12s %10s %12s %s\033[0m"%("varname","mean","std. 
dev.","entries","integral","draw command"+' '*16)) for variable, dcmd, hist in zip(variables,varexps,results): varname = variable[0] assert hist.GetName() in dcmd, "Mismatch between histogram (%r) and draw command (%r)!"%(hist.GetName(),dcmd) - print ">>> %-18r %10.2f %12.2f %10d %12.1f %r"%(varname,hist.GetMean(),hist.GetStdDev(),hist.GetEntries(),hist.Integral(),dcmd) + print(">>> %-18r %10.2f %12.2f %10d %12.1f %r"%(varname,hist.GetMean(),hist.GetStdDev(),hist.GetEntries(),hist.Integral(),dcmd)) hist.Write(hist.GetName(),TH1D.kOverwrite) file.Close() dtime = time()-start - print ">>> Took %.2fs"%(dtime) + print(">>> Took %.2fs"%(dtime)) return dtime @@ -114,10 +114,10 @@ def multidraw2D(tree,variables,selections,predefine=False,outdir='plots'): fname = "%s/testMultiDraw.root"%(outdir) file = TFile(fname,'RECREATE') hists = [ ] - print ">>> multidraw2D: Filling histograms with MultiDraw for %s..."%(fname) + print(">>> multidraw2D: Filling histograms with MultiDraw for %s..."%(fname)) for i, (selection, weight) in enumerate(selections): cut = "(%s)*%s"%(selection,weight) - print ">>> cut=%r"%(cut) + print(">>> cut=%r"%(cut)) varexps = [ ] for xvar, nxbins, xmin, xmax, yvar, nybins, ymin, ymax in variables: hname = ("%s_%s_sel%d_single"%(xvar,yvar,i+1)).replace('+','_').replace('(','').replace(')','') @@ -131,16 +131,16 @@ def multidraw2D(tree,variables,selections,predefine=False,outdir='plots'): varexps.append(dcmd) results = tree.MultiDraw(varexps,cut,hists=hists) assert len(varexps)==len(results), "Mismatch between histograms (%s) and draw commands (%s)!"%(results,varexps) - print ">>> \033[4m %-14s %-14s %10s %12s %10s %12s %s\033[0m"%("xvar","yvar","mean","std. dev.","entries","integral","draw command"+' '*16) + print(">>> \033[4m %-14s %-14s %10s %12s %10s %12s %s\033[0m"%("xvar","yvar","mean","std. 
dev.","entries","integral","draw command"+' '*16)) for variable, dcmd, hist in zip(variables,varexps,results): xvar = variable[0] yvar = variable[4] assert hist.GetName() in dcmd, "Mismatch between histogram (%r) and draw command (%r)!"%(hist.GetName(),dcmd) - print ">>> %-14r %-14r %10.2f %12.2f %10d %12.1f %r"%(xvar,yvar,hist.GetMean(1),hist.GetStdDev(1),hist.GetEntries(),hist.Integral(),dcmd) + print(">>> %-14r %-14r %10.2f %12.2f %10d %12.1f %r"%(xvar,yvar,hist.GetMean(1),hist.GetStdDev(1),hist.GetEntries(),hist.Integral(),dcmd)) hist.Write(hist.GetName(),TH1D.kOverwrite) file.Close() dtime = time()-start - print ">>> Took %.2fs"%(dtime) + print(">>> Took %.2fs"%(dtime)) return dtime @@ -152,7 +152,7 @@ def main(): filedict = makesamples(nevts,sample=sample,outdir=outdir) file, tree = filedict[sample] nevts = tree.GetEntries() - print ">>> Using pseudo data %s..."%(file.GetName()) + print(">>> Using pseudo data %s..."%(file.GetName())) variables = [ ('m_vis', 20, 0, 140), @@ -183,10 +183,10 @@ def main(): dtime2 = multidraw(tree,variables,selections,outdir=outdir,predefine=predefine) dtime3 = multidraw2D(tree,variables2D,selections,outdir=outdir,predefine=predefine) file.Close() - print ">>> Result: MultiDraw is %.2f times faster than TTree::Draw for %s events and %s variables!"%(dtime1/dtime2,nevts,len(variables)) + print(">>> Result: MultiDraw is %.2f times faster than TTree::Draw for %s events and %s variables!"%(dtime1/dtime2,nevts,len(variables))) if __name__ == '__main__': main() - print ">>> Done!" 
+ print(">>> Done!") diff --git a/Plotter/test/testMultiThread.py b/Plotter/test/testMultiThread.py index 76d312a8f..bb97dff12 100755 --- a/Plotter/test/testMultiThread.py +++ b/Plotter/test/testMultiThread.py @@ -10,9 +10,9 @@ def foo(i,bar,**kwargs): """Simple test function to be multithreaded.""" if kwargs: - print 'foo %d says "%s", with extra options: %s'%(i,bar,kwargs) + print('foo %d says "%s", with extra options: %s'%(i,bar,kwargs)) else: - print 'foo %d says "%s"'%(i,bar) + print('foo %d says "%s"'%(i,bar)) time.sleep(2) #for i in range(100000): # pass @@ -28,7 +28,7 @@ def draw(histname): tree = file.Get('tree') hist = TH1D(histname,histname,100,0,200) tree.Draw("m_vis >> %s"%histname,"",'gOff') #getFakeRate(pt_2,m_2,eta_2,decayMode_2) - print ">>> Drawing %s... %s"%(histname,hist) + print(">>> Drawing %s... %s"%(histname,hist)) #gDirectory.Delete(histname) hist.SetDirectory(0) file.Close() @@ -41,7 +41,7 @@ def drawWithSharedFile(file,histname): tree = file.Get('tree') hist = TH1D(histname,histname,100,0,200) tree.Draw("m_vis >> %s"%histname,"",'gOff') - print ">>> Drawing %s... %s"%(histname,hist) + print(">>> Drawing %s... 
%s"%(histname,hist)) #gDirectory.Delete(histname) hist.SetDirectory(0) file.Close() @@ -52,15 +52,15 @@ def testProcess(N=5): """Test multiprocessing behavior.""" LOG.header("testProcess") - print ">>> Sequential:" + print(">>> Sequential:") start = time.time() for i in range(1,N+1): name = "thread %d"%i result = foo(i,"Hello world!") - print ">>> foo returns:", result - print ">>> Took %.1f seconds"%(time.time()-start) + print(">>> foo returns:", result) + print(">>> Took %.1f seconds"%(time.time()-start)) - print "\n>>> Parallel:" + print("\n>>> Parallel:") start = time.time() threads = [ ] for i in range(1,N+1): @@ -70,22 +70,22 @@ def testProcess(N=5): threads.append(thread) for thread in threads: result = thread.join() - print ">>> Took %.1f seconds"%(time.time()-start) - print + print(">>> Took %.1f seconds"%(time.time()-start)) + print() def testMultiProcessor(N=5): """Test multiprocessing behavior.""" LOG.header("testMultiProcessor") - print ">>> Sequential:" + print(">>> Sequential:") start = time.time() for i in range(1,N+1): result = foo(i,"Hello world!") - print ">>> foo returns:",result - print ">>> Took %.1f seconds"%(time.time()-start) + print(">>> foo returns:",result) + print(">>> Took %.1f seconds"%(time.time()-start)) - print "\n>>> Parallel:" + print("\n>>> Parallel:") start = time.time() processor = MultiProcessor() for i in range(1,N+1): @@ -93,24 +93,24 @@ def testMultiProcessor(N=5): processor.start(target=foo,args=(i,"Hello world!")) for process in processor: result = process.join() # wait for processes to end - print ">>> foo returns:", result - print ">>> Took %.1f seconds"%(time.time()-start) - print + print(">>> foo returns:", result) + print(">>> Took %.1f seconds"%(time.time()-start)) + print() def testMultiProcessorWithDraw(N=5): """Test multiprocessing behavior with TTree:Draw.""" LOG.header("testMultiProcessorWithDraw") - print ">>> Sequential:" + print(">>> Sequential:") start = time.time() for i in range(1,N+1): name = 
"hist_%d"%i result = draw(name) - print ">>> draw returns:", result - print ">>> Took %.1f seconds"%(time.time()-start) + print(">>> draw returns:", result) + print(">>> Took %.1f seconds"%(time.time()-start)) - print "\n>>> Parallel:" + print("\n>>> Parallel:") start = time.time() processor = MultiProcessor() for i in range(1,N+1): @@ -118,23 +118,23 @@ def testMultiProcessorWithDraw(N=5): processor.start(target=draw,args=(name,)) for process in processor: result = process.join() # wait for processes to end - print ">>> draw returns:", result - print ">>> Took %.1f seconds"%(time.time()-start) - print + print(">>> draw returns:", result) + print(">>> Took %.1f seconds"%(time.time()-start)) + print() def testThread(N=5): """Test threading behavior.""" LOG.header("testThread") - print ">>> Sequential:" + print(">>> Sequential:") start = time.time() for i in range(1,N+1): result = foo(i,"Hello world!") - print ">>> foo returns:", result - print ">>> Took %.1f seconds"%(time.time()-start) + print(">>> foo returns:", result) + print(">>> Took %.1f seconds"%(time.time()-start)) - print "\n>>> Parallel:" + print("\n>>> Parallel:") start = time.time() threads = [ ] for i in range(1,N+1): @@ -144,25 +144,25 @@ def testThread(N=5): threads.append(thread) for thread in threads: result = thread.join() - print ">>> %s done, foo returns: %s"%(thread.name,result) - print ">>> Took %.1f seconds"%(time.time()-start) - print + print(">>> %s done, foo returns: %s"%(thread.name,result)) + print(">>> Took %.1f seconds"%(time.time()-start)) + print() def testThreadWithDraw(N=5): """Test threading behavior with TTree:Draw.""" LOG.header("testThreadWithDraw") - print ">>> Sequential:" + print(">>> Sequential:") start = time.time() for i in range(1,N+1): name = "hist_%d"%i result = draw(name) gDirectory.Delete(name) - print ">>> %s done, draw returns: %s"%(name,result) - print "Took %.1f seconds"%(time.time()-start) + print(">>> %s done, draw returns: %s"%(name,result)) + print("Took %.1f 
seconds"%(time.time()-start)) - print "\n>>> Parallel:" + print("\n>>> Parallel:") start = time.time() threads = [ ] for i in range(1,N+1): @@ -172,9 +172,9 @@ def testThreadWithDraw(N=5): threads.append(thread) for thread in threads: thread.join() - print ">>> %s done"%(thread.name) - print "Took %.1f seconds"%(time.time()-start) - print + print(">>> %s done"%(thread.name)) + print("Took %.1f seconds"%(time.time()-start)) + print() @@ -184,16 +184,16 @@ def testThreadWithSharedTFile(N=5): filename = "/scratch/ineuteli/analysis/LQ_2018/SingleMuon/SingleMuon_Run2018_mutau.root" file = TFile.Open(filename) - print ">>> Sequential:" + print(">>> Sequential:") start = time.time() for i in range(1,N+1): name = "hist_%d"%i drawWithSharedFile(file,name) gDirectory.Delete(name) - print ">>> %s done"%(name) - print "Took %.1f seconds"%(time.time()-start) + print(">>> %s done"%(name)) + print("Took %.1f seconds"%(time.time()-start)) - print "\n>>> Parallel:" + print("\n>>> Parallel:") start = time.time() threads = [ ] for i in range(1,N+1): @@ -203,9 +203,9 @@ def testThreadWithSharedTFile(N=5): threads.append(thread) for thread in threads: thread.join() - print ">>> %s done"%(thread.name) - print "Took %.1f seconds"%(time.time()-start) - print + print(">>> %s done"%(thread.name)) + print("Took %.1f seconds"%(time.time()-start)) + print() def main(): @@ -221,5 +221,5 @@ def main(): if __name__ == '__main__': main() - print ">>> Done!" 
+ print(">>> Done!") diff --git a/Plotter/test/testPanels.py b/Plotter/test/testPanels.py index 1dcea9776..d631e09db 100755 --- a/Plotter/test/testPanels.py +++ b/Plotter/test/testPanels.py @@ -49,16 +49,16 @@ def plotpanels(ratio,**kwargs): BM = max(BH,BW) Tm = min(TH,TW) Bm = min(BH,BW) - print ">>> %6s: %8s %8s %8s %8s %8s %8s %8s"%('pad','Wh','HNDC','Wh*HNDC','Ww','WNDC','Ww*WNDC','Wh*HNDC/Ww*WNDC') - print ">>> %6s: %8.1f %8.3f %8.1f %8.1f %8.3f %8.1f"%( + print(">>> %6s: %8s %8s %8s %8s %8s %8s %8s"%('pad','Wh','HNDC','Wh*HNDC','Ww','WNDC','Ww*WNDC','Wh*HNDC/Ww*WNDC')) + print(">>> %6s: %8.1f %8.3f %8.1f %8.1f %8.3f %8.1f"%( 'canvas',canvas.GetWh(),canvas.GetHNDC(),canvas.GetWh()*canvas.GetHNDC(), - canvas.GetWw(),canvas.GetWNDC(),canvas.GetWw()*canvas.GetWNDC()) - print ">>> %6s: %8.1f %8.3f %8.1f %8.1f %8.3f %8.1f %10.3f"%( + canvas.GetWw(),canvas.GetWNDC(),canvas.GetWw()*canvas.GetWNDC())) + print(">>> %6s: %8.1f %8.3f %8.1f %8.1f %8.3f %8.1f %10.3f"%( 'pad1',pad1.GetWh(),pad1.GetHNDC(),pad1.GetWh()*pad1.GetHNDC(), - pad1.GetWw(),pad1.GetWNDC(),pad1.GetWw()*pad1.GetWNDC(),TR) - print ">>> %6s: %8.1f %8.3f %8.1f %8.1f %8.3f %8.1f %10.3f"%( + pad1.GetWw(),pad1.GetWNDC(),pad1.GetWw()*pad1.GetWNDC(),TR)) + print(">>> %6s: %8.1f %8.3f %8.1f %8.1f %8.3f %8.1f %10.3f"%( 'pad2',pad2.GetWh(),pad2.GetHNDC(),pad2.GetWh()*pad2.GetHNDC(), - pad2.GetWw(),pad2.GetWNDC(),pad2.GetWw()*pad2.GetWNDC(),BR) + pad2.GetWw(),pad2.GetWNDC(),pad2.GetWw()*pad2.GetWNDC(),BR)) #scale = 1.0 #scale = 1./ratio #scale = (1.-ratio)/ratio @@ -72,18 +72,18 @@ def plotpanels(ratio,**kwargs): toffset = 1.2 boffset = toffset*oscale #boffset = 1.0+(toffset-1)*oscale - print ">>> 1/r=%.4f, (1-r)/r=%.4f, HNDC1/HNDC2=%.4f, WNDC1/WNDC2=%.4f, TR/BR=%.4f, TM/BM=%.4f, Tm/Bm=%.4f"%( - 1./ratio,(1.-ratio)/ratio,pad1.GetHNDC()/pad2.GetHNDC(),pad1.GetWNDC()/pad2.GetWNDC(),TR/BR,TM/BM,Tm/Bm) - print ">>> tsize=%.4f, bsize=%.4f, scale=%.4f"%(tsize,bsize,scale) - print ">>> toffset=%.4f, boffset=%.4f, 
scale=%.4f"%(toffset,boffset,oscale) + print(">>> 1/r=%.4f, (1-r)/r=%.4f, HNDC1/HNDC2=%.4f, WNDC1/WNDC2=%.4f, TR/BR=%.4f, TM/BM=%.4f, Tm/Bm=%.4f"%( + 1./ratio,(1.-ratio)/ratio,pad1.GetHNDC()/pad2.GetHNDC(),pad1.GetWNDC()/pad2.GetWNDC(),TR/BR,TM/BM,Tm/Bm)) + print(">>> tsize=%.4f, bsize=%.4f, scale=%.4f"%(tsize,bsize,scale)) + print(">>> toffset=%.4f, boffset=%.4f, scale=%.4f"%(toffset,boffset,oscale)) # TOP HIST canvas.cd(1) thist = TH1D('top','top',10,0,100) thist.GetXaxis().SetTitle("X title") thist.GetYaxis().SetTitle("Y title") - print ">>> thist.GetXaxis().GetTitleOffset()=%.4f"%(thist.GetXaxis().GetTitleOffset()) - print ">>> thist.GetYaxis().GetTitleOffset()=%.4f"%(thist.GetYaxis().GetTitleOffset()) + print(">>> thist.GetXaxis().GetTitleOffset()=%.4f"%(thist.GetXaxis().GetTitleOffset())) + print(">>> thist.GetYaxis().GetTitleOffset()=%.4f"%(thist.GetYaxis().GetTitleOffset())) thist.GetXaxis().SetTitleSize(tsize) thist.GetYaxis().SetTitleSize(tsize) #thist.GetXaxis().SetTitleOffset(toffset) @@ -107,7 +107,7 @@ def plotpanels(ratio,**kwargs): # FINISH canvas.SaveAs(fname) canvas.Close() - print ">>> " + print(">>> ") def main(): diff --git a/Plotter/test/testPlot.py b/Plotter/test/testPlot.py index 1b6cf9d16..6ca7b2539 100755 --- a/Plotter/test/testPlot.py +++ b/Plotter/test/testPlot.py @@ -41,7 +41,7 @@ def plothist(xtitle,hists,ratio=False,logy=False,norm=False,cwidth=None): #plot.saveas(fname+".png",fname+".C") #plot.saveas(fname,ext=['png','pdf']) plot.close() - print + print() def createhists(nhist=3): @@ -52,13 +52,13 @@ def createhists(nhist=3): rrange = 0.5 hists = [ ] gRandom.SetSeed(1777) - for i in xrange(1,nhist+1): + for i in range(1,nhist+1): mu = 48+i sigma = 10 hname = "hist%d"%(i) htitle = "#mu = %s, #sigma = %s"%(mu,sigma) hist = TH1D(hname,hname,nbins,xmin,xmax) - for j in xrange(nevts): + for j in range(nevts): hist.Fill(gRandom.Gaus(mu,sigma)) hists.append(hist) return hists diff --git a/Plotter/test/testSampleSet.py 
b/Plotter/test/testSampleSet.py index 34e7d0610..6952f45d0 100755 --- a/Plotter/test/testSampleSet.py +++ b/Plotter/test/testSampleSet.py @@ -53,7 +53,7 @@ def makeSampleSet(sampleset,filedict,join=False): datasample = sample else: expsamples.append(sample) - print ">>> Joining samples into one set"%(expsamples) + print(">>> Joining samples into one set"%(expsamples)) if join: color = expsamples[0].fillcolor bkgsample = MergedSample('Bkg',"Background",expsamples[1:],color=color) @@ -65,11 +65,11 @@ def makeSampleSet(sampleset,filedict,join=False): ('TTT',"ttbar real tau_{h}","genmatch_2==5"), ('TTJ',"ttbar fake tau_{h}","genmatch_2!=5"), ],start=True) - print ">>> samples=%s"%samples - print ">>> " + print(">>> samples=%s"%samples) + print(">>> ") samples.printtable() samples.printobjs() - print ">>> " + print(">>> ") return samples @@ -80,32 +80,32 @@ def testget(samples): for regex in [True,False]: for unique in [True,False]: kwargs = { 'unique':unique, 'incl':incl, 'regex':regex } - print ">>> unique=%s, incl=%s, regex=%s"%(LOG.getcolor(unique),incl,LOG.getcolor(regex)) - print ">>> SampleSet.get('DY', unique=%s,regex=%s): %r"%(unique,regex,samples.get('DY',**kwargs)) - print ">>> SampleSet.get('ZTT',unique=%s,regex=%s): %r"%(unique,regex,samples.get('ZTT',**kwargs)) - print ">>> SampleSet.get('WJ', unique=%s,regex=%s): %r"%(unique,regex,samples.get('WJ',**kwargs)) - print ">>> SampleSet.get('TT', unique=%s,regex=%s): %r"%(unique,regex,samples.get('TT',**kwargs)) + print(">>> unique=%s, incl=%s, regex=%s"%(LOG.getcolor(unique),incl,LOG.getcolor(regex))) + print(">>> SampleSet.get('DY', unique=%s,regex=%s): %r"%(unique,regex,samples.get('DY',**kwargs))) + print(">>> SampleSet.get('ZTT',unique=%s,regex=%s): %r"%(unique,regex,samples.get('ZTT',**kwargs))) + print(">>> SampleSet.get('WJ', unique=%s,regex=%s): %r"%(unique,regex,samples.get('WJ',**kwargs))) + print(">>> SampleSet.get('TT', unique=%s,regex=%s): %r"%(unique,regex,samples.get('TT',**kwargs))) if not 
regex: - print ">>> SampleSet.get('*TT',unique=%s,regex=%s): %r"%(unique,regex,samples.get('*TT',**kwargs)) - print ">>> SampleSet.get('?TT',unique=%s,regex=%s): %r"%(unique,regex,samples.get('?TT',**kwargs)) + print(">>> SampleSet.get('*TT',unique=%s,regex=%s): %r"%(unique,regex,samples.get('*TT',**kwargs))) + print(">>> SampleSet.get('?TT',unique=%s,regex=%s): %r"%(unique,regex,samples.get('?TT',**kwargs))) else: - print ">>> SampleSet.get('.*TT',unique=%s,regex=%s): %r"%(unique,regex,samples.get('.*TT',**kwargs)) - print ">>> SampleSet.get('.TT',unique=%s,regex=%s): %r"%(unique,regex,samples.get('.TT', **kwargs)) - print ">>> " + print(">>> SampleSet.get('.*TT',unique=%s,regex=%s): %r"%(unique,regex,samples.get('.*TT',**kwargs))) + print(">>> SampleSet.get('.TT',unique=%s,regex=%s): %r"%(unique,regex,samples.get('.TT', **kwargs))) + print(">>> ") regex = False for incl in [True,False]: for unique in [True,False]: kwargs = { 'unique':unique, 'incl':incl, 'regex':regex } - print ">>> unique=%s, incl=%s, regex=%s"%(LOG.getcolor(unique),LOG.getcolor(incl),regex) - print ">>> SampleSet.get('ZTT','TT',unique=%s,incl=%s): %r"%(unique,incl,samples.get('ZTT','TT',**kwargs)) - print ">>> SampleSet.get('QCD','WJ',unique=%s,incl=%s): %r"%(unique,incl,samples.get('QCD','WJ',**kwargs)) - print ">>> " - print ">>> SampleSet.get('TT?',unique=False,regex=False,split=True): %r"%(samples.get('TT?',unique=False,regex=False,split=True)) - print ">>> SampleSet.getexp(): %r"%(samples.getexp()) - print ">>> SampleSet.getexp('TT'): %r"%(samples.getexp('TT')) + print(">>> unique=%s, incl=%s, regex=%s"%(LOG.getcolor(unique),LOG.getcolor(incl),regex)) + print(">>> SampleSet.get('ZTT','TT',unique=%s,incl=%s): %r"%(unique,incl,samples.get('ZTT','TT',**kwargs))) + print(">>> SampleSet.get('QCD','WJ',unique=%s,incl=%s): %r"%(unique,incl,samples.get('QCD','WJ',**kwargs))) + print(">>> ") + print(">>> SampleSet.get('TT?',unique=False,regex=False,split=True): 
%r"%(samples.get('TT?',unique=False,regex=False,split=True))) + print(">>> SampleSet.getexp(): %r"%(samples.getexp())) + print(">>> SampleSet.getexp('TT'): %r"%(samples.getexp('TT'))) #print ">>> SampleSet.getmc(): %r"%(samples.getmc()) #print ">>> SampleSet.getdata(): %r"%(samples.getdata()) - print ">>> " + print(">>> ") def plotSampleSet(samples,tag="",singlevar=False): @@ -180,5 +180,5 @@ def main(args): LOG.verbosity = args.verbosity PLOG.verbosity = args.verbosity-1 main(args) - print ">>>\n>>> Done." + print(">>>\n>>> Done.") diff --git a/Plotter/test/testSamples.py b/Plotter/test/testSamples.py index cb33668e0..75788a937 100755 --- a/Plotter/test/testSamples.py +++ b/Plotter/test/testSamples.py @@ -54,7 +54,7 @@ def plotsamples(datasample,expsamples,tag=""): for sample in [datasample]+expsamples: hists = sample.gethist(variables,selection) histdict[sample] = hists - print ">>> %r: %s"%(sample.name,[repr(h.GetName()) for h in hists]) + print(">>> %r: %s"%(sample.name,[repr(h.GetName()) for h in hists])) # PLOT for i, variable in enumerate(variables): @@ -77,24 +77,24 @@ def plotsamples2D(datasample,expsamples): for sample in [datasample]+expsamples: hists = sample.gethist2D(variables2D,selection) histdict[sample] = hists - print ">>> %r: %s"%(sample.name,[repr(h.GetName()) for h in hists]) + print(">>> %r: %s"%(sample.name,[repr(h.GetName()) for h in hists])) def testMergedSamples(datasample,expsamples): """Test MergedSample class: join samples, print out, and plot.""" LOG.header("testMergedSamples") - print ">>> Joining samples %s"%(expsamples) + print(">>> Joining samples %s"%(expsamples)) #color = expsamples[0].fillcolor STYLE.sample_colors['Exp'] = STYLE.kOrange STYLE.sample_colors['Bkg'] = STYLE.kOrange bkgsample = MergedSample("Bkg","Background",expsamples[1:]) #,color=color) expsamples = [expsamples[0],bkgsample] expsample = MergedSample("Exp","Expected",expsamples) #,color=color) - print ">>> " + print(">>> ") expsample.printheader() 
expsample.printrow() expsample.printobjs() - print ">>> " + print(">>> ") plotsamples(datasample,[expsample],tag='_merged') @@ -147,5 +147,5 @@ def main(args): LOG.verbosity = args.verbosity PLOG.verbosity = args.verbosity-1 main(args) - print ">>>\n>>> Done." + print(">>>\n>>> Done.") diff --git a/Plotter/test/testSelection.py b/Plotter/test/testSelection.py index b232294b4..0c82c9395 100755 --- a/Plotter/test/testSelection.py +++ b/Plotter/test/testSelection.py @@ -34,19 +34,19 @@ def main(): for selection in selections: LOG.header(selection.name) - print ">>> name='%s', filename='%s', title='%s', cut='%s'"%(color(selection.name),color(selection.filename),color(selection.title),color(selection.selection)) - print ">>> weight=%r, drawcmd=%r"%(selection.weight,selection.drawcmd()) + print(">>> name='%s', filename='%s', title='%s', cut='%s'"%(color(selection.name),color(selection.filename),color(selection.title),color(selection.selection))) + print(">>> weight=%r, drawcmd=%r"%(selection.weight,selection.drawcmd())) sum1 = selection + "dzeta>-40" - print '>>> sum1 = selection + "dzeta>-40"' - print ">>> name=%r, filename=%r, title=%r"%(sum1.name,sum1.filename,sum1.title) - print ">>> cut=%r"%(sum1.selection) + print('>>> sum1 = selection + "dzeta>-40"') + print(">>> name=%r, filename=%r, title=%r"%(sum1.name,sum1.filename,sum1.title)) + print(">>> cut=%r"%(sum1.selection)) sum2 = selection + Selection("dzeta","dzeta>-40") - print '>>> sum2 = selection + Selection("dzeta","dzeta>-40")' - print ">>> name=%r, filename=%r, title=%r"%(sum2.name,sum2.filename,sum2.title) - print ">>> cut=%r"%(sum2.selection) + print('>>> sum2 = selection + Selection("dzeta","dzeta>-40")') + print(">>> name=%r, filename=%r, title=%r"%(sum2.name,sum2.filename,sum2.title)) + print(">>> cut=%r"%(sum2.selection)) if __name__ == "__main__": main() - print + print() diff --git a/Plotter/test/testStack.py b/Plotter/test/testStack.py index 0d560efea..3dd46332a 100755 --- a/Plotter/test/testStack.py 
+++ b/Plotter/test/testStack.py @@ -46,7 +46,7 @@ def plotstack(xname,xtitle,datahist,exphists,ratio=False,logy=False,fraction=Fal #plot.saveas(fname+".png",fname+".C") #plot.saveas(fname,ext=['png','pdf']) plot.close() - print + print() def createhists(procs,binning,nevts): @@ -68,7 +68,7 @@ def createhists(procs,binning,nevts): for hname, htitle, scale, generator, args in procs: hist = TH1D(hname,htitle,*binning) hist.Sumw2() - for j in xrange(nevts): + for j in range(nevts): hist.Fill(generator(*args)) hist.Scale(scale) hist.SetFillColor(coldict.get(hname,kWhite)) @@ -82,11 +82,11 @@ def createhists(procs,binning,nevts): datahist = TH1D('data','Observed',*binning) datahist.SetBinErrorOption(TH1D.kPoisson) if LOG.verbosity>=1: - print ">>> createhists: Creating pseudo data:" + print(">>> createhists: Creating pseudo data:") TAB = LOG.table("%5s [%5s, %5s] %-14s %-20s", "%5d [%5s, %5s] %8.1f +- %5.1f %8d +%5.1f -%5.1f") TAB.printheader('bin','xlow','xup','exp','data') - for ibin in xrange(0,nbins+2): + for ibin in range(0,nbins+2): exp = tothist.GetBinContent(ibin) xlow = hist.GetXaxis().GetBinLowEdge(ibin) xup = hist.GetXaxis().GetBinUpEdge(ibin) diff --git a/Plotter/test/testStitching.py b/Plotter/test/testStitching.py index 7232a496e..7e9f0a28d 100755 --- a/Plotter/test/testStitching.py +++ b/Plotter/test/testStitching.py @@ -8,9 +8,9 @@ def printsamples(samples,title=None): if title: - print ">>> %s:"%(title) + print(">>> %s:"%(title)) for sample in samples: - print ">>> %r: %r"%(sample.name,sample) + print(">>> %r: %r"%(sample.name,sample)) def joinsamples(samples,tag="",outdir="plots"): @@ -32,7 +32,7 @@ def joinSampleSet(samples,tag="",outdir="plots"): samples = SampleSet(samples) samples.printtable("Before:") samples.join("DY*Jets",name=name) - print ">>> " + print(">>> ") samples.printtable("After:") @@ -44,7 +44,7 @@ def stitchSampleSet(samples,tag="",outdir="plots",xsec=1.00): samples = SampleSet(samples) samples.printtable("Before:") 
samples.stitch("DY*Jets",incl='DYJ',name="DY",title="Drell-Yan",xsec=1.00) - print ">>> " + print(">>> ") samples.printtable("After:") @@ -89,5 +89,5 @@ def main(): args = parser.parse_args() LOG.verbosity = args.verbosity main() - print "\n>>> Done." + print("\n>>> Done.") diff --git a/Plotter/test/testStyle.py b/Plotter/test/testStyle.py index dd1b71baa..b9b139c57 100755 --- a/Plotter/test/testStyle.py +++ b/Plotter/test/testStyle.py @@ -20,7 +20,7 @@ def testCMSera(): eras = ensurelist(eras) args = ','.join(repr(y) for y in eras) result = CMSStyle.setCMSEra(*eras) - print ">>> CMSStyle.setCMSera(%s) = %r"%(args,result) + print(">>> CMSStyle.setCMSera(%s) = %r"%(args,result)) def testCMSlogo(iPosX=0,width=800,height=750,lmargin=0.14,rmargin=0.04,tmargin=0.06,out=True,**kwargs): @@ -46,17 +46,17 @@ def testCMSlogo(iPosX=0,width=800,height=750,lmargin=0.14,rmargin=0.04,tmargin=0 # https://root.cern.ch/doc/master/classTPad.html H, W = canvas.GetWh()*canvas.GetHNDC(), canvas.GetWw()*canvas.GetWNDC() R, M, m = H/W, max(H,W), min(H,W) - print ">>> %6s: %8s %8s %8s %8s %8s %8s %8s"%('pad','Wh','HNDC','Wh*HNDC','Ww','WNDC','Ww*WNDC','Wh*HNDC/Ww*WNDC') - print ">>> %6s: %8.1f %8.3f %8.1f %8.1f %8.3f %8.1f"%( + print(">>> %6s: %8s %8s %8s %8s %8s %8s %8s"%('pad','Wh','HNDC','Wh*HNDC','Ww','WNDC','Ww*WNDC','Wh*HNDC/Ww*WNDC')) + print(">>> %6s: %8.1f %8.3f %8.1f %8.1f %8.3f %8.1f"%( 'canvas',canvas.GetWh(),canvas.GetHNDC(),canvas.GetWh()*canvas.GetHNDC(), - canvas.GetWw(),canvas.GetWNDC(),canvas.GetWw()*canvas.GetWNDC()) + canvas.GetWw(),canvas.GetWNDC(),canvas.GetWw()*canvas.GetWNDC())) # HIST hist = TH1F('hist','hist',10,0,100) hist.GetXaxis().SetTitle("X title") hist.GetYaxis().SetTitle("Y title") - print ">>> hist.GetXaxis().GetTitleOffset()=%.4f"%(hist.GetXaxis().GetTitleOffset()) - print ">>> hist.GetYaxis().GetTitleOffset()=%.4f"%(hist.GetYaxis().GetTitleOffset()) + print(">>> hist.GetXaxis().GetTitleOffset()=%.4f"%(hist.GetXaxis().GetTitleOffset())) + print(">>> 
hist.GetYaxis().GetTitleOffset()=%.4f"%(hist.GetYaxis().GetTitleOffset())) hist.GetXaxis().SetTitleSize(0.045) hist.GetYaxis().SetTitleSize(0.045) hist.GetXaxis().SetLabelSize(0.040) @@ -71,7 +71,7 @@ def testCMSlogo(iPosX=0,width=800,height=750,lmargin=0.14,rmargin=0.04,tmargin=0 # FINISH canvas.SaveAs(fname) canvas.Close() - print ">>> " + print(">>> ") def checklegend(samples,tag=""): @@ -85,7 +85,7 @@ def checklegend(samples,tag=""): ydim = 50*(len(samples)+2) #width = 0.4 #x1, y1 = 0.1, 0.9 - print ">>> Canvas: %sx%s (nsamples=%d)"%(xdim,ydim,len(samples)) + print(">>> Canvas: %sx%s (nsamples=%d)"%(xdim,ydim,len(samples))) canvas = TCanvas('canvas','canvas',xdim,ydim) #legend = TLegend(x1,y1,x1+width,y1-height) legend = TLegend(0,0,1,1) @@ -147,5 +147,5 @@ def main(): if __name__ == "__main__": main() - print ">>>\n>>> Done." + print(">>>\n>>> Done.") diff --git a/Plotter/test/testUnroll.py b/Plotter/test/testUnroll.py index 9c7594a06..1e8dc33b1 100755 --- a/Plotter/test/testUnroll.py +++ b/Plotter/test/testUnroll.py @@ -46,7 +46,7 @@ def red(string): def test_unroll(fname,xvars,yvars,cut="",verb=0): """Test unrolling of 2D histogram and create 4D response matrix.""" if verb>=1: - print ">>> test_unroll(%s)"%(fname) + print(">>> test_unroll(%s)"%(fname)) start = time() file = TFile(fname,'READ') assert file and not file.IsZombie(), "Could not open %s"%(fname) @@ -105,16 +105,16 @@ def test_unroll(fname,xvars,yvars,cut="",verb=0): nx, ny = 2*nxbins, 2*nybins # number of points to scan xbinw = float(xmax-xmin)/nx ybinw = float(ymax-ymin)/ny - print ">>> nxbins=%s, xmin=%s, xmax=%s"%(nxbins,xmin,xmax) - print ">>> nybins=%s, ymin=%s, ymax=%s"%(nybins,ymin,ymax) - print ">>> nx=%s, xbinw=%s, ny=%s, ybinw=%s"%(nx,xbinw,ny,ybinw) + print(">>> nxbins=%s, xmin=%s, xmax=%s"%(nxbins,xmin,xmax)) + print(">>> nybins=%s, ymin=%s, ymax=%s"%(nybins,ymin,ymax)) + print(">>> nx=%s, xbinw=%s, ny=%s, ybinw=%s"%(nx,xbinw,ny,ybinw)) xvals = [xmin+xbinw*i for i in range(-2,nx+3)] 
yvals = [ymin+ybinw*i for i in range(-2,ny+3)] #xvals = [-1,0,1,10,50,100,200] #yvals = [-1,0,1,10,50,100,200] - print ">>>" - print ">>> %7s |"%(r'y\x') + ' '.join("%6s"%x for x in xvals) - print ">>> %8s+"%('-'*7) + '-'*7*len(xvals) + print(">>>") + print(">>> %7s |"%(r'y\x') + ' '.join("%6s"%x for x in xvals)) + print(">>> %8s+"%('-'*7) + '-'*7*len(xvals)) for y in yvals: #print ">>> %7s |"%x+' '.join("%6s"%(Unroll.GetBin(x,y)) for x in xvals) row = ">>> %7s |"%y @@ -124,10 +124,10 @@ def test_unroll(fname,xvars,yvars,cut="",verb=0): if bin<1 or bin>=nxbins*nybins+1: str = red(str) row += str - print row - print ">>>" + print(row) + print(">>>") - print ">>> Took %.2fs"%(time()-start) + print(">>> Took %.2fs"%(time()-start)) def draw(hist,xtitle,nbins=0): @@ -239,5 +239,5 @@ def main(): if __name__ == '__main__': main() - print ">>> Done" + print(">>> Done") diff --git a/Plotter/test/testUnwrapping.py b/Plotter/test/testUnwrapping.py index 40d8b7852..a3db06e3f 100755 --- a/Plotter/test/testUnwrapping.py +++ b/Plotter/test/testUnwrapping.py @@ -19,9 +19,9 @@ def colvar(string): def printIO(name,func,*args): - print colvar(">>> %s(%s) returns "%(name,','.join(repr(a) for a in args))) + print(colvar(">>> %s(%s) returns "%(name,','.join(repr(a) for a in args)))) for result in func(*args): - print ">>> ",colvar(repr(result)) + print(">>> ",colvar(repr(result))) def main(): @@ -57,7 +57,7 @@ def main(): printIO(name,func,xvar,selection) printIO(name,func,varlist1,selection) printIO(name,func,varlist2,selection) - print ">>> " + print(">>> ") # UNWRAP args to gethist2D LOG.header("unwrap_gethist2D_args") @@ -72,11 +72,11 @@ def main(): printIO(name,func,[(xvar,yvar)],selection) printIO(name,func,xvarlist1,yvarlist1,selection) printIO(name,func,xvarlist2,yvarlist2,selection) - printIO(name,func,zip(xvarlist1,yvarlist1),selection) - print ">>> " + printIO(name,func,list(zip(xvarlist1,yvarlist1)),selection) + print(">>> ") if __name__ == "__main__": main() - print + 
print() diff --git a/Plotter/test/testVariables.py b/Plotter/test/testVariables.py index 43dffe670..d329cf538 100755 --- a/Plotter/test/testVariables.py +++ b/Plotter/test/testVariables.py @@ -54,21 +54,21 @@ def main(args): for var in variables: LOG.header(var.name) - print ">>> string=%s, repr=%r"%(var,var) - print ">>> name='%s', filename='%s', title='%s'"%(color(var.name),color(var.filename),color(var.title)) - print ">>> (nbins,xmin,xmax)=(%s,%s,%s), bins=%s"%(var.nbins,var.xmin,var.xmax,var.bins) - print ">>> hasintbins=%s, hasvariablebins=%s"%(var.hasintbins(),var.hasvariablebins()) - print ">>> cut=%r, blindcuts=%r, blind(50,60)=%r"%(var.cut,var.blindcuts,var.blind(50,60)) + print(">>> string=%s, repr=%r"%(var,var)) + print(">>> name='%s', filename='%s', title='%s'"%(color(var.name),color(var.filename),color(var.title))) + print(">>> (nbins,xmin,xmax)=(%s,%s,%s), bins=%s"%(var.nbins,var.xmin,var.xmax,var.bins)) + print(">>> hasintbins=%s, hasvariablebins=%s"%(var.hasintbins(),var.hasvariablebins())) + print(">>> cut=%r, blindcuts=%r, blind(50,60)=%r"%(var.cut,var.blindcuts,var.blind(50,60))) hist = var.gethist() - print ">>> hist=%s, (nbins,xmin,xmax)=(%s,%s,%s), variable=%s"%( - hist,hist.GetXaxis().GetNbins(),hist.GetXaxis().GetXmin(),hist.GetXaxis().GetXmax(),hist.GetXaxis().IsVariableBinSize()) + print(">>> hist=%s, (nbins,xmin,xmax)=(%s,%s,%s), variable=%s"%( + hist,hist.GetXaxis().GetNbins(),hist.GetXaxis().GetXmin(),hist.GetXaxis().GetXmax(),hist.GetXaxis().IsVariableBinSize())) gDirectory.Delete(hist.GetName()) for sel in selections: # context-dependent attributes var.changecontext(sel) - print ">>> context: '%s'"%color(sel,'grey') - print ">>> plotfor=%s, name='%s', title='%s'"%(var.plotfor(sel),color(var.name),color(var.title)) - print ">>> (nbins,xmin,xmax)=(%s,%s,%s), bins=%s, cut=%r"%(var.nbins,var.xmin,var.xmax,var.bins,var.cut) - print + print(">>> context: '%s'"%color(sel,'grey')) + print(">>> plotfor=%s, name='%s', 
title='%s'"%(var.plotfor(sel),color(var.name),color(var.title))) + print(">>> (nbins,xmin,xmax)=(%s,%s,%s), bins=%s, cut=%r"%(var.nbins,var.xmin,var.xmax,var.bins,var.cut)) + print() if __name__ == "__main__": diff --git a/common/test/testLoadingBar.py b/common/test/testLoadingBar.py index 21c294e0d..e96b54188 100755 --- a/common/test/testLoadingBar.py +++ b/common/test/testLoadingBar.py @@ -86,7 +86,7 @@ def main(): parser.add_argument('-v', '--verbose', dest='verbosity', type=int, nargs='?', const=1, default=0, action='store', help="set verbosity" ) args = parser.parse_args() - print + print() main() - print + print() diff --git a/common/test/testTable.py b/common/test/testTable.py index 96a3366df..fc542aa9b 100755 --- a/common/test/testTable.py +++ b/common/test/testTable.py @@ -22,7 +22,7 @@ def printhist(nevts=10000,verb=0): table.printheader('ibin','content','error') for ibin in range(0,hist.GetXaxis().GetNbins()+1): table.printrow(ibin,hist.GetBinContent(ibin),hist.GetBinError(ibin)) - print + print() # SIMPLE TABLE print ">>> Table from hist" @@ -45,7 +45,7 @@ def main(args): table.printrow(1,3.0,9.0) table.printrow(1,4.0) # missing column table.printrow(1,5.0,25,-1) # surprise extra column - print + print() # SIMPLE TABLE print ">>> Simple table with custom width" @@ -56,7 +56,7 @@ def main(args): table.printrow(1,3.0,9.0) table.printrow(1,4.0) # missing column table.printrow(1,5.0,25,-1) # surprise extra column - print + print() # TABLE with extra symbols print ">>> Table with extra symbols" @@ -68,7 +68,7 @@ def main(args): table.printrow(1,16.0,2.8) # missing columns table.printrow(1,25.0) # missing column table.printrow(1,36.0,4.5,3.5,-1) # surprise extra column - print + print() # TABLE from LOG level = 2 @@ -78,7 +78,7 @@ def main(args): TAB.printrow(1,1.0,1.0) TAB.printrow(1,2.0,4.0) TAB.printrow(1,3.0,9.0) - print + print() # TABLE of histogram printhist(verb=verbosity) @@ -94,7 +94,7 @@ def main(args): help="set verbosity" ) args = 
parser.parse_args() LOG.verbosity = args.verbosity - print + print() main(args) - print + print() From 2e32d99c5e2d6095bf754f9715f0bb3bc1adc53e Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Thu, 8 Jun 2023 18:15:30 +0200 Subject: [PATCH 43/55] debugging; removing redundant parentheses --- PicoProducer/python/analysis/TestModule.py | 2 +- PicoProducer/python/analysis/utils.py | 6 +++--- PicoProducer/python/batch/SGE.py | 6 +++--- PicoProducer/python/batch/utils.py | 6 ++---- .../python/corrections/ElectronSFs.py | 2 +- .../python/corrections/MetTriggerSF.py | 2 +- PicoProducer/python/corrections/MuonSFs.py | 2 +- .../python/corrections/TrigObjMatcher.py | 21 +++++++++---------- 8 files changed, 22 insertions(+), 25 deletions(-) diff --git a/PicoProducer/python/analysis/TestModule.py b/PicoProducer/python/analysis/TestModule.py index 0956a53b7..50dfa60b8 100755 --- a/PicoProducer/python/analysis/TestModule.py +++ b/PicoProducer/python/analysis/TestModule.py @@ -84,5 +84,5 @@ def analyze(self, event): p = PostProcessor(outdir,infiles,cut=None,branchsel=None,maxEntries=maxevts, modules=modules,postfix=postfix,noOut=False) p.run() - print((">>> TestModule.py done after %.1f seconds"%(time.time()-time0))) + print(">>> TestModule.py done after %.1f seconds"%(time.time()-time0)) diff --git a/PicoProducer/python/analysis/utils.py b/PicoProducer/python/analysis/utils.py index 0dcd1b496..6226c4d0e 100644 --- a/PicoProducer/python/analysis/utils.py +++ b/PicoProducer/python/analysis/utils.py @@ -58,10 +58,10 @@ def ensurebranches(tree,branches): def redirectbranch(oldbranch,newbranch): """Redirect some branch names. 
newbranch -> oldbranch""" if isinstance(oldbranch,str): # rename - print(("redirectbranch: directing %r -> %r"%(newbranch,oldbranch))) + print("redirectbranch: directing %r -> %r"%(newbranch,oldbranch)) exec("setattr(Event,newbranch,property(lambda self: self._tree.readBranch(%r)))"%(oldbranch)) else: # set default value - print(("redirectbranch: directing %r -> %r"%(newbranch,oldbranch))) + print("redirectbranch: directing %r -> %r"%(newbranch,oldbranch)) exec("setattr(Event,newbranch,%s)"%(oldbranch)) @@ -254,7 +254,7 @@ def getmet(era,var="",useT1=False,verb=0): pt += '_'+var phi += '_'+var funcstr = "lambda e: TLorentzVector(e.%s*cos(e.%s),e.%s*sin(e.%s),0,e.%s)"%(pt,phi,pt,phi,pt) - if verb+2>=1: + if verb>=1: LOG.verb(">>> getmet: %r"%(funcstr)) return eval(funcstr) diff --git a/PicoProducer/python/batch/SGE.py b/PicoProducer/python/batch/SGE.py index 89523b0ef..1132ad110 100644 --- a/PicoProducer/python/batch/SGE.py +++ b/PicoProducer/python/batch/SGE.py @@ -80,20 +80,20 @@ def jobs(self,jobids,**kwargs): rows = self.execute(subcmd,verb=verbosity) jobs = JobList() if rows and self.verbosity>=1: - print((">>> %10s %10s %8s %8s %s"%('user','jobid','taskid','status','args'))) + print(">>> %10s %10s %8s %8s %s"%('user','jobid','taskid','status','args')) for row in rows.split('\n'): values = row.split() if len(values)<5: continue if verbosity>=3: - print((">>> job row: %s"%(row))) + print(">>> job row: %s"%(row)) user = values[0] jobid = values[1] taskid = values[2] status = self.statusdict.get(int(values[3]),'?') args = ' '.join(values[4:]) if self.verbosity>=1: - print((">>> %10s %10s %8s %8s %s"%(user,jobid,taskid,status,args))) + print(">>> %10s %10s %8s %8s %s"%(user,jobid,taskid,status,args)) job = Job(self,jobid,taskid=taskid,args=args,status=status) jobs.append(job) return jobs diff --git a/PicoProducer/python/batch/utils.py b/PicoProducer/python/batch/utils.py index 8d46429c7..88f27c50a 100644 --- a/PicoProducer/python/batch/utils.py +++ 
b/PicoProducer/python/batch/utils.py @@ -20,9 +20,7 @@ def guess_batch(): """Guess the batch system for a host.""" host = platform.node() batch = "HTCondor" - if 'lxplus' in host: - batch = "HTCondor" - elif "etp" in host: + if "etp" in host: batch = "HTCondor_KIT" ###elif "etp" in host: ### batch = "HTCondor_DESY" @@ -30,7 +28,7 @@ def guess_batch(): ### batch = "HTCondor_NAF" elif "t3" in host and "psi.ch" in host: batch = "SLURM" - return sedir + return batch def chunkify_by_evts(fnames,maxevts,evenly=True,evtdict=None,verb=0): diff --git a/PicoProducer/python/corrections/ElectronSFs.py b/PicoProducer/python/corrections/ElectronSFs.py index 707ef0df4..c094bdd83 100644 --- a/PicoProducer/python/corrections/ElectronSFs.py +++ b/PicoProducer/python/corrections/ElectronSFs.py @@ -5,7 +5,7 @@ # 2018: https://hypernews.cern.ch/HyperNews/CMS/get/higgstautau/1132.html import os from TauFW.PicoProducer import datadir -from .ScaleFactorTool import ScaleFactor, ScaleFactorHTT +from TauFW.PicoProducer.corrections.ScaleFactorTool import ScaleFactor, ScaleFactorHTT pathPOG = os.path.join(datadir,"lepton/EGammaPOG/") pathHTT = os.path.join(datadir,"lepton/HTT/Electron/") "UL2017/egammaEffi.txt_EGM2D_MVA90noIso_UL17.root", diff --git a/PicoProducer/python/corrections/MetTriggerSF.py b/PicoProducer/python/corrections/MetTriggerSF.py index 9775d4905..d34e232bf 100644 --- a/PicoProducer/python/corrections/MetTriggerSF.py +++ b/PicoProducer/python/corrections/MetTriggerSF.py @@ -13,7 +13,7 @@ def __init__(self,filename): self.histo = { } for label in self.labels: self.histo[label] = self.rootfile.Get(label) - print((label,self.histo[label])) + print(label,self.histo[label]) def getWeight(self,metnomu,mhtnomu): diff --git a/PicoProducer/python/corrections/MuonSFs.py b/PicoProducer/python/corrections/MuonSFs.py index 6279874c8..c3a5a146a 100644 --- a/PicoProducer/python/corrections/MuonSFs.py +++ b/PicoProducer/python/corrections/MuonSFs.py @@ -5,7 +5,7 @@ # 
https://twiki.cern.ch/twiki/bin/view/CMS/MuonLegacy2016 import os from TauFW.PicoProducer import datadir -from .ScaleFactorTool import ScaleFactor, ScaleFactorHTT +from TauFW.PicoProducer.corrections.ScaleFactorTool import ScaleFactor, ScaleFactorHTT pathPOG = os.path.join(datadir,"lepton/MuonPOG/") pathHTT = os.path.join(datadir,"lepton/HTT/Muon/") diff --git a/PicoProducer/python/corrections/TrigObjMatcher.py b/PicoProducer/python/corrections/TrigObjMatcher.py index 51e849869..1a100ab36 100644 --- a/PicoProducer/python/corrections/TrigObjMatcher.py +++ b/PicoProducer/python/corrections/TrigObjMatcher.py @@ -123,13 +123,12 @@ def __init__(self,path,filters,runrange=None,**kwargs): patheval = "e."+path if runrange: patheval = "e.run>=%d and e.run<=%d and %s"%(runrange[0],runrange[1],patheval) - self.filters = filters # list of trigger filters, one per leg - self.runrange = runrange # range of run for this trigger formatted as (first,last); for data only - self.path = path # human readable trigger combination - self.patheval = patheval # trigger evaluation per event 'e' - self.fireddef = "self.fired = lambda e: "+patheval # exact definition of 'fired' function - exec(self.fireddef, locals()) # method to check if trigger was fired for a given event - #self.fired = lambda e: any(e.p for p in self.paths) + self.filters = filters # list of trigger filters, one per leg + self.runrange = runrange # range of run for this trigger formatted as (first,last); for data only + self.path = path # human readable trigger combination + self.patheval = patheval # trigger evaluation per event 'e' + self.fireddef = "lambda e: "+patheval # exact definition of 'fired' function + self.fired = eval(self.fireddef) # method to check if trigger was fired for a given event def __repr__(self): """Returns string representation of Trigger object.""" @@ -264,8 +263,8 @@ def __init__(self,triggers,**kwargs): patheval += '('+trigger.patheval+')' else: patheval += trigger.patheval - path = 
patheval.replace("e.",'').replace(" or "," || ").replace(" and "," && ") - firedef = "self.fired = lambda e: "+patheval + path = patheval.replace("e.",'').replace(" or "," || ").replace(" and "," && ") + fireddef = "lambda e: "+patheval self.triggers = triggers # list of triggers self.nlegs = nlegs # number of legs = number of filters @@ -275,8 +274,8 @@ def __init__(self,triggers,**kwargs): self.bits = bits # bitwise 'OR'-combination of all filter bits self.path = path # human readable trigger combination self.patheval = patheval # trigger evaluation per event 'e' - self.fireddef = firedef # exact definition of 'fired' function - exec(self.fireddef, locals()) # method to check if any of the triggers was fired for a given event + self.fireddef = fireddef # exact definition of 'fired' function + self.fired = eval(fireddef) # method to check if any of the triggers was fired for a given event def __repr__(self): """Returns string representation of TriggerFilter object.""" From c6c6571c951b9d124ecbc1661af695aa121bc31e Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Thu, 8 Jun 2023 18:19:48 +0200 Subject: [PATCH 44/55] debug --- PicoProducer/python/batch/utils.py | 1 - 1 file changed, 1 deletion(-) diff --git a/PicoProducer/python/batch/utils.py b/PicoProducer/python/batch/utils.py index 88f27c50a..00eacedec 100644 --- a/PicoProducer/python/batch/utils.py +++ b/PicoProducer/python/batch/utils.py @@ -4,7 +4,6 @@ import importlib import platform from TauFW.common.tools.file import ensureTFile -import TauFW.PicoProducer.tools.config as GLOB from TauFW.PicoProducer.batch import moddir from TauFW.common.tools.log import Logger from TauFW.common.tools.file import ensurefile, ensureTFile From cbbb401fff1d8a2d270812dabb049980e51dfb4c Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Fri, 9 Jun 2023 18:12:40 +0200 Subject: [PATCH 45/55] debug: move _cfgdefaults initiation to help function to protect imports --- PicoProducer/python/pico/config.py | 4 +- 
PicoProducer/python/storage/utils.py | 3 +- PicoProducer/python/tools/config.py | 130 +++++++++++++++------------ 3 files changed, 76 insertions(+), 61 deletions(-) diff --git a/PicoProducer/python/pico/config.py b/PicoProducer/python/pico/config.py index 40fba6f1d..60e263dcc 100755 --- a/PicoProducer/python/pico/config.py +++ b/PicoProducer/python/pico/config.py @@ -245,6 +245,7 @@ def main_write(args): def main_set(args): """Set variables in the config file.""" + global CONFIG if args.verbosity>=1: print(">>> main_set", args) variable = args.variable @@ -266,7 +267,8 @@ def main_set(args): print('-'*80) if variable=='all': if 'default' in value: - GLOB.setdefaultconfig(verb=verb) + CONFIG = GLOB.setdefaultconfig(verb=args.verbosity) + CONFIG.write(backup=False) else: LOG.warning("Did not recognize value '%s'. Did you mean 'default'?"%(value)) elif variable in ['nfilesperjob','maxevtsperjob','ncores']: diff --git a/PicoProducer/python/storage/utils.py b/PicoProducer/python/storage/utils.py index 995b6268a..48ded5f7e 100644 --- a/PicoProducer/python/storage/utils.py +++ b/PicoProducer/python/storage/utils.py @@ -11,11 +11,11 @@ from ROOT import TFile LOG = Logger('Storage') host = platform.node() +user = getpass.getuser() def guess_sedir(): """Guess the storage element path for a given user and host.""" - user = getpass.getuser() sedir = "" if 'lxplus' in host: sedir = "/eos/user/%s/%s/"%(user[0],user) @@ -28,7 +28,6 @@ def guess_sedir(): def guess_tmpdirs(): """Guess the temporary directory for a given user and host.""" - user = getpass.getuser() tmphadddir = "/tmp/%s/"%(user) # temporary dir for creating intermediate hadd files tmpskimdir = "" # temporary dir for creating skimmed file before copying to outdir if 'lxplus' in host: diff --git a/PicoProducer/python/tools/config.py b/PicoProducer/python/tools/config.py index cca9367f6..8b87d34c2 100644 --- a/PicoProducer/python/tools/config.py +++ b/PicoProducer/python/tools/config.py @@ -9,58 +9,70 @@ from 
TauFW.PicoProducer import basedir from TauFW.common.tools.file import ensuredir, ensurefile from TauFW.common.tools.log import Logger, color, bold, header -from TauFW.PicoProducer.storage.utils import guess_sedir, guess_tmpdirs -from TauFW.PicoProducer.batch.utils import guess_batch # DEFAULTS -LOG = Logger('GLOB') -CONFIG = None -user = getpass.getuser() -host = platform.node() -#basedir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) -dtypes = ['mc','data','embed'] -_eras = OrderedDict([ - ('2016','samples_2016.py'), - ('2017','samples_2017.py'), - ('2018','samples_2018.py') -]) -_channels = OrderedDict([ - ('skim','skimjob.py'), - ('test','test.py'), - ('mutau','ModuleMuTauSimple') -]) -_sedir = guess_sedir() # guess storage element on current host -_tmpskimdir, _tmphadddir = guess_tmpdirs() # _tmphadddir: temporary dir for creating intermediate hadd files - # _tmpskimdir: temporary dir for creating skimmed file before copying to outdir -_jobdir = "output/$ERA/$CHANNEL/$SAMPLE" # for job config and log files -_outdir = _tmphadddir+_jobdir # for job output -_picodir = _sedir+"analysis/$ERA/$GROUP" # for storage of analysis ("pico") tuples after hadd -_nanodir = _sedir+"samples/nano/$ERA/$DAS" # for storage of (skimmed) nanoAOD -_filelistdir = "samples/files/$ERA/$SAMPLE.txt" # location to save list of files -_batchsystem = guess_batch() # batch system (HTCondor, SLURM, ...) 
-_queue = "" # batch queue / job flavor -_nfilesperjob = 1 # group files per job -_maxevtsperjob = -1 # maximum number of events per job (split large files) -_maxopenfiles = 500 # maximum number of open files during hadd -_ncores = 4 # number of cores for parallel event counting & validating of files -_cfgdefaults = OrderedDict([ # ordered dictionary with defaults - ('channels',_channels), ('eras',_eras), - ('basedir',basedir), - ('jobdir',_jobdir), ('outdir',_outdir), ('nanodir',_nanodir), ('picodir',_picodir), - ('tmpskimdir',_tmpskimdir), - ('batch',_batchsystem), ('queue',_queue), - ('nfilesperjob',_nfilesperjob), ('maxevtsperjob',_maxevtsperjob), - ('filelistdir',_filelistdir), - ('maxopenfiles',_maxopenfiles), ('haddcmd', "" ), # for pico.py hadd - ('ncores',_ncores), -]) +LOG = Logger('GLOB') +CONFIG = None +user = getpass.getuser() +host = platform.node() +dtypes = ['mc','data','embed'] sys.path.append(basedir) +_cfgdefaults = OrderedDict() # initiate once with getdefaultconfig +def getdefaultconfig(verb=0): + """Get default configuration dictionary. 
Initiate if it does not exist yet.""" + global _cfgdefaults, basedir + if _cfgdefaults: # initiate + LOG.verb(">>> getdefaultconfig: _cfgdefaults already initiated",verb,level=3) + else: + LOG.verb(">>> getdefaultconfig: Initiating _cfgdefaults...",verb,level=3) + from TauFW.PicoProducer.storage.utils import guess_sedir, guess_tmpdirs + from TauFW.PicoProducer.batch.utils import guess_batch + eras = OrderedDict([ + ('2016','samples_2016.py'), + ('2017','samples_2017.py'), + ('2018','samples_2018.py') + ]) + channels = OrderedDict([ + ('skim','skimjob.py'), + ('test','test.py'), + ('mutau','ModuleMuTauSimple') + ]) + sedir = guess_sedir() # guess storage element on current host + tmpskimdir, tmphadddir = guess_tmpdirs() # tmphadddir: temporary dir for creating intermediate hadd files + # tmpskimdir: temporary dir for creating skimmed file before copying to outdir + jobdir = "output/$ERA/$CHANNEL/$SAMPLE" # for job config and log files + outdir = tmphadddir+jobdir # for job output + picodir = sedir+"analysis/$ERA/$GROUP" # for storage of analysis ("pico") tuples after hadd + nanodir = sedir+"samples/nano/$ERA/$DAS" # for storage of (skimmed) nanoAOD + filelistdir = "samples/files/$ERA/$SAMPLE.txt" # location to save list of files + batchsystem = guess_batch() # batch system (HTCondor, SLURM, ...) + queue = "" # batch queue / job flavor + nfilesperjob = 1 # group files per job + maxevtsperjob = -1 # maximum number of events per job (split large files) + maxopenfiles = 500 # maximum number of open files during hadd + ncores = 4 # number of cores for parallel event counting & validating of files + haddcmd = "" # alternative command for hadd'ing, e.g. 
'python3 /.../.../haddnano.py' + _cfgdefaults = OrderedDict([ # ordered dictionary with defaults + ('channels',channels), ('eras',eras), + ('basedir',basedir), + ('jobdir',jobdir), ('outdir',outdir), ('nanodir',nanodir), ('picodir',picodir), + ('tmpskimdir',tmpskimdir), + ('batch',batchsystem), ('queue',queue), + ('nfilesperjob',nfilesperjob), ('maxevtsperjob',maxevtsperjob), + ('filelistdir',filelistdir), + ('maxopenfiles',maxopenfiles), ('haddcmd', haddcmd ), # for pico.py hadd + ('ncores',ncores), + ]) + return _cfgdefaults + + def getconfig(verb=0,refresh=False): """Get configuration from JSON file.""" - global _cfgdefaults, basedir, CONFIG + global basedir, CONFIG + cfgdefaults = getdefaultconfig(verb=verb) if CONFIG and not refresh: return CONFIG @@ -68,15 +80,15 @@ def getconfig(verb=0,refresh=False): cfgdir = ensuredir(basedir,"config") cfgname = os.path.join(cfgdir,"config.json") bkpname = os.path.join(cfgdir,"config.json.bkp") # back up to recover config if reset - cfgdict = _cfgdefaults.copy() - rqdstrs = [k for k,v in _cfgdefaults.items() if isinstance(v,basestring)] # required string type - rqddicts = [k for k,v in _cfgdefaults.items() if isinstance(v,dict)] # required dictionary type + cfgdict = cfgdefaults.copy() + rqdstrs = [k for k,v in cfgdefaults.items() if isinstance(v,basestring)] # required string type + rqddicts = [k for k,v in cfgdefaults.items() if isinstance(v,dict)] # required dictionary type # GET CONFIG if os.path.isfile(cfgname): with open(cfgname,'r') as file: cfgdict = json.load(file,object_pairs_hook=OrderedDict) - nmiss = len([0 for k in _cfgdefaults.keys() if k not in cfgdict]) # count missing keys + nmiss = len([0 for k in cfgdefaults.keys() if k not in cfgdict]) # count missing keys if nmiss>=5 and os.path.isfile(bkpname): # recover reset config file print(">>> Config file may have been reset. 
Opening backup %s..."%(bkpname)) with open(bkpname,'r') as file: @@ -87,10 +99,10 @@ def getconfig(verb=0,refresh=False): cfgdict[key] = bkpcfgdict[key] nmiss += 1 if nmiss>0: - for key in _cfgdefaults.keys(): # check for missing keys + for key in cfgdefaults.keys(): # check for missing keys if key not in cfgdict: - LOG.warning("Key '%s' not set in config file %s. Setting to default %r"%(key,os.path.relpath(cfgname),_cfgdefaults[key])) - cfgdict[key] = _cfgdefaults[key] + LOG.warning("Key '%s' not set in config file %s. Setting to default %r"%(key,os.path.relpath(cfgname),cfgdefaults[key])) + cfgdict[key] = cfgdefaults[key] nmiss += 1 print(">>> Saving updated keys...") with open(cfgname,'w') as file: @@ -124,16 +136,15 @@ def getconfig(verb=0,refresh=False): def setdefaultconfig(verb=0): """Set configuration to default values.""" - global _cfgdefaults, basedir, CONFIG - - # SETTING + global basedir, CONFIG + cfgdefaults = getdefaultconfig(verb=verb) cfgdir = ensuredir(basedir,"config") cfgname = os.path.join(cfgdir,"config.json") - cfgdict = _cfgdefaults.copy() + cfgdict = cfgdefaults.copy() if os.path.isfile(cfgname): LOG.warning("Config file '%s' already exists. 
Overwriting with defaults..."%(cfgname)) CONFIG = Config(cfgdict,cfgname) - CONFIG.write(backup=False) + CONFIG.write(backup=False,verb=verb) return CONFIG @@ -223,13 +234,16 @@ def get(self,*args,**kwargs): def pop(self,*args,**kwargs): return self._dict.pop(*args,**kwargs) - def write(self,path=None,backup=False): + def write(self,path=None,backup=False,verb=0): if path==None: path = self._path + LOG.verb(">>> Config.write: Writing %r (backup=%r)"%(path,backup),verb,3) with open(path,'w') as outfile: json.dump(self._dict,outfile,indent=2) if backup: # backup to recover if config was reset - with open(path+'.bkp','w') as outfile: + pathbkp = path+'.bkp' + LOG.verb(">>> Config.write: Making back up in %r"%(pathbkp),verb,2) + with open(pathbkp,'w') as outfile: json.dump(self._dict,outfile,indent=2) return path From 0df4dff90eec666ea153ec77c076573e08482768 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Fri, 9 Jun 2023 18:34:43 +0200 Subject: [PATCH 46/55] improve print out --- PicoProducer/python/pico/config.py | 10 ++++++---- PicoProducer/python/pico/run.py | 2 +- common/python/tools/file.py | 2 +- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/PicoProducer/python/pico/config.py b/PicoProducer/python/pico/config.py index 60e263dcc..77452e6d1 100755 --- a/PicoProducer/python/pico/config.py +++ b/PicoProducer/python/pico/config.py @@ -2,7 +2,7 @@ from past.builtins import basestring # for python2 compatibility import os, glob, json from TauFW.common.tools.file import ensurefile, ensureinit -from TauFW.common.tools.string import repkey, rreplace +from TauFW.common.tools.string import repkey, rreplace, lreplace from TauFW.PicoProducer.analysis.utils import ensuremodule from TauFW.PicoProducer.storage.utils import getsamples from TauFW.PicoProducer.pico.common import * @@ -326,15 +326,17 @@ def main_link(args): module = module.split('python/analysis/')[-1].replace('/','.') module = rreplace(module,'.py') path = 
os.path.join('python/analysis/','/'.join(module.split('.')[:-1])) - ensureinit(path,by="pico.py") - ensuremodule(module) + ensureinit(path,by="pico.py") # ensure an __init__.py exists in path + modobj = ensuremodule(module) + modpath = lreplace(os.path.relpath(modobj.__file__),"../../../python/TauFW/PicoProducer/") + print(">>> Linked to %s"%(modpath)) value = ' '.join([module]+parts[1:]) elif varkey=='eras': if 'samples/' in value: # useful for tab completion value = ''.join(value.split('samples/')[1:]) path = os.path.join("samples",repkey(value,ERA='*',CHANNEL='*',TAG='*')) LOG.insist(glob.glob(path),"Did not find any sample lists '%s'"%(path)) - ensureinit(os.path.dirname(path),by="pico.py") + ensureinit(os.path.dirname(path),by="pico.py") # ensure an __init__.py exists in path if value!=oldval: print(">>> Converted '%s' to '%s'"%(oldval,value)) CONFIG[varkey][key] = value diff --git a/PicoProducer/python/pico/run.py b/PicoProducer/python/pico/run.py index c4eec0450..6b2323baf 100755 --- a/PicoProducer/python/pico/run.py +++ b/PicoProducer/python/pico/run.py @@ -170,7 +170,7 @@ def main_run(args): print() - + ################## # GET MODULE # ################## diff --git a/common/python/tools/file.py b/common/python/tools/file.py index be4f4f1f3..907c90996 100644 --- a/common/python/tools/file.py +++ b/common/python/tools/file.py @@ -154,7 +154,7 @@ def ensureTDirectory(file,dirname,cd=True,verb=0): def ensureinit(*paths,**kwargs): - """Ensure an __init__.py exists, other wise, create one.""" + """Check if an __init__.py exists. 
Create one if it does not exist.""" init = os.path.join(os.path.join(*paths),'__init__.py') script = kwargs.get('by',"") if not os.path.isfile(init): From 489dcbb8acbdaf36de52c165c8c1e2ee2d2ebbfe Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Fri, 9 Jun 2023 18:36:59 +0200 Subject: [PATCH 47/55] print() -> print('') --- PicoProducer/data/btag/getBTagEfficiencies.py | 2 +- PicoProducer/data/pileup/getPileupProfiles.py | 2 +- PicoProducer/python/pico/job.py | 6 +++--- PicoProducer/python/pico/run.py | 2 +- PicoProducer/python/storage/das.py | 2 +- PicoProducer/python/storage/utils.py | 2 +- PicoProducer/test/testSFs.py | 4 ++-- PicoProducer/utils/compareNano.py | 4 ++-- PicoProducer/utils/comparePico.py | 2 +- Plotter/test/testLegend.py | 2 +- Plotter/test/testMultiThread.py | 12 ++++++------ Plotter/test/testPlot.py | 2 +- Plotter/test/testSelection.py | 2 +- Plotter/test/testStack.py | 2 +- Plotter/test/testUnwrapping.py | 2 +- Plotter/test/testVariables.py | 2 +- common/test/testLoadingBar.py | 4 ++-- common/test/testTable.py | 14 +++++++------- 18 files changed, 34 insertions(+), 34 deletions(-) diff --git a/PicoProducer/data/btag/getBTagEfficiencies.py b/PicoProducer/data/btag/getBTagEfficiencies.py index 0adba478e..fdb56d7c0 100755 --- a/PicoProducer/data/btag/getBTagEfficiencies.py +++ b/PicoProducer/data/btag/getBTagEfficiencies.py @@ -432,7 +432,7 @@ def main(): if __name__ == '__main__': - print() + print('') main() print ">>> done\n" diff --git a/PicoProducer/data/pileup/getPileupProfiles.py b/PicoProducer/data/pileup/getPileupProfiles.py index 4741cc5c4..3c8d46069 100755 --- a/PicoProducer/data/pileup/getPileupProfiles.py +++ b/PicoProducer/data/pileup/getPileupProfiles.py @@ -590,7 +590,7 @@ def main(args): parser.add_argument('-v', '--verbose', dest='verbosity', type=int, nargs='?', const=1, default=0, help="set verbosity" ) args = parser.parse_args() - print() + print('') main(args) print ">>> Done!\n" diff --git a/PicoProducer/python/pico/job.py 
b/PicoProducer/python/pico/job.py index 64056d42a..b03ae9422 100755 --- a/PicoProducer/python/pico/job.py +++ b/PicoProducer/python/pico/job.py @@ -333,7 +333,7 @@ def preparejobs(args): # YIELD yield jobcfg - print() + print('') if not found: print_no_samples(dtypes,filters,vetoes,[channel],jobdir_,jobcfgs) @@ -908,7 +908,7 @@ def main_status(args): print(">>> Found samples: "+", ".join(repr(s.name) for s in samples)) if subcmd in ['hadd','haddclean'] and 'skim' in channel.lower(): LOG.warning("Hadding into one file not available for skimming...") - print() + print('') continue # SAMPLE over SAMPLES @@ -1008,7 +1008,7 @@ def main_status(args): checkchunks(sample,channel=channel_,tag=tag,jobs=jobs,showlogs=showlogs,checkqueue=checkqueue, checkevts=checkevts,das=checkdas,ncores=ncores,verb=verbosity) - print() + print('') if not found: print_no_samples(dtypes,filters,vetoes,[channel],jobdir_,jobcfgs) diff --git a/PicoProducer/python/pico/run.py b/PicoProducer/python/pico/run.py index 6b2323baf..bcd366ddb 100755 --- a/PicoProducer/python/pico/run.py +++ b/PicoProducer/python/pico/run.py @@ -167,7 +167,7 @@ def main_run(args): if not dryrun: #execute(runcmd,dry=dryrun,verb=verbosity+1) # real-time print out does not work well with python script os.system(runcmd) - print() + print('') diff --git a/PicoProducer/python/storage/das.py b/PicoProducer/python/storage/das.py index 91fc3ff40..649574d96 100644 --- a/PicoProducer/python/storage/das.py +++ b/PicoProducer/python/storage/das.py @@ -21,7 +21,7 @@ def dasgoclient(query,**kwargs): LOG.verb(repr(dascmd),verbosity) cmdout = execute(dascmd,verb=verbosity-1) except CalledProcessError as e: - print() + print('') LOG.error("Failed to call 'dasgoclient' command. Please make sure:\n" " 1) 'dasgoclient' command exists.\n" " 2) You have a valid VOMS proxy. 
Use 'voms-proxy-init -voms cms -valid 200:0' or 'source utils/setupVOMS.sh'.\n" diff --git a/PicoProducer/python/storage/utils.py b/PicoProducer/python/storage/utils.py index 48ded5f7e..c27116222 100644 --- a/PicoProducer/python/storage/utils.py +++ b/PicoProducer/python/storage/utils.py @@ -251,5 +251,5 @@ def print_no_samples(dtype=[],filter=[],veto=[],channel=[],jobdir="",jobcfgs="") strings.append("channel%s %s"%('s' if len(channel)>1 else "",quotestrs(channel))) string += " with "+', '.join(strings) print(string) - print() + print('') diff --git a/PicoProducer/test/testSFs.py b/PicoProducer/test/testSFs.py index aafaa8f70..3cb66db11 100755 --- a/PicoProducer/test/testSFs.py +++ b/PicoProducer/test/testSFs.py @@ -2,7 +2,7 @@ # Author: Izaak Neutelings (December 2018) import time start0 = time.time() -print() +print('') print(">>> importing modules...") from TauFW.common.tools.log import Logger LOG = Logger("testSF") @@ -230,4 +230,4 @@ def pileupSFs(era='UL2017'): pileupSFs('UL2017') print(">>> ") print(">>> done after %.1f seconds"%(time.time()-start0)) - print() + print('') diff --git a/PicoProducer/utils/compareNano.py b/PicoProducer/utils/compareNano.py index bcd7794b2..ad69468d8 100755 --- a/PicoProducer/utils/compareNano.py +++ b/PicoProducer/utils/compareNano.py @@ -74,7 +74,7 @@ def comparefiles(fnamesets,vars,**kwargs): plot.drawtext(text) plot.saveas(fname) plot.close() - print() + print('') #for tree in trees: # tree.Close() @@ -141,7 +141,7 @@ def comparevars(fnames,varsets,**kwargs): plot.drawtext(text) plot.saveas(fname) plot.close() - print() + print('') def main(args): diff --git a/PicoProducer/utils/comparePico.py b/PicoProducer/utils/comparePico.py index c11d642e9..45ad4bdca 100755 --- a/PicoProducer/utils/comparePico.py +++ b/PicoProducer/utils/comparePico.py @@ -76,7 +76,7 @@ def compare(fnames,variables,**kwargs): plot.drawtext(text) plot.saveas(fname) plot.close() - print() + print('') def main(args): diff --git 
a/Plotter/test/testLegend.py b/Plotter/test/testLegend.py index bed046d4b..b6cced090 100755 --- a/Plotter/test/testLegend.py +++ b/Plotter/test/testLegend.py @@ -37,7 +37,7 @@ def plothist(xtitle,hists,ratio=False,logy=False,norm=False,tag="",**kwargs): plot.saveas(fname+".png") #plot.saveas(fname+".pdf") plot.close(keep=False) - print() + print('') def testposition(): diff --git a/Plotter/test/testMultiThread.py b/Plotter/test/testMultiThread.py index bb97dff12..e5492331c 100755 --- a/Plotter/test/testMultiThread.py +++ b/Plotter/test/testMultiThread.py @@ -71,7 +71,7 @@ def testProcess(N=5): for thread in threads: result = thread.join() print(">>> Took %.1f seconds"%(time.time()-start)) - print() + print('') def testMultiProcessor(N=5): @@ -95,7 +95,7 @@ def testMultiProcessor(N=5): result = process.join() # wait for processes to end print(">>> foo returns:", result) print(">>> Took %.1f seconds"%(time.time()-start)) - print() + print('') def testMultiProcessorWithDraw(N=5): @@ -120,7 +120,7 @@ def testMultiProcessorWithDraw(N=5): result = process.join() # wait for processes to end print(">>> draw returns:", result) print(">>> Took %.1f seconds"%(time.time()-start)) - print() + print('') def testThread(N=5): @@ -146,7 +146,7 @@ def testThread(N=5): result = thread.join() print(">>> %s done, foo returns: %s"%(thread.name,result)) print(">>> Took %.1f seconds"%(time.time()-start)) - print() + print('') def testThreadWithDraw(N=5): @@ -174,7 +174,7 @@ def testThreadWithDraw(N=5): thread.join() print(">>> %s done"%(thread.name)) print("Took %.1f seconds"%(time.time()-start)) - print() + print('') @@ -205,7 +205,7 @@ def testThreadWithSharedTFile(N=5): thread.join() print(">>> %s done"%(thread.name)) print("Took %.1f seconds"%(time.time()-start)) - print() + print('') def main(): diff --git a/Plotter/test/testPlot.py b/Plotter/test/testPlot.py index 6ca7b2539..6e3961ad6 100755 --- a/Plotter/test/testPlot.py +++ b/Plotter/test/testPlot.py @@ -41,7 +41,7 @@ def 
plothist(xtitle,hists,ratio=False,logy=False,norm=False,cwidth=None): #plot.saveas(fname+".png",fname+".C") #plot.saveas(fname,ext=['png','pdf']) plot.close() - print() + print('') def createhists(nhist=3): diff --git a/Plotter/test/testSelection.py b/Plotter/test/testSelection.py index 0c82c9395..fd249000b 100755 --- a/Plotter/test/testSelection.py +++ b/Plotter/test/testSelection.py @@ -48,5 +48,5 @@ def main(): if __name__ == "__main__": main() - print() + print('') diff --git a/Plotter/test/testStack.py b/Plotter/test/testStack.py index 3dd46332a..3b83c90a0 100755 --- a/Plotter/test/testStack.py +++ b/Plotter/test/testStack.py @@ -46,7 +46,7 @@ def plotstack(xname,xtitle,datahist,exphists,ratio=False,logy=False,fraction=Fal #plot.saveas(fname+".png",fname+".C") #plot.saveas(fname,ext=['png','pdf']) plot.close() - print() + print('') def createhists(procs,binning,nevts): diff --git a/Plotter/test/testUnwrapping.py b/Plotter/test/testUnwrapping.py index a3db06e3f..f18c7f142 100755 --- a/Plotter/test/testUnwrapping.py +++ b/Plotter/test/testUnwrapping.py @@ -78,5 +78,5 @@ def main(): if __name__ == "__main__": main() - print() + print('') diff --git a/Plotter/test/testVariables.py b/Plotter/test/testVariables.py index d329cf538..34324aba9 100755 --- a/Plotter/test/testVariables.py +++ b/Plotter/test/testVariables.py @@ -68,7 +68,7 @@ def main(args): print(">>> context: '%s'"%color(sel,'grey')) print(">>> plotfor=%s, name='%s', title='%s'"%(var.plotfor(sel),color(var.name),color(var.title))) print(">>> (nbins,xmin,xmax)=(%s,%s,%s), bins=%s, cut=%r"%(var.nbins,var.xmin,var.xmax,var.bins,var.cut)) - print() + print('') if __name__ == "__main__": diff --git a/common/test/testLoadingBar.py b/common/test/testLoadingBar.py index e96b54188..651f98217 100755 --- a/common/test/testLoadingBar.py +++ b/common/test/testLoadingBar.py @@ -86,7 +86,7 @@ def main(): parser.add_argument('-v', '--verbose', dest='verbosity', type=int, nargs='?', const=1, default=0, action='store', 
help="set verbosity" ) args = parser.parse_args() - print() + print('') main() - print() + print('') diff --git a/common/test/testTable.py b/common/test/testTable.py index fc542aa9b..fc0293222 100755 --- a/common/test/testTable.py +++ b/common/test/testTable.py @@ -22,7 +22,7 @@ def printhist(nevts=10000,verb=0): table.printheader('ibin','content','error') for ibin in range(0,hist.GetXaxis().GetNbins()+1): table.printrow(ibin,hist.GetBinContent(ibin),hist.GetBinError(ibin)) - print() + print('') # SIMPLE TABLE print ">>> Table from hist" @@ -45,7 +45,7 @@ def main(args): table.printrow(1,3.0,9.0) table.printrow(1,4.0) # missing column table.printrow(1,5.0,25,-1) # surprise extra column - print() + print('') # SIMPLE TABLE print ">>> Simple table with custom width" @@ -56,7 +56,7 @@ def main(args): table.printrow(1,3.0,9.0) table.printrow(1,4.0) # missing column table.printrow(1,5.0,25,-1) # surprise extra column - print() + print('') # TABLE with extra symbols print ">>> Table with extra symbols" @@ -68,7 +68,7 @@ def main(args): table.printrow(1,16.0,2.8) # missing columns table.printrow(1,25.0) # missing column table.printrow(1,36.0,4.5,3.5,-1) # surprise extra column - print() + print('') # TABLE from LOG level = 2 @@ -78,7 +78,7 @@ def main(args): TAB.printrow(1,1.0,1.0) TAB.printrow(1,2.0,4.0) TAB.printrow(1,3.0,9.0) - print() + print('') # TABLE of histogram printhist(verb=verbosity) @@ -94,7 +94,7 @@ def main(args): help="set verbosity" ) args = parser.parse_args() LOG.verbosity = args.verbosity - print() + print('') main(args) - print() + print('') From 89bd349a4caec2b3ec803d04b09ecf93e119c248 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Mon, 12 Jun 2023 12:08:27 +0200 Subject: [PATCH 48/55] make python3 compatible; remove hardcoded genweight fix for deprecated Summer19UL W+jets samples --- Fitter/TauES/correctionlib/tau_tes.py | 2 +- Fitter/paper/writecards.py | 2 +- PicoProducer/python/analysis/ModuleTauPair.py | 3 --- 
PicoProducer/python/storage/Sample.py | 2 +- 4 files changed, 3 insertions(+), 6 deletions(-) diff --git a/Fitter/TauES/correctionlib/tau_tes.py b/Fitter/TauES/correctionlib/tau_tes.py index 720b91e1c..9e745db50 100755 --- a/Fitter/TauES/correctionlib/tau_tes.py +++ b/Fitter/TauES/correctionlib/tau_tes.py @@ -366,5 +366,5 @@ def main(): if __name__ == '__main__': main() - print() + print('') diff --git a/Fitter/paper/writecards.py b/Fitter/paper/writecards.py index 60be4ab58..a2aadc9d0 100755 --- a/Fitter/paper/writecards.py +++ b/Fitter/paper/writecards.py @@ -240,7 +240,7 @@ def main(): if __name__ == '__main__': - print() + print('') main() print ">>>\n>>> Done harvesting\n" diff --git a/PicoProducer/python/analysis/ModuleTauPair.py b/PicoProducer/python/analysis/ModuleTauPair.py index 057d969b1..4de7fb0c8 100644 --- a/PicoProducer/python/analysis/ModuleTauPair.py +++ b/PicoProducer/python/analysis/ModuleTauPair.py @@ -173,9 +173,6 @@ def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree): ensurebranches(inputTree,branchesV10) else: #v9 ensurebranches(inputTree,branches) # make sure Event object has these branches - - if self.ismc and re.search(r"W[1-5]?JetsToLNu",inputFile.GetName()): # fix genweight bug in Summer19 - redirectbranch(1.,"genWeight") # replace Events.genWeight with single 1.0 value def fillhists(self,event): diff --git a/PicoProducer/python/storage/Sample.py b/PicoProducer/python/storage/Sample.py index 44557cc33..95e3dabd2 100644 --- a/PicoProducer/python/storage/Sample.py +++ b/PicoProducer/python/storage/Sample.py @@ -215,7 +215,7 @@ def filterpath(self,filter=[],veto=[],copy=False,verb=0): if copy: sample = deepcopy(self) sample.paths = paths - for path in list(sample.pathfiles.keys()): + for path in sample.pathfiles.keys(): if path not in paths: sample.pathfiles.pop(path) return sample From 3e67aa9d28ebb9652c39cad3b19cb626b12b4e9b Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Mon, 12 Jun 2023 14:30:58 +0200 Subject: [PATCH 
49/55] improve instructions --- README.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5fb012967..9e00e7f6e 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,8 @@ Framework for tau analysis using NanoAOD at CMS. Three main packages are 3. [`Fitter`](Fitter): Tools for measurements and fits in combine. [Under development.] ## Installation + +### CMSSW environment First, setup a CMSSW release, for example,
@@ -16,7 +18,7 @@ First, setup a CMSSW release, for example, ```bash export CMSSW=CMSSW_12_4_8 -export SCRAM_ARCH=el8_amd64_gcc10 +export SCRAM_ARCH=slc7_amd64_gcc10 cmsrel $CMSSW cd $CMSSW/src cmsenv @@ -35,10 +37,14 @@ cmsenv
+On a Linux 8 node like `lxplus8`, you can use the `el8_amd64_gcc10` architecture instead of `slc7_*`. + Which CMSSW version should matter for post-processing of nanoAOD, but if you like to use Combine in the same repository, it is better to use at least the [recommended version](https://cms-analysis.github.io/HiggsAnalysis-CombinedLimit/#setting-up-the-environment-and-installation). -Then, install `TauFW`: + +### TauFW +To install `TauFW`: ``` cd $CMSSW_BASE/src/ git clone https://github.com/cms-tau-pog/TauFW TauFW From 995e3b7a95529d2bd320d82193c28cf04f071737 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Mon, 12 Jun 2023 16:49:28 +0200 Subject: [PATCH 50/55] update CMSSW 10 -> 11 for python2 --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 9e00e7f6e..447e9a92d 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ Framework for tau analysis using NanoAOD at CMS. Three main packages are First, setup a CMSSW release, for example, - +
nanoAODv10 older versions nanoAODv10 (`python3`) Older versions (`python2`)
@@ -27,8 +27,8 @@ cmsenv ```bash -export CMSSW=CMSSW_10_6_13 -export SCRAM_ARCH=slc7_amd64_gcc700 +export CMSSW=CMSSW_11_3_4 +export SCRAM_ARCH=slc7_amd64_gcc900 cmsrel $CMSSW cd $CMSSW/src cmsenv From 168256d3a61639fb27ccceb060ed601d15397874 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Mon, 12 Jun 2023 16:56:00 +0200 Subject: [PATCH 51/55] improve instructions --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 447e9a92d..c092a02cd 100644 --- a/README.md +++ b/README.md @@ -39,9 +39,9 @@ cmsenv On a Linux 8 node like `lxplus8`, you can use the `el8_amd64_gcc10` architecture instead of `slc7_*`. -Which CMSSW version should matter for post-processing of nanoAOD, -but if you like to use Combine in the same repository, -it is better to use at least the [recommended version](https://cms-analysis.github.io/HiggsAnalysis-CombinedLimit/#setting-up-the-environment-and-installation). +Which CMSSW version should not really matter for post-processing of nanoAOD, +but if you like to use Combine in the same repository, it is better to use at least the +[recommended version](https://cms-analysis.github.io/HiggsAnalysis-CombinedLimit/#setting-up-the-environment-and-installation). ### TauFW To install `TauFW`: From fe15a53aa2483c58d17e631d5e39d76a094a217c Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Mon, 12 Jun 2023 17:19:34 +0200 Subject: [PATCH 52/55] add table of content --- README.md | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index c092a02cd..3a8cfbc93 100644 --- a/README.md +++ b/README.md @@ -7,11 +7,18 @@ Framework for tau analysis using NanoAOD at CMS. Three main packages are ## Installation +### Table of Contents +* [CMSSW environment](#CMSSW-environment)
+* [TauFW](#TauFW-1)
+* [PicoProducer](#PicoProducer)
+* [Combine](#Combine)
+* [TauID](#TauPOG-corrections)
+ ### CMSSW environment First, setup a CMSSW release, for example, - +
nanoAODv10 (`python3`) Older versions (`python2`) nanoAODv10 (python3) Older versions (python2)
@@ -39,9 +46,10 @@ cmsenv On a Linux 8 node like `lxplus8`, you can use the `el8_amd64_gcc10` architecture instead of `slc7_*`. -Which CMSSW version should not really matter for post-processing of nanoAOD, -but if you like to use Combine in the same repository, it is better to use at least the -[recommended version](https://cms-analysis.github.io/HiggsAnalysis-CombinedLimit/#setting-up-the-environment-and-installation). +Which CMSSW release should not really matter for post-processing of nanoAOD, +but if you like to use `combine` in the same repository, it is better to use at least the +[recommended release for the latest Combine version](https://cms-analysis.github.io/HiggsAnalysis-CombinedLimit/#setting-up-the-environment-and-installation), +see below. ### TauFW To install `TauFW`: @@ -72,9 +80,10 @@ cmsenv scram b -j4 ``` -### Fitter and Combine tools -If you want to use the `Combine` tools in `Fitter`, install -[`Combine`](https://cms-analysis.github.io/HiggsAnalysis-CombinedLimit/#setting-up-the-environment-and-installation), +### Combine +If you want to use the `combine` tools in `Fitter`, install combine following the +[latest instructions](https://cms-analysis.github.io/HiggsAnalysis-CombinedLimit/#setting-up-the-environment-and-installation), +for example ``` cd $CMSSW_BASE/src git clone https://github.com/cms-analysis/HiggsAnalysis-CombinedLimit.git HiggsAnalysis/CombinedLimit @@ -91,15 +100,14 @@ scramv1 b clean; scramv1 b git checkout v2.0.0 # for CMSSW_11_X only ``` -### TauID Scale Factor (SF) JSON and ROOT files creation -To create JSON files for +### TauPOG corrections +For TauPOG-internal work: To create JSON files with TauPOG corrections for [`correctionlib`](https://github.com/cms-nanoAOD/correctionlib), please follow the instructions [here](https://gitlab.cern.ch/cms-tau-pog/jsonpog-integration/-/blob/TauPOG_v2/POG/TAU/README4UPDATES.md). From at least `CMSSW_11_3_X`, `correctionlib` should be pre-installed. 
- -To create ROOT files including the measured SFs please install [`TauIDSFs` tool](https://github.com/cms-tau-pog/TauFW/#picoproducer) as illustrated above. +To create ROOT files including the measured SFs please install [`TauIDSFs` tool](https://github.com/cms-tau-pog/TauIDSFs). Modify the `TauIDSFs/utils/createSFFiles.py` script to include your measured SFs into the script. Finally, run the `TauFW/scripts/tau_createROOT.sh` to generate your ROOT files. They will be created into `TauFW/scripts/data/` IMPORTANT: please comment and do not delete older SFs From d8005c9932a6ec332a0b29907a67be80e2656de0 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Mon, 12 Jun 2023 17:56:31 +0200 Subject: [PATCH 53/55] maxtau<-1 -> maxtau!=None --- PicoProducer/python/analysis/ModuleEMu.py | 2 +- PicoProducer/python/analysis/ModuleMuMu.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/PicoProducer/python/analysis/ModuleEMu.py b/PicoProducer/python/analysis/ModuleEMu.py index edb7de9c3..c00673faa 100644 --- a/PicoProducer/python/analysis/ModuleEMu.py +++ b/PicoProducer/python/analysis/ModuleEMu.py @@ -188,7 +188,7 @@ def analyze(self, event): #if ord(tau.idDeepTau2017v2p1VSmu)<1: continue # VLoose maxtau = tau ptmax = tau.pt - if maxtau>-1: + if maxtau!=None: self.out.pt_3[0] = maxtau.pt self.out.eta_3[0] = maxtau.eta self.out.m_3[0] = maxtau.mass diff --git a/PicoProducer/python/analysis/ModuleMuMu.py b/PicoProducer/python/analysis/ModuleMuMu.py index b1063b12f..1d01d27d3 100644 --- a/PicoProducer/python/analysis/ModuleMuMu.py +++ b/PicoProducer/python/analysis/ModuleMuMu.py @@ -180,7 +180,7 @@ def analyze(self, event): #if ord(tau.idDeepTau2017v2p1VSmu)<1: continue # VLoose maxtau = tau ptmax = tau.pt - if maxtau>-1: + if maxtau!=None: self.out.pt_3[0] = maxtau.pt self.out.eta_3[0] = maxtau.eta self.out.m_3[0] = maxtau.mass From b990042070979a1268c8cb1feac6df35f3a8e5c1 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Mon, 12 Jun 2023 18:10:02 +0200 Subject: [PATCH 
54/55] disable DeepTau2018v2p5 --- PicoProducer/python/analysis/ModuleMuMu.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/PicoProducer/python/analysis/ModuleMuMu.py b/PicoProducer/python/analysis/ModuleMuMu.py index 1d01d27d3..8c3f50c10 100644 --- a/PicoProducer/python/analysis/ModuleMuMu.py +++ b/PicoProducer/python/analysis/ModuleMuMu.py @@ -195,10 +195,9 @@ def analyze(self, event): self.out.idDeepTau2017v2p1VSe_3[0] = maxtau.idDeepTau2017v2p1VSe self.out.idDeepTau2017v2p1VSmu_3[0] = maxtau.idDeepTau2017v2p1VSmu self.out.idDeepTau2017v2p1VSjet_3[0] = maxtau.idDeepTau2017v2p1VSjet - - self.out.idDeepTau2018v2p5VSe_3[0] = maxtau.idDeepTau2018v2p5VSe - self.out.idDeepTau2018v2p5VSmu_3[0] = maxtau.idDeepTau2018v2p5VSmu - self.out.idDeepTau2018v2p5VSjet_3[0] = maxtau.idDeepTau2018v2p5VSjet + #self.out.idDeepTau2018v2p5VSe_3[0] = maxtau.idDeepTau2018v2p5VSe + #self.out.idDeepTau2018v2p5VSmu_3[0] = maxtau.idDeepTau2018v2p5VSmu + #self.out.idDeepTau2018v2p5VSjet_3[0] = maxtau.idDeepTau2018v2p5VSjet if self.ismc: self.out.jpt_match_3[0], self.out.jpt_genmatch_3[0] = matchtaujet(event,maxtau,self.ismc) self.out.genmatch_3[0] = maxtau.genPartFlav From 590347a388c93a941ce922727f0347398ef03a89 Mon Sep 17 00:00:00 2001 From: IzaakWN Date: Wed, 14 Jun 2023 11:21:35 +0200 Subject: [PATCH 55/55] tweaks documentation --- Fitter/python/HighPT/utilsHighPT.py | 6 +++--- PicoProducer/python/analysis/TreeProducer.py | 2 ++ README.md | 6 +++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/Fitter/python/HighPT/utilsHighPT.py b/Fitter/python/HighPT/utilsHighPT.py index 5fa2c8c45..4d713a40d 100644 --- a/Fitter/python/HighPT/utilsHighPT.py +++ b/Fitter/python/HighPT/utilsHighPT.py @@ -276,7 +276,7 @@ def extractBinLabels(pt,ptratio): # Run over set of samples and create histogram def RunSamples(samples,var,weight,cut,xbins,name): - print() + print('') print("Running",name,var,weight,cut) nbins = len(xbins)-1 hist = 
ROOT.TH1D(name,"",nbins,array('d',list(xbins))) @@ -289,7 +289,7 @@ def RunSamples(samples,var,weight,cut,xbins,name): # Run over set of samples and create histograms for W*->tau+v channel # for each sample loop over Tree entries is performed def RunSamplesTauNu(samples,var,unc,xbins,selection,name): - print() + print('') print("Running",name,var,unc,selection) nbins = len(xbins)-1 hists = {} # discionary of histograms @@ -418,7 +418,7 @@ def __init__(self,**kwargs): class FakeFactorHighPt: def __init__(self,filename): - print() + print('') print('Loading fake factors from file',filename," >>>>>") self.fileName = filename self.fileFF = ROOT.TFile(self.fileName,"READ") diff --git a/PicoProducer/python/analysis/TreeProducer.py b/PicoProducer/python/analysis/TreeProducer.py index 62869f82f..70c46b288 100644 --- a/PicoProducer/python/analysis/TreeProducer.py +++ b/PicoProducer/python/analysis/TreeProducer.py @@ -140,6 +140,8 @@ def addBranch(self, name, dtype='f', default=None, title=None, arrname=None, **k return branch def setAlias(self,newbranch,oldbranch): + """Set an alias for a variable or mathematical expression of the other branches.""" + # https://root.cern.ch/doc/master/classTTree.html#a7c505db0d8ed56b5581e683375eb78e1 if self.verbosity>=1: print(">>> TreeProducer.setAlias: %r -> %r..."%(oldbranch,newbranch)) self.tree.SetAlias(newbranch,oldbranch) diff --git a/README.md b/README.md index 3a8cfbc93..44dff5720 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ Framework for tau analysis using NanoAOD at CMS. Three main packages are 1. [`PicoProducer`](PicoProducer): Tools to process nanoAOD and make custom analysis ntuples. -2. [`Plotter`](Plotter): Tools for further analysis, auxiliary measurements, validation and plotting. [Under development.] +2. [`Plotter`](Plotter): Tools for further analysis, auxiliary measurements, validation and plotting. 3. [`Fitter`](Fitter): Tools for measurements and fits in combine. [Under development.] 
## Installation @@ -12,10 +12,10 @@ Framework for tau analysis using NanoAOD at CMS. Three main packages are * [TauFW](#TauFW-1)
* [PicoProducer](#PicoProducer)
* [Combine](#Combine)
-* [TauID](#TauPOG-corrections)
+* [TauPOG corrections](#TauPOG-corrections)
### CMSSW environment -First, setup a CMSSW release, for example, +First, setup a CMSSW release. For example,
nanoAODv10 (python3) Older versions (python2)