How To: Run SModelS with LHE input with additional width information¶
In [1]:
# Make the SModelS installation folder importable from this notebook:
import sys
sys.path.append(".")
import smodels_paths
In [2]:
# SModelS imports used throughout this example.
from smodels.base import runtime
# Define your model: point SModelS at the module describing the BSM particles.
# NOTE(review): this is set before the remaining imports — presumably some
# modules read runtime.modelFile at import time; confirm before reordering.
runtime.modelFile = 'smodels.share.models.mssm'
from smodels.decomposition import decomposer
from smodels.base.physicsUnits import fb, GeV, TeV
from smodels.matching.theoryPrediction import theoryPredictionsFor
from smodels.experiment.databaseObj import Database
from smodels.tools import coverage
from smodels.base.smodelsLogging import setLogLevel
# Particle content (BSM and SM lists) used to build the Model object below:
from smodels.share.models.mssm import BSMList
from smodels.share.models.SMparticles import SMList
from smodels.base.model import Model
# Show informational log messages while running:
setLogLevel("info")
Read LHE input:¶
In [3]:
# Input for this example: an LHE event file with a simplified gluino model
lhefile = 'inputFiles/lhe/simplyGluino.lhe'
# Build the model from the BSM/SM particle lists, then read masses and
# cross-sections from the LHE events:
model = Model(BSMparticles=BSMList, SMparticles=SMList)
model.updateParticles(inputFile=lhefile)
INFO in model.getModelDataFrom() in 222: Using LHE input. All unstable particles will be assumed to have prompt decays. INFO in model.getModelDataFrom() in 223: Using LHE input. All particles not appearing in the events will be removed from the model. INFO in model.updateParticles() in 430: Loaded 4 BSM particles (58 particles not found in inputFiles/lhe/simplyGluino.lhe)
Replace widths¶
In [4]:
# After reading LHE input, every BSM particle in the model carries either a
# zero width (stable) or an infinite width (unstable). These defaults can be
# replaced for any (long-lived) particle of interest. Here the gluino width
# is set to 1e-15 GeV:
matches = model.getParticlesWith(pdg = 1000021)
gluino = matches[0]
print('old width=',gluino.totalwidth) # the default width should be infinite
# Assign the new width value:
newWidth = 1e-15*GeV
gluino.totalwidth = newWidth
print('new width=',gluino.totalwidth)
old width= INF [GeV] new width= 1.00E-15 [GeV]
In [5]:
# The same has to be done for the anti-particles.
# (Renamed from `gluino`: this object is the anti-gluino, pdg = -1000021.)
antiGluino = model.getParticlesWith(pdg = -1000021)[0]
antiGluino.totalwidth = 1e-15*GeV
# The remaining steps are as for any input:
Decompose the input model:¶
In [6]:
# Main decomposition options: minimum cross-section to keep a topology and
# minimum mass gap for mass compression:
sigmacut = 0.01 * fb
mingap = 5. * GeV
# Decompose the model (decomposer.decompose handles both SLHA and LHE input):
toplist = decomposer.decompose(model, sigmacut, massCompress=True, invisibleCompress=True, minmassgap=mingap)
# Basic information about the decomposition result:
print( "\n Decomposition Results: " )
print( "\t Total number of topologies: %i " %len(toplist) )
print( "\t Total number of elements = %i " %len(toplist.getSMSList()) )
Decomposition Results: Total number of topologies: 1 Total number of elements = 1
Load the Database of experimental results:¶
In [7]:
# Load the database ("official" fetches/uses the latest official release):
database = Database("official")
# Use every experimental result contained in the database:
listOfExpRes = database.expResultList
# Tally how many upper-limit (UL) and efficiency-map (EM) results were loaded,
# classifying each result by the data type of its first dataset:
nUL = sum(1 for exp in listOfExpRes
          if exp.datasets[0].dataInfo.dataType == 'upperLimit')
nEM = sum(1 for exp in listOfExpRes
          if exp.datasets[0].dataInfo.dataType == 'efficiencyMap')
print("\n Loaded Database with %i UL results and %i EM results " %(nUL,nEM))
INFO in databaseObj.loadBinaryFile() in 551: loading binary db file /home/lessa/.cache/smodels/official300.pcl format version 214 INFO in databaseObj.loadBinaryFile() in 558: Loaded database from /home/lessa/.cache/smodels/official300.pcl in 1.8 secs.
Loaded Database with 112 UL results and 62 EM results
Match the decomposed simplified models with the experimental database of constraints:¶
In [8]:
# Compute the theory predictions for each experimental result and print them:
print("\n Theory Predictions and Constraints:")
rmax = 0.
bestResult = None
# Match the decomposed topologies against all database results at once:
allPredictions = theoryPredictionsFor(database, toplist, combinedResults=False)
# Group the predictions by analysis ID:
predsDict = {}
for tp in allPredictions:
    # setdefault creates the list on the first appearance of an analysis ID
    predsDict.setdefault(tp.analysisId(), []).append(tp)
for anaID, predictions in predsDict.items():
    if not predictions:
        continue # Skip if there are no constraints from this result
    print('\n %s ' % anaID)
    for theoryPrediction in predictions:
        dataset = theoryPrediction.dataset
        datasetID = theoryPrediction.dataId()
        txnames = sorted([str(txname) for txname in theoryPrediction.txnames])
        print("------------------------")
        print("Dataset = ", datasetID) # Dataset (signal region) ID; None for UL-type results
        print("TxNames = ", txnames) # Simplified-model topologies contributing
        print("Theory Prediction = ", theoryPrediction.xsection) # Signal cross section
        # Get the corresponding upper limit:
        print("UL for theory prediction = ", theoryPrediction.upperLimit)
        # Compute the r-value (prediction over upper limit); r > 1 signals exclusion:
        r = theoryPrediction.getRValue()
        print("r = %1.3E" % r)
        # Compute likelihoods for EM-type results:
        if dataset.getType() == 'efficiencyMap':
            theoryPrediction.computeStatistics()
            print('L_BSM, L_SM, L_max = %1.3E, %1.3E, %1.3E' % (theoryPrediction.likelihood(),
                   theoryPrediction.lsm(), theoryPrediction.lmax()))
        # Keep track of the most constraining result:
        if r > rmax:
            rmax = r
            bestResult = anaID
# Print the most constraining experimental result
print( "\nThe largest r-value (theory/upper limit ratio) is ",rmax )
if rmax > 1.:
    print( "(The input model is likely excluded by %s)" %bestResult )
else:
    print( "(The input model is not excluded by the simplified model results)" )
Theory Predictions and Constraints: ATLAS-SUSY-2013-02 ------------------------ Dataset = SR5j TxNames = ['T1'] Theory Prediction = 1.85E-07 [pb] UL for theory prediction = 1.54E+00 [fb] r = 1.200E-04 L_BSM, L_SM, L_max = 1.065E-03, 1.066E-03, 1.066E-03 ------------------------ Dataset = None TxNames = ['T1'] Theory Prediction = 2.62E-01 [pb] UL for theory prediction = 2.52E+06 [fb] r = 1.040E-04 CMS-SUS-13-012 ------------------------ Dataset = None TxNames = ['T1'] Theory Prediction = 2.62E-01 [pb] UL for theory prediction = 2.81E+06 [fb] r = 9.329E-05 ------------------------ Dataset = 6NJet8_800HT1000_300MHT450 TxNames = ['T1'] Theory Prediction = 3.30E-08 [pb] UL for theory prediction = 1.35E+00 [fb] r = 2.448E-05 L_BSM, L_SM, L_max = 3.019E-03, 3.019E-03, 3.890E-03 CMS-SUS-13-019 ------------------------ Dataset = None TxNames = ['T1'] Theory Prediction = 2.62E-01 [pb] UL for theory prediction = 3.39E+06 [fb] r = 7.732E-05 CMS-SUS-12-028 ------------------------ Dataset = None TxNames = ['T1'] Theory Prediction = 2.62E-01 [pb] UL for theory prediction = 5.04E+06 [fb] r = 5.203E-05 The largest r-value (theory/upper limit ratio) is 0.00011997437041535402 (The input model is not excluded by the simplified model results)
In [9]:
# Find out missing topologies for sqrts=8*TeV:
uncovered = coverage.Uncovered(toplist,sqrts=8.*TeV)
# Sort the coverage groups by label. sorted() already returns a new list,
# so the original code's defensive copy (uncovered.groups[:]) was redundant.
groups = sorted(uncovered.groups, key = lambda g: g.label)
# Print the uncovered cross-section of each group:
for group in groups:
    print("\nTotal cross-section for %s (fb): %10.3E\n" %(group.description,group.getTotalXSec()))
Total cross-section for missing topologies (fb): 0.000E+00 Total cross-section for missing topologies with displaced decays (fb): 0.000E+00 Total cross-section for missing topologies with prompt decays (fb): 0.000E+00 Total cross-section for topologies outside the grid (fb): 0.000E+00
In [10]:
def printTopologies(group, header, emptyMsg, nmax):
    """Print up to nmax SMS elements of a coverage group with their
    missing cross-sections, or emptyMsg if the group has none.

    :param group: coverage group (as returned by Uncovered.getGroup)
    :param header: line printed before the element list
    :param emptyMsg: line printed when the group is empty
    :param nmax: maximum number of elements to show
    """
    if group.finalStateSMS:
        print(header)
        for genEl in group.finalStateSMS[:nmax]:
            print('Element:', genEl)
            print('\tcross-section (fb):', genEl.missingX)
    else:
        print(emptyMsg)

# Print some of the missing topologies:
missingTopos = uncovered.getGroup('missing (prompt)')
printTopologies(missingTopos, 'Missing topologies (up to 3):',
                "No missing topologies found\n", 3)
# Print elements with displaced decays:
missingDisplaced = uncovered.getGroup('missing (displaced)')
printTopologies(missingDisplaced, '\nElements with displaced vertices (up to 2):',
                "\nNo displaced decays", 2)
No missing topologies found No displaced decays
In [ ]: