blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
25b6583408b37a7b05b7b70cc04dc00c8f310c8a | b6d08d9241ee6c41a045b0500bbc1d7236a5b705 | /count-number-of-1s-in-sorted-binary-array.py | 6b2ec25c01eef0fafb6b3f836e7835e04457a16c | [] | no_license | goyalgaurav64/Arrays | bca7c84bc3d9cdf122a47118b76558eeb10fca23 | bb2f795b3a67585828d3c10fa5e6db326844ce99 | refs/heads/master | 2022-12-26T09:38:07.134709 | 2020-09-27T12:45:40 | 2020-09-27T12:45:40 | 285,988,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | def count1s(a,n,low,high):
if high>=low:
mid=low+(high-low)//2
if(a[mid]==1 and (mid==high or a[mid+1]==0)):
return mid+1
if a[mid]==1:
return count1s(a,n,mid+1,high)
return count1s(a,n,low,mid-1)
return 0
if __name__ == "__main__":
# a=[1, 1, 1, 1, 0, 0, 0]
# a=[0, 0, 0, 0, 0, 0, 0]
a=[1, 1, 1, 1, 1, 1, 1]
ans=count1s(a,len(a),0,len(a)-1)
print("Number of 1s in the sorted binary array are:",ans) | [
"noreply@github.com"
] | goyalgaurav64.noreply@github.com |
dd3305c78e04151d15b36a79266132df6c46604a | b23e51cf4770bd69e41fa9c3a868e074c6f735fd | /convert_from_ssa.py | 3ceb0ea6a6a60c7dbad13e07619fe3f487bb99da | [] | no_license | winniez/final-compiler | 08093ce1e22dca1ff85f43c629c2a2c03092d50d | 616436f83448ef94cbc85347ed0a657a7cbba9b3 | refs/heads/master | 2021-01-23T03:03:52.689846 | 2014-04-18T20:52:47 | 2014-04-18T20:52:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,242 | py | from vis import Visitor
from compiler.ast import *
from explicit import *
from compiler_utilities import *
from find_locals import FindLocalsVisitor
from phi import *
class RemoveSSAVisitor(Visitor):
    """Translate an SSA-form AST back into ordinary (non-SSA) form.

    Phi nodes attached to If/While statements are lowered into plain
    assignments placed on the control-flow edges they summarize; every
    other visitor method simply rebuilds its node from the dispatched
    children.
    """

    def visitIf(self, n):
        # Lower each phi into one assignment per branch: the "then" arm
        # assigns var = var1, the "else" arm assigns var = var2.
        test = self.dispatch(n.tests[0][0])
        then = self.dispatch(n.tests[0][1])
        else_ = self.dispatch(n.else_)
        phi_then = [Assign(nodes=[AssName(p.var, 'OP_ASSIGN')], expr=Name(p.var1))
                    for p in n.phis]
        phi_else_ = [Assign(nodes=[AssName(p.var, 'OP_ASSIGN')], expr=Name(p.var2))
                     for p in n.phis]
        # Phi assignments run after the original branch bodies.
        then = Stmt(list(then.nodes) + phi_then)
        else_ = Stmt(list(else_.nodes) + phi_else_)
        return If([(test, then)], else_, None)

    def visitWhile(self, n):
        # var = var1 initializes before entering the loop; var = var2
        # updates at the end of every iteration.
        test = self.dispatch(n.test)
        body = self.dispatch(n.body)
        phi_preloop = [Assign(nodes=[AssName(p.var, 'OP_ASSIGN')], expr=Name(p.var1))
                       for p in n.phis]
        phi_body = [Assign(nodes=[AssName(p.var, 'OP_ASSIGN')], expr=Name(p.var2))
                    for p in n.phis]
        body = Stmt(list(body.nodes) + phi_body)
        return Stmt(phi_preloop + [While(test, body, n.else_, None)])

    def visitIfExp(self, n):
        return IfExp(self.dispatch(n.test),
                     self.dispatch(n.then),
                     self.dispatch(n.else_))

    def visitName(self, n):
        return n

    def visitLet(self, n):
        return Let(n.var, self.dispatch(n.rhs), self.dispatch(n.body))

    def visitAssign(self, n):
        return Assign(nodes=n.nodes, expr=self.dispatch(n.expr))

    def visitModule(self, n):
        return Module(n.doc, self.dispatch(n.node))

    def visitLambda(self, n):
        args = [self.dispatch(a) for a in n.argnames]
        return Lambda(args, n.defaults, n.flags, self.dispatch(n.code))

    def visitReturn(self, n):
        return Return(self.dispatch(n.value))

    def visitStmt(self, n):
        return Stmt([self.dispatch(s) for s in n.nodes])

    def visitPrintnl(self, n):
        return Printnl([self.dispatch(n.nodes[0])], n.dest)

    def visitConst(self, n):
        return n

    def visitstr(self, n):
        return n

    def visitAdd(self, n):
        left = self.dispatch(n.left)
        right = self.dispatch(n.right)
        return Add((left, right))

    def visitUnarySub(self, n):
        return UnarySub(self.dispatch(n.expr))

    def visitCallFunc(self, n):
        return CallFunc(self.dispatch(n.node),
                        [self.dispatch(a) for a in n.args])

    def visitCompare(self, n):
        # Only a single binary comparison is expected at this stage.
        lhs = self.dispatch(n.expr)
        rhs = self.dispatch(n.ops[0][1])
        return Compare(lhs, [(n.ops[0][0], rhs)])

    def visitAnd(self, n):
        return And([self.dispatch(n.nodes[0]), self.dispatch(n.nodes[1])])

    def visitOr(self, n):
        return Or([self.dispatch(n.nodes[0]), self.dispatch(n.nodes[1])])

    def visitNot(self, n):
        return Not(self.dispatch(n.expr))

    def visitDict(self, n):
        return Dict([(self.dispatch(k), self.dispatch(e)) for (k, e) in n.items])

    def visitList(self, n):
        return List([self.dispatch(e) for e in n.nodes])

    def visitSubscript(self, n):
        expr = self.dispatch(n.expr)
        return Subscript(expr, n.flags, [self.dispatch(e) for e in n.subs])

    def visitSetSubscript(self, n):
        return SetSubscript(self.dispatch(n.container),
                            self.dispatch(n.key),
                            self.dispatch(n.val))

    def visitDiscard(self, n):
        return Discard(self.dispatch(n.expr))

    def visitInjectFrom(self, n):
        return InjectFrom(n.typ, self.dispatch(n.arg))

    def visitProjectTo(self, n):
        return ProjectTo(n.typ, self.dispatch(n.arg))

    def visitGetTag(self, n):
        return GetTag(self.dispatch(n.arg))
| [
"chris.bubernak@gmail.com"
] | chris.bubernak@gmail.com |
7590a822008ae275d29a72c9cc0693eaf2a6fa35 | e439fd12714bf0b59c68fc8027976ca8cd04ec43 | /0x06-python-classes/100-singly_linked_list.py | 926caff902e7b81c962406deab51b91d0a2933af | [] | no_license | martincorredor/holbertonschool-higher_level_programming | 95bf91ff993b3a48b64cd3b642aa48e59c19801c | 89fddeece2eaba87cda43c003bf40ad7d0c66b9a | refs/heads/main | 2023-05-04T13:32:08.188762 | 2021-05-13T03:14:16 | 2021-05-13T03:14:16 | 291,816,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,963 | py |
#!/usr/bin/python3
"""
Module 100-singly_linked_list
Defines class Node (with private data and next_node)
Defines class SinglyLinkedList (with private head and public sorted_insert)
"""
class Node:
    """A single element of a singly linked list.

    Holds an integer payload (``data``) and a reference to the next
    element (``next_node``), which is None at the tail.
    """

    def __init__(self, data, next_node=None):
        """Create a node; both attributes go through the validating setters."""
        self.data = data
        self.next_node = next_node

    @property
    def data(self):
        """The integer stored in this node."""
        return self.__data

    @data.setter
    def data(self, value):
        # Exact type check on purpose: bool (a subclass of int) is rejected.
        if type(value) is not int:
            raise TypeError("data must be an integer")
        self.__data = value

    @property
    def next_node(self):
        """The following Node, or None if this is the last node."""
        return self.__next_node

    @next_node.setter
    def next_node(self, value):
        if value is not None and type(value) is not Node:
            raise TypeError("next_node must be a Node object")
        self.__next_node = value
class SinglyLinkedList:
    """A singly linked list of Node objects kept in ascending order."""

    def __init__(self):
        """Start out empty."""
        self.__head = None

    def __str__(self):
        """One node value per line, in list order (empty string when empty)."""
        values = []
        cursor = self.__head
        while cursor is not None:
            values.append(str(cursor.data))
            cursor = cursor.next_node
        return "\n".join(values)

    def sorted_insert(self, value):
        """Insert ``value`` (wrapped in a new Node) keeping ascending order.

        Args:
            value: int payload; validated by the Node constructor.
        """
        fresh = Node(value)
        # Empty list, or the new value is the smallest: it becomes the head.
        if self.__head is None or fresh.data < self.__head.data:
            fresh.next_node = self.__head
            self.__head = fresh
            return
        # Walk to the last node whose successor is still smaller.
        cursor = self.__head
        while cursor.next_node is not None and cursor.next_node.data < fresh.data:
            cursor = cursor.next_node
        fresh.next_node = cursor.next_node
        cursor.next_node = fresh
| [
"dev.martincorredor@gmail.com"
] | dev.martincorredor@gmail.com |
16ffec30ff35fe9c3e0c9763c7a430088738c46f | c9f67529e10eb85195126cfa9ada2e80a834d373 | /lib/python3.5/site-packages/cloudpickle/__init__.py | 35e5df340d353e27242a6b1bba4359a8c662f922 | [
"Apache-2.0"
] | permissive | chilung/dllab-5-1-ngraph | 10d6df73ea421bfaf998e73e514972d0cbe5be13 | 2af28db42d9dc2586396b6f38d02977cac0902a6 | refs/heads/master | 2022-12-17T19:14:46.848661 | 2019-01-14T12:27:07 | 2019-01-14T12:27:07 | 165,513,937 | 0 | 1 | Apache-2.0 | 2022-12-08T04:59:31 | 2019-01-13T14:19:16 | Python | UTF-8 | Python | false | false | 101 | py | from __future__ import absolute_import
from cloudpickle.cloudpickle import *
__version__ = '0.6.1'
| [
"chilung.cs06g@nctu.edu.tw"
] | chilung.cs06g@nctu.edu.tw |
fad508316f7c8af514e13695746f1d929139ecad | 6e3a8e94ae42cefaf68281e3531c05d348dd8033 | /7-5recursive.py | ad4aea737e383624ebd2ffe3f5c243cb04cb3ee2 | [] | no_license | aqutw/python_01_practice | 1b7cf00cae4694a0dccb14658a3409b32c434be1 | b96187d424e84d54bcfbd8037cb392eb0de5b5ec | refs/heads/master | 2016-09-06T09:32:20.908656 | 2015-03-22T08:33:26 | 2015-03-22T08:33:44 | 32,628,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | # -*- coding: utf-8 -*-
def fact(n):
    """Return n! computed recursively.

    Bug fix: the original base case handled only n == 1, so fact(0)
    (and any n < 1) recursed until RecursionError.  The n <= 1 guard
    terminates those calls with 1 (matching 0! == 1) while leaving
    results for n >= 1 unchanged.
    """
    if n <= 1:
        return 1
    return fact(n - 1) * n
print fact(3)
print '-----do Task-----'
# Tower of Hanoi
def move(n, a, b, c):
    """Print the moves that transfer n disks from peg a to peg c.

    Peg b is the spare.  NOTE: this is Python 2 source (print
    statements), kept as-is.
    """
    if n==1:
        print a, '-->', c
        return
    # Move the top n-1 disks out of the way onto the spare peg,
    move(n-1, a, c, b)
    # carry the largest disk across,
    print a, '-->', c
    # then bring the n-1 disks from the spare onto the target peg.
    move(n-1, b, a, c)
move(n-1, b, a, c)
move(4, 'A', 'B', 'C')
| [
"aquarian.ex@gmail.com"
] | aquarian.ex@gmail.com |
f5b479e335963e856113cd228c818fae352dfc66 | ae6d307ea7b5609a9fec854a042b821716f14081 | /build/config.gypi | af346fdfd3cfe3de4ca21846d485b433269597bd | [] | no_license | icimence/express-html | 5ce0f450c71602ae4b606a1ee94836d257ba11e2 | 7026673953e3a52e82903fdec1b0fc5df48ab44b | refs/heads/main | 2023-02-09T17:18:09.151754 | 2021-01-02T04:38:52 | 2021-01-02T04:38:52 | 324,801,923 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,622 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": [],
"msbuild_toolset": "v142",
"msvs_windows_target_platform_version": "10.0.18362.0"
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "..\\..\\deps/icu-small\\source/data/in\\icudt67l.dat",
"icu_default_data": "",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "67",
"is_debug": 0,
"napi_build_version": "6",
"nasm_version": "2.14",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 72,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"shlib_suffix": "so.72",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"v8_use_snapshot": 1,
"want_separate_host_toolset": 0,
"nodedir": "C:\\Users\\lenovo\\AppData\\Local\\node-gyp\\Cache\\12.18.3",
"standalone_static_library": 1,
"msbuild_path": "E:\\Microsoft Visual Studio\\MSBuild\\Current\\Bin\\MSBuild.exe"
}
}
| [
"icimence@outlook.com"
] | icimence@outlook.com |
349c5fca253e2c1e2137c8828c00c8274e70bb3f | eb7dbc0f737811c81dfe366de43d38ac5cab5d8e | /ChewUmoD_ML_notebooks/Dual_input_images_regression_for_upload.py | 59b71c75b16a498cc1031ba75b70933e7119de02 | [] | no_license | daninolab/proteus-mirabilis-engineered | 767a39a09483d9ca8e38fce3cbcf8eab19f71f84 | 0e0dea16a273bc5d49adefe7752379eb0ee95c4f | refs/heads/main | 2023-04-13T21:59:51.091094 | 2023-02-13T22:32:42 | 2023-02-13T22:32:42 | 601,348,090 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,944 | py | # -*- coding: utf-8 -*-
"""01_25_23_Dual Input Images Regression For Upload
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1E8xEIVUIZnADIoV6bCxWW4iPMu5-IkVo
"""
# NOTE: IN this notebook, you have to start, then restart runtime. Otherwise tensorflow isn't the right version
import os
import shutil
import time
import math
import random
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
from pprint import pprint
import pathlib
import PIL
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import pickle
import tensorflow as tf
from tensorflow.keras.preprocessing.image import load_img, ImageDataGenerator
from IPython.display import clear_output
import cv2 as cv
from sklearn import model_selection
# %config Completer.use_jedi = False
# torch.manual_seed(0)
# Get the date for later saving
from datetime import date
today = date.today()
date_string = today.strftime("%m%d%y")
!pip install tensorflow-gpu
!pip install tensorflow==2.8
!apt install --allow-change-held-packages libcudnn8=8.1.0.77-1+cuda11.2
from tensorflow.keras.applications import *
from tensorflow.keras import models, layers, callbacks
#Use this to check if the GPU is configured correctly
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
print(tf.version.VERSION)
IMG_SIZE = 512
def convert_all_to_jpgs(source_folder, target_folder=None, delete_existing_jpg_folder=True, source_ext=".tiff"):
    """Mirror ``source_folder`` into a directory tree of JPEG conversions.

    Every file ending in ``source_ext`` is re-encoded as a quality-100 JPEG
    at the same relative path under ``target_folder``.

    Args:
        source_folder: root of the tree to convert.
        target_folder: destination root; defaults to source_folder + "_jpg".
            (Bug fix: this argument used to be ignored and overwritten.)
        delete_existing_jpg_folder: wipe an existing target tree first.
        source_ext: extension of the files to convert.
            (Bug fix: ".tiff" used to be hard-coded despite this parameter.)
    """
    if target_folder is None:
        target_folder = source_folder + "_jpg"
    if delete_existing_jpg_folder and os.path.exists(target_folder):
        shutil.rmtree(target_folder)
    os.makedirs(target_folder, exist_ok=True)
    # Recreate the directory structure first so the file writes below never
    # hit a missing parent.  The original flattened every directory to a
    # single level under the target, which broke nested source trees.
    for root, dirs, _files in os.walk(source_folder):
        for dirname in dirs:
            mirrored = os.path.join(root, dirname).replace(source_folder, target_folder, 1)
            os.makedirs(mirrored, exist_ok=True)
    for root, _dirs, files in os.walk(source_folder):
        for name in files:
            infile = os.path.join(root, name)
            # Match the extension only (the old substring test also caught
            # e.g. ".tiff" appearing anywhere in the path).
            if infile.endswith(source_ext):
                outfile = infile.replace(source_folder, target_folder, 1)
                outfile = outfile[: -len(source_ext)] + ".jpg"
                im = Image.open(infile)
                im.thumbnail(im.size)
                # Bug fix: save via the path -- the old text-mode 'w+' file
                # handle raised TypeError when PIL wrote binary JPEG data.
                im.save(outfile, "JPEG", quality=100)
# Mount my google drive
from google.colab import drive # will need to use verification code here
drive.mount('/content/gdrive', force_remount = True)
# Import all the images into the local environment i guess?
# May need to convert them to grayscale & size 512
def get_available_image_paths(data_root):
    """Collect every .jpg under ``data_root`` (recursively) as string paths.

    Args:
        data_root: pathlib.Path at the root of the image tree.

    Returns:
        list[str]: paths of all matching files.
    """
    jpg_paths = [str(p) for p in data_root.glob('**/*.jpg')]
    print('We have {} images'.format(len(jpg_paths)))
    # Quick sanity peek at where the files actually live.
    print(jpg_paths[:2])
    return jpg_paths
def load_and_preprocess_image(path, img_size = IMG_SIZE):
    """Read one image from disk and resize it to (img_size, img_size).

    matplotlib's imread handles the TIFF scans that tf's own decoders
    do not; pixel rescaling is left to the downstream model.
    """
    image = plt.imread(path)
    return tf.image.resize(image, [img_size, img_size])
root_path = 'gdrive/My Drive/Danino_Lab/Patterning_Scans/cheW_umoD_Expts/Updated_ML_Dataset_103022/All_Ims_For_Upload'
data_root = pathlib.Path(root_path)
target_folder = 'images_transformed_512'
if not os.path.exists(target_folder):
os.mkdir(target_folder)
# Get list of folders in the root folder & replicate in the local folder
for root, dirs, files in os.walk(data_root, topdown=False):
for dirname in dirs:
if not os.path.exists(f'{target_folder}/{dirname}'):
os.mkdir(f'{target_folder}/{dirname}')
print(dirname)
image_paths = get_available_image_paths(data_root)
for i, image_path in enumerate(image_paths):
try:
im = Image.open(image_path)
im.thumbnail((512, 512))
# gray_image = ImageOps.grayscale(im)
# processed = load_and_preprocess_image(image_path, img_size = 512)
newpath = image_path.replace('gdrive/My Drive/Danino_Lab/Patterning_Scans/cheW_umoD_Expts/Updated_ML_Dataset_103022/All_Ims_For_Upload', 'images_transformed_512/')
# gray_image.save(newpath)
im.save(newpath)
except Exception as e:
print(e)
print(image_path)
break
# now let's get the labels
new_image_paths = get_available_image_paths(pathlib.Path('images_transformed_512'))
print(new_image_paths[0:3])
# Let's set from the get go whether we want to normalize the labels or not
normalize_conc_labels = True
# # let's make a dict for the labels
if normalize_conc_labels:
class_dict = {"0_iptg_0.1_ara": [0, 0.5],
"0_iptg_0.2_ara": [0, 1],
"0_iptg_0_ara": [0, 0],
"2.5_iptg_0.1_ara": [0.5, 0.5],
"2.5_iptg_0.2_ara": [0.5, 1],
"2.5_iptg_0_ara": [0.5, 0],
"5_iptg_0.1_ara": [1.0, 0.5],
"5_iptg_0.2_ara": [1.0, 1],
"5_iptg_0_ara": [1.0, 0]}
else:
class_dict = {"0_iptg_0.1_ara": [0, 0.1],
"0_iptg_0.2_ara": [0, 0.2],
"0_iptg_0_ara": [0, 0],
"2.5_iptg_0.1_ara": [2.5, 0.1],
"2.5_iptg_0.2_ara": [2.5, 0.2],
"2.5_iptg_0_ara": [2.5, 0],
"5_iptg_0.1_ara": [5.0, 0.1],
"5_iptg_0.2_ara": [5.0, 0.2],
"5_iptg_0_ara": [5.0, 0]}
for key in class_dict:
print(key)
print(class_dict[key])
print(os.listdir('images_transformed_512'))
total = 0
for root, dirs, main_files in os.walk('images_transformed_512'):
for dir in dirs:
print(dir)
if not "Intermediate" in dir:
print(class_dict[dir])
for img_root, img_dirs, img_files in os.walk(os.path.join('images_transformed_512', dir)):
for img_file in img_files:
total += 1
print("Num ims for training")
print(total)
# Get number of images per class
total = 0
for root, dirs, main_files in os.walk('images_transformed_512'):
for dir in dirs:
print(dir)
if not "Intermediate" in dir:
# print(class_dict[dir])
for img_root, img_dirs, img_files in os.walk(os.path.join('images_transformed_512', dir)):
for img_file in img_files:
total += 1
print(total)
total = 0
# get the new image paths
data_dir = pathlib.Path('/content/images_transformed_512')
new_image_paths = [str(path) for path in data_dir.glob('*/*.jpg')]
print(new_image_paths[0:3])
PIL.Image.open(str(list(data_dir.glob('*/*.jpg'))[0]))
"""# Importing & labeling the data
"""
# Make the main training set & intermediate evaluation set at same time
X_full = []
y_full = []
img_paths_full = []
X_full_int = []
y_full_int = []
img_paths_full_int = []
for img_path in new_image_paths:
if not '091419' in img_path:
if not "Intermediate" in img_path:
img_dir = img_path.split('/')[3]
img_number = class_dict[img_dir]
# preprocess image for inceptionv3
im = cv.imread(img_path)
im = cv.resize(im, (IMG_SIZE, IMG_SIZE))
# img_processed = tf.keras.applications.inception_v3.preprocess_input(im, data_format=None)
# img_processed = load_and_preprocess_image(image_path, img_size = 512)
# make dataset in full
X_full.append(im)
# for regression
y_full.append(img_number)
# in case we need it later, save the image path
img_paths_full.append(img_path)
else:
# This is a diff concentration image
# Get the IPTG, ara
img_name = img_path.split('/')[4]
img_name_split = img_name.split('_')
img_number = [float(img_name_split[0]), float(img_name_split[2])]
# decide whether we need to normalize or not:
if normalize_conc_labels:
img_number[0] = img_number[0]/5.0
img_number[1] = img_number[1]/0.2
# resize image and add to matrix
im = cv.imread(img_path)
im = cv.resize(im, (IMG_SIZE, IMG_SIZE))
X_full.append(im)
y_full.append(img_number)
img_paths_full.append(img_path)
print(len(X_full))
print(X_full[0].shape)
print(len(y_full))
print(y_full[0])
# Double check the images
plt.figure()
plt.imshow(X_full[0])
plt.title(y_full[0])
plt.figure()
plt.imshow(X_full[1])
plt.title(y_full[1])
"""Convert the datasets to tensors"""
# conver to tensors
X_full = tf.convert_to_tensor(X_full)
y_full = tf.convert_to_tensor(y_full)
assert len(X_full) == len(y_full)
print('{} length data'.format(len(X_full)))
# Shuffle into train & val
indices = tf.range(start=0, limit=len(X_full), dtype=tf.int32)
shuffled_indices = tf.random.shuffle(indices)
X_full = tf.gather(X_full, shuffled_indices)
y_full = tf.gather(y_full, shuffled_indices)
# IF WE WANT A SEPARATE TEST SET
split_index = int(0.8 * len(X_full))
split_index_2 = int(0.1*len(X_full)) + split_index;
X_train, y_train, X_val, y_val, X_test, y_test = X_full[:split_index], y_full[:split_index], X_full[split_index:split_index_2], y_full[split_index:split_index_2], X_full[split_index_2:], y_full[split_index_2:]
# # IF WE WANT ONLY TRAIN AND VAL
# split_index = int(0.9 * len(X_full))
# X_train, y_train, X_val, y_val = X_full[:split_index], y_full[:split_index], X_full[split_index:], y_full[split_index:]
print(X_train.shape)
print(y_train.shape)
print(X_val.shape)
print(y_val.shape)
print(X_test.shape)
print(y_test.shape)
# See which classes are present in val & test
from collections import Counter
print("val")
y_val_array = np.asarray(y_val)
unique_rows, counts = (np.unique(y_val_array, axis=0, return_counts=True))
print(unique_rows)
print(counts)
print("test")
y_test_array = np.asarray(y_test)
unique_rows, counts = (np.unique(y_test_array, axis=0, return_counts=True))
print(unique_rows)
print(counts)
# Write the image files used in each set to files
print(img_paths_full[0])
img_paths_full = tf.gather(img_paths_full, shuffled_indices)
print(bytes.decode(img_paths_full[0].numpy()))
# Split into train val test
paths_train, paths_val, paths_test = img_paths_full[:split_index], img_paths_full[split_index:split_index_2], img_paths_full[split_index_2:]
# Get the date for later saving
from datetime import date
today = date.today()
date_string = today.strftime("%m%d%y")
with open(f'gdrive/My Drive/Danino_Lab/Patterning_Scans/cheW_umoD_Expts/models_and_histories/{date_string}_trainims.txt', 'w') as writefile:
for line in paths_train:
img_path = bytes.decode(line.numpy())
writefile.write(img_path + "\n")
with open(f'gdrive/My Drive/Danino_Lab/Patterning_Scans/cheW_umoD_Expts/models_and_histories/{date_string}_valims.txt', 'w') as writefile:
for line in paths_val:
img_path = bytes.decode(line.numpy())
writefile.write(img_path + "\n")
with open(f'gdrive/My Drive/Danino_Lab/Patterning_Scans/cheW_umoD_Expts/models_and_histories/{date_string}_testims.txt', 'w') as writefile:
for line in paths_test:
img_path = bytes.decode(line.numpy())
writefile.write(img_path + "\n")
"""new approach is with a datagenerator"""
# Switching to a datagenerator
# DON't rescale will do later
train_datagen = ImageDataGenerator(
rotation_range= 0.5,
horizontal_flip= True,
width_shift_range=0.1,
height_shift_range=0.1)
# width_shift_range=0.2,
# height_shift_range=0.2)
# featurewise_center=True,
# featurewise_std_normalization=True,
# let's test the image data generator
temp_gen = train_datagen.flow(X_train, y_train, batch_size=32)
temp = temp_gen.next()
print(len(temp))
data_augmentations = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.5, fill_mode='constant'),
tf.keras.layers.experimental.preprocessing.RandomTranslation(0.1, 0.1, fill_mode='constant')
])
"""# Define the model
"""
RESHAPED_SIZE = 224
base_model = EfficientNetB0(weights="imagenet", include_top=False, input_shape=(RESHAPED_SIZE, RESHAPED_SIZE, 3))
print("Number of layers in the base model: ", len(base_model.layers))
def build_efficientnet(architecture='EfficientNetB0', pretrained=False, finetune=True, data_augmentation=True, load_from=None):
    """Build and compile an EfficientNet regressor with 2 linear outputs.

    Args:
        architecture: one of 'EfficientNetB0'..'EfficientNetB7' (case-insensitive).
        pretrained: load ImageNet weights into the backbone.
        finetune: when pretrained, unfreeze the top 2/3 of backbone layers.
        data_augmentation: prepend the module-level `data_augmentations` pipeline.
        load_from: NOTE(review): currently unused -- confirm whether checkpoint
            loading was meant to happen here.

    Returns:
        A compiled tf.keras.Model mapping (IMG_SIZE, IMG_SIZE, 3) images to
        2 regression values, trained with MSE loss.
    """
    base_model = None
    preprocessing = None
    # Backbone input resolution; full-size inputs are resized down to this.
    RESHAPED_SIZE = 224
    # Note that all the efficientnets do the rescaling from 0-255 to 0-1 internally
    # Download architecture w/ pretrained weights and freeze them
    if architecture.lower() == 'EfficientNetB0'.lower():
        base_model = EfficientNetB0(weights="imagenet" if pretrained else None, include_top=False, input_shape=(RESHAPED_SIZE, RESHAPED_SIZE, 3))
    elif architecture.lower() == 'EfficientNetB1'.lower():
        base_model = EfficientNetB1(weights="imagenet" if pretrained else None, include_top=False, input_shape=(RESHAPED_SIZE, RESHAPED_SIZE, 3))
    elif architecture.lower() == 'EfficientNetB2'.lower():
        base_model = EfficientNetB2(weights="imagenet" if pretrained else None, include_top=False, input_shape=(RESHAPED_SIZE, RESHAPED_SIZE, 3))
    elif architecture.lower() == 'EfficientNetB3'.lower():
        base_model = EfficientNetB3(weights="imagenet" if pretrained else None, include_top=False, input_shape=(RESHAPED_SIZE, RESHAPED_SIZE, 3))
    elif architecture.lower() == 'EfficientNetB4'.lower():
        base_model = EfficientNetB4(weights="imagenet" if pretrained else None, include_top=False, input_shape=(RESHAPED_SIZE, RESHAPED_SIZE, 3))
    elif architecture.lower() == 'EfficientNetB5'.lower():
        base_model = EfficientNetB5(weights="imagenet" if pretrained else None, include_top=False, input_shape=(RESHAPED_SIZE, RESHAPED_SIZE, 3))
    elif architecture.lower() == 'EfficientNetB6'.lower():
        base_model = EfficientNetB6(weights="imagenet" if pretrained else None, include_top=False, input_shape=(RESHAPED_SIZE, RESHAPED_SIZE, 3))
    elif architecture.lower() == 'EfficientNetB7'.lower():
        base_model = EfficientNetB7(weights="imagenet" if pretrained else None, include_top=False, input_shape=(RESHAPED_SIZE, RESHAPED_SIZE, 3))
    # Let's build the rest of the model
    preprocessing = tf.keras.Sequential([tf.keras.layers.Resizing(RESHAPED_SIZE, RESHAPED_SIZE)])
    # Specify how much we want to fine-tune
    if pretrained and finetune:
        # Let's take a look to see how many layers are in the base model
        print("Number of layers in the base model: ", len(base_model.layers))
        # Fine-tune from this layer onwards
        fine_tune_at = len(base_model.layers) // 3
        # Freeze all the layers before the `fine_tune_at` layer
        for layer in base_model.layers[:fine_tune_at]:
            layer.trainable = False
    elif pretrained:
        base_model.trainable = False
    # Build the classification head for regression--2 outputs, linear activation only
    classification_head = tf.keras.Sequential([
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(2)
        # No activation specified should give us a linear activation output
    ])
    # Start to put together the full model, specifying augmentations if desired
    inputs = tf.keras.Input(shape=(IMG_SIZE, IMG_SIZE, 3))
    if data_augmentation:
        x = data_augmentations(inputs)
    else:
        x = inputs
    x = preprocessing(x)
    x = base_model(x, training=True)
    outputs = classification_head(x)
    model = tf.keras.Model(inputs, outputs)
    # compile the model
    learning_rate = 0.0001
    # NOTE(review): lr_schedule is constructed but never passed to the
    # optimizer (the schedule line below is commented out) -- confirm intent.
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        learning_rate,
        decay_steps=100000,
        decay_rate=0.96)
    model.compile(
        # optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
        # THIS IS THE OTHER CHANGE--going to use MSE for regression output
        loss = tf.keras.losses.MeanSquaredError(),
        metrics = ['MeanAbsoluteError'],
        steps_per_execution = 1
    )
    return model
#Since our data generator has augmentation, we won't add it into the models here
try_models = {
# 'b0': build_efficientnet(architecture = 'EfficientNetB0', pretrained=True, finetune=True, data_augmentation=False),
# 'b1': build_efficientnet(architecture = 'EfficientNetB1', pretrained=True, finetune=True, data_augmentation=False),
'b2': build_efficientnet(architecture = 'EfficientNetB2', pretrained=True, finetune=True, data_augmentation=False)
# 'b3': build_efficientnet(architecture = 'EfficientNetB3', pretrained=True, finetune=True, data_augmentation=False)
# 'b4': build_efficientnet(architecture = 'EfficientNetB4', pretrained=True, finetune=True, data_augmentation=False)
# 'b5': build_efficientnet(architecture = 'EfficientNetB5', pretrained=True, finetune=True, data_augmentation=False),
# 'b6': build_efficientnet(architecture = 'EfficientNetB6', pretrained=True, finetune=True, data_augmentation=False),
# 'b7': build_efficientnet(architecture = 'EfficientNetB7', pretrained=True, finetune=True, data_augmentation=False),
}
# Define early stopping
early_stopping = callbacks.EarlyStopping(monitor="val_loss", patience=4)
# Get the date for later saving
from datetime import date
today = date.today()
date_string = today.strftime("%m%d%y")
# Let's fit all these models
# Evaluate on the val dataset the MSE for each
from sklearn.metrics import mean_squared_error
# y_pred = model.predict(X_test)
# print("MSE: %.4f" % mean_squared_error(y_test, y_pred))
batch_size = 32
epochs = 50
histories = {}
val_results = {}
for model_key, model in try_models.items():
# Define saving best model only checkpoint
checkpoint_filepath = os.path.join(f'gdrive/My Drive/Danino_Lab/Patterning_Scans/cheW_umoD_Expts/models_and_histories/{date_string}_{model_key}_alldata', 'ckpt') #, f'weights.{epoch:02d}-{val_loss:.2f}.hdf5')
# checkpoint_filepath = 'gdrive/My Drive/Danino_Lab/Patterning_Scans/cheW_umoD_Expts/{date_string}_{model_key}'
model_checkpoint_callback = callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor='val_loss',
mode='min',
save_best_only=True)
print(f'\n\n Training {model_key} \n\n')
histories[model_key] = model.fit(
train_datagen.flow(X_train, y_train, batch_size=batch_size),
epochs=epochs,
callbacks=[model_checkpoint_callback],
steps_per_epoch=int(len(X_train) / batch_size),
validation_data = (X_val, y_val))
# save the history
with open(f'gdrive/My Drive/Danino_Lab/Patterning_Scans/cheW_umoD_Expts/models_and_histories/{date_string}_{model_key}_all_data_history_file', 'wb') as file_pi:
pickle.dump(histories[model_key].history, file_pi)
# Store the MSE on the validation set as well
y_pred = model.predict(X_val)
val_results[model_key] = mean_squared_error(y_val, y_pred)
print(val_results)
y_pred = model.predict(X_test)
test_results = {}
test_results[model_key] = mean_squared_error(y_test, y_pred)
print(test_results)
"""# Review the results
Generate a plot of the training mean squared error and mean absolute errors throughout, add legend
Plot the MSE on val dataset for each model
"""
import seaborn as sns
sns.set_style('white')
sns.set_theme()
def plot_multiple(history, model_name, linecolor, ALPHA, MARKER):
    """Overlay one model's train/val curves on the current 1x2 figure.

    Left subplot: loss (MSE), with a marker at the best-validation epoch.
    Right subplot: mean absolute error.  Dashed lines are training curves,
    solid lines validation.  Relies on pyplot's current-figure state; call
    format_histories_plot afterwards to add titles/legends.

    Args:
        history: a Keras History.history dict (per-epoch metric lists).
        model_name: label prefix used in the legend.
        linecolor, ALPHA, MARKER: matplotlib styling for this model's curves.
    """
    # The history object contains results on the training and test
    # sets for each epoch
    acc = history['mean_absolute_error']
    val_acc = history['val_mean_absolute_error']
    loss = history['loss']
    val_loss = history['val_loss']
    # Epoch numbers are 1-based in the plots, hence the +1 offsets below.
    min_val_loss = min(val_loss)
    best_epoch = val_loss.index(min_val_loss) + 1
    max_loss = max(loss)
    # Get the number of epochs
    epochs_completed = len(loss)
    epochs_range = range(1,epochs_completed+1)
    # loss
    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, loss, color=linecolor, linestyle='dashed', alpha = ALPHA, label=f'Train {model_name}')
    plt.plot(epochs_range, val_loss, color=linecolor, alpha = ALPHA, label=f'Val {model_name}')
    # Mark the epoch with the lowest validation loss (the saved checkpoint).
    plt.scatter(x=best_epoch,y=min_val_loss,marker=MARKER,alpha = ALPHA,c=linecolor)
    plt.ylim([0, max_loss+0.05])
    # acc
    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, acc, color=linecolor, linestyle='dashed', alpha = ALPHA, label=f'Train {model_name}')
    plt.plot(epochs_range, val_acc, color=linecolor, alpha = ALPHA, label=f'Val {model_name}')
def format_histories_plot(max_epochs):
    """Apply titles, axis labels, tick ranges, the legend and the figure
    suptitle to the two-panel loss/MAE figure.

    Relies on the module-level `fig` created before this function is called.
    """
    # Left panel: MSE formatting.
    plt.subplot(1, 2, 1)
    plt.title('Mean Squared Error')
    plt.xlabel('Epoch', fontsize=10)
    plt.ylabel('MSQ', fontsize=10)
    plt.xticks(ticks=np.arange(1, max_epochs + 1, 4))
    plt.yticks(ticks=np.arange(0, 0.45, 0.1))
    plt.legend(facecolor='white', edgecolor='gray')

    # Right panel: MAE formatting (legend shown on the left panel only).
    plt.subplot(1, 2, 2)
    plt.title('Mean Absolute Error')
    plt.xlabel('Epoch', fontsize=10)
    plt.ylabel('MAE', fontsize=10)
    plt.xticks(ticks=np.arange(1, max_epochs + 1, 4))
    plt.yticks(ticks=np.arange(0, 0.6, 0.2))

    fig.suptitle('Regression on cheWumoD Images with Efficientnets trained on All Data', y=0.91, fontweight='bold')
# One large shared figure; plot_multiple() draws every model onto it.
fig, ax = plt.subplots(figsize = [30, 20])
idx = 0
palette = sns.color_palette("viridis", 25).as_hex()
#palette_2 = ['#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#46f0f0', '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080', '#ffffff', '#000000']
# Fixed high-contrast colors, one per model (repeats after 4 entries).
palette_2 = ['#e6194b', '#3cb44b', '#911eb4', '#46f0f0', '#e6194b', '#3cb44b', '#911eb4', '#46f0f0']
all_epochs_completed = list()
for model_key, history_obj in histories.items():
    model_name = model_key
    history = history_obj.history
    # Track how many epochs each run actually completed (for the x-ticks).
    eps = len(history['loss'])
    all_epochs_completed.append(eps)
    ALPHA = 1
    MARKER = 'o'
    LINESTYLE = 'solid'  # NOTE(review): unused — plot_multiple hard-codes its line styles
    linecolor = palette_2[idx]
    plot_multiple(history, model_name, linecolor, ALPHA, MARKER)
    idx += 1
max_epochs = np.amax(all_epochs_completed)
format_histories_plot(max_epochs)
plt.show()
"anjali@Anjalis-MacBook-Pro.local"
] | anjali@Anjalis-MacBook-Pro.local |
acfaf5dda6e06b01dcb40a040a6fa2a14cf04e07 | 8a2cd06a24f65e41f4b38a5e6e2cf045a8ad15fd | /HabNet_MC/train.py | 0171ec9b858bcb637931605e176815baf0621c19 | [] | no_license | RingBDStack/HabNet | fffb3606aa7b0c515555219188619e6bdf6706ce | 208efe26d06c96390b4689404f936f91ee4775ee | refs/heads/main | 2023-03-07T01:52:02.355389 | 2021-02-23T05:59:31 | 2021-02-23T05:59:31 | 306,551,517 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 13,558 | py | import tensorflow as tf
from datetime import datetime
from data_reader import DataReader
from model import Model
from utils import read_vocab, count_parameters, load_glove, create_folder_if_not_exists
import sklearn.metrics
import utils_plots
import matplotlib
matplotlib.use('Agg') # http://stackoverflow.com/questions/2801882/generating-a-png-with-matplotlib-when-display-is-undefined
import matplotlib.pyplot as plt
import os
import numpy as np
# Parameters
# ==================================================
FLAGS = tf.flags.FLAGS

# NOTE: each "(default: ...)" in the help text now matches the actual default
# passed to the DEFINE_* call — several were stale copies and misled users of
# --help (emb_size, num_classes, num_epochs, batch_size).
tf.flags.DEFINE_string("checkpoint_dir", 'checkpoints',
                       """Path to checkpoint folder""")
tf.flags.DEFINE_string("log_dir", 'logs',
                       """Path to log folder""")
tf.flags.DEFINE_integer("cell_dim", 50,
                        """Hidden dimensions of GRU cells (default: 50)""")
tf.flags.DEFINE_integer("att_dim", 100,
                        """Dimensionality of attention spaces (default: 100)""")
tf.flags.DEFINE_integer("emb_size", 100,
                        """Dimensionality of word embedding (default: 100)""")
tf.flags.DEFINE_integer("num_classes", 10,
                        """Number of classes (default: 10)""")
tf.flags.DEFINE_integer("num_checkpoints", 1,
                        """Number of checkpoints to store (default: 1)""")
tf.flags.DEFINE_integer("num_epochs", 50,
                        """Number of training epochs (default: 50)""")
tf.flags.DEFINE_integer("batch_size", 23,
                        """Batch size (default: 23)""")
tf.flags.DEFINE_integer("display_step", 20,
                        """Number of steps to display log into TensorBoard (default: 20)""")
tf.flags.DEFINE_float("learning_rate", 0.0005,
                      """Learning rate (default: 0.0005)""")
tf.flags.DEFINE_float("max_grad_norm", 5.0,
                      """Maximum value of the global norm of the gradients for clipping (default: 5.0)""")
tf.flags.DEFINE_float("dropout_rate", 0.5,
                      """Probability of dropping neurons (default: 0.5)""")
tf.flags.DEFINE_boolean("allow_soft_placement", True,
                        """Allow device soft device placement""")
# Create the checkpoint/log folders on first run.
if not tf.gfile.Exists(FLAGS.checkpoint_dir):
    tf.gfile.MakeDirs(FLAGS.checkpoint_dir)
if not tf.gfile.Exists(FLAGS.log_dir):
    tf.gfile.MakeDirs(FLAGS.log_dir)
# One TensorBoard writer per split so the curves can be compared side by side.
train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train')
valid_writer = tf.summary.FileWriter(FLAGS.log_dir + '/valid')
test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')
def loss_fn(labels, logits):
    """Softmax cross-entropy between integer labels and the model logits.

    Labels are one-hot encoded to FLAGS.num_classes depth; the scalar loss is
    also logged to TensorBoard under the tag 'loss'.
    """
    one_hot = tf.one_hot(labels, depth=FLAGS.num_classes)
    xent = tf.losses.softmax_cross_entropy(onehot_labels=one_hot, logits=logits)
    tf.summary.scalar('loss', xent)
    return xent
def train_fn(loss):
    """Build the RMSProp training op with global-norm gradient clipping.

    Returns (train_op, global_step). Also logs the pre-clip global gradient
    norm to TensorBoard and prints a parameter count for the trainable set.
    """
    trainable = tf.trainable_variables()
    count_parameters(trainable)

    # Clip by global norm to FLAGS.max_grad_norm before applying.
    grads = tf.gradients(loss, trainable)
    clipped, global_norm = tf.clip_by_global_norm(grads, FLAGS.max_grad_norm)
    tf.summary.scalar('global_grad_norm', global_norm)

    step = tf.train.get_or_create_global_step()
    optimizer = tf.train.RMSPropOptimizer(FLAGS.learning_rate)
    train_op = optimizer.apply_gradients(
        zip(clipped, trainable), name='train_op', global_step=step)
    return train_op, step
def eval_fn(labels, logits):
    """Accuracy ops for evaluation.

    Returns (batch_acc, total_acc, acc_update, metrics_init, predictions):
    per-batch accuracy (logged as 'accuracy'), the streaming accuracy and its
    update op, an initializer that resets the streaming counters, and the
    argmax class predictions.
    """
    preds = tf.argmax(logits, axis=-1)
    hits = tf.cast(tf.equal(preds, tf.cast(labels, tf.int64)), tf.float32)
    batch_acc = tf.reduce_mean(hits)
    tf.summary.scalar('accuracy', batch_acc)

    total_acc, acc_update = tf.metrics.accuracy(labels, preds, name='metrics/acc')
    # The streaming counters live in LOCAL_VARIABLES under "metrics";
    # re-running metrics_init resets them between train/valid/test passes.
    metrics_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope="metrics")
    metrics_init = tf.variables_initializer(var_list=metrics_vars)
    return batch_acc, total_acc, acc_update, metrics_init, preds
def save_optimized_presicion(all_y_true, all_y_pred, stats_graph_folder, name, epoch):
    """Append the multi-class "Optimized Precision" (OP) score for this epoch.

    OP = accuracy - sum_{i,j} |R_i - R_j| / (2 * (m - 1) * sum_i R_i), where
    R_i are the per-class recalls and m the number of classes scored. The
    score is printed and appended to <name>_optimized_precision.txt.

    Fixes over the original: the pairwise-recall matrix was built with a
    hard-coded class count of 10 and denominator 18 (= 2*(10-1)), which
    crashed with a shape mismatch for any other number of classes; dead
    locals (an unused classification_report/lines/l) are removed. Results
    are identical for the 10-class case.

    :param all_y_true: true integer labels
    :param all_y_pred: predicted integer labels
    :param stats_graph_folder: output folder for the metric file
    :param name: split name ('train'/'valid'/'test'), used in the filename
    :param epoch: current epoch (kept for signature compatibility; unused)
    """
    output_filepath = os.path.join(stats_graph_folder, '{0}_optimized_precision.txt'.format(name))
    acc = sklearn.metrics.accuracy_score(all_y_true, all_y_pred)
    recalls = sklearn.metrics.recall_score(all_y_true, all_y_pred, average=None)
    m = len(recalls)
    # Pairwise |R_i - R_j| over all ordered class pairs, via broadcasting.
    pairwise_diffs = np.abs(recalls[:, None] - recalls[None, :])
    op = acc - pairwise_diffs.sum() / (2 * (m - 1) * recalls.sum())
    print(name + " optimized precision: " + str(op))
    with open(output_filepath, 'a', encoding='utf-8') as fp:
        fp.write("{:.6f}\n".format(op))
def save_distance_measure(all_y_true, all_y_pred, stats_graph_folder, name, epoch):
    """Append a normalized label-distance score for this epoch.

    Score = 1 - mean(|y_pred - y_true|) / 9, i.e. 1.0 for perfect predictions,
    shrinking as predictions drift from the true label (9 is the largest
    possible gap for the 10-class rating scale). Printed and appended to
    <name>_distance_measure.txt.

    :param epoch: kept for signature compatibility; not used.
    """
    output_filepath = os.path.join(stats_graph_folder, '{0}_distance_measure.txt'.format(name))
    mean_abs_gap = np.mean(np.abs(np.array(all_y_pred) - np.array(all_y_true)))
    dist_final = 1.0 - mean_abs_gap / 9.0
    print(name + " distance measure: " + str(dist_final))
    with open(output_filepath, 'a', encoding='utf-8') as fp:
        fp.write("{:.6f}\n".format(dist_final))
def save_results(all_y_true, all_y_pred, stats_graph_folder, name, epoch):
    """Write per-epoch evaluation artifacts for one split.

    Produces: a reformatted sklearn classification report (percentages,
    tab-separated) appended to a per-epoch text file and printed; the plain
    accuracy appended to <name>_accuracy.txt; and a confusion-matrix heatmap
    saved as a PDF via utils_plots.heatmap.
    """
    output_filepath = os.path.join(stats_graph_folder, 'classification_report_for_epoch_{0:04d}_in_{1}.txt'.format(epoch, name))
    plot_format = 'pdf'
    # result_save_counter += 1
    # Rating labels 1..10 used as confusion-matrix tick labels.
    unique_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    # classification_report = sklearn.metrics.classification_report(labels, predictions, digits=4,
    #                                                               labels=unique_labels)
    classification_report = sklearn.metrics.classification_report(all_y_true, all_y_pred, digits=4)
    acc = sklearn.metrics.accuracy_score(all_y_true, all_y_pred)
    # Re-format sklearn's plain-text report: header row and trailing blank
    # line are skipped, metric columns are rescaled to 0-100 percentages, and
    # the support count is kept as-is.
    lines = classification_report.split('\n')
    classification_report = ['Accuracy: {:05.2f}%'.format(sklearn.metrics.accuracy_score(all_y_true, all_y_pred) * 100)]
    for line in lines[2: (len(lines) - 1)]:
        new_line = []
        # ' avg' -> '-avg' keeps "macro avg"/"weighted avg" as a single token.
        t = line.strip().replace(' avg', '-avg').split()
        if len(t) < 2: continue
        # Indent numeric class labels so they align under the avg rows.
        new_line.append((' ' if t[0].isdigit() else '') + t[0])
        new_line += ['{:05.2f}'.format(float(x) * 100) for x in t[1: len(t) - 1]]
        new_line.append(t[-1])
        classification_report.append('\t'.join(new_line))
    classification_report = '\n'.join(classification_report)
    print('\n\n' + classification_report + '\n', flush=True)
    with open(output_filepath, 'a', encoding='utf-8') as fp:
        fp.write(classification_report)
    output_filepath_acc = os.path.join(stats_graph_folder, '{0}_accuracy.txt'.format(name))
    with open(output_filepath_acc, 'a', encoding='utf-8') as f:
        f.write("{:.2f}\n".format(acc * 100))
    # save confusion matrix and generate plots
    confusion_matrix = sklearn.metrics.confusion_matrix(all_y_true, all_y_pred)
    #results['confusion_matrix'] = confusion_matrix.tolist()
    title = 'Confusion matrix for epoch {0} in {1}\n'.format(epoch, name)
    xlabel = 'Predicted'
    ylabel = 'True'
    xticklabels = yticklabels = unique_labels
    utils_plots.heatmap(confusion_matrix, title, xlabel, ylabel, xticklabels, yticklabels, figure_width=40,
                        figure_height=20, correct_orientation=True, fmt="%d", remove_diagonal=True)
    # NOTE(review): the filename template reuses {2} twice, yielding
    # "..._pdf.pdf" — looks unintended but is preserved; confirm.
    plt.savefig(os.path.join(stats_graph_folder,
                             'confusion_matrix_for_epoch_{0:04d}_in_{1}_{2}.{2}'.format(epoch, name,
                                                                                       plot_format)),
                dpi=300, format=plot_format, bbox_inches='tight')
    plt.close()
def main(_):
    """End-to-end training entry point.

    Builds the model with GloVe-initialized embeddings, then for each epoch
    runs a full train pass, a validation pass and a test pass, logging
    TensorBoard summaries, writing per-epoch metric files (accuracy, OP,
    distance measure, classification report) and checkpointing whenever the
    test accuracy improves.
    """
    vocab = read_vocab('data/ICLR_Review_all-w2i.pkl')
    glove_embs = load_glove('glove.6B.{}d.txt'.format(FLAGS.emb_size), FLAGS.emb_size, vocab)
    data_reader = DataReader(train_file='data/ICLR_Review_all-train.pkl',
                             dev_file='data/ICLR_Review_all-dev.pkl',
                             test_file='data/ICLR_Review_all-test.pkl')

    config = tf.ConfigProto(allow_soft_placement=FLAGS.allow_soft_placement)
    with tf.Session(config=config) as sess:
        model = Model(cell_dim=FLAGS.cell_dim,
                      att_dim=FLAGS.att_dim,
                      vocab_size=len(vocab),
                      emb_size=FLAGS.emb_size,
                      num_classes=FLAGS.num_classes,
                      dropout_rate=FLAGS.dropout_rate,
                      pretrained_embs=glove_embs)

        loss = loss_fn(model.labels, model.logits)
        train_op, global_step = train_fn(loss)
        batch_acc, total_acc, acc_update, metrics_init, predictions = eval_fn(model.labels, model.logits)
        summary_op = tf.summary.merge_all()

        sess.run(tf.global_variables_initializer())
        train_writer.add_graph(sess.graph)
        saver = tf.train.Saver(max_to_keep=FLAGS.num_checkpoints)

        print('\n{}> Start training'.format(datetime.now()))

        # Per-run output folder (timestamped) for metric files and plots.
        result_save_folder = str(datetime.now())
        output_folder = os.path.join('.', 'output')
        create_folder_if_not_exists(output_folder)
        stats_graph_folder = os.path.join(output_folder, result_save_folder)  # Folder where to save graphs
        create_folder_if_not_exists(stats_graph_folder)

        epoch = 0
        valid_step = 0
        test_step = 0
        # Shrink the eval batch size proportionally so valid/test passes take
        # a comparable number of steps to a training epoch.
        train_test_prop = len(data_reader.train_data) / len(data_reader.test_data)
        test_batch_size = int(FLAGS.batch_size / train_test_prop)
        best_acc = float('-inf')

        while epoch < FLAGS.num_epochs:
            epoch += 1
            print('\n{}> Epoch: {}'.format(datetime.now(), epoch))

            # ---- training pass ----
            sess.run(metrics_init)  # reset the streaming-accuracy counters
            all_labels = []
            all_y_pred = []
            for batch_docs, batch_labels in data_reader.read_train_set(FLAGS.batch_size, shuffle=True):
                _step, _, _loss, _acc, _, y_pred_batch = sess.run([global_step, train_op, loss, batch_acc, acc_update, predictions],
                                                                  feed_dict=model.get_feed_dict(batch_docs, batch_labels, training=True))
                all_labels += batch_labels
                #y_pred_batch_array = y_pred_batch.eval(session=sess)
                y_pred_batch_list = y_pred_batch.tolist()
                all_y_pred += y_pred_batch_list
                if _step % FLAGS.display_step == 0:
                    _summary = sess.run(summary_op, feed_dict=model.get_feed_dict(batch_docs, batch_labels))
                    train_writer.add_summary(_summary, global_step=_step)
            print('Training accuracy = {:.2f}'.format(sess.run(total_acc) * 100))
            save_results(all_labels, all_y_pred, stats_graph_folder, 'train', epoch)

            # ---- validation pass ----
            sess.run(metrics_init)
            all_valid_labels = []
            all_valid_y_pred = []
            for batch_docs, batch_labels in data_reader.read_valid_set(test_batch_size):
                _loss, _acc, _, valid_y_pred_batch = sess.run([loss, batch_acc, acc_update, predictions], feed_dict=model.get_feed_dict(batch_docs, batch_labels))
                all_valid_labels += batch_labels
                valid_y_pred_batch_list = valid_y_pred_batch.tolist()
                all_valid_y_pred += valid_y_pred_batch_list
                valid_step += 1
                if valid_step % FLAGS.display_step == 0:
                    _summary = sess.run(summary_op, feed_dict=model.get_feed_dict(batch_docs, batch_labels))
                    valid_writer.add_summary(_summary, global_step=valid_step)
            print('Validation accuracy = {:.2f}'.format(sess.run(total_acc) * 100))
            save_optimized_presicion(all_valid_labels, all_valid_y_pred, stats_graph_folder, 'valid', epoch)
            save_distance_measure(all_valid_labels, all_valid_y_pred, stats_graph_folder, 'valid', epoch)
            save_results(all_valid_labels, all_valid_y_pred, stats_graph_folder, 'valid', epoch)

            # ---- test pass ----
            sess.run(metrics_init)
            all_test_labels = []
            all_test_y_pred = []
            for batch_docs, batch_labels in data_reader.read_test_set(test_batch_size):
                _loss, _acc, _, test_y_pred_batch = sess.run([loss, batch_acc, acc_update, predictions], feed_dict=model.get_feed_dict(batch_docs, batch_labels))
                all_test_labels += batch_labels
                test_y_pred_batch_list = test_y_pred_batch.tolist()
                all_test_y_pred += test_y_pred_batch_list
                test_step += 1
                if test_step % FLAGS.display_step == 0:
                    _summary = sess.run(summary_op, feed_dict=model.get_feed_dict(batch_docs, batch_labels))
                    test_writer.add_summary(_summary, global_step=test_step)
            test_acc = sess.run(total_acc) * 100
            print('Testing accuracy = {:.2f}'.format(test_acc))
            save_optimized_presicion(all_test_labels, all_test_y_pred, stats_graph_folder, 'test', epoch)
            save_distance_measure(all_test_labels, all_test_y_pred, stats_graph_folder, 'test', epoch)
            save_results(all_test_labels, all_test_y_pred, stats_graph_folder, 'test', epoch)

            # NOTE(review): checkpoint selection is driven by TEST accuracy,
            # not validation accuracy — confirm this is intentional.
            if test_acc > best_acc:
                best_acc = test_acc
                saver.save(sess, FLAGS.checkpoint_dir)
                print('Best testing accuracy = {:.2f}'.format(best_acc))

        print("{} Optimization Finished!".format(datetime.now()))
        print('Best testing accuracy = {:.2f}'.format(best_acc))
if __name__ == '__main__':
    # Parse FLAGS and dispatch to main().
    tf.app.run()
| [
"noreply@github.com"
] | RingBDStack.noreply@github.com |
42bab51452a048ba617e3a805f0048d4492cbe4b | 6205a1a8c7fc27e23b69a2cfe0a4998ab1d9b01f | /LR/__init__.py | b6be70bf905e8e21b250a59546b2453719e560ae | [] | no_license | sliu1013/ML_Exercise | dbb6a8d081b7abc38b60790bb374f660d624de96 | a909e2290ba37261216ec3d8f8541601d5eec422 | refs/heads/master | 2020-04-17T07:34:04.395265 | 2019-01-18T09:10:41 | 2019-01-18T09:10:41 | 166,375,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | #!/usr/bin/env python
#coding=utf-8
# *******************************************************************
# Filename @ __init__.py
# Author @ LiuSong
# Create date @ 2019/1/8 18:09
# Email @
# Description @
# license @ (C) Copyright 2015-2018, DevOps Corporation Limited.
# ******************************************************************** | [
"liusong@cmss.chinamobile.com"
] | liusong@cmss.chinamobile.com |
4f7ed658cf910265ba6d8203ffa1366e2069ff3f | 65d844f57c93b97459ba58a7f8d31fcddaef2c57 | /example/data_reader.py | d859e4febe7c2ef2c5f74ecb408e32e3ed809e36 | [
"Apache-2.0"
] | permissive | tonywenuon/keras_dialogue_generation_toolkit | 797411838e8213422cce4f5ac94f4e98e56cc912 | 75d82e7a281cd17a70bd9905fcebf2b906a6deec | refs/heads/master | 2023-04-04T14:39:10.117825 | 2021-03-29T11:25:23 | 2021-03-29T11:25:23 | 215,433,666 | 24 | 2 | null | 2023-03-24T22:45:14 | 2019-10-16T01:55:53 | Python | UTF-8 | Python | false | false | 23,866 | py |
import sys, os
project_path = os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2])
if project_path not in sys.path:
sys.path.append(project_path)
import argparse
import numpy as np
from sklearn.utils import shuffle
from typing import Callable, Optional, Sequence, Iterable
from run_script.args_parser import seq2seq_attn_add_arguments
from commonly_used_code import config, helper_fn
class DataSet:
    def __init__(self, args):
        """Resolve dataset paths, load both vocabularies and count split sizes.

        :param args: parsed CLI namespace; must provide data_set, fact_number
            and conv_number.
        """
        self.args = args
        self.__set_file_path()
        # get global token and ids
        self.src_token_ids, self.src_id_tokens, self.src_vocab_size = self.__read_global_ids(self.src_global_token_path)
        self.tar_token_ids, self.tar_id_tokens, self.tar_vocab_size = self.__read_global_ids(self.tar_global_token_path)
        # Samples per split; filled in by __get_sample_numbers below.
        self.train_sample_num = 0
        self.valid_sample_num = 0
        self.test_sample_num = 0
        self.__get_sample_numbers()
def __set_file_path(self):
if self.args.data_set == 'example_data':
self.train_set_path = config.example_data_train_qa_path
self.train_conv_path = config.example_data_train_conv_path
self.train_sent_fact_path = config.example_data_train_sent_fact_path
self.valid_set_path = config.example_data_valid_qa_path
self.valid_conv_path = config.example_data_valid_conv_path
self.valid_sent_fact_path = config.example_data_valid_sent_fact_path
self.test_set_path = config.example_data_test_qa_path
self.test_conv_path = config.example_data_test_conv_path
self.test_sent_fact_path = config.example_data_test_sent_fact_path
self.src_global_token_path = config.example_data_global_token_path
self.tar_global_token_path = config.example_data_global_token_path
elif self.args.data_set == 'reddit':
self.train_set_path = config.reddit_train_qa_path
self.train_conv_path = config.reddit_train_conv_path
self.train_sent_fact_path = config.reddit_train_sent_fact_path
self.valid_set_path = config.reddit_valid_qa_path
self.valid_conv_path = config.reddit_valid_conv_path
self.valid_sent_fact_path = config.reddit_valid_sent_fact_path
self.test_set_path = config.reddit_test_qa_path
self.test_conv_path = config.reddit_test_conv_path
self.test_sent_fact_path = config.reddit_test_sent_fact_path
self.src_global_token_path = config.reddit_global_token_path
self.tar_global_token_path = config.reddit_global_token_path
elif self.args.data_set == 'de_en':
self.train_set_path = config.de_en_train_qa_path
self.valid_set_path = config.de_en_valid_qa_path
self.test_set_path = config.de_en_test_qa_path
self.train_conv_path = None
self.train_sent_fact_path = None
self.valid_conv_path = None
self.valid_sent_fact_path = None
self.test_conv_path = None
self.test_sent_fact_path = None
self.src_global_token_path = config.de_en_src_global_token_path
self.tar_global_token_path = config.de_en_tar_global_token_path
def __get_sample_numbers(self):
print('Getting total samples numbers...')
with open(self.train_set_path) as f:
for line in f:
self.train_sample_num += 1
with open(self.valid_set_path) as f:
for line in f:
self.valid_sample_num += 1
with open(self.test_set_path) as f:
for line in f:
self.test_sample_num += 1
# read all of the data to the memory. This is suitable for small data set
def read_file(self, file_type, max_src_len, max_tar_len,
max_fact_len=30, max_conv_len=30,
get_fact=False,
get_conv=False,
get_one_hot=False):
'''
:param file_type: This is supposed to be: train, valid, or test
:param max_src_len: This is maximem source (question) length
:param max_tar_len: This is maximem target (anwser) length
:param max_fact_len: This is maximem fact (external knowledge) length, should be the same with source
:param max_conv_len: This is maximem conversation (context) length
:param get_fact: This is a boolean value to indicate whether read fact file
:param get_conv: This is a boolean value to indicate whether read conv file
'''
assert(max_src_len > 0)
assert(max_tar_len > 0)
assert(max_fact_len > 0)
assert(max_conv_len > 0)
assert file_type == 'train' or file_type == 'valid' or file_type == 'test'
print('current file type: %s' % file_type)
src_len = max_src_len - config.src_reserved_pos
tar_len = max_tar_len - config.tar_reserved_pos
if file_type == 'train':
qa_path = self.train_set_path
conv_path = self.train_conv_path
fact_path = self.train_sent_fact_path
elif file_type == 'valid':
qa_path = self.valid_set_path
conv_path = self.valid_conv_path
fact_path = self.valid_sent_fact_path
elif file_type == 'test':
qa_path = self.test_set_path
conv_path = self.test_conv_path
fact_path = self.test_sent_fact_path
# read source and target
print(qa_path)
f = open(qa_path)
indexes = []
source_ids = []
target_ids = []
target_loss_ids = [] # Use to calculate loss. Only END sign, dont have START sign
for line in f:
elems = line.strip().split('\t')
if len(elems) < 3:
raise ValueError('Exceptd input to be 3 dimension, but received %d' % len(elems))
indexes.append(int(elems[0].strip()))
text = elems[1].strip()
seq = [self.src_token_ids.get(token, self.unk_id) for token in text.split()]
seq = seq[:src_len]
new_seq = helper_fn.pad_with_start_end(seq, max_src_len, self.start_id, self.end_id, self.pad_id)
source_ids.append(new_seq)
text = elems[2].strip()
seq = [self.tar_token_ids.get(token, self.unk_id) for token in text.split()]
seq = seq[:tar_len]
new_seq = helper_fn.pad_with_start(seq, max_tar_len, self.start_id, self.pad_id)
target_ids.append(new_seq)
new_seq = helper_fn.pad_with_end(seq, max_tar_len, self.end_id, self.pad_id)
target_loss_ids.append(new_seq)
f.close()
if get_one_hot == True:
target_one_hot = np.zeros((len(target_ids), len(target_ids[0]), self.vocab_size), dtype='int32')
for i, target in enumerate(target_ids):
for t, term_idx in enumerate(target):
if t > 0:
intaa = 0
target_one_hot[i, t - 1, term_idx] = 1
target_loss_ids = target_one_hot
pad_seqs = helper_fn.pad_with_pad([self.pad_id], max_fact_len, self.pad_id)
facts_ids = []
if get_fact == True:
print(fact_path)
with open(fact_path) as f:
for index, line in enumerate(f):
line = line.strip()
fact_ids = []
elems = line.split('\t')
# if there is no fact, add pad sequence
if elems[1] == config.NO_FACT:
fact_ids.append(pad_seqs)
else:
for text in elems[1:]:
seq = [self.src_token_ids.get(token, self.unk_id) for token in text.split()]
seq = seq[:max_fact_len]
new_seq = helper_fn.pad_with_pad(seq, max_fact_len, self.pad_id)
fact_ids.append(new_seq)
facts_ids.append(fact_ids)
# keep facts to be the same number. If there is no so many fact, use pad_id as fact to pad it.
facts_ids_tmp = []
for facts in facts_ids:
facts = facts[:self.args.fact_number]
facts = facts + [pad_seqs] * (self.args.fact_number - len(facts))
facts_ids_tmp.append(facts)
facts_ids = facts_ids_tmp
#pad_convs = [self.pad_id] * max_conv_len
pad_seqs = helper_fn.pad_with_pad([self.pad_id], max_conv_len, self.pad_id)
convs_ids = []
if get_conv == True:
print(conv_path)
with open(conv_path) as f:
for index, line in enumerate(f):
line = line.strip()
conv_ids = []
elems = line.split('\t')
# if there is no context, add pad sequence
if elems[1] == config.NO_CONTEXT:
conv_ids.append(pad_seqs)
else:
for text in elems[1:]:
seq = [self.src_token_ids.get(token, self.unk_id) for token in text.split()]
seq = seq[:max_conv_len]
new_seq = helper_fn.pad_with_pad(seq, max_conv_len, self.pad_id)
conv_ids.append(new_seq)
convs_ids.append(conv_ids)
# keep conv to be the same number. If there is no so many conv, use pad_id as conv to pad it.
convs_ids_tmp = []
for convs in convs_ids:
convs = convs[:self.args.conv_number]
convs = convs + [pad_seqs] * (self.args.conv_number- len(convs))
convs_ids_tmp.append(convs)
convs_ids = convs_ids_tmp
assert(len(source_ids) == len(indexes))
assert(len(source_ids) == len(target_ids))
if get_fact == True:
assert(len(source_ids) == len(facts_ids))
if get_conv == True:
assert(len(source_ids) == len(convs_ids))
## [[[ if for Zeyang to output ordered index, not shuffiling.
#if get_fact == True and get_conv == True:
# indexes, source_ids, target_ids, convs_ids, facts_ids = shuffle(indexes, source_ids, target_ids, convs_ids, facts_ids)
#elif get_fact == True:
# indexes, source_ids, target_ids, facts_ids = shuffle(indexes, source_ids, target_ids, facts_ids)
#else:
# indexes, source_ids, target_ids = shuffle(indexes, source_ids, target_ids)
## ]]]
return indexes, source_ids, target_ids, target_loss_ids, convs_ids, facts_ids
    # This is a data generator, which is suitable for large-scale data set
    def data_generator(self, file_type, model_type, max_src_len, max_tar_len,
                       max_fact_len=30, max_conv_len=30,
                       get_fact=False,
                       get_conv=False
                       ):
        '''
        Endless batch generator over one split; suitable for Keras fit_generator.

        :param file_type: This is supposed to be: train, valid, or test
        :param model_type: selects the yielded input tuple layout (see _fit_model)
        :param max_src_len: This is maximem source (question) length
        :param max_tar_len: This is maximem target (anwser) length
        :param max_fact_len: This is maximem fact (external knowledge) length, which should be the same with source
        :param max_conv_len: This is maximem conversation (context) length
        :param get_fact: This is a boolean value to indicate whether read fact file
        :param get_conv: This is a boolean value to indicate whether read conv file
        '''
        print('This is in data generator...')
        assert(max_src_len > 0)
        assert(max_tar_len > 0)
        assert(max_fact_len > 0)
        assert(max_conv_len > 0)
        assert file_type == 'train' or file_type == 'valid' or file_type == 'test'

        # Reserve positions for the START/END markers added during padding.
        src_len = max_src_len - config.src_reserved_pos
        tar_len = max_tar_len - config.tar_reserved_pos
        if file_type == 'train':
            qa_path = self.train_set_path
            conv_path = self.train_conv_path
            fact_path = self.train_sent_fact_path
        elif file_type == 'valid':
            qa_path = self.valid_set_path
            conv_path = self.valid_conv_path
            fact_path = self.valid_sent_fact_path
        elif file_type == 'test':
            qa_path = self.test_set_path
            conv_path = self.test_conv_path
            fact_path = self.test_sent_fact_path

        def _deal_qa(f):
            # Yield (source, decoder-input, loss-target) batches from the QA file.
            source_ids = []
            target_ids = []
            target_loss_ids = [] # Use to calculate loss. Only END sign, dont have START sign
            for index, line in enumerate(f):
                elems = line.strip().split('\t')
                text = elems[1].strip()
                seq = [self.src_token_ids.get(token, self.unk_id) for token in text.split()]
                #seq = [self.src_token_ids.get(token, self.pad_id) for token in text.split()]
                seq = seq[:src_len]
                new_seq = helper_fn.pad_with_start_end(seq, max_src_len, self.start_id, self.end_id, self.pad_id)
                source_ids.append(new_seq)
                text = elems[2].strip()
                seq = [self.tar_token_ids.get(token, self.unk_id) for token in text.split()]
                #seq = [self.tar_token_ids.get(token, self.pad_id) for token in text.split()]
                seq = seq[:tar_len]
                new_seq = helper_fn.pad_with_start(seq, max_tar_len, self.start_id, self.pad_id)
                target_ids.append(new_seq)
                new_seq = helper_fn.pad_with_end(seq, max_tar_len, self.end_id, self.pad_id)
                target_loss_ids.append(new_seq)
                if ((index + 1) % self.args.batch_size == 0):
                    res1 = np.asarray(source_ids)
                    res2 = np.asarray(target_ids)
                    res3 = np.asarray(target_loss_ids)
                    # Loss targets need a trailing singleton dim for sparse loss.
                    res3 = np.reshape(res3, (res3.shape[0], res3.shape[1], 1))
                    source_ids = []
                    target_ids = []
                    target_loss_ids = []
                    yield res1, res2, res3
            # Flush the final partial batch, if any.
            if len(source_ids) != 0:
                res1 = np.asarray(source_ids)
                res2 = np.asarray(target_ids)
                res3 = np.asarray(target_loss_ids)
                res3 = np.reshape(res3, (res3.shape[0], res3.shape[1], 1))
                source_ids = []
                target_ids = []
                target_loss_ids = []
                yield res1, res2, res3

        pad_fact_seqs = helper_fn.pad_with_pad([self.pad_id], max_fact_len, self.pad_id)

        def _deal_facts(f):
            # Yield batches of fact id tensors, padded to args.fact_number facts.
            facts_ids = []
            for index, line in enumerate(f):
                line = line.strip()
                fact_ids = []
                elems = line.split('\t')
                # if there is no fact, add pad sequence
                if elems[1] == config.NO_FACT:
                    fact_ids.append(pad_fact_seqs)
                else:
                    for text in elems[1:]:
                        seq = [self.src_token_ids.get(token, self.unk_id) for token in text.split()]
                        #seq = [self.src_token_ids.get(token, self.pad_id) for token in text.split()]
                        seq = seq[:max_fact_len]
                        new_seq = helper_fn.pad_with_pad(seq, max_fact_len, self.pad_id)
                        fact_ids.append(new_seq)
                # pad fact number
                fact_ids = fact_ids[:self.args.fact_number]
                fact_ids = fact_ids + [pad_fact_seqs] * (self.args.fact_number - len(fact_ids))
                facts_ids.append(fact_ids)
                if ((index + 1) % self.args.batch_size == 0):
                    res = np.asarray(facts_ids)
                    facts_ids = []
                    yield res
            if len(facts_ids) != 0:
                res = np.asarray(facts_ids)
                facts_ids = []
                yield res

        pad_conv_seqs = helper_fn.pad_with_pad([self.pad_id], max_conv_len, self.pad_id)

        def _deal_convs(f):
            # Yield batches of conversation-context tensors, padded to args.conv_number turns.
            convs_ids = []
            for index, line in enumerate(f):
                line = line.strip()
                conv_ids = []
                elems = line.split('\t')
                # if there is no context, add pad sequence
                if elems[1] == config.NO_CONTEXT:
                    conv_ids.append(pad_conv_seqs)
                else:
                    for text in elems[1:]:
                        seq = [self.src_token_ids.get(token, self.unk_id) for token in text.split()]
                        #seq = [self.src_token_ids.get(token, self.pad_id) for token in text.split()]
                        seq = seq[:max_conv_len]
                        new_seq = helper_fn.pad_with_pad(seq, max_conv_len, self.pad_id)
                        conv_ids.append(new_seq)
                conv_ids = conv_ids[:self.args.conv_number]
                conv_ids = conv_ids + [pad_conv_seqs] * (self.args.conv_number - len(conv_ids))
                convs_ids.append(conv_ids)
                if ((index + 1) % self.args.batch_size == 0):
                    res = np.asarray(convs_ids)
                    convs_ids = []
                    yield res
            if len(convs_ids) != 0:
                res = np.asarray(convs_ids)
                convs_ids = []
                yield res

        def _check_and_shuffle(source_ids, target_ids, target_loss_ids, convs_ids, facts_ids):
            # Sanity-check batch alignment, then co-shuffle whichever arrays are present.
            assert(len(source_ids) == len(target_ids))
            assert(len(source_ids) == len(target_loss_ids))
            if get_fact == True:
                assert(len(source_ids) == len(facts_ids))
            if get_conv == True:
                assert(len(source_ids) == len(convs_ids))
            if get_fact == True and get_conv == True:
                source_ids, target_ids, target_loss_ids, convs_ids, facts_ids = \
                    shuffle(source_ids, target_ids, target_loss_ids, convs_ids, facts_ids)
            elif get_fact == True:
                source_ids, target_ids, target_loss_ids, facts_ids = shuffle(source_ids, target_ids, target_loss_ids, facts_ids)
            else:
                source_ids, target_ids, target_loss_ids = shuffle(source_ids, target_ids, target_loss_ids)
            return (source_ids, target_ids, target_loss_ids, convs_ids, facts_ids)

        def _fit_model(res_src, res_tar, res_tar_loss, res_conv, res_fact):
            '''
            Please carefully choose the output type to fit with your model's inputs
            '''
            no_fact_list = ['pg_only_ques_transformer', 'universal_transformer', 'transformer', 'seq2seq', 'copy_mechanism']
            fact_list = ['decex_transformer', 'vspg_transformer', 'spg_transformer', 'pg_transformer', 'memnn']
            conv_fact_list = ['transformer_conv_fact', 'universal_transformer_conv_fact']
            if model_type in no_fact_list:
                # only return question and answer as inputs
                return ([res_src, res_tar], res_tar_loss)
            elif model_type in fact_list:
                # return question, answer and facts as inputs
                return ([res_src, res_tar, res_fact], res_tar_loss)
            elif model_type in conv_fact_list:
                # return question, answer, context and facts as inputs
                return ([res_src, res_tar, res_conv, res_fact], res_tar_loss)
            else:
                raise ValueError('The input model type: %s is not available. ' \
                                 'Please chech the file: data_reader.py line: _fit_model' % model_type)

        # Loop forever: each pass over the files is one epoch.
        # NOTE(review): the file handles opened below are never explicitly
        # closed between epochs (left to the garbage collector) — confirm.
        while True:
            source_ids, target_ids, target_loss_ids, convs_ids, facts_ids = None, None, None, None, None
            print(qa_path)
            f_qa = open(qa_path)
            res_src, res_tar, res_tar_loss, res_fact, res_conv = None, None, None, None, None
            if get_fact == True and get_conv == True:
                f_fact = open(fact_path)
                f_conv = open(conv_path)
                for ((source_ids, target_ids, target_loss_ids), facts_ids, convs_ids) in \
                        zip(_deal_qa(f_qa), _deal_facts(f_fact), _deal_convs(f_conv)):
                    res_src, res_tar, res_tar_loss, res_conv, res_fact = \
                        _check_and_shuffle(source_ids, target_ids, target_loss_ids, convs_ids, facts_ids)
                    yield _fit_model(res_src, res_tar, res_tar_loss, res_conv, res_fact)
            elif get_fact == True:
                f_fact = open(fact_path)
                for ((source_ids, target_ids, target_loss_ids), facts_ids) in \
                        zip(_deal_qa(f_qa), _deal_facts(f_fact)):
                    res_src, res_tar, res_tar_loss, res_conv, res_fact = \
                        _check_and_shuffle(source_ids, target_ids, target_loss_ids, convs_ids, facts_ids)
                    yield _fit_model(res_src, res_tar, res_tar_loss, res_conv, res_fact)
            else:
                for (source_ids, target_ids, target_loss_ids) in _deal_qa(f_qa):
                    res_src, res_tar, res_tar_loss, res_conv, res_fact = \
                        _check_and_shuffle(source_ids, target_ids, target_loss_ids, convs_ids, facts_ids)
                    yield _fit_model(res_src, res_tar, res_tar_loss, res_conv, res_fact)
def __read_global_ids(self, token_path):
f = open(token_path)
token_ids = dict()
id_tokens = dict()
vocab_size = 0
for line in f:
elems = line.strip().split('\t')
word = elems[0]
index = int(elems[1])
token_ids[word] = index
id_tokens[index] = word
vocab_size += 1
self.start_id = token_ids.get(config.START_TOKEN, -1)
self.end_id = token_ids.get(config.END_TOKEN, -1)
self.pad_id = token_ids.get(config.PAD_TOKEN, -1)
self.unk_id = token_ids.get(config.UNK_TOKEN, -1)
assert(self.start_id != -1)
assert(self.end_id != -1)
assert(self.pad_id != -1)
assert(self.unk_id != -1)
return token_ids, id_tokens, vocab_size
# Smoke-test driver: builds a DataSet from command-line args and prints the
# first example of every generated batch.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    seq2seq_attn_add_arguments(parser)
    args = parser.parse_args()
    ds = DataSet(args)
    #ds.read_file('test',
    #             max_src_len=30,
    #             max_tar_len=30,
    #             max_fact_len=30,
    #             max_conv_len=30,
    #             get_fact=True,
    #             get_conv=False,
    #             get_one_hot=True)
    # no_fact_list variant (active); the commented alternatives below unpack
    # the generator output for the fact_list and conv+fact configurations
    for index, ([source_ids, target_ids, facts_ids], target_loss_ids) in enumerate(ds.data_generator(
    # fact_list
    #for index, ([source_ids, target_ids, facts_ids], target_loss_ids) in enumerate(ds.data_generator(
    # conv fact list
    #for index, ([source_ids, target_ids, convs_ids, facts_ids], target_loss_ids) in enumerate(ds.data_generator(
            'test', 'decex_transformer',
            max_src_len=30,
            max_tar_len=30,
            max_fact_len=30,
            get_fact=True,
            )):
        print('*' * 100)
        print(index)
        #print(len(source_ids))
        #print(len(target_ids))
        #print(len(target_loss_ids))
        # only inspect the first example of each batch
        idx = 0
        print('source: ', source_ids[idx])
        print('target: ', target_ids[idx])
        print('target loss: ',target_loss_ids[idx])
        print('facts: ', facts_ids[idx])
        #print(len(facts_ids))
        #print(len(facts_ids[2]))
        #print(len(facts_ids[5]))
        #print(len(convs_ids))
        #print(len(convs_ids[4]))
        #print(len(convs_ids[5]))
| [
"you@example.com"
] | you@example.com |
0f01bd122aff616cc4566f5b04312038457abcf5 | 739371c6439e98007e223f469620a94f414335b7 | /contacts.py | bece8ba7b1bf11d081b9f966fd97fc12e13de887 | [] | no_license | SoftRaven/DevOpsCourse | ce45715cadeeff12bff38a6a1eabe5975f39e2e1 | 1487d72a7ed0a625a799dd0557e9a27fe2ee673d | refs/heads/master | 2023-01-11T07:14:10.018387 | 2020-11-13T07:16:48 | 2020-11-13T07:16:48 | 312,483,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | class Contacts:
    def __init__(self):
        """Create an empty contact list.

        NOTE(review): the "adding notes" message looks copy-pasted from a
        Notes class -- confirm the intended text.
        """
        self.contact_list = []  # backing store for the contact entries
        print("adding notes")
"arseniy.myak@gmail.com"
] | arseniy.myak@gmail.com |
efa40ee0e6b20a910e49d9e974fb84bb28db98a6 | 092a4ebf83943f2278fd2c4f447a8acac0960d2d | /tests/pages/mobile/user_albums_page.py | f29ca8d4db6f8855ca50bad8f6ab82866bd1bb4e | [] | no_license | akenoq/homework-4 | 3be9270a54aad2b5d1df45da106600f04e49afe6 | d54239092cf4540a6d63ed3615c3c4c3dbde6dd9 | refs/heads/master | 2020-03-19T15:06:16.270474 | 2018-05-25T16:15:24 | 2018-05-25T16:15:24 | 131,852,660 | 0 | 0 | null | 2018-05-02T13:13:47 | 2018-05-02T13:13:47 | null | UTF-8 | Python | false | false | 1,402 | py | from tests.pages.mobile.like_component import LikeComponent
from tests.pages.mobile.page import Page, Component
from tests.utils.waits import wait_until_url_changes
class UserAlbumsPage(Page):
    """Page object for the mobile "user albums" listing."""
    # relative URL that opens the albums listing
    PATH = '/dk?st.cmd=userAlbums'
    @property
    def albums_list(self):
        """Component wrapping the grid of album tiles."""
        return AlbumsList(self.driver)
    @property
    def header(self):
        """Component wrapping the page header (holds the create button)."""
        return AlbumsHeader(self.driver)
class AlbumsHeader(Component):
    """Header section of the user-albums page."""
    # CSS class of the "create album" button
    CREATE_BUTTON = 'addition-button'
    @wait_until_url_changes
    def create_album(self):
        """Click the create-album button; the decorator waits for navigation."""
        self.driver.find_element_by_class_name(self.CREATE_BUTTON).click()
class AlbumsList(Component):
    """Grid of album tiles shown on the user-albums page."""
    ITEM = 'photos_album-grid-w'
    TITLE = 'albm'
    def includes(self, album_name):
        """Return True when an album titled *album_name* is displayed."""
        titles = self.driver.find_elements_by_class_name(self.TITLE)
        return any(title.text == album_name for title in titles)
    @property
    def first(self):
        # NOTE(review): position 2 is presumably the first real album tile
        # in the <ul>; confirm against the page markup
        return AlbumItem(self.driver, 2)
class AlbumItem(Component):
    """A single album tile, addressed by its position in the list."""
    # XPath template for a tile's root plus selectors for its like controls
    BASE = '//ul[@id="user-albums"]/li[{}]'
    LIKE = '//a[@data-func="performLike"]'
    CANCEL_LIKE = '//a[@data-func="unReact"]'
    LIKES_COUNT = '//span[@class="ecnt"]'
    def __init__(self, driver, id):
        """Bind the component to the list item at position *id*."""
        super().__init__(driver)
        self.id = id  # position of the tile inside the <ul>
        self.base = self.BASE.format(id)  # XPath of this tile's root element
    @property
    def like(self):
        """Like-control component scoped to this tile."""
        return LikeComponent(self.driver, self.base)
| [
"kiryanenkoav@mail.ru"
] | kiryanenkoav@mail.ru |
7169b4896ba5fecee91e361b5cb2a4f7d43ca8fc | 98bce760b5a563d563ebea4a5fcebc0d6eb0272f | /Student_Management_System (1).py | 80d0faae94e6d5eb260c108f9cbfa6919eead79b | [] | no_license | vaishnavigavi/student-management-system | 126338d7e2ea59fdf1570797819517495f81a0d8 | b52a1e60afd7873bb065c9dd54e300753d9b0b11 | refs/heads/main | 2023-06-27T08:54:31.986973 | 2021-07-25T17:04:17 | 2021-07-25T17:04:17 | 389,399,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,234 | py | from tkinter import *
from tkinter.messagebox import *
from tkinter.scrolledtext import *
from sqlite3 import *
import matplotlib.pyplot as plt
import requests
from tkinter import ttk
import bs4
def add_btn():
    """Switch from the main menu to the Add-student window."""
    root.withdraw()
    add_window.deiconify()
def view_btn():
    """Open the View window and list every student record from kit.db.

    Falls back to the main menu (with a message box) when the table is empty.
    """
    root.withdraw()
    view_window.deiconify()
    info = ""
    scrolled_text.delete(1.0, END)
    # Pre-bind con: if connect() itself raises, the finally clause below
    # would otherwise hit an unbound local name (bug in the original).
    con = None
    try:
        con = connect("kit.db")
        cursor = con.cursor()
        sql = "select * from student"
        cursor.execute(sql)
        data = cursor.fetchall()
        if data == []:
            showinfo("Message", "No record available")
            if con is not None:
                con.close()
            view_window.withdraw()
            root.deiconify()
        else:
            for d in data:
                info = info + "R_no = " + str(d[0]) + ", Name = " + str(d[1]) + ", Marks = " + str(d[2]) + "\n\n"
            scrolled_text.insert(INSERT, info)
    except Exception as e:
        showerror("Issue", e)
    finally:
        if con is not None:
            con.close()
def update_btn():
    """Switch from the main menu to the Update-student window."""
    root.withdraw()
    update_window.deiconify()
def delete_btn():
    """Switch from the main menu to the Delete-student window."""
    root.withdraw()
    delete_window.deiconify()
def back_add():
    """Return from the Add window to the main menu."""
    add_window.withdraw()
    root.deiconify()
def back_view():
    """Return from the View window to the main menu."""
    view_window.withdraw()
    root.deiconify()
def back_update():
    """Return from the Update window to the main menu."""
    update_window.withdraw()
    root.deiconify()
def back_delete():
    """Return from the Delete window to the main menu."""
    delete_window.withdraw()
    root.deiconify()
#-----------------------------ADD----------------------------------------------------------------------
def add():
    """Validate the Add-window entries and insert the student into kit.db.

    Bound to the Save button of the Add window; reads the three global
    entry widgets and reports the outcome in a message box.
    """
    def clear_entries():
        # wipe all three entry boxes after a rejected or attempted insert
        add_window_ent_rno.delete(0, END)
        add_window_ent_name.delete(0, END)
        add_window_ent_marks.delete(0, END)

    def is_int(text):
        # True when *text* parses as a (possibly signed) integer
        try:
            int(text)
            return True
        except ValueError:
            return False

    rno_text = add_window_ent_rno.get()
    name_text = add_window_ent_name.get()
    marks_text = add_window_ent_marks.get()
    if rno_text == "" and name_text == "" and marks_text == "":
        showerror("OOPS!", "Please fill all the details")
    elif rno_text == "":
        showerror('OOPS!', "Rollno. cannot be blank")
        clear_entries()
    elif name_text == "":
        showerror('OOPS!', "Name cannot be blank")
        clear_entries()
    elif marks_text == "":
        showerror('OOPS!', "Marks cannot be blank")
        clear_entries()
    elif not is_int(rno_text):
        # the original tested .isalpha(), so mixed input such as "12a"
        # slipped through and crashed the int() conversion below
        showerror('OOPS!', "Roll number can have integers only")
        clear_entries()
    elif int(rno_text) <= 0:
        showerror('OOPS!', "Roll number cannot be negative")
        clear_entries()
    elif len(name_text) < 2:
        showerror('OOPS!', "Name should have atleast two alphabet")
        clear_entries()
    elif not name_text.replace(" ", "").isalpha():
        showerror('OOPS!', "Name can't consist of digits")
        clear_entries()
    elif not marks_text.isdigit():
        showerror('OOPS!', "Marks can be integers only")
        clear_entries()
    elif int(marks_text) < 0:
        showerror('OOPS!', "Marks can't be negative")
        clear_entries()
    elif int(marks_text) > 100:
        showerror('OOPS!', "Marks can't be greater than 100")
        clear_entries()
    else:
        con = None
        try:
            rno = int(rno_text)
            name = name_text
            marks = int(marks_text)
            con = connect("kit.db")
            cursor = con.cursor()
            # parameterised query: the original built SQL via %-formatting,
            # which breaks on names containing quotes (SQL injection)
            cursor.execute("insert into student values(?, ?, ?)",
                           (rno, name, marks))
            con.commit()
            showinfo("Success", "Record inserted")
            clear_entries()
        except Exception as e:
            showerror("Issue", e)
            clear_entries()
        finally:
            if con is not None:
                con.close()
#---------------------------------Update------------------------------------------------------------------------
def update():
    """Validate the Update-window entries and rewrite that student's row.

    Bound to the Save button of the Update window; warns when the roll
    number does not exist.
    """
    def clear_entries():
        # wipe all three entry boxes after a rejected or attempted update
        update_window_ent_rno.delete(0, END)
        update_window_ent_name.delete(0, END)
        update_window_ent_marks.delete(0, END)

    def is_int(text):
        # True when *text* parses as a (possibly signed) integer
        try:
            int(text)
            return True
        except ValueError:
            return False

    rno_text = update_window_ent_rno.get()
    name_text = update_window_ent_name.get()
    marks_text = update_window_ent_marks.get()
    if rno_text == "" and name_text == "" and marks_text == "":
        showerror("OOPS!", "Please fill all the details")
    elif rno_text == "":
        showerror('OOPS!', "Rollno. cannot be blank")
        clear_entries()
    elif name_text == "":
        showerror('OOPS!', "Name cannot be blank")
        clear_entries()
    elif marks_text == "":
        showerror('OOPS!', "Marks cannot be blank")
        clear_entries()
    elif not is_int(rno_text):
        # the original tested .isalpha(), so mixed input such as "12a"
        # slipped through and crashed the int() conversion below
        showerror('OOPS!', "Roll number can have integers only")
        clear_entries()
    elif int(rno_text) <= 0:
        showerror('OOPS!', "Roll number can't be negative")
        clear_entries()
    elif len(name_text) < 2:
        showerror('OOPS!', "Name should have atleast two alphabet")
        clear_entries()
    elif not name_text.replace(" ", "").isalpha():
        showerror('OOPS!', "Name can't consist of digits")
        clear_entries()
    elif not marks_text.isdigit():
        showerror('OOPS!', "Marks can be integers only")
        clear_entries()
    elif int(marks_text) < 0:
        showerror('OOPS!', "Marks can't be negative")
        clear_entries()
    elif int(marks_text) > 100:
        showerror('OOPS!', "Marks can't be greater than 100")
        clear_entries()
    else:
        con = None
        try:
            rno = int(rno_text)
            name = name_text
            marks = int(marks_text)
            con = connect("kit.db")
            cursor = con.cursor()
            # parameterised query instead of the original %-formatted SQL
            cursor.execute("update student set name = ?, marks = ? where rno = ?",
                           (name, marks, rno))
            if cursor.rowcount > 0:
                con.commit()
                showinfo("Success", "Details updated successfully")
                clear_entries()
            else:
                showwarning('OOPS!', "Record does not exist")
                clear_entries()
        except Exception as e:
            showerror("Issue", e)
        finally:
            if con is not None:
                con.close()
#------------------------------------DELETE---------------------------------------------------------------------------
def delete():
    """Validate the Delete-window roll number and remove that student row."""
    def clear_entry():
        delete_window_ent_rno.delete(0, END)

    def is_int(text):
        # True when *text* parses as a (possibly signed) integer
        try:
            int(text)
            return True
        except ValueError:
            return False

    con = None
    rno_text = delete_window_ent_rno.get()
    if rno_text == "":
        showerror('OOPS!', "Please enter roll number")
    elif not is_int(rno_text):
        # the original tested .isalpha(), so mixed input such as "12a"
        # slipped through and crashed the int() conversion below
        showerror('OOPS!', "Roll number can consist of integers only")
        clear_entry()
    elif int(rno_text) <= 0:
        showerror('OOPS!', "Roll number can't be negative")
        clear_entry()
    else:
        try:
            con = connect("kit.db")
            cursor = con.cursor()
            rno = int(rno_text)
            # parameterised query instead of the original %-formatted SQL
            cursor.execute("delete from student where rno = ?", (rno,))
            if cursor.rowcount > 0:
                con.commit()
                showinfo("Success", "Student deleted successfully :)")
                clear_entry()
            else:
                showerror("Failure", "Student does not exist")
                clear_entry()
        except Exception as e:
            showerror("Issue", e)
            clear_entry()
        finally:
            if con is not None:
                con.close()
#--------------------------------CHARTS----------------------------------------------------------------------------------
def charts():
    """Plot a bar chart of every student's marks, labelled by name."""
    list_marks = []
    list_names = []
    con = None
    try:
        con = connect('kit.db')
        cursor = con.cursor()
        # One query keeps each name paired with its own marks. The original
        # issued two independent SELECTs (marks, then names), which relies on
        # the rows coming back in the same order both times.
        cursor.execute("select name, marks from student")
        for name, marks in cursor.fetchall():
            list_names.append(str(name))
            list_marks.append(int(str(marks)))
    except Exception as e:
        showerror("Issue", e)
    finally:
        if con is not None:
            con.close()
    # colours repeat every four bars
    plt.bar(list_names, list_marks, width = 0.6, color = ['red', 'green', 'cyan', 'orange'])
    plt.title("Batch Information!")
    plt.xlabel("Students")
    plt.ylabel("Marks")
    plt.show()
#-----------------------------LOCATION_LABEL--------------------------------------
# Resolve the user's city from their IP address (shown in the location label).
try:
    wa="https://ipinfo.io/"
    res=requests.get(wa)
    #print(res)
    data=res.json()
    city=data['city']
except Exception as e:
    showerror("Issue",e)
    # fallback: the main-window label concatenates `city` unconditionally,
    # so leaving it unbound crashed the UI whenever the request failed
    city = "unknown"
#-----------------------------Temperature_Label------------------------------------
# Fetch the current temperature (metric) for Kalyan from OpenWeatherMap.
try:
    a1 = "http://api.openweathermap.org/data/2.5/weather?units=metric"
    a2 = "&q=kalyan"
    a3 = "&appid=" + "c6e315d09197cec231495138183954bd"
    wa = a1 + a2 + a3
    res = requests.get(wa)
    #print(res)
    data = res.json()
    temp = data['main']['temp']
    #print(temp)
except Exception as e:
    print("Issue",e)
    # fallback: the temperature label reads `temp` unconditionally, so the
    # original raised NameError when the request failed
    temp = "N/A"
#----------------------------------QOTD--------------------------------------------
# Scrape the quote of the day from brainyquote (alt text of the QOTD image).
try:
    wa="https://www.brainyquote.com/quote_of_the_day"
    res=requests.get(wa)
    data = bs4.BeautifulSoup(res.text, 'html.parser')
    #print(data)
    info = data.find('img', {'class' : 'p-qotd'})
    #print(info)
    qotd = info['alt']
    #print(qotd)
except Exception as e:
    print("Issue",e)
    # fallback: the QOTD label concatenates `qotd` unconditionally, so the
    # original raised NameError when the request or the scrape failed
    qotd = ""
#----------------------------------SMS Window-------------------------------------------
# Main menu window; reads the `city`, `temp` and `qotd` globals fetched above.
root=Tk()
root.geometry('500x530+400+100')
root.title('Student.Management.System - Singhaniya')
root.resizable(height=False , width=False)
root.configure(bg='honeydew2')
f=('Calibri',20,'bold ')
f1=('Arial',20)
# One button per action; each command hides this window and shows its own.
btn_add=Button(root,text='Add',width=12,font=f,relief='solid',borderwidth=1,command=add_btn)
btn_view=Button(root,text='View',width=12,font=f,relief='solid',borderwidth=1,command=view_btn)
btn_update=Button(root,text='Update',width=12,font=f,relief='solid',borderwidth=1,command=update_btn)
btn_delete=Button(root,text='Delete',width=12,font=f,relief='solid',borderwidth=1,command=delete_btn)
btn_charts=Button(root,text='Charts',width=12,font=f,relief='solid',borderwidth=1,command=charts)
lbl_location=Label(root,text='Location:'+city,pady=5,width=31,font=f1,bg='honeydew2',relief='solid',borderwidth=1,anchor="w")
lbl_temp=Label(root,text='Temp:'+str(temp)+"\u00B0"+"C",padx=15,pady=1,font=f1,bg='honeydew2')
lbl_qotd=Label(root,text='QOTD:'+qotd,width=31,height=3,font=f1,bg='honeydew2',relief='solid',borderwidth=1,anchor="w",wraplength = 500)
btn_add.pack(pady=10)
btn_view.pack(pady=0)
btn_update.pack(pady=10)
btn_delete.pack(pady=0)
btn_charts.pack(pady=10)
# Info labels sit at fixed pixel positions under the buttons.
lbl_location.place(x=0,y=375)
lbl_temp.place(x=265,y=380)
lbl_qotd.place(x=0,y=423)
#----------------------------------------ADD WINDOW----------------------------------------------------------------------
# Add-student window: three labelled entries plus Save/Back buttons.
# Built once at startup and hidden until add_btn() shows it.
add_window = Toplevel(root)
add_window.title("Add St.")
add_window.geometry("500x600+400+25")
add_window.configure(bg='lavender')
f=('Calibri',20,'bold ')
add_window_lbl_rno = Label(add_window, text = "Enter roll number", font=f,bg='lavender')
add_window_ent_rno = Entry(add_window, bd = 5, font=f,relief='solid',borderwidth=1)
add_window_lbl_name = Label(add_window, text = "Enter name", font=f,bg='lavender')
add_window_ent_name = Entry(add_window, bd = 5, font=f,relief='solid',borderwidth=1)
add_window_lbl_marks = Label(add_window, text = "Enter marks", font=f,bg='lavender')
add_window_ent_marks = Entry(add_window, bd = 5, font=f,relief='solid',borderwidth=1)
add_window_btn_save = Button(add_window, text = 'Save',font = ('Arial', 19, 'bold'), width = 10, command = add)
add_window_btn_back = Button(add_window, text = 'Back',font = ('Arial', 19, 'bold'), width = 10,command = back_add)
add_window_lbl_rno.pack(pady = 10)
add_window_ent_rno.pack(pady = 10)
add_window_lbl_name.pack(pady = 10)
add_window_ent_name.pack(pady = 10)
add_window_lbl_marks.pack(pady = 10)
add_window_ent_marks.pack(pady = 10)
add_window_btn_save.pack(pady = 5)
add_window_btn_back.pack(pady = 5)
add_window.withdraw()
#---------------------------------------VIEW WINDOW-------------------------------------------------------------------
# View-students window: a scrolled text area filled by view_btn().
view_window = Toplevel(root)
view_window.resizable(height=False , width=False)
view_window.title("View student")
view_window.geometry("500x600+400+25")
view_window.configure(bg='yellow')
f=('Calibri',20,'bold ')
scrolled_text = ScrolledText(view_window, width = 40, height = 15, font = ("Arial", 16, "bold"))
view_window_btn_back = Button(view_window, text = 'Back', borderwidth = 9, font=f, width = 10,command = back_view)
scrolled_text.pack(pady = 10)
view_window_btn_back.pack(pady = 10)
view_window.withdraw()
#-------------------------------------------UPDATE WINDOW----------------------------------------------------------------------------------------
# Update-student window: same three entries as the Add window, wired to update().
update_window = Toplevel(root)
update_window.resizable(height=False , width=False)
update_window.title('Update St.')
update_window.geometry('500x500+400+100')
update_window.configure(bg='peach puff')
update_window_lbl_rno = Label(update_window, text = "Enter roll number", font=f,bg='peach puff')
update_window_ent_rno = Entry(update_window, bd = 5, font=f,relief='solid',borderwidth=1)
update_window_lbl_name = Label(update_window, text = "Enter name", font=f,bg='peach puff')
update_window_ent_name = Entry(update_window, bd = 5, font=f,relief='solid',borderwidth=1)
update_window_lbl_marks = Label(update_window, text = "Enter marks", font=f,bg='peach puff')
update_window_ent_marks = Entry(update_window, bd = 5, font=f,relief='solid',borderwidth=1)
update_window_btn_save = Button(update_window, text = 'Save',font=("Arial",19,'bold'), width = 10,command = update)
update_window_btn_back = Button(update_window, text = 'Back',font=("Arial",19,'bold'), width = 10,command = back_update)
update_window_lbl_rno.pack(pady = 10)
update_window_ent_rno.pack(pady = 10)
update_window_lbl_name.pack(pady = 10)
update_window_ent_name.pack(pady = 10)
update_window_lbl_marks.pack(pady = 10)
update_window_ent_marks.pack(pady = 10)
update_window_btn_save.pack(pady = 5)
update_window_btn_back.pack(pady = 5)
update_window.withdraw()
#---------------------------------DELETE WINDOW-------------------------------------------------------------------------------------
# Delete-student window: a single roll-number entry wired to delete().
delete_window = Toplevel(root)
delete_window.resizable(height=False , width=False)
delete_window.title('Delete St.')
delete_window.geometry('500x500+400+100')
delete_window.configure(bg='LightSteelBlue1')
f=('Calibri',20,'bold ')
delete_window_lbl_rno = Label(delete_window, text = "Enter roll number", font=f,bg='LightSteelBlue1')
delete_window_ent_rno = Entry(delete_window, bd = 5, font =f,relief='solid',borderwidth=1)
delete_window_btn_save = Button(delete_window, text='Save',font = ('Arial', 19, 'bold'), width = 10,command = delete)
delete_window_btn_back = Button(delete_window, text='Back',font = ('Arial', 19, 'bold'), width = 10,command = back_delete)
delete_window_lbl_rno.pack(pady = 10)
delete_window_ent_rno.pack(pady = 10)
delete_window_btn_save.pack(pady = 5)
delete_window_btn_back.pack(pady =5)
delete_window.withdraw()
#-----------------------------------------------------------------------------------------------------------------------------------------
# Hand control to Tk's event loop; runs until the main window is closed.
root.mainloop()
| [
"noreply@github.com"
] | vaishnavigavi.noreply@github.com |
47608734e7a96721c677be5e5504dc4038801690 | d3d5b933fe0672ba141b3cfde0ad942438b38304 | /Exam-07may17/footbalLeague.py | 17357e9a1bca92ada008bf3e43797b8b5226a7c6 | [
"MIT"
] | permissive | wesenu/Python-3 | 065aa49b7978c6f0cc1ebdd364d7b02059ab4dc6 | 55163496dac452a7110b7f76edc6894ee195f1fe | refs/heads/master | 2023-03-15T11:52:54.877026 | 2018-08-20T11:56:34 | 2018-08-20T11:56:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | stadiumCapacity = int(input())
fansCount = int(input())
# tally of votes per sector letter; any other input is ignored,
# exactly as in the original elif chain
sector_votes = {'A': 0, 'B': 0, 'V': 0, 'G': 0}
for _ in range(fansCount):
    choice = input().upper()
    if choice in sector_votes:
        sector_votes[choice] += 1
# share of the attending fans supporting each sector, in percent
a = sector_votes['A'] / fansCount * 100
b = sector_votes['B'] / fansCount * 100
v = sector_votes['V'] / fansCount * 100
g = sector_votes['G'] / fansCount * 100
# overall stadium occupancy in percent (stadiumCapacity is read above)
occupancy = fansCount / stadiumCapacity * 100
print('{0:.2f}% \n{1:.2f}%\n{2:.2f}% \n{3:.2f}%\n{4:.2f}%'
      .format(a, b, v, g, occupancy))
"nikolay.vutov.nv@gmail.com"
] | nikolay.vutov.nv@gmail.com |
a1e2778620208756f91bf8adc8a60a96acb8081a | e8051b081aceb54e23d518e75e61c8fa21b696d8 | /build/lib/feature_space_tree/attributes/filters_terms_config.py | 79d7dadc20c5afc6803faa6ad4eca31abaaba80a | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jesusmiguelgarcia/FSTmikes | b6d9377e761675db86b1dca0bfd241d9ae975cb4 | 34d68c979b0c019510db87793e5870eea8f9154f | refs/heads/master | 2021-01-20T22:19:03.381529 | 2016-07-08T17:08:04 | 2016-07-08T17:08:04 | 61,218,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,517 | py | #!/usr/local/bin/python
# coding: utf-8
# Copyright (C) 2011-2012 FeatureSpaceTree Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# FeatureSpaceTree: Filters module
#
# Author: Adrian Pastor Lopez-Monroy <pastor@ccc.inaoep.mx>
# URL: <https://github.com/beiceman/FeatureSpaceTree>
#
# Language Technologies Lab,
# Department of Computer Science,
# Instituto Nacional de Astrofísica, Óptica y Electrónica
#
# For license information, see:
# * The header of this file
# * The LICENSE.TXT included in the project dir
# ==============================================================================
# ------------------------------------------------------------------------------
# The REALLY important classes here are all that contains the name vocabulary. This
# is since they are used when the PROPROCESSING_OPTION is set to SIMPLE, this
# means that we will process the terms as SETs and as Dictionaries (nltk FDIST).
# The option FULL we would have FILTER the tokens just as they are retrieved
# so, the preprocessing is slower.
# ------------------------------------------------------------------------------
import json
from abc import ABCMeta, abstractmethod
from filters_terms import *
class EnumFiltersVocabulary(object):
    """Symbolic integer codes for the vocabulary filters; consumed by
    FactorySimpleFilterVocabulary.create()."""
    FIXED_TOP = 0
    PERCENTAGE_TOP = 1
    BIAS_FREQ = 2
    FIXED_RAND = 3
    PERCENTAGE_RAND = 4
    TRANSPARENT = 5
    SPECIFIC_TOKENS = 6
    REMOVE_SPECIFIC_TOKENS = 7
class FactoryFilterVocabulary(object):
    """Abstract factory for vocabulary-level filters.

    NOTE(review): ``__metaclass__`` is the Python-2 spelling; under
    Python 3 it is inert and ``create`` would not be enforced as
    abstract -- confirm the target interpreter.
    """
    __metaclass__ = ABCMeta
    def build(self, option, kwargs, vocabulary_object):
        # SECURITY: eval() executes the configured option string (e.g.
        # "EnumFiltersVocabulary.FIXED_TOP") as Python. Safe only while the
        # configuration is trusted; never feed it user-supplied input.
        option = eval(option)
        return self.create(option, kwargs, vocabulary_object)
    @abstractmethod
    def create(self, option, kwargs, vocabulary_object):
        pass
class FactorySimpleFilterVocabulary(FactoryFilterVocabulary):
    """Concrete factory: maps an EnumFiltersVocabulary code to a filter object."""
    def create(self, option, kwargs, vocabulary_object):
        """Instantiate the vocabulary filter selected by *option*.

        *kwargs* carries the per-filter parameters; an unrecognised option
        implicitly returns None (same as the original).
        """
        def load_specific_tokens():
            # shared by the two *_SPECIFIC_TOKENS options; the with-statement
            # closes the JSON file even when parsing fails (the original left
            # the handle open on error)
            with open(kwargs["json_of_specific_tokens"], "r") as token_file:
                return json.load(token_file)
        if option == EnumFiltersVocabulary.FIXED_TOP:
            return FixedTopVocabulary(vocabulary_object, kwargs["fixed_top"])
        if option == EnumFiltersVocabulary.PERCENTAGE_TOP:
            return PercentageTopVocabulary(vocabulary_object, kwargs["percentage_top"])
        if option == EnumFiltersVocabulary.BIAS_FREQ:
            return BiasFreqVocabulary(vocabulary_object, kwargs["bias_freq"])
        if option == EnumFiltersVocabulary.FIXED_RAND:
            return FixedRandomVocabulary(vocabulary_object, kwargs["n_terms"], kwargs["caos"])
        if option == EnumFiltersVocabulary.PERCENTAGE_RAND:
            return PercentageRandomVocabulary(vocabulary_object, kwargs["percentage"], kwargs["caos"])
        if option == EnumFiltersVocabulary.TRANSPARENT:
            return FilterVocabulary(vocabulary_object)
        if option == EnumFiltersVocabulary.SPECIFIC_TOKENS:
            validated = kwargs.get("validated", False)
            return SpecificTokensVocabulary(vocabulary_object, load_specific_tokens(), validated)
        if option == EnumFiltersVocabulary.REMOVE_SPECIFIC_TOKENS:
            validated = kwargs.get("validated", False)
            return RemoveSpecificTokensVocabulary(vocabulary_object, load_specific_tokens(), validated)
class EnumFiltersTermsList(object):
    """Symbolic integer codes for the term-list filters; consumed by
    FactorySimpleFilterTermsList.create()."""
    FIXED_TOP = 0
    PERCENTAGE_TOP = 1
    BIAS_FREQ = 2
    FIXED_RAND = 3
    PERCENTAGE_RAND = 4
    TRANSPARENT = 5
    ORDER = 6
class FactorySimpleFilterTermsList(object):
    """Concrete factory: maps an EnumFiltersTermsList code to a filter object."""
    @staticmethod
    def create(option, kwargs, terms_list):
        """Instantiate the term-list filter selected by the *option* string.

        An unrecognised option implicitly returns None.
        """
        # SECURITY: eval() executes the configured option string (e.g.
        # "EnumFiltersTermsList.FIXED_TOP") as Python. Safe only while the
        # configuration is trusted; never feed it user-supplied input.
        option = eval (option)
        if option == EnumFiltersTermsList.FIXED_TOP:
            return FixedTopTermsList(terms_list, kwargs["fixed_top"])
        if option == EnumFiltersTermsList.PERCENTAGE_TOP:
            return PercentageTopTermsList(terms_list, kwargs["percentage_top"])
        if option == EnumFiltersTermsList.BIAS_FREQ:
            return BiasFreqTermsList(terms_list, kwargs["bias_freq"])
        if option == EnumFiltersTermsList.FIXED_RAND:
            return FixedRandomTermsList(terms_list, kwargs["n_terms"], kwargs["caos"])
        if option == EnumFiltersTermsList.PERCENTAGE_RAND:
            return PercentageRandomTermsList(terms_list, kwargs["percentage"], kwargs["caos"])
        if option == EnumFiltersTermsList.ORDER:
            return OrderTermsList(terms_list)
        if option == EnumFiltersTermsList.TRANSPARENT:
            return TransparentTermsList(terms_list)
"miguel@miguel-desktop"
] | miguel@miguel-desktop |
e89063f004ef56318689c8df2ebf442192e2aa44 | a39e95a0536d312311531a49dec90bcc8f7ab0c5 | /Lesson6_FunctionCompileRE/main.py | 46adef70714ec420ee43ff944b6b4cdcde1257cb | [] | no_license | Hadirback/python_part2 | 095010ca4866a4b6c9e5ca092602b43edbd344e8 | a4b00aeb30f88df55751d5f23e570c33face113d | refs/heads/master | 2020-08-11T09:13:03.793607 | 2019-11-04T23:10:45 | 2019-11-04T23:10:45 | 214,536,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | # compile
# re.compile is worthwhile when the same pattern must be searched for
# (and substituted) in several different strings
import re
text1 = """
Сбо́рная Франции по футбо́лу 34-я минута представляет Францию в международных
матчах и турнирах по футболу. """
text2 = """
Управляющая организация 56-й номер — Федерация футбола Франции.
"""
text3 = """
Федерация является членом ФИФА с 1904 года, членом УЕФА с 1954
года. Французы 1-й час были одними из основателей обеих этих организаций.
"""
# pull the "NN-th" (minute/number) tokens out of every text
pattern_string = "\d{1,2}\-[йя]"
print(re.findall(pattern_string, text1))
print(re.findall(pattern_string, text2))
print(re.findall(pattern_string, text3))
# passing the raw string makes re convert it to a pattern object on
# every call, which is comparatively expensive
pattern = re.compile("\d{1,2}\-[йя]")
print(type(pattern))
print(pattern.findall(text2))
print(pattern.findall(text1))
print(pattern.findall(text3))
print(re.sub(pattern, "n", text3))
# the precompiled pattern runs faster
| [
"mail.evgeny.filippov@gmail.com"
] | mail.evgeny.filippov@gmail.com |
f9251076f87015000034cf64c9c76592c169b75f | 3f03f75f91ac3ed0ad5d86bad598a09c9e56fbfa | /Andy/lab1/indexer.py | db99a3591f40c4b62e488fabe8db00b962190b06 | [] | no_license | AndyT94/EDAN20-Language_Technology | 00722666d28adcae955d739cc1f8259a04c12c67 | bca876a90887b7323c5e026bcc4eccea28ee3396 | refs/heads/master | 2021-01-20T21:12:07.217167 | 2017-10-04T18:00:32 | 2017-10-04T18:00:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,807 | py | import sys
import pickle
import regex as re
import os
import math
def get_files(dir, suffix):
    """Return the names of the entries in *dir* that end with *suffix*.

    (Original implementation credited to the EDAN20 course material.)
    """
    return [file for file in os.listdir(dir) if file.endswith(suffix)]
def index(text, dict, filename):
    """Record the start offset of every word occurrence of *text* in *filename*.

    *dict* is the master index, word -> {filename: [offsets]}, mutated in
    place. (The parameter shadows the builtin ``dict``; the name is kept
    for interface compatibility.)
    """
    # `re` is the third-party regex module here, which supports \p{L} (any letter)
    regex = re.compile(r'\p{L}+')
    for match in regex.finditer(text.lower()):
        # setdefault collapses the original three-way add-or-append branch
        file_positions = dict.setdefault(match.group(), {})
        file_positions.setdefault(filename, []).append(match.start())
def getNbrWords(files, dict):
    """Return {filename: total number of indexed word occurrences in that file}.

    :param files: filenames to count for (files absent from the index map to 0)
    :param dict: master index, word -> {filename: [offsets]}
    """
    docWords = {file: 0 for file in files}
    # single pass over the index instead of the original words-times-files rescan
    for file_positions in dict.values():
        for file, positions in file_positions.items():
            if file in docWords:
                docWords[file] += len(positions)
    return docWords
def calctfidf(files, dict):
    """Return {filename: {word: tf-idf weight}} for every word in the index.

    tf = occurrences in the file / total words of the file;
    idf = log10(number of files / number of files containing the word).
    """
    words_per_doc = getNbrWords(files, dict)
    tfidf = {file: {} for file in files}
    doc_count = len(files)
    for word, file_positions in dict.items():
        for file in files:
            positions = file_positions.get(file)
            if positions is None:
                # the word never occurs in this document
                tfidf[file][word] = 0.0
            else:
                tf = len(positions) / words_per_doc.get(file)
                idf = math.log10(doc_count / len(file_positions))
                tfidf[file][word] = tf * idf
    return tfidf
def cosSim(tfidf):
    """Return the cosine similarity for every ordered pair of distinct documents.

    Keys have the form 'docA <-> docB'. Assumes every document vector holds
    the same word set (which calctfidf produces).
    """
    sim = {}
    for doc_a, vec_a in tfidf.items():
        for doc_b, vec_b in tfidf.items():
            if doc_a == doc_b:
                continue
            dot = sum(vec_a[word] * vec_b[word] for word in vec_a)
            norm_a = math.sqrt(sum(weight * weight for weight in vec_a.values()))
            # like the original, doc_b's norm is taken over doc_a's word set
            norm_b = math.sqrt(sum(vec_b[word] ** 2 for word in vec_a))
            sim[doc_a + ' <-> ' + doc_b] = dot / (norm_a * norm_b)
    return sim
def main(args):
    """Index every .txt file in directory *args*, print the most similar
    document pair(s) by cosine similarity over tf-idf, and pickle the index.

    :param args: path of the corpus directory
    """
    dict = {}  # master index: word -> {filename: [offsets]}
    files = get_files(args, '.txt')
    for file in files:
        # context manager closes each corpus file (the original leaked the handles)
        with open(args + '/' + file) as corpus_file:
            index(corpus_file.read(), dict, file)
    tfidf = calctfidf(files, dict)
    sim = cosSim(tfidf)
    maxval = max(sim.values())
    maxdocs = ''
    for k in sim.keys():
        if sim[k] == maxval:
            maxdocs += k + ' '
    print(maxdocs, maxval)
    # write the pickled index through a context manager so it is flushed and closed
    with open('masterindex.idx', 'wb') as index_file:
        pickle.dump(dict, index_file)
if __name__ == '__main__':
    # expects the corpus directory as the single command-line argument
    main(sys.argv[1])
| [
"Andy@DESKTOP-53TB5O7.localdomain"
] | Andy@DESKTOP-53TB5O7.localdomain |
b5201ac8ea345d410ca6aa25b78229ace84af6ab | 74edd5d32efc3d31f363c2a4205ec1a49f42c05e | /mygamev0.3.py | cf2c0623fff9f29778d581acd602ef6f3d133756 | [] | no_license | zjost/BrucePlatform | 0e4c1ec2a68dd7219a5450d79ee32d21937a7c86 | 127a99c1ea8bba7e2ea1c5d1a547684357af0cff | refs/heads/master | 2020-06-03T14:18:43.203258 | 2015-04-19T21:08:58 | 2015-04-19T21:08:58 | 34,224,107 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,989 | py | import pygame
import sys
from pygame.locals import *
from random import randint
class Player(pygame.sprite.Sprite):
    '''The main player sprite and its jump physics. nb. The player doesn't move left or right; the world moves around them.'''
    def __init__(self, start_x, start_y, width, height):
        """Create the player at (start_x, start_y), scaled to width x height.

        player_image/player_image2, jump_sound and dog_sound are
        module-level globals defined elsewhere in this file.
        """
        pygame.sprite.Sprite.__init__(self)
        # Define list of images: standing (index 0) and jumping (index 1)
        self.images = []
        self.images.append(pygame.transform.scale(
            pygame.image.load(player_image), (width, height)))
        self.images.append(pygame.transform.scale(
            pygame.image.load(player_image2), (width, height)))
        # Create an index for the images
        self.index = 0
        # Initialize to first image
        self.image = self.images[self.index]
        self.rect = self.image.get_rect()
        self.rect.x = start_x
        self.rect.y = start_y
        self.speed_y = 0
        # Thin 2px-high "feet" rect under the sprite, used for ground tests
        self.base = pygame.Rect(start_x+int(width/4),
                                start_y + height, int(width/2), 2)
        self.sound = pygame.mixer.Sound(jump_sound)
        self.sound.set_volume(0.2)
        # NOTE(review): dogsound is loaded but never played in this class --
        # presumably triggered elsewhere in the file; confirm
        self.dogsound = pygame.mixer.Sound(dog_sound)
        self.dogsound.set_volume(1.0)
    def move_y(self):
        '''Apply one tick of vertical movement under gravity, snapping onto
        a platform when falling into one.'''
        collided_y = world.collided_get_y(self.base)  # negative when airborne
        # If upward velocity or not colliding, treat as free
        if self.speed_y <= 0 or collided_y < 0:
            # Move according to speed
            self.rect.y = self.rect.y + self.speed_y
            # Accelerate speed downward (gravity is a module-level constant)
            self.speed_y = self.speed_y + gravity
        # If colliding and not moving upward
        if collided_y >0 and self.speed_y > 0:
            # y position is at the top of the block
            self.rect.bottom = collided_y
            self.base.y = self.rect.y+self.rect.height
    def jump(self, speed):
        '''Start a jump: only allowed while the feet rect touches a platform.'''
        if world.collided_get_y(self.base)>0:
            self.speed_y = speed
            self.index = 1  # switch to the jumping frame
            self.image = self.images[self.index]
            self.sound.play()
    def update(self):
        ''' Swap back to the standing frame once the player touches ground '''
        if world.collided_get_y(self.base)>0 and self.index >0 :
            self.index = 0
            self.image = self.images[self.index]
    def animate(self, ind):
        """Force display of animation frame *ind*."""
        self.image = self.images[ind]
class World():
    '''Holds the platforms and the goal(s), built from an ASCII level map
    ('-' = platform block, 'G' = goal).
    nb. In this game, the world moves left and right rather than the player.'''
    def __init__(self, level, block_size):
        # level: list of strings, one per row; block_size: tile size in pixels.
        self.platforms = []
        self.goals = []
        self.posn_y = 0
        self.block_size = block_size
        self.sound = pygame.mixer.Sound(level_sound)
        self.sound.play()
        for line in level:
            self.posn_x = 0
            for block in line:
                if block == '-':
                    self.platforms.append(Blocks(
                        self.posn_x, self.posn_y, block_size))
                if block == 'G':
                    self.goals.append(Goals(self.posn_x, self.posn_y))
                self.posn_x = self.posn_x + block_size
            self.posn_y = self.posn_y + block_size
    def move(self, dist):
        '''move the world dist pixels right (a negative dist means left)'''
        for block in self.platforms:
            block.move_x(dist)
        for block in self.goals:
            block.move_x(dist)
    def collided_get_y(self, player_rect):
        '''Return the y value (top + 1) of a platform player_rect touches,
        or -1 when it touches none; if several overlap, the last one wins.'''
        return_y = -1
        for block in self.platforms:
            if block.rect.colliderect(player_rect):
                return_y = block.rect.y + 1
        return return_y
    def at_goal(self, player_rect):
        '''return True if the player is currently in contact with a goal. False otherwise'''
        for block in self.goals:
            if block.rect.colliderect(player_rect):
                return True
        return False
    def update(self, screen):
        '''draw all the platform and goal sprites onto the screen'''
        for block in self.platforms:
            self.platform_plain = pygame.sprite.RenderPlain(block)
            self.platform_plain.draw(screen)
        for block in self.goals:
            # NOTE(review): this renders the whole goals list once per goal —
            # presumably redundant but harmless; confirm before changing.
            self.ball_plain = pygame.sprite.RenderPlain(self.goals)
            self.ball_plain.draw(screen)
    def side_collide(self, player_rect):
        '''Return 1 when the player touches a block's right face, -1 for a
        left face (also shifting the world right to cancel the overlap),
        0 for neither.'''
        collide = 0
        for block in self.platforms:
            # Collision with right side of block
            if player_rect.collidepoint(block.rect.right,
                                        block.rect.bottom-int(block_size/2)):
                collide = 1
            # Collision with left side of block
            if player_rect.collidepoint(block.rect.left-2,
                                        block.rect.bottom-int(block_size/2)):
                overlap = player_rect.right - block.rect.left
                self.move(overlap)
                collide = -1
        return collide
class Doom():
    '''Everything that can kill the player: a floor of lava tiles plus
    falling fireballs. Uses module globals screen_x, block_size,
    fireball_size and explo_sound.'''
    def __init__(self, fireball_num, pit_depth):
        # fireball_num: number of concurrent fireballs;
        # pit_depth: y position of the lava row.
        # Holds the base lava blocks
        self.lava = []
        i = 0
        while i < screen_x:
            self.lava.append(Lava(i, pit_depth))
            i += block_size
        self.lava_plain = pygame.sprite.RenderPlain(self.lava)
        self.fireballs = []
        for i in range(0, fireball_num):
            self.fireballs.append(Fireball())
        self.fireball_plain = pygame.sprite.RenderPlain(self.fireballs)
        self.sound = pygame.mixer.Sound(explo_sound)
        self.sound.set_volume(0.5)
    def move(self, dist):
        '''move everything right dist pixels (negative dist means left)
        nb. only the fireballs scroll; the lava floor spans the screen.'''
        for fireball in self.fireballs:
            fireball.move_x(dist)
    def update(self, screen):
        '''move fireballs down, and draw everything on the screen'''
        for fireball in self.fireballs:
            fireball.move_y()
        self.fireball_plain.draw(screen)
        self.lava_plain.draw(screen)
    def collided(self, player_rect):
        '''check if the player is currently in contact with any of the doom.
        nb. shrink the rectangle for the fireballs to make it fairer'''
        for fireball in self.fireballs:
            if fireball.rect.colliderect(player_rect):
                # Second, stricter test against a rect shrunk by half the
                # fireball size on each axis.
                hit_box = fireball.rect.inflate(
                    -int(fireball_size/2),
                    -int(fireball_size/2))
                if hit_box.colliderect(player_rect):
                    return True
        for lava in self.lava:
            if lava.rect.colliderect(player_rect):
                return True
        return False
class Fireball(pygame.sprite.Sprite):
    '''A fireball that falls from the sky at a random x position and speed.
    Uses module globals fireball_image, fireball_size, fireball_low_speed,
    fireball_high_speed, screen_x and screen_y.'''
    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.transform.scale(
            pygame.image.load(fireball_image),
            (fireball_size, fireball_size))
        self.rect = self.image.get_rect()
        self.reset()
    def reset(self):
        '''re-generate the fireball at the top of the screen, at a random
        distance along the screen and with a random falling speed'''
        self.y = 0
        self.speed_y = randint(fireball_low_speed, fireball_high_speed)
        self.x = randint(0, screen_x)
        self.rect.topleft = self.x, self.y
    def move_x(self, dist):
        '''move the fireball dist pixels to the right
        (negative dist means left); respawn once it scrolls off screen'''
        self.rect.move_ip(dist, 0)
        if self.rect.x < -50 or self.rect.x > screen_x:
            self.reset()
    def move_y(self):
        '''move the fireball the appropriate distance down the screen
        nb. fireballs don't accelerate with gravity, but keep the random
        speed chosen in reset(). if the fireball has reached the bottom
        of the screen, regenerate it'''
        self.rect.move_ip(0, self.speed_y)
        if self.rect.y > screen_y:
            self.reset()
    def update(self, screen, colour):
        '''no-op: drawing is done by the RenderPlain group in Doom.update'''
        pass
class Goals(pygame.sprite.Sprite):
    """Sprite for a level goal, rendered from ``ball_image`` at ``ball_size``."""
    def __init__(self, xpos, ypos):
        super().__init__()
        raw = pygame.image.load(ball_image)
        self.image = pygame.transform.scale(raw, (ball_size, ball_size))
        self.rect = self.image.get_rect()
        self.rect.topleft = xpos, ypos
    def move_x(self, dist):
        """Scroll the goal *dist* pixels to the right (negative = left)."""
        self.rect.move_ip(dist, 0)
    def update(self, screen, colour):
        """No per-frame behaviour; the render group draws the sprite."""
        pass
class Blocks(pygame.sprite.Sprite):
    """One square platform tile, rendered from ``platform_image``."""
    def __init__(self, xpos, ypos, block_size):
        super().__init__()
        tile = pygame.image.load(platform_image)
        self.image = pygame.transform.scale(tile, (block_size, block_size))
        self.rect = self.image.get_rect()
        self.rect.topleft = xpos, ypos
    def move_x(self, dist):
        """Scroll the tile *dist* pixels to the right (negative = left)."""
        self.rect.move_ip(dist, 0)
    def update(self, screen, colour):
        """No per-frame behaviour; the render group draws the tile."""
        pass
class Lava(pygame.sprite.Sprite):
    """A single static lava tile rendered from ``lava_image``."""
    def __init__(self, xpos, ypos):
        super().__init__()
        tile = pygame.image.load(lava_image)
        self.image = pygame.transform.scale(tile, (block_size, block_size))
        self.rect = self.image.get_rect()
        self.rect.x, self.rect.y = xpos, ypos
# options
mult = 1.5
screen_x = int(600*mult)
screen_y = int(400*mult)
block_size = int(30*mult)
game_name = "Awesome Raspberrylock size 30, posn_x 7050 posn_y 0"
player_spawn_x = 50
player_spawn_y = 200
player_image = "bruceside.png"
player_image2 = "bruce_turn_small.png"
player_width, player_height = int(30*mult), int(28*mult)
#player_width, player_height = 30*2, 28*2
ball_size = int(15*mult)
ball_image = "dryer_ball.png"
gravity = 0.8*mult
jump_speed = -8*mult
level=[
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" - G ",
" - -- --- - ",
" -- - -- "]
#"--------------------"
platform_color = (100, 100, 100)
platform_image = "stone_block.png"
lava_image = "fire_block.png"
pit_depth = screen_y - block_size
fireball_size = int(20*mult)
fireball_number = 10
fireball_low_speed = 3
fireball_high_speed = 7
fireball_image = "flame.png"
# Sounds
jump_sound = "qubodup-cfork-ccby3-jump.ogg"
level_sound = "ambientmain_0_0.ogg"
explo_sound = "explosion1.ogg"
dog_sound = "dog_sound_1.ogg"
# initialise pygame.mixer
pygame.mixer.pre_init(44100, -16, 8, 2048)
pygame.mixer.init()
# initialise pygame
pygame.init()
window = pygame.display.set_mode((screen_x, screen_y))
pygame.display.set_caption(game_name)
screen = pygame.display.get_surface()
# load level
if len(sys.argv) > 1:
with open(sys.argv[1]) as f:
level = f.readlines()
# initialise variables
clock = pygame.time.Clock()
finished = False
player = Player(player_spawn_x, player_spawn_y, player_width, player_height)
player_plain = pygame.sprite.RenderPlain(player)
world = World(level, block_size)
doom = Doom(fireball_number, pit_depth)
# setup the background
background_image = "Catacomb_Entrance_Hall_by_KingCloud_crop.png"
#background_image = "Catacomb_Entrance_Hall_by_KingCloud.jpg"
#background_image = "ex_background.png"
background = pygame.transform.scale(pygame.image.load(background_image),
(screen_x, screen_y)).convert()
bg_1_x = 0
bg_2_x = screen_x
# Main game loop: read input, scroll the world, apply physics, render,
# then check the win/lose conditions. Holding Shift doubles scroll speed.
while not finished:
    # blank screen
    screen.fill((0,0,0))
    # check events
    for event in pygame.event.get():
        if event.type == QUIT:
            finished = True
    # check which keys are held
    key_state = pygame.key.get_pressed()
    if key_state[K_LEFT]:
        if key_state[K_RSHIFT] or key_state[K_LSHIFT]:
            shift_mult = 2
        else:
            shift_mult = 1
        # If not colliding with side faces of rectangles?
        if world.side_collide(player.rect) < 1:
            world.move(2*mult*shift_mult)
            doom.move(2*mult*shift_mult)
            # move background at slower pace (parallax effect)
            bg_1_x = bg_1_x + 1*shift_mult
            bg_2_x = bg_2_x + 1*shift_mult
            # if going into negative space, bring screen 2 to -screen_x + amount negative
            if bg_1_x > 0:
                bg_2_x = -screen_x+bg_1_x
    elif key_state[K_RIGHT]:
        if key_state[K_RSHIFT] or key_state[K_LSHIFT]:
            shift_mult = 2
        else:
            shift_mult = 1
        if world.side_collide(player.rect) > -1:
            world.move(-2*mult*shift_mult)
            doom.move(-2*mult*shift_mult)
            # move background at slower pace (parallax effect)
            bg_1_x = bg_1_x - 1*shift_mult
            bg_2_x = bg_2_x - 1*shift_mult
            # if have moved to edge, put screen 1 in front
            if bg_2_x < 0:
                bg_1_x = screen_x+bg_2_x
            # if going from negative to positive space, bring screen 2 back in front minus movement amount
            if bg_1_x < 0:
                bg_2_x = screen_x+bg_1_x
    if key_state[K_SPACE]:
        # Shift makes the jump 50% stronger.
        if key_state[K_RSHIFT] or key_state[K_LSHIFT]:
            shift_mult = 1.5
        else:
            shift_mult = 1
        player.jump(jump_speed*shift_mult)
    # move the player with gravity
    player.move_y()
    # Update the image based on whether jumping or not
    player.update()
    # render the frame (two background copies for seamless scrolling)
    screen.blit(background, (bg_1_x,0))
    screen.blit(background, (bg_2_x,0))
    player_plain.draw(screen)
    world.update(screen)
    doom.update(screen)
    # update the display
    pygame.display.update()
    # check if the player is dead
    if doom.collided(player.rect):
        doom.sound.play()
        pygame.time.wait(1500)
        print("You Lose!")
        finished = True
    # check if the player has completed the level
    if world.at_goal(player.rect):
        player.animate(1)
        player_plain.draw(screen)
        pygame.display.update()
        player.dogsound.play()
        pygame.time.wait(1000)
        print("Winner!")
        finished = True
    # set the speed in fps
    clock.tick(20)
pygame.quit()
| [
"zjost85@gmail.com"
] | zjost85@gmail.com |
8e607fc4217dc65098282671cc9a5b511508ff7f | b9c3c2c8e1253f6af7e1b8153be958dade5f980b | /MyUtils/chrome/__init__.py | 642fd339d14cd821ffcde7330dc18e667cb1fb78 | [] | no_license | xuhao1108/NewEgg | 67e63e25d0ecf0b585446042e3c405f2335da5fc | 4ed5dd5e56c2ea1409b2cbe4fc64f27962aebd75 | refs/heads/master | 2023-07-30T22:43:24.744990 | 2021-09-24T12:39:26 | 2021-09-24T12:39:26 | 409,957,031 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/7/22 16:43
# @Author : 闫旭浩
# @Email : 874591940@qq.com
# @desc : ...
from .create_proxy_auth_extension import create_proxy_auth_extension
from .selenium_chrome import ChromeDriver
| [
"874591940@qq.com"
] | 874591940@qq.com |
20fe73cbf8d62e74ac7dab0a275c51edd56c00f0 | f5f5394e3278da1e33312c434f10e105cc0b5d5f | /paymentsos/charges.py | 8f24214f9d08ab268f14d96f4dcd77d8d645c620 | [
"MIT"
class Charge(object):
    """Client-side wrapper for the PaymentsOS ``/payments/{id}/charges`` endpoints."""

    def __init__(self, client):
        # *client* supplies URL_BASE, auth headers and the HTTP verb helpers.
        self.client = client

    def create_charge(self, *, payment_id, payment_method, reconciliation_id=None, provider_specific_data=None,
                      user_agent=None, ip_address=None, **kwargs):
        """POST a new charge for *payment_id*; extra body fields pass through **kwargs."""
        headers = self.client._get_private_headers()
        headers['x-client-user-agent'] = user_agent
        headers['x-client-ip-address'] = ip_address
        # kwargs may override the standard fields, matching dict.update semantics.
        payload = {
            "payment_method": payment_method,
            "provider_specific_data": provider_specific_data,
            "reconciliation_id": reconciliation_id,
            **kwargs,
        }
        endpoint = '/payments/{}/charges'.format(payment_id)
        return self.client._post(self.client.URL_BASE + endpoint, json=payload, headers=headers)

    def retrieve_all_charges(self, *, payment_id, query_params=None):
        """GET every charge recorded against *payment_id*."""
        endpoint = '/payments/{}/charges'.format(payment_id)
        return self.client._get(self.client.URL_BASE + endpoint,
                                headers=self.client._get_private_headers(),
                                params=query_params)

    def retrieve_charge(self, *, payment_id, charge_id, query_params=None):
        """GET a single charge by its id."""
        endpoint = '/payments/{}/charges/{}'.format(payment_id, charge_id)
        return self.client._get(self.client.URL_BASE + endpoint,
                                headers=self.client._get_private_headers(),
                                params=query_params)
| [
"ingferrermiguel@gmail.com"
] | ingferrermiguel@gmail.com |
import base64

def xor_b64(a, b):
    """XOR the decoded bytes of two base64 strings and return the result as
    text (latin-1, mirroring the raw byte output of the original Python 2
    hex round-trip)."""
    raw_a = base64.b64decode(a)
    raw_b = base64.b64decode(b)
    return bytes(p ^ q for p, q in zip(raw_a, raw_b)).decode('latin-1')

# CTFlearn "Gandalf the Wise": the flag is the XOR of these two payloads.
x = "xD6kfO2UrE5SnLQ6WgESK4kvD/Y/rDJPXNU45k/p"
y = "h2riEIj13iAp29VUPmB+TadtZppdw3AuO7JRiDyU"

if __name__ == '__main__':
    # Ported from Python 2 (str.decode('base64'), int-XOR, hex-decode,
    # print statement) to Python 3. Byte-wise XOR is equivalent and avoids
    # the fragile lstrip("0x") big-int round-trip of the original.
    print(xor_b64(x, y))
| [
"noreply@github.com"
] | param373r.noreply@github.com |
f4237bacb70f0bf7b6208fb5ea7d30591d9f5aa4 | 1dfbc64569e7d1eff0eed6b6abfabc3d6c637cd0 | /doutu_document.py | 67ae6464d03079665fd25de6fb8093dc4665b318 | [] | no_license | Vrolist/ScriptPython-ImageSpider | 274ded7791b8c48ca931c524f7f08cbff4c76be6 | 61b67ac894c162069aa25e394a608cc03419f489 | refs/heads/master | 2020-04-03T18:58:04.131741 | 2018-10-31T05:42:51 | 2018-10-31T05:42:51 | 155,504,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | import requests
from lxml import etree
from time import sleep
from concurrent import futures
url = 'http://www.doutula.com/article/list/?page=2'
headers = {
'Referer':'http://www.doutula.com/',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36',
}
def parse_page(url):
    """Download every sticker image linked on one list page.

    Fetches *url*, extracts the lazy-load ('data-original') image URLs from
    the main column, and downloads each one, skipping individual failures.

    Returns True if the page contained at least one image, False otherwise
    (the caller uses this as an end-of-pagination signal).
    """
    resp = requests.get(url, headers=headers)
    html = etree.HTML(resp.text)
    imgs = html.xpath('.//div[@class="col-sm-9"]//img/@data-original')
    for img in imgs:
        # Best-effort: one broken image must not abort the page, but the
        # original bare "except:" also swallowed KeyboardInterrupt/SystemExit.
        try:
            download_img(img)
        except Exception:
            pass
    return bool(imgs)
def download_img(src):
    """Fetch one image URL and save it under img/ using its URL basename."""
    name = src.rsplit('/', 1)[-1]
    response = requests.get(src, headers=headers)
    # response.content holds the raw image bytes (the response is not text).
    target = 'img/' + name
    with open(target, 'wb') as fh:
        fh.write(response.content)
# Crawl list pages sequentially, starting from page 1.
base_url = 'http://www.doutula.com/article/list/?page={}'
i = 1
error_time = 0  # consecutive pages that yielded no images
next_link = True
while next_link:
    sleep(0.5)  # throttle requests
    try:
        next_link = parse_page(base_url.format(i))
    # NOTE(review): bare except also hides KeyboardInterrupt — confirm intent.
    except:
        next_link = True  # network hiccup: keep going rather than stop
    if next_link :
        i += 1
        error_time = 0
    else:
        # Empty page: tolerate up to 3 in a row before assuming the end.
        if error_time>=3:
            print(error_time,'break')
            break
        i+=1
        error_time+=1
        next_link = True
        print(i,error_time)
print('~OVER~') | [
"kellyhu112@qq.com"
] | kellyhu112@qq.com |
c0f1e901dcca4adb401428c29867545733d2648e | 522c92cf2a7f06e7fa2243e6d8d284c81eff7da8 | /s_可视化/matplotlib_test/rw_visual.py | c6ca2106c3fd02ceafcf36d9ef8a0f47fa4989f9 | [] | no_license | qiuyunzhao/python_basis | 480323dda42f7a85dd1e0d7d693595bf1cce00e5 | cdf5622f1ac6dc8e6b206b13aa3ef4cd3a6654b0 | refs/heads/master | 2021-01-02T19:21:03.556686 | 2020-05-24T14:52:14 | 2020-05-24T14:52:14 | 239,762,702 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | import matplotlib.pyplot as plt
from s_可视化.matplotlib_test.random_walk import RandomWalk
def draw_trace():
    """Repeatedly generate and plot random walks until the user answers 'n'."""
    while True:
        # Create a RandomWalk instance and plot all of its points
        rw = RandomWalk()
        rw.fill_walk()
        # Size the plotting window
        plt.figure(figsize=(10, 6))
        # Emphasise the path: later points are drawn in a darker colour.
        # Parameters:
        #   c: per-point colour values
        #   cmap: colour map
        #   edgecolor: remove the point outlines
        #   s: point size
        point_numbers = list(range(rw.num_points))
        plt.scatter(rw.x_values, rw.y_values, c=point_numbers, cmap=plt.cm.Blues, edgecolor='none', s=5)
        # Highlight the start and end points
        plt.scatter(0, 0, c='green', edgecolors='none', s=100)  # start point
        plt.scatter(rw.x_values[-1], rw.y_values[-1], c='red', edgecolors='none', s=100)  # end point
        # Hide the axes
        plt.xticks([])
        plt.yticks([])
        plt.show()
        # Prompt text is user-facing runtime data, left as-is ("redraw? (y/n)").
        keep_running = input("重新绘制吗? (y/n): ")
        if keep_running == 'n':
            break
if __name__ == '__main__':
    draw_trace()
| [
"1102401880@qq.com"
] | 1102401880@qq.com |
062014beb87368f116bd692d7d5fe58a1ed81f7f | 83978433a5b78e99fdfb2cbf3e9eaac6d88240f9 | /tests/test_run.py | 4528252de70c7f5980000c068c714ef025705699 | [] | no_license | kbtony/my-teleprompt | b80ec5c7acaba5dce5c7e59b853f268ace7785c6 | 2cdfdc6322b422cdf3977c7b1722a07449d447e8 | refs/heads/master | 2023-03-18T18:13:08.808915 | 2021-03-16T00:14:01 | 2021-03-16T00:14:01 | 347,022,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,699 | py | from datetime import datetime
from teleprompter.run import CommandLine, utcoffset_to_seconds, Query, lookup
def test_command_line():
    """CommandLine should split argv into a filename and a query string."""
    argv = ["run.py", "example.csv", "2021-06-19T15:27:30+10:00"]
    user_input = CommandLine(argv)
    assert user_input.filename == "example.csv"
    assert user_input.query == "2021-06-19T15:27:30+10:00"
def test_utcoffset_to_seconds():
    """utcoffset_to_seconds should accept 'hh:mm', 'hhmm' and bare-hour offsets."""
    time = ["11:30", "0425", "01"]
    assert utcoffset_to_seconds(time[0]) == 41400
    assert utcoffset_to_seconds(time[1]) == 15900
    assert utcoffset_to_seconds(time[2]) == 3600
def test_query():
    """Query should normalise ISO-8601 timestamps (Z and ±hh:mm offsets) to UTC,
    exposing date, time (offset), check (d/m/yy) and the early flag."""
    query = ["2021-06-19T15:27:30Z", "2020-11-19T15:27:30+10:00", "2008-06-08T19:37:22-09:22"]
    my_query = [Query(query[0]), Query(query[1]), Query(query[2])]
    assert my_query[0].date.isoformat() == "2021-06-19T15:27:30"
    assert my_query[0].check == "19/6/21"
    assert my_query[1].time == "10:00"
    assert my_query[1].date.isoformat() == "2020-11-19T05:27:30"
    assert my_query[2].time == "09:22"
    # negative offset rolls the date forward past midnight
    assert my_query[2].date.isoformat() == "2008-06-09T04:59:22"
    assert not my_query[2].early
def test_lookup():
    """lookup should name the previous, current and next programs for a timestamp."""
    program_list = [['T.V Shopping', 'G', '19/6/21', '0:00', '10800'], ['Days of Our Lives', 'PG', '19/6/21', '3:00', '3600'], ['M.A.S.H', 'PG', '19/6/21', '4:00', '5400'], ['The Simpsons', 'PG', '19/6/21', '5:30', '1800'], ['Futurama', 'PG', '19/6/21', '6:00', '3600'], ['Family Guy', 'M', '19/6/21', '7:00', '3000']]
    date = datetime.fromisoformat("2021-06-19T05:27:30").replace(tzinfo=None)
    result = lookup(program_list, date, "19/6/21", 36000, True)
    assert result == "[15:30] That was M.A.S.H, up next is The Simpsons which is rated PG and coming up later is Futurama."
| [
"tony790511@yahoo.com.tw"
] | tony790511@yahoo.com.tw |
3961c786a9f3d1cbf6f062bda1b52c9befbbe95e | b144cb0c9e497136c99e608c1cf3cf0b2e0e3c2d | /D3Q/src/deep_dialog/dialog_system/dialog_manager.py | 968859f34f9ae6a60b666d5e409fffb0929876a4 | [] | no_license | loremdai/A2C_PPO | 924182d780836a4774bc304c0bb460a1ef22c143 | f8135e4f9e3109a8861166b05f2090a1389188a9 | refs/heads/master | 2023-06-02T10:52:34.839587 | 2021-06-30T09:56:54 | 2021-06-30T09:56:54 | 381,645,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,392 | py | """
Created on May 17, 2016
@author: xiul, t-zalipt
"""
import json
from . import StateTracker
from deep_dialog import dialog_config
class DialogManager:
    """ A dialog manager to mediate the interaction between an agent and a customer.
    Alternates between a rule-based user simulator (warm start / real
    experience) and a learned world model (planning), and routes the
    resulting experience tuples to the agent, the world model and the
    discriminator (Discriminative Deep Dyna-Q). """
    def __init__(self, agent, user, user_planning, act_set, slot_set, movie_dictionary, discriminator):
        # agent: the dialog policy; discriminator: quality filter for
        # world-model (simulated) experiences.
        self.state_tracker = StateTracker(act_set, slot_set, movie_dictionary)
        self.discriminator = discriminator
        self.agent = agent
        self.user = user  # rule-based user simulator
        self.user_planning = user_planning  # model-based user simulator (world model)
        self.running_user = self.user
        self.act_set = act_set
        self.slot_set = slot_set
        self.reward = 0
        self.episode_over = False
        self.use_model = False
        self.session_idx = 0
        self.user_actions_for_dump = []  # after warm start
        self.user_action = None
    def initialize_episode(self, warm_start=False):
        """ Refresh state for new dialog """
        self.reward = 0
        self.episode_over = False
        self.use_model = False
        # Choose which simulator to run against, depending on warm start
        if not warm_start:  # no warm start
            self.running_user = self.user_planning  # run against the learned world model
            self.use_model = True  # use the world model
        else:  # warm start
            self.running_user = self.user  # run against the rule-based simulator
            self.use_model = False  # do not use the world model
        self.state_tracker.initialize_episode()  # reset the dialog state tracker
        self.agent.initialize_episode()  # reset the agent
        self.user_action = self.running_user.initialize_episode()  # opening user action
        self.state_tracker.update(user_action=self.user_action)  # track the opening action
        if warm_start:  # warm start: share the sampled goal with the world model
            self.user_planning.sample_goal = self.user.sample_goal
        # Verbosity depends on run_mode
        if dialog_config.run_mode < 3:  # not in silent training mode (DA/NL/DA-NL display): also print the goal
            print("New episode, user goal:")
            print(json.dumps(self.user.goal, indent=2))
        self.print_function(user_action=self.user_action)
    def next_turn(self,
                  record_training_data=True,
                  record_training_data_for_user=True,
                  simulation_for_discriminator=False,
                  filter_experience_by_discriminator=False
                  ):
        """ This function initiates each subsequent exchange between agent and user (agent first) """
        ########################################################################
        # CALL AGENT TO TAKE HER TURN
        ########################################################################
        self.state = self.state_tracker.get_state_for_agent()
        self.agent_action = self.agent.state_to_action(self.state)  # policy maps state to action
        ########################################################################
        # Register AGENT action with the state_tracker
        ########################################################################
        self.state_tracker.update(agent_action=self.agent_action)  # track the agent action
        self.agent.add_nl_to_action(self.agent_action)  # add NL to Agent Dia_Act
        self.print_function(agent_action=self.agent_action['act_slot_response'])
        ########################################################################
        # CALL USER TO TAKE HER TURN
        ########################################################################
        self.state_user = self.state_tracker.get_state_for_user()
        self.sys_action = self.state_tracker.dialog_history_dictionaries()[-1]
        if self.use_model:  # with the world model, the reward comes from the model itself
            self.user_action, self.episode_over, self.reward = self.running_user.next(self.state_user,
                                                                                      self.agent.action)
        else:  # otherwise the reward comes from the hand-written reward function
            self.user_action, self.episode_over, dialog_status = self.running_user.next(self.sys_action)
            self.reward = self.reward_function(dialog_status)
        ########################################################################
        # Update state tracker with latest user action
        ########################################################################
        if self.episode_over != True:  # episode still running
            self.state_tracker.update(user_action=self.user_action)  # track the user action
            self.print_function(user_action=self.user_action)
        self.state_user_next = self.state_tracker.get_state_for_agent()
        ########################################################################
        # add into the pool
        ########################################################################
        if not simulation_for_discriminator:
            # store experiences for the discriminator
            if self.use_model:  # simulated (world-model) experience
                self.discriminator.store_user_model_experience((
                    self.state_user, self.agent.action, self.state_user_next,
                    self.reward, self.episode_over, self.user_action))
            else:  # real-user experience (note the different store method)
                self.discriminator.store_user_experience((
                    self.state_user, self.agent.action, self.state_user_next,
                    self.reward, self.episode_over, self.user_action))
            # store the experiences for the agent
            if self.use_model and filter_experience_by_discriminator:  # world model + discriminator filtering
                discriminate_check = self.discriminator.single_check((self.state, self.agent_action, self.reward,
                                                                      self.state_tracker.get_state_for_agent(),
                                                                      self.episode_over, self.state_user,
                                                                      self.use_model))  # discriminator quality check
                # only replay a simulated experience when the discriminator
                # accepts it and recording is enabled
                if discriminate_check and record_training_data:
                    self.agent.register_experience_replay_tuple(self.state, self.agent_action, self.reward,
                                                                self.state_tracker.get_state_for_agent(),
                                                                self.episode_over, self.state_user, self.use_model)
            elif record_training_data:
                self.agent.register_experience_replay_tuple(self.state, self.agent_action, self.reward,
                                                            self.state_tracker.get_state_for_agent(), self.episode_over,
                                                            self.state_user, self.use_model)
            # store the experiences for the world model
            if record_training_data_for_user and not self.use_model:  # only real-user turns train the world model
                self.user_planning.register_experience_replay_tuple(self.state_user, self.agent.action,
                                                                    self.state_user_next, self.reward,
                                                                    self.episode_over, self.user_action)
            if self.use_model and filter_experience_by_discriminator:  # also expose the discriminator verdict
                return (self.episode_over, self.reward, discriminate_check)
            else:
                return (self.episode_over, self.reward)
        else:
            return (
                self.state_user, self.agent.action, self.state_user_next, self.reward, self.episode_over,
                self.user_action)
    # Reward Function 1: a reward function based on the dialog_status
    def reward_function(self, dialog_status):
        """ Reward Function 1: a reward function based on the dialog_status """
        if dialog_status == dialog_config.FAILED_DIALOG:
            reward = -self.user.max_turn  # -40 (-L)
        elif dialog_status == dialog_config.SUCCESS_DIALOG:
            reward = 2 * self.user.max_turn  # +80 (2*L)
        else:
            reward = -1  # in each turn, a reward -1 is provided to encourage shorter dialogues
        return reward
    # Reward Function 2: a reward function without penalty on per turn and failure dialog
    def reward_function_without_penalty(self, dialog_status):
        """ Reward Function 2: a reward function without penalty on per turn and failure dialog """
        if dialog_status == dialog_config.FAILED_DIALOG:
            reward = 0
        elif dialog_status == dialog_config.SUCCESS_DIALOG:
            reward = 2 * self.user.max_turn
        else:
            reward = 0
        return reward
    # Print the agent/user action according to run_mode
    def print_function(self, agent_action=None, user_action=None):
        """ Print Function: run_mode 0 = NL only, 1 = dialog acts only,
        2 = debug (both); AgentCmd gets extra suggested-value hints. """
        if agent_action:
            if dialog_config.run_mode == 0:
                if self.agent.__class__.__name__ != 'AgentCmd':
                    print("Turn %d sys: %s" % (agent_action['turn'], agent_action['nl']))
            elif dialog_config.run_mode == 1:
                if self.agent.__class__.__name__ != 'AgentCmd':
                    print("Turn %d sys: %s, inform_slots: %s, request slots: %s" % (
                        agent_action['turn'], agent_action['diaact'], agent_action['inform_slots'],
                        agent_action['request_slots']))
            elif dialog_config.run_mode == 2:  # debug mode
                print("Turn %d sys: %s, inform_slots: %s, request slots: %s" % (
                    agent_action['turn'], agent_action['diaact'], agent_action['inform_slots'],
                    agent_action['request_slots']))
                print("Turn %d sys: %s" % (agent_action['turn'], agent_action['nl']))
            if dialog_config.auto_suggest == 1:
                print('(Suggested Values: %s)' % (
                    self.state_tracker.get_suggest_slots_values(agent_action['request_slots'])))
        elif user_action:
            if dialog_config.run_mode == 0:
                print("Turn %d usr: %s" % (user_action['turn'], user_action['nl']))
            elif dialog_config.run_mode == 1:
                print("Turn %s usr: %s, inform_slots: %s, request_slots: %s" % (
                    user_action['turn'], user_action['diaact'], user_action['inform_slots'],
                    user_action['request_slots']))
            elif dialog_config.run_mode == 2:  # debug mode, show both
                print("Turn %d usr: %s, inform_slots: %s, request_slots: %s" % (
                    user_action['turn'], user_action['diaact'], user_action['inform_slots'],
                    user_action['request_slots']))
                print("Turn %d usr: %s" % (user_action['turn'], user_action['nl']))
            if self.agent.__class__.__name__ == 'AgentCmd':  # command line agent
                user_request_slots = user_action['request_slots']
                if 'ticket' in user_request_slots.keys(): del user_request_slots['ticket']
                if len(user_request_slots) > 0:
                    possible_values = self.state_tracker.get_suggest_slots_values(user_action['request_slots'])
                    for slot in possible_values.keys():
                        if len(possible_values[slot]) > 0:
                            print('(Suggested Values: %s: %s)' % (slot, possible_values[slot]))
                        elif len(possible_values[slot]) == 0:
                            print('(Suggested Values: there is no available %s)' % (slot))
                else:
                    kb_results = self.state_tracker.get_current_kb_results()
                    print('(Number of movies in KB satisfying current constraints: %s)' % len(kb_results))
| [
"etienn3dai@gmail.com"
] | etienn3dai@gmail.com |
dc754f772d8f4ba92fad2186439121b62cc5581a | bbeb8f05519831e5ba0ec1a542be6970591e1cd5 | /Recursion/001_bisection_search.py | 1e36a69a4eed13daf085e03afa240235435e541b | [] | no_license | b1ck0/python_coding_problems | fdebb2390a26ed8f60530494dcfdc8cdf10c33ce | 93edeaf06d1d06fd211e60fc0181baf954355d14 | refs/heads/master | 2022-08-21T14:14:51.675278 | 2022-08-03T12:07:37 | 2022-08-03T12:07:37 | 253,064,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,348 | py | def search(element, array: list):
"""
:param element: some element
:param array: sorted list
:return: element in list
"""
if len(array) == 0 or element == '' or element is None:
return False
if len(array) == 1:
return element == array[0]
n = len(array)
mid = n // 2
if element == array[mid]:
return True
elif element < array[mid]:
return search(element, array[:mid])
else:
return search(element, array[mid:])
if __name__ == "__main__":
    # Self-test harness. Plain `assert` replaces nose.tools.assert_equal:
    # nose is unmaintained and unavailable on modern Python versions.
    class TestClass(object):
        def test(self, sol):
            """Exercise *sol* with known membership queries (strings and ints)."""
            assert sol('a', '') == False
            assert sol('u', 'llmrtwy') == False
            assert sol('e', 'ceeijlpvv') == True
            assert sol('w', 'aehirww') == True
            assert sol('d', 'cdfgiiilnpprvwxxzz') == True
            assert sol('g', 'bdefffgklmooqrwx') == True
            assert sol('f', 'cefgijlmoorrtttxyy') == True
            assert sol('u', 'lmosstuuvv') == True
            assert sol('x', 'bcddefghijnppqruy') == False
            assert sol('p', 'acddeghkkpqsuxy') == True
            assert sol(5, [1, 2, 3, 4, 5, 6]) == True
            assert sol(10, [1, 2, 3, 4, 5, 6]) == False
            print('ALL TEST CASES PASSED')
    # Run Tests
    t = TestClass()
    t.test(search)
| [
"vasil.yordanov88@gmail.com"
] | vasil.yordanov88@gmail.com |
e3af78f714a605f48e356026f633369aabb2a7f4 | 6131ff2e3370b59a77b87ae619acd247d4cf537b | /_WEBPRINT/recepientsapp/migrations/0017_sentenvelop_registry.py | 90b7acd06d29dd90d4a44abc41359e7ac23e82bf | [] | no_license | d051a/webprint | 038ae54dedb20eea7adb5ab7c4a93d5d41aa0325 | 557238568b8bb3e905b080e9e7cad88fecb90eaa | refs/heads/master | 2020-03-19T12:35:28.727050 | 2019-12-11T13:03:47 | 2019-12-11T13:03:47 | 135,196,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-12-06 11:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional ForeignKey from SentEnvelop to Registry
    (auto-generated by Django's makemigrations)."""
    # Must run after the migration that created the Registry model.
    dependencies = [
        ('recepientsapp', '0016_registry'),
    ]
    operations = [
        migrations.AddField(
            model_name='sentenvelop',
            name='registry',
            # null=True keeps existing rows valid; CASCADE deletes envelopes
            # together with their registry.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='recepientsapp.Registry'),
        ),
    ]
| [
"akargavin@gmail.com"
] | akargavin@gmail.com |
d0937d391db976cdd9ce380dfda1333e8c5e6cfd | 6ffc398b4a27c339f24938e8a0b9c565e33539ce | /site-packages-27/fpdf/__init__.py | e1f6d0ec86f11b94c27e9cf80fc511a1e065dabb | [] | no_license | zwlyn/awesome-pdf | 8f4483d717130a54545f2ba8b05313da99103039 | 8223929db5433c7b4ed61bceb4f5808c12e1ad85 | refs/heads/master | 2023-01-24T23:52:35.415117 | 2020-04-05T12:05:31 | 2020-04-05T12:05:31 | 253,162,782 | 2 | 0 | null | 2023-01-05T10:50:08 | 2020-04-05T05:31:20 | Python | UTF-8 | Python | false | false | 415 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"FPDF for python"
__license__ = "LGPL 3.0"
__version__ = "1.7.9"
from .fpdf import FPDF, FPDF_FONT_DIR, FPDF_VERSION, SYSTEM_TTFONTS, set_global, FPDF_CACHE_MODE, FPDF_CACHE_DIR
try:
from .html import HTMLMixin
except ImportError:
import warnings
warnings.warn("web2py gluon package not installed, required for html2pdf")
from .template import Template
| [
"1666013677@qq.com"
] | 1666013677@qq.com |
c8eb3aeda662891d7280421a60fa273f048c7670 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/stdlib-big-1496.py | c138c5f1b37ad1d5b8f878c60570148520f70029 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,999 | py | # ChocoPy library functions
def int_to_str(x: int) -> str:
digits:[str] = None
result:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str2(x: int, x2: int) -> str:
digits:[str] = None
digits2:[str] = None
result:str = ""
result2:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str3(x: int, x2: int, x3: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str4(x: int, x2: int, x3: int, x4: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def int_to_str5(x: int, x2: int, x3: int, x4: int, x5: int) -> str:
digits:[str] = None
digits2:[str] = None
digits3:[str] = None
digits4:[str] = None
digits5:[str] = None
result:str = ""
result2:str = ""
result3:str = ""
result4:str = ""
result5:str = ""
# Set-up digit mapping
digits = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
# Write sign if necessary
if x < 0:
result = "-"
x = -x
# Write digits using a recursive call
if x >= 10:
result = result + int_to_str(x // 10)
result = result + digits[x % 10]
return result
def str_to_int(x: str) -> int:
result:int = 0
digit:int = 0
char:str = ""
sign:int = 1
first_char:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int2(x: str, x2: str) -> int:
result:int = 0
result2:int = 0
digit:int = 0
digit2:int = 0
char:str = ""
char2:str = ""
sign:int = 1
sign2:int = 1
first_char:bool = True
first_char2:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif $Exp == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int3(x: str, x2: str, x3: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
char:str = ""
char2:str = ""
char3:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int4(x: str, x2: str, x3: str, x4: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
def str_to_int5(x: str, x2: str, x3: str, x4: str, x5: str) -> int:
result:int = 0
result2:int = 0
result3:int = 0
result4:int = 0
result5:int = 0
digit:int = 0
digit2:int = 0
digit3:int = 0
digit4:int = 0
digit5:int = 0
char:str = ""
char2:str = ""
char3:str = ""
char4:str = ""
char5:str = ""
sign:int = 1
sign2:int = 1
sign3:int = 1
sign4:int = 1
sign5:int = 1
first_char:bool = True
first_char2:bool = True
first_char3:bool = True
first_char4:bool = True
first_char5:bool = True
# Parse digits
for char in x:
if char == "-":
if not first_char:
return 0 # Error
sign = -1
elif char == "0":
digit = 0
elif char == "1":
digit = 1
elif char == "2":
digit = 2
elif char == "3":
digit = 3
elif char == "3":
digit = 3
elif char == "4":
digit = 4
elif char == "5":
digit = 5
elif char == "6":
digit = 6
elif char == "7":
digit = 7
elif char == "8":
digit = 8
elif char == "9":
digit = 9
else:
return 0 # On error
first_char = False
result = result * 10 + digit
# Compute result
return result * sign
# Input parameters
c:int = 42
c2:int = 42
c3:int = 42
c4:int = 42
c5:int = 42
n:int = 10
n2:int = 10
n3:int = 10
n4:int = 10
n5:int = 10
# Run [-nc, nc] with step size c
s:str = ""
s2:str = ""
s3:str = ""
s4:str = ""
s5:str = ""
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
i = -n * c
# Crunch
while i <= n * c:
s = int_to_str(i)
print(s)
i = str_to_int(s) + c
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
3d2838a04450c37cd315fe937c37005bf75676ba | 2c18130c914a387dee1aea4e9aa375bd3aa9158d | /amazon/previous-greater-element.py | 0a9e268967bee0a1cc9956270a47f827440771fc | [] | no_license | rohitjain994/Leetcode | 5231360a5cb117d1ad15b90090fad60efaaedfc1 | 4f30284b18cc24e6bfeac88f3306c341b9b7cd31 | refs/heads/main | 2023-04-04T22:11:29.617809 | 2021-04-13T18:41:55 | 2021-04-13T18:41:55 | 341,184,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # arr = [ 10, 4, 2, 20, 40, 12, 30 ]
# res = [ -1, 10, 4, -1, -1, 40, 40 ]
def previousGreaterElement(arr : List[int])-> List[int]:
stack = [arr[0]]
res = [-1]
for i in range(1,len(arr)):
while len(stack)>0 and stack[-1]<arr[i]:
stack.pop()
if len(stack)==0:
res.append(-1)
else:
res.append(stack[-1])
stack.append(arr[i])
return res
| [
"rohitjain@Rohits-MacBook-Pro.local"
] | rohitjain@Rohits-MacBook-Pro.local |
7116d372dc82524a2a12f2f1ffeb890f02a621ab | 29998d8edc1a5b4ab71eb6c22797382bd01532cb | /Neural_networks.py | f09587e4f43cb1f6548dd860c996505dd90e9c70 | [] | no_license | arunadevikaruppasamy/Data-Science-from-Scratch-Python | 32d3979f9f3e15ccce5104f210ec9407a50bb1b6 | fa84a819777aca9d5affd8179b152726beb88f1b | refs/heads/master | 2022-04-07T15:00:53.667392 | 2020-02-25T23:31:23 | 2020-02-25T23:31:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,582 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 13 16:55:24 2020
@author: Ruchika
"""
#####################################################################################################
## Neural Network
#####################################################################################################
from Vector_operations_on_data import Vector, dot
def step_function(x: float) -> float:
return 1.0 if x>=0 else 0.0
def perceptron_output(weights: Vector, bias: float, x: Vector) -> float:
# Returns 1 if the perceptron 'fires, 0 if not
return (step_function(dot(weights,x)+bias))
#####################################################################################################
# AND gate
#####################################################################################################
and_weights = [2., 2.]
and_bias = -3
print(perceptron_output(and_weights, and_bias, [1, 1]))
print(perceptron_output(and_weights, and_bias, [0, 1]))
print(perceptron_output(and_weights, and_bias, [1, 0]))
print(perceptron_output(and_weights, and_bias, [0, 0]))
#####################################################################################################
# OR gate
#####################################################################################################
and_weights = [2., 2.]
and_bias = -1
print(perceptron_output(and_weights, and_bias, [1, 1]))
print(perceptron_output(and_weights, and_bias, [0, 1]))
print(perceptron_output(and_weights, and_bias, [1, 0]))
print(perceptron_output(and_weights, and_bias, [0, 0]))
#####################################################################################################
# NOT gate
#####################################################################################################
and_weights = [-2.]
and_bias = 1
print(perceptron_output(and_weights, and_bias, [0]))
print(perceptron_output(and_weights, and_bias, [1]))
import math
def sigmoid(t: float) -> float:
return 1/(1+math.exp(-t))
t = [i for i in range(-20,21,1)]
sigmoid_t = [sigmoid(x) for x in t]
step_t = [step_function(x) for x in t]
import matplotlib.pyplot as plt
plt.plot(t, sigmoid_t, label = 'sigmoid')
plt.plot(t, step_t, 'm--', label = 'step function')
plt.legend()
plt.show()
def neuron_output(weights:Vector, inputs: Vector) -> float:
return sigmoid(dot(weights, inputs))
from typing import List
def feed_forward(neural_network: List[List[Vector]],
input_vector: Vector) -> List[Vector]:
"""Feeds the input vector through the neural network.
Returns the outputs of all layers (not just the last one)."""
outputs: List[Vector] = []
for layer in neural_network:
input_with_bias = input_vector + [1.0] # Adds a constant for bias
output = [neuron_output(input_with_bias, neuron)
for neuron in layer]
outputs.append(output)
# Then the input to the next layer is the output of this layer
input_vector = output
return outputs
xor_network = [# hidden layer
[[-20., 20, -30], # 'and neuron'
[20., 20, -10]], # 'or neuron'
# Output layer
[[-60., 60., -30.]]]
print(feed_forward(xor_network, [1, 1]))
print(feed_forward(xor_network, [0, 1]))
print(feed_forward(xor_network, [1, 0]))
print(feed_forward(xor_network, [0, 0]))
#####################################################################################################
# Backpropagation
#####################################################################################################
def sqerror_gradients(network: List[List[Vector]],
input_vector: Vector,
target_vector: Vector) -> List[List[Vector]]:
"""Given a neural network, an input vector and a target vector,
makes a prediction and computes the gradient of squared error loss
with respect to the neuron weights."""
# forward pass
hidden_outputs, outputs = feed_forward(network, input_vector)
# gradients with respect to output neuron pre-activation outputs
output_deltas = [output*(1-output)*(output-target)
for output, target in zip(outputs, target_vector)]
# gradients with respect to output neuron weights
output_grads = [[output_deltas[i] * hidden_output
for hidden_output in hidden_outputs + [1]]
for i, output_neuron in enumerate(network[-1])]
# gradients with respect to hidden neuron pre-activation outputs
hidden_deltas = [hidden_output*(1-hidden_output)*
dot(output_deltas,[n[i] for n in network[-1]])
for i, hidden_output in enumerate(hidden_outputs)]
# gradients with respect to hidden neuron weights
hidden_grads = [[hidden_deltas[i] * input for input in input_vector + [1]]
for i, hidden_neuron in enumerate(network[0])]
return [hidden_grads, output_grads]
#####################################################################################################
# Train neural network for XOR operation
#####################################################################################################
import random
random.seed(0)
# training data
xs = [[0.,0.],[0.,1.],[1.,0.],[1.,1.]]
ys = [[0.], [1.], [1.], [0.]]
# start with random weights
network = [# hidden layer: 2 inputs -> 2 outputs
[[random.random() for _ in range(2 + 1)], # 1st hidden neuron
[random.random() for _ in range(2 + 1)]], # 2nd hidden neuron
# output layer: 2 inputs -> 1 output
[[random.random() for _ in range(2 + 1)]] # 1st output neuron
]
from gradient_descent import gradient_step;
learning_rate = 1.0
import tqdm
for epoch in tqdm.trange(20000, desc = "neural net for xor"):
for x,y in zip(xs, ys):
gradients = sqerror_gradients(network, x, y)
# Take a gradient step for each neuron in the layer
network = [[gradient_step(neuron, grad, -learning_rate)
for neuron, grad in zip(layer, layer_grad)]
for layer, layer_grad in zip(network, gradients)]
print(f"feed_forward(network, [0,0])[-1][0] = {feed_forward(network, [0,0])[-1][0]}")
print(f"feed_forward(network, [0,1])[-1][0] = {feed_forward(network, [0,1])[-1][0]}")
print(f"feed_forward(network, [1,0])[-1][0] = {feed_forward(network, [1,0])[-1][0]}")
print(f"feed_forward(network, [1,1])[-1][0] = {feed_forward(network, [1,1])[-1][0]}")
#####################################################################################################
#####################################################################################################
"""
Fizz Buzz problem¶
If a number is
divisible by 3 -> print "fizz"
divisible by 5 -> print "buzz"
divisible by 15 -> print "fizzbuzz"
"""
#####################################################################################################
#####################################################################################################
def fizz_buzz_encode(x: int) -> Vector:
if x % 15 == 0:
return [0,0,0,1]
elif x % 5 == 0:
return [0,0,1,0]
elif x % 3 == 0:
return [0,1,0,0]
else:
return [1,0,0,0]
print(fizz_buzz_encode(2))
print(fizz_buzz_encode(6))
print(fizz_buzz_encode(25))
print(fizz_buzz_encode(45))
def binary_encode(x: int) -> Vector:
binary: List[float] = []
for i in range(10):
binary.append(x%2)
x = x // 2
return binary
binary_encode(3)
xs = [binary_encode(n) for n in range(101,1024)] #Training data
ys = [fizz_buzz_encode(n) for n in range(101, 1024)] #Training labels
NUM_HIDDEN = 25 # Number of hidden neurons
network = [# hidden layers: 10 inputs -> NUM_HIDDEN outputs
[[random.random() for _ in range(10 + 1)] for _ in range(NUM_HIDDEN)],
# Output_layer: NUM_HIDDEN inputs -> 4 outputs
[[random.random() for _ in range(NUM_HIDDEN + 1)] for _ in range(4)]]
from Vector_operations_on_data import squared_distance
learning_rate = 1.0
with tqdm.trange(500) as t:
for epoch in t:
epoch_loss = 0.0
for x,y in zip(xs, ys):
predicted = feed_forward(network, x)[-1]
epoch_loss += squared_distance(predicted, y)
gradients = sqerror_gradients(network, x, y)
# Take gradient step for each neuron in each layer
network = [[gradient_step(neuron, grad, -learning_rate)
for neuron, grad in zip(layer, layer_grad)]
for layer, layer_grad in zip(network, gradients)]
t.set_description(f"fizz buzz (loss: {epoch_loss})")
def argmax(xs: list) -> int:
"""Returns the index of the largest value"""
return max(range(len(xs)), key = lambda i: xs[i])
num_correct = 0
## Testing
for n in range(1,101):
x = binary_encode(n)
predicted = argmax(feed_forward(network, x)[-1])
actual = argmax(fizz_buzz_encode(n))
labels = [str(n), "fizz","buzz","fizzbuzz"]
print(n, labels[predicted], labels[actual])
if predicted == actual:
num_correct += 1
print(num_correct, "/", 100)
| [
"noreply@github.com"
] | arunadevikaruppasamy.noreply@github.com |
3c7e0440225397cc5fbb7fc3eff1269632386e67 | 8653b096dbf744d75d6e85c3a9318f6aad938a86 | /python_learn/day_9/4 面向对象-类关系.py | 150343ad1dd6e95df9ddf4b5f1d83798f3bdbf01 | [] | no_license | shuipingyang-14/python_programer | 08400b0bc22d1acd305209ad88c9e85b50519f86 | 6bff2897cfd91ddad3b1cfb6d32004c32ca5d0bf | refs/heads/master | 2022-11-27T19:07:50.251316 | 2020-07-20T11:27:45 | 2020-07-20T11:27:45 | 256,217,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,319 | py | # -*- coding:utf-8
"""
@ author: ysp
@ time: 2020/7/13 10:31
@ file: 4 面向对象-类关系.py
@ IDE: PyCharm
@ version: python 3.8.3
"""
# 类与类之间的关系:
# 1. 依赖关系
class Elphant:
def __init__(self, name):
self.name = name
def open(self, obj):
"""开门"""
print('大象要开门了,默念三声,开')
obj.open_door()
def close(self):
"""关门"""
print('大象要关门了,默念三声,关')
class Refrigerator:
def open_door(self):
print("冰箱被打开了")
def close_door(self):
print("冰箱被关上了")
elp = Elphant('大象')
haler = Refrigerator()
elp.open(haler)
# 2. 关联关系:两种事物必须是互相关联的,但是在某些特殊情况下是可以更改和更换的
class Boy:
def __init__(self, name, girlFriend = None):
self.name = name
self.girlFriend = girlFriend
def have_a_diner(self):
if self.girlFriend:
print('%s 和 %s 一起晚饭' % (self.name, self.girlFriend.name))
else:
print('单身狗,吃什么饭')
class Girl:
def __init__(self, name):
self.name = name
# 单身狗
b = Boy('alex')
b.have_a_diner()
# 交了女朋友
b.girlFriend = Girl('jefrry')
b.have_a_diner()
# wusir 生下来就有女朋友 服不服
gg = Girl('小花')
bb = Boy('wusir', gg)
bb.have_a_diner()
#分手了
bb.girlFriend = None
bb.have_a_diner()
# 老师属于学校,必须有学校才可以工作
class School:
def __init__(self,name,address):
self.name = name
self.address = address
self.teacher_list = []
def append_teacher(self, teacher):
self.teacher_list.append(teacher)
class Teacher:
def __init__(self,name,school):
self.name = name
self.school = school
s1 = School('北京校区','美丽的沙河')
s2 = School('上海校区','上海迪士尼旁边')
s3 = School('深圳校区','南山区')
t1 = Teacher('武大',s1)
t2 = Teacher('海峰',s2)
t3 = Teacher('日天',s3)
s1.append_teacher(t1)
s1.append_teacher(t2)
s1.append_teacher(t3)
print(s1.teacher_list)
for teacher in s1.teacher_list:
print(teacher.name)
# 3. 组合关系:属于关联关系中的⼀种特例,侧重点是xxx和xxx聚合成xxx. 各⾃有各⾃的声明周期
class Gamerole:
def __init__(self, name, ad, hp):
self.name = name
self.ad = ad
self.hp = hp
def attack(self, p1):
p1.hp -= self.ad
print('%s攻击%s,%s掉了%s血,还剩%s血' % (self.name, p1.name, p1.name, self.ad, p1.hp))
def equip_weapon(self, wea):
self.wea = wea # 组合:给一个对象封装一个属性改属性是另一个类的对象
class Weapon:
def __init__(self, name, ad):
self.name = name
self.ad = ad
def weapon_attack(self, p1, p2):
p2.hp = p2.hp - self.ad - p1.ad
print('%s 利用 %s 攻击了%s,%s还剩%s血'
% (p1.name, self.name, p2.name, p2.name, p2.hp))
# 实例化三个人物对象:
barry = Gamerole('太白', 10, 200)
panky = Gamerole('金莲', 20, 50)
pillow = Weapon('绣花枕头', 2)
# 给人物装备武器对象
barry.equip_weapon(pillow)
# 开始攻击
barry.wea.weapon_attack(barry, panky)
| [
"xy_1521675822@163.com"
] | xy_1521675822@163.com |
f7a39c745bda8601750dba690274a4eff2008467 | 09a45530dfa6744281bbace4960b47d5d9f98432 | /froghopgame.py | db76fb497a89d377e5bf4b21a0ac949b86b7906f | [] | no_license | Murtaza-Kazmi/frog-hop-game | cc69ea729116b1395467bf1ce1fb1edf8461746c | 9e288060d2ff0cad554925cd8f48c5c843927668 | refs/heads/master | 2023-01-12T14:20:57.072121 | 2020-11-22T17:37:32 | 2020-11-22T17:37:32 | 289,695,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | # Recursive Solution for the Frog Hopping Puzzle/Game
#this function resturns a boolean, telling whether a frog at
# index = frogindex can move or is stuck
def frogcanmove(frogindex, lst):
if lst[frogindex] == "":
return False
if lst[frogindex] == "LtR":
if frogindex == len(lst)-1:
return False
if frogindex == len(lst)-2:
if lst[frogindex+1] != "":
return False
return True
if lst[frogindex+1] == "":
return True
if lst[frogindex +1] != "":
if lst[frogindex+2] == "":
return True
return False
else:
if frogindex == 0:
return False
if frogindex == 1:
if lst[0] != "":
return False
return True
if lst[frogindex-1] == "":
return True
if lst[frogindex-1] != "":
if lst[frogindex-2] == "":
return True
return False
#the func below prints all possible steps that you need to follow to win a game
#lst = initial state of game, path is what it prints if it was able to reach
# the goal, count is for printing steps for user's ease
#initial state can be kept arbitrary + you can also add as many frogs as you want
def tellpath(lst, path = "", count = 1):
originallst = lst + []
originalpath = path + ""
ocount = count + 0
if lst == ["RtL", "RtL", "RtL", "", "LtR", "LtR", "LtR"]:
print(path+"\n")
return
#move whichever frog can move and call recursively
for frogindex in range(0, len(lst)):
lst = originallst + []
path = originalpath + ""
count = ocount + 0
if frogcanmove(frogindex, lst):
if lst[frogindex] == "LtR":
if lst[frogindex+1] == "":
lst[frogindex+1] = lst[frogindex]
lst[frogindex] = ""
toindex = frogindex+1
else:
lst[frogindex+2] = lst[frogindex]
lst[frogindex] = ""
toindex = frogindex+2
else:
if lst[frogindex-1] == "":
lst[frogindex-1] = lst[frogindex]
lst[frogindex] = ""
toindex = frogindex-1
else:
lst[frogindex-2] = lst[frogindex]
lst[frogindex] = ""
toindex = frogindex-2
path += str(count) + ". Move frog at index " + str(frogindex) + " to the index " + str(toindex) + ".\n" + str(lst) + "\n"
tellpath(lst, path, count+1)
return
print("Note: 'LtR' = frog that's supposed to move Left to Right, 'RtL' = frog that is supposed to move Right to Left \n")
print("Initial state = ['LtR', 'LtR', 'LtR', '', 'RtL', 'RtL', 'RtL']" + "\n")
tellpath(["LtR", "LtR", "LtR", "", "RtL", "RtL", "RtL"])
#it can be inferred that solutions for this problem are symmetrical
#i.e. vice versa of each other
| [
"noreply@github.com"
] | Murtaza-Kazmi.noreply@github.com |
0230bff69699b5a0550cd2eb0957ea65a26a0e03 | c3f760355ccce2deee52b9a77ba07b7799e44134 | /icode/icode_logo.py | aaab203c961112d7de9a2a9782e7bd47b01454e3 | [] | no_license | lvandroid/Python | 8be142d197da5341cf304a77b7f85662a7fee213 | db3494eec21cc15080b6a3259f3deebafd47460e | refs/heads/master | 2020-04-07T08:09:24.566908 | 2018-11-22T08:54:29 | 2018-11-22T08:54:29 | 158,202,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | import random
import turtle
import math
pen = turtle.Turtle()
def my_goto(x, y):
pen.penup()
pen.goto(x, y)
pen.pendown()
def head():
a = 1
pen.seth(90)
for i in range(120):
if 0 <= i < 30 or 60 <= i < 90:
a += 0.5
pen.rt(3)
pen.fd(a)
else:
a -= 0.5
pen.rt(3)
pen.fd(a)
def body():
my_goto()
if __name__ == '__main__':
turtle.screensize(800, 600)
pen.pensize(3)
pen.speed(3)
my_goto(-200, 200)
head()
# turtle.exitonclick()
# china()
# hero()
# walk()
# flower()
turtle.exitonclick()
| [
"lvandroid@outlook.com"
] | lvandroid@outlook.com |
07c91c10ae908df3e84a285449ef439ed298eb2d | e1a5c0f95bdcb4e3b89b8b2b0c061ad17ad8becc | /policy/lab/create_projects.py | 73311d367cb578b761411f10a1cb2537327f8c9d | [] | no_license | kamidzi/openstack-policy | 7a50fc0a202dddfec88903aebac7a138d1aa8e69 | dc47a35014f76982346337c23f128dac6696ed62 | refs/heads/master | 2022-12-13T21:48:48.212025 | 2018-05-15T21:04:54 | 2018-05-15T21:05:12 | 94,116,605 | 0 | 1 | null | 2022-12-08T00:00:43 | 2017-06-12T16:18:24 | Python | UTF-8 | Python | false | false | 878 | py | #!/usr/bin/env python
from ks_auth import ks
from pprint import pprint
import sys
import keystoneclient
try:
import config
except ImportError:
sys.exit('Place a config.py in project directory.')
domain = 'Default'
if __name__ == '__main__':
# add projects
projects = []
for name in config.project_names:
try:
p = ks.projects.create(name=name, domain=domain)
projects.append(p)
except keystoneclient.exceptions.Conflict, e:
sys.stderr.write('WARNING - "{}": {}\n'.format(name, e.message))
try:
plist = ks.projects.list(name=name)
projects.extend(plist)
# already there
except:
sys.stderr.write('ERROR - {}:{}\n'.format(e, e.message))
# some error?
sys.exit()
map(pprint, projects)
| [
"kmidzi@bloomberg.net"
] | kmidzi@bloomberg.net |
b7f8be099faecc0c85b9d955313d797037d8ca2e | f232ee143db5e2d35a71299e373459c207ff0084 | /python_module/setup.py | 8276a94334b624d1a5721727a231b82f876c587c | [
"Apache-2.0",
"MIT"
] | permissive | mohdsanadzakirizvi/SuperGLU | 70a1efa8577b75f1964fbcfdde0923093d2aebc1 | 3150dbb33cd46e4a18cefd04a4130c37e5d412bc | refs/heads/master | 2020-03-28T02:36:16.657937 | 2018-09-05T19:39:04 | 2018-09-05T19:39:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | from setuptools import setup, find_packages
# Note: Manifest.in not really used for anything (possibly being deprecated)
setup(
name = 'SuperGLU',
packages = find_packages(),
version = '0.1.7',
description = 'Base Generalized Learning Utilities (GLU) library for communicating data between different learning technologies and intelligent systems.',
author = 'Benjamin D. Nye',
author_email = 'benjamin.nye@gmail.com',
url = 'https://github.com/GeneralizedLearningUtilities/SuperGLU',
download_url = 'https://github.com/GeneralizedLearningUtilities/SuperGLU/archive/0.1.tar.gz',
include_package_data = True,
exclude_package_data = {'': ['.gitignore', '.travis.yml', 'requirements.txt']},
keywords = ['ITS', "Adaptive Learning", 'Messaging', 'HTML5', "Websockets", "Service"],
classifiers = [
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: JavaScript",
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research"],
setup_requires = [ "setuptools_git >= 0.3", ]
) | [
"benjamin.nye@gmail.com"
] | benjamin.nye@gmail.com |
11e285264dc4e9dfa59d3104016bb4dac7ca203d | 8a462163df6ab7655c69840900d59723c0b50e58 | /ocs_ci/ocs/couchbase.py | 1d1c4830510d532264e8b20feccadb23576efde3 | [
"MIT"
] | permissive | gobindadas/ocs-ci | 5adcd08433524d36f7b92378527f30c827cc770f | 68aa8370dde2bc630fa4bc4f8ef40bd8f47edb59 | refs/heads/master | 2022-11-22T06:02:04.984119 | 2020-07-07T18:47:58 | 2020-07-07T18:47:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,341 | py | """
Couchbase workload class
"""
import logging
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.ocp import OCP, switch_to_project
from ocs_ci.utility import templating
from ocs_ci.utility.utils import TimeoutSampler
from ocs_ci.ocs.utils import get_pod_name_by_pattern
from ocs_ci.ocs import constants
from ocs_ci.ocs.pillowfight import PillowFight
from ocs_ci.ocs.ocp import switch_to_default_rook_cluster_project
log = logging.getLogger(__name__)
class CouchBase(PillowFight):
    """
    CouchBase workload operation

    Drives a Couchbase deployment on the cluster (admission controller,
    operator and worker pods) and reuses :class:`PillowFight` to generate
    and analyze the actual workload traffic.
    """
    # Timeout (seconds) used for every pod/resource wait in this class.
    WAIT_FOR_TIME = 600
    # Admission controller pieces; setup_cb() applies them in this order.
    admission_parts = [
        constants.COUCHBASE_ADMISSION_SERVICE_ACCOUNT_YAML,
        constants.COUCHBASE_ADMISSION_CLUSTER_ROLE_YAML,
        constants.COUCHBASE_ADMISSION_CLUSTER_ROLE_BINDING_YAML,
        constants.COUCHBASE_ADMISSION_SECRET_YAML,
        constants.COUCHBASE_ADMISSION_DEPLOYMENT_YAML,
        constants.COUCHBASE_ADMISSION_SERVICE_YAML,
        constants.COUCHBASE_MUTATING_WEBHOOK_YAML,
        constants.COUCHBASE_VALIDATING_WEBHOOK_YAML
    ]
    # NOTE(review): the attributes below are class-level, so they are built
    # once at import time and shared by every instance; several are later
    # rebound by setup_cb()/create_couchbase_worker().
    pod_obj = OCP(kind='pod')
    couchbase_pod = OCP(kind='pod')
    secretsadder = OCP(kind='pod')
    # Starts as an empty list; setup_cb() replaces it with the admission
    # pod's name (a string).
    admission_pod = []
    # Placeholder OCS objects, replaced with real resources during setup.
    cb_worker = OCS()
    cb_examples = OCS()
    def __init__(self, **kwargs):
        """
        Initializer function

        Args:
            **kwargs: Passed through unchanged to ``PillowFight.__init__``.
        """
        super().__init__(**kwargs)
def is_up_and_running(self, pod_name, ocp_value):
"""
Test if the pod specified is up and running.
Args:
pod_name (str): Name of pod being checked.
ocp_value (object): object used for running oc commands
Returns:
bool; True if pod is running, False otherwise
"""
if not pod_name:
return False
pod_info = ocp_value.exec_oc_cmd(f"get pods {pod_name} -o json")
if pod_info['status']['containerStatuses'][0]['ready']:
if 'running' in pod_info['status']['containerStatuses'][0]['state']:
return True
return False
    def setup_cb(self):
        """
        Deploy the Couchbase operator and everything it depends on.

        The statement order below matters:

        1. From the ``default`` project, create the admission controller
           pieces listed in ``admission_parts`` and wait for the admission
           pod to come up -- the couchbase workers do not start without it.
        2. Create the operator project, the Couchbase CRD and the operator
           role.
        3. Create the ``couchbase-operator`` service account, link its
           generated dockercfg pull secret, and bind the operator role.
        4. Deploy the operator and wait for its pod to run.
        5. Create the secret consumed by the couchbase worker pods.

        Side effects:
            Sets ``self.up_adm_chk``, ``self.up_check``,
            ``self.admission_pod``, ``self.couchbase_obj``,
            ``self.operator_role``, ``self.serviceaccount``,
            ``self.rolebinding``, ``self.cb_deploy`` and ``self.cb_worker``.
        """
        # Create admission controller
        log.info("Create admission controller process for Couchbase")
        # The admission controller lives in 'default', the operator and
        # workers in constants.COUCHBASE_OPERATOR -- keep one OCP handle per
        # namespace for the readiness polls below.
        switch_to_project('default')
        self.up_adm_chk = OCP(namespace="default")
        self.up_check = OCP(namespace=constants.COUCHBASE_OPERATOR)
        for adm_yaml in self.admission_parts:
            adm_data = templating.load_yaml(adm_yaml)
            adm_obj = OCS(**adm_data)
            adm_obj.create()
        # Wait for admission pod to be created.  get_pod_name_by_pattern
        # returns a (possibly empty) list, so adm_pod[0] raises IndexError
        # until the pod exists; TimeoutSampler retries every 3 seconds for
        # up to WAIT_FOR_TIME seconds.
        for adm_pod in TimeoutSampler(
            self.WAIT_FOR_TIME,
            3,
            get_pod_name_by_pattern,
            'couchbase-operator-admission',
            'default'
        ):
            try:
                if self.is_up_and_running(adm_pod[0], self.up_adm_chk):
                    self.admission_pod = adm_pod[0]
                    break
            except IndexError:
                log.info("Admission pod is not ready yet")
        # Wait for admission pod to be running
        log.info("Waiting for admission pod to be running")
        self.pod_obj.wait_for_resource(
            condition='Running',
            resource_name=self.admission_pod,
            timeout=self.WAIT_FOR_TIME,
            sleep=10,
        )
        # Everything from here on happens in the operator project.
        self.pod_obj.new_project(constants.COUCHBASE_OPERATOR)
        couchbase_data = templating.load_yaml(
            constants.COUCHBASE_CRD_YAML
        )
        self.couchbase_obj = OCS(**couchbase_data)
        self.couchbase_obj.create()
        op_data = templating.load_yaml(constants.COUCHBASE_OPERATOR_ROLE)
        self.operator_role = OCS(**op_data)
        self.operator_role.create()
        self.serviceaccount = OCP(namespace=constants.COUCHBASE_OPERATOR)
        self.serviceaccount.exec_oc_cmd(
            "create serviceaccount couchbase-operator"
        )
        # Locate the service account's generated dockercfg secret by scraping
        # the 'get secrets' output: start at the secret name prefix and cut
        # at the first space.
        # NOTE(review): this slicing assumes the default table output format
        # of 'oc get secrets' -- fragile if that format changes.
        dockercfgs = self.serviceaccount.exec_oc_cmd("get secrets")
        startloc = dockercfgs.find('couchbase-operator-dockercfg')
        newdockerstr = dockercfgs[startloc:]
        endloc = newdockerstr.find(' ')
        dockerstr = newdockerstr[:endloc]
        self.secretsadder.exec_oc_cmd(
            f"secrets link serviceaccount/couchbase-operator secrets/{dockerstr}"
        )
        self.rolebinding = OCP(namespace=constants.COUCHBASE_OPERATOR)
        rolebind_cmd = "".join([
            "create rolebinding couchbase-operator-rolebinding ",
            "--role couchbase-operator ",
            "--serviceaccount couchbase-operator-namespace:couchbase-operator"
        ])
        self.rolebinding.exec_oc_cmd(rolebind_cmd)
        dep_data = templating.load_yaml(constants.COUCHBASE_OPERATOR_DEPLOY)
        self.cb_deploy = OCS(**dep_data)
        self.cb_deploy.create()
        # Wait for couchbase operator pod to be running; same IndexError
        # polling idiom as the admission pod wait above.
        for couchbase_pod in TimeoutSampler(
            self.WAIT_FOR_TIME,
            3,
            get_pod_name_by_pattern,
            'couchbase-operator',
            constants.COUCHBASE_OPERATOR
        ):
            try:
                if self.is_up_and_running(couchbase_pod[0], self.up_check):
                    break
            except IndexError:
                log.info("Couchbase operator is not up")
        # Secret consumed by the worker pods created later in
        # create_couchbase_worker().
        cb_work = templating.load_yaml(constants.COUCHBASE_WORKER_SECRET)
        self.cb_worker = OCS(**cb_work)
        self.cb_worker.create()
def create_couchbase_worker(self, replicas=1):
    """
    Create the CouchbaseCluster resource and wait for its worker pods.

    Loads the example cluster manifest, patches the size of the first
    server group to ``replicas``, creates the resource, and then polls
    until ``replicas`` pods matching ``cb-example`` are Running.

    Args:
        replicas (int): number of couchbase worker pods to request and
            to wait for.
    """
    logging.info('Creating pods..')
    cb_example = templating.load_yaml(constants.COUCHBASE_WORKER_EXAMPLE)
    # Request `replicas` workers in the first (and only patched) server group.
    cb_example['spec']['servers'][0]['size'] = replicas
    self.cb_examples = OCS(**cb_example)
    self.cb_examples.create()
    # Wait for last of three workers to be running.
    logging.info('Waiting for the pods to Running')
    # Poll the pod list until the expected number of workers exists AND
    # every one of them reports up-and-running.
    for cb_wrk_pods in TimeoutSampler(
        self.WAIT_FOR_TIME,
        3,
        get_pod_name_by_pattern,
        'cb-example',
        constants.COUCHBASE_OPERATOR
    ):
        try:
            if len(cb_wrk_pods) == replicas:
                counter = 0
                for cb_pod in cb_wrk_pods:
                    if self.is_up_and_running(cb_pod, self.up_check):
                        counter += 1
                        logging.info(f'Couchbase worker {cb_pod} is up')
                if counter == replicas:
                    break
        except IndexError:
            # is_up_and_running can raise IndexError while a pod entry is
            # still incomplete; log progress and keep sampling.
            logging.info(
                f'Expected number of couchbase pods are {replicas} '
                f'but only found {len(cb_wrk_pods)}'
            )
def run_workload(self, replicas):
    """
    Run the pillowfight I/O workload against the couchbase cluster.

    Args:
        replicas (int): Number of pods
    """
    logging.info('Running IOs...')
    # Called unbound with this object as the receiver — assumes this class
    # is attribute-compatible with PillowFight (TODO confirm mixin contract).
    PillowFight.run_pillowfights(self, replicas=replicas)
def analyze_run(self):
    """
    Analyze the collected pillowfight workload logs.
    """
    logging.info('Analyzing workload run logs..')
    # Unbound call with self as receiver, same pattern as run_workload.
    PillowFight.analyze_all(self)
def teardown(self):
    """
    Tear down everything created during setup, in roughly the reverse
    order of creation: workload objects, oc-created bindings/accounts,
    operator objects, the project, and finally the admission parts.
    """
    # Workload-level objects first.
    self.cb_examples.delete()
    self.cb_worker.delete()
    self.cb_deploy.delete()
    # Objects that were created through raw oc commands.
    self.pod_obj.exec_oc_cmd(
        command="delete rolebinding couchbase-operator-rolebinding"
    )
    self.pod_obj.exec_oc_cmd(
        command="delete serviceaccount couchbase-operator"
    )
    self.operator_role.delete()
    self.couchbase_obj.delete()
    switch_to_project('default')
    self.pod_obj.delete_project(constants.COUCHBASE_OPERATOR)
    # Remove each admission-controller manifest that setup applied.
    for manifest in self.admission_parts:
        OCS(**templating.load_yaml(manifest)).delete()
    # Block until no couchbase pod is left in 'default'; without this wait
    # teardown could race with still-terminating pods and leave objects.
    for leftover_pods in TimeoutSampler(
        self.WAIT_FOR_TIME,
        3,
        get_pod_name_by_pattern,
        'couchbase',
        'default'
    ):
        if not leftover_pods:
            break
    PillowFight.cleanup(self)
    switch_to_default_rook_cluster_project()
| [
"sshreeka@redhat.com"
] | sshreeka@redhat.com |
90fc9a11b36c7ec3937a286038d3b1c0a4812f9d | 3529ecaa44a53172094ba13498097057c8972723 | /Questiondir/520.detect-capital/520.detect-capital_93512141.py | 4b4f3fe2c57d6a464255034820308ab06b71b8df | [] | no_license | cczhong11/Leetcode-contest-code-downloader | 0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6 | db64a67869aae4f0e55e78b65a7e04f5bc2e671c | refs/heads/master | 2021-09-07T15:36:38.892742 | 2018-02-25T04:15:17 | 2018-02-25T04:15:17 | 118,612,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | class Solution(object):
def detectCapitalUse(self, word):
"""
:type word: str
:rtype: bool
"""
if word == word.upper():
return True
if word == word.lower():
return True
if (word[:1] == word[:1].upper()) and (word[1:] == word[1:].lower()):
return True
return False
| [
"tczhong24@gmail.com"
] | tczhong24@gmail.com |
8fe3e72a4fe1168fd5eb38c66f4f4aa526bd5ad0 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/100_Python_Exercises_Evaluate_and_Improve_Your_Skills/Exercise 12 - More Ranges NUKE.py | 7bb9127b558afdf2d3b6aa97628abdcdb2897719 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 163 | py | #Create a script that generates a list whose items are products of the original list items multiplied by 10
my_range r..(1, 21)
print([10 * x ___ x __ my_range])
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
0b27098a205541a2d26cb36a3e45250255628c2a | 6a0e979c5c644ef70b3fbda9692c9726aa53e2aa | /shapenet_utils/shapenet_synset_dict.py | f9eef7bfec68cce9751e56d024b9e2a9a1f61ec3 | [] | no_license | kosuke55/shapenet_utils | 1c7be42da0826b2016859b7e4388062c2d3ab020 | 92656a9a932fabf7cd8717c810157e352ea57238 | refs/heads/master | 2023-02-20T15:42:33.688884 | 2021-01-22T12:17:42 | 2021-01-22T12:17:42 | 302,083,235 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | synset_to_label = {
'02691156': 'airplane',
'02747177': 'trash_bin',
'02773838': 'bag',
'02801938': 'basket',
'02808440': 'bathtub',
'02818832': 'bed',
'02828884': 'bench',
'02834778': 'bicycle',
'02843684': 'birdhouse',
'02871439': 'bookshelf',
'02876657': 'bottle',
'02880940': 'bowl',
'02924116': 'bus',
'02933112': 'cabinet',
'02942699': 'camera',
'02946921': 'can',
'02954340': 'cap',
'02992529': 'cellular_telephone',
'02958343': 'car',
'03001627': 'chair',
'03046257': 'clock',
'03085013': 'keyboard',
'03207941': 'dishwasher',
'03211117': 'display',
'03261776': 'earphone',
'03325088': 'faucet',
'03337140': 'file_cabinet',
'03467517': 'guitar',
'03513137': 'helmet',
'03593526': 'jar',
'03624134': 'knife',
'03636649': 'lamp',
'03642806': 'laptop',
'03691459': 'loudspeaker',
'03710193': 'mailbox',
'03761084': 'microwave',
'03759954': 'microphone',
'03790512': 'motorbike',
'03797390': 'mug',
'03928116': 'piano',
'03938244': 'pillow',
'03991062': 'pot',
'03948459': 'pistol',
'04004475': 'printer',
'04074963': 'remote_control',
'04090263': 'rifle',
'04099429': 'rocket',
'04225987': 'skateboard',
'04256520': 'sofa',
'04330267': 'stove',
'04379243': 'table',
'04401088': 'telephone',
'04460130': 'tower',
'04468005': 'train',
'04530566': 'watercraft',
'04554684': 'washer'
}
label_to_synset = {v: k for k, v in synset_to_label.items()} | [
"kosuke.tnp@gmail.com"
] | kosuke.tnp@gmail.com |
5ce16c87fcae922693b17176739c6d94acb13cce | 4a7ddf37f09fc9defc5b0de24218e6d412b3ffc9 | /data/RegisterForm.py | 61e36b033d16d0ea2d88ba2eb8302290082c9ca5 | [] | no_license | YLNN2/API2 | f1d27f6b079c4a627bc497ed20868a3497e8b002 | 6d745de973475f920f6c9fc59b8d2762a82c3df4 | refs/heads/master | 2021-05-17T15:06:00.744769 | 2020-03-28T15:58:03 | 2020-03-28T15:58:03 | 250,835,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | from flask_wtf import FlaskForm
from wtforms import PasswordField, StringField, TextAreaField, SubmitField
from wtforms.validators import DataRequired, Email
class RegisterForm(FlaskForm):
    """User-registration form (field labels are user-facing Russian text)."""
    # E-mail address; validated for e-mail syntax.
    email = StringField("Email: ", validators=[Email()])
    # Password ("Пароль") and its confirmation; both required.
    password = PasswordField('Пароль', validators=[DataRequired()])
    password_again = PasswordField('Повторите пароль', validators=[DataRequired()])
    # Display name ("Имя пользователя"); required.
    name = StringField('Имя пользователя', validators=[DataRequired()])
    # Optional free-form "about me" text ("Немного о себе").
    about = TextAreaField("Немного о себе")
    # Submit button labelled "Войти" ("log in").
    submit = SubmitField('Войти')
"lguseva87@gmail.com"
] | lguseva87@gmail.com |
788ed2e5916d24970d79524c60e182a03ad4ecfb | a884039e1a8b0ab516b80c2186e0e3bad28d5147 | /Livros/Introdução à Programação - 500 Algoritmos resolvidos/Capitulo 2/Exercicios 2a/Algoritmo36_lea9.py | a368507b3389f066630181aa6ff943bc3796ea6c | [
"MIT"
] | permissive | ramonvaleriano/python- | 6e744e8bcd58d07f05cd31d42a5092e58091e9f0 | ada70918e945e8f2d3b59555e9ccc35cf0178dbd | refs/heads/main | 2023-04-10T14:04:24.497256 | 2021-04-22T18:49:11 | 2021-04-22T18:49:11 | 340,360,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | # Program: Algoritmo36_lea9.py
# Author: Ramon R. Valeriano
# Description:
# Developed: 14/03/2020 - 19:55
# Updated:
number1 = int(input("Enter with firs number: "))
number2 = int(input("Enter with second number: "))
sum_ = number1 + number2
print("The sum: %d" %sum_)
| [
"rrvaleriano@gmail.com"
] | rrvaleriano@gmail.com |
fe7851112c97a05e45bf29f35a97bfe85e188722 | aa35e638ed7b73ab64681bb28011951c4ea1da4e | /request-3d-skeletons.py | 38970014865c6fe0b9f83cffaf785b8ec7b23515 | [] | no_license | felippe-mendonca/dataset-creator | be3fd6b3b6955e6ff32488af38be0c590b17789e | 2a57d200b550806a35f6f8abef83d74f856748c1 | refs/heads/master | 2023-06-23T11:46:21.790864 | 2019-06-18T18:16:33 | 2019-06-18T18:16:33 | 150,271,810 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,152 | py | import os
import re
import sys
import cv2
import json
import time
import socket
import datetime
import numpy as np
from collections import defaultdict
from enum import Enum
from is_wire.core import Channel, Subscription, Message, Logger, ContentType
from is_msgs.image_pb2 import ObjectAnnotations
from utils import load_options, AnnotationsFetcher
from google.protobuf.json_format import MessageToDict
from pprint import pprint
MIN_REQUESTS = 50
MAX_REQUESTS = 300
DEADLINE_SEC = 5.0
class State(Enum):
    """States of the module-level request/reply driver loop below."""
    MAKE_REQUESTS = 1                   # publish localization requests
    RECV_REPLIES = 2                    # consume replies from the broker
    CHECK_END_OF_SEQUENCE_AND_SAVE = 3  # write finished sequences to disk
    CHECK_FOR_TIMEOUTED_REQUESTS = 4    # re-publish expired requests
    EXIT = 5                            # nothing left to do
LOCALIZATION_FILE = 'p{:03d}g{:02d}_3d.json'
log = Logger(name='Request3dSkeletons')
options = load_options(print_options=False)
if not os.path.exists(options.folder):
log.critical("Folder '{}' doesn't exist", options.folder)
files = next(os.walk(options.folder))[2] # only files from first folder level
annotation_files = list(filter(lambda x: x.endswith('_2d.json'), files))
log.debug('Parsing Annotation Files')
entries = defaultdict(lambda: defaultdict(list))
n_annotations = defaultdict(lambda: defaultdict(dict))
for annotation_file, n in zip(annotation_files, range(len(annotation_files))):
matches = re.search("p([0-9]{3})g([0-9]{2})c([0-9]{2})_2d.json", annotation_file)
if matches is None:
continue
person_id = int(matches.group(1))
gesture_id = int(matches.group(2))
camera_id = int(matches.group(3))
entries[person_id][gesture_id].append(camera_id)
annotation_path = os.path.join(options.folder, annotation_file)
with open(annotation_path, 'r') as f:
n = len(json.load(f)['annotations'])
n_annotations[person_id][gesture_id][camera_id] = n
log.debug('Checking if detections files already exists')
cameras = [int(camera_cfg.id) for camera_cfg in options.cameras]
pending_localizations = []
n_localizations = defaultdict(dict)
for person_id, gestures in entries.items():
for gesture_id, camera_ids in gestures.items():
if set(camera_ids) != set(cameras):
log.warn("PERSON_ID: {:03d} GESTURE_ID: {:02d} | Can't find all detections file.",
person_id, gesture_id)
continue
n_an = list(n_annotations[person_id][gesture_id].values())
if not all(map(lambda x: x == n_an[0], n_an)):
log.warn("PERSON_ID: {:03d} GESTURE_ID: {:02d} | Annotations size inconsistent.",
person_id, gesture_id)
continue
file = os.path.join(options.folder, LOCALIZATION_FILE.format(person_id, gesture_id))
if os.path.exists(file):
with open(file, 'r') as f:
n_loc = len(json.load(f)['localizations'])
if n_loc == n_an[0]:
log.info('PERSON_ID: {:03d} GESTURE_ID: {:02d} | Already have localization file.',
person_id, gesture_id)
continue
n_localizations[person_id][gesture_id] = n_an[0]
pending_localizations.append({
'person_id': person_id,
'gesture_id': gesture_id,
'n_localizations': n_an[0]
})
if len(pending_localizations) == 0:
log.info("Exiting...")
sys.exit(0)
channel = Channel(options.broker_uri)
subscription = Subscription(channel)
requests = {}
localizations_received = defaultdict(lambda: defaultdict(dict))
state = State.MAKE_REQUESTS
annotations_fetcher = AnnotationsFetcher(
pending_localizations=pending_localizations, cameras=cameras, base_folder=options.folder)
while True:
if state == State.MAKE_REQUESTS:
state = State.RECV_REPLIES
if len(requests) < MIN_REQUESTS:
while len(requests) <= MAX_REQUESTS:
person_id, gesture_id, pos, annotations = annotations_fetcher.next()
if pos is None:
if len(requests) == 0:
state = State.EXIT
break
msg = Message(reply_to=subscription, content_type=ContentType.JSON)
body = json.dumps({'list': annotations}).encode('utf-8')
msg.body = body
msg.timeout = DEADLINE_SEC
channel.publish(msg, topic='SkeletonsGrouper.Localize')
requests[msg.correlation_id] = {
'body': body,
'person_id': person_id,
'gesture_id': gesture_id,
'pos': pos,
'requested_at': time.time()
}
continue
elif state == State.RECV_REPLIES:
try:
msg = channel.consume(timeout=1.0)
if msg.status.ok():
localizations = msg.unpack(ObjectAnnotations)
cid = msg.correlation_id
if cid in requests:
person_id = requests[cid]['person_id']
gesture_id = requests[cid]['gesture_id']
pos = requests[cid]['pos']
localizations_received[person_id][gesture_id][pos] = MessageToDict(
localizations,
preserving_proto_field_name=True,
including_default_value_fields=True)
del requests[cid]
state = State.CHECK_END_OF_SEQUENCE_AND_SAVE
except socket.timeout:
state = State.CHECK_FOR_TIMEOUTED_REQUESTS
continue
elif state == State.CHECK_END_OF_SEQUENCE_AND_SAVE:
done_sequences = []
for person_id, gestures in localizations_received.items():
for gesture_id, localizations_dict in gestures.items():
if len(localizations_dict) < n_localizations[person_id][gesture_id]:
continue
output_localizations = {
'localizations': [x[1] for x in sorted(localizations_dict.items())],
'created_at': datetime.datetime.now().isoformat()
}
filename = 'p{:03d}g{:02d}_3d.json'.format(person_id, gesture_id)
filepath = os.path.join(options.folder, filename)
with open(filepath, 'w') as f:
json.dump(output_localizations, f, indent=2)
done_sequences.append((person_id, gesture_id))
localizations_count = [
len(l['objects']) for l in output_localizations['localizations']
]
count_dict = map(lambda x: list(map(str, x)),
np.unique(localizations_count, return_counts=True))
count_info = json.dumps(dict(zip(*count_dict))).replace('"', '')
log.info('PERSON_ID: {:03d} GESTURE_ID: {:02d} Done! {}', person_id, gesture_id,
count_info)
for person_id, gesture_id in done_sequences:
del localizations_received[person_id][gesture_id]
state = State.CHECK_FOR_TIMEOUTED_REQUESTS
continue
elif state == State.CHECK_FOR_TIMEOUTED_REQUESTS:
new_requests = {}
for cid in list(requests.keys()):
request = requests[cid]
if (request['requested_at'] + DEADLINE_SEC) > time.time():
continue
msg = Message(reply_to=subscription, content_type=ContentType.JSON)
msg.body = request['body']
msg.timeout = DEADLINE_SEC
channel.publish(msg, topic='SkeletonsGrouper.Localize')
new_requests[msg.correlation_id] = {
'body': request['body'],
'person_id': request['gesture_id'],
'gesture_id': request['gesture_id'],
'pos': request['pos'],
'requested_at': time.time()
}
del requests[cid]
log.warn("Message '{}' timeouted. Sending another request.", cid)
requests.update(new_requests)
state = State.MAKE_REQUESTS
continue
elif state == State.EXIT:
log.info("Exiting...")
sys.exit(-1)
else:
state = State.MAKE_REQUESTS
continue
| [
"mendonca.felippe@gmail.com"
] | mendonca.felippe@gmail.com |
421a0b4b3adb4a231c693a21ffb4d19b7fd44a5c | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/CTRON-AppleTalk-ROUTER-MIB.py | cd4f746fc5022649fc87c04124d71cea11af6658 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 76,271 | py | #
# PySNMP MIB module CTRON-AppleTalk-ROUTER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CTRON-AppleTalk-ROUTER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:26:48 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
nwRtrProtoSuites, nwRouter = mibBuilder.importSymbols("ROUTER-OIDS", "nwRtrProtoSuites", "nwRouter")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, iso, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Bits, TimeTicks, MibIdentifier, ObjectIdentity, ModuleIdentity, Gauge32, Integer32, Counter32, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "iso", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Bits", "TimeTicks", "MibIdentifier", "ObjectIdentity", "ModuleIdentity", "Gauge32", "Integer32", "Counter32", "Counter64")
DisplayString, PhysAddress, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "PhysAddress", "TextualConvention")
nwRtrExperimental = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 4))
nwAtRouter = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4))
nwAtMibs = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 1))
nwAtComponents = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2))
nwAtSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 1))
nwAtForwarding = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2))
nwAtTopology = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4))
nwAtFib = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 5))
nwAtEndSystems = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6))
nwAtAccessControl = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 7))
nwAtFilters = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 8))
nwAtRedirector = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 9))
nwAtEvent = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10))
nwAtWorkGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 11))
nwAtNetDiag = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12))
nwAtSysConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 1, 1))
nwAtSysAdministration = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 1, 2))
nwAtFwdSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1))
nwAtFwdInterfaces = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2))
nwAtFwdCounters = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1))
nwAtFwdIfConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1))
nwAtFwdIfCounters = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2))
nwAtDistanceVector = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1))
nwAtLinkState = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 2))
nwAtProto = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1))
nwAtProtoSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1))
nwAtProtoInterface = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2))
nwAtProtoConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 1))
nwAtProtoCounters = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 2))
nwAtProtoIfConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1))
nwAtProtoIfCounters = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2))
nwAtHostsSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 1))
nwAtHostsInterfaces = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2))
nwAtHostsToMedia = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 3))
nwAtEventLogConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 1))
nwAtEventLogFilterTable = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 2))
nwAtEventLogTable = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 3))
nwAtNetDiagPing = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 1))
nwAtNetDiagTelnet = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 2))
nwAtNetDiagOutbound = MibIdentifier((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3))
class AtNetworkNumber(OctetString):
    """AppleTalk network number: a fixed-length 2-octet string (pysmi-generated TC)."""
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(2, 2)
    fixedLength = 2
class AtDdpNodeAddress(OctetString):
    """AppleTalk DDP node address: a fixed-length 3-octet string (pysmi-generated TC)."""
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(3, 3)
    fixedLength = 3
class AtName(OctetString):
    """Textual convention: an AppleTalk name (e.g. a zone name).

    Variable-length octet string of 0..32 octets.
    """
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 32)
# --- System-group scalars ------------------------------------------------
# MIB revision string plus router-wide identity and admin/oper state.
nwAtMibRevText = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtMibRevText.setStatus('mandatory')
# Router id: a 2-octet AppleTalk network number identifying this router.
nwAtSysRouterId = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 1, 1, 1), AtNetworkNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtSysRouterId.setStatus('mandatory')
# Desired (admin) vs. actual (oper) state of the AppleTalk router component.
nwAtSysAdminSTATUS = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtSysAdminSTATUS.setStatus('mandatory')
nwAtSysOperSTATUS = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 1, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3), ("pending-disable", 4), ("pending-enable", 5), ("invalid-config", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtSysOperSTATUS.setStatus('mandatory')
# Writing reset(2) triggers a restart of the component; time since then
# (in TimeTicks) is reported by nwAtSysOperationalTime.
nwAtSysAdminReset = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 1, 2, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtSysAdminReset.setStatus('mandatory')
nwAtSysOperationalTime = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 1, 2, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtSysOperationalTime.setStatus('mandatory')
nwAtSysVersion = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 1, 2, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtSysVersion.setStatus('mandatory')
# --- System-wide forwarding counters -------------------------------------
# Aggregate packet/byte counters for the AppleTalk forwarder, with their
# own admin/reset/uptime controls so counting can be toggled and zeroed.
nwAtFwdCtrAdminSTATUS = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtFwdCtrAdminSTATUS.setStatus('mandatory')
nwAtFwdCtrReset = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtFwdCtrReset.setStatus('mandatory')
nwAtFwdCtrOperationalTime = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrOperationalTime.setStatus('mandatory')
# Packet counters (forwarded / filtered / discarded / error classes).
nwAtFwdCtrInPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrInPkts.setStatus('mandatory')
nwAtFwdCtrOutPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrOutPkts.setStatus('mandatory')
nwAtFwdCtrFwdPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrFwdPkts.setStatus('mandatory')
nwAtFwdCtrFilteredPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrFilteredPkts.setStatus('mandatory')
nwAtFwdCtrDiscardPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrDiscardPkts.setStatus('mandatory')
nwAtFwdCtrAddrErrPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrAddrErrPkts.setStatus('mandatory')
nwAtFwdCtrLenErrPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrLenErrPkts.setStatus('mandatory')
nwAtFwdCtrHdrErrPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrHdrErrPkts.setStatus('mandatory')
# Byte counters mirroring the packet counters above.
nwAtFwdCtrInBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrInBytes.setStatus('mandatory')
nwAtFwdCtrOutBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrOutBytes.setStatus('mandatory')
nwAtFwdCtrFwdBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrFwdBytes.setStatus('mandatory')
nwAtFwdCtrFilteredBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrFilteredBytes.setStatus('mandatory')
nwAtFwdCtrDiscardBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrDiscardBytes.setStatus('mandatory')
# Host-bound traffic (packets/bytes delivered to or sourced from this box).
nwAtFwdCtrHostInPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrHostInPkts.setStatus('mandatory')
nwAtFwdCtrHostOutPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrHostOutPkts.setStatus('mandatory')
nwAtFwdCtrHostDiscardPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrHostDiscardPkts.setStatus('mandatory')
nwAtFwdCtrHostInBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrHostInBytes.setStatus('mandatory')
nwAtFwdCtrHostOutBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrHostOutBytes.setStatus('mandatory')
nwAtFwdCtrHostDiscardBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdCtrHostDiscardBytes.setStatus('mandatory')
# --- Per-interface forwarding configuration table ------------------------
# nwAtFwdIfTable: one row per router interface, indexed by nwAtFwdIfIndex.
# Holds admin/oper state, MTU, frame type, ACL binding and cache controls.
nwAtFwdIfTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1), )
if mibBuilder.loadTexts: nwAtFwdIfTable.setStatus('mandatory')
nwAtFwdIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1), ).setIndexNames((0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtFwdIfIndex"))
if mibBuilder.loadTexts: nwAtFwdIfEntry.setStatus('mandatory')
nwAtFwdIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfIndex.setStatus('mandatory')
nwAtFwdIfAdminSTATUS = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtFwdIfAdminSTATUS.setStatus('mandatory')
nwAtFwdIfOperSTATUS = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3), ("pending-disable", 4), ("pending-enable", 5), ("invalid-config", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfOperSTATUS.setStatus('mandatory')
nwAtFwdIfOperationalTime = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfOperationalTime.setStatus('mandatory')
# Row creation/deletion control (add/delete semantics, pre-RowStatus era).
nwAtFwdIfControl = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("add", 2), ("delete", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtFwdIfControl.setStatus('mandatory')
nwAtFwdIfMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtFwdIfMtu.setStatus('mandatory')
nwAtFwdIfForwarding = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtFwdIfForwarding.setStatus('mandatory')
# Link-layer framing; defaults to snap(3) via .clone('snap').
nwAtFwdIfFrameType = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 8, 9, 11, 16, 17))).clone(namedValues=NamedValues(("other", 1), ("ethernet", 2), ("snap", 3), ("nativewan", 8), ("encapenet", 9), ("encapenetsnap", 11), ("encapfddisnap", 16), ("canonical", 17))).clone('snap')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtFwdIfFrameType.setStatus('mandatory')
# Access-control-list binding for this interface.
nwAtFwdIfAclIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtFwdIfAclIdentifier.setStatus('mandatory')
nwAtFwdIfAclSTATUS = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtFwdIfAclSTATUS.setStatus('mandatory')
# Forwarding-cache control and statistics.
nwAtFwdIfCacheControl = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disable", 2), ("enable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtFwdIfCacheControl.setStatus('mandatory')
nwAtFwdIfCacheEntries = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCacheEntries.setStatus('mandatory')
nwAtFwdIfCacheHits = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCacheHits.setStatus('mandatory')
nwAtFwdIfCacheMisses = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCacheMisses.setStatus('mandatory')
# --- AppleTalk port table ------------------------------------------------
# nwAtportTable: one row per AppleTalk port, indexed by nwAtportIndex.
# Describes port type, network range, node address, zone configuration and
# basic in/out packet counts.
nwAtportTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2), )
if mibBuilder.loadTexts: nwAtportTable.setStatus('mandatory')
nwAtportEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1), ).setIndexNames((0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtportIndex"))
if mibBuilder.loadTexts: nwAtportEntry.setStatus('mandatory')
nwAtportIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtportIndex.setStatus('mandatory')
nwAtportDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtportDescr.setStatus('mandatory')
# Port media/encapsulation type enumeration.
nwAtportType = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24))).clone(namedValues=NamedValues(("other", 1), ("localtalk", 2), ("ethertalk1", 3), ("ethertalk2", 4), ("tokentalk", 5), ("iptalk", 6), ("serialPPP", 7), ("serialNonstandard", 8), ("virtual", 9), ("fdditalk", 10), ("arctalk", 11), ("smdstalk", 12), ("aurp", 13), ("frameRelay", 14), ("x25", 15), ("ip", 16), ("osi", 17), ("decnetIV", 18), ("arap", 19), ("isdnInThePacketMode", 20), ("nonAppleTalk3Com", 21), ("ipx", 22), ("arns", 23), ("hdlc", 24)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtportType.setStatus('mandatory')
# Extended network range [NetStart, NetEnd] and this port's DDP address.
nwAtportNetStart = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 4), AtNetworkNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtportNetStart.setStatus('mandatory')
nwAtportNetEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 5), AtNetworkNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtportNetEnd.setStatus('mandatory')
nwAtportNetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 6), AtDdpNodeAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtportNetAddress.setStatus('mandatory')
nwAtportSTATUS = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("routing", 1), ("unconfigured", 2), ("off", 3), ("invalid", 4), ("endNode", 5), ("offDueToConflict", 6), ("other", 7)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtportSTATUS.setStatus('mandatory')
# How the network range / zone list were obtained (seeded vs. learned).
nwAtportNetConfig = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("conflictOrientedSeed", 1), ("garnered", 2), ("guessed", 3), ("unconfigured", 4), ("conflictAverseSeed", 5), ("softSeed", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtportNetConfig.setStatus('mandatory')
nwAtportZoneConfig = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("conflictOrientedSeed", 1), ("garnered", 2), ("guessed", 3), ("unconfigured", 4), ("conflictAverseSeed", 5), ("softSeed", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtportZoneConfig.setStatus('mandatory')
nwAtportZoneDefault = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 10), AtName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtportZoneDefault.setStatus('mandatory')
nwAtportIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtportIfIndex.setStatus('mandatory')
# Addresses from which the net range / zone info were learned.
nwAtportNetFrom = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 12), AtDdpNodeAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtportNetFrom.setStatus('mandatory')
nwAtportZoneFrom = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 13), AtDdpNodeAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtportZoneFrom.setStatus('mandatory')
nwAtportInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtportInPkts.setStatus('mandatory')
nwAtportOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtportOutPkts.setStatus('mandatory')
nwAtportHome = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("home", 1), ("notHome", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtportHome.setStatus('mandatory')
nwAtportCurrentZone = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 17), AtName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtportCurrentZone.setStatus('mandatory')
nwAtportConflictPhysAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 1, 2, 1, 18), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtportConflictPhysAddr.setStatus('mandatory')
# --- Per-interface forwarding counter table ------------------------------
# nwAtFwdIfCtrTable: same counter set as the system-wide nwAtFwdCtr*
# scalars, but broken out per interface (indexed by nwAtFwdIfCtrIfIndex).
nwAtFwdIfCtrTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1), )
if mibBuilder.loadTexts: nwAtFwdIfCtrTable.setStatus('mandatory')
nwAtFwdIfCtrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1), ).setIndexNames((0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtFwdIfCtrIfIndex"))
if mibBuilder.loadTexts: nwAtFwdIfCtrEntry.setStatus('mandatory')
nwAtFwdIfCtrIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrIfIndex.setStatus('mandatory')
# Per-row admin/reset/uptime controls for the counters.
nwAtFwdIfCtrAdminSTATUS = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtFwdIfCtrAdminSTATUS.setStatus('mandatory')
nwAtFwdIfCtrReset = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtFwdIfCtrReset.setStatus('mandatory')
nwAtFwdIfCtrOperationalTime = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrOperationalTime.setStatus('mandatory')
# Packet counters.
nwAtFwdIfCtrInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrInPkts.setStatus('mandatory')
nwAtFwdIfCtrOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrOutPkts.setStatus('mandatory')
nwAtFwdIfCtrFwdPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrFwdPkts.setStatus('mandatory')
nwAtFwdIfCtrFilteredPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrFilteredPkts.setStatus('mandatory')
nwAtFwdIfCtrDiscardPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrDiscardPkts.setStatus('mandatory')
nwAtFwdIfCtrAddrErrPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrAddrErrPkts.setStatus('mandatory')
nwAtFwdIfCtrLenErrPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrLenErrPkts.setStatus('mandatory')
nwAtFwdIfCtrHdrErrPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrHdrErrPkts.setStatus('mandatory')
# Byte counters.
nwAtFwdIfCtrInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrInBytes.setStatus('mandatory')
nwAtFwdIfCtrOutBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrOutBytes.setStatus('mandatory')
nwAtFwdIfCtrFwdBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrFwdBytes.setStatus('mandatory')
nwAtFwdIfCtrFilteredBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrFilteredBytes.setStatus('mandatory')
nwAtFwdIfCtrDiscardBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrDiscardBytes.setStatus('mandatory')
# Host-bound traffic counters.
nwAtFwdIfCtrHostInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrHostInPkts.setStatus('mandatory')
nwAtFwdIfCtrHostOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrHostOutPkts.setStatus('mandatory')
nwAtFwdIfCtrHostDiscardPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrHostDiscardPkts.setStatus('mandatory')
nwAtFwdIfCtrHostInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrHostInBytes.setStatus('mandatory')
nwAtFwdIfCtrHostOutBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrHostOutBytes.setStatus('mandatory')
nwAtFwdIfCtrHostDiscardBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 2, 2, 2, 1, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFwdIfCtrHostDiscardBytes.setStatus('mandatory')
# --- Distance-vector routing protocol: system scalars --------------------
# Admin/oper state, thread tuning and route-table aging parameters for the
# routing-protocol component under nwAtProtoConfig.
nwAtProtoAdminSTATUS = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoAdminSTATUS.setStatus('mandatory')
nwAtProtoOperSTATUS = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3), ("pending-disable", 4), ("pending-enable", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoOperSTATUS.setStatus('mandatory')
nwAtProtoAdminReset = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoAdminReset.setStatus('mandatory')
nwAtProtoOperationalTime = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoOperationalTime.setStatus('mandatory')
nwAtProtoVersion = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoVersion.setStatus('mandatory')
# Implementation tuning knobs (thread stack/priority, database limits).
nwAtProtoStackSize = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoStackSize.setStatus('mandatory')
nwAtProtoThreadPriority = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoThreadPriority.setStatus('mandatory')
nwAtProtoDatabaseThreshold = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoDatabaseThreshold.setStatus('mandatory')
nwAtProtoAgeOut = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoAgeOut.setStatus('mandatory')
nwAtProtoHoldDown = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoHoldDown.setStatus('mandatory')
# --- Routing protocol: system-wide counters (nwAtProtoCounters) ----------
nwAtProtoCtrAdminSTATUS = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoCtrAdminSTATUS.setStatus('mandatory')
nwAtProtoCtrReset = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 2, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoCtrReset.setStatus('mandatory')
nwAtProtoCtrOperationalTime = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 2, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoCtrOperationalTime.setStatus('mandatory')
nwAtProtoCtrInPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 2, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoCtrInPkts.setStatus('mandatory')
nwAtProtoCtrOutPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 2, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoCtrOutPkts.setStatus('mandatory')
nwAtProtoCtrFilteredPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 2, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoCtrFilteredPkts.setStatus('mandatory')
nwAtProtoCtrDiscardPkts = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 2, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoCtrDiscardPkts.setStatus('mandatory')
nwAtProtoCtrInBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 2, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoCtrInBytes.setStatus('mandatory')
nwAtProtoCtrOutBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 2, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoCtrOutBytes.setStatus('mandatory')
nwAtProtoCtrFilteredBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 2, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoCtrFilteredBytes.setStatus('mandatory')
nwAtProtoCtrDiscardBytes = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 1, 2, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoCtrDiscardBytes.setStatus('mandatory')
# --- Routing protocol: per-interface configuration table -----------------
# nwAtProtoIfTable: per-interface routing-protocol settings (timers,
# split-horizon/poison-reverse, costs, ACL binding), indexed by
# nwAtProtoIfIndex.  Several columns carry pysnmp defaults via .clone(...).
nwAtProtoIfTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1), )
if mibBuilder.loadTexts: nwAtProtoIfTable.setStatus('mandatory')
nwAtProtoIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1), ).setIndexNames((0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtProtoIfIndex"))
if mibBuilder.loadTexts: nwAtProtoIfEntry.setStatus('mandatory')
nwAtProtoIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfIndex.setStatus('mandatory')
# Defaults to disabled(2).
nwAtProtoIfAdminSTATUS = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfAdminSTATUS.setStatus('mandatory')
nwAtProtoIfOperSTATUS = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3), ("pending-disable", 4), ("pending-enable", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfOperSTATUS.setStatus('mandatory')
nwAtProtoIfOperationalTime = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfOperationalTime.setStatus('mandatory')
nwAtProtoIfVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfVersion.setStatus('mandatory')
# Timers; advertisement interval defaults to 60 via .clone(60).
nwAtProtoIfAdvertisement = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 6), Integer32().clone(60)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfAdvertisement.setStatus('mandatory')
nwAtProtoIfFloodDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfFloodDelay.setStatus('mandatory')
nwAtProtoIfRequestDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfRequestDelay.setStatus('mandatory')
nwAtProtoIfPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfPriority.setStatus('mandatory')
nwAtProtoIfHelloTimer = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfHelloTimer.setStatus('mandatory')
# Distance-vector loop-prevention toggles: split-horizon defaults enabled,
# poison-reverse and snooping default disabled.
nwAtProtoIfSplitHorizon = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfSplitHorizon.setStatus('mandatory')
nwAtProtoIfPoisonReverse = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfPoisonReverse.setStatus('mandatory')
nwAtProtoIfSnooping = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfSnooping.setStatus('mandatory')
# bma = broadcast multi-access, nbma = non-broadcast multi-access.
nwAtProtoIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("bma", 2), ("nbma", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfType.setStatus('mandatory')
nwAtProtoIfXmitCost = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 15), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfXmitCost.setStatus('mandatory')
nwAtProtoIfAclIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 16), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfAclIdentifier.setStatus('mandatory')
nwAtProtoIfAclSTATUS = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 1, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfAclSTATUS.setStatus('mandatory')
# --- nwAtProtoIfCtr: per-interface routing-protocol counter table ---
# Indexed by nwAtProtoIfCtrIfIndex. Read-only Counter32 columns track
# packet/byte totals (in, out, filtered, discarded); the Reset column
# (read-write) lets a manager zero the counters.
nwAtProtoIfCtrTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1), )
if mibBuilder.loadTexts: nwAtProtoIfCtrTable.setStatus('mandatory')
nwAtProtoIfCtrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1), ).setIndexNames((0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtProtoIfCtrIfIndex"))
if mibBuilder.loadTexts: nwAtProtoIfCtrEntry.setStatus('mandatory')
nwAtProtoIfCtrIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfCtrIfIndex.setStatus('mandatory')
nwAtProtoIfCtrAdminSTATUS = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfCtrAdminSTATUS.setStatus('mandatory')
# Writing reset(2) clears the row's counters.
nwAtProtoIfCtrReset = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtProtoIfCtrReset.setStatus('mandatory')
nwAtProtoIfCtrOperationalTime = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfCtrOperationalTime.setStatus('mandatory')
nwAtProtoIfCtrInPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfCtrInPkts.setStatus('mandatory')
nwAtProtoIfCtrOutPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfCtrOutPkts.setStatus('mandatory')
nwAtProtoIfCtrFilteredPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfCtrFilteredPkts.setStatus('mandatory')
nwAtProtoIfCtrDiscardPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfCtrDiscardPkts.setStatus('mandatory')
nwAtProtoIfCtrInBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfCtrInBytes.setStatus('mandatory')
nwAtProtoIfCtrOutBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfCtrOutBytes.setStatus('mandatory')
nwAtProtoIfCtrFilteredBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfCtrFilteredBytes.setStatus('mandatory')
nwAtProtoIfCtrDiscardBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 4, 1, 1, 2, 2, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtProtoIfCtrDiscardBytes.setStatus('mandatory')
# --- nwAtFib: AppleTalk forwarding information base (route table) ---
# Indexed by the starting network number of the route's cable range.
# All columns are read-only: cable range (start/end net), next hop DDP
# address, outgoing interface, hop count, and route type.
nwAtFibTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 5, 1), )
if mibBuilder.loadTexts: nwAtFibTable.setStatus('mandatory')
nwAtFibEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 5, 1, 1), ).setIndexNames((0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtFibStartNet"))
if mibBuilder.loadTexts: nwAtFibEntry.setStatus('mandatory')
nwAtFibStartNet = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 5, 1, 1, 1), AtNetworkNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFibStartNet.setStatus('mandatory')
nwAtFibEndNet = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 5, 1, 1, 2), AtNetworkNumber()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFibEndNet.setStatus('mandatory')
nwAtFibNextHop = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 5, 1, 1, 3), AtDdpNodeAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFibNextHop.setStatus('mandatory')
nwAtFibNextHopIf = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 5, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFibNextHopIf.setStatus('mandatory')
nwAtFibHops = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 5, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFibHops.setStatus('mandatory')
nwAtFibRouteType = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 5, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("appleTalk", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtFibRouteType.setStatus('mandatory')
# --- nwAtHosts / nwAtHostCtl: end-system (host) mapping subsystem ---
# Two global scalars (cache-entry TTL, retry count) followed by the
# per-interface host-control table, indexed by nwAtHostCtlIfIndex.
nwAtHostsTimeToLive = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtHostsTimeToLive.setStatus('mandatory')
nwAtHostsRetryCount = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtHostsRetryCount.setStatus('mandatory')
nwAtHostCtlTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1), )
if mibBuilder.loadTexts: nwAtHostCtlTable.setStatus('mandatory')
nwAtHostCtlEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1), ).setIndexNames((0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtHostCtlIfIndex"))
if mibBuilder.loadTexts: nwAtHostCtlEntry.setStatus('mandatory')
nwAtHostCtlIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtHostCtlIfIndex.setStatus('mandatory')
nwAtHostCtlAdminSTATUS = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disable", 2), ("enable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtHostCtlAdminSTATUS.setStatus('mandatory')
# Oper status adds transitional pending-disable/pending-enable states.
nwAtHostCtlOperSTATUS = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3), ("pending-disable", 4), ("pending-enable", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtHostCtlOperSTATUS.setStatus('mandatory')
nwAtHostCtlOperationalTime = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 4), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtHostCtlOperationalTime.setStatus('mandatory')
nwAtHostCtlProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disable", 2), ("enable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtHostCtlProtocol.setStatus('mandatory')
nwAtHostCtlSnooping = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disable", 2), ("enable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtHostCtlSnooping.setStatus('mandatory')
nwAtHostCtlProxy = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disable", 2), ("enable", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtHostCtlProxy.setStatus('mandatory')
nwAtHostCtlCacheMax = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtHostCtlCacheMax.setStatus('mandatory')
nwAtHostCtlCacheSize = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtHostCtlCacheSize.setStatus('mandatory')
nwAtHostCtlNumStatics = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtHostCtlNumStatics.setStatus('mandatory')
nwAtHostCtlNumDynamics = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtHostCtlNumDynamics.setStatus('mandatory')
nwAtHostCtlCacheHits = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtHostCtlCacheHits.setStatus('mandatory')
nwAtHostCtlCacheMisses = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 2, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtHostCtlCacheMisses.setStatus('mandatory')
# --- nwAtAcl: AppleTalk access-control list ---
# A valid-entry gauge plus a table indexed by (ACL identifier, sequence
# number). Each row carries a permit/deny permission, a match counter,
# and destination/source zone names the rule applies to.
nwAtAclValidEntries = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 7, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtAclValidEntries.setStatus('mandatory')
nwAtAclTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 7, 2), )
if mibBuilder.loadTexts: nwAtAclTable.setStatus('mandatory')
nwAtAclEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 7, 2, 1), ).setIndexNames((0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtAclIdentifier"), (0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtAclSequence"))
if mibBuilder.loadTexts: nwAtAclEntry.setStatus('mandatory')
nwAtAclIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 7, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtAclIdentifier.setStatus('mandatory')
nwAtAclSequence = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 7, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtAclSequence.setStatus('mandatory')
# Permission includes bidirectional permit/deny variants (values 5 and 6).
nwAtAclPermission = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 7, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("other", 1), ("invalid", 2), ("permit", 3), ("deny", 4), ("permit-bidirectional", 5), ("deny-bidirectional", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtAclPermission.setStatus('mandatory')
nwAtAclMatches = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 7, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtAclMatches.setStatus('mandatory')
nwAtAclDestZone = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 7, 2, 1, 5), AtName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtAclDestZone.setStatus('mandatory')
nwAtAclSrcZone = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 7, 2, 1, 6), AtName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtAclSrcZone.setStatus('mandatory')
# --- nwAtEvent: event logging subsystem ---
# Global enable/size/trace-all scalars, an event-filter table indexed by
# (protocol, interface number) controlling which event types/severities
# are logged or trapped, and the event-log table itself indexed by
# nwAtEventNumber.
nwAtEventAdminSTATUS = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtEventAdminSTATUS.setStatus('mandatory')
nwAtEventMaxEntries = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtEventMaxEntries.setStatus('mandatory')
nwAtEventTraceAll = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("disabled", 2), ("enabled", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtEventTraceAll.setStatus('mandatory')
nwAtEventFilterTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 2, 1), )
if mibBuilder.loadTexts: nwAtEventFilterTable.setStatus('mandatory')
nwAtEventFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 2, 1, 1), ).setIndexNames((0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtEventFltrProtocol"), (0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtEventFltrIfNum"))
if mibBuilder.loadTexts: nwAtEventFilterEntry.setStatus('mandatory')
nwAtEventFltrProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtEventFltrProtocol.setStatus('mandatory')
nwAtEventFltrIfNum = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtEventFltrIfNum.setStatus('mandatory')
nwAtEventFltrControl = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("delete", 2), ("add", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtEventFltrControl.setStatus('mandatory')
# Event-type values are bit-mask style (1, 2, 4, 8, 16, 32).
nwAtEventFltrType = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 4, 8, 16, 32))).clone(namedValues=NamedValues(("misc", 1), ("timer", 2), ("rcv", 4), ("xmit", 8), ("event", 16), ("error", 32)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtEventFltrType.setStatus('mandatory')
nwAtEventFltrSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("highest", 1), ("highmed", 2), ("highlow", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtEventFltrSeverity.setStatus('mandatory')
nwAtEventFltrAction = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("log", 1), ("trap", 2), ("log-trap", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtEventFltrAction.setStatus('mandatory')
nwAtEventTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 3, 1), )
if mibBuilder.loadTexts: nwAtEventTable.setStatus('mandatory')
nwAtEventEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 3, 1, 1), ).setIndexNames((0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtEventNumber"))
if mibBuilder.loadTexts: nwAtEventEntry.setStatus('mandatory')
nwAtEventNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtEventNumber.setStatus('mandatory')
nwAtEventTime = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 3, 1, 1, 2), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtEventTime.setStatus('mandatory')
nwAtEventType = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 4, 8, 16, 32))).clone(namedValues=NamedValues(("misc", 1), ("timer", 2), ("rcv", 4), ("xmit", 8), ("event", 16), ("error", 32)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtEventType.setStatus('mandatory')
nwAtEventSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("highest", 1), ("highmed", 2), ("highlow", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtEventSeverity.setStatus('mandatory')
nwAtEventProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 3, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtEventProtocol.setStatus('mandatory')
nwAtEventIfNum = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 3, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtEventIfNum.setStatus('mandatory')
nwAtEventTextString = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 10, 3, 1, 1, 7), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtEventTextString.setStatus('mandatory')
# --- nwAtNetDiagOutbound: outbound network diagnostics scalars ---
# Read-write control/status scalars used to launch AppleTalk diagnostic
# operations toward a target (AppleTalk Echo, NBP lookups, RTMP requests,
# ZIP queries) and read back their results.
nwAtNetDiagOutboundNetAddress = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 1), AtDdpNodeAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundNetAddress.setStatus('mandatory')
nwAtNetDiagOutboundPort = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundPort.setStatus('mandatory')
nwAtNetDiagOutboundTimeout = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundTimeout.setStatus('mandatory')
nwAtNetDiagOutboundRetries = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundRetries.setStatus('mandatory')
# AppleTalk Echo (AEP) trigger and result status.
nwAtNetDiagOutboundATEchoType = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("sendEchoRequest", 2), ("other", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundATEchoType.setStatus('mandatory')
nwAtNetDiagOutboundATEchoStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("inactive", 1), ("inProgress", 2), ("timeout", 3), ("success", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundATEchoStatus.setStatus('mandatory')
# NBP lookup parameters: entity object/type/zone triple plus request kind.
nwAtNetDiagOutboundNBPEntityObject = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 7), AtName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundNBPEntityObject.setStatus('mandatory')
nwAtNetDiagOutboundNBPEntityType = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 8), AtName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundNBPEntityType.setStatus('mandatory')
nwAtNetDiagOutboundNBPEntityZone = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 9), AtName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundNBPEntityZone.setStatus('mandatory')
nwAtNetDiagOutboundNBPType = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("noAction", 1), ("localRequest", 2), ("lookupMcast", 3), ("lookupBcast", 4), ("lookupDirect", 5), ("bcastRequestBcast", 6), ("bcastRequestDirect", 7), ("forwardRequestBcast", 8), ("forwardRequestDirect", 9), ("other", 10)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundNBPType.setStatus('mandatory')
nwAtNetDiagOutboundNBPStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inactive", 1), ("inProgress", 2), ("done", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundNBPStatus.setStatus('mandatory')
# RTMP request kind/status and the net range returned by the request.
nwAtNetDiagOutboundRTMPType = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("noAction", 1), ("sendRequest", 2), ("bcastRequest", 3), ("sendRDRequestSplitHorizon", 4), ("bcastRDRequestSplitHorizon", 5), ("sendRDRequestFullTable", 6), ("bcastRDRequestFullTable", 7), ("other", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundRTMPType.setStatus('mandatory')
nwAtNetDiagOutboundRTMPStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inactive", 1), ("inProgress", 2), ("done", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundRTMPStatus.setStatus('mandatory')
nwAtNetDiagOutboundRTMPNetStart = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 14), AtNetworkNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundRTMPNetStart.setStatus('mandatory')
nwAtNetDiagOutboundRTMPNetEnd = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 15), AtNetworkNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundRTMPNetEnd.setStatus('mandatory')
# ZIP query/GetNetInfo parameters and results.
nwAtNetDiagOutboundZIPType = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("noAction", 1), ("sendQuery", 2), ("bcastQuery", 3), ("sendGetZonesList", 4), ("sendGetLocalZones", 5), ("sendGetMyZone", 6), ("sendGetNetInfo", 7), ("bcastGetNetInfo", 8), ("other", 9)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundZIPType.setStatus('mandatory')
nwAtNetDiagOutboundZIPStatus = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("inactive", 1), ("queryInProgress", 2), ("atpInProgress", 3), ("gniInProgress", 4), ("done", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundZIPStatus.setStatus('mandatory')
nwAtNetDiagOutboundZIPQueryNet = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 18), AtNetworkNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundZIPQueryNet.setStatus('mandatory')
nwAtNetDiagOutboundZIPQueryZone = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 19), AtName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundZIPQueryZone.setStatus('mandatory')
nwAtNetDiagOutboundZIPGetNetInfoZone = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 20), AtName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundZIPGetNetInfoZone.setStatus('mandatory')
nwAtNetDiagOutboundZIPGetNetInfoNetStart = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 21), AtNetworkNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundZIPGetNetInfoNetStart.setStatus('mandatory')
nwAtNetDiagOutboundZIPGetNetInfoNetEnd = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 22), AtNetworkNumber()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundZIPGetNetInfoNetEnd.setStatus('mandatory')
nwAtNetDiagOutboundZIPGetNetInfoMulticast = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 23), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundZIPGetNetInfoMulticast.setStatus('mandatory')
nwAtNetDiagOutboundZIPGetNetInfoDefaultZone = MibScalar((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 12, 3, 24), AtName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtNetDiagOutboundZIPGetNetInfoDefaultZone.setStatus('mandatory')
# --- nwAtportZone: zones configured per AppleTalk port ---
# Indexed by (port number, zone name); the Status column is read-write
# and a row can be marked invalid(2) to remove the zone binding.
nwAtportZoneTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 4, 1), )
if mibBuilder.loadTexts: nwAtportZoneTable.setStatus('mandatory')
nwAtportZoneEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 4, 1, 1), ).setIndexNames((0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtportZonePort"), (0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtportZoneName"))
if mibBuilder.loadTexts: nwAtportZoneEntry.setStatus('mandatory')
nwAtportZonePort = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtportZonePort.setStatus('mandatory')
nwAtportZoneName = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 4, 1, 1, 2), AtName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtportZoneName.setStatus('mandatory')
nwAtportZoneStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("valid", 1), ("invalid", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtportZoneStatus.setStatus('mandatory')
# --- nwAtHostMap: AppleTalk-address to physical-address mapping table ---
# Indexed by (interface index, DDP node address). Maps a host's AppleTalk
# address to its MAC/physical address, with entry type (static/dynamic/
# inactive), circuit id, and link-layer framing/encapsulation.
nwAtHostMapTable = MibTable((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 3, 1), )
if mibBuilder.loadTexts: nwAtHostMapTable.setStatus('mandatory')
nwAtHostMapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 3, 1, 1), ).setIndexNames((0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtHostMapIfIndex"), (0, "CTRON-AppleTalk-ROUTER-MIB", "nwAtHostMapAtAddr"))
if mibBuilder.loadTexts: nwAtHostMapEntry.setStatus('mandatory')
nwAtHostMapIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtHostMapIfIndex.setStatus('mandatory')
nwAtHostMapAtAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 3, 1, 1, 2), AtDdpNodeAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtHostMapAtAddr.setStatus('mandatory')
nwAtHostMapPhysAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 3, 1, 1, 3), PhysAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtHostMapPhysAddr.setStatus('mandatory')
nwAtHostMapType = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("invalid", 2), ("dynamic", 3), ("static", 4), ("inactive", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtHostMapType.setStatus('mandatory')
nwAtHostMapCircuitID = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 3, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtHostMapCircuitID.setStatus('mandatory')
# Framing values are non-contiguous enumerations matching the link types.
nwAtHostMapFraming = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 8, 9, 11, 16, 17))).clone(namedValues=NamedValues(("other", 1), ("ethernet", 2), ("snap", 3), ("nativewan", 8), ("encapenet", 9), ("encapenetsnap", 11), ("encapfddisnap", 16), ("canonical", 17)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: nwAtHostMapFraming.setStatus('mandatory')
nwAtHostMapPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 52, 4, 2, 2, 2, 3, 4, 2, 6, 3, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: nwAtHostMapPortNumber.setStatus('mandatory')
mibBuilder.exportSymbols("CTRON-AppleTalk-ROUTER-MIB", nwAtEventTraceAll=nwAtEventTraceAll, nwAtRedirector=nwAtRedirector, nwAtSysAdministration=nwAtSysAdministration, nwAtFwdCtrAdminSTATUS=nwAtFwdCtrAdminSTATUS, nwAtProtoIfCtrTable=nwAtProtoIfCtrTable, nwAtEventNumber=nwAtEventNumber, nwAtFwdCtrInPkts=nwAtFwdCtrInPkts, nwAtNetDiagOutboundZIPGetNetInfoDefaultZone=nwAtNetDiagOutboundZIPGetNetInfoDefaultZone, nwAtProtoIfAdminSTATUS=nwAtProtoIfAdminSTATUS, nwAtHostCtlProtocol=nwAtHostCtlProtocol, nwAtSysOperSTATUS=nwAtSysOperSTATUS, nwAtMibs=nwAtMibs, nwAtFwdIfCtrIfIndex=nwAtFwdIfCtrIfIndex, nwAtSysRouterId=nwAtSysRouterId, nwAtProtoIfPriority=nwAtProtoIfPriority, nwAtFibHops=nwAtFibHops, nwAtHostCtlNumDynamics=nwAtHostCtlNumDynamics, nwAtNetDiagOutboundATEchoStatus=nwAtNetDiagOutboundATEchoStatus, nwAtProtoDatabaseThreshold=nwAtProtoDatabaseThreshold, nwAtProtoIfSnooping=nwAtProtoIfSnooping, nwAtAclEntry=nwAtAclEntry, nwAtFibTable=nwAtFibTable, nwAtportZonePort=nwAtportZonePort, nwAtProtoSystem=nwAtProtoSystem, nwAtportZoneName=nwAtportZoneName, nwAtFwdCtrFwdPkts=nwAtFwdCtrFwdPkts, nwAtProtoIfXmitCost=nwAtProtoIfXmitCost, nwAtFwdCtrHostOutPkts=nwAtFwdCtrHostOutPkts, nwAtHostMapFraming=nwAtHostMapFraming, nwAtFwdIfCacheMisses=nwAtFwdIfCacheMisses, nwRtrExperimental=nwRtrExperimental, nwAtProtoIfAclIdentifier=nwAtProtoIfAclIdentifier, nwAtProtoIfVersion=nwAtProtoIfVersion, nwAtProtoIfCtrDiscardBytes=nwAtProtoIfCtrDiscardBytes, nwAtFibStartNet=nwAtFibStartNet, nwAtAclIdentifier=nwAtAclIdentifier, nwAtFwdIfAclIdentifier=nwAtFwdIfAclIdentifier, nwAtFwdSystem=nwAtFwdSystem, nwAtProtoAdminReset=nwAtProtoAdminReset, nwAtportNetEnd=nwAtportNetEnd, nwAtHostCtlOperSTATUS=nwAtHostCtlOperSTATUS, nwAtFwdIfCtrReset=nwAtFwdIfCtrReset, nwAtProtoCtrDiscardBytes=nwAtProtoCtrDiscardBytes, nwAtFwdIfOperationalTime=nwAtFwdIfOperationalTime, nwAtEventMaxEntries=nwAtEventMaxEntries, nwAtNetDiagOutboundNBPEntityType=nwAtNetDiagOutboundNBPEntityType, 
nwAtNetDiagOutboundNBPEntityZone=nwAtNetDiagOutboundNBPEntityZone, nwAtProto=nwAtProto, nwAtFwdIfCtrOutPkts=nwAtFwdIfCtrOutPkts, nwAtportType=nwAtportType, nwAtportInPkts=nwAtportInPkts, nwAtNetDiagOutboundRetries=nwAtNetDiagOutboundRetries, nwAtNetDiagOutboundRTMPNetStart=nwAtNetDiagOutboundRTMPNetStart, nwAtFwdIfCtrLenErrPkts=nwAtFwdIfCtrLenErrPkts, nwAtMibRevText=nwAtMibRevText, nwAtHostCtlNumStatics=nwAtHostCtlNumStatics, nwAtEventSeverity=nwAtEventSeverity, nwAtFwdIfMtu=nwAtFwdIfMtu, nwAtForwarding=nwAtForwarding, nwAtFwdCtrOperationalTime=nwAtFwdCtrOperationalTime, nwAtEventFltrSeverity=nwAtEventFltrSeverity, nwAtNetDiagOutboundZIPGetNetInfoNetEnd=nwAtNetDiagOutboundZIPGetNetInfoNetEnd, nwAtFwdCtrDiscardBytes=nwAtFwdCtrDiscardBytes, nwAtFwdIfCtrAdminSTATUS=nwAtFwdIfCtrAdminSTATUS, nwAtHostCtlIfIndex=nwAtHostCtlIfIndex, nwAtNetDiagOutboundZIPGetNetInfoZone=nwAtNetDiagOutboundZIPGetNetInfoZone, nwAtFwdCtrOutPkts=nwAtFwdCtrOutPkts, nwAtEventType=nwAtEventType, nwAtFwdIfCacheEntries=nwAtFwdIfCacheEntries, nwAtportTable=nwAtportTable, nwAtEventLogConfig=nwAtEventLogConfig, nwAtFwdIfCtrTable=nwAtFwdIfCtrTable, nwAtProtoIfSplitHorizon=nwAtProtoIfSplitHorizon, nwAtFwdIfAdminSTATUS=nwAtFwdIfAdminSTATUS, nwAtFwdIfTable=nwAtFwdIfTable, nwAtFilters=nwAtFilters, nwAtHostMapCircuitID=nwAtHostMapCircuitID, nwAtHostCtlCacheMax=nwAtHostCtlCacheMax, nwAtFwdIfCtrFwdPkts=nwAtFwdIfCtrFwdPkts, nwAtNetDiagOutboundNetAddress=nwAtNetDiagOutboundNetAddress, nwAtportNetConfig=nwAtportNetConfig, nwAtEventLogFilterTable=nwAtEventLogFilterTable, nwAtProtoIfCtrInBytes=nwAtProtoIfCtrInBytes, nwAtProtoCtrFilteredBytes=nwAtProtoCtrFilteredBytes, nwAtFwdIfCtrHostOutBytes=nwAtFwdIfCtrHostOutBytes, nwAtNetDiagOutboundATEchoType=nwAtNetDiagOutboundATEchoType, nwAtFwdCtrHdrErrPkts=nwAtFwdCtrHdrErrPkts, nwAtProtoCounters=nwAtProtoCounters, nwAtportDescr=nwAtportDescr, nwAtHostMapTable=nwAtHostMapTable, nwAtProtoIfConfig=nwAtProtoIfConfig, nwAtProtoVersion=nwAtProtoVersion, 
nwAtProtoHoldDown=nwAtProtoHoldDown, nwAtHostCtlProxy=nwAtHostCtlProxy, nwAtHostMapAtAddr=nwAtHostMapAtAddr, nwAtFwdIfConfig=nwAtFwdIfConfig, nwAtFwdIfCtrDiscardBytes=nwAtFwdIfCtrDiscardBytes, nwAtNetDiagOutboundZIPStatus=nwAtNetDiagOutboundZIPStatus, nwAtFwdIfCacheControl=nwAtFwdIfCacheControl, nwAtFwdIfCtrFwdBytes=nwAtFwdIfCtrFwdBytes, AtName=AtName, nwAtProtoIfFloodDelay=nwAtProtoIfFloodDelay, nwAtFwdIfCtrHdrErrPkts=nwAtFwdIfCtrHdrErrPkts, nwAtFwdCtrInBytes=nwAtFwdCtrInBytes, nwAtFwdIfCtrHostDiscardBytes=nwAtFwdIfCtrHostDiscardBytes, nwAtEventTable=nwAtEventTable, nwAtFwdCtrAddrErrPkts=nwAtFwdCtrAddrErrPkts, nwAtProtoIfPoisonReverse=nwAtProtoIfPoisonReverse, nwAtProtoIfCtrDiscardPkts=nwAtProtoIfCtrDiscardPkts, nwAtProtoCtrReset=nwAtProtoCtrReset, nwAtEndSystems=nwAtEndSystems, nwAtFwdIfOperSTATUS=nwAtFwdIfOperSTATUS, nwAtFwdIfCtrOperationalTime=nwAtFwdIfCtrOperationalTime, nwAtProtoConfig=nwAtProtoConfig, nwAtProtoAgeOut=nwAtProtoAgeOut, nwAtFwdCounters=nwAtFwdCounters, nwAtNetDiagOutboundNBPEntityObject=nwAtNetDiagOutboundNBPEntityObject, nwAtHostsInterfaces=nwAtHostsInterfaces, nwAtTopology=nwAtTopology, nwAtEvent=nwAtEvent, nwAtFwdIfCtrHostInBytes=nwAtFwdIfCtrHostInBytes, nwAtEventFilterEntry=nwAtEventFilterEntry, nwAtEventIfNum=nwAtEventIfNum, nwAtNetDiagOutboundZIPType=nwAtNetDiagOutboundZIPType, nwAtHostCtlEntry=nwAtHostCtlEntry, nwAtFwdCtrFilteredPkts=nwAtFwdCtrFilteredPkts, nwAtNetDiagOutboundTimeout=nwAtNetDiagOutboundTimeout, nwAtHostsSystem=nwAtHostsSystem, nwAtProtoIfCounters=nwAtProtoIfCounters, nwAtportCurrentZone=nwAtportCurrentZone, nwAtEventFltrAction=nwAtEventFltrAction, nwAtEventTextString=nwAtEventTextString, nwAtEventAdminSTATUS=nwAtEventAdminSTATUS, nwAtEventProtocol=nwAtEventProtocol, nwAtFwdCtrFilteredBytes=nwAtFwdCtrFilteredBytes, AtDdpNodeAddress=AtDdpNodeAddress, nwAtProtoIfCtrEntry=nwAtProtoIfCtrEntry, nwAtNetDiagOutboundZIPQueryNet=nwAtNetDiagOutboundZIPQueryNet, nwAtFwdIfIndex=nwAtFwdIfIndex, nwAtFibEntry=nwAtFibEntry, 
nwAtProtoCtrAdminSTATUS=nwAtProtoCtrAdminSTATUS, nwAtProtoIfCtrFilteredBytes=nwAtProtoIfCtrFilteredBytes, nwAtSysAdminSTATUS=nwAtSysAdminSTATUS, nwAtProtoIfCtrFilteredPkts=nwAtProtoIfCtrFilteredPkts, nwAtHostMapPortNumber=nwAtHostMapPortNumber, nwAtportZoneFrom=nwAtportZoneFrom, nwAtProtoIfOperSTATUS=nwAtProtoIfOperSTATUS, nwAtAclDestZone=nwAtAclDestZone, nwAtportZoneTable=nwAtportZoneTable, nwAtProtoIfAdvertisement=nwAtProtoIfAdvertisement, AtNetworkNumber=AtNetworkNumber, nwAtFwdIfFrameType=nwAtFwdIfFrameType, nwAtHostCtlCacheSize=nwAtHostCtlCacheSize, nwAtFwdCtrHostDiscardPkts=nwAtFwdCtrHostDiscardPkts, nwAtProtoIfRequestDelay=nwAtProtoIfRequestDelay, nwAtNetDiagOutboundZIPQueryZone=nwAtNetDiagOutboundZIPQueryZone, nwAtFwdIfCacheHits=nwAtFwdIfCacheHits, nwAtHostCtlAdminSTATUS=nwAtHostCtlAdminSTATUS, nwAtProtoIfEntry=nwAtProtoIfEntry, nwAtHostCtlSnooping=nwAtHostCtlSnooping, nwAtFibEndNet=nwAtFibEndNet, nwAtAclSrcZone=nwAtAclSrcZone, nwAtAclSequence=nwAtAclSequence, nwAtSysOperationalTime=nwAtSysOperationalTime, nwAtLinkState=nwAtLinkState, nwAtportHome=nwAtportHome, nwAtProtoIfType=nwAtProtoIfType, nwAtProtoCtrFilteredPkts=nwAtProtoCtrFilteredPkts, nwAtHostMapEntry=nwAtHostMapEntry, nwAtportIndex=nwAtportIndex, nwAtProtoCtrDiscardPkts=nwAtProtoCtrDiscardPkts, nwAtNetDiagOutboundRTMPType=nwAtNetDiagOutboundRTMPType, nwAtProtoIfCtrIfIndex=nwAtProtoIfCtrIfIndex, nwAtFwdIfCtrInBytes=nwAtFwdIfCtrInBytes, nwAtNetDiagOutboundPort=nwAtNetDiagOutboundPort, nwAtHostsToMedia=nwAtHostsToMedia, nwAtFwdCtrOutBytes=nwAtFwdCtrOutBytes, nwAtFwdCtrHostDiscardBytes=nwAtFwdCtrHostDiscardBytes, nwAtHostCtlTable=nwAtHostCtlTable, nwAtFwdIfCtrAddrErrPkts=nwAtFwdIfCtrAddrErrPkts, nwAtFwdIfCtrOutBytes=nwAtFwdIfCtrOutBytes, nwAtFwdIfCtrInPkts=nwAtFwdIfCtrInPkts, nwAtSystem=nwAtSystem, nwAtFwdIfCtrEntry=nwAtFwdIfCtrEntry, nwAtProtoCtrInPkts=nwAtProtoCtrInPkts, nwAtHostCtlCacheMisses=nwAtHostCtlCacheMisses, 
nwAtNetDiagOutboundZIPGetNetInfoMulticast=nwAtNetDiagOutboundZIPGetNetInfoMulticast, nwAtEventTime=nwAtEventTime, nwAtFwdIfCounters=nwAtFwdIfCounters, nwAtportConflictPhysAddr=nwAtportConflictPhysAddr, nwAtNetDiagOutboundRTMPNetEnd=nwAtNetDiagOutboundRTMPNetEnd, nwAtSysVersion=nwAtSysVersion, nwAtProtoIfCtrOutBytes=nwAtProtoIfCtrOutBytes, nwAtportNetAddress=nwAtportNetAddress, nwAtRouter=nwAtRouter, nwAtSysAdminReset=nwAtSysAdminReset, nwAtFwdIfCtrHostInPkts=nwAtFwdIfCtrHostInPkts, nwAtAclValidEntries=nwAtAclValidEntries, nwAtProtoAdminSTATUS=nwAtProtoAdminSTATUS, nwAtEventFltrProtocol=nwAtEventFltrProtocol, nwAtNetDiagOutboundZIPGetNetInfoNetStart=nwAtNetDiagOutboundZIPGetNetInfoNetStart, nwAtFwdIfAclSTATUS=nwAtFwdIfAclSTATUS, nwAtProtoIfOperationalTime=nwAtProtoIfOperationalTime, nwAtProtoIfCtrAdminSTATUS=nwAtProtoIfCtrAdminSTATUS, nwAtHostsTimeToLive=nwAtHostsTimeToLive, nwAtAccessControl=nwAtAccessControl, nwAtportZoneDefault=nwAtportZoneDefault, nwAtFibRouteType=nwAtFibRouteType, nwAtEventFltrType=nwAtEventFltrType, nwAtportNetStart=nwAtportNetStart, nwAtportZoneStatus=nwAtportZoneStatus, nwAtHostMapIfIndex=nwAtHostMapIfIndex, nwAtProtoIfCtrInPkts=nwAtProtoIfCtrInPkts, nwAtFib=nwAtFib, nwAtFwdInterfaces=nwAtFwdInterfaces, nwAtFwdCtrDiscardPkts=nwAtFwdCtrDiscardPkts, nwAtProtoOperSTATUS=nwAtProtoOperSTATUS, nwAtProtoIfCtrOperationalTime=nwAtProtoIfCtrOperationalTime, nwAtEventFilterTable=nwAtEventFilterTable, nwAtportEntry=nwAtportEntry, nwAtNetDiagOutboundNBPType=nwAtNetDiagOutboundNBPType, nwAtHostsRetryCount=nwAtHostsRetryCount, nwAtProtoIfCtrReset=nwAtProtoIfCtrReset, nwAtFwdCtrReset=nwAtFwdCtrReset, nwAtAclTable=nwAtAclTable, nwAtFwdIfEntry=nwAtFwdIfEntry, nwAtAclPermission=nwAtAclPermission, nwAtportSTATUS=nwAtportSTATUS, nwAtportIfIndex=nwAtportIfIndex, nwAtNetDiagOutbound=nwAtNetDiagOutbound, nwAtFibNextHop=nwAtFibNextHop, nwAtNetDiag=nwAtNetDiag, nwAtWorkGroup=nwAtWorkGroup, nwAtFwdIfCtrHostDiscardPkts=nwAtFwdIfCtrHostDiscardPkts, 
nwAtFwdCtrHostOutBytes=nwAtFwdCtrHostOutBytes, nwAtFwdCtrHostInBytes=nwAtFwdCtrHostInBytes, nwAtProtoIfAclSTATUS=nwAtProtoIfAclSTATUS, nwAtFibNextHopIf=nwAtFibNextHopIf, nwAtHostCtlOperationalTime=nwAtHostCtlOperationalTime, nwAtProtoIfTable=nwAtProtoIfTable, nwAtportNetFrom=nwAtportNetFrom, nwAtEventFltrControl=nwAtEventFltrControl, nwAtProtoInterface=nwAtProtoInterface, nwAtProtoIfHelloTimer=nwAtProtoIfHelloTimer, nwAtEventEntry=nwAtEventEntry, nwAtProtoCtrOutBytes=nwAtProtoCtrOutBytes, nwAtHostMapType=nwAtHostMapType, nwAtNetDiagOutboundRTMPStatus=nwAtNetDiagOutboundRTMPStatus, nwAtEventLogTable=nwAtEventLogTable, nwAtProtoIfIndex=nwAtProtoIfIndex, nwAtFwdIfForwarding=nwAtFwdIfForwarding, nwAtProtoStackSize=nwAtProtoStackSize, nwAtProtoCtrOperationalTime=nwAtProtoCtrOperationalTime, nwAtNetDiagTelnet=nwAtNetDiagTelnet)
mibBuilder.exportSymbols("CTRON-AppleTalk-ROUTER-MIB", nwAtFwdIfControl=nwAtFwdIfControl, nwAtportZoneConfig=nwAtportZoneConfig, nwAtFwdCtrHostInPkts=nwAtFwdCtrHostInPkts, nwAtFwdIfCtrHostOutPkts=nwAtFwdIfCtrHostOutPkts, nwAtSysConfig=nwAtSysConfig, nwAtAclMatches=nwAtAclMatches, nwAtFwdIfCtrFilteredPkts=nwAtFwdIfCtrFilteredPkts, nwAtProtoCtrOutPkts=nwAtProtoCtrOutPkts, nwAtProtoThreadPriority=nwAtProtoThreadPriority, nwAtHostMapPhysAddr=nwAtHostMapPhysAddr, nwAtComponents=nwAtComponents, nwAtProtoOperationalTime=nwAtProtoOperationalTime, nwAtHostCtlCacheHits=nwAtHostCtlCacheHits, nwAtProtoIfCtrOutPkts=nwAtProtoIfCtrOutPkts, nwAtFwdIfCtrFilteredBytes=nwAtFwdIfCtrFilteredBytes, nwAtProtoCtrInBytes=nwAtProtoCtrInBytes, nwAtNetDiagOutboundNBPStatus=nwAtNetDiagOutboundNBPStatus, nwAtportZoneEntry=nwAtportZoneEntry, nwAtFwdIfCtrDiscardPkts=nwAtFwdIfCtrDiscardPkts, nwAtFwdCtrFwdBytes=nwAtFwdCtrFwdBytes, nwAtEventFltrIfNum=nwAtEventFltrIfNum, nwAtFwdCtrLenErrPkts=nwAtFwdCtrLenErrPkts, nwAtDistanceVector=nwAtDistanceVector, nwAtNetDiagPing=nwAtNetDiagPing, nwAtportOutPkts=nwAtportOutPkts)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
3a21a3002317189d4a3b56f50fc93790be543058 | 05e6fce4d597da471079a73ba474fd6998f054f5 | /Ene-Jun-2022/jesus-raul-alvarado-torres/práctica-2/capítulo-10/Verify User.py | 8d03538569308fdb2e62307ddfa79ca2a65a2a3a | [
"MIT"
] | permissive | AnhellO/DAS_Sistemas | ec6b9c06d58caf2b648a00dffa1777c4c5e02b24 | 97ceccf26dd45d6e947d2499919a4e4d5ad3d2a3 | refs/heads/ene-jun-2022 | 2023-06-01T04:40:37.350266 | 2022-06-15T01:52:33 | 2022-06-15T01:52:33 | 102,045,109 | 56 | 213 | MIT | 2023-05-23T02:49:44 | 2017-08-31T21:05:59 | Python | UTF-8 | Python | false | false | 906 | py | import json
def get_stored_username():
    """Return the username stored in 'username.json', or None if absent."""
    try:
        stored = open('username.json')
    except FileNotFoundError:
        return None
    with stored:
        return json.load(stored)
def get_new_username():
    """Prompt for a name, persist it to 'username.json', and return it."""
    name = input("¿Como te llamas? ")
    with open('username.json', 'w') as sink:
        json.dump(name, sink)
    return name
def greet_user():
    """Greet a returning user, or register (and greet) a new one."""
    username = get_stored_username()
    # Only greet directly when a name is stored AND the user confirms it.
    if username and input("¿Eres " + username + "? (y/n) ") == 'y':
        print("Bienvenido, " + username)
        return
    username = get_new_username()
    print("Te recordare la proxima vez " + username)
greet_user() | [
"noreply@github.com"
] | AnhellO.noreply@github.com |
f3376571e9de062e5692bbe67cff3a2378440502 | 55c0f1f73677511439e794baa699ef60cceaf4d6 | /reader.py | 9d5834526be10fc86594e5b24a81dd39ef4761f7 | [] | no_license | klejbroda/startech | fa81675b3652575789ac37a65823b9c23c8c1b1e | 1ef5d129db2cfccd73e254bbf4f1576611f6a485 | refs/heads/master | 2021-03-17T21:28:34.903945 | 2020-03-13T18:24:22 | 2020-03-13T18:24:22 | 247,019,343 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | class LogReader:
def __init__(self):
    # Path of the log file to parse; must be assigned by the caller
    # before create_dataset() is used.
    self.file = None
def create_dataset(self):
    """Parse the CSV log at ``self.file`` and return its data rows.

    The first line is treated as a header and skipped; every remaining
    line is split on commas and columns 1-4 are collected (column 0 is
    ignored).

    Returns:
        list of 4-tuples of strings, one per data row.
    """
    rows = []
    # Bug fix: the original did `with open(...) as self.file`, rebinding
    # ``self.file`` to the (then closed) file object, so a second call
    # would fail.  Use a local handle and leave ``self.file`` intact.
    with open(self.file, "r") as handle:
        contents = handle.readlines()
    contents.pop(0)  # drop the header row
    for raw in contents:
        for line in raw.splitlines():
            fields = line.split(",")
            rows.append((fields[1], fields[2], fields[3], fields[4]))
    return rows
| [
"noreply@github.com"
] | klejbroda.noreply@github.com |
05be3c6193f89bc5f3be46293ad8f4dda8d7aff8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2551/60771/296110.py | 9c1d23a5339719691c0cae541b95c62c95ea2fb3 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | #15
# First input line: N (number of lights) and M (number of queries).
ori = input().split(" ")
N = int(ori[0])
M = int(ori[1])
# All lights start switched off.
lights = [False]*N
for i in range(0,M):
    # Each query line is "op a b": op 0 toggles lights a..b (1-based,
    # inclusive); op 1 counts how many lights in a..b are on.
    ori = input().split(" ")
    a = int(ori[1])
    b = int(ori[2])
    if ori[0] == "0":
        for j in range(a-1,b):
            lights[j] = not lights[j]
    if ori[0] == "1":
        res = 0
        for j in range(a-1,b):
            if lights[j] == True:
                res += 1
print(res) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
a16386f8932d6143a5ca5f588dba0a5af313b3e0 | 1ff5ea65ad64be5ee2756b22488de38aeb4c9506 | /numexpr/necompiler.py | dd0b5606cb1481f018735b36d2865013fb9b935e | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | gdementen/numexpr-numba | b988f92d672fcd3828f4d12adabd08bf1916c554 | 6713b40c9d2ff3b24b1222e02d157583ba3fd29f | refs/heads/master | 2021-01-22T18:14:21.870541 | 2013-07-01T05:44:31 | 2013-07-01T05:44:31 | 10,475,590 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,099 | py | ###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
import __future__
import sys
import ast
from ctypes import pythonapi, c_void_p
import threading
import numpy as np
from numba import jit, void, b1, i4, i8, f4, f8, c16, string_
import meta
# is_cpu_amd_intel is imported *from here* by pytables :(
from numexpr import expressions, use_vml, is_cpu_amd_intel
from numexpr.utils import CacheDict
from numexpr import utils
# Declare a double type that does not exist in Python space
double = np.double

# Python 2 keeps the builtin int/long pair; Python 3 maps them onto
# fixed-width NumPy integers instead (this file targets Python 2:
# see the `exec ... in ...` statement further down).
if sys.version_info[0] < 3:
    int_ = int
    long_ = long
else:
    int_ = np.int32
    long_ = np.int64

# One-letter opcode type codes <-> human-readable kind names.
typecode_to_kind = {'b': 'bool', 'i': 'int', 'l': 'long', 'f': 'float',
                    'd': 'double', 'c': 'complex', 's': 'bytes', 'n' : 'none'}
kind_to_typecode = {'bool': 'b', 'int': 'i', 'long': 'l', 'float': 'f',
                    'double': 'd', 'complex': 'c', 'bytes': 's', 'none' : 'n'}
# Python-level scalar types -> numba scalar types used for jit signatures.
type_to_numba = {bool: b1, int_: i4, long_: i8, float: f4,
                 double: f8, complex: c16, bytes: string_}
type_to_typecode = {bool: 'b', int_: 'i', long_:'l', float:'f',
                    double: 'd', complex: 'c', bytes: 's'}

# Mappings shared with (and owned by) the expressions module.
type_to_kind = expressions.type_to_kind
kind_to_type = expressions.kind_to_type
default_type = kind_to_type[expressions.default_kind]

# Final addtions for Python 3 (mainly for PyTables needs)
if sys.version_info[0] > 2:
    typecode_to_kind['s'] = 'str'
    kind_to_typecode['str'] = 's'
    type_to_typecode[str] = 's'

# All kinds a scalar constant may take.
scalar_constant_kinds = kind_to_typecode.keys()
class ASTNode(object):
    """Placeholder for the bytecode compiler's AST node type.

    The numba backend does not build this intermediate AST, so
    instantiation is unconditionally refused.
    """

    def __init__(self, astType='generic', astKind='unknown', value=None,
                 children=()):
        raise NotImplementedError()
def expressionToAST(ex):
    """Unsupported in the numba backend; kept only for API compatibility."""
    raise NotImplementedError()
def typeCompileAst(ast):
    """Unsupported in the numba backend; kept only for API compatibility.

    Note: the parameter name ``ast`` (preserved for interface stability)
    shadows the stdlib module of the same name inside this function.
    """
    raise NotImplementedError()
def stringToExpression(s, types, context):
    """Given a string, convert it to a tree of ExpressionNode's."""
    previous_ctx = expressions._context.get_current_context()
    try:
        expressions._context.set_new_context(context)
        # Honour true division when the evaluation context requests it.
        compile_flags = (__future__.division.compiler_flag
                         if context.get('truediv', False) else 0)
        # First compile to a code object just to discover the names used.
        code = compile(s, '<expr>', 'eval', compile_flags)
        # Build the evaluation namespace: literal singletons pass through,
        # every other name becomes a typed VariableNode.
        literals = {"None": None, "True": True, "False": False}
        namespace = {}
        for name in code.co_names:
            if name in literals:
                namespace[name] = literals[name]
            else:
                kind = type_to_kind[types.get(name, default_type)]
                namespace[name] = expressions.VariableNode(name, kind)
        namespace.update(expressions.functions)
        # Evaluating against the synthetic namespace builds the tree.
        ex = eval(code, namespace)
        if expressions.isConstant(ex):
            ex = expressions.ConstantNode(ex, expressions.getKind(ex))
        elif not isinstance(ex, expressions.ExpressionNode):
            raise TypeError("unsupported expression type: %s" % type(ex))
    finally:
        expressions._context.set_new_context(previous_ctx)
    return ex
def get_argnames(ex):
    """Return the sorted, de-duplicated names of all variables in *ex*."""
    seen = set()
    for node in ex.allOf('variable'):
        seen.add(node.value)
    return sorted(seen)
# (name, allowed values, default) for every recognised keyword argument.
context_info = [
    ('optimization', ('none', 'moderate', 'aggressive'), 'aggressive'),
    ('truediv', (False, True, 'auto'), 'auto')
]

def getContext(kwargs, frame_depth=1):
    """Validate **kwargs** against ``context_info`` and build a context dict.

    Unknown keys and out-of-range values raise ValueError.  A 'truediv'
    of 'auto' is resolved by checking whether the calling module did
    ``from __future__ import division``.
    """
    remaining = kwargs.copy()
    context = {}
    for name, allowed, default in context_info:
        choice = remaining.pop(name, default)
        if choice not in allowed:
            raise ValueError("'%s' must be one of %s" % (name, allowed))
        context[name] = choice
    if remaining:
        raise ValueError("Unknown keyword argument '%s'" % remaining.popitem()[0])
    if context['truediv'] == 'auto':
        # Peek at the caller's module globals for the __future__ import.
        caller_globals = sys._getframe(frame_depth + 1).f_globals
        context['truediv'] = \
            caller_globals.get('division', None) == __future__.division
    return context
class ArraySubscripter(ast.NodeTransformer):
    """Rewrite every data variable ``v`` into ``v[i]``.

    Names of kernel helpers (``py_funcs``) and of pseudo-functions that
    the expression language handles specially are left untouched.
    """

    _PASSTHROUGH = ('imag', 'real', 'abs', 'double')

    def visit_Name(self, node):
        if node.id in py_funcs or node.id in self._PASSTHROUGH:
            return node
        indexed = ast.Subscript(node,
                                ast.Index(ast.Name('i', ast.Load())),
                                ast.Load())
        return ast.copy_location(indexed, node)
class TemplateFiller(ast.NodeTransformer):
    """Splice an expression and its argument list into the loop template.

    ``__expr_placeholder__`` is replaced by *expr*; the single
    ``__args_placeholder__`` parameter becomes ``__result__`` followed
    by *argnames*.
    """

    def __init__(self, expr, argnames):
        self.expr = expr
        self.argnames = argnames

    def visit_Name(self, node):
        if node.id != '__expr_placeholder__':
            return node
        return ast.copy_location(self.expr, node)

    def visit_arguments(self, node):
        assert node.args[0].id == '__args_placeholder__'
        # The result buffer is always the first positional parameter.
        params = ['__result__'] + self.argnames
        node.args = [ast.Name(name, ast.Param()) for name in params]
        return node
# ctypes bindings for the GIL: the generated kernels call these so the
# GIL is released while the numba loop runs, letting the worker threads
# spawned below execute in parallel.
savethread = pythonapi.PyEval_SaveThread
savethread.argtypes = []
savethread.restype = c_void_p
restorethread = pythonapi.PyEval_RestoreThread
restorethread.argtypes = [c_void_p]
restorethread.restype = None
# NOTE: this function is never called directly -- its *source* is the
# template: meta decompiles it to an AST, TemplateFiller substitutes the
# two placeholders, and the result is compiled into the real kernel.
# Do not restructure it; the generated code mirrors its exact shape.
def template_func(__args_placeholder__):
    _threadstate = savethread()
    for i in range(len(__result__)):
        __result__[i] = __expr_placeholder__
    restorethread(_threadstate)
# AST of the template above, decompiled once at import time and deep-copied
# for every compiled expression.
template_ast = meta.decompiler.decompile_func(template_func)
from copy import deepcopy
def ast_expr_to_ast_func(ast_expr, arg_names):
    """Turn an expression AST into a module AST defining ``__expr_func__``.

    Every variable in the expression is subscripted with the loop index,
    then the expression is spliced into a fresh copy of the loop template.
    """
    indexed_expr = ArraySubscripter().visit(ast_expr)
    filled = TemplateFiller(indexed_expr, arg_names).visit(deepcopy(template_ast))
    filled.name = '__expr_func__'
    module = ast.Module([filled])
    return ast.fix_missing_locations(module)
import math
# Names visible inside the generated kernel: the GIL helpers plus scalar
# math routines.  The math-module versions are used (not numpy's) because
# the kernel operates on one element at a time.
py_funcs = {
    'savethread': savethread,
    'restorethread': restorethread,
    # 'abs': abs,
    # 'absolute': abs,
    'complex': complex,
    'sqrt': math.sqrt,
    'sin': math.sin,
    'cos': math.cos,
    'tan': math.tan,
    'arcsin': math.asin,
    'arccos': math.acos,
    'arctan': math.atan,
    'sinh': math.sinh,
    'cosh': math.cosh,
    'tanh': math.tanh,
    'arcsinh': math.asinh,
    'arccosh': math.acosh,
    'arctanh': math.atanh,
    'fmod': math.fmod,
    'arctan2': math.atan2,
    'log': math.log,
    'log1p': math.log1p,
    'log10': math.log10,
    'exp': math.exp,
    'expm1': math.expm1,
    'copy': np.copy,
    'ones_like': np.ones_like,
    'double': f8,
}
def ast_func_to_func(ast_func):
    """Compile a module AST and return the ``__expr_func__`` it defines."""
    code = compile(ast_func, '<expr>', 'exec')
    # Run the module body in a namespace providing numpy and the scalar
    # helpers the generated kernel refers to.
    context = {'np': np}
    context.update(py_funcs)
    # exec(code, ns) is valid on both Python 2 and 3; the original used
    # the Python-2-only statement form `exec code in context`.
    exec(code, context)
    return context['__expr_func__']
def precompile(ex, signature=(), context={}):
    """Compile the expression to an intermediate form.

    ex may be an ExpressionNode tree or a string; signature is a list of
    (name, type) pairs fixing argument order and types; context is a dict
    as produced by getContext().  Returns a callable (with a ``.run``
    alias) that evaluates the expression over array or scalar arguments.

    NOTE(review): the mutable default ``context={}`` is never mutated
    here, but it is fragile -- confirm before relying on it.
    """
    if isinstance(ex, (str, unicode)):
        #XXX: we might want to work directly with (python's) AST
        # and do numexpr transformations directly at that level instead of going
        # str -> Expression -> ast -> ...
        types = dict(signature)
        ex = stringToExpression(ex, types, context)
    if signature:
        argnames = [name for (name, type_) in signature]
    else:
        # this can only occur when NumExpr() is called directly without
        # signature, and in that case we have no other choice than use
        # the default type for all arguments (double)
        argnames = get_argnames(ex)
        signature = [(name, double) for name in argnames]
    # Full reductions are handled by evaluating the inner expression
    # elementwise and applying the numpy reduction afterwards.
    if ex.value in ('sum', 'prod'):
        reduction_func = getattr(np, ex.value)
        args = ex.children
        # axis is second arg
        assert len(args) == 2
        ex, axis = args
        axis = axis.value
    else:
        reduction_func = None
        axis = None
    # Expression tree -> python AST -> looping function -> jit kernel.
    ast_expr = ex.toPython()
    # print ast.dump(ast_expr, annotate_fields=False)
    ast_func = ast_expr_to_ast_func(ast_expr, argnames)
    inner_func = ast_func_to_func(ast_func)
    # print ast.dump(ast_func, annotate_fields=False)
    # Build the numba signature: result buffer first, then the arguments.
    res_type = kind_to_type[ex.astKind]
    full_sig = [('__result__', res_type)] + signature
    arg_types = [type_to_numba[type_] for name, type_ in full_sig]
    jit_signature = void(*[t[:] for t in arg_types])
    inner_func_nb = jit(jit_signature, nopython=True)(inner_func)
    if reduction_func is not None:
        # this is a hack. To do it (more) correctly (with multithreading),
        # I would need to use a different template_func:
        # for i in range(len(__result__)):
        #     __result__[0] += __expr_placeholder__
        def func(*args, **kwargs):
            # order, casting, ex_uses_vml kwargs are accepted but ignored
            out = kwargs.pop('out', None)
            if out is not None:
                raise NotImplementedError()
            shape = args[0].shape
            args = [a.ravel() for a in args]
            tmp_out = np.empty(shape, dtype=res_type)
            inner_func_nb(tmp_out.ravel(), *args)
            return reduction_func(tmp_out, axis=axis)
    else:
        def func(*args, **kwargs):
            numthreads = utils.num_threads
            # Array arguments: broadcast, flatten, and split the work
            # across ``numthreads`` chunks.
            if any(isinstance(arg, np.ndarray) for arg in args):
                shape = args[0].shape
                if any(arg.shape != shape for arg in args[1:]):
                    args = np.broadcast_arrays(*args)
                    shape = args[0].shape
                out = kwargs.pop('out', None)
                if out is None:
                    out = np.empty(shape, dtype=res_type)
                # "flatten" arguments
                # we cannot use order="K" which is most efficient, in case arguments
                # have not the same in-memory layout, because we need the same
                # target memory layout for all arguments.
                #XXX: can't we test for that and convert only if really necessary?
                args = [out.ravel()] + [a.ravel() for a in args]
                length = len(args[0])
                # TODO: it might be better to make sure the starting bounds
                # are aligned to X bytes
                # TODO: it might be better to not multithread at all if
                # length < THRESHOLD
                chunklen = (length + numthreads - 1) // numthreads
                bounds = [(i * chunklen, min((i + 1) * chunklen, length))
                          for i in range(numthreads)]
                assert bounds[-1][1] == length
                chunks = [[arg[start:stop] for arg in args]
                          for start, stop in bounds]
                threads = [threading.Thread(target=inner_func_nb, args=chunk)
                           for chunk in chunks[:-1]]
                for thread in threads:
                    thread.start()
                # the main thread handles the last chunk
                inner_func_nb(*chunks[-1])
                for thread in threads:
                    thread.join()
                return out
            else:
                # all arguments are scalar: evaluate through a 1-element buffer
                out = np.empty(1, dtype=res_type)
                args = [out] + [np.array([a]) for a in args]
                inner_func_nb(*args)
                return out[0]
    # Backwards-compatible alias: compiled_ex.run(...) == compiled_ex(...)
    def run(*args, **kwargs):
        return func(*args, **kwargs)
    func.run = run
    return func
def NumExpr(ex, signature=(), copy_args=(), **kwargs):
    """Compile an expression built using E.<variable> variables to a function.

    ex can also be specified as a string "2*a+3*b".  The order of the
    input variables and their types can be specified using the signature
    parameter, which is a list of (name, type) pairs.

    Returns a `NumExpr` object containing the compiled function.
    """
    # `kwargs` may arrive raw (direct end-user call) or pre-sanitised
    # (from evaluate()).  getContext copes with both; frame_depth is only
    # consulted for truediv='auto', which evaluate() has already resolved,
    # so a depth of 1 is safe in both cases.
    # `copy_args` is unused since 2.0 and kept only because PyTables
    # relied on it (see PyTables issue #117).
    return precompile(ex, signature, getContext(kwargs, frame_depth=1))
def disassemble(nex):
    """
    Given a NumExpr object, return a list which is the program disassembled.
    """
    # The numba backend keeps no bytecode program that could be shown.
    raise NotImplementedError()
def getType(a):
    """Map a NumPy array's dtype onto the numexpr scalar type.

    Returns one of: bool, int_, long_, float, double, complex, bytes.

    Raises:
        ValueError: for dtypes numexpr does not support.
    """
    kind = a.dtype.kind
    if kind == 'b':
        return bool
    if kind in 'iu':
        if a.dtype.itemsize > 4:
            return long_  # ``long`` is for integers of more than 32 bits
        if kind == 'u' and a.dtype.itemsize == 4:
            return long_  # use ``long`` here as an ``int`` is not enough
        return int_
    if kind == 'f':
        if a.dtype.itemsize > 4:
            return double  # ``double`` is for floats of more than 32 bits
        return float
    if kind == 'c':
        return complex
    if kind == 'S':
        return bytes
    # Fix: corrected the "unkown" typo in the original error message.
    raise ValueError("unknown type %s" % a.dtype.name)
#XXX: no tan? log10? fmod?
#XXX: what is "inv"?
# Operation names that Intel VML accelerates; used by getExprNames to
# report whether an expression would benefit from VML.
vml_funcs = set([
    'sqrt',
    'sin', 'cos',
    'arcsin', 'arccos', 'arctan',
    'sinh', 'cosh', 'tanh',
    'arcsinh', 'arccosh', 'arctanh',
    'arctan2',
    'log', 'log1p',
    'exp', 'expm1',
    'abs',
    'pow', 'div',
    'inv',
])
def getExprNames(text, context):
    """Return (sorted variable names, uses-VML flag) for expression *text*."""
    ex = stringToExpression(text, {}, context)
    argnames = get_argnames(ex)
    if not use_vml:
        return argnames, False
    # VML only matters when the expression contains one of its operations.
    uses_vml = any(node.value in vml_funcs for node in ex.allOf('op'))
    return argnames, uses_vml
# Bounded caches (256 entries each) keyed by expression text + context:
# one for the parsed variable names, one for the compiled kernels.
_names_cache = CacheDict(256)
_numexpr_cache = CacheDict(256)
def evaluate(ex, local_dict=None, global_dict=None,
             out=None, order='K', casting='safe', **kwargs):
    """Evaluate a simple array expression element-wise, using the new iterator.

    ex is a string forming an expression, like "2*a+3*b". The values for "a"
    and "b" will by default be taken from the calling function's frame
    (through use of sys._getframe()). Alternatively, they can be specifed
    using the 'local_dict' or 'global_dict' arguments.

    Parameters
    ----------

    local_dict : dictionary, optional
        A dictionary that replaces the local operands in current frame.

    global_dict : dictionary, optional
        A dictionary that replaces the global operands in current frame.

    out : NumPy array, optional
        An existing array where the outcome is going to be stored.  Care is
        required so that this array has the same shape and type than the
        actual outcome of the computation.  Useful for avoiding unnecessary
        new array allocations.

    order : {'C', 'F', 'A', or 'K'}, optional
        Controls the iteration order for operands. 'C' means C order, 'F'
        means Fortran order, 'A' means 'F' order if all the arrays are
        Fortran contiguous, 'C' order otherwise, and 'K' means as close to
        the order the array elements appear in memory as possible.  For
        efficient computations, typically 'K'eep order (the default) is
        desired.

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur when making a copy or
        buffering.  Setting this to 'unsafe' is not recommended, as it can
        adversely affect accumulations.

          * 'no' means the data types should not be cast at all.
          * 'equiv' means only byte-order changes are allowed.
          * 'safe' means only casts which can preserve values are allowed.
          * 'same_kind' means only safe casts or casts within a kind,
            like float64 to float32, are allowed.
          * 'unsafe' means any data conversions may be done.
    """
    if not isinstance(ex, (str, unicode)):
        raise ValueError("must specify expression as a string")
    # Get the names for this expression (cached on text + context).
    context = getContext(kwargs, frame_depth=1)
    expr_key = (ex, tuple(sorted(context.items())))
    if expr_key not in _names_cache:
        _names_cache[expr_key] = getExprNames(ex, context)
    names, ex_uses_vml = _names_cache[expr_key]
    # Get the arguments based on the names, looking first in local_dict,
    # then in global_dict (defaulting to the caller's frame for both).
    call_frame = sys._getframe(1)
    if local_dict is None:
        local_dict = call_frame.f_locals
    if global_dict is None:
        global_dict = call_frame.f_globals
    arguments = []
    for name in names:
        try:
            a = local_dict[name]
        except KeyError:
            a = global_dict[name]
        arguments.append(np.asarray(a))
    # Create a signature from the concrete argument dtypes.
    signature = [(name, getType(arg)) for (name, arg) in zip(names, arguments)]
    # Look up a previously compiled kernel if possible.
    numexpr_key = expr_key + (tuple(signature),)
    try:
        compiled_ex = _numexpr_cache[numexpr_key]
    except KeyError:
        compiled_ex = _numexpr_cache[numexpr_key] = \
            NumExpr(ex, signature, **context)
    kwargs = {'out': out, 'order': order, 'casting': casting,
              'ex_uses_vml': ex_uses_vml}
    return compiled_ex(*arguments, **kwargs)
| [
"gdementen@gmail.com"
] | gdementen@gmail.com |
1f5efb06eab8edbd3e09147a821a534f8f2d7483 | 1154fa5ae6fe517151e41f5f4746d1bada23e1a5 | /scenes/cup_generator/model.py | 7e8f6d5861759736d796c1fb6a1e135ab6258a3d | [] | no_license | joaomonteirof/SMART_COUSP_Reconstruction | 9f7aac2eb08bc67f3d8b7e786ff66a5c1c9dadf4 | 79ea702d75875bec399721b04cdaecf4fc6a6a0e | refs/heads/master | 2023-09-04T00:05:20.981615 | 2021-10-13T17:26:10 | 2021-10-13T17:26:10 | 106,738,046 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,485 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Generator(torch.nn.Module):
    """DCGAN-style generator: latent vector -> single-channel image.

    The latent code is treated as a 1x1 spatial map and upsampled by a
    stack of transposed convolutions (stride 1 for the first layer,
    stride 2 afterwards), each followed by batch-norm and ReLU; the
    output layer maps to ``output_dim`` channels through a sigmoid.
    """

    def __init__(self, input_dim=128, num_filters=None, output_dim=1):
        super(Generator, self).__init__()
        # Fix: avoid a mutable default argument; None selects the
        # original default stack (backward compatible).
        if num_filters is None:
            num_filters = [1024, 512, 256, 128, 64, 32]
        # Hidden layers
        self.hidden_layer = torch.nn.Sequential()
        for i, width in enumerate(num_filters):
            # First layer expands the 1x1 latent map to 4x4; the
            # remaining layers each double the spatial resolution.
            if i == 0:
                deconv = nn.ConvTranspose2d(input_dim, width, kernel_size=4,
                                            stride=1, padding=0)
            else:
                deconv = nn.ConvTranspose2d(num_filters[i - 1], width,
                                            kernel_size=4, stride=2, padding=1)
            self.hidden_layer.add_module('deconv' + str(i + 1), deconv)
            # DCGAN initialisation: N(0, 0.02) weights, zero biases.
            nn.init.normal_(deconv.weight, mean=0.0, std=0.02)
            nn.init.constant_(deconv.bias, 0.0)
            self.hidden_layer.add_module('bn' + str(i + 1),
                                         torch.nn.BatchNorm2d(width))
            self.hidden_layer.add_module('act' + str(i + 1), torch.nn.ReLU())
        # Output layer: one final 2x upsampling, squashed to [0, 1].
        self.output_layer = torch.nn.Sequential()
        out = torch.nn.ConvTranspose2d(num_filters[-1], output_dim,
                                       kernel_size=4, stride=2, padding=1)
        self.output_layer.add_module('out', out)
        nn.init.normal_(out.weight, mean=0.0, std=0.02)
        nn.init.constant_(out.bias, 0.0)
        self.output_layer.add_module('act', torch.nn.Sigmoid())

    def forward(self, x):
        """Map latent codes of shape (B, D) or (B, D, 1, 1) to images."""
        if x.dim() == 2:
            x = x.unsqueeze(-1).unsqueeze(-1)
        elif not x.dim() == 4:
            print('WRONG INPUT DIMENSION!!')
            exit(1)
        h = self.hidden_layer(x)
        return self.output_layer(h)
class Discriminator(torch.nn.Module):
    """DCGAN-style discriminator with a fixed random 1-channel projection.

    The input is first reduced to one channel by a frozen random
    projection convolution (normalised to unit norm, never optimised),
    then classified by strided convolutions with LeakyReLU activations
    and a sigmoid output.  The optimiser built here deliberately covers
    only the hidden and output layers.
    """

    def __init__(self, optimizer, lr, betas, input_dim=1, num_filters=None,
                 output_dim=1, batch_norm=False):
        super(Discriminator, self).__init__()
        # Fix: avoid a mutable default argument; None selects the
        # original default stack (backward compatible).
        if num_filters is None:
            num_filters = [32, 64, 128, 256, 512, 1024]
        self.projection = nn.Conv2d(input_dim, 1, kernel_size=8, stride=2,
                                    padding=3, bias=False)
        with torch.no_grad():
            # Normalise the random projection to unit Frobenius norm.
            self.projection.weight /= torch.norm(self.projection.weight.squeeze()).item()
        # Hidden layers
        self.hidden_layer = torch.nn.Sequential()
        for i, width in enumerate(num_filters):
            if i == 0:
                conv = nn.Conv2d(1, width, kernel_size=4, stride=2, padding=1)
            else:
                conv = nn.Conv2d(num_filters[i - 1], width, kernel_size=4,
                                 stride=2, padding=1)
            self.hidden_layer.add_module('conv' + str(i + 1), conv)
            # DCGAN initialisation: N(0, 0.02) weights, zero biases.
            nn.init.normal_(conv.weight, mean=0.0, std=0.02)
            nn.init.constant_(conv.bias, 0.0)
            if i != 0 and batch_norm:
                self.hidden_layer.add_module('bn' + str(i + 1),
                                             torch.nn.BatchNorm2d(width))
            self.hidden_layer.add_module('act' + str(i + 1),
                                         torch.nn.LeakyReLU(0.2))
        # Output layer
        self.output_layer = torch.nn.Sequential()
        out = nn.Conv2d(num_filters[-1], output_dim, kernel_size=4, stride=1,
                        padding=1)
        self.output_layer.add_module('out', out)
        nn.init.normal_(out.weight, mean=0.0, std=0.02)
        nn.init.constant_(out.bias, 0.0)
        self.output_layer.add_module('act', nn.Sigmoid())
        # The frozen projection is excluded from optimisation on purpose.
        self.optimizer = optimizer(list(self.hidden_layer.parameters()) +
                                   list(self.output_layer.parameters()),
                                   lr=lr, betas=betas)

    def forward(self, x):
        """Return squeezed per-sample probabilities for input images."""
        x = self.projection(x)
        h = self.hidden_layer(x)
        return self.output_layer(h).squeeze()
| [
"joaomonteirof@gmail.com"
] | joaomonteirof@gmail.com |
c12730826a6aa9d5f5d486adc9b4fbd73d3e312c | 87b4518e55c0e465aba39d86e65ba56f56502198 | /css/postprocess.py | 787db97ecfb72ae1d5d3a86a4fc9aaf218d47c28 | [
"MIT"
] | permissive | Serkan-devel/m.css | 302831008d8949a2fb7b91565621b47dd638e38f | 3c0e3d7875bc9ab63c93322cc02cab62239804d7 | refs/heads/master | 2020-04-01T02:00:17.005772 | 2019-01-12T11:36:33 | 2019-01-12T11:36:33 | 152,761,732 | 0 | 0 | MIT | 2019-01-12T11:36:34 | 2018-10-12T14:20:51 | Python | UTF-8 | Python | false | false | 7,550 | py | #!/usr/bin/env python
#
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019 Vladimír Vondruš <mosra@centrum.cz>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import argparse
import re
import os
import sys
# Line-oriented patterns for the single-pass CSS "compiler":
#   @import lines, the `:root {` opening of a variable-declaration block,
#   its closing brace, whole-line /* */ comments (plus unterminated
#   comment starts and ends), CSS custom-property declarations, and
#   var(--x) uses to be substituted inline.
import_rx = re.compile("^@import url\\('(?P<file>[^']+)'\\);$")
opening_brace_rx = re.compile("^\\s*:root\s*{\\s*$")
closing_brace_rx = re.compile("^\\s*}\\s*$")
comment_rx = re.compile("^\\s*(/\\*.*\\*/)?\\s*$")
comment_start_rx = re.compile("^\\s*(/\\*.*)\\s*$")
comment_end_rx = re.compile("^\\s*(.*\\*/)\\s*$")
variable_declaration_rx = re.compile("^\\s*(?P<key>--[a-z-]+)\\s*:\\s*(?P<value>[^;]+)\\s*;\\s*(/\\*.*\\*/)?\\s*$")
variable_use_rx = re.compile("^(?P<before>.+)var\\((?P<key>--[a-z-]+)\\)(?P<after>.+)$")
def postprocess(files, process_imports, out_file):
    """Concatenate *files* into one compiled CSS file.

    Resolves `@import` statements (when *process_imports* is true),
    substitutes `var(--x)` uses with the values declared in `:root` blocks,
    and strips comments. When *out_file* is empty, writes next to the first
    input as `<basename>.compiled<ext>`. Returns 0.
    """
    # NOTE(review): `directory` is assigned but never used below.
    directory = os.path.dirname(files[0])
    if not out_file:
        basename, ext = os.path.splitext(files[0])
        out_file = basename + ".compiled" + ext
    # Parser state shared across all parsed files: collected variable
    # values and files pulled in via @import.
    variables = {}
    imported_files = []
    def parse(f):
        # Closure over `out` (bound later inside the `with` block below);
        # only safe because parse() is called after `out` exists.
        nonlocal variables, imported_files
        not_just_variable_declarations = False
        in_variable_declarations = False
        in_comment = False
        for line in f:
            # In comment and the comment is not ending yet, ignore
            if in_comment:
                if comment_end_rx.match(line):
                    in_comment = False
                continue
            # Import statement: add the file to additionally processed files
            # unless it's disabled
            match = import_rx.match(line)
            if match:
                if process_imports:
                    imported_files += [match.group('file')]
                continue
            # Variable use, replace with actual value
            # TODO: more variables on the same line?
            match = variable_use_rx.match(line)
            if match and match.group('key') in variables:
                out.write(match.group('before'))
                out.write(variables[match.group('key')])
                # Strip the trailing comment, if there, to save some bytes
                if match.group('after').endswith('*/'):
                    out.write(match.group('after')[:match.group('after').rindex('/*')].rstrip())
                else:
                    out.write(match.group('after'))
                out.write("\n")
                continue
            # Opening brace of variable declaration block
            match = opening_brace_rx.match(line)
            if match:
                in_variable_declarations = True
                continue
            # Variable declaration
            match = variable_declaration_rx.match(line)
            if match and in_variable_declarations:
                variables[match.group('key')] = match.group('value')
                continue
            # Comment or empty line, ignore
            if comment_rx.match(line):
                continue
            # Comment start line, ignore this and the next lines
            if comment_start_rx.match(line):
                in_comment = True
                continue
            # Closing brace of variable declaration block. If it was not just
            # variable declarations, put the closing brace to the output as
            # well.
            match = closing_brace_rx.match(line)
            if match and in_variable_declarations:
                if not_just_variable_declarations: out.write("}\n")
                in_variable_declarations = False
                continue
            # If inside variable declaration block, include also the opening
            # brace and remeber to put the closing brace there as well
            if in_variable_declarations:
                out.write(":root {\n")
                not_just_variable_declarations = True
            # Something else, copy verbatim to the output. Strip the trailing
            # comment, if there, to save some bytes.
            if line.rstrip().endswith('*/'):
                out.write(line[:line.rindex('/*')].rstrip() + '\n')
            else:
                out.write(line)
    with open(out_file, mode='w') as out:
        # Put a helper comment and a license blob on top
        out.write("""/* Generated using `./postprocess.py {}`. Do not edit. */
/*
    This file is part of m.css.
    Copyright © 2017, 2018, 2019 Vladimír Vondruš <mosra@centrum.cz>
    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:
    The above copyright notice and this permission notice shall be included
    in all copies or substantial portions of the Software.
    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.
*/
""".format(' '.join(sys.argv[1:])))
        # Parse the top-level file
        with open(files[0]) as f: parse(f)
        # Now open the imported files and parse them as well. Not doing any
        # recursive parsing.
        for i, file in enumerate(imported_files + files[1:]):
            if i: out.write('\n')
            with open(file) as f: parse(f)
    return 0
if __name__ == "__main__":
    # Command-line entry point: collect the input CSS files and flags, then
    # delegate to postprocess() and exit with its status code.
    parser = argparse.ArgumentParser(description=r"""
Postprocessor for removing @import statements and variables from CSS files.
Combines all files into a new *.compiled.css file. The basename is taken
implicitly from the first argument. The -o option can override the output
filename.""")
    parser.add_argument('files', nargs='+', help="input CSS file(s)")
    parser.add_argument('--no-import', help="ignore @import statements", action='store_true')
    parser.add_argument('-o', '--output', help="output file", default='')
    args = parser.parse_args()
    # --no-import is inverted here: process_imports=True unless the flag is given.
    exit(postprocess(args.files, not args.no_import, args.output))
| [
"mosra@centrum.cz"
] | mosra@centrum.cz |
3073ee6cbe530e37a8ef22de9c4144d820e41f0d | 6cca2f3998cfe0a78f8ae0b10d5cad770737efc8 | /tools/cryptotools.py | f908250645bc24c4727d7e1f0c2b124c6e7ce90d | [] | no_license | snieradkiewicz/security-tools-wargames | 5e7c796b1de345957b48d986c482caa82734eac6 | 15a2fe48ee5ecb8357f1f1751a50bab08c7b3e05 | refs/heads/master | 2020-07-29T00:48:43.728490 | 2019-10-20T18:05:12 | 2019-10-20T18:05:12 | 209,603,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,899 | py | import base64
from tools import assets
from tools import aes
def evaluate_english_score(message, message_contains_words=False):
    """Score how English-like the byte string *message* looks.

    Decodes the bytes (ignoring errors), upper-cases them and sums the
    frequency weight of every dictionary token times its occurrence count.
    Non-printable characters (other than newline) each subtract 10 points,
    down to a floor of -100.
    """
    text = str(message.decode("utf-8", errors='ignore')).upper()
    # Pick the token table: letters+words when whole words are expected,
    # plain letter frequencies otherwise.
    if message_contains_words:
        freq_table = assets.Assets().english_letters_and_words_frequency
    else:
        freq_table = assets.Assets().english_letter_frequency
    # Weighted occurrence count of every token in the table.
    score = sum((weight * text.count(str(token))
                 for token, weight in freq_table.items()), 0.0)
    # Penalize garbage: each non-printable, non-newline char costs 10 points
    # until the score reaches -100.
    for ch in text:
        if not ch.isprintable() and ch != "\x0a" and score > -100:
            score -= 10.0
    return score
def count_hamming_distance(set1, set2):
    """Return the Hamming distance (number of differing bits) between two
    byte sequences, compared only up to the length of the shorter one.

    Fix: the original also built a `result` bytearray of the XORed bytes
    that was never used; the manual index loop is replaced by zip/sum.
    """
    # zip stops at the shorter input, matching the original bounds check.
    return sum(bin(a ^ b).count("1") for a, b in zip(set1, set2))
def find_n_best_results(all_results, top_results):
    """Return *top_results* keys of *all_results*, picked in ascending
    score order (the previously picked key is skipped; ties fall back to
    dictionary iteration order)."""
    chosen = []
    last_key, last_score = -1, -1.0
    for _ in range(top_results):
        best_key, best_score = 0, -1.0
        # Scan all candidates for the next-lowest score that is still at
        # least as large as the previously chosen one.
        for candidate, score in all_results.items():
            take = best_score == -1
            if not take:
                take = (score >= last_score and candidate != last_key
                        and score < best_score)
            if take:
                best_key, best_score = candidate, score
        last_key, last_score = best_key, best_score
        chosen.append(best_key)
    return chosen
def bytes_xor(bytes1: bytes, bytes2: bytes):
    """XOR *bytes1* against *bytes2*, cycling *bytes2* as a repeating key.

    Returns a new bytearray of the same length as *bytes1*.
    """
    key_len = len(bytes2)
    return bytearray(value ^ bytes2[pos % key_len]
                     for pos, value in enumerate(bytes1))
def brute_single_byte_unxor(message):
    """Brute-force a single-byte XOR key over *message*.

    Tries all 256 key bytes, scoring each candidate plaintext with
    evaluate_english_score(), and returns a 4-tuple:
    (best key byte, best score, plaintext for the best key, best plaintext).

    Fix: removed the unused `cs2 = current_score / len(message)` line,
    which crashed with ZeroDivisionError on an empty message.
    """
    max_score = -10000000
    best_char = bytes()
    best_xored_string = bytes()
    # Exhaustively try every possible key byte.
    for candidate in range(0, 256):
        key = bytes([candidate])
        xored_string = bytes(bytes_xor(message, key))
        current_score = evaluate_english_score(xored_string)
        if current_score > max_score:
            max_score = current_score
            best_char = key
            best_xored_string = xored_string
    # Third element recomputes the plaintext for the winning key, matching
    # the original return shape.
    return best_char, max_score, bytes(bytes_xor(message, best_char)), best_xored_string
def rot_range(msg, min = 1, max= 26):
    """Print every Caesar rotation of *msg* for shifts in [min, max).

    Only ASCII letters are rotated (case preserved); other characters pass
    through unchanged. Output is printed, nothing is returned.
    """
    for shift in range(min, max):
        rotated = bytearray(msg.encode('ascii'))
        for idx, code in enumerate(rotated):
            if 65 <= code <= 90:
                # Uppercase A-Z: rotate within the 26-letter alphabet.
                rotated[idx] = (code + shift - 65) % 26 + 65
            elif 97 <= code <= 122:
                # Lowercase a-z.
                rotated[idx] = (code + shift - 97) % 26 + 97
        print("For rot: " + str(shift) + " the message is : " + str(rotated))
def compare_bytearrays(array1, array2):
    """Return True when the two sequences have equal length and identical
    elements, False otherwise.

    Improvement: replaces the manual index loop with the idiomatic
    all()/zip() form (zip is safe here because lengths are equal).
    """
    if len(array1) != len(array2):
        return False
    return all(a == b for a, b in zip(array1, array2))
class CryptoTools:
    """Helper wrapping a (key, message, encrypted) triple with XOR key
    recovery and AES-128-ECB utilities (AES work is delegated to the
    project's `aes` module)."""
    def __init__(self, key=bytes(), message=bytes(), encrypted=bytes()):
        # All three attributes are name-mangled private byte strings.
        self.__key = key
        self.__message = message
        self.__encrypted = encrypted
    def get_encrypted(self):
        """Return the ciphertext bytes."""
        return self.__encrypted
    def get_message(self):
        """Return the plaintext bytes."""
        return self.__message
    def get_decrypted(self):
        """Alias for get_message()."""
        return self.get_message()
    def find_key_length(self, min_len=2, max_len=40, fragments=4, top_results=3):
        """
        Calculate /top_results/ number of most probable key lengths
        :param min_len: minimum length of key
        :param max_len: maximum length of key
        :param fragments: maximum fragments to compute (usually more is better)
        :param top_results: number of returning results
        :return: ARRAY of most probable key lengths
        """
        encrypted = self.__encrypted
        key_size = min_len
        min_distance = -1.0
        results = dict()
        # probing encrypted message for key lengths from range(min_len, max_len)
        while key_size <= max_len:
            # Not enough data for this many fragments: shrink the fragment
            # count (down to 2) before giving up.
            if len(encrypted) < (key_size * fragments):
                if fragments > 2:
                    fragments -= 1
                    continue
                else:
                    # NOTE(review): message is missing a space before "fragments".
                    print("Data stream is too short. Stopped at " + str(fragments)
                          + "fragments with key_size: " + str(key_size))
                    break
            frags = [encrypted[0:key_size]]
            # slice encrypted to /fragments/ number of blocks
            for j in range(2, fragments + 1):
                frags.append(encrypted[key_size * (j - 1):key_size * j])
            count = 0
            distance = 0
            # calculate and sum up hamming distance between each sliced blocks
            for i in range(0, fragments):
                for j in range(i + 1, fragments):
                    distance += count_hamming_distance(bytes(frags[i]), bytes(frags[j]))
                    count += 1
            # normalize hamming distance sum by dividing it by mutations and key size
            score = distance / count / key_size
            if min_distance < 0 or score < min_distance:
                min_distance = score
            results[key_size] = score
            key_size += 1
            frags.clear()
        # find n best results
        best_results = find_n_best_results(results, top_results)
        return best_results
    def find_key_multibyte_xor(self, key_len):
        """
        find key for given key length
        :param key_len: length of a key (in bytes)
        :return: the recovered key bytes (also stored on the instance)
        """
        encrypted = self.__encrypted
        # initialize table of size of keylen to store each block in it
        blocks = []
        for i in range(0, key_len):
            blocks.append(bytearray())
        # reorder encrypted to each n'th % keysize byte go to its block
        for i in range(0, len(encrypted)):
            blocks[i % key_len].append(encrypted[i])
        key = bytes(b"")
        # Each transposed block was XORed with one key byte; recover it.
        for block in blocks:
            res = brute_single_byte_unxor(bytes(block))
            key += res[0]
        self.__key = key
        return key
    def is_key_and_message_plain_ascii(self):
        """
        Check if key and message are both plaintext (ASCII) so each byte of key and message is in range (\x00 - \x7f)
        then most significant bit of each byte in key would be unset (0) - so XOR of it is also unset (0).
        :return: Returns True if so, otherwise False
        """
        encrypted = self.__encrypted
        # If any ciphertext byte has its MSB set, plain-ASCII XOR plain-ASCII
        # could not have produced it.
        for byte in encrypted:
            calc = byte & int('10000000', 2)
            if calc == int('10000000', 2):
                return False
        return True
    def print_in_columns(self, key_len):
        """Recover the key for *key_len* and print the decrypted text in
        rows of *key_len* characters, each prefixed with its length."""
        zerofill = len(str(key_len))
        key = self.find_key_multibyte_xor(key_len)
        result = bytes_xor(self.get_encrypted(), key).decode(encoding='utf-8', errors='ignore').replace('\n', ' ')
        for i in range(0, len(result), key_len):
            line = result[i:i + key_len].replace('\n', ' ')
            print(str(len(line)).zfill(zerofill) + ' : ' + line)
    def encrypt_aes_128_ecb(self):
        """Encrypt the stored message with the stored key (AES-128-ECB)."""
        self.__encrypted = aes.encrypt(self.__message, self.__key)
    def decrypt_aes_128_ecb(self):
        """Decrypt the stored ciphertext with the stored key (AES-128-ECB)."""
        self.__message = aes.decrypt(self.__encrypted, self.__key)
    def is_aes_128_ecb(self):
        """Delegate ECB-mode detection on the ciphertext to the aes module."""
        return aes.detect_aes_128_ecb(self.__encrypted, 128)
| [
"nieradkiewicz@gmail.com"
] | nieradkiewicz@gmail.com |
5d6be64c33047c1f55721e5878a4fbbded441aec | 24f02ab32164114731c303767d61e068302bff51 | /HW4/plot.py | 97d3af9ffe20ef2e7e3c87abb028a6f0af6edd0c | [] | no_license | XueminLiu111/UCBerkeley_DRL_CS294-112 | 3ff54a2c7a0f4c8a9f14ccbe25716d2c06f42ee2 | acb5b12e9c5c7a2b4a3b12a040d1d523fff22cb7 | refs/heads/main | 2023-06-25T22:01:04.090348 | 2021-07-20T19:49:15 | 2021-07-20T19:49:15 | 387,898,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | import os
import argparse
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas
# CLI: --exps lists experiment names (each must have data/<exp>/log.csv);
# --save optionally names an output image written under plots/.
parser = argparse.ArgumentParser()
parser.add_argument('--exps', nargs='+', type=str)
parser.add_argument('--save', type=str, default=None)
args = parser.parse_args()
f, ax = plt.subplots(1, 1)
for i, exp in enumerate(args.exps):
    # Each experiment log has Itr / ReturnAvg / ReturnStd columns.
    log_fname = os.path.join('data', exp, 'log.csv')
    csv = pandas.read_csv(log_fname)
    # Spread experiment colors evenly across the viridis colormap.
    color = cm.viridis(i / float(len(args.exps)))
    ax.plot(csv['Itr'], csv['ReturnAvg'], color=color, label=exp)
    # Shade +/- one standard deviation around the mean return.
    ax.fill_between(csv['Itr'], csv['ReturnAvg'] - csv['ReturnStd'], csv['ReturnAvg'] + csv['ReturnStd'],
                    color=color, alpha=0.2)
ax.legend()
ax.set_xlabel('Iteration')
ax.set_ylabel('Return')
if args.save:
    # Save to plots/<name>.png; otherwise show the figure interactively.
    os.makedirs('plots', exist_ok=True)
    #f.savefig(os.path.join('plots', args.save + '.jpg'))
    f.savefig(os.path.join('plots', args.save + '.png'))
else:
    plt.show()
| [
"liuxuemin@xuemins-mbp.dhcp.nd.edu"
] | liuxuemin@xuemins-mbp.dhcp.nd.edu |
1eda220ebf78f8269181594eccd28bc02e85cb10 | 664ea23f1203ba31edb2ea3c21ba88cb14d8fdac | /hierarcicalClustering.py | b219d676a53128672e84fa0140da200e36f7f1a9 | [] | no_license | volkaankarakus/machineLearning | f5423fb88add14fe1869ecf96fc4b6cf2dd491cc | 7eb395ceb110488fd7cd39376a79652255d54155 | refs/heads/main | 2023-03-05T11:52:47.842975 | 2021-02-21T00:49:47 | 2021-02-21T00:49:47 | 338,939,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,278 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 19:13:19 2021
@author: VolkanKarakuş
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#%% create dataset: three Gaussian blobs of 100 points each
# class 1
x1=np.random.normal(25,5,100) # Gaussian samples: mean 25, sigma 5, 100 values
y1=np.random.normal(25,5,100)
# class 2
x2=np.random.normal(55,5,100)
y2=np.random.normal(60,5,100)
# class 3
x3=np.random.normal(55,5,100)
y3=np.random.normal(15,5,100)
x=np.concatenate((x1,x2,x3),axis=0)
y=np.concatenate((y1,y2,y3),axis=0)
dictionary={'x':x,'y':y}
data=pd.DataFrame(dictionary)
dataInfo=data.info() # Data columns (total 2 columns):
                        #   Column  Non-Null Count  Dtype
                        #  ---  ------  --------------  -----
                        #   0   x       300 non-null   float64
                        #   1   y       300 non-null   float64
dataDescribe=data.describe() # mean,std,min,max
# Plot the raw, unlabeled points.
plt.scatter(x1,y1,color='black')
plt.scatter(x2,y2,color='black')
plt.scatter(x3,y3,color='black')
plt.show()
#%% Dendrogram
from scipy.cluster.hierarchy import linkage, dendrogram # hierarchical-clustering helpers for drawing the dendrogram
merg=linkage(data,method='ward') # 'ward' linkage minimizes the within-cluster variance
dendrogram(merg,leaf_rotation=90)
plt.xlabel('data points')
plt.ylabel('euclidian distance')
plt.show()
# On the plot, cutting a horizontal threshold across the longest vertical
# distance suggests the most sensible cluster count: 3.
#%% Hierarchical Clustering
from sklearn.cluster import AgglomerativeClustering # agglomerative = bottom-up merging of clusters
hc=AgglomerativeClustering(n_clusters=3,affinity='euclidean',linkage='ward')
cluster=hc.fit_predict(data) # fit builds the model on the data; predict assigns cluster labels
data['label']=cluster
# Filter by assigned label and plot each cluster in its own color.
plt.scatter(data.x[data.label==0],data.y[data.label==0],color='red')
plt.scatter(data.x[data.label==1],data.y[data.label==1],color='green')
plt.scatter(data.x[data.label==2],data.y[data.label==2],color='blue')
plt.show()
| [
"noreply@github.com"
] | volkaankarakus.noreply@github.com |
062537f35efefd0a3cdffb6f4de890ff488c1948 | eb35d0d2293d8e998d001142b7d4c2e780557562 | /jd_spider/pipelines.py | 59c93f502dee0ecb37fe05de463016e0fe0a5c3d | [] | no_license | lhr0323/jdspider | b8d05917f88327c0b78b2a0215e74da1bd1d7779 | aea4c0cbf6bb80592cf7831dd1f673958c7d0a14 | refs/heads/master | 2020-04-25T09:31:48.239471 | 2019-02-26T09:22:56 | 2019-02-26T09:22:56 | 172,678,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,584 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import MySQLdb.cursors
from twisted.enterprise import adbapi
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
from scrapy.utils.project import get_project_settings
from scrapy import log
SETTINGS = get_project_settings()
class MySQLPipeline(object):
    """Scrapy pipeline (Python 2) that writes scraped goods items into the
    MySQL table `jd_goods` through a Twisted adbapi connection pool."""
    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy factory hook: build the pipeline with the crawler's stats.
        return cls(crawler.stats)
    def __init__(self, stats):
        # Instantiate DB connection pool from the project settings.
        self.dbpool = adbapi.ConnectionPool('MySQLdb',
                                            host=SETTINGS['DB_HOST'],
                                            user=SETTINGS['DB_USER'],
                                            passwd=SETTINGS['DB_PASSWD'],
                                            port=SETTINGS['DB_PORT'],
                                            db=SETTINGS['DB_DB'],
                                            charset='utf8',
                                            use_unicode=True,
                                            cursorclass=MySQLdb.cursors.DictCursor
                                            )
        self.stats = stats
        dispatcher.connect(self.spider_closed, signals.spider_closed)
    def spider_closed(self, spider):
        """ Cleanup function, called after crawling has finished to close open
        objects.
        Close ConnectionPool. """
        self.dbpool.close()
    def process_item(self, item, spider):
        # Run the insert on a pool thread; log (but don't raise) failures.
        query = self.dbpool.runInteraction(self._insert_record, item)
        query.addErrback(self._handle_error)
        return item
    def _insert_record(self, tx, item):
        # Unpack the item fields; several are scraped as one-element lists.
        ID = item['ID'][0]
        name = item['name'][0]
        comment_num = str(item['comment_num'])
        # shop_name = item['shop_name'][0]
        link = item['link'][0]
        commentVersion = str(item['commentVersion'])
        commentVersion = commentVersion[1:-1]
        score1count = str(item['score1count'])
        score2count = str(item['score2count'])
        score3count = str(item['score3count'])
        score4count = str(item['score4count'])
        score5count = str(item['score5count'])
        price = str(item['price'])
        ID = ID.encode('utf-8')
        name = name.encode('utf-8')
        comment_num = comment_num.encode('utf-8')
        # shop_name = shop_name.encode('utf-8')
        link = link.encode('utf-8')
        commentVersion = commentVersion.encode('utf-8')
        score1count = score1count.encode('utf-8')
        score2count = score2count.encode('utf-8')
        score3count = score3count.encode('utf-8')
        score4count = score4count.encode('utf-8')
        score5count = score5count.encode('utf-8')
        price = price.encode('utf-8')
        # NOTE(review): SQL built by string interpolation from scraped
        # (untrusted) data -- SQL-injection risk; should use tx.execute(sql, params).
        sql = "INSERT INTO jd_goods VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')" % \
              (ID, name, comment_num, link, commentVersion, score1count, score2count, score3count,
               score4count, score5count, price)
        tx.execute(sql)
        print "yes"
    def _handle_error(self, e):
        # Failure callback for runInteraction: just log the error.
        log.err(e)
class CommentPipeline(object):
    """Scrapy pipeline (Python 2) that writes scraped product comments into
    the MySQL table `jd_comment` through a Twisted adbapi connection pool."""
    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy factory hook: build the pipeline with the crawler's stats.
        return cls(crawler.stats)
    def __init__(self, stats):
        # Instantiate DB connection pool from the project settings.
        self.dbpool = adbapi.ConnectionPool('MySQLdb',
                                            host=SETTINGS['DB_HOST'],
                                            user=SETTINGS['DB_USER'],
                                            passwd=SETTINGS['DB_PASSWD'],
                                            port=SETTINGS['DB_PORT'],
                                            db=SETTINGS['DB_DB'],
                                            charset='utf8',
                                            use_unicode=True,
                                            cursorclass=MySQLdb.cursors.DictCursor
                                            )
        self.stats = stats
        dispatcher.connect(self.spider_closed, signals.spider_closed)
    def spider_closed(self, spider):
        """ Cleanup function, called after crawling has finished to close open
        objects.
        Close ConnectionPool. """
        self.dbpool.close()
    def process_item(self, item, spider):
        # Run the insert on a pool thread; log (but don't raise) failures.
        query = self.dbpool.runInteraction(self._insert_record, item)
        query.addErrback(self._handle_error)
        return item
    def _insert_record(self, tx, item):
        user_name = item['user_name']
        user_ID = item['user_ID']
        userProvince = item['userProvince']
        content = item['content']
        good_ID = item['good_ID']
        good_name = item['good_name']
        date = item['date']
        replyCount = item['replyCount']
        score = item['score']
        status = item['status']
        title = item['title']
        #userRegisterTime = item['userRegisterTime']
        #productColor = item['productColor']
        #productSize = item['productSize']
        #isMobile = item['isMobile']
        # days = item['days']
        # tags = item['commentTags']
        # NOTE(review): SQL built by string interpolation from scraped
        # (untrusted) data -- SQL-injection risk; should use tx.execute(sql, params).
        sql = "INSERT INTO jd_comment VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s'," \
              "'%s')" % \
              (user_name, user_ID, userProvince, content, good_ID, good_name, date, replyCount, score,
               status, title)
        tx.execute(sql)
        print "yes"
    def _handle_error(self, e):
        # Failure callback for runInteraction: just log the error.
        log.err(e)
| [
"lhr0323@126.com"
] | lhr0323@126.com |
47cf4c2ecce8403d5625bf20cbcfc02177da830a | f932637f162b34e06fc5b8b667b0eb0969ecec05 | /Progra (1).py | eff2f10c36b569af97adcb9996b684b0a7c460fc | [] | no_license | ElizabethCordoba/Proyecto-Programado-3-Lenguajes | 990fde86507e422c5fb709767b04f06e0897c2cf | 1e6ac99008153ae403b8f412103050ad298697a4 | refs/heads/master | 2020-12-13T20:55:19.256349 | 2013-11-07T12:00:08 | 2013-11-07T12:00:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121,739 | py | ## funcion que lee y archivo y mete en la lista las expresiones
def leer_archivo(archivo):
    """Read the source file *archivo* and return its expressions as a list.

    A line starting with "val" or "let" begins a new expression; any other
    line is a continuation appended (space-separated) to the previous one.
    Reading stops at the first empty line.

    Fixes: the original did `archivo.close` without parentheses (the file
    was never closed, and the name had been shadowed anyway) and had an
    unreachable for/print loop after the return.
    """
    lista_var = []
    cont = -1
    # Context manager guarantees the handle is closed even on error.
    with open(archivo, "r") as entrada:
        linea = (entrada.readline().strip("\n")).strip("\t")
        while linea != "":
            if linea[0:3] != "val" and linea[0:3] != "let":
                # Continuation line: glue it onto the current expression.
                lista_var[cont] = lista_var[cont] + " " + linea
            else:
                cont += 1
                lista_var = lista_var + [linea]
            linea = (entrada.readline().strip("\n")).strip("\t")
    return lista_var
# Module-level smoke run: requires a "Codigo.txt" next to this script, or
# importing the module fails. The loop splits each expression on spaces but
# discards the result (the print below is commented out), so it has no effect.
l1=leer_archivo("Codigo.txt")
for i in l1:
    i=i.split(" ")
    # print (i)
## Funcion que elimina espacios en blanco de una expresion
def eliminaEspacios(expresion):
    """Return *expresion* with every ASCII space character removed.

    Improvement: replaces the manual index/accumulator loop with the
    idiomatic (and C-speed) str.replace.
    """
    return expresion.replace(" ", "")
## Valida si lo que entra es un flotante, entero o string
def prueba(palabra):
    """Classify *palabra*: "entero" if it parses as int, "flotante" if it
    parses as float, otherwise "string"."""
    # Try the conversions in order of strictness; the first that succeeds
    # determines the label.
    for convertir, etiqueta in ((int, "entero"), (float, "flotante")):
        try:
            convertir(palabra)
            return etiqueta
        except Exception:
            pass
    return "string"
## funcion que separa las expresiones cuando vienen un let
def separarLet():
    """Read "Codigo.txt" via leer_archivo() and split each expression that
    embeds a `let` into the part before the `let` and the part from `let`
    onward; expressions without a `let` (or where `let` directly follows
    '=') are kept whole. Returns the resulting list."""
    lista= leer_archivo("Codigo.txt")
    result=[]
    largo =len(lista)
    i=0
    while largo!=i:
        # NOTE(review): when "let" is at position 0, find()-1 == -1 indexes
        # the LAST character of the string, so the '=' guard checks the
        # wrong character in that case.
        if lista[i].find("let")!=-1 and lista[i][(lista[i].find("let"))-1]!="=":
            result+=[lista[i][:lista[i].find("let")]]
            result+=[lista[i][lista[i].find("let"):]]
            i+=1
        else:
            result+=[lista[i]]
            i+=1
    return result
# Recibe una lista y realiza la evaluación de expresiones sencillas
def evaluarSencillo ():
    """First typing pass: turn each expression from separarLet() into a
    [name, type-or-raw-expression] pair.

    Literal right-hand sides are mapped to "int" / "double" / "string" /
    "bool"; anything else (if/let bodies, composite expressions) is kept
    verbatim for the later passes to resolve.
    """
    lista = separarLet()
    resultado = []
    largo = len (lista)
    #print (largo)
    i=0
    while largo!=i:
        if lista[i]!="":
            if lista[i][:3]!="let":
                # Drop the leading "val" keyword and surrounding blanks.
                expresion = lista[i].strip("val")
                expresion = expresion.strip(" ")
                ##expresion=eliminaEspacios(expresion)
                if expresion.find("let")==-1:
                    # "x if ..." conditional: keep the raw body as the type slot.
                    if expresion[2]=="i" and expresion[3]=="f":
                        resultado+=[[expresion[:1],expresion[2:]]]
                        i+=1
                    else:
                        # Plain "name=value": classify the literal value.
                        expresion=eliminaEspacios(expresion)
                        expresion=expresion.split("=")
                        if prueba(expresion[1])=="entero":
                            resultado +=[[expresion[0],"int"]]
                            i+=1
                        elif prueba(expresion[1])=="flotante":
                            resultado +=[[expresion[0],"double"]]
                            i+=1
                        else:
                            if expresion[1][0]=='"':
                                resultado += [[expresion[0],"string"]]
                                i+=1
                            else:
                                if expresion[1]=="true" or expresion[1] == "false":
                                    resultado += [[expresion[0],"bool"]]
                                    i+=1
                                else:
                                    # Unresolved expression: keep it raw.
                                    resultado += [[expresion[0], expresion[1]]]
                                    i+=1
                else:
                    # "name = let ...": split at the first '='.
                    posicion = expresion.find("=")
                    resultado+=[[expresion[:posicion],expresion[posicion+1:]]]
                    i+=1
            else:
                # A bare "let" expression: ["let", rest].
                resultado+=[[lista[i][:3],lista[i][3:]]]
                i+=1
        else:
            i+=1
    return resultado
## funcion que separa los elementos de una expresion compleja
## que estan en la lista de evaluar sencillo.
def separarExpresion(expre):
    """Split the string *expre* into a list of its characters.

    Parenthesized runs are accumulated in a sub-buffer first, but since the
    buffer is extended into the result character by character the output is
    effectively list(expre). NOTE(review): an unmatched '(' makes the inner
    while run past the end and raise IndexError -- callers rely on balanced
    input.
    """
    i=0
    lista=[]
    largo = len(expre)
    while i != largo:
        lista1=[]
        if expre[i]=="(":
            # Collect everything up to (and including) the matching ')'.
            while expre[i]!= ")":
                lista1+= expre[i]
                i+=1
            lista1+=expre[i]
            lista += lista1
            i+=1
        else:
            lista+=expre[i]
            i+=1
    return lista
## funcion que evalua expresiones compuestas por variables y constantes
def evaluarCompleja():
    """Second typing pass: resolve expressions that mix variables with
    literal constants.

    Entries already typed (int/double/string/bool), containing keywords
    (if/let/val/in), or tuple/list syntax are copied through. For the rest,
    the first literal token found decides the type; an expression made only
    of variables is kept raw for evaluarVariable().
    """
    lista=evaluarSencillo()
    i=0;
    largo = len (lista)
    lista1=[]
    while i!=largo:
        if lista[i][1]=="int" or lista[i][1]=="double" or lista[i][1]=="string" or lista[i][1]== "bool":
            lista1+= [lista[i]]
            i+=1
        else:
            if lista[i][1].find("if")!=-1 or lista[i][1].find("let")!=-1 or lista[i][1].find("val")!=-1 or lista[i][1].find("in")!=-1 :
                lista1+= [lista[i]]
                i+=1
            else:
                if lista[i][1].find(",")!=-1 or lista[i][1].find("[")!=-1 :
                    lista1+= [lista[i]]
                    i+=1
                else:
                    # Scan the expression character by character for a literal.
                    listaExpresion= separarExpresion(lista[i][1])
                    cont = len(listaExpresion)
                    j=0
                    while j!=cont:
                        if listaExpresion[j]=="(" or listaExpresion[j]==")":
                            if j==cont-1:
                                # NOTE(review): bare `lista[i][1]` is a no-op
                                # expression statement.
                                lista[i][1]
                                lista1 +=[[lista[i][0],lista[i][1]]]
                                j=cont
                            else:
                                j+=1
                        else:
                            if prueba(listaExpresion[j])=="entero":
                                lista1 +=[[lista[i][0],"int"]]
                                j=cont
                            elif prueba(listaExpresion[j])=="flotante":
                                lista1 +=[[lista[i][0],"double"]]
                                j=cont
                            elif listaExpresion[j]=='"':
                                lista1 +=[[lista[i][0],"string"]]
                                j=cont
                            else:
                                if j==cont-1:
                                    lista[i][1]
                                    lista1 +=[[lista[i][0],lista[i][1]]]
                                    j=cont
                                else:
                                    j+=1
                    i+=1
    return lista1
## funcion que busca los elementos de una lista
def buscar(expresion, posicion, lista):
    """Collect the recorded types of variable *expresion* among the first
    *posicion* entries of *lista*, skipping malformed 3-element entries.
    Returns the (possibly empty) list of matches in order."""
    resultado = []
    # Index loop (not a slice) so an out-of-range `posicion` still raises
    # IndexError, exactly like the original scan.
    for idx in range(posicion):
        entrada = lista[idx]
        if len(entrada) != 3 and entrada[0] == expresion:
            resultado.append(entrada[1])
    return resultado
## funcion que determina el tipo de las expresiones compuestas solo por variables
def evaluarVariable():
    """Third typing pass: resolve expressions composed only of variables.

    Already-typed or keyword/tuple/list entries are copied through; for the
    rest, the first identifier (after any leading '(') is looked up among
    the previously typed entries and its latest type is adopted.
    NOTE(review): crashes with IndexError if the identifier was never
    declared before (buscar returns []).
    """
    lista= evaluarCompleja()
    i=0
    lista1=[]
    largo = len(lista)
    while i!=largo:
        if lista[i][1]=="int" or lista[i][1]=="double" or lista[i][1]=="string" or lista[i][1]== "bool":
            lista1+= [lista[i]]
            i+=1
        else:
            if lista[i][1].find("if")!=-1 or lista[i][1].find("let")!=-1 or lista[i][1].find("val")!=-1 or lista[i][1].find("in")!=-1 :
                lista1+= [lista[i]]
                i+=1
            else:
                if lista[i][1].find(",")!=-1 or lista[i][1].find("[")!=-1 :
                    lista1+= [lista[i]]
                    i+=1
                else:
                    # Skip leading '(' to reach the first identifier character.
                    j=0
                    while lista [i][1][j]=="(":
                        j+=1
                    expresionBuscar = buscar(lista[i][1][j],i,lista1)
                    lista1+=[[lista[i][0], expresionBuscar[-1]]]
                    i+=1
    return lista1
# Funcion que determina la cantidad de elementos que componen una tupla
def elementosTupla(expresion):
    """Return the number of ',' separators in *expresion* (a string or a
    list of characters) -- i.e. tuple arity minus one.

    Improvement: replaces the manual counting loop with the builtin
    sequence .count(), which behaves identically for both input kinds.
    """
    return expresion.count(",")
## Funcion que evalua expresiones y retorna el tipo de la expresion
def evaluarComplejaTupla(lista):
    """Classify an expression given as a list of characters.

    Returns "int" for an expression with an integer token and no '.',
    "double" when a '.' appears anywhere, "string" on a '"', "bool" when a
    true/false literal starts at the current position, and "" when no
    literal is found. NOTE(review): the true/false check reads lista[i+1]
    and can raise IndexError when 't'/'f' is the last character.
    """
    # Rebuild the flat string to test for a decimal point.
    j=0
    expresion=""
    while j!=len(lista):
        expresion+=lista[j]
        j+=1
    # NOTE(review): bare `expresion` is a no-op expression statement.
    expresion
    i=0;
    largo = len (lista)
    resultado=""
    while i!=largo:
        if prueba(lista[i])=="entero" and expresion.find(".")==-1:
            resultado="int"
            i=largo
        elif expresion.find(".")!=-1:
            resultado="double"
            i=largo
        elif lista[i]=='"':
            resultado="string"
            i=largo
        elif (lista[i]=="t" and lista[i+1]=="r") or (lista[i]=="f" and lista[i+1]=="a"):
            resultado="bool"
            i=largo
        else:
            i+=1
    return resultado
# Funcion que evalua expresiones compuestas unicamente con variables y retorna el tipo de la expresion
def evaluarVariableTupla(lista,j):
    """Resolve the type of a variable-only tuple component.

    *lista* is the component as a list of characters and *j* is the entry
    index used as the lookup horizon. The first character that is not an
    operator or parenthesis is looked up with buscar(); the latest recorded
    type is returned, or "" when the variable is unknown.
    """
    listaEvaluada=evaluarVariable()
    i=0
    resultado=""
    largo = len(lista)
    while i!=largo:
        # Skip arithmetic operators and parentheses until an identifier char.
        if lista[i]!="+" and lista[i]!="-" and lista[i]!="*" and lista[i]!="^" and lista[i]!="(" and lista[i]!=")":
            expresionBuscar = buscar(lista[i],j,listaEvaluada)
            if expresionBuscar!=[]:
                resultado= expresionBuscar[-1]
                i=largo
            else:
                # NOTE(review): bare `resultado` is a no-op; "" is returned.
                resultado
                i=largo
        else:
            i+=1
    return resultado
## Funcion que evalua y determina los tipos de las tuplas
def evaluarTuplasSencillo():
    """Fourth typing pass: type simple two-element tuples as "T1*T2".

    Already-typed, keyword, and list entries are copied through. For an
    expression with exactly one comma, each side is typed first as a
    literal expression (evaluarComplejaTupla) and then, if that fails, as a
    variable expression (evaluarVariableTupla); tuples of tuples and
    unresolved cases are left raw for evaluarTuplaTuplas().
    """
    lista=evaluarVariable()
    i=0
    largo=len(lista)
    resultado=[]
    tipo=""
    while i!=largo:
        if lista[i][1]=="int" or lista[i][1]=="double" or lista[i][1]=="string" or lista[i][1]== "bool":
            resultado+= [lista[i]]
            i+=1
        else:
            if lista[i][1].find("if")!=-1 or lista[i][1].find("let")!=-1 or lista[i][1].find("val")!=-1 or lista[i][1].find("in")!=-1 or lista[i][1].find("[")!=-1 :
                resultado+= [lista[i]]
                i+=1
            else:
                if lista[i][1].find(",")!=-1:
                    expresion=separarExpresion(lista[i][1])
                    # Only plain pairs (exactly one comma) are handled here.
                    if elementosTupla(expresion)==1:
                        posicion=lista[i][1].find(",")
                        listaparte1=expresion[:posicion]
                        listaparte2=expresion[posicion+1:]
                        # Case 1: both halves are literal expressions.
                        if (evaluarComplejaTupla(listaparte1)=="int" or evaluarComplejaTupla(listaparte1)=="double" or evaluarComplejaTupla(listaparte1)=="string")and(evaluarComplejaTupla(listaparte2)=="int" or evaluarComplejaTupla(listaparte2)=="double" or evaluarComplejaTupla(listaparte2)=="string"):
                            tipo=evaluarComplejaTupla(listaparte1)
                            tipo+="*"
                            tipo+=evaluarComplejaTupla(listaparte2)
                            resultado+=[[lista[i][0],tipo]]
                            i+=1
                        # Case 2: both halves are variable expressions.
                        elif (evaluarVariableTupla(listaparte1,i)!="")and (evaluarVariableTupla(listaparte1,i)=="int" or evaluarVariableTupla(listaparte1,i)=="double" or evaluarVariableTupla(listaparte1,i)=="string") and (evaluarVariableTupla(listaparte2,i)!="")and(evaluarVariableTupla(listaparte2,i)=="int" or evaluarVariableTupla(listaparte2,i)=="double" or evaluarVariableTupla(listaparte2,i)=="string"):
                            tipo=evaluarVariableTupla(listaparte1,i)
                            tipo+="*"
                            tipo+=evaluarVariableTupla(listaparte2,i)
                            resultado+=[[lista[i][0],tipo]]
                            i+=1
                        # Case 3: literal left, variable right.
                        elif (evaluarComplejaTupla(listaparte1)=="int" or evaluarComplejaTupla(listaparte1)=="double" or evaluarComplejaTupla(listaparte1)=="string") and (evaluarVariableTupla(listaparte2,i)!="") and(evaluarVariableTupla(listaparte2,i)=="int" or evaluarVariableTupla(listaparte2,i)=="double" or evaluarVariableTupla(listaparte2,i)=="string"):
                            tipo=evaluarComplejaTupla(listaparte1)
                            tipo+="*"
                            tipo+=evaluarVariableTupla(listaparte2,i)
                            resultado+=[[lista[i][0],tipo]]
                            i+=1
                        # Case 4: variable left, literal right.
                        elif (evaluarVariableTupla(listaparte1,i)!="") and(evaluarVariableTupla(listaparte1,i)=="int" or evaluarVariableTupla(listaparte1,i)=="double" or evaluarVariableTupla(listaparte1,i)=="string") and (evaluarComplejaTupla(listaparte2)=="int" or evaluarComplejaTupla(listaparte2)=="double" or evaluarComplejaTupla(listaparte2)=="string"):
                            tipo=evaluarVariableTupla(listaparte1,i)
                            tipo+="*"
                            tipo+=evaluarComplejaTupla(listaparte2)
                            resultado+=[[lista[i][0],tipo]]
                            i+=1
                        else:
                            resultado+=[lista[i]]
                            i+=1
                    else:
                        resultado+=[lista[i]]
                        i+=1
                else:
                    i+=1
    return resultado
## funcion que separa los elementos cuando vienen una tupla dentro de otra tupla
def separarTuplaTupla(lista):
    """Return the index of the ',' that separates two nested-tuple
    components (a comma directly after ')' or directly before '('), or 0
    when no such comma exists."""
    for idx in range(len(lista)):
        # Same short-circuit order as before: the ')' check runs first, so
        # lista[idx + 1] is only read when the previous char is not ')'.
        if lista[idx] == "," and (lista[idx - 1] == ")" or lista[idx + 1] == "("):
            return idx
    return 0
## funcion que retorne la posicion hasta donde se encuentra una coma
def encontrarComa(lista):
    """Return the index of the first ',' in *lista* (string or character
    list), or 0 when there is none.

    NOTE: a comma at position 0 is indistinguishable from "no comma" --
    callers treat 0 as the no-comma sentinel. Improvement: uses the builtin
    .index() instead of a manual scan.
    """
    try:
        return lista.index(",")
    except ValueError:
        return 0
## funcion que recibe una tupla y retorna su tipo
def evaluarTipoTupla(expresion,j):
    """Return the type string of one tuple component.

    If *expresion* (a list of characters) contains a comma, it is itself a
    pair and both halves are typed, yielding "(T1*T2)"; otherwise the
    component is typed as a literal, a boolean, or a variable lookup (using
    horizon *j*). Returns "" when nothing matches.
    """
    i=0
    largo = len(expresion)
    tipo=""
    while i!=largo:
        if encontrarComa(expresion)!=0:
            # Inner pair: split at the first comma and type both halves.
            posicion=encontrarComa(expresion)
            listaparte1=expresion[:posicion]
            listaparte2=expresion[posicion+1:]
            # Case 1: both halves literal.
            if (evaluarComplejaTupla(listaparte1)=="int" or evaluarComplejaTupla(listaparte1)=="double" or evaluarComplejaTupla(listaparte1)=="string")and(evaluarComplejaTupla(listaparte2)=="int" or evaluarComplejaTupla(listaparte2)=="double" or evaluarComplejaTupla(listaparte2)=="string"):
                tipo+="("
                tipo+=evaluarComplejaTupla(listaparte1)
                tipo+="*"
                tipo+=evaluarComplejaTupla(listaparte2)
                tipo+=")"
                i=largo
            # Case 2: both halves variables.
            elif (evaluarVariableTupla(listaparte1,j)!="")and (evaluarVariableTupla(listaparte1,j)=="int" or evaluarVariableTupla(listaparte1,j)=="double" or evaluarVariableTupla(listaparte1,j)=="string") and (evaluarVariableTupla(listaparte2,j)!="")and(evaluarVariableTupla(listaparte2,j)=="int" or evaluarVariableTupla(listaparte2,j)=="double" or evaluarVariableTupla(listaparte2,j)=="string"):
                tipo+="("
                tipo+=evaluarVariableTupla(listaparte1,j)
                tipo+="*"
                tipo+=evaluarVariableTupla(listaparte2,j)
                tipo+=")"
                i=largo
            # Case 3: literal left, variable right.
            elif (evaluarComplejaTupla(listaparte1)=="int" or evaluarComplejaTupla(listaparte1)=="double" or evaluarComplejaTupla(listaparte1)=="string") and (evaluarVariableTupla(listaparte2,j)!="") and(evaluarVariableTupla(listaparte2,j)=="int" or evaluarVariableTupla(listaparte2,j)=="double" or evaluarVariableTupla(listaparte2,j)=="string"):
                tipo+="("
                tipo+=evaluarComplejaTupla(listaparte1)
                tipo+="*"
                tipo+=evaluarVariableTupla(listaparte2,j)
                tipo+=")"
                i=largo
            # Case 4: variable left, literal right.
            elif (evaluarVariableTupla(listaparte1,j)!="") and(evaluarVariableTupla(listaparte1,j)=="int" or evaluarVariableTupla(listaparte1,j)=="double" or evaluarVariableTupla(listaparte1,j)=="string") and (evaluarComplejaTupla(listaparte2)=="int" or evaluarComplejaTupla(listaparte2)=="double" or evaluarComplejaTupla(listaparte2)=="string"):
                tipo+="("
                tipo+=evaluarVariableTupla(listaparte1,j)
                tipo+="*"
                tipo+=evaluarComplejaTupla(listaparte2)
                tipo+=")"
                i=largo
            else:
                i=largo
        else:
            # No comma: a single component -- literal, boolean, or variable.
            if (evaluarComplejaTupla(expresion)=="int" or evaluarComplejaTupla(expresion)=="double" or evaluarComplejaTupla(expresion)=="string"):
                tipo=evaluarComplejaTupla(expresion)
                i=largo
            else:
                if (expresion[i]=="t" and expresion[i+1]=="r") or (expresion[i]=="f" and expresion[i+1]=="a"):
                    tipo="bool"
                    i=largo
                else:
                    tipo=evaluarVariableTupla(expresion,j)
                    i=largo
    return tipo
## Function that resolves the type of bindings whose value is a tuple of tuples.
def evaluarTuplaTuplas():
    """Resolve entries whose value is a nested tuple expression.

    Consumes the [name, type-or-raw-text] pairs from evaluarTuplasSencillo().
    Entries already reduced to a base type, or containing keywords / list
    syntax (handled by later passes), are forwarded unchanged.  The rest are
    split at the position returned by separarTuplaTupla and each half is
    typed via evaluarTipoTupla, producing a "T1*T2" tuple type string.

    Returns: list of [name, type-string] pairs.
    """
    lista=evaluarTuplasSencillo()
    i=0
    largo=len(lista)
    resultado=[]
    tipo=""
    while i!=largo:
        # Already a base type: pass through untouched.
        if lista[i][1]=="int" or lista[i][1]=="double" or lista[i][1]=="string" or lista[i][1]== "bool":
            resultado+= [lista[i]]
            i+=1
        else:
            # Defer expressions with if/let/val/in keywords or list brackets
            # to the later, more specialised passes.
            if lista[i][1].find("if")!=-1 or lista[i][1].find("let")!=-1 or lista[i][1].find("val")!=-1 or lista[i][1].find("in")!=-1 or lista[i][1].find("[")!=-1 :
                resultado+= [lista[i]]
                i+=1
            else:
                # Split the tuple into its two components and type each side.
                expresion=separarExpresion(lista[i][1])
                posicion= separarTuplaTupla(expresion)
                listaparte1=expresion[:posicion]
                listaparte2=expresion[posicion+1:]
                tipo=evaluarTipoTupla(listaparte1,i)
                tipo+="*"
                tipo+=evaluarTipoTupla(listaparte2,i)
                resultado+=[[lista[i][0],tipo]]
                i+=1
    return resultado
## Function that resolves the type of entries whose value is a simple list literal.
def evaluarListaSencilla():
    """Type entries whose value is a flat list literal like "[1,2,3]".

    Looks at the first payload character after the opening bracket to decide
    int/double/string/bool; otherwise scans character by character looking
    for the first literal that fixes the element type.  Entries that are
    nested lists, keyword expressions, or tuples inside lists are forwarded
    unchanged for later passes.

    Returns: list of [name, type-string] pairs.
    """
    lista=evaluarTuplaTuplas()
    i=0
    largo=len(lista)
    resultado=[]
    while i!=largo:
        # Nested list ("[[") or already-resolved scalar/tuple types pass through.
        if lista[i][1][1]=="[" or lista[i][1]=="int" or lista[i][1]=="double" or lista[i][1]=="string" or lista[i][1]== "bool" or (lista[i][1]=="int" and lista[i][1].find("*")!=-1) or (lista[i][1]=="double" and lista[i][1].find("*")!=-1) or (lista[i][1]=="string" and lista[i][1].find("*")!=-1) :
            resultado+= [lista[i]]
            i+=1
        else:
            # Keyword-bearing or parenthesised expressions go to later passes.
            if lista[i][1].find("if")!=-1 or lista[i][1].find("let")!=-1 or lista[i][1].find("val")!=-1 or lista[i][1].find("in")!=-1 or lista[i][1][0]=="(":
                resultado+= [lista[i]]
                i+=1
            else:
                expresion = lista[i][1]
                if expresion[1]!="[":
                    # First element's first character decides the type directly.
                    if prueba(expresion[1])=="entero":
                        resultado+=[[lista[i][0],"int list"]]
                        i+=1
                    elif prueba(expresion[1])=="flotante":
                        resultado+=[[lista[i][0],"double list"]]
                        i+=1
                    elif expresion[1]=='"':
                        resultado+=[[lista[i][0],"string list"]]
                        i+=1
                    elif (expresion[1]=="t" and expresion[2]=="r") or (expresion[1]=="f" and expresion[2]=="a") :
                        resultado+=[[lista[i][0],"bool list"]]
                        i+=1
                    else:
                        # Scan for the first character that reveals a literal.
                        # NOTE(review): the `!=" ... or ...` chain below is
                        # always True for any single character — presumably
                        # `and` was intended; kept as-is to preserve behavior.
                        j=1
                        while j!=len(expresion):
                            if expresion[j]!="(":
                                if expresion[j]!=")" or expresion[j]!="+" or expresion[j]!="-" or expresion[j]!="*" or expresion[j]!="^" or (expresion[j]!="m" and expresion[j+1]!="o") or (expresion[j]!="d" and expresion[j+1]!="i"):
                                    if prueba(expresion[j])=="entero":
                                        if prueba(expresion[j])=="entero" and expresion.find(".")==-1:
                                            resultado+=[[lista[i][0],"int"+" list"]]
                                            j=len(expresion)
                                        else:
                                            resultado+=[[lista[i][0],"double"+" list"]]
                                            j=len(expresion)
                                    elif (expresion[j]=="t" and expresion[j+1]=="r") or (expresion[j]=="f" and expresion[j+1]=="a") :
                                        resultado+=[[lista[i][0],"bool list"]]
                                        j=len(expresion)
                                    else:
                                        # Reached the end without a literal:
                                        # keep the raw entry for later passes.
                                        if j == (len(expresion)-1):
                                            resultado+=[lista[i]]
                                            j=len(expresion)
                                        else:
                                            j+=1
                                else:
                                    j+=1
                            else:
                                # A parenthesised group containing a comma is a
                                # tuple element: defer the whole entry.
                                if expresion[j]=="(":
                                    pos=expresion.find(")")
                                    aux=expresion[j:]
                                    aux=aux[:pos]
                                    if aux.find(",")!=-1:
                                        resultado+=[lista[i]]
                                        j=len(expresion)
                                    else:
                                        j+=1
                                else:
                                    j+=1
                i+=1
    return resultado
## Function that resolves entries that are lists built from variables
## (i.e. "complex" lists whose elements reference earlier bindings).
def evaluarListaCompleja():
    """Type list literals whose elements are previously declared variables.

    For entries still untyped after evaluarListaSencilla(), scans the raw
    text for an identifier, looks it up among earlier bindings via buscar(),
    and tags the entry with "<found type> list".  Tuple elements (a
    parenthesised, comma-containing group) are deferred to later passes.

    Returns: list of [name, type-string] pairs.
    """
    lista=evaluarListaSencilla()
    i=0
    largo=len(lista)
    resultado=[]
    while i!=largo:
        # Already resolved (nested list marker or scalar/tuple type).
        if (lista[i][1][1]=="[" or lista[i][1]=="int" or lista[i][1]=="double" or lista[i][1]=="string" or lista[i][1]== "bool") or (lista[i][1]=="int" and lista[i][1].find("*")!=-1) or (lista[i][1]=="double" and lista[i][1].find("*")!=-1) or (lista[i][1]=="string" and lista[i][1].find("*")!=-1) :
            resultado+= [lista[i]]
            i+=1
        else:
            # Keyword or parenthesised expressions: later passes handle them.
            if lista[i][1].find("if")!=-1 or lista[i][1].find("let")!=-1 or lista[i][1].find("val")!=-1 or lista[i][1].find("in")!=-1 or lista[i][1][0]=="(":
                resultado+= [lista[i]]
                i+=1
            else:
                if lista[i][1]=="int list" or lista[i][1]=="string list" or lista[i][1]=="bool list" or lista[i][1]=="double list":
                    resultado+= [lista[i]]
                    i+=1
                else:
                    expresion=lista[i][1]
                    j=1
                    # Scan for the first identifier character that resolves
                    # against an earlier binding.
                    # NOTE(review): the `!= ... or ...` chain below is always
                    # True for a single character; kept to preserve behavior.
                    while j!=len(expresion):
                        if expresion[j]!="(":
                            if expresion[j]!=")" or expresion[j]!="+" or expresion[j]!="-" or expresion[j]!="*" or expresion[j]!="^" or (expresion[j]!="m" and expresion[j+1]!="o") or (expresion[j]!="d" and expresion[j+1]!="i"):
                                expresionBuscar=buscar(expresion[j],i,lista)
                                if expresionBuscar!=[]:
                                    resultado+=[[lista[i][0],expresionBuscar[-1]+" list"]]
                                    j=len(expresion)
                                else:
                                    j+=1
                            else:
                                j+=1
                        else:
                            # Parenthesised group with a comma = tuple element:
                            # defer the whole entry.
                            if expresion[j]=="(":
                                pos=expresion.find(")")
                                aux=expresion[j:]
                                aux=aux[:pos]
                                if aux.find(",")!=-1:
                                    resultado+=[lista[i]]
                                    j=len(expresion)
                                else:
                                    j+=1
                            else:
                                j+=1
                    i+=1
    return resultado
def evaluarTuplasLista():
    """Type entries whose value is a list of tuples, e.g. "[(1,2),(3,4)]".

    Untyped entries are inspected: if a second parenthesised group follows
    the first (")(" pattern), both tuple halves are typed with
    evaluarTipoTupla and joined as "T1*T2list"; a single tuple is typed the
    same way.  Nested-list entries ("[" found while scanning) pass through.

    Returns: list of [name, type-string] pairs.
    """
    lista=evaluarListaCompleja()
    i=0
    largo=len(lista)
    resultado=[]
    while i!=largo:
        # Already resolved scalar / tuple types pass straight through.
        if (lista[i][1]=="int" or lista[i][1]=="double" or lista[i][1]=="string" or lista[i][1]== "bool") or (lista[i][1]=="int" and lista[i][1].find("*")!=-1) or (lista[i][1]=="double" and lista[i][1].find("*")!=-1) or (lista[i][1]=="string" and lista[i][1].find("*")!=-1) :
            resultado+= [lista[i]]
            i+=1
        else:
            # Keyword-bearing or parenthesised expressions: later passes.
            if lista[i][1].find("if")!=-1 or lista[i][1].find("let")!=-1 or lista[i][1].find("val")!=-1 or lista[i][1].find("in")!=-1 or lista[i][1][0]=="(":
                resultado+= [lista[i]]
                i+=1
            else:
                if lista[i][1]=="int list" or lista[i][1]=="string list" or lista[i][1]=="bool list" or lista[i][1]=="double list":
                    resultado+= [lista[i]]
                    i+=1
                else:
                    expresion= lista[i][1]
                    j=1
                    while j!=len(expresion):
                        if expresion[j]!="[":
                            pos=expresion.find(")")
                            if pos+2!=len(expresion):
                                # ")(" means at least two tuples in the list:
                                # type both halves of the first tuple pair.
                                if expresion[pos+2]=="(":
                                    exp=separarExpresion(lista[i][1])
                                    posicion= separarTuplaTupla(exp)
                                    listaparte1=exp[1:posicion]
                                    listaparte2=exp[posicion+1:]
                                    resultado+=[[lista[i][0],evaluarTipoTupla(listaparte1,i)+"*"+evaluarTipoTupla(listaparte2,i)+"list"]]
                                    j=len(expresion)
                                else:
                                    resultado+= [lista[i]]
                                    j=len(expresion)
                            else:
                                # Single tuple in the list: type it directly.
                                aux=expresion[j:]
                                aux=aux[:pos]
                                expre=separarExpresion(aux)
                                resultado+=[[lista[i][0],evaluarTipoTupla(expre,i)+"list"]]
                                j=len(expresion)
                        else:
                            # Nested list: leave for the list-of-lists pass.
                            resultado+=[lista[i]]
                            j=len(expresion)
                    i+=1
    return resultado
## Function that determines the element type of a list literal.
def evaluarTipoLista(lista,i):
    """Return the type string ("int list", "double list", …) of a raw
    bracketed list literal `lista`, using binding index `i` to resolve
    variables against earlier declarations when no literal is found.

    Args:
        lista: raw text of the list, expected to start with "[".
        i: index of the current binding, passed to buscar() for lookups.

    Returns: the inferred "<type> list" string, or "" if nothing matched.
    """
    j=1
    largo=len(lista)
    resultado=""
    lista1=evaluarTuplasLista()
    # Fast path: classify from the first element / presence of a decimal dot.
    if prueba(lista[1])=="entero" and lista.find(".")==-1:
        resultado="int list"
    elif lista.find(".")!=-1:
        resultado="double list"
    elif lista[1]=='"':
        resultado="string list"
    elif (lista[1]=="t" and lista[2]=="r") or (lista[1]=="f" and lista[2]=="a") :
        resultado="bool list"
    else:
        # Scan for the first literal character.
        # NOTE(review): `lista!= "]"` compares the WHOLE string to "]" —
        # `lista[j]` was presumably intended (cf. evaluarExpresiones); the
        # `or` chain is always True anyway, so behavior is unaffected.
        while j!=len(lista):
            if lista!= "]" or lista[j]!=")" or lista[j]!="+" or lista[j]!="-" or lista[j]!="*" or lista[j]!="^" or (lista[j]!="m" and lista[j+1]!="o") or (lista[j]!="d" and lista[j+1]!="i"):
                if prueba(lista[j])=="entero":
                    if prueba(lista[j])=="entero" and lista.find(".")==-1:
                        resultado="int list"
                        j=len(lista)
                    else:
                        resultado="double"+" list"
                        j=len(lista)
                elif (lista[j]=="t" and lista[j+1]=="r") or (lista[j]=="f" and lista[j+1]=="a") :
                    resultado="bool list"
                    j=len(lista)
                else:
                    j+=1
            else:
                j+=1
    # No literal found: try to resolve a variable against earlier bindings.
    if resultado=="":
        j=1
        while j!=len(lista):
            if lista!= "]" or lista[j]!=")" or lista[j]!="+" or lista[j]!="-" or lista[j]!="*" or lista[j]!="^" or (lista[j]!="m" and lista[j+1]!="o") or (lista[j]!="d" and lista[j+1]!="i"):
                expresionBuscar=buscar(lista[j],i,lista1)
                if expresionBuscar!=[]:
                    resultado=expresionBuscar[-1]+" list"
                    j=len(lista)
                else:
                    j+=1
            else:
                j+=1
    return resultado
## Function that resolves the type of entries holding a list of lists.
def evaluarListaListas():
    """Type entries whose value is a nested list literal.

    Entries already carrying a final type, or containing keywords /
    parenthesised expressions reserved for later passes, are forwarded
    untouched.  The remaining entries have their outer brackets stripped and
    the interior typed with evaluarTipoLista, yielding "(T) list".
    """
    entradas = evaluarTuplasLista()
    resultado = []
    for indice, entrada in enumerate(entradas):
        valor = entrada[1]
        if valor in ("int", "double", "string", "bool"):
            # Base scalar type: already final.
            resultado.append(entrada)
        elif valor.find("if") != -1 or valor.find("let") != -1 or valor.find("val") != -1 or valor.find("in") != -1 or valor[0] == "(":
            # Keyword or parenthesised expression: a later pass handles it.
            resultado.append(entrada)
        elif valor in ("int list", "string list", "bool list", "double list"):
            # Simple list type: already final.
            resultado.append(entrada)
        else:
            # Strip the outer [ ] and type the inner list literal.
            interior = valor[1:len(valor) - 1]
            resultado.append([entrada[0], "(" + evaluarTipoLista(interior, indice) + ")" + " list"])
    return resultado
## Function that determines the type of a bracketed expression, resolving
## variables against a caller-supplied binding list.
def evaluarExpresiones(lista,i,lista1):
    """Infer the "<type> list" string of a bracketed expression.

    Like evaluarTipoLista, but resolves identifiers against the explicit
    binding list `lista1` (e.g. a let-local environment) instead of the
    module-level passes.

    Args:
        lista: raw bracketed expression text ("[...]" expected).
        i: binding index forwarded to buscar().
        lista1: list of [name, type] pairs used for variable lookup.

    Returns: inferred "<type> list" string, or "" if nothing matched.
    """
    j=1
    largo=len(lista)
    resultado=""
    # NOTE: the "." check comes first here, so any dot anywhere forces
    # "double list" — ordering differs from evaluarTipoLista on purpose?
    # (unverified; kept as-is).
    if lista.find(".")!=-1:
        resultado="double list"
    elif prueba(lista[1])=="entero":
        resultado="int list"
    elif lista[1]=='"':
        resultado="string list"
    elif (lista[1]=="t" and lista[2]=="r") or (lista[1]=="f" and lista[2]=="a") :
        resultado="bool list"
    else:
        # Scan for the first literal character.
        # NOTE(review): the `or` chain below is always True for any single
        # character (a char cannot equal every operator at once).
        while j!=len(lista):
            if lista[j]!= "]" or lista[j]!=")" or lista[j]!="+" or lista[j]!="-" or lista[j]!="*" or lista[j]!="^" or (lista[j]!="m" and lista[j+1]!="o") or (lista[j]!="d" and lista[j+1]!="i"):
                if prueba(lista[j])=="entero":
                    if prueba(lista[j])=="entero" and lista.find(".")==-1:
                        resultado="int list"
                        j=len(lista)
                    else:
                        resultado="double"+" list"
                        j=len(lista)
                elif (lista[j]=="t" and lista[j+1]=="r") or (lista[j]=="f" and lista[j+1]=="a") :
                    resultado="bool list"
                    j=len(lista)
                else:
                    j+=1
            else:
                j+=1
    # No literal found: try to resolve the first identifier via `lista1`.
    if resultado=="":
        j=1
        while j!=len(lista):
            if lista[j]!= "]" or lista[j]!=")" or lista[j]!="+" or lista[j]!="-" or lista[j]!="*" or lista[j]!="^" or (lista[j]!="m" and lista[j+1]!="o") or (lista[j]!="d" and lista[j+1]!="i"):
                expresionBuscar=buscar(lista[j],i,lista1)
                if expresionBuscar!=[]:
                    resultado=expresionBuscar[-1]+" list"
                    j=len(lista)
                else:
                    j+=1
            else:
                j+=1
    return resultado
## Function that types a chain of "val name=expr" declarations.
def evaluarVals(expresion,i,result):
    """Type a sequence of `val name=expr` declarations recursively.

    Splits `expresion` at the next "val" keyword: the text before it is the
    current declaration, the text after it is handled by the recursive call.
    Each declaration's right-hand side is wrapped in brackets and typed with
    evaluarTipoLista; [name, type] is appended to `result` (mutated in
    place).

    Args:
        expresion: raw text of one or more declarations (leading "val "
            already stripped by the caller).
        i: index of the current top-level binding, forwarded to helpers.
        result: accumulator of [name, type] pairs; mutated and returned.

    Returns: `result` with all declarations appended.
    """
    pos=expresion.find("val")
    tipo=""
    if pos!=-1:
        # Text up to (but not including) the separating space before "val"
        # is the current declaration; the remainder recurses below.
        expr_temp=expresion[:pos-1]
        expresion=expresion[pos+4:]
        po= expr_temp.find("=")
        exp3=expr_temp[:po]
        exp4="["+expr_temp[po+1:]+"]"
        if exp4[1]=="[" and exp4[2]!="[":
            # RHS is itself a flat list literal: type it as written.
            # BUG FIX: the original read `exp1[po+1:]`, but `exp1` is not
            # defined in this function (NameError on this path); `expr_temp`
            # is the intended name, mirroring the non-recursive branch below.
            exp4=expr_temp[po+1:]
            tipo=evaluarTipoLista(exp4,i)
            result+=[[exp3,tipo]]
            if expresion!="":
                evaluarVals(expresion,i,result)
        elif exp4[1]=="[" and exp4[2]=="[":
            # RHS is a nested list literal: strip our wrapper brackets and
            # tag the inferred type with one more " list" level.
            exp4=exp4[1:len(exp4)-1]
            tipo=evaluarTipoLista(exp4,i)+" list"
            result+=[[exp3,tipo]]
            if expresion!="":
                evaluarVals(expresion,i,result)
        else:
            # Scalar RHS: infer via the wrapped form, then drop the list tag.
            # NOTE(review): rstrip("list") removes trailing characters from
            # the set {l,i,s,t}, not the literal suffix — preserved as-is.
            tipo=evaluarTipoLista(exp4,i)
            tipo=tipo.rstrip("list")
            if tipo[0]=="[" and tipo[1]=="[":
                lar=len(tipo)
                tipo=tipo[1:lar-1]
                tipo="("+evaluarTipoLista(tipo,i)+")"+" list"
                result+=[[exp3,tipo]]
                if expresion!="":
                    evaluarVals(expresion,i,result)
            else:
                result+=[[exp3,tipo]]
                if expresion!="":
                    evaluarVals(expresion,i,result)
    else:
        # Last (or only) declaration: same classification, no recursion.
        po= expresion.find("=")
        exp3=expresion[:po]
        exp4="["+expresion[po+1:]+"]"
        if exp4[1]=="[" and exp4[2]!="[":
            exp4=expresion[po+1:]
            tipo=evaluarTipoLista(exp4,i)
            result+=[[exp3,tipo]]
        elif exp4[1]=="[" and exp4[2]=="[":
            exp4=exp4[1:len(exp4)-1]
            tipo=evaluarTipoLista(exp4,i)+" list"
            result+=[[exp3,tipo]]
        else:
            tipo=evaluarTipoLista(exp4,i)
            tipo=tipo.rstrip("list")
            if tipo[0]=="[" and tipo[1]=="[":
                lar=len(tipo)
                tipo=tipo[1:lar-1]
                tipo="("+evaluarTipoLista(tipo,i)+")"+" list"
                result+=[[exp3,tipo]]
            else:
                result+=[[exp3,tipo]]
    return result
## Function that types simple (non-nested) let expressions.
def evaluarLetSencillo():
    """Type entries of the form "let val … in … end" with no nested let/if.

    Each let's declarations are typed into a temporary environment
    (`lista_temp`), then the `in … end` body is typed against it with
    evaluarExpresiones.  Emits [name, type, 1] triples for let results (the
    trailing 1 presumably marks a let-produced binding — unverified).

    Returns: list of pass-through pairs plus [name, type, 1] triples.
    """
    lista = evaluarListaListas()
    largo=len (lista)
    i=0
    lista_temp=[]
    resultado=[]
    while i!=largo:
        if lista[i][0]!="let":
            resultado+=[lista[i]]
            i+=1
        else:
            expresion= lista[i][1]
            # Nested let/if inside the body: handled by evaluarLetComplejo.
            if expresion.find("let")!=-1 or expresion.find("if")!=-1:
                resultado+=[lista[i]]
                i+=1
            else:
                # Split "decls in body end" at the "in" keyword.
                pos=expresion.find("in")
                exp1=expresion[:pos]
                exp1=exp1.strip("val ")
                exp2=expresion[pos+2:]
                exp2=exp2.rstrip(" end")
                if exp1.find("val")!=-1:
                    # Several val declarations: build the local environment.
                    lista_temp=evaluarVals(exp1,i,[])
                    if exp2.find("=")==-1:
                        exp5="["+exp2+"]"
                        if exp5[0]=="[" and exp5[1]!="[":
                            tipo=evaluarExpresiones(exp2,len(lista_temp),lista_temp)
                            tipo=tipo.rstrip("list")
                            resultado+=[[exp2,tipo,1]]
                            i+=1
                        elif exp5[1]=="[" and exp5[2]=="[":
                            tipo=evaluarExpresiones(exp2,len(lista_temp),lista_temp)
                            tipo+=" list"
                            resultado+=[[exp2,tipo,1]]
                            i+=1
                        else:
                            tipo=evaluarExpresiones(exp2,len(lista_temp),lista_temp)
                            resultado+=[[exp2,tipo,1]]
                            i+=1
                    else:
                        exp5="["+exp2[exp2.find("=")+1:]+"]"
                        if exp5[0]=="[" and exp5[1]!="[":
                            tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                            tipo=tipo.rstrip("list")
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                i+=1
                            else:
                                resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                i+=1
                        elif exp5[1]=="[" and exp5[2]=="[":
                            tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                            tipo+=" list"
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                i+=1
                            else:
                                resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                i+=1
                        else:
                            tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                i+=1
                            else:
                                resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                i+=1
                else:
                    # Single declaration: type it directly into lista_temp.
                    po= exp1.find("=")
                    exp3=exp1[:po]
                    exp4="["+exp1[po+1:]+"]"
                    if exp4[1]=="[" and exp4[2]!="[":
                        exp4=exp1[po+1:]
                        tipo=evaluarTipoLista(exp4,i)
                        lista_temp+=[[exp3,tipo]]
                        if exp2.find("=")==-1:
                            resultado+=[[exp3,tipo,1]]
                            i+=1
                        else:
                            exp5="["+exp2[exp2.find("=")+1:]+"]"
                            if exp5[0]=="[" and exp5[1]!="[":
                                tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                                tipo=tipo.rstrip("list")
                                if exp2[:exp2.find("=")].find("val")==-1:
                                    resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                    i+=1
                                else:
                                    resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                    i+=1
                            elif exp5[1]=="[" and exp5[2]=="[":
                                tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                                tipo+=" list"
                                if exp2[:exp2.find("=")].find("val")==-1:
                                    resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                    i+=1
                                else:
                                    resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                    i+=1
                            else:
                                tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                                if exp2[:exp2.find("=")].find("val")==-1:
                                    resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                    i+=1
                                else:
                                    resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                    i+=1
                    elif exp4[1]=="[" and exp4[2]=="[":
                        exp4=exp4[1:len(exp4)-1]
                        tipo=evaluarTipoLista(exp4,i)+" list"
                        lista_temp+=[[exp3,tipo]]
                        if exp2.find("=")==-1:
                            resultado+=[[exp3,tipo,1]]
                            i+=1
                        else:
                            exp5="["+exp2[exp2.find("=")+1:]+"]"
                            if exp5[0]=="[" and exp5[1]!="[":
                                tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                                tipo=tipo.rstrip("list")
                                if exp2[:exp2.find("=")].find("val")==-1:
                                    resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                    i+=1
                                else:
                                    resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                    i+=1
                            elif exp5[1]=="[" and exp5[2]=="[":
                                tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                                tipo+=" list"
                                if exp2[:exp2.find("=")].find("val")==-1:
                                    resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                    i+=1
                                else:
                                    resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                    i+=1
                            else:
                                tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                                if exp2[:exp2.find("=")].find("val")==-1:
                                    resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                    i+=1
                                else:
                                    resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                    i+=1
                    else:
                        tipo=evaluarTipoLista(exp4,i)
                        tipo=tipo.rstrip("list")
                        if tipo[0]=="[" and tipo[1]=="[":
                            lar=len(tipo)
                            tipo=tipo[1:lar-1]
                            tipo="("+evaluarTipoLista(tipo,i)+")"+" list"
                            lista_temp+=[[exp3,tipo]]
                            if exp2.find("=")==-1:
                                resultado+=[[exp3,tipo,1]]
                                i+=1
                            else:
                                exp5="["+exp2[exp2.find("=")+1:]+"]"
                                if exp5[0]=="[" and exp5[1]!="[":
                                    tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                                    tipo=tipo.rstrip("list")
                                    if exp2[:exp2.find("=")].find("val")==-1:
                                        resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                        i+=1
                                    else:
                                        resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                        i+=1
                                elif exp5[1]=="[" and exp5[2]=="[":
                                    tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                                    tipo+=" list"
                                    if exp2[:exp2.find("=")].find("val")==-1:
                                        resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                        i+=1
                                    else:
                                        resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                        i+=1
                                else:
                                    tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                                    if exp2[:exp2.find("=")].find("val")==-1:
                                        resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                        i+=1
                                    else:
                                        resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                        i+=1
                        else:
                            tipo
                            lista_temp+=[[exp3,tipo]]
                            if exp2.find("=")==-1:
                                resultado+=[[exp3,tipo,1]]
                                i+=1
                            else:
                                exp5="["+exp2[exp2.find("=")+1:]+"]"
                                if exp5[0]=="[" and exp5[1]!="[":
                                    tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                                    tipo=tipo.rstrip("list")
                                    if exp2[:exp2.find("=")].find("val")==-1:
                                        resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                        i+=1
                                    else:
                                        resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                        i+=1
                                elif exp5[1]=="[" and exp5[2]=="[":
                                    tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                                    tipo+=" list"
                                    if exp2[:exp2.find("=")].find("val")==-1:
                                        resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                        i+=1
                                    else:
                                        resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                        i+=1
                                else:
                                    tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                                    if exp2[:exp2.find("=")].find("val")==-1:
                                        resultado+=[[exp2[:exp2.find("=")],tipo,1]]
                                        i+=1
                                    else:
                                        resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
                                        i+=1
    return resultado
## Function that infers the result type of an if/else expression.
def tipo_if(expresion,lista,i):
    """Infer the type of an if/else from its else branch.

    The text after "else" is wrapped in brackets and typed with
    evaluarExpresiones against the binding list `lista`; the list tag is
    then adjusted depending on whether the branch was a plain expression,
    a nested list literal, or a flat list literal.
    """
    # Everything after the "else" keyword is the deciding expression.
    rama_else = expresion[expresion.find("else") + 5:]
    rama_else = rama_else.rstrip(" ")
    envuelta = "[" + rama_else + "]"
    # The helper always reports "<T> list" for the wrapped form.
    tipo = evaluarExpresiones(envuelta, i, lista)
    if envuelta[1] != "[":
        # Plain (non-list) expression: drop the artificial list tag.
        tipo = tipo.rstrip("list")
    elif envuelta[2] == "[":
        # Nested list literal: add one more list level.
        tipo += " list"
    # Flat list literal ("[x" but not "[["): keep the inferred type as-is.
    return tipo
## Function that types a chain of "val name=expr" declarations inside a let,
## resolving variables against an inherited environment.
def evaluarExpresionValsLet(expresion,result,lista_temporal):
    """Type `val` declarations recursively, with an inherited environment.

    Like evaluarVals, but each right-hand side is typed with
    evaluarExpresiones against `lista_temporal` (the enclosing let's
    bindings), and an RHS containing "if" is delegated to tipo_if.

    Args:
        expresion: raw text of one or more declarations (leading "val "
            already stripped by the caller).
        result: accumulator of [name, type] pairs; mutated and returned.
        lista_temporal: [name, type] pairs visible from the enclosing let.

    Returns: `result` with all declarations appended.
    """
    pos=expresion.find("val")
    tipo=""
    if pos!=-1:
        # Text before the next "val" is the current declaration; the rest
        # is handled by the recursive calls below.
        expr_temp=expresion[:pos-1]
        expresion=expresion[pos+4:]
        if expr_temp.find("if")==-1:
            po= expr_temp.find("=")
            exp3=expr_temp[:po]
            exp4="["+expr_temp[po+1:]+"]"
            if exp4[1]=="[" and exp4[2]!="[":
                # Flat list RHS.
                # BUG FIX: the original read `exp1[po+1:]`, but `exp1` is
                # not defined in this function (NameError on this path);
                # `expr_temp` is the intended name, mirroring the
                # non-recursive branch below.
                exp4=expr_temp[po+1:]
                tipo=evaluarExpresiones(exp4,len(lista_temporal),lista_temporal)
                result+=[[exp3,tipo]]
                if expresion!="":
                    evaluarExpresionValsLet(expresion,result,lista_temporal)
            elif exp4[1]=="[" and exp4[2]=="[":
                # Nested list RHS: strip our wrapper brackets, add " list".
                exp4=exp4[1:len(exp4)-1]
                tipo=evaluarExpresiones(exp4,len(lista_temporal),lista_temporal)+" list"
                result+=[[exp3,tipo]]
                if expresion!="":
                    evaluarExpresionValsLet(expresion,result,lista_temporal)
            else:
                # Scalar RHS: type the wrapped form, then drop the list tag.
                tipo=evaluarExpresiones(exp4,len(lista_temporal),lista_temporal)
                tipo=tipo.rstrip("list")
                if tipo[0]=="[" and tipo[1]=="[":
                    lar=len(tipo)
                    tipo=tipo[1:lar-1]
                    # BUG FIX: the original wrapped the three arguments in an
                    # extra pair of parentheses, passing ONE tuple to
                    # evaluarExpresiones (TypeError); call it with three
                    # arguments, as the equivalent branch below does.
                    tipo="("+evaluarExpresiones(tipo,len(lista_temporal),lista_temporal)+")"+" list"
                    result+=[[exp3,tipo]]
                    if expresion!="":
                        evaluarExpresionValsLet(expresion,result,lista_temporal)
                else:
                    result+=[[exp3,tipo]]
                    if expresion!="":
                        evaluarExpresionValsLet(expresion,result,lista_temporal)
        else:
            # RHS contains an if/else: delegate to tipo_if.
            exp3=expr_temp[:expr_temp.find("=")]
            exp4=expr_temp[expr_temp.find("=")+1:]
            tipo=tipo_if(exp4,lista_temporal,len(lista_temporal))
            result+=[[exp3,tipo]]
            if expresion!="":
                evaluarExpresionValsLet(expresion,result,lista_temporal)
    else:
        # Last (or only) declaration: same classification, no recursion.
        po= expresion.find("=")
        exp3=expresion[:po]
        exp4=expresion[po+1:]
        if exp4.find("if")==-1:
            exp4="["+expresion[po+1:]+"]"
            if exp4[1]=="[" and exp4[2]!="[":
                exp4=expresion[po+1:]
                tipo=evaluarExpresiones(exp4,len(lista_temporal),lista_temporal)
                result+=[[exp3,tipo]]
            elif exp4[1]=="[" and exp4[2]=="[":
                exp4=exp4[1:len(exp4)-1]
                tipo=evaluarExpresiones(exp4,len(lista_temporal),lista_temporal)+" list"
                result+=[[exp3,tipo]]
            else:
                tipo=evaluarExpresiones(exp4,len(lista_temporal),lista_temporal)
                tipo=tipo.rstrip("list")
                if tipo[0]=="[" and tipo[1]=="[":
                    lar=len(tipo)
                    tipo=tipo[1:lar-1]
                    tipo="("+evaluarExpresiones(tipo,len(lista_temporal),lista_temporal)+")"+" list"
                    result+=[[exp3,tipo]]
                else:
                    result+=[[exp3,tipo]]
        else:
            tipo=tipo_if(exp4,lista_temporal,len(lista_temporal))
            result+=[[exp3,tipo]]
    return result
#### Function that receives a let expression and returns the type of its result.
def evaluarTipoLetLet(expresion,lista_TEMP):
    """Return the type string of a let's `in … end` body.

    Splits the let at "in", types the declarations into a local environment
    (`lista_temp`) and then types the body with evaluarExpresiones, falling
    back to the inherited environment `lista_TEMP` when the local one yields
    nothing.

    NOTE(review): the single-declaration branches below call
    evaluarTipoLista(...,i) with `i`, which is NOT defined in this function
    — those paths raise NameError if ever reached.  Left untouched here;
    a deliberate fix needs to pick the intended index (likely
    len(lista_TEMP)).

    Returns: the inferred type string (possibly "").
    """
    lista_temp=[]
    resultado=""
    pos=expresion.find("in")
    exp1=expresion[:pos]
    exp1.strip(" ")
    exp1=exp1.strip("val ")
    exp2=expresion[pos+2:]
    exp2=exp2.strip(" ")
    exp2=exp2.rstrip(" ")
    exp2=exp2.rstrip(" end")
    if exp1.find("val")!=-1:
        # Multiple declarations: type them all into the local environment.
        lista_temp=evaluarExpresionValsLet(exp1,[],lista_TEMP)
        if exp2.find("=")==-1:
            exp5="["+exp2+"]"
            if exp5[0]=="[" and exp5[1]!="[":
                tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                tipo=tipo.rstrip("list")
                if tipo=="":
                    tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                    resultado+=tipo
                else:
                    resultado+=tipo
            elif exp5[1]=="[" and exp5[2]=="[":
                tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                tipo+=" list"
                if tipo=="":
                    tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                    resultado+=tipo
                else:
                    resultado+=tipo
            else:
                tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                if tipo=="":
                    tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                    resultado+=tipo
                else:
                    resultado+=tipo
        else:
            exp5="["+exp2[exp2.find("=")+1:]+"]"
            if exp5[0]=="[" and exp5[1]!="[":
                tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                if tipo=="":
                    tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                    tipo=tipo.rstrip("list")
                    if exp2[:exp2.find("=")].find("val")==-1:
                        resultado+=tipo
                    else:
                        resultado+=tipo
                else:
                    tipo=tipo.rstrip("list")
                    if exp2[:exp2.find("=")].find("val")==-1:
                        resultado+=tipo
                    else:
                        resultado+=tipo
            elif exp5[1]=="[" and exp5[2]=="[":
                tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                if tipo=="":
                    tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                    tipo+=" list"
                    if exp2[:exp2.find("=")].find("val")==-1:
                        resultado+=tipo
                    else:
                        resultado+=tipo
                else:
                    tipo+=" list"
                    if exp2[:exp2.find("=")].find("val")==-1:
                        resultado+=tipo
                    else:
                        resultado+=tipo
            else:
                tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                if tipo=="":
                    tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                    if exp2[:exp2.find("=")].find("val")==-1:
                        resultado+=tipo
                    else:
                        resultado+=tipo
                else:
                    if exp2[:exp2.find("=")].find("val")==-1:
                        resultado+=tipo
                    else:
                        resultado+=tipo
    else: #############
        # Single declaration before "in".
        po= exp1.find("=")
        exp3=exp1[:po]
        exp4="["+exp1[po+1:]+"]"
        if exp4[1]=="[" and exp4[2]!="[":
            exp4=exp1[po+1:]
            # NOTE(review): `i` undefined here — NameError if reached.
            tipo=evaluarTipoLista(exp4,i)
            lista_temp+=[[exp3,tipo]]
            if exp2.find("=")==-1:
                resultado+=tipo
            else:
                exp5="["+exp2[exp2.find("=")+1:]+"]"
                if exp5[0]=="[" and exp5[1]!="[":
                    tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                    if tipo=="":
                        tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                        tipo=tipo.rstrip("list")
                        if exp2[:exp2.find("=")].find("val")==-1:
                            resultado+=tipo
                        else:
                            resultado+=tipo
                    else:
                        tipo=tipo.rstrip("list")
                        if exp2[:exp2.find("=")].find("val")==-1:
                            resultado+=tipo
                        else:
                            resultado+=tipo
                elif exp5[1]=="[" and exp5[2]=="[":
                    tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                    if tipo=="":
                        tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                        tipo+=" list"
                        if exp2[:exp2.find("=")].find("val")==-1:
                            resultado+=tipo
                        else:
                            resultado+=tipo
                    else:
                        tipo+=" list"
                        if exp2[:exp2.find("=")].find("val")==-1:
                            resultado+=tipo
                        else:
                            resultado+=tipo
                else:
                    tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                    if tipo=="":
                        tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                        if exp2[:exp2.find("=")].find("val")==-1:
                            resultado+=tipo
                        else:
                            resultado+=tipo
                    else:
                        if exp2[:exp2.find("=")].find("val")==-1:
                            resultado+=tipo
                        else:
                            resultado+=tipo
        elif exp4[1]=="[" and exp4[2]=="[":
            exp4=exp4[1:len(exp4)-1]
            # NOTE(review): `i` undefined here — NameError if reached.
            tipo=evaluarTipoLista(exp4,i)+" list"
            lista_temp+=[[exp3,tipo]]
            if exp2.find("=")==-1:
                resultado+=tipo
            else:
                exp5="["+exp2[exp2.find("=")+1:]+"]"
                if exp5[0]=="[" and exp5[1]!="[":
                    tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                    if tipo=="":
                        tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                        tipo=tipo.rstrip("list")
                        if exp2[:exp2.find("=")].find("val")==-1:
                            resultado+=tipo
                        else:
                            resultado+=tipo
                    else:
                        tipo=tipo.rstrip("list")
                        if exp2[:exp2.find("=")].find("val")==-1:
                            resultado+=tipo
                        else:
                            resultado+=tipo
                elif exp5[1]=="[" and exp5[2]=="[":
                    tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                    if tipo=="":
                        tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                        tipo+=" list"
                        if exp2[:exp2.find("=")].find("val")==-1:
                            resultado+=tipo
                        else:
                            resultado+=tipo
                    else:
                        tipo+=" list"
                        if exp2[:exp2.find("=")].find("val")==-1:
                            resultado+=tipo
                        else:
                            resultado+=tipo
                else:
                    tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                    if tipo=="":
                        tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                        if exp2[:exp2.find("=")].find("val")==-1:
                            resultado+=tipo
                        else:
                            resultado+=tipo
                    else:
                        if exp2[:exp2.find("=")].find("val")==-1:
                            resultado+=tipo
                        else:
                            resultado+=tipo
        else:
            tipo=evaluarExpresiones(exp4,len(lista_TEMP),lista_TEMP)
            tipo=tipo.rstrip("list")
            if tipo[0]=="[" and tipo[1]=="[":
                lar=len(tipo)
                tipo=tipo[1:lar-1]
                # NOTE(review): `i` undefined here — NameError if reached.
                tipo="("+evaluarTipoLista(tipo,i)+")"+" list"
                lista_temp+=[[exp3,tipo]]
                if exp2.find("=")==-1:
                    resultado+=tipo
                else:
                    exp5="["+exp2[exp2.find("=")+1:]+"]"
                    if exp5[0]=="[" and exp5[1]!="[":
                        tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                        if tipo=="":
                            tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                            tipo=tipo.rstrip("list")
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                        else:
                            tipo=tipo.rstrip("list")
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                    elif exp5[1]=="[" and exp5[2]=="[":
                        tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                        if tipo=="":
                            tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                            tipo+=" list"
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                        else:
                            tipo+=" list"
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                    else:
                        tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                        if tipo=="":
                            tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                        else:
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
            else:
                tipo
                lista_temp+=[[exp3,tipo]]
                if exp2.find("=")==-1:
                    exp5="["+exp2+"]"
                    if exp5[0]=="[" and exp5[1]!="[":
                        tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                        if tipo=="":
                            tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                            tipo=tipo.rstrip("list")
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                        else:
                            tipo=tipo.rstrip("list")
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                    elif exp5[1]=="[" and exp5[2]=="[":
                        tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                        if tipo=="":
                            tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                            tipo+=" list"
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                        else:
                            tipo+=" list"
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                    else:
                        tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                        if tipo=="":
                            tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                        else:
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                else:
                    exp5="["+exp2[exp2.find("=")+1:]+"]"
                    if exp5[0]=="[" and exp5[1]!="[":
                        tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                        if tipo=="":
                            tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                            tipo=tipo.rstrip("list")
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                        else:
                            tipo=tipo.rstrip("list")
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                    elif exp5[1]=="[" and exp5[2]=="[":
                        tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                        if tipo=="":
                            tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                            tipo+=" list"
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                        else:
                            tipo+=" list"
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                    else:
                        tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
                        if tipo=="":
                            tipo=evaluarExpresiones(exp5,len(lista_TEMP),lista_TEMP)
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
                        else:
                            if exp2[:exp2.find("=")].find("val")==-1:
                                resultado+=tipo
                            else:
                                resultado+=tipo
    return resultado
## Function that types a let nested inside another let.
def evaluarLetComplejo():
    """Type entries of the form "let … in let … end end".

    Declarations of the outer let are typed into `lista_temp`; the inner
    let(s) in the body are peeled off and typed with evaluarTipoLetLet
    against that environment.  Results are emitted as
    ["Resultado", type, 1] or [name, type, 1] triples.

    Returns: list of pass-through entries plus result triples.
    """
    lista = evaluarLetSencillo()
    largo=len (lista)
    i=0
    lista_temp=[]
    resultado=[]
    while i!=largo:
        if lista[i][0]!="let":
            resultado+=[lista[i]]
            i+=1
        else:
            expresion= lista[i][1]
            # Conditionals are handled by a separate pass.
            if expresion.find("if")!=-1:
                resultado+=[lista[i]]
                i+=1
            else:
                pos=expresion.find("in")
                exp1=expresion[:pos]
                exp1=exp1.strip("val ")
                exp2=expresion[pos+2:]
                # Body wrapped in parentheses vs. bare inner let.
                if exp2[1]=="(":
                    exp2=exp2.strip(" (")
                    exp2=exp2.strip("let ")
                    exp2=exp2.rstrip(" end)")
                    exp2=exp2.rstrip(" end")
                    if exp1.find("val")!=-1:
                        lista_temp=evaluarVals(exp1,i,[])
                        if exp2.find("let")==-1:
                            tipo=evaluarTipoLetLet(exp2,lista_temp)
                            val=exp2[exp2.find("in")+2:]
                            if val.find("val ")==-1:
                                resultado+=[["Resultado",tipo,1]]
                                i+=1
                            else:
                                val=val.strip("val ")
                                resultado+=[[val[:val.find("=")],tipo,1]]
                                i+=1
                        else:
                            aux1=exp2[:exp2.find("let")]
                            aux1=aux1.strip("(val ")
                            aux1=aux1[:aux1.find(" end")]
                            aux2=exp2[exp2.find("let")+4:]
                            aux2=aux2.strip("val ")
                            tipo1=evaluarTipoLetLet(aux1,lista_temp)
                            tipo2=evaluarTipoLetLet(aux2,lista_temp)
                            resultado+=[["Resultado",tipo1,1]]
                            i+=1
                    else:
                        # Single outer declaration: type it into lista_temp.
                        po= exp1.find("=")
                        exp3=exp1[:po]
                        exp4="["+exp1[po+1:]+"]"
                        if exp4[1]=="[" and exp4[2]!="[":
                            exp4=exp1[po+1:]
                            tipo=evaluarTipoLista(exp4,i)
                            lista_temp+=[[exp3,tipo]]
                            if exp2.find("let")==-1:
                                tipo=evaluarTipoLetLet(exp2,lista_temp)
                                val=exp2[exp2.find("in")+2:]
                                if val.find("val ")==-1:
                                    resultado+=[["Resultado",tipo,1]]
                                    i+=1
                                else:
                                    val=val.strip("val ")
                                    resultado+=[[val[:val.find("=")],tipo,1]]
                                    i+=1
                            else:
                                aux1=exp2[:exp2.find("let")]
                                aux1=aux1.strip("(val ")
                                aux1=aux1.strip("val ")
                                aux1=aux1[:aux1.find(" end")]
                                aux2=exp2[exp2.find("let")+4:]
                                aux2=aux2.strip("val ")
                                aux2=aux2[:aux2.find(" end")]
                                tipo1=evaluarTipoLetLet(aux1,lista_temp)
                                tipo2=evaluarTipoLetLet(aux2,lista_temp)
                                resultado+=[["Resultado",tipo1,1]]
                                i+=1
                        elif exp4[1]=="[" and exp4[2]=="[":
                            exp4=exp4[1:len(exp4)-1]
                            tipo=evaluarTipoLista(exp4,i)+" list"
                            lista_temp+=[[exp3,tipo]]
                            if exp2.find("let")==-1:
                                tipo=evaluarTipoLetLet(exp2,lista_temp)
                                val=exp2[exp2.find("in")+2:]
                                if val.find("val ")==-1:
                                    resultado+=[["Resultado",tipo,1]]
                                    i+=1
                                else:
                                    val=val.strip("val ")
                                    resultado+=[[val[:val.find("=")],tipo,1]]
                                    i+=1
                            else:
                                aux1=exp2[:exp2.find("let")]
                                aux1=aux1.strip("(val ")
                                aux1=aux1.strip("val ")
                                aux1=aux1[:aux1.find(" end")]
                                aux2=exp2[exp2.find("let")+4:]
                                aux2=aux2.strip("val ")
                                aux2=aux2[:aux2.find(" end")]
                                tipo1=evaluarTipoLetLet(aux1,lista_temp)
                                tipo2=evaluarTipoLetLet(aux2,lista_temp)
                                resultado+=[["Resultado",tipo1,1]]
                                i+=1
                        else:
                            tipo=evaluarTipoLista(exp4,i)
                            tipo=tipo.rstrip("list")
                            if tipo[0]=="[" and tipo[1]=="[":
                                lar=len(tipo)
                                tipo=tipo[1:lar-1]
                                tipo="("+evaluarTipoLista(tipo,i)+")"+" list"
                                lista_temp+=[[exp3,tipo]]
                                if exp2.find("let")==-1:
                                    tipo=evaluarTipoLetLet(exp2,lista_temp)
                                    val=exp2[exp2.find("in")+2:]
                                    if val.find("val ")==-1:
                                        resultado+=[["Resultado",tipo,1]]
                                        i+=1
                                    else:
                                        val=val.strip("val ")
                                        resultado+=[[val[:val.find("=")],tipo,1]]
                                        i+=1
                                else:
                                    aux1=exp2[:exp2.find("let")]
                                    aux1=aux1.strip("(val ")
                                    aux1=aux1.strip("val ")
                                    aux1=aux1[:aux1.find(" end")]
                                    aux2=exp2[exp2.find("let")+4:]
                                    aux2=aux2.strip("val ")
                                    aux2=aux2[:aux2.find(" end")]
                                    tipo1=evaluarTipoLetLet(aux1,lista_temp)
                                    tipo2=evaluarTipoLetLet(aux2,lista_temp)
                                    resultado+=[["Resultado",tipo1,1]]
                                    i+=1
                            else:
                                tipo
                                lista_temp+=[[exp3,tipo]]
                                if exp2.find("let")==-1:
                                    tipo=evaluarTipoLetLet(exp2,lista_temp)
                                    val=exp2[exp2.find("in")+2:]
                                    if val.find("val ")==-1:
                                        resultado+=[["Resultado",tipo,1]]
                                        i+=1
                                    else:
                                        val=val.strip("val ")
                                        resultado+=[[val[:val.find("=")],tipo,1]]
                                        i+=1
                                else:
                                    aux1=exp2[:exp2.find("let")]
                                    aux1=aux1.strip("(val ")
                                    aux1=aux1.strip("val ")
                                    aux1=aux1[:aux1.find(" end")]
                                    aux2=exp2[exp2.find("let")+4:]
                                    aux2=aux2.strip("val ")
                                    aux2=aux2[:aux2.find(" end")]
                                    tipo1=evaluarTipoLetLet(aux1,lista_temp)
                                    tipo2=evaluarTipoLetLet(aux2,lista_temp)
                                    resultado+=[["Resultado",tipo1,1]]
                                    i+=1
                else:
                    # Bare inner let (no surrounding parentheses).
                    exp2=exp2.strip("let ")
                    exp2=exp2.rstrip(" end")
                    if exp1.find("val")!=-1:
                        lista_temp=evaluarVals(exp1,i,[])
                        if exp2.find("let")==-1:
                            tipo=evaluarTipoLetLet(exp2,lista_temp)
                            val=exp2[exp2.find("in")+2:]
                            if val.find("val ")==-1:
                                resultado+=[["Resultado",tipo,1]]
                                i+=1
                            else:
                                val=val.strip("val ")
                                resultado+=[[val[:val.find("=")],tipo,1]]
                                i+=1
                        else:
                            aux1=exp2[:exp2.find("let")]
                            aux1=aux1.strip("(val ")
                            aux1=aux1[:aux1.find(" end")]
                            aux2=exp2[exp2.find("let")+4:]
                            aux2=aux2.strip("val ")
                            tipo1=evaluarTipoLetLet(aux1,lista_temp)
                            tipo2=evaluarTipoLetLet(aux2,lista_temp)
                            resultado+=[["Resultado",tipo1,1]]
                            i+=1
                    else:
                        po= exp1.find("=")
                        exp3=exp1[:po]
                        exp4="["+exp1[po+1:]+"]"
                        if exp4[1]=="[" and exp4[2]!="[":
                            exp4=exp1[po+1:]
                            tipo=evaluarTipoLista(exp4,i)
                            lista_temp+=[[exp3,tipo]]
                            if exp2.find("let")==-1:
                                tipo=evaluarTipoLetLet(exp2,lista_temp)
                                val=exp2[exp2.find("in")+2:]
                                if val.find("val ")==-1:
                                    resultado+=[["Resultado",tipo,1]]
                                    i+=1
                                else:
                                    val=val.strip("val ")
                                    resultado+=[[val[:val.find("=")],tipo,1]]
                                    i+=1
                            else:
                                aux1=exp2[:exp2.find("let")]
                                aux1=aux1.strip("(val ")
                                aux1=aux1.strip("val ")
                                aux1=aux1[:aux1.find(" end")]
                                aux2=exp2[exp2.find("let")+4:]
                                aux2=aux2.strip("val ")
                                aux2=aux2[:aux2.find(" end")]
                                tipo1=evaluarTipoLetLet(aux1,lista_temp)
                                tipo2=evaluarTipoLetLet(aux2,lista_temp)
                                resultado+=[["Resultado",tipo1,1]]
                                i+=1
                        elif exp4[1]=="[" and exp4[2]=="[":
                            exp4=exp4[1:len(exp4)-1]
                            tipo=evaluarTipoLista(exp4,i)+" list"
                            lista_temp+=[[exp3,tipo]]
                            if exp2.find("let")==-1:
                                tipo=evaluarTipoLetLet(exp2,lista_temp)
                                val=exp2[exp2.find("in")+2:]
                                if val.find("val ")==-1:
                                    resultado+=[["Resultado",tipo,1]]
                                    i+=1
                                else:
                                    val=val.strip("val ")
                                    resultado+=[[val[:val.find("=")],tipo,1]]
                                    i+=1
                            else:
                                aux1=exp2[:exp2.find("let")]
                                aux1=aux1.strip("(val ")
                                aux1=aux1.strip("val ")
                                aux1=aux1[:aux1.find(" end")]
                                aux2=exp2[exp2.find("let")+4:]
                                aux2=aux2.strip("val ")
                                aux2=aux2[:aux2.find(" end")]
                                tipo1=evaluarTipoLetLet(aux1,lista_temp)
                                tipo2=evaluarTipoLetLet(aux2,lista_temp)
                                resultado+=[["Resultado",tipo1,1]]
                                i+=1
                        else:
                            tipo=evaluarTipoLista(exp4,i)
                            tipo=tipo.rstrip("list")
                            if tipo[0]=="[" and tipo[1]=="[":
                                lar=len(tipo)
                                tipo=tipo[1:lar-1]
                                tipo="("+evaluarTipoLista(tipo,i)+")"+" list"
                                lista_temp+=[[exp3,tipo]]
                                if exp2.find("let")==-1:
                                    tipo=evaluarTipoLetLet(exp2,lista_temp)
                                    val=exp2[exp2.find("in")+2:]
                                    if val.find("val ")==-1:
                                        resultado+=[["Resultado",tipo,1]]
                                        i+=1
                                    else:
                                        val=val.strip("val ")
                                        resultado+=[[val[:val.find("=")],tipo,1]]
                                        i+=1
                                else:
                                    aux1=exp2[:exp2.find("let")]
                                    aux1=aux1.strip("(val ")
                                    aux1=aux1.strip("val ")
                                    aux1=aux1[:aux1.find(" end")]
                                    aux2=exp2[exp2.find("let")+4:]
                                    aux2=aux2.strip("val ")
                                    aux2=aux2[:aux2.find(" end")]
                                    tipo1=evaluarTipoLetLet(aux1,lista_temp)
                                    tipo2=evaluarTipoLetLet(aux2,lista_temp)
                                    resultado+=[["Resultado",tipo1,1]]
                                    i+=1
                            else:
                                tipo
                                lista_temp+=[[exp3,tipo]]
                                if exp2.find("let")==-1:
                                    tipo=evaluarTipoLetLet(exp2,lista_temp)
                                    val=exp2[exp2.find("in")+2:]
                                    if val.find("val ")==-1:
                                        resultado+=[["Resultado",tipo,1]]
                                        i+=1
                                    else:
                                        val=val.strip("val ")
                                        resultado+=[[val[:val.find("=")],tipo,1]]
                                        i+=1
                                else:
                                    aux1=exp2[:exp2.find("let")]
                                    aux1=aux1.strip("(val ")
                                    aux1=aux1.strip("val ")
                                    aux1=aux1[:aux1.find(" end")]
                                    aux2=exp2[exp2.find("let")+4:]
                                    aux2=aux2.strip("val ")
                                    aux2=aux2[:aux2.find(" end")]
                                    tipo1=evaluarTipoLetLet(aux1,lista_temp)
                                    tipo2=evaluarTipoLetLet(aux2,lista_temp)
                                    resultado+=[["Resultado",tipo1,1]]
                                    i+=1
    return resultado
## Funcion que evaluael tipo de una variable determina por un let sencillo
def evaluarLetSencillo2():
lista = evaluarLetComplejo()
largo=len (lista)
i=0
lista_temp=[]
resultado=[]
while i!=largo:
if lista[i][1].find("let")==-1:
resultado+=[lista[i]]
i+=1
else:
expresion= lista[i][1]
expresion=expresion[expresion.find("let")+4:]
if expresion.find("let")!=-1 or expresion.find("if")!=-1:
resultado+=[lista[i]]
i+=1
else:
pos=expresion.find("in")
exp1=expresion[:pos]
exp1=exp1.strip("val ")
exp2=expresion[pos+2:]
exp2=exp2.rstrip(" end")
if exp1.find("val")!=-1:
lista_temp=evaluarVals(exp1,i,[])
if exp2.find("=")==-1:
exp5="["+exp2+"]"
if exp5[0]=="[" and exp5[1]!="[":
tipo=evaluarExpresiones(exp2,len(lista_temp),lista_temp)
tipo=tipo.rstrip("list")
resultado+=[[lista[i][0],tipo]]
i+=1
elif exp5[1]=="[" and exp5[2]=="[":
tipo=evaluarExpresiones(exp2,len(lista_temp),lista_temp)
tipo+=" list"
resultado+=[[lista[i][0],tipo]]
i+=1
else:
tipo=evaluarExpresiones(exp2,len(lista_temp),lista_temp)
resultado+=[[lista[i][0],tipo]]
i+=1
else:
exp5="["+exp2[exp2.find("=")+1:]+"]"
if exp5[0]=="[" and exp5[1]!="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo=tipo.rstrip("list")
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
elif exp5[1]=="[" and exp5[2]=="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo+=" list"
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
po= exp1.find("=")
exp3=exp1[:po]
exp4="["+exp1[po+1:]+"]"
if exp4[1]=="[" and exp4[2]!="[":
exp4=exp1[po+1:]
tipo=evaluarTipoLista(exp4,i)
lista_temp+=[[exp3,tipo]]
if exp2.find("=")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
exp5="["+exp2[exp2.find("=")+1:]+"]"
if exp5[0]=="[" and exp5[1]!="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo=tipo.rstrip("list")
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
elif exp5[1]=="[" and exp5[2]=="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo+=" list"
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
elif exp4[1]=="[" and exp4[2]=="[":
exp4=exp4[1:len(exp4)-1]
tipo=evaluarTipoLista(exp4,i)+" list"
lista_temp+=[[exp3,tipo]]
if exp2.find("=")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
exp5="["+exp2[exp2.find("=")+1:]+"]"
if exp5[0]=="[" and exp5[1]!="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo=tipo.rstrip("list")
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
elif exp5[1]=="[" and exp5[2]=="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo+=" list"
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
tipo=evaluarTipoLista(exp4,i)
tipo=tipo.rstrip("list")
if tipo[0]=="[" and tipo[1]=="[":
lar=len(tipo)
tipo=tipo[1:lar-1]
tipo="("+evaluarTipoLista(tipo,i)+")"+" list"
lista_temp+=[[exp3,tipo]]
if exp2.find("=")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
exp5="["+exp2[exp2.find("=")+1:]+"]"
if exp5[0]=="[" and exp5[1]!="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo=tipo.rstrip("list")
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
elif exp5[1]=="[" and exp5[2]=="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo+=" list"
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
tipo
lista_temp+=[[exp3,tipo]]
if exp2.find("=")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
exp5="["+exp2[exp2.find("=")+1:]+"]"
if exp5[0]=="[" and exp5[1]!="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo=tipo.rstrip("list")
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
elif exp5[1]=="[" and exp5[2]=="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo+=" list"
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
resultado+=[[lista[i][0],tipo]]
i+=1
return resultado
## Funcion que determina el tipo de una variable que esta determinada por uno o varios lets
def evaluarLetComplejo2():
lista = evaluarLetSencillo2()
largo=len (lista)
i=0
lista_temp=[]
resultado=[]
while i!=largo:
if lista[i][1].find("let ")==-1:
resultado+=[lista[i]]
i+=1
else:
expresion= lista[i][1]
expresion= expresion[expresion.find(" val"):]
if expresion.find("if")!=-1:
resultado+=[lista[i]]
i+=1
else:
pos=expresion.find("in")
exp1=expresion[:pos]
exp1=exp1.strip("val ")
exp2=expresion[pos+2:]
if exp2[1]=="(":
exp2=exp2.strip(" (")
exp2=exp2.strip("let ")
exp2=exp2.rstrip(" end)")
exp2=exp2.rstrip(" end")
if exp1.find("val")!=-1:
lista_temp=evaluarVals(exp1,i,[])
if exp2.find("let")==-1:
tipo=evaluarTipoLetLet(exp2,lista_temp)
val=exp2[exp2.find("in")+2:]
if val.find("val ")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
val=val.strip("val ")
resultado+=[[lista[i][0],tipo]]
i+=1
else:
aux1=exp2[:exp2.find("let")]
aux1=aux1.strip("(val ")
aux1=aux1[:aux1.find(" end")]
aux2=exp2[exp2.find("let")+4:]
aux2=aux2.strip("val ")
tipo1=evaluarTipoLetLet(aux1,lista_temp)
tipo2=evaluarTipoLetLet(aux2,lista_temp)
resultado+=[[lista[i][0],tipo]]
i+=1
else:
po= exp1.find("=")
exp3=exp1[:po]
exp4="["+exp1[po+1:]+"]"
if exp4[1]=="[" and exp4[2]!="[":
exp4=exp1[po+1:]
tipo=evaluarTipoLista(exp4,i)
lista_temp+=[[exp3,tipo]]
if exp2.find("let")==-1:
tipo=evaluarTipoLetLet(exp2,lista_temp)
val=exp2[exp2.find("in")+2:]
if val.find("val ")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
val=val.strip("val ")
resultado+=[[lista[i][0],tipo]]
i+=1
else:
aux1=exp2[:exp2.find("let")]
aux1=aux1.strip("(val ")
aux1=aux1.strip("val ")
aux1=aux1[:aux1.find(" end")]
aux2=exp2[exp2.find("let")+4:]
aux2=aux2.strip("val ")
aux2=aux2[:aux2.find(" end")]
tipo1=evaluarTipoLetLet(aux1,lista_temp)
tipo2=evaluarTipoLetLet(aux2,lista_temp)
resultado+=[[lista[i][0],tipo]]
i+=1
elif exp4[1]=="[" and exp4[2]=="[":
exp4=exp4[1:len(exp4)-1]
tipo=evaluarTipoLista(exp4,i)+" list"
lista_temp+=[[exp3,tipo]]
if exp2.find("let")==-1:
tipo=evaluarTipoLetLet(exp2,lista_temp)
val=exp2[exp2.find("in")+2:]
if val.find("val ")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
val=val.strip("val ")
resultado+=[[lista[i][0],tipo]]
i+=1
else:
aux1=exp2[:exp2.find("let")]
aux1=aux1.strip("(val ")
aux1=aux1.strip("val ")
aux1=aux1[:aux1.find(" end")]
aux2=exp2[exp2.find("let")+4:]
aux2=aux2.strip("val ")
aux2=aux2[:aux2.find(" end")]
tipo1=evaluarTipoLetLet(aux1,lista_temp)
tipo2=evaluarTipoLetLet(aux2,lista_temp)
resultado+=[[lista[i][0],tipo]]
i+=1
else:
tipo=evaluarTipoLista(exp4,i)
tipo=tipo.rstrip("list")
if tipo[0]=="[" and tipo[1]=="[":
lar=len(tipo)
tipo=tipo[1:lar-1]
tipo="("+evaluarTipoLista(tipo,i)+")"+" list"
lista_temp+=[[exp3,tipo]]
if exp2.find("let")==-1:
tipo=evaluarTipoLetLet(exp2,lista_temp)
val=exp2[exp2.find("in")+2:]
if val.find("val ")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
val=val.strip("val ")
resultado+=[[lista[i][0],tipo]]
i+=1
else:
aux1=exp2[:exp2.find("let")]
aux1=aux1.strip("(val ")
aux1=aux1.strip("val ")
aux1=aux1[:aux1.find(" end")]
aux2=exp2[exp2.find("let")+4:]
aux2=aux2.strip("val ")
aux2=aux2[:aux2.find(" end")]
tipo1=evaluarTipoLetLet(aux1,lista_temp)
tipo2=evaluarTipoLetLet(aux2,lista_temp)
resultado+=[[lista[i][0],tipo]]
i+=1
else:
tipo
lista_temp+=[[exp3,tipo]]
if exp2.find("let")==-1:
tipo=evaluarTipoLetLet(exp2,lista_temp)
val=exp2[exp2.find("in")+2:]
if val.find("val ")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
val=val.strip("val ")
resultado+=[[lista[i][0],tipo]]
i+=1
else:
aux1=exp2[:exp2.find("let")]
aux1=aux1.strip("(val ")
aux1=aux1.strip("val ")
aux1=aux1[:aux1.find(" end")]
aux2=exp2[exp2.find("let")+4:]
aux2=aux2.strip("val ")
aux2=aux2[:aux2.find(" end")]
tipo1=evaluarTipoLetLet(aux1,lista_temp)
tipo2=evaluarTipoLetLet(aux2,lista_temp)
resultado+=[[lista[i][0],tipo]]
i+=1
else:
exp2=exp2.strip("let ")
exp2=exp2.rstrip(" end")
if exp1.find("val")!=-1:
lista_temp=evaluarVals(exp1,i,[])
if exp2.find("let")==-1:
tipo=evaluarTipoLetLet(exp2,lista_temp)
val=exp2[exp2.find("in")+2:]
if val.find("val ")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
val=val.strip("val ")
resultado+=[[lista[i][0],tipo]]
i+=1
else:
aux1=exp2[:exp2.find("let")]
aux1=aux1.strip("(val ")
aux1=aux1[:aux1.find(" end")]
aux2=exp2[exp2.find("let")+4:]
aux2=aux2.strip("val ")
tipo1=evaluarTipoLetLet(aux1,lista_temp)
tipo2=evaluarTipoLetLet(aux2,lista_temp)
resultado+=[[lista[i][0],tipo]]
i+=1
else:
po= exp1.find("=")
exp3=exp1[:po]
exp4="["+exp1[po+1:]+"]"
if exp4[1]=="[" and exp4[2]!="[":
exp4=exp1[po+1:]
tipo=evaluarTipoLista(exp4,i)
lista_temp+=[[exp3,tipo]]
if exp2.find("let")==-1:
tipo=evaluarTipoLetLet(exp2,lista_temp)
val=exp2[exp2.find("in")+2:]
if val.find("val ")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
val=val.strip("val ")
resultado+=[[lista[i][0],tipo]]
i+=1
else:
aux1=exp2[:exp2.find("let")]
aux1=aux1.strip("(val ")
aux1=aux1.strip("val ")
aux1=aux1[:aux1.find(" end")]
aux2=exp2[exp2.find("let")+4:]
aux2=aux2.strip("val ")
aux2=aux2[:aux2.find(" end")]
tipo1=evaluarTipoLetLet(aux1,lista_temp)
tipo2=evaluarTipoLetLet(aux2,lista_temp)
resultado+=[[lista[i][0],tipo]]
i+=1
elif exp4[1]=="[" and exp4[2]=="[":
exp4=exp4[1:len(exp4)-1]
tipo=evaluarTipoLista(exp4,i)+" list"
lista_temp+=[[exp3,tipo]]
if exp2.find("let")==-1:
tipo=evaluarTipoLetLet(exp2,lista_temp)
val=exp2[exp2.find("in")+2:]
if val.find("val ")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
val=val.strip("val ")
resultado+=[[lista[i][0],tipo]]
i+=1
else:
aux1=exp2[:exp2.find("let")]
aux1=aux1.strip("(val ")
aux1=aux1.strip("val ")
aux1=aux1[:aux1.find(" end")]
aux2=exp2[exp2.find("let")+4:]
aux2=aux2.strip("val ")
aux2=aux2[:aux2.find(" end")]
tipo1=evaluarTipoLetLet(aux1,lista_temp)
tipo2=evaluarTipoLetLet(aux2,lista_temp)
resultado+=[[lista[i][0],tipo]]
i+=1
else:
tipo=evaluarTipoLista(exp4,i)
tipo=tipo.rstrip("list")
if tipo[0]=="[" and tipo[1]=="[":
lar=len(tipo)
tipo=tipo[1:lar-1]
tipo="("+evaluarTipoLista(tipo,i)+")"+" list"
lista_temp+=[[exp3,tipo]]
if exp2.find("let")==-1:
tipo=evaluarTipoLetLet(exp2,lista_temp)
val=exp2[exp2.find("in")+2:]
if val.find("val ")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
val=val.strip("val ")
resultado+=[[lista[i][0],tipo]]
i+=1
else:
aux1=exp2[:exp2.find("let")]
aux1=aux1.strip("(val ")
aux1=aux1.strip("val ")
aux1=aux1[:aux1.find(" end")]
aux2=exp2[exp2.find("let")+4:]
aux2=aux2.strip("val ")
aux2=aux2[:aux2.find(" end")]
tipo1=evaluarTipoLetLet(aux1,lista_temp)
tipo2=evaluarTipoLetLet(aux2,lista_temp)
resultado+=[[lista[i][0],tipo]]
i+=1
else:
tipo
lista_temp+=[[exp3,tipo]]
if exp2.find("let")==-1:
tipo=evaluarTipoLetLet(exp2,lista_temp)
val=exp2[exp2.find("in")+2:]
if val.find("val ")==-1:
resultado+=[[lista[i][0],tipo]]
i+=1
else:
val=val.strip("val ")
resultado+=[[lista[i][0],tipo]]
i+=1
else:
aux1=exp2[:exp2.find("let")]
aux1=aux1.strip("(val ")
aux1=aux1.strip("val ")
aux1=aux1[:aux1.find(" end")]
aux2=exp2[exp2.find("let")+4:]
aux2=aux2.strip("val ")
aux2=aux2[:aux2.find(" end")]
tipo1=evaluarTipoLetLet(aux1,lista_temp)
tipo2=evaluarTipoLetLet(aux2,lista_temp)
resultado+=[[lista[i][0],tipo]]
i+=1
return resultado
## Funcion que evalua un if dentro de un val simple
def evaluarValIf():
lista=evaluarLetComplejo2()
largo=len(lista)
i=0
resultado=[]
tipo=""
while i!=largo:
if lista[i][1].find("if")!=0:
resultado+=[lista[i]]
i+=1
else:
expresion=lista[i][1]
tipo+=tipo_if(expresion,lista,i)
resultado+=[[lista[i][0],tipo]]
i+=1
return resultado
## funcion que evalua los let con if
def evaluarLetIfSimple():
lista = evaluarValIf()
largo=len (lista)
i=0
lista_temp=[]
resultado=[]
while i!=largo:
if lista[i][0]!="let":
resultado+=[lista[i]]
i+=1
else:
expresion= lista[i][1]
if expresion.find("let")!=-1:
resultado+=[lista[i]]
i+=1
else:
pos=expresion.find("in")
exp1=expresion[:pos]
exp1=exp1.strip("val ")
exp2=expresion[pos+2:]
exp2=exp2.rstrip(" end")
if exp2.find("if")!=-1:
if exp1.find("val")!=-1:
lista_temp=evaluarExpresionValsLet(exp1,[],lista)
if exp2.find("=")!=-1:
exp2=exp2.strip(" val ")
print(exp2,"hhhhhhhhhhh")
exp6=exp2[:exp2.find("=")]
exp5=exp2[exp2.find("=")+1:]
tipo=tipo_if(exp5,lista_temp,len(lista_temp))
resultado+=[[exp6,tipo,1]]
i+=1
else:
tipo=tipo_if(exp2,lista_temp,len(lista_temp))
resultado+=[["Resultado",tipo,1]]
i+=1
else:
po= exp1.find("=")
exp3=exp1[:po]
exp4=exp1[po+1:]
tipo=tipo_if(exp4,lista,i)
lista_temp+=[[exp3,tipo]]
if exp2.find("=")!=-1:
exp6=exp2[:exp2.find("=")]
exp5=exp2[exp2.find("=")+1:]
tipo=tipo_if(exp5,lista_temp,len(lista_temp))
resultado+=[[exp6,tipo,1]]
i+=1
else:
tipo=tipo_if(exp2,lista_temp,len(lista_temp))
resultado+=[["Resultado",tipo,1]]
i+=1
else:
if exp1.find("val")!=-1:
lista_temp=evaluarExpresionValsLet(exp1,[],lista)
if exp2.find("=")==-1:
exp5="["+exp2+"]"
if exp5[0]=="[" and exp5[1]!="[":
tipo=evaluarExpresiones(exp2,len(lista_temp),lista_temp)
tipo=tipo.rstrip("list")
resultado+=[[exp2,tipo,1]]
i+=1
elif exp5[1]=="[" and exp5[2]=="[":
tipo=evaluarExpresiones(exp2,len(lista_temp),lista_temp)
tipo+=" list"
resultado+=[[exp2,tipo,1]]
i+=1
else:
tipo=evaluarExpresiones(exp2,len(lista_temp),lista_temp)
resultado+=[[exp2,tipo,1]]
i+=1
else:
exp5="["+exp2[exp2.find("=")+1:]+"]"
if exp5[0]=="[" and exp5[1]!="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo=tipo.rstrip("list")
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[exp2[:exp2.find("=")],tipo,1]]
i+=1
else:
resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
i+=1
elif exp5[1]=="[" and exp5[2]=="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo+=" list"
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[exp2[:exp2.find("=")],tipo,1]]
i+=1
else:
resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
i+=1
else:
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[exp2[:exp2.find("=")],tipo,1]]
i+=1
else:
resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
i+=1
else:
po= exp1.find("=")
exp3=exp1[:po]
exp4=exp1[po+1:]
tipo=tipo_if(exp4,lista,i)
lista_temp+=[[exp3,tipo]]
if exp2.find("=")==-1:
resultado+=[[exp3,tipo,1]]
i+=1
else:
exp5="["+exp2[exp2.find("=")+1:]+"]"
if exp5[0]=="[" and exp5[1]!="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo=tipo.rstrip("list")
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[exp2[:exp2.find("=")],tipo,1]]
i+=1
else:
resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
i+=1
elif exp5[1]=="[" and exp5[2]=="[":
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
tipo+=" list"
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[exp2[:exp2.find("=")],tipo,1]]
i+=1
else:
resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
i+=1
else:
tipo=evaluarExpresiones(exp5,len(lista_temp),lista_temp)
if exp2[:exp2.find("=")].find("val")==-1:
resultado+=[[exp2[:exp2.find("=")],tipo,1]]
i+=1
else:
resultado+=[[exp2[5:exp2.find("=")],tipo,1]]
i+=1
return resultado
## Funcion que evalua el ambiente estatico e imprime la tabla
def Estatico():
lista= evaluarLetIfSimple()
largo= len(lista)
i=0
print("*************************AMBIENTE ESTÁTICO*******************************")
print("")
print("")
print ("\t","\t","VARIABLE","\t","\t", "TIPO")
print("")
while i!=largo:
if lista[i][0]=="Resultado":
print ("\t","\t",lista[i][0],"\t","\t",lista[i][1])
i+=1
else:
print ("\t","\t",lista[i][0],"\t","\t","\t",lista[i][1])
i+=1
##Funcion en cargada de administrar las diferentes expreciones que se puedan encontrar en el codigo :3
def Procesar_Exp(L):
tabla=[]
for i in L:
if "+" in i or"-" in i or"*" in i or"div" in i:
tabla += [i[4]+" "+"-->"+(operacion(i))]
elif i[8:11] == "True" or i[8:11] == "False":
tabla+=[i[4]+" "+"-->"+i[8:11]]
elif i[8:11]!="if "and i[8:11]!= "let":
tabla+=[i[4]+" "+"-->"+i[8:]]
elif i[8:11]=="if":
return procesar_if(i)
else:
return precesar_let(i)
return imprimirTab(tabla)
##def procesar_if(e):
## return""
##def procesar_let():
## return ""
## for i in tabla:
## if e[12]==i[0] and e==i[]:
##
##
def operacion(L,i=0,temp1="",temp2="",oper=""):
L=L[8:]
z=len(L)
while i<z:
if oper=="" and (esnumint(L[0])==True) :
temp1+=L[0]
i+=1
L=L[1:]
elif L[0]=="+" or L[0]=="-" or L[0]=="*":
oper+=L[0]
i+=1
L=L[1:]
elif L[0:3]=="div":
oper=oper+"div"
i+=3
L=L[3:]
elif oper!="" and(esnumint(L[0]))==True:
temp2+=L[0]
i+=1
L=L[1:]
else:
L=L[1:]
return(operar2num(temp1,oper,temp2))
def operar2num(a,b,c):
if b=="+":
return str((tonum(a)+tonum(c)))
elif b=="-":
return (tonum(a)-tonum(c))
elif b=="*":
return (tonum(a)*tonum(c))
elif b=="div":
return (tonum(a)//tonum(c))
## elif oper!="" and (esnumint(L[i])==False)and L[i]==",":
## end=False
def esnumint(s):
try:
int (s)
return True
except ValueError:
return False
def tonum(s):
try:
return int(s)
except ValueError:
return False
##def convertStr(s):
## try:
## ret = int(s)
## except ValueError:
##
## ret = float(s)
## return ret
##
##
def imprimirTab(t):
print(" Tabla Dinamica")
print("--------------------")
for i in t:
print(i)
print("--------------------")
return ""
def Menu():
print("******************BIENVENIDO******************")
print(" ")
print ("Digite 1 si desea ver el ambiente estático")
print ("Digite 2 si desea ver el ambiente dinámico")
opcion=int(input("Cual ambiente desea ver?: "))
if opcion==1:
print(" ")
Estatico()
opcion=input("Desea volver al menu? Y/N ")
if opcion=="Y":
print(" ")
print(" ")
Menu()
else:
print(" ")
print ("Gracias :D")
else:
print(" ")
Procesar_Exp(leer_archivo("Codigo.txt"))
opcion=input("Desea volver al menu? Y/N")
if opcion=="Y":
print(" ")
print(" ")
Menu()
else:
print(" ")
print ("Gracias :D")
Menu()
| [
"elizabethcordoba.a.70@gmail.com"
] | elizabethcordoba.a.70@gmail.com |
100392ad1e0121031ceff449ca347b8017cd7778 | 8cd3fffebd2d91e22923ce83642348fd07169298 | /dpca_local.py | ade94095994215ba151707e0dcebe7bf55e60e5b | [] | no_license | trendscenter/dPCA | e5231bb8af10d59c993878a0ee57922ee9134b2b | 53267145d22b88aa83ca9e2f5a4fa92df0c34b12 | refs/heads/master | 2021-08-09T00:27:03.828445 | 2017-11-11T18:42:30 | 2017-11-11T18:42:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | import json;
import argparse
from os import listdir
from os.path import isfile, join
import sys
import numpy as np
parser = argparse.ArgumentParser(description='help read in my data from my local machine!')
parser.add_argument('--run', type=str, help='grab coinstac args')
args = parser.parse_args()
args.run = json.loads(args.run)
username = args.run['username']
# inspect what args were passed
# runInputs = json.dumps(args.run, sort_keys=True, indent=4, separators=(',', ': '))
# sys.stderr.write(runInputs + "\n")
if 'remoteResult' in args.run and \
'data' in args.run['remoteResult'] and \
username in args.run['remoteResult']['data']:
sys.exit(0); # no-op! we already contributed our data
passedDir = args.run['userData']['dirs'][0]
sys.stderr.write("reading files from dir: " + passedDir)
files = [f for f in listdir(passedDir) if isfile(join(passedDir, f))]
# allFileData = {}
for f in files:
X = np.load(join(passedDir,f))
d, n = X.shape
K = 8
C = (1.0 / n) * np.dot(X, X.T)
U, S, V = np.linalg.svd(C)
Uk = U[:, :K]
Sk = np.diag(S)[:K, :K]
P = np.dot(Uk, np.sqrt(Sk))
en = np.trace(np.dot(Uk.T, np.dot(C, Uk)))
computationOutput = json.dumps({'P': P.tolist(), 'en': en, 'C': C.tolist()}, sort_keys=True, indent=4, separators=(',', ': '))
#computationOutput = json.dumps({'en': en}, sort_keys=True, indent=4, separators=(',', ': '))
# preview output data
# sys.stderr.write(computationOutput + "\n")
# send results
sys.stdout.write(computationOutput)
| [
"hafizimtiaz@sarwate-imac-4.engr.rutgers.edu"
] | hafizimtiaz@sarwate-imac-4.engr.rutgers.edu |
fb80e471da8b7561971f88e437a7d2eb084ee749 | 2aff50ef8d9750b92c32a5b9da04f2bfc0c6cb3f | /accounts/admin.py | d8c0dd3e6ec16864b2f16859a0d0ec58c78049c2 | [] | no_license | boon-teck/food_que_be | e3992911baa88cc0aa1e65b55285c249f2bf6da5 | 92da67300b60881e71d1375c7cf9789988cdb918 | refs/heads/master | 2023-06-25T14:48:01.556516 | 2021-07-15T18:48:41 | 2021-07-15T18:48:41 | 387,481,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | from django.contrib import admin
# from django.contrib.auth.admin import UserAdmin
from accounts.models import User
# Register your models here.
admin.site.register(User)
| [
"boonteck.soh@hotmail.com"
] | boonteck.soh@hotmail.com |
013a365cc06741507c0e6c074be9b9af84751cc1 | 2a5682a3214f9270a23867b9ce2d4c4d1a3c0686 | /bookmarket/bookshop/models.py | 19ebcff3a2492f8e775240d2da8eef7355fb7b44 | [] | no_license | AGiribabu/book_seller | 14c682d5277249e20c94f3c67b0d8ac05dccd4bb | 1cdd270ccb2cb9bbed9eff5cb65e1f39eb83e232 | refs/heads/master | 2020-04-13T18:25:34.444855 | 2018-12-28T06:42:58 | 2018-12-28T06:42:58 | 163,373,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | from django.db import models
class Genre(models.Model):
'''
Model for Genre
'''
name = models.CharField(max_length=20,blank=False)
genredetails = models.TextField(blank=False)
slug = models.CharField(max_length=20, blank=False)
MetaData = models.TextField(blank=True)
Objects = models.Manager()
class SubGenre(models.Model):
'''
Models for sub genre
'''
name = models.CharField(max_length=20, blank=False)
genre = models.ForeignKey(Genre, on_delete=models.CASCADE,related_name='SubGenre')
description = models.TextField(blank=False)
slug = models.CharField(max_length=20, blank=False)
Objects = models.Manager()
class Author(models.Model):
'''
Author model with forien key "author in Book model
'''
author_name = models.CharField(max_length=20,blank=False)
sub_genre = models.ForeignKey(SubGenre, on_delete=models.CASCADE, related_name='Author')
author_rating = models.IntegerField(blank=True)
about_author = models.TextField(blank=True)
Objects = models.Manager()
class Book(models.Model):
'''
Model represent books
'''
name = models.CharField(max_length=100, blank=False)
author = models.ForeignKey(Author,on_delete=models.CASCADE, related_name='Book')
status = models.CharField(max_length=25, blank=False, default='Unpublished')
cost = models.DecimalField(default=0.0, blank=False, max_digits=6, decimal_places=2)
currency = models.CharField(max_length=20,default='Ruppes', blank=False)
publish_time = models.DateTimeField(auto_now=True)
Objects = models.Manager() | [
"AGiribabu@gmail.com"
] | AGiribabu@gmail.com |
85b2a652dca6bb5f8af37a384d5e096c49a696d6 | ca2c82aecbe9bf6ef8fe227c60bbcacf26ae4837 | /dataset_preparation/split_tfrecords_vgg2.py | b7da326ef564dc7a9b9933e92bdc4d2d20ca1c9f | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | vmelement/ALAE | fb7483ca9766c9ce958f3bd22f62459ae97145bc | 523c1bfaf2d6bbe5798b43fa547bec5c9cf68fa4 | refs/heads/master | 2022-12-03T07:16:45.480355 | 2020-08-12T18:48:25 | 2020-08-12T18:48:25 | 284,524,903 | 0 | 0 | null | 2020-08-02T19:10:40 | 2020-08-02T19:10:40 | null | UTF-8 | Python | false | false | 3,787 | py | # Copyright 2019-2020 Stanislav Pidhorskyi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
import argparse
import logging
import tensorflow as tf
from defaults import get_cfg_defaults
def split_tfrecord(cfg, logger):
tfrecord_path = cfg.DATASET.FFHQ_SOURCE
ffhq_train_size = 739005
part_size = ffhq_train_size // cfg.DATASET.PART_COUNT
logger.info("Splitting into % size parts" % part_size)
for i in range(2, cfg.DATASET.MAX_RESOLUTION_LEVEL + 1):
with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
ds = tf.data.TFRecordDataset(tfrecord_path % i)
ds = ds.batch(part_size)
batch = tf.compat.v1.data.make_one_shot_iterator(ds).get_next()
part_num = 0
while True:
try:
records = sess.run(batch)
if part_num < cfg.DATASET.PART_COUNT:
part_path = cfg.DATASET.PATH % (i, part_num)
os.makedirs(os.path.dirname(part_path), exist_ok=True)
with tf.io.TFRecordWriter(part_path) as writer:
for record in records:
writer.write(record)
else:
part_path = cfg.DATASET.PATH_TEST % (i, part_num - cfg.DATASET.PART_COUNT)
os.makedirs(os.path.dirname(part_path), exist_ok=True)
with tf.io.TFRecordWriter(part_path) as writer:
for record in records:
writer.write(record)
part_num += 1
except tf.errors.OutOfRangeError:
break
def run():
parser = argparse.ArgumentParser(description="ALAE. Split FFHQ into parts for training and testing")
parser.add_argument(
"--config-file",
default="configs/vgg2_metric.yaml",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg = get_cfg_defaults()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
logger = logging.getLogger("logger")
logger.setLevel(logging.DEBUG)
output_dir = cfg.OUTPUT_DIR
os.makedirs(output_dir, exist_ok=True)
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.FileHandler(os.path.join(output_dir, 'log.txt'))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info(args)
logger.info("Loaded configuration file {}".format(args.config_file))
with open(args.config_file, "r") as cf:
config_str = "\n" + cf.read()
logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
split_tfrecord(cfg, logger)
if __name__ == '__main__':
run()
| [
"vm@discoverelement.com"
] | vm@discoverelement.com |
6726f4d241edca9a488fdffc69e739ecfe01af3e | d5f5cb6c35d551533dd37f89a7458c23a74ef3c8 | /semisup/test.py | 331cee0f3919d10526b8a47c3982a7fbcbd7cd4c | [
"Apache-2.0"
] | permissive | ahsanshahenshah/lba | 548b797d4786e8fe776df45a54db61b994cb778d | 551a9b6ebadeeb5dfdf465c1596ed5d6840899f3 | refs/heads/master | 2020-04-11T08:33:05.336763 | 2018-12-13T14:54:43 | 2018-12-13T14:54:43 | 161,647,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,139 | py | import tensorflow as tf
from time import time
import numpy as np
import tensorflow.contrib.eager as tfe
#tfe.enable_eager_execution()
#asd = tfe.Variable(1)
#n = #files = tf.data.Dataset.list_files(n)
#t = files.make_one_shot_iterator()
#dataset = tf.data.TFRecordDataset(files,num_parallel_reads=1)
#dataset = dataset.batch(1)
#dataset.repeat()
#iterator = dataset.make_one_shot_iterator()
#next_element = t.get_next()
def _parse_function(filename, label):
image_string = tf.read_file(filename)
image_decoded = tf.image.decode_jpeg(image_string)
image_resized = tf.image.resize_images(image_decoded, [28, 28])
return image_resized, label
# A vector of filenames.
filenames = tf.gfile.ListDirectory("/data/Dropbox/Data_new/id_workplace/lviv_data/idscam-dataset/test/blaupunkt/")
filenames = ["/data/Dropbox/Data_new/id_workplace/lviv_data/idscam-dataset/test/blaupunkt/"+f for f in filenames]
# `labels[i]` is the label for the image in `filenames[i].
labels = tf.constant([1 for _ in range(len(filenames))])
print labels
'''dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
dataset = dataset.shuffle(len(filenames))
dataset = dataset.map(_parse_function,num_parallel_calls=16).repeat()
dataset = dataset.batch(100)
dataset = dataset.apply(tf.contrib.data.prefetch_to_device('/gpu:0'))
iterator = dataset.make_one_shot_iterator()
im_ba = iterator.get_next()
start = time()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(10):
imgs, lbls = sess.run(im_ba)
print len(imgs)
print tf.one_hot(5,6).eval()
#for i in range(10):
# imgs, lbls = iterator.get_next()
print time()-start
print (imgs.shape)
'''
def make_datasets(n):
dataset = tf.data.Dataset.range(n).repeat().batch(n)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
return next_element
a=[]
for i in range (10):
a.append(make_datasets(i+1))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(200):
for j in range(10):
value = sess.run(a[j])
print (value) | [
"ahsan.shahensha@logivations.com"
] | ahsan.shahensha@logivations.com |
d081b498cb6178774bf8f35a2a67d72c12e3fec6 | 0ab6557072a2321b635d5c96042c4594ab055b57 | /mytopo.py | 3f9b59d1ecc636ab70aea3064a2bf368c76f1aed | [] | no_license | cccoca/Computer-Network | 4fdc0232a05d015ac3a9e26e6b05e33d0dde0f4a | 575e1847f3acc5ee3f15345ed87efe5c83695652 | refs/heads/master | 2020-03-19T14:14:56.206144 | 2018-06-08T12:29:16 | 2018-06-08T12:29:16 | 136,615,079 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | from mininet.topo import Topo
class MyTopo( Topo ):
"Simple topology example."
def __init__( self ):
"Create custom topo."
# Initialize topology
Topo.__init__( self )
# Add hosts
Host1 = self.addHost( 'h1' )
Host2 = self.addHost( 'h2' )
Host3 = self.addHost( 'h3' )
Host4 = self.addHost( 'h4' )
Host5 = self.addHost( 'h5' )
Host6 = self.addHost( 'h6' )
Host7 = self.addHost( 'h7' )
Host8 = self.addHost( 'h8' )
# Add switches
Switch1 = self.addSwitch( 's1' )
Switch2 = self.addSwitch( 's2' )
Switch3 = self.addSwitch( 's3' )
Switch4 = self.addSwitch( 's4' )
Switch5 = self.addSwitch( 's5' )
Switch6 = self.addSwitch( 's6' )
Switch7 = self.addSwitch( 's7' )
# Add links
self.addLink( Host1, Switch4 )
self.addLink( Host2, Switch4 )
self.addLink( Host3, Switch5 )
self.addLink( Host4, Switch5 )
self.addLink( Host5, Switch6 )
self.addLink( Host6, Switch6 )
self.addLink( Host7, Switch7 )
self.addLink( Host8, Switch7 )
self.addLink( Switch4, Switch2 )
self.addLink( Switch5, Switch2 )
self.addLink( Switch6, Switch3 )
self.addLink( Switch7, Switch3 )
self.addLink( Switch2, Switch1 )
self.addLink( Switch3, Switch1 )
topos = { 'mytopo': ( lambda: MyTopo() ) }
| [
"947493464@qq.com"
] | 947493464@qq.com |
28a46aba01574e1c43d45eafd77186fa39ec2fa1 | 4298360a664170a54c4cac29c4e22da8f7447af3 | /setup.py | 2871c139c141e1922918fe9b7d37a2fc4c573c53 | [] | no_license | kpj/PySOFT | bf5beebd5029554994d6bdeed913c55581b75321 | ad96f7406ccacce6811f8a4d712e117ecfa44413 | refs/heads/master | 2021-01-25T08:32:24.877472 | 2018-03-05T17:24:49 | 2018-03-05T17:24:49 | 30,477,571 | 0 | 2 | null | 2018-03-05T17:24:50 | 2015-02-08T02:01:51 | Python | UTF-8 | Python | false | false | 484 | py | from setuptools import setup
def readme():
    """Return the contents of README.md, decoded as UTF-8.

    Used below to populate the package's long_description. The explicit
    encoding avoids depending on the platform's locale default, which can
    break installs on systems with a non-UTF-8 locale.
    """
    import io  # io.open accepts `encoding` on both Python 2 and 3
    with io.open('README.md', encoding='utf-8') as f:
        return f.read()
# Package metadata for setuptools / PyPI; long_description comes from README.md.
setup(
    name='PySOFT',
    version='0.0.6',
    description='SOFT (Simple Omnibus Format in Text) file parser',
    long_description=readme(),
    url='https://github.com/kpj/PySOFT',
    author='kpj',
    author_email='kpjkpjkpjkpjkpjkpj@gmail.com',
    license='MIT',
    packages=['pysoft', 'pysoft.tests'],
    test_suite='nose.collector',
    tests_require=['nose'],
    scripts=['bin/pysoft'],
    install_requires=[]
)
| [
"kpjkpjkpjkpjkpjkpj@gmail.com"
] | kpjkpjkpjkpjkpjkpj@gmail.com |
664f09053f27f2b77899c5910bdf31676aa50d20 | 20a3cc1106fa86fc2d45cd1728cc87d5db97e1f7 | /boost/__init__.py | 5fcaf0454eb4a7d2110c8944fffced5eb4adc99e | [] | no_license | sarahboufelja54/galatea | f5664f0b3117629b2c5bbe078a1bd52bb5e359e6 | 002a9f2905868be25b71770190fb2d5eda11c861 | refs/heads/master | 2020-12-04T13:45:07.697189 | 2018-12-12T16:27:09 | 2018-12-12T16:27:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,999 | py | from collections import OrderedDict
import theano.tensor as T
from pylearn2.costs.cost import Cost
from theano.printing import Print
from pylearn2.expr.nnet import softmax_ratio
from pylearn2.models.mlp import MLP
from pylearn2.utils import block_gradient
from pylearn2.utils import safe_izip
from pylearn2.utils import sharedX
from theano.sandbox.rng_mrg import MRG_RandomStreams
import warnings
class BoostTry1(Cost):
    """
    This isn't thought through all that carefully, probably not correct at all
    """

    # This cost needs targets Y in addition to inputs X.
    supervised = True

    def __call__(self, model, X, Y, **kwargs):
        """Boosting-style exponential loss: weight each example by how badly
        the approximate (no-dropout) ensemble predicts it, then apply that
        weighting to the exponential loss of a sampled dropout sub-model.

        Y is assumed binary {0, 1}; it is remapped to {-1, +1} below.
        Returns a scalar theano expression.
        """
        Y = Y * 2 - 1
        # Get the approximate ensemble predictions
        Y_hat = model.fprop(X, apply_dropout=False)
        # Pull out the argument to the sigmoid
        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if not hasattr(op, 'scalar_op'):
            raise ValueError("Expected Y_hat to be generated by an Elemwise op, got "+str(op)+" of type "+str(type(op)))
        assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)
        # F: pre-sigmoid activation of the ensemble.
        F ,= owner.inputs
        # Larger weight where the ensemble margin Y*F is small/negative;
        # block_gradient makes the weighting act as a constant during backprop.
        weights = - Y * T.nnet.softmax(-(Y * F).T).T
        weights = block_gradient(weights)
        # Get the individual model predictions
        Y_hat = model.fprop(X, apply_dropout=True)
        # Pull out the argument to the sigmoid
        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if not hasattr(op, 'scalar_op'):
            raise ValueError("Expected Y_hat to be generated by an Elemwise op, got "+str(op)+" of type "+str(type(op)))
        assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)
        # f: pre-sigmoid activation of the sampled dropout sub-model.
        f ,= owner.inputs
        # Weighted exponential loss averaged over the batch.
        cost = (weights * T.exp(-Y * f)).mean()
        assert cost.ndim == 0
        return cost
class BoostTry2(Cost):
    """
    This isn't thought through all that carefully, probably not correct at all
    """

    supervised = True

    def __call__(self, model, X, Y, **kwargs):
        """Weighted sigmoid cross-entropy: upweight examples the deterministic
        ensemble is unsure about, then train a dropout sub-model on them.
        Y is assumed binary {0, 1}.
        """
        # Probability the ensemble assigns to the correct label of each unit.
        Y_hat = model.fprop(X, apply_dropout=False)
        prob = Y_hat * Y + (1-Y_hat) * (1-Y)
        # Low-confidence examples get larger weight; the .1 offset bounds it.
        # No gradient flows through the weighting.
        weight = 1./(.1 + prob)
        weight = block_gradient(weight)
        Y_hat = model.fprop(X, apply_dropout=True)
        # Pull out the argument to the sigmoid
        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if not hasattr(op, 'scalar_op'):
            raise ValueError("Expected Y_hat to be generated by an Elemwise op, got "+str(op)+" of type "+str(type(op)))
        assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)
        # Z: pre-sigmoid activation of the dropout sub-model.
        Z ,= owner.inputs
        # Numerically stable cross-entropy written in terms of Z:
        # softplus(-Z) = -log(sigmoid(Z)); softplus(Z) = -log(1 - sigmoid(Z)).
        term_1 = Y * T.nnet.softplus(-Z)
        term_2 = (1 - Y) * T.nnet.softplus(Z)
        total = term_1 + term_2
        total = weight * total
        ave = total.mean()
        return ave
#Try3 had a bug
class BoostTry4(Cost):
    """Boosting-like cost: reweight each example's negative log-likelihood by
    how poorly the deterministic (no-dropout) ensemble already predicts it."""

    supervised = True

    def __init__(self, k = 1, alpha = 1, beta =1):
        # k: additive offset that bounds the per-example weight
        # alpha: scale on the ensemble's probability of the correct class
        # beta: scale on the chance-level baseline subtracted from that probability
        self.k = k
        self.alpha = alpha
        self.beta = beta

    def get_weight(self, model, X, Y):
        """Per-example weights, normalized to sum to 1 over the batch.

        Weight is larger where the ensemble's probability of the correct class
        is low. Gradients are blocked, so the weights act as constants.
        """
        ensemble_Y = model.fprop(X, apply_dropout=False)
        # Probability the ensemble assigns to the correct class of each example.
        prob_of = (ensemble_Y * Y).sum(axis=1)
        # 1 / num_classes is the chance-level baseline.
        weight = 1./ (self.k + self.alpha * (prob_of - self.beta * 1./T.cast(Y.shape[1], 'float32')))
        weight = weight / weight.sum()
        weight = block_gradient(weight)
        return weight

    def get_monitoring_channels(self, model, X, Y, ** kwargs):
        # Track the spread of the example weights during training.
        weight = self.get_weight(model, X, Y)
        return { 'weight_min': weight.min(),
                'weight_max': weight.max(),
                'weight_mean' : weight.mean() }

    def __call__(self, model, X, Y, **kwargs):
        """Weighted negative log-likelihood of a sampled dropout sub-model."""
        weight = self.get_weight(model, X, Y)
        Y_hat = model.fprop(X, apply_dropout=True)
        # Recover the pre-softmax activations z from the symbolic graph,
        # skipping over a debugging Print op if one is present.
        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if isinstance(op, Print):
            assert len(owner.inputs) == 1
            Y_hat, = owner.inputs
            owner = Y_hat.owner
            op = owner.op
        assert isinstance(op, T.nnet.Softmax)
        z ,= owner.inputs
        assert z.ndim == 2
        # Numerically stable log-softmax.
        z = z - z.max(axis=1).dimshuffle(0, 'x')
        log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
        # we use sum and not mean because this is really one variable per row
        log_prob_of = (Y * log_prob).sum(axis=1)
        assert log_prob_of.ndim == 1
        # Weighted sum over the batch (weights already sum to 1).
        weighted_log_prob_of = T.dot(weight, log_prob_of)
        return - weighted_log_prob_of
class EnsembleLikelihoodTrainOne(Cost):
    """Cost pairing the deterministic ensemble prediction with one sampled
    dropout sub-model: standard NLL on the sub-model plus a negative-phase
    term built from the (gradient-blocked) ensemble/sub-model softmax ratio."""

    supervised = True

    def __call__(self, model, X, Y, **kwargs):
        # Deterministic ensemble prediction and a sampled dropout sub-model.
        Y_hat_e = model.fprop(X)
        Y_hat = model.fprop(X, apply_dropout=True)
        # Ratio of ensemble softmax to sub-model softmax, treated as constant.
        softmax_r = softmax_ratio(Y_hat_e, Y_hat)
        softmax_r = block_gradient(softmax_r)
        # Negative phase: pulls the sub-model's distribution toward the ensemble's.
        neg_terms = softmax_r * Y_hat
        neg = - neg_terms.sum(axis=1).mean(axis=0)
        # Recover the pre-softmax activations z of the dropout sub-model,
        # skipping over a debugging Print op if present.
        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if isinstance(op, Print):
            assert len(owner.inputs) == 1
            Y_hat, = owner.inputs
            owner = Y_hat.owner
            op = owner.op
        assert isinstance(op, T.nnet.Softmax)
        z ,= owner.inputs
        assert z.ndim == 2
        # Numerically stable log-softmax.
        z = z - z.max(axis=1).dimshuffle(0, 'x')
        log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
        # we use sum and not mean because this is really one variable per row
        log_prob_of = (Y * log_prob).sum(axis=1)
        assert log_prob_of.ndim == 1
        log_prob_of = log_prob_of.mean()
        return -(log_prob_of + neg)
class PoE_SameMask(Cost):
    """Product-of-experts-style cost: NLL of a dropout sub-model plus an
    alpha-weighted term whose (gradient-blocked) coefficients are the gap
    between the sub-model's and the ensemble's output distributions."""

    supervised = True

    def __init__(self, alpha = 1):
        # alpha: weight on the negative-phase (distribution-matching) term.
        self.alpha = alpha

    def __call__(self, model, X, Y, **kwargs):
        # Deterministic ensemble prediction and a sampled dropout sub-model.
        Y_hat_e = model.fprop(X)
        Y_hat = model.fprop(X, apply_dropout=True)
        # Recover the pre-softmax activations z of the dropout sub-model,
        # skipping over a debugging Print op if present.
        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if isinstance(op, Print):
            assert len(owner.inputs) == 1
            Y_hat, = owner.inputs
            owner = Y_hat.owner
            op = owner.op
        assert isinstance(op, T.nnet.Softmax)
        z ,= owner.inputs
        assert z.ndim == 2
        # Coefficients: where the sub-model overshoots the ensemble; constant
        # with respect to the gradient.
        z_weight = Y_hat - Y_hat_e
        z_weight = block_gradient(z_weight)
        neg = z_weight * z
        neg = neg.sum(axis=1).mean()
        # Numerically stable log-softmax.
        z = z - z.max(axis=1).dimshuffle(0, 'x')
        log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
        # we use sum and not mean because this is really one variable per row
        log_prob_of = (Y * log_prob).sum(axis=1)
        assert log_prob_of.ndim == 1
        log_prob_of = log_prob_of.mean()
        return -(log_prob_of + self.alpha * neg)
class DropoutBoosting(Cost):
    """
    Like PoE_SameMask but with control over dropout probabilities and scaling
    """

    supervised = True

    def __init__(self, default_input_include_prob=.5, input_include_probs=None,
            default_input_scale=2., input_scales=None, alpha = 1.):
        """
        During training, each input to each layer is randomly included or excluded
        for each example. The probability of inclusion is independent for each input
        and each example. Each layer uses "default_input_include_prob" unless that
        layer's name appears as a key in input_include_probs, in which case the input
        inclusion probability is given by the corresponding value.

        Each feature is also multiplied by a scale factor. The scale factor for each
        layer's input scale is determined by the same scheme as the input probabilities.

        alpha weights the negative-phase (distribution-matching) term in __call__.
        """

        if input_include_probs is None:
            input_include_probs = {}

        if input_scales is None:
            input_scales = {}

        # Store all constructor arguments as attributes of the same name.
        self.__dict__.update(locals())
        del self.self

    def __call__(self, model, X, Y, ** kwargs):
        # Sampled dropout sub-model (with the configured per-layer settings)
        # and the deterministic ensemble prediction.
        Y_hat = model.dropout_fprop(X, default_input_include_prob=self.default_input_include_prob,
                input_include_probs=self.input_include_probs, default_input_scale=self.default_input_scale,
                input_scales=self.input_scales
                )
        Y_hat_e = model.fprop(X)
        # Recover the pre-softmax activations z of the dropout sub-model,
        # skipping over a debugging Print op if present.
        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if isinstance(op, Print):
            assert len(owner.inputs) == 1
            Y_hat, = owner.inputs
            owner = Y_hat.owner
            op = owner.op
        assert isinstance(op, T.nnet.Softmax)
        z ,= owner.inputs
        assert z.ndim == 2
        # Gradient-blocked coefficients: sub-model minus ensemble distribution.
        z_weight = Y_hat - Y_hat_e
        z_weight = block_gradient(z_weight)
        neg = z_weight * z
        neg = neg.sum(axis=1).mean()
        # Numerically stable log-softmax.
        z = z - z.max(axis=1).dimshuffle(0, 'x')
        log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
        # we use sum and not mean because this is really one variable per row
        log_prob_of = (Y * log_prob).sum(axis=1)
        assert log_prob_of.ndim == 1
        log_prob_of = log_prob_of.mean()
        return -(log_prob_of + self.alpha * neg)
class PerLayerRescaler(MLP):
    """Wraps an existing MLP and learns one scalar multiplier per layer,
    applied to that layer's input. Only the scalars are trainable; the
    wrapped MLP's weights are left untouched."""

    def __init__(self, mlp, max_scale = 10.):
        # mlp: the pre-trained MLP to wrap; max_scale: clip ceiling for scales.
        self.__dict__.update(locals())
        del self.self

        # One learnable scalar (initialized to 1) per layer of the wrapped MLP.
        self._params = []
        for layer in mlp.layers:
            self._params.append(sharedX(1.))

        self.batch_size = mlp.batch_size
        self.force_batch_size = mlp.force_batch_size

    def get_input_space(self):
        return self.mlp.get_input_space()

    def get_params(self):
        # Only the scale factors are trainable, not the wrapped MLP's weights.
        return list(self._params)

    def censor_updates(self, updates):
        # Keep every scale in [0, max_scale] after each update.
        for key in updates:
            updates[key] = T.clip(updates[key], 0, self.max_scale)

    def fprop(self, state_below):
        # Scale each layer's input by its scalar before forwarding.
        for layer, scale in safe_izip(self.mlp.layers, self._params):
            state_below = layer.fprop(state_below * scale)

        return state_below

    def get_monitoring_channels(self, X=None, Y=None):
        """
        Note: X and Y may both be None, in the case when this is
        a layer of a bigger MLP.
        """

        state = X
        rval = OrderedDict()

        # Forward through each layer, collecting its channels along the way.
        for layer, scale in safe_izip(self.mlp.layers, self._params):
            state = state * scale
            ch = layer.get_monitoring_channels()
            for key in ch:
                rval[layer.layer_name+'_'+key] = ch[key]
            state = layer.fprop(state)
            args = [state]
            if layer is self.mlp.layers[-1]:
                args.append(Y)
            ch = layer.get_monitoring_channels_from_state(*args)
            for key in ch:
                rval[layer.layer_name+'_'+key]  = ch[key]

        # Report the current value of each learned scale.
        for i in xrange(len(self._params)):
            rval['scale_input_to_' + self.mlp.layers[i].layer_name] = self._params[i]

        return rval

    def get_output_space(self):
        return self.mlp.layers[-1].get_output_space()

    def get_weights(self):
        return self.mlp.get_weights()

    def get_weights_format(self):
        return self.mlp.get_weights_format()

    def get_weights_topo(self):
        return self.mlp.get_weights_topo()

    def cost(self, Y, Y_hat):
        # Delegate the cost to the wrapped MLP's output layer.
        return self.mlp.layers[-1].cost(Y, Y_hat)

    def get_lr_scalers(self):
        return {}
class PerUnitRescaler(MLP):
    """Wraps an existing MLP and learns one multiplier per *input unit* of
    each layer (cf. PerLayerRescaler, which learns one scalar per layer).
    Only the multipliers are trainable; the wrapped MLP's weights are left
    untouched."""

    def __init__(self, mlp, max_scale = 10.):
        # mlp: the pre-trained MLP to wrap; max_scale: clip ceiling for scales.
        self.__dict__.update(locals())
        del self.self

        # One learnable tensor of scales (initialized to 1) shaped like each
        # layer's input space.
        self._params = []
        for layer in mlp.layers:
            self._params.append(sharedX(layer.get_input_space().get_origin() + 1.))

        self.batch_size = mlp.batch_size
        self.force_batch_size = mlp.force_batch_size

    def get_input_space(self):
        return self.mlp.get_input_space()

    def get_params(self):
        # Only the scale factors are trainable, not the wrapped MLP's weights.
        return list(self._params)

    def censor_updates(self, updates):
        # Keep every scale in [0, max_scale] after each update.
        for key in updates:
            updates[key] = T.clip(updates[key], 0, self.max_scale)

    def fprop(self, state_below):
        # Scale each layer's input elementwise before forwarding.
        for layer, scale in safe_izip(self.mlp.layers, self._params):
            state_below = layer.fprop(self.scale(state_below, layer, scale))

        return state_below

    def scale(self, state, layer, scale):
        """Broadcast-multiply `state` by the per-unit `scale` tensor,
        aligning axes with the layer's input space layout."""
        axes = range(state.ndim)
        if state.ndim == 2:
            # (batch, unit) layout: broadcast scale over the batch axis.
            axes = ('x', 0)
        else:
            # Convolutional layout must be ('c', 0, 1, 'b'): broadcast over
            # the trailing batch axis.
            assert tuple(layer.get_input_space().axes) == tuple(['c', 0, 1, 'b'])
            axes = (0, 1, 2, 'x')
        scaler = scale.dimshuffle(*axes)

        return state * scaler

    def get_monitoring_channels(self, X=None, Y=None):
        """
        Note: X and Y may both be None, in the case when this is
        a layer of a bigger MLP.
        """

        state = X
        rval = OrderedDict()

        # Forward through each layer, collecting its channels along the way.
        for layer, scale in safe_izip(self.mlp.layers, self._params):
            state = self.scale(state, layer, scale)
            ch = layer.get_monitoring_channels()
            for key in ch:
                rval[layer.layer_name+'_'+key] = ch[key]
            state = layer.fprop(state)
            args = [state]
            if layer is self.mlp.layers[-1]:
                args.append(Y)
            ch = layer.get_monitoring_channels_from_state(*args)
            for key in ch:
                rval[layer.layer_name+'_'+key]  = ch[key]

        # Summarize each layer's scale tensor. BUG FIX: the original wrote
        # min, mean and max all under the '_min' key, so only the max value
        # survived (mislabeled as the min). Use distinct keys.
        for i in xrange(len(self._params)):
            rval['scale_input_to_' + self.mlp.layers[i].layer_name + '_min'] = self._params[i].min()
            rval['scale_input_to_' + self.mlp.layers[i].layer_name + '_mean'] = self._params[i].mean()
            rval['scale_input_to_' + self.mlp.layers[i].layer_name + '_max'] = self._params[i].max()

        return rval

    def get_output_space(self):
        return self.mlp.layers[-1].get_output_space()

    def get_weights(self):
        return self.mlp.get_weights()

    def get_weights_format(self):
        return self.mlp.get_weights_format()

    def get_weights_topo(self):
        return self.mlp.get_weights_topo()

    def cost(self, Y, Y_hat):
        # Delegate the cost to the wrapped MLP's output layer.
        return self.mlp.layers[-1].cost(Y, Y_hat)

    def get_lr_scalers(self):
        return {}
class LoneRangerDropoutBoosting(Cost):
    """
    Like PoE_SameMask but with control over dropout probabilities and scaling
    """

    supervised = True

    def __init__(self, default_input_include_prob=.5, input_include_probs=None,
            default_input_scale=2., input_scales=None, alpha = 1., scale_ensemble=False,
            dont_drop_input = None):
        """
        During training, each input to each layer is randomly included or excluded
        for each example. The probability of inclusion is independent for each input
        and each example. Each layer uses "default_input_include_prob" unless that
        layer's name appears as a key in input_include_probs, in which case the input
        inclusion probability is given by the corresponding value.

        Each feature is also multiplied by a scale factor. The scale factor for each
        layer's input scale is determined by the same scheme as the input probabilities.

        alpha weights the negative-phase term; scale_ensemble and dont_drop_input
        are forwarded to the model's lone_ranger_dropout_fprop.
        """

        if dont_drop_input is None:
            dont_drop_input = []

        if input_include_probs is None:
            input_include_probs = {}

        if input_scales is None:
            input_scales = {}

        # Store all constructor arguments as attributes of the same name.
        self.__dict__.update(locals())
        del self.self

    def __call__(self, model, X, Y, ** kwargs):
        # The model (see LoneRanger) returns both the dropout sub-model output
        # and the complementary "ensemble" output built from the same masks.
        Y_hat, Y_hat_e = model.lone_ranger_dropout_fprop(X, default_input_include_prob=self.default_input_include_prob,
                input_include_probs=self.input_include_probs, default_input_scale=self.default_input_scale,
                input_scales=self.input_scales, scale_ensemble=self.scale_ensemble, dont_drop_input = self.dont_drop_input
                )
        # Recover the pre-softmax activations z of the dropout sub-model,
        # skipping over a debugging Print op if present.
        assert hasattr(Y_hat, 'owner')
        owner = Y_hat.owner
        assert owner is not None
        op = owner.op
        if isinstance(op, Print):
            assert len(owner.inputs) == 1
            Y_hat, = owner.inputs
            owner = Y_hat.owner
            op = owner.op
        assert isinstance(op, T.nnet.Softmax)
        z ,= owner.inputs
        assert z.ndim == 2
        # Gradient-blocked coefficients: sub-model minus complement distribution.
        z_weight = Y_hat - Y_hat_e
        z_weight = block_gradient(z_weight)
        neg = z_weight * z
        neg = neg.sum(axis=1).mean()
        # Numerically stable log-softmax.
        z = z - z.max(axis=1).dimshuffle(0, 'x')
        log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
        # we use sum and not mean because this is really one variable per row
        log_prob_of = (Y * log_prob).sum(axis=1)
        assert log_prob_of.ndim == 1
        log_prob_of = log_prob_of.mean()
        return -(log_prob_of + self.alpha * neg)
class LoneRanger(MLP):
    """MLP variant whose dropout fprop also propagates the *complementary*
    masked signal: for each sampled mask d, one stream sees state * d and the
    other sees state * (1 - d). Used by LoneRangerDropoutBoosting."""

    def lone_ranger_dropout_fprop(self, state_below, default_input_include_prob=0.5, input_include_probs=None,
            default_input_scale=2., input_scales=None, scale_ensemble=False, dont_drop_input = None):
        """
        state_below: The input to the MLP

        Returns the output of the MLP, when applying dropout to the input and intermediate layers.
        Each input to each layer is randomly included or excluded
        for each example. The probability of inclusion is independent for each input
        and each example. Each layer uses "default_input_include_prob" unless that
        layer's name appears as a key in input_include_probs, in which case the input
        inclusion probability is given by the corresponding value.

        Each feature is also multiplied by a scale factor. The scale factor for each
        layer's input scale is determined by the same scheme as the input probabilities.

        Returns a (lone_ranger_output, complement_output) pair, computed from
        complementary dropout masks at every dropped layer.
        """

        if dont_drop_input is None:
            dont_drop_input = []

        warnings.warn("dropout should be implemented with fixed_var_descr to"
                " make sure it works with BGD, this is just a hack to get it"
                "working with SGD")

        if input_include_probs is None:
            input_include_probs = {}

        if input_scales is None:
            input_scales = {}

        # Every configured layer name must actually exist on this MLP.
        assert all(layer_name in self.layer_names for layer_name in input_include_probs)
        assert all(layer_name in self.layer_names for layer_name in input_scales)

        theano_rng = MRG_RandomStreams(self.rng.randint(2**15))

        # Track the two complementary streams as a (lone_ranger, ensemble) pair.
        state_below = (state_below, state_below)

        for layer in self.layers:
            layer_name = layer.layer_name

            if layer_name in input_include_probs:
                include_prob = input_include_probs[layer_name]
            else:
                include_prob = default_input_include_prob

            if layer_name in input_scales:
                scale = input_scales[layer_name]
            else:
                scale = default_input_scale

            if layer_name not in dont_drop_input:
                state_below = self.apply_lone_ranger_dropout(state=state_below,
                        include_prob=include_prob,
                        theano_rng=theano_rng,
                        scale=scale, scale_ensemble=scale_ensemble)
            # Forward both streams through the same layer.
            state_below = (layer.fprop(state_below[0]), layer.fprop(state_below[1]))

        return state_below

    def apply_lone_ranger_dropout(self, state, include_prob, scale, theano_rng,
            scale_ensemble=False):
        """Sample one Bernoulli mask d and return the complementary pair
        (lone_ranger * d * scale, ensemble * (1 - d) * ensemble_scale)."""
        if include_prob in [None, 1.0, 1]:
            # Inclusion probability 1 means no dropout: pass both streams through.
            return state
        assert scale is not None
        assert isinstance(state, tuple)
        assert len(state) == 2
        lone_ranger_state, ensemble_state = state
        if isinstance(lone_ranger_state, tuple) or isinstance(ensemble_state, tuple):
            # Composite (nested-tuple) layer states are not supported.
            raise NotImplementedError()
        d = theano_rng.binomial(p=include_prob, size=lone_ranger_state.shape, dtype=lone_ranger_state.dtype)
        ensemble_scale = 1
        if scale_ensemble:
            ensemble_scale = scale
        return (lone_ranger_state * d * scale, ensemble_state * (1 -d) * ensemble_scale)
| [
"goodfellow.ian@gmail.com"
] | goodfellow.ian@gmail.com |
4ee09b6d1e1b60ca98c62f82e745121f889643a0 | a667d0e196e2772f995aa125e466f8bacb0186e1 | /cortex/server/server.py | 17d73c31af025e3517b024f0f16b7b7830b48ad9 | [] | no_license | taufnast/cortex | 8c0bf0457084e575cc918eea290b4fd27cc8919c | 7bf8b9768f99b0c62a776ab57e3cae4a0b8ab763 | refs/heads/master | 2022-09-06T06:37:11.932084 | 2020-06-01T20:26:07 | 2020-06-01T20:26:07 | 245,861,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,181 | py | import numpy as np
import yaml
import json
import copy
from flask import Flask
from flask import request
from pika import BasicProperties
from cortex.msgbrokers import find_msg_broker
from cortex.reader import parse_from
from google.protobuf.json_format import MessageToDict, MessageToJson, ParseDict
from pathlib import Path
from secrets import token_hex
def get_parsers():
    """
    Load the parser configuration from config/parsers.yaml.

    :return: the mapping under the top-level "parsers" key, or {} if that
             key is absent from the file.
    """
    with open('config/parsers.yaml') as config_file:
        config = yaml.load(config_file, Loader=yaml.FullLoader)
    if "parsers" not in config:
        return {}
    return config["parsers"]
def save_data(parsers, snapshot):
    """
    save data is looking for 'data' field (as it stores the 'big data', based on the cortex.proto)
    :param parsers: parsers as a dict (as return value of get_parsers())
    :param snapshot: as protobuf object
    :return: paths dict {parser_name : path__where_data_is_stored}
    """
    parser_paths = {}

    # parsers as appear in the parsers.yaml file should include path option
    # which will be used for saving big data
    for parser, config in parsers.items():
        if config is not None and "path" in config:
            parser_dir = Path(config["path"])
            parser_dir.mkdir(parents=True, exist_ok=True)
            parser_paths[parser] = parser_dir

    # For each snapshot field that has a configured parser, replace the
    # directory entry with a concrete file path (random suffix avoids
    # collisions between snapshots) and write the payload to disk.
    for desc, val in snapshot.ListFields():
        if desc.name in parser_paths:
            if desc.name == "color_image":  # save bytes object
                parser_paths[desc.name] = parser_paths[desc.name] / ("data_" + token_hex(5) + ".snap")
                with open(parser_paths[desc.name], "wb") as f:
                    f.write(snapshot.color_image.data)
            elif desc.name == "depth_image":  # save numpy array
                parser_paths[desc.name] = parser_paths[desc.name] / ("data_" + token_hex(5) + ".npy")
                np.save(parser_paths[desc.name], snapshot.depth_image.data)

    # NOTE(review): parsers configured with a path but absent from the
    # snapshot (or not handled above) still map to their bare directory here.
    return parser_paths
def snapshot_to_dict(parsers, snapshot_path, snapshot):
    """
    ParseDict (see next line) function raises an error in Rabbitmq because it can't recognize protobuf descriptors
    serial_dic = ParseDict(dic_snap, create_empty_snapshot(), ignore_unknown_fields=True)
    so we created a copy (of type dictionary)

    :param parsers: parsers as a dict (as return value of get_parsers())
    :param snapshot_path: a path that will help us to relate a snapshot to a user (users/user_id/snapshots/snapshot_id)
    :param snapshot: protobuf object
    :return: serialized dictionary (JSON string)
    """
    # Persist the large binary fields to disk first.
    data_paths = save_data(parsers, snapshot)
    dic_snap = MessageToDict(snapshot, including_default_value_fields=True, preserving_proto_field_name=True)

    # replace data attr with data_path
    for parser, data_path in data_paths.items():
        if parser in dic_snap:
            del dic_snap[parser]["data"]
            dic_snap[parser]["data_path"] = str(data_paths[parser])

    # add snapshot path (will be used to relate between the user and its snapshots)
    dic_snap["snapshot_path"] = snapshot_path if snapshot_path else ""
    # print("ds:", dic_snap)

    # Deep-copy to detach from any protobuf-descriptor-backed values before
    # handing the structure to the message broker.
    new_snap_dic = copy.deepcopy(dic_snap)
    return json.dumps(new_snap_dic)
class FlaskInit:
    """Builds the Flask app for the cortex server and forwards incoming
    users/snapshots either to a user-supplied publish callable or to a
    message queue identified by URL."""

    def __init__(self, publish=None, msg_queue_url=""):
        # publish: optional callable receiving the deserialized payload.
        # msg_queue_url: broker URL used when no publish callable is given.
        self.publish = publish
        self.msg_queue_url = msg_queue_url
        self.parsers = get_parsers()
        self.msg_broker = None

    def setup_publisher(self, data, props=None):
        """Send `data` (a JSON string) via the publish callable or the
        message broker. Returns False when neither is configured."""
        if self.publish:
            self.publish(json.loads(data))
        elif self.msg_queue_url != "":
            with find_msg_broker(self.msg_queue_url) as msq:
                msq.declare_exchange()
                msq.publish(data, props)
        else:
            return False
        return True

    def create_app(self):
        """Initialize the core application."""
        app = Flask(__name__)

        with app.app_context():
            # Include our Routes
            @app.route('/new_user', methods=['POST'])
            def add_user():
                # Publishes the posted form fields and returns the list of
                # available parser names to the client.
                assert request.method == 'POST'
                if not self.setup_publisher(json.dumps(request.form.to_dict())):
                    data = {"error": "no publisher and no message queue url were supplied."}
                    return data
                parsers = [k for k in self.parsers.keys()]
                # return list of available parsers
                return {"parsers": parsers}

            @app.route('/snapshot/<int:user_id>/<int:snapshot_id>', methods=['POST'])
            def add_snapshot(user_id, snapshot_id):
                # Receives an uploaded protobuf snapshot, persists its big
                # fields, and publishes the serialized snapshot with routing
                # metadata in the message properties.
                snapshot_path = Path("users") / str(user_id) / "snapshots" / str(snapshot_id)
                Path(snapshot_path).mkdir(parents=True, exist_ok=True)
                snap_file = request.files["file"].filename
                with open(snap_file, "rb") as f:
                    snap = parse_from(f.read())
                snap_serial_dic = snapshot_to_dict(self.parsers, str(snapshot_path), snap)
                if not self.setup_publisher(
                        snap_serial_dic,
                        BasicProperties(
                            headers={"snapshot_id": snapshot_id, "user_id": user_id},
                            message_id="snap_"+str(snapshot_id)+"_"+str(user_id))
                        ):
                    data = {"error": "no publisher and no message queue url were supplied."}
                    # headers = {"Content-Type": "application/json"}
                    return data
                return ""

        return app
def run_server(host='127.0.0.1', port=8000, publish=None, msg_queue_url=""):
    """
    Run the cortex server on host:port.

    Incoming payloads are published with the supplied `publish` callable;
    when no callable is given, they are published to the message queue.

    :param host:
    :param port:
    :param publish: callable receiving each deserialized payload, or None
    :param msg_queue_url: e.g 'rabbitmq://127.0.0.1:5672/'
    :return:
    """
    # Prefer an explicit publish callable; fall back to the queue URL.
    use_queue = publish is None and msg_queue_url != ""
    flask_init = FlaskInit(msg_queue_url=msg_queue_url) if use_queue else FlaskInit(publish)
    flask_init.create_app().run(host, port)
| [
"anastasia@certora.com"
] | anastasia@certora.com |
b3c81f15bd5f9eb552e420fac36517d1df4688d2 | 5e5516d7511fe88441c6f01c5881336b80afafc3 | /couchapy/decorators.py | c171af6f5298a1d1b16009195a09ede98903ac1b | [
"Apache-2.0"
] | permissive | aisenor/couchapy | 6e0ea006769024e83cca1a692e950719173de2f5 | 3431c606cea78b90db2bf2afc9e8a530966d18d6 | refs/heads/master | 2022-06-18T15:39:15.735704 | 2020-04-06T12:00:33 | 2020-04-06T12:00:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,182 | py | from functools import wraps
import requests
from re import sub
import couchapy.error
def _process_filter_format(filter_format, filter):
if (filter_format is not None):
for key in filter.keys():
if key not in filter_format:
raise couchapy.error.InvalidKeysException("The provided filter does not meet the expected format.")
def _build_uri(template: str, segments: dict):
def replace_with_segment(matches):
# dynamic segments are expected, but not provided at all
if segments is None and len(matches.groups()) > 0:
identifier = matches.group(1)
raise Exception((
'Invalid URI. This endpoint contains dynamic segments, but none were provided. '
f'Expected segment definition for "{identifier}". '
'Did you forget to pass a uri_segments dict?'))
# a specific segment not provided
elif len(matches.groups()) == 1 and matches.group(1) not in segments:
identifier = matches.group(1)
raise Exception(f'Invalid URI. Expected a dynamic segment for "{identifier}", but none was provided.')
else:
return segments[matches.group(1)]
return sub(':([\w_]+):', replace_with_segment, template)
def endpoint(*args, **kwargs):
    """Decorator factory that binds a method of a couchapy resource class to a
    CouchDB HTTP endpoint.

    args[0] is the URI template (with ':name:' dynamic segments).
    Keyword options:
      query_keys: allowed keys for the query-string filter (validated)
      data_keys:  allowed keys for the request body filter (validated)
      method:     HTTP verb name on the `requests` module (default 'get')

    The decorated method receives either the parsed JSON response or a
    couchapy.error.CouchError on failure (or the ETag for HEAD requests).
    """
    endpoint = args[0]
    allowed_query_parameter_keys = kwargs.get('query_keys', None)
    allowed_data_keys = kwargs.get('data_keys', None)
    request_method = kwargs.get('method', 'get')
    # Resolve e.g. requests.get / requests.post once, at decoration time.
    request_action = getattr(requests, request_method)

    def set_endpoint(*eargs):
        fn = eargs[0]

        @wraps(fn)
        def wrapper(self, *query_params, **kwargs):
            # Merge the resource's predefined URI segments with per-call ones.
            dynamic_segments = getattr(self, '_predefined_segments', {})
            dynamic_segments.update(kwargs.get('uri_segments', {}))

            # Authenticate via the session cookie held by the parent client.
            cookies = {'AuthSession': self.parent.session.auth_token or None}
            uri = f'{self.parent.host}:{self.parent.port}{_build_uri(endpoint, dynamic_segments)}'

            # Validate caller-supplied body and query filters against the
            # allowed key sets declared at decoration time.
            if ('data' in kwargs):
                _process_filter_format(allowed_data_keys, kwargs.get('data'))

            if ('params' in kwargs):
                _process_filter_format(allowed_query_parameter_keys, kwargs.get('params'))

            if (request_method == 'post' or request_method == 'put'):
                # Body-carrying verbs: forward the JSON payload.
                response = request_action(uri,
                                          headers=self.parent._headers,
                                          cookies=cookies,
                                          params=kwargs.get('params', None),
                                          json=kwargs.get('data'))
            elif request_method == 'head':
                # HEAD: the decorated method gets the ETag header directly.
                response = request_action(uri,
                                          headers=self.parent._headers,
                                          cookies=cookies,
                                          params=kwargs.get('params', None),
                                          json=kwargs.get('data'))
                return fn(self, response.headers.get('ETag'))
            else:
                response = request_action(uri,
                                          headers=self.parent._headers,
                                          cookies=cookies,
                                          params=kwargs.get('params', None))

            if (response.status_code in [requests.codes['ok'], requests.codes['created'], requests.codes['accepted']]):
                # Refresh the rolling auth token on success.
                self.parent.session.set_auth_token_from_headers(response.headers)
                ret_val = response.json()

                # Normalize bare-string responses into a dict.
                if isinstance(ret_val, str):
                    ret_val = {'data': ret_val}
            else:
                result = response.json()

                if isinstance(result, str):
                    result = {'data': result}
                    ret_val = couchapy.error.CouchError(**result)
                else:
                    ret_val = couchapy.error.CouchError(error=response.reason, reason=response.reason, status_code=response.status_code)

            # NOTE(review): when the error body is a JSON object (not a bare
            # string), its contents are discarded in favor of response.reason.
            return fn(self, ret_val)
        return wrapper
    return set_endpoint
class AllowedKeys():
    """Allowed query-parameter and request-body key sets for each CouchDB
    endpoint, used by the `endpoint` decorator's filter validation. Naming
    convention: SCOPE__ENDPOINT__PARAMS for query strings, ...__DATA for
    request bodies. Values describe the expected type shape per key."""

    # --- Server-level endpoints ---
    SERVER__ALL_DBS__PARAMS = {'descending': bool, 'limit': int, 'skip': int,
                               'startkey': [], 'start_key': [], 'endkey': [], 'end_key': []}
    SERVER__DBS_INFO__PARAMS = {'keys': []}
    SERVER__CLUSTER_SETUP__PARAMS = {'ensure_dbs_exist': []}
    SERVER__CLUSTER_SETUP__DATA = {'action': str, 'bind_address': str,
                                   'host': str, 'port': int,
                                   'node_code': int, 'remote_node': str,
                                   'username': str, 'password': str,
                                   'remote_current_user': str, 'remote_current_password': str,
                                   'ensure_dbs_exist': [], }
    SERVER__DB_UPDATES__PARAMS = {'feed': str, 'timeout': int, 'heartbeat': int, 'since': str}
    SERVER__REPLICATE__DATA = {'cancel': bool, 'continuous': bool,
                               'create_target': bool, 'doc_ids': [],
                               'filter': str, 'proxy': str,
                               'source': {}, 'target': {}}
    SERVER__SCHEDULER_JOBS__PARAMS = {'limit': int, 'skip': int}
    SERVER__SCHEDULER_DOCS__PARAMS = {'limit': int, 'skip': int}
    SERVER__UUIDS__PARAMS = {'count': int}

    # --- Database-level endpoints ---
    DATABASE__DB__CREATE_PARAMS = {'q': int, 'n': int}
    DATABASE__DB__SAVE__PARAMS = {'batch': str}
    VIEW__PARAMS = {'conflicts': bool, 'descending': bool,
                    'startkey': [], 'start_key': [],
                    'startkey_docid': str, 'start_key_doc_id': str,
                    'endkey': [], 'end_key': [],
                    'endkey_docid': str, 'end_key_doc_id': str,
                    'group': bool, 'group_level': int,
                    'attachments': bool, 'att_encoding_info': bool,
                    'include_docs': bool, 'inclusive_end': bool,
                    'key': [], 'keys': [[]],
                    'limit': int, 'skip': int, 'reduce': bool, 'sorted': bool,
                    'stable': bool, 'stale': str,
                    'update': str, 'update_seq': bool}
    DATABASE__ALL_DOCS__DATA = {'keys': []}
    DATABASE__ALL_DOCS_QUERIES__DATA = {'queries': []}
    DATABASE__DESIGN_DOCS_QUERIES__DATA = {'queries': []}
    DATABASE__DESIGN_DOCS__DATA = {'keys': []}
    DATABASE__LOCAL_DOCS_QUERIES__DATA = {'queries': []}
    DATABASE__BULK_GET__PARAMS = {'revs': bool}
    DATABASE__BULK_GET__DATA = {'docs': [{}]}
    DATABASE__BULK_DOCS__DATA = {'docs': [{}], 'new_edits': bool}
    DATABASE__FIND__DATA = {'selector': {}, 'limit': int, 'skip': int,
                            'sort': {}, 'fields': [], 'use_index': [], 'r': int,
                            'bookmark': str, 'update': bool,
                            'stable': bool, 'stale': str, 'execution_stats': bool}
    DATABASE__INDEX__DATA = {'index': {}, 'ddoc': str, 'name': str,
                             'type': str, 'partial_filter_selector': {}}
    DATABASE__CHANGES__PARAMS = {'doc_ids': [], 'conflicts': bool, 'descending': bool,
                                 'feed': str, 'filter': str, 'heartbeat': int,
                                 'include_docs': bool, 'attachments': bool, 'att_encoding_info': bool,
                                 'last-event-id': int, 'limit': int, 'since': str, 'style': str,
                                 'timeout': int, 'view': str, 'seq_interval': int}
    DATABASE__CHANGES__DATA = {'doc_ids': []}
    DATABASE__SECURITY__DATA = {'admins': {}, 'members': {}}

    # --- Document-level endpoints ---
    DATABASE__DOCUMENT__PARAMS = {'attachments': bool, 'att_encoding_info': bool,
                                  'atts_since': [], 'conflicts': bool, 'deleted_conflicts': bool,
                                  'latest': bool, 'local_seq': bool, 'meta': bool, 'open_revs': [],
                                  'rev': str, 'revs': bool, 'revs_info': bool}
    DATABASE__DOCUMENT__NAMED_DOC__PARAMS = {'rev': str, 'batch': str, 'new_edits': bool}
    DATABASE__DOCUMENT__DELETE__PARAMS = {'rev': str, 'batch': str}
    DATABASE__DOCUMENT__COPY__PARAMS = {'rev': str, 'batch': str}

    # --- Attachment endpoints ---
    DATABASE__ATTACHMENT__GET__PARAMS = {'rev': str}
    DATABASE__ATTACHMENT__SAVE__PARAMS = {'rev': str}
    DATABASE__ATTACHMENT__INFO_PARAMS = {'rev': str}
    DATABASE__ATTACHMENT__DELETE__PARAMS = {'rev': str, 'batch': str}

    # --- Local (non-replicated) document endpoints ---
    DATABASE__LOCAL_DOCS__PARAMS = {'conflicts': bool, 'descending': bool,
                                    'startkey': [], 'start_key': [],
                                    'startkey_docid': str, 'start_key_doc_id': str,
                                    'endkey': [], 'end_key': [],
                                    'endkey_docid': str, 'end_key_doc_id': str,
                                    'include_docs': bool, 'inclusive_end': bool,
                                    'key': [], 'keys': [[]],
                                    'limit': int, 'skip': int, 'update_seq': bool}
    DATABASE__LOCAL_DOCS__DATA = {'keys': []}
    DATABASE__VIEW_BY_KEY__DATA = {'keys': []}
    DATABASE__VIEW_QUERIES__DATA = {'queries': []}
| [
"lee@torusoft.com"
] | lee@torusoft.com |
ab6f3740bc734f29f73b2de48ec28c14cbb6e245 | 07929b2a3b0955a0b1aae938bc2afddc8cd2060a | /companies/migrations/0001_initial.py | 6f260c58c6ad6d0dcc77721c597d5d7d21847ec5 | [
"MIT"
] | permissive | Antman261/pdpdmeetup | fb70a7a08c1fd2eb983007b788f3ecce1de1e7cb | 25f7c8fc4092b7a3fa105a98c03185fe639bdd5b | refs/heads/master | 2021-01-18T07:42:44.791905 | 2016-06-29T10:39:13 | 2016-06-29T10:39:13 | 62,816,828 | 1 | 0 | null | 2016-07-07T15:14:51 | 2016-07-07T15:14:51 | null | UTF-8 | Python | false | false | 753 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-25 14:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Company table."""

    # First migration of this app.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Company',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('abn', models.CharField(max_length=12)),
                ('description', models.CharField(max_length=200)),
                ('logo', models.ImageField(null=True, upload_to='images/')),
            ],
        ),
    ]
| [
"josemuar@hotmail.com"
] | josemuar@hotmail.com |
3aca5990393a22c05a881749cc4244289bd6ecee | b7affc938e20e21a351cfa1465b10137c6ca914c | /twoslug/model/__init__.py | 5cb775b69a012b8f302a060213fae58aaefa5320 | [
"Apache-2.0"
] | permissive | aliles-heroku/twoslug | 83009ac378dde937e14c0e26642361c4088b1629 | f129b4ca2f54ab4efc81e4ae395abc172a23e2dd | refs/heads/master | 2016-09-06T15:24:20.350575 | 2014-08-04T03:21:12 | 2014-08-04T03:21:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | from __future__ import absolute_import
from . import wordnet
from . import doge
from . import markov
| [
"aaron.iles@gmail.com"
] | aaron.iles@gmail.com |
7c0bf7ade1f8db725a4ef41dc22305288b4582ce | 2716d8e04c957aebc5137b3dbb719cbb31eaf013 | /user_extent/users/models.py | 74d53617f24544e6ce4de123d9e95b400466ebb0 | [] | no_license | anlaganlag/mini_proj_component | 01d5fdd641cbc2a5199865d64b21431603704bd1 | 1def0fc576bb422b6819bd2df56b8e7cd48d3368 | refs/heads/master | 2021-01-06T23:54:13.921612 | 2020-02-20T10:28:55 | 2020-02-20T10:28:55 | 241,518,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class CustomUser(AbstractUser):
pass
def __str__(self):
return self.username
| [
"tanjhu@163.com"
] | tanjhu@163.com |
a825d645ab9c589d0f945d9e74f5db46b2e79929 | d5c989d018d7adac078e26bff63da4a636d2a88c | /hackerearth/migrations/0002_conversation_bot_context.py | 37f6b864a5e1a6fc0bc992ffea7c854f4984ca28 | [] | no_license | edufanelli/hackerearth2019 | 1b70d641f7dc223e38accb69eb68a7cfc28bae35 | bd5f80bbab0f7bbd9c274a325021e91e4f522494 | refs/heads/master | 2020-09-21T11:25:20.328159 | 2019-12-04T01:20:00 | 2019-12-04T01:20:00 | 224,774,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | # Generated by Django 2.2.7 on 2019-12-01 21:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hackerearth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Conversation_bot_context',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('conversation', models.CharField(help_text='conversation id', max_length=50, null=True)),
('value_type', models.CharField(help_text='value_type identification', max_length=50, null=True)),
('value', models.CharField(help_text='value identification', max_length=50, null=True)),
],
options={
'verbose_name': 'Conversation_context',
'verbose_name_plural': 'Conversation_contexts',
},
),
]
| [
"eduardofanelli@gmail.com"
] | eduardofanelli@gmail.com |
a2092c854e8742bccf19c23c9363f11036719fb6 | d50820254bac6ff547f5ffbf62297b5d3fe7328b | /seq_data_processing/nucleotide_processing/wormbase_api_querying_tests.py | 0b8b5448282679570d254f02751060174aaa9507 | [] | no_license | billsun9/PhenotypePrediction | 007ccc4ab4460dde9793e5e2a5d9d40b7f9dd250 | d02cd2b8a208f644b122654358fd70db88b219b9 | refs/heads/main | 2023-06-04T10:16:09.660514 | 2021-06-27T17:45:53 | 2021-06-27T17:45:53 | 361,290,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 27 11:43:40 2021
@author: Bill Sun
"""
# %%
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import re
import pickle
# %%
base_url = 'https://wormbase.org/species/c_elegans/variation/'
variation = 'e1370'
url = base_url+variation
req = requests.get(url)
print(req.history) # if no redirect: [], if redirect: status code [307]
print(req.url) # new url??
tmp = req.url
wormbaseVarId = tmp.split('/')[-1].split('?')[0]
api_r = requests.get('http://rest.wormbase.org/rest/field/variation/%s/sequence_context' % (wormbaseVarId))
# %%
with open('./tmp/intervention_to_ids.pickle', 'rb') as handle:
intervention_to_id = pickle.load(handle)
# %%
test_ivs = ["ad609", "ad1116", "ak47", "cxTi9279"]
test_ivs_ids = [intervention_to_id[iv] for iv in test_ivs]
test_id = test_ivs_ids[3]
req_url = 'http://rest.wormbase.org/rest/field/variation/%s/sequence_context' % (test_id)
api_r = requests.get(req_url)
output = api_r.json()
# %%
wild = output['sequence_context']['data']['wildtype']['positive_strand']['sequence']
mutant= output['sequence_context']['data']['mutant']['positive_strand']['sequence']
# %%
t1 = set(list(key.lower() for key in intervention_to_id.keys())) | [
"billsun9@gmail.com"
] | billsun9@gmail.com |
758845a98bd96b61545b99c95504cc4dd0519915 | 3a2118fa46f98a2ce0b1b6f8073ea4bd4b5c217d | /api/api/migrations/0028_remove_testeusuario_user.py | 3f587115685412197dee03bd57eb1b2d56f0a699 | [] | no_license | daviwesley/easy-chamadas | 46a1f258139701428c85bc02d2b60aae7644e051 | 691ce6c7b8559a8ca0b9638fdcf98e31e4f54384 | refs/heads/master | 2022-12-10T09:49:38.505005 | 2019-02-25T21:34:05 | 2019-02-25T21:34:05 | 150,799,314 | 0 | 1 | null | 2022-12-08T14:36:01 | 2018-09-28T22:10:26 | Python | UTF-8 | Python | false | false | 328 | py | # Generated by Django 2.1.2 on 2018-11-16 02:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0027_auto_20181111_0421'),
]
operations = [
migrations.RemoveField(
model_name='testeusuario',
name='user',
),
]
| [
"daviwesley@alu.ufc.br"
] | daviwesley@alu.ufc.br |
9a9235ab91d07c7b900e6f176a36438246f30741 | 8f1a28adba48a30898f3d3797822ff621f2ff9b2 | /utils.py | da800ba2939b8a687b364f7c7a3400d31bae9cbe | [] | no_license | lucmski/trendy-twitter-bot | e9d74b0f53618d2692b863e67eddd5e6beda9101 | 4b24196984e20fb4789880205a5cdc82a6d1264f | refs/heads/master | 2020-07-31T15:04:22.038804 | 2018-08-12T07:18:13 | 2018-08-12T07:18:13 | 210,646,398 | 1 | 0 | null | 2019-09-24T16:15:04 | 2019-09-24T16:15:03 | null | UTF-8 | Python | false | false | 2,250 | py | import re
import nltk
import markovify
import requests
import random
from credentials import GIPHY_API_KEY
class POSifiedNewlineText(markovify.NewlineText):
def word_split(self, sentence):
words = re.split(self.word_split_pattern, sentence)
words = [ "::".join(tag) for tag in nltk.pos_tag(words) ]
return words
def word_join(self, words):
sentence = " ".join(word.split("::")[0] for word in words)
return sentence
class EmojiTranslator:
def __init__(self):
self.table_re = re.compile(r'(.+) (.+)', re.UNICODE)
self.tag_re = re.compile(r'Emoji: (.+)')
self.table = {}
with open('emoji.txt', 'r') as fp:
for line in fp:
m = self.table_re.match(line)
self.table[m.group(1)] = m.group(2)
def encode(self, tag):
m = self.tag_re.match(tag)
if m:
cldr = m.group(1).lower()
if cldr in self.table:
return self.table[cldr]
return None
def get_gif(string):
keywords = string.strip().split(' ')
query = '+'.join(keywords)
res = requests.get(url='http://api.giphy.com/v1/gifs/search?q={}&api_key={}&limit=10'.format(query, GIPHY_API_KEY))
gifs = res.json()['data']
gif = random.choice(gifs)
url = gif['bitly_gif_url']
return url
def contains_one_of(string, parts):
for part in parts:
if part in string:
return True
return False
def load_corpus(filename):
num_lines = 0
lines = []
# pattern = re.compile(r'<Emoji: .*?>')
with open(filename, 'r') as fp:
for line in fp:
line = re.sub(r'[<>]', '%%', line).strip()
line = line.split('%%')
result = []
for term in line:
if 'Emoji:' in term:
term = trans.encode(term)
if not term or '.com' in term or '.ly' in term or '@' in term:
continue
result.append(term)
lines.append(' '.join(result))
num_lines += 1
if num_lines < 500:
print('The corpus is not large enough to generate a good tweet!')
return None
return '\n'.join(lines) | [
"tim.shur@gmail.com"
] | tim.shur@gmail.com |
b14147fcf4d54a024e26e20ab40bdbe257953f51 | 5f6b48db5b402541caa089ef676e679bd4b21ef9 | /test/hlt_files/hlt_MuHad.py | 8450f70f8ffc20416d5c36af80bf5bb88283d908 | [] | no_license | halilg/openHLT | b4eda18bbed6cc05a1381e834458cf0ececdb692 | 49d6f43e14794f7c13c88de012a4009b68699e14 | refs/heads/master | 2020-04-24T08:08:29.923798 | 2013-08-09T09:59:50 | 2013-08-09T09:59:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549,075 | py | # /cdaq/physics/Run2012/8e33/v2.1/HLT/V7 (CMSSW_5_2_7_HLT3)
import FWCore.ParameterSet.Config as cms
process = cms.Process( "TEST" )
process.HLTConfigVersion = cms.PSet(
tableName = cms.string('/cdaq/physics/Run2012/8e33/v2.1/HLT/V7')
)
process.streams = cms.PSet(
A = cms.vstring( 'BJetPlusX',
'BTag',
'Commissioning',
'Cosmics',
'DoubleElectron',
'DoubleMu',
'DoubleMuParked',
'DoublePhoton',
'DoublePhotonHighPt',
'ElectronHad',
'FEDMonitor',
'HLTPhysicsParked',
'HTMHT',
'HTMHTParked',
'HcalHPDNoise',
'HcalNZS',
'JetHT',
'JetMon',
'LogMonitor',
'MET',
'METParked',
'MinimumBias',
'MuEG',
'MuHad',
'MuOnia',
'MuOniaParked',
'MultiJet',
'MultiJet1Parked',
'NoBPTX',
'PhotonHad',
'SingleElectron',
'SingleMu',
'SinglePhoton',
'SinglePhotonParked',
'Tau',
'TauParked',
'TauPlusX',
'VBF1Parked',
'ZeroBiasParked' ),
ALCALUMIPIXELS = cms.vstring( 'AlCaLumiPixels' ),
ALCAP0 = cms.vstring( 'AlCaP0' ),
ALCAPHISYM = cms.vstring( 'AlCaPhiSym' ),
B = cms.vstring( 'ParkingMonitor' ),
Calibration = cms.vstring( 'TestEnablesEcalHcalDT' ),
DQM = cms.vstring( 'OnlineMonitor' ),
EcalCalibration = cms.vstring( 'EcalLaser' ),
Express = cms.vstring( 'ExpressPhysics' ),
HLTDQM = cms.vstring( 'OnlineHltMonitor' ),
HLTMON = cms.vstring( 'OfflineMonitor' ),
NanoDST = cms.vstring( 'L1Accept' ),
PhysicsDST = cms.vstring( 'DataScouting' ),
RPCMON = cms.vstring( 'RPCMonitor' ),
TrackerCalibration = cms.vstring( 'TestEnablesTracker' )
)
process.datasets = cms.PSet(
AlCaLumiPixels = cms.vstring( 'AlCa_LumiPixels_Random_v1',
'AlCa_LumiPixels_ZeroBias_v4',
'AlCa_LumiPixels_v8' ),
AlCaP0 = cms.vstring( 'AlCa_EcalEtaEBonly_v6',
'AlCa_EcalEtaEEonly_v6',
'AlCa_EcalPi0EBonly_v6',
'AlCa_EcalPi0EEonly_v6' ),
AlCaPhiSym = cms.vstring( 'AlCa_EcalPhiSym_v13' ),
BJetPlusX = cms.vstring( 'HLT_DiJet40Eta2p6_BTagIP3DFastPV_v7',
'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v7',
'HLT_DiPFJet80_DiPFJet30_BTagCSVd07d05_v5',
'HLT_DiPFJet80_DiPFJet30_BTagCSVd07d05d03_v5',
'HLT_DiPFJet80_DiPFJet30_BTagCSVd07d05d05_v5',
'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v7',
'HLT_Jet60Eta1p7_Jet53Eta1p7_DiBTagIP3DFastPV_v7',
'HLT_Jet80Eta1p7_Jet70Eta1p7_DiBTagIP3DFastPV_v7',
'HLT_L1DoubleJet36Central_v7',
'HLT_QuadJet75_55_35_20_BTagIP_VBF_v7',
'HLT_QuadJet75_55_35_20_VBF_v1',
'HLT_QuadJet75_55_38_20_BTagIP_VBF_v7',
'HLT_QuadPFJet78_61_44_31_BTagCSV_VBF_v6',
'HLT_QuadPFJet78_61_44_31_VBF_v1',
'HLT_QuadPFJet82_65_48_35_BTagCSV_VBF_v6' ),
BTag = cms.vstring( 'HLT_BTagMu_DiJet110_Mu5_v6',
'HLT_BTagMu_DiJet20_Mu5_v6',
'HLT_BTagMu_DiJet40_Mu5_v6',
'HLT_BTagMu_DiJet70_Mu5_v6',
'HLT_BTagMu_Jet300_Mu5_v6' ),
Commissioning = cms.vstring( 'HLT_Activity_Ecal_SC7_v13',
'HLT_BeamGas_HF_Beam1_v5',
'HLT_BeamGas_HF_Beam2_v5',
'HLT_IsoTrackHB_v14',
'HLT_IsoTrackHE_v15',
'HLT_L1SingleEG12_v6',
'HLT_L1SingleEG5_v6',
'HLT_L1SingleJet16_v7',
'HLT_L1SingleJet36_v7',
'HLT_L1SingleMu12_v2',
'HLT_L1SingleMuOpen_v7' ),
Cosmics = cms.vstring( 'HLT_BeamHalo_v13',
'HLT_L1SingleMuOpen_AntiBPTX_v7',
'HLT_L1TrackerCosmics_v7' ),
DataScouting = cms.vstring( 'DST_Ele8_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_HT250_v4',
'DST_HT250_v4',
'DST_L1HTT_Or_L1MultiJet_v4',
'DST_Mu5_HT250_v4' ),
DoubleElectron = cms.vstring( 'HLT_DoubleEle10_CaloIdL_TrkIdVL_Ele10_CaloIdT_TrkIdVL_v12',
'HLT_Ele15_Ele8_Ele5_CaloIdL_TrkIdVL_v6',
'HLT_Ele17_CaloIdL_CaloIsoVL_v17',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v19',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Jet30_v7',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v6',
'HLT_Ele17_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_Ele8_Mass50_v6',
'HLT_Ele20_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_SC4_Mass50_v7',
'HLT_Ele23_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_HFT30_v8',
'HLT_Ele27_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele15_CaloIdT_CaloIsoVL_trackless_v8',
'HLT_Ele27_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_HFT15_v8',
'HLT_Ele32_CaloIdT_CaloIsoT_TrkIdT_TrkIsoT_SC17_Mass50_v6',
'HLT_Ele5_SC5_Jpsi_Mass2to15_v4',
'HLT_Ele8_CaloIdL_CaloIsoVL_v17',
'HLT_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Jet30_v7',
'HLT_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v15',
'HLT_Ele8_CaloIdT_TrkIdVL_EG7_v2',
'HLT_Ele8_CaloIdT_TrkIdVL_Jet30_v7',
'HLT_Ele8_CaloIdT_TrkIdVL_v5',
'HLT_Photon22_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Photon36_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Photon50_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Photon75_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Photon90_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_TripleEle10_CaloIdL_TrkIdVL_v18' ),
DoubleMu = cms.vstring( 'HLT_DoubleMu11_Acoplanarity03_v5',
'HLT_DoubleMu4_Acoplanarity03_v5',
'HLT_DoubleMu5_IsoMu5_v20',
'HLT_L2DoubleMu23_NoVertex_2Cha_Angle2p5_v3',
'HLT_L2DoubleMu23_NoVertex_v11',
'HLT_L2DoubleMu38_NoVertex_2Cha_Angle2p5_v3',
'HLT_Mu13_Mu8_NoDZ_v1',
'HLT_Mu17_Mu8_v22',
'HLT_Mu17_TkMu8_NoDZ_v1',
'HLT_Mu17_TkMu8_v14',
'HLT_Mu17_v5',
'HLT_Mu22_TkMu22_v9',
'HLT_Mu22_TkMu8_v9',
'HLT_Mu8_v18',
'HLT_TripleMu5_v19' ),
DoubleMuParked = cms.vstring( 'HLT_DoubleMu11_Acoplanarity03_v5',
'HLT_DoubleMu4_Acoplanarity03_v5',
'HLT_DoubleMu5_IsoMu5_v20',
'HLT_L2DoubleMu23_NoVertex_2Cha_Angle2p5_v3',
'HLT_L2DoubleMu23_NoVertex_v11',
'HLT_L2DoubleMu38_NoVertex_2Cha_Angle2p5_v3',
'HLT_Mu13_Mu8_NoDZ_v1',
'HLT_Mu13_Mu8_v22',
'HLT_Mu17_Mu8_v22',
'HLT_Mu17_TkMu8_NoDZ_v1',
'HLT_Mu17_TkMu8_v14',
'HLT_Mu17_v5',
'HLT_Mu22_TkMu22_v9',
'HLT_Mu22_TkMu8_v9',
'HLT_Mu8_v18',
'HLT_TripleMu5_v19' ),
DoublePhoton = cms.vstring( 'HLT_Photon26_Photon18_v12',
'HLT_Photon26_R9Id85_OR_CaloId10_Iso50_Photon18_R9Id85_OR_CaloId10_Iso50_Mass70_v2',
'HLT_Photon26_R9Id85_OR_CaloId10_Iso50_Photon18_v5',
'HLT_Photon36_CaloId10_Iso50_Photon22_CaloId10_Iso50_v6',
'HLT_Photon36_CaloId10_Iso50_Photon22_R9Id85_v6',
'HLT_Photon36_Photon22_v6',
'HLT_Photon36_R9Id85_OR_CaloId10_Iso50_Photon10_R9Id85_OR_CaloId10_Iso50_Mass80_v1',
'HLT_Photon36_R9Id85_OR_CaloId10_Iso50_Photon22_R9Id85_OR_CaloId10_Iso50_v6',
'HLT_Photon36_R9Id85_OR_CaloId10_Iso50_Photon22_v5',
'HLT_Photon36_R9Id85_Photon22_CaloId10_Iso50_v6',
'HLT_Photon36_R9Id85_Photon22_R9Id85_v4' ),
DoublePhotonHighPt = cms.vstring( 'HLT_DoubleEle33_CaloIdL_GsfTrkIdVL_v7',
'HLT_DoubleEle33_CaloIdL_v14',
'HLT_DoubleEle33_CaloIdT_v10',
'HLT_DoublePhoton40_CaloIdL_Rsq0p035_v6',
'HLT_DoublePhoton40_CaloIdL_Rsq0p06_v6',
'HLT_DoublePhoton48_HEVT_v8',
'HLT_DoublePhoton53_HEVT_v2',
'HLT_DoublePhoton70_v6',
'HLT_DoublePhoton80_v7' ),
EcalLaser = cms.vstring( 'HLT_EcalCalibration_v3' ),
ElectronHad = cms.vstring( 'HLT_CleanPFNoPUHT300_Ele15_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_PFMET45_v3',
'HLT_CleanPFNoPUHT300_Ele15_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_PFMET50_v3',
'HLT_CleanPFNoPUHT300_Ele40_CaloIdVT_TrkIdT_v3',
'HLT_CleanPFNoPUHT300_Ele60_CaloIdVT_TrkIdT_v3',
'HLT_CleanPFNoPUHT350_Ele5_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_PFMET45_v3',
'HLT_CleanPFNoPUHT350_Ele5_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_PFMET50_v3',
'HLT_DoubleEle14_CaloIdT_TrkIdVL_Mass8_PFMET40_v8',
'HLT_DoubleEle14_CaloIdT_TrkIdVL_Mass8_PFMET50_v8',
'HLT_DoubleEle8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4',
'HLT_DoubleEle8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4',
'HLT_DoubleEle8_CaloIdT_TrkIdVL_v12',
'HLT_Ele12_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_DoubleCentralJet65_v4',
'HLT_Ele12_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_RsqMR30_Rsq0p04_MR200_v4',
'HLT_Ele12_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_RsqMR40_Rsq0p04_MR200_v4',
'HLT_Ele30_CaloIdVT_TrkIdT_PFNoPUJet100_PFNoPUJet25_v8',
'HLT_Ele30_CaloIdVT_TrkIdT_PFNoPUJet150_PFNoPUJet25_v8',
'HLT_Ele8_CaloIdT_TrkIdT_DiJet30_v18',
'HLT_Ele8_CaloIdT_TrkIdT_QuadJet30_v18',
'HLT_Ele8_CaloIdT_TrkIdT_TriJet30_v18' ),
ExpressPhysics = cms.vstring( 'HLT_DoublePhoton80_v7',
'HLT_EightJet30_eta3p0_v5',
'HLT_EightJet35_eta3p0_v5',
'HLT_MET400_v7',
'HLT_Mu17_Mu8_v22',
'HLT_Photon300_NoHE_v5',
'HLT_ZeroBias_v7' ),
FEDMonitor = cms.vstring( 'HLT_DTErrors_v3' ),
HLTPhysicsParked = cms.vstring( 'HLT_Physics_Parked_v1' ),
HTMHT = cms.vstring( 'HLT_HT250_AlphaT0p55_v8',
'HLT_HT250_AlphaT0p57_v8',
'HLT_HT300_AlphaT0p53_v8',
'HLT_HT300_AlphaT0p54_v14',
'HLT_HT350_AlphaT0p52_v8',
'HLT_HT350_AlphaT0p53_v19',
'HLT_HT400_AlphaT0p51_v19',
'HLT_HT400_AlphaT0p52_v14',
'HLT_HT450_AlphaT0p51_v14',
'HLT_PFNoPUHT350_PFMET100_v4',
'HLT_PFNoPUHT400_PFMET100_v4',
'HLT_RsqMR40_Rsq0p04_v6',
'HLT_RsqMR55_Rsq0p09_MR150_v6',
'HLT_RsqMR60_Rsq0p09_MR150_v6',
'HLT_RsqMR65_Rsq0p09_MR150_v5' ),
HTMHTParked = cms.vstring( 'HLT_HT200_AlphaT0p57_v8',
'HLT_HT250_AlphaT0p55_v8',
'HLT_HT250_AlphaT0p57_v8',
'HLT_HT300_AlphaT0p53_v8',
'HLT_HT300_AlphaT0p54_v14',
'HLT_HT350_AlphaT0p52_v8',
'HLT_HT350_AlphaT0p53_v19',
'HLT_HT400_AlphaT0p51_v19',
'HLT_HT400_AlphaT0p52_v14',
'HLT_HT450_AlphaT0p51_v14',
'HLT_PFNoPUHT350_PFMET100_v4',
'HLT_PFNoPUHT400_PFMET100_v4',
'HLT_RsqMR40_Rsq0p04_v6',
'HLT_RsqMR45_Rsq0p09_v5',
'HLT_RsqMR55_Rsq0p09_MR150_v6',
'HLT_RsqMR60_Rsq0p09_MR150_v6',
'HLT_RsqMR65_Rsq0p09_MR150_v5' ),
HcalHPDNoise = cms.vstring( 'HLT_GlobalRunHPDNoise_v8',
'HLT_L1Tech_HBHEHO_totalOR_v6',
'HLT_L1Tech_HCAL_HF_single_channel_v4' ),
HcalNZS = cms.vstring( 'HLT_HcalNZS_v10',
'HLT_HcalPhiSym_v11',
'HLT_HcalUTCA_v1' ),
JetHT = cms.vstring( 'HLT_DiPFJetAve320_v10',
'HLT_DiPFJetAve400_v10',
'HLT_FatDiPFJetMass750_DR1p1_Deta1p5_v10',
'HLT_HT200_v6',
'HLT_HT250_v7',
'HLT_HT300_DoubleDisplacedPFJet60_ChgFraction10_v10',
'HLT_HT300_DoubleDisplacedPFJet60_v10',
'HLT_HT300_SingleDisplacedPFJet60_ChgFraction10_v10',
'HLT_HT300_SingleDisplacedPFJet60_v10',
'HLT_HT300_v7',
'HLT_HT350_v7',
'HLT_HT400_v7',
'HLT_HT450_v7',
'HLT_HT500_v7',
'HLT_HT550_v7',
'HLT_HT650_Track50_dEdx3p6_v10',
'HLT_HT650_Track60_dEdx3p7_v10',
'HLT_HT650_v7',
'HLT_HT750_v7',
'HLT_Jet370_NoJetID_v15',
'HLT_MET80_Track50_dEdx3p6_v6',
'HLT_MET80_Track60_dEdx3p7_v6',
'HLT_MET80_v5',
'HLT_PFJet320_v9',
'HLT_PFJet400_v9',
'HLT_PFNoPUHT350_v4',
'HLT_PFNoPUHT650_DiCentralPFNoPUJet80_CenPFNoPUJet40_v4',
'HLT_PFNoPUHT650_v4',
'HLT_PFNoPUHT700_v4',
'HLT_PFNoPUHT750_v4' ),
JetMon = cms.vstring( 'HLT_DiPFJetAve140_v10',
'HLT_DiPFJetAve200_v10',
'HLT_DiPFJetAve260_v10',
'HLT_DiPFJetAve40_v9',
'HLT_DiPFJetAve80_v10',
'HLT_PFJet140_v9',
'HLT_PFJet200_v9',
'HLT_PFJet260_v9',
'HLT_PFJet40_v8',
'HLT_PFJet80_v9',
'HLT_SingleForJet15_v4',
'HLT_SingleForJet25_v4' ),
L1Accept = cms.vstring( 'DST_Physics_v5' ),
LogMonitor = cms.vstring( 'HLT_LogMonitor_v4' ),
MET = cms.vstring( 'HLT_DiCentralJetSumpT100_dPhi05_DiCentralPFJet60_25_PFMET100_HBHENoiseCleaned_v5',
'HLT_DiCentralPFJet30_PFMET80_BTagCSV07_v5',
'HLT_DiCentralPFJet30_PFMET80_v6',
'HLT_DiCentralPFNoPUJet50_PFMETORPFMETNoMu80_v4',
'HLT_DiPFJet40_PFMETnoMu65_MJJ600VBF_LeadingJets_v9',
'HLT_DiPFJet40_PFMETnoMu65_MJJ800VBF_AllJets_v9',
'HLT_L1ETM100_v2',
'HLT_L1ETM30_v2',
'HLT_L1ETM40_v2',
'HLT_L1ETM70_v2',
'HLT_MET120_HBHENoiseCleaned_v6',
'HLT_MET120_v13',
'HLT_MET200_HBHENoiseCleaned_v5',
'HLT_MET200_v12',
'HLT_MET300_HBHENoiseCleaned_v5',
'HLT_MET300_v4',
'HLT_MET400_HBHENoiseCleaned_v5',
'HLT_MET400_v7',
'HLT_MonoCentralPFJet80_PFMETnoMu105_NHEF0p95_v4',
'HLT_PFMET150_v7',
'HLT_PFMET180_v7' ),
METParked = cms.vstring( 'HLT_DiCentralJetSumpT100_dPhi05_DiCentralPFJet60_25_PFMET100_HBHENoiseCleaned_v5',
'HLT_DiCentralPFJet30_PFMET80_BTagCSV07_v5',
'HLT_DiCentralPFJet30_PFMET80_v6',
'HLT_DiCentralPFNoPUJet50_PFMETORPFMETNoMu80_v4',
'HLT_DiPFJet40_PFMETnoMu65_MJJ600VBF_LeadingJets_v9',
'HLT_DiPFJet40_PFMETnoMu65_MJJ800VBF_AllJets_v9',
'HLT_L1ETM100_v2',
'HLT_L1ETM30_v2',
'HLT_L1ETM40_v2',
'HLT_L1ETM70_v2',
'HLT_MET100_HBHENoiseCleaned_v1',
'HLT_MET120_HBHENoiseCleaned_v6',
'HLT_MET120_v13',
'HLT_MET200_HBHENoiseCleaned_v5',
'HLT_MET200_v12',
'HLT_MET300_HBHENoiseCleaned_v5',
'HLT_MET300_v4',
'HLT_MET400_HBHENoiseCleaned_v5',
'HLT_MET400_v7',
'HLT_MET80_Parked_v5',
'HLT_MonoCentralPFJet80_PFMETnoMu105_NHEF0p95_v4',
'HLT_PFMET150_v7',
'HLT_PFMET180_v7' ),
MinimumBias = cms.vstring( 'HLT_Physics_v5',
'HLT_PixelTracks_Multiplicity70_v3',
'HLT_PixelTracks_Multiplicity80_v12',
'HLT_PixelTracks_Multiplicity90_v3',
'HLT_Random_v2',
'HLT_ZeroBiasPixel_DoubleTrack_v2',
'HLT_ZeroBias_v7' ),
MuEG = cms.vstring( 'HLT_DoubleMu5_Ele8_CaloIdT_TrkIdVL_v16',
'HLT_DoubleMu8_Ele8_CaloIdT_TrkIdVL_v5',
'HLT_Mu17_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v9',
'HLT_Mu22_Photon22_CaloIdL_v7',
'HLT_Mu30_Ele30_CaloIdL_v8',
'HLT_Mu7_Ele7_CaloIdT_CaloIsoVL_v7',
'HLT_Mu8_DoubleEle8_CaloIdT_TrkIdVL_v7',
'HLT_Mu8_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v9',
'HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Ele8_CaloIdL_TrkIdVL_v7' ),
MuHad = cms.vstring( 'HLT_DoubleDisplacedMu4_DiPFJet40Neutral_v8',
'HLT_DoubleMu14_Mass8_PFMET40_v8',
'HLT_DoubleMu14_Mass8_PFMET50_v8',
'HLT_DoubleMu8_Mass8_PFNoPUHT175_v4',
'HLT_DoubleMu8_Mass8_PFNoPUHT225_v4',
'HLT_DoubleRelIso1p0Mu5_Mass8_PFNoPUHT175_v4',
'HLT_DoubleRelIso1p0Mu5_Mass8_PFNoPUHT225_v4',
'HLT_IsoMu12_DoubleCentralJet65_v4',
'HLT_IsoMu12_RsqMR30_Rsq0p04_MR200_v4',
'HLT_IsoMu12_RsqMR40_Rsq0p04_MR200_v4',
'HLT_IsoMu17_eta2p1_DiCentralPFNoPUJet30_PFNoPUHT350_PFMHT40_v3',
'HLT_L2TripleMu10_0_0_NoVertex_PFJet40Neutral_v8',
'HLT_Mu14_Ele14_CaloIdT_TrkIdVL_Mass8_PFMET40_v8',
'HLT_Mu14_Ele14_CaloIdT_TrkIdVL_Mass8_PFMET50_v8',
'HLT_Mu40_PFNoPUHT350_v4',
'HLT_Mu60_PFNoPUHT350_v4',
'HLT_Mu8_DiJet30_v7',
'HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4',
'HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4',
'HLT_Mu8_QuadJet30_v7',
'HLT_Mu8_TriJet30_v7',
'HLT_PFNoPUHT350_Mu15_PFMET45_v4',
'HLT_PFNoPUHT350_Mu15_PFMET50_v4',
'HLT_PFNoPUHT400_Mu5_PFMET45_v4',
'HLT_PFNoPUHT400_Mu5_PFMET50_v4',
'HLT_RelIso1p0Mu5_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4',
'HLT_RelIso1p0Mu5_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4' ),
MuOnia = cms.vstring( 'HLT_Dimuon0_Jpsi_Muon_v18',
'HLT_Dimuon0_Jpsi_NoVertexing_v14',
'HLT_Dimuon0_Jpsi_v17',
'HLT_Dimuon0_PsiPrime_v6',
'HLT_Dimuon0_Upsilon_Muon_v18',
'HLT_Dimuon0_Upsilon_v17',
'HLT_Dimuon11_Upsilon_v6',
'HLT_Dimuon3p5_SameSign_v6',
'HLT_Dimuon7_Upsilon_v7',
'HLT_DoubleMu3_4_Dimuon5_Bs_Central_v5',
'HLT_DoubleMu3p5_4_Dimuon5_Bs_Central_v5',
'HLT_DoubleMu4_Dimuon7_Bs_Forward_v5',
'HLT_DoubleMu4_JpsiTk_Displaced_v6',
'HLT_DoubleMu4_Jpsi_Displaced_v12',
'HLT_Mu5_L2Mu3_Jpsi_v8',
'HLT_Mu5_Track2_Jpsi_v21',
'HLT_Mu5_Track3p5_Jpsi_v7',
'HLT_Mu7_Track7_Jpsi_v20',
'HLT_Tau2Mu_ItTrack_v7' ),
MuOniaParked = cms.vstring( 'HLT_BTagMu_Jet20_Mu4_v2',
'HLT_BTagMu_Jet60_Mu4_v2',
'HLT_Dimuon10_Jpsi_v6',
'HLT_Dimuon5_PsiPrime_v6',
'HLT_Dimuon5_Upsilon_v6',
'HLT_Dimuon7_PsiPrime_v3',
'HLT_Dimuon8_Jpsi_v7',
'HLT_Dimuon8_Upsilon_v6',
'HLT_DoubleMu3p5_LowMassNonResonant_Displaced_v6',
'HLT_DoubleMu3p5_LowMass_Displaced_v6',
'HLT_Mu15_TkMu5_Onia_v1' ),
MultiJet = cms.vstring( 'HLT_DiJet80_DiJet60_DiJet20_v6',
'HLT_DoubleJet20_ForwardBackward_v4',
'HLT_EightJet30_eta3p0_v5',
'HLT_EightJet35_eta3p0_v5',
'HLT_ExclDiJet35_HFAND_v4',
'HLT_ExclDiJet35_HFOR_v4',
'HLT_ExclDiJet80_HFAND_v4',
'HLT_QuadJet60_DiJet20_v6',
'HLT_QuadJet70_v6',
'HLT_QuadJet80_v6',
'HLT_QuadJet90_v6',
'HLT_SixJet35_v6',
'HLT_SixJet45_v6',
'HLT_SixJet50_v6' ),
MultiJet1Parked = cms.vstring( 'HLT_DiJet80_DiJet60_DiJet20_v6',
'HLT_DoubleJet20_ForwardBackward_v4',
'HLT_EightJet30_eta3p0_v5',
'HLT_EightJet35_eta3p0_v5',
'HLT_ExclDiJet35_HFAND_v4',
'HLT_ExclDiJet35_HFOR_v4',
'HLT_ExclDiJet80_HFAND_v4',
'HLT_QuadJet45_v1',
'HLT_QuadJet50_v5',
'HLT_QuadJet60_DiJet20_v6',
'HLT_QuadJet70_v6',
'HLT_QuadJet80_v6',
'HLT_QuadJet90_v6',
'HLT_SixJet35_v6',
'HLT_SixJet45_v6',
'HLT_SixJet50_v6' ),
NoBPTX = cms.vstring( 'HLT_JetE30_NoBPTX3BX_NoHalo_v16',
'HLT_JetE30_NoBPTX_v14',
'HLT_JetE50_NoBPTX3BX_NoHalo_v13',
'HLT_JetE70_NoBPTX3BX_NoHalo_v5',
'HLT_L2Mu10_NoVertex_NoBPTX3BX_NoHalo_v4',
'HLT_L2Mu20_NoVertex_2Cha_NoBPTX3BX_NoHalo_v1',
'HLT_L2Mu20_eta2p1_NoVertex_v2',
'HLT_L2Mu30_NoVertex_2Cha_NoBPTX3BX_NoHalo_v1' ),
OfflineMonitor = ( cms.vstring( 'AlCa_EcalEtaEBonly_v6',
'AlCa_EcalEtaEEonly_v6',
'AlCa_EcalPhiSym_v13',
'AlCa_EcalPi0EBonly_v6',
'AlCa_EcalPi0EEonly_v6',
'AlCa_LumiPixels_Random_v1',
'AlCa_LumiPixels_ZeroBias_v4',
'AlCa_LumiPixels_v8',
'AlCa_RPCMuonNoHits_v9',
'AlCa_RPCMuonNoTriggers_v9',
'AlCa_RPCMuonNormalisation_v9',
'DST_Ele8_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_HT250_v4',
'DST_HT250_v4',
'DST_L1HTT_Or_L1MultiJet_v4',
'DST_Mu5_HT250_v4',
'HLT_Activity_Ecal_SC7_v13',
'HLT_BTagMu_DiJet110_Mu5_v6',
'HLT_BTagMu_DiJet20_Mu5_v6',
'HLT_BTagMu_DiJet40_Mu5_v6',
'HLT_BTagMu_DiJet70_Mu5_v6',
'HLT_BTagMu_Jet20_Mu4_v2',
'HLT_BTagMu_Jet300_Mu5_v6',
'HLT_BTagMu_Jet60_Mu4_v2',
'HLT_BeamGas_HF_Beam1_v5',
'HLT_BeamGas_HF_Beam2_v5',
'HLT_BeamHalo_v13',
'HLT_CleanPFNoPUHT300_Ele15_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_PFMET45_v3',
'HLT_CleanPFNoPUHT300_Ele15_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_PFMET50_v3',
'HLT_CleanPFNoPUHT300_Ele40_CaloIdVT_TrkIdT_v3',
'HLT_CleanPFNoPUHT300_Ele60_CaloIdVT_TrkIdT_v3',
'HLT_CleanPFNoPUHT350_Ele5_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_PFMET45_v3',
'HLT_CleanPFNoPUHT350_Ele5_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_PFMET50_v3',
'HLT_DTErrors_v3',
'HLT_DiCentralJetSumpT100_dPhi05_DiCentralPFJet60_25_PFMET100_HBHENoiseCleaned_v5',
'HLT_DiCentralPFJet30_PFMET80_BTagCSV07_v5',
'HLT_DiCentralPFJet30_PFMET80_v6',
'HLT_DiCentralPFNoPUJet50_PFMETORPFMETNoMu80_v4',
'HLT_DiJet20_MJJ650_AllJets_DEta3p5_HT120_VBF_v1',
'HLT_DiJet30_MJJ700_AllJets_DEta3p5_VBF_v1',
'HLT_DiJet35_MJJ650_AllJets_DEta3p5_VBF_v5',
'HLT_DiJet35_MJJ700_AllJets_DEta3p5_VBF_v5',
'HLT_DiJet35_MJJ750_AllJets_DEta3p5_VBF_v5',
'HLT_DiJet40Eta2p6_BTagIP3DFastPV_v7',
'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v7',
'HLT_DiJet80_DiJet60_DiJet20_v6',
'HLT_DiPFJet40_PFMETnoMu65_MJJ600VBF_LeadingJets_v9',
'HLT_DiPFJet40_PFMETnoMu65_MJJ800VBF_AllJets_v9',
'HLT_DiPFJet80_DiPFJet30_BTagCSVd07d05_v5',
'HLT_DiPFJet80_DiPFJet30_BTagCSVd07d05d03_v5',
'HLT_DiPFJet80_DiPFJet30_BTagCSVd07d05d05_v5',
'HLT_DiPFJetAve140_v10',
'HLT_DiPFJetAve200_v10',
'HLT_DiPFJetAve260_v10',
'HLT_DiPFJetAve320_v10',
'HLT_DiPFJetAve400_v10',
'HLT_DiPFJetAve40_v9',
'HLT_DiPFJetAve80_v10',
'HLT_Dimuon0_Jpsi_Muon_v18',
'HLT_Dimuon0_Jpsi_NoVertexing_v14',
'HLT_Dimuon0_Jpsi_v17',
'HLT_Dimuon0_PsiPrime_v6',
'HLT_Dimuon0_Upsilon_Muon_v18',
'HLT_Dimuon0_Upsilon_v17',
'HLT_Dimuon10_Jpsi_v6',
'HLT_Dimuon11_Upsilon_v6',
'HLT_Dimuon3p5_SameSign_v6',
'HLT_Dimuon5_PsiPrime_v6',
'HLT_Dimuon5_Upsilon_v6',
'HLT_Dimuon7_PsiPrime_v3',
'HLT_Dimuon7_Upsilon_v7',
'HLT_Dimuon8_Jpsi_v7',
'HLT_Dimuon8_Upsilon_v6',
'HLT_DisplacedPhoton65EBOnly_CaloIdVL_IsoL_PFMET30_v4',
'HLT_DisplacedPhoton65_CaloIdVL_IsoL_PFMET25_v4',
'HLT_DoubleDisplacedMu4_DiPFJet40Neutral_v8',
'HLT_DoubleEle10_CaloIdL_TrkIdVL_Ele10_CaloIdT_TrkIdVL_v12',
'HLT_DoubleEle14_CaloIdT_TrkIdVL_Mass8_PFMET40_v8',
'HLT_DoubleEle14_CaloIdT_TrkIdVL_Mass8_PFMET50_v8',
'HLT_DoubleEle33_CaloIdL_GsfTrkIdVL_v7',
'HLT_DoubleEle33_CaloIdL_v14',
'HLT_DoubleEle33_CaloIdT_v10',
'HLT_DoubleEle8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4',
'HLT_DoubleEle8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4',
'HLT_DoubleEle8_CaloIdT_TrkIdVL_v12',
'HLT_DoubleIsoL2Tau30_eta2p1_v1',
'HLT_DoubleJet20_ForwardBackward_v4',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_Jet30_v5',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_Reg_Jet30_v1',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_Reg_v1',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_v4',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Prong1_Reg_v1',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Prong1_v4',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Reg_v1',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_v4',
'HLT_DoubleMu11_Acoplanarity03_v5',
'HLT_DoubleMu14_Mass8_PFMET40_v8',
'HLT_DoubleMu14_Mass8_PFMET50_v8',
'HLT_DoubleMu3_4_Dimuon5_Bs_Central_v5',
'HLT_DoubleMu3p5_4_Dimuon5_Bs_Central_v5',
'HLT_DoubleMu3p5_LowMassNonResonant_Displaced_v6',
'HLT_DoubleMu3p5_LowMass_Displaced_v6',
'HLT_DoubleMu4_Acoplanarity03_v5',
'HLT_DoubleMu4_Dimuon7_Bs_Forward_v5',
'HLT_DoubleMu4_JpsiTk_Displaced_v6',
'HLT_DoubleMu4_Jpsi_Displaced_v12',
'HLT_DoubleMu5_Ele8_CaloIdT_TrkIdVL_v16',
'HLT_DoubleMu5_IsoMu5_v20',
'HLT_DoubleMu8_Ele8_CaloIdT_TrkIdVL_v5',
'HLT_DoubleMu8_Mass8_PFNoPUHT175_v4',
'HLT_DoubleMu8_Mass8_PFNoPUHT225_v4',
'HLT_DoublePhoton40_CaloIdL_Rsq0p035_v6',
'HLT_DoublePhoton40_CaloIdL_Rsq0p06_v6',
'HLT_DoublePhoton48_HEVT_v8',
'HLT_DoublePhoton53_HEVT_v2',
'HLT_DoublePhoton70_v6',
'HLT_DoublePhoton80_v7',
'HLT_DoubleRelIso1p0Mu5_Mass8_PFNoPUHT175_v4',
'HLT_DoubleRelIso1p0Mu5_Mass8_PFNoPUHT225_v4',
'HLT_EightJet30_eta3p0_v5',
'HLT_EightJet35_eta3p0_v5',
'HLT_Ele12_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_DoubleCentralJet65_v4',
'HLT_Ele12_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_RsqMR30_Rsq0p04_MR200_v4',
'HLT_Ele12_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_RsqMR40_Rsq0p04_MR200_v4',
'HLT_Ele13_eta2p1_WP90NoIso_LooseIsoPFTau20_L1ETM36_v1',
'HLT_Ele13_eta2p1_WP90Rho_LooseIsoPFTau20_L1ETM36_v1',
'HLT_Ele13_eta2p1_WP90Rho_LooseIsoPFTau20_v1',
'HLT_Ele15_Ele8_Ele5_CaloIdL_TrkIdVL_v6',
'HLT_Ele17_CaloIdL_CaloIsoVL_v17',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v19',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Jet30_v7',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v6',
'HLT_Ele17_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_Ele8_Mass50_v6',
'HLT_Ele20_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_SC4_Mass50_v7',
'HLT_Ele22_CaloIdL_CaloIsoVL_v6',
'HLT_Ele22_eta2p1_WP90NoIso_LooseIsoPFTau20_v7',
'HLT_Ele22_eta2p1_WP90Rho_LooseIsoPFTau20_v7',
'HLT_Ele23_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_HFT30_v8',
'HLT_Ele24_WP80_CentralPFJet35_CentralPFJet25_PFMET20_v1',
'HLT_Ele24_WP80_CentralPFJet35_CentralPFJet25_v1',
'HLT_Ele24_WP80_PFJet30_PFJet25_Deta3_CentralPFJet30_v1',
'HLT_Ele24_WP80_PFJet30_PFJet25_Deta3_v1',
'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralPFNoPUJet30_BTagIPIter_v9',
'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralPFNoPUJet30_v8',
'HLT_Ele25_CaloIdVT_CaloIsoVL_TrkIdVL_TrkIsoT_DiCentralPFNoPUJet30_v2',
'HLT_Ele25_CaloIdVT_CaloIsoVL_TrkIdVL_TrkIsoT_TriCentralPFNoPUJet30_v4',
'HLT_Ele25_CaloIdVT_CaloIsoVL_TrkIdVL_TrkIsoT_TriCentralPFNoPUJet45_35_25_v2',
'HLT_Ele25_CaloIdVT_CaloIsoVL_TrkIdVL_TrkIsoT_TriCentralPFNoPUJet50_40_30_v4',
'HLT_Ele27_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_v11',
'HLT_Ele27_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele15_CaloIdT_CaloIsoVL_trackless_v8',
'HLT_Ele27_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_HFT15_v8',
'HLT_Ele27_WP80_CentralPFJet80_v9',
'HLT_Ele27_WP80_PFMET_MT50_v7',
'HLT_Ele27_WP80_WCandPt80_v9',
'HLT_Ele27_WP80_v11',
'HLT_Ele30_CaloIdVT_TrkIdT_PFNoPUJet100_PFNoPUJet25_v8',
'HLT_Ele30_CaloIdVT_TrkIdT_PFNoPUJet150_PFNoPUJet25_v8',
'HLT_Ele30_CaloIdVT_TrkIdT_v6',
'HLT_Ele32_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_v11',
'HLT_Ele32_CaloIdT_CaloIsoT_TrkIdT_TrkIsoT_SC17_Mass50_v6',
'HLT_Ele5_SC5_Jpsi_Mass2to15_v4',
'HLT_Ele80_CaloIdVT_GsfTrkIdT_v2',
'HLT_Ele8_CaloIdL_CaloIsoVL_v17',
'HLT_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Jet30_v7',
'HLT_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v15',
'HLT_Ele8_CaloIdT_TrkIdT_DiJet30_v18',
'HLT_Ele8_CaloIdT_TrkIdT_QuadJet30_v18',
'HLT_Ele8_CaloIdT_TrkIdT_TriJet30_v18',
'HLT_Ele8_CaloIdT_TrkIdVL_EG7_v2',
'HLT_Ele8_CaloIdT_TrkIdVL_Jet30_v7',
'HLT_Ele8_CaloIdT_TrkIdVL_v5',
'HLT_Ele90_CaloIdVT_GsfTrkIdT_v2',
'HLT_ExclDiJet35_HFAND_v4',
'HLT_ExclDiJet35_HFOR_v4',
'HLT_ExclDiJet80_HFAND_v4',
'HLT_FatDiPFJetMass750_DR1p1_Deta1p5_v10',
'HLT_GlobalRunHPDNoise_v8',
'HLT_HT200_AlphaT0p57_v8',
'HLT_HT200_v6',
'HLT_HT250_AlphaT0p55_v8',
'HLT_HT250_AlphaT0p57_v8',
'HLT_HT250_v7',
'HLT_HT300_AlphaT0p53_v8',
'HLT_HT300_AlphaT0p54_v14',
'HLT_HT300_DoubleDisplacedPFJet60_ChgFraction10_v10',
'HLT_HT300_DoubleDisplacedPFJet60_v10',
'HLT_HT300_SingleDisplacedPFJet60_ChgFraction10_v10',
'HLT_HT300_SingleDisplacedPFJet60_v10',
'HLT_HT300_v7',
'HLT_HT350_AlphaT0p52_v8',
'HLT_HT350_AlphaT0p53_v19',
'HLT_HT350_v7',
'HLT_HT400_AlphaT0p51_v19',
'HLT_HT400_AlphaT0p52_v14',
'HLT_HT400_v7',
'HLT_HT450_AlphaT0p51_v14',
'HLT_HT450_v7',
'HLT_HT500_v7',
'HLT_HT550_v7',
'HLT_HT650_Track50_dEdx3p6_v10',
'HLT_HT650_Track60_dEdx3p7_v10',
'HLT_HT650_v7',
'HLT_HT750_v7',
'HLT_HcalCalibration_v3',
'HLT_HcalNZS_v10',
'HLT_HcalPhiSym_v11',
'HLT_HcalUTCA_v1',
'HLT_IsoMu12_DoubleCentralJet65_v4',
'HLT_IsoMu12_RsqMR30_Rsq0p04_MR200_v4',
'HLT_IsoMu12_RsqMR40_Rsq0p04_MR200_v4',
'HLT_IsoMu15_eta2p1_L1ETM20_v7',
'HLT_IsoMu15_eta2p1_LooseIsoPFTau35_Trk20_Prong1_L1ETM20_v10',
'HLT_IsoMu17_eta2p1_CentralPFNoPUJet30_BTagIPIter_v4',
'HLT_IsoMu17_eta2p1_CentralPFNoPUJet30_v4',
'HLT_IsoMu17_eta2p1_DiCentralPFNoPUJet30_PFNoPUHT350_PFMHT40_v3',
'HLT_IsoMu17_eta2p1_DiCentralPFNoPUJet30_v4',
'HLT_IsoMu17_eta2p1_LooseIsoPFTau20_v7',
'HLT_IsoMu17_eta2p1_TriCentralPFNoPUJet30_v4',
'HLT_IsoMu17_eta2p1_TriCentralPFNoPUJet45_35_25_v2',
'HLT_IsoMu18_CentralPFJet30_CentralPFJet25_PFMET20_v1',
'HLT_IsoMu18_CentralPFJet30_CentralPFJet25_v1',
'HLT_IsoMu18_PFJet30_PFJet25_Deta3_CentralPFJet25_v1',
'HLT_IsoMu18_PFJet30_PFJet25_Deta3_v1',
'HLT_IsoMu18_eta2p1_MediumIsoPFTau25_Trk1_eta2p1_Reg_v1',
'HLT_IsoMu18_eta2p1_MediumIsoPFTau25_Trk1_eta2p1_v4',
'HLT_IsoMu20_WCandPt80_v4',
'HLT_IsoMu20_eta2p1_CentralPFJet80_v9',
'HLT_IsoMu20_eta2p1_v7',
'HLT_IsoMu24_eta2p1_v15',
'HLT_IsoMu24_v17',
'HLT_IsoMu30_eta2p1_v15',
'HLT_IsoMu30_v11',
'HLT_IsoMu34_eta2p1_v13',
'HLT_IsoMu40_eta2p1_v10',
'HLT_IsoMu8_eta2p1_LooseIsoPFTau20_L1ETM26_v1',
'HLT_IsoMu8_eta2p1_LooseIsoPFTau20_v1',
'HLT_IsoTrackHB_v14',
'HLT_IsoTrackHE_v15',
'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v7',
'HLT_Jet370_NoJetID_v15',
'HLT_Jet60Eta1p7_Jet53Eta1p7_DiBTagIP3DFastPV_v7',
'HLT_Jet80Eta1p7_Jet70Eta1p7_DiBTagIP3DFastPV_v7',
'HLT_JetE30_NoBPTX3BX_NoHalo_v16',
'HLT_JetE30_NoBPTX_v14',
'HLT_JetE50_NoBPTX3BX_NoHalo_v13',
'HLT_JetE70_NoBPTX3BX_NoHalo_v5',
'HLT_L1DoubleEG3_FwdVeto_v2',
'HLT_L1DoubleJet36Central_v7',
'HLT_L1ETM100_v2',
'HLT_L1ETM30_v2',
'HLT_L1ETM40_v2',
'HLT_L1ETM70_v2',
'HLT_L1SingleEG12_v6',
'HLT_L1SingleEG5_v6',
'HLT_L1SingleJet16_v7',
'HLT_L1SingleJet36_v7')+cms.vstring( 'HLT_L1SingleMu12_v2',
'HLT_L1SingleMuOpen_AntiBPTX_v7',
'HLT_L1SingleMuOpen_v7',
'HLT_L1Tech_HBHEHO_totalOR_v6',
'HLT_L1Tech_HCAL_HF_single_channel_v4',
'HLT_L1TrackerCosmics_v7',
'HLT_L2DoubleMu23_NoVertex_2Cha_Angle2p5_v3',
'HLT_L2DoubleMu23_NoVertex_v11',
'HLT_L2DoubleMu38_NoVertex_2Cha_Angle2p5_v3',
'HLT_L2Mu10_NoVertex_NoBPTX3BX_NoHalo_v4',
'HLT_L2Mu20_NoVertex_2Cha_NoBPTX3BX_NoHalo_v1',
'HLT_L2Mu20_eta2p1_NoVertex_v2',
'HLT_L2Mu30_NoVertex_2Cha_NoBPTX3BX_NoHalo_v1',
'HLT_L2Mu70_2Cha_eta2p1_PFMET55_v2',
'HLT_L2Mu70_2Cha_eta2p1_PFMET60_v2',
'HLT_L2TripleMu10_0_0_NoVertex_PFJet40Neutral_v8',
'HLT_LogMonitor_v4',
'HLT_LooseIsoPFTau35_Trk20_Prong1_MET70_v10',
'HLT_LooseIsoPFTau35_Trk20_Prong1_MET75_v10',
'HLT_LooseIsoPFTau35_Trk20_Prong1_v10',
'HLT_MET100_HBHENoiseCleaned_v1',
'HLT_MET120_HBHENoiseCleaned_v6',
'HLT_MET120_v13',
'HLT_MET200_HBHENoiseCleaned_v5',
'HLT_MET200_v12',
'HLT_MET300_HBHENoiseCleaned_v5',
'HLT_MET300_v4',
'HLT_MET400_HBHENoiseCleaned_v5',
'HLT_MET400_v7',
'HLT_MET80_Parked_v5',
'HLT_MET80_Track50_dEdx3p6_v6',
'HLT_MET80_Track60_dEdx3p7_v6',
'HLT_MET80_v5',
'HLT_MonoCentralPFJet80_PFMETnoMu105_NHEF0p95_v4',
'HLT_Mu12_eta2p1_DiCentral_20_v8',
'HLT_Mu12_eta2p1_DiCentral_40_20_BTagIP3D1stTrack_v8',
'HLT_Mu12_eta2p1_DiCentral_40_20_DiBTagIP3D1stTrack_v8',
'HLT_Mu12_eta2p1_DiCentral_40_20_v8',
'HLT_Mu12_eta2p1_L1Mu10erJetC12WdEtaPhi1DiJetsC_v7',
'HLT_Mu12_v18',
'HLT_Mu13_Mu8_NoDZ_v1',
'HLT_Mu13_Mu8_v22',
'HLT_Mu14_Ele14_CaloIdT_TrkIdVL_Mass8_PFMET40_v8',
'HLT_Mu14_Ele14_CaloIdT_TrkIdVL_Mass8_PFMET50_v8',
'HLT_Mu15_TkMu5_Onia_v1',
'HLT_Mu15_eta2p1_DiCentral_20_v1',
'HLT_Mu15_eta2p1_DiCentral_40_20_v1',
'HLT_Mu15_eta2p1_L1ETM20_v5',
'HLT_Mu15_eta2p1_L1Mu10erJetC12WdEtaPhi1DiJetsC_v3',
'HLT_Mu15_eta2p1_TriCentral_40_20_20_BTagIP3D1stTrack_v8',
'HLT_Mu15_eta2p1_TriCentral_40_20_20_DiBTagIP3D1stTrack_v8',
'HLT_Mu15_eta2p1_TriCentral_40_20_20_v8',
'HLT_Mu15_eta2p1_v5',
'HLT_Mu17_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v9',
'HLT_Mu17_Mu8_v22',
'HLT_Mu17_TkMu8_NoDZ_v1',
'HLT_Mu17_TkMu8_v14',
'HLT_Mu17_eta2p1_CentralPFNoPUJet30_BTagIPIter_v4',
'HLT_Mu17_eta2p1_LooseIsoPFTau20_v7',
'HLT_Mu17_eta2p1_TriCentralPFNoPUJet45_35_25_v2',
'HLT_Mu17_v5',
'HLT_Mu18_CentralPFJet30_CentralPFJet25_v1',
'HLT_Mu18_PFJet30_PFJet25_Deta3_CentralPFJet25_v1',
'HLT_Mu22_Photon22_CaloIdL_v7',
'HLT_Mu22_TkMu22_v9',
'HLT_Mu22_TkMu8_v9',
'HLT_Mu24_eta2p1_v5',
'HLT_Mu24_v16',
'HLT_Mu30_Ele30_CaloIdL_v8',
'HLT_Mu30_eta2p1_v5',
'HLT_Mu30_v16',
'HLT_Mu40_PFNoPUHT350_v4',
'HLT_Mu40_eta2p1_Track50_dEdx3p6_v5',
'HLT_Mu40_eta2p1_Track60_dEdx3p7_v5',
'HLT_Mu40_eta2p1_v11',
'HLT_Mu40_v14',
'HLT_Mu50_eta2p1_v8',
'HLT_Mu5_L2Mu3_Jpsi_v8',
'HLT_Mu5_Track2_Jpsi_v21',
'HLT_Mu5_Track3p5_Jpsi_v7',
'HLT_Mu5_v20',
'HLT_Mu60_PFNoPUHT350_v4',
'HLT_Mu7_Ele7_CaloIdT_CaloIsoVL_v7',
'HLT_Mu7_Track7_Jpsi_v20',
'HLT_Mu8_DiJet30_v7',
'HLT_Mu8_DoubleEle8_CaloIdT_TrkIdVL_v7',
'HLT_Mu8_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v9',
'HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Ele8_CaloIdL_TrkIdVL_v7',
'HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4',
'HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4',
'HLT_Mu8_QuadJet30_v7',
'HLT_Mu8_TriJet30_v7',
'HLT_Mu8_eta2p1_LooseIsoPFTau20_L1ETM26_v1',
'HLT_Mu8_v18',
'HLT_PFJet140_v9',
'HLT_PFJet200_v9',
'HLT_PFJet260_v9',
'HLT_PFJet320_v9',
'HLT_PFJet400_v9',
'HLT_PFJet40_v8',
'HLT_PFJet80_v9',
'HLT_PFMET150_v7',
'HLT_PFMET180_v7',
'HLT_PFNoPUHT350_Mu15_PFMET45_v4',
'HLT_PFNoPUHT350_Mu15_PFMET50_v4',
'HLT_PFNoPUHT350_PFMET100_v4',
'HLT_PFNoPUHT350_v4',
'HLT_PFNoPUHT400_Mu5_PFMET45_v4',
'HLT_PFNoPUHT400_Mu5_PFMET50_v4',
'HLT_PFNoPUHT400_PFMET100_v4',
'HLT_PFNoPUHT650_DiCentralPFNoPUJet80_CenPFNoPUJet40_v4',
'HLT_PFNoPUHT650_v4',
'HLT_PFNoPUHT700_v4',
'HLT_PFNoPUHT750_v4',
'HLT_Photon135_v7',
'HLT_Photon150_v4',
'HLT_Photon160_v4',
'HLT_Photon20_CaloIdVL_IsoL_v16',
'HLT_Photon20_CaloIdVL_v4',
'HLT_Photon22_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Photon26_Photon18_v12',
'HLT_Photon26_R9Id85_OR_CaloId10_Iso50_Photon18_R9Id85_OR_CaloId10_Iso50_Mass70_v2',
'HLT_Photon26_R9Id85_OR_CaloId10_Iso50_Photon18_v5',
'HLT_Photon300_NoHE_v5',
'HLT_Photon30_CaloIdVL_v14',
'HLT_Photon30_R9Id90_CaloId_HE10_Iso40_EBOnly_Met25_HBHENoiseCleaned_v1',
'HLT_Photon30_R9Id90_CaloId_HE10_Iso40_EBOnly_v1',
'HLT_Photon30_v1',
'HLT_Photon36_CaloId10_Iso50_Photon22_CaloId10_Iso50_v6',
'HLT_Photon36_CaloId10_Iso50_Photon22_R9Id85_v6',
'HLT_Photon36_Photon22_v6',
'HLT_Photon36_R9Id85_OR_CaloId10_Iso50_Photon10_R9Id85_OR_CaloId10_Iso50_Mass80_v1',
'HLT_Photon36_R9Id85_OR_CaloId10_Iso50_Photon22_R9Id85_OR_CaloId10_Iso50_v6',
'HLT_Photon36_R9Id85_OR_CaloId10_Iso50_Photon22_v5',
'HLT_Photon36_R9Id85_Photon22_CaloId10_Iso50_v6',
'HLT_Photon36_R9Id85_Photon22_R9Id85_v4',
'HLT_Photon36_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Photon40_CaloIdL_RsqMR40_Rsq0p09_MR150_v6',
'HLT_Photon40_CaloIdL_RsqMR45_Rsq0p09_MR150_v6',
'HLT_Photon40_CaloIdL_RsqMR50_Rsq0p09_MR150_v6',
'HLT_Photon50_CaloIdVL_IsoL_v17',
'HLT_Photon50_CaloIdVL_v10',
'HLT_Photon50_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Photon60_CaloIdL_HT300_v4',
'HLT_Photon60_CaloIdL_MHT70_v11',
'HLT_Photon70_CaloIdXL_PFMET100_v7',
'HLT_Photon70_CaloIdXL_PFNoPUHT400_v4',
'HLT_Photon70_CaloIdXL_PFNoPUHT500_v4',
'HLT_Photon75_CaloIdVL_v13',
'HLT_Photon75_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Photon90_CaloIdVL_v10',
'HLT_Photon90_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Physics_v5',
'HLT_PixelTracks_Multiplicity70_v3',
'HLT_PixelTracks_Multiplicity80_v12',
'HLT_PixelTracks_Multiplicity90_v3',
'HLT_QuadJet45_v1',
'HLT_QuadJet50_v5',
'HLT_QuadJet60_DiJet20_v6',
'HLT_QuadJet70_v6',
'HLT_QuadJet75_55_35_20_BTagIP_VBF_v7',
'HLT_QuadJet75_55_35_20_VBF_v1',
'HLT_QuadJet75_55_38_20_BTagIP_VBF_v7',
'HLT_QuadJet80_v6',
'HLT_QuadJet90_v6',
'HLT_QuadPFJet78_61_44_31_BTagCSV_VBF_v6',
'HLT_QuadPFJet78_61_44_31_VBF_v1',
'HLT_QuadPFJet82_65_48_35_BTagCSV_VBF_v6',
'HLT_Random_v2',
'HLT_RelIso1p0Mu20_v3',
'HLT_RelIso1p0Mu5_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4',
'HLT_RelIso1p0Mu5_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4',
'HLT_RelIso1p0Mu5_v6',
'HLT_RsqMR40_Rsq0p04_v6',
'HLT_RsqMR45_Rsq0p09_v5',
'HLT_RsqMR55_Rsq0p09_MR150_v6',
'HLT_RsqMR60_Rsq0p09_MR150_v6',
'HLT_RsqMR65_Rsq0p09_MR150_v5',
'HLT_SingleForJet15_v4',
'HLT_SingleForJet25_v4',
'HLT_SixJet35_v6',
'HLT_SixJet45_v6',
'HLT_SixJet50_v6',
'HLT_Tau2Mu_ItTrack_v7',
'HLT_TripleEle10_CaloIdL_TrkIdVL_v18',
'HLT_TripleMu5_v19',
'HLT_ZeroBiasPixel_DoubleTrack_v2',
'HLT_ZeroBias_v7') ),
OnlineHltMonitor = cms.vstring( 'HLT_DiJet80_DiJet60_DiJet20_v6',
'HLT_DiPFJetAve140_v10',
'HLT_DiPFJetAve200_v10',
'HLT_DiPFJetAve260_v10',
'HLT_DiPFJetAve320_v10',
'HLT_DiPFJetAve400_v10',
'HLT_DiPFJetAve40_v9',
'HLT_DiPFJetAve80_v10',
'HLT_Ele22_CaloIdL_CaloIsoVL_v6',
'HLT_Ele27_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_v11',
'HLT_Ele27_WP80_PFMET_MT50_v7',
'HLT_Ele27_WP80_v11',
'HLT_Ele30_CaloIdVT_TrkIdT_v6',
'HLT_Ele32_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_v11',
'HLT_Ele80_CaloIdVT_GsfTrkIdT_v2',
'HLT_Ele90_CaloIdVT_GsfTrkIdT_v2',
'HLT_IsoMu20_eta2p1_v7',
'HLT_IsoMu24_eta2p1_v15',
'HLT_IsoMu30_eta2p1_v15',
'HLT_IsoMu34_eta2p1_v13',
'HLT_IsoMu40_eta2p1_v10',
'HLT_Jet370_NoJetID_v15',
'HLT_Mu12_v18',
'HLT_Mu15_eta2p1_v5',
'HLT_Mu17_v5',
'HLT_Mu24_eta2p1_v5',
'HLT_Mu30_eta2p1_v5',
'HLT_Mu40_eta2p1_Track50_dEdx3p6_v5',
'HLT_Mu40_eta2p1_Track60_dEdx3p7_v5',
'HLT_Mu40_eta2p1_v11',
'HLT_Mu5_v20',
'HLT_Mu8_v18',
'HLT_PFJet140_v9',
'HLT_PFJet200_v9',
'HLT_PFJet260_v9',
'HLT_PFJet320_v9',
'HLT_PFJet400_v9',
'HLT_PFJet40_v8',
'HLT_PFJet80_v9',
'HLT_RelIso1p0Mu20_v3',
'HLT_RelIso1p0Mu5_v6',
'HLT_SingleForJet15_v4',
'HLT_SingleForJet25_v4' ),
OnlineMonitor = ( cms.vstring( 'HLT_Activity_Ecal_SC7_v13',
'HLT_BTagMu_DiJet110_Mu5_v6',
'HLT_BTagMu_DiJet20_Mu5_v6',
'HLT_BTagMu_DiJet40_Mu5_v6',
'HLT_BTagMu_DiJet70_Mu5_v6',
'HLT_BTagMu_Jet300_Mu5_v6',
'HLT_BeamGas_HF_Beam1_v5',
'HLT_BeamGas_HF_Beam2_v5',
'HLT_BeamHalo_v13',
'HLT_CleanPFNoPUHT300_Ele15_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_PFMET45_v3',
'HLT_CleanPFNoPUHT300_Ele15_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_PFMET50_v3',
'HLT_CleanPFNoPUHT300_Ele40_CaloIdVT_TrkIdT_v3',
'HLT_CleanPFNoPUHT300_Ele60_CaloIdVT_TrkIdT_v3',
'HLT_CleanPFNoPUHT350_Ele5_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_PFMET45_v3',
'HLT_CleanPFNoPUHT350_Ele5_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_PFMET50_v3',
'HLT_DTErrors_v3',
'HLT_DiCentralJetSumpT100_dPhi05_DiCentralPFJet60_25_PFMET100_HBHENoiseCleaned_v5',
'HLT_DiCentralPFJet30_PFMET80_BTagCSV07_v5',
'HLT_DiCentralPFJet30_PFMET80_v6',
'HLT_DiCentralPFNoPUJet50_PFMETORPFMETNoMu80_v4',
'HLT_DiJet40Eta2p6_BTagIP3DFastPV_v7',
'HLT_DiJet80Eta2p6_BTagIP3DFastPVLoose_v7',
'HLT_DiJet80_DiJet60_DiJet20_v6',
'HLT_DiPFJet40_PFMETnoMu65_MJJ600VBF_LeadingJets_v9',
'HLT_DiPFJet40_PFMETnoMu65_MJJ800VBF_AllJets_v9',
'HLT_DiPFJet80_DiPFJet30_BTagCSVd07d05_v5',
'HLT_DiPFJet80_DiPFJet30_BTagCSVd07d05d03_v5',
'HLT_DiPFJet80_DiPFJet30_BTagCSVd07d05d05_v5',
'HLT_DiPFJetAve140_v10',
'HLT_DiPFJetAve200_v10',
'HLT_DiPFJetAve260_v10',
'HLT_DiPFJetAve320_v10',
'HLT_DiPFJetAve400_v10',
'HLT_DiPFJetAve40_v9',
'HLT_DiPFJetAve80_v10',
'HLT_Dimuon0_Jpsi_Muon_v18',
'HLT_Dimuon0_Jpsi_NoVertexing_v14',
'HLT_Dimuon0_Jpsi_v17',
'HLT_Dimuon0_PsiPrime_v6',
'HLT_Dimuon0_Upsilon_Muon_v18',
'HLT_Dimuon0_Upsilon_v17',
'HLT_Dimuon11_Upsilon_v6',
'HLT_Dimuon3p5_SameSign_v6',
'HLT_Dimuon7_Upsilon_v7',
'HLT_DisplacedPhoton65EBOnly_CaloIdVL_IsoL_PFMET30_v4',
'HLT_DisplacedPhoton65_CaloIdVL_IsoL_PFMET25_v4',
'HLT_DoubleDisplacedMu4_DiPFJet40Neutral_v8',
'HLT_DoubleEle10_CaloIdL_TrkIdVL_Ele10_CaloIdT_TrkIdVL_v12',
'HLT_DoubleEle14_CaloIdT_TrkIdVL_Mass8_PFMET40_v8',
'HLT_DoubleEle14_CaloIdT_TrkIdVL_Mass8_PFMET50_v8',
'HLT_DoubleEle33_CaloIdL_GsfTrkIdVL_v7',
'HLT_DoubleEle33_CaloIdL_v14',
'HLT_DoubleEle33_CaloIdT_v10',
'HLT_DoubleEle8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4',
'HLT_DoubleEle8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4',
'HLT_DoubleEle8_CaloIdT_TrkIdVL_v12',
'HLT_DoubleIsoL2Tau30_eta2p1_v1',
'HLT_DoubleJet20_ForwardBackward_v4',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_Jet30_v5',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_Reg_Jet30_v1',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_Reg_v1',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_v4',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Prong1_Reg_v1',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Prong1_v4',
'HLT_DoubleMu11_Acoplanarity03_v5',
'HLT_DoubleMu14_Mass8_PFMET40_v8',
'HLT_DoubleMu14_Mass8_PFMET50_v8',
'HLT_DoubleMu3_4_Dimuon5_Bs_Central_v5',
'HLT_DoubleMu3p5_4_Dimuon5_Bs_Central_v5',
'HLT_DoubleMu4_Acoplanarity03_v5',
'HLT_DoubleMu4_Dimuon7_Bs_Forward_v5',
'HLT_DoubleMu4_JpsiTk_Displaced_v6',
'HLT_DoubleMu4_Jpsi_Displaced_v12',
'HLT_DoubleMu5_Ele8_CaloIdT_TrkIdVL_v16',
'HLT_DoubleMu5_IsoMu5_v20',
'HLT_DoubleMu8_Ele8_CaloIdT_TrkIdVL_v5',
'HLT_DoubleMu8_Mass8_PFNoPUHT175_v4',
'HLT_DoubleMu8_Mass8_PFNoPUHT225_v4',
'HLT_DoublePhoton40_CaloIdL_Rsq0p035_v6',
'HLT_DoublePhoton40_CaloIdL_Rsq0p06_v6',
'HLT_DoublePhoton48_HEVT_v8',
'HLT_DoublePhoton53_HEVT_v2',
'HLT_DoublePhoton70_v6',
'HLT_DoublePhoton80_v7',
'HLT_DoubleRelIso1p0Mu5_Mass8_PFNoPUHT175_v4',
'HLT_DoubleRelIso1p0Mu5_Mass8_PFNoPUHT225_v4',
'HLT_EightJet30_eta3p0_v5',
'HLT_EightJet35_eta3p0_v5',
'HLT_Ele12_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_DoubleCentralJet65_v4',
'HLT_Ele12_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_RsqMR30_Rsq0p04_MR200_v4',
'HLT_Ele12_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_RsqMR40_Rsq0p04_MR200_v4',
'HLT_Ele13_eta2p1_WP90NoIso_LooseIsoPFTau20_L1ETM36_v1',
'HLT_Ele13_eta2p1_WP90Rho_LooseIsoPFTau20_L1ETM36_v1',
'HLT_Ele13_eta2p1_WP90Rho_LooseIsoPFTau20_v1',
'HLT_Ele15_Ele8_Ele5_CaloIdL_TrkIdVL_v6',
'HLT_Ele17_CaloIdL_CaloIsoVL_v17',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v19',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Jet30_v7',
'HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v6',
'HLT_Ele17_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_Ele8_Mass50_v6',
'HLT_Ele20_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_SC4_Mass50_v7',
'HLT_Ele22_CaloIdL_CaloIsoVL_v6',
'HLT_Ele22_eta2p1_WP90NoIso_LooseIsoPFTau20_v7',
'HLT_Ele22_eta2p1_WP90Rho_LooseIsoPFTau20_v7',
'HLT_Ele23_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_HFT30_v8',
'HLT_Ele24_WP80_CentralPFJet35_CentralPFJet25_PFMET20_v1',
'HLT_Ele24_WP80_CentralPFJet35_CentralPFJet25_v1',
'HLT_Ele24_WP80_PFJet30_PFJet25_Deta3_CentralPFJet30_v1',
'HLT_Ele24_WP80_PFJet30_PFJet25_Deta3_v1',
'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralPFNoPUJet30_BTagIPIter_v9',
'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralPFNoPUJet30_v8',
'HLT_Ele25_CaloIdVT_CaloIsoVL_TrkIdVL_TrkIsoT_DiCentralPFNoPUJet30_v2',
'HLT_Ele25_CaloIdVT_CaloIsoVL_TrkIdVL_TrkIsoT_TriCentralPFNoPUJet30_v4',
'HLT_Ele25_CaloIdVT_CaloIsoVL_TrkIdVL_TrkIsoT_TriCentralPFNoPUJet45_35_25_v2',
'HLT_Ele25_CaloIdVT_CaloIsoVL_TrkIdVL_TrkIsoT_TriCentralPFNoPUJet50_40_30_v4',
'HLT_Ele27_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_v11',
'HLT_Ele27_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele15_CaloIdT_CaloIsoVL_trackless_v8',
'HLT_Ele27_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_HFT15_v8',
'HLT_Ele27_WP80_CentralPFJet80_v9',
'HLT_Ele27_WP80_PFMET_MT50_v7',
'HLT_Ele27_WP80_WCandPt80_v9',
'HLT_Ele27_WP80_v11',
'HLT_Ele30_CaloIdVT_TrkIdT_PFNoPUJet100_PFNoPUJet25_v8',
'HLT_Ele30_CaloIdVT_TrkIdT_PFNoPUJet150_PFNoPUJet25_v8',
'HLT_Ele30_CaloIdVT_TrkIdT_v6',
'HLT_Ele32_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_v11',
'HLT_Ele32_CaloIdT_CaloIsoT_TrkIdT_TrkIsoT_SC17_Mass50_v6',
'HLT_Ele5_SC5_Jpsi_Mass2to15_v4',
'HLT_Ele80_CaloIdVT_GsfTrkIdT_v2',
'HLT_Ele8_CaloIdL_CaloIsoVL_v17',
'HLT_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Jet30_v7',
'HLT_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v15',
'HLT_Ele8_CaloIdT_TrkIdT_DiJet30_v18',
'HLT_Ele8_CaloIdT_TrkIdT_QuadJet30_v18',
'HLT_Ele8_CaloIdT_TrkIdT_TriJet30_v18',
'HLT_Ele8_CaloIdT_TrkIdVL_EG7_v2',
'HLT_Ele8_CaloIdT_TrkIdVL_Jet30_v7',
'HLT_Ele8_CaloIdT_TrkIdVL_v5',
'HLT_Ele90_CaloIdVT_GsfTrkIdT_v2',
'HLT_ExclDiJet35_HFAND_v4',
'HLT_ExclDiJet35_HFOR_v4',
'HLT_ExclDiJet80_HFAND_v4',
'HLT_FatDiPFJetMass750_DR1p1_Deta1p5_v10',
'HLT_GlobalRunHPDNoise_v8',
'HLT_HT200_v6',
'HLT_HT250_AlphaT0p55_v8',
'HLT_HT250_AlphaT0p57_v8',
'HLT_HT250_v7',
'HLT_HT300_AlphaT0p53_v8',
'HLT_HT300_AlphaT0p54_v14',
'HLT_HT300_DoubleDisplacedPFJet60_ChgFraction10_v10',
'HLT_HT300_DoubleDisplacedPFJet60_v10',
'HLT_HT300_SingleDisplacedPFJet60_ChgFraction10_v10',
'HLT_HT300_SingleDisplacedPFJet60_v10',
'HLT_HT300_v7',
'HLT_HT350_AlphaT0p52_v8',
'HLT_HT350_AlphaT0p53_v19',
'HLT_HT350_v7',
'HLT_HT400_AlphaT0p51_v19',
'HLT_HT400_AlphaT0p52_v14',
'HLT_HT400_v7',
'HLT_HT450_AlphaT0p51_v14',
'HLT_HT450_v7',
'HLT_HT500_v7',
'HLT_HT550_v7',
'HLT_HT650_Track50_dEdx3p6_v10',
'HLT_HT650_Track60_dEdx3p7_v10',
'HLT_HT650_v7',
'HLT_HT750_v7',
'HLT_HcalNZS_v10',
'HLT_HcalPhiSym_v11',
'HLT_HcalUTCA_v1',
'HLT_IsoMu12_DoubleCentralJet65_v4',
'HLT_IsoMu12_RsqMR30_Rsq0p04_MR200_v4',
'HLT_IsoMu12_RsqMR40_Rsq0p04_MR200_v4',
'HLT_IsoMu15_eta2p1_L1ETM20_v7',
'HLT_IsoMu15_eta2p1_LooseIsoPFTau35_Trk20_Prong1_L1ETM20_v10',
'HLT_IsoMu17_eta2p1_CentralPFNoPUJet30_BTagIPIter_v4',
'HLT_IsoMu17_eta2p1_CentralPFNoPUJet30_v4',
'HLT_IsoMu17_eta2p1_DiCentralPFNoPUJet30_PFNoPUHT350_PFMHT40_v3',
'HLT_IsoMu17_eta2p1_DiCentralPFNoPUJet30_v4',
'HLT_IsoMu17_eta2p1_LooseIsoPFTau20_v7',
'HLT_IsoMu17_eta2p1_TriCentralPFNoPUJet30_v4',
'HLT_IsoMu17_eta2p1_TriCentralPFNoPUJet45_35_25_v2',
'HLT_IsoMu18_CentralPFJet30_CentralPFJet25_PFMET20_v1',
'HLT_IsoMu18_CentralPFJet30_CentralPFJet25_v1',
'HLT_IsoMu18_PFJet30_PFJet25_Deta3_CentralPFJet25_v1',
'HLT_IsoMu18_PFJet30_PFJet25_Deta3_v1',
'HLT_IsoMu18_eta2p1_MediumIsoPFTau25_Trk1_eta2p1_Reg_v1',
'HLT_IsoMu18_eta2p1_MediumIsoPFTau25_Trk1_eta2p1_v4',
'HLT_IsoMu20_WCandPt80_v4',
'HLT_IsoMu20_eta2p1_CentralPFJet80_v9',
'HLT_IsoMu20_eta2p1_v7',
'HLT_IsoMu24_eta2p1_v15',
'HLT_IsoMu24_v17',
'HLT_IsoMu30_eta2p1_v15',
'HLT_IsoMu30_v11',
'HLT_IsoMu34_eta2p1_v13',
'HLT_IsoMu40_eta2p1_v10',
'HLT_IsoMu8_eta2p1_LooseIsoPFTau20_L1ETM26_v1',
'HLT_IsoMu8_eta2p1_LooseIsoPFTau20_v1',
'HLT_IsoTrackHB_v14',
'HLT_IsoTrackHE_v15',
'HLT_Jet160Eta2p4_Jet120Eta2p4_DiBTagIP3DFastPVLoose_v7',
'HLT_Jet370_NoJetID_v15',
'HLT_Jet60Eta1p7_Jet53Eta1p7_DiBTagIP3DFastPV_v7',
'HLT_Jet80Eta1p7_Jet70Eta1p7_DiBTagIP3DFastPV_v7',
'HLT_JetE30_NoBPTX3BX_NoHalo_v16',
'HLT_JetE30_NoBPTX_v14',
'HLT_JetE50_NoBPTX3BX_NoHalo_v13',
'HLT_JetE70_NoBPTX3BX_NoHalo_v5',
'HLT_L1DoubleEG3_FwdVeto_v2',
'HLT_L1DoubleJet36Central_v7',
'HLT_L1ETM100_v2',
'HLT_L1ETM30_v2',
'HLT_L1ETM40_v2',
'HLT_L1ETM70_v2',
'HLT_L1SingleEG12_v6',
'HLT_L1SingleEG5_v6',
'HLT_L1SingleJet16_v7',
'HLT_L1SingleJet36_v7',
'HLT_L1SingleMu12_v2',
'HLT_L1SingleMuOpen_AntiBPTX_v7',
'HLT_L1SingleMuOpen_v7',
'HLT_L1Tech_HBHEHO_totalOR_v6',
'HLT_L1Tech_HCAL_HF_single_channel_v4',
'HLT_L1TrackerCosmics_v7',
'HLT_L2DoubleMu23_NoVertex_2Cha_Angle2p5_v3',
'HLT_L2DoubleMu23_NoVertex_v11',
'HLT_L2DoubleMu38_NoVertex_2Cha_Angle2p5_v3',
'HLT_L2Mu10_NoVertex_NoBPTX3BX_NoHalo_v4',
'HLT_L2Mu20_NoVertex_2Cha_NoBPTX3BX_NoHalo_v1',
'HLT_L2Mu20_eta2p1_NoVertex_v2',
'HLT_L2Mu30_NoVertex_2Cha_NoBPTX3BX_NoHalo_v1',
'HLT_L2Mu70_2Cha_eta2p1_PFMET55_v2',
'HLT_L2Mu70_2Cha_eta2p1_PFMET60_v2',
'HLT_L2TripleMu10_0_0_NoVertex_PFJet40Neutral_v8',
'HLT_LooseIsoPFTau35_Trk20_Prong1_MET70_v10',
'HLT_LooseIsoPFTau35_Trk20_Prong1_MET75_v10',
'HLT_LooseIsoPFTau35_Trk20_Prong1_v10',
'HLT_MET120_HBHENoiseCleaned_v6',
'HLT_MET120_v13',
'HLT_MET200_HBHENoiseCleaned_v5',
'HLT_MET200_v12',
'HLT_MET300_HBHENoiseCleaned_v5',
'HLT_MET300_v4',
'HLT_MET400_HBHENoiseCleaned_v5',
'HLT_MET400_v7',
'HLT_MET80_Track50_dEdx3p6_v6',
'HLT_MET80_Track60_dEdx3p7_v6',
'HLT_MET80_v5',
'HLT_MonoCentralPFJet80_PFMETnoMu105_NHEF0p95_v4',
'HLT_Mu12_eta2p1_DiCentral_20_v8',
'HLT_Mu12_eta2p1_DiCentral_40_20_BTagIP3D1stTrack_v8',
'HLT_Mu12_eta2p1_DiCentral_40_20_DiBTagIP3D1stTrack_v8')+cms.vstring( 'HLT_Mu12_eta2p1_DiCentral_40_20_v8',
'HLT_Mu12_eta2p1_L1Mu10erJetC12WdEtaPhi1DiJetsC_v7',
'HLT_Mu12_v18',
'HLT_Mu13_Mu8_NoDZ_v1',
'HLT_Mu14_Ele14_CaloIdT_TrkIdVL_Mass8_PFMET40_v8',
'HLT_Mu14_Ele14_CaloIdT_TrkIdVL_Mass8_PFMET50_v8',
'HLT_Mu15_eta2p1_DiCentral_20_v1',
'HLT_Mu15_eta2p1_DiCentral_40_20_v1',
'HLT_Mu15_eta2p1_L1ETM20_v5',
'HLT_Mu15_eta2p1_L1Mu10erJetC12WdEtaPhi1DiJetsC_v3',
'HLT_Mu15_eta2p1_TriCentral_40_20_20_BTagIP3D1stTrack_v8',
'HLT_Mu15_eta2p1_TriCentral_40_20_20_DiBTagIP3D1stTrack_v8',
'HLT_Mu15_eta2p1_TriCentral_40_20_20_v8',
'HLT_Mu15_eta2p1_v5',
'HLT_Mu17_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v9',
'HLT_Mu17_Mu8_v22',
'HLT_Mu17_TkMu8_NoDZ_v1',
'HLT_Mu17_TkMu8_v14',
'HLT_Mu17_eta2p1_CentralPFNoPUJet30_BTagIPIter_v4',
'HLT_Mu17_eta2p1_LooseIsoPFTau20_v7',
'HLT_Mu17_eta2p1_TriCentralPFNoPUJet45_35_25_v2',
'HLT_Mu17_v5',
'HLT_Mu18_CentralPFJet30_CentralPFJet25_v1',
'HLT_Mu18_PFJet30_PFJet25_Deta3_CentralPFJet25_v1',
'HLT_Mu22_Photon22_CaloIdL_v7',
'HLT_Mu22_TkMu22_v9',
'HLT_Mu22_TkMu8_v9',
'HLT_Mu24_eta2p1_v5',
'HLT_Mu24_v16',
'HLT_Mu30_Ele30_CaloIdL_v8',
'HLT_Mu30_eta2p1_v5',
'HLT_Mu30_v16',
'HLT_Mu40_PFNoPUHT350_v4',
'HLT_Mu40_eta2p1_Track50_dEdx3p6_v5',
'HLT_Mu40_eta2p1_Track60_dEdx3p7_v5',
'HLT_Mu40_eta2p1_v11',
'HLT_Mu40_v14',
'HLT_Mu50_eta2p1_v8',
'HLT_Mu5_L2Mu3_Jpsi_v8',
'HLT_Mu5_Track2_Jpsi_v21',
'HLT_Mu5_Track3p5_Jpsi_v7',
'HLT_Mu5_v20',
'HLT_Mu60_PFNoPUHT350_v4',
'HLT_Mu7_Ele7_CaloIdT_CaloIsoVL_v7',
'HLT_Mu7_Track7_Jpsi_v20',
'HLT_Mu8_DiJet30_v7',
'HLT_Mu8_DoubleEle8_CaloIdT_TrkIdVL_v7',
'HLT_Mu8_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v9',
'HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Ele8_CaloIdL_TrkIdVL_v7',
'HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4',
'HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4',
'HLT_Mu8_QuadJet30_v7',
'HLT_Mu8_TriJet30_v7',
'HLT_Mu8_eta2p1_LooseIsoPFTau20_L1ETM26_v1',
'HLT_Mu8_v18',
'HLT_PFJet140_v9',
'HLT_PFJet200_v9',
'HLT_PFJet260_v9',
'HLT_PFJet320_v9',
'HLT_PFJet400_v9',
'HLT_PFJet40_v8',
'HLT_PFJet80_v9',
'HLT_PFMET150_v7',
'HLT_PFMET180_v7',
'HLT_PFNoPUHT350_Mu15_PFMET45_v4',
'HLT_PFNoPUHT350_Mu15_PFMET50_v4',
'HLT_PFNoPUHT350_PFMET100_v4',
'HLT_PFNoPUHT350_v4',
'HLT_PFNoPUHT400_Mu5_PFMET45_v4',
'HLT_PFNoPUHT400_Mu5_PFMET50_v4',
'HLT_PFNoPUHT400_PFMET100_v4',
'HLT_PFNoPUHT650_DiCentralPFNoPUJet80_CenPFNoPUJet40_v4',
'HLT_PFNoPUHT650_v4',
'HLT_PFNoPUHT700_v4',
'HLT_PFNoPUHT750_v4',
'HLT_Photon135_v7',
'HLT_Photon150_v4',
'HLT_Photon160_v4',
'HLT_Photon20_CaloIdVL_IsoL_v16',
'HLT_Photon20_CaloIdVL_v4',
'HLT_Photon22_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Photon26_Photon18_v12',
'HLT_Photon26_R9Id85_OR_CaloId10_Iso50_Photon18_R9Id85_OR_CaloId10_Iso50_Mass70_v2',
'HLT_Photon26_R9Id85_OR_CaloId10_Iso50_Photon18_v5',
'HLT_Photon300_NoHE_v5',
'HLT_Photon30_CaloIdVL_v14',
'HLT_Photon36_CaloId10_Iso50_Photon22_CaloId10_Iso50_v6',
'HLT_Photon36_CaloId10_Iso50_Photon22_R9Id85_v6',
'HLT_Photon36_Photon22_v6',
'HLT_Photon36_R9Id85_OR_CaloId10_Iso50_Photon10_R9Id85_OR_CaloId10_Iso50_Mass80_v1',
'HLT_Photon36_R9Id85_OR_CaloId10_Iso50_Photon22_R9Id85_OR_CaloId10_Iso50_v6',
'HLT_Photon36_R9Id85_OR_CaloId10_Iso50_Photon22_v5',
'HLT_Photon36_R9Id85_Photon22_CaloId10_Iso50_v6',
'HLT_Photon36_R9Id85_Photon22_R9Id85_v4',
'HLT_Photon36_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Photon40_CaloIdL_RsqMR40_Rsq0p09_MR150_v6',
'HLT_Photon40_CaloIdL_RsqMR45_Rsq0p09_MR150_v6',
'HLT_Photon40_CaloIdL_RsqMR50_Rsq0p09_MR150_v6',
'HLT_Photon50_CaloIdVL_IsoL_v17',
'HLT_Photon50_CaloIdVL_v10',
'HLT_Photon50_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Photon60_CaloIdL_HT300_v4',
'HLT_Photon60_CaloIdL_MHT70_v11',
'HLT_Photon70_CaloIdXL_PFMET100_v7',
'HLT_Photon70_CaloIdXL_PFNoPUHT400_v4',
'HLT_Photon70_CaloIdXL_PFNoPUHT500_v4',
'HLT_Photon75_CaloIdVL_v13',
'HLT_Photon75_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Photon90_CaloIdVL_v10',
'HLT_Photon90_R9Id90_HE10_Iso40_EBOnly_v5',
'HLT_Physics_v5',
'HLT_PixelTracks_Multiplicity70_v3',
'HLT_PixelTracks_Multiplicity80_v12',
'HLT_PixelTracks_Multiplicity90_v3',
'HLT_QuadJet60_DiJet20_v6',
'HLT_QuadJet70_v6',
'HLT_QuadJet75_55_35_20_BTagIP_VBF_v7',
'HLT_QuadJet75_55_35_20_VBF_v1',
'HLT_QuadJet75_55_38_20_BTagIP_VBF_v7',
'HLT_QuadJet80_v6',
'HLT_QuadJet90_v6',
'HLT_QuadPFJet78_61_44_31_BTagCSV_VBF_v6',
'HLT_QuadPFJet78_61_44_31_VBF_v1',
'HLT_QuadPFJet82_65_48_35_BTagCSV_VBF_v6',
'HLT_Random_v2',
'HLT_RelIso1p0Mu20_v3',
'HLT_RelIso1p0Mu5_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4',
'HLT_RelIso1p0Mu5_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4',
'HLT_RelIso1p0Mu5_v6',
'HLT_RsqMR40_Rsq0p04_v6',
'HLT_RsqMR55_Rsq0p09_MR150_v6',
'HLT_RsqMR60_Rsq0p09_MR150_v6',
'HLT_RsqMR65_Rsq0p09_MR150_v5',
'HLT_SingleForJet15_v4',
'HLT_SingleForJet25_v4',
'HLT_SixJet35_v6',
'HLT_SixJet45_v6',
'HLT_SixJet50_v6',
'HLT_Tau2Mu_ItTrack_v7',
'HLT_TripleEle10_CaloIdL_TrkIdVL_v18',
'HLT_TripleMu5_v19',
'HLT_ZeroBiasPixel_DoubleTrack_v2',
'HLT_ZeroBias_v7') ),
ParkingMonitor = cms.vstring( 'HLT_BTagMu_Jet20_Mu4_v2',
'HLT_BTagMu_Jet60_Mu4_v2',
'HLT_DiJet20_MJJ650_AllJets_DEta3p5_HT120_VBF_v1',
'HLT_DiJet30_MJJ700_AllJets_DEta3p5_VBF_v1',
'HLT_DiJet35_MJJ650_AllJets_DEta3p5_VBF_v5',
'HLT_DiJet35_MJJ700_AllJets_DEta3p5_VBF_v5',
'HLT_DiJet35_MJJ750_AllJets_DEta3p5_VBF_v5',
'HLT_Dimuon10_Jpsi_v6',
'HLT_Dimuon5_PsiPrime_v6',
'HLT_Dimuon5_Upsilon_v6',
'HLT_Dimuon7_PsiPrime_v3',
'HLT_Dimuon8_Jpsi_v7',
'HLT_Dimuon8_Upsilon_v6',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Reg_v1',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_v4',
'HLT_DoubleMu3p5_LowMassNonResonant_Displaced_v6',
'HLT_DoubleMu3p5_LowMass_Displaced_v6',
'HLT_HT200_AlphaT0p57_v8',
'HLT_MET100_HBHENoiseCleaned_v1',
'HLT_MET80_Parked_v5',
'HLT_Mu13_Mu8_v22',
'HLT_Mu15_TkMu5_Onia_v1',
'HLT_Photon30_R9Id90_CaloId_HE10_Iso40_EBOnly_Met25_HBHENoiseCleaned_v1',
'HLT_Photon30_R9Id90_CaloId_HE10_Iso40_EBOnly_v1',
'HLT_Photon30_v1',
'HLT_Physics_Parked_v1',
'HLT_QuadJet45_v1',
'HLT_QuadJet50_v5',
'HLT_RsqMR45_Rsq0p09_v5',
'HLT_ZeroBias_Parked_v1' ),
PhotonHad = cms.vstring( 'HLT_Photon40_CaloIdL_RsqMR40_Rsq0p09_MR150_v6',
'HLT_Photon40_CaloIdL_RsqMR45_Rsq0p09_MR150_v6',
'HLT_Photon40_CaloIdL_RsqMR50_Rsq0p09_MR150_v6',
'HLT_Photon60_CaloIdL_HT300_v4',
'HLT_Photon60_CaloIdL_MHT70_v11',
'HLT_Photon70_CaloIdXL_PFMET100_v7',
'HLT_Photon70_CaloIdXL_PFNoPUHT400_v4',
'HLT_Photon70_CaloIdXL_PFNoPUHT500_v4' ),
RPCMonitor = cms.vstring( 'AlCa_RPCMuonNoHits_v9',
'AlCa_RPCMuonNoTriggers_v9',
'AlCa_RPCMuonNormalisation_v9' ),
SingleElectron = cms.vstring( 'HLT_Ele22_CaloIdL_CaloIsoVL_v6',
'HLT_Ele24_WP80_CentralPFJet35_CentralPFJet25_PFMET20_v1',
'HLT_Ele24_WP80_CentralPFJet35_CentralPFJet25_v1',
'HLT_Ele24_WP80_PFJet30_PFJet25_Deta3_CentralPFJet30_v1',
'HLT_Ele24_WP80_PFJet30_PFJet25_Deta3_v1',
'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralPFNoPUJet30_BTagIPIter_v9',
'HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralPFNoPUJet30_v8',
'HLT_Ele25_CaloIdVT_CaloIsoVL_TrkIdVL_TrkIsoT_DiCentralPFNoPUJet30_v2',
'HLT_Ele25_CaloIdVT_CaloIsoVL_TrkIdVL_TrkIsoT_TriCentralPFNoPUJet30_v4',
'HLT_Ele25_CaloIdVT_CaloIsoVL_TrkIdVL_TrkIsoT_TriCentralPFNoPUJet45_35_25_v2',
'HLT_Ele25_CaloIdVT_CaloIsoVL_TrkIdVL_TrkIsoT_TriCentralPFNoPUJet50_40_30_v4',
'HLT_Ele27_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_v11',
'HLT_Ele27_WP80_CentralPFJet80_v9',
'HLT_Ele27_WP80_PFMET_MT50_v7',
'HLT_Ele27_WP80_WCandPt80_v9',
'HLT_Ele27_WP80_v11',
'HLT_Ele30_CaloIdVT_TrkIdT_v6',
'HLT_Ele32_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_v11',
'HLT_Ele80_CaloIdVT_GsfTrkIdT_v2',
'HLT_Ele90_CaloIdVT_GsfTrkIdT_v2' ),
SingleMu = cms.vstring( 'HLT_IsoMu17_eta2p1_CentralPFNoPUJet30_BTagIPIter_v4',
'HLT_IsoMu17_eta2p1_CentralPFNoPUJet30_v4',
'HLT_IsoMu17_eta2p1_DiCentralPFNoPUJet30_v4',
'HLT_IsoMu17_eta2p1_TriCentralPFNoPUJet30_v4',
'HLT_IsoMu17_eta2p1_TriCentralPFNoPUJet45_35_25_v2',
'HLT_IsoMu18_CentralPFJet30_CentralPFJet25_PFMET20_v1',
'HLT_IsoMu18_CentralPFJet30_CentralPFJet25_v1',
'HLT_IsoMu18_PFJet30_PFJet25_Deta3_CentralPFJet25_v1',
'HLT_IsoMu18_PFJet30_PFJet25_Deta3_v1',
'HLT_IsoMu20_WCandPt80_v4',
'HLT_IsoMu20_eta2p1_CentralPFJet80_v9',
'HLT_IsoMu20_eta2p1_v7',
'HLT_IsoMu24_eta2p1_v15',
'HLT_IsoMu24_v17',
'HLT_IsoMu30_eta2p1_v15',
'HLT_IsoMu30_v11',
'HLT_IsoMu34_eta2p1_v13',
'HLT_IsoMu40_eta2p1_v10',
'HLT_L2Mu70_2Cha_eta2p1_PFMET55_v2',
'HLT_L2Mu70_2Cha_eta2p1_PFMET60_v2',
'HLT_Mu12_eta2p1_DiCentral_20_v8',
'HLT_Mu12_eta2p1_DiCentral_40_20_BTagIP3D1stTrack_v8',
'HLT_Mu12_eta2p1_DiCentral_40_20_DiBTagIP3D1stTrack_v8',
'HLT_Mu12_eta2p1_DiCentral_40_20_v8',
'HLT_Mu12_eta2p1_L1Mu10erJetC12WdEtaPhi1DiJetsC_v7',
'HLT_Mu12_v18',
'HLT_Mu15_eta2p1_DiCentral_20_v1',
'HLT_Mu15_eta2p1_DiCentral_40_20_v1',
'HLT_Mu15_eta2p1_L1Mu10erJetC12WdEtaPhi1DiJetsC_v3',
'HLT_Mu15_eta2p1_TriCentral_40_20_20_BTagIP3D1stTrack_v8',
'HLT_Mu15_eta2p1_TriCentral_40_20_20_DiBTagIP3D1stTrack_v8',
'HLT_Mu15_eta2p1_TriCentral_40_20_20_v8',
'HLT_Mu15_eta2p1_v5',
'HLT_Mu17_eta2p1_CentralPFNoPUJet30_BTagIPIter_v4',
'HLT_Mu17_eta2p1_TriCentralPFNoPUJet45_35_25_v2',
'HLT_Mu18_CentralPFJet30_CentralPFJet25_v1',
'HLT_Mu18_PFJet30_PFJet25_Deta3_CentralPFJet25_v1',
'HLT_Mu24_eta2p1_v5',
'HLT_Mu24_v16',
'HLT_Mu30_eta2p1_v5',
'HLT_Mu30_v16',
'HLT_Mu40_eta2p1_Track50_dEdx3p6_v5',
'HLT_Mu40_eta2p1_Track60_dEdx3p7_v5',
'HLT_Mu40_eta2p1_v11',
'HLT_Mu40_v14',
'HLT_Mu50_eta2p1_v8',
'HLT_Mu5_v20',
'HLT_RelIso1p0Mu20_v3',
'HLT_RelIso1p0Mu5_v6' ),
SinglePhoton = cms.vstring( 'HLT_DisplacedPhoton65EBOnly_CaloIdVL_IsoL_PFMET30_v4',
'HLT_DisplacedPhoton65_CaloIdVL_IsoL_PFMET25_v4',
'HLT_L1DoubleEG3_FwdVeto_v2',
'HLT_Photon135_v7',
'HLT_Photon150_v4',
'HLT_Photon160_v4',
'HLT_Photon20_CaloIdVL_IsoL_v16',
'HLT_Photon20_CaloIdVL_v4',
'HLT_Photon300_NoHE_v5',
'HLT_Photon30_CaloIdVL_v14',
'HLT_Photon50_CaloIdVL_IsoL_v17',
'HLT_Photon50_CaloIdVL_v10',
'HLT_Photon75_CaloIdVL_v13',
'HLT_Photon90_CaloIdVL_v10' ),
SinglePhotonParked = cms.vstring( 'HLT_DisplacedPhoton65EBOnly_CaloIdVL_IsoL_PFMET30_v4',
'HLT_DisplacedPhoton65_CaloIdVL_IsoL_PFMET25_v4',
'HLT_L1DoubleEG3_FwdVeto_v2',
'HLT_Photon135_v7',
'HLT_Photon150_v4',
'HLT_Photon160_v4',
'HLT_Photon20_CaloIdVL_IsoL_v16',
'HLT_Photon20_CaloIdVL_v4',
'HLT_Photon300_NoHE_v5',
'HLT_Photon30_CaloIdVL_v14',
'HLT_Photon30_R9Id90_CaloId_HE10_Iso40_EBOnly_Met25_HBHENoiseCleaned_v1',
'HLT_Photon30_R9Id90_CaloId_HE10_Iso40_EBOnly_v1',
'HLT_Photon30_v1',
'HLT_Photon50_CaloIdVL_IsoL_v17',
'HLT_Photon50_CaloIdVL_v10',
'HLT_Photon75_CaloIdVL_v13',
'HLT_Photon90_CaloIdVL_v10' ),
Tau = cms.vstring( 'HLT_DoubleIsoL2Tau30_eta2p1_v1',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_Jet30_v5',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_Reg_Jet30_v1',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_Reg_v1',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_v4',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Prong1_Reg_v1',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Prong1_v4',
'HLT_LooseIsoPFTau35_Trk20_Prong1_MET70_v10',
'HLT_LooseIsoPFTau35_Trk20_Prong1_MET75_v10',
'HLT_LooseIsoPFTau35_Trk20_Prong1_v10' ),
TauParked = cms.vstring( 'HLT_DoubleIsoL2Tau30_eta2p1_v1',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_Jet30_v5',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_Reg_Jet30_v1',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_Reg_v1',
'HLT_DoubleMediumIsoPFTau30_Trk1_eta2p1_v4',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Prong1_Reg_v1',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Prong1_v4',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_Reg_v1',
'HLT_DoubleMediumIsoPFTau35_Trk1_eta2p1_v4',
'HLT_LooseIsoPFTau35_Trk20_Prong1_MET70_v10',
'HLT_LooseIsoPFTau35_Trk20_Prong1_MET75_v10',
'HLT_LooseIsoPFTau35_Trk20_Prong1_v10' ),
TauPlusX = cms.vstring( 'HLT_Ele13_eta2p1_WP90NoIso_LooseIsoPFTau20_L1ETM36_v1',
'HLT_Ele13_eta2p1_WP90Rho_LooseIsoPFTau20_L1ETM36_v1',
'HLT_Ele13_eta2p1_WP90Rho_LooseIsoPFTau20_v1',
'HLT_Ele22_eta2p1_WP90NoIso_LooseIsoPFTau20_v7',
'HLT_Ele22_eta2p1_WP90Rho_LooseIsoPFTau20_v7',
'HLT_IsoMu15_eta2p1_L1ETM20_v7',
'HLT_IsoMu15_eta2p1_LooseIsoPFTau35_Trk20_Prong1_L1ETM20_v10',
'HLT_IsoMu17_eta2p1_LooseIsoPFTau20_v7',
'HLT_IsoMu18_eta2p1_MediumIsoPFTau25_Trk1_eta2p1_Reg_v1',
'HLT_IsoMu18_eta2p1_MediumIsoPFTau25_Trk1_eta2p1_v4',
'HLT_IsoMu8_eta2p1_LooseIsoPFTau20_L1ETM26_v1',
'HLT_IsoMu8_eta2p1_LooseIsoPFTau20_v1',
'HLT_Mu15_eta2p1_L1ETM20_v5',
'HLT_Mu17_eta2p1_LooseIsoPFTau20_v7',
'HLT_Mu8_eta2p1_LooseIsoPFTau20_L1ETM26_v1' ),
TestEnablesEcalHcalDT = cms.vstring( 'HLT_DTCalibration_v2',
'HLT_EcalCalibration_v3',
'HLT_HcalCalibration_v3' ),
TestEnablesTracker = cms.vstring( 'HLT_TrackerCalibration_v3' ),
VBF1Parked = cms.vstring( 'HLT_DiJet20_MJJ650_AllJets_DEta3p5_HT120_VBF_v1',
'HLT_DiJet30_MJJ700_AllJets_DEta3p5_VBF_v1',
'HLT_DiJet35_MJJ650_AllJets_DEta3p5_VBF_v5',
'HLT_DiJet35_MJJ700_AllJets_DEta3p5_VBF_v5',
'HLT_DiJet35_MJJ750_AllJets_DEta3p5_VBF_v5' ),
ZeroBiasParked = cms.vstring( 'HLT_ZeroBias_Parked_v1' )
)
# Conditions ("GlobalTag") event-setup source: loads the calibration/alignment
# records of global tag "GR_H_V32" from the CMS conditions database, reached
# through a local Frontier caching chain (squid proxy on localhost:3128,
# Frontier server on localhost:8000 — see `connect` below).
# RefreshEachRun / ReconnectEachRun presumably re-read conditions and
# re-establish the DB connection at run boundaries — name-based, verify
# against PoolDBESSource documentation.
process.GlobalTag = cms.ESSource( "PoolDBESSource",
    globaltag = cms.string( "GR_H_V32::All" ),
    RefreshEachRun = cms.untracked.bool( True ),
    RefreshOpenIOVs = cms.untracked.bool( False ),
    toGet = cms.VPSet(
    ),
    # Low-level database connection tuning (timeouts/retries in seconds).
    DBParameters = cms.PSet(
      authenticationPath = cms.untracked.string( "." ),
      connectionRetrialTimeOut = cms.untracked.int32( 60 ),
      idleConnectionCleanupPeriod = cms.untracked.int32( 10 ),
      messageLevel = cms.untracked.int32( 0 ),
      enablePoolAutomaticCleanUp = cms.untracked.bool( False ),
      enableConnectionSharing = cms.untracked.bool( True ),
      enableReadOnlySessionOnUpdateConnection = cms.untracked.bool( False ),
      connectionTimeOut = cms.untracked.int32( 0 ),
      connectionRetrialPeriod = cms.untracked.int32( 10 )
    ),
    RefreshAlways = cms.untracked.bool( False ),
    connect = cms.string( "frontier://(proxyurl=http://localhost:3128)(serverurl=http://localhost:8000/FrontierOnProd)(serverurl=http://localhost:8000/FrontierOnProd)(retrieve-ziplevel=0)/CMS_COND_31X_GLOBALTAG" ),
    ReconnectEachRun = cms.untracked.bool( True ),
    BlobStreamerName = cms.untracked.string( "TBufferBlobStreamingService" ),
    timetype = cms.string( "runnumber" )
)
# Particle data table: provides PDG particle properties from the bundled
# Pythia particle-table file.
process.HepPDTESSource = cms.ESSource( "HepPDTESSource",
    pdtFileName = cms.FileInPath( "SimGeneral/HepPDTESSource/data/pythiaparticle.tbl" )
)
# Registers a validity interval (run-based IOV, valid from run 1) for the
# ECAL mapping record; an EmptyESSource supplies no payload of its own.
process.eegeom = cms.ESSource( "EmptyESSource",
    iovIsRunNotTime = cms.bool( True ),
    recordName = cms.string( "EcalMappingRcd" ),
    firstValid = cms.vuint32( 1 )
)
process.es_hardcode = cms.ESSource( "HcalHardcodeCalibrations",
fromDDD = cms.untracked.bool( False ),
toGet = cms.untracked.vstring( 'GainWidths' )
)
process.hltESSBTagRecord = cms.ESSource( "EmptyESSource",
iovIsRunNotTime = cms.bool( True ),
recordName = cms.string( "JetTagComputerRecord" ),
firstValid = cms.vuint32( 1 )
)
process.hltESSEcalSeverityLevel = cms.ESSource( "EmptyESSource",
iovIsRunNotTime = cms.bool( True ),
recordName = cms.string( "EcalSeverityLevelAlgoRcd" ),
firstValid = cms.vuint32( 1 )
)
process.hltESSHcalSeverityLevel = cms.ESSource( "EmptyESSource",
iovIsRunNotTime = cms.bool( True ),
recordName = cms.string( "HcalSeverityLevelComputerRcd" ),
firstValid = cms.vuint32( 1 )
)
process.magfield = cms.ESSource( "XMLIdealGeometryESSource",
geomXMLFiles = cms.vstring( 'Geometry/CMSCommonData/data/normal/cmsextent.xml',
'Geometry/CMSCommonData/data/cms.xml',
'Geometry/CMSCommonData/data/cmsMagneticField.xml',
'MagneticField/GeomBuilder/data/MagneticFieldVolumes_1103l.xml',
'MagneticField/GeomBuilder/data/MagneticFieldParameters_07_2pi.xml' ),
rootNodeName = cms.string( "cmsMagneticField:MAGF" )
)
process.hltIter4ESPTrajectoryBuilderITReg = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltIter4ESPTrajectoryFilterIT" ),
maxCand = cms.int32( 1 ),
ComponentName = cms.string( "hltIter4ESPTrajectoryBuilderITReg" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
MeasurementTrackerName = cms.string( "hltIter4ESPMeasurementTrackerReg" ),
estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( False ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltIter4ESPMeasurementTrackerReg = cms.ESProducer( "MeasurementTrackerESProducer",
StripCPE = cms.string( "StripCPEfromTrackAngle" ),
inactivePixelDetectorLabels = cms.VInputTag( ),
PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
OnDemand = cms.bool( True ),
Regional = cms.bool( True ),
UsePixelModuleQualityDB = cms.bool( True ),
pixelClusterProducer = cms.string( "hltSiPixelClustersReg" ),
switchOffPixelsIfEmpty = cms.bool( True ),
inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
MaskBadAPVFibers = cms.bool( True ),
UseStripStripQualityDB = cms.bool( True ),
UsePixelROCQualityDB = cms.bool( True ),
DebugPixelROCQualityDB = cms.untracked.bool( False ),
UseStripAPVFiberQualityDB = cms.bool( True ),
stripClusterProducer = cms.string( "hltIter4SiStripClustersReg" ),
DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
DebugStripStripQualityDB = cms.untracked.bool( False ),
SiStripQualityLabel = cms.string( "" ),
badStripCuts = cms.PSet(
TOB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TID = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TEC = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TIB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
)
),
DebugStripModuleQualityDB = cms.untracked.bool( False ),
ComponentName = cms.string( "hltIter4ESPMeasurementTrackerReg" ),
DebugPixelModuleQualityDB = cms.untracked.bool( False ),
HitMatcher = cms.string( "StandardMatcher" ),
skipClusters = cms.InputTag( "hltIter4ClustersRefRemovalReg" ),
UseStripModuleQualityDB = cms.bool( True ),
UseStripNoiseDB = cms.bool( False ),
UseStripCablingDB = cms.bool( False )
)
process.hltIter3ESPTrajectoryBuilderITReg = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltIter3ESPTrajectoryFilterIT" ),
maxCand = cms.int32( 1 ),
ComponentName = cms.string( "hltIter3ESPTrajectoryBuilderITReg" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
MeasurementTrackerName = cms.string( "hltIter3ESPMeasurementTrackerReg" ),
estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( False ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltIter3ESPMeasurementTrackerReg = cms.ESProducer( "MeasurementTrackerESProducer",
StripCPE = cms.string( "StripCPEfromTrackAngle" ),
inactivePixelDetectorLabels = cms.VInputTag( ),
PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
OnDemand = cms.bool( True ),
Regional = cms.bool( True ),
UsePixelModuleQualityDB = cms.bool( True ),
pixelClusterProducer = cms.string( "hltSiPixelClustersReg" ),
switchOffPixelsIfEmpty = cms.bool( True ),
inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
MaskBadAPVFibers = cms.bool( True ),
UseStripStripQualityDB = cms.bool( True ),
UsePixelROCQualityDB = cms.bool( True ),
DebugPixelROCQualityDB = cms.untracked.bool( False ),
UseStripAPVFiberQualityDB = cms.bool( True ),
stripClusterProducer = cms.string( "hltIter3SiStripClustersReg" ),
DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
DebugStripStripQualityDB = cms.untracked.bool( False ),
SiStripQualityLabel = cms.string( "" ),
badStripCuts = cms.PSet(
TOB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TID = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TEC = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TIB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
)
),
DebugStripModuleQualityDB = cms.untracked.bool( False ),
ComponentName = cms.string( "hltIter3ESPMeasurementTrackerReg" ),
DebugPixelModuleQualityDB = cms.untracked.bool( False ),
HitMatcher = cms.string( "StandardMatcher" ),
skipClusters = cms.InputTag( "hltIter3ClustersRefRemovalReg" ),
UseStripModuleQualityDB = cms.bool( True ),
UseStripNoiseDB = cms.bool( False ),
UseStripCablingDB = cms.bool( False )
)
process.hltIter3ESPLayerTripletsReg = cms.ESProducer( "SeedingLayersESProducer",
layerList = cms.vstring( 'BPix1+BPix2+BPix3',
'BPix1+BPix2+FPix1_pos',
'BPix1+BPix2+FPix1_neg',
'BPix1+FPix1_pos+FPix2_pos',
'BPix1+FPix1_neg+FPix2_neg',
'BPix2+FPix1_pos+FPix2_pos',
'BPix2+FPix1_neg+FPix2_neg',
'FPix1_pos+FPix2_pos+TEC1_pos',
'FPix1_neg+FPix2_neg+TEC1_neg',
'FPix2_pos+TEC2_pos+TEC3_pos',
'FPix2_neg+TEC2_neg+TEC3_neg',
'BPix2+BPix3+TIB1',
'BPix2+BPix3+TIB2',
'BPix1+BPix3+TIB1',
'BPix1+BPix3+TIB2',
'BPix1+BPix2+TIB1',
'BPix1+BPix2+TIB2' ),
ComponentName = cms.string( "hltIter3ESPLayerTripletsReg" ),
TEC = cms.PSet(
useRingSlector = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
minRing = cms.int32( 1 ),
maxRing = cms.int32( 1 )
),
FPix = cms.PSet(
HitProducer = cms.string( "hltSiPixelRecHitsReg" ),
hitErrorRZ = cms.double( 0.0036 ),
useErrorsFromParam = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
skipClusters = cms.InputTag( "hltIter3ClustersRefRemovalReg" ),
hitErrorRPhi = cms.double( 0.0051 )
),
TID = cms.PSet( ),
BPix = cms.PSet(
HitProducer = cms.string( "hltSiPixelRecHitsReg" ),
hitErrorRZ = cms.double( 0.0060 ),
useErrorsFromParam = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
skipClusters = cms.InputTag( "hltIter3ClustersRefRemovalReg" ),
hitErrorRPhi = cms.double( 0.0027 )
),
TIB = cms.PSet( TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ) ),
TOB = cms.PSet( )
)
process.hltIter2ESPTrajectoryBuilderITReg = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltIter2ESPTrajectoryFilterIT" ),
maxCand = cms.int32( 2 ),
ComponentName = cms.string( "hltIter2ESPTrajectoryBuilderITReg" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
MeasurementTrackerName = cms.string( "hltIter2ESPMeasurementTrackerReg" ),
estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( False ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltIter2ESPPixelLayerPairsReg = cms.ESProducer( "SeedingLayersESProducer",
layerList = cms.vstring( 'BPix1+BPix2',
'BPix1+BPix3',
'BPix2+BPix3',
'BPix1+FPix1_pos',
'BPix1+FPix1_neg',
'BPix1+FPix2_pos',
'BPix1+FPix2_neg',
'BPix2+FPix1_pos',
'BPix2+FPix1_neg',
'BPix2+FPix2_pos',
'BPix2+FPix2_neg',
'FPix1_pos+FPix2_pos',
'FPix1_neg+FPix2_neg' ),
ComponentName = cms.string( "hltIter2ESPPixelLayerPairsReg" ),
TEC = cms.PSet( ),
FPix = cms.PSet(
HitProducer = cms.string( "hltSiPixelRecHitsReg" ),
hitErrorRZ = cms.double( 0.0036 ),
useErrorsFromParam = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
skipClusters = cms.InputTag( "hltIter2ClustersRefRemovalReg" ),
hitErrorRPhi = cms.double( 0.0051 )
),
TID = cms.PSet( ),
BPix = cms.PSet(
HitProducer = cms.string( "hltSiPixelRecHitsReg" ),
hitErrorRZ = cms.double( 0.0060 ),
useErrorsFromParam = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
skipClusters = cms.InputTag( "hltIter2ClustersRefRemovalReg" ),
hitErrorRPhi = cms.double( 0.0027 )
),
TIB = cms.PSet( ),
TOB = cms.PSet( )
)
process.hltIter2ESPMeasurementTrackerReg = cms.ESProducer( "MeasurementTrackerESProducer",
StripCPE = cms.string( "StripCPEfromTrackAngle" ),
inactivePixelDetectorLabels = cms.VInputTag( ),
PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
OnDemand = cms.bool( True ),
Regional = cms.bool( True ),
UsePixelModuleQualityDB = cms.bool( True ),
pixelClusterProducer = cms.string( "hltSiPixelClustersReg" ),
switchOffPixelsIfEmpty = cms.bool( True ),
inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
MaskBadAPVFibers = cms.bool( True ),
UseStripStripQualityDB = cms.bool( True ),
UsePixelROCQualityDB = cms.bool( True ),
DebugPixelROCQualityDB = cms.untracked.bool( False ),
UseStripAPVFiberQualityDB = cms.bool( True ),
stripClusterProducer = cms.string( "hltIter2SiStripClustersReg" ),
DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
DebugStripStripQualityDB = cms.untracked.bool( False ),
SiStripQualityLabel = cms.string( "" ),
badStripCuts = cms.PSet(
TOB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TID = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TEC = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TIB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
)
),
DebugStripModuleQualityDB = cms.untracked.bool( False ),
ComponentName = cms.string( "hltIter2ESPMeasurementTrackerReg" ),
DebugPixelModuleQualityDB = cms.untracked.bool( False ),
HitMatcher = cms.string( "StandardMatcher" ),
skipClusters = cms.InputTag( "hltIter2ClustersRefRemovalReg" ),
UseStripModuleQualityDB = cms.bool( True ),
UseStripNoiseDB = cms.bool( False ),
UseStripCablingDB = cms.bool( False )
)
process.hltIter1ESPTrajectoryBuilderITReg = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltIter1ESPTrajectoryFilterIT" ),
maxCand = cms.int32( 2 ),
ComponentName = cms.string( "hltIter1ESPTrajectoryBuilderITReg" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
MeasurementTrackerName = cms.string( "hltIter1ESPMeasurementTrackerReg" ),
estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( False ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltIter1ESPPixelLayerTripletsReg = cms.ESProducer( "SeedingLayersESProducer",
layerList = cms.vstring( 'BPix1+BPix2+BPix3',
'BPix1+BPix2+FPix1_pos',
'BPix1+BPix2+FPix1_neg',
'BPix1+FPix1_pos+FPix2_pos',
'BPix1+FPix1_neg+FPix2_neg' ),
ComponentName = cms.string( "hltIter1ESPPixelLayerTripletsReg" ),
TEC = cms.PSet( ),
FPix = cms.PSet(
HitProducer = cms.string( "hltSiPixelRecHitsReg" ),
hitErrorRZ = cms.double( 0.0036 ),
useErrorsFromParam = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
skipClusters = cms.InputTag( "hltIter1ClustersRefRemovalReg" ),
hitErrorRPhi = cms.double( 0.0051 )
),
TID = cms.PSet( ),
BPix = cms.PSet(
HitProducer = cms.string( "hltSiPixelRecHitsReg" ),
hitErrorRZ = cms.double( 0.0060 ),
useErrorsFromParam = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
skipClusters = cms.InputTag( "hltIter1ClustersRefRemovalReg" ),
hitErrorRPhi = cms.double( 0.0027 )
),
TIB = cms.PSet( ),
TOB = cms.PSet( )
)
process.hltIter1ESPMeasurementTrackerReg = cms.ESProducer( "MeasurementTrackerESProducer",
StripCPE = cms.string( "StripCPEfromTrackAngle" ),
inactivePixelDetectorLabels = cms.VInputTag( ),
PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
OnDemand = cms.bool( True ),
Regional = cms.bool( True ),
UsePixelModuleQualityDB = cms.bool( True ),
pixelClusterProducer = cms.string( "hltSiPixelClustersReg" ),
switchOffPixelsIfEmpty = cms.bool( True ),
inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
MaskBadAPVFibers = cms.bool( True ),
UseStripStripQualityDB = cms.bool( True ),
UsePixelROCQualityDB = cms.bool( True ),
DebugPixelROCQualityDB = cms.untracked.bool( False ),
UseStripAPVFiberQualityDB = cms.bool( True ),
stripClusterProducer = cms.string( "hltIter1SiStripClustersReg" ),
DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
DebugStripStripQualityDB = cms.untracked.bool( False ),
SiStripQualityLabel = cms.string( "" ),
badStripCuts = cms.PSet(
TOB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TID = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TEC = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TIB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
)
),
DebugStripModuleQualityDB = cms.untracked.bool( False ),
ComponentName = cms.string( "hltIter1ESPMeasurementTrackerReg" ),
DebugPixelModuleQualityDB = cms.untracked.bool( False ),
HitMatcher = cms.string( "StandardMatcher" ),
skipClusters = cms.InputTag( "hltIter1ClustersRefRemovalReg" ),
UseStripModuleQualityDB = cms.bool( True ),
UseStripNoiseDB = cms.bool( False ),
UseStripCablingDB = cms.bool( False )
)
process.hltESPTrajectoryBuilderITReg = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltESPTrajectoryFilterIT" ),
maxCand = cms.int32( 2 ),
ComponentName = cms.string( "hltESPTrajectoryBuilderITReg" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
MeasurementTrackerName = cms.string( "hltESPMeasurementTrackerReg" ),
estimator = cms.string( "hltESPChi2MeasurementEstimator9" ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( False ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltESPMeasurementTrackerReg = cms.ESProducer( "MeasurementTrackerESProducer",
StripCPE = cms.string( "StripCPEfromTrackAngle" ),
inactivePixelDetectorLabels = cms.VInputTag( ),
PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
OnDemand = cms.bool( True ),
Regional = cms.bool( True ),
UsePixelModuleQualityDB = cms.bool( True ),
pixelClusterProducer = cms.string( "hltSiPixelClustersReg" ),
switchOffPixelsIfEmpty = cms.bool( True ),
inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
MaskBadAPVFibers = cms.bool( True ),
UseStripStripQualityDB = cms.bool( True ),
UsePixelROCQualityDB = cms.bool( True ),
DebugPixelROCQualityDB = cms.untracked.bool( False ),
UseStripAPVFiberQualityDB = cms.bool( True ),
stripClusterProducer = cms.string( "hltSiStripClustersReg" ),
DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
DebugStripStripQualityDB = cms.untracked.bool( False ),
SiStripQualityLabel = cms.string( "" ),
badStripCuts = cms.PSet(
TOB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TID = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TEC = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TIB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
)
),
DebugStripModuleQualityDB = cms.untracked.bool( False ),
ComponentName = cms.string( "hltESPMeasurementTrackerReg" ),
DebugPixelModuleQualityDB = cms.untracked.bool( False ),
HitMatcher = cms.string( "StandardMatcher" ),
skipClusters = cms.InputTag( "" ),
UseStripModuleQualityDB = cms.bool( True ),
UseStripNoiseDB = cms.bool( False ),
UseStripCablingDB = cms.bool( False )
)
process.AnyDirectionAnalyticalPropagator = cms.ESProducer( "AnalyticalPropagatorESProducer",
MaxDPhi = cms.double( 1.6 ),
ComponentName = cms.string( "AnyDirectionAnalyticalPropagator" ),
PropagationDirection = cms.string( "anyDirection" )
)
process.AutoMagneticFieldESProducer = cms.ESProducer( "AutoMagneticFieldESProducer",
label = cms.untracked.string( "" ),
nominalCurrents = cms.untracked.vint32( -1, 0, 9558, 14416, 16819, 18268, 19262 ),
valueOverride = cms.int32( -1 ),
mapLabels = cms.untracked.vstring( '090322_3_8t',
'0t',
'071212_2t',
'071212_3t',
'071212_3_5t',
'090322_3_8t',
'071212_4t' )
)
process.CSCGeometryESModule = cms.ESProducer( "CSCGeometryESModule",
useRealWireGeometry = cms.bool( True ),
appendToDataLabel = cms.string( "" ),
alignmentsLabel = cms.string( "" ),
useGangedStripsInME1a = cms.bool( True ),
debugV = cms.untracked.bool( False ),
useOnlyWiresInME1a = cms.bool( False ),
useDDD = cms.bool( False ),
useCentreTIOffsets = cms.bool( False ),
applyAlignment = cms.bool( True )
)
process.CaloGeometryBuilder = cms.ESProducer( "CaloGeometryBuilder",
SelectedCalos = cms.vstring( 'HCAL',
'ZDC',
'EcalBarrel',
'EcalEndcap',
'EcalPreshower',
'TOWER' )
)
process.CaloTopologyBuilder = cms.ESProducer( "CaloTopologyBuilder" )
process.CaloTowerConstituentsMapBuilder = cms.ESProducer( "CaloTowerConstituentsMapBuilder",
MapFile = cms.untracked.string( "Geometry/CaloTopology/data/CaloTowerEEGeometric.map.gz" )
)
process.CaloTowerGeometryFromDBEP = cms.ESProducer( "CaloTowerGeometryFromDBEP",
applyAlignment = cms.bool( False )
)
process.CastorDbProducer = cms.ESProducer( "CastorDbProducer",
appendToDataLabel = cms.string( "" )
)
process.ClusterShapeHitFilterESProducer = cms.ESProducer( "ClusterShapeHitFilterESProducer",
ComponentName = cms.string( "ClusterShapeHitFilter" )
)
process.DTGeometryESModule = cms.ESProducer( "DTGeometryESModule",
appendToDataLabel = cms.string( "" ),
fromDDD = cms.bool( False ),
applyAlignment = cms.bool( True ),
alignmentsLabel = cms.string( "" )
)
process.EcalBarrelGeometryFromDBEP = cms.ESProducer( "EcalBarrelGeometryFromDBEP",
applyAlignment = cms.bool( True )
)
process.EcalElectronicsMappingBuilder = cms.ESProducer( "EcalElectronicsMappingBuilder" )
process.EcalEndcapGeometryFromDBEP = cms.ESProducer( "EcalEndcapGeometryFromDBEP",
applyAlignment = cms.bool( True )
)
process.EcalLaserCorrectionService = cms.ESProducer( "EcalLaserCorrectionService" )
process.EcalPreshowerGeometryFromDBEP = cms.ESProducer( "EcalPreshowerGeometryFromDBEP",
applyAlignment = cms.bool( True )
)
process.EcalUnpackerWorkerESProducer = cms.ESProducer( "EcalUnpackerWorkerESProducer",
CalibRHAlgo = cms.PSet(
flagsMapDBReco = cms.vint32( 0, 0, 0, 0, 4, -1, -1, -1, 4, 4, 7, 7, 7, 8, 9 ),
Type = cms.string( "EcalRecHitWorkerSimple" ),
killDeadChannels = cms.bool( True ),
ChannelStatusToBeExcluded = cms.vint32( 10, 11, 12, 13, 14 ),
laserCorrection = cms.bool( True ),
EBLaserMIN = cms.double( 0.5 ),
EELaserMIN = cms.double( 0.5 ),
EBLaserMAX = cms.double( 2.0 ),
EELaserMAX = cms.double( 3.0 )
),
ComponentName = cms.string( "" ),
UncalibRHAlgo = cms.PSet( Type = cms.string( "EcalUncalibRecHitWorkerWeights" ) ),
DCCDataUnpacker = cms.PSet(
orderedDCCIdList = cms.vint32( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54 ),
tccUnpacking = cms.bool( False ),
srpUnpacking = cms.bool( False ),
syncCheck = cms.bool( False ),
feIdCheck = cms.bool( True ),
headerUnpacking = cms.bool( True ),
orderedFedList = cms.vint32( 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654 ),
feUnpacking = cms.bool( True ),
forceKeepFRData = cms.bool( False ),
memUnpacking = cms.bool( True )
),
ElectronicsMapper = cms.PSet(
numbXtalTSamples = cms.uint32( 10 ),
numbTriggerTSamples = cms.uint32( 1 )
)
)
process.HcalGeometryFromDBEP = cms.ESProducer( "HcalGeometryFromDBEP",
applyAlignment = cms.bool( False )
)
process.HcalTopologyIdealEP = cms.ESProducer( "HcalTopologyIdealEP" )
process.MaterialPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
PropagationDirection = cms.string( "alongMomentum" ),
ComponentName = cms.string( "PropagatorWithMaterial" ),
Mass = cms.double( 0.105 ),
ptMin = cms.double( -1.0 ),
MaxDPhi = cms.double( 1.6 ),
useRungeKutta = cms.bool( False )
)
process.MaterialPropagatorForHI = cms.ESProducer( "PropagatorWithMaterialESProducer",
PropagationDirection = cms.string( "alongMomentum" ),
ComponentName = cms.string( "PropagatorWithMaterialForHI" ),
Mass = cms.double( 0.139 ),
ptMin = cms.double( -1.0 ),
MaxDPhi = cms.double( 1.6 ),
useRungeKutta = cms.bool( False )
)
process.OppositeMaterialPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
PropagationDirection = cms.string( "oppositeToMomentum" ),
ComponentName = cms.string( "PropagatorWithMaterialOpposite" ),
Mass = cms.double( 0.105 ),
ptMin = cms.double( -1.0 ),
MaxDPhi = cms.double( 1.6 ),
useRungeKutta = cms.bool( False )
)
process.OppositeMaterialPropagatorForHI = cms.ESProducer( "PropagatorWithMaterialESProducer",
PropagationDirection = cms.string( "oppositeToMomentum" ),
ComponentName = cms.string( "PropagatorWithMaterialOppositeForHI" ),
Mass = cms.double( 0.139 ),
ptMin = cms.double( -1.0 ),
MaxDPhi = cms.double( 1.6 ),
useRungeKutta = cms.bool( False )
)
process.RPCGeometryESModule = cms.ESProducer( "RPCGeometryESModule",
useDDD = cms.untracked.bool( False ),
compatibiltyWith11 = cms.untracked.bool( True )
)
process.SiStripGainESProducer = cms.ESProducer( "SiStripGainESProducer",
printDebug = cms.untracked.bool( False ),
appendToDataLabel = cms.string( "" ),
APVGain = cms.VPSet(
cms.PSet( Record = cms.string( "SiStripApvGainRcd" ),
NormalizationFactor = cms.untracked.double( 1.0 ),
Label = cms.untracked.string( "" )
),
cms.PSet( Record = cms.string( "SiStripApvGain2Rcd" ),
NormalizationFactor = cms.untracked.double( 1.0 ),
Label = cms.untracked.string( "" )
)
),
AutomaticNormalization = cms.bool( False )
)
process.SiStripQualityESProducer = cms.ESProducer( "SiStripQualityESProducer",
appendToDataLabel = cms.string( "" ),
PrintDebugOutput = cms.bool( False ),
PrintDebug = cms.untracked.bool( False ),
ListOfRecordToMerge = cms.VPSet(
cms.PSet( record = cms.string( "SiStripDetVOffRcd" ),
tag = cms.string( "" )
),
cms.PSet( record = cms.string( "SiStripDetCablingRcd" ),
tag = cms.string( "" )
),
cms.PSet( record = cms.string( "SiStripBadChannelRcd" ),
tag = cms.string( "" )
),
cms.PSet( record = cms.string( "SiStripBadFiberRcd" ),
tag = cms.string( "" )
),
cms.PSet( record = cms.string( "SiStripBadModuleRcd" ),
tag = cms.string( "" )
)
),
UseEmptyRunInfo = cms.bool( False ),
ReduceGranularity = cms.bool( False ),
ThresholdForReducedGranularity = cms.double( 0.3 )
)
process.SiStripRecHitMatcherESProducer = cms.ESProducer( "SiStripRecHitMatcherESProducer",
ComponentName = cms.string( "StandardMatcher" ),
NSigmaInside = cms.double( 3.0 )
)
process.SlaveField0 = cms.ESProducer( "UniformMagneticFieldESProducer",
ZFieldInTesla = cms.double( 0.0 ),
label = cms.untracked.string( "slave_0" )
)
process.SlaveField20 = cms.ESProducer( "ParametrizedMagneticFieldProducer",
version = cms.string( "OAE_1103l_071212" ),
parameters = cms.PSet( BValue = cms.string( "2_0T" ) ),
label = cms.untracked.string( "slave_20" )
)
process.SlaveField30 = cms.ESProducer( "ParametrizedMagneticFieldProducer",
version = cms.string( "OAE_1103l_071212" ),
parameters = cms.PSet( BValue = cms.string( "3_0T" ) ),
label = cms.untracked.string( "slave_30" )
)
process.SlaveField35 = cms.ESProducer( "ParametrizedMagneticFieldProducer",
version = cms.string( "OAE_1103l_071212" ),
parameters = cms.PSet( BValue = cms.string( "3_5T" ) ),
label = cms.untracked.string( "slave_35" )
)
process.SlaveField38 = cms.ESProducer( "ParametrizedMagneticFieldProducer",
version = cms.string( "OAE_1103l_071212" ),
parameters = cms.PSet( BValue = cms.string( "3_8T" ) ),
label = cms.untracked.string( "slave_38" )
)
process.SlaveField40 = cms.ESProducer( "ParametrizedMagneticFieldProducer",
version = cms.string( "OAE_1103l_071212" ),
parameters = cms.PSet( BValue = cms.string( "4_0T" ) ),
label = cms.untracked.string( "slave_40" )
)
process.SteppingHelixPropagatorAny = cms.ESProducer( "SteppingHelixPropagatorESProducer",
NoErrorPropagation = cms.bool( False ),
endcapShiftInZPos = cms.double( 0.0 ),
PropagationDirection = cms.string( "anyDirection" ),
useTuningForL2Speed = cms.bool( False ),
useIsYokeFlag = cms.bool( True ),
endcapShiftInZNeg = cms.double( 0.0 ),
SetVBFPointer = cms.bool( False ),
AssumeNoMaterial = cms.bool( False ),
returnTangentPlane = cms.bool( True ),
useInTeslaFromMagField = cms.bool( False ),
VBFName = cms.string( "VolumeBasedMagneticField" ),
useEndcapShiftsInZ = cms.bool( False ),
sendLogWarning = cms.bool( False ),
useMatVolumes = cms.bool( True ),
debug = cms.bool( False ),
ApplyRadX0Correction = cms.bool( True ),
useMagVolumes = cms.bool( True ),
ComponentName = cms.string( "SteppingHelixPropagatorAny" )
)
process.StripCPEfromTrackAngleESProducer = cms.ESProducer( "StripCPEESProducer",
TanDiffusionAngle = cms.double( 0.01 ),
UncertaintyScaling = cms.double( 1.42 ),
ThicknessRelativeUncertainty = cms.double( 0.02 ),
MaybeNoiseThreshold = cms.double( 3.5 ),
ComponentName = cms.string( "StripCPEfromTrackAngle" ),
MinimumUncertainty = cms.double( 0.01 ),
NoiseThreshold = cms.double( 2.3 )
)
process.TrackerDigiGeometryESModule = cms.ESProducer( "TrackerDigiGeometryESModule",
appendToDataLabel = cms.string( "" ),
fromDDD = cms.bool( False ),
applyAlignment = cms.bool( True ),
alignmentsLabel = cms.string( "" )
)
process.TrackerGeometricDetESModule = cms.ESProducer( "TrackerGeometricDetESModule",
fromDDD = cms.bool( False )
)
process.TransientTrackBuilderESProducer = cms.ESProducer( "TransientTrackBuilderESProducer",
ComponentName = cms.string( "TransientTrackBuilder" )
)
process.VBF0 = cms.ESProducer( "VolumeBasedMagneticFieldESProducer",
scalingVolumes = cms.vint32( ),
overrideMasterSector = cms.bool( True ),
useParametrizedTrackerField = cms.bool( True ),
scalingFactors = cms.vdouble( ),
label = cms.untracked.string( "0t" ),
version = cms.string( "grid_1103l_071212_2t" ),
debugBuilder = cms.untracked.bool( False ),
paramLabel = cms.string( "slave_0" ),
cacheLastVolume = cms.untracked.bool( True )
)
process.VBF20 = cms.ESProducer( "VolumeBasedMagneticFieldESProducer",
scalingVolumes = cms.vint32( ),
overrideMasterSector = cms.bool( True ),
useParametrizedTrackerField = cms.bool( True ),
scalingFactors = cms.vdouble( ),
label = cms.untracked.string( "071212_2t" ),
version = cms.string( "grid_1103l_071212_2t" ),
debugBuilder = cms.untracked.bool( False ),
paramLabel = cms.string( "slave_20" ),
cacheLastVolume = cms.untracked.bool( True )
)
process.VBF30 = cms.ESProducer( "VolumeBasedMagneticFieldESProducer",
scalingVolumes = cms.vint32( ),
overrideMasterSector = cms.bool( True ),
useParametrizedTrackerField = cms.bool( True ),
scalingFactors = cms.vdouble( ),
label = cms.untracked.string( "071212_3t" ),
version = cms.string( "grid_1103l_071212_3t" ),
debugBuilder = cms.untracked.bool( False ),
paramLabel = cms.string( "slave_30" ),
cacheLastVolume = cms.untracked.bool( True )
)
process.VBF35 = cms.ESProducer( "VolumeBasedMagneticFieldESProducer",
scalingVolumes = cms.vint32( ),
overrideMasterSector = cms.bool( True ),
useParametrizedTrackerField = cms.bool( True ),
scalingFactors = cms.vdouble( ),
label = cms.untracked.string( "071212_3_5t" ),
version = cms.string( "grid_1103l_071212_3_5t" ),
debugBuilder = cms.untracked.bool( False ),
paramLabel = cms.string( "slave_35" ),
cacheLastVolume = cms.untracked.bool( True )
)
process.VBF38 = cms.ESProducer( "VolumeBasedMagneticFieldESProducer",
scalingVolumes = cms.vint32( 14100, 14200, 17600, 17800, 17900, 18100, 18300, 18400, 18600, 23100, 23300, 23400, 23600, 23800, 23900, 24100, 28600, 28800, 28900, 29100, 29300, 29400, 29600, 28609, 28809, 28909, 29109, 29309, 29409, 29609, 28610, 28810, 28910, 29110, 29310, 29410, 29610, 28611, 28811, 28911, 29111, 29311, 29411, 29611 ),
overrideMasterSector = cms.bool( False ),
useParametrizedTrackerField = cms.bool( True ),
scalingFactors = cms.vdouble( 1.0, 1.0, 0.994, 1.004, 1.004, 1.005, 1.004, 1.004, 0.994, 0.965, 0.958, 0.958, 0.953, 0.958, 0.958, 0.965, 0.918, 0.924, 0.924, 0.906, 0.924, 0.924, 0.918, 0.991, 0.998, 0.998, 0.978, 0.998, 0.998, 0.991, 0.991, 0.998, 0.998, 0.978, 0.998, 0.998, 0.991, 0.991, 0.998, 0.998, 0.978, 0.998, 0.998, 0.991 ),
label = cms.untracked.string( "090322_3_8t" ),
version = cms.string( "grid_1103l_090322_3_8t" ),
debugBuilder = cms.untracked.bool( False ),
paramLabel = cms.string( "slave_38" ),
cacheLastVolume = cms.untracked.bool( True )
)
process.VBF40 = cms.ESProducer( "VolumeBasedMagneticFieldESProducer",
scalingVolumes = cms.vint32( ),
overrideMasterSector = cms.bool( True ),
useParametrizedTrackerField = cms.bool( True ),
scalingFactors = cms.vdouble( ),
label = cms.untracked.string( "071212_4t" ),
version = cms.string( "grid_1103l_071212_4t" ),
debugBuilder = cms.untracked.bool( False ),
paramLabel = cms.string( "slave_40" ),
cacheLastVolume = cms.untracked.bool( True )
)
process.ZdcGeometryFromDBEP = cms.ESProducer( "ZdcGeometryFromDBEP",
applyAlignment = cms.bool( False )
)
process.caloDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
ComponentName = cms.string( "CaloDetIdAssociator" ),
etaBinSize = cms.double( 0.087 ),
nEta = cms.int32( 70 ),
nPhi = cms.int32( 72 ),
includeBadChambers = cms.bool( False )
)
process.cosmicsNavigationSchoolESProducer = cms.ESProducer( "NavigationSchoolESProducer",
ComponentName = cms.string( "CosmicNavigationSchool" )
)
# DetId associator for ECAL: finer granularity than the calo associator
# (300 eta bins of width 0.02, 360 phi bins).
process.ecalDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "EcalDetIdAssociator" ),
  etaBinSize = cms.double( 0.02 ),
  nEta = cms.int32( 300 ),
  nPhi = cms.int32( 360 ),
  includeBadChambers = cms.bool( False )
)
# ECAL severity-level classification: maps channel DB status codes
# (dbstatusMask) and rec-hit flags (flagMask) onto the good / problematic /
# recovered / time / weird / bad categories used by downstream reconstruction.
process.ecalSeverityLevel = cms.ESProducer( "EcalSeverityLevelESProducer",
  dbstatusMask = cms.PSet(
    kGood = cms.vuint32( 0 ),
    kProblematic = cms.vuint32( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ),
    kRecovered = cms.vuint32( ),
    kTime = cms.vuint32( ),
    kWeird = cms.vuint32( ),
    kBad = cms.vuint32( 11, 12, 13, 14, 15, 16 )
  ),
  # Hits with |time| beyond this threshold (ns, presumably — confirm) can be
  # classified via the kTime category.
  timeThresh = cms.double( 2.0 ),
  flagMask = cms.PSet(
    kGood = cms.vstring( 'kGood' ),
    kProblematic = cms.vstring( 'kPoorReco',
      'kPoorCalib',
      'kNoisy',
      'kSaturated' ),
    kRecovered = cms.vstring( 'kLeadingEdgeRecovered',
      'kTowerRecovered' ),
    kTime = cms.vstring( 'kOutOfTime' ),
    kWeird = cms.vstring( 'kWeird',
      'kDiWeird' ),
    kBad = cms.vstring( 'kFaultyHardware',
      'kDead',
      'kKilled' )
  )
)
# DetId associator for HCAL: same 70 x 72 tower binning as the calo associator.
process.hcalDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "HcalDetIdAssociator" ),
  etaBinSize = cms.double( 0.087 ),
  nEta = cms.int32( 70 ),
  nPhi = cms.int32( 72 ),
  includeBadChambers = cms.bool( False )
)
# HCAL reconstruction severity levels: each VPSet entry assigns a severity
# Level to a combination of rec-hit flags and/or channel-status bits.
# Higher Level means a more severe problem (0 = good, 20 = off/dead channel).
process.hcalRecAlgos = cms.ESProducer( "HcalRecAlgoESProducer",
  RecoveredRecHitBits = cms.vstring( 'TimingAddedBit',
    'TimingSubtractedBit' ),
  SeverityLevels = cms.VPSet(
    cms.PSet(  RecHitFlags = cms.vstring(  ),
      ChannelStatus = cms.vstring(  ),
      Level = cms.int32( 0 )
    ),
    cms.PSet(  RecHitFlags = cms.vstring(  ),
      ChannelStatus = cms.vstring( 'HcalCellCaloTowerProb' ),
      Level = cms.int32( 1 )
    ),
    # HSCP-related and saturation flags.
    cms.PSet(  RecHitFlags = cms.vstring( 'HSCP_R1R2',
  'HSCP_FracLeader',
  'HSCP_OuterEnergy',
  'HSCP_ExpFit',
  'ADCSaturationBit' ),
      ChannelStatus = cms.vstring(  ),
      Level = cms.int32( 5 )
    ),
    # Noise / timing / pulse-shape rec-hit flags.
    cms.PSet(  RecHitFlags = cms.vstring( 'HBHEHpdHitMultiplicity',
  'HFDigiTime',
  'HBHEPulseShape',
  'HOBit',
  'HFInTimeWindow',
  'ZDCBit',
  'CalibrationBit',
  'TimingErrorBit',
  'HBHEFlatNoise',
  'HBHESpikeNoise',
  'HBHETriangleNoise',
  'HBHETS4TS5Noise' ),
      ChannelStatus = cms.vstring(  ),
      Level = cms.int32( 8 )
    ),
    # HF-specific anomalous-hit flags.
    cms.PSet(  RecHitFlags = cms.vstring( 'HFLongShort',
  'HFS8S1Ratio',
  'HFPET' ),
      ChannelStatus = cms.vstring(  ),
      Level = cms.int32( 11 )
    ),
    cms.PSet(  RecHitFlags = cms.vstring(  ),
      ChannelStatus = cms.vstring( 'HcalCellCaloTowerMask' ),
      Level = cms.int32( 12 )
    ),
    cms.PSet(  RecHitFlags = cms.vstring(  ),
      ChannelStatus = cms.vstring( 'HcalCellHot' ),
      Level = cms.int32( 15 )
    ),
    cms.PSet(  RecHitFlags = cms.vstring(  ),
      ChannelStatus = cms.vstring( 'HcalCellOff',
  'HcalCellDead' ),
      Level = cms.int32( 20 )
    )
  ),
  # Channels with these status bits are dropped entirely.
  DropChannelStatusBits = cms.vstring( 'HcalCellMask',
    'HcalCellOff',
    'HcalCellDead' )
)
# Provider of the HCAL conditions (no parameters).
process.hcal_db_producer = cms.ESProducer( "HcalDbProducer" )
# Combined-secondary-vertex (CSV) b-tagging computer for the HLT.
# trackSelection cuts are applied to tracks entering the real-vertex
# computation; trackPseudoSelection (which additionally requires
# sip2dSigMin >= 2.0) is used for the pseudo-vertex category.  Most cuts are
# set to +/-99999.9, i.e. effectively disabled.
process.hltCombinedSecondaryVertex = cms.ESProducer( "CombinedSecondaryVertexESProducer",
  categoryVariableName = cms.string( "vertexCategory" ),
  useTrackWeights = cms.bool( True ),
  useCategories = cms.bool( True ),
  pseudoMultiplicityMin = cms.uint32( 2 ),
  correctVertexMass = cms.bool( True ),
  trackSelection = cms.PSet(
    totalHitsMin = cms.uint32( 0 ),
    jetDeltaRMax = cms.double( 0.3 ),
    qualityClass = cms.string( "any" ),
    pixelHitsMin = cms.uint32( 0 ),
    sip3dSigMin = cms.double( -99999.9 ),
    sip3dSigMax = cms.double( 99999.9 ),
    normChi2Max = cms.double( 99999.9 ),
    maxDistToAxis = cms.double( 0.07 ),
    sip2dValMax = cms.double( 99999.9 ),
    maxDecayLen = cms.double( 5.0 ),
    ptMin = cms.double( 0.0 ),
    sip2dSigMax = cms.double( 99999.9 ),
    sip2dSigMin = cms.double( -99999.9 ),
    sip3dValMax = cms.double( 99999.9 ),
    sip2dValMin = cms.double( -99999.9 ),
    sip3dValMin = cms.double( -99999.9 )
  ),
  # Calibration records for the three CSV vertex categories.
  calibrationRecords = cms.vstring( 'CombinedSVRecoVertex',
    'CombinedSVPseudoVertex',
    'CombinedSVNoVertex' ),
  trackPairV0Filter = cms.PSet(  k0sMassWindow = cms.double( 0.03 ) ),
  charmCut = cms.double( 1.5 ),
  vertexFlip = cms.bool( False ),
  minimumTrackWeight = cms.double( 0.5 ),
  pseudoVertexV0Filter = cms.PSet(  k0sMassWindow = cms.double( 0.05 ) ),
  trackMultiplicityMin = cms.uint32( 3 ),
  trackPseudoSelection = cms.PSet(
    totalHitsMin = cms.uint32( 0 ),
    jetDeltaRMax = cms.double( 0.3 ),
    qualityClass = cms.string( "any" ),
    pixelHitsMin = cms.uint32( 0 ),
    sip3dSigMin = cms.double( -99999.9 ),
    sip3dSigMax = cms.double( 99999.9 ),
    normChi2Max = cms.double( 99999.9 ),
    maxDistToAxis = cms.double( 0.07 ),
    sip2dValMax = cms.double( 99999.9 ),
    maxDecayLen = cms.double( 5.0 ),
    ptMin = cms.double( 0.0 ),
    sip2dSigMax = cms.double( 99999.9 ),
    # Only difference w.r.t. trackSelection: a real 2D IP-significance cut.
    sip2dSigMin = cms.double( 2.0 ),
    sip3dValMax = cms.double( 99999.9 ),
    sip2dValMin = cms.double( -99999.9 ),
    sip3dValMin = cms.double( -99999.9 )
  ),
  trackSort = cms.string( "sip2dSig" ),
  trackFlip = cms.bool( False )
)
# Jet-energy-correction chains: each chains together the individual level
# correctors (defined further down in this file) for a given jet collection.
# AK5 calo jets with L1FastJet + L2Relative + L3Absolute corrections.
process.hltESPAK5CaloL1L2L3 = cms.ESProducer( "JetCorrectionESChain",
  correctors = cms.vstring( 'hltESPL1FastJetCorrectionESProducer',
    'hltESPL2RelativeCorrectionESProducer',
    'hltESPL3AbsoluteCorrectionESProducer' ),
  appendToDataLabel = cms.string( "" )
)
# AK5 calo jets with only L2+L3 corrections (no pile-up subtraction).
process.hltESPAK5CaloL2L3 = cms.ESProducer( "JetCorrectionESChain",
  correctors = cms.vstring( 'hltESPL2RelativeCorrectionESProducer',
    'hltESPL3AbsoluteCorrectionESProducer' ),
  appendToDataLabel = cms.string( "" )
)
# AK5 particle-flow jets, L1+L2+L3.
process.hltESPAK5PFL1L2L3 = cms.ESProducer( "JetCorrectionESChain",
  correctors = cms.vstring( 'hltESPL1PFFastJetCorrectionESProducer',
    'hltESPL2PFRelativeCorrectionESProducer',
    'hltESPL3PFAbsoluteCorrectionESProducer' ),
  appendToDataLabel = cms.string( "" )
)
# AK5 PF jets with charged-hadron subtraction ("NoPU"), L1+L2+L3.
process.hltESPAK5PFNoPUL1L2L3 = cms.ESProducer( "JetCorrectionESChain",
  correctors = cms.vstring( 'hltESPL1PFNoPUFastJetCorrectionESProducer',
    'hltESPL2PFNoPURelativeCorrectionESProducer',
    'hltESPL3PFNoPUAbsoluteCorrectionESProducer' ),
  appendToDataLabel = cms.string( "" )
)
# Analytical propagators (forward and backward) with a 1.6 rad dphi limit.
process.hltESPAnalyticalPropagator = cms.ESProducer( "AnalyticalPropagatorESProducer",
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "hltESPAnalyticalPropagator" ),
  PropagationDirection = cms.string( "alongMomentum" )
)
process.hltESPBwdAnalyticalPropagator = cms.ESProducer( "AnalyticalPropagatorESProducer",
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "hltESPBwdAnalyticalPropagator" ),
  PropagationDirection = cms.string( "oppositeToMomentum" )
)
# Backward material propagator with the electron mass (5.11e-4 GeV).
process.hltESPBwdElectronPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  ComponentName = cms.string( "hltESPBwdElectronPropagator" ),
  Mass = cms.double( 5.11E-4 ),
  ptMin = cms.double( -1.0 ),
  MaxDPhi = cms.double( 1.6 ),
  useRungeKutta = cms.bool( False )
)
# Chi2 measurement estimators with progressively tighter MaxChi2 cuts; the
# "ForRefit" variant is essentially open (MaxChi2 = 1e5).
process.hltESPChi2EstimatorForRefit = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 100000.0 ),
  nSigma = cms.double( 3.0 ),
  ComponentName = cms.string( "hltESPChi2EstimatorForRefit" )
)
process.hltESPChi2MeasurementEstimator = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 30.0 ),
  nSigma = cms.double( 3.0 ),
  ComponentName = cms.string( "hltESPChi2MeasurementEstimator" )
)
process.hltESPChi2MeasurementEstimator16 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  ComponentName = cms.string( "hltESPChi2MeasurementEstimator16" )
)
process.hltESPChi2MeasurementEstimator9 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  ComponentName = cms.string( "hltESPChi2MeasurementEstimator9" )
)
# CKF trajectory builder requiring only 3 hits (see the matching filter below);
# uses the standard measurement tracker and material propagators.
process.hltESPCkf3HitTrajectoryBuilder = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltESPCkf3HitTrajectoryFilter" ),
  maxCand = cms.int32( 5 ),
  ComponentName = cms.string( "hltESPCkf3HitTrajectoryBuilder" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
# Filter for the 3-hit builder: min pT 0.9 GeV, at least 3 hits.
process.hltESPCkf3HitTrajectoryFilter = cms.ESProducer( "TrajectoryFilterESProducer",
  filterPset = cms.PSet(
    minPt = cms.double( 0.9 ),
    minHitsMinPt = cms.int32( 3 ),
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    maxLostHits = cms.int32( 1 ),
    maxNumberOfHits = cms.int32( -1 ),
    maxConsecLostHits = cms.int32( 1 ),
    minimumNumberOfHits = cms.int32( 3 ),
    nSigmaMinPt = cms.double( 5.0 ),
    chargeSignificance = cms.double( -1.0 )
  ),
  ComponentName = cms.string( "hltESPCkf3HitTrajectoryFilter" )
)
# Standard CKF builder (filter requires 5 hits).
process.hltESPCkfTrajectoryBuilder = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltESPCkfTrajectoryFilter" ),
  maxCand = cms.int32( 5 ),
  ComponentName = cms.string( "hltESPCkfTrajectoryBuilder" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
# Heavy-ion CKF builder: dedicated HI measurement tracker and propagators,
# and no invalid hits / intermediate cleaning.
process.hltESPCkfTrajectoryBuilderForHI = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterialForHI" ),
  trajectoryFilterName = cms.string( "hltESPCkfTrajectoryFilterForHI" ),
  maxCand = cms.int32( 5 ),
  ComponentName = cms.string( "hltESPCkfTrajectoryBuilderForHI" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOppositeForHI" ),
  MeasurementTrackerName = cms.string( "hltESPMeasurementTrackerForHI" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 )
)
process.hltESPCkfTrajectoryFilter = cms.ESProducer( "TrajectoryFilterESProducer",
  filterPset = cms.PSet(
    minPt = cms.double( 0.9 ),
    minHitsMinPt = cms.int32( 3 ),
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    maxLostHits = cms.int32( 1 ),
    maxNumberOfHits = cms.int32( -1 ),
    maxConsecLostHits = cms.int32( 1 ),
    minimumNumberOfHits = cms.int32( 5 ),
    nSigmaMinPt = cms.double( 5.0 ),
    chargeSignificance = cms.double( -1.0 )
  ),
  ComponentName = cms.string( "hltESPCkfTrajectoryFilter" )
)
# HI filter: 6-hit minimum and a much higher pT threshold (11 GeV).
process.hltESPCkfTrajectoryFilterForHI = cms.ESProducer( "TrajectoryFilterESProducer",
  filterPset = cms.PSet(
    minimumNumberOfHits = cms.int32( 6 ),
    minHitsMinPt = cms.int32( 3 ),
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    maxLostHits = cms.int32( 1 ),
    maxNumberOfHits = cms.int32( -1 ),
    maxConsecLostHits = cms.int32( 1 ),
    chargeSignificance = cms.double( -1.0 ),
    nSigmaMinPt = cms.double( 5.0 ),
    minPt = cms.double( 11.0 )
  ),
  ComponentName = cms.string( "hltESPCkfTrajectoryFilterForHI" )
)
# Merger limiting GSF states to 12 components, using the 5D
# Kullback-Leibler distance defined later in this file.
process.hltESPCloseComponentsMerger5D = cms.ESProducer( "CloseComponentsMergerESProducer5D",
  ComponentName = cms.string( "hltESPCloseComponentsMerger5D" ),
  MaxComponents = cms.int32( 12 ),
  DistanceMeasure = cms.string( "hltESPKullbackLeiblerDistance5D" )
)
# Placeholder detector-layer geometry used by several KF fitters/smoothers.
process.hltESPDummyDetLayerGeometry = cms.ESProducer( "DetLayerGeometryESProducer",
  ComponentName = cms.string( "hltESPDummyDetLayerGeometry" )
)
# Preshower (ES) unpacker worker; the DCC lookup table is read from a data
# file shipped with the release.
process.hltESPESUnpackerWorker = cms.ESProducer( "ESUnpackerWorkerESProducer",
  RHAlgo = cms.PSet(
    ESRecoAlgo = cms.int32( 0 ),
    Type = cms.string( "ESRecHitWorker" )
  ),
  DCCDataUnpacker = cms.PSet(  LookupTable = cms.FileInPath( "EventFilter/ESDigiToRaw/data/ES_lookup_table.dat" ) ),
  ComponentName = cms.string( "hltESPESUnpackerWorker" )
)
# ECAL region cabling; uses the same ES lookup table as the unpacker worker.
process.hltESPEcalRegionCablingESProducer = cms.ESProducer( "EcalRegionCablingESProducer",
  esMapping = cms.PSet(  LookupTable = cms.FileInPath( "EventFilter/ESDigiToRaw/data/ES_lookup_table.dat" ) )
)
# ECAL trigger-tower constituents map (endcap mapping from a data file).
process.hltESPEcalTrigTowerConstituentsMapBuilder = cms.ESProducer( "EcalTrigTowerConstituentsMapBuilder",
  MapFile = cms.untracked.string( "Geometry/EcalMapping/data/EndCap_TTMap.txt" )
)
# Very loose chi2 estimator (MaxChi2 = 2000) used for electron tracking.
process.hltESPElectronChi2 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 2000.0 ),
  nSigma = cms.double( 3.0 ),
  ComponentName = cms.string( "hltESPElectronChi2" )
)
# GSF material effects for electrons: Bethe-Heitler energy loss
# parametrization plus multiple scattering, electron mass 5.11e-4 GeV.
process.hltESPElectronMaterialEffects = cms.ESProducer( "GsfMaterialEffectsESProducer",
  BetheHeitlerParametrization = cms.string( "BetheHeitler_cdfmom_nC6_O5.par" ),
  EnergyLossUpdator = cms.string( "GsfBetheHeitlerUpdator" ),
  ComponentName = cms.string( "hltESPElectronMaterialEffects" ),
  MultipleScatteringUpdator = cms.string( "MultipleScatteringUpdator" ),
  Mass = cms.double( 5.11E-4 ),
  BetheHeitlerCorrection = cms.int32( 2 )
)
# Stepping-helix propagators tuned for L2 muon speed (useTuningForL2Speed),
# differing only in propagation direction ("anyDirection" vs
# "oppositeToMomentum").
process.hltESPFastSteppingHelixPropagatorAny = cms.ESProducer( "SteppingHelixPropagatorESProducer",
  NoErrorPropagation = cms.bool( False ),
  endcapShiftInZPos = cms.double( 0.0 ),
  PropagationDirection = cms.string( "anyDirection" ),
  useTuningForL2Speed = cms.bool( True ),
  useIsYokeFlag = cms.bool( True ),
  endcapShiftInZNeg = cms.double( 0.0 ),
  SetVBFPointer = cms.bool( False ),
  AssumeNoMaterial = cms.bool( False ),
  returnTangentPlane = cms.bool( True ),
  useInTeslaFromMagField = cms.bool( False ),
  VBFName = cms.string( "VolumeBasedMagneticField" ),
  useEndcapShiftsInZ = cms.bool( False ),
  sendLogWarning = cms.bool( False ),
  useMatVolumes = cms.bool( True ),
  debug = cms.bool( False ),
  ApplyRadX0Correction = cms.bool( True ),
  useMagVolumes = cms.bool( True ),
  ComponentName = cms.string( "hltESPFastSteppingHelixPropagatorAny" )
)
process.hltESPFastSteppingHelixPropagatorOpposite = cms.ESProducer( "SteppingHelixPropagatorESProducer",
  NoErrorPropagation = cms.bool( False ),
  endcapShiftInZPos = cms.double( 0.0 ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useTuningForL2Speed = cms.bool( True ),
  useIsYokeFlag = cms.bool( True ),
  endcapShiftInZNeg = cms.double( 0.0 ),
  SetVBFPointer = cms.bool( False ),
  AssumeNoMaterial = cms.bool( False ),
  returnTangentPlane = cms.bool( True ),
  useInTeslaFromMagField = cms.bool( False ),
  VBFName = cms.string( "VolumeBasedMagneticField" ),
  useEndcapShiftsInZ = cms.bool( False ),
  sendLogWarning = cms.bool( False ),
  useMatVolumes = cms.bool( True ),
  debug = cms.bool( False ),
  ApplyRadX0Correction = cms.bool( True ),
  useMagVolumes = cms.bool( True ),
  ComponentName = cms.string( "hltESPFastSteppingHelixPropagatorOpposite" )
)
# KF fitting smoother for iterative tracking: outlier rejection active
# (EstimateCut = 10), 3-hit minimum, trajectories broken on two consecutive
# missing hits.
process.hltESPFittingSmootherIT = cms.ESProducer( "KFFittingSmootherESProducer",
  EstimateCut = cms.double( 10.0 ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  Fitter = cms.string( "hltESPTrajectoryFitterRK" ),
  MinNumberOfHits = cms.int32( 3 ),
  Smoother = cms.string( "hltESPTrajectorySmootherRK" ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( True ),
  ComponentName = cms.string( "hltESPFittingSmootherIT" ),
  NoInvalidHitsBeginEnd = cms.bool( True ),
  RejectTracks = cms.bool( True )
)
# Runge-Kutta fitting smoother without outlier rejection (EstimateCut = -1)
# and a 5-hit minimum.
process.hltESPFittingSmootherRK = cms.ESProducer( "KFFittingSmootherESProducer",
  EstimateCut = cms.double( -1.0 ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  Fitter = cms.string( "hltESPTrajectoryFitterRK" ),
  MinNumberOfHits = cms.int32( 5 ),
  Smoother = cms.string( "hltESPTrajectorySmootherRK" ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  ComponentName = cms.string( "hltESPFittingSmootherRK" ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  RejectTracks = cms.bool( True )
)
# Forward material propagator with the electron mass (counterpart of
# hltESPBwdElectronPropagator above).
process.hltESPFwdElectronPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
  PropagationDirection = cms.string( "alongMomentum" ),
  ComponentName = cms.string( "hltESPFwdElectronPropagator" ),
  Mass = cms.double( 5.11E-4 ),
  ptMin = cms.double( -1.0 ),
  MaxDPhi = cms.double( 1.6 ),
  useRungeKutta = cms.bool( False )
)
process.hltESPGlobalDetLayerGeometry = cms.ESProducer( "GlobalDetLayerGeometryESProducer",
  ComponentName = cms.string( "hltESPGlobalDetLayerGeometry" )
)
process.hltESPGlobalTrackingGeometryESProducer = cms.ESProducer( "GlobalTrackingGeometryESProducer" )
# GSF electron fitting smoother built on the GSF fitter/smoother below.
process.hltESPGsfElectronFittingSmoother = cms.ESProducer( "KFFittingSmootherESProducer",
  EstimateCut = cms.double( -1.0 ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  Fitter = cms.string( "hltESPGsfTrajectoryFitter" ),
  MinNumberOfHits = cms.int32( 5 ),
  Smoother = cms.string( "hltESPGsfTrajectorySmoother" ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( True ),
  ComponentName = cms.string( "hltESPGsfElectronFittingSmoother" ),
  NoInvalidHitsBeginEnd = cms.bool( True ),
  RejectTracks = cms.bool( True )
)
# GSF trajectory fitter/smoother pair: both use the close-components merger
# and the electron material effects; the smoother propagates backward and
# rescales errors by 100.
process.hltESPGsfTrajectoryFitter = cms.ESProducer( "GsfTrajectoryFitterESProducer",
  Merger = cms.string( "hltESPCloseComponentsMerger5D" ),
  ComponentName = cms.string( "hltESPGsfTrajectoryFitter" ),
  MaterialEffectsUpdator = cms.string( "hltESPElectronMaterialEffects" ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" ),
  GeometricalPropagator = cms.string( "hltESPAnalyticalPropagator" )
)
process.hltESPGsfTrajectorySmoother = cms.ESProducer( "GsfTrajectorySmootherESProducer",
  ErrorRescaling = cms.double( 100.0 ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" ),
  Merger = cms.string( "hltESPCloseComponentsMerger5D" ),
  ComponentName = cms.string( "hltESPGsfTrajectorySmoother" ),
  GeometricalPropagator = cms.string( "hltESPBwdAnalyticalPropagator" ),
  MaterialEffectsUpdator = cms.string( "hltESPElectronMaterialEffects" )
)
# Seeding-layer sets for heavy-ion tracking.  All three use pixel rec hits
# from "hltHISiPixelRecHits" with fixed (parametrized) hit errors.
# Pixel-pair + pixel/TEC combinations (mixed barrel/forward pairs).
process.hltESPHIMixedLayerPairs = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2',
    'BPix1+BPix3',
    'BPix2+BPix3',
    'BPix1+FPix1_pos',
    'BPix1+FPix1_neg',
    'BPix1+FPix2_pos',
    'BPix1+FPix2_neg',
    'BPix2+FPix1_pos',
    'BPix2+FPix1_neg',
    'BPix2+FPix2_pos',
    'BPix2+FPix2_neg',
    'FPix1_pos+FPix2_pos',
    'FPix1_neg+FPix2_neg',
    'FPix2_pos+TEC1_pos',
    'FPix2_pos+TEC2_pos',
    'TEC1_pos+TEC2_pos',
    'TEC2_pos+TEC3_pos',
    'FPix2_neg+TEC1_neg',
    'FPix2_neg+TEC2_neg',
    'TEC1_neg+TEC2_neg',
    'TEC2_neg+TEC3_neg' ),
  ComponentName = cms.string( "hltESPHIMixedLayerPairs" ),
  TEC = cms.PSet(
    # NOTE: "useRingSlector" is the parameter's actual (misspelled) name in
    # the producer; only TEC ring 1 is used.
    useRingSlector = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    minRing = cms.int32( 1 ),
    maxRing = cms.int32( 1 )
  ),
  FPix = cms.PSet(
    hitErrorRZ = cms.double( 0.0036 ),
    hitErrorRPhi = cms.double( 0.0051 ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    HitProducer = cms.string( "hltHISiPixelRecHits" ),
    useErrorsFromParam = cms.bool( True )
  ),
  TID = cms.PSet(  ),
  BPix = cms.PSet(
    hitErrorRZ = cms.double( 0.0060 ),
    hitErrorRPhi = cms.double( 0.0027 ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    HitProducer = cms.string( "hltHISiPixelRecHits" ),
    useErrorsFromParam = cms.bool( True )
  ),
  TIB = cms.PSet(  ),
  TOB = cms.PSet(  )
)
# Pixel-only layer pairs for HI seeding.
process.hltESPHIPixelLayerPairs = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2',
    'BPix1+BPix3',
    'BPix2+BPix3',
    'BPix1+FPix1_pos',
    'BPix1+FPix1_neg',
    'BPix1+FPix2_pos',
    'BPix1+FPix2_neg',
    'BPix2+FPix1_pos',
    'BPix2+FPix1_neg',
    'BPix2+FPix2_pos',
    'BPix2+FPix2_neg',
    'FPix1_pos+FPix2_pos',
    'FPix1_neg+FPix2_neg' ),
  ComponentName = cms.string( "hltESPHIPixelLayerPairs" ),
  TEC = cms.PSet(  ),
  FPix = cms.PSet(
    useErrorsFromParam = cms.bool( True ),
    hitErrorRPhi = cms.double( 0.0051 ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    HitProducer = cms.string( "hltHISiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0036 )
  ),
  TID = cms.PSet(  ),
  BPix = cms.PSet(
    useErrorsFromParam = cms.bool( True ),
    hitErrorRPhi = cms.double( 0.0027 ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    HitProducer = cms.string( "hltHISiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0060 )
  ),
  TIB = cms.PSet(  ),
  TOB = cms.PSet(  )
)
# Pixel-only layer triplets for HI seeding.
process.hltESPHIPixelLayerTriplets = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2+BPix3',
    'BPix1+BPix2+FPix1_pos',
    'BPix1+BPix2+FPix1_neg',
    'BPix1+FPix1_pos+FPix2_pos',
    'BPix1+FPix1_neg+FPix2_neg' ),
  ComponentName = cms.string( "hltESPHIPixelLayerTriplets" ),
  TEC = cms.PSet(  ),
  FPix = cms.PSet(
    useErrorsFromParam = cms.bool( True ),
    hitErrorRPhi = cms.double( 0.0051 ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    HitProducer = cms.string( "hltHISiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0036 )
  ),
  TID = cms.PSet(  ),
  BPix = cms.PSet(
    useErrorsFromParam = cms.bool( True ),
    hitErrorRPhi = cms.double( 0.0027 ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    HitProducer = cms.string( "hltHISiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0060 )
  ),
  TIB = cms.PSet(  ),
  TOB = cms.PSet(  )
)
# Transient-tracking rec-hit builder with all CPEs faked — used where hits
# are taken as-is without a refit.
process.hltESPHITTRHBuilderWithoutRefit = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
  StripCPE = cms.string( "Fake" ),
  Matcher = cms.string( "Fake" ),
  ComputeCoarseLocalPositionFromDisk = cms.bool( False ),
  PixelCPE = cms.string( "Fake" ),
  ComponentName = cms.string( "hltESPHITTRHBuilderWithoutRefit" )
)
# Standard KF fitting smoother (no outlier rejection, 5-hit minimum).
process.hltESPKFFittingSmoother = cms.ESProducer( "KFFittingSmootherESProducer",
  EstimateCut = cms.double( -1.0 ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  Fitter = cms.string( "hltESPKFTrajectoryFitter" ),
  MinNumberOfHits = cms.int32( 5 ),
  Smoother = cms.string( "hltESPKFTrajectorySmoother" ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  ComponentName = cms.string( "hltESPKFFittingSmoother" ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  RejectTracks = cms.bool( True )
)
# L2-muon variant: same settings but with the L2-muon fitter/smoother pair.
process.hltESPKFFittingSmootherForL2Muon = cms.ESProducer( "KFFittingSmootherESProducer",
  EstimateCut = cms.double( -1.0 ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  Fitter = cms.string( "hltESPKFTrajectoryFitterForL2Muon" ),
  MinNumberOfHits = cms.int32( 5 ),
  Smoother = cms.string( "hltESPKFTrajectorySmootherForL2Muon" ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  ComponentName = cms.string( "hltESPKFFittingSmootherForL2Muon" ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  RejectTracks = cms.bool( True )
)
# Runge-Kutta variant with outlier rejection (EstimateCut = 20) and a looser
# pixel-probability cut (-14 instead of -16).
process.hltESPKFFittingSmootherWithOutliersRejectionAndRK = cms.ESProducer( "KFFittingSmootherESProducer",
  EstimateCut = cms.double( 20.0 ),
  LogPixelProbabilityCut = cms.double( -14.0 ),
  Fitter = cms.string( "hltESPRKFitter" ),
  MinNumberOfHits = cms.int32( 3 ),
  Smoother = cms.string( "hltESPRKSmoother" ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( True ),
  ComponentName = cms.string( "hltESPKFFittingSmootherWithOutliersRejectionAndRK" ),
  NoInvalidHitsBeginEnd = cms.bool( True ),
  RejectTracks = cms.bool( True )
)
# KF trajectory fitters: identical except for the propagator used
# (material propagator vs. fast stepping-helix for L2 muons).
process.hltESPKFTrajectoryFitter = cms.ESProducer( "KFTrajectoryFitterESProducer",
  minHits = cms.int32( 3 ),
  ComponentName = cms.string( "hltESPKFTrajectoryFitter" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Propagator = cms.string( "PropagatorWithMaterial" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" )
)
process.hltESPKFTrajectoryFitterForL2Muon = cms.ESProducer( "KFTrajectoryFitterESProducer",
  minHits = cms.int32( 3 ),
  ComponentName = cms.string( "hltESPKFTrajectoryFitterForL2Muon" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" )
)
# Matching KF smoothers (error rescaling 100, except 10 for the muon
# track-loader variant).
process.hltESPKFTrajectorySmoother = cms.ESProducer( "KFTrajectorySmootherESProducer",
  errorRescaling = cms.double( 100.0 ),
  minHits = cms.int32( 3 ),
  ComponentName = cms.string( "hltESPKFTrajectorySmoother" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Propagator = cms.string( "PropagatorWithMaterial" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" )
)
process.hltESPKFTrajectorySmootherForL2Muon = cms.ESProducer( "KFTrajectorySmootherESProducer",
  errorRescaling = cms.double( 100.0 ),
  minHits = cms.int32( 3 ),
  ComponentName = cms.string( "hltESPKFTrajectorySmootherForL2Muon" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Propagator = cms.string( "hltESPFastSteppingHelixPropagatorOpposite" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" )
)
process.hltESPKFTrajectorySmootherForMuonTrackLoader = cms.ESProducer( "KFTrajectorySmootherESProducer",
  errorRescaling = cms.double( 10.0 ),
  minHits = cms.int32( 3 ),
  ComponentName = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Propagator = cms.string( "hltESPSmartPropagatorAnyOpposite" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" )
)
# Standard Kalman-filter updator shared by the fitters/smoothers above.
process.hltESPKFUpdator = cms.ESProducer( "KFUpdatorESProducer",
  ComponentName = cms.string( "hltESPKFUpdator" )
)
# 5D Kullback-Leibler distance used by hltESPCloseComponentsMerger5D.
process.hltESPKullbackLeiblerDistance5D = cms.ESProducer( "DistanceBetweenComponentsESProducer5D",
  ComponentName = cms.string( "hltESPKullbackLeiblerDistance5D" ),
  DistanceMeasure = cms.string( "KullbackLeibler" )
)
# Individual jet-energy-correction levels referenced by the chains above.
# L1FastJet pile-up subtraction; the event energy density rho is taken from
# the kT jet producers.
process.hltESPL1FastJetCorrectionESProducer = cms.ESProducer( "L1FastjetCorrectionESProducer",
  appendToDataLabel = cms.string( "" ),
  srcRho = cms.InputTag( 'hltKT6CaloJets','rho' ),
  algorithm = cms.string( "AK5CaloHLT" ),
  level = cms.string( "L1FastJet" )
)
process.hltESPL1PFFastJetCorrectionESProducer = cms.ESProducer( "L1FastjetCorrectionESProducer",
  appendToDataLabel = cms.string( "" ),
  srcRho = cms.InputTag( 'hltKT6PFJets','rho' ),
  algorithm = cms.string( "AK5PFHLT" ),
  level = cms.string( "L1FastJet" )
)
process.hltESPL1PFNoPUFastJetCorrectionESProducer = cms.ESProducer( "L1FastjetCorrectionESProducer",
  appendToDataLabel = cms.string( "" ),
  srcRho = cms.InputTag( 'hltKT6PFJets','rho' ),
  algorithm = cms.string( "AK5PFchsHLT" ),
  level = cms.string( "L1FastJet" )
)
# L2Relative corrections for the chs-PF, PF and calo algorithms.
process.hltESPL2PFNoPURelativeCorrectionESProducer = cms.ESProducer( "LXXXCorrectionESProducer",
  appendToDataLabel = cms.string( "" ),
  algorithm = cms.string( "AK5PFchsHLT" ),
  level = cms.string( "L2Relative" )
)
process.hltESPL2PFRelativeCorrectionESProducer = cms.ESProducer( "LXXXCorrectionESProducer",
  appendToDataLabel = cms.string( "" ),
  algorithm = cms.string( "AK5PFHLT" ),
  level = cms.string( "L2Relative" )
)
process.hltESPL2RelativeCorrectionESProducer = cms.ESProducer( "LXXXCorrectionESProducer",
  appendToDataLabel = cms.string( "" ),
  algorithm = cms.string( "AK5CaloHLT" ),
  level = cms.string( "L2Relative" )
)
# L3Absolute corrections.
process.hltESPL3AbsoluteCorrectionESProducer = cms.ESProducer( "LXXXCorrectionESProducer",
  appendToDataLabel = cms.string( "" ),
  algorithm = cms.string( "AK5CaloHLT" ),
  level = cms.string( "L3Absolute" )
)
# KF fitter for L3 muons, using the smart "any-direction" propagator.
process.hltESPL3MuKFTrajectoryFitter = cms.ESProducer( "KFTrajectoryFitterESProducer",
  minHits = cms.int32( 3 ),
  ComponentName = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Propagator = cms.string( "hltESPSmartPropagatorAny" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" )
)
process.hltESPL3PFAbsoluteCorrectionESProducer = cms.ESProducer( "LXXXCorrectionESProducer",
  appendToDataLabel = cms.string( "" ),
  algorithm = cms.string( "AK5PFHLT" ),
  level = cms.string( "L3Absolute" )
)
process.hltESPL3PFNoPUAbsoluteCorrectionESProducer = cms.ESProducer( "LXXXCorrectionESProducer",
  appendToDataLabel = cms.string( "" ),
  algorithm = cms.string( "AK5PFchsHLT" ),
  level = cms.string( "L3Absolute" )
)
# Standard HLT measurement tracker: regional, on-demand strip unpacking via
# the raw-to-clusters facility; bad-strip cuts effectively disabled (9999).
process.hltESPMeasurementTracker = cms.ESProducer( "MeasurementTrackerESProducer",
  StripCPE = cms.string( "StripCPEfromTrackAngle" ),
  inactivePixelDetectorLabels = cms.VInputTag(  ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
  OnDemand = cms.bool( True ),
  Regional = cms.bool( True ),
  UsePixelModuleQualityDB = cms.bool( True ),
  pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
  switchOffPixelsIfEmpty = cms.bool( True ),
  inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  stripClusterProducer = cms.string( "hltSiStripClusters" ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  SiStripQualityLabel = cms.string( "" ),
  badStripCuts = cms.PSet(
    TOB = cms.PSet(
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TID = cms.PSet(
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TEC = cms.PSet(
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TIB = cms.PSet(
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    )
  ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  ComponentName = cms.string( "hltESPMeasurementTracker" ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  HitMatcher = cms.string( "StandardMatcher" ),
  skipClusters = cms.InputTag( "" ),
  UseStripModuleQualityDB = cms.bool( True ),
  UseStripNoiseDB = cms.bool( False ),
  UseStripCablingDB = cms.bool( False )
)
# Heavy-ion measurement tracker: non-regional, non-on-demand, with HI pixel
# and strip cluster producers and tight bad-strip cuts (maxBad = 4,
# maxConsecutiveBad = 2) in all partitions.
process.hltESPMeasurementTrackerForHI = cms.ESProducer( "MeasurementTrackerESProducer",
  StripCPE = cms.string( "StripCPEfromTrackAngle" ),
  inactivePixelDetectorLabels = cms.VInputTag(  ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
  OnDemand = cms.bool( False ),
  Regional = cms.bool( False ),
  UsePixelModuleQualityDB = cms.bool( True ),
  pixelClusterProducer = cms.string( "hltHISiPixelClusters" ),
  switchOffPixelsIfEmpty = cms.bool( True ),
  inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripRawToDigi' ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  stripClusterProducer = cms.string( "hltHISiStripClustersNonRegional" ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  SiStripQualityLabel = cms.string( "" ),
  badStripCuts = cms.PSet(
    TOB = cms.PSet(
      maxConsecutiveBad = cms.uint32( 2 ),
      maxBad = cms.uint32( 4 )
    ),
    TID = cms.PSet(
      maxBad = cms.uint32( 4 ),
      maxConsecutiveBad = cms.uint32( 2 )
    ),
    TEC = cms.PSet(
      maxConsecutiveBad = cms.uint32( 2 ),
      maxBad = cms.uint32( 4 )
    ),
    TIB = cms.PSet(
      maxConsecutiveBad = cms.uint32( 2 ),
      maxBad = cms.uint32( 4 )
    )
  ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  ComponentName = cms.string( "hltESPMeasurementTrackerForHI" ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  HitMatcher = cms.string( "StandardMatcher" ),
  skipClusters = cms.InputTag( "" ),
  UseStripModuleQualityDB = cms.bool( True ),
  UseStripNoiseDB = cms.bool( False ),
  UseStripCablingDB = cms.bool( False )
)
# pp-collision counterpart of hltESPHIMixedLayerPairs: same layer list, but
# pixel hits come from "hltSiPixelRecHits" instead of the HI producer.
process.hltESPMixedLayerPairs = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2',
    'BPix1+BPix3',
    'BPix2+BPix3',
    'BPix1+FPix1_pos',
    'BPix1+FPix1_neg',
    'BPix1+FPix2_pos',
    'BPix1+FPix2_neg',
    'BPix2+FPix1_pos',
    'BPix2+FPix1_neg',
    'BPix2+FPix2_pos',
    'BPix2+FPix2_neg',
    'FPix1_pos+FPix2_pos',
    'FPix1_neg+FPix2_neg',
    'FPix2_pos+TEC1_pos',
    'FPix2_pos+TEC2_pos',
    'TEC1_pos+TEC2_pos',
    'TEC2_pos+TEC3_pos',
    'FPix2_neg+TEC1_neg',
    'FPix2_neg+TEC2_neg',
    'TEC1_neg+TEC2_neg',
    'TEC2_neg+TEC3_neg' ),
  ComponentName = cms.string( "hltESPMixedLayerPairs" ),
  TEC = cms.PSet(
    # NOTE: "useRingSlector" is the parameter's actual (misspelled) name in
    # the producer; only TEC ring 1 is used.
    useRingSlector = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    minRing = cms.int32( 1 ),
    maxRing = cms.int32( 1 )
  ),
  FPix = cms.PSet(
    useErrorsFromParam = cms.bool( True ),
    hitErrorRPhi = cms.double( 0.0051 ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0036 )
  ),
  TID = cms.PSet(  ),
  BPix = cms.PSet(
    useErrorsFromParam = cms.bool( True ),
    hitErrorRPhi = cms.double( 0.0027 ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0060 )
  ),
  TIB = cms.PSet(  ),
  TOB = cms.PSet(  )
)
# Trajectory builders for the muon+track J/psi triggers: single-candidate
# (maxCand = 1) CKF building without invalid hits.  The "Eff" (efficiency)
# variant differs from the standard one only via its filter (9 vs. 8 max
# hits — see the filter definitions).
process.hltESPMuTrackJpsiEffTrajectoryBuilder = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltESPMuTrackJpsiEffTrajectoryFilter" ),
  maxCand = cms.int32( 1 ),
  ComponentName = cms.string( "hltESPMuTrackJpsiEffTrajectoryBuilder" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
# Filter for the efficiency builder: pT > 1 GeV, 5-9 hits.
process.hltESPMuTrackJpsiEffTrajectoryFilter = cms.ESProducer( "TrajectoryFilterESProducer",
  filterPset = cms.PSet(
    minPt = cms.double( 1.0 ),
    minHitsMinPt = cms.int32( 3 ),
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    maxLostHits = cms.int32( 1 ),
    maxNumberOfHits = cms.int32( 9 ),
    maxConsecLostHits = cms.int32( 1 ),
    minimumNumberOfHits = cms.int32( 5 ),
    nSigmaMinPt = cms.double( 5.0 ),
    chargeSignificance = cms.double( -1.0 )
  ),
  ComponentName = cms.string( "hltESPMuTrackJpsiEffTrajectoryFilter" )
)
process.hltESPMuTrackJpsiTrajectoryBuilder = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltESPMuTrackJpsiTrajectoryFilter" ),
  maxCand = cms.int32( 1 ),
  ComponentName = cms.string( "hltESPMuTrackJpsiTrajectoryBuilder" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
process.hltESPMuTrackJpsiTrajectoryFilter = cms.ESProducer( "TrajectoryFilterESProducer",
filterPset = cms.PSet(
minPt = cms.double( 1.0 ),
minHitsMinPt = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
maxLostHits = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 8 ),
maxConsecLostHits = cms.int32( 1 ),
minimumNumberOfHits = cms.int32( 5 ),
nSigmaMinPt = cms.double( 5.0 ),
chargeSignificance = cms.double( -1.0 )
),
ComponentName = cms.string( "hltESPMuTrackJpsiTrajectoryFilter" )
)
process.hltESPMuonCkfTrajectoryBuilder = cms.ESProducer( "MuonCkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltESPMuonCkfTrajectoryFilter" ),
maxCand = cms.int32( 5 ),
ComponentName = cms.string( "hltESPMuonCkfTrajectoryBuilder" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
useSeedLayer = cms.bool( False ),
deltaEta = cms.double( 0.1 ),
deltaPhi = cms.double( 0.1 ),
estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
rescaleErrorIfFail = cms.double( 1.0 ),
propagatorProximity = cms.string( "SteppingHelixPropagatorAny" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
intermediateCleaning = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltESPMuonCkfTrajectoryBuilderSeedHit = cms.ESProducer( "MuonCkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltESPMuonCkfTrajectoryFilter" ),
maxCand = cms.int32( 5 ),
ComponentName = cms.string( "hltESPMuonCkfTrajectoryBuilderSeedHit" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
useSeedLayer = cms.bool( True ),
deltaEta = cms.double( 0.1 ),
deltaPhi = cms.double( 0.1 ),
estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
rescaleErrorIfFail = cms.double( 1.0 ),
propagatorProximity = cms.string( "SteppingHelixPropagatorAny" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
intermediateCleaning = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltESPMuonCkfTrajectoryFilter = cms.ESProducer( "TrajectoryFilterESProducer",
filterPset = cms.PSet(
minPt = cms.double( 0.9 ),
minHitsMinPt = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
maxLostHits = cms.int32( 1 ),
maxNumberOfHits = cms.int32( -1 ),
maxConsecLostHits = cms.int32( 1 ),
chargeSignificance = cms.double( -1.0 ),
nSigmaMinPt = cms.double( 5.0 ),
minimumNumberOfHits = cms.int32( 5 )
),
ComponentName = cms.string( "hltESPMuonCkfTrajectoryFilter" )
)
process.hltESPMuonDetLayerGeometryESProducer = cms.ESProducer( "MuonDetLayerGeometryESProducer" )
process.hltESPMuonTransientTrackingRecHitBuilder = cms.ESProducer( "MuonTransientTrackingRecHitBuilderESProducer",
ComponentName = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
)
process.hltESPPixelCPEGeneric = cms.ESProducer( "PixelCPEGenericESProducer",
EdgeClusterErrorX = cms.double( 50.0 ),
DoCosmics = cms.bool( False ),
LoadTemplatesFromDB = cms.bool( True ),
UseErrorsFromTemplates = cms.bool( True ),
eff_charge_cut_highX = cms.double( 1.0 ),
TruncatePixelCharge = cms.bool( True ),
size_cutY = cms.double( 3.0 ),
size_cutX = cms.double( 3.0 ),
inflate_all_errors_no_trk_angle = cms.bool( False ),
IrradiationBiasCorrection = cms.bool( False ),
TanLorentzAnglePerTesla = cms.double( 0.106 ),
inflate_errors = cms.bool( False ),
eff_charge_cut_lowX = cms.double( 0.0 ),
eff_charge_cut_highY = cms.double( 1.0 ),
ClusterProbComputationFlag = cms.int32( 0 ),
EdgeClusterErrorY = cms.double( 85.0 ),
ComponentName = cms.string( "hltESPPixelCPEGeneric" ),
eff_charge_cut_lowY = cms.double( 0.0 ),
PixelErrorParametrization = cms.string( "NOTcmsim" ),
Alpha2Order = cms.bool( True )
)
process.hltESPPixelCPETemplateReco = cms.ESProducer( "PixelCPETemplateRecoESProducer",
DoCosmics = cms.bool( False ),
LoadTemplatesFromDB = cms.bool( True ),
ComponentName = cms.string( "hltESPPixelCPETemplateReco" ),
Alpha2Order = cms.bool( True ),
ClusterProbComputationFlag = cms.int32( 0 ),
speed = cms.int32( -2 ),
UseClusterSplitter = cms.bool( False )
)
process.hltESPPixelLayerPairs = cms.ESProducer( "SeedingLayersESProducer",
layerList = cms.vstring( 'BPix1+BPix2',
'BPix1+BPix3',
'BPix2+BPix3',
'BPix1+FPix1_pos',
'BPix1+FPix1_neg',
'BPix1+FPix2_pos',
'BPix1+FPix2_neg',
'BPix2+FPix1_pos',
'BPix2+FPix1_neg',
'BPix2+FPix2_pos',
'BPix2+FPix2_neg',
'FPix1_pos+FPix2_pos',
'FPix1_neg+FPix2_neg' ),
ComponentName = cms.string( "hltESPPixelLayerPairs" ),
TEC = cms.PSet( ),
FPix = cms.PSet(
useErrorsFromParam = cms.bool( True ),
hitErrorRPhi = cms.double( 0.0051 ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
HitProducer = cms.string( "hltSiPixelRecHits" ),
hitErrorRZ = cms.double( 0.0036 )
),
TID = cms.PSet( ),
BPix = cms.PSet(
useErrorsFromParam = cms.bool( True ),
hitErrorRPhi = cms.double( 0.0027 ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
HitProducer = cms.string( "hltSiPixelRecHits" ),
hitErrorRZ = cms.double( 0.0060 )
),
TIB = cms.PSet( ),
TOB = cms.PSet( )
)
process.hltESPPixelLayerTriplets = cms.ESProducer( "SeedingLayersESProducer",
layerList = cms.vstring( 'BPix1+BPix2+BPix3',
'BPix1+BPix2+FPix1_pos',
'BPix1+BPix2+FPix1_neg',
'BPix1+FPix1_pos+FPix2_pos',
'BPix1+FPix1_neg+FPix2_neg' ),
ComponentName = cms.string( "hltESPPixelLayerTriplets" ),
TEC = cms.PSet( ),
FPix = cms.PSet(
useErrorsFromParam = cms.bool( True ),
hitErrorRPhi = cms.double( 0.0051 ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
HitProducer = cms.string( "hltSiPixelRecHits" ),
hitErrorRZ = cms.double( 0.0036 )
),
TID = cms.PSet( ),
BPix = cms.PSet(
useErrorsFromParam = cms.bool( True ),
hitErrorRPhi = cms.double( 0.0027 ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
HitProducer = cms.string( "hltSiPixelRecHits" ),
hitErrorRZ = cms.double( 0.0060 )
),
TIB = cms.PSet( ),
TOB = cms.PSet( )
)
process.hltESPPixelLayerTripletsHITHB = cms.ESProducer( "SeedingLayersESProducer",
layerList = cms.vstring( 'BPix1+BPix2+BPix3' ),
ComponentName = cms.string( "hltESPPixelLayerTripletsHITHB" ),
TEC = cms.PSet( ),
FPix = cms.PSet(
useErrorsFromParam = cms.bool( True ),
hitErrorRPhi = cms.double( 0.0051 ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
HitProducer = cms.string( "hltSiPixelRecHits" ),
hitErrorRZ = cms.double( 0.0036 )
),
TID = cms.PSet( ),
BPix = cms.PSet(
useErrorsFromParam = cms.bool( True ),
hitErrorRPhi = cms.double( 0.0027 ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
HitProducer = cms.string( "hltSiPixelRecHits" ),
hitErrorRZ = cms.double( 0.0060 )
),
TIB = cms.PSet( ),
TOB = cms.PSet( )
)
process.hltESPPixelLayerTripletsHITHE = cms.ESProducer( "SeedingLayersESProducer",
layerList = cms.vstring( 'BPix1+BPix2+FPix1_pos',
'BPix1+BPix2+FPix1_neg',
'BPix1+FPix1_pos+FPix2_pos',
'BPix1+FPix1_neg+FPix2_neg' ),
ComponentName = cms.string( "hltESPPixelLayerTripletsHITHE" ),
TEC = cms.PSet( ),
FPix = cms.PSet(
useErrorsFromParam = cms.bool( True ),
hitErrorRPhi = cms.double( 0.0051 ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
HitProducer = cms.string( "hltSiPixelRecHits" ),
hitErrorRZ = cms.double( 0.0036 )
),
TID = cms.PSet( ),
BPix = cms.PSet(
useErrorsFromParam = cms.bool( True ),
hitErrorRPhi = cms.double( 0.0027 ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
HitProducer = cms.string( "hltSiPixelRecHits" ),
hitErrorRZ = cms.double( 0.0060 )
),
TIB = cms.PSet( ),
TOB = cms.PSet( )
)
process.hltESPPixelLayerTripletsReg = cms.ESProducer( "SeedingLayersESProducer",
layerList = cms.vstring( 'BPix1+BPix2+BPix3',
'BPix1+BPix2+FPix1_pos',
'BPix1+BPix2+FPix1_neg',
'BPix1+FPix1_pos+FPix2_pos',
'BPix1+FPix1_neg+FPix2_neg' ),
ComponentName = cms.string( "hltESPPixelLayerTripletsReg" ),
TEC = cms.PSet( ),
FPix = cms.PSet(
hitErrorRZ = cms.double( 0.0036 ),
hitErrorRPhi = cms.double( 0.0051 ),
useErrorsFromParam = cms.bool( True ),
HitProducer = cms.string( "hltSiPixelRecHitsReg" ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" )
),
TID = cms.PSet( ),
BPix = cms.PSet(
hitErrorRZ = cms.double( 0.0060 ),
hitErrorRPhi = cms.double( 0.0027 ),
useErrorsFromParam = cms.bool( True ),
HitProducer = cms.string( "hltSiPixelRecHitsReg" ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" )
),
TIB = cms.PSet( ),
TOB = cms.PSet( )
)
process.hltESPPromptTrackCountingESProducer = cms.ESProducer( "PromptTrackCountingESProducer",
maxImpactParameterSig = cms.double( 999999.0 ),
deltaR = cms.double( -1.0 ),
maximumDecayLength = cms.double( 999999.0 ),
impactParameterType = cms.int32( 0 ),
trackQualityClass = cms.string( "any" ),
deltaRmin = cms.double( 0.0 ),
maxImpactParameter = cms.double( 0.03 ),
maximumDistanceToJetAxis = cms.double( 999999.0 ),
nthTrack = cms.int32( -1 )
)
process.hltESPRKTrajectoryFitter = cms.ESProducer( "KFTrajectoryFitterESProducer",
minHits = cms.int32( 3 ),
ComponentName = cms.string( "hltESPRKFitter" ),
Estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
Updator = cms.string( "hltESPKFUpdator" ),
Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" )
)
process.hltESPRKTrajectorySmoother = cms.ESProducer( "KFTrajectorySmootherESProducer",
errorRescaling = cms.double( 100.0 ),
minHits = cms.int32( 3 ),
ComponentName = cms.string( "hltESPRKSmoother" ),
Estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
Updator = cms.string( "hltESPKFUpdator" ),
Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" )
)
process.hltESPRungeKuttaTrackerPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
PropagationDirection = cms.string( "alongMomentum" ),
ComponentName = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
Mass = cms.double( 0.105 ),
ptMin = cms.double( -1.0 ),
MaxDPhi = cms.double( 1.6 ),
useRungeKutta = cms.bool( True )
)
process.hltESPSiStripRegionConnectivity = cms.ESProducer( "SiStripRegionConnectivity",
EtaDivisions = cms.untracked.uint32( 20 ),
PhiDivisions = cms.untracked.uint32( 20 ),
EtaMax = cms.untracked.double( 2.5 )
)
process.hltESPSmartPropagator = cms.ESProducer( "SmartPropagatorESProducer",
Epsilon = cms.double( 5.0 ),
TrackerPropagator = cms.string( "PropagatorWithMaterial" ),
MuonPropagator = cms.string( "hltESPSteppingHelixPropagatorAlong" ),
PropagationDirection = cms.string( "alongMomentum" ),
ComponentName = cms.string( "hltESPSmartPropagator" )
)
process.hltESPSmartPropagatorAny = cms.ESProducer( "SmartPropagatorESProducer",
Epsilon = cms.double( 5.0 ),
TrackerPropagator = cms.string( "PropagatorWithMaterial" ),
MuonPropagator = cms.string( "SteppingHelixPropagatorAny" ),
PropagationDirection = cms.string( "alongMomentum" ),
ComponentName = cms.string( "hltESPSmartPropagatorAny" )
)
process.hltESPSmartPropagatorAnyOpposite = cms.ESProducer( "SmartPropagatorESProducer",
Epsilon = cms.double( 5.0 ),
TrackerPropagator = cms.string( "PropagatorWithMaterialOpposite" ),
MuonPropagator = cms.string( "SteppingHelixPropagatorAny" ),
PropagationDirection = cms.string( "oppositeToMomentum" ),
ComponentName = cms.string( "hltESPSmartPropagatorAnyOpposite" )
)
process.hltESPSmartPropagatorOpposite = cms.ESProducer( "SmartPropagatorESProducer",
Epsilon = cms.double( 5.0 ),
TrackerPropagator = cms.string( "PropagatorWithMaterialOpposite" ),
MuonPropagator = cms.string( "hltESPSteppingHelixPropagatorOpposite" ),
PropagationDirection = cms.string( "oppositeToMomentum" ),
ComponentName = cms.string( "hltESPSmartPropagatorOpposite" )
)
process.hltESPSoftLeptonByDistance = cms.ESProducer( "LeptonTaggerByDistanceESProducer",
distance = cms.double( 0.5 )
)
process.hltESPSoftLeptonByPt = cms.ESProducer( "LeptonTaggerByPtESProducer",
ipSign = cms.string( "any" )
)
process.hltESPSteppingHelixPropagatorAlong = cms.ESProducer( "SteppingHelixPropagatorESProducer",
NoErrorPropagation = cms.bool( False ),
endcapShiftInZPos = cms.double( 0.0 ),
PropagationDirection = cms.string( "alongMomentum" ),
useTuningForL2Speed = cms.bool( False ),
useIsYokeFlag = cms.bool( True ),
endcapShiftInZNeg = cms.double( 0.0 ),
SetVBFPointer = cms.bool( False ),
AssumeNoMaterial = cms.bool( False ),
returnTangentPlane = cms.bool( True ),
useInTeslaFromMagField = cms.bool( False ),
VBFName = cms.string( "VolumeBasedMagneticField" ),
useEndcapShiftsInZ = cms.bool( False ),
sendLogWarning = cms.bool( False ),
useMatVolumes = cms.bool( True ),
debug = cms.bool( False ),
ApplyRadX0Correction = cms.bool( True ),
useMagVolumes = cms.bool( True ),
ComponentName = cms.string( "hltESPSteppingHelixPropagatorAlong" )
)
process.hltESPSteppingHelixPropagatorOpposite = cms.ESProducer( "SteppingHelixPropagatorESProducer",
NoErrorPropagation = cms.bool( False ),
endcapShiftInZPos = cms.double( 0.0 ),
PropagationDirection = cms.string( "oppositeToMomentum" ),
useTuningForL2Speed = cms.bool( False ),
useIsYokeFlag = cms.bool( True ),
endcapShiftInZNeg = cms.double( 0.0 ),
SetVBFPointer = cms.bool( False ),
AssumeNoMaterial = cms.bool( False ),
returnTangentPlane = cms.bool( True ),
useInTeslaFromMagField = cms.bool( False ),
VBFName = cms.string( "VolumeBasedMagneticField" ),
useEndcapShiftsInZ = cms.bool( False ),
sendLogWarning = cms.bool( False ),
useMatVolumes = cms.bool( True ),
debug = cms.bool( False ),
ApplyRadX0Correction = cms.bool( True ),
useMagVolumes = cms.bool( True ),
ComponentName = cms.string( "hltESPSteppingHelixPropagatorOpposite" )
)
process.hltESPStraightLinePropagator = cms.ESProducer( "StraightLinePropagatorESProducer",
ComponentName = cms.string( "hltESPStraightLinePropagator" ),
PropagationDirection = cms.string( "alongMomentum" )
)
process.hltESPTTRHBWithTrackAngle = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
StripCPE = cms.string( "StripCPEfromTrackAngle" ),
Matcher = cms.string( "StandardMatcher" ),
ComputeCoarseLocalPositionFromDisk = cms.bool( False ),
PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
ComponentName = cms.string( "hltESPTTRHBWithTrackAngle" )
)
process.hltESPTTRHBuilderAngleAndTemplate = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
StripCPE = cms.string( "StripCPEfromTrackAngle" ),
Matcher = cms.string( "StandardMatcher" ),
ComputeCoarseLocalPositionFromDisk = cms.bool( False ),
PixelCPE = cms.string( "hltESPPixelCPETemplateReco" ),
ComponentName = cms.string( "hltESPTTRHBuilderAngleAndTemplate" )
)
process.hltESPTTRHBuilderPixelOnly = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
StripCPE = cms.string( "Fake" ),
Matcher = cms.string( "StandardMatcher" ),
ComputeCoarseLocalPositionFromDisk = cms.bool( False ),
PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
ComponentName = cms.string( "hltESPTTRHBuilderPixelOnly" )
)
process.hltESPTTRHBuilderWithoutAngle4PixelTriplets = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
StripCPE = cms.string( "Fake" ),
Matcher = cms.string( "StandardMatcher" ),
ComputeCoarseLocalPositionFromDisk = cms.bool( False ),
PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
ComponentName = cms.string( "hltESPTTRHBuilderWithoutAngle4PixelTriplets" )
)
process.hltESPTrackCounting3D1st = cms.ESProducer( "TrackCountingESProducer",
deltaR = cms.double( -1.0 ),
maximumDistanceToJetAxis = cms.double( 0.07 ),
impactParameterType = cms.int32( 0 ),
trackQualityClass = cms.string( "any" ),
maximumDecayLength = cms.double( 5.0 ),
nthTrack = cms.int32( 1 )
)
process.hltESPTrackCounting3D2nd = cms.ESProducer( "TrackCountingESProducer",
deltaR = cms.double( -1.0 ),
maximumDistanceToJetAxis = cms.double( 0.07 ),
impactParameterType = cms.int32( 0 ),
trackQualityClass = cms.string( "any" ),
maximumDecayLength = cms.double( 5.0 ),
nthTrack = cms.int32( 2 )
)
process.hltESPTrackerRecoGeometryESProducer = cms.ESProducer( "TrackerRecoGeometryESProducer",
appendToDataLabel = cms.string( "" ),
trackerGeometryLabel = cms.untracked.string( "" )
)
process.hltESPTrajectoryBuilderForElectrons = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "hltESPFwdElectronPropagator" ),
trajectoryFilterName = cms.string( "hltESPTrajectoryFilterForElectrons" ),
maxCand = cms.int32( 5 ),
ComponentName = cms.string( "hltESPTrajectoryBuilderForElectrons" ),
propagatorOpposite = cms.string( "hltESPBwdElectronPropagator" ),
MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
estimator = cms.string( "hltESPElectronChi2" ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( True ),
intermediateCleaning = cms.bool( False ),
lostHitPenalty = cms.double( 90.0 )
)
process.hltESPTrajectoryBuilderIT = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltESPTrajectoryFilterIT" ),
maxCand = cms.int32( 2 ),
ComponentName = cms.string( "hltESPTrajectoryBuilderIT" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
estimator = cms.string( "hltESPChi2MeasurementEstimator9" ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( False ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltESPTrajectoryBuilderL3 = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltESPTrajectoryFilterL3" ),
maxCand = cms.int32( 5 ),
ComponentName = cms.string( "hltESPTrajectoryBuilderL3" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( False ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltESPTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
ComponentName = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
fractionShared = cms.double( 0.5 ),
ValidHitBonus = cms.double( 100.0 ),
ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
MissingHitPenalty = cms.double( 0.0 ),
allowSharedFirstHit = cms.bool( False )
)
process.hltESPTrajectoryCleanerBySharedSeeds = cms.ESProducer( "TrajectoryCleanerESProducer",
ComponentName = cms.string( "hltESPTrajectoryCleanerBySharedSeeds" ),
fractionShared = cms.double( 0.5 ),
ValidHitBonus = cms.double( 100.0 ),
ComponentType = cms.string( "TrajectoryCleanerBySharedSeeds" ),
MissingHitPenalty = cms.double( 0.0 ),
allowSharedFirstHit = cms.bool( True )
)
process.hltESPTrajectoryFilterForElectrons = cms.ESProducer( "TrajectoryFilterESProducer",
filterPset = cms.PSet(
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
minPt = cms.double( 2.0 ),
minHitsMinPt = cms.int32( -1 ),
maxLostHits = cms.int32( 1 ),
maxNumberOfHits = cms.int32( -1 ),
maxConsecLostHits = cms.int32( 1 ),
nSigmaMinPt = cms.double( 5.0 ),
minimumNumberOfHits = cms.int32( 5 ),
chargeSignificance = cms.double( -1.0 )
),
ComponentName = cms.string( "hltESPTrajectoryFilterForElectrons" )
)
process.hltESPTrajectoryFilterIT = cms.ESProducer( "TrajectoryFilterESProducer",
filterPset = cms.PSet(
minPt = cms.double( 0.3 ),
minHitsMinPt = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
maxLostHits = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 100 ),
maxConsecLostHits = cms.int32( 1 ),
minimumNumberOfHits = cms.int32( 3 ),
nSigmaMinPt = cms.double( 5.0 ),
chargeSignificance = cms.double( -1.0 )
),
ComponentName = cms.string( "hltESPTrajectoryFilterIT" )
)
process.hltESPTrajectoryFilterL3 = cms.ESProducer( "TrajectoryFilterESProducer",
filterPset = cms.PSet(
minPt = cms.double( 0.5 ),
minHitsMinPt = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
maxLostHits = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 1000000000 ),
maxConsecLostHits = cms.int32( 1 ),
minimumNumberOfHits = cms.int32( 5 ),
nSigmaMinPt = cms.double( 5.0 ),
chargeSignificance = cms.double( -1.0 )
),
ComponentName = cms.string( "hltESPTrajectoryFilterL3" )
)
process.hltESPTrajectoryFitterRK = cms.ESProducer( "KFTrajectoryFitterESProducer",
minHits = cms.int32( 3 ),
ComponentName = cms.string( "hltESPTrajectoryFitterRK" ),
Estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
Updator = cms.string( "hltESPKFUpdator" ),
Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" )
)
process.hltESPTrajectorySmootherRK = cms.ESProducer( "KFTrajectorySmootherESProducer",
errorRescaling = cms.double( 100.0 ),
minHits = cms.int32( 3 ),
ComponentName = cms.string( "hltESPTrajectorySmootherRK" ),
Estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
Updator = cms.string( "hltESPKFUpdator" ),
Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" )
)
process.hltESPbJetRegionalTrajectoryBuilder = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltESPbJetRegionalTrajectoryFilter" ),
maxCand = cms.int32( 1 ),
ComponentName = cms.string( "hltESPbJetRegionalTrajectoryBuilder" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( False ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltESPbJetRegionalTrajectoryFilter = cms.ESProducer( "TrajectoryFilterESProducer",
filterPset = cms.PSet(
minPt = cms.double( 1.0 ),
minHitsMinPt = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
maxLostHits = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 8 ),
maxConsecLostHits = cms.int32( 1 ),
minimumNumberOfHits = cms.int32( 5 ),
nSigmaMinPt = cms.double( 5.0 ),
chargeSignificance = cms.double( -1.0 )
),
ComponentName = cms.string( "hltESPbJetRegionalTrajectoryFilter" )
)
process.hltHIAllESPCkf3HitTrajectoryBuilder = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltESPCkf3HitTrajectoryFilter" ),
maxCand = cms.int32( 5 ),
ComponentName = cms.string( "hltHIAllESPCkf3HitTrajectoryBuilder" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
MeasurementTrackerName = cms.string( "hltHIAllESPMeasurementTracker" ),
estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( True ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltHIAllESPCkfTrajectoryBuilder = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltESPCkfTrajectoryFilter" ),
maxCand = cms.int32( 5 ),
ComponentName = cms.string( "hltHIAllESPCkfTrajectoryBuilder" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
MeasurementTrackerName = cms.string( "hltHIAllESPMeasurementTracker" ),
estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( True ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltHIAllESPMeasurementTracker = cms.ESProducer( "MeasurementTrackerESProducer",
StripCPE = cms.string( "StripCPEfromTrackAngle" ),
inactivePixelDetectorLabels = cms.VInputTag( ),
PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
stripLazyGetterProducer = cms.string( "hltHISiStripRawToClustersFacility" ),
OnDemand = cms.bool( True ),
Regional = cms.bool( True ),
UsePixelModuleQualityDB = cms.bool( True ),
pixelClusterProducer = cms.string( "hltHISiPixelClusters" ),
switchOffPixelsIfEmpty = cms.bool( True ),
inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
MaskBadAPVFibers = cms.bool( True ),
UseStripStripQualityDB = cms.bool( True ),
UsePixelROCQualityDB = cms.bool( True ),
DebugPixelROCQualityDB = cms.untracked.bool( False ),
UseStripAPVFiberQualityDB = cms.bool( True ),
stripClusterProducer = cms.string( "hltHISiStripClusters" ),
DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
DebugStripStripQualityDB = cms.untracked.bool( False ),
SiStripQualityLabel = cms.string( "" ),
badStripCuts = cms.PSet(
TID = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TOB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TEC = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TIB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
)
),
DebugStripModuleQualityDB = cms.untracked.bool( False ),
ComponentName = cms.string( "hltHIAllESPMeasurementTracker" ),
DebugPixelModuleQualityDB = cms.untracked.bool( False ),
HitMatcher = cms.string( "StandardMatcher" ),
skipClusters = cms.InputTag( "" ),
UseStripModuleQualityDB = cms.bool( True ),
UseStripNoiseDB = cms.bool( False ),
UseStripCablingDB = cms.bool( False )
)
process.hltHIAllESPMuonCkfTrajectoryBuilder = cms.ESProducer( "MuonCkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltESPMuonCkfTrajectoryFilter" ),
maxCand = cms.int32( 5 ),
ComponentName = cms.string( "hltHIAllESPMuonCkfTrajectoryBuilder" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
useSeedLayer = cms.bool( False ),
deltaEta = cms.double( 0.1 ),
deltaPhi = cms.double( 0.1 ),
estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
rescaleErrorIfFail = cms.double( 1.0 ),
propagatorProximity = cms.string( "SteppingHelixPropagatorAny" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
MeasurementTrackerName = cms.string( "hltHIAllESPMeasurementTracker" ),
intermediateCleaning = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltHIAllESPTrajectoryBuilderIT = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
trajectoryFilterName = cms.string( "hltESPTrajectoryFilterIT" ),
maxCand = cms.int32( 5 ),
ComponentName = cms.string( "hltHIAllESPTrajectoryBuilderIT" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
MeasurementTrackerName = cms.string( "hltHIAllESPMeasurementTracker" ),
estimator = cms.string( "hltESPChi2MeasurementEstimator" ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( False ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 )
)
process.hltIter1ESPMeasurementTracker = cms.ESProducer( "MeasurementTrackerESProducer",
StripCPE = cms.string( "StripCPEfromTrackAngle" ),
inactivePixelDetectorLabels = cms.VInputTag( ),
PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
OnDemand = cms.bool( True ),
Regional = cms.bool( True ),
UsePixelModuleQualityDB = cms.bool( True ),
pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
switchOffPixelsIfEmpty = cms.bool( True ),
inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
MaskBadAPVFibers = cms.bool( True ),
UseStripStripQualityDB = cms.bool( True ),
UsePixelROCQualityDB = cms.bool( True ),
DebugPixelROCQualityDB = cms.untracked.bool( False ),
UseStripAPVFiberQualityDB = cms.bool( True ),
stripClusterProducer = cms.string( "hltIter1SiStripClusters" ),
DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
DebugStripStripQualityDB = cms.untracked.bool( False ),
SiStripQualityLabel = cms.string( "" ),
badStripCuts = cms.PSet(
TOB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TID = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TEC = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
),
TIB = cms.PSet(
maxConsecutiveBad = cms.uint32( 9999 ),
maxBad = cms.uint32( 9999 )
)
),
DebugStripModuleQualityDB = cms.untracked.bool( False ),
ComponentName = cms.string( "hltIter1ESPMeasurementTracker" ),
DebugPixelModuleQualityDB = cms.untracked.bool( False ),
HitMatcher = cms.string( "StandardMatcher" ),
skipClusters = cms.InputTag( "hltIter1ClustersRefRemoval" ),
UseStripModuleQualityDB = cms.bool( True ),
UseStripNoiseDB = cms.bool( False ),
UseStripCablingDB = cms.bool( False )
)
# Seeding-layer sets for HLT iteration-1 track seeding: pixel triplets only
# (barrel BPix1-3 plus barrel+forward combinations). Both BPix and FPix skip
# clusters flagged by hltIter1ClustersRefRemoval (hits already used by iter0).
process.hltIter1ESPPixelLayerTriplets = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2+BPix3',
    'BPix1+BPix2+FPix1_pos',
    'BPix1+BPix2+FPix1_neg',
    'BPix1+FPix1_pos+FPix2_pos',
    'BPix1+FPix1_neg+FPix2_neg' ),
  ComponentName = cms.string( "hltIter1ESPPixelLayerTriplets" ),
  TEC = cms.PSet( ),
  FPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0036 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter1ClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0051 )
  ),
  TID = cms.PSet( ),
  BPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0060 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter1ClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0027 )
  ),
  TIB = cms.PSet( ),
  TOB = cms.PSet( )
)
# CKF trajectory builder for iteration 1: up to 2 candidates per seed, using the
# iter1 measurement tracker and the iter1 trajectory filter defined below.
process.hltIter1ESPTrajectoryBuilderIT = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltIter1ESPTrajectoryFilterIT" ),
  maxCand = cms.int32( 2 ),
  ComponentName = cms.string( "hltIter1ESPTrajectoryBuilderIT" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltIter1ESPMeasurementTracker" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
# Trajectory filter for iteration 1: min pT 0.2 GeV, >=3 hits, at most 1 lost hit
# (and at most 1 consecutive lost hit) per candidate.
process.hltIter1ESPTrajectoryFilterIT = cms.ESProducer( "TrajectoryFilterESProducer",
  filterPset = cms.PSet( 
    minPt = cms.double( 0.2 ),
    minHitsMinPt = cms.int32( 3 ),
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    maxLostHits = cms.int32( 1 ),
    maxNumberOfHits = cms.int32( 100 ),
    maxConsecLostHits = cms.int32( 1 ),
    minimumNumberOfHits = cms.int32( 3 ),
    nSigmaMinPt = cms.double( 5.0 ),
    chargeSignificance = cms.double( -1.0 )
  ),
  ComponentName = cms.string( "hltIter1ESPTrajectoryFilterIT" )
)
# Seeding-layer sets for PA (proton-lead) iteration 3: mixed pixel+strip triplets
# (pixel layers plus TEC rings and TIB layers). Pixel hits skip clusters flagged
# by hltPAFullTrackIter3ClustersRefRemoval.
process.hltIter3ESPLayerTripletsPA = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2+BPix3',
    'BPix1+BPix2+FPix1_pos',
    'BPix1+BPix2+FPix1_neg',
    'BPix1+FPix1_pos+FPix2_pos',
    'BPix1+FPix1_neg+FPix2_neg',
    'BPix2+FPix1_pos+FPix2_pos',
    'BPix2+FPix1_neg+FPix2_neg',
    'FPix1_pos+FPix2_pos+TEC1_pos',
    'FPix1_neg+FPix2_neg+TEC1_neg',
    'FPix2_pos+TEC2_pos+TEC3_pos',
    'FPix2_neg+TEC2_neg+TEC3_neg',
    'BPix2+BPix3+TIB1',
    'BPix2+BPix3+TIB2',
    'BPix1+BPix3+TIB1',
    'BPix1+BPix3+TIB2',
    'BPix1+BPix2+TIB1',
    'BPix1+BPix2+TIB2' ),
  ComponentName = cms.string( "hltIter3ESPLayerTripletsPA" ),
  TEC = cms.PSet( 
    # NOTE(review): 'useRingSlector' spelling matches the other TEC PSets in this
    # dump — presumably the parameter name expected upstream; do not "correct" it.
    useRingSlector = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    minRing = cms.int32( 1 ),
    maxRing = cms.int32( 1 )
  ),
  FPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0036 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltPAFullTrackIter3ClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0051 )
  ),
  TID = cms.PSet( ),
  BPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0060 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltPAFullTrackIter3ClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0027 )
  ),
  TIB = cms.PSet( TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ) ),
  TOB = cms.PSet( )
)
# CKF trajectory builder for PA iteration 3: single candidate per seed (maxCand=1),
# reusing the shared hltIter3ESPTrajectoryFilterIT with the PA measurement tracker.
process.hltIter3ESPTrajectoryBuilderITPA = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltIter3ESPTrajectoryFilterIT" ),
  maxCand = cms.int32( 1 ),
  ComponentName = cms.string( "hltIter3ESPTrajectoryBuilderITPA" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltIter3ESPMeasurementTrackerPA" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
# Measurement tracker for PA iteration 3: regional, on-demand strip unpacking via
# hltSiStripRawToClustersFacility; strip clusters from hltPAFullTrackIter3SiStripClusters,
# skipping clusters flagged by hltPAFullTrackIter3ClustersRefRemoval.
process.hltIter3ESPMeasurementTrackerPA = cms.ESProducer( "MeasurementTrackerESProducer",
  StripCPE = cms.string( "StripCPEfromTrackAngle" ),
  inactivePixelDetectorLabels = cms.VInputTag( ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
  OnDemand = cms.bool( True ),
  Regional = cms.bool( True ),
  UsePixelModuleQualityDB = cms.bool( True ),
  pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
  switchOffPixelsIfEmpty = cms.bool( True ),
  inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  stripClusterProducer = cms.string( "hltPAFullTrackIter3SiStripClusters" ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  SiStripQualityLabel = cms.string( "" ),
  # 9999 effectively disables the bad-strip cuts in every partition.
  badStripCuts = cms.PSet( 
    TOB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TID = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TEC = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TIB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    )
  ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  ComponentName = cms.string( "hltIter3ESPMeasurementTrackerPA" ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  HitMatcher = cms.string( "StandardMatcher" ),
  skipClusters = cms.InputTag( "hltPAFullTrackIter3ClustersRefRemoval" ),
  UseStripModuleQualityDB = cms.bool( True ),
  UseStripNoiseDB = cms.bool( False ),
  UseStripCablingDB = cms.bool( False )
)
# Seeding-layer sets for PA iteration 1: pixel triplets only, skipping clusters
# flagged by hltPAFullTrackIter1ClustersRefRemoval.
process.hltIter1ESPPixelLayerTripletsPA = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2+BPix3',
    'BPix1+BPix2+FPix1_pos',
    'BPix1+BPix2+FPix1_neg',
    'BPix1+FPix1_pos+FPix2_pos',
    'BPix1+FPix1_neg+FPix2_neg' ),
  ComponentName = cms.string( "hltIter1ESPPixelLayerTripletsPA" ),
  TEC = cms.PSet( ),
  FPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0036 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltPAFullTrackIter1ClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0051 )
  ),
  TID = cms.PSet( ),
  BPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0060 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltPAFullTrackIter1ClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0027 )
  ),
  TIB = cms.PSet( ),
  TOB = cms.PSet( )
)
# Seeding-layer sets for PA iteration 2: all pixel-layer pairs (barrel-barrel,
# barrel-forward, forward-forward), skipping hltPAFullTrackIter2ClustersRefRemoval.
process.hltIter2ESPPixelLayerPairsPA = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2',
    'BPix1+BPix3',
    'BPix2+BPix3',
    'BPix1+FPix1_pos',
    'BPix1+FPix1_neg',
    'BPix1+FPix2_pos',
    'BPix1+FPix2_neg',
    'BPix2+FPix1_pos',
    'BPix2+FPix1_neg',
    'BPix2+FPix2_pos',
    'BPix2+FPix2_neg',
    'FPix1_pos+FPix2_pos',
    'FPix1_neg+FPix2_neg' ),
  ComponentName = cms.string( "hltIter2ESPPixelLayerPairsPA" ),
  TEC = cms.PSet( ),
  FPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0036 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltPAFullTrackIter2ClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0051 )
  ),
  TID = cms.PSet( ),
  BPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0060 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltPAFullTrackIter2ClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0027 )
  ),
  TIB = cms.PSet( ),
  TOB = cms.PSet( )
)
# CKF trajectory builder for PA iteration 2: up to 2 candidates per seed, shared
# hltIter2ESPTrajectoryFilterIT with the PA iter2 measurement tracker.
process.hltIter2ESPTrajectoryBuilderITPA = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltIter2ESPTrajectoryFilterIT" ),
  maxCand = cms.int32( 2 ),
  ComponentName = cms.string( "hltIter2ESPTrajectoryBuilderITPA" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltIter2ESPMeasurementTrackerPA" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
# Measurement tracker for PA iteration 2: regional, on-demand strip unpacking;
# strip clusters from hltPAFullTrackIter2SiStripClusters, skipping clusters
# flagged by hltPAFullTrackIter2ClustersRefRemoval.
process.hltIter2ESPMeasurementTrackerPA = cms.ESProducer( "MeasurementTrackerESProducer",
  StripCPE = cms.string( "StripCPEfromTrackAngle" ),
  inactivePixelDetectorLabels = cms.VInputTag( ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
  OnDemand = cms.bool( True ),
  Regional = cms.bool( True ),
  UsePixelModuleQualityDB = cms.bool( True ),
  pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
  switchOffPixelsIfEmpty = cms.bool( True ),
  inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  stripClusterProducer = cms.string( "hltPAFullTrackIter2SiStripClusters" ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  SiStripQualityLabel = cms.string( "" ),
  # 9999 effectively disables the bad-strip cuts in every partition.
  badStripCuts = cms.PSet( 
    TOB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TID = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TEC = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TIB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    )
  ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  ComponentName = cms.string( "hltIter2ESPMeasurementTrackerPA" ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  HitMatcher = cms.string( "StandardMatcher" ),
  skipClusters = cms.InputTag( "hltPAFullTrackIter2ClustersRefRemoval" ),
  UseStripModuleQualityDB = cms.bool( True ),
  UseStripNoiseDB = cms.bool( False ),
  UseStripCablingDB = cms.bool( False )
)
# CKF trajectory builder for PA iteration 1: up to 2 candidates per seed, shared
# hltIter1ESPTrajectoryFilterIT with the PA iter1 measurement tracker.
process.hltIter1ESPTrajectoryBuilderITPA = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltIter1ESPTrajectoryFilterIT" ),
  maxCand = cms.int32( 2 ),
  ComponentName = cms.string( "hltIter1ESPTrajectoryBuilderITPA" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltIter1ESPMeasurementTrackerPA" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
# Measurement tracker for PA iteration 1: regional, on-demand strip unpacking;
# strip clusters from hltPAFullTrackIter1SiStripClusters, skipping clusters
# flagged by hltPAFullTrackIter1ClustersRefRemoval.
process.hltIter1ESPMeasurementTrackerPA = cms.ESProducer( "MeasurementTrackerESProducer",
  StripCPE = cms.string( "StripCPEfromTrackAngle" ),
  inactivePixelDetectorLabels = cms.VInputTag( ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
  OnDemand = cms.bool( True ),
  Regional = cms.bool( True ),
  UsePixelModuleQualityDB = cms.bool( True ),
  pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
  switchOffPixelsIfEmpty = cms.bool( True ),
  inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  stripClusterProducer = cms.string( "hltPAFullTrackIter1SiStripClusters" ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  SiStripQualityLabel = cms.string( "" ),
  # 9999 effectively disables the bad-strip cuts in every partition.
  badStripCuts = cms.PSet( 
    TOB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TID = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TEC = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TIB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    )
  ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  ComponentName = cms.string( "hltIter1ESPMeasurementTrackerPA" ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  HitMatcher = cms.string( "StandardMatcher" ),
  skipClusters = cms.InputTag( "hltPAFullTrackIter1ClustersRefRemoval" ),
  UseStripModuleQualityDB = cms.bool( True ),
  UseStripNoiseDB = cms.bool( False ),
  UseStripCablingDB = cms.bool( False )
)
# Measurement tracker for the Tau3Mu iteration-1 tracking: regional, on-demand
# strip unpacking; strip clusters from hltIter1Tau3MuSiStripClusters, skipping
# clusters flagged by hltIter1Tau3MuClustersRefRemoval.
process.hltIter1Tau3MuESPMeasurementTracker = cms.ESProducer( "MeasurementTrackerESProducer",
  StripCPE = cms.string( "StripCPEfromTrackAngle" ),
  inactivePixelDetectorLabels = cms.VInputTag( ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
  OnDemand = cms.bool( True ),
  Regional = cms.bool( True ),
  UsePixelModuleQualityDB = cms.bool( True ),
  pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
  switchOffPixelsIfEmpty = cms.bool( True ),
  inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  stripClusterProducer = cms.string( "hltIter1Tau3MuSiStripClusters" ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  SiStripQualityLabel = cms.string( "" ),
  # 9999 effectively disables the bad-strip cuts in every partition.
  badStripCuts = cms.PSet( 
    TID = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TOB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TEC = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TIB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    )
  ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  ComponentName = cms.string( "hltIter1Tau3MuESPMeasurementTracker" ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  HitMatcher = cms.string( "StandardMatcher" ),
  skipClusters = cms.InputTag( "hltIter1Tau3MuClustersRefRemoval" ),
  UseStripModuleQualityDB = cms.bool( True ),
  UseStripNoiseDB = cms.bool( False ),
  UseStripCablingDB = cms.bool( False )
)
# Seeding-layer sets for Tau3Mu iteration 1: pixel triplets only, skipping
# clusters flagged by hltIter1Tau3MuClustersRefRemoval.
process.hltIter1Tau3MuESPPixelLayerTriplets = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2+BPix3',
    'BPix1+BPix2+FPix1_pos',
    'BPix1+BPix2+FPix1_neg',
    'BPix1+FPix1_pos+FPix2_pos',
    'BPix1+FPix1_neg+FPix2_neg' ),
  ComponentName = cms.string( "hltIter1Tau3MuESPPixelLayerTriplets" ),
  TEC = cms.PSet( ),
  FPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0036 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter1Tau3MuClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0051 )
  ),
  TID = cms.PSet( ),
  BPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0060 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter1Tau3MuClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0027 )
  ),
  TIB = cms.PSet( ),
  TOB = cms.PSet( )
)
# CKF trajectory builder for Tau3Mu iteration 1: up to 2 candidates per seed,
# reusing the shared hltIter1ESPTrajectoryFilterIT with the Tau3Mu tracker.
process.hltIter1Tau3MuESPTrajectoryBuilderIT = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltIter1ESPTrajectoryFilterIT" ),
  maxCand = cms.int32( 2 ),
  ComponentName = cms.string( "hltIter1Tau3MuESPTrajectoryBuilderIT" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltIter1Tau3MuESPMeasurementTracker" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
# Measurement tracker for HLT iteration 2: regional, on-demand strip unpacking;
# strip clusters from hltIter2SiStripClusters, skipping clusters flagged by
# hltIter2ClustersRefRemoval.
process.hltIter2ESPMeasurementTracker = cms.ESProducer( "MeasurementTrackerESProducer",
  StripCPE = cms.string( "StripCPEfromTrackAngle" ),
  inactivePixelDetectorLabels = cms.VInputTag( ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
  OnDemand = cms.bool( True ),
  Regional = cms.bool( True ),
  UsePixelModuleQualityDB = cms.bool( True ),
  pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
  switchOffPixelsIfEmpty = cms.bool( True ),
  inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  stripClusterProducer = cms.string( "hltIter2SiStripClusters" ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  SiStripQualityLabel = cms.string( "" ),
  # 9999 effectively disables the bad-strip cuts in every partition.
  badStripCuts = cms.PSet( 
    TOB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TID = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TEC = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TIB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    )
  ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  ComponentName = cms.string( "hltIter2ESPMeasurementTracker" ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  HitMatcher = cms.string( "StandardMatcher" ),
  skipClusters = cms.InputTag( "hltIter2ClustersRefRemoval" ),
  UseStripModuleQualityDB = cms.bool( True ),
  UseStripNoiseDB = cms.bool( False ),
  UseStripCablingDB = cms.bool( False )
)
# Seeding-layer sets for HLT iteration 2: all pixel-layer pairs, skipping
# clusters flagged by hltIter2ClustersRefRemoval.
process.hltIter2ESPPixelLayerPairs = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2',
    'BPix1+BPix3',
    'BPix2+BPix3',
    'BPix1+FPix1_pos',
    'BPix1+FPix1_neg',
    'BPix1+FPix2_pos',
    'BPix1+FPix2_neg',
    'BPix2+FPix1_pos',
    'BPix2+FPix1_neg',
    'BPix2+FPix2_pos',
    'BPix2+FPix2_neg',
    'FPix1_pos+FPix2_pos',
    'FPix1_neg+FPix2_neg' ),
  ComponentName = cms.string( "hltIter2ESPPixelLayerPairs" ),
  TEC = cms.PSet( ),
  FPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0036 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter2ClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0051 )
  ),
  TID = cms.PSet( ),
  BPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0060 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter2ClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0027 )
  ),
  TIB = cms.PSet( ),
  TOB = cms.PSet( )
)
# CKF trajectory builder for iteration 2: up to 2 candidates per seed, using the
# iter2 measurement tracker and the iter2 trajectory filter defined below.
process.hltIter2ESPTrajectoryBuilderIT = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltIter2ESPTrajectoryFilterIT" ),
  maxCand = cms.int32( 2 ),
  ComponentName = cms.string( "hltIter2ESPTrajectoryBuilderIT" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltIter2ESPMeasurementTracker" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
# Trajectory filter for iteration 2: min pT 0.3 GeV (tighter than iter1's 0.2),
# >=3 hits, at most 1 lost hit per candidate.
process.hltIter2ESPTrajectoryFilterIT = cms.ESProducer( "TrajectoryFilterESProducer",
  filterPset = cms.PSet( 
    minPt = cms.double( 0.3 ),
    minHitsMinPt = cms.int32( 3 ),
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    maxLostHits = cms.int32( 1 ),
    maxNumberOfHits = cms.int32( 100 ),
    maxConsecLostHits = cms.int32( 1 ),
    minimumNumberOfHits = cms.int32( 3 ),
    nSigmaMinPt = cms.double( 5.0 ),
    chargeSignificance = cms.double( -1.0 )
  ),
  ComponentName = cms.string( "hltIter2ESPTrajectoryFilterIT" )
)
# Measurement tracker for Tau3Mu iteration 2.
# NOTE(review): stripClusterProducer points at "hltIter2SiStripClusters" rather
# than a Tau3Mu-specific collection, unlike the iter1/iter3 Tau3Mu trackers which
# use "hltIter{1,3}Tau3MuSiStripClusters" — confirm this is intentional.
process.hltIter2Tau3MuESPMeasurementTracker = cms.ESProducer( "MeasurementTrackerESProducer",
  StripCPE = cms.string( "StripCPEfromTrackAngle" ),
  inactivePixelDetectorLabels = cms.VInputTag( ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
  OnDemand = cms.bool( True ),
  Regional = cms.bool( True ),
  UsePixelModuleQualityDB = cms.bool( True ),
  pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
  switchOffPixelsIfEmpty = cms.bool( True ),
  inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  stripClusterProducer = cms.string( "hltIter2SiStripClusters" ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  SiStripQualityLabel = cms.string( "" ),
  # 9999 effectively disables the bad-strip cuts in every partition.
  badStripCuts = cms.PSet( 
    TID = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TOB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TEC = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TIB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    )
  ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  ComponentName = cms.string( "hltIter2Tau3MuESPMeasurementTracker" ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  HitMatcher = cms.string( "StandardMatcher" ),
  skipClusters = cms.InputTag( "hltIter2Tau3MuClustersRefRemoval" ),
  UseStripModuleQualityDB = cms.bool( True ),
  UseStripNoiseDB = cms.bool( False ),
  UseStripCablingDB = cms.bool( False )
)
# Seeding-layer sets for Tau3Mu iteration 2: all pixel-layer pairs, skipping
# clusters flagged by hltIter2Tau3MuClustersRefRemoval.
process.hltIter2Tau3MuESPPixelLayerPairs = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2',
    'BPix1+BPix3',
    'BPix2+BPix3',
    'BPix1+FPix1_pos',
    'BPix1+FPix1_neg',
    'BPix1+FPix2_pos',
    'BPix1+FPix2_neg',
    'BPix2+FPix1_pos',
    'BPix2+FPix1_neg',
    'BPix2+FPix2_pos',
    'BPix2+FPix2_neg',
    'FPix1_pos+FPix2_pos',
    'FPix1_neg+FPix2_neg' ),
  ComponentName = cms.string( "hltIter2Tau3MuESPPixelLayerPairs" ),
  TEC = cms.PSet( ),
  FPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0036 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter2Tau3MuClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0051 )
  ),
  TID = cms.PSet( ),
  BPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0060 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter2Tau3MuClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0027 )
  ),
  TIB = cms.PSet( ),
  TOB = cms.PSet( )
)
# CKF trajectory builder for Tau3Mu iteration 2: up to 2 candidates per seed,
# reusing the shared hltIter2ESPTrajectoryFilterIT with the Tau3Mu tracker.
process.hltIter2Tau3MuESPTrajectoryBuilderIT = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltIter2ESPTrajectoryFilterIT" ),
  maxCand = cms.int32( 2 ),
  ComponentName = cms.string( "hltIter2Tau3MuESPTrajectoryBuilderIT" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltIter2Tau3MuESPMeasurementTracker" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
# Seeding-layer sets for HLT iteration 3: mixed pixel+strip triplets (pixel
# layers plus TEC rings and TIB layers). Pixel hits skip clusters flagged by
# hltIter3ClustersRefRemoval.
process.hltIter3ESPLayerTriplets = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2+BPix3',
    'BPix1+BPix2+FPix1_pos',
    'BPix1+BPix2+FPix1_neg',
    'BPix1+FPix1_pos+FPix2_pos',
    'BPix1+FPix1_neg+FPix2_neg',
    'BPix2+FPix1_pos+FPix2_pos',
    'BPix2+FPix1_neg+FPix2_neg',
    'FPix1_pos+FPix2_pos+TEC1_pos',
    'FPix1_neg+FPix2_neg+TEC1_neg',
    'FPix2_pos+TEC2_pos+TEC3_pos',
    'FPix2_neg+TEC2_neg+TEC3_neg',
    'BPix2+BPix3+TIB1',
    'BPix2+BPix3+TIB2',
    'BPix1+BPix3+TIB1',
    'BPix1+BPix3+TIB2',
    'BPix1+BPix2+TIB1',
    'BPix1+BPix2+TIB2' ),
  ComponentName = cms.string( "hltIter3ESPLayerTriplets" ),
  TEC = cms.PSet( 
    # NOTE(review): 'useRingSlector' spelling matches the other TEC PSets in this
    # dump — presumably the parameter name expected upstream; do not "correct" it.
    useRingSlector = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    minRing = cms.int32( 1 ),
    maxRing = cms.int32( 1 )
  ),
  FPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0036 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter3ClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0051 )
  ),
  TID = cms.PSet( ),
  BPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0060 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter3ClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0027 )
  ),
  TIB = cms.PSet( TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ) ),
  TOB = cms.PSet( )
)
# Measurement tracker for HLT iteration 3: regional, on-demand strip unpacking;
# strip clusters from hltIter3SiStripClusters, skipping clusters flagged by
# hltIter3ClustersRefRemoval.
process.hltIter3ESPMeasurementTracker = cms.ESProducer( "MeasurementTrackerESProducer",
  StripCPE = cms.string( "StripCPEfromTrackAngle" ),
  inactivePixelDetectorLabels = cms.VInputTag( ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
  OnDemand = cms.bool( True ),
  Regional = cms.bool( True ),
  UsePixelModuleQualityDB = cms.bool( True ),
  pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
  switchOffPixelsIfEmpty = cms.bool( True ),
  inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  stripClusterProducer = cms.string( "hltIter3SiStripClusters" ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  SiStripQualityLabel = cms.string( "" ),
  # 9999 effectively disables the bad-strip cuts in every partition.
  badStripCuts = cms.PSet( 
    TOB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TID = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TEC = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TIB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    )
  ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  ComponentName = cms.string( "hltIter3ESPMeasurementTracker" ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  HitMatcher = cms.string( "StandardMatcher" ),
  skipClusters = cms.InputTag( "hltIter3ClustersRefRemoval" ),
  UseStripModuleQualityDB = cms.bool( True ),
  UseStripNoiseDB = cms.bool( False ),
  UseStripCablingDB = cms.bool( False )
)
# CKF trajectory builder for iteration 3: single candidate per seed (maxCand=1),
# using the iter3 measurement tracker and the iter3 trajectory filter below.
process.hltIter3ESPTrajectoryBuilderIT = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltIter3ESPTrajectoryFilterIT" ),
  maxCand = cms.int32( 1 ),
  ComponentName = cms.string( "hltIter3ESPTrajectoryBuilderIT" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltIter3ESPMeasurementTracker" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
# Trajectory filter for iteration 3: min pT 0.3 GeV, >=3 hits, NO lost hits
# allowed (maxLostHits=0, stricter than iter1/iter2).
process.hltIter3ESPTrajectoryFilterIT = cms.ESProducer( "TrajectoryFilterESProducer",
  filterPset = cms.PSet( 
    minPt = cms.double( 0.3 ),
    minHitsMinPt = cms.int32( 3 ),
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    maxLostHits = cms.int32( 0 ),
    maxNumberOfHits = cms.int32( 100 ),
    maxConsecLostHits = cms.int32( 1 ),
    minimumNumberOfHits = cms.int32( 3 ),
    nSigmaMinPt = cms.double( 5.0 ),
    chargeSignificance = cms.double( -1.0 )
  ),
  ComponentName = cms.string( "hltIter3ESPTrajectoryFilterIT" )
)
# Seeding-layer sets for Tau3Mu iteration 3: mixed pixel+strip triplets (pixel
# layers plus TEC rings and TIB layers). Pixel hits skip clusters flagged by
# hltIter3Tau3MuClustersRefRemoval.
process.hltIter3Tau3MuESPLayerTriplets = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'BPix1+BPix2+BPix3',
    'BPix1+BPix2+FPix1_pos',
    'BPix1+BPix2+FPix1_neg',
    'BPix1+FPix1_pos+FPix2_pos',
    'BPix1+FPix1_neg+FPix2_neg',
    'BPix2+FPix1_pos+FPix2_pos',
    'BPix2+FPix1_neg+FPix2_neg',
    'FPix1_pos+FPix2_pos+TEC1_pos',
    'FPix1_neg+FPix2_neg+TEC1_neg',
    'FPix2_pos+TEC2_pos+TEC3_pos',
    'FPix2_neg+TEC2_neg+TEC3_neg',
    'BPix2+BPix3+TIB1',
    'BPix2+BPix3+TIB2',
    'BPix1+BPix3+TIB1',
    'BPix1+BPix3+TIB2',
    'BPix1+BPix2+TIB1',
    'BPix1+BPix2+TIB2' ),
  ComponentName = cms.string( "hltIter3Tau3MuESPLayerTriplets" ),
  TEC = cms.PSet( 
    # NOTE(review): 'useRingSlector' spelling matches the other TEC PSets in this
    # dump — presumably the parameter name expected upstream; do not "correct" it.
    useRingSlector = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    minRing = cms.int32( 1 ),
    maxRing = cms.int32( 1 )
  ),
  FPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0036 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter3Tau3MuClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0051 )
  ),
  TID = cms.PSet( ),
  BPix = cms.PSet( 
    HitProducer = cms.string( "hltSiPixelRecHits" ),
    hitErrorRZ = cms.double( 0.0060 ),
    useErrorsFromParam = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter3Tau3MuClustersRefRemoval" ),
    hitErrorRPhi = cms.double( 0.0027 )
  ),
  TIB = cms.PSet( TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ) ),
  TOB = cms.PSet( )
)
# Measurement tracker for Tau3Mu iteration 3: regional, on-demand strip
# unpacking; strip clusters from hltIter3Tau3MuSiStripClusters, skipping
# clusters flagged by hltIter3Tau3MuClustersRefRemoval.
process.hltIter3Tau3MuESPMeasurementTracker = cms.ESProducer( "MeasurementTrackerESProducer",
  StripCPE = cms.string( "StripCPEfromTrackAngle" ),
  inactivePixelDetectorLabels = cms.VInputTag( ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
  OnDemand = cms.bool( True ),
  Regional = cms.bool( True ),
  UsePixelModuleQualityDB = cms.bool( True ),
  pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
  switchOffPixelsIfEmpty = cms.bool( True ),
  inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  stripClusterProducer = cms.string( "hltIter3Tau3MuSiStripClusters" ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  SiStripQualityLabel = cms.string( "" ),
  # 9999 effectively disables the bad-strip cuts in every partition.
  badStripCuts = cms.PSet( 
    TID = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TOB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TEC = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TIB = cms.PSet( 
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    )
  ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  ComponentName = cms.string( "hltIter3Tau3MuESPMeasurementTracker" ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  HitMatcher = cms.string( "StandardMatcher" ),
  skipClusters = cms.InputTag( "hltIter3Tau3MuClustersRefRemoval" ),
  UseStripModuleQualityDB = cms.bool( True ),
  UseStripNoiseDB = cms.bool( False ),
  UseStripCablingDB = cms.bool( False )
)
# CKF trajectory builder for Tau3Mu iteration 3: single candidate per seed,
# reusing the shared hltIter3ESPTrajectoryFilterIT with the Tau3Mu tracker.
process.hltIter3Tau3MuESPTrajectoryBuilderIT = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltIter3ESPTrajectoryFilterIT" ),
  maxCand = cms.int32( 1 ),
  ComponentName = cms.string( "hltIter3Tau3MuESPTrajectoryBuilderIT" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltIter3Tau3MuESPMeasurementTracker" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 )
)
# EventSetup measurement tracker for generic tracking iteration 4.
# Same regional/on-demand setup as the iter3 variants, but reads clusters from
# "hltIter4SiStripClusters" and masks via "hltIter4ClustersRefRemoval".
process.hltIter4ESPMeasurementTracker = cms.ESProducer( "MeasurementTrackerESProducer",
  StripCPE = cms.string( "StripCPEfromTrackAngle" ),
  inactivePixelDetectorLabels = cms.VInputTag( ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
  OnDemand = cms.bool( True ),
  Regional = cms.bool( True ),
  UsePixelModuleQualityDB = cms.bool( True ),
  pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
  switchOffPixelsIfEmpty = cms.bool( True ),
  inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  stripClusterProducer = cms.string( "hltIter4SiStripClusters" ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  SiStripQualityLabel = cms.string( "" ),
  # Effectively disabled bad-strip cuts (thresholds set to 9999 in all partitions).
  badStripCuts = cms.PSet(
    TOB = cms.PSet(
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TID = cms.PSet(
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TEC = cms.PSet(
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TIB = cms.PSet(
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    )
  ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  ComponentName = cms.string( "hltIter4ESPMeasurementTracker" ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  HitMatcher = cms.string( "StandardMatcher" ),
  skipClusters = cms.InputTag( "hltIter4ClustersRefRemoval" ),
  UseStripModuleQualityDB = cms.bool( True ),
  UseStripNoiseDB = cms.bool( False ),
  UseStripCablingDB = cms.bool( False )
)
# Seeding layer set for iteration 4.
# NOTE(review): despite the "PixelLayerPairs" name, the layer list is the strip
# layer pair TIB1+TIB2 (iter4 seeds from strips); only the TIB PSet carries a
# hit builder, all other sub-detector PSets are empty.
process.hltIter4ESPPixelLayerPairs = cms.ESProducer( "SeedingLayersESProducer",
  layerList = cms.vstring( 'TIB1+TIB2' ),
  ComponentName = cms.string( "hltIter4ESPPixelLayerPairs" ),
  TEC = cms.PSet( ),
  FPix = cms.PSet( ),
  TID = cms.PSet( ),
  BPix = cms.PSet( ),
  TIB = cms.PSet( TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ) ),
  TOB = cms.PSet( )
)
# CKF trajectory builder for generic tracking iteration 4 (maxCand = 1, no
# invalid hits). Unlike the iter3 builders it also sets a minimum of 4 hits
# for trajectory rebuilding (untracked, so not part of the configuration hash).
process.hltIter4ESPTrajectoryBuilderIT = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltIter4ESPTrajectoryFilterIT" ),
  maxCand = cms.int32( 1 ),
  ComponentName = cms.string( "hltIter4ESPTrajectoryBuilderIT" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltIter4ESPMeasurementTracker" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  minNrOfHitsForRebuild = cms.untracked.int32( 4 )
)
# Trajectory filter for iteration 4: pT > 0.3 GeV, at least 6 hits, no lost
# hits allowed (maxLostHits = 0), at most 1 consecutive lost hit.
# Shared by both the generic and the Tau->3mu iter4 trajectory builders.
process.hltIter4ESPTrajectoryFilterIT = cms.ESProducer( "TrajectoryFilterESProducer",
  filterPset = cms.PSet(
    minPt = cms.double( 0.3 ),
    minHitsMinPt = cms.int32( 3 ),
    ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
    maxLostHits = cms.int32( 0 ),
    maxNumberOfHits = cms.int32( 100 ),
    maxConsecLostHits = cms.int32( 1 ),
    minimumNumberOfHits = cms.int32( 6 ),
    nSigmaMinPt = cms.double( 5.0 ),
    chargeSignificance = cms.double( -1.0 )
  ),
  ComponentName = cms.string( "hltIter4ESPTrajectoryFilterIT" )
)
# EventSetup measurement tracker for tracking iteration 4 of the Tau->3mu
# sequence; identical to hltIter4ESPMeasurementTracker except for the
# Tau3Mu-specific cluster input and skip-cluster mask.
process.hltIter4Tau3MuESPMeasurementTracker = cms.ESProducer( "MeasurementTrackerESProducer",
  StripCPE = cms.string( "StripCPEfromTrackAngle" ),
  inactivePixelDetectorLabels = cms.VInputTag( ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  stripLazyGetterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
  OnDemand = cms.bool( True ),
  Regional = cms.bool( True ),
  UsePixelModuleQualityDB = cms.bool( True ),
  pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
  switchOffPixelsIfEmpty = cms.bool( True ),
  inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  stripClusterProducer = cms.string( "hltIter4Tau3MuSiStripClusters" ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  SiStripQualityLabel = cms.string( "" ),
  # Effectively disabled bad-strip cuts (thresholds set to 9999 in all partitions).
  badStripCuts = cms.PSet(
    TID = cms.PSet(
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TOB = cms.PSet(
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TEC = cms.PSet(
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    ),
    TIB = cms.PSet(
      maxConsecutiveBad = cms.uint32( 9999 ),
      maxBad = cms.uint32( 9999 )
    )
  ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  ComponentName = cms.string( "hltIter4Tau3MuESPMeasurementTracker" ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  HitMatcher = cms.string( "StandardMatcher" ),
  skipClusters = cms.InputTag( "hltIter4Tau3MuClustersRefRemoval" ),
  UseStripModuleQualityDB = cms.bool( True ),
  UseStripNoiseDB = cms.bool( False ),
  UseStripCablingDB = cms.bool( False )
)
# CKF trajectory builder for Tau->3mu iteration 4. Reuses the shared
# hltIter4ESPTrajectoryFilterIT filter but points at the Tau3Mu-specific
# measurement tracker; maxCand = 1, invalid hits disallowed.
process.hltIter4Tau3MuESPTrajectoryBuilderIT = cms.ESProducer( "CkfTrajectoryBuilderESProducer",
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  trajectoryFilterName = cms.string( "hltIter4ESPTrajectoryFilterIT" ),
  maxCand = cms.int32( 1 ),
  ComponentName = cms.string( "hltIter4Tau3MuESPTrajectoryBuilderIT" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  MeasurementTrackerName = cms.string( "hltIter4Tau3MuESPMeasurementTracker" ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator16" ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  minNrOfHitsForRebuild = cms.untracked.int32( 4 )
)
# DetId associator "HODetIdAssociator": 30 eta bins of size 0.087 x 72 phi bins.
process.hoDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "HODetIdAssociator" ),
  etaBinSize = cms.double( 0.087 ),
  nEta = cms.int32( 30 ),
  nPhi = cms.int32( 72 ),
  includeBadChambers = cms.bool( False )
)
# DetId associator "MuonDetIdAssociator": 48 eta bins of size 0.125 x 48 phi bins;
# bad chambers excluded from the association map.
process.muonDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "MuonDetIdAssociator" ),
  etaBinSize = cms.double( 0.125 ),
  nEta = cms.int32( 48 ),
  nPhi = cms.int32( 48 ),
  includeBadChambers = cms.bool( False )
)
# Provides the "SimpleNavigationSchool" used for tracker layer navigation.
process.navigationSchoolESProducer = cms.ESProducer( "NavigationSchoolESProducer",
  ComponentName = cms.string( "SimpleNavigationSchool" )
)
# DetId associator "PreshowerDetIdAssociator": 60 eta bins of size 0.1 x 30 phi bins.
process.preshowerDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "PreshowerDetIdAssociator" ),
  etaBinSize = cms.double( 0.1 ),
  nEta = cms.int32( 60 ),
  nPhi = cms.int32( 30 ),
  includeBadChambers = cms.bool( False )
)
# Pixel quality from conditions: merges the SiPixelQualityFromDbRcd and
# SiPixelDetVOffRcd records (empty tag = take the tag from the GlobalTag).
process.siPixelQualityESProducer = cms.ESProducer( "SiPixelQualityESProducer",
  ListOfRecordToMerge = cms.VPSet(
    cms.PSet( record = cms.string( "SiPixelQualityFromDbRcd" ),
      tag = cms.string( "" )
    ),
    cms.PSet( record = cms.string( "SiPixelDetVOffRcd" ),
      tag = cms.string( "" )
    )
  )
)
# Default-configured provider of the pixel template DB object (no parameters).
process.siPixelTemplateDBObjectESProducer = cms.ESProducer( "SiPixelTemplateDBObjectESProducer" )
# Strip Lorentz-angle provider that switches between the "deconvolution" and
# "peak" labelled SiStripLorentzAngleRcd payloads based on the latency record.
process.siStripLorentzAngleDepESProducer = cms.ESProducer( "SiStripLorentzAngleDepESProducer",
  LatencyRecord = cms.PSet(
    record = cms.string( "SiStripLatencyRcd" ),
    label = cms.untracked.string( "" )
  ),
  LorentzAngleDeconvMode = cms.PSet(
    record = cms.string( "SiStripLorentzAngleRcd" ),
    label = cms.untracked.string( "deconvolution" )
  ),
  LorentzAnglePeakMode = cms.PSet(
    record = cms.string( "SiStripLorentzAngleRcd" ),
    label = cms.untracked.string( "peak" )
  )
)
# Default-configured SiStrip connectivity provider (no parameters).
process.sistripconn = cms.ESProducer( "SiStripConnectivity" )
# HLT timing service: per-path and per-module timing with DQM output under
# "HLT/TimerService". Luminosity is read from hltScalersRawToDigi; histogram
# ranges/resolutions are in milliseconds (untracked, monitoring-only settings).
process.FastTimerService = cms.Service( "FastTimerService",
  dqmPath = cms.untracked.string( "HLT/TimerService" ),
  dqmModuleTimeRange = cms.untracked.double( 40.0 ),
  luminosityProduct = cms.untracked.InputTag( "hltScalersRawToDigi" ),
  enableTimingExclusive = cms.untracked.bool( False ),
  enableTimingModules = cms.untracked.bool( True ),
  enableDQMbyPathOverhead = cms.untracked.bool( False ),
  dqmTimeResolution = cms.untracked.double( 5.0 ),
  enableDQMbyModule = cms.untracked.bool( False ),
  dqmLuminosityResolution = cms.untracked.double( 1.0E31 ),
  skipFirstPath = cms.untracked.bool( False ),
  enableTimingPaths = cms.untracked.bool( True ),
  enableDQMbyLumiSection = cms.untracked.bool( True ),
  dqmPathTimeResolution = cms.untracked.double( 0.5 ),
  dqmPathTimeRange = cms.untracked.double( 100.0 ),
  dqmTimeRange = cms.untracked.double( 1000.0 ),
  dqmLumiSectionsRange = cms.untracked.uint32( 2500 ),
  enableDQMSummary = cms.untracked.bool( True ),
  enableTimingSummary = cms.untracked.bool( False ),
  enableDQMbyPathTotal = cms.untracked.bool( False ),
  useRealTimeClock = cms.untracked.bool( True ),
  enableDQMbyPathExclusive = cms.untracked.bool( False ),
  enableDQMbyLuminosity = cms.untracked.bool( True ),
  enableDQM = cms.untracked.bool( True ),
  supportedProcesses = cms.untracked.vuint32( 8, 12, 16, 24, 32 ),
  dqmModuleTimeResolution = cms.untracked.double( 0.2 ),
  dqmLuminosityRange = cms.untracked.double( 1.0E34 ),
  enableDQMbyPathActive = cms.untracked.bool( False ),
  enableDQMbyPathDetails = cms.untracked.bool( False ),
  enableDQMbyProcesses = cms.untracked.bool( True ),
  enableDQMbyPathCounters = cms.untracked.bool( False )
)
# DQM network service; collectorPort 0 / empty host means no live collector.
process.DQM = cms.Service( "DQM",
  publishFrequency = cms.untracked.double( 5.0 ),
  debug = cms.untracked.bool( False ),
  collectorPort = cms.untracked.int32( 0 ),
  collectorHost = cms.untracked.string( "" )
)
# In-memory store for DQM monitor elements (default configuration).
process.DQMStore = cms.Service( "DQMStore",
)
# DT FED data-integrity monitoring in HLT mode; histograms booked under
# "DT/FEDIntegrity_EvF".
process.DTDataIntegrityTask = cms.Service( "DTDataIntegrityTask",
  processingMode = cms.untracked.string( "HLT" ),
  fedIntegrityFolder = cms.untracked.string( "DT/FEDIntegrity_EvF" ),
  getSCInfo = cms.untracked.bool( True )
)
# MessageLogger configuration. Only 'cerr' (threshold INFO, framework
# categories rate-limited) and 'cerr_stats' are active; the 'warnings',
# 'errors', 'infos' and 'debugs' destinations are placeholders. Noisy HLT
# tracking/seeding modules are listed in suppressWarning / suppressError.
process.MessageLogger = cms.Service( "MessageLogger",
  suppressInfo = cms.untracked.vstring( ),
  # Placeholder destination: not actually written out.
  debugs = cms.untracked.PSet(
    threshold = cms.untracked.string( "INFO" ),
    placeholder = cms.untracked.bool( True ),
    suppressInfo = cms.untracked.vstring( ),
    suppressWarning = cms.untracked.vstring( ),
    suppressDebug = cms.untracked.vstring( ),
    suppressError = cms.untracked.vstring( )
  ),
  suppressDebug = cms.untracked.vstring( ),
  cout = cms.untracked.PSet(
    threshold = cms.untracked.string( "ERROR" ),
    suppressInfo = cms.untracked.vstring( ),
    suppressWarning = cms.untracked.vstring( ),
    suppressDebug = cms.untracked.vstring( ),
    suppressError = cms.untracked.vstring( )
  ),
  # End-of-job statistics go to cerr.
  cerr_stats = cms.untracked.PSet(
    threshold = cms.untracked.string( "WARNING" ),
    output = cms.untracked.string( "cerr" ),
    optionalPSet = cms.untracked.bool( True )
  ),
  # Placeholder destination: not actually written out.
  warnings = cms.untracked.PSet(
    threshold = cms.untracked.string( "INFO" ),
    placeholder = cms.untracked.bool( True ),
    suppressInfo = cms.untracked.vstring( ),
    suppressWarning = cms.untracked.vstring( ),
    suppressDebug = cms.untracked.vstring( ),
    suppressError = cms.untracked.vstring( )
  ),
  statistics = cms.untracked.vstring( 'cerr' ),
  # Main active destination; framework categories limited/silenced per PSet.
  cerr = cms.untracked.PSet(
    INFO = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),
    noTimeStamps = cms.untracked.bool( False ),
    FwkReport = cms.untracked.PSet(
      reportEvery = cms.untracked.int32( 1 ),
      limit = cms.untracked.int32( 0 )
    ),
    default = cms.untracked.PSet( limit = cms.untracked.int32( 10000000 ) ),
    Root_NoDictionary = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),
    FwkJob = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),
    FwkSummary = cms.untracked.PSet(
      reportEvery = cms.untracked.int32( 1 ),
      limit = cms.untracked.int32( 10000000 )
    ),
    threshold = cms.untracked.string( "INFO" ),
    suppressInfo = cms.untracked.vstring( ),
    suppressWarning = cms.untracked.vstring( ),
    suppressDebug = cms.untracked.vstring( ),
    suppressError = cms.untracked.vstring( )
  ),
  FrameworkJobReport = cms.untracked.PSet(
    default = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),
    FwkJob = cms.untracked.PSet( limit = cms.untracked.int32( 10000000 ) )
  ),
  # Known-noisy modules whose WARNING messages are suppressed.
  suppressWarning = cms.untracked.vstring( 'hltOnlineBeamSpot',
    'hltCtf3HitL1SeededWithMaterialTracks',
    'hltL3MuonsOIState',
    'hltPixelTracksForHighMult',
    'hltHITPixelTracksHE',
    'hltHITPixelTracksHB',
    'hltCtfL1SeededWithMaterialTracks',
    'hltRegionalTracksForL3MuonIsolation',
    'hltSiPixelClusters',
    'hltActivityStartUpElectronPixelSeeds',
    'hltLightPFTracks',
    'hltPixelVertices3DbbPhi',
    'hltL3MuonsIOHit',
    'hltPixelTracks',
    'hltSiPixelDigis',
    'hltL3MuonsOIHit',
    'hltL1SeededElectronGsfTracks',
    'hltL1SeededStartUpElectronPixelSeeds',
    'hltBLifetimeRegionalCtfWithMaterialTracksbbPhiL1FastJetFastPV',
    'hltCtfActivityWithMaterialTracks' ),
  # Placeholder destination: not actually written out.
  errors = cms.untracked.PSet(
    threshold = cms.untracked.string( "INFO" ),
    placeholder = cms.untracked.bool( True ),
    suppressInfo = cms.untracked.vstring( ),
    suppressWarning = cms.untracked.vstring( ),
    suppressDebug = cms.untracked.vstring( ),
    suppressError = cms.untracked.vstring( )
  ),
  fwkJobReports = cms.untracked.vstring( 'FrameworkJobReport' ),
  debugModules = cms.untracked.vstring( ),
  # Placeholder destination: not actually written out.
  infos = cms.untracked.PSet(
    threshold = cms.untracked.string( "INFO" ),
    Root_NoDictionary = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),
    placeholder = cms.untracked.bool( True ),
    suppressInfo = cms.untracked.vstring( ),
    suppressWarning = cms.untracked.vstring( ),
    suppressDebug = cms.untracked.vstring( ),
    suppressError = cms.untracked.vstring( )
  ),
  categories = cms.untracked.vstring( 'FwkJob',
    'FwkReport',
    'FwkSummary',
    'Root_NoDictionary' ),
  destinations = cms.untracked.vstring( 'warnings',
    'errors',
    'infos',
    'debugs',
    'cout',
    'cerr' ),
  threshold = cms.untracked.string( "INFO" ),
  # Known-noisy modules whose ERROR messages are suppressed.
  suppressError = cms.untracked.vstring( 'hltOnlineBeamSpot',
    'hltL3MuonCandidates',
    'hltL3TkTracksFromL2OIState',
    'hltPFJetCtfWithMaterialTracks',
    'hltL3TkTracksFromL2IOHit',
    'hltL3TkTracksFromL2OIHit' )
)
# Online event-filter micro-state reporting service (default configuration).
process.MicroStateService = cms.Service( "MicroStateService",
)
# Registry for module web interfaces in the online system (default configuration).
process.ModuleWebRegistry = cms.Service( "ModuleWebRegistry",
)
# HLT prescale service. Each prescaleTable entry carries one prescale value per
# column in lvl1Labels (9 columns, from '9e33nopark' down to 'CirculatingBeam');
# a value of 0 disables the path in that column. Default column is "3e33".
process.PrescaleService = cms.Service( "PrescaleService",
  forceDefault = cms.bool( False ),
  prescaleTable = cms.VPSet( *(
    cms.PSet( pathName = cms.string( "HLT_L2TripleMu10_0_0_NoVertex_PFJet40Neutral_v8" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_DoubleDisplacedMu4_DiPFJet40Neutral_v8" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_Mu8_DiJet30_v7" ),
      prescales = cms.vuint32( 20, 20, 20, 20, 20, 20, 20, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_Mu8_TriJet30_v7" ),
      prescales = cms.vuint32( 3, 3, 3, 3, 3, 3, 3, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_Mu8_QuadJet30_v7" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_IsoMu12_DoubleCentralJet65_v4" ),
      prescales = cms.vuint32( 10, 10, 10, 10, 10, 10, 10, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_IsoMu17_eta2p1_DiCentralPFNoPUJet30_PFNoPUHT350_PFMHT40_v3" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_DoubleRelIso1p0Mu5_Mass8_PFNoPUHT175_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_DoubleRelIso1p0Mu5_Mass8_PFNoPUHT225_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_DoubleMu8_Mass8_PFNoPUHT175_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_DoubleMu8_Mass8_PFNoPUHT225_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_RelIso1p0Mu5_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_RelIso1p0Mu5_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_PFNoPUHT350_Mu15_PFMET45_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_PFNoPUHT350_Mu15_PFMET50_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_PFNoPUHT400_Mu5_PFMET45_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_PFNoPUHT400_Mu5_PFMET50_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_Mu40_PFNoPUHT350_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_Mu60_PFNoPUHT350_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_IsoMu12_RsqMR30_Rsq0p04_MR200_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_IsoMu12_RsqMR40_Rsq0p04_MR200_v4" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_DoubleMu14_Mass8_PFMET40_v8" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_DoubleMu14_Mass8_PFMET50_v8" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_Mu14_Ele14_CaloIdT_TrkIdVL_Mass8_PFMET40_v8" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
    cms.PSet( pathName = cms.string( "HLT_Mu14_Ele14_CaloIdT_TrkIdVL_Mass8_PFMET50_v8" ),
      prescales = cms.vuint32( 1, 1, 1, 1, 1, 1, 1, 0, 0 )
    ),
  ) ),
  lvl1DefaultLabel = cms.string( "3e33" ),
  # Prescale column names (instantaneous-luminosity scenarios), in the same
  # order as each path's prescales vector above.
  lvl1Labels = cms.vstring( '9e33nopark',
    '8e33nopark',
    '8e33',
    '7e33',
    '6e33',
    '4e33',
    '2e33',
    '5e32',
    'CirculatingBeam' )
)
# Updater service (default configuration, no parameters).
process.UpdaterService = cms.Service( "UpdaterService",
)
# Accept only events with trigger type 1 (physics events).
process.hltTriggerType = cms.EDFilter( "HLTTriggerTypeFilter",
  SelectedTriggerType = cms.int32( 1 )
)
# Unpack the L1 Global Trigger readout record from FED 813 of the raw data,
# keeping 5 bunch crossings per event; all boards active (mask 0xffff).
process.hltGtDigis = cms.EDProducer( "L1GlobalTriggerRawToDigi",
  DaqGtFedId = cms.untracked.int32( 813 ),
  DaqGtInputTag = cms.InputTag( "rawDataCollector" ),
  UnpackBxInEvent = cms.int32( 5 ),
  ActiveBoardsMask = cms.uint32( 0xffff )
)
# Unpack the Global Calorimeter Trigger data from FED 745 (one GCT/RCT sample,
# HLT mode).
process.hltGctDigis = cms.EDProducer( "GctRawToDigi",
  unpackSharedRegions = cms.bool( False ),
  numberOfGctSamplesToUnpack = cms.uint32( 1 ),
  verbose = cms.untracked.bool( False ),
  numberOfRctSamplesToUnpack = cms.uint32( 1 ),
  inputLabel = cms.InputTag( "rawDataCollector" ),
  unpackerVersion = cms.uint32( 0 ),
  gctFedId = cms.untracked.int32( 745 ),
  hltMode = cms.bool( True )
)
# Re-emulate the L1 Global Trigger to produce the L1GtObjectMapRecord used for
# L1 seeding; algorithms run unprescaled/unmasked, inputs are the unpacked GMT
# (hltGtDigis) and GCT (hltGctDigis) collections. No DAQ/EVM records produced.
process.hltL1GtObjectMap = cms.EDProducer( "L1GlobalTrigger",
  TechnicalTriggersUnprescaled = cms.bool( True ),
  ProduceL1GtObjectMapRecord = cms.bool( True ),
  AlgorithmTriggersUnmasked = cms.bool( False ),
  EmulateBxInEvent = cms.int32( 1 ),
  AlgorithmTriggersUnprescaled = cms.bool( True ),
  ProduceL1GtDaqRecord = cms.bool( False ),
  ReadTechnicalTriggerRecords = cms.bool( True ),
  RecordLength = cms.vint32( 3, 0 ),
  TechnicalTriggersUnmasked = cms.bool( False ),
  ProduceL1GtEvmRecord = cms.bool( False ),
  GmtInputTag = cms.InputTag( "hltGtDigis" ),
  TechnicalTriggersVetoUnmasked = cms.bool( True ),
  AlternativeNrBxBoardEvm = cms.uint32( 0 ),
  TechnicalTriggersInputTags = cms.VInputTag( 'simBscDigis' ),
  CastorInputTag = cms.InputTag( "castorL1Digis" ),
  GctInputTag = cms.InputTag( "hltGctDigis" ),
  AlternativeNrBxBoardDaq = cms.uint32( 0 ),
  WritePsbL1GtDaqRecord = cms.bool( False ),
  BstLengthBytes = cms.int32( -1 )
)
# Build L1Extra particle collections (muons, e/gamma, jets, energy sums) from
# the unpacked GT (muons) and GCT (calorimeter) digis; central bunch crossing
# only.
process.hltL1extraParticles = cms.EDProducer( "L1ExtraParticlesProd",
  tauJetSource = cms.InputTag( 'hltGctDigis','tauJets' ),
  etHadSource = cms.InputTag( "hltGctDigis" ),
  etTotalSource = cms.InputTag( "hltGctDigis" ),
  centralBxOnly = cms.bool( True ),
  centralJetSource = cms.InputTag( 'hltGctDigis','cenJets' ),
  etMissSource = cms.InputTag( "hltGctDigis" ),
  hfRingEtSumsSource = cms.InputTag( "hltGctDigis" ),
  produceMuonParticles = cms.bool( True ),
  forwardJetSource = cms.InputTag( 'hltGctDigis','forJets' ),
  ignoreHtMiss = cms.bool( False ),
  htMissSource = cms.InputTag( "hltGctDigis" ),
  produceCaloParticles = cms.bool( True ),
  muonSource = cms.InputTag( "hltGtDigis" ),
  isolatedEmSource = cms.InputTag( 'hltGctDigis','isoEm' ),
  nonIsolatedEmSource = cms.InputTag( 'hltGctDigis','nonIsoEm' ),
  hfRingBitCountsSource = cms.InputTag( "hltGctDigis" )
)
# Unpack the trigger/luminosity scalers from the raw data; consumed by
# hltOnlineBeamSpot and the FastTimerService.
process.hltScalersRawToDigi = cms.EDProducer( "ScalersRawToDigi",
  scalersInputTag = cms.InputTag( "rawDataCollector" )
)
# Online beam spot from the scalers stream, with sanity limits |z| < 40 cm and
# radius < 2 cm; setSigmaZ = 0 keeps the sigma-z value from the payload.
process.hltOnlineBeamSpot = cms.EDProducer( "BeamSpotOnlineProducer",
  maxZ = cms.double( 40.0 ),
  src = cms.InputTag( "hltScalersRawToDigi" ),
  gtEvmLabel = cms.InputTag( "" ),
  changeToCMSCoordinates = cms.bool( False ),
  setSigmaZ = cms.double( 0.0 ),
  maxRadius = cms.double( 2.0 )
)
# L1 seed filter: pass if "L1_TripleMu0 OR L1_TripleMu0_HighQ" fired, using the
# object maps produced by hltL1GtObjectMap and up to 3 bunch crossings.
process.hltL1sL1TripleMu0ORTripleMu0HighQ = cms.EDFilter( "HLTLevel1GTSeed",
  saveTags = cms.bool( True ),
  L1SeedsLogicalExpression = cms.string( "L1_TripleMu0 OR L1_TripleMu0_HighQ" ),
  L1MuonCollectionTag = cms.InputTag( "hltL1extraParticles" ),
  L1UseL1TriggerObjectMaps = cms.bool( True ),
  L1UseAliasesForSeeding = cms.bool( True ),
  L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
  L1CollectionsTag = cms.InputTag( "hltL1extraParticles" ),
  L1NrBxInEvent = cms.int32( 3 ),
  L1GtObjectMapTag = cms.InputTag( "hltL1GtObjectMap" ),
  L1TechTriggerSeeding = cms.bool( False )
)
# HLT prescaler for the L2TripleMu10_0_0_NoVertex_PFJet40Neutral path
# (prescale column offset 0; actual values come from the PrescaleService).
process.hltPreL2TripleMu1000NoVertexPFJet40Neutral = cms.EDFilter( "HLTPrescaler",
  L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
  offset = cms.uint32( 0 )
)
# Require at least 3 L1 muon candidates with |eta| < 2.5 (no pT or quality cut)
# matching the preceding L1 seed filter.
process.hltL1TripleMu0L1TriMuFiltered0 = cms.EDFilter( "HLTMuonL1Filter",
  saveTags = cms.bool( False ),
  CSCTFtag = cms.InputTag( "unused" ),
  PreviousCandTag = cms.InputTag( "hltL1sL1TripleMu0ORTripleMu0HighQ" ),
  MinPt = cms.double( 0.0 ),
  MinN = cms.int32( 3 ),
  MaxEta = cms.double( 2.5 ),
  SelectQualities = cms.vint32( ),
  CandTag = cms.InputTag( "hltL1extraParticles" ),
  ExcludeSingleSegmentCSC = cms.bool( False )
)
# Unpack the DT (drift tube) raw data in DDU format from standard FED ids;
# data-integrity monitoring disabled for HLT running.
process.hltMuonDTDigis = cms.EDProducer( "DTUnpackingModule",
  useStandardFEDid = cms.bool( True ),
  inputLabel = cms.InputTag( "rawDataCollector" ),
  dataType = cms.string( "DDU" ),
  fedbyType = cms.bool( False ),
  readOutParameters = cms.PSet(
    debug = cms.untracked.bool( False ),
    rosParameters = cms.PSet(
      writeSC = cms.untracked.bool( True ),
      readingDDU = cms.untracked.bool( True ),
      performDataIntegrityMonitor = cms.untracked.bool( False ),
      readDDUIDfromDDU = cms.untracked.bool( True ),
      debug = cms.untracked.bool( False ),
      localDAQ = cms.untracked.bool( False )
    ),
    localDAQ = cms.untracked.bool( False ),
    performDataIntegrityMonitor = cms.untracked.bool( False )
  ),
  dqmOnly = cms.bool( False )
)
# DT 1D rec-hit reconstruction from the unpacked digis, using the
# DTLinearDriftFromDBAlgo with DB-based tTrig synchronization; drift-time
# window [-3, 420] ns, TOF / wire-propagation / t0 corrections enabled.
process.hltDt1DRecHits = cms.EDProducer( "DTRecHitProducer",
  debug = cms.untracked.bool( False ),
  recAlgoConfig = cms.PSet(
    tTrigMode = cms.string( "DTTTrigSyncFromDB" ),
    minTime = cms.double( -3.0 ),
    stepTwoFromDigi = cms.bool( False ),
    doVdriftCorr = cms.bool( False ),
    debug = cms.untracked.bool( False ),
    maxTime = cms.double( 420.0 ),
    tTrigModeConfig = cms.PSet(
      vPropWire = cms.double( 24.4 ),
      doTOFCorrection = cms.bool( True ),
      tofCorrType = cms.int32( 0 ),
      wirePropCorrType = cms.int32( 0 ),
      tTrigLabel = cms.string( "" ),
      doWirePropCorrection = cms.bool( True ),
      doT0Correction = cms.bool( True ),
      debug = cms.untracked.bool( False )
    )
  ),
  dtDigiLabel = cms.InputTag( "hltMuonDTDigis" ),
  recAlgo = cms.string( "DTLinearDriftFromDBAlgo" )
)
# DT 4D segment building (DTCombinatorialPatternReco4D) from the 1D rec hits.
# The nested recAlgoConfig / Reco2DAlgoConfig PSets repeat the same
# DTLinearDriftFromDBAlgo hit-reconstruction parameters used in hltDt1DRecHits.
process.hltDt4DSegments = cms.EDProducer( "DTRecSegment4DProducer",
  debug = cms.untracked.bool( False ),
  Reco4DAlgoName = cms.string( "DTCombinatorialPatternReco4D" ),
  recHits2DLabel = cms.InputTag( "dt2DSegments" ),
  recHits1DLabel = cms.InputTag( "hltDt1DRecHits" ),
  Reco4DAlgoConfig = cms.PSet(
    segmCleanerMode = cms.int32( 2 ),
    Reco2DAlgoName = cms.string( "DTCombinatorialPatternReco" ),
    # Hit reconstruction parameters (same as hltDt1DRecHits).
    recAlgoConfig = cms.PSet(
      tTrigMode = cms.string( "DTTTrigSyncFromDB" ),
      minTime = cms.double( -3.0 ),
      stepTwoFromDigi = cms.bool( False ),
      doVdriftCorr = cms.bool( False ),
      debug = cms.untracked.bool( False ),
      maxTime = cms.double( 420.0 ),
      tTrigModeConfig = cms.PSet(
        vPropWire = cms.double( 24.4 ),
        doTOFCorrection = cms.bool( True ),
        tofCorrType = cms.int32( 0 ),
        wirePropCorrType = cms.int32( 0 ),
        tTrigLabel = cms.string( "" ),
        doWirePropCorrection = cms.bool( True ),
        doT0Correction = cms.bool( True ),
        debug = cms.untracked.bool( False )
      )
    ),
    nSharedHitsMax = cms.int32( 2 ),
    hit_afterT0_resolution = cms.double( 0.03 ),
    # 2D-segment reconstruction settings used inside the 4D algorithm.
    Reco2DAlgoConfig = cms.PSet(
      segmCleanerMode = cms.int32( 2 ),
      recAlgoConfig = cms.PSet(
        tTrigMode = cms.string( "DTTTrigSyncFromDB" ),
        minTime = cms.double( -3.0 ),
        stepTwoFromDigi = cms.bool( False ),
        doVdriftCorr = cms.bool( False ),
        debug = cms.untracked.bool( False ),
        maxTime = cms.double( 420.0 ),
        tTrigModeConfig = cms.PSet(
          vPropWire = cms.double( 24.4 ),
          doTOFCorrection = cms.bool( True ),
          tofCorrType = cms.int32( 0 ),
          wirePropCorrType = cms.int32( 0 ),
          tTrigLabel = cms.string( "" ),
          doWirePropCorrection = cms.bool( True ),
          doT0Correction = cms.bool( True ),
          debug = cms.untracked.bool( False )
        )
      ),
      nSharedHitsMax = cms.int32( 2 ),
      AlphaMaxPhi = cms.double( 1.0 ),
      hit_afterT0_resolution = cms.double( 0.03 ),
      MaxAllowedHits = cms.uint32( 50 ),
      performT0_vdriftSegCorrection = cms.bool( False ),
      AlphaMaxTheta = cms.double( 0.9 ),
      debug = cms.untracked.bool( False ),
      recAlgo = cms.string( "DTLinearDriftFromDBAlgo" ),
      nUnSharedHitsMin = cms.int32( 2 ),
      performT0SegCorrection = cms.bool( False ),
      perform_delta_rejecting = cms.bool( False )
    ),
    performT0_vdriftSegCorrection = cms.bool( False ),
    debug = cms.untracked.bool( False ),
    recAlgo = cms.string( "DTLinearDriftFromDBAlgo" ),
    nUnSharedHitsMin = cms.int32( 2 ),
    AllDTRecHits = cms.bool( True ),
    performT0SegCorrection = cms.bool( False ),
    perform_delta_rejecting = cms.bool( False )
  )
)
# Unpack the CSC DCC raw data with the examiner enabled (mask 0x1febf3f6) and
# selective unpacking; status digis and debug dumps disabled.
process.hltMuonCSCDigis = cms.EDProducer( "CSCDCCUnpacker",
  PrintEventNumber = cms.untracked.bool( False ),
  UseSelectiveUnpacking = cms.bool( True ),
  UseExaminer = cms.bool( True ),
  ErrorMask = cms.uint32( 0x0 ),
  InputObjects = cms.InputTag( "rawDataCollector" ),
  UseFormatStatus = cms.bool( True ),
  ExaminerMask = cms.uint32( 0x1febf3f6 ),
  UnpackStatusDigis = cms.bool( False ),
  VisualFEDInspect = cms.untracked.bool( False ),
  FormatedEventDump = cms.untracked.bool( False ),
  Debug = cms.untracked.bool( False ),
  VisualFEDShort = cms.untracked.bool( False )
)
# CSC 2D rec-hit reconstruction from the unpacked strip and wire digis.
# Per-chamber-type cross-talk asymmetries (XTasymmetry_*), systematic terms
# (ConstSyst_*) and noise levels (NoiseLevel_*) are listed explicitly;
# calibrations, timing corrections and bad channel/chamber masks come from DB.
process.hltCsc2DRecHits = cms.EDProducer( "CSCRecHitDProducer",
  XTasymmetry_ME1b = cms.double( 0.0 ),
  XTasymmetry_ME1a = cms.double( 0.0 ),
  ConstSyst_ME1a = cms.double( 0.022 ),
  ConstSyst_ME1b = cms.double( 0.0070 ),
  XTasymmetry_ME41 = cms.double( 0.0 ),
  CSCStripxtalksOffset = cms.double( 0.03 ),
  CSCUseCalibrations = cms.bool( True ),
  CSCUseTimingCorrections = cms.bool( True ),
  CSCNoOfTimeBinsForDynamicPedestal = cms.int32( 2 ),
  XTasymmetry_ME22 = cms.double( 0.0 ),
  UseFivePoleFit = cms.bool( True ),
  XTasymmetry_ME21 = cms.double( 0.0 ),
  ConstSyst_ME21 = cms.double( 0.0 ),
  CSCDebug = cms.untracked.bool( False ),
  ConstSyst_ME22 = cms.double( 0.0 ),
  CSCUseGasGainCorrections = cms.bool( False ),
  XTasymmetry_ME31 = cms.double( 0.0 ),
  readBadChambers = cms.bool( True ),
  NoiseLevel_ME13 = cms.double( 8.0 ),
  NoiseLevel_ME12 = cms.double( 9.0 ),
  NoiseLevel_ME32 = cms.double( 9.0 ),
  NoiseLevel_ME31 = cms.double( 9.0 ),
  XTasymmetry_ME32 = cms.double( 0.0 ),
  ConstSyst_ME41 = cms.double( 0.0 ),
  CSCStripClusterSize = cms.untracked.int32( 3 ),
  CSCStripClusterChargeCut = cms.double( 25.0 ),
  CSCStripPeakThreshold = cms.double( 10.0 ),
  readBadChannels = cms.bool( True ),
  UseParabolaFit = cms.bool( False ),
  XTasymmetry_ME13 = cms.double( 0.0 ),
  XTasymmetry_ME12 = cms.double( 0.0 ),
  wireDigiTag = cms.InputTag( 'hltMuonCSCDigis','MuonCSCWireDigi' ),
  ConstSyst_ME12 = cms.double( 0.0 ),
  ConstSyst_ME13 = cms.double( 0.0 ),
  ConstSyst_ME32 = cms.double( 0.0 ),
  ConstSyst_ME31 = cms.double( 0.0 ),
  UseAverageTime = cms.bool( False ),
  NoiseLevel_ME1a = cms.double( 7.0 ),
  NoiseLevel_ME1b = cms.double( 8.0 ),
  CSCWireClusterDeltaT = cms.int32( 1 ),
  CSCUseStaticPedestals = cms.bool( False ),
  stripDigiTag = cms.InputTag( 'hltMuonCSCDigis','MuonCSCStripDigi' ),
  CSCstripWireDeltaTime = cms.int32( 8 ),
  NoiseLevel_ME21 = cms.double( 9.0 ),
  NoiseLevel_ME22 = cms.double( 9.0 ),
  NoiseLevel_ME41 = cms.double( 9.0 )
)
# CSC segment building with CSCSegAlgoST. parameters_per_chamber_type maps each
# chamber type (in the order of chamber_types) to one of the two inner
# algo_psets: ME1/a uses the second PSet (maxRecHitsInCluster = 24), all other
# chamber types use the first (maxRecHitsInCluster = 20); the PSets are
# otherwise identical.
process.hltCscSegments = cms.EDProducer( "CSCSegmentProducer",
  inputObjects = cms.InputTag( "hltCsc2DRecHits" ),
  algo_psets = cms.VPSet(
    cms.PSet( chamber_types = cms.vstring( 'ME1/a',
      'ME1/b',
      'ME1/2',
      'ME1/3',
      'ME2/1',
      'ME2/2',
      'ME3/1',
      'ME3/2',
      'ME4/1',
      'ME4/2' ),
      algo_name = cms.string( "CSCSegAlgoST" ),
      parameters_per_chamber_type = cms.vint32( 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 ),
      algo_psets = cms.VPSet(
        # Parameter set 1: all chamber types except ME1/a.
        cms.PSet( maxRatioResidualPrune = cms.double( 3.0 ),
          yweightPenalty = cms.double( 1.5 ),
          maxRecHitsInCluster = cms.int32( 20 ),
          dPhiFineMax = cms.double( 0.025 ),
          preClusteringUseChaining = cms.bool( True ),
          ForceCovariance = cms.bool( False ),
          hitDropLimit6Hits = cms.double( 0.3333 ),
          NormChi2Cut2D = cms.double( 20.0 ),
          BPMinImprovement = cms.double( 10000.0 ),
          Covariance = cms.double( 0.0 ),
          tanPhiMax = cms.double( 0.5 ),
          SeedBig = cms.double( 0.0015 ),
          onlyBestSegment = cms.bool( False ),
          dRPhiFineMax = cms.double( 8.0 ),
          SeedSmall = cms.double( 2.0E-4 ),
          curvePenalty = cms.double( 2.0 ),
          dXclusBoxMax = cms.double( 4.0 ),
          BrutePruning = cms.bool( True ),
          curvePenaltyThreshold = cms.double( 0.85 ),
          CorrectTheErrors = cms.bool( True ),
          hitDropLimit4Hits = cms.double( 0.6 ),
          useShowering = cms.bool( False ),
          CSCDebug = cms.untracked.bool( False ),
          tanThetaMax = cms.double( 1.2 ),
          NormChi2Cut3D = cms.double( 10.0 ),
          minHitsPerSegment = cms.int32( 3 ),
          ForceCovarianceAll = cms.bool( False ),
          yweightPenaltyThreshold = cms.double( 1.0 ),
          prePrunLimit = cms.double( 3.17 ),
          hitDropLimit5Hits = cms.double( 0.8 ),
          preClustering = cms.bool( True ),
          prePrun = cms.bool( True ),
          maxDPhi = cms.double( 999.0 ),
          maxDTheta = cms.double( 999.0 ),
          Pruning = cms.bool( True ),
          dYclusBoxMax = cms.double( 8.0 )
        ),
        # Parameter set 2: ME1/a (larger cluster limit: 24 rec hits).
        cms.PSet( maxRatioResidualPrune = cms.double( 3.0 ),
          yweightPenalty = cms.double( 1.5 ),
          maxRecHitsInCluster = cms.int32( 24 ),
          dPhiFineMax = cms.double( 0.025 ),
          preClusteringUseChaining = cms.bool( True ),
          ForceCovariance = cms.bool( False ),
          hitDropLimit6Hits = cms.double( 0.3333 ),
          NormChi2Cut2D = cms.double( 20.0 ),
          BPMinImprovement = cms.double( 10000.0 ),
          Covariance = cms.double( 0.0 ),
          tanPhiMax = cms.double( 0.5 ),
          SeedBig = cms.double( 0.0015 ),
          onlyBestSegment = cms.bool( False ),
          dRPhiFineMax = cms.double( 8.0 ),
          SeedSmall = cms.double( 2.0E-4 ),
          curvePenalty = cms.double( 2.0 ),
          dXclusBoxMax = cms.double( 4.0 ),
          BrutePruning = cms.bool( True ),
          curvePenaltyThreshold = cms.double( 0.85 ),
          CorrectTheErrors = cms.bool( True ),
          hitDropLimit4Hits = cms.double( 0.6 ),
          useShowering = cms.bool( False ),
          CSCDebug = cms.untracked.bool( False ),
          tanThetaMax = cms.double( 1.2 ),
          NormChi2Cut3D = cms.double( 10.0 ),
          minHitsPerSegment = cms.int32( 3 ),
          ForceCovarianceAll = cms.bool( False ),
          yweightPenaltyThreshold = cms.double( 1.0 ),
          prePrunLimit = cms.double( 3.17 ),
          hitDropLimit5Hits = cms.double( 0.8 ),
          preClustering = cms.bool( True ),
          prePrun = cms.bool( True ),
          maxDPhi = cms.double( 999.0 ),
          maxDTheta = cms.double( 999.0 ),
          Pruning = cms.bool( True ),
          dYclusBoxMax = cms.double( 8.0 )
        )
      )
    )
  ),
  algo_type = cms.int32( 1 )
)
# RPC local reconstruction: unpack RPC digis from the raw-data collection,
# then build RPC rec hits from them (dead/masked strips taken from data files).
process.hltMuonRPCDigis = cms.EDProducer( "RPCUnpackingModule",
    InputLabel = cms.InputTag( "rawDataCollector" ),
    doSynchro = cms.bool( False )
)
# RPC rec-hit producer consuming the digis above; uses the standard
# reconstruction algorithm with file-based dead/mask channel lists.
process.hltRpcRecHits = cms.EDProducer( "RPCRecHitProducer",
    recAlgoConfig = cms.PSet( ),
    deadvecfile = cms.FileInPath( "RecoLocalMuon/RPCRecHit/data/RPCDeadVec.dat" ),
    rpcDigiLabel = cms.InputTag( "hltMuonRPCDigis" ),
    maskvecfile = cms.FileInPath( "RecoLocalMuon/RPCRecHit/data/RPCMaskVec.dat" ),
    recAlgo = cms.string( "RPCRecHitStandardAlgo" ),
    deadSource = cms.string( "File" ),
    maskSource = cms.string( "File" )
)
# Offline-style muon seed generator from DT and CSC segments.
# The many SMB_*/SME_*/DT_*/CSC_*/OL_* vdoubles are station/chamber-specific
# pT-parameterization coefficients; the *_scale vdoubles are the matching
# error-scale parameters (all machine-generated — do not edit by hand).
process.hltL2OfflineMuonSeeds = cms.EDProducer( "MuonSeedGenerator",
    SMB_21 = cms.vdouble( 1.043, -0.124, 0.0, 0.183, 0.0, 0.0 ),
    SMB_20 = cms.vdouble( 1.011, -0.052, 0.0, 0.188, 0.0, 0.0 ),
    SMB_22 = cms.vdouble( 1.474, -0.758, 0.0, 0.185, 0.0, 0.0 ),
    OL_2213 = cms.vdouble( 0.117, 0.0, 0.0, 0.044, 0.0, 0.0 ),
    SME_11 = cms.vdouble( 3.295, -1.527, 0.112, 0.378, 0.02, 0.0 ),
    SME_13 = cms.vdouble( -1.286, 1.711, 0.0, 0.356, 0.0, 0.0 ),
    SME_12 = cms.vdouble( 0.102, 0.599, 0.0, 0.38, 0.0, 0.0 ),
    DT_34_2_scale = cms.vdouble( -11.901897, 0.0 ),
    OL_1213_0_scale = cms.vdouble( -4.488158, 0.0 ),
    OL_1222_0_scale = cms.vdouble( -5.810449, 0.0 ),
    DT_13 = cms.vdouble( 0.315, 0.068, -0.127, 0.051, -0.0020, 0.0 ),
    DT_12 = cms.vdouble( 0.183, 0.054, -0.087, 0.028, 0.0020, 0.0 ),
    DT_14 = cms.vdouble( 0.359, 0.052, -0.107, 0.072, -0.0040, 0.0 ),
    CSC_13_3_scale = cms.vdouble( -1.701268, 0.0 ),
    DT_24_2_scale = cms.vdouble( -6.63094, 0.0 ),
    CSC_23 = cms.vdouble( -0.081, 0.113, -0.029, 0.015, 0.0080, 0.0 ),
    CSC_24 = cms.vdouble( 0.0040, 0.021, -0.0020, 0.053, 0.0, 0.0 ),
    OL_2222 = cms.vdouble( 0.107, 0.0, 0.0, 0.04, 0.0, 0.0 ),
    DT_14_2_scale = cms.vdouble( -4.808546, 0.0 ),
    SMB_10 = cms.vdouble( 1.387, -0.038, 0.0, 0.19, 0.0, 0.0 ),
    SMB_11 = cms.vdouble( 1.247, 0.72, -0.802, 0.229, -0.075, 0.0 ),
    SMB_12 = cms.vdouble( 2.128, -0.956, 0.0, 0.199, 0.0, 0.0 ),
    SME_21 = cms.vdouble( -0.529, 1.194, -0.358, 0.472, 0.086, 0.0 ),
    SME_22 = cms.vdouble( -1.207, 1.491, -0.251, 0.189, 0.243, 0.0 ),
    DT_13_2_scale = cms.vdouble( -4.257687, 0.0 ),
    CSC_34 = cms.vdouble( 0.062, -0.067, 0.019, 0.021, 0.0030, 0.0 ),
    SME_22_0_scale = cms.vdouble( -3.457901, 0.0 ),
    DT_24_1_scale = cms.vdouble( -7.490909, 0.0 ),
    OL_1232_0_scale = cms.vdouble( -5.964634, 0.0 ),
    DT_23_1_scale = cms.vdouble( -5.320346, 0.0 ),
    SME_13_0_scale = cms.vdouble( 0.104905, 0.0 ),
    SMB_22_0_scale = cms.vdouble( 1.346681, 0.0 ),
    CSC_12_1_scale = cms.vdouble( -6.434242, 0.0 ),
    DT_34 = cms.vdouble( 0.044, 0.0040, -0.013, 0.029, 0.0030, 0.0 ),
    SME_32 = cms.vdouble( -0.901, 1.333, -0.47, 0.41, 0.073, 0.0 ),
    SME_31 = cms.vdouble( -1.594, 1.482, -0.317, 0.487, 0.097, 0.0 ),
    CSC_13_2_scale = cms.vdouble( -6.077936, 0.0 ),
    crackEtas = cms.vdouble( 0.2, 1.6, 1.7 ),
    SME_11_0_scale = cms.vdouble( 1.325085, 0.0 ),
    SMB_20_0_scale = cms.vdouble( 1.486168, 0.0 ),
    DT_13_1_scale = cms.vdouble( -4.520923, 0.0 ),
    CSC_24_1_scale = cms.vdouble( -6.055701, 0.0 ),
    CSC_01_1_scale = cms.vdouble( -1.915329, 0.0 ),
    DT_23 = cms.vdouble( 0.13, 0.023, -0.057, 0.028, 0.0040, 0.0 ),
    DT_24 = cms.vdouble( 0.176, 0.014, -0.051, 0.051, 0.0030, 0.0 ),
    SMB_12_0_scale = cms.vdouble( 2.283221, 0.0 ),
    SMB_30_0_scale = cms.vdouble( -3.629838, 0.0 ),
    SME_42 = cms.vdouble( -0.0030, 0.0050, 0.0050, 0.608, 0.076, 0.0 ),
    SME_41 = cms.vdouble( -0.0030, 0.0050, 0.0050, 0.608, 0.076, 0.0 ),
    CSC_12_2_scale = cms.vdouble( -1.63622, 0.0 ),
    DT_34_1_scale = cms.vdouble( -13.783765, 0.0 ),
    CSC_34_1_scale = cms.vdouble( -11.520507, 0.0 ),
    OL_2213_0_scale = cms.vdouble( -7.239789, 0.0 ),
    SMB_32_0_scale = cms.vdouble( -3.054156, 0.0 ),
    CSC_12_3_scale = cms.vdouble( -1.63622, 0.0 ),
    SME_21_0_scale = cms.vdouble( -0.040862, 0.0 ),
    OL_1232 = cms.vdouble( 0.184, 0.0, 0.0, 0.066, 0.0, 0.0 ),
    DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
    SMB_10_0_scale = cms.vdouble( 2.448566, 0.0 ),
    EnableDTMeasurement = cms.bool( True ),
    CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
    CSC_23_2_scale = cms.vdouble( -6.079917, 0.0 ),
    scaleDT = cms.bool( True ),
    DT_12_2_scale = cms.vdouble( -3.518165, 0.0 ),
    OL_1222 = cms.vdouble( 0.848, -0.591, 0.0, 0.062, 0.0, 0.0 ),
    CSC_23_1_scale = cms.vdouble( -19.084285, 0.0 ),
    OL_1213 = cms.vdouble( 0.96, -0.737, 0.0, 0.052, 0.0, 0.0 ),
    CSC_02 = cms.vdouble( 0.612, -0.207, 0.0, 0.067, -0.0010, 0.0 ),
    CSC_03 = cms.vdouble( 0.787, -0.338, 0.029, 0.101, -0.0080, 0.0 ),
    CSC_01 = cms.vdouble( 0.166, 0.0, 0.0, 0.031, 0.0, 0.0 ),
    SMB_32 = cms.vdouble( 0.67, -0.327, 0.0, 0.22, 0.0, 0.0 ),
    SMB_30 = cms.vdouble( 0.505, -0.022, 0.0, 0.215, 0.0, 0.0 ),
    SMB_31 = cms.vdouble( 0.549, -0.145, 0.0, 0.207, 0.0, 0.0 ),
    crackWindow = cms.double( 0.04 ),
    CSC_14_3_scale = cms.vdouble( -1.969563, 0.0 ),
    SMB_31_0_scale = cms.vdouble( -3.323768, 0.0 ),
    DT_12_1_scale = cms.vdouble( -3.692398, 0.0 ),
    SMB_21_0_scale = cms.vdouble( 1.58384, 0.0 ),
    DT_23_2_scale = cms.vdouble( -5.117625, 0.0 ),
    SME_12_0_scale = cms.vdouble( 2.279181, 0.0 ),
    DT_14_1_scale = cms.vdouble( -5.644816, 0.0 ),
    beamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    SMB_11_0_scale = cms.vdouble( 2.56363, 0.0 ),
    EnableCSCMeasurement = cms.bool( True ),
    CSC_14 = cms.vdouble( 0.606, -0.181, -0.0020, 0.111, -0.0030, 0.0 ),
    OL_2222_0_scale = cms.vdouble( -7.667231, 0.0 ),
    CSC_13 = cms.vdouble( 0.901, -1.302, 0.533, 0.045, 0.0050, 0.0 ),
    CSC_12 = cms.vdouble( -0.161, 0.254, -0.047, 0.042, -0.0070, 0.0 )
)
# L2 muon seed generator: builds L2 seeds from L1 extra particles
# (quality >= 1, |eta| <= 2.5), optionally matched to the offline-style
# seeds produced above (UseOfflineSeed = True).
process.hltL2MuonSeeds = cms.EDProducer( "L2MuonSeedGenerator",
    ServiceParameters = cms.PSet(
      Propagators = cms.untracked.vstring( 'SteppingHelixPropagatorAny' ),
      RPCLayers = cms.bool( True ),
      UseMuonNavigation = cms.untracked.bool( True )
    ),
    InputObjects = cms.InputTag( "hltL1extraParticles" ),
    L1MaxEta = cms.double( 2.5 ),
    OfflineSeedLabel = cms.untracked.InputTag( "hltL2OfflineMuonSeeds" ),
    L1MinPt = cms.double( 0.0 ),
    L1MinQuality = cms.uint32( 1 ),
    GMTReadoutCollection = cms.InputTag( "hltGtDigis" ),
    UseOfflineSeed = cms.untracked.bool( True ),
    Propagator = cms.string( "SteppingHelixPropagatorAny" )
)
# L2 (standalone) muon track producer. Runs the standard L2 trajectory
# builder over DT/CSC/RPC measurements seeded by hltL2MuonSeeds:
# inside-out filter, then backward (outside-in) filter, no refit/smoothing,
# with a beam-spot vertex constraint applied at track loading.
process.hltL2Muons = cms.EDProducer( "L2MuonProducer",
    ServiceParameters = cms.PSet(
      Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny',
        'hltESPFastSteppingHelixPropagatorOpposite' ),
      RPCLayers = cms.bool( True ),
      UseMuonNavigation = cms.untracked.bool( True )
    ),
    InputObjects = cms.InputTag( "hltL2MuonSeeds" ),
    SeedTransformerParameters = cms.PSet(
      Fitter = cms.string( "hltESPKFFittingSmootherForL2Muon" ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
      NMinRecHits = cms.uint32( 2 ),
      UseSubRecHits = cms.bool( False ),
      Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
      RescaleError = cms.double( 100.0 )
    ),
    L2TrajBuilderParameters = cms.PSet(
      DoRefit = cms.bool( False ),
      SeedPropagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
      FilterParameters = cms.PSet(
        NumberOfSigma = cms.double( 3.0 ),
        FitDirection = cms.string( "insideOut" ),
        DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
        MaxChi2 = cms.double( 1000.0 ),
        MuonTrajectoryUpdatorParameters = cms.PSet(
          MaxChi2 = cms.double( 25.0 ),
          RescaleErrorFactor = cms.double( 100.0 ),
          Granularity = cms.int32( 0 ),
          ExcludeRPCFromFit = cms.bool( False ),
          UseInvalidHits = cms.bool( True ),
          RescaleError = cms.bool( False )
        ),
        EnableRPCMeasurement = cms.bool( True ),
        CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
        EnableDTMeasurement = cms.bool( True ),
        RPCRecSegmentLabel = cms.InputTag( "hltRpcRecHits" ),
        Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
        EnableCSCMeasurement = cms.bool( True )
      ),
      NavigationType = cms.string( "Standard" ),
      SeedTransformerParameters = cms.PSet(
        Fitter = cms.string( "hltESPKFFittingSmootherForL2Muon" ),
        MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
        NMinRecHits = cms.uint32( 2 ),
        UseSubRecHits = cms.bool( False ),
        Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
        RescaleError = cms.double( 100.0 )
      ),
      DoBackwardFilter = cms.bool( True ),
      SeedPosition = cms.string( "in" ),
      BWFilterParameters = cms.PSet(
        NumberOfSigma = cms.double( 3.0 ),
        CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
        FitDirection = cms.string( "outsideIn" ),
        DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
        MaxChi2 = cms.double( 100.0 ),
        MuonTrajectoryUpdatorParameters = cms.PSet(
          MaxChi2 = cms.double( 25.0 ),
          RescaleErrorFactor = cms.double( 100.0 ),
          Granularity = cms.int32( 2 ),
          ExcludeRPCFromFit = cms.bool( False ),
          UseInvalidHits = cms.bool( True ),
          RescaleError = cms.bool( False )
        ),
        EnableRPCMeasurement = cms.bool( True ),
        BWSeedType = cms.string( "fromGenerator" ),
        EnableDTMeasurement = cms.bool( True ),
        RPCRecSegmentLabel = cms.InputTag( "hltRpcRecHits" ),
        Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
        EnableCSCMeasurement = cms.bool( True )
      ),
      DoSeedRefit = cms.bool( False )
    ),
    DoSeedRefit = cms.bool( False ),
    TrackLoaderParameters = cms.PSet(
      Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" ),
      DoSmoothing = cms.bool( False ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      MuonUpdatorAtVertexParameters = cms.PSet(
        MaxChi2 = cms.double( 1000000.0 ),
        BeamSpotPosition = cms.vdouble( 0.0, 0.0, 0.0 ),
        Propagator = cms.string( "hltESPFastSteppingHelixPropagatorOpposite" ),
        BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 )
      ),
      VertexConstraint = cms.bool( True )
    )
)
# L2 muon candidates built from the (non-vertex-constrained) hltL2Muons
# track collection, plus two L2 pre-filters consuming them.
process.hltL2MuonCandidatesNoVtx = cms.EDProducer( "L2MuonCandidateProducer",
    InputObjects = cms.InputTag( "hltL2Muons" )
)
# Triple-muon L2 pre-filter: at least 3 L2 candidates with >= 1 hit,
# no pT requirement (MinPt = 0), seeded by the L1 tri-muon filter.
process.hltL2TripleMu0NoVertexL2PreFiltered = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( True ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltL1TripleMu0L1TriMuFiltered0" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 3 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 3.0 ),
    MinNhits = cms.vint32( 1 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidatesNoVtx" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0 )
)
# Single-muon L2 pre-filter on the same candidates.
# NOTE(review): the module is named "Mu10" but cuts at MinPt = 15.0 GeV —
# name/threshold mismatch inherited from the generated menu; confirm upstream.
process.hltL2Mu10NoVertexL2PreFiltered = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( True ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltL1TripleMu0L1TriMuFiltered0" ),
    MinPt = cms.double( 15.0 ),
    MinN = cms.int32( 1 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 3.0 ),
    MinNhits = cms.vint32( 1 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidatesNoVtx" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0 )
)
# ECAL raw -> rec-hit chain: lazy unpacking facility, region-of-interest
# selection (here type "all" = every FED), and the rec-hit producer with
# spike/topological cleaning thresholds. Followed by HCAL digi unpacking.
process.hltEcalRawToRecHitFacility = cms.EDProducer( "EcalRawToRecHitFacility",
    sourceTag = cms.InputTag( "rawDataCollector" ),
    workerName = cms.string( "" )
)
process.hltEcalRegionalRestFEDs = cms.EDProducer( "EcalRawToRecHitRoI",
    JetJobPSet = cms.VPSet(
    ),
    sourceTag_es = cms.InputTag( "NotNeededoESfalse" ),
    doES = cms.bool( False ),
    type = cms.string( "all" ),
    sourceTag = cms.InputTag( "hltEcalRawToRecHitFacility" ),
    EmJobPSet = cms.VPSet(
    ),
    CandJobPSet = cms.VPSet(
    ),
    MuonJobPSet = cms.PSet( ),
    esInstance = cms.untracked.string( "es" ),
    MuJobPSet = cms.PSet( )
)
# ECAL rec hits for the full detector, split into EB and EE collections;
# cleaningConfig holds the e4/e1, e6/e2 topological spike-cleaning cuts.
process.hltEcalRecHitAll = cms.EDProducer( "EcalRawToRecHitProducer",
    splitOutput = cms.bool( True ),
    rechitCollection = cms.string( "NotNeededsplitOutputTrue" ),
    EErechitCollection = cms.string( "EcalRecHitsEE" ),
    EBrechitCollection = cms.string( "EcalRecHitsEB" ),
    sourceTag = cms.InputTag( "hltEcalRegionalRestFEDs" ),
    cleaningConfig = cms.PSet(
      e6e2thresh = cms.double( 0.04 ),
      tightenCrack_e6e2_double = cms.double( 3.0 ),
      e4e1Threshold_endcap = cms.double( 0.3 ),
      tightenCrack_e4e1_single = cms.double( 3.0 ),
      tightenCrack_e1_double = cms.double( 2.0 ),
      cThreshold_barrel = cms.double( 4.0 ),
      e4e1Threshold_barrel = cms.double( 0.08 ),
      tightenCrack_e1_single = cms.double( 2.0 ),
      e4e1_b_barrel = cms.double( -0.024 ),
      e4e1_a_barrel = cms.double( 0.04 ),
      ignoreOutOfTimeThresh = cms.double( 1.0E9 ),
      cThreshold_endcap = cms.double( 15.0 ),
      e4e1_b_endcap = cms.double( -0.0125 ),
      e4e1_a_endcap = cms.double( 0.02 ),
      cThreshold_double = cms.double( 10.0 )
    ),
    lazyGetterTag = cms.InputTag( "hltEcalRawToRecHitFacility" )
)
# HCAL digi unpacking from raw data (ZDC and calibration channels included).
process.hltHcalDigis = cms.EDProducer( "HcalRawToDigi",
    UnpackZDC = cms.untracked.bool( True ),
    FilterDataQuality = cms.bool( True ),
    InputLabel = cms.InputTag( "rawDataCollector" ),
    ComplainEmptyData = cms.untracked.bool( False ),
    UnpackCalib = cms.untracked.bool( True ),
    UnpackTTP = cms.untracked.bool( False ),
    lastSample = cms.int32( 9 ),
    firstSample = cms.int32( 0 )
)
# HBHE (barrel+endcap HCAL) hit reconstruction from the digis above.
# Noise/HSCP/pulse-shape flag setting is disabled here; reconstruction
# parameters are taken from the conditions DB (recoParamsFromDB = True).
process.hltHbhereco = cms.EDProducer( "HcalHitReconstructor",
    digiTimeFromDB = cms.bool( True ),
    S9S1stat = cms.PSet( ),
    saturationParameters = cms.PSet( maxADCvalue = cms.int32( 127 ) ),
    tsFromDB = cms.bool( True ),
    samplesToAdd = cms.int32( 4 ),
    correctionPhaseNS = cms.double( 13.0 ),
    HFInWindowStat = cms.PSet( ),
    digiLabel = cms.InputTag( "hltHcalDigis" ),
    setHSCPFlags = cms.bool( False ),
    firstAuxTS = cms.int32( 4 ),
    setSaturationFlags = cms.bool( False ),
    hfTimingTrustParameters = cms.PSet( ),
    PETstat = cms.PSet( ),
    digistat = cms.PSet( ),
    useLeakCorrection = cms.bool( False ),
    setTimingTrustFlags = cms.bool( False ),
    S8S1stat = cms.PSet( ),
    correctForPhaseContainment = cms.bool( True ),
    correctForTimeslew = cms.bool( True ),
    setNoiseFlags = cms.bool( False ),
    correctTiming = cms.bool( False ),
    setPulseShapeFlags = cms.bool( False ),
    Subdetector = cms.string( "HBHE" ),
    dropZSmarkedPassed = cms.bool( True ),
    recoParamsFromDB = cms.bool( True ),
    firstSample = cms.int32( 4 ),
    setTimingShapedCutsFlags = cms.bool( False ),
    timingshapedcutsParameters = cms.PSet(
      ignorelowest = cms.bool( True ),
      win_offset = cms.double( 0.0 ),
      ignorehighest = cms.bool( False ),
      win_gain = cms.double( 1.0 ),
      tfilterEnvelope = cms.vdouble( 4.0, 12.04, 13.0, 10.56, 23.5, 8.82, 37.0, 7.38, 56.0, 6.3, 81.0, 5.64, 114.5, 5.44, 175.5, 5.38, 350.5, 5.14 )
    ),
    pulseShapeParameters = cms.PSet( ),
    flagParameters = cms.PSet(
      nominalPedestal = cms.double( 3.0 ),
      hitMultiplicityThreshold = cms.int32( 17 ),
      hitEnergyMinimum = cms.double( 1.0 ),
      pulseShapeParameterSets = cms.VPSet(
        cms.PSet( pulseShapeParameters = cms.vdouble( 0.0, 100.0, -50.0, 0.0, -15.0, 0.15 ) ),
        cms.PSet( pulseShapeParameters = cms.vdouble( 100.0, 2000.0, -50.0, 0.0, -5.0, 0.05 ) ),
        cms.PSet( pulseShapeParameters = cms.vdouble( 2000.0, 1000000.0, -50.0, 0.0, 95.0, 0.0 ) ),
        cms.PSet( pulseShapeParameters = cms.vdouble( -1000000.0, 1000000.0, 45.0, 0.1, 1000000.0, 0.0 ) )
      )
    ),
    hscpParameters = cms.PSet(
      slopeMax = cms.double( -0.6 ),
      r1Max = cms.double( 1.0 ),
      r1Min = cms.double( 0.15 ),
      TimingEnergyThreshold = cms.double( 30.0 ),
      slopeMin = cms.double( -1.5 ),
      outerMin = cms.double( 0.0 ),
      outerMax = cms.double( 0.1 ),
      fracLeaderMin = cms.double( 0.4 ),
      r2Min = cms.double( 0.1 ),
      r2Max = cms.double( 0.5 ),
      fracLeaderMax = cms.double( 0.7 )
    )
)
# HF (forward HCAL) hit reconstruction. Unlike HBHE/HO, noise flagging is
# enabled (setNoiseFlags = True) with S9S1 / PET / S8S1 anomalous-hit
# discriminators configured per eta ring; phase/timeslew corrections are off.
process.hltHfreco = cms.EDProducer( "HcalHitReconstructor",
    digiTimeFromDB = cms.bool( True ),
    S9S1stat = cms.PSet(
      longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),
      flagsToSkip = cms.int32( 24 ),
      shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      short_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),
      longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),
      long_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),
      isS8S1 = cms.bool( False ),
      HcalAcceptSeverityLevel = cms.int32( 9 )
    ),
    saturationParameters = cms.PSet( maxADCvalue = cms.int32( 127 ) ),
    tsFromDB = cms.bool( True ),
    samplesToAdd = cms.int32( 2 ),
    correctionPhaseNS = cms.double( 13.0 ),
    HFInWindowStat = cms.PSet(
      hflongEthresh = cms.double( 40.0 ),
      hflongMinWindowTime = cms.vdouble( -10.0 ),
      hfshortEthresh = cms.double( 40.0 ),
      hflongMaxWindowTime = cms.vdouble( 10.0 ),
      hfshortMaxWindowTime = cms.vdouble( 10.0 ),
      hfshortMinWindowTime = cms.vdouble( -12.0 )
    ),
    digiLabel = cms.InputTag( "hltHcalDigis" ),
    setHSCPFlags = cms.bool( False ),
    firstAuxTS = cms.int32( 1 ),
    setSaturationFlags = cms.bool( False ),
    hfTimingTrustParameters = cms.PSet(
      hfTimingTrustLevel2 = cms.int32( 4 ),
      hfTimingTrustLevel1 = cms.int32( 1 )
    ),
    PETstat = cms.PSet(
      longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      short_R_29 = cms.vdouble( 0.8 ),
      shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),
      flagsToSkip = cms.int32( 0 ),
      short_R = cms.vdouble( 0.8 ),
      shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      long_R_29 = cms.vdouble( 0.8 ),
      longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),
      long_R = cms.vdouble( 0.98 ),
      HcalAcceptSeverityLevel = cms.int32( 9 )
    ),
    digistat = cms.PSet(
      HFdigiflagFirstSample = cms.int32( 1 ),
      HFdigiflagMinEthreshold = cms.double( 40.0 ),
      HFdigiflagSamplesToAdd = cms.int32( 3 ),
      HFdigiflagExpectedPeak = cms.int32( 2 ),
      HFdigiflagCoef = cms.vdouble( 0.93, -0.012667, -0.38275 )
    ),
    useLeakCorrection = cms.bool( False ),
    setTimingTrustFlags = cms.bool( False ),
    S8S1stat = cms.PSet(
      longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      shortEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),
      flagsToSkip = cms.int32( 16 ),
      shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      short_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),
      longEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),
      long_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),
      isS8S1 = cms.bool( True ),
      HcalAcceptSeverityLevel = cms.int32( 9 )
    ),
    correctForPhaseContainment = cms.bool( False ),
    correctForTimeslew = cms.bool( False ),
    setNoiseFlags = cms.bool( True ),
    correctTiming = cms.bool( False ),
    setPulseShapeFlags = cms.bool( False ),
    Subdetector = cms.string( "HF" ),
    dropZSmarkedPassed = cms.bool( True ),
    recoParamsFromDB = cms.bool( True ),
    firstSample = cms.int32( 2 ),
    setTimingShapedCutsFlags = cms.bool( False ),
    timingshapedcutsParameters = cms.PSet( ),
    pulseShapeParameters = cms.PSet( ),
    flagParameters = cms.PSet( ),
    hscpParameters = cms.PSet( )
)
# HO (outer HCAL) hit reconstruction — same reconstructor as HBHE with
# the "HO" subdetector selected and all flag-setting disabled.
process.hltHoreco = cms.EDProducer( "HcalHitReconstructor",
    digiTimeFromDB = cms.bool( True ),
    S9S1stat = cms.PSet( ),
    saturationParameters = cms.PSet( maxADCvalue = cms.int32( 127 ) ),
    tsFromDB = cms.bool( True ),
    samplesToAdd = cms.int32( 4 ),
    correctionPhaseNS = cms.double( 13.0 ),
    HFInWindowStat = cms.PSet( ),
    digiLabel = cms.InputTag( "hltHcalDigis" ),
    setHSCPFlags = cms.bool( False ),
    firstAuxTS = cms.int32( 4 ),
    setSaturationFlags = cms.bool( False ),
    hfTimingTrustParameters = cms.PSet( ),
    PETstat = cms.PSet( ),
    digistat = cms.PSet( ),
    useLeakCorrection = cms.bool( False ),
    setTimingTrustFlags = cms.bool( False ),
    S8S1stat = cms.PSet( ),
    correctForPhaseContainment = cms.bool( True ),
    correctForTimeslew = cms.bool( True ),
    setNoiseFlags = cms.bool( False ),
    correctTiming = cms.bool( False ),
    setPulseShapeFlags = cms.bool( False ),
    Subdetector = cms.string( "HO" ),
    dropZSmarkedPassed = cms.bool( True ),
    recoParamsFromDB = cms.bool( True ),
    firstSample = cms.int32( 4 ),
    setTimingShapedCutsFlags = cms.bool( False ),
    timingshapedcutsParameters = cms.PSet( ),
    pulseShapeParameters = cms.PSet( ),
    flagParameters = cms.PSet( ),
    hscpParameters = cms.PSet( )
)
# Calo tower builder combining ECAL rec hits with HBHE/HO/HF rec hits
# for the general-purpose jet reconstruction. HO is effectively excluded
# (UseHO = False and HOWeight ~ 0).
process.hltTowerMakerForAll = cms.EDProducer( "CaloTowersCreator",
    EBSumThreshold = cms.double( 0.2 ),
    MomHBDepth = cms.double( 0.2 ),
    UseEtEBTreshold = cms.bool( False ),
    hfInput = cms.InputTag( "hltHfreco" ),
    AllowMissingInputs = cms.bool( False ),
    MomEEDepth = cms.double( 0.0 ),
    EESumThreshold = cms.double( 0.45 ),
    HBGrid = cms.vdouble(  ),
    HcalAcceptSeverityLevelForRejectedHit = cms.uint32( 9999 ),
    HBThreshold = cms.double( 0.7 ),
    EcalSeveritiesToBeUsedInBadTowers = cms.vstring(  ),
    UseEcalRecoveredHits = cms.bool( False ),
    MomConstrMethod = cms.int32( 1 ),
    MomHEDepth = cms.double( 0.4 ),
    HcalThreshold = cms.double( -1000.0 ),
    HF2Weights = cms.vdouble(  ),
    HOWeights = cms.vdouble(  ),
    EEGrid = cms.vdouble(  ),
    UseSymEBTreshold = cms.bool( False ),
    EEWeights = cms.vdouble(  ),
    EEWeight = cms.double( 1.0 ),
    UseHO = cms.bool( False ),
    HBWeights = cms.vdouble(  ),
    HF1Weight = cms.double( 1.0 ),
    HF2Grid = cms.vdouble(  ),
    HEDWeights = cms.vdouble(  ),
    HEDGrid = cms.vdouble(  ),
    EBWeight = cms.double( 1.0 ),
    HF1Grid = cms.vdouble(  ),
    EBWeights = cms.vdouble(  ),
    HOWeight = cms.double( 1.0E-99 ),
    HESWeight = cms.double( 1.0 ),
    HESThreshold = cms.double( 0.8 ),
    hbheInput = cms.InputTag( "hltHbhereco" ),
    HF2Weight = cms.double( 1.0 ),
    HF2Threshold = cms.double( 0.85 ),
    HcalAcceptSeverityLevel = cms.uint32( 9 ),
    EEThreshold = cms.double( 0.3 ),
    HOThresholdPlus1 = cms.double( 3.5 ),
    HOThresholdPlus2 = cms.double( 3.5 ),
    HF1Weights = cms.vdouble(  ),
    hoInput = cms.InputTag( "hltHoreco" ),
    HF1Threshold = cms.double( 0.5 ),
    HOThresholdMinus1 = cms.double( 3.5 ),
    HESGrid = cms.vdouble(  ),
    EcutTower = cms.double( -1000.0 ),
    UseRejectedRecoveredEcalHits = cms.bool( False ),
    UseEtEETreshold = cms.bool( False ),
    HESWeights = cms.vdouble(  ),
    EcalRecHitSeveritiesToBeExcluded = cms.vstring( 'kTime',
      'kWeird',
      'kBad' ),
    HEDWeight = cms.double( 1.0 ),
    UseSymEETreshold = cms.bool( False ),
    HEDThreshold = cms.double( 0.8 ),
    EBThreshold = cms.double( 0.07 ),
    UseRejectedHitsOnly = cms.bool( False ),
    UseHcalRecoveredHits = cms.bool( False ),
    HOThresholdMinus2 = cms.double( 3.5 ),
    HOThreshold0 = cms.double( 3.5 ),
    ecalInputs = cms.VInputTag( 'hltEcalRecHitAll:EcalRecHitsEB','hltEcalRecHitAll:EcalRecHitsEE' ),
    UseRejectedRecoveredHcalHits = cms.bool( False ),
    MomEBDepth = cms.double( 0.3 ),
    HBWeight = cms.double( 1.0 ),
    HOGrid = cms.vdouble(  ),
    EBGrid = cms.vdouble(  )
)
# kT (R = 0.6) calo jets with doRhoFastjet = True: used to compute the
# median energy density (rho, |eta| < 4.4) for pile-up jet corrections.
process.hltKT6CaloJets = cms.EDProducer( "FastjetJetProducer",
    Active_Area_Repeats = cms.int32( 1 ),
    doAreaFastjet = cms.bool( False ),
    voronoiRfact = cms.double( 0.9 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    doAreaDiskApprox = cms.bool( True ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    jetType = cms.string( "CaloJet" ),
    minSeed = cms.uint32( 14327 ),
    Ghost_EtaMax = cms.double( 5.0 ),
    doRhoFastjet = cms.bool( True ),
    jetAlgorithm = cms.string( "Kt" ),
    nSigmaPU = cms.double( 1.0 ),
    GhostArea = cms.double( 0.01 ),
    Rho_EtaMax = cms.double( 4.4 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    useDeterministicSeed = cms.bool( True ),
    doPVCorrection = cms.bool( False ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    rParam = cms.double( 0.6 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    doOutputJets = cms.bool( True ),
    src = cms.InputTag( "hltTowerMakerForAll" ),
    inputEtMin = cms.double( 0.3 ),
    puPtMin = cms.double( 10.0 ),
    srcPVs = cms.InputTag( "NotUsed" ),
    jetPtMin = cms.double( 1.0 ),
    radiusPU = cms.double( 0.5 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    doPUOffsetCorr = cms.bool( False ),
    inputEMin = cms.double( 0.0 ),
    subtractorName = cms.string( "" ),
    MinVtxNdof = cms.int32( 0 ),
    MaxVtxZ = cms.double( 15.0 ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( False ),
    DzTrVtxMax = cms.double( 0.0 ),
    sumRecHits = cms.bool( False ),
    DxyTrVtxMax = cms.double( 0.0 )
)
# Anti-kT (R = 0.5) calo jets from the general-purpose towers — the main
# jet collection fed into jet ID and the L1FastJet correction below.
process.hltAntiKT5CaloJets = cms.EDProducer( "FastjetJetProducer",
    Active_Area_Repeats = cms.int32( 5 ),
    doAreaFastjet = cms.bool( False ),
    voronoiRfact = cms.double( 0.9 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    doAreaDiskApprox = cms.bool( True ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    jetType = cms.string( "CaloJet" ),
    minSeed = cms.uint32( 14327 ),
    Ghost_EtaMax = cms.double( 6.0 ),
    doRhoFastjet = cms.bool( False ),
    jetAlgorithm = cms.string( "AntiKt" ),
    nSigmaPU = cms.double( 1.0 ),
    GhostArea = cms.double( 0.01 ),
    Rho_EtaMax = cms.double( 4.4 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    useDeterministicSeed = cms.bool( True ),
    doPVCorrection = cms.bool( False ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    rParam = cms.double( 0.5 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    doOutputJets = cms.bool( True ),
    src = cms.InputTag( "hltTowerMakerForAll" ),
    inputEtMin = cms.double( 0.3 ),
    puPtMin = cms.double( 10.0 ),
    srcPVs = cms.InputTag( "NotUsed" ),
    jetPtMin = cms.double( 1.0 ),
    radiusPU = cms.double( 0.5 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    doPUOffsetCorr = cms.bool( False ),
    inputEMin = cms.double( 0.0 ),
    subtractorName = cms.string( "" ),
    MinVtxNdof = cms.int32( 5 ),
    MaxVtxZ = cms.double( 15.0 ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( False ),
    DzTrVtxMax = cms.double( 0.0 ),
    sumRecHits = cms.bool( False ),
    DxyTrVtxMax = cms.double( 0.0 )
)
# Jet ID (minimal EMF / N90hits requirements, rec-hit based), followed by
# L1FastJet+L2+L3 energy correction and a two-jet 40 GeV filter.
process.hltCaloJetIDPassed = cms.EDProducer( "HLTCaloJetIDProducer",
    min_N90 = cms.int32( -2 ),
    min_N90hits = cms.int32( 2 ),
    min_EMF = cms.double( 1.0E-6 ),
    jetsInput = cms.InputTag( "hltAntiKT5CaloJets" ),
    JetIDParams = cms.PSet(
      useRecHits = cms.bool( True ),
      hbheRecHitsColl = cms.InputTag( "hltHbhereco" ),
      hoRecHitsColl = cms.InputTag( "hltHoreco" ),
      hfRecHitsColl = cms.InputTag( "hltHfreco" ),
      ebRecHitsColl = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEB' ),
      eeRecHitsColl = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEE' )
    ),
    max_EMF = cms.double( 999.0 )
)
# Apply the L1FastJet (rho-based) + L2 + L3 correction chain.
process.hltCaloJetL1FastJetCorrected = cms.EDProducer( "CaloJetCorrectionProducer",
    src = cms.InputTag( "hltCaloJetIDPassed" ),
    correctors = cms.vstring( 'hltESPAK5CaloL1L2L3' )
)
# Require >= 2 corrected jets with pT > 40 GeV, |eta| < 3 (trigger type 85 = calo jet).
process.hltDijet40L1FastJet = cms.EDFilter( "HLT1CaloJet",
    saveTags = cms.bool( True ),
    MinPt = cms.double( 40.0 ),
    MinN = cms.int32( 2 ),
    MaxEta = cms.double( 3.0 ),
    MinMass = cms.double( -1.0 ),
    inputTag = cms.InputTag( "hltCaloJetL1FastJetCorrected" ),
    MinE = cms.double( -1.0 ),
    triggerType = cms.int32( 85 )
)
# Calo tower builder tuned for the particle-flow sequence: lower HB/HE
# thresholds than hltTowerMakerForAll, HCAL recovered hits used, and a
# looser HCAL severity level (11 vs 9).
process.hltTowerMakerForPF = cms.EDProducer( "CaloTowersCreator",
    EBSumThreshold = cms.double( 0.2 ),
    MomHBDepth = cms.double( 0.2 ),
    UseEtEBTreshold = cms.bool( False ),
    hfInput = cms.InputTag( "hltHfreco" ),
    AllowMissingInputs = cms.bool( False ),
    MomEEDepth = cms.double( 0.0 ),
    EESumThreshold = cms.double( 0.45 ),
    HBGrid = cms.vdouble(  ),
    HcalAcceptSeverityLevelForRejectedHit = cms.uint32( 9999 ),
    HBThreshold = cms.double( 0.4 ),
    EcalSeveritiesToBeUsedInBadTowers = cms.vstring(  ),
    UseEcalRecoveredHits = cms.bool( False ),
    MomConstrMethod = cms.int32( 1 ),
    MomHEDepth = cms.double( 0.4 ),
    HcalThreshold = cms.double( -1000.0 ),
    HF2Weights = cms.vdouble(  ),
    HOWeights = cms.vdouble(  ),
    EEGrid = cms.vdouble(  ),
    UseSymEBTreshold = cms.bool( False ),
    EEWeights = cms.vdouble(  ),
    EEWeight = cms.double( 1.0 ),
    UseHO = cms.bool( False ),
    HBWeights = cms.vdouble(  ),
    HF1Weight = cms.double( 1.0 ),
    HF2Grid = cms.vdouble(  ),
    HEDWeights = cms.vdouble(  ),
    HEDGrid = cms.vdouble(  ),
    EBWeight = cms.double( 1.0 ),
    HF1Grid = cms.vdouble(  ),
    EBWeights = cms.vdouble(  ),
    HOWeight = cms.double( 1.0 ),
    HESWeight = cms.double( 1.0 ),
    HESThreshold = cms.double( 0.4 ),
    hbheInput = cms.InputTag( "hltHbhereco" ),
    HF2Weight = cms.double( 1.0 ),
    HF2Threshold = cms.double( 1.8 ),
    HcalAcceptSeverityLevel = cms.uint32( 11 ),
    EEThreshold = cms.double( 0.3 ),
    HOThresholdPlus1 = cms.double( 1.1 ),
    HOThresholdPlus2 = cms.double( 1.1 ),
    HF1Weights = cms.vdouble(  ),
    hoInput = cms.InputTag( "hltHoreco" ),
    HF1Threshold = cms.double( 1.2 ),
    HOThresholdMinus1 = cms.double( 1.1 ),
    HESGrid = cms.vdouble(  ),
    EcutTower = cms.double( -1000.0 ),
    UseRejectedRecoveredEcalHits = cms.bool( False ),
    UseEtEETreshold = cms.bool( False ),
    HESWeights = cms.vdouble(  ),
    EcalRecHitSeveritiesToBeExcluded = cms.vstring( 'kTime',
      'kWeird',
      'kBad' ),
    HEDWeight = cms.double( 1.0 ),
    UseSymEETreshold = cms.bool( False ),
    HEDThreshold = cms.double( 0.4 ),
    EBThreshold = cms.double( 0.07 ),
    UseRejectedHitsOnly = cms.bool( False ),
    UseHcalRecoveredHits = cms.bool( True ),
    HOThresholdMinus2 = cms.double( 1.1 ),
    HOThreshold0 = cms.double( 1.1 ),
    ecalInputs = cms.VInputTag( 'hltEcalRecHitAll:EcalRecHitsEB','hltEcalRecHitAll:EcalRecHitsEE' ),
    UseRejectedRecoveredHcalHits = cms.bool( False ),
    MomEBDepth = cms.double( 0.3 ),
    HBWeight = cms.double( 1.0 ),
    HOGrid = cms.vdouble(  ),
    EBGrid = cms.vdouble(  )
)
# Anti-kT (R = 0.5) calo jets from the PF-tuned towers, then an Et > 5 GeV
# selector (filter = False: it only selects, never rejects the event).
process.hltAntiKT5CaloJetsPF = cms.EDProducer( "FastjetJetProducer",
    Active_Area_Repeats = cms.int32( 5 ),
    doAreaFastjet = cms.bool( False ),
    voronoiRfact = cms.double( -9.0 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    doAreaDiskApprox = cms.bool( False ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    jetType = cms.string( "CaloJet" ),
    minSeed = cms.uint32( 0 ),
    Ghost_EtaMax = cms.double( 6.0 ),
    doRhoFastjet = cms.bool( False ),
    jetAlgorithm = cms.string( "AntiKt" ),
    nSigmaPU = cms.double( 1.0 ),
    GhostArea = cms.double( 0.01 ),
    Rho_EtaMax = cms.double( 4.4 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    useDeterministicSeed = cms.bool( True ),
    doPVCorrection = cms.bool( False ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    rParam = cms.double( 0.5 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    doOutputJets = cms.bool( True ),
    src = cms.InputTag( "hltTowerMakerForPF" ),
    inputEtMin = cms.double( 0.3 ),
    puPtMin = cms.double( 10.0 ),
    srcPVs = cms.InputTag( "NotUsed" ),
    jetPtMin = cms.double( 1.0 ),
    radiusPU = cms.double( 0.5 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    doPUOffsetCorr = cms.bool( False ),
    inputEMin = cms.double( 0.0 ),
    subtractorName = cms.string( "" ),
    MinVtxNdof = cms.int32( 5 ),
    MaxVtxZ = cms.double( 15.0 ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( False ),
    DzTrVtxMax = cms.double( 0.0 ),
    sumRecHits = cms.bool( False ),
    DxyTrVtxMax = cms.double( 0.0 )
)
# Keep only PF-seeding calo jets with Et > 5 GeV (non-filtering selector).
process.hltAntiKT5CaloJetsPFEt5 = cms.EDFilter( "EtMinCaloJetSelector",
    filter = cms.bool( False ),
    src = cms.InputTag( "hltAntiKT5CaloJetsPF" ),
    etMin = cms.double( 5.0 )
)
# Vertex-constrained L2 muon candidates (the 'UpdatedAtVtx' instance of
# hltL2Muons), followed by the tracker local reconstruction needed for L3:
# pixel digis -> clusters -> rec hits, and strip raw -> clusters.
process.hltL2MuonCandidates = cms.EDProducer( "L2MuonCandidateProducer",
    InputObjects = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' )
)
process.hltSiPixelDigis = cms.EDProducer( "SiPixelRawToDigi",
    UseQualityInfo = cms.bool( False ),
    CheckPixelOrder = cms.bool( False ),
    IncludeErrors = cms.bool( False ),
    UseCablingTree = cms.untracked.bool( True ),
    InputLabel = cms.InputTag( "rawDataCollector" ),
    ErrorList = cms.vint32(  ),
    Regions = cms.PSet(  ),
    Timing = cms.untracked.bool( False ),
    UserErrorList = cms.vint32(  )
)
# Pixel clustering; thresholds are in electrons after VCal calibration.
process.hltSiPixelClusters = cms.EDProducer( "SiPixelClusterProducer",
    src = cms.InputTag( "hltSiPixelDigis" ),
    ChannelThreshold = cms.int32( 1000 ),
    maxNumberOfClusters = cms.int32( 20000 ),
    VCaltoElectronGain = cms.int32( 65 ),
    MissCalibrate = cms.untracked.bool( True ),
    SplitClusters = cms.bool( False ),
    VCaltoElectronOffset = cms.int32( -414 ),
    payloadType = cms.string( "HLT" ),
    SeedThreshold = cms.int32( 1000 ),
    ClusterThreshold = cms.double( 4000.0 )
)
process.hltSiPixelRecHits = cms.EDProducer( "SiPixelRecHitConverter",
    VerboseLevel = cms.untracked.int32( 0 ),
    src = cms.InputTag( "hltSiPixelClusters" ),
    CPE = cms.string( "hltESPPixelCPEGeneric" )
)
process.hltSiStripExcludedFEDListProducer = cms.EDProducer( "SiStripExcludedFEDListProducer",
    ProductLabel = cms.InputTag( "rawDataCollector" )
)
# Strip unpacking + three-threshold clustering (thresholds in units of noise).
process.hltSiStripRawToClustersFacility = cms.EDProducer( "SiStripRawToClusters",
    ProductLabel = cms.InputTag( "rawDataCollector" ),
    DoAPVEmulatorCheck = cms.bool( False ),
    Algorithms = cms.PSet(
      SiStripFedZeroSuppressionMode = cms.uint32( 4 ),
      CommonModeNoiseSubtractionMode = cms.string( "Median" ),
      PedestalSubtractionFedMode = cms.bool( True ),
      TruncateInSuppressor = cms.bool( True ),
      doAPVRestore = cms.bool( False ),
      useCMMeanMap = cms.bool( False )
    ),
    Clusterizer = cms.PSet(
      ChannelThreshold = cms.double( 2.0 ),
      MaxSequentialBad = cms.uint32( 1 ),
      MaxSequentialHoles = cms.uint32( 0 ),
      Algorithm = cms.string( "ThreeThresholdAlgorithm" ),
      MaxAdjacentBad = cms.uint32( 0 ),
      QualityLabel = cms.string( "" ),
      SeedThreshold = cms.double( 3.0 ),
      ClusterThreshold = cms.double( 5.0 ),
      setDetId = cms.bool( True ),
      RemoveApvShots = cms.bool( True )
    )
)
# Wrap the lazy cluster collection for use by the measurement tracker.
process.hltSiStripClusters = cms.EDProducer( "MeasurementTrackerSiStripRefGetterProducer",
    InputModuleLabel = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    measurementTrackerName = cms.string( "hltESPMeasurementTracker" )
)
# --- Outside-in, state-based L3 muon chain (OIState) ---
# Seeds built by propagating the vertex-updated L2 muon state into the
# tracker (TSGForRoadSearch), with a rescaled seed error matrix.
process.hltL3TrajSeedOIState = cms.EDProducer( "TSGFromL2Muon",
  TkSeedGenerator = cms.PSet(
    propagatorCompatibleName = cms.string( "hltESPSteppingHelixPropagatorOpposite" ),
    option = cms.uint32( 3 ),
    maxChi2 = cms.double( 40.0 ),
    errorMatrixPset = cms.PSet(
      atIP = cms.bool( True ),
      action = cms.string( "use" ),
      # pf3_Vij blocks rescale the (i,j) covariance terms of the seed state,
      # binned along xAxis/yAxis/zAxis (presumably pT / eta / phi -- TODO confirm).
      errorMatrixValuesPSet = cms.PSet(
        pf3_V12 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
        ),
        pf3_V13 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
        ),
        pf3_V11 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )
        ),
        pf3_V14 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
        ),
        pf3_V15 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
        ),
        yAxis = cms.vdouble( 0.0, 1.0, 1.4, 10.0 ),
        pf3_V33 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )
        ),
        zAxis = cms.vdouble( -3.14159, 3.14159 ),
        pf3_V44 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )
        ),
        xAxis = cms.vdouble( 0.0, 13.0, 30.0, 70.0, 1000.0 ),
        pf3_V22 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )
        ),
        pf3_V23 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
        ),
        pf3_V45 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
        ),
        pf3_V55 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )
        ),
        pf3_V34 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
        ),
        pf3_V35 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
        ),
        pf3_V25 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
        ),
        pf3_V24 = cms.PSet(
          action = cms.string( "scale" ),
          values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
        )
      )
    ),
    propagatorName = cms.string( "hltESPSteppingHelixPropagatorAlong" ),
    manySeeds = cms.bool( False ),
    copyMuonRecHit = cms.bool( False ),
    ComponentName = cms.string( "TSGForRoadSearch" )
  ),
  ServiceParameters = cms.PSet(
    Propagators = cms.untracked.vstring( 'hltESPSteppingHelixPropagatorOpposite',
                                         'hltESPSteppingHelixPropagatorAlong' ),
    RPCLayers = cms.bool( True ),
    UseMuonNavigation = cms.untracked.bool( True )
  ),
  MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
  MuonTrackingRegionBuilder = cms.PSet( ),
  PCut = cms.double( 2.5 ),
  TrackerSeedCleaner = cms.PSet( ),
  PtCut = cms.double( 1.0 )
)
# CKF pattern recognition on the OIState seeds; trajectories are built
# in reverse (reverseTrajectories = True) for the outside-in direction.
process.hltL3TrackCandidateFromL2OIState = cms.EDProducer( "CkfTrajectoryMaker",
  src = cms.InputTag( "hltL3TrajSeedOIState" ),
  reverseTrajectories = cms.bool( True ),
  TransientInitialStateEstimatorParameters = cms.PSet(
    propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
    numberMeasurementsForFit = cms.int32( 4 ),
    propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
  ),
  TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
  cleanTrajectoryAfterInOut = cms.bool( False ),
  useHitsSplitting = cms.bool( False ),
  RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
  doSeedingRegionRebuilding = cms.bool( False ),
  trackCandidateAlso = cms.bool( True ),
  NavigationSchool = cms.string( "SimpleNavigationSchool" ),
  TrajectoryBuilder = cms.string( "hltESPMuonCkfTrajectoryBuilderSeedHit" ),
  maxNSeeds = cms.uint32( 100000 )
)
# Final KF fit of the OIState track candidates.
process.hltL3TkTracksFromL2OIState = cms.EDProducer( "TrackProducer",
  src = cms.InputTag( "hltL3TrackCandidateFromL2OIState" ),
  clusterRemovalInfo = cms.InputTag( "" ),
  beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
  Fitter = cms.string( "hltESPKFFittingSmoother" ),
  useHitsSplitting = cms.bool( False ),
  MeasurementTracker = cms.string( "" ),
  alias = cms.untracked.string( "" ),
  NavigationSchool = cms.string( "" ),
  TrajectoryInEvent = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  AlgorithmName = cms.string( "undefAlgorithm" ),
  Propagator = cms.string( "PropagatorWithMaterial" )
)
# Combine the OIState tracker tracks (tkTrajLabel) with the L2 muons into
# global L3 muons, including a global refit and track-matching cuts.
process.hltL3MuonsOIState = cms.EDProducer( "L3MuonProducer",
  ServiceParameters = cms.PSet(
    Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',
                                         'SteppingHelixPropagatorAny',
                                         'hltESPSmartPropagator',
                                         'hltESPSteppingHelixPropagatorOpposite' ),
    RPCLayers = cms.bool( True ),
    UseMuonNavigation = cms.untracked.bool( True )
  ),
  L3TrajBuilderParameters = cms.PSet(
    ScaleTECyFactor = cms.double( -1.0 ),
    GlbRefitterParameters = cms.PSet(
      TrackerSkipSection = cms.int32( -1 ),
      DoPredictionsOnly = cms.bool( False ),
      PropDirForCosmics = cms.bool( False ),
      HitThreshold = cms.int32( 1 ),
      MuonHitsOption = cms.int32( 1 ),
      Chi2CutRPC = cms.double( 1.0 ),
      Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
      DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
      TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
      RefitDirection = cms.string( "insideOut" ),
      CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
      Chi2CutCSC = cms.double( 150.0 ),
      Chi2CutDT = cms.double( 10.0 ),
      RefitRPCHits = cms.bool( True ),
      SkipStation = cms.int32( -1 ),
      Propagator = cms.string( "hltESPSmartPropagatorAny" ),
      TrackerSkipSystem = cms.int32( -1 ),
      DYTthrs = cms.vint32( 30, 15 )
    ),
    ScaleTECxFactor = cms.double( -1.0 ),
    TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
    MuonTrackingRegionBuilder = cms.PSet(
      EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
      EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
      OnDemand = cms.double( -1.0 ),
      Rescale_Dz = cms.double( 3.0 ),
      vertexCollection = cms.InputTag( "pixelVertices" ),
      Rescale_phi = cms.double( 3.0 ),
      Eta_fixed = cms.double( 0.2 ),
      DeltaZ_Region = cms.double( 15.9 ),
      MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
      PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
      Eta_min = cms.double( 0.05 ),
      Phi_fixed = cms.double( 0.2 ),
      DeltaR = cms.double( 0.2 ),
      EscapePt = cms.double( 1.5 ),
      UseFixedRegion = cms.bool( False ),
      PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
      Rescale_eta = cms.double( 3.0 ),
      Phi_min = cms.double( 0.05 ),
      UseVertex = cms.bool( False ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" )
    ),
    RefitRPCHits = cms.bool( True ),
    PCut = cms.double( 2.5 ),
    TrackTransformer = cms.PSet(
      DoPredictionsOnly = cms.bool( False ),
      Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
      TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
      RefitDirection = cms.string( "insideOut" ),
      RefitRPCHits = cms.bool( True ),
      Propagator = cms.string( "hltESPSmartPropagatorAny" )
    ),
    GlobalMuonTrackMatcher = cms.PSet(
      Pt_threshold1 = cms.double( 0.0 ),
      DeltaDCut_3 = cms.double( 15.0 ),
      MinP = cms.double( 2.5 ),
      MinPt = cms.double( 1.0 ),
      Chi2Cut_1 = cms.double( 50.0 ),
      Pt_threshold2 = cms.double( 9.99999999E8 ),
      LocChi2Cut = cms.double( 0.0010 ),
      Eta_threshold = cms.double( 1.2 ),
      Quality_3 = cms.double( 7.0 ),
      Quality_2 = cms.double( 15.0 ),
      Chi2Cut_2 = cms.double( 50.0 ),
      Chi2Cut_3 = cms.double( 200.0 ),
      DeltaDCut_1 = cms.double( 40.0 ),
      DeltaRCut_2 = cms.double( 0.2 ),
      DeltaRCut_3 = cms.double( 1.0 ),
      DeltaDCut_2 = cms.double( 10.0 ),
      DeltaRCut_1 = cms.double( 0.1 ),
      Propagator = cms.string( "hltESPSmartPropagator" ),
      Quality_1 = cms.double( 20.0 )
    ),
    PtCut = cms.double( 1.0 ),
    TrackerPropagator = cms.string( "SteppingHelixPropagatorAny" ),
    tkTrajLabel = cms.InputTag( "hltL3TkTracksFromL2OIState" )
  ),
  TrackLoaderParameters = cms.PSet(
    PutTkTrackIntoEvent = cms.untracked.bool( False ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    SmoothTkTrack = cms.untracked.bool( False ),
    MuonSeededTracksInstance = cms.untracked.string( "L2Seeded" ),
    Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" ),
    MuonUpdatorAtVertexParameters = cms.PSet(
      MaxChi2 = cms.double( 1000000.0 ),
      Propagator = cms.string( "hltESPSteppingHelixPropagatorOpposite" ),
      BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 )
    ),
    VertexConstraint = cms.bool( False ),
    DoSmoothing = cms.bool( True )
  ),
  MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' )
)
# --- Outside-in, hit-based L3 muon chain (OIHit) ---
# DualByL2TSG: skip seeding for L2 muons already reconstructed by the
# OIState step (L3TkCollectionA); otherwise seed via TSGFromPropagation.
process.hltL3TrajSeedOIHit = cms.EDProducer( "TSGFromL2Muon",
  TkSeedGenerator = cms.PSet(
    PSetNames = cms.vstring( 'skipTSG',
                             'iterativeTSG' ),
    L3TkCollectionA = cms.InputTag( "hltL3MuonsOIState" ),
    iterativeTSG = cms.PSet(
      ErrorRescaling = cms.double( 3.0 ),
      beamSpot = cms.InputTag( "unused" ),
      MaxChi2 = cms.double( 40.0 ),
      errorMatrixPset = cms.PSet(
        atIP = cms.bool( True ),
        action = cms.string( "use" ),
        # Same binned covariance rescaling as in hltL3TrajSeedOIState.
        errorMatrixValuesPSet = cms.PSet(
          pf3_V12 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
          ),
          pf3_V13 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
          ),
          pf3_V11 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )
          ),
          pf3_V14 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
          ),
          pf3_V15 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
          ),
          yAxis = cms.vdouble( 0.0, 1.0, 1.4, 10.0 ),
          pf3_V33 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )
          ),
          zAxis = cms.vdouble( -3.14159, 3.14159 ),
          pf3_V44 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )
          ),
          xAxis = cms.vdouble( 0.0, 13.0, 30.0, 70.0, 1000.0 ),
          pf3_V22 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )
          ),
          pf3_V23 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
          ),
          pf3_V45 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
          ),
          pf3_V55 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 3.0, 3.0, 3.0, 5.0, 4.0, 5.0, 10.0, 7.0, 10.0, 10.0, 10.0, 10.0 )
          ),
          pf3_V34 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
          ),
          pf3_V35 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
          ),
          pf3_V25 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
          ),
          pf3_V24 = cms.PSet(
            action = cms.string( "scale" ),
            values = cms.vdouble( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 )
          )
        )
      ),
      UpdateState = cms.bool( True ),
      MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
      SelectState = cms.bool( False ),
      SigmaZ = cms.double( 25.0 ),
      ResetMethod = cms.string( "matrix" ),
      ComponentName = cms.string( "TSGFromPropagation" ),
      UseVertexState = cms.bool( True ),
      Propagator = cms.string( "hltESPSmartPropagatorAnyOpposite" )
    ),
    skipTSG = cms.PSet( ),
    ComponentName = cms.string( "DualByL2TSG" )
  ),
  ServiceParameters = cms.PSet(
    Propagators = cms.untracked.vstring( 'PropagatorWithMaterial',
                                         'hltESPSmartPropagatorAnyOpposite' ),
    RPCLayers = cms.bool( True ),
    UseMuonNavigation = cms.untracked.bool( True )
  ),
  MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
  MuonTrackingRegionBuilder = cms.PSet( ),
  PCut = cms.double( 2.5 ),
  TrackerSeedCleaner = cms.PSet(
    cleanerFromSharedHits = cms.bool( True ),
    ptCleaner = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    directionCleaner = cms.bool( True )
  ),
  PtCut = cms.double( 1.0 )
)
# CKF pattern recognition on the OIHit seeds (reverse/outside-in building).
process.hltL3TrackCandidateFromL2OIHit = cms.EDProducer( "CkfTrajectoryMaker",
  src = cms.InputTag( "hltL3TrajSeedOIHit" ),
  reverseTrajectories = cms.bool( True ),
  TransientInitialStateEstimatorParameters = cms.PSet(
    propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
    numberMeasurementsForFit = cms.int32( 4 ),
    propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
  ),
  TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
  cleanTrajectoryAfterInOut = cms.bool( False ),
  useHitsSplitting = cms.bool( False ),
  RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
  doSeedingRegionRebuilding = cms.bool( False ),
  trackCandidateAlso = cms.bool( True ),
  NavigationSchool = cms.string( "SimpleNavigationSchool" ),
  TrajectoryBuilder = cms.string( "hltESPMuonCkfTrajectoryBuilder" ),
  maxNSeeds = cms.uint32( 100000 )
)
# Final KF fit of the OIHit track candidates.
process.hltL3TkTracksFromL2OIHit = cms.EDProducer( "TrackProducer",
  src = cms.InputTag( "hltL3TrackCandidateFromL2OIHit" ),
  clusterRemovalInfo = cms.InputTag( "" ),
  beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
  Fitter = cms.string( "hltESPKFFittingSmoother" ),
  useHitsSplitting = cms.bool( False ),
  MeasurementTracker = cms.string( "" ),
  alias = cms.untracked.string( "" ),
  NavigationSchool = cms.string( "" ),
  TrajectoryInEvent = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  AlgorithmName = cms.string( "undefAlgorithm" ),
  Propagator = cms.string( "PropagatorWithMaterial" )
)
# Global L3 muons from the OIHit tracker tracks; configuration is identical
# to hltL3MuonsOIState except for tkTrajLabel.
process.hltL3MuonsOIHit = cms.EDProducer( "L3MuonProducer",
  ServiceParameters = cms.PSet(
    Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',
                                         'SteppingHelixPropagatorAny',
                                         'hltESPSmartPropagator',
                                         'hltESPSteppingHelixPropagatorOpposite' ),
    RPCLayers = cms.bool( True ),
    UseMuonNavigation = cms.untracked.bool( True )
  ),
  L3TrajBuilderParameters = cms.PSet(
    ScaleTECyFactor = cms.double( -1.0 ),
    GlbRefitterParameters = cms.PSet(
      TrackerSkipSection = cms.int32( -1 ),
      DoPredictionsOnly = cms.bool( False ),
      PropDirForCosmics = cms.bool( False ),
      HitThreshold = cms.int32( 1 ),
      MuonHitsOption = cms.int32( 1 ),
      Chi2CutRPC = cms.double( 1.0 ),
      Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
      DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
      TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
      RefitDirection = cms.string( "insideOut" ),
      CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
      Chi2CutCSC = cms.double( 150.0 ),
      Chi2CutDT = cms.double( 10.0 ),
      RefitRPCHits = cms.bool( True ),
      SkipStation = cms.int32( -1 ),
      Propagator = cms.string( "hltESPSmartPropagatorAny" ),
      TrackerSkipSystem = cms.int32( -1 ),
      DYTthrs = cms.vint32( 30, 15 )
    ),
    ScaleTECxFactor = cms.double( -1.0 ),
    TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
    MuonTrackingRegionBuilder = cms.PSet(
      EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
      EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
      OnDemand = cms.double( -1.0 ),
      Rescale_Dz = cms.double( 3.0 ),
      vertexCollection = cms.InputTag( "pixelVertices" ),
      Rescale_phi = cms.double( 3.0 ),
      Eta_fixed = cms.double( 0.2 ),
      DeltaZ_Region = cms.double( 15.9 ),
      MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
      PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
      Eta_min = cms.double( 0.05 ),
      Phi_fixed = cms.double( 0.2 ),
      DeltaR = cms.double( 0.2 ),
      EscapePt = cms.double( 1.5 ),
      UseFixedRegion = cms.bool( False ),
      PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
      Rescale_eta = cms.double( 3.0 ),
      Phi_min = cms.double( 0.05 ),
      UseVertex = cms.bool( False ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" )
    ),
    RefitRPCHits = cms.bool( True ),
    PCut = cms.double( 2.5 ),
    TrackTransformer = cms.PSet(
      DoPredictionsOnly = cms.bool( False ),
      Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
      TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
      RefitDirection = cms.string( "insideOut" ),
      RefitRPCHits = cms.bool( True ),
      Propagator = cms.string( "hltESPSmartPropagatorAny" )
    ),
    GlobalMuonTrackMatcher = cms.PSet(
      Pt_threshold1 = cms.double( 0.0 ),
      DeltaDCut_3 = cms.double( 15.0 ),
      MinP = cms.double( 2.5 ),
      MinPt = cms.double( 1.0 ),
      Chi2Cut_1 = cms.double( 50.0 ),
      Pt_threshold2 = cms.double( 9.99999999E8 ),
      LocChi2Cut = cms.double( 0.0010 ),
      Eta_threshold = cms.double( 1.2 ),
      Quality_3 = cms.double( 7.0 ),
      Quality_2 = cms.double( 15.0 ),
      Chi2Cut_2 = cms.double( 50.0 ),
      Chi2Cut_3 = cms.double( 200.0 ),
      DeltaDCut_1 = cms.double( 40.0 ),
      DeltaRCut_2 = cms.double( 0.2 ),
      DeltaRCut_3 = cms.double( 1.0 ),
      DeltaDCut_2 = cms.double( 10.0 ),
      DeltaRCut_1 = cms.double( 0.1 ),
      Propagator = cms.string( "hltESPSmartPropagator" ),
      Quality_1 = cms.double( 20.0 )
    ),
    PtCut = cms.double( 1.0 ),
    TrackerPropagator = cms.string( "SteppingHelixPropagatorAny" ),
    tkTrajLabel = cms.InputTag( "hltL3TkTracksFromL2OIHit" )
  ),
  TrackLoaderParameters = cms.PSet(
    PutTkTrackIntoEvent = cms.untracked.bool( False ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    SmoothTkTrack = cms.untracked.bool( False ),
    MuonSeededTracksInstance = cms.untracked.string( "L2Seeded" ),
    Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" ),
    MuonUpdatorAtVertexParameters = cms.PSet(
      MaxChi2 = cms.double( 1000000.0 ),
      Propagator = cms.string( "hltESPSteppingHelixPropagatorOpposite" ),
      BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 )
    ),
    VertexConstraint = cms.bool( False ),
    DoSmoothing = cms.bool( True )
  ),
  MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' )
)
# Merge the outside-in (OIState + OIHit) L3 muon track collections.
process.hltL3TkFromL2OICombination = cms.EDProducer( "L3TrackCombiner",
  labels = cms.VInputTag( 'hltL3MuonsOIState','hltL3MuonsOIHit' )
)
# --- Inside-out, hit-based L3 muon seeding (IOHit) ---
# DualByL2TSG: skip L2 muons already covered by the OI combination; otherwise
# seed from pixel triplets/pairs (CombinedTSG of first/second/third TSGs)
# inside a muon tracking region.
process.hltL3TrajSeedIOHit = cms.EDProducer( "TSGFromL2Muon",
  TkSeedGenerator = cms.PSet(
    PSetNames = cms.vstring( 'skipTSG',
                             'iterativeTSG' ),
    L3TkCollectionA = cms.InputTag( "hltL3TkFromL2OICombination" ),
    iterativeTSG = cms.PSet(
      # Pixel-triplet based seeds.
      firstTSG = cms.PSet(
        ComponentName = cms.string( "TSGFromOrderedHits" ),
        OrderedHitsFactoryPSet = cms.PSet(
          ComponentName = cms.string( "StandardHitTripletGenerator" ),
          GeneratorPSet = cms.PSet(
            useBending = cms.bool( True ),
            useFixedPreFiltering = cms.bool( False ),
            maxElement = cms.uint32( 0 ),
            phiPreFiltering = cms.double( 0.3 ),
            extraHitRPhitolerance = cms.double( 0.06 ),
            useMultScattering = cms.bool( True ),
            ComponentName = cms.string( "PixelTripletHLTGenerator" ),
            extraHitRZtolerance = cms.double( 0.06 ),
            SeedComparitorPSet = cms.PSet( ComponentName = cms.string( "none" ) )
          ),
          SeedingLayers = cms.string( "hltESPPixelLayerTriplets" )
        ),
        TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" )
      ),
      # NOTE: only firstTSG and secondTSG are listed in PSetNames; thirdTSG
      # is present but not referenced there.
      PSetNames = cms.vstring( 'firstTSG',
                               'secondTSG' ),
      ComponentName = cms.string( "CombinedTSG" ),
      thirdTSG = cms.PSet(
        PSetNames = cms.vstring( 'endcapTSG',
                                 'barrelTSG' ),
        barrelTSG = cms.PSet( ),
        endcapTSG = cms.PSet(
          ComponentName = cms.string( "TSGFromOrderedHits" ),
          OrderedHitsFactoryPSet = cms.PSet(
            maxElement = cms.uint32( 0 ),
            ComponentName = cms.string( "StandardHitPairGenerator" ),
            SeedingLayers = cms.string( "hltESPMixedLayerPairs" ),
            useOnDemandTracker = cms.untracked.int32( 0 )
          ),
          TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" )
        ),
        etaSeparation = cms.double( 2.0 ),
        ComponentName = cms.string( "DualByEtaTSG" )
      ),
      # Pixel-pair based seeds.
      secondTSG = cms.PSet(
        ComponentName = cms.string( "TSGFromOrderedHits" ),
        OrderedHitsFactoryPSet = cms.PSet(
          maxElement = cms.uint32( 0 ),
          ComponentName = cms.string( "StandardHitPairGenerator" ),
          SeedingLayers = cms.string( "hltESPPixelLayerPairs" ),
          useOnDemandTracker = cms.untracked.int32( 0 )
        ),
        TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" )
      )
    ),
    skipTSG = cms.PSet( ),
    ComponentName = cms.string( "DualByL2TSG" )
  ),
  ServiceParameters = cms.PSet(
    Propagators = cms.untracked.vstring( 'PropagatorWithMaterial' ),
    RPCLayers = cms.bool( True ),
    UseMuonNavigation = cms.untracked.bool( True )
  ),
  MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
  MuonTrackingRegionBuilder = cms.PSet(
    EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
    EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
    OnDemand = cms.double( -1.0 ),
    Rescale_Dz = cms.double( 3.0 ),
    vertexCollection = cms.InputTag( "pixelVertices" ),
    Rescale_phi = cms.double( 3.0 ),
    Eta_fixed = cms.double( 0.2 ),
    DeltaZ_Region = cms.double( 15.9 ),
    MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
    PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
    Eta_min = cms.double( 0.1 ),
    Phi_fixed = cms.double( 0.2 ),
    DeltaR = cms.double( 0.2 ),
    EscapePt = cms.double( 1.5 ),
    UseFixedRegion = cms.bool( False ),
    PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
    Rescale_eta = cms.double( 3.0 ),
    Phi_min = cms.double( 0.1 ),
    UseVertex = cms.bool( False ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" )
  ),
  PCut = cms.double( 2.5 ),
  TrackerSeedCleaner = cms.PSet(
    cleanerFromSharedHits = cms.bool( True ),
    ptCleaner = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    directionCleaner = cms.bool( True )
  ),
  PtCut = cms.double( 1.0 )
)
# CKF pattern recognition on the IOHit seeds; forward (inside-out) building,
# hence reverseTrajectories = False unlike the OI chains.
process.hltL3TrackCandidateFromL2IOHit = cms.EDProducer( "CkfTrajectoryMaker",
  src = cms.InputTag( "hltL3TrajSeedIOHit" ),
  reverseTrajectories = cms.bool( False ),
  TransientInitialStateEstimatorParameters = cms.PSet(
    propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
    numberMeasurementsForFit = cms.int32( 4 ),
    propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
  ),
  TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
  cleanTrajectoryAfterInOut = cms.bool( False ),
  useHitsSplitting = cms.bool( False ),
  RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
  doSeedingRegionRebuilding = cms.bool( False ),
  trackCandidateAlso = cms.bool( True ),
  NavigationSchool = cms.string( "SimpleNavigationSchool" ),
  TrajectoryBuilder = cms.string( "hltESPMuonCkfTrajectoryBuilder" ),
  maxNSeeds = cms.uint32( 100000 )
)
# Final KF fit of the IOHit track candidates.
process.hltL3TkTracksFromL2IOHit = cms.EDProducer( "TrackProducer",
  src = cms.InputTag( "hltL3TrackCandidateFromL2IOHit" ),
  clusterRemovalInfo = cms.InputTag( "" ),
  beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
  Fitter = cms.string( "hltESPKFFittingSmoother" ),
  useHitsSplitting = cms.bool( False ),
  MeasurementTracker = cms.string( "" ),
  alias = cms.untracked.string( "" ),
  NavigationSchool = cms.string( "" ),
  TrajectoryInEvent = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  AlgorithmName = cms.string( "undefAlgorithm" ),
  Propagator = cms.string( "PropagatorWithMaterial" )
)
# Global L3 muons from the IOHit tracker tracks; configuration is identical
# to hltL3MuonsOIState/OIHit except for tkTrajLabel.
process.hltL3MuonsIOHit = cms.EDProducer( "L3MuonProducer",
  ServiceParameters = cms.PSet(
    Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',
                                         'SteppingHelixPropagatorAny',
                                         'hltESPSmartPropagator',
                                         'hltESPSteppingHelixPropagatorOpposite' ),
    RPCLayers = cms.bool( True ),
    UseMuonNavigation = cms.untracked.bool( True )
  ),
  L3TrajBuilderParameters = cms.PSet(
    ScaleTECyFactor = cms.double( -1.0 ),
    GlbRefitterParameters = cms.PSet(
      TrackerSkipSection = cms.int32( -1 ),
      DoPredictionsOnly = cms.bool( False ),
      PropDirForCosmics = cms.bool( False ),
      HitThreshold = cms.int32( 1 ),
      MuonHitsOption = cms.int32( 1 ),
      Chi2CutRPC = cms.double( 1.0 ),
      Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
      DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
      TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
      RefitDirection = cms.string( "insideOut" ),
      CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
      Chi2CutCSC = cms.double( 150.0 ),
      Chi2CutDT = cms.double( 10.0 ),
      RefitRPCHits = cms.bool( True ),
      SkipStation = cms.int32( -1 ),
      Propagator = cms.string( "hltESPSmartPropagatorAny" ),
      TrackerSkipSystem = cms.int32( -1 ),
      DYTthrs = cms.vint32( 30, 15 )
    ),
    ScaleTECxFactor = cms.double( -1.0 ),
    TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
    MuonTrackingRegionBuilder = cms.PSet(
      EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
      EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
      OnDemand = cms.double( -1.0 ),
      Rescale_Dz = cms.double( 3.0 ),
      vertexCollection = cms.InputTag( "pixelVertices" ),
      Rescale_phi = cms.double( 3.0 ),
      Eta_fixed = cms.double( 0.2 ),
      DeltaZ_Region = cms.double( 15.9 ),
      MeasurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
      PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
      Eta_min = cms.double( 0.05 ),
      Phi_fixed = cms.double( 0.2 ),
      DeltaR = cms.double( 0.2 ),
      EscapePt = cms.double( 1.5 ),
      UseFixedRegion = cms.bool( False ),
      PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
      Rescale_eta = cms.double( 3.0 ),
      Phi_min = cms.double( 0.05 ),
      UseVertex = cms.bool( False ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" )
    ),
    RefitRPCHits = cms.bool( True ),
    PCut = cms.double( 2.5 ),
    TrackTransformer = cms.PSet(
      DoPredictionsOnly = cms.bool( False ),
      Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
      TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
      RefitDirection = cms.string( "insideOut" ),
      RefitRPCHits = cms.bool( True ),
      Propagator = cms.string( "hltESPSmartPropagatorAny" )
    ),
    GlobalMuonTrackMatcher = cms.PSet(
      Pt_threshold1 = cms.double( 0.0 ),
      DeltaDCut_3 = cms.double( 15.0 ),
      MinP = cms.double( 2.5 ),
      MinPt = cms.double( 1.0 ),
      Chi2Cut_1 = cms.double( 50.0 ),
      Pt_threshold2 = cms.double( 9.99999999E8 ),
      LocChi2Cut = cms.double( 0.0010 ),
      Eta_threshold = cms.double( 1.2 ),
      Quality_3 = cms.double( 7.0 ),
      Quality_2 = cms.double( 15.0 ),
      Chi2Cut_2 = cms.double( 50.0 ),
      Chi2Cut_3 = cms.double( 200.0 ),
      DeltaDCut_1 = cms.double( 40.0 ),
      DeltaRCut_2 = cms.double( 0.2 ),
      DeltaRCut_3 = cms.double( 1.0 ),
      DeltaDCut_2 = cms.double( 10.0 ),
      DeltaRCut_1 = cms.double( 0.1 ),
      Propagator = cms.string( "hltESPSmartPropagator" ),
      Quality_1 = cms.double( 20.0 )
    ),
    PtCut = cms.double( 1.0 ),
    TrackerPropagator = cms.string( "SteppingHelixPropagatorAny" ),
    tkTrajLabel = cms.InputTag( "hltL3TkTracksFromL2IOHit" )
  ),
  TrackLoaderParameters = cms.PSet(
    PutTkTrackIntoEvent = cms.untracked.bool( False ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    SmoothTkTrack = cms.untracked.bool( False ),
    MuonSeededTracksInstance = cms.untracked.string( "L2Seeded" ),
    Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" ),
    MuonUpdatorAtVertexParameters = cms.PSet(
      MaxChi2 = cms.double( 1000000.0 ),
      Propagator = cms.string( "hltESPSteppingHelixPropagatorOpposite" ),
      BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 )
    ),
    VertexConstraint = cms.bool( False ),
    DoSmoothing = cms.bool( True )
  ),
  MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' )
)
# --- Combination of the three L3 muon reconstruction paths and PF-jet tracking ---

# Merge the seeds from the IOHit, OIState and OIHit steps.
process.hltL3TrajectorySeed = cms.EDProducer( "L3MuonTrajectorySeedCombiner",
  labels = cms.VInputTag( 'hltL3TrajSeedIOHit','hltL3TrajSeedOIState','hltL3TrajSeedOIHit' )
)
# Merge the track candidates from the three steps.
process.hltL3TrackCandidateFromL2 = cms.EDProducer( "L3TrackCandCombiner",
  labels = cms.VInputTag( 'hltL3TrackCandidateFromL2IOHit','hltL3TrackCandidateFromL2OIHit','hltL3TrackCandidateFromL2OIState' )
)
# Merge the fitted tracker tracks from the three steps.
process.hltL3TkTracksFromL2 = cms.EDProducer( "L3TrackCombiner",
  labels = cms.VInputTag( 'hltL3TkTracksFromL2IOHit','hltL3TkTracksFromL2OIHit','hltL3TkTracksFromL2OIState' )
)
# Merge the track-link objects of the three L3 muon collections.
process.hltL3MuonsLinksCombination = cms.EDProducer( "L3TrackLinksCombiner",
  labels = cms.VInputTag( 'hltL3MuonsOIState','hltL3MuonsOIHit','hltL3MuonsIOHit' )
)
# Merge the three L3 muon track collections.
process.hltL3Muons = cms.EDProducer( "L3TrackCombiner",
  labels = cms.VInputTag( 'hltL3MuonsOIState','hltL3MuonsOIHit','hltL3MuonsIOHit' )
)
# Build L3 muon candidates; pT taken from the tracker track (MuonPtOption).
process.hltL3MuonCandidates = cms.EDProducer( "L3MuonCandidateProducer",
  InputLinksObjects = cms.InputTag( "hltL3MuonsLinksCombination" ),
  InputObjects = cms.InputTag( "hltL3Muons" ),
  MuonPtOption = cms.string( "Tracker" )
)
# Pixel-triplet tracks for the full detector, seeded from the beam spot region.
process.hltPixelTracks = cms.EDProducer( "PixelTrackProducer",
  useFilterWithES = cms.bool( False ),
  FilterPSet = cms.PSet(
    chi2 = cms.double( 1000.0 ),
    nSigmaTipMaxTolerance = cms.double( 0.0 ),
    ComponentName = cms.string( "PixelTrackFilterByKinematics" ),
    nSigmaInvPtTolerance = cms.double( 0.0 ),
    ptMin = cms.double( 0.1 ),
    tipMax = cms.double( 1.0 )
  ),
  passLabel = cms.string( "Pixel triplet primary tracks with vertex constraint" ),
  FitterPSet = cms.PSet(
    ComponentName = cms.string( "PixelFitterByHelixProjections" ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    fixImpactParameter = cms.double( 0.0 )
  ),
  RegionFactoryPSet = cms.PSet(
    ComponentName = cms.string( "GlobalRegionProducerFromBeamSpot" ),
    RegionPSet = cms.PSet(
      precise = cms.bool( True ),
      originRadius = cms.double( 0.2 ),
      ptMin = cms.double( 0.9 ),
      originHalfLength = cms.double( 24.0 ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" )
    )
  ),
  CleanerPSet = cms.PSet( ComponentName = cms.string( "PixelTrackCleanerBySharedHits" ) ),
  OrderedHitsFactoryPSet = cms.PSet(
    ComponentName = cms.string( "StandardHitTripletGenerator" ),
    GeneratorPSet = cms.PSet(
      useBending = cms.bool( True ),
      useFixedPreFiltering = cms.bool( False ),
      maxElement = cms.uint32( 100000 ),
      phiPreFiltering = cms.double( 0.3 ),
      extraHitRPhitolerance = cms.double( 0.06 ),
      useMultScattering = cms.bool( True ),
      SeedComparitorPSet = cms.PSet( ComponentName = cms.string( "LowPtClusterShapeSeedComparitor" ) ),
      extraHitRZtolerance = cms.double( 0.06 ),
      ComponentName = cms.string( "PixelTripletHLTGenerator" )
    ),
    SeedingLayers = cms.string( "hltESPPixelLayerTriplets" )
  )
)
# Primary-vertex finding from the pixel tracks (divisive finder).
process.hltPixelVertices = cms.EDProducer( "PixelVertexProducer",
  WtAverage = cms.bool( True ),
  Method2 = cms.bool( True ),
  beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
  Verbosity = cms.int32( 0 ),
  UseError = cms.bool( True ),
  TrackCollection = cms.InputTag( "hltPixelTracks" ),
  PtMin = cms.double( 1.0 ),
  NTrkMin = cms.int32( 2 ),
  ZOffset = cms.double( 5.0 ),
  Finder = cms.string( "DivisiveVertexFinder" ),
  ZSeparation = cms.double( 0.05 )
)
# Seeds for the iter0 PF-jet tracking, derived from the pixel (proto)tracks
# and compatible with the pixel vertices.
process.hltPFJetPixelSeedsFromPixelTracks = cms.EDProducer( "SeedGeneratorFromProtoTracksEDProducer",
  useEventsWithNoVertex = cms.bool( True ),
  originHalfLength = cms.double( 0.3 ),
  useProtoTrackKinematics = cms.bool( False ),
  InputVertexCollection = cms.InputTag( "hltPixelVertices" ),
  TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
  InputCollection = cms.InputTag( "hltPixelTracks" ),
  originRadius = cms.double( 0.1 )
)
# CKF track candidates from the PF-jet pixel-track seeds (iterative-tracking builder).
process.hltPFJetCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
  src = cms.InputTag( "hltPFJetPixelSeedsFromPixelTracks" ),
  maxSeedsBeforeCleaning = cms.uint32( 1000 ),
  TransientInitialStateEstimatorParameters = cms.PSet(
    propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
    numberMeasurementsForFit = cms.int32( 4 ),
    propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
  ),
  TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
  cleanTrajectoryAfterInOut = cms.bool( False ),
  useHitsSplitting = cms.bool( False ),
  RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
  doSeedingRegionRebuilding = cms.bool( False ),
  maxNSeeds = cms.uint32( 100000 ),
  NavigationSchool = cms.string( "SimpleNavigationSchool" ),
  TrajectoryBuilder = cms.string( "hltESPTrajectoryBuilderIT" )
)
# Final fit of the PF-jet track candidates (iter0 algorithm label).
process.hltPFJetCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
  src = cms.InputTag( "hltPFJetCkfTrackCandidates" ),
  clusterRemovalInfo = cms.InputTag( "" ),
  beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
  Fitter = cms.string( "hltESPFittingSmootherIT" ),
  useHitsSplitting = cms.bool( False ),
  MeasurementTracker = cms.string( "" ),
  alias = cms.untracked.string( "ctfWithMaterialTracks" ),
  NavigationSchool = cms.string( "" ),
  TrajectoryInEvent = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  AlgorithmName = cms.string( "iter0" ),
  Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" )
)
process.hltPFlowTrackSelectionHighPurity = cms.EDProducer( "AnalyticalTrackSelector",
max_d0 = cms.double( 100.0 ),
minNumber3DLayers = cms.uint32( 0 ),
applyAbsCutsIfNoPV = cms.bool( False ),
qualityBit = cms.string( "highPurity" ),
minNumberLayers = cms.uint32( 3 ),
chi2n_par = cms.double( 0.7 ),
useVtxError = cms.bool( False ),
nSigmaZ = cms.double( 3.0 ),
dz_par2 = cms.vdouble( 0.4, 4.0 ),
applyAdaptedPVCuts = cms.bool( True ),
min_eta = cms.double( -9999.0 ),
dz_par1 = cms.vdouble( 0.35, 4.0 ),
copyTrajectories = cms.untracked.bool( True ),
vtxNumber = cms.int32( -1 ),
max_d0NoPV = cms.double( 100.0 ),
keepAllTracks = cms.bool( False ),
maxNumberLostLayers = cms.uint32( 1 ),
beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
max_relpterr = cms.double( 9999.0 ),
copyExtras = cms.untracked.bool( True ),
max_z0NoPV = cms.double( 100.0 ),
vertexCut = cms.string( "tracksSize>=3" ),
max_z0 = cms.double( 100.0 ),
useVertices = cms.bool( True ),
min_nhits = cms.uint32( 0 ),
src = cms.InputTag( "hltPFJetCtfWithMaterialTracks" ),
chi2n_no1Dmod_par = cms.double( 9999.0 ),
vertices = cms.InputTag( "hltPixelVertices" ),
max_eta = cms.double( 9999.0 ),
d0_par2 = cms.vdouble( 0.4, 4.0 ),
d0_par1 = cms.vdouble( 0.3, 4.0 ),
res_par = cms.vdouble( 0.0030, 0.0010 ),
minHitsToBypassChecks = cms.uint32( 20 )
)
process.hltTrackRefsForJetsIter0 = cms.EDProducer( "ChargedRefCandidateProducer",
src = cms.InputTag( "hltPFlowTrackSelectionHighPurity" ),
particleType = cms.string( "pi+" )
)
process.hltAntiKT5TrackJetsIter0 = cms.EDProducer( "FastjetJetProducer",
Active_Area_Repeats = cms.int32( 5 ),
doAreaFastjet = cms.bool( False ),
voronoiRfact = cms.double( 0.9 ),
maxBadHcalCells = cms.uint32( 9999999 ),
doAreaDiskApprox = cms.bool( False ),
maxRecoveredEcalCells = cms.uint32( 9999999 ),
jetType = cms.string( "TrackJet" ),
minSeed = cms.uint32( 14327 ),
Ghost_EtaMax = cms.double( 6.0 ),
doRhoFastjet = cms.bool( False ),
jetAlgorithm = cms.string( "AntiKt" ),
nSigmaPU = cms.double( 1.0 ),
GhostArea = cms.double( 0.01 ),
Rho_EtaMax = cms.double( 4.4 ),
maxBadEcalCells = cms.uint32( 9999999 ),
useDeterministicSeed = cms.bool( True ),
doPVCorrection = cms.bool( False ),
maxRecoveredHcalCells = cms.uint32( 9999999 ),
rParam = cms.double( 0.5 ),
maxProblematicHcalCells = cms.uint32( 9999999 ),
doOutputJets = cms.bool( True ),
src = cms.InputTag( "hltTrackRefsForJetsIter0" ),
inputEtMin = cms.double( 0.1 ),
puPtMin = cms.double( 0.0 ),
srcPVs = cms.InputTag( "hltPixelVertices" ),
jetPtMin = cms.double( 1.0 ),
radiusPU = cms.double( 0.5 ),
maxProblematicEcalCells = cms.uint32( 9999999 ),
doPUOffsetCorr = cms.bool( False ),
inputEMin = cms.double( 0.0 ),
subtractorName = cms.string( "" ),
MinVtxNdof = cms.int32( 0 ),
MaxVtxZ = cms.double( 30.0 ),
UseOnlyVertexTracks = cms.bool( False ),
UseOnlyOnePV = cms.bool( True ),
DzTrVtxMax = cms.double( 0.5 ),
sumRecHits = cms.bool( False ),
DxyTrVtxMax = cms.double( 0.2 )
)
process.hltTrackAndTauJetsIter0 = cms.EDProducer( "TauJetSelectorForHLTTrackSeeding",
fractionMinCaloInTauCone = cms.double( 0.7 ),
fractionMaxChargedPUInCaloCone = cms.double( 0.3 ),
tauConeSize = cms.double( 0.2 ),
ptTrkMaxInCaloCone = cms.double( 1.0 ),
isolationConeSize = cms.double( 0.5 ),
inputTrackJetTag = cms.InputTag( "hltAntiKT5TrackJetsIter0" ),
nTrkMaxInCaloCone = cms.int32( 0 ),
inputCaloJetTag = cms.InputTag( "hltAntiKT5CaloJetsPFEt5" ),
etaMinCaloJet = cms.double( -2.7 ),
etaMaxCaloJet = cms.double( 2.7 ),
ptMinCaloJet = cms.double( 5.0 ),
inputTrackTag = cms.InputTag( "hltPFlowTrackSelectionHighPurity" )
)
# ---------------------------------------------------------------------------
# Iterative tracking, iteration 1: mask hits used by iteration 0, then run
# regional pixel-triplet seeding around the iter-0 track/tau jets, build and
# fit tracks, apply a loose+tight high-purity selection, and merge with the
# iteration-0 tracks.
# ---------------------------------------------------------------------------

# Masks pixel/strip clusters compatible (chi2 < 9) with the iteration-0
# high-purity trajectories so they are not reused in this iteration.
process.hltIter1ClustersRefRemoval = cms.EDProducer( "HLTTrackClusterRemover",
    doStrip = cms.bool( True ),
    trajectories = cms.InputTag( "hltPFlowTrackSelectionHighPurity" ),
    oldClusterRemovalInfo = cms.InputTag( "" ),
    stripClusters = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    pixelClusters = cms.InputTag( "hltSiPixelClusters" ),
    Common = cms.PSet( maxChi2 = cms.double( 9.0 ) ),
    doPixel = cms.bool( True )
)
# Re-packages the surviving strip clusters for the iteration-1 measurement
# tracker.
process.hltIter1SiStripClusters = cms.EDProducer( "MeasurementTrackerSiStripRefGetterProducer",
    InputModuleLabel = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    measurementTrackerName = cms.string( "hltIter1ESPMeasurementTracker" )
)
# Pixel-triplet seeds generated only in eta/phi regions (1.0 x 1.0) around
# the iter-0 track/tau jets, with pt > 0.5 GeV at the seed level.
process.hltIter1PFJetPixelSeeds = cms.EDProducer( "SeedGeneratorFromRegionHitsEDProducer",
    RegionFactoryPSet = cms.PSet(
      ComponentName = cms.string( "TauRegionalPixelSeedGenerator" ),
      RegionPSet = cms.PSet(
        precise = cms.bool( True ),
        deltaPhiRegion = cms.double( 1.0 ),
        originHalfLength = cms.double( 0.1 ),
        originRadius = cms.double( 0.05 ),
        measurementTrackerName = cms.string( "hltIter1ESPMeasurementTracker" ),
        deltaEtaRegion = cms.double( 1.0 ),
        vertexSrc = cms.InputTag( "hltPixelVertices" ),
        searchOpt = cms.bool( True ),
        JetSrc = cms.InputTag( "hltTrackAndTauJetsIter0" ),
        originZPos = cms.double( 0.0 ),
        ptMin = cms.double( 0.5 )
      )
    ),
    SeedComparitorPSet = cms.PSet( ComponentName = cms.string( "none" ) ),
    ClusterCheckPSet = cms.PSet(
      PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
      MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
      doClusterCheck = cms.bool( False ),
      ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
      MaxNumberOfPixelClusters = cms.uint32( 10000 )
    ),
    OrderedHitsFactoryPSet = cms.PSet(
      maxElement = cms.uint32( 0 ),
      ComponentName = cms.string( "StandardHitTripletGenerator" ),
      GeneratorPSet = cms.PSet(
        useBending = cms.bool( True ),
        useFixedPreFiltering = cms.bool( False ),
        maxElement = cms.uint32( 100000 ),
        phiPreFiltering = cms.double( 0.3 ),
        extraHitRPhitolerance = cms.double( 0.032 ),
        useMultScattering = cms.bool( True ),
        ComponentName = cms.string( "PixelTripletHLTGenerator" ),
        extraHitRZtolerance = cms.double( 0.037 ),
        SeedComparitorPSet = cms.PSet( ComponentName = cms.string( "none" ) )
      ),
      SeedingLayers = cms.string( "hltIter1ESPPixelLayerTriplets" )
    ),
    SeedCreatorPSet = cms.PSet(
      ComponentName = cms.string( "SeedFromConsecutiveHitsTripletOnlyCreator" ),
      propagator = cms.string( "PropagatorWithMaterial" )
    ),
    TTRHBuilder = cms.string( "WithTrackAngle" )
)
# CKF pattern recognition for iteration 1 (iteration-specific trajectory
# builder, hence a separate module from the iter-0 twin).
process.hltIter1PFJetCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    src = cms.InputTag( "hltIter1PFJetPixelSeeds" ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
    ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    cleanTrajectoryAfterInOut = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    doSeedingRegionRebuilding = cms.bool( False ),
    maxNSeeds = cms.uint32( 100000 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    TrajectoryBuilder = cms.string( "hltIter1ESPTrajectoryBuilderIT" )
)
# Final track fit for iteration 1; algorithm label "iter1".
process.hltIter1PFJetCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    src = cms.InputTag( "hltIter1PFJetCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTracker = cms.string( "" ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    NavigationSchool = cms.string( "" ),
    TrajectoryInEvent = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "iter1" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" )
)
# Loose leg of the iteration-1 quality selection (>= 3 layers, looser
# chi2/IP parameterization than the tight leg below).
process.hltIter1PFlowTrackSelectionHighPurityLoose = cms.EDProducer( "AnalyticalTrackSelector",
    max_d0 = cms.double( 100.0 ),
    minNumber3DLayers = cms.uint32( 0 ),
    applyAbsCutsIfNoPV = cms.bool( False ),
    qualityBit = cms.string( "highPurity" ),
    minNumberLayers = cms.uint32( 3 ),
    chi2n_par = cms.double( 0.7 ),
    useVtxError = cms.bool( False ),
    nSigmaZ = cms.double( 3.0 ),
    dz_par2 = cms.vdouble( 0.9, 3.0 ),
    applyAdaptedPVCuts = cms.bool( True ),
    min_eta = cms.double( -9999.0 ),
    dz_par1 = cms.vdouble( 0.8, 3.0 ),
    copyTrajectories = cms.untracked.bool( True ),
    vtxNumber = cms.int32( -1 ),
    max_d0NoPV = cms.double( 100.0 ),
    keepAllTracks = cms.bool( False ),
    maxNumberLostLayers = cms.uint32( 1 ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    max_relpterr = cms.double( 9999.0 ),
    copyExtras = cms.untracked.bool( True ),
    max_z0NoPV = cms.double( 100.0 ),
    vertexCut = cms.string( "tracksSize>=3" ),
    max_z0 = cms.double( 100.0 ),
    useVertices = cms.bool( True ),
    min_nhits = cms.uint32( 0 ),
    src = cms.InputTag( "hltIter1PFJetCtfWithMaterialTracks" ),
    chi2n_no1Dmod_par = cms.double( 9999.0 ),
    vertices = cms.InputTag( "hltPixelVertices" ),
    max_eta = cms.double( 9999.0 ),
    d0_par2 = cms.vdouble( 0.9, 3.0 ),
    d0_par1 = cms.vdouble( 0.85, 3.0 ),
    res_par = cms.vdouble( 0.0030, 0.0010 ),
    minHitsToBypassChecks = cms.uint32( 20 )
)
# Tight leg of the iteration-1 quality selection (>= 5 layers, tighter
# normalized-chi2 cut).
process.hltIter1PFlowTrackSelectionHighPurityTight = cms.EDProducer( "AnalyticalTrackSelector",
    max_d0 = cms.double( 100.0 ),
    minNumber3DLayers = cms.uint32( 0 ),
    applyAbsCutsIfNoPV = cms.bool( False ),
    qualityBit = cms.string( "highPurity" ),
    minNumberLayers = cms.uint32( 5 ),
    chi2n_par = cms.double( 0.4 ),
    useVtxError = cms.bool( False ),
    nSigmaZ = cms.double( 3.0 ),
    dz_par2 = cms.vdouble( 1.0, 4.0 ),
    applyAdaptedPVCuts = cms.bool( True ),
    min_eta = cms.double( -9999.0 ),
    dz_par1 = cms.vdouble( 1.0, 4.0 ),
    copyTrajectories = cms.untracked.bool( True ),
    vtxNumber = cms.int32( -1 ),
    max_d0NoPV = cms.double( 100.0 ),
    keepAllTracks = cms.bool( False ),
    maxNumberLostLayers = cms.uint32( 1 ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    max_relpterr = cms.double( 9999.0 ),
    copyExtras = cms.untracked.bool( True ),
    max_z0NoPV = cms.double( 100.0 ),
    vertexCut = cms.string( "tracksSize>=3" ),
    max_z0 = cms.double( 100.0 ),
    useVertices = cms.bool( True ),
    min_nhits = cms.uint32( 0 ),
    src = cms.InputTag( "hltIter1PFJetCtfWithMaterialTracks" ),
    chi2n_no1Dmod_par = cms.double( 9999.0 ),
    vertices = cms.InputTag( "hltPixelVertices" ),
    max_eta = cms.double( 9999.0 ),
    d0_par2 = cms.vdouble( 1.0, 4.0 ),
    d0_par1 = cms.vdouble( 1.0, 4.0 ),
    res_par = cms.vdouble( 0.0030, 0.0010 ),
    minHitsToBypassChecks = cms.uint32( 20 )
)
# Combines the loose and tight selections into the final iteration-1
# high-purity collection, arbitrating duplicates by shared hits.
process.hltIter1PFlowTrackSelectionHighPurity = cms.EDProducer( "SimpleTrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    promoteTrackQuality = cms.bool( True ),
    MinPT = cms.double( 0.05 ),
    copyExtras = cms.untracked.bool( True ),
    Epsilon = cms.double( -0.0010 ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    TrackProducer1 = cms.string( "hltIter1PFlowTrackSelectionHighPurityLoose" ),
    MinFound = cms.int32( 3 ),
    TrackProducer2 = cms.string( "hltIter1PFlowTrackSelectionHighPurityTight" ),
    LostHitPenalty = cms.double( 20.0 ),
    FoundHitBonus = cms.double( 5.0 )
)
# Merges iteration-0 and iteration-1 tracks into the running combined
# collection used downstream.
process.hltIter1Merged = cms.EDProducer( "SimpleTrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    promoteTrackQuality = cms.bool( True ),
    MinPT = cms.double( 0.05 ),
    copyExtras = cms.untracked.bool( True ),
    Epsilon = cms.double( -0.0010 ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    TrackProducer1 = cms.string( "hltPFlowTrackSelectionHighPurity" ),
    MinFound = cms.int32( 3 ),
    TrackProducer2 = cms.string( "hltIter1PFlowTrackSelectionHighPurity" ),
    LostHitPenalty = cms.double( 20.0 ),
    FoundHitBonus = cms.double( 5.0 )
)
# Charged candidates (pi+ hypothesis) from the iter0+iter1 merged tracks.
process.hltTrackRefsForJetsIter1 = cms.EDProducer( "ChargedRefCandidateProducer",
    src = cms.InputTag( "hltIter1Merged" ),
    particleType = cms.string( "pi+" )
)
# Anti-kT (R = 0.5) track jets from the merged tracks; jet pt threshold
# raised to 1.4 GeV for this iteration.
process.hltAntiKT5TrackJetsIter1 = cms.EDProducer( "FastjetJetProducer",
    Active_Area_Repeats = cms.int32( 5 ),
    doAreaFastjet = cms.bool( False ),
    voronoiRfact = cms.double( 0.9 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    doAreaDiskApprox = cms.bool( False ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    jetType = cms.string( "TrackJet" ),
    minSeed = cms.uint32( 14327 ),
    Ghost_EtaMax = cms.double( 6.0 ),
    doRhoFastjet = cms.bool( False ),
    jetAlgorithm = cms.string( "AntiKt" ),
    nSigmaPU = cms.double( 1.0 ),
    GhostArea = cms.double( 0.01 ),
    Rho_EtaMax = cms.double( 4.4 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    useDeterministicSeed = cms.bool( True ),
    doPVCorrection = cms.bool( False ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    rParam = cms.double( 0.5 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    doOutputJets = cms.bool( True ),
    src = cms.InputTag( "hltTrackRefsForJetsIter1" ),
    inputEtMin = cms.double( 0.1 ),
    puPtMin = cms.double( 0.0 ),
    srcPVs = cms.InputTag( "hltPixelVertices" ),
    jetPtMin = cms.double( 1.4 ),
    radiusPU = cms.double( 0.5 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    doPUOffsetCorr = cms.bool( False ),
    inputEMin = cms.double( 0.0 ),
    subtractorName = cms.string( "" ),
    MinVtxNdof = cms.int32( 0 ),
    MaxVtxZ = cms.double( 30.0 ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( True ),
    DzTrVtxMax = cms.double( 0.5 ),
    sumRecHits = cms.bool( False ),
    DxyTrVtxMax = cms.double( 0.2 )
)
# Track/tau-jet regions seeding tracking iteration 2.
process.hltTrackAndTauJetsIter1 = cms.EDProducer( "TauJetSelectorForHLTTrackSeeding",
    fractionMinCaloInTauCone = cms.double( 0.7 ),
    fractionMaxChargedPUInCaloCone = cms.double( 0.3 ),
    tauConeSize = cms.double( 0.2 ),
    ptTrkMaxInCaloCone = cms.double( 1.4 ),
    isolationConeSize = cms.double( 0.5 ),
    inputTrackJetTag = cms.InputTag( "hltAntiKT5TrackJetsIter1" ),
    nTrkMaxInCaloCone = cms.int32( 0 ),
    inputCaloJetTag = cms.InputTag( "hltAntiKT5CaloJetsPFEt5" ),
    etaMinCaloJet = cms.double( -2.7 ),
    etaMaxCaloJet = cms.double( 2.7 ),
    ptMinCaloJet = cms.double( 5.0 ),
    inputTrackTag = cms.InputTag( "hltIter1Merged" )
)
# ---------------------------------------------------------------------------
# Iterative tracking, iteration 2: further cluster masking, then pixel-PAIR
# seeding (instead of triplets) in tighter regions with a higher pt
# threshold, followed by the usual build/fit/select/merge steps.
# ---------------------------------------------------------------------------

# Extends the cluster mask with hits used by iteration-1 tracks (looser
# chi2 < 16 association than in iteration 1); chained via
# oldClusterRemovalInfo.
process.hltIter2ClustersRefRemoval = cms.EDProducer( "HLTTrackClusterRemover",
    doStrip = cms.bool( True ),
    trajectories = cms.InputTag( "hltIter1PFlowTrackSelectionHighPurity" ),
    oldClusterRemovalInfo = cms.InputTag( "hltIter1ClustersRefRemoval" ),
    stripClusters = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    pixelClusters = cms.InputTag( "hltSiPixelClusters" ),
    Common = cms.PSet( maxChi2 = cms.double( 16.0 ) ),
    doPixel = cms.bool( True )
)
# Surviving strip clusters for the iteration-2 measurement tracker.
process.hltIter2SiStripClusters = cms.EDProducer( "MeasurementTrackerSiStripRefGetterProducer",
    InputModuleLabel = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    measurementTrackerName = cms.string( "hltIter2ESPMeasurementTracker" )
)
# Pixel-PAIR seeds (StandardHitPairGenerator) in 0.8 x 0.8 regions around
# the iter-1 track/tau jets, pt > 1.2 GeV.
process.hltIter2PFJetPixelSeeds = cms.EDProducer( "SeedGeneratorFromRegionHitsEDProducer",
    RegionFactoryPSet = cms.PSet(
      ComponentName = cms.string( "TauRegionalPixelSeedGenerator" ),
      RegionPSet = cms.PSet(
        precise = cms.bool( True ),
        deltaPhiRegion = cms.double( 0.8 ),
        originHalfLength = cms.double( 0.05 ),
        originRadius = cms.double( 0.025 ),
        measurementTrackerName = cms.string( "hltIter2ESPMeasurementTracker" ),
        deltaEtaRegion = cms.double( 0.8 ),
        vertexSrc = cms.InputTag( "hltPixelVertices" ),
        searchOpt = cms.bool( True ),
        JetSrc = cms.InputTag( "hltTrackAndTauJetsIter1" ),
        originZPos = cms.double( 0.0 ),
        ptMin = cms.double( 1.2 )
      )
    ),
    SeedComparitorPSet = cms.PSet( ComponentName = cms.string( "none" ) ),
    ClusterCheckPSet = cms.PSet(
      PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
      MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
      doClusterCheck = cms.bool( False ),
      ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
      MaxNumberOfPixelClusters = cms.uint32( 10000 )
    ),
    OrderedHitsFactoryPSet = cms.PSet(
      maxElement = cms.uint32( 0 ),
      ComponentName = cms.string( "StandardHitPairGenerator" ),
      GeneratorPSet = cms.PSet(
        maxElement = cms.uint32( 100000 ),
        SeedComparitorPSet = cms.PSet( ComponentName = cms.string( "none" ) )
      ),
      SeedingLayers = cms.string( "hltIter2ESPPixelLayerPairs" )
    ),
    SeedCreatorPSet = cms.PSet(
      ComponentName = cms.string( "SeedFromConsecutiveHitsCreator" ),
      propagator = cms.string( "PropagatorWithMaterial" )
    ),
    TTRHBuilder = cms.string( "WithTrackAngle" )
)
# CKF pattern recognition for iteration 2.
process.hltIter2PFJetCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    src = cms.InputTag( "hltIter2PFJetPixelSeeds" ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
    ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    cleanTrajectoryAfterInOut = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    doSeedingRegionRebuilding = cms.bool( False ),
    maxNSeeds = cms.uint32( 100000 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    TrajectoryBuilder = cms.string( "hltIter2ESPTrajectoryBuilderIT" )
)
# Final track fit for iteration 2; algorithm label "iter2".
process.hltIter2PFJetCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    src = cms.InputTag( "hltIter2PFJetCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTracker = cms.string( "" ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    NavigationSchool = cms.string( "" ),
    TrajectoryInEvent = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "iter2" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" )
)
# High-purity selection for iteration 2 (single-leg; same cut
# parameterization as iteration 0).
process.hltIter2PFlowTrackSelectionHighPurity = cms.EDProducer( "AnalyticalTrackSelector",
    max_d0 = cms.double( 100.0 ),
    minNumber3DLayers = cms.uint32( 0 ),
    applyAbsCutsIfNoPV = cms.bool( False ),
    qualityBit = cms.string( "highPurity" ),
    minNumberLayers = cms.uint32( 3 ),
    chi2n_par = cms.double( 0.7 ),
    useVtxError = cms.bool( False ),
    nSigmaZ = cms.double( 3.0 ),
    dz_par2 = cms.vdouble( 0.4, 4.0 ),
    applyAdaptedPVCuts = cms.bool( True ),
    min_eta = cms.double( -9999.0 ),
    dz_par1 = cms.vdouble( 0.35, 4.0 ),
    copyTrajectories = cms.untracked.bool( True ),
    vtxNumber = cms.int32( -1 ),
    max_d0NoPV = cms.double( 100.0 ),
    keepAllTracks = cms.bool( False ),
    maxNumberLostLayers = cms.uint32( 1 ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    max_relpterr = cms.double( 9999.0 ),
    copyExtras = cms.untracked.bool( True ),
    max_z0NoPV = cms.double( 100.0 ),
    vertexCut = cms.string( "tracksSize>=3" ),
    max_z0 = cms.double( 100.0 ),
    useVertices = cms.bool( True ),
    min_nhits = cms.uint32( 0 ),
    src = cms.InputTag( "hltIter2PFJetCtfWithMaterialTracks" ),
    chi2n_no1Dmod_par = cms.double( 9999.0 ),
    vertices = cms.InputTag( "hltPixelVertices" ),
    max_eta = cms.double( 9999.0 ),
    d0_par2 = cms.vdouble( 0.4, 4.0 ),
    d0_par1 = cms.vdouble( 0.3, 4.0 ),
    res_par = cms.vdouble( 0.0030, 0.0010 ),
    minHitsToBypassChecks = cms.uint32( 20 )
)
# Merges the iter0+iter1 combined collection with the iteration-2 tracks.
process.hltIter2Merged = cms.EDProducer( "SimpleTrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    promoteTrackQuality = cms.bool( True ),
    MinPT = cms.double( 0.05 ),
    copyExtras = cms.untracked.bool( True ),
    Epsilon = cms.double( -0.0010 ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    TrackProducer1 = cms.string( "hltIter1Merged" ),
    MinFound = cms.int32( 3 ),
    TrackProducer2 = cms.string( "hltIter2PFlowTrackSelectionHighPurity" ),
    LostHitPenalty = cms.double( 20.0 ),
    FoundHitBonus = cms.double( 5.0 )
)
# Charged candidates (pi+ hypothesis) from the iter0..iter2 merged tracks.
process.hltTrackRefsForJetsIter2 = cms.EDProducer( "ChargedRefCandidateProducer",
    src = cms.InputTag( "hltIter2Merged" ),
    particleType = cms.string( "pi+" )
)
# Anti-kT (R = 0.5) track jets; jet pt threshold raised to 3 GeV.
process.hltAntiKT5TrackJetsIter2 = cms.EDProducer( "FastjetJetProducer",
    Active_Area_Repeats = cms.int32( 5 ),
    doAreaFastjet = cms.bool( False ),
    voronoiRfact = cms.double( 0.9 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    doAreaDiskApprox = cms.bool( False ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    jetType = cms.string( "TrackJet" ),
    minSeed = cms.uint32( 14327 ),
    Ghost_EtaMax = cms.double( 6.0 ),
    doRhoFastjet = cms.bool( False ),
    jetAlgorithm = cms.string( "AntiKt" ),
    nSigmaPU = cms.double( 1.0 ),
    GhostArea = cms.double( 0.01 ),
    Rho_EtaMax = cms.double( 4.4 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    useDeterministicSeed = cms.bool( True ),
    doPVCorrection = cms.bool( False ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    rParam = cms.double( 0.5 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    doOutputJets = cms.bool( True ),
    src = cms.InputTag( "hltTrackRefsForJetsIter2" ),
    inputEtMin = cms.double( 0.1 ),
    puPtMin = cms.double( 0.0 ),
    srcPVs = cms.InputTag( "hltPixelVertices" ),
    jetPtMin = cms.double( 3.0 ),
    radiusPU = cms.double( 0.5 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    doPUOffsetCorr = cms.bool( False ),
    inputEMin = cms.double( 0.0 ),
    subtractorName = cms.string( "" ),
    MinVtxNdof = cms.int32( 0 ),
    MaxVtxZ = cms.double( 30.0 ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( True ),
    DzTrVtxMax = cms.double( 0.5 ),
    sumRecHits = cms.bool( False ),
    DxyTrVtxMax = cms.double( 0.2 )
)
# Track/tau-jet regions seeding tracking iteration 3.
process.hltTrackAndTauJetsIter2 = cms.EDProducer( "TauJetSelectorForHLTTrackSeeding",
    fractionMinCaloInTauCone = cms.double( 0.7 ),
    fractionMaxChargedPUInCaloCone = cms.double( 0.3 ),
    tauConeSize = cms.double( 0.2 ),
    ptTrkMaxInCaloCone = cms.double( 3.0 ),
    isolationConeSize = cms.double( 0.5 ),
    inputTrackJetTag = cms.InputTag( "hltAntiKT5TrackJetsIter2" ),
    nTrkMaxInCaloCone = cms.int32( 0 ),
    inputCaloJetTag = cms.InputTag( "hltAntiKT5CaloJetsPFEt5" ),
    etaMinCaloJet = cms.double( -2.7 ),
    etaMaxCaloJet = cms.double( 2.7 ),
    ptMinCaloJet = cms.double( 5.0 ),
    inputTrackTag = cms.InputTag( "hltIter2Merged" )
)
# ---------------------------------------------------------------------------
# Iterative tracking, iteration 3: mixed (pixel+strip) triplet seeding in
# even tighter regions, loose+tight selection, and merge into the final
# combined track collection of this chain.
# ---------------------------------------------------------------------------

# Extends the cluster mask with hits used by iteration-2 tracks
# (chi2 < 16); chained via oldClusterRemovalInfo.
process.hltIter3ClustersRefRemoval = cms.EDProducer( "HLTTrackClusterRemover",
    doStrip = cms.bool( True ),
    trajectories = cms.InputTag( "hltIter2PFlowTrackSelectionHighPurity" ),
    oldClusterRemovalInfo = cms.InputTag( "hltIter2ClustersRefRemoval" ),
    stripClusters = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    pixelClusters = cms.InputTag( "hltSiPixelClusters" ),
    Common = cms.PSet( maxChi2 = cms.double( 16.0 ) ),
    doPixel = cms.bool( True )
)
# Surviving strip clusters for the iteration-3 measurement tracker.
process.hltIter3SiStripClusters = cms.EDProducer( "MeasurementTrackerSiStripRefGetterProducer",
    InputModuleLabel = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    measurementTrackerName = cms.string( "hltIter3ESPMeasurementTracker" )
)
# Triplet seeds on the mixed layer set (hltIter3ESPLayerTriplets,
# presumably pixel+strip — see corresponding ESProducer) in 0.5 x 0.5
# regions around the iter-2 track/tau jets, pt > 0.8 GeV.
process.hltIter3PFJetMixedSeeds = cms.EDProducer( "SeedGeneratorFromRegionHitsEDProducer",
    RegionFactoryPSet = cms.PSet(
      ComponentName = cms.string( "TauRegionalPixelSeedGenerator" ),
      RegionPSet = cms.PSet(
        precise = cms.bool( True ),
        deltaPhiRegion = cms.double( 0.5 ),
        originHalfLength = cms.double( 0.05 ),
        originRadius = cms.double( 0.05 ),
        measurementTrackerName = cms.string( "hltIter3ESPMeasurementTracker" ),
        deltaEtaRegion = cms.double( 0.5 ),
        vertexSrc = cms.InputTag( "hltPixelVertices" ),
        searchOpt = cms.bool( True ),
        JetSrc = cms.InputTag( "hltTrackAndTauJetsIter2" ),
        originZPos = cms.double( 0.0 ),
        ptMin = cms.double( 0.8 )
      )
    ),
    SeedComparitorPSet = cms.PSet( ComponentName = cms.string( "none" ) ),
    ClusterCheckPSet = cms.PSet(
      PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
      MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
      doClusterCheck = cms.bool( False ),
      ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
      MaxNumberOfPixelClusters = cms.uint32( 10000 )
    ),
    OrderedHitsFactoryPSet = cms.PSet(
      maxElement = cms.uint32( 0 ),
      ComponentName = cms.string( "StandardHitTripletGenerator" ),
      GeneratorPSet = cms.PSet(
        useBending = cms.bool( True ),
        useFixedPreFiltering = cms.bool( False ),
        maxElement = cms.uint32( 100000 ),
        phiPreFiltering = cms.double( 0.3 ),
        extraHitRPhitolerance = cms.double( 0.032 ),
        useMultScattering = cms.bool( True ),
        ComponentName = cms.string( "PixelTripletHLTGenerator" ),
        extraHitRZtolerance = cms.double( 0.037 ),
        SeedComparitorPSet = cms.PSet( ComponentName = cms.string( "none" ) )
      ),
      SeedingLayers = cms.string( "hltIter3ESPLayerTriplets" )
    ),
    SeedCreatorPSet = cms.PSet(
      ComponentName = cms.string( "SeedFromConsecutiveHitsTripletOnlyCreator" ),
      propagator = cms.string( "PropagatorWithMaterial" )
    ),
    TTRHBuilder = cms.string( "WithTrackAngle" )
)
# CKF pattern recognition for iteration 3.
process.hltIter3PFJetCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    src = cms.InputTag( "hltIter3PFJetMixedSeeds" ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
    ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    cleanTrajectoryAfterInOut = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    doSeedingRegionRebuilding = cms.bool( False ),
    maxNSeeds = cms.uint32( 100000 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    TrajectoryBuilder = cms.string( "hltIter3ESPTrajectoryBuilderIT" )
)
# Final track fit for iteration 3; algorithm label "iter3".
process.hltIter3PFJetCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    src = cms.InputTag( "hltIter3PFJetCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTracker = cms.string( "" ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    NavigationSchool = cms.string( "" ),
    TrajectoryInEvent = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "iter3" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" )
)
# Loose leg of the iteration-3 quality selection (>= 3 layers).
process.hltIter3PFlowTrackSelectionHighPurityLoose = cms.EDProducer( "AnalyticalTrackSelector",
    max_d0 = cms.double( 100.0 ),
    minNumber3DLayers = cms.uint32( 0 ),
    applyAbsCutsIfNoPV = cms.bool( False ),
    qualityBit = cms.string( "highPurity" ),
    minNumberLayers = cms.uint32( 3 ),
    chi2n_par = cms.double( 0.7 ),
    useVtxError = cms.bool( False ),
    nSigmaZ = cms.double( 3.0 ),
    dz_par2 = cms.vdouble( 0.9, 3.0 ),
    applyAdaptedPVCuts = cms.bool( True ),
    min_eta = cms.double( -9999.0 ),
    dz_par1 = cms.vdouble( 0.85, 3.0 ),
    copyTrajectories = cms.untracked.bool( True ),
    vtxNumber = cms.int32( -1 ),
    max_d0NoPV = cms.double( 100.0 ),
    keepAllTracks = cms.bool( False ),
    maxNumberLostLayers = cms.uint32( 1 ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    max_relpterr = cms.double( 9999.0 ),
    copyExtras = cms.untracked.bool( True ),
    max_z0NoPV = cms.double( 100.0 ),
    vertexCut = cms.string( "tracksSize>=3" ),
    max_z0 = cms.double( 100.0 ),
    useVertices = cms.bool( True ),
    min_nhits = cms.uint32( 0 ),
    src = cms.InputTag( "hltIter3PFJetCtfWithMaterialTracks" ),
    chi2n_no1Dmod_par = cms.double( 9999.0 ),
    vertices = cms.InputTag( "hltPixelVertices" ),
    max_eta = cms.double( 9999.0 ),
    d0_par2 = cms.vdouble( 0.9, 3.0 ),
    d0_par1 = cms.vdouble( 0.85, 3.0 ),
    res_par = cms.vdouble( 0.0030, 0.0010 ),
    minHitsToBypassChecks = cms.uint32( 20 )
)
# Tight leg of the iteration-3 quality selection (>= 5 layers, tighter
# normalized-chi2 cut).
process.hltIter3PFlowTrackSelectionHighPurityTight = cms.EDProducer( "AnalyticalTrackSelector",
    max_d0 = cms.double( 100.0 ),
    minNumber3DLayers = cms.uint32( 0 ),
    applyAbsCutsIfNoPV = cms.bool( False ),
    qualityBit = cms.string( "highPurity" ),
    minNumberLayers = cms.uint32( 5 ),
    chi2n_par = cms.double( 0.4 ),
    useVtxError = cms.bool( False ),
    nSigmaZ = cms.double( 3.0 ),
    dz_par2 = cms.vdouble( 1.0, 4.0 ),
    applyAdaptedPVCuts = cms.bool( True ),
    min_eta = cms.double( -9999.0 ),
    dz_par1 = cms.vdouble( 1.0, 4.0 ),
    copyTrajectories = cms.untracked.bool( True ),
    vtxNumber = cms.int32( -1 ),
    max_d0NoPV = cms.double( 100.0 ),
    keepAllTracks = cms.bool( False ),
    maxNumberLostLayers = cms.uint32( 1 ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    max_relpterr = cms.double( 9999.0 ),
    copyExtras = cms.untracked.bool( True ),
    max_z0NoPV = cms.double( 100.0 ),
    vertexCut = cms.string( "tracksSize>=3" ),
    max_z0 = cms.double( 100.0 ),
    useVertices = cms.bool( True ),
    min_nhits = cms.uint32( 0 ),
    src = cms.InputTag( "hltIter3PFJetCtfWithMaterialTracks" ),
    chi2n_no1Dmod_par = cms.double( 9999.0 ),
    vertices = cms.InputTag( "hltPixelVertices" ),
    max_eta = cms.double( 9999.0 ),
    d0_par2 = cms.vdouble( 1.0, 4.0 ),
    d0_par1 = cms.vdouble( 1.0, 4.0 ),
    res_par = cms.vdouble( 0.0030, 0.0010 ),
    minHitsToBypassChecks = cms.uint32( 20 )
)
# Combines the loose and tight selections into the final iteration-3
# high-purity collection.
process.hltIter3PFlowTrackSelectionHighPurity = cms.EDProducer( "SimpleTrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    promoteTrackQuality = cms.bool( True ),
    MinPT = cms.double( 0.05 ),
    copyExtras = cms.untracked.bool( True ),
    Epsilon = cms.double( -0.0010 ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    TrackProducer1 = cms.string( "hltIter3PFlowTrackSelectionHighPurityLoose" ),
    MinFound = cms.int32( 3 ),
    TrackProducer2 = cms.string( "hltIter3PFlowTrackSelectionHighPurityTight" ),
    LostHitPenalty = cms.double( 20.0 ),
    FoundHitBonus = cms.double( 5.0 )
)
# Merges the iter0..iter2 combined collection with the iteration-3 tracks.
process.hltIter3Merged = cms.EDProducer( "SimpleTrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    promoteTrackQuality = cms.bool( True ),
    MinPT = cms.double( 0.05 ),
    copyExtras = cms.untracked.bool( True ),
    Epsilon = cms.double( -0.0010 ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    TrackProducer1 = cms.string( "hltIter2Merged" ),
    MinFound = cms.int32( 3 ),
    TrackProducer2 = cms.string( "hltIter3PFlowTrackSelectionHighPurity" ),
    LostHitPenalty = cms.double( 20.0 ),
    FoundHitBonus = cms.double( 5.0 )
)
# Charged candidates (pi+ hypothesis) from the iter0..iter3 merged tracks.
process.hltTrackRefsForJetsIter3 = cms.EDProducer( "ChargedRefCandidateProducer",
    src = cms.InputTag( "hltIter3Merged" ),
    particleType = cms.string( "pi+" )
)
# Anti-kT (R = 0.5) track jets; jet pt threshold raised to 4 GeV.
process.hltAntiKT5TrackJetsIter3 = cms.EDProducer( "FastjetJetProducer",
    Active_Area_Repeats = cms.int32( 5 ),
    doAreaFastjet = cms.bool( False ),
    voronoiRfact = cms.double( 0.9 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    doAreaDiskApprox = cms.bool( False ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    jetType = cms.string( "TrackJet" ),
    minSeed = cms.uint32( 14327 ),
    Ghost_EtaMax = cms.double( 6.0 ),
    doRhoFastjet = cms.bool( False ),
    jetAlgorithm = cms.string( "AntiKt" ),
    nSigmaPU = cms.double( 1.0 ),
    GhostArea = cms.double( 0.01 ),
    Rho_EtaMax = cms.double( 4.4 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    useDeterministicSeed = cms.bool( True ),
    doPVCorrection = cms.bool( False ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    rParam = cms.double( 0.5 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    doOutputJets = cms.bool( True ),
    src = cms.InputTag( "hltTrackRefsForJetsIter3" ),
    inputEtMin = cms.double( 0.1 ),
    puPtMin = cms.double( 0.0 ),
    srcPVs = cms.InputTag( "hltPixelVertices" ),
    jetPtMin = cms.double( 4.0 ),
    radiusPU = cms.double( 0.5 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    doPUOffsetCorr = cms.bool( False ),
    inputEMin = cms.double( 0.0 ),
    subtractorName = cms.string( "" ),
    MinVtxNdof = cms.int32( 0 ),
    MaxVtxZ = cms.double( 30.0 ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( True ),
    DzTrVtxMax = cms.double( 0.5 ),
    sumRecHits = cms.bool( False ),
    DxyTrVtxMax = cms.double( 0.2 )
)
# Track/tau-jet regions seeding tracking iteration 4; note the tighter
# calo-jet eta window (|eta| < 2.0) compared to earlier iterations (2.7).
process.hltTrackAndTauJetsIter3 = cms.EDProducer( "TauJetSelectorForHLTTrackSeeding",
    fractionMinCaloInTauCone = cms.double( 0.7 ),
    fractionMaxChargedPUInCaloCone = cms.double( 0.3 ),
    tauConeSize = cms.double( 0.2 ),
    ptTrkMaxInCaloCone = cms.double( 4.0 ),
    isolationConeSize = cms.double( 0.5 ),
    inputTrackJetTag = cms.InputTag( "hltAntiKT5TrackJetsIter3" ),
    nTrkMaxInCaloCone = cms.int32( 0 ),
    inputCaloJetTag = cms.InputTag( "hltAntiKT5CaloJetsPFEt5" ),
    etaMinCaloJet = cms.double( -2.0 ),
    etaMaxCaloJet = cms.double( 2.0 ),
    ptMinCaloJet = cms.double( 5.0 ),
    inputTrackTag = cms.InputTag( "hltIter3Merged" )
)
# Iteration-4 tracking setup: mask pixel and strip clusters already used by
# iteration-3 high-purity tracks (chained onto the iter-3 removal info) so
# the next seeding pass only sees unused hits.
process.hltIter4ClustersRefRemoval = cms.EDProducer( "HLTTrackClusterRemover",
    doStrip = cms.bool( True ),
    trajectories = cms.InputTag( "hltIter3PFlowTrackSelectionHighPurity" ),
    oldClusterRemovalInfo = cms.InputTag( "hltIter3ClustersRefRemoval" ),
    stripClusters = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    pixelClusters = cms.InputTag( "hltSiPixelClusters" ),
    Common = cms.PSet( maxChi2 = cms.double( 16.0 ) ),
    doPixel = cms.bool( True )
)
# Provide the remaining strip clusters to the iter-4 measurement tracker.
process.hltIter4SiStripClusters = cms.EDProducer( "MeasurementTrackerSiStripRefGetterProducer",
    InputModuleLabel = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    measurementTrackerName = cms.string( "hltIter4ESPMeasurementTracker" )
)
# Iteration-4 seeding: hit-pair seeds in regions around the tau-like jets
# selected after iteration 3 (TauRegionalPixelSeedGenerator regions centred
# on hltTrackAndTauJetsIter3, ptMin = 0.8 GeV).  NOTE(review): despite the
# "PixelLess" name, the seeding layers come from "hltIter4ESPPixelLayerPairs"
# — the actual layer content is defined elsewhere in the menu.
process.hltIter4PFJetPixelLessSeeds = cms.EDProducer( "SeedGeneratorFromRegionHitsEDProducer",
    RegionFactoryPSet = cms.PSet(
      ComponentName = cms.string( "TauRegionalPixelSeedGenerator" ),
      RegionPSet = cms.PSet(
        precise = cms.bool( True ),
        deltaPhiRegion = cms.double( 0.5 ),
        originHalfLength = cms.double( 1.0 ),
        originRadius = cms.double( 0.5 ),
        measurementTrackerName = cms.string( "hltIter4ESPMeasurementTracker" ),
        deltaEtaRegion = cms.double( 0.5 ),
        vertexSrc = cms.InputTag( "hltPixelVertices" ),
        searchOpt = cms.bool( True ),
        JetSrc = cms.InputTag( "hltTrackAndTauJetsIter3" ),
        originZPos = cms.double( 0.0 ),
        ptMin = cms.double( 0.8 )
      )
    ),
    SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) ),
    ClusterCheckPSet = cms.PSet(
      PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
      MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
      doClusterCheck = cms.bool( False ),
      ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
      MaxNumberOfPixelClusters = cms.uint32( 10000 )
    ),
    OrderedHitsFactoryPSet = cms.PSet(
      maxElement = cms.uint32( 0 ),
      ComponentName = cms.string( "StandardHitPairGenerator" ),
      GeneratorPSet = cms.PSet(
        maxElement = cms.uint32( 100000 ),
        SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
      ),
      SeedingLayers = cms.string( "hltIter4ESPPixelLayerPairs" )
    ),
    SeedCreatorPSet = cms.PSet(
      ComponentName = cms.string( "SeedFromConsecutiveHitsCreator" ),
      propagator = cms.string( "PropagatorWithMaterial" )
    ),
    TTRHBuilder = cms.string( "WithTrackAngle" )
)
# Build iteration-4 track candidates from the regional seeds via combinatorial
# Kalman-filter pattern recognition (trajectory builder configured elsewhere
# as hltIter4ESPTrajectoryBuilderIT).
process.hltIter4PFJetCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    src = cms.InputTag( "hltIter4PFJetPixelLessSeeds" ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
    ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    cleanTrajectoryAfterInOut = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    doSeedingRegionRebuilding = cms.bool( False ),
    maxNSeeds = cms.uint32( 100000 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    TrajectoryBuilder = cms.string( "hltIter4ESPTrajectoryBuilderIT" )
)
# Final Kalman fit of the iteration-4 candidates into reco::Tracks
# (algorithm label "iter4").
process.hltIter4PFJetCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    src = cms.InputTag( "hltIter4PFJetCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTracker = cms.string( "" ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    NavigationSchool = cms.string( "" ),
    TrajectoryInEvent = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "iter4" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" )
)
# High-purity selection on the fitted iteration-4 tracks: vertex-adapted
# d0/dz significance cuts, chi2/ndof cut, and a minimum of 5 tracker layers
# with no lost layers allowed (maxNumberLostLayers = 0).
process.hltIter4PFlowTrackSelectionHighPurity = cms.EDProducer( "AnalyticalTrackSelector",
    max_d0 = cms.double( 100.0 ),
    minNumber3DLayers = cms.uint32( 0 ),
    applyAbsCutsIfNoPV = cms.bool( False ),
    qualityBit = cms.string( "highPurity" ),
    minNumberLayers = cms.uint32( 5 ),
    chi2n_par = cms.double( 0.25 ),
    useVtxError = cms.bool( False ),
    nSigmaZ = cms.double( 3.0 ),
    dz_par2 = cms.vdouble( 1.0, 4.0 ),
    applyAdaptedPVCuts = cms.bool( True ),
    min_eta = cms.double( -9999.0 ),
    dz_par1 = cms.vdouble( 1.0, 4.0 ),
    copyTrajectories = cms.untracked.bool( True ),
    vtxNumber = cms.int32( -1 ),
    max_d0NoPV = cms.double( 100.0 ),
    keepAllTracks = cms.bool( False ),
    maxNumberLostLayers = cms.uint32( 0 ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    max_relpterr = cms.double( 9999.0 ),
    copyExtras = cms.untracked.bool( True ),
    max_z0NoPV = cms.double( 100.0 ),
    vertexCut = cms.string( "tracksSize>=3" ),
    max_z0 = cms.double( 100.0 ),
    useVertices = cms.bool( True ),
    min_nhits = cms.uint32( 0 ),
    src = cms.InputTag( "hltIter4PFJetCtfWithMaterialTracks" ),
    chi2n_no1Dmod_par = cms.double( 9999.0 ),
    vertices = cms.InputTag( "hltPixelVertices" ),
    max_eta = cms.double( 9999.0 ),
    d0_par2 = cms.vdouble( 1.0, 4.0 ),
    d0_par1 = cms.vdouble( 1.0, 4.0 ),
    res_par = cms.vdouble( 0.0030, 0.0010 ),
    minHitsToBypassChecks = cms.uint32( 20 )
)
# Merge the cumulative iteration-0..3 tracks with the new iteration-4
# high-purity tracks, completing the iterative-tracking collection.
process.hltIter4Merged = cms.EDProducer( "SimpleTrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    promoteTrackQuality = cms.bool( True ),
    MinPT = cms.double( 0.05 ),
    copyExtras = cms.untracked.bool( True ),
    Epsilon = cms.double( -0.0010 ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    TrackProducer1 = cms.string( "hltIter3Merged" ),
    MinFound = cms.int32( 3 ),
    TrackProducer2 = cms.string( "hltIter4PFlowTrackSelectionHighPurity" ),
    LostHitPenalty = cms.double( 20.0 ),
    FoundHitBonus = cms.double( 5.0 )
)
# Add the L3 muon tracker tracks to the iterative-tracking output; this
# merged collection is the tracker-track input for particle flow.
process.hltPFMuonMerging = cms.EDProducer( "SimpleTrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    promoteTrackQuality = cms.bool( True ),
    MinPT = cms.double( 0.05 ),
    copyExtras = cms.untracked.bool( True ),
    Epsilon = cms.double( -0.0010 ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    TrackProducer1 = cms.string( "hltL3TkTracksFromL2" ),
    MinFound = cms.int32( 3 ),
    TrackProducer2 = cms.string( "hltIter4Merged" ),
    LostHitPenalty = cms.double( 20.0 ),
    FoundHitBonus = cms.double( 5.0 )
)
# Re-point the L3 muon links at the merged PF tracker-track collection
# (matching by shared-hit fraction) so muon ID can run on it.
process.hltMuonLinks = cms.EDProducer( "MuonLinksProducerForHLT",
    pMin = cms.double( 2.5 ),
    InclusiveTrackerTrackCollection = cms.InputTag( "hltPFMuonMerging" ),
    shareHitFraction = cms.double( 0.8 ),
    LinkCollection = cms.InputTag( "hltL3MuonsLinksCombination" ),
    ptMin = cms.double( 2.5 )
)
# Build reco::Muons for the HLT particle-flow step: muon identification over
# the merged tracker tracks, the re-pointed links, and the standalone L2
# muons (inputCollectionLabels / inputCollectionTypes), with calo/track/jet
# isolation extractors and DT/CSC timing filling.  The many nested PSets
# mirror the offline MuonIdProducer configuration with HLT input tags.
process.hltMuons = cms.EDProducer( "MuonIdProducer",
    TrackExtractorPSet = cms.PSet(
      Diff_z = cms.double( 0.2 ),
      inputTrackCollection = cms.InputTag( "hltPFMuonMerging" ),
      BeamSpotLabel = cms.InputTag( "hltOnlineBeamSpot" ),
      ComponentName = cms.string( "TrackExtractor" ),
      DR_Max = cms.double( 1.0 ),
      Diff_r = cms.double( 0.1 ),
      Chi2Prob_Min = cms.double( -1.0 ),
      DR_Veto = cms.double( 0.01 ),
      NHits_Min = cms.uint32( 0 ),
      Chi2Ndof_Max = cms.double( 1.0E64 ),
      Pt_Min = cms.double( -1.0 ),
      DepositLabel = cms.untracked.string( "" ),
      BeamlineOption = cms.string( "BeamSpotFromEvent" )
    ),
    maxAbsEta = cms.double( 3.0 ),
    fillGlobalTrackRefits = cms.bool( False ),
    # Options for the ghost/duplicate segment-arbitration cleaner (not run,
    # see runArbitrationCleaner = False below).
    arbitrationCleanerOptions = cms.PSet(
      Clustering = cms.bool( True ),
      ME1a = cms.bool( True ),
      ClusterDPhi = cms.double( 0.6 ),
      OverlapDTheta = cms.double( 0.02 ),
      Overlap = cms.bool( True ),
      OverlapDPhi = cms.double( 0.0786 ),
      ClusterDTheta = cms.double( 0.02 )
    ),
    globalTrackQualityInputTag = cms.InputTag( "glbTrackQual" ),
    addExtraSoftMuons = cms.bool( False ),
    debugWithTruthMatching = cms.bool( False ),
    # Calorimeter isolation deposits (ecal/hcal/ho) around the muon track.
    CaloExtractorPSet = cms.PSet(
      PrintTimeReport = cms.untracked.bool( False ),
      DR_Max = cms.double( 1.0 ),
      DepositInstanceLabels = cms.vstring( 'ecal',
        'hcal',
        'ho' ),
      Noise_HE = cms.double( 0.2 ),
      NoiseTow_EB = cms.double( 0.04 ),
      NoiseTow_EE = cms.double( 0.15 ),
      Threshold_H = cms.double( 0.5 ),
      ServiceParameters = cms.PSet(
        Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' ),
        RPCLayers = cms.bool( False ),
        UseMuonNavigation = cms.untracked.bool( False )
      ),
      Threshold_E = cms.double( 0.2 ),
      PropagatorName = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
      DepositLabel = cms.untracked.string( "Cal" ),
      UseRecHitsFlag = cms.bool( False ),
      # Track-calo association for isolation: calo towers only
      # (useCalo = True; individual ecal/hcal/ho/muon inputs disabled).
      TrackAssociatorParameters = cms.PSet(
        muonMaxDistanceSigmaX = cms.double( 0.0 ),
        muonMaxDistanceSigmaY = cms.double( 0.0 ),
        CSCSegmentCollectionLabel = cms.InputTag( "hltCscSegments" ),
        dRHcal = cms.double( 1.0 ),
        dRPreshowerPreselection = cms.double( 0.2 ),
        CaloTowerCollectionLabel = cms.InputTag( "hltTowerMakerForPF" ),
        useEcal = cms.bool( False ),
        dREcal = cms.double( 1.0 ),
        dREcalPreselection = cms.double( 1.0 ),
        HORecHitCollectionLabel = cms.InputTag( "hltHoreco" ),
        dRMuon = cms.double( 9999.0 ),
        propagateAllDirections = cms.bool( True ),
        muonMaxDistanceX = cms.double( 5.0 ),
        muonMaxDistanceY = cms.double( 5.0 ),
        useHO = cms.bool( False ),
        trajectoryUncertaintyTolerance = cms.double( -1.0 ),
        usePreshower = cms.bool( False ),
        DTRecSegment4DCollectionLabel = cms.InputTag( "hltDt4DSegments" ),
        EERecHitCollectionLabel = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEE' ),
        dRHcalPreselection = cms.double( 1.0 ),
        useMuon = cms.bool( False ),
        useCalo = cms.bool( True ),
        accountForTrajectoryChangeCalo = cms.bool( False ),
        EBRecHitCollectionLabel = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEB' ),
        dRMuonPreselection = cms.double( 0.2 ),
        truthMatch = cms.bool( False ),
        HBHERecHitCollectionLabel = cms.InputTag( "hltHbhereco" ),
        useHcal = cms.bool( False )
      ),
      Threshold_HO = cms.double( 0.5 ),
      Noise_EE = cms.double( 0.1 ),
      Noise_EB = cms.double( 0.025 ),
      DR_Veto_H = cms.double( 0.1 ),
      CenterConeOnCalIntersection = cms.bool( False ),
      ComponentName = cms.string( "CaloExtractorByAssociator" ),
      Noise_HB = cms.double( 0.2 ),
      DR_Veto_E = cms.double( 0.07 ),
      DR_Veto_HO = cms.double( 0.1 ),
      Noise_HO = cms.double( 0.2 )
    ),
    runArbitrationCleaner = cms.bool( False ),
    fillEnergy = cms.bool( True ),
    TrackerKinkFinderParameters = cms.PSet(
      usePosition = cms.bool( False ),
      diagonalOnly = cms.bool( False )
    ),
    # Muon timing from DT, CSC and ECAL measurements.
    TimingFillerParameters = cms.PSet(
      UseDT = cms.bool( True ),
      ErrorDT = cms.double( 6.0 ),
      EcalEnergyCut = cms.double( 0.4 ),
      ErrorEB = cms.double( 2.085 ),
      ErrorCSC = cms.double( 7.4 ),
      CSCTimingParameters = cms.PSet(
        CSCsegments = cms.InputTag( "hltCscSegments" ),
        CSCTimeOffset = cms.double( 0.0 ),
        CSCStripTimeOffset = cms.double( 0.0 ),
        MatchParameters = cms.PSet(
          CSCsegments = cms.InputTag( "hltCscSegments" ),
          DTsegments = cms.InputTag( "hltDt4DSegments" ),
          DTradius = cms.double( 0.01 ),
          TightMatchDT = cms.bool( False ),
          TightMatchCSC = cms.bool( True )
        ),
        debug = cms.bool( False ),
        UseStripTime = cms.bool( True ),
        CSCStripError = cms.double( 7.0 ),
        CSCWireError = cms.double( 8.6 ),
        CSCWireTimeOffset = cms.double( 0.0 ),
        ServiceParameters = cms.PSet(
          Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' ),
          RPCLayers = cms.bool( True )
        ),
        PruneCut = cms.double( 100.0 ),
        UseWireTime = cms.bool( True )
      ),
      DTTimingParameters = cms.PSet(
        HitError = cms.double( 6.0 ),
        DoWireCorr = cms.bool( False ),
        MatchParameters = cms.PSet(
          CSCsegments = cms.InputTag( "hltCscSegments" ),
          DTsegments = cms.InputTag( "hltDt4DSegments" ),
          DTradius = cms.double( 0.01 ),
          TightMatchDT = cms.bool( False ),
          TightMatchCSC = cms.bool( True )
        ),
        debug = cms.bool( False ),
        DTsegments = cms.InputTag( "hltDt4DSegments" ),
        PruneCut = cms.double( 10000.0 ),
        RequireBothProjections = cms.bool( False ),
        HitsMin = cms.int32( 5 ),
        DTTimeOffset = cms.double( 2.7 ),
        DropTheta = cms.bool( True ),
        UseSegmentT0 = cms.bool( False ),
        ServiceParameters = cms.PSet(
          Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' ),
          RPCLayers = cms.bool( True )
        )
      ),
      ErrorEE = cms.double( 6.95 ),
      UseCSC = cms.bool( True ),
      UseECAL = cms.bool( True )
    ),
    inputCollectionTypes = cms.vstring( 'inner tracks',
      'links',
      'outer tracks' ),
    minCaloCompatibility = cms.double( 0.6 ),
    ecalDepositName = cms.string( "ecal" ),
    minP = cms.double( 10.0 ),
    fillIsolation = cms.bool( True ),
    jetDepositName = cms.string( "jets" ),
    hoDepositName = cms.string( "ho" ),
    writeIsoDeposits = cms.bool( False ),
    maxAbsPullX = cms.double( 4.0 ),
    maxAbsPullY = cms.double( 9999.0 ),
    minPt = cms.double( 10.0 ),
    # Track-calo/muon-system association for the muon ID itself (here the
    # individual detector inputs are enabled: ecal/hcal/ho/muon segments).
    TrackAssociatorParameters = cms.PSet(
      muonMaxDistanceSigmaX = cms.double( 0.0 ),
      muonMaxDistanceSigmaY = cms.double( 0.0 ),
      CSCSegmentCollectionLabel = cms.InputTag( "hltCscSegments" ),
      dRHcal = cms.double( 9999.0 ),
      dRPreshowerPreselection = cms.double( 0.2 ),
      CaloTowerCollectionLabel = cms.InputTag( "hltTowerMakerForPF" ),
      useEcal = cms.bool( True ),
      dREcal = cms.double( 9999.0 ),
      dREcalPreselection = cms.double( 0.05 ),
      HORecHitCollectionLabel = cms.InputTag( "hltHoreco" ),
      dRMuon = cms.double( 9999.0 ),
      propagateAllDirections = cms.bool( True ),
      muonMaxDistanceX = cms.double( 5.0 ),
      muonMaxDistanceY = cms.double( 5.0 ),
      useHO = cms.bool( True ),
      trajectoryUncertaintyTolerance = cms.double( -1.0 ),
      usePreshower = cms.bool( False ),
      DTRecSegment4DCollectionLabel = cms.InputTag( "hltDt4DSegments" ),
      EERecHitCollectionLabel = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEE' ),
      dRHcalPreselection = cms.double( 0.2 ),
      useMuon = cms.bool( True ),
      useCalo = cms.bool( False ),
      accountForTrajectoryChangeCalo = cms.bool( False ),
      EBRecHitCollectionLabel = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEB' ),
      dRMuonPreselection = cms.double( 0.2 ),
      truthMatch = cms.bool( False ),
      HBHERecHitCollectionLabel = cms.InputTag( "hltHbhereco" ),
      useHcal = cms.bool( True )
    ),
    # Jet-based isolation deposits around the muon.
    JetExtractorPSet = cms.PSet(
      PrintTimeReport = cms.untracked.bool( False ),
      ExcludeMuonVeto = cms.bool( True ),
      TrackAssociatorParameters = cms.PSet(
        muonMaxDistanceSigmaX = cms.double( 0.0 ),
        muonMaxDistanceSigmaY = cms.double( 0.0 ),
        CSCSegmentCollectionLabel = cms.InputTag( "hltCscSegments" ),
        dRHcal = cms.double( 0.5 ),
        dRPreshowerPreselection = cms.double( 0.2 ),
        CaloTowerCollectionLabel = cms.InputTag( "hltTowerMakerForPF" ),
        useEcal = cms.bool( False ),
        dREcal = cms.double( 0.5 ),
        dREcalPreselection = cms.double( 0.5 ),
        HORecHitCollectionLabel = cms.InputTag( "hltHoreco" ),
        dRMuon = cms.double( 9999.0 ),
        propagateAllDirections = cms.bool( True ),
        muonMaxDistanceX = cms.double( 5.0 ),
        muonMaxDistanceY = cms.double( 5.0 ),
        useHO = cms.bool( False ),
        trajectoryUncertaintyTolerance = cms.double( -1.0 ),
        usePreshower = cms.bool( False ),
        DTRecSegment4DCollectionLabel = cms.InputTag( "hltDt4DSegments" ),
        EERecHitCollectionLabel = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEE' ),
        dRHcalPreselection = cms.double( 0.5 ),
        useMuon = cms.bool( False ),
        useCalo = cms.bool( True ),
        accountForTrajectoryChangeCalo = cms.bool( False ),
        EBRecHitCollectionLabel = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEB' ),
        dRMuonPreselection = cms.double( 0.2 ),
        truthMatch = cms.bool( False ),
        HBHERecHitCollectionLabel = cms.InputTag( "hltHbhereco" ),
        useHcal = cms.bool( False )
      ),
      ServiceParameters = cms.PSet(
        Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' ),
        RPCLayers = cms.bool( False ),
        UseMuonNavigation = cms.untracked.bool( False )
      ),
      ComponentName = cms.string( "JetExtractor" ),
      DR_Max = cms.double( 1.0 ),
      PropagatorName = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
      JetCollectionLabel = cms.InputTag( "hltAntiKT5CaloJetsPFEt5" ),
      DR_Veto = cms.double( 0.1 ),
      Threshold = cms.double( 5.0 )
    ),
    fillGlobalTrackQuality = cms.bool( False ),
    minPCaloMuon = cms.double( 1.0E9 ),
    maxAbsDy = cms.double( 9999.0 ),
    fillCaloCompatibility = cms.bool( True ),
    fillMatching = cms.bool( True ),
    MuonCaloCompatibility = cms.PSet(
      allSiPMHO = cms.bool( False ),
      PionTemplateFileName = cms.FileInPath( "RecoMuon/MuonIdentification/data/MuID_templates_pions_lowPt_3_1_norm.root" ),
      MuonTemplateFileName = cms.FileInPath( "RecoMuon/MuonIdentification/data/MuID_templates_muons_lowPt_3_1_norm.root" ),
      delta_eta = cms.double( 0.02 ),
      delta_phi = cms.double( 0.02 )
    ),
    fillTrackerKink = cms.bool( False ),
    hcalDepositName = cms.string( "hcal" ),
    sigmaThresholdToFillCandidateP4WithGlobalFit = cms.double( 2.0 ),
    inputCollectionLabels = cms.VInputTag( 'hltPFMuonMerging','hltMuonLinks','hltL2Muons' ),
    trackDepositName = cms.string( "tracker" ),
    maxAbsDx = cms.double( 3.0 ),
    ptThresholdToFillCandidateP4WithGlobalFit = cms.double( 200.0 ),
    minNumberOfMatches = cms.int32( 1 )
)
# Preshower (ES) unpacking: lazy raw-to-rechit facility over the full raw
# data collection.
process.hltESRawToRecHitFacility = cms.EDProducer( "EcalRawToRecHitFacility",
    sourceTag = cms.InputTag( "rawDataCollector" ),
    workerName = cms.string( "hltESPESUnpackerWorker" )
)
# Region-of-interest selection requesting ALL ES FEDs (type = "all",
# doES = True) from the facilities above.
process.hltEcalRegionalESRestFEDs = cms.EDProducer( "EcalRawToRecHitRoI",
    JetJobPSet = cms.VPSet( 
    ),
    sourceTag_es = cms.InputTag( "hltESRawToRecHitFacility" ),
    doES = cms.bool( True ),
    type = cms.string( "all" ),
    sourceTag = cms.InputTag( "hltEcalRawToRecHitFacility" ),
    EmJobPSet = cms.VPSet( 
    ),
    CandJobPSet = cms.VPSet( 
    ),
    MuonJobPSet = cms.PSet(  ),
    esInstance = cms.untracked.string( "es" ),
    MuJobPSet = cms.PSet(  )
)
# Materialize the preshower rechits ("EcalRecHitsES") for the selected FEDs.
process.hltESRecHitAll = cms.EDProducer( "EcalRawToRecHitProducer",
    splitOutput = cms.bool( False ),
    rechitCollection = cms.string( "EcalRecHitsES" ),
    EErechitCollection = cms.string( "" ),
    EBrechitCollection = cms.string( "" ),
    sourceTag = cms.InputTag( 'hltEcalRegionalESRestFEDs','es' ),
    cleaningConfig = cms.PSet(  ),
    lazyGetterTag = cms.InputTag( "hltESRawToRecHitFacility" )
)
# Particle-flow rechits from the ECAL barrel/endcap rechit collections,
# with per-region energy thresholds and timing/topological cleaning.
process.hltParticleFlowRecHitECAL = cms.EDProducer( "PFRecHitProducerECAL",
    crossBarrelEndcapBorder = cms.bool( False ),
    verbose = cms.untracked.bool( False ),
    ecalRecHitsEE = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEE' ),
    ecalRecHitsEB = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEB' ),
    thresh_Cleaning_EB = cms.double( 2.0 ),
    timing_Cleaning = cms.bool( True ),
    thresh_Barrel = cms.double( 0.08 ),
    thresh_Cleaning_EE = cms.double( 1.0E9 ),
    topological_Cleaning = cms.bool( True ),
    thresh_Endcap = cms.double( 0.3 )
)
# Particle-flow rechits from HCAL (HBHE rechits + calo towers) and the
# forward calorimeter HF, including long/short-fibre and pulse-shape
# cleaning cuts and HF-specific calibration factors.
process.hltParticleFlowRecHitHCAL = cms.EDProducer( "PFRecHitProducerHCAL",
    ECAL_Compensate = cms.bool( False ),
    ECAL_Dead_Code = cms.uint32( 10 ),
    MinLongTiming_Cut = cms.double( -5.0 ),
    verbose = cms.untracked.bool( False ),
    ECAL_Compensation = cms.double( 0.5 ),
    MaxLongTiming_Cut = cms.double( 5.0 ),
    weight_HFhad = cms.double( 1.0 ),
    ApplyPulseDPG = cms.bool( True ),
    ECAL_Threshold = cms.double( 10.0 ),
    ApplyTimeDPG = cms.bool( False ),
    caloTowers = cms.InputTag( "hltTowerMakerForPF" ),
    hcalRecHitsHBHE = cms.InputTag( "hltHbhereco" ),
    LongFibre_Fraction = cms.double( 0.1 ),
    MaxShortTiming_Cut = cms.double( 5.0 ),
    HcalMaxAllowedHFLongShortSev = cms.int32( 9 ),
    thresh_Barrel = cms.double( 0.4 ),
    navigation_HF = cms.bool( True ),
    HcalMaxAllowedHFInTimeWindowSev = cms.int32( 9 ),
    HF_Calib_29 = cms.double( 1.07 ),
    LongFibre_Cut = cms.double( 120.0 ),
    EM_Depth = cms.double( 22.0 ),
    weight_HFem = cms.double( 1.0 ),
    LongShortFibre_Cut = cms.double( 30.0 ),
    MinShortTiming_Cut = cms.double( -5.0 ),
    HCAL_Calib = cms.bool( True ),
    thresh_HF = cms.double( 0.4 ),
    HcalMaxAllowedHFDigiTimeSev = cms.int32( 9 ),
    thresh_Endcap = cms.double( 0.4 ),
    HcalMaxAllowedChannelStatusSev = cms.int32( 9 ),
    hcalRecHitsHF = cms.InputTag( "hltHfreco" ),
    ShortFibre_Cut = cms.double( 60.0 ),
    ApplyLongShortDPG = cms.bool( True ),
    HF_Calib = cms.bool( True ),
    HAD_Depth = cms.double( 47.0 ),
    ShortFibre_Fraction = cms.double( 0.01 ),
    HCAL_Calib_29 = cms.double( 1.35 )
)
# Particle-flow rechits from the preshower rechits produced above.
process.hltParticleFlowRecHitPS = cms.EDProducer( "PFRecHitProducerPS",
    ecalRecHitsES = cms.InputTag( 'hltESRecHitAll','EcalRecHitsES' ),
    thresh_Barrel = cms.double( 7.0E-6 ),
    verbose = cms.untracked.bool( False ),
    thresh_Endcap = cms.double( 7.0E-6 )
)
# Particle-flow clustering of the ECAL PF rechits (seed/cell thresholds and
# spike-cleaning cuts tuned separately for barrel and endcap).
process.hltParticleFlowClusterECAL = cms.EDProducer( "PFClusterProducer",
    posCalcNCrystal = cms.int32( 9 ),
    verbose = cms.untracked.bool( False ),
    showerSigma = cms.double( 1.5 ),
    thresh_DoubleSpike_Barrel = cms.double( 10.0 ),
    thresh_Pt_Barrel = cms.double( 0.0 ),
    thresh_Clean_Barrel = cms.double( 4.0 ),
    PFRecHits = cms.InputTag( "hltParticleFlowRecHitECAL" ),
    cleanRBXandHPDs = cms.bool( False ),
    depthCor_B = cms.double( 7.4 ),
    depthCor_A = cms.double( 0.89 ),
    nNeighbours = cms.int32( 8 ),
    thresh_DoubleSpike_Endcap = cms.double( 1.0E9 ),
    minS4S1_Clean_Barrel = cms.vdouble( 0.04, -0.024 ),
    thresh_Pt_Seed_Barrel = cms.double( 0.0 ),
    thresh_Pt_Endcap = cms.double( 0.0 ),
    thresh_Barrel = cms.double( 0.08 ),
    thresh_Clean_Endcap = cms.double( 15.0 ),
    thresh_Seed_Barrel = cms.double( 0.23 ),
    depthCor_Mode = cms.int32( 1 ),
    depthCor_B_preshower = cms.double( 4.0 ),
    useCornerCells = cms.bool( True ),
    depthCor_A_preshower = cms.double( 0.89 ),
    thresh_Endcap = cms.double( 0.3 ),
    thresh_Pt_Seed_Endcap = cms.double( 0.15 ),
    minS4S1_Clean_Endcap = cms.vdouble( 0.02, -0.0125 ),
    thresh_Seed_Endcap = cms.double( 0.6 ),
    minS6S2_DoubleSpike_Endcap = cms.double( -1.0 ),
    minS6S2_DoubleSpike_Barrel = cms.double( 0.04 )
)
# Particle-flow clustering of the HCAL PF rechits (wider shower sigma,
# RBX/HPD noise cleaning enabled).
process.hltParticleFlowClusterHCAL = cms.EDProducer( "PFClusterProducer",
    posCalcNCrystal = cms.int32( 5 ),
    verbose = cms.untracked.bool( False ),
    showerSigma = cms.double( 10.0 ),
    thresh_DoubleSpike_Barrel = cms.double( 1.0E9 ),
    thresh_Pt_Barrel = cms.double( 0.0 ),
    thresh_Clean_Barrel = cms.double( 100000.0 ),
    PFRecHits = cms.InputTag( "hltParticleFlowRecHitHCAL" ),
    cleanRBXandHPDs = cms.bool( True ),
    depthCor_B = cms.double( 7.4 ),
    depthCor_A = cms.double( 0.89 ),
    nNeighbours = cms.int32( 4 ),
    thresh_DoubleSpike_Endcap = cms.double( 1.0E9 ),
    minS4S1_Clean_Barrel = cms.vdouble( 0.032, -0.045 ),
    thresh_Pt_Seed_Barrel = cms.double( 0.0 ),
    thresh_Pt_Endcap = cms.double( 0.0 ),
    thresh_Barrel = cms.double( 0.8 ),
    thresh_Clean_Endcap = cms.double( 100000.0 ),
    thresh_Seed_Barrel = cms.double( 0.8 ),
    depthCor_Mode = cms.int32( 0 ),
    depthCor_B_preshower = cms.double( 4.0 ),
    useCornerCells = cms.bool( True ),
    depthCor_A_preshower = cms.double( 0.89 ),
    thresh_Endcap = cms.double( 0.8 ),
    thresh_Pt_Seed_Endcap = cms.double( 0.0 ),
    minS4S1_Clean_Endcap = cms.vdouble( 0.032, -0.045 ),
    thresh_Seed_Endcap = cms.double( 1.1 ),
    minS6S2_DoubleSpike_Endcap = cms.double( -1.0 ),
    minS6S2_DoubleSpike_Barrel = cms.double( -1.0 )
)
# Particle-flow clustering of the HF electromagnetic ("HFEM") rechits
# (nNeighbours = 0: single-cell clusters).
process.hltParticleFlowClusterHFEM = cms.EDProducer( "PFClusterProducer",
    posCalcNCrystal = cms.int32( 5 ),
    verbose = cms.untracked.bool( False ),
    showerSigma = cms.double( 10.0 ),
    thresh_DoubleSpike_Barrel = cms.double( 1.0E9 ),
    thresh_Pt_Barrel = cms.double( 0.0 ),
    thresh_Clean_Barrel = cms.double( 80.0 ),
    PFRecHits = cms.InputTag( 'hltParticleFlowRecHitHCAL','HFEM' ),
    cleanRBXandHPDs = cms.bool( False ),
    depthCor_B = cms.double( 7.4 ),
    depthCor_A = cms.double( 0.89 ),
    nNeighbours = cms.int32( 0 ),
    thresh_DoubleSpike_Endcap = cms.double( 1.0E9 ),
    minS4S1_Clean_Barrel = cms.vdouble( 0.11, -0.19 ),
    thresh_Pt_Seed_Barrel = cms.double( 0.0 ),
    thresh_Pt_Endcap = cms.double( 0.0 ),
    thresh_Barrel = cms.double( 0.8 ),
    thresh_Clean_Endcap = cms.double( 80.0 ),
    thresh_Seed_Barrel = cms.double( 1.4 ),
    depthCor_Mode = cms.int32( 0 ),
    depthCor_B_preshower = cms.double( 4.0 ),
    useCornerCells = cms.bool( False ),
    depthCor_A_preshower = cms.double( 0.89 ),
    thresh_Endcap = cms.double( 0.8 ),
    thresh_Pt_Seed_Endcap = cms.double( 0.0 ),
    minS4S1_Clean_Endcap = cms.vdouble( 0.11, -0.19 ),
    thresh_Seed_Endcap = cms.double( 1.4 ),
    minS6S2_DoubleSpike_Endcap = cms.double( -1.0 ),
    minS6S2_DoubleSpike_Barrel = cms.double( -1.0 )
)
# Particle-flow clustering of the HF hadronic ("HFHAD") rechits
# (same single-cell scheme as HFEM with its own cleaning cuts).
process.hltParticleFlowClusterHFHAD = cms.EDProducer( "PFClusterProducer",
    posCalcNCrystal = cms.int32( 5 ),
    verbose = cms.untracked.bool( False ),
    showerSigma = cms.double( 10.0 ),
    thresh_DoubleSpike_Barrel = cms.double( 1.0E9 ),
    thresh_Pt_Barrel = cms.double( 0.0 ),
    thresh_Clean_Barrel = cms.double( 120.0 ),
    PFRecHits = cms.InputTag( 'hltParticleFlowRecHitHCAL','HFHAD' ),
    cleanRBXandHPDs = cms.bool( False ),
    depthCor_B = cms.double( 7.4 ),
    depthCor_A = cms.double( 0.89 ),
    nNeighbours = cms.int32( 0 ),
    thresh_DoubleSpike_Endcap = cms.double( 1.0E9 ),
    minS4S1_Clean_Barrel = cms.vdouble( 0.045, -0.08 ),
    thresh_Pt_Seed_Barrel = cms.double( 0.0 ),
    thresh_Pt_Endcap = cms.double( 0.0 ),
    thresh_Barrel = cms.double( 0.8 ),
    thresh_Clean_Endcap = cms.double( 120.0 ),
    thresh_Seed_Barrel = cms.double( 1.4 ),
    depthCor_Mode = cms.int32( 0 ),
    depthCor_B_preshower = cms.double( 4.0 ),
    useCornerCells = cms.bool( False ),
    depthCor_A_preshower = cms.double( 0.89 ),
    thresh_Endcap = cms.double( 0.8 ),
    thresh_Pt_Seed_Endcap = cms.double( 0.0 ),
    minS4S1_Clean_Endcap = cms.vdouble( 0.045, -0.08 ),
    thresh_Seed_Endcap = cms.double( 1.4 ),
    minS6S2_DoubleSpike_Endcap = cms.double( -1.0 ),
    minS6S2_DoubleSpike_Barrel = cms.double( -1.0 )
)
# Particle-flow clustering of the preshower PF rechits (very small energy
# thresholds appropriate to ES sensor signals).
process.hltParticleFlowClusterPS = cms.EDProducer( "PFClusterProducer",
    posCalcNCrystal = cms.int32( -1 ),
    verbose = cms.untracked.bool( False ),
    showerSigma = cms.double( 0.2 ),
    thresh_DoubleSpike_Barrel = cms.double( 1.0E9 ),
    thresh_Pt_Barrel = cms.double( 0.0 ),
    thresh_Clean_Barrel = cms.double( 100000.0 ),
    PFRecHits = cms.InputTag( "hltParticleFlowRecHitPS" ),
    cleanRBXandHPDs = cms.bool( False ),
    depthCor_B = cms.double( 7.4 ),
    depthCor_A = cms.double( 0.89 ),
    nNeighbours = cms.int32( 8 ),
    thresh_DoubleSpike_Endcap = cms.double( 1.0E9 ),
    minS4S1_Clean_Barrel = cms.vdouble( 0.0, 0.0 ),
    thresh_Pt_Seed_Barrel = cms.double( 0.0 ),
    thresh_Pt_Endcap = cms.double( 0.0 ),
    thresh_Barrel = cms.double( 6.0E-5 ),
    thresh_Clean_Endcap = cms.double( 100000.0 ),
    thresh_Seed_Barrel = cms.double( 1.2E-4 ),
    depthCor_Mode = cms.int32( 0 ),
    depthCor_B_preshower = cms.double( 4.0 ),
    useCornerCells = cms.bool( False ),
    depthCor_A_preshower = cms.double( 0.89 ),
    thresh_Endcap = cms.double( 6.0E-5 ),
    thresh_Pt_Seed_Endcap = cms.double( 0.0 ),
    minS4S1_Clean_Endcap = cms.vdouble( 0.0, 0.0 ),
    thresh_Seed_Endcap = cms.double( 1.2E-4 ),
    minS6S2_DoubleSpike_Endcap = cms.double( -1.0 ),
    minS6S2_DoubleSpike_Barrel = cms.double( -1.0 )
)
# Convert the merged tracker+muon tracks into lightweight PF tracks
# (no quality requirement applied).
process.hltLightPFTracks = cms.EDProducer( "LightPFTrackProducer",
    TrackQuality = cms.string( "none" ),
    UseQuality = cms.bool( False ),
    TkColList = cms.VInputTag( 'hltPFMuonMerging' )
)
# Link PF elements (tracks, ECAL/HCAL/PS/HF clusters, muons) into PF blocks.
# Electron/photon/conversion/V0/nuclear-interaction inputs are disabled for
# this HLT configuration (empty InputTags + use* flags set to False).
process.hltParticleFlowBlock = cms.EDProducer( "PFBlockProducer",
    PFClustersHCAL = cms.InputTag( "hltParticleFlowClusterHCAL" ),
    RecMuons = cms.InputTag( "hltMuons" ),
    PFClustersHFHAD = cms.InputTag( "hltParticleFlowClusterHFHAD" ),
    PFConversions = cms.InputTag( "" ),
    useConversions = cms.bool( False ),
    nuclearInteractionsPurity = cms.uint32( 1 ),
    PFClustersECAL = cms.InputTag( "hltParticleFlowClusterECAL" ),
    verbose = cms.untracked.bool( False ),
    PFClustersPS = cms.InputTag( "hltParticleFlowClusterPS" ),
    usePFatHLT = cms.bool( True ),
    PFClustersHO = cms.InputTag( "hltParticleFlowClusterHO" ),
    useIterTracking = cms.bool( False ),
    useConvBremPFRecTracks = cms.bool( False ),
    useV0 = cms.bool( False ),
    useNuclear = cms.bool( False ),
    EGPhotons = cms.InputTag( "" ),
    ConvBremGsfRecTracks = cms.InputTag( "" ),
    useKDTreeTrackEcalLinker = cms.bool( True ),
    useConvBremGsfTracks = cms.bool( False ),
    pf_DPtoverPt_Cut = cms.vdouble( 0.5, 0.5, 0.5, 0.5, 0.5 ),
    GsfRecTracks = cms.InputTag( "" ),
    RecTracks = cms.InputTag( "hltLightPFTracks" ),
    useHO = cms.bool( False ),
    PFNuclear = cms.InputTag( "" ),
    PFV0 = cms.InputTag( "" ),
    PhotonSelectionCuts = cms.vdouble(  ),
    PFClustersHFEM = cms.InputTag( "hltParticleFlowClusterHFEM" ),
    debug = cms.untracked.bool( False ),
    useEGPhotons = cms.bool( False ),
    pf_NHit_Cut = cms.vuint32( 3, 3, 3, 3, 3 )
)
# Run the particle-flow algorithm on the blocks to produce PF candidates.
# PF electrons/photons/conversions are off (usePFElectrons/usePFPhotons =
# False); post-muon cleaning is on, post-HF cleaning off; energy
# calibrations are read from the conditions DB (useCalibrationsFromDB).
# The remaining parameters are the standard PFProducer calibration vectors
# and MVA weight-file paths.
process.hltParticleFlow = cms.EDProducer( "PFProducer",
    sumPtTrackIsoForEgammaSC_endcap = cms.double( 4.0 ),
    calibHF_use = cms.bool( False ),
    verbose = cms.untracked.bool( False ),
    minSignificance = cms.double( 2.5 ),
    usePhotonReg = cms.bool( False ),
    pf_nsigma_ECAL = cms.double( 0.0 ),
    usePFConversions = cms.bool( False ),
    useCalibrationsFromDB = cms.bool( True ),
    sumPtTrackIsoForPhoton = cms.double( -1.0 ),
    calibPFSCEle_endcap = cms.vdouble( 1.153, -16.5975, 5.668, -0.1772, 16.22, 7.326, 0.0483, -4.068, 9.406 ),
    usePFElectrons = cms.bool( False ),
    postMuonCleaning = cms.bool( True ),
    minDeltaMet = cms.double( 0.4 ),
    minSignificanceReduction = cms.double( 1.4 ),
    muon_HCAL = cms.vdouble( 3.0, 3.0 ),
    muon_HO = cms.vdouble( 0.9, 0.9 ),
    postHFCleaning = cms.bool( False ),
    factors_45 = cms.vdouble( 10.0, 100.0 ),
    cleanedHF = cms.VInputTag( 'hltParticleFlowRecHitHCAL:Cleaned','hltParticleFlowClusterHFHAD:Cleaned','hltParticleFlowClusterHFEM:Cleaned' ),
    iCfgCandConnector = cms.PSet( 
      bCalibSecondary = cms.bool( False ),
      bCalibPrimary = cms.bool( False ),
      bCorrect = cms.bool( False ),
      nuclCalibFactors = cms.vdouble( 0.8, 0.15, 0.5, 0.5, 0.05 )
    ),
    useBestMuonTrack = cms.bool( False ),
    rejectTracks_Bad = cms.bool( False ),
    coneEcalIsoForEgammaSC = cms.double( 0.3 ),
    usePFPhotons = cms.bool( False ),
    vertexCollection = cms.InputTag( "hltPixelVertices" ),
    sumPtTrackIsoForEgammaSC_barrel = cms.double( 4.0 ),
    egammaElectrons = cms.InputTag( "" ),
    calibHF_a_EMonly = cms.vdouble( 0.96945, 0.96701, 0.76309, 0.82268, 0.87583, 0.89718, 0.98674, 1.4681, 1.458, 1.458 ),
    maxDeltaPhiPt = cms.double( 7.0 ),
    muons = cms.InputTag( "hltMuons" ),
    pf_electronID_crackCorrection = cms.bool( False ),
    minHFCleaningPt = cms.double( 5.0 ),
    nTrackIsoForEgammaSC = cms.uint32( 2 ),
    pf_nsigma_HCAL = cms.double( 1.0 ),
    calibPFSCEle_Fbrem_barrel = cms.vdouble( 0.6, 6.0, -0.0255975, 0.0576727, 0.975442, -5.46394E-4, 1.26147, 25.0, -0.02025, 0.04537, 0.9728, -8.962E-4, 1.172 ),
    muon_ECAL = cms.vdouble( 0.5, 0.5 ),
    blocks = cms.InputTag( "hltParticleFlowBlock" ),
    calibPFSCEle_barrel = cms.vdouble( 1.004, -1.536, 22.88, -1.467, 0.3555, 0.6227, 14.65, 2051.0, 25.0, 0.9932, -0.5444, 0.0, 0.5438, 0.7109, 7.645, 0.2904, 0.0 ),
    pf_electron_mvaCut = cms.double( -0.1 ),
    useEGammaElectrons = cms.bool( False ),
    useHO = cms.bool( False ),
    nsigma_TRACK = cms.double( 1.0 ),
    pf_electron_output_col = cms.string( "electrons" ),
    dptRel_DispVtx = cms.double( 10.0 ),
    usePFMuonMomAssign = cms.bool( False ),
    useVerticesForNeutral = cms.bool( True ),
    pf_conv_mvaCut = cms.double( 0.0 ),
    sumEtEcalIsoForEgammaSC_endcap = cms.double( 2.0 ),
    pf_Res_mvaWeightFile = cms.string( "RecoParticleFlow/PFProducer/data/TMVARegression_BDTG_PFRes.root" ),
    usePFDecays = cms.bool( False ),
    sumPtTrackIsoSlopeForPhoton = cms.double( -1.0 ),
    calibHF_b_EMHAD = cms.vdouble( 1.27541, 0.85361, 0.86333, 0.89091, 0.94348, 0.94348, 0.9437, 1.0034, 1.0444, 1.0444 ),
    rejectTracks_Step45 = cms.bool( False ),
    pf_GlobC_mvaWeightFile = cms.string( "RecoParticleFlow/PFProducer/data/TMVARegression_BDTG_PFGlobalCorr.root" ),
    pf_locC_mvaWeightFile = cms.string( "RecoParticleFlow/PFProducer/data/TMVARegression_BDTG_PFClusterCorr.root" ),
    sumEtEcalIsoForEgammaSC_barrel = cms.double( 1.0 ),
    calibPFSCEle_Fbrem_endcap = cms.vdouble( 0.9, 6.5, -0.0692932, 0.101776, 0.995338, -0.00236548, 0.874998, 1.653, -0.0750184, 0.147, 0.923165, 4.74665E-4, 1.10782 ),
    coneTrackIsoForEgammaSC = cms.double( 0.3 ),
    usePFNuclearInteractions = cms.bool( False ),
    pf_electronID_mvaWeightFile = cms.string( "RecoParticleFlow/PFProducer/data/MVAnalysis_BDT.weights_PfElectrons23Jan_IntToFloat.txt" ),
    maxSignificance = cms.double( 2.5 ),
    calibHF_b_HADonly = cms.vdouble( 1.27541, 0.85361, 0.86333, 0.89091, 0.94348, 0.94348, 0.9437, 1.0034, 1.0444, 1.0444 ),
    calibHF_a_EMHAD = cms.vdouble( 1.42215, 1.00496, 0.68961, 0.81656, 0.98504, 0.98504, 1.00802, 1.0593, 1.4576, 1.4576 ),
    algoType = cms.uint32( 0 ),
    usePFSCEleCalib = cms.bool( True ),
    pt_Error = cms.double( 1.0 ),
    debug = cms.untracked.bool( False ),
    X0_Map = cms.string( "RecoParticleFlow/PFProducer/data/allX0histos.root" ),
    pf_convID_mvaWeightFile = cms.string( "RecoParticleFlow/PFProducer/data/MVAnalysis_BDT.weights_pfConversionAug0411.txt" ),
    calibHF_eta_step = cms.vdouble( 0.0, 2.9, 3.0, 3.2, 4.2, 4.4, 4.6, 4.8, 5.2, 5.4 ),
    useRegressionFromDB = cms.bool( False ),
    useEGammaSupercluster = cms.bool( False )
)
# Anti-kT (R = 0.5) clustering of particle-flow candidates from hltParticleFlow.
# Deterministic seeding, disk-approximated jet areas; rho estimation and
# PU offset subtraction are disabled here (rho comes from hltKT6PFJets below).
process.hltAntiKT5PFJets = cms.EDProducer( "FastjetJetProducer",
    Active_Area_Repeats = cms.int32( 5 ),
    doAreaFastjet = cms.bool( False ),
    voronoiRfact = cms.double( -9.0 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    doAreaDiskApprox = cms.bool( True ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    jetType = cms.string( "PFJet" ),
    minSeed = cms.uint32( 0 ),
    Ghost_EtaMax = cms.double( 6.0 ),
    doRhoFastjet = cms.bool( False ),
    jetAlgorithm = cms.string( "AntiKt" ),
    nSigmaPU = cms.double( 1.0 ),
    GhostArea = cms.double( 0.01 ),
    Rho_EtaMax = cms.double( 4.4 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    useDeterministicSeed = cms.bool( True ),
    doPVCorrection = cms.bool( False ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    rParam = cms.double( 0.5 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    doOutputJets = cms.bool( True ),
    src = cms.InputTag( "hltParticleFlow" ),
    inputEtMin = cms.double( 0.0 ),
    puPtMin = cms.double( 10.0 ),
    srcPVs = cms.InputTag( "hltPixelVertices" ),
    jetPtMin = cms.double( 0.0 ),
    radiusPU = cms.double( 0.5 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    doPUOffsetCorr = cms.bool( False ),
    inputEMin = cms.double( 0.0 ),
    subtractorName = cms.string( "" ),
    MinVtxNdof = cms.int32( 0 ),
    MaxVtxZ = cms.double( 15.0 ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( False ),
    DzTrVtxMax = cms.double( 0.0 ),
    sumRecHits = cms.bool( False ),
    DxyTrVtxMax = cms.double( 0.0 )
)
# kT (R = 0.6) clustering of the same PF candidates with doRhoFastjet = True:
# run primarily to compute the median pT density (rho) used by the L1Fast
# pileup correction; note the fixed non-zero seed (14327) vs. 0 above.
process.hltKT6PFJets = cms.EDProducer( "FastjetJetProducer",
    Active_Area_Repeats = cms.int32( 1 ),
    doAreaFastjet = cms.bool( False ),
    voronoiRfact = cms.double( 0.9 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    doAreaDiskApprox = cms.bool( True ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    jetType = cms.string( "PFJet" ),
    minSeed = cms.uint32( 14327 ),
    Ghost_EtaMax = cms.double( 5.0 ),
    doRhoFastjet = cms.bool( True ),
    jetAlgorithm = cms.string( "Kt" ),
    nSigmaPU = cms.double( 1.0 ),
    GhostArea = cms.double( 0.01 ),
    Rho_EtaMax = cms.double( 4.4 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    useDeterministicSeed = cms.bool( True ),
    doPVCorrection = cms.bool( False ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    rParam = cms.double( 0.6 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    doOutputJets = cms.bool( True ),
    src = cms.InputTag( "hltParticleFlow" ),
    inputEtMin = cms.double( 0.0 ),
    puPtMin = cms.double( 10.0 ),
    srcPVs = cms.InputTag( "hltPixelVertices" ),
    jetPtMin = cms.double( 0.0 ),
    radiusPU = cms.double( 0.5 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    doPUOffsetCorr = cms.bool( False ),
    inputEMin = cms.double( 0.0 ),
    subtractorName = cms.string( "" ),
    MinVtxNdof = cms.int32( 0 ),
    MaxVtxZ = cms.double( 15.0 ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( False ),
    DzTrVtxMax = cms.double( 0.0 ),
    sumRecHits = cms.bool( False ),
    DxyTrVtxMax = cms.double( 0.0 )
)
# Select neutral PF candidates by PDG id: 22 (photon), 111 (pi0),
# 130 (K0L), 310 (K0S), 2112 (neutron).
process.hltPFNeutralHadronsAndPartons = cms.EDFilter( "PdgIdPFCandidateSelector",
    pdgId = cms.vint32( 22, 111, 130, 310, 2112 ),
    src = cms.InputTag( "hltParticleFlow" )
)
# Anti-kT (R = 0.5) jets built only from the neutral PF candidates selected
# above; doPVCorrection = True here (unlike the standard PF jets).
process.hltAntiKT5PFJetsNeutral = cms.EDProducer( "FastjetJetProducer",
    Active_Area_Repeats = cms.int32( 1 ),
    doAreaFastjet = cms.bool( False ),
    voronoiRfact = cms.double( -9.0 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    doAreaDiskApprox = cms.bool( False ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    jetType = cms.string( "PFJet" ),
    minSeed = cms.uint32( 0 ),
    Ghost_EtaMax = cms.double( 5.0 ),
    doRhoFastjet = cms.bool( False ),
    jetAlgorithm = cms.string( "AntiKt" ),
    nSigmaPU = cms.double( 1.0 ),
    GhostArea = cms.double( 0.01 ),
    Rho_EtaMax = cms.double( 4.4 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    useDeterministicSeed = cms.bool( True ),
    doPVCorrection = cms.bool( True ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    rParam = cms.double( 0.5 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    doOutputJets = cms.bool( True ),
    src = cms.InputTag( "hltPFNeutralHadronsAndPartons" ),
    inputEtMin = cms.double( 0.0 ),
    puPtMin = cms.double( 10.0 ),
    srcPVs = cms.InputTag( "hltPixelVertices" ),
    jetPtMin = cms.double( 0.0 ),
    radiusPU = cms.double( 0.5 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    doPUOffsetCorr = cms.bool( False ),
    inputEMin = cms.double( 0.0 ),
    subtractorName = cms.string( "" ),
    MinVtxNdof = cms.int32( 0 ),
    MaxVtxZ = cms.double( 15.0 ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( False ),
    DzTrVtxMax = cms.double( 0.0 ),
    sumRecHits = cms.bool( False ),
    DxyTrVtxMax = cms.double( 0.0 )
)
# Apply L1Fast+L2+L3 jet energy corrections to the neutral PF jets.
process.hltAK5PFJetNeutralL1FastL2L3Corrected = cms.EDProducer( "PFJetCorrectionProducer",
    src = cms.InputTag( "hltAntiKT5PFJetsNeutral" ),
    correctors = cms.vstring( 'hltESPAK5PFL1L2L3' )
)
# Require >= 1 corrected neutral PF jet with pT > 40 GeV, |eta| < 2.6
# (triggerType 85 = TriggerJet).
process.hltCentralPFJet40Neutral = cms.EDFilter( "HLT1PFJet",
    saveTags = cms.bool( True ),
    MinPt = cms.double( 40.0 ),
    MinN = cms.int32( 1 ),
    MaxEta = cms.double( 2.6 ),
    MinMass = cms.double( -1.0 ),
    inputTag = cms.InputTag( "hltAK5PFJetNeutralL1FastL2L3Corrected" ),
    MinE = cms.double( -1.0 ),
    triggerType = cms.int32( 85 )
)
# Unconditional pass-through filter used to terminate paths.
process.hltBoolEnd = cms.EDFilter( "HLTBool",
    result = cms.bool( True )
)
# L1 seed for the displaced double-muon path:
# L1_DoubleMu0er_HighQ OR L1_DoubleMu3er_HighQ_WdEta22.
# (Trailing space in the expression string is kept as generated.)
process.hltL1sL1DoubleMu0erOR3erHighQ = cms.EDFilter( "HLTLevel1GTSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_DoubleMu0er_HighQ OR L1_DoubleMu3er_HighQ_WdEta22 " ),
    L1MuonCollectionTag = cms.InputTag( "hltL1extraParticles" ),
    L1UseL1TriggerObjectMaps = cms.bool( True ),
    L1UseAliasesForSeeding = cms.bool( True ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    L1CollectionsTag = cms.InputTag( "hltL1extraParticles" ),
    L1NrBxInEvent = cms.int32( 3 ),
    L1GtObjectMapTag = cms.InputTag( "hltL1GtObjectMap" ),
    L1TechTriggerSeeding = cms.bool( False )
)
# HLT prescale module for the DoubleDisplacedMu4_DiPFJet40Neutral path.
process.hltPreDoubleDisplacedMu4DiPFJet40Neutral = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# L1 stage: require >= 2 L1 muon candidates (no pT cut, |eta| < 2.5)
# matched to the L1 seed above.
process.hltL1DoubleMuon0erOR3erHighQL1Filtered0 = cms.EDFilter( "HLTMuonL1Filter",
    saveTags = cms.bool( False ),
    CSCTFtag = cms.InputTag( "unused" ),
    PreviousCandTag = cms.InputTag( "hltL1sL1DoubleMu0erOR3erHighQ" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 2 ),
    MaxEta = cms.double( 2.5 ),
    SelectQualities = cms.vint32(  ),
    CandTag = cms.InputTag( "hltL1extraParticles" ),
    ExcludeSingleSegmentCSC = cms.bool( False )
)
# L2 stage: >= 2 standalone-muon candidates with pT > 4 GeV, |eta| < 2.5
# and at least one muon hit (MinNhits = 1 over the single |eta| bin 0-5.0).
process.hltDoubleMu4L2PreFiltered = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( True ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltL1DoubleMuon0erOR3erHighQL1Filtered0" ),
    MinPt = cms.double( 4.0 ),
    MinN = cms.int32( 2 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 2.5 ),
    MinNhits = cms.vint32( 1 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidates" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0 )
)
# L3 stage: >= 2 tracker-confirmed muons, pT > 4 GeV, |eta| < 2.5, and a
# displaced-vertex requirement of transverse IP significance > 5
# (MinDxySig = 5.0) w.r.t. the online beam spot.
process.hltDoubleDisplacedMu4L3PreFiltered = cms.EDFilter( "HLTMuonL3PreFilter",
    MaxNormalizedChi2 = cms.double( 9999.0 ),
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltDoubleMu4L2PreFiltered" ),
    MinNmuonHits = cms.int32( 0 ),
    MinN = cms.int32( 2 ),
    MinTrackPt = cms.double( 0.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxDXYBeamSpot = cms.double( 9999.0 ),
    MinNhits = cms.int32( 1 ),
    MinDxySig = cms.double( 5.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MaxDz = cms.double( 9999.0 ),
    MaxPtDifference = cms.double( 9999.0 ),
    MaxDr = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    MinDr = cms.double( -1.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinPt = cms.double( 4.0 )
)
# Require >= 2 corrected neutral PF jets with pT > 40 GeV, |eta| < 2.6.
process.hltDiCentralPFJet40Neutral = cms.EDFilter( "HLT1PFJet",
    saveTags = cms.bool( True ),
    MinPt = cms.double( 40.0 ),
    MinN = cms.int32( 2 ),
    MaxEta = cms.double( 2.6 ),
    MinMass = cms.double( -1.0 ),
    inputTag = cms.InputTag( "hltAK5PFJetNeutralL1FastL2L3Corrected" ),
    MinE = cms.double( -1.0 ),
    triggerType = cms.int32( 85 )
)
# L1 seed for the Mu8 + jets paths: L1_Mu8_DoubleJetC20.
process.hltL1sL1Mu8DoubleJetC20 = cms.EDFilter( "HLTLevel1GTSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_Mu8_DoubleJetC20" ),
    L1MuonCollectionTag = cms.InputTag( "hltL1extraParticles" ),
    L1UseL1TriggerObjectMaps = cms.bool( True ),
    L1UseAliasesForSeeding = cms.bool( True ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    L1CollectionsTag = cms.InputTag( "hltL1extraParticles" ),
    L1NrBxInEvent = cms.int32( 3 ),
    L1GtObjectMapTag = cms.InputTag( "hltL1GtObjectMap" ),
    L1TechTriggerSeeding = cms.bool( False )
)
# HLT prescale module for the Mu8_DiJet30 path.
process.hltPreMu8DiJet30 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# L1 stage: >= 1 L1 muon (no pT cut, |eta| < 2.5) from the seed above.
process.hltL1Mu8DoubleJetC20L1Filtered0 = cms.EDFilter( "HLTMuonL1Filter",
    saveTags = cms.bool( False ),
    CSCTFtag = cms.InputTag( "unused" ),
    PreviousCandTag = cms.InputTag( "hltL1sL1Mu8DoubleJetC20" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    MaxEta = cms.double( 2.5 ),
    SelectQualities = cms.vint32(  ),
    CandTag = cms.InputTag( "hltL1extraParticles" ),
    ExcludeSingleSegmentCSC = cms.bool( False )
)
# L2 stage: >= 1 standalone muon with pT > 8 GeV, |eta| < 2.5.
process.hltL2Mu8DoubleJetC20L2Filtered8 = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( False ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltL1Mu8DoubleJetC20L1Filtered0" ),
    MinPt = cms.double( 8.0 ),
    MinN = cms.int32( 1 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 2.5 ),
    MinNhits = cms.vint32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidates" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0 )
)
# L3 stage: >= 1 tracker-confirmed muon, pT > 8 GeV, |eta| < 2.5,
# matched to the L2 candidate within dR < 2.0.
process.hltL3Mu8DoubleJetC20L3Filtered8 = cms.EDFilter( "HLTMuonL3PreFilter",
    MaxNormalizedChi2 = cms.double( 9999.0 ),
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltL2Mu8DoubleJetC20L2Filtered8" ),
    MinNmuonHits = cms.int32( 0 ),
    MinN = cms.int32( 1 ),
    MinTrackPt = cms.double( 0.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxDXYBeamSpot = cms.double( 9999.0 ),
    MinNhits = cms.int32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MaxDz = cms.double( 9999.0 ),
    MaxPtDifference = cms.double( 9999.0 ),
    MaxDr = cms.double( 2.0 ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    MinDr = cms.double( -1.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinPt = cms.double( 8.0 )
)
# Require >= 2 corrected calo jets with pT > 30 GeV, |eta| < 5.0.
process.hltDiJet30 = cms.EDFilter( "HLT1CaloJet",
    saveTags = cms.bool( False ),
    MinPt = cms.double( 30.0 ),
    MinN = cms.int32( 2 ),
    MaxEta = cms.double( 5.0 ),
    MinMass = cms.double( -1.0 ),
    inputTag = cms.InputTag( "hltCaloJetL1FastJetCorrected" ),
    MinE = cms.double( -1.0 ),
    triggerType = cms.int32( 85 )
)
# HLT prescale module for the Mu8_TriJet30 path.
process.hltPreMu8TriJet30 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# Same jet selection as hltDiJet30 but requiring >= 3 jets.
process.hltTriJet30 = cms.EDFilter( "HLT1CaloJet",
    saveTags = cms.bool( False ),
    MinPt = cms.double( 30.0 ),
    MinN = cms.int32( 3 ),
    MaxEta = cms.double( 5.0 ),
    MinMass = cms.double( -1.0 ),
    inputTag = cms.InputTag( "hltCaloJetL1FastJetCorrected" ),
    MinE = cms.double( -1.0 ),
    triggerType = cms.int32( 85 )
)
# HLT prescale module for the Mu8_QuadJet30 path.
process.hltPreMu8QuadJet30 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# Same jet selection again but requiring >= 4 jets.
process.hltQuadJet30 = cms.EDFilter( "HLT1CaloJet",
    saveTags = cms.bool( False ),
    MinPt = cms.double( 30.0 ),
    MinN = cms.int32( 4 ),
    MaxEta = cms.double( 5.0 ),
    MinMass = cms.double( -1.0 ),
    inputTag = cms.InputTag( "hltCaloJetL1FastJetCorrected" ),
    MinE = cms.double( -1.0 ),
    triggerType = cms.int32( 85 )
)
# L1 seed: OR of three central double-jet thresholds.
process.hltL1sL1DoubleJetC64ORDoubleJetC56ORDoubleJetC52 = cms.EDFilter( "HLTLevel1GTSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_DoubleJetC64 OR L1_DoubleJetC56 OR L1_DoubleJetC52" ),
    L1MuonCollectionTag = cms.InputTag( "hltL1extraParticles" ),
    L1UseL1TriggerObjectMaps = cms.bool( True ),
    L1UseAliasesForSeeding = cms.bool( True ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    L1CollectionsTag = cms.InputTag( "hltL1extraParticles" ),
    L1NrBxInEvent = cms.int32( 3 ),
    L1GtObjectMapTag = cms.InputTag( "hltL1GtObjectMap" ),
    L1TechTriggerSeeding = cms.bool( False )
)
# HLT prescale module for the IsoMu12_DoubleCentralJet65 path.
process.hltPreIsoMu12DoubleCentralJet65 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# L1 seed on L1_SingleMuOpen; note object maps are NOT used here and only
# the in-time bunch crossing is considered (L1NrBxInEvent = 1).
process.hltL1sL1SingleMuOpenCandidate = cms.EDFilter( "HLTLevel1GTSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_SingleMuOpen" ),
    L1MuonCollectionTag = cms.InputTag( "hltL1extraParticles" ),
    L1UseL1TriggerObjectMaps = cms.bool( False ),
    L1UseAliasesForSeeding = cms.bool( True ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    L1CollectionsTag = cms.InputTag( "hltL1extraParticles" ),
    L1NrBxInEvent = cms.int32( 1 ),
    L1GtObjectMapTag = cms.InputTag( "hltL1GtObjectMap" ),
    L1TechTriggerSeeding = cms.bool( False )
)
# L1 stage: >= 1 L1 muon (no pT cut, |eta| < 2.5) from the open-muon seed.
process.hltSingleMuOpenCandidateL1Filtered0 = cms.EDFilter( "HLTMuonL1Filter",
    saveTags = cms.bool( False ),
    CSCTFtag = cms.InputTag( "unused" ),
    PreviousCandTag = cms.InputTag( "hltL1sL1SingleMuOpenCandidate" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    MaxEta = cms.double( 2.5 ),
    SelectQualities = cms.vint32(  ),
    CandTag = cms.InputTag( "hltL1extraParticles" ),
    ExcludeSingleSegmentCSC = cms.bool( False )
)
# L2 stage: >= 1 standalone muon with pT > 3 GeV, |eta| < 2.5.
process.hltSingleMuOpenCandidateL2Filtered3 = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( True ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltSingleMuOpenCandidateL1Filtered0" ),
    MinPt = cms.double( 3.0 ),
    MinN = cms.int32( 1 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 2.5 ),
    MinNhits = cms.vint32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidates" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0 )
)
# L3 stage: >= 1 tracker-confirmed muon with pT > 12 GeV, |eta| < 2.5,
# matched to the L2 candidate within dR < 2.0.
process.hltSingleMuOpenCandidateL3Filtered12 = cms.EDFilter( "HLTMuonL3PreFilter",
    MaxNormalizedChi2 = cms.double( 9999.0 ),
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltSingleMuOpenCandidateL2Filtered3" ),
    MinNmuonHits = cms.int32( 0 ),
    MinN = cms.int32( 1 ),
    MinTrackPt = cms.double( 0.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxDXYBeamSpot = cms.double( 9999.0 ),
    MinNhits = cms.int32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MaxDz = cms.double( 9999.0 ),
    MaxPtDifference = cms.double( 9999.0 ),
    MaxDr = cms.double( 2.0 ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    MinDr = cms.double( -1.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinPt = cms.double( 12.0 )
)
# Require >= 2 corrected calo jets with pT > 65 GeV, |eta| < 3.0.
process.hltDoubleCentralJet65L1FastJet = cms.EDFilter( "HLT1CaloJet",
    saveTags = cms.bool( True ),
    MinPt = cms.double( 65.0 ),
    MinN = cms.int32( 2 ),
    MaxEta = cms.double( 3.0 ),
    MinMass = cms.double( -1.0 ),
    inputTag = cms.InputTag( "hltCaloJetL1FastJetCorrected" ),
    MinE = cms.double( -1.0 ),
    triggerType = cms.int32( 85 )
)
# Select the ECAL FEDs to unpack in regions of interest around L2 muon
# candidates (0.3 x 0.3 eta-phi margins, no pT cut).
process.hltEcalRegionalMuonsFEDs = cms.EDProducer( "EcalRawToRecHitRoI",
    JetJobPSet = cms.VPSet(
    ),
    sourceTag_es = cms.InputTag( "NotNeededoESfalse" ),
    doES = cms.bool( False ),
    type = cms.string( "candidate" ),
    sourceTag = cms.InputTag( "hltEcalRawToRecHitFacility" ),
    EmJobPSet = cms.VPSet(
    ),
    CandJobPSet = cms.VPSet(
      cms.PSet( bePrecise = cms.bool( False ),
        propagatorNameToBePrecise = cms.string( "" ),
        epsilon = cms.double( 0.01 ),
        regionPhiMargin = cms.double( 0.3 ),
        cType = cms.string( "chargedcandidate" ),
        Source = cms.InputTag( "hltL2MuonCandidates" ),
        Ptmin = cms.double( 0.0 ),
        regionEtaMargin = cms.double( 0.3 )
      )
    ),
    MuonJobPSet = cms.PSet( ),
    esInstance = cms.untracked.string( "es" ),
    MuJobPSet = cms.PSet( )
)
# Build ECAL rechits from the regionally selected FEDs, split into
# EB/EE collections, with spike/anomalous-signal cleaning thresholds.
process.hltEcalRegionalMuonsRecHit = cms.EDProducer( "EcalRawToRecHitProducer",
    splitOutput = cms.bool( True ),
    rechitCollection = cms.string( "NotNeededsplitOutputTrue" ),
    EErechitCollection = cms.string( "EcalRecHitsEE" ),
    EBrechitCollection = cms.string( "EcalRecHitsEB" ),
    sourceTag = cms.InputTag( "hltEcalRegionalMuonsFEDs" ),
    cleaningConfig = cms.PSet(
      e6e2thresh = cms.double( 0.04 ),
      tightenCrack_e6e2_double = cms.double( 3.0 ),
      e4e1Threshold_endcap = cms.double( 0.3 ),
      tightenCrack_e4e1_single = cms.double( 3.0 ),
      tightenCrack_e1_double = cms.double( 2.0 ),
      cThreshold_barrel = cms.double( 4.0 ),
      e4e1Threshold_barrel = cms.double( 0.08 ),
      tightenCrack_e1_single = cms.double( 2.0 ),
      e4e1_b_barrel = cms.double( -0.024 ),
      e4e1_a_barrel = cms.double( 0.04 ),
      ignoreOutOfTimeThresh = cms.double( 1.0E9 ),
      cThreshold_endcap = cms.double( 15.0 ),
      e4e1_b_endcap = cms.double( -0.0125 ),
      e4e1_a_endcap = cms.double( 0.02 ),
      cThreshold_double = cms.double( 10.0 )
    ),
    lazyGetterTag = cms.InputTag( "hltEcalRawToRecHitFacility" )
)
# Calo towers for muon isolation, built from the regional ECAL rechits
# above plus the full HBHE/HO/HF rechits; HO is effectively switched off
# (UseHO = False, HOWeight ~ 0) and kTime/kWeird/kBad ECAL hits are excluded.
process.hltTowerMakerForMuons = cms.EDProducer( "CaloTowersCreator",
    EBSumThreshold = cms.double( 0.2 ),
    MomHBDepth = cms.double( 0.2 ),
    UseEtEBTreshold = cms.bool( False ),
    hfInput = cms.InputTag( "hltHfreco" ),
    AllowMissingInputs = cms.bool( False ),
    MomEEDepth = cms.double( 0.0 ),
    EESumThreshold = cms.double( 0.45 ),
    HBGrid = cms.vdouble(  ),
    HcalAcceptSeverityLevelForRejectedHit = cms.uint32( 9999 ),
    HBThreshold = cms.double( 0.7 ),
    EcalSeveritiesToBeUsedInBadTowers = cms.vstring(  ),
    UseEcalRecoveredHits = cms.bool( False ),
    MomConstrMethod = cms.int32( 1 ),
    MomHEDepth = cms.double( 0.4 ),
    HcalThreshold = cms.double( -1000.0 ),
    HF2Weights = cms.vdouble(  ),
    HOWeights = cms.vdouble(  ),
    EEGrid = cms.vdouble(  ),
    UseSymEBTreshold = cms.bool( False ),
    EEWeights = cms.vdouble(  ),
    EEWeight = cms.double( 1.0 ),
    UseHO = cms.bool( False ),
    HBWeights = cms.vdouble(  ),
    HF1Weight = cms.double( 1.0 ),
    HF2Grid = cms.vdouble(  ),
    HEDWeights = cms.vdouble(  ),
    HEDGrid = cms.vdouble(  ),
    EBWeight = cms.double( 1.0 ),
    HF1Grid = cms.vdouble(  ),
    EBWeights = cms.vdouble(  ),
    HOWeight = cms.double( 1.0E-99 ),
    HESWeight = cms.double( 1.0 ),
    HESThreshold = cms.double( 0.8 ),
    hbheInput = cms.InputTag( "hltHbhereco" ),
    HF2Weight = cms.double( 1.0 ),
    HF2Threshold = cms.double( 0.85 ),
    HcalAcceptSeverityLevel = cms.uint32( 9 ),
    EEThreshold = cms.double( 0.3 ),
    HOThresholdPlus1 = cms.double( 3.5 ),
    HOThresholdPlus2 = cms.double( 3.5 ),
    HF1Weights = cms.vdouble(  ),
    hoInput = cms.InputTag( "hltHoreco" ),
    HF1Threshold = cms.double( 0.5 ),
    HOThresholdMinus1 = cms.double( 3.5 ),
    HESGrid = cms.vdouble(  ),
    EcutTower = cms.double( -1000.0 ),
    UseRejectedRecoveredEcalHits = cms.bool( False ),
    UseEtEETreshold = cms.bool( False ),
    HESWeights = cms.vdouble(  ),
    EcalRecHitSeveritiesToBeExcluded = cms.vstring( 'kTime',
      'kWeird',
      'kBad' ),
    HEDWeight = cms.double( 1.0 ),
    UseSymEETreshold = cms.bool( False ),
    HEDThreshold = cms.double( 0.8 ),
    EBThreshold = cms.double( 0.07 ),
    UseRejectedHitsOnly = cms.bool( False ),
    UseHcalRecoveredHits = cms.bool( False ),
    HOThresholdMinus2 = cms.double( 3.5 ),
    HOThreshold0 = cms.double( 3.5 ),
    ecalInputs = cms.VInputTag( 'hltEcalRegionalMuonsRecHit:EcalRecHitsEB','hltEcalRegionalMuonsRecHit:EcalRecHitsEE' ),
    UseRejectedRecoveredHcalHits = cms.bool( False ),
    MomEBDepth = cms.double( 0.3 ),
    HBWeight = cms.double( 1.0 ),
    HOGrid = cms.vdouble(  ),
    EBGrid = cms.vdouble(  )
)
# kT (R = 0.6) clustering of the muon-region calo towers with
# doRhoFastjet = True: provides the rho used for the rho-corrected
# muon calorimeter isolation below (Rho_EtaMax = 2.5 here).
process.hltKT6CaloJetsForMuons = cms.EDProducer( "FastjetJetProducer",
    Active_Area_Repeats = cms.int32( 1 ),
    doAreaFastjet = cms.bool( False ),
    voronoiRfact = cms.double( 0.9 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    doAreaDiskApprox = cms.bool( True ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    jetType = cms.string( "CaloJet" ),
    minSeed = cms.uint32( 14327 ),
    Ghost_EtaMax = cms.double( 5.0 ),
    doRhoFastjet = cms.bool( True ),
    jetAlgorithm = cms.string( "Kt" ),
    nSigmaPU = cms.double( 1.0 ),
    GhostArea = cms.double( 0.01 ),
    Rho_EtaMax = cms.double( 2.5 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    useDeterministicSeed = cms.bool( True ),
    doPVCorrection = cms.bool( False ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    rParam = cms.double( 0.6 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    doOutputJets = cms.bool( True ),
    src = cms.InputTag( "hltTowerMakerForMuons" ),
    inputEtMin = cms.double( 0.3 ),
    puPtMin = cms.double( 10.0 ),
    srcPVs = cms.InputTag( "NotUsed" ),
    jetPtMin = cms.double( 1.0 ),
    radiusPU = cms.double( 0.5 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    doPUOffsetCorr = cms.bool( False ),
    inputEMin = cms.double( 0.0 ),
    subtractorName = cms.string( "" ),
    MinVtxNdof = cms.int32( 0 ),
    MaxVtxZ = cms.double( 15.0 ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( False ),
    DzTrVtxMax = cms.double( 0.0 ),
    sumRecHits = cms.bool( False ),
    DxyTrVtxMax = cms.double( 0.0 )
)
# Rho-corrected ECAL+HCAL isolation deposits in a dR < 0.3 cone around
# L3 muons; the huge Thresholds value (9.9999999E7) means no cut is
# applied here — only the deposit/float output is used downstream.
process.hltL3CaloMuonCorrectedIsolations = cms.EDProducer( "L2MuonIsolationProducer",
    WriteIsolatorFloat = cms.bool( True ),
    IsolatorPSet = cms.PSet(
      ConeSizesRel = cms.vdouble( 0.3 ),
      EffAreaSFEndcap = cms.double( 1.0 ),
      CutAbsoluteIso = cms.bool( True ),
      AndOrCuts = cms.bool( True ),
      RhoSrc = cms.InputTag( 'hltKT6CaloJetsForMuons','rho' ),
      ConeSizes = cms.vdouble( 0.3 ),
      ComponentName = cms.string( "CutsIsolatorWithCorrection" ),
      ReturnRelativeSum = cms.bool( False ),
      RhoScaleBarrel = cms.double( 1.0 ),
      EffAreaSFBarrel = cms.double( 1.0 ),
      CutRelativeIso = cms.bool( False ),
      EtaBounds = cms.vdouble( 2.411 ),
      Thresholds = cms.vdouble( 9.9999999E7 ),
      ReturnAbsoluteSum = cms.bool( True ),
      ThresholdsRel = cms.vdouble( 9.9999999E7 ),
      EtaBoundsRel = cms.vdouble( 2.411 ),
      RhoScaleEndcap = cms.double( 1.0 ),
      RhoMax = cms.double( 9.9999999E7 ),
      UseRhoCorrection = cms.bool( True )
    ),
    StandAloneCollectionLabel = cms.InputTag( "hltL3Muons" ),
    ExtractorPSet = cms.PSet(
      DR_Veto_H = cms.double( 0.1 ),
      Vertex_Constraint_Z = cms.bool( False ),
      Threshold_H = cms.double( 0.5 ),
      ComponentName = cms.string( "CaloExtractor" ),
      Threshold_E = cms.double( 0.2 ),
      DR_Max = cms.double( 1.0 ),
      DR_Veto_E = cms.double( 0.07 ),
      Weight_E = cms.double( 1.0 ),
      Vertex_Constraint_XY = cms.bool( False ),
      DepositLabel = cms.untracked.string( "EcalPlusHcal" ),
      CaloTowerCollectionLabel = cms.InputTag( "hltTowerMakerForMuons" ),
      Weight_H = cms.double( 1.0 )
    )
)
# Pixel-pair track seeds in a 0.3 x 0.3 region around each L3 muon, used
# for the tracker component of the muon isolation. The cosmic-muon and
# jets-exclusion PSets are part of the region factory's full parameter
# set; with regionBase-style config driven by TrkSrc = hltL3Muons here
# (NOTE(review): region defined around L3 muons — cosmicMuons/ak5CaloJets
# inputs appear unused in this mode; confirm against IsolationRegionAroundL3Muon).
process.hltRegionalSeedsForL3MuonIsolation = cms.EDProducer( "SeedGeneratorFromRegionHitsEDProducer",
    RegionFactoryPSet = cms.PSet(
      ComponentName = cms.string( "IsolationRegionAroundL3Muon" ),
      RegionPSet = cms.PSet(
        precise = cms.bool( True ),
        originRadius = cms.double( 0.2 ),
        beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
        originHalfLength = cms.double( 15.0 ),
        ptMin = cms.double( 1.0 ),
        deltaPhiRegion = cms.double( 0.3 ),
        measurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
        zVertex = cms.double( 5.0 ),
        deltaEtaRegion = cms.double( 0.3 ),
        rVertex = cms.double( 5.0 ),
        vertexSrc = cms.string( "" ),
        vertexZConstrained = cms.bool( False ),
        vertexZDefault = cms.double( 0.0 ),
        TrkSrc = cms.InputTag( "hltL3Muons" )
      ),
      CollectionsPSet = cms.PSet(
        recoL2MuonsCollection = cms.InputTag( "" ),
        recoTrackMuonsCollection = cms.InputTag( "cosmicMuons" ),
        recoMuonsCollection = cms.InputTag( "" )
      ),
      RegionInJetsCheckPSet = cms.PSet(
        recoCaloJetsCollection = cms.InputTag( "ak5CaloJets" ),
        deltaRExclusionSize = cms.double( 0.3 ),
        jetsPtMin = cms.double( 5.0 ),
        doJetsExclusionCheck = cms.bool( True )
      ),
      ToolsPSet = cms.PSet(
        regionBase = cms.string( "seedOnCosmicMuon" ),
        thePropagatorName = cms.string( "AnalyticalPropagator" )
      )
    ),
    SeedComparitorPSet = cms.PSet( ComponentName = cms.string( "none" ) ),
    ClusterCheckPSet = cms.PSet(
      MaxNumberOfPixelClusters = cms.uint32( 20000 ),
      PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
      MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
      ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
      doClusterCheck = cms.bool( False )
    ),
    OrderedHitsFactoryPSet = cms.PSet(
      maxElement = cms.uint32( 100000 ),
      ComponentName = cms.string( "StandardHitPairGenerator" ),
      SeedingLayers = cms.string( "hltESPMixedLayerPairs" ),
      LayerPSet = cms.PSet(
        TOB = cms.PSet( TTRHBuilder = cms.string( "WithTrackAngle" ) ),
        layerList = cms.vstring( 'TOB6+TOB5',
          'TOB6+TOB4',
          'TOB6+TOB3',
          'TOB5+TOB4',
          'TOB5+TOB3',
          'TOB4+TOB3',
          'TEC1_neg+TOB6',
          'TEC1_neg+TOB5',
          'TEC1_neg+TOB4',
          'TEC1_pos+TOB6',
          'TEC1_pos+TOB5',
          'TEC1_pos+TOB4' ),
        TEC = cms.PSet(
          useRingSlector = cms.bool( False ),
          TTRHBuilder = cms.string( "WithTrackAngle" ),
          minRing = cms.int32( 6 ),
          maxRing = cms.int32( 7 )
        )
      )
    ),
    SeedCreatorPSet = cms.PSet(
      ComponentName = cms.string( "SeedFromConsecutiveHitsCreator" ),
      SeedMomentumForBOFF = cms.double( 5.0 ),
      propagator = cms.string( "PropagatorWithMaterial" ),
      maxseeds = cms.int32( 10000 )
    ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" )
)
# CKF pattern recognition on the regional seeds: builds track candidates
# for the isolation tracks.
process.hltRegionalCandidatesForL3MuonIsolation = cms.EDProducer( "CkfTrackCandidateMaker",
    src = cms.InputTag( "hltRegionalSeedsForL3MuonIsolation" ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
    ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    cleanTrajectoryAfterInOut = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    doSeedingRegionRebuilding = cms.bool( False ),
    maxNSeeds = cms.uint32( 100000 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    TrajectoryBuilder = cms.string( "hltESPCkfTrajectoryBuilder" )
)
# Final Kalman track fit of the isolation track candidates.
process.hltRegionalTracksForL3MuonIsolation = cms.EDProducer( "TrackProducer",
    src = cms.InputTag( "hltRegionalCandidatesForL3MuonIsolation" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPKFFittingSmoother" ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTracker = cms.string( "" ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    NavigationSchool = cms.string( "" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "undefAlgorithm" ),
    Propagator = cms.string( "PropagatorWithMaterial" )
)
# Combined (tracker + rho-corrected calo) relative isolation for L3 muons:
# cut at 0.15 in a dR < 0.3 cone, with leading-track veto (PtVeto_Min = 2.0)
# and the calo deposits taken from hltL3CaloMuonCorrectedIsolations.
process.hltL3MuonCombRelIsolations = cms.EDProducer( "L3MuonCombinedRelativeIsolationProducer",
    printDebug = cms.bool( False ),
    CutsPSet = cms.PSet(
      ConeSizes = cms.vdouble( 0.3 ),
      ComponentName = cms.string( "SimpleCuts" ),
      Thresholds = cms.vdouble( 0.15 ),
      maxNTracks = cms.int32( -1 ),
      EtaBounds = cms.vdouble( 2.411 ),
      applyCutsORmaxNTracks = cms.bool( False )
    ),
    OutputMuIsoDeposits = cms.bool( True ),
    TrackPt_Min = cms.double( -1.0 ),
    CaloDepositsLabel = cms.InputTag( "hltL3CaloMuonCorrectedIsolations" ),
    CaloExtractorPSet = cms.PSet(
      DR_Veto_H = cms.double( 0.1 ),
      Vertex_Constraint_Z = cms.bool( False ),
      Threshold_H = cms.double( 0.5 ),
      ComponentName = cms.string( "CaloExtractor" ),
      Threshold_E = cms.double( 0.2 ),
      DR_Max = cms.double( 0.3 ),
      DR_Veto_E = cms.double( 0.07 ),
      Weight_E = cms.double( 1.0 ),
      Vertex_Constraint_XY = cms.bool( False ),
      DepositLabel = cms.untracked.string( "EcalPlusHcal" ),
      CaloTowerCollectionLabel = cms.InputTag( "hltTowerMakerForMuons" ),
      Weight_H = cms.double( 1.0 )
    ),
    inputMuonCollection = cms.InputTag( "hltL3Muons" ),
    UseRhoCorrectedCaloDeposits = cms.bool( True ),
    TrkExtractorPSet = cms.PSet(
      DR_VetoPt = cms.double( 0.025 ),
      Diff_z = cms.double( 0.2 ),
      inputTrackCollection = cms.InputTag( "hltRegionalTracksForL3MuonIsolation" ),
      ReferenceRadius = cms.double( 6.0 ),
      BeamSpotLabel = cms.InputTag( "hltOnlineBeamSpot" ),
      ComponentName = cms.string( "PixelTrackExtractor" ),
      DR_Max = cms.double( 0.3 ),
      Diff_r = cms.double( 0.1 ),
      PropagateTracksToRadius = cms.bool( True ),
      Chi2Prob_Min = cms.double( -1.0 ),
      DR_Veto = cms.double( 0.01 ),
      NHits_Min = cms.uint32( 0 ),
      Chi2Ndof_Max = cms.double( 1.0E64 ),
      Pt_Min = cms.double( -1.0 ),
      DepositLabel = cms.untracked.string( "PXLS" ),
      BeamlineOption = cms.string( "BeamSpotFromEvent" ),
      VetoLeadingTrack = cms.bool( True ),
      PtVeto_Min = cms.double( 2.0 )
    )
)
# Isolation filter: require >= 1 L3 muon (from the pT > 12 filter) passing
# the combined relative isolation decision above.
process.hltL3crIsoL1sMuOpenCandidateL1f0L2f3L3f12L3crIsoFiltered12 = cms.EDFilter( "HLTMuonIsoFilter",
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltSingleMuOpenCandidateL3Filtered12" ),
    MinN = cms.int32( 1 ),
    IsolatorPSet = cms.PSet( ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    DepTag = cms.VInputTag( 'hltL3MuonCombRelIsolations' )
)
# L1 seed: L1_HTT150 OR L1_HTT175.
process.hltL1sL1HTT150OrHTT175 = cms.EDFilter( "HLTLevel1GTSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_HTT150 OR L1_HTT175" ),
    L1MuonCollectionTag = cms.InputTag( "hltL1extraParticles" ),
    L1UseL1TriggerObjectMaps = cms.bool( True ),
    L1UseAliasesForSeeding = cms.bool( True ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    L1CollectionsTag = cms.InputTag( "hltL1extraParticles" ),
    L1NrBxInEvent = cms.int32( 3 ),
    L1GtObjectMapTag = cms.InputTag( "hltL1GtObjectMap" ),
    L1TechTriggerSeeding = cms.bool( False )
)
# L1 seed: L1_HTT150 OR L1_HTT175 OR L1_HTT200.
process.hltL1sL1HTT150OrHTT175OrHTT200 = cms.EDFilter( "HLTLevel1GTSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_HTT150 OR L1_HTT175 OR L1_HTT200" ),
    L1MuonCollectionTag = cms.InputTag( "hltL1extraParticles" ),
    L1UseL1TriggerObjectMaps = cms.bool( True ),
    L1UseAliasesForSeeding = cms.bool( True ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    L1CollectionsTag = cms.InputTag( "hltL1extraParticles" ),
    L1NrBxInEvent = cms.int32( 3 ),
    L1GtObjectMapTag = cms.InputTag( "hltL1GtObjectMap" ),
    L1TechTriggerSeeding = cms.bool( False )
)
# HLT prescale module for the
# IsoMu17_eta2p1_DiCentralPFNoPUJet30_PFNoPUHT350_PFMHT40 path.
process.hltPreIsoMu17eta2p1DiCentralPFNoPUJet30PFNoPUHT350PFMHT40 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# L1 stage for the IsoMu17 (|eta| < 2.1) leg: >= 1 L1 muon, no pT cut.
process.hltSingleMuOpenCenJetL1Filtered0 = cms.EDFilter( "HLTMuonL1Filter",
    saveTags = cms.bool( False ),
    CSCTFtag = cms.InputTag( "unused" ),
    PreviousCandTag = cms.InputTag( "hltL1sL1SingleMuOpenCandidate" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    MaxEta = cms.double( 2.1 ),
    SelectQualities = cms.vint32(  ),
    CandTag = cms.InputTag( "hltL1extraParticles" ),
    ExcludeSingleSegmentCSC = cms.bool( False )
)
# L2 stage with eta-binned quality cuts: pT > 14 GeV, |eta| < 2.1; per
# |eta| bin (0-0.9, 0.9-1.5, 1.5-2.1, 2.1-5.0) require MinNhits
# (0,1,0,1) and MinNstations (0,2,0,2).
process.hltL2SingleMuOpenCenJetL2QFiltered14 = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( False ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltSingleMuOpenCenJetL1Filtered0" ),
    MinPt = cms.double( 14.0 ),
    MinN = cms.int32( 1 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 2.1 ),
    MinNhits = cms.vint32( 0, 1, 0, 1 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 0.9, 1.5, 2.1, 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidates" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0, 2, 0, 2 )
)
# L3 stage: >= 1 muon with pT > 17 GeV, |eta| < 2.1, matched to the L2
# candidate within dR < 2.0.
process.hltSingleMuOpenIsoCenJetL3withL2QPreFiltered17 = cms.EDFilter( "HLTMuonL3PreFilter",
    MaxNormalizedChi2 = cms.double( 9999.0 ),
    saveTags = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltL2SingleMuOpenCenJetL2QFiltered14" ),
    MinNmuonHits = cms.int32( 0 ),
    MinN = cms.int32( 1 ),
    MinTrackPt = cms.double( 0.0 ),
    MaxEta = cms.double( 2.1 ),
    MaxDXYBeamSpot = cms.double( 9999.0 ),
    MinNhits = cms.int32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MaxDz = cms.double( 9999.0 ),
    MaxPtDifference = cms.double( 9999.0 ),
    MaxDr = cms.double( 2.0 ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    MinDr = cms.double( -1.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinPt = cms.double( 17.0 )
)
# Isolation filter on the pT > 17 muon, using the rho-corrected combined
# relative isolation deposits (cut value lives in hltL3MuonCombRelIsolations).
process.hltSingleMuOpenIsoCenJetL3crIsoRhoFiltered0p15 = cms.EDFilter( "HLTMuonIsoFilter",
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltSingleMuOpenIsoCenJetL3withL2QPreFiltered17" ),
    MinN = cms.int32( 1 ),
    IsolatorPSet = cms.PSet( ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    DepTag = cms.VInputTag( 'hltL3MuonCombRelIsolations' )
)
# Apply L1Fast+L2+L3 jet energy corrections to the standard anti-kT 0.5
# PF jets.
process.hltAK5PFJetL1FastL2L3Corrected = cms.EDProducer( "PFJetCorrectionProducer",
    src = cms.InputTag( "hltAntiKT5PFJets" ),
    correctors = cms.vstring( 'hltESPAK5PFL1L2L3' )
)
# Online primary-vertex reconstruction from the merged PF-muon tracks:
# DA (deterministic-annealing) track clustering + AdaptiveVertexFitter.
# Produces two collections: the default one (no beam constraint) and
# "WithBS" (refit with the beam-spot constraint, minNdof = 2).
process.hltOnlinePrimaryVertices = cms.EDProducer( "PrimaryVertexProducer",
    vertexCollections = cms.VPSet( 
      cms.PSet(  maxDistanceToBeam = cms.double( 1.0 ),
        useBeamConstraint = cms.bool( False ),
        minNdof = cms.double( 0.0 ),
        algorithm = cms.string( "AdaptiveVertexFitter" ),
        label = cms.string( "" )
      ),
      cms.PSet(  maxDistanceToBeam = cms.double( 1.0 ),
        useBeamConstraint = cms.bool( True ),
        minNdof = cms.double( 2.0 ),
        algorithm = cms.string( "AdaptiveVertexFitter" ),
        label = cms.string( "WithBS" )
      )
    ),
    verbose = cms.untracked.bool( False ),
    TkFilterParameters = cms.PSet( 
      maxNormalizedChi2 = cms.double( 20.0 ),
      minPt = cms.double( 0.0 ),
      algorithm = cms.string( "filter" ),
      maxD0Significance = cms.double( 5.0 ),
      trackQuality = cms.string( "any" ),
      minPixelLayersWithHits = cms.int32( 2 ),
      minSiliconLayersWithHits = cms.int32( 5 )
    ),
    beamSpotLabel = cms.InputTag( "hltOnlineBeamSpot" ),
    TrackLabel = cms.InputTag( "hltPFMuonMerging" ),
    TkClusParameters = cms.PSet( 
      TkDAClusParameters = cms.PSet( 
        d0CutOff = cms.double( 3.0 ),
        Tmin = cms.double( 4.0 ),
        dzCutOff = cms.double( 4.0 ),
        coolingFactor = cms.double( 0.6 ),
        use_vdt = cms.untracked.bool( True ),
        vertexSize = cms.double( 0.01 )
      ),
      algorithm = cms.string( "DA" )
    )
)
# Select good online PVs: |z| < 24 cm, ndof >= 4, rho < 2 cm.
process.hltGoodOnlinePVs = cms.EDFilter( "PrimaryVertexObjectFilter",
    src = cms.InputTag( "hltOnlinePrimaryVertices" ),
    filterParams = cms.PSet( 
      maxZ = cms.double( 24.0 ),
      minNdof = cms.double( 4.0 ),
      maxRho = cms.double( 2.0 ),
      pvSrc = cms.InputTag( "hltOnlinePrimaryVertices" )
    )
)
# Identify PF candidates associated with pile-up vertices
# (any good PV other than the leading one).
process.hltPFPileUp = cms.EDProducer( "PFPileUp",
    checkClosestZVertex = cms.bool( False ),
    Enable = cms.bool( True ),
    PFCandidates = cms.InputTag( "hltParticleFlow" ),
    verbose = cms.untracked.bool( False ),
    Vertices = cms.InputTag( "hltGoodOnlinePVs" )
)
# PF candidates with the pile-up subset (hltPFPileUp) removed.
process.hltPFNoPileUp = cms.EDProducer( "TPPFCandidatesOnPFCandidates",
    bottomCollection = cms.InputTag( "hltParticleFlow" ),
    enable = cms.bool( True ),
    topCollection = cms.InputTag( "hltPFPileUp" ),
    name = cms.untracked.string( "pileUpOnPFCandidates" ),
    verbose = cms.untracked.bool( False )
)
# Anti-kT R=0.5 PF jets clustered from the pile-up-subtracted candidates
# (hltPFNoPileUp); deterministic seed, disk-approximation jet areas.
process.hltAntiKT5PFJetsNoPU = cms.EDProducer( "FastjetJetProducer",
    Active_Area_Repeats = cms.int32( 5 ),
    doAreaFastjet = cms.bool( False ),
    voronoiRfact = cms.double( -9.0 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    doAreaDiskApprox = cms.bool( True ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    jetType = cms.string( "PFJet" ),
    minSeed = cms.uint32( 0 ),
    Ghost_EtaMax = cms.double( 6.0 ),
    doRhoFastjet = cms.bool( False ),
    jetAlgorithm = cms.string( "AntiKt" ),
    nSigmaPU = cms.double( 1.0 ),
    GhostArea = cms.double( 0.01 ),
    Rho_EtaMax = cms.double( 4.4 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    useDeterministicSeed = cms.bool( True ),
    doPVCorrection = cms.bool( False ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    rParam = cms.double( 0.5 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    doOutputJets = cms.bool( True ),
    src = cms.InputTag( "hltPFNoPileUp" ),
    inputEtMin = cms.double( 0.0 ),
    puPtMin = cms.double( 10.0 ),
    srcPVs = cms.InputTag( "hltPixelVertices" ),
    jetPtMin = cms.double( 0.0 ),
    radiusPU = cms.double( 0.5 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    doPUOffsetCorr = cms.bool( False ),
    inputEMin = cms.double( 0.0 ),
    subtractorName = cms.string( "" ),
    MinVtxNdof = cms.int32( 0 ),
    MaxVtxZ = cms.double( 15.0 ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( False ),
    DzTrVtxMax = cms.double( 0.0 ),
    sumRecHits = cms.bool( False ),
    DxyTrVtxMax = cms.double( 0.0 )
)
# Apply the hltESPAK5PFNoPUL1L2L3 energy corrections to the NoPU PF jets.
process.hltAK5PFJetL1FastL2L3CorrectedNoPU = cms.EDProducer( "PFJetCorrectionProducer",
    src = cms.InputTag( "hltAntiKT5PFJetsNoPU" ),
    correctors = cms.vstring( 'hltESPAK5PFNoPUL1L2L3' )
)
# Require >= 2 central (|eta| < 3.0) corrected NoPU PF jets with pT > 30 GeV
# (triggerType 85 = TriggerJet).
process.hltDiCentralPFJet30NoPU = cms.EDFilter( "HLT1PFJet",
    saveTags = cms.bool( False ),
    MinPt = cms.double( 30.0 ),
    MinN = cms.int32( 2 ),
    MaxEta = cms.double( 3.0 ),
    MinMass = cms.double( -1.0 ),
    inputTag = cms.InputTag( "hltAK5PFJetL1FastL2L3CorrectedNoPU" ),
    MinE = cms.double( -1.0 ),
    triggerType = cms.int32( 85 )
)
# HT/MHT from corrected AK5 PF jets: MHT from jets with pT > 30 GeV
# (any eta), HT from jets with pT > 40 GeV, |eta| < 3.0; pT (not Et) is used.
process.hltPFMHT = cms.EDProducer( "HLTHtMhtProducer",
    tracksLabel = cms.InputTag( "hltL3Muons" ),
    useTracks = cms.bool( False ),
    minPtJetHt = cms.double( 40.0 ),
    maxEtaJetMht = cms.double( 999.0 ),
    minNJetMht = cms.int32( 0 ),
    jetsLabel = cms.InputTag( "hltAK5PFJetL1FastL2L3Corrected" ),
    usePt = cms.bool( True ),
    maxEtaJetHt = cms.double( 3.0 ),
    minPtJetMht = cms.double( 30.0 ),
    excludePFMuons = cms.bool( False ),
    pfCandidatesLabel = cms.InputTag( "hltParticleFlow" ),
    minNJetHt = cms.int32( 0 )
)
# HT/MHT from corrected NoPU PF jets; HT uses jets with pT > 40 GeV,
# |eta| < 3.0.
process.hltPFHTNoPU = cms.EDProducer( "HLTHtMhtProducer",
    tracksLabel = cms.InputTag( "hltL3Muons" ),
    useTracks = cms.bool( False ),
    minPtJetHt = cms.double( 40.0 ),
    maxEtaJetMht = cms.double( 999.0 ),
    minNJetMht = cms.int32( 0 ),
    jetsLabel = cms.InputTag( "hltAK5PFJetL1FastL2L3CorrectedNoPU" ),
    usePt = cms.bool( True ),
    maxEtaJetHt = cms.double( 3.0 ),
    minPtJetMht = cms.double( 0.0 ),
    excludePFMuons = cms.bool( False ),
    pfCandidatesLabel = cms.InputTag( "hltParticleFlow" ),
    minNJetHt = cms.int32( 0 )
)
# Require MHT > 40 GeV (from hltPFMHT) and HT > 350 GeV (from hltPFHTNoPU).
process.hltPFMHT40HT350 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( True ),
    mhtLabels = cms.VInputTag( 'hltPFMHT' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 40.0 ),
    htLabels = cms.VInputTag( 'hltPFHTNoPU' ),
    minHt = cms.vdouble( 350.0 )
)
# L1 seed filter for the L1_Mu0_HTT100 trigger.
process.hltL1sL1Mu0HTT100 = cms.EDFilter( "HLTLevel1GTSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_Mu0_HTT100" ),
    L1MuonCollectionTag = cms.InputTag( "hltL1extraParticles" ),
    L1UseL1TriggerObjectMaps = cms.bool( True ),
    L1UseAliasesForSeeding = cms.bool( True ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    L1CollectionsTag = cms.InputTag( "hltL1extraParticles" ),
    L1NrBxInEvent = cms.int32( 3 ),
    L1GtObjectMapTag = cms.InputTag( "hltL1GtObjectMap" ),
    L1TechTriggerSeeding = cms.bool( False )
)
# HLT prescale module (offset 0) for the
# DoubleRelIso1p0Mu5_Mass8_PFNoPUHT175 path.
process.hltPreDoubleRelIso1p0Mu5Mass8PFNoPUHT175 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# L1 di-muon filter: >= 2 L1 muons, pT >= 0 GeV, |eta| < 2.5
# ("Ignored" L1 seed: previous candidates come from L1_SingleMuOpen).
process.hltIgnoredL1SingleMuOpenL1DiMuFiltered0 = cms.EDFilter( "HLTMuonL1Filter",
    saveTags = cms.bool( False ),
    CSCTFtag = cms.InputTag( "unused" ),
    PreviousCandTag = cms.InputTag( "hltL1sL1SingleMuOpenCandidate" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 2 ),
    MaxEta = cms.double( 2.5 ),
    SelectQualities = cms.vint32(  ),
    CandTag = cms.InputTag( "hltL1extraParticles" ),
    ExcludeSingleSegmentCSC = cms.bool( False )
)
# L2 di-muon pre-filter: >= 2 L2 muon candidates, pT >= 0 GeV, |eta| < 2.5.
process.hltIgnoredL1SingleMuOpenL2DiMuFiltered0 = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( True ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltIgnoredL1SingleMuOpenL1DiMuFiltered0" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 2 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 2.5 ),
    MinNhits = cms.vint32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidates" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0 )
)
# L3 di-muon filter: each muon pT > 5 GeV, |eta| < 2.5,
# invariant mass m(mu,mu) > 8 GeV, no charge requirement.
process.hltIgnoredL1SingleMuOpenDiMu5Mass8L3Filtered5 = cms.EDFilter( "HLTMuonDimuonL3Filter",
    saveTags = cms.bool( True ),
    ChargeOpt = cms.int32( 0 ),
    MaxPtMin = cms.vdouble( 1.0E125 ),
    FastAccept = cms.bool( False ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    PreviousCandTag = cms.InputTag( "hltIgnoredL1SingleMuOpenL2DiMuFiltered0" ),
    MaxPtBalance = cms.double( 999999.0 ),
    MaxPtPair = cms.vdouble( 1.0E125 ),
    MaxAcop = cms.double( 999.0 ),
    MinPtMin = cms.vdouble( 5.0 ),
    MaxInvMass = cms.vdouble( 9999.0 ),
    MinPtMax = cms.vdouble( 0.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MaxDz = cms.double( 9999.0 ),
    MinPtPair = cms.vdouble( 0.0 ),
    MaxDr = cms.double( 2.0 ),
    MinAcop = cms.double( -999.0 ),
    MaxDCAMuMu = cms.double( 9999999.0 ),
    MinNhits = cms.int32( 0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinPtBalance = cms.double( -1.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxRapidityPair = cms.double( 999999.0 ),
    CutCowboys = cms.bool( False ),
    MinInvMass = cms.vdouble( 8.0 )
)
# Combined (tracker + calo) relative isolation for L3 muons with a
# relative-isolation threshold of 1.0 in a dR = 0.3 cone.
process.hltL3MuonCombRelIsolationsIso1p0 = cms.EDProducer( "L3MuonCombinedRelativeIsolationProducer",
    printDebug = cms.bool( False ),
    CutsPSet = cms.PSet( 
      ConeSizes = cms.vdouble( 0.3 ),
      ComponentName = cms.string( "SimpleCuts" ),
      Thresholds = cms.vdouble( 1.0 ),
      maxNTracks = cms.int32( -1 ),
      EtaBounds = cms.vdouble( 2.5 ),
      applyCutsORmaxNTracks = cms.bool( False )
    ),
    OutputMuIsoDeposits = cms.bool( True ),
    TrackPt_Min = cms.double( -1.0 ),
    CaloDepositsLabel = cms.InputTag( "hltL3CaloMuonCorrectedIsolations" ),
    CaloExtractorPSet = cms.PSet( 
      DR_Veto_H = cms.double( 0.1 ),
      Vertex_Constraint_Z = cms.bool( False ),
      Threshold_H = cms.double( 0.5 ),
      ComponentName = cms.string( "CaloExtractor" ),
      Threshold_E = cms.double( 0.2 ),
      DR_Max = cms.double( 0.3 ),
      DR_Veto_E = cms.double( 0.07 ),
      Weight_E = cms.double( 1.5 ),
      Vertex_Constraint_XY = cms.bool( False ),
      DepositLabel = cms.untracked.string( "EcalPlusHcal" ),
      CaloTowerCollectionLabel = cms.InputTag( "hltTowerMakerForMuons" ),
      Weight_H = cms.double( 1.0 )
    ),
    inputMuonCollection = cms.InputTag( "hltL3Muons" ),
    UseRhoCorrectedCaloDeposits = cms.bool( False ),
    TrkExtractorPSet = cms.PSet( 
      DR_VetoPt = cms.double( 0.025 ),
      Diff_z = cms.double( 0.2 ),
      inputTrackCollection = cms.InputTag( "hltRegionalTracksForL3MuonIsolation" ),
      ReferenceRadius = cms.double( 6.0 ),
      BeamSpotLabel = cms.InputTag( "hltOnlineBeamSpot" ),
      ComponentName = cms.string( "PixelTrackExtractor" ),
      DR_Max = cms.double( 0.3 ),
      Diff_r = cms.double( 0.1 ),
      PropagateTracksToRadius = cms.bool( True ),
      Chi2Prob_Min = cms.double( -1.0 ),
      DR_Veto = cms.double( 0.01 ),
      NHits_Min = cms.uint32( 0 ),
      Chi2Ndof_Max = cms.double( 1.0E64 ),
      Pt_Min = cms.double( -1.0 ),
      DepositLabel = cms.untracked.string( "PXLS" ),
      BeamlineOption = cms.string( "BeamSpotFromEvent" ),
      VetoLeadingTrack = cms.bool( True ),
      PtVeto_Min = cms.double( 2.0 )
    )
)
# Require both di-muon candidates (MinN = 2) to pass the rel-iso 1.0
# isolation computed above.
process.hltL3doublereliso1p0mufilter5 = cms.EDFilter( "HLTMuonIsoFilter",
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltIgnoredL1SingleMuOpenDiMu5Mass8L3Filtered5" ),
    MinN = cms.int32( 2 ),
    IsolatorPSet = cms.PSet(  ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    DepTag = cms.VInputTag( 'hltL3MuonCombRelIsolationsIso1p0' )
)
# Calo-jet HT/MHT: HT from L1FastJet-corrected calo jets with Et > 40 GeV,
# |eta| < 3.0; MHT from jets with Et > 30 GeV, |eta| < 5.0 (Et, not pT).
process.hltHtMht = cms.EDProducer( "HLTHtMhtProducer",
    tracksLabel = cms.InputTag( "" ),
    useTracks = cms.bool( False ),
    minPtJetHt = cms.double( 40.0 ),
    maxEtaJetMht = cms.double( 5.0 ),
    minNJetMht = cms.int32( 0 ),
    jetsLabel = cms.InputTag( "hltCaloJetL1FastJetCorrected" ),
    usePt = cms.bool( False ),
    maxEtaJetHt = cms.double( 3.0 ),
    minPtJetMht = cms.double( 30.0 ),
    excludePFMuons = cms.bool( False ),
    pfCandidatesLabel = cms.InputTag( "" ),
    minNJetHt = cms.int32( 0 )
)
# Require calo HT > 100 GeV.
process.hltHt100 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( False ),
    mhtLabels = cms.VInputTag( 'hltHtMht' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 0.0 ),
    htLabels = cms.VInputTag( 'hltHtMht' ),
    minHt = cms.vdouble( 100.0 )
)
# Require PF NoPU HT > 175 GeV.
process.hltPFHT175NoPU = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( True ),
    mhtLabels = cms.VInputTag( 'hltPFHTNoPU' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 0.0 ),
    htLabels = cms.VInputTag( 'hltPFHTNoPU' ),
    minHt = cms.vdouble( 175.0 )
)
# L1 seed filter for the L1_Mu4_HTT125 trigger.
process.hltL1sL1Mu4HTT125 = cms.EDFilter( "HLTLevel1GTSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_Mu4_HTT125" ),
    L1MuonCollectionTag = cms.InputTag( "hltL1extraParticles" ),
    L1UseL1TriggerObjectMaps = cms.bool( True ),
    L1UseAliasesForSeeding = cms.bool( True ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    L1CollectionsTag = cms.InputTag( "hltL1extraParticles" ),
    L1NrBxInEvent = cms.int32( 3 ),
    L1GtObjectMapTag = cms.InputTag( "hltL1GtObjectMap" ),
    L1TechTriggerSeeding = cms.bool( False )
)
# HLT prescale module (offset 0) for the
# DoubleRelIso1p0Mu5_Mass8_PFNoPUHT225 path.
process.hltPreDoubleRelIso1p0Mu5Mass8PFNoPUHT225 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# Require calo HT > 125 GeV.
process.hltHt125 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( False ),
    mhtLabels = cms.VInputTag( 'hltHtMht' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 0.0 ),
    htLabels = cms.VInputTag( 'hltHtMht' ),
    minHt = cms.vdouble( 125.0 )
)
# Require PF NoPU HT > 225 GeV.
process.hltPFHT225NoPU = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( True ),
    mhtLabels = cms.VInputTag( 'hltPFHTNoPU' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 0.0 ),
    htLabels = cms.VInputTag( 'hltPFHTNoPU' ),
    minHt = cms.vdouble( 225.0 )
)
# L1 seed filter: pass if L1_Mu0_HTT100 OR L1_Mu4_HTT125 fired.
process.hltL1sL1Mu0HTT100ORL1Mu4HTT125 = cms.EDFilter( "HLTLevel1GTSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_Mu0_HTT100 OR L1_Mu4_HTT125" ),
    L1MuonCollectionTag = cms.InputTag( "hltL1extraParticles" ),
    L1UseL1TriggerObjectMaps = cms.bool( True ),
    L1UseAliasesForSeeding = cms.bool( True ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    L1CollectionsTag = cms.InputTag( "hltL1extraParticles" ),
    L1NrBxInEvent = cms.int32( 3 ),
    L1GtObjectMapTag = cms.InputTag( "hltL1GtObjectMap" ),
    L1TechTriggerSeeding = cms.bool( False )
)
# HLT prescale module (offset 0) for the DoubleMu8_Mass8_PFNoPUHT175 path.
process.hltPreDoubleMu8Mass8PFNoPUHT175 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# L3 di-muon filter: each muon pT > 8 GeV, |eta| < 2.5,
# invariant mass m(mu,mu) > 8 GeV, no charge requirement.
process.hltIgnoredL1SingleMuOpenDiMu8Mass8L3Filtered = cms.EDFilter( "HLTMuonDimuonL3Filter",
    saveTags = cms.bool( True ),
    ChargeOpt = cms.int32( 0 ),
    MaxPtMin = cms.vdouble( 1.0E125 ),
    FastAccept = cms.bool( False ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    PreviousCandTag = cms.InputTag( "hltIgnoredL1SingleMuOpenL2DiMuFiltered0" ),
    MaxPtBalance = cms.double( 999999.0 ),
    MaxPtPair = cms.vdouble( 1.0E125 ),
    MaxAcop = cms.double( 999.0 ),
    MinPtMin = cms.vdouble( 8.0 ),
    MaxInvMass = cms.vdouble( 9999.0 ),
    MinPtMax = cms.vdouble( 0.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MaxDz = cms.double( 9999.0 ),
    MinPtPair = cms.vdouble( 0.0 ),
    MaxDr = cms.double( 2.0 ),
    MinAcop = cms.double( -999.0 ),
    MaxDCAMuMu = cms.double( 9999999.0 ),
    MinNhits = cms.int32( 0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinPtBalance = cms.double( -1.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxRapidityPair = cms.double( 999999.0 ),
    CutCowboys = cms.bool( False ),
    MinInvMass = cms.vdouble( 8.0 )
)
# HLT prescale module (offset 0) for the DoubleMu8_Mass8_PFNoPUHT225 path.
process.hltPreDoubleMu8Mass8PFNoPUHT225 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# Require calo HT > 150 GeV.
process.hltHt150 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( False ),
    mhtLabels = cms.VInputTag( 'hltHtMht' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 0.0 ),
    htLabels = cms.VInputTag( 'hltHtMht' ),
    minHt = cms.vdouble( 150.0 )
)
# HLT prescale module (offset 0) for the
# RelIso1p0Mu5_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175 path.
process.hltPreRelIso1p0Mu5Ele8CaloIdTTrkIdVLMass8PFNoPUHT175 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# L1 muon filter for the Mu0_HTT100 seed: >= 1 L1 muon, pT >= 0, |eta| < 2.5.
process.hltL1Mu0HTT100L1MuFiltered0 = cms.EDFilter( "HLTMuonL1Filter",
    saveTags = cms.bool( False ),
    CSCTFtag = cms.InputTag( "unused" ),
    PreviousCandTag = cms.InputTag( "hltL1sL1Mu0HTT100" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    MaxEta = cms.double( 2.5 ),
    SelectQualities = cms.vint32(  ),
    CandTag = cms.InputTag( "hltL1extraParticles" ),
    ExcludeSingleSegmentCSC = cms.bool( False )
)
# L2 muon pre-filter: >= 1 L2 muon candidate, pT >= 0 GeV, |eta| < 2.5.
process.hltL1Mu0HTT100L2Filtered0 = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( True ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltL1Mu0HTT100L1MuFiltered0" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 2.5 ),
    MinNhits = cms.vint32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidates" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0 )
)
# L3 muon pre-filter: >= 1 L3 muon with pT > 5 GeV, |eta| < 2.5,
# matched (dR < 2.0) to the L2 candidate above.
process.hltL1Mu0HTT100L3Filtered5 = cms.EDFilter( "HLTMuonL3PreFilter",
    MaxNormalizedChi2 = cms.double( 9999.0 ),
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltL1Mu0HTT100L2Filtered0" ),
    MinNmuonHits = cms.int32( 0 ),
    MinN = cms.int32( 1 ),
    MinTrackPt = cms.double( 0.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxDXYBeamSpot = cms.double( 9999.0 ),
    MinNhits = cms.int32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MaxDz = cms.double( 9999.0 ),
    MaxPtDifference = cms.double( 9999.0 ),
    MaxDr = cms.double( 2.0 ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    MinDr = cms.double( -1.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinPt = cms.double( 5.0 )
)
# ECAL-barrel hybrid superclustering over all ECAL rechits ("activity",
# i.e. not restricted to L1 regions).
process.hltHybridSuperClustersActivity = cms.EDProducer( "HybridClusterProducer",
    eThreshA = cms.double( 0.0030 ),
    basicclusterCollection = cms.string( "hybridBarrelBasicClusters" ),
    clustershapecollection = cms.string( "" ),
    ethresh = cms.double( 0.1 ),
    ewing = cms.double( 0.0 ),
    RecHitSeverityToBeExcluded = cms.vstring( 'kWeird' ),
    posCalcParameters = cms.PSet( 
      T0_barl = cms.double( 7.4 ),
      LogWeighted = cms.bool( True ),
      T0_endc = cms.double( 3.1 ),
      T0_endcPresh = cms.double( 1.2 ),
      W0 = cms.double( 4.2 ),
      X0 = cms.double( 0.89 )
    ),
    HybridBarrelSeedThr = cms.double( 1.0 ),
    dynamicPhiRoad = cms.bool( False ),
    shapeAssociation = cms.string( "hybridShapeAssoc" ),
    RecHitFlagToBeExcluded = cms.vstring(  ),
    useEtForXi = cms.bool( True ),
    step = cms.int32( 17 ),
    eThreshB = cms.double( 0.1 ),
    xi = cms.double( 0.0 ),
    eseed = cms.double( 0.35 ),
    ecalhitproducer = cms.string( "hltEcalRecHitAll" ),
    dynamicEThresh = cms.bool( False ),
    ecalhitcollection = cms.string( "EcalRecHitsEB" ),
    excludeFlagged = cms.bool( True ),
    superclusterCollection = cms.string( "" )
)
# Energy-corrected barrel (hybrid) superclusters, Et > 5 GeV.
process.hltCorrectedHybridSuperClustersActivity = cms.EDProducer( "EgammaSCCorrectionMaker",
    corectedSuperClusterCollection = cms.string( "" ),
    sigmaElectronicNoise = cms.double( 0.15 ),
    superClusterAlgo = cms.string( "Hybrid" ),
    etThresh = cms.double( 5.0 ),
    rawSuperClusterProducer = cms.InputTag( "hltHybridSuperClustersActivity" ),
    applyEnergyCorrection = cms.bool( True ),
    isl_fCorrPset = cms.PSet(  ),
    VerbosityLevel = cms.string( "ERROR" ),
    recHitProducer = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEB' ),
    fix_fCorrPset = cms.PSet(  ),
    modeEE = cms.int32( 0 ),
    modeEB = cms.int32( 0 ),
    dyn_fCorrPset = cms.PSet(  ),
    energyCorrectorName = cms.string( "EcalClusterEnergyCorrectionObjectSpecific" ),
    applyCrackCorrection = cms.bool( False ),
    hyb_fCorrPset = cms.PSet( 
      brLinearLowThr = cms.double( 1.1 ),
      fEtEtaVec = cms.vdouble( 0.0, 1.00121, -0.63672, 0.0, 0.0, 0.0, 0.5655, 6.457, 0.5081, 8.0, 1.023, -0.00181 ),
      brLinearHighThr = cms.double( 8.0 ),
      fBremVec = cms.vdouble( -0.04382, 0.1169, 0.9267, -9.413E-4, 1.419 )
    )
)
# ECAL-endcap multi5x5 basic clusters (endcap only; doBarrel = False).
process.hltMulti5x5BasicClustersActivity = cms.EDProducer( "Multi5x5ClusterProducer",
    endcapHitCollection = cms.string( "EcalRecHitsEE" ),
    barrelClusterCollection = cms.string( "multi5x5BarrelBasicClusters" ),
    IslandEndcapSeedThr = cms.double( 0.18 ),
    doEndcap = cms.bool( True ),
    posCalcParameters = cms.PSet( 
      T0_barl = cms.double( 7.4 ),
      LogWeighted = cms.bool( True ),
      T0_endc = cms.double( 3.1 ),
      T0_endcPresh = cms.double( 1.2 ),
      W0 = cms.double( 4.2 ),
      X0 = cms.double( 0.89 )
    ),
    barrelShapeAssociation = cms.string( "multi5x5BarrelShapeAssoc" ),
    endcapShapeAssociation = cms.string( "multi5x5EndcapShapeAssoc" ),
    endcapHitProducer = cms.string( "hltEcalRecHitAll" ),
    clustershapecollectionEB = cms.string( "multi5x5BarrelShape" ),
    IslandBarrelSeedThr = cms.double( 0.5 ),
    barrelHitProducer = cms.string( "hltEcalRecHitAll" ),
    RecHitFlagToBeExcluded = cms.vstring(  ),
    barrelHitCollection = cms.string( "EcalRecHitsEB" ),
    clustershapecollectionEE = cms.string( "multi5x5EndcapShape" ),
    endcapClusterCollection = cms.string( "multi5x5EndcapBasicClusters" ),
    doBarrel = cms.bool( False )
)
# Endcap multi5x5 superclusters built from the basic clusters above
# (with bremsstrahlung-recovery roads; endcap only).
process.hltMulti5x5SuperClustersActivity = cms.EDProducer( "Multi5x5SuperClusterProducer",
    barrelSuperclusterCollection = cms.string( "multi5x5BarrelSuperClusters" ),
    endcapEtaSearchRoad = cms.double( 0.14 ),
    barrelClusterCollection = cms.string( "multi5x5BarrelBasicClusters" ),
    dynamicPhiRoad = cms.bool( False ),
    endcapClusterProducer = cms.string( "hltMulti5x5BasicClustersActivity" ),
    barrelPhiSearchRoad = cms.double( 0.8 ),
    endcapPhiSearchRoad = cms.double( 0.6 ),
    barrelClusterProducer = cms.string( "hltMulti5x5BasicClustersActivity" ),
    seedTransverseEnergyThreshold = cms.double( 1.0 ),
    endcapSuperclusterCollection = cms.string( "multi5x5EndcapSuperClusters" ),
    barrelEtaSearchRoad = cms.double( 0.06 ),
    bremRecoveryPset = cms.PSet( 
      barrel = cms.PSet( 
        cryVec = cms.vint32( 16, 13, 11, 10, 9, 8, 7, 6, 5, 4, 3 ),
        cryMin = cms.int32( 2 ),
        etVec = cms.vdouble( 5.0, 10.0, 15.0, 20.0, 30.0, 40.0, 45.0, 55.0, 135.0, 195.0, 225.0 )
      ),
      endcap = cms.PSet( 
        a = cms.double( 47.85 ),
        c = cms.double( 0.1201 ),
        b = cms.double( 108.8 )
      )
    ),
    doEndcaps = cms.bool( True ),
    endcapClusterCollection = cms.string( "multi5x5EndcapBasicClusters" ),
    doBarrel = cms.bool( False )
)
# Associate preshower clusters to the endcap multi5x5 superclusters.
process.hltMulti5x5SuperClustersWithPreshowerActivity = cms.EDProducer( "PreshowerClusterProducer",
    assocSClusterCollection = cms.string( "" ),
    preshStripEnergyCut = cms.double( 0.0 ),
    preshClusterCollectionY = cms.string( "preshowerYClusters" ),
    preshClusterCollectionX = cms.string( "preshowerXClusters" ),
    etThresh = cms.double( 0.0 ),
    preshRecHitProducer = cms.InputTag( 'hltESRecHitAll','EcalRecHitsES' ),
    endcapSClusterProducer = cms.InputTag( 'hltMulti5x5SuperClustersActivity','multi5x5EndcapSuperClusters' ),
    preshNclust = cms.int32( 4 ),
    preshClusterEnergyCut = cms.double( 0.0 ),
    preshSeededNstrip = cms.int32( 15 )
)
# Energy-corrected endcap (multi5x5 + preshower) superclusters, Et > 5 GeV.
process.hltCorrectedMulti5x5SuperClustersWithPreshowerActivity = cms.EDProducer( "EgammaSCCorrectionMaker",
    corectedSuperClusterCollection = cms.string( "" ),
    sigmaElectronicNoise = cms.double( 0.15 ),
    superClusterAlgo = cms.string( "Multi5x5" ),
    etThresh = cms.double( 5.0 ),
    rawSuperClusterProducer = cms.InputTag( "hltMulti5x5SuperClustersWithPreshowerActivity" ),
    applyEnergyCorrection = cms.bool( True ),
    isl_fCorrPset = cms.PSet(  ),
    VerbosityLevel = cms.string( "ERROR" ),
    recHitProducer = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEE' ),
    fix_fCorrPset = cms.PSet( 
      brLinearLowThr = cms.double( 0.9 ),
      fEtEtaVec = cms.vdouble( 1.0, -0.4386, -32.38, 0.6372, 15.67, -0.0928, -2.462, 1.138, 20.93 ),
      brLinearHighThr = cms.double( 6.0 ),
      fBremVec = cms.vdouble( -0.05228, 0.08738, 0.9508, 0.002677, 1.221 )
    ),
    modeEE = cms.int32( 0 ),
    modeEB = cms.int32( 0 ),
    dyn_fCorrPset = cms.PSet(  ),
    energyCorrectorName = cms.string( "EcalClusterEnergyCorrectionObjectSpecific" ),
    applyCrackCorrection = cms.bool( False ),
    hyb_fCorrPset = cms.PSet(  )
)
# Build RecoEcalCandidates from the corrected barrel (hybrid) and
# endcap (multi5x5) activity superclusters.
process.hltRecoEcalSuperClusterActivityCandidate = cms.EDProducer( "EgammaHLTRecoEcalCandidateProducers",
    scIslandEndcapProducer = cms.InputTag( "hltCorrectedMulti5x5SuperClustersWithPreshowerActivity" ),
    scHybridBarrelProducer = cms.InputTag( "hltCorrectedHybridSuperClustersActivity" ),
    recoEcalCandidateCollection = cms.string( "" )
)
# Wrap the activity supercluster candidates as a trigger-filter object so
# downstream egamma filters can consume them.
process.hltEcalActivitySuperClusterWrapper = cms.EDFilter( "HLTEgammaTriggerFilterObjectWrapper",
    saveTags = cms.bool( False ),
    doIsolated = cms.bool( True ),
    candIsolatedTag = cms.InputTag( "hltRecoEcalSuperClusterActivityCandidate" ),
    candNonIsolatedTag = cms.InputTag( "" )
)
# Et filter: >= 1 supercluster candidate with Et > 8 GeV (barrel and endcap).
process.hltSingleEle8NoCandEtFilter = cms.EDFilter( "HLTEgammaEtFilter",
    saveTags = cms.bool( False ),
    L1NonIsoCand = cms.InputTag( "" ),
    relaxed = cms.untracked.bool( False ),
    L1IsoCand = cms.InputTag( "hltRecoEcalSuperClusterActivityCandidate" ),
    inputTag = cms.InputTag( "hltEcalActivitySuperClusterWrapper" ),
    etcutEB = cms.double( 8.0 ),
    ncandcut = cms.int32( 1 ),
    etcutEE = cms.double( 8.0 )
)
# Cluster-shape (sigma-ieta-ieta) producer for the activity candidates.
process.hltActivityPhotonClusterShape = cms.EDProducer( "EgammaHLTClusterShapeProducer",
    recoEcalCandidateProducer = cms.InputTag( "hltRecoEcalSuperClusterActivityCandidate" ),
    ecalRechitEB = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEB' ),
    ecalRechitEE = cms.InputTag( 'hltEcalRecHitAll','EcalRecHitsEE' ),
    isIeta = cms.bool( True )
)
# CaloIdT cluster-shape cut: sigma-ieta-ieta < 0.011 (EB) / 0.031 (EE).
process.hltSingleEle8CaloIdTNoCandClusterShapeFilter = cms.EDFilter( "HLTEgammaGenericFilter",
    doIsolated = cms.bool( True ),
    nonIsoTag = cms.InputTag( "" ),
    L1NonIsoCand = cms.InputTag( "" ),
    saveTags = cms.bool( False ),
    thrOverE2EB = cms.double( -1.0 ),
    thrRegularEE = cms.double( 0.031 ),
    thrOverEEE = cms.double( -1.0 ),
    L1IsoCand = cms.InputTag( "hltRecoEcalSuperClusterActivityCandidate" ),
    thrOverEEB = cms.double( -1.0 ),
    thrRegularEB = cms.double( 0.011 ),
    lessThan = cms.bool( True ),
    useEt = cms.bool( False ),
    ncandcut = cms.int32( 1 ),
    isoTag = cms.InputTag( "hltActivityPhotonClusterShape" ),
    candTag = cms.InputTag( "hltSingleEle8NoCandEtFilter" ),
    thrOverE2EE = cms.double( -1.0 )
)
# HCAL energy in a cone around the candidate, used for the H/E cut below.
process.hltActivityPhotonHcalForHE = cms.EDProducer( "EgammaHLTHcalIsolationProducersRegional",
    eMinHE = cms.double( 0.8 ),
    hbheRecHitProducer = cms.InputTag( "hltHbhereco" ),
    effectiveAreaBarrel = cms.double( 0.105 ),
    outerCone = cms.double( 0.14 ),
    eMinHB = cms.double( 0.7 ),
    innerCone = cms.double( 0.0 ),
    etMinHE = cms.double( -1.0 ),
    etMinHB = cms.double( -1.0 ),
    rhoProducer = cms.InputTag( 'hltKT6CaloJets','rho' ),
    depth = cms.int32( -1 ),
    doRhoCorrection = cms.bool( False ),
    effectiveAreaEndcap = cms.double( 0.17 ),
    recoEcalCandidateProducer = cms.InputTag( "hltRecoEcalSuperClusterActivityCandidate" ),
    rhoMax = cms.double( 9.9999999E7 ),
    rhoScale = cms.double( 1.0 ),
    doEtSum = cms.bool( False )
)
# H/E cut: H/E < 0.1 (EB) / 0.075 (EE), relative thresholds (thrOverE*).
process.hltSingleEle8CaloIdTNoCandHEFilter = cms.EDFilter( "HLTEgammaGenericFilter",
    doIsolated = cms.bool( True ),
    nonIsoTag = cms.InputTag( "" ),
    L1NonIsoCand = cms.InputTag( "" ),
    saveTags = cms.bool( False ),
    thrOverE2EB = cms.double( -1.0 ),
    thrRegularEE = cms.double( -1.0 ),
    thrOverEEE = cms.double( 0.075 ),
    L1IsoCand = cms.InputTag( "hltRecoEcalSuperClusterActivityCandidate" ),
    thrOverEEB = cms.double( 0.1 ),
    thrRegularEB = cms.double( -1.0 ),
    lessThan = cms.bool( True ),
    useEt = cms.bool( False ),
    ncandcut = cms.int32( 1 ),
    isoTag = cms.InputTag( "hltActivityPhotonHcalForHE" ),
    candTag = cms.InputTag( "hltSingleEle8CaloIdTNoCandClusterShapeFilter" ),
    thrOverE2EE = cms.double( -1.0 )
)
# Electron pixel seeds from the corrected activity superclusters
# (tracker-driven seeding, SC Et > 3 GeV).
process.hltActivityStartUpElectronPixelSeeds = cms.EDProducer( "ElectronSeedProducer",
    endcapSuperClusters = cms.InputTag( "hltCorrectedMulti5x5SuperClustersWithPreshowerActivity" ),
    SeedConfiguration = cms.PSet( 
      searchInTIDTEC = cms.bool( True ),
      HighPtThreshold = cms.double( 35.0 ),
      r2MinF = cms.double( -0.15 ),
      OrderedHitsFactoryPSet = cms.PSet( 
        maxElement = cms.uint32( 0 ),
        ComponentName = cms.string( "StandardHitPairGenerator" ),
        SeedingLayers = cms.string( "hltESPMixedLayerPairs" ),
        useOnDemandTracker = cms.untracked.int32( 0 )
      ),
      DeltaPhi1Low = cms.double( 0.23 ),
      DeltaPhi1High = cms.double( 0.08 ),
      ePhiMin1 = cms.double( -0.08 ),
      PhiMin2 = cms.double( -0.0040 ),
      LowPtThreshold = cms.double( 3.0 ),
      RegionPSet = cms.PSet( 
        deltaPhiRegion = cms.double( 0.4 ),
        originHalfLength = cms.double( 15.0 ),
        useZInVertex = cms.bool( True ),
        deltaEtaRegion = cms.double( 0.1 ),
        ptMin = cms.double( 1.5 ),
        originRadius = cms.double( 0.2 ),
        VertexProducer = cms.InputTag( "dummyVertices" )
      ),
      maxHOverE = cms.double( 999999.0 ),
      dynamicPhiRoad = cms.bool( False ),
      ePhiMax1 = cms.double( 0.04 ),
      DeltaPhi2 = cms.double( 0.0040 ),
      measurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
      SizeWindowENeg = cms.double( 0.675 ),
      nSigmasDeltaZ1 = cms.double( 5.0 ),
      rMaxI = cms.double( 0.2 ),
      rMinI = cms.double( -0.2 ),
      preFilteredSeeds = cms.bool( True ),
      r2MaxF = cms.double( 0.15 ),
      pPhiMin1 = cms.double( -0.04 ),
      initialSeeds = cms.InputTag( "noSeedsHere" ),
      pPhiMax1 = cms.double( 0.08 ),
      hbheModule = cms.string( "hbhereco" ),
      SCEtCut = cms.double( 3.0 ),
      z2MaxB = cms.double( 0.09 ),
      fromTrackerSeeds = cms.bool( True ),
      hcalRecHits = cms.InputTag( "hltHbhereco" ),
      z2MinB = cms.double( -0.09 ),
      hbheInstance = cms.string( "" ),
      PhiMax2 = cms.double( 0.0040 ),
      hOverEConeSize = cms.double( 0.0 ),
      hOverEHBMinE = cms.double( 999999.0 ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      applyHOverECut = cms.bool( False ),
      hOverEHFMinE = cms.double( 999999.0 )
    ),
    barrelSuperClusters = cms.InputTag( "hltCorrectedHybridSuperClustersActivity" )
)
# Require >= 1 candidate with at least one matched pixel seed.
process.hltSingleEle8CaloIdTNoCandPixelMatchFilter = cms.EDFilter( "HLTElectronPixelMatchFilter",
    saveTags = cms.bool( False ),
    doIsolated = cms.bool( True ),
    L1NonIsoCand = cms.InputTag( "" ),
    L1NonIsoPixelSeedsTag = cms.InputTag( "" ),
    L1IsoCand = cms.InputTag( "hltRecoEcalSuperClusterActivityCandidate" ),
    npixelmatchcut = cms.double( 1.0 ),
    ncandcut = cms.int32( 1 ),
    candTag = cms.InputTag( "hltSingleEle8CaloIdTNoCandHEFilter" ),
    L1IsoPixelSeedsTag = cms.InputTag( "hltActivityStartUpElectronPixelSeeds" )
)
# CKF track candidates grown from the activity electron pixel seeds.
process.hltCkfActivityTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    src = cms.InputTag( "hltActivityStartUpElectronPixelSeeds" ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 ),
    TransientInitialStateEstimatorParameters = cms.PSet( 
      propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
    ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    cleanTrajectoryAfterInOut = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    doSeedingRegionRebuilding = cms.bool( False ),
    maxNSeeds = cms.uint32( 100000 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    TrajectoryBuilder = cms.string( "hltESPCkfTrajectoryBuilder" )
)
# Final track fit (KF fitting-smoother) of the CKF candidates.
process.hltCtfActivityWithMaterialTracks = cms.EDProducer( "TrackProducer",
    src = cms.InputTag( "hltCkfActivityTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPKFFittingSmoother" ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTracker = cms.string( "" ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    NavigationSchool = cms.string( "" ),
    TrajectoryInEvent = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "undefAlgorithm" ),
    Propagator = cms.string( "PropagatorWithMaterial" )
)
# Build pixel-match electrons by associating the CTF tracks to the
# supercluster candidates.
process.hltPixelMatchElectronsActivity = cms.EDProducer( "EgammaHLTPixelMatchElectronProducers",
    BSProducer = cms.InputTag( "hltOnlineBeamSpot" ),
    UseGsfTracks = cms.bool( False ),
    TrackProducer = cms.InputTag( "hltCtfActivityWithMaterialTracks" ),
    GsfTrackProducer = cms.InputTag( "" )
)
# TrkIdVL |1/E - 1/p| cut: < 0.03 in both barrel and endcap.
process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandOneOEMinusOneOPFilter = cms.EDFilter( "HLTElectronOneOEMinusOneOPFilterRegional",
    saveTags = cms.bool( False ),
    doIsolated = cms.bool( True ),
    electronNonIsolatedProducer = cms.InputTag( "" ),
    barrelcut = cms.double( 0.03 ),
    electronIsolatedProducer = cms.InputTag( "hltPixelMatchElectronsActivity" ),
    ncandcut = cms.int32( 1 ),
    candTag = cms.InputTag( "hltSingleEle8CaloIdTNoCandPixelMatchFilter" ),
    endcapcut = cms.double( 0.03 )
)
# Track-cluster matching variables (Deta, Dphi) for the activity electrons.
process.hltElectronActivityDetaDphi = cms.EDProducer( "EgammaHLTElectronDetaDphiProducer",
    variablesAtVtx = cms.bool( False ),
    useSCRefs = cms.bool( False ),
    BSProducer = cms.InputTag( "hltOnlineBeamSpot" ),
    electronProducer = cms.InputTag( "hltPixelMatchElectronsActivity" ),
    recoEcalCandidateProducer = cms.InputTag( "" ),
    useTrackProjectionToEcal = cms.bool( False )
)
# TrkIdVL Deta cut: |Deta| < 0.01 in both barrel and endcap.
process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandDetaFilter = cms.EDFilter( "HLTElectronGenericFilter",
    doIsolated = cms.bool( True ),
    nonIsoTag = cms.InputTag( "" ),
    L1NonIsoCand = cms.InputTag( "" ),
    thrTimesPtEB = cms.double( -1.0 ),
    saveTags = cms.bool( False ),
    thrRegularEE = cms.double( 0.01 ),
    L1IsoCand = cms.InputTag( "hltPixelMatchElectronsActivity" ),
    thrRegularEB = cms.double( 0.01 ),
    lessThan = cms.bool( True ),
    ncandcut = cms.int32( 1 ),
    isoTag = cms.InputTag( 'hltElectronActivityDetaDphi','Deta' ),
    candTag = cms.InputTag( "hltSingleElectronEt8CaloIdTTrkIdVLNoCandOneOEMinusOneOPFilter" ),
    thrTimesPtEE = cms.double( -1.0 ),
    thrOverPtEE = cms.double( -1.0 ),
    thrOverPtEB = cms.double( -1.0 )
)
process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandDphiFilter = cms.EDFilter( "HLTElectronGenericFilter",
doIsolated = cms.bool( True ),
nonIsoTag = cms.InputTag( "" ),
L1NonIsoCand = cms.InputTag( "" ),
thrTimesPtEB = cms.double( -1.0 ),
saveTags = cms.bool( True ),
thrRegularEE = cms.double( 0.1 ),
L1IsoCand = cms.InputTag( "hltPixelMatchElectronsActivity" ),
thrRegularEB = cms.double( 0.15 ),
lessThan = cms.bool( True ),
ncandcut = cms.int32( 1 ),
isoTag = cms.InputTag( 'hltElectronActivityDetaDphi','Dphi' ),
candTag = cms.InputTag( "hltSingleElectronEt8CaloIdTTrkIdVLNoCandDetaFilter" ),
thrTimesPtEE = cms.double( -1.0 ),
thrOverPtEE = cms.double( -1.0 ),
thrOverPtEB = cms.double( -1.0 )
)
# Muon isolation for the Mu0_HTT100 leg: requires >= 1 L3 muon passing the
# relative-isolation (Iso1p0) deposit map.
process.hltL1Mu0HTT100L3RelIso1p0MuonIsoFilter = cms.EDFilter( "HLTMuonIsoFilter",
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltL1Mu0HTT100L3Filtered5" ),
    MinN = cms.int32( 1 ),
    IsolatorPSet = cms.PSet( ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    DepTag = cms.VInputTag( 'hltL3MuonCombRelIsolationsIso1p0' )
)
# Electron-muon invariant-mass filter: >= 1 e-mu pair with m(e,mu) > 8 GeV
# (upper cut effectively open), combining the isolated muon leg above with
# the CaloIdT/TrkIdVL electron leg.
process.hltL1Mu0HTT100Mu5Ele8CaloIdTTrkIdVLMass8Filter = cms.EDFilter( "HLTElectronMuonInvMassFilter",
    saveTags = cms.bool( True ),
    lowerMassCut = cms.double( 8.0 ),
    ElectronL1IsoCand = cms.InputTag( "hltPixelMatchElectronsActivity" ),
    electronRelaxed = cms.untracked.bool( True ),
    MuonCand = cms.InputTag( "hltL3MuonCandidates" ),
    ElectronL1NonIsoCand = cms.InputTag( "hltPixelMatchElectronsActivity" ),
    muonPrevCandTag = cms.InputTag( "hltL1Mu0HTT100L3RelIso1p0MuonIsoFilter" ),
    ncandcut = cms.int32( 1 ),
    upperMassCut = cms.double( 999999.0 ),
    elePrevCandTag = cms.InputTag( "hltSingleElectronEt8CaloIdTTrkIdVLNoCandDphiFilter" )
)
# HLT prescale module for the RelIso1p0Mu5_Ele8_..._PFNoPUHT225 path.
process.hltPreRelIso1p0Mu5Ele8CaloIdTTrkIdVLMass8PFNoPUHT225 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
process.hltL1Mu4HTT125L1MuFiltered0 = cms.EDFilter( "HLTMuonL1Filter",
saveTags = cms.bool( False ),
CSCTFtag = cms.InputTag( "unused" ),
PreviousCandTag = cms.InputTag( "hltL1sL1Mu4HTT125" ),
MinPt = cms.double( 0.0 ),
MinN = cms.int32( 1 ),
MaxEta = cms.double( 2.5 ),
SelectQualities = cms.vint32( ),
CandTag = cms.InputTag( "hltL1extraParticles" ),
ExcludeSingleSegmentCSC = cms.bool( False )
)
process.hltL1Mu4HTT125L2Filtered0 = cms.EDFilter( "HLTMuonL2PreFilter",
saveTags = cms.bool( True ),
MaxDr = cms.double( 9999.0 ),
CutOnChambers = cms.bool( False ),
PreviousCandTag = cms.InputTag( "hltL1Mu4HTT125L1MuFiltered0" ),
MinPt = cms.double( 0.0 ),
MinN = cms.int32( 1 ),
SeedMapTag = cms.InputTag( "hltL2Muons" ),
MaxEta = cms.double( 2.5 ),
MinNhits = cms.vint32( 0 ),
MinDxySig = cms.double( -1.0 ),
MinNchambers = cms.vint32( 0 ),
AbsEtaBins = cms.vdouble( 5.0 ),
MaxDz = cms.double( 9999.0 ),
CandTag = cms.InputTag( "hltL2MuonCandidates" ),
BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
MinDr = cms.double( -1.0 ),
NSigmaPt = cms.double( 0.0 ),
MinNstations = cms.vint32( 0 )
)
process.hltL1Mu4HTT125L3Filtered5 = cms.EDFilter( "HLTMuonL3PreFilter",
MaxNormalizedChi2 = cms.double( 9999.0 ),
saveTags = cms.bool( True ),
PreviousCandTag = cms.InputTag( "hltL1Mu4HTT125L2Filtered0" ),
MinNmuonHits = cms.int32( 0 ),
MinN = cms.int32( 1 ),
MinTrackPt = cms.double( 0.0 ),
MaxEta = cms.double( 2.5 ),
MaxDXYBeamSpot = cms.double( 9999.0 ),
MinNhits = cms.int32( 0 ),
MinDxySig = cms.double( -1.0 ),
NSigmaPt = cms.double( 0.0 ),
MaxDz = cms.double( 9999.0 ),
MaxPtDifference = cms.double( 9999.0 ),
MaxDr = cms.double( 2.0 ),
CandTag = cms.InputTag( "hltL3MuonCandidates" ),
MinDr = cms.double( -1.0 ),
BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
MinPt = cms.double( 5.0 )
)
process.hltL1Mu4HTT125L3RelIso1p0MuonIsoFilter = cms.EDFilter( "HLTMuonIsoFilter",
saveTags = cms.bool( True ),
PreviousCandTag = cms.InputTag( "hltL1Mu4HTT125L3Filtered5" ),
MinN = cms.int32( 1 ),
IsolatorPSet = cms.PSet( ),
CandTag = cms.InputTag( "hltL3MuonCandidates" ),
DepTag = cms.VInputTag( 'hltL3MuonCombRelIsolationsIso1p0' )
)
process.hltL1Mu4HTT125Mu5Ele8CaloIdTTrkIdVLMass8Filter = cms.EDFilter( "HLTElectronMuonInvMassFilter",
saveTags = cms.bool( True ),
lowerMassCut = cms.double( 8.0 ),
ElectronL1IsoCand = cms.InputTag( "hltPixelMatchElectronsActivity" ),
electronRelaxed = cms.untracked.bool( True ),
MuonCand = cms.InputTag( "hltL3MuonCandidates" ),
ElectronL1NonIsoCand = cms.InputTag( "hltPixelMatchElectronsActivity" ),
muonPrevCandTag = cms.InputTag( "hltL1Mu4HTT125L3RelIso1p0MuonIsoFilter" ),
ncandcut = cms.int32( 1 ),
upperMassCut = cms.double( 999999.0 ),
elePrevCandTag = cms.InputTag( "hltSingleElectronEt8CaloIdTTrkIdVLNoCandDphiFilter" )
)
process.hltPreMu8Ele8CaloIdTTrkIdVLMass8PFNoPUHT175 = cms.EDFilter( "HLTPrescaler",
L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
offset = cms.uint32( 0 )
)
# --- (Mu0_HTT100 OR Mu4_HTT125)-seeded muon chain for the Mu8 leg -----------
# L1 muon filter: >= 1 L1 muon from the OR'd seed, |eta| < 2.5, no pT cut.
process.hltL1Mu0HTT100ORMu4HTT125L1MuFiltered0 = cms.EDFilter( "HLTMuonL1Filter",
    saveTags = cms.bool( False ),
    CSCTFtag = cms.InputTag( "unused" ),
    PreviousCandTag = cms.InputTag( "hltL1sL1Mu0HTT100ORL1Mu4HTT125" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    MaxEta = cms.double( 2.5 ),
    SelectQualities = cms.vint32( ),
    CandTag = cms.InputTag( "hltL1extraParticles" ),
    ExcludeSingleSegmentCSC = cms.bool( False )
)
# L2 muon pre-filter, all cuts open.
process.hltL1Mu0HTT100ORMu4HTT125L2Filtered0 = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( True ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltL1Mu0HTT100ORMu4HTT125L1MuFiltered0" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 2.5 ),
    MinNhits = cms.vint32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidates" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0 )
)
# L3 muon pre-filter: >= 1 L3 muon with pT > 8 GeV, |eta| < 2.5.
process.hltL1Mu0HTT100ORMu4HTT125L3Filtered8 = cms.EDFilter( "HLTMuonL3PreFilter",
    MaxNormalizedChi2 = cms.double( 9999.0 ),
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltL1Mu0HTT100ORMu4HTT125L2Filtered0" ),
    MinNmuonHits = cms.int32( 0 ),
    MinN = cms.int32( 1 ),
    MinTrackPt = cms.double( 0.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxDXYBeamSpot = cms.double( 9999.0 ),
    MinNhits = cms.int32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MaxDz = cms.double( 9999.0 ),
    MaxPtDifference = cms.double( 9999.0 ),
    MaxDr = cms.double( 2.0 ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    MinDr = cms.double( -1.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinPt = cms.double( 8.0 )
)
# e-mu invariant mass > 8 GeV combining the Mu8 leg with the Ele8 leg.
process.hltMu8Ele8CaloIdTTrkIdVLMass8Filter = cms.EDFilter( "HLTElectronMuonInvMassFilter",
    saveTags = cms.bool( True ),
    lowerMassCut = cms.double( 8.0 ),
    ElectronL1IsoCand = cms.InputTag( "hltPixelMatchElectronsActivity" ),
    electronRelaxed = cms.untracked.bool( True ),
    MuonCand = cms.InputTag( "hltL3MuonCandidates" ),
    ElectronL1NonIsoCand = cms.InputTag( "hltPixelMatchElectronsActivity" ),
    muonPrevCandTag = cms.InputTag( "hltL1Mu0HTT100ORMu4HTT125L3Filtered8" ),
    ncandcut = cms.int32( 1 ),
    upperMassCut = cms.double( 999999.0 ),
    elePrevCandTag = cms.InputTag( "hltSingleElectronEt8CaloIdTTrkIdVLNoCandDphiFilter" )
)
# HLT prescale modules for the PFNoPUHT225 and PFNoPUHT350_Mu15_PFMET45 paths.
process.hltPreMu8Ele8CaloIdTTrkIdVLMass8PFNoPUHT225 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
process.hltPrePFNoPUHT350Mu15PFMET45 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# Calo HT filter: HT > 250 GeV from hltHtMht (MHT/Meff cuts open).
process.hltHt250 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( False ),
    mhtLabels = cms.VInputTag( 'hltHtMht' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 0.0 ),
    htLabels = cms.VInputTag( 'hltHtMht' ),
    minHt = cms.vdouble( 250.0 )
)
# L1 muon filter for the HTT150 single-muon chain (no pT cut, |eta| < 2.5).
process.hltHTT150L1MuFiltered0 = cms.EDFilter( "HLTMuonL1Filter",
    saveTags = cms.bool( False ),
    CSCTFtag = cms.InputTag( "unused" ),
    PreviousCandTag = cms.InputTag( "hltL1sL1SingleMuOpenCandidate" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    MaxEta = cms.double( 2.5 ),
    SelectQualities = cms.vint32( ),
    CandTag = cms.InputTag( "hltL1extraParticles" ),
    ExcludeSingleSegmentCSC = cms.bool( False )
)
# L2 muon pre-filter: pT > 10 GeV.
process.hltL1HTT150singleMuL2PreFiltered10 = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( True ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltHTT150L1MuFiltered0" ),
    MinPt = cms.double( 10.0 ),
    MinN = cms.int32( 1 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 2.5 ),
    MinNhits = cms.vint32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidates" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0 )
)
# L3 muon pre-filter: pT > 15 GeV (Mu15 leg).
process.hltL1HTT150singleMuL3PreFiltered15 = cms.EDFilter( "HLTMuonL3PreFilter",
    MaxNormalizedChi2 = cms.double( 9999.0 ),
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltL1HTT150singleMuL2PreFiltered10" ),
    MinNmuonHits = cms.int32( 0 ),
    MinN = cms.int32( 1 ),
    MinTrackPt = cms.double( 0.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxDXYBeamSpot = cms.double( 9999.0 ),
    MinNhits = cms.int32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MaxDz = cms.double( 9999.0 ),
    MaxPtDifference = cms.double( 9999.0 ),
    MaxDr = cms.double( 2.0 ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    MinDr = cms.double( -1.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinPt = cms.double( 15.0 )
)
# PF MET-like quantity built by HLTMhtProducer from all anti-kT 5 PF jets
# (usePt = True, no eta/pT jet selection).
process.hltPFMETProducer = cms.EDProducer( "HLTMhtProducer",
    usePt = cms.bool( True ),
    inputJetTag = cms.InputTag( "hltAntiKT5PFJets" ),
    etaJet = cms.double( 9999.0 ),
    minPtJet = cms.double( 0.0 )
)
# Combined filter: PF(NoPU) HT > 350 GeV AND PF MHT > 45 GeV.
process.hltPFHT350PFMET45 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( True ),
    mhtLabels = cms.VInputTag( 'hltPFMETProducer' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 45.0 ),
    htLabels = cms.VInputTag( 'hltPFHTNoPU' ),
    minHt = cms.vdouble( 350.0 )
)
# HLT prescale module for PFNoPUHT350_Mu15_PFMET50.
process.hltPrePFNoPUHT350Mu15PFMET50 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# PF(NoPU) HT > 350 GeV AND PF MHT > 50 GeV.
process.hltPFHT350PFMET50 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( True ),
    mhtLabels = cms.VInputTag( 'hltPFMETProducer' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 50.0 ),
    htLabels = cms.VInputTag( 'hltPFHTNoPU' ),
    minHt = cms.vdouble( 350.0 )
)
# HLT prescale module for PFNoPUHT400_Mu5_PFMET45.
process.hltPrePFNoPUHT400Mu5PFMET45 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# Calo HT filter: HT > 300 GeV.
process.hltHt300 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( False ),
    mhtLabels = cms.VInputTag( 'hltHtMht' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 0.0 ),
    htLabels = cms.VInputTag( 'hltHtMht' ),
    minHt = cms.vdouble( 300.0 )
)
# L2 muon pre-filter with all cuts open (MinPt = 0) for the Mu5 leg.
process.hltL1HTT150singleMuL2PreFiltered0 = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( True ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltHTT150L1MuFiltered0" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 2.5 ),
    MinNhits = cms.vint32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidates" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0 )
)
# L3 muon pre-filter: pT > 5 GeV (Mu5 leg).
process.hltL1HTT150singleMuL3PreFiltered5 = cms.EDFilter( "HLTMuonL3PreFilter",
    MaxNormalizedChi2 = cms.double( 9999.0 ),
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltL1HTT150singleMuL2PreFiltered0" ),
    MinNmuonHits = cms.int32( 0 ),
    MinN = cms.int32( 1 ),
    MinTrackPt = cms.double( 0.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxDXYBeamSpot = cms.double( 9999.0 ),
    MinNhits = cms.int32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MaxDz = cms.double( 9999.0 ),
    MaxPtDifference = cms.double( 9999.0 ),
    MaxDr = cms.double( 2.0 ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    MinDr = cms.double( -1.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinPt = cms.double( 5.0 )
)
# PF(NoPU) HT > 400 GeV AND PF MHT > 45 GeV.
process.hltPFHT400PFMET45 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( True ),
    mhtLabels = cms.VInputTag( 'hltPFMETProducer' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 45.0 ),
    htLabels = cms.VInputTag( 'hltPFHTNoPU' ),
    minHt = cms.vdouble( 400.0 )
)
# HLT prescale module for PFNoPUHT400_Mu5_PFMET50.
process.hltPrePFNoPUHT400Mu5PFMET50 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# PF(NoPU) HT > 400 GeV AND PF MHT > 50 GeV.
process.hltPFHT400PFMET50 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( True ),
    mhtLabels = cms.VInputTag( 'hltPFMETProducer' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 50.0 ),
    htLabels = cms.VInputTag( 'hltPFHTNoPU' ),
    minHt = cms.vdouble( 400.0 )
)
# HLT prescale module for Mu40_PFNoPUHT350.
process.hltPreMu40PFNoPUHT350 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
)
# --- Mu40 / Mu60 + PFNoPUHT350 chain ----------------------------------------
# L1 muon filter for the OR'd Mu0_HTT100 / Mu4_HTT125 seed.
process.hltL1Mu0HTT100ORL1Mu4HTT125L1MuFiltered0 = cms.EDFilter( "HLTMuonL1Filter",
    saveTags = cms.bool( False ),
    CSCTFtag = cms.InputTag( "unused" ),
    PreviousCandTag = cms.InputTag( "hltL1sL1Mu0HTT100ORL1Mu4HTT125" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    MaxEta = cms.double( 2.5 ),
    SelectQualities = cms.vint32( ),
    CandTag = cms.InputTag( "hltL1extraParticles" ),
    ExcludeSingleSegmentCSC = cms.bool( False )
)
# L2 muon "quality" pre-filter: pT > 16 GeV with eta-binned hit/station
# requirements (bins at |eta| = 0.9, 1.5, 2.1, 5.0; >= 1 hit and >= 2
# stations required in the 2nd and 4th bins).
process.hltL1Mu0HTT100ORL1Mu4HTT125L2QualMuFiltered16 = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( True ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltL1Mu0HTT100ORL1Mu4HTT125L1MuFiltered0" ),
    MinPt = cms.double( 16.0 ),
    MinN = cms.int32( 1 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 2.5 ),
    MinNhits = cms.vint32( 0, 1, 0, 1 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 0.9, 1.5, 2.1, 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidates" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0, 2, 0, 2 )
)
# L3 muon pre-filter: pT > 40 GeV (Mu40 leg).
process.hltL1Mu0HTT100ORL1Mu4HTT125L2QualL3MuFiltered40 = cms.EDFilter( "HLTMuonL3PreFilter",
    MaxNormalizedChi2 = cms.double( 9999.0 ),
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltL1Mu0HTT100ORL1Mu4HTT125L2QualMuFiltered16" ),
    MinNmuonHits = cms.int32( 0 ),
    MinN = cms.int32( 1 ),
    MinTrackPt = cms.double( 0.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxDXYBeamSpot = cms.double( 9999.0 ),
    MinNhits = cms.int32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MaxDz = cms.double( 9999.0 ),
    MaxPtDifference = cms.double( 9999.0 ),
    MaxDr = cms.double( 2.0 ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    MinDr = cms.double( -1.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinPt = cms.double( 40.0 )
)
# Calo HT filter: HT > 200 GeV.
process.hltHt200 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( False ),
    mhtLabels = cms.VInputTag( 'hltHtMht' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 0.0 ),
    htLabels = cms.VInputTag( 'hltHtMht' ),
    minHt = cms.vdouble( 200.0 )
)
# PF(NoPU) HT > 350 GeV (no MHT requirement).
process.hltPFHT350NoPU = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( True ),
    mhtLabels = cms.VInputTag( 'hltPFHTNoPU' ),
    meffSlope = cms.vdouble( 1.0 ),
    minMeff = cms.vdouble( 0.0 ),
    minMht = cms.vdouble( 0.0 ),
    htLabels = cms.VInputTag( 'hltPFHTNoPU' ),
    minHt = cms.vdouble( 350.0 )
)
# HLT prescale module for Mu60_PFNoPUHT350.
process.hltPreMu60PFNoPUHT350 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# L3 muon pre-filter: pT > 60 GeV (Mu60 leg; same chain as the Mu40 filter).
process.hltL1Mu0HTT100ORL1Mu4HTT125L2QualL3MuFiltered60 = cms.EDFilter( "HLTMuonL3PreFilter",
    MaxNormalizedChi2 = cms.double( 9999.0 ),
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltL1Mu0HTT100ORL1Mu4HTT125L2QualMuFiltered16" ),
    MinNmuonHits = cms.int32( 0 ),
    MinN = cms.int32( 1 ),
    MinTrackPt = cms.double( 0.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxDXYBeamSpot = cms.double( 9999.0 ),
    MinNhits = cms.int32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MaxDz = cms.double( 9999.0 ),
    MaxPtDifference = cms.double( 9999.0 ),
    MaxDr = cms.double( 2.0 ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    MinDr = cms.double( -1.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinPt = cms.double( 60.0 )
)
# HLT prescale module for IsoMu12_RsqMR30_Rsq0p04_MR200 (razor path).
process.hltPreIsoMu12RsqMR30Rsq0p04MR200 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# Raw calo MET ("RawCaloMET") from calo towers.  NOTE(review): this generated
# dump carries the full METProducer parameter super-set — calo/PF resolution
# tables plus TCMET-style track/electron/vertex parameters; only the subset
# relevant to METType = "CaloMET" with CandidateCollection input is expected
# to matter here — confirm against the METProducer implementation.
process.hltMet = cms.EDProducer( "METProducer",
    resolutionsEra = cms.string( "Spring10" ),
    HB_EtResPar = cms.vdouble( 0.0, 1.22, 0.05 ),
    EE_PhiResPar = cms.vdouble( 0.02511 ),
    jdpt9 = cms.vdouble( 0.843, 0.885, 1.245, 1.665, 1.944, 1.981, 1.972, 2.875, 3.923, 7.51 ),
    jdpt8 = cms.vdouble( 0.889, 0.939, 1.166, 1.365, 1.553, 1.805, 2.06, 2.22, 2.268, 2.247 ),
    jdpt7 = cms.vdouble( 1.094, 1.139, 1.436, 1.672, 1.831, 2.05, 2.267, 2.549, 2.785, 2.86 ),
    jdpt6 = cms.vdouble( 1.213, 1.298, 1.716, 2.015, 2.191, 2.612, 2.863, 2.879, 2.925, 2.902 ),
    jdpt5 = cms.vdouble( 1.049, 1.149, 1.607, 1.869, 2.012, 2.219, 2.289, 2.412, 2.695, 2.865 ),
    jdpt4 = cms.vdouble( 0.85, 0.961, 1.337, 1.593, 1.854, 2.005, 2.209, 2.533, 2.812, 3.047 ),
    jdpt3 = cms.vdouble( 0.929, 1.04, 1.46, 1.74, 2.042, 2.289, 2.639, 2.837, 2.946, 2.971 ),
    jdpt2 = cms.vdouble( 0.841, 0.937, 1.316, 1.605, 1.919, 2.295, 2.562, 2.722, 2.943, 3.293 ),
    jdpt1 = cms.vdouble( 0.718, 0.813, 1.133, 1.384, 1.588, 1.841, 2.115, 2.379, 2.508, 2.772 ),
    jdpt0 = cms.vdouble( 0.749, 0.829, 1.099, 1.355, 1.584, 1.807, 2.035, 2.217, 2.378, 2.591 ),
    HE_EtResPar = cms.vdouble( 0.0, 1.3, 0.05 ),
    alias = cms.string( "RawCaloMET" ),
    HF_PhiResPar = cms.vdouble( 0.05022 ),
    InputType = cms.string( "CandidateCollection" ),
    HE_PhiResPar = cms.vdouble( 0.02511 ),
    HB_PhiResPar = cms.vdouble( 0.02511 ),
    EE_EtResPar = cms.vdouble( 0.2, 0.03, 0.0050 ),
    noHF = cms.bool( False ),
    PF_PhiResType2 = cms.vdouble( 0.0020 ),
    PF_PhiResType3 = cms.vdouble( 0.0020 ),
    HF_EtResPar = cms.vdouble( 0.0, 1.82, 0.09 ),
    resolutionsAlgo = cms.string( "AK5PF" ),
    PF_PhiResType6 = cms.vdouble( 0.02511 ),
    PF_PhiResType7 = cms.vdouble( 0.02511 ),
    PF_PhiResType4 = cms.vdouble( 0.0028, 0.0, 0.0022 ),
    PF_PhiResType5 = cms.vdouble( 0.1, 0.1, 0.13 ),
    ptresolthreshold = cms.double( 10.0 ),
    METType = cms.string( "CaloMET" ),
    EB_EtResPar = cms.vdouble( 0.2, 0.03, 0.0050 ),
    PF_PhiResType1 = cms.vdouble( 0.0020 ),
    globalThreshold = cms.double( 0.3 ),
    EB_PhiResPar = cms.vdouble( 0.00502 ),
    src = cms.InputTag( "hltTowerMakerForAll" ),
    jdphi9 = cms.vdouble( 0.062, 0.059, 0.053, 0.047, 0.042, 0.045, 0.036, 0.032, 0.034, 0.044 ),
    jdphi8 = cms.vdouble( 0.059, 0.057, 0.051, 0.044, 0.038, 0.035, 0.037, 0.032, 0.028, 0.028 ),
    jdphi4 = cms.vdouble( 0.042, 0.042, 0.043, 0.042, 0.038, 0.036, 0.036, 0.033, 0.031, 0.031 ),
    jdphi3 = cms.vdouble( 0.042, 0.043, 0.044, 0.043, 0.041, 0.039, 0.039, 0.036, 0.034, 0.031 ),
    jdphi2 = cms.vdouble( 0.04, 0.04, 0.04, 0.04, 0.04, 0.038, 0.036, 0.035, 0.034, 0.033 ),
    jdphi1 = cms.vdouble( 0.034, 0.035, 0.035, 0.035, 0.035, 0.034, 0.031, 0.03, 0.029, 0.027 ),
    jdphi0 = cms.vdouble( 0.034, 0.034, 0.034, 0.034, 0.032, 0.031, 0.028, 0.027, 0.027, 0.027 ),
    jdphi7 = cms.vdouble( 0.077, 0.072, 0.059, 0.05, 0.045, 0.042, 0.039, 0.039, 0.037, 0.031 ),
    jdphi6 = cms.vdouble( 0.084, 0.08, 0.072, 0.065, 0.066, 0.06, 0.051, 0.049, 0.045, 0.045 ),
    jdphi5 = cms.vdouble( 0.069, 0.069, 0.064, 0.058, 0.053, 0.049, 0.049, 0.043, 0.039, 0.04 ),
    HO_EtResPar = cms.vdouble( 0.0, 1.3, 0.0050 ),
    HO_PhiResPar = cms.vdouble( 0.02511 ),
    PF_EtResType5 = cms.vdouble( 0.41, 0.52, 0.25 ),
    PF_EtResType4 = cms.vdouble( 0.042, 0.1, 0.0 ),
    PF_EtResType7 = cms.vdouble( 0.0, 1.22, 0.05 ),
    PF_EtResType6 = cms.vdouble( 0.0, 1.22, 0.05 ),
    PF_EtResType1 = cms.vdouble( 0.05, 0.0, 0.0 ),
    calculateSignificance = cms.bool( False ),
    PF_EtResType3 = cms.vdouble( 0.05, 0.0, 0.0 ),
    PF_EtResType2 = cms.vdouble( 0.05, 0.0, 0.0 ),
    usePt = cms.untracked.bool( False ),
    onlyFiducialParticles = cms.bool( False ),
    vertexRho = cms.double( 2.0 ),
    eVetoDeltaPhi = cms.double( 100.0 ),
    PFClustersHCAL = cms.InputTag( "particleFlowClusterHCAL" ),
    PFClustersHFHAD = cms.InputTag( "particleFlowClusterHFHAD" ),
    dupMinPt = cms.double( 0.0 ),
    chi2_tight_max = cms.double( 5.0 ),
    vertexZ = cms.double( 15.0 ),
    nLayersTight = cms.int32( 0 ),
    vertexNdof = cms.int32( 4 ),
    ptErr_max = cms.double( 0.2 ),
    corner = cms.double( 1.479 ),
    PFClustersECAL = cms.InputTag( "particleFlowClusterECAL" ),
    eta_max = cms.double( 2.65 ),
    muonInputTag = cms.InputTag( "muons" ),
    eVetoDeltaCotTheta = cms.double( 100.0 ),
    maxd0cut = cms.double( 0.3 ),
    PFClustersHFEM = cms.InputTag( "particleFlowClusterHFEM" ),
    d0cutb = cms.double( 0.5 ),
    checkTrackPropagation = cms.bool( False ),
    usePFClusters = cms.bool( False ),
    vertexMaxDZ = cms.double( 0.2 ),
    deltaRShower = cms.double( 0.01 ),
    chi2_max = cms.double( 5.0 ),
    maxpt_eta25 = cms.double( 0.0 ),
    track_algos = cms.vint32( ),
    ptErr_tight_max = cms.double( 0.2 ),
    maxTrackAlgo = cms.int32( 8 ),
    nLayers = cms.int32( 0 ),
    correctShowerTracks = cms.bool( False ),
    vetoDuplicates = cms.bool( False ),
    pt_max = cms.double( 100.0 ),
    radius = cms.double( 130.0 ),
    nhits_tight_min = cms.double( 9.0 ),
    beamSpotInputTag = cms.InputTag( "unused" ),
    dupDCotTh = cms.double( 6.0E-4 ),
    usedeltaRRejection = cms.bool( False ),
    trackInputTag = cms.InputTag( "generalTracks" ),
    dupDPhi = cms.double( 0.03 ),
    electronInputTag = cms.InputTag( "gsfElectrons" ),
    tcmetDepValueMap = cms.InputTag( 'muonTCMETValueMapProducer','muCorrData' ),
    d0cuta = cms.double( 0.015 ),
    hOverECut = cms.double( 0.1 ),
    electronVetoCone = cms.bool( True ),
    muonDepValueMap = cms.InputTag( 'muonMETValueMapProducer','muCorrData' ),
    metInputTag = cms.InputTag( "met" ),
    usePvtxd0 = cms.bool( False ),
    vertexInputTag = cms.InputTag( "offlinePrimaryVertices" ),
    zdist = cms.double( 314.0 ),
    nhits_min = cms.double( 6.0 ),
    eVetoDeltaR = cms.double( 0.015 ),
    maxpt_eta20 = cms.double( 100.0 ),
    pt_min = cms.double( 1.0 ),
    rf_type = cms.int32( 0 ),
    nMinOuterHits = cms.int32( 2 ),
    track_quality = cms.vint32( 2 ),
    isCosmics = cms.bool( False ),
    eVetoMinElectronPt = cms.double( 10.0 )
)
# --- Razor (R / MR) selection -----------------------------------------------
# Build the two razor hemispheres from corrected calo jets (pT > 40,
# |eta| < 3), applying a muon correction; events with more than 7 jets are
# accepted without hemisphere building (acceptNJ = True, maxNJ = 7).
process.hltRHemisphereMuCorr = cms.EDFilter( "HLTRHemisphere",
    acceptNJ = cms.bool( True ),
    maxEta = cms.double( 3.0 ),
    inputTag = cms.InputTag( "hltCaloJetL1FastJetCorrected" ),
    maxMuonEta = cms.double( 2.1 ),
    muonTag = cms.InputTag( "hltL3MuonCandidates" ),
    minJetPt = cms.double( 40.0 ),
    doMuonCorrection = cms.bool( True ),
    maxNJ = cms.int32( 7 )
)
# Razor filter: R*MR cut of 30 with MR > 200 and R > 0.2 (R^2 > 0.04),
# using calo MET (hltMet) and the hemispheres above.
process.hltRsqMR30Rsq0p04MR200MuCorr = cms.EDFilter( "HLTRFilter",
    acceptNJ = cms.bool( True ),
    doRPrime = cms.bool( False ),
    R2Offset = cms.double( -0.043 ),
    inputTag = cms.InputTag( "hltRHemisphereMuCorr" ),
    inputMetTag = cms.InputTag( "hltMet" ),
    RMRCut = cms.double( 30.0 ),
    MROffset = cms.double( 6.0 ),
    doMuonCorrection = cms.bool( True ),
    minMR = cms.double( 200.0 ),
    minR = cms.double( 0.2 )
)
# HLT prescale module for IsoMu12_RsqMR40_Rsq0p04_MR200.
process.hltPreIsoMu12RsqMR40Rsq0p04MR200 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# Same razor filter with the tighter R*MR cut of 40.
process.hltRsqMR40Rsq0p04MR200MuCorr = cms.EDFilter( "HLTRFilter",
    acceptNJ = cms.bool( True ),
    doRPrime = cms.bool( False ),
    R2Offset = cms.double( -0.043 ),
    inputTag = cms.InputTag( "hltRHemisphereMuCorr" ),
    inputMetTag = cms.InputTag( "hltMet" ),
    RMRCut = cms.double( 40.0 ),
    MROffset = cms.double( 6.0 ),
    doMuonCorrection = cms.bool( True ),
    minMR = cms.double( 200.0 ),
    minR = cms.double( 0.2 )
)
# --- DoubleMu14_Mass8 + PFMET paths -----------------------------------------
# L1 seed: L1_DoubleMu_10_Open OR L1_DoubleMu_10_3p5.
process.hltL1sL1DoubleMu10MuOpenORDoubleMu103p5 = cms.EDFilter( "HLTLevel1GTSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_DoubleMu_10_Open OR L1_DoubleMu_10_3p5" ),
    L1MuonCollectionTag = cms.InputTag( "hltL1extraParticles" ),
    L1UseL1TriggerObjectMaps = cms.bool( True ),
    L1UseAliasesForSeeding = cms.bool( True ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    L1CollectionsTag = cms.InputTag( "hltL1extraParticles" ),
    L1NrBxInEvent = cms.int32( 3 ),
    L1GtObjectMapTag = cms.InputTag( "hltL1GtObjectMap" ),
    L1TechTriggerSeeding = cms.bool( False )
)
# HLT prescale module for DoubleMu14_Mass8_PFMET40.
process.hltPreDoubleMu14Mass8PFMET40 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# L1 dimuon filter: >= 2 L1 muons, |eta| < 2.5, no pT cut.
process.hltL1DoubleMu10MuOpenORDoubleMu103p5L1DiMuFiltered0 = cms.EDFilter( "HLTMuonL1Filter",
    saveTags = cms.bool( False ),
    CSCTFtag = cms.InputTag( "unused" ),
    PreviousCandTag = cms.InputTag( "hltL1sL1DoubleMu10MuOpenORDoubleMu103p5" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 2 ),
    MaxEta = cms.double( 2.5 ),
    SelectQualities = cms.vint32( ),
    CandTag = cms.InputTag( "hltL1extraParticles" ),
    ExcludeSingleSegmentCSC = cms.bool( False )
)
# L2 dimuon pre-filter: >= 2 L2 muons, all cuts open.
process.hltL1DoubleMu10MuOpenORDoubleMu103p5L2DiMuFiltered0 = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( True ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltL1DoubleMu10MuOpenORDoubleMu103p5L1DiMuFiltered0" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 2 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 2.5 ),
    MinNhits = cms.vint32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidates" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0 )
)
# L3 dimuon filter: both muons with pT > 14 GeV (MinPtMin = 14) and
# pair invariant mass > 8 GeV; charge and angular cuts open.
process.hltL1DoubleMu10MuOpenORDoubleMu103p5L3DiMu14Mass8Filtered = cms.EDFilter( "HLTMuonDimuonL3Filter",
    saveTags = cms.bool( True ),
    ChargeOpt = cms.int32( 0 ),
    MaxPtMin = cms.vdouble( 1.0E125 ),
    FastAccept = cms.bool( False ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    PreviousCandTag = cms.InputTag( "hltL1DoubleMu10MuOpenORDoubleMu103p5L2DiMuFiltered0" ),
    MaxPtBalance = cms.double( 999999.0 ),
    MaxPtPair = cms.vdouble( 1.0E125 ),
    MaxAcop = cms.double( 999.0 ),
    MinPtMin = cms.vdouble( 14.0 ),
    MaxInvMass = cms.vdouble( 999999.0 ),
    MinPtMax = cms.vdouble( 0.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MaxDz = cms.double( 9999.0 ),
    MinPtPair = cms.vdouble( 0.0 ),
    MaxDr = cms.double( 9999.0 ),
    MinAcop = cms.double( -999.0 ),
    MaxDCAMuMu = cms.double( 9999999.0 ),
    MinNhits = cms.int32( 0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinPtBalance = cms.double( -1.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxRapidityPair = cms.double( 999999.0 ),
    CutCowboys = cms.bool( False ),
    MinInvMass = cms.vdouble( 8.0 )
)
# PF MHT > 40 GeV.
process.hltPFMET40Filter = cms.EDFilter( "HLTMhtFilter",
    saveTags = cms.bool( True ),
    minMht = cms.double( 40.0 ),
    inputMhtTag = cms.InputTag( "hltPFMETProducer" )
)
# HLT prescale module for DoubleMu14_Mass8_PFMET50.
process.hltPreDoubleMu14Mass8PFMET50 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# PF MHT > 50 GeV.
process.hltPFMET50Filter = cms.EDFilter( "HLTMhtFilter",
    saveTags = cms.bool( True ),
    minMht = cms.double( 50.0 ),
    inputMhtTag = cms.InputTag( "hltPFMETProducer" )
)
# --- Mu14_Ele14_CaloIdT_TrkIdVL_Mass8_PFMET40 muon leg ----------------------
# L1 seed: L1_Mu12_EG7 OR L1_MuOpen_EG12.
process.hltL1sL1Mu12EG7ORL1MuOpenEG12 = cms.EDFilter( "HLTLevel1GTSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_Mu12_EG7 OR L1_MuOpen_EG12" ),
    L1MuonCollectionTag = cms.InputTag( "hltL1extraParticles" ),
    L1UseL1TriggerObjectMaps = cms.bool( True ),
    L1UseAliasesForSeeding = cms.bool( True ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    L1CollectionsTag = cms.InputTag( "hltL1extraParticles" ),
    L1NrBxInEvent = cms.int32( 3 ),
    L1GtObjectMapTag = cms.InputTag( "hltL1GtObjectMap" ),
    L1TechTriggerSeeding = cms.bool( False )
)
# HLT prescale module for this path.
process.hltPreMu14Ele14CaloIdTTrkIdVLMass8PFMET40 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# L1 muon filter: >= 1 L1 muon, |eta| < 2.5, no pT cut.
process.hltL1Mu12EG7ORL1MuOpenEG12L1MuFiltered0 = cms.EDFilter( "HLTMuonL1Filter",
    saveTags = cms.bool( False ),
    CSCTFtag = cms.InputTag( "unused" ),
    PreviousCandTag = cms.InputTag( "hltL1sL1Mu12EG7ORL1MuOpenEG12" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    MaxEta = cms.double( 2.5 ),
    SelectQualities = cms.vint32( ),
    CandTag = cms.InputTag( "hltL1extraParticles" ),
    ExcludeSingleSegmentCSC = cms.bool( False )
)
# L2 muon pre-filter, all cuts open.
process.hltL1Mu12EG7ORL1MuOpenEG12L2MuFiltered0 = cms.EDFilter( "HLTMuonL2PreFilter",
    saveTags = cms.bool( True ),
    MaxDr = cms.double( 9999.0 ),
    CutOnChambers = cms.bool( False ),
    PreviousCandTag = cms.InputTag( "hltL1Mu12EG7ORL1MuOpenEG12L1MuFiltered0" ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    SeedMapTag = cms.InputTag( "hltL2Muons" ),
    MaxEta = cms.double( 2.5 ),
    MinNhits = cms.vint32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    MinNchambers = cms.vint32( 0 ),
    AbsEtaBins = cms.vdouble( 5.0 ),
    MaxDz = cms.double( 9999.0 ),
    CandTag = cms.InputTag( "hltL2MuonCandidates" ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinDr = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MinNstations = cms.vint32( 0 )
)
# L3 muon pre-filter: pT > 14 GeV (Mu14 leg; note MaxDr here is 999, looser
# than the 2.0 used by the other L3 filters in this menu).
process.hltL1Mu12EG7ORL1MuOpenEG12L3MuFiltered14 = cms.EDFilter( "HLTMuonL3PreFilter",
    MaxNormalizedChi2 = cms.double( 9999.0 ),
    saveTags = cms.bool( True ),
    PreviousCandTag = cms.InputTag( "hltL1Mu12EG7ORL1MuOpenEG12L2MuFiltered0" ),
    MinNmuonHits = cms.int32( 0 ),
    MinN = cms.int32( 1 ),
    MinTrackPt = cms.double( 0.0 ),
    MaxEta = cms.double( 2.5 ),
    MaxDXYBeamSpot = cms.double( 9999.0 ),
    MinNhits = cms.int32( 0 ),
    MinDxySig = cms.double( -1.0 ),
    NSigmaPt = cms.double( 0.0 ),
    MaxDz = cms.double( 9999.0 ),
    MaxPtDifference = cms.double( 9999.0 ),
    MaxDr = cms.double( 999.0 ),
    CandTag = cms.InputTag( "hltL3MuonCandidates" ),
    MinDr = cms.double( -1.0 ),
    BeamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    MinPt = cms.double( 14.0 )
)
# --- Regional ECAL unpacking for the e/gamma legs ---------------------------
# Select ECAL (+ preshower) FED regions around L1 e/gamma candidates
# (isolated and non-isolated, pT > 5, with eta/phi region margins).
process.hltEcalRegionalEgammaFEDs = cms.EDProducer( "EcalRawToRecHitRoI",
    JetJobPSet = cms.VPSet(
    ),
    sourceTag_es = cms.InputTag( "hltESRawToRecHitFacility" ),
    doES = cms.bool( True ),
    type = cms.string( "egamma" ),
    sourceTag = cms.InputTag( "hltEcalRawToRecHitFacility" ),
    EmJobPSet = cms.VPSet(
      cms.PSet(  regionEtaMargin = cms.double( 0.25 ),
        regionPhiMargin = cms.double( 0.4 ),
        Ptmin = cms.double( 5.0 ),
        Source = cms.InputTag( 'hltL1extraParticles','Isolated' )
      ),
      cms.PSet(  regionEtaMargin = cms.double( 0.25 ),
        regionPhiMargin = cms.double( 0.4 ),
        Ptmin = cms.double( 5.0 ),
        Source = cms.InputTag( 'hltL1extraParticles','NonIsolated' )
      )
    ),
    CandJobPSet = cms.VPSet(
    ),
    MuonJobPSet = cms.PSet(  ),
    esInstance = cms.untracked.string( "es" ),
    MuJobPSet = cms.PSet(  )
)
# Regional ECAL rechit production (EB/EE split output) with spike-cleaning
# thresholds (e4e1 / e6e2 topological cleaning parameters).
process.hltEcalRegionalEgammaRecHit = cms.EDProducer( "EcalRawToRecHitProducer",
    splitOutput = cms.bool( True ),
    rechitCollection = cms.string( "NotNeededsplitOutputTrue" ),
    EErechitCollection = cms.string( "EcalRecHitsEE" ),
    EBrechitCollection = cms.string( "EcalRecHitsEB" ),
    sourceTag = cms.InputTag( "hltEcalRegionalEgammaFEDs" ),
    cleaningConfig = cms.PSet(
      e6e2thresh = cms.double( 0.04 ),
      tightenCrack_e6e2_double = cms.double( 3.0 ),
      e4e1Threshold_endcap = cms.double( 0.3 ),
      tightenCrack_e4e1_single = cms.double( 3.0 ),
      tightenCrack_e1_double = cms.double( 2.0 ),
      cThreshold_barrel = cms.double( 4.0 ),
      e4e1Threshold_barrel = cms.double( 0.08 ),
      tightenCrack_e1_single = cms.double( 2.0 ),
      e4e1_b_barrel = cms.double( -0.024 ),
      e4e1_a_barrel = cms.double( 0.04 ),
      ignoreOutOfTimeThresh = cms.double( 1.0E9 ),
      cThreshold_endcap = cms.double( 15.0 ),
      e4e1_b_endcap = cms.double( -0.0125 ),
      e4e1_a_endcap = cms.double( 0.02 ),
      cThreshold_double = cms.double( 10.0 )
    ),
    lazyGetterTag = cms.InputTag( "hltEcalRawToRecHitFacility" )
)
process.hltESRegionalEgammaRecHit = cms.EDProducer( "EcalRawToRecHitProducer",
splitOutput = cms.bool( False ),
rechitCollection = cms.string( "EcalRecHitsES" ),
EErechitCollection = cms.string( "" ),
EBrechitCollection = cms.string( "" ),
sourceTag = cms.InputTag( 'hltEcalRegionalEgammaFEDs','es' ),
cleaningConfig = cms.PSet( ),
lazyGetterTag = cms.InputTag( "hltESRawToRecHitFacility" )
)
# --- Barrel superclustering (hybrid algorithm), seeded by L1 candidates ---

# Run the hybrid clustering on the regional EB rechits, only in regions around
# isolated + non-isolated L1 e/gamma candidates with ET in [5, 999] GeV.
# Seed threshold 1.5 GeV; rechits flagged 'kWeird' are excluded.
process.hltHybridSuperClustersL1Seeded = cms.EDProducer( "EgammaHLTHybridClusterProducer",
    xi = cms.double( 0.0 ),
    regionEtaMargin = cms.double( 0.14 ),
    regionPhiMargin = cms.double( 0.4 ),
    severityRecHitThreshold = cms.double( 4.0 ),
    RecHitFlagToBeExcluded = cms.vstring(  ),
    ecalhitcollection = cms.string( "EcalRecHitsEB" ),
    eThreshA = cms.double( 0.0030 ),
    basicclusterCollection = cms.string( "" ),
    eThreshB = cms.double( 0.1 ),
    dynamicPhiRoad = cms.bool( False ),
    RecHitSeverityToBeExcluded = cms.vstring( 'kWeird' ),
    l1UpperThr = cms.double( 999.0 ),
    excludeFlagged = cms.bool( True ),
    # Log-weighted position calculation parameters (shared with the multi5x5
    # producer below).
    posCalcParameters = cms.PSet( 
      T0_barl = cms.double( 7.4 ),
      LogWeighted = cms.bool( True ),
      T0_endc = cms.double( 3.1 ),
      T0_endcPresh = cms.double( 1.2 ),
      W0 = cms.double( 4.2 ),
      X0 = cms.double( 0.89 )
    ),
    l1LowerThr = cms.double( 5.0 ),
    doIsolated = cms.bool( True ),
    eseed = cms.double( 0.35 ),
    ethresh = cms.double( 0.1 ),
    ewing = cms.double( 0.0 ),
    useEtForXi = cms.bool( True ),
    step = cms.int32( 17 ),
    debugLevel = cms.string( "INFO" ),
    dynamicEThresh = cms.bool( False ),
    l1TagIsolated = cms.InputTag( 'hltL1extraParticles','Isolated' ),
    superclusterCollection = cms.string( "" ),
    HybridBarrelSeedThr = cms.double( 1.5 ),
    l1TagNonIsolated = cms.InputTag( 'hltL1extraParticles','NonIsolated' ),
    l1LowerThrIgnoreIsolation = cms.double( 0.0 ),
    ecalhitproducer = cms.InputTag( "hltEcalRegionalEgammaRecHit" )
)
# Apply the energy correction ("EcalClusterEnergyCorrectionObjectSpecific",
# hyb_fCorrPset brem/eta parametrisation) to the raw hybrid superclusters;
# keep superclusters with ET > 1 GeV.  No crack correction.
process.hltCorrectedHybridSuperClustersL1Seeded = cms.EDProducer( "EgammaSCCorrectionMaker",
    corectedSuperClusterCollection = cms.string( "" ),
    sigmaElectronicNoise = cms.double( 0.03 ),
    superClusterAlgo = cms.string( "Hybrid" ),
    etThresh = cms.double( 1.0 ),
    rawSuperClusterProducer = cms.InputTag( "hltHybridSuperClustersL1Seeded" ),
    applyEnergyCorrection = cms.bool( True ),
    isl_fCorrPset = cms.PSet(  ),
    VerbosityLevel = cms.string( "ERROR" ),
    recHitProducer = cms.InputTag( 'hltEcalRegionalEgammaRecHit','EcalRecHitsEB' ),
    fix_fCorrPset = cms.PSet(  ),
    modeEE = cms.int32( 0 ),
    modeEB = cms.int32( 0 ),
    dyn_fCorrPset = cms.PSet(  ),
    energyCorrectorName = cms.string( "EcalClusterEnergyCorrectionObjectSpecific" ),
    applyCrackCorrection = cms.bool( False ),
    hyb_fCorrPset = cms.PSet( 
      brLinearLowThr = cms.double( 1.1 ),
      fBremVec = cms.vdouble( -0.05208, 0.1331, 0.9196, -5.735E-4, 1.343 ),
      brLinearHighThr = cms.double( 8.0 ),
      fEtEtaVec = cms.vdouble( 1.0012, -0.5714, 0.0, 0.0, 0.0, 0.5549, 12.74, 1.0448, 0.0, 0.0, 0.0, 0.0, 8.0, 1.023, -0.00181, 0.0, 0.0 )
    )
)
# --- Endcap superclustering (multi5x5 algorithm), seeded by L1 candidates ---

# 5x5 basic clusters in the endcaps only (doBarrel=False, doEndcaps=True),
# again restricted to regions around L1 e/gamma candidates with ET in
# [5, 999] GeV.  Endcap seed threshold 0.18 GeV.
process.hltMulti5x5BasicClustersL1Seeded = cms.EDProducer( "EgammaHLTMulti5x5ClusterProducer",
    l1LowerThr = cms.double( 5.0 ),
    Multi5x5BarrelSeedThr = cms.double( 0.5 ),
    Multi5x5EndcapSeedThr = cms.double( 0.18 ),
    endcapHitCollection = cms.string( "EcalRecHitsEE" ),
    barrelClusterCollection = cms.string( "notused" ),
    doEndcaps = cms.bool( True ),
    regionEtaMargin = cms.double( 0.3 ),
    regionPhiMargin = cms.double( 0.4 ),
    RecHitFlagToBeExcluded = cms.vstring(  ),
    l1TagNonIsolated = cms.InputTag( 'hltL1extraParticles','NonIsolated' ),
    endcapHitProducer = cms.InputTag( "hltEcalRegionalEgammaRecHit" ),
    posCalcParameters = cms.PSet( 
      T0_barl = cms.double( 7.4 ),
      LogWeighted = cms.bool( True ),
      T0_endc = cms.double( 3.1 ),
      T0_endcPresh = cms.double( 1.2 ),
      W0 = cms.double( 4.2 ),
      X0 = cms.double( 0.89 )
    ),
    VerbosityLevel = cms.string( "ERROR" ),
    doIsolated = cms.bool( True ),
    barrelHitProducer = cms.InputTag( "hltEcalRegionalEgammaRecHit" ),
    l1LowerThrIgnoreIsolation = cms.double( 0.0 ),
    l1TagIsolated = cms.InputTag( 'hltL1extraParticles','Isolated' ),
    barrelHitCollection = cms.string( "EcalRecHitsEB" ),
    doBarrel = cms.bool( False ),
    endcapClusterCollection = cms.string( "multi5x5EndcapBasicClusters" ),
    l1UpperThr = cms.double( 999.0 )
)
# Merge the endcap basic clusters into superclusters with a fixed
# eta x phi search road (0.14 x 0.6) and the endcap brem-recovery
# parametrisation (a, b, c).  Barrel is disabled.
process.hltMulti5x5SuperClustersL1Seeded = cms.EDProducer( "Multi5x5SuperClusterProducer",
    barrelSuperclusterCollection = cms.string( "multi5x5BarrelSuperClusters" ),
    endcapEtaSearchRoad = cms.double( 0.14 ),
    barrelClusterCollection = cms.string( "multi5x5BarrelBasicClusters" ),
    dynamicPhiRoad = cms.bool( False ),
    endcapClusterProducer = cms.string( "hltMulti5x5BasicClustersL1Seeded" ),
    barrelPhiSearchRoad = cms.double( 0.8 ),
    endcapPhiSearchRoad = cms.double( 0.6 ),
    barrelClusterProducer = cms.string( "notused" ),
    seedTransverseEnergyThreshold = cms.double( 1.0 ),
    endcapSuperclusterCollection = cms.string( "multi5x5EndcapSuperClusters" ),
    barrelEtaSearchRoad = cms.double( 0.06 ),
    bremRecoveryPset = cms.PSet( 
      barrel = cms.PSet(  ),
      endcap = cms.PSet( 
        a = cms.double( 47.85 ),
        c = cms.double( 0.1201 ),
        b = cms.double( 108.8 )
      ),
      doEndcaps = cms.bool( True ),
      doBarrel = cms.bool( False )
    ),
    doEndcaps = cms.bool( True ),
    endcapClusterCollection = cms.string( "multi5x5EndcapBasicClusters" ),
    doBarrel = cms.bool( False )
)
# Associate preshower (ES) clusters with the endcap superclusters and add
# their energy; ET threshold 5 GeV for the association.
process.hltMulti5x5EndcapSuperClustersWithPreshowerL1Seeded = cms.EDProducer( "PreshowerClusterProducer",
    assocSClusterCollection = cms.string( "" ),
    preshStripEnergyCut = cms.double( 0.0 ),
    preshClusterCollectionY = cms.string( "preshowerYClusters" ),
    preshClusterCollectionX = cms.string( "preshowerXClusters" ),
    etThresh = cms.double( 5.0 ),
    preshRecHitProducer = cms.InputTag( 'hltESRegionalEgammaRecHit','EcalRecHitsES' ),
    endcapSClusterProducer = cms.InputTag( 'hltMulti5x5SuperClustersL1Seeded','multi5x5EndcapSuperClusters' ),
    preshNclust = cms.int32( 4 ),
    preshClusterEnergyCut = cms.double( 0.0 ),
    preshSeededNstrip = cms.int32( 15 )
)
# Energy-correct the preshower-included endcap superclusters (Multi5x5
# algorithm, fix_fCorrPset parametrisation), ET > 1 GeV.
process.hltCorrectedMulti5x5EndcapSuperClustersWithPreshowerL1Seeded = cms.EDProducer( "EgammaSCCorrectionMaker",
    corectedSuperClusterCollection = cms.string( "" ),
    sigmaElectronicNoise = cms.double( 0.15 ),
    superClusterAlgo = cms.string( "Multi5x5" ),
    etThresh = cms.double( 1.0 ),
    rawSuperClusterProducer = cms.InputTag( "hltMulti5x5EndcapSuperClustersWithPreshowerL1Seeded" ),
    applyEnergyCorrection = cms.bool( True ),
    isl_fCorrPset = cms.PSet(  ),
    VerbosityLevel = cms.string( "ERROR" ),
    recHitProducer = cms.InputTag( 'hltEcalRegionalEgammaRecHit','EcalRecHitsEE' ),
    fix_fCorrPset = cms.PSet( 
      brLinearLowThr = cms.double( 0.6 ),
      fBremVec = cms.vdouble( -0.04163, 0.08552, 0.95048, -0.002308, 1.077 ),
      brLinearHighThr = cms.double( 6.0 ),
      fEtEtaVec = cms.vdouble( 0.9746, -6.512, 0.0, 0.0, 0.02771, 4.983, 0.0, 0.0, -0.007288, -0.9446, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0 )
    ),
    modeEE = cms.int32( 0 ),
    modeEB = cms.int32( 0 ),
    dyn_fCorrPset = cms.PSet(  ),
    energyCorrectorName = cms.string( "EcalClusterEnergyCorrectionObjectSpecific" ),
    applyCrackCorrection = cms.bool( False ),
    hyb_fCorrPset = cms.PSet(  )
)
# Wrap the corrected barrel (hybrid) and endcap (multi5x5+ES) superclusters
# into a single RecoEcalCandidate collection used by all e/gamma filters below.
process.hltL1SeededRecoEcalCandidate = cms.EDProducer( "EgammaHLTRecoEcalCandidateProducers",
    scIslandEndcapProducer = cms.InputTag( "hltCorrectedMulti5x5EndcapSuperClustersWithPreshowerL1Seeded" ),
    scHybridBarrelProducer = cms.InputTag( "hltCorrectedHybridSuperClustersL1Seeded" ),
    recoEcalCandidateCollection = cms.string( "" )
)
# --- Electron leg filters: L1 match -> ET -> cluster shape ---

# Require >= 1 RecoEcalCandidate geometrically matched (region 0.522 x 1.044 in
# eta x phi) to an L1 e/gamma object from the seed hltL1sL1Mu12EG7ORL1MuOpenEG12.
# doIsolated=False: both isolated and non-isolated L1 objects are used.
process.hltEGRegionalL1Mu12EG7ORL1MuOpenEG12 = cms.EDFilter( "HLTEgammaL1MatchFilterRegional",
    saveTags = cms.bool( False ),
    endcap_end = cms.double( 2.65 ),
    region_eta_size_ecap = cms.double( 1.0 ),
    barrel_end = cms.double( 1.4791 ),
    l1IsolatedTag = cms.InputTag( 'hltL1extraParticles','Isolated' ),
    candIsolatedTag = cms.InputTag( "hltL1SeededRecoEcalCandidate" ),
    region_phi_size = cms.double( 1.044 ),
    region_eta_size = cms.double( 0.522 ),
    L1SeedFilterTag = cms.InputTag( "hltL1sL1Mu12EG7ORL1MuOpenEG12" ),
    ncandcut = cms.int32( 1 ),
    doIsolated = cms.bool( False ),
    candNonIsolatedTag = cms.InputTag( "" ),
    l1NonIsolatedTag = cms.InputTag( 'hltL1extraParticles','NonIsolated' )
)
# ET cut: >= 1 candidate with ET > 14 GeV (same threshold in EB and EE) --
# the "Ele14" part of the path name.
process.hltEG14EtFilterL1Mu12EG7ORL1MuOpenEG12 = cms.EDFilter( "HLTEgammaEtFilter",
    saveTags = cms.bool( False ),
    L1NonIsoCand = cms.InputTag( "" ),
    relaxed = cms.untracked.bool( False ),
    L1IsoCand = cms.InputTag( "hltL1SeededRecoEcalCandidate" ),
    inputTag = cms.InputTag( "hltEGRegionalL1Mu12EG7ORL1MuOpenEG12" ),
    etcutEB = cms.double( 14.0 ),
    ncandcut = cms.int32( 1 ),
    etcutEE = cms.double( 14.0 )
)
# Compute the cluster-shape variable (isIeta=True -> sigma-i-eta-i-eta style)
# for each RecoEcalCandidate from the regional EB/EE rechits.
process.hltL1SeededHLTClusterShape = cms.EDProducer( "EgammaHLTClusterShapeProducer",
    recoEcalCandidateProducer = cms.InputTag( "hltL1SeededRecoEcalCandidate" ),
    ecalRechitEB = cms.InputTag( 'hltEcalRegionalEgammaRecHit','EcalRecHitsEB' ),
    ecalRechitEE = cms.InputTag( 'hltEcalRegionalEgammaRecHit','EcalRecHitsEE' ),
    isIeta = cms.bool( True )
)
# CaloIdT cluster-shape cut: variable < 0.011 (EB) / 0.031 (EE); the
# thrOverE* relative cuts are disabled (-1).
process.hltEle14CaloIdTClusterShapeFilter = cms.EDFilter( "HLTEgammaGenericFilter",
    doIsolated = cms.bool( True ),
    nonIsoTag = cms.InputTag( "" ),
    L1NonIsoCand = cms.InputTag( "" ),
    saveTags = cms.bool( False ),
    thrOverE2EB = cms.double( -1.0 ),
    thrRegularEE = cms.double( 0.031 ),
    thrOverEEE = cms.double( -1.0 ),
    L1IsoCand = cms.InputTag( "hltL1SeededRecoEcalCandidate" ),
    thrOverEEB = cms.double( -1.0 ),
    thrRegularEB = cms.double( 0.011 ),
    lessThan = cms.bool( True ),
    useEt = cms.bool( False ),
    ncandcut = cms.int32( 1 ),
    isoTag = cms.InputTag( "hltL1SeededHLTClusterShape" ),
    candTag = cms.InputTag( "hltEG14EtFilterL1Mu12EG7ORL1MuOpenEG12" ),
    thrOverE2EE = cms.double( -1.0 )
)
# HCAL energy in a cone (outerCone 0.14, innerCone 0) around each candidate,
# used as the numerator of H/E (doEtSum=False -> energy sum, no rho correction).
process.hltL1SeededPhotonHcalForHE = cms.EDProducer( "EgammaHLTHcalIsolationProducersRegional",
    eMinHE = cms.double( 0.8 ),
    hbheRecHitProducer = cms.InputTag( "hltHbhereco" ),
    effectiveAreaBarrel = cms.double( 0.105 ),
    outerCone = cms.double( 0.14 ),
    eMinHB = cms.double( 0.7 ),
    innerCone = cms.double( 0.0 ),
    etMinHE = cms.double( -1.0 ),
    etMinHB = cms.double( -1.0 ),
    rhoProducer = cms.InputTag( 'hltKT6CaloJets','rho' ),
    depth = cms.int32( -1 ),
    doRhoCorrection = cms.bool( False ),
    effectiveAreaEndcap = cms.double( 0.17 ),
    recoEcalCandidateProducer = cms.InputTag( "hltL1SeededRecoEcalCandidate" ),
    rhoMax = cms.double( 9.9999999E7 ),
    rhoScale = cms.double( 1.0 ),
    doEtSum = cms.bool( False )
)
# CaloIdT H/E cut: relative thresholds H/E < 0.1 (EB) / 0.075 (EE); the
# absolute thrRegular* cuts are disabled (-1).
process.hltEle14CaloIdTHEFilter = cms.EDFilter( "HLTEgammaGenericFilter",
    doIsolated = cms.bool( True ),
    nonIsoTag = cms.InputTag( "" ),
    L1NonIsoCand = cms.InputTag( "" ),
    saveTags = cms.bool( False ),
    thrOverE2EB = cms.double( -1.0 ),
    thrRegularEE = cms.double( -1.0 ),
    thrOverEEE = cms.double( 0.075 ),
    L1IsoCand = cms.InputTag( "hltL1SeededRecoEcalCandidate" ),
    thrOverEEB = cms.double( 0.1 ),
    thrRegularEB = cms.double( -1.0 ),
    lessThan = cms.bool( True ),
    useEt = cms.bool( False ),
    ncandcut = cms.int32( 1 ),
    isoTag = cms.InputTag( "hltL1SeededPhotonHcalForHE" ),
    candTag = cms.InputTag( "hltEle14CaloIdTClusterShapeFilter" ),
    thrOverE2EE = cms.double( -1.0 )
)
# --- Electron pixel-matching ---

# Build electron pixel seeds from the corrected barrel and endcap
# superclusters (SCEtCut = 3 GeV) using tracker seeds (fromTrackerSeeds=True)
# with a StandardHitPairGenerator on mixed layer pairs.  The DeltaPhi/PhiMin/
# PhiMax windows define the supercluster-to-pixel-hit matching roads; H/E
# filtering here is disabled (applyHOverECut=False).
process.hltL1SeededStartUpElectronPixelSeeds = cms.EDProducer( "ElectronSeedProducer",
    endcapSuperClusters = cms.InputTag( "hltCorrectedMulti5x5EndcapSuperClustersWithPreshowerL1Seeded" ),
    SeedConfiguration = cms.PSet( 
      searchInTIDTEC = cms.bool( True ),
      HighPtThreshold = cms.double( 35.0 ),
      r2MinF = cms.double( -0.15 ),
      OrderedHitsFactoryPSet = cms.PSet( 
        maxElement = cms.uint32( 0 ),
        ComponentName = cms.string( "StandardHitPairGenerator" ),
        SeedingLayers = cms.string( "hltESPMixedLayerPairs" ),
        useOnDemandTracker = cms.untracked.int32( 0 )
      ),
      DeltaPhi1Low = cms.double( 0.23 ),
      DeltaPhi1High = cms.double( 0.08 ),
      ePhiMin1 = cms.double( -0.08 ),
      PhiMin2 = cms.double( -0.0040 ),
      LowPtThreshold = cms.double( 3.0 ),
      RegionPSet = cms.PSet( 
        deltaPhiRegion = cms.double( 0.4 ),
        originHalfLength = cms.double( 15.0 ),
        useZInVertex = cms.bool( True ),
        deltaEtaRegion = cms.double( 0.1 ),
        ptMin = cms.double( 1.5 ),
        originRadius = cms.double( 0.2 ),
        VertexProducer = cms.InputTag( "dummyVertices" )
      ),
      maxHOverE = cms.double( 999999.0 ),
      dynamicPhiRoad = cms.bool( False ),
      ePhiMax1 = cms.double( 0.04 ),
      DeltaPhi2 = cms.double( 0.0040 ),
      measurementTrackerName = cms.string( "hltESPMeasurementTracker" ),
      SizeWindowENeg = cms.double( 0.675 ),
      nSigmasDeltaZ1 = cms.double( 5.0 ),
      rMaxI = cms.double( 0.2 ),
      PhiMax2 = cms.double( 0.0040 ),
      preFilteredSeeds = cms.bool( True ),
      r2MaxF = cms.double( 0.15 ),
      pPhiMin1 = cms.double( -0.04 ),
      initialSeeds = cms.InputTag( "noSeedsHere" ),
      pPhiMax1 = cms.double( 0.08 ),
      hbheModule = cms.string( "hbhereco" ),
      SCEtCut = cms.double( 3.0 ),
      z2MaxB = cms.double( 0.09 ),
      fromTrackerSeeds = cms.bool( True ),
      hcalRecHits = cms.InputTag( "hltHbhereco" ),
      z2MinB = cms.double( -0.09 ),
      hbheInstance = cms.string( "" ),
      rMinI = cms.double( -0.2 ),
      hOverEConeSize = cms.double( 0.0 ),
      hOverEHBMinE = cms.double( 999999.0 ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      applyHOverECut = cms.bool( False ),
      hOverEHFMinE = cms.double( 999999.0 )
    ),
    barrelSuperClusters = cms.InputTag( "hltCorrectedHybridSuperClustersL1Seeded" )
)
# Pixel-match filter: >= 1 candidate passing the H/E filter must have at least
# one associated electron pixel seed (npixelmatchcut = 1.0).
process.hltEle14CaloIdTPixelMatchFilter = cms.EDFilter( "HLTElectronPixelMatchFilter",
    saveTags = cms.bool( False ),
    doIsolated = cms.bool( True ),
    L1NonIsoCand = cms.InputTag( "" ),
    L1NonIsoPixelSeedsTag = cms.InputTag( "" ),
    L1IsoCand = cms.InputTag( "hltL1SeededRecoEcalCandidate" ),
    npixelmatchcut = cms.double( 1.0 ),
    ncandcut = cms.int32( 1 ),
    candTag = cms.InputTag( "hltEle14CaloIdTHEFilter" ),
    L1IsoPixelSeedsTag = cms.InputTag( "hltL1SeededStartUpElectronPixelSeeds" )
)
# --- Electron track reconstruction from the pixel seeds ---

# CKF pattern recognition starting from the electron pixel seeds.
process.hltCkfL1SeededTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    src = cms.InputTag( "hltL1SeededStartUpElectronPixelSeeds" ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 ),
    TransientInitialStateEstimatorParameters = cms.PSet( 
      propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
    ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    cleanTrajectoryAfterInOut = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    doSeedingRegionRebuilding = cms.bool( False ),
    maxNSeeds = cms.uint32( 100000 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    TrajectoryBuilder = cms.string( "hltESPCkfTrajectoryBuilder" )
)
# Final Kalman fit of the track candidates (KF fitting smoother, material
# propagator), producing ctfWithMaterialTracks.
process.hltCtfL1SeededWithMaterialTracks = cms.EDProducer( "TrackProducer",
    src = cms.InputTag( "hltCkfL1SeededTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPKFFittingSmoother" ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTracker = cms.string( "" ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    NavigationSchool = cms.string( "" ),
    TrajectoryInEvent = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "undefAlgorithm" ),
    Propagator = cms.string( "PropagatorWithMaterial" )
)
# Combine tracks and pixel-matched superclusters into HLT electron candidates
# (CTF tracks, not GSF: UseGsfTracks=False).
process.hltPixelMatchElectronsL1Seeded = cms.EDProducer( "EgammaHLTPixelMatchElectronProducers",
    BSProducer = cms.InputTag( "hltOnlineBeamSpot" ),
    UseGsfTracks = cms.bool( False ),
    TrackProducer = cms.InputTag( "hltCtfL1SeededWithMaterialTracks" ),
    GsfTrackProducer = cms.InputTag( "" )
)
# --- TrkIdVL track-matching filters ---

# |1/E - 1/p| filter; the 999.9 cuts in both barrel and endcap make this a
# pass-through that mainly associates tracks to the calo candidates.
process.hltEle14CaloIdTTrkIdVLOneOEMinusOneOPFilter = cms.EDFilter( "HLTElectronOneOEMinusOneOPFilterRegional",
    saveTags = cms.bool( False ),
    doIsolated = cms.bool( True ),
    electronNonIsolatedProducer = cms.InputTag( "" ),
    barrelcut = cms.double( 999.9 ),
    electronIsolatedProducer = cms.InputTag( "hltPixelMatchElectronsL1Seeded" ),
    ncandcut = cms.int32( 1 ),
    candTag = cms.InputTag( "hltEle14CaloIdTPixelMatchFilter" ),
    endcapcut = cms.double( 999.9 )
)
# Compute track-supercluster Deta and Dphi for each electron (no projection to
# ECAL, not at vertex).
process.hltElectronL1SeededDetaDphi = cms.EDProducer( "EgammaHLTElectronDetaDphiProducer",
    variablesAtVtx = cms.bool( False ),
    useSCRefs = cms.bool( False ),
    BSProducer = cms.InputTag( "hltOnlineBeamSpot" ),
    electronProducer = cms.InputTag( "hltPixelMatchElectronsL1Seeded" ),
    recoEcalCandidateProducer = cms.InputTag( "" ),
    useTrackProjectionToEcal = cms.bool( False )
)
# TrkIdVL Deta cut: |Deta| < 0.01 in both EB and EE.
process.hltEle14CaloIdTTrkIdVLDetaFilter = cms.EDFilter( "HLTElectronGenericFilter",
    doIsolated = cms.bool( True ),
    nonIsoTag = cms.InputTag( "" ),
    L1NonIsoCand = cms.InputTag( "" ),
    thrTimesPtEB = cms.double( -1.0 ),
    saveTags = cms.bool( False ),
    thrRegularEE = cms.double( 0.01 ),
    L1IsoCand = cms.InputTag( "hltPixelMatchElectronsL1Seeded" ),
    thrRegularEB = cms.double( 0.01 ),
    lessThan = cms.bool( True ),
    ncandcut = cms.int32( 1 ),
    isoTag = cms.InputTag( 'hltElectronL1SeededDetaDphi','Deta' ),
    candTag = cms.InputTag( "hltEle14CaloIdTTrkIdVLOneOEMinusOneOPFilter" ),
    thrTimesPtEE = cms.double( -1.0 ),
    thrOverPtEE = cms.double( -1.0 ),
    thrOverPtEB = cms.double( -1.0 )
)
# TrkIdVL Dphi cut: |Dphi| < 0.15 (EB) / 0.1 (EE); last filter of the
# electron leg (saveTags=True).
process.hltEle14CaloIdTTrkIdVLDphiFilter = cms.EDFilter( "HLTElectronGenericFilter",
    doIsolated = cms.bool( True ),
    nonIsoTag = cms.InputTag( "" ),
    L1NonIsoCand = cms.InputTag( "" ),
    thrTimesPtEB = cms.double( -1.0 ),
    saveTags = cms.bool( True ),
    thrRegularEE = cms.double( 0.1 ),
    L1IsoCand = cms.InputTag( "hltPixelMatchElectronsL1Seeded" ),
    thrRegularEB = cms.double( 0.15 ),
    lessThan = cms.bool( True ),
    ncandcut = cms.int32( 1 ),
    isoTag = cms.InputTag( 'hltElectronL1SeededDetaDphi','Dphi' ),
    candTag = cms.InputTag( "hltEle14CaloIdTTrkIdVLDetaFilter" ),
    thrTimesPtEE = cms.double( -1.0 ),
    thrOverPtEE = cms.double( -1.0 ),
    thrOverPtEB = cms.double( -1.0 )
)
# Electron-muon invariant-mass filter: require >= 1 e-mu pair with
# m(e,mu) > 8 GeV, combining the L3 muon (pT>14) and the electron leg above.
# NOTE(review): the electron candidates here are taken from
# hltPixelMatchElectronsActivity (the "activity"-based electrons) rather than
# hltPixelMatchElectronsL1Seeded used by the preceding filters -- presumably
# intentional in the original menu, but worth confirming.
process.hltMu14Ele14CaloIdTTrkIdVLMass8Filter = cms.EDFilter( "HLTElectronMuonInvMassFilter",
    saveTags = cms.bool( True ),
    lowerMassCut = cms.double( 8.0 ),
    ElectronL1IsoCand = cms.InputTag( "hltPixelMatchElectronsActivity" ),
    electronRelaxed = cms.untracked.bool( True ),
    MuonCand = cms.InputTag( "hltL3MuonCandidates" ),
    ElectronL1NonIsoCand = cms.InputTag( "hltPixelMatchElectronsActivity" ),
    muonPrevCandTag = cms.InputTag( "hltL1Mu12EG7ORL1MuOpenEG12L3MuFiltered14" ),
    ncandcut = cms.int32( 1 ),
    upperMassCut = cms.double( 999999.0 ),
    elePrevCandTag = cms.InputTag( "hltEle14CaloIdTTrkIdVLDphiFilter" )
)
# HLT prescale module for the Mu14_Ele14_CaloIdT_TrkIdVL_Mass8_PFMET50 path
# (offset 0).
process.hltPreMu14Ele14CaloIdTTrkIdVLMass8PFMET50 = cms.EDFilter( "HLTPrescaler",
    L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" ),
    offset = cms.uint32( 0 )
)
# --- Sequence definitions: reusable ordered groups of the modules above ---
# (cms.Sequence only fixes module ordering/grouping; the per-event filtering
# happens in the cms.Path definitions that use them.)

# L1 unpacking, beamspot, and the common begin-of-path sequence.
process.HLTL1UnpackerSequence = cms.Sequence( process.hltGtDigis + process.hltGctDigis + process.hltL1GtObjectMap + process.hltL1extraParticles )
process.HLTBeamSpot = cms.Sequence( process.hltScalersRawToDigi + process.hltOnlineBeamSpot )
process.HLTBeginSequence = cms.Sequence( process.hltTriggerType + process.HLTL1UnpackerSequence + process.HLTBeamSpot )
# Muon local reconstruction (DT/CSC/RPC) and L2/L3 muon reconstruction.
process.HLTMuonLocalRecoSequence = cms.Sequence( process.hltMuonDTDigis + process.hltDt1DRecHits + process.hltDt4DSegments + process.hltMuonCSCDigis + process.hltCsc2DRecHits + process.hltCscSegments + process.hltMuonRPCDigis + process.hltRpcRecHits )
process.HLTL2muonrecoNocandSequence = cms.Sequence( process.HLTMuonLocalRecoSequence + process.hltL2OfflineMuonSeeds + process.hltL2MuonSeeds + process.hltL2Muons )
process.HLTL2muonrecoSequenceNoVtx = cms.Sequence( process.HLTL2muonrecoNocandSequence + process.hltL2MuonCandidatesNoVtx )
# Calorimeter local reconstruction and calo-jet sequences.
process.HLTDoLocalHcalSequence = cms.Sequence( process.hltHcalDigis + process.hltHbhereco + process.hltHfreco + process.hltHoreco )
process.HLTDoCaloSequence = cms.Sequence( process.hltEcalRawToRecHitFacility + process.hltEcalRegionalRestFEDs + process.hltEcalRecHitAll + process.HLTDoLocalHcalSequence + process.hltTowerMakerForAll )
process.HLTRecoJetSequenceAK5L1FastJetCorrected = cms.Sequence( process.HLTDoCaloSequence + process.hltKT6CaloJets + process.hltAntiKT5CaloJets + process.hltCaloJetIDPassed + process.hltCaloJetL1FastJetCorrected )
process.HLTDoCaloSequencePF = cms.Sequence( process.hltEcalRawToRecHitFacility + process.hltEcalRegionalRestFEDs + process.hltEcalRecHitAll + process.HLTDoLocalHcalSequence + process.hltTowerMakerForPF )
process.HLTRecoJetSequenceAK5UncorrectedPF = cms.Sequence( process.HLTDoCaloSequencePF + process.hltAntiKT5CaloJetsPF )
process.HLTRecoJetSequencePrePF = cms.Sequence( process.HLTRecoJetSequenceAK5UncorrectedPF + process.hltAntiKT5CaloJetsPFEt5 )
process.HLTL2muonrecoSequence = cms.Sequence( process.HLTL2muonrecoNocandSequence + process.hltL2MuonCandidates )
# Tracker local reconstruction and L3 muon tracking.
process.HLTDoLocalPixelSequence = cms.Sequence( process.hltSiPixelDigis + process.hltSiPixelClusters + process.hltSiPixelRecHits )
process.HLTDoLocalStripSequence = cms.Sequence( process.hltSiStripExcludedFEDListProducer + process.hltSiStripRawToClustersFacility + process.hltSiStripClusters )
process.HLTL3muonTkCandidateSequence = cms.Sequence( process.HLTDoLocalPixelSequence + process.HLTDoLocalStripSequence + process.hltL3TrajSeedOIState + process.hltL3TrackCandidateFromL2OIState + process.hltL3TkTracksFromL2OIState + process.hltL3MuonsOIState + process.hltL3TrajSeedOIHit + process.hltL3TrackCandidateFromL2OIHit + process.hltL3TkTracksFromL2OIHit + process.hltL3MuonsOIHit + process.hltL3TkFromL2OICombination + process.hltL3TrajSeedIOHit + process.hltL3TrackCandidateFromL2IOHit + process.hltL3TkTracksFromL2IOHit + process.hltL3MuonsIOHit + process.hltL3TrajectorySeed + process.hltL3TrackCandidateFromL2 )
process.HLTL3muonrecoNocandSequence = cms.Sequence( process.HLTL3muonTkCandidateSequence + process.hltL3TkTracksFromL2 + process.hltL3MuonsLinksCombination + process.hltL3Muons )
process.HLTL3muonrecoSequence = cms.Sequence( process.HLTL3muonrecoNocandSequence + process.hltL3MuonCandidates )
# Iterative tracking (iterations 0-4) feeding particle flow.
process.HLTRecopixelvertexingSequence = cms.Sequence( process.hltPixelTracks + process.hltPixelVertices )
process.HLTIterativeTrackingIteration0 = cms.Sequence( process.hltPFJetPixelSeedsFromPixelTracks + process.hltPFJetCkfTrackCandidates + process.hltPFJetCtfWithMaterialTracks + process.hltPFlowTrackSelectionHighPurity + process.hltTrackRefsForJetsIter0 + process.hltAntiKT5TrackJetsIter0 + process.hltTrackAndTauJetsIter0 )
process.HLTIterativeTrackingIteration1 = cms.Sequence( process.hltIter1ClustersRefRemoval + process.hltIter1SiStripClusters + process.hltIter1PFJetPixelSeeds + process.hltIter1PFJetCkfTrackCandidates + process.hltIter1PFJetCtfWithMaterialTracks + process.hltIter1PFlowTrackSelectionHighPurityLoose + process.hltIter1PFlowTrackSelectionHighPurityTight + process.hltIter1PFlowTrackSelectionHighPurity + process.hltIter1Merged + process.hltTrackRefsForJetsIter1 + process.hltAntiKT5TrackJetsIter1 + process.hltTrackAndTauJetsIter1 )
process.HLTIterativeTrackingIteration2 = cms.Sequence( process.hltIter2ClustersRefRemoval + process.hltIter2SiStripClusters + process.hltIter2PFJetPixelSeeds + process.hltIter2PFJetCkfTrackCandidates + process.hltIter2PFJetCtfWithMaterialTracks + process.hltIter2PFlowTrackSelectionHighPurity + process.hltIter2Merged + process.hltTrackRefsForJetsIter2 + process.hltAntiKT5TrackJetsIter2 + process.hltTrackAndTauJetsIter2 )
process.HLTIterativeTrackingIteration3 = cms.Sequence( process.hltIter3ClustersRefRemoval + process.hltIter3SiStripClusters + process.hltIter3PFJetMixedSeeds + process.hltIter3PFJetCkfTrackCandidates + process.hltIter3PFJetCtfWithMaterialTracks + process.hltIter3PFlowTrackSelectionHighPurityLoose + process.hltIter3PFlowTrackSelectionHighPurityTight + process.hltIter3PFlowTrackSelectionHighPurity + process.hltIter3Merged + process.hltTrackRefsForJetsIter3 + process.hltAntiKT5TrackJetsIter3 + process.hltTrackAndTauJetsIter3 )
process.HLTIterativeTrackingIteration4 = cms.Sequence( process.hltIter4ClustersRefRemoval + process.hltIter4SiStripClusters + process.hltIter4PFJetPixelLessSeeds + process.hltIter4PFJetCkfTrackCandidates + process.hltIter4PFJetCtfWithMaterialTracks + process.hltIter4PFlowTrackSelectionHighPurity + process.hltIter4Merged )
process.HLTIterativeTracking = cms.Sequence( process.HLTIterativeTrackingIteration0 + process.HLTIterativeTrackingIteration1 + process.HLTIterativeTrackingIteration2 + process.HLTIterativeTrackingIteration3 + process.HLTIterativeTrackingIteration4 )
process.HLTTrackReconstructionForPF = cms.Sequence( process.HLTDoLocalPixelSequence + process.HLTRecopixelvertexingSequence + process.HLTDoLocalStripSequence + process.HLTIterativeTracking + process.hltPFMuonMerging + process.hltMuonLinks + process.hltMuons )
# Particle-flow reconstruction and PF-jet sequences (several JEC variants).
process.HLTPreshowerSequence = cms.Sequence( process.hltESRawToRecHitFacility + process.hltEcalRegionalESRestFEDs + process.hltESRecHitAll )
process.HLTParticleFlowSequence = cms.Sequence( process.HLTPreshowerSequence + process.hltParticleFlowRecHitECAL + process.hltParticleFlowRecHitHCAL + process.hltParticleFlowRecHitPS + process.hltParticleFlowClusterECAL + process.hltParticleFlowClusterHCAL + process.hltParticleFlowClusterHFEM + process.hltParticleFlowClusterHFHAD + process.hltParticleFlowClusterPS + process.hltLightPFTracks + process.hltParticleFlowBlock + process.hltParticleFlow )
process.HLTPFJetsSequence = cms.Sequence( process.hltAntiKT5PFJets )
process.HLTPFJetTriggerSequence = cms.Sequence( process.HLTL2muonrecoSequence + process.HLTL3muonrecoSequence + process.HLTTrackReconstructionForPF + process.HLTParticleFlowSequence + process.HLTPFJetsSequence )
process.HLTPFReconstructionSequence = cms.Sequence( process.HLTRecoJetSequencePrePF + process.HLTPFJetTriggerSequence )
process.HLTPFL1FastL2L3JetsSequenceNeutral = cms.Sequence( process.hltKT6PFJets + process.hltPFNeutralHadronsAndPartons + process.hltAntiKT5PFJetsNeutral + process.hltAK5PFJetNeutralL1FastL2L3Corrected )
process.HLTEndSequence = cms.Sequence( process.hltBoolEnd )
# Muon isolation sequences (calo- and track-based).
process.HLTL3muoncaloisorecoSequenceNoBools = cms.Sequence( process.hltEcalRawToRecHitFacility + process.hltEcalRegionalMuonsFEDs + process.hltEcalRegionalMuonsRecHit + process.HLTDoLocalHcalSequence + process.hltTowerMakerForMuons + process.hltKT6CaloJetsForMuons + process.hltL3CaloMuonCorrectedIsolations )
process.HLTRegionalCKFTracksForL3Isolation = cms.Sequence( process.hltRegionalSeedsForL3MuonIsolation + process.hltRegionalCandidatesForL3MuonIsolation + process.hltRegionalTracksForL3MuonIsolation )
process.HLTL3muonisorecoSequence = cms.Sequence( process.HLTDoLocalPixelSequence + process.HLTDoLocalStripSequence + process.HLTRegionalCKFTracksForL3Isolation + process.hltL3MuonCombRelIsolations )
process.HLTPFL1FastL2L3JetsSequence = cms.Sequence( process.hltKT6PFJets + process.hltAntiKT5PFJets + process.hltAK5PFJetL1FastL2L3Corrected )
process.HLTPFL1FastL2L3JetTriggerSequence = cms.Sequence( process.HLTL2muonrecoSequence + process.HLTL3muonrecoSequence + process.HLTTrackReconstructionForPF + process.HLTParticleFlowSequence + process.HLTPFL1FastL2L3JetsSequence )
process.HLTPFL1FastL2L3ReconstructionSequence = cms.Sequence( process.HLTRecoJetSequencePrePF + process.HLTPFL1FastL2L3JetTriggerSequence )
process.HLTPFJetRecoNoPUL1FastL2L3Sequence = cms.Sequence( process.hltOnlinePrimaryVertices + process.hltGoodOnlinePVs + process.hltPFPileUp + process.hltPFNoPileUp + process.hltKT6PFJets + process.hltAntiKT5PFJetsNoPU + process.hltAK5PFJetL1FastL2L3CorrectedNoPU )
process.HLTPFnoPUL1FastL2L3JetTriggerSequence = cms.Sequence( process.HLTL2muonrecoSequence + process.HLTL3muonrecoSequence + process.HLTTrackReconstructionForPF + process.HLTParticleFlowSequence + process.HLTPFJetRecoNoPUL1FastL2L3Sequence )
process.HLTPFnoPUL1FastL2L3ReconstructionSequence = cms.Sequence( process.HLTRecoJetSequencePrePF + process.HLTPFnoPUL1FastL2L3JetTriggerSequence )
process.HLTL3muonisorecoSequenceIso1p0 = cms.Sequence( process.HLTDoLocalPixelSequence + process.HLTDoLocalStripSequence + process.HLTRegionalCKFTracksForL3Isolation + process.hltL3MuonCombRelIsolationsIso1p0 )
# Unseeded ("activity") ECAL clustering and electron tracking sequences.
process.HLTEcalActivitySequence = cms.Sequence( process.hltEcalRawToRecHitFacility + process.hltESRawToRecHitFacility + process.hltEcalRegionalRestFEDs + process.hltEcalRegionalESRestFEDs + process.hltEcalRecHitAll + process.hltESRecHitAll + process.hltHybridSuperClustersActivity + process.hltCorrectedHybridSuperClustersActivity + process.hltMulti5x5BasicClustersActivity + process.hltMulti5x5SuperClustersActivity + process.hltMulti5x5SuperClustersWithPreshowerActivity + process.hltCorrectedMulti5x5SuperClustersWithPreshowerActivity + process.hltRecoEcalSuperClusterActivityCandidate + process.hltEcalActivitySuperClusterWrapper )
process.HLTPixelMatchElectronActivityTrackingSequence = cms.Sequence( process.hltCkfActivityTrackCandidates + process.hltCtfActivityWithMaterialTracks + process.hltPixelMatchElectronsActivity )
process.HLTRecoMETSequence = cms.Sequence( process.HLTDoCaloSequence + process.hltMet )
process.HLTRSequenceMuCorrL1FastJetDiJet65 = cms.Sequence( process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltDoubleCentralJet65L1FastJet + process.HLTRecoMETSequence + process.hltRHemisphereMuCorr )
# L1-seeded e/gamma reconstruction sequences built from the modules above.
process.HLTDoRegionalEgammaEcalSequence = cms.Sequence( process.hltESRawToRecHitFacility + process.hltEcalRawToRecHitFacility + process.hltEcalRegionalEgammaFEDs + process.hltEcalRegionalEgammaRecHit + process.hltESRegionalEgammaRecHit )
process.HLTMulti5x5SuperClusterL1Seeded = cms.Sequence( process.hltMulti5x5BasicClustersL1Seeded + process.hltMulti5x5SuperClustersL1Seeded + process.hltMulti5x5EndcapSuperClustersWithPreshowerL1Seeded + process.hltCorrectedMulti5x5EndcapSuperClustersWithPreshowerL1Seeded )
process.HLTL1SeededEcalClustersSequence = cms.Sequence( process.hltHybridSuperClustersL1Seeded + process.hltCorrectedHybridSuperClustersL1Seeded + process.HLTMulti5x5SuperClusterL1Seeded )
process.HLTDoEGammaStartupSequence = cms.Sequence( process.HLTDoRegionalEgammaEcalSequence + process.HLTL1SeededEcalClustersSequence + process.hltL1SeededRecoEcalCandidate )
process.HLTDoEgammaClusterShapeSequence = cms.Sequence( process.hltL1SeededHLTClusterShape )
process.HLTDoLocalHcalWithoutHOSequence = cms.Sequence( process.hltHcalDigis + process.hltHbhereco + process.hltHfreco )
process.HLTDoEGammaHESequence = cms.Sequence( process.HLTDoLocalHcalWithoutHOSequence + process.hltL1SeededPhotonHcalForHE )
process.HLTDoEGammaPixelSequence = cms.Sequence( process.HLTDoLocalPixelSequence + process.HLTDoLocalStripSequence + process.hltL1SeededStartUpElectronPixelSeeds )
# Full Ele14 CaloIdT selection chain (reconstruction + filters up to the
# pixel match).
process.HLTEle14L1NonIsoHLTCaloIdTSequence = cms.Sequence( process.HLTDoEGammaStartupSequence + process.hltEGRegionalL1Mu12EG7ORL1MuOpenEG12 + process.hltEG14EtFilterL1Mu12EG7ORL1MuOpenEG12 + process.HLTDoEgammaClusterShapeSequence + process.hltEle14CaloIdTClusterShapeFilter + process.HLTDoEGammaHESequence + process.hltEle14CaloIdTHEFilter + process.HLTDoEGammaPixelSequence + process.hltEle14CaloIdTPixelMatchFilter )
process.HLTPixelMatchElectronL1SeededTrackingSequence = cms.Sequence( process.hltCkfL1SeededTrackCandidates + process.hltCtfL1SeededWithMaterialTracks + process.hltPixelMatchElectronsL1Seeded )
process.HLTDoElectronDetaDphiSequence = cms.Sequence( process.hltElectronL1SeededDetaDphi )
process.HLT_L2TripleMu10_0_0_NoVertex_PFJet40Neutral_v8 = cms.Path( process.HLTBeginSequence + process.hltL1sL1TripleMu0ORTripleMu0HighQ + process.hltPreL2TripleMu1000NoVertexPFJet40Neutral + process.hltL1TripleMu0L1TriMuFiltered0 + process.HLTL2muonrecoSequenceNoVtx + process.hltL2TripleMu0NoVertexL2PreFiltered + process.hltL2Mu10NoVertexL2PreFiltered + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltDijet40L1FastJet + process.HLTPFReconstructionSequence + process.HLTPFL1FastL2L3JetsSequenceNeutral + process.hltCentralPFJet40Neutral + process.HLTEndSequence )
process.HLT_DoubleDisplacedMu4_DiPFJet40Neutral_v8 = cms.Path( process.HLTBeginSequence + process.hltL1sL1DoubleMu0erOR3erHighQ + process.hltPreDoubleDisplacedMu4DiPFJet40Neutral + process.hltL1DoubleMuon0erOR3erHighQL1Filtered0 + process.HLTL2muonrecoSequence + process.hltDoubleMu4L2PreFiltered + process.HLTL3muonrecoSequence + process.hltDoubleDisplacedMu4L3PreFiltered + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltDijet40L1FastJet + process.HLTPFReconstructionSequence + process.HLTPFL1FastL2L3JetsSequenceNeutral + process.hltDiCentralPFJet40Neutral + process.HLTEndSequence )
process.HLT_Mu8_DiJet30_v7 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu8DoubleJetC20 + process.hltPreMu8DiJet30 + process.hltL1Mu8DoubleJetC20L1Filtered0 + process.HLTL2muonrecoSequence + process.hltL2Mu8DoubleJetC20L2Filtered8 + process.HLTL3muonrecoSequence + process.hltL3Mu8DoubleJetC20L3Filtered8 + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltDiJet30 + process.HLTEndSequence )
process.HLT_Mu8_TriJet30_v7 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu8DoubleJetC20 + process.hltPreMu8TriJet30 + process.hltL1Mu8DoubleJetC20L1Filtered0 + process.HLTL2muonrecoSequence + process.hltL2Mu8DoubleJetC20L2Filtered8 + process.HLTL3muonrecoSequence + process.hltL3Mu8DoubleJetC20L3Filtered8 + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltTriJet30 + process.HLTEndSequence )
process.HLT_Mu8_QuadJet30_v7 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu8DoubleJetC20 + process.hltPreMu8QuadJet30 + process.hltL1Mu8DoubleJetC20L1Filtered0 + process.HLTL2muonrecoSequence + process.hltL2Mu8DoubleJetC20L2Filtered8 + process.HLTL3muonrecoSequence + process.hltL3Mu8DoubleJetC20L3Filtered8 + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltQuadJet30 + process.HLTEndSequence )
process.HLT_IsoMu12_DoubleCentralJet65_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1DoubleJetC64ORDoubleJetC56ORDoubleJetC52 + process.hltPreIsoMu12DoubleCentralJet65 + cms.ignore(process.hltL1sL1SingleMuOpenCandidate) + process.hltSingleMuOpenCandidateL1Filtered0 + process.HLTL2muonrecoSequence + process.hltSingleMuOpenCandidateL2Filtered3 + process.HLTL3muonrecoSequence + process.hltSingleMuOpenCandidateL3Filtered12 + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltDoubleCentralJet65L1FastJet + process.HLTL3muoncaloisorecoSequenceNoBools + process.HLTL3muonisorecoSequence + process.hltL3crIsoL1sMuOpenCandidateL1f0L2f3L3f12L3crIsoFiltered12 + process.HLTEndSequence )
process.HLT_IsoMu17_eta2p1_DiCentralPFNoPUJet30_PFNoPUHT350_PFMHT40_v3 = cms.Path( process.HLTBeginSequence + process.hltL1sL1HTT150OrHTT175 + process.hltL1sL1HTT150OrHTT175OrHTT200 + cms.ignore(process.hltL1sL1SingleMuOpenCandidate) + process.hltPreIsoMu17eta2p1DiCentralPFNoPUJet30PFNoPUHT350PFMHT40 + process.hltSingleMuOpenCenJetL1Filtered0 + process.HLTL2muonrecoSequence + process.hltL2SingleMuOpenCenJetL2QFiltered14 + process.HLTL3muonrecoSequence + process.hltSingleMuOpenIsoCenJetL3withL2QPreFiltered17 + process.HLTL3muoncaloisorecoSequenceNoBools + process.HLTL3muonisorecoSequence + process.hltSingleMuOpenIsoCenJetL3crIsoRhoFiltered0p15 + process.HLTPFL1FastL2L3ReconstructionSequence + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltDiCentralPFJet30NoPU + process.hltPFMHT + process.hltPFHTNoPU + process.hltPFMHT40HT350 + process.HLTEndSequence )
process.HLT_DoubleRelIso1p0Mu5_Mass8_PFNoPUHT175_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu0HTT100 + process.hltPreDoubleRelIso1p0Mu5Mass8PFNoPUHT175 + cms.ignore(process.hltL1sL1SingleMuOpenCandidate) + process.hltIgnoredL1SingleMuOpenL1DiMuFiltered0 + process.HLTL2muonrecoSequence + process.hltIgnoredL1SingleMuOpenL2DiMuFiltered0 + process.HLTL3muonrecoSequence + process.hltIgnoredL1SingleMuOpenDiMu5Mass8L3Filtered5 + process.HLTL3muoncaloisorecoSequenceNoBools + process.HLTL3muonisorecoSequenceIso1p0 + process.hltL3doublereliso1p0mufilter5 + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt100 + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFHT175NoPU + process.HLTEndSequence )
process.HLT_DoubleRelIso1p0Mu5_Mass8_PFNoPUHT225_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu4HTT125 + process.hltPreDoubleRelIso1p0Mu5Mass8PFNoPUHT225 + cms.ignore(process.hltL1sL1SingleMuOpenCandidate) + process.hltIgnoredL1SingleMuOpenL1DiMuFiltered0 + process.HLTL2muonrecoSequence + process.hltIgnoredL1SingleMuOpenL2DiMuFiltered0 + process.HLTL3muonrecoSequence + process.hltIgnoredL1SingleMuOpenDiMu5Mass8L3Filtered5 + process.HLTL3muoncaloisorecoSequenceNoBools + process.HLTL3muonisorecoSequenceIso1p0 + process.hltL3doublereliso1p0mufilter5 + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt125 + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFHT225NoPU + process.HLTEndSequence )
process.HLT_DoubleMu8_Mass8_PFNoPUHT175_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu0HTT100ORL1Mu4HTT125 + process.hltPreDoubleMu8Mass8PFNoPUHT175 + cms.ignore(process.hltL1sL1SingleMuOpenCandidate) + process.hltIgnoredL1SingleMuOpenL1DiMuFiltered0 + process.HLTL2muonrecoSequence + process.hltIgnoredL1SingleMuOpenL2DiMuFiltered0 + process.HLTL3muonrecoSequence + process.hltIgnoredL1SingleMuOpenDiMu8Mass8L3Filtered + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt100 + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFHT175NoPU + process.HLTEndSequence )
process.HLT_DoubleMu8_Mass8_PFNoPUHT225_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu0HTT100ORL1Mu4HTT125 + process.hltPreDoubleMu8Mass8PFNoPUHT225 + cms.ignore(process.hltL1sL1SingleMuOpenCandidate) + process.hltIgnoredL1SingleMuOpenL1DiMuFiltered0 + process.HLTL2muonrecoSequence + process.hltIgnoredL1SingleMuOpenL2DiMuFiltered0 + process.HLTL3muonrecoSequence + process.hltIgnoredL1SingleMuOpenDiMu8Mass8L3Filtered + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt150 + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFHT225NoPU + process.HLTEndSequence )
process.HLT_RelIso1p0Mu5_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu0HTT100 + process.hltPreRelIso1p0Mu5Ele8CaloIdTTrkIdVLMass8PFNoPUHT175 + process.hltL1Mu0HTT100L1MuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1Mu0HTT100L2Filtered0 + process.HLTL3muonrecoSequence + process.hltL1Mu0HTT100L3Filtered5 + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt100 + process.HLTEcalActivitySequence + process.hltSingleEle8NoCandEtFilter + process.hltActivityPhotonClusterShape + process.hltSingleEle8CaloIdTNoCandClusterShapeFilter + process.hltActivityPhotonHcalForHE + process.hltSingleEle8CaloIdTNoCandHEFilter + process.hltActivityStartUpElectronPixelSeeds + process.hltSingleEle8CaloIdTNoCandPixelMatchFilter + process.HLTPixelMatchElectronActivityTrackingSequence + process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandOneOEMinusOneOPFilter + process.hltElectronActivityDetaDphi + process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandDetaFilter + process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandDphiFilter + process.HLTL3muoncaloisorecoSequenceNoBools + process.HLTL3muonisorecoSequenceIso1p0 + process.hltL1Mu0HTT100L3RelIso1p0MuonIsoFilter + process.hltL1Mu0HTT100Mu5Ele8CaloIdTTrkIdVLMass8Filter + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFHT175NoPU + process.HLTEndSequence )
process.HLT_RelIso1p0Mu5_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu4HTT125 + process.hltPreRelIso1p0Mu5Ele8CaloIdTTrkIdVLMass8PFNoPUHT225 + process.hltL1Mu4HTT125L1MuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1Mu4HTT125L2Filtered0 + process.HLTL3muonrecoSequence + process.hltL1Mu4HTT125L3Filtered5 + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt125 + process.HLTEcalActivitySequence + process.hltSingleEle8NoCandEtFilter + process.hltActivityPhotonClusterShape + process.hltSingleEle8CaloIdTNoCandClusterShapeFilter + process.hltActivityPhotonHcalForHE + process.hltSingleEle8CaloIdTNoCandHEFilter + process.hltActivityStartUpElectronPixelSeeds + process.hltSingleEle8CaloIdTNoCandPixelMatchFilter + process.HLTPixelMatchElectronActivityTrackingSequence + process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandOneOEMinusOneOPFilter + process.hltElectronActivityDetaDphi + process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandDetaFilter + process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandDphiFilter + process.HLTL3muoncaloisorecoSequenceNoBools + process.HLTL3muonisorecoSequenceIso1p0 + process.hltL1Mu4HTT125L3RelIso1p0MuonIsoFilter + process.hltL1Mu4HTT125Mu5Ele8CaloIdTTrkIdVLMass8Filter + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFHT225NoPU + process.HLTEndSequence )
process.HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT175_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu0HTT100ORL1Mu4HTT125 + process.hltPreMu8Ele8CaloIdTTrkIdVLMass8PFNoPUHT175 + process.hltL1Mu0HTT100ORMu4HTT125L1MuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1Mu0HTT100ORMu4HTT125L2Filtered0 + process.HLTL3muonrecoSequence + process.hltL1Mu0HTT100ORMu4HTT125L3Filtered8 + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt100 + process.HLTEcalActivitySequence + process.hltSingleEle8NoCandEtFilter + process.hltActivityPhotonClusterShape + process.hltSingleEle8CaloIdTNoCandClusterShapeFilter + process.hltActivityPhotonHcalForHE + process.hltSingleEle8CaloIdTNoCandHEFilter + process.hltActivityStartUpElectronPixelSeeds + process.hltSingleEle8CaloIdTNoCandPixelMatchFilter + process.HLTPixelMatchElectronActivityTrackingSequence + process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandOneOEMinusOneOPFilter + process.hltElectronActivityDetaDphi + process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandDetaFilter + process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandDphiFilter + process.hltMu8Ele8CaloIdTTrkIdVLMass8Filter + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFHT175NoPU + process.HLTEndSequence )
process.HLT_Mu8_Ele8_CaloIdT_TrkIdVL_Mass8_PFNoPUHT225_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu0HTT100ORL1Mu4HTT125 + process.hltPreMu8Ele8CaloIdTTrkIdVLMass8PFNoPUHT225 + process.hltL1Mu0HTT100ORMu4HTT125L1MuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1Mu0HTT100ORMu4HTT125L2Filtered0 + process.HLTL3muonrecoSequence + process.hltL1Mu0HTT100ORMu4HTT125L3Filtered8 + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt100 + process.HLTEcalActivitySequence + process.hltSingleEle8NoCandEtFilter + process.hltActivityPhotonClusterShape + process.hltSingleEle8CaloIdTNoCandClusterShapeFilter + process.hltActivityPhotonHcalForHE + process.hltSingleEle8CaloIdTNoCandHEFilter + process.hltActivityStartUpElectronPixelSeeds + process.hltSingleEle8CaloIdTNoCandPixelMatchFilter + process.HLTPixelMatchElectronActivityTrackingSequence + process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandOneOEMinusOneOPFilter + process.hltElectronActivityDetaDphi + process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandDetaFilter + process.hltSingleElectronEt8CaloIdTTrkIdVLNoCandDphiFilter + process.hltMu8Ele8CaloIdTTrkIdVLMass8Filter + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFHT225NoPU + process.HLTEndSequence )
process.HLT_PFNoPUHT350_Mu15_PFMET45_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1HTT150OrHTT175 + process.hltPrePFNoPUHT350Mu15PFMET45 + cms.ignore(process.hltL1sL1SingleMuOpenCandidate) + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt250 + process.hltHTT150L1MuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1HTT150singleMuL2PreFiltered10 + process.HLTL3muonrecoSequence + process.hltL1HTT150singleMuL3PreFiltered15 + process.HLTPFL1FastL2L3ReconstructionSequence + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFMETProducer + process.hltPFHT350PFMET45 + process.HLTEndSequence )
process.HLT_PFNoPUHT350_Mu15_PFMET50_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1HTT150OrHTT175 + process.hltPrePFNoPUHT350Mu15PFMET50 + cms.ignore(process.hltL1sL1SingleMuOpenCandidate) + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt250 + process.hltHTT150L1MuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1HTT150singleMuL2PreFiltered10 + process.HLTL3muonrecoSequence + process.hltL1HTT150singleMuL3PreFiltered15 + process.HLTPFL1FastL2L3ReconstructionSequence + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFMETProducer + process.hltPFHT350PFMET50 + process.HLTEndSequence )
process.HLT_PFNoPUHT400_Mu5_PFMET45_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1HTT150OrHTT175 + process.hltPrePFNoPUHT400Mu5PFMET45 + cms.ignore(process.hltL1sL1SingleMuOpenCandidate) + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt300 + process.hltHTT150L1MuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1HTT150singleMuL2PreFiltered0 + process.HLTL3muonrecoSequence + process.hltL1HTT150singleMuL3PreFiltered5 + process.HLTPFL1FastL2L3ReconstructionSequence + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFMETProducer + process.hltPFHT400PFMET45 + process.HLTEndSequence )
process.HLT_PFNoPUHT400_Mu5_PFMET50_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1HTT150OrHTT175 + process.hltPrePFNoPUHT400Mu5PFMET50 + cms.ignore(process.hltL1sL1SingleMuOpenCandidate) + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt300 + process.hltHTT150L1MuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1HTT150singleMuL2PreFiltered0 + process.HLTL3muonrecoSequence + process.hltL1HTT150singleMuL3PreFiltered5 + process.HLTPFL1FastL2L3ReconstructionSequence + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFMETProducer + process.hltPFHT400PFMET50 + process.HLTEndSequence )
process.HLT_Mu40_PFNoPUHT350_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu0HTT100ORL1Mu4HTT125 + process.hltPreMu40PFNoPUHT350 + process.hltL1Mu0HTT100ORL1Mu4HTT125L1MuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1Mu0HTT100ORL1Mu4HTT125L2QualMuFiltered16 + process.HLTL3muonrecoSequence + process.hltL1Mu0HTT100ORL1Mu4HTT125L2QualL3MuFiltered40 + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt200 + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFHT350NoPU + process.HLTEndSequence )
process.HLT_Mu60_PFNoPUHT350_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu0HTT100ORL1Mu4HTT125 + process.hltPreMu60PFNoPUHT350 + process.hltL1Mu0HTT100ORL1Mu4HTT125L1MuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1Mu0HTT100ORL1Mu4HTT125L2QualMuFiltered16 + process.HLTL3muonrecoSequence + process.hltL1Mu0HTT100ORL1Mu4HTT125L2QualL3MuFiltered60 + process.HLTRecoJetSequenceAK5L1FastJetCorrected + process.hltHtMht + process.hltHt200 + process.HLTPFnoPUL1FastL2L3ReconstructionSequence + process.hltPFHTNoPU + process.hltPFHT350NoPU + process.HLTEndSequence )
process.HLT_IsoMu12_RsqMR30_Rsq0p04_MR200_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1DoubleJetC64ORDoubleJetC56ORDoubleJetC52 + process.hltPreIsoMu12RsqMR30Rsq0p04MR200 + cms.ignore(process.hltL1sL1SingleMuOpenCandidate) + process.hltSingleMuOpenCandidateL1Filtered0 + process.HLTL2muonrecoSequence + process.hltSingleMuOpenCandidateL2Filtered3 + process.HLTL3muonrecoSequence + process.hltSingleMuOpenCandidateL3Filtered12 + process.HLTRSequenceMuCorrL1FastJetDiJet65 + process.hltRsqMR30Rsq0p04MR200MuCorr + process.HLTL3muoncaloisorecoSequenceNoBools + process.HLTL3muonisorecoSequence + process.hltL3crIsoL1sMuOpenCandidateL1f0L2f3L3f12L3crIsoFiltered12 + process.HLTEndSequence )
process.HLT_IsoMu12_RsqMR40_Rsq0p04_MR200_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sL1DoubleJetC64ORDoubleJetC56ORDoubleJetC52 + process.hltPreIsoMu12RsqMR40Rsq0p04MR200 + cms.ignore(process.hltL1sL1SingleMuOpenCandidate) + process.hltSingleMuOpenCandidateL1Filtered0 + process.HLTL2muonrecoSequence + process.hltSingleMuOpenCandidateL2Filtered3 + process.HLTL3muonrecoSequence + process.hltSingleMuOpenCandidateL3Filtered12 + process.HLTRSequenceMuCorrL1FastJetDiJet65 + process.hltRsqMR40Rsq0p04MR200MuCorr + process.HLTL3muoncaloisorecoSequenceNoBools + process.HLTL3muonisorecoSequence + process.hltL3crIsoL1sMuOpenCandidateL1f0L2f3L3f12L3crIsoFiltered12 + process.HLTEndSequence )
process.HLT_DoubleMu14_Mass8_PFMET40_v8 = cms.Path( process.HLTBeginSequence + process.hltL1sL1DoubleMu10MuOpenORDoubleMu103p5 + process.hltPreDoubleMu14Mass8PFMET40 + process.hltL1DoubleMu10MuOpenORDoubleMu103p5L1DiMuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1DoubleMu10MuOpenORDoubleMu103p5L2DiMuFiltered0 + process.HLTL3muonrecoSequence + process.hltL1DoubleMu10MuOpenORDoubleMu103p5L3DiMu14Mass8Filtered + process.HLTPFL1FastL2L3ReconstructionSequence + process.hltPFMETProducer + process.hltPFMET40Filter + process.HLTEndSequence )
process.HLT_DoubleMu14_Mass8_PFMET50_v8 = cms.Path( process.HLTBeginSequence + process.hltL1sL1DoubleMu10MuOpenORDoubleMu103p5 + process.hltPreDoubleMu14Mass8PFMET50 + process.hltL1DoubleMu10MuOpenORDoubleMu103p5L1DiMuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1DoubleMu10MuOpenORDoubleMu103p5L2DiMuFiltered0 + process.HLTL3muonrecoSequence + process.hltL1DoubleMu10MuOpenORDoubleMu103p5L3DiMu14Mass8Filtered + process.HLTPFL1FastL2L3ReconstructionSequence + process.hltPFMETProducer + process.hltPFMET50Filter + process.HLTEndSequence )
process.HLT_Mu14_Ele14_CaloIdT_TrkIdVL_Mass8_PFMET40_v8 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu12EG7ORL1MuOpenEG12 + process.hltPreMu14Ele14CaloIdTTrkIdVLMass8PFMET40 + process.hltL1Mu12EG7ORL1MuOpenEG12L1MuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1Mu12EG7ORL1MuOpenEG12L2MuFiltered0 + process.HLTL3muonrecoSequence + process.hltL1Mu12EG7ORL1MuOpenEG12L3MuFiltered14 + process.HLTEle14L1NonIsoHLTCaloIdTSequence + process.HLTPixelMatchElectronL1SeededTrackingSequence + process.hltEle14CaloIdTTrkIdVLOneOEMinusOneOPFilter + process.HLTDoElectronDetaDphiSequence + process.hltEle14CaloIdTTrkIdVLDetaFilter + process.hltEle14CaloIdTTrkIdVLDphiFilter + process.hltMu14Ele14CaloIdTTrkIdVLMass8Filter + process.HLTPFL1FastL2L3ReconstructionSequence + process.hltPFMETProducer + process.hltPFMET40Filter + process.HLTEndSequence )
process.HLT_Mu14_Ele14_CaloIdT_TrkIdVL_Mass8_PFMET50_v8 = cms.Path( process.HLTBeginSequence + process.hltL1sL1Mu12EG7ORL1MuOpenEG12 + process.hltPreMu14Ele14CaloIdTTrkIdVLMass8PFMET50 + process.hltL1Mu12EG7ORL1MuOpenEG12L1MuFiltered0 + process.HLTL2muonrecoSequence + process.hltL1Mu12EG7ORL1MuOpenEG12L2MuFiltered0 + process.HLTL3muonrecoSequence + process.hltL1Mu12EG7ORL1MuOpenEG12L3MuFiltered14 + process.HLTEle14L1NonIsoHLTCaloIdTSequence + process.HLTPixelMatchElectronL1SeededTrackingSequence + process.hltEle14CaloIdTTrkIdVLOneOEMinusOneOPFilter + process.HLTDoElectronDetaDphiSequence + process.hltEle14CaloIdTTrkIdVLDetaFilter + process.hltEle14CaloIdTTrkIdVLDphiFilter + process.hltMu14Ele14CaloIdTTrkIdVLMass8Filter + process.HLTPFL1FastL2L3ReconstructionSequence + process.hltPFMETProducer + process.hltPFMET50Filter + process.HLTEndSequence )
# Input source: a single local RelVal raw-data file for this GRun menu test.
process.source = cms.Source( "PoolSource",
    fileNames = cms.untracked.vstring(
        'file:RelVal_Raw_GRun_DATA.root',
    ),
    # no secondary (parent) files are needed for this input
    secondaryFileNames = cms.untracked.vstring(
    ),
    # keep every product from the input file available to the HLT modules
    inputCommands = cms.untracked.vstring(
        'keep *'
    )
)
# Enable HF Noise filters in GRun menu
# (only when the HF local-reconstruction module is actually part of this menu)
if 'hltHfreco' in process.__dict__:
    process.hltHfreco.setNoiseFlags = cms.bool( True )
# CMSSW version specific customizations
import os
# release name comes from the runtime environment
# (os.environ[...] raises KeyError outside a CMSSW environment — intentional fail-fast)
cmsswVersion = os.environ['CMSSW_VERSION']
# customization for CMSSW_5_2_X
if cmsswVersion.startswith('CMSSW_5_2_'):
    # force the use of the correct calo jet energy corrections
    if 'hltESPL1FastJetCorrectionESProducer' in process.__dict__:
        process.hltESPL1FastJetCorrectionESProducer.algorithm = "AK5CaloHLT"
    if 'hltESPL2RelativeCorrectionESProducer' in process.__dict__:
        process.hltESPL2RelativeCorrectionESProducer.algorithm = "AK5CaloHLT"
    if 'hltESPL3AbsoluteCorrectionESProducer' in process.__dict__:
        process.hltESPL3AbsoluteCorrectionESProducer.algorithm = "AK5CaloHLT"
# customization for CMSSW_5_3_X
if cmsswVersion.startswith('CMSSW_5_3_'):
    # do not override the calo jet energy corrections in 5.3.x for consistency with the current MC samples
    pass
# customization for CMSSW_6_1_X and 6_2_X
if cmsswVersion.startswith('CMSSW_6_1_') or cmsswVersion.startswith('CMSSW_6_2_'):
    # force the use of the correct calo jet energy corrections
    if 'hltESPL1FastJetCorrectionESProducer' in process.__dict__:
        process.hltESPL1FastJetCorrectionESProducer.algorithm = "AK5CaloHLT"
    if 'hltESPL2RelativeCorrectionESProducer' in process.__dict__:
        process.hltESPL2RelativeCorrectionESProducer.algorithm = "AK5CaloHLT"
    if 'hltESPL3AbsoluteCorrectionESProducer' in process.__dict__:
        process.hltESPL3AbsoluteCorrectionESProducer.algorithm = "AK5CaloHLT"
    # adapt the HLT menu to the "prototype for Event Interpretation" development
    if 'hltPFPileUp' in process.__dict__:
        # define a new PFCandidateFwdPtrProducer module reading the full PF collection
        process.hltParticleFlowPtrs = cms.EDProducer("PFCandidateFwdPtrProducer",
            src = cms.InputTag('hltParticleFlow')
        )
        # insert the new module right before hltPFPileUp in every sequence that
        # contains it; list(...values()) replaces the Python-2-only .itervalues()
        # (and the redundant "_sequence = None" priming of the old loop), and
        # snapshots the dict so the iteration is safe on Python 3 as well
        for _sequence in list(process.__dict__.values()):
            if not isinstance(_sequence, cms._ModuleSequenceType):
                continue
            try:
                _sequence.insert( _sequence.index(process.hltPFPileUp), process.hltParticleFlowPtrs )
            except ValueError:
                # this sequence does not contain hltPFPileUp — nothing to do
                pass
        # reconfigure hltPFPileUp and hltPFNoPileUp to use the new module
        process.hltPFPileUp.PFCandidates = cms.InputTag( "hltParticleFlowPtrs" )
        process.hltPFNoPileUp.bottomCollection = cms.InputTag( "hltParticleFlowPtrs" )
# customization for CMSSW_6_2_X only
if cmsswVersion.startswith('CMSSW_6_2_'):
    # Tracker DetId bit-field layout (start bits and masks per sub-detector),
    # copied verbatim from:
    # /Geometry/TrackerNumberingBuilder/trackerTopologyConstants_cfi.py
    process.trackerTopologyConstants = cms.ESProducer('TrackerTopologyEP',
        # pixel barrel (PXB)
        pxb_layerStartBit = cms.uint32(16),
        pxb_ladderStartBit = cms.uint32(8),
        pxb_moduleStartBit = cms.uint32(2),
        pxb_layerMask = cms.uint32(15),
        pxb_ladderMask = cms.uint32(255),
        pxb_moduleMask = cms.uint32(63),
        # pixel forward (PXF)
        pxf_sideStartBit = cms.uint32(23),
        pxf_diskStartBit = cms.uint32(16),
        pxf_bladeStartBit = cms.uint32(10),
        pxf_panelStartBit = cms.uint32(8),
        pxf_moduleStartBit = cms.uint32(2),
        pxf_sideMask = cms.uint32(3),
        pxf_diskMask = cms.uint32(15),
        pxf_bladeMask = cms.uint32(63),
        pxf_panelMask = cms.uint32(3),
        pxf_moduleMask = cms.uint32(63),
        # tracker endcap (TEC)
        tec_sideStartBit = cms.uint32(18),
        tec_wheelStartBit = cms.uint32(14),
        tec_petal_fw_bwStartBit = cms.uint32(12),
        tec_petalStartBit = cms.uint32(8),
        tec_ringStartBit = cms.uint32(5),
        tec_moduleStartBit = cms.uint32(2),
        tec_sterStartBit = cms.uint32(0),
        tec_sideMask = cms.uint32(3),
        tec_wheelMask = cms.uint32(15),
        tec_petal_fw_bwMask = cms.uint32(3),
        tec_petalMask = cms.uint32(15),
        tec_ringMask = cms.uint32(7),
        tec_moduleMask = cms.uint32(7),
        tec_sterMask = cms.uint32(3),
        # tracker inner barrel (TIB)
        tib_layerStartBit = cms.uint32(14),
        tib_str_fw_bwStartBit = cms.uint32(12),
        tib_str_int_extStartBit = cms.uint32(10),
        tib_strStartBit = cms.uint32(4),
        tib_moduleStartBit = cms.uint32(2),
        tib_sterStartBit = cms.uint32(0),
        tib_layerMask = cms.uint32(7),
        tib_str_fw_bwMask = cms.uint32(3),
        tib_str_int_extMask = cms.uint32(3),
        tib_strMask = cms.uint32(63),
        tib_moduleMask = cms.uint32(3),
        tib_sterMask = cms.uint32(3),
        # tracker inner disks (TID)
        tid_sideStartBit = cms.uint32(13),
        tid_wheelStartBit = cms.uint32(11),
        tid_ringStartBit = cms.uint32(9),
        tid_module_fw_bwStartBit = cms.uint32(7),
        tid_moduleStartBit = cms.uint32(2),
        tid_sterStartBit = cms.uint32(0),
        tid_sideMask = cms.uint32(3),
        tid_wheelMask = cms.uint32(3),
        tid_ringMask = cms.uint32(3),
        tid_module_fw_bwMask = cms.uint32(3),
        tid_moduleMask = cms.uint32(31),
        tid_sterMask = cms.uint32(3),
        # tracker outer barrel (TOB)
        tob_layerStartBit = cms.uint32(14),
        tob_rod_fw_bwStartBit = cms.uint32(12),
        tob_rodStartBit = cms.uint32(5),
        tob_moduleStartBit = cms.uint32(2),
        tob_sterStartBit = cms.uint32(0),
        tob_layerMask = cms.uint32(7),
        tob_rod_fw_bwMask = cms.uint32(3),
        tob_rodMask = cms.uint32(127),
        tob_moduleMask = cms.uint32(7),
        tob_sterMask = cms.uint32(3),
        appendToDataLabel = cms.string('')
    )
# adapt HLT modules to the correct process name
# (this job runs under the process name 'TEST', so every consumer of the
#  TriggerResults product must be pointed at that process)
if 'hltTrigReport' in process.__dict__:
    process.hltTrigReport.HLTriggerResults = cms.InputTag( 'TriggerResults', '', 'TEST' )
# all the "smart" output prescaler modules share the same TriggerResultsTag parameter
for _smartPrescaler in (
        'hltPreExpressCosmicsOutputSmart',
        'hltPreExpressOutputSmart',
        'hltPreDQMForHIOutputSmart',
        'hltPreDQMForPPOutputSmart',
        'hltPreHLTDQMResultsOutputSmart',
        'hltPreHLTDQMOutputSmart',
        'hltPreHLTMONOutputSmart',
):
    if _smartPrescaler in process.__dict__:
        getattr(process, _smartPrescaler).TriggerResultsTag = cms.InputTag( 'TriggerResults', '', 'TEST' )
# the DQM scalers additionally carry an explicit process name parameter
if 'hltDQMHLTScalers' in process.__dict__:
    process.hltDQMHLTScalers.triggerResults = cms.InputTag( 'TriggerResults', '', 'TEST' )
    process.hltDQMHLTScalers.processname = 'TEST'
if 'hltDQML1SeedLogicScalers' in process.__dict__:
    process.hltDQML1SeedLogicScalers.processname = 'TEST'
# limit the number of events to be processed
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32( 100 )
)
# enable the TrigReport and TimeReport
# (wantSummary prints the trigger and timing summaries at end of job)
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool( True )
)
# override the GlobalTag, connection string and pfnPrefix
if 'GlobalTag' in process.__dict__:
    process.GlobalTag.connect = 'frontier://FrontierProd/CMS_COND_31X_GLOBALTAG'
    process.GlobalTag.pfnPrefix = cms.untracked.string('frontier://FrontierProd/')
    # resolve the symbolic 'auto:hltonline' tag to the current online HLT GlobalTag
    from Configuration.AlCa.GlobalTag import GlobalTag as customiseGlobalTag
    process.GlobalTag = customiseGlobalTag(process.GlobalTag, globaltag = 'auto:hltonline')
# customize the L1 emulator to run customiseL1GtEmulatorFromRaw with HLT to switchToSimGtDigis
process.load( 'Configuration.StandardSequences.RawToDigi_Data_cff' )
process.load( 'Configuration.StandardSequences.SimL1Emulator_cff' )
import L1Trigger.Configuration.L1Trigger_custom
# re-run the L1 Global Trigger emulator on the unpacked RAW data,
# with all prescales and trigger masks reset
process = L1Trigger.Configuration.L1Trigger_custom.customiseL1GtEmulatorFromRaw( process )
process = L1Trigger.Configuration.L1Trigger_custom.customiseResetPrescalesAndMasks( process )
# customize the HLT to use the emulated results
import HLTrigger.Configuration.customizeHLTforL1Emulator
process = HLTrigger.Configuration.customizeHLTforL1Emulator.switchToL1Emulator( process )
process = HLTrigger.Configuration.customizeHLTforL1Emulator.switchToSimGtDigis( process )
if 'MessageLogger' in process.__dict__:
    # make sure the report/summary message categories are known to the MessageLogger
    process.MessageLogger.categories.append('TriggerSummaryProducerAOD')
    process.MessageLogger.categories.append('L1GtTrigReport')
    process.MessageLogger.categories.append('HLTrigReport')
    process.MessageLogger.categories.append('FastReport')
| [
"halil.gamsizkan@cern.ch"
] | halil.gamsizkan@cern.ch |
24364854b0efa09b1fd0ed72288c66064dfb1353 | a2e3f4944076a9d25fd6e7aa30d0cda55c47ff18 | /template_dynamicloader/views.py | 2f66be35ba8cae440020eeac4d89c162fbdf329c | [] | no_license | redatest/Shakal-NG | fb62b58b3d4c7a6a236beed8efd98712425621f2 | d2a38df9910ec11b237912eefe1c1259203675ee | refs/heads/master | 2021-01-18T02:21:44.654598 | 2015-03-21T14:09:56 | 2015-03-21T14:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | # -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.views.decorators.http import require_POST
from template_dynamicloader.forms import ChangeTemplateHiddenForm
from template_dynamicloader.utils import switch_template
@require_POST
def change(request):
    """Switch the visitor's site template/style from a hidden POST form.

    The template is switched only when the form validates and the submit
    button named 'change_style' was used; in every case the user is sent
    back to the home page (POST/redirect/GET pattern).
    """
    submitted = ChangeTemplateHiddenForm(request.POST)
    should_switch = submitted.is_valid() and 'change_style' in request.POST
    if should_switch:
        switch_template(request, **submitted.cleaned_data)
    return HttpResponseRedirect(reverse('home'))
| [
"miroslav.bendik@gmail.com"
] | miroslav.bendik@gmail.com |
b4189e2890cf1a95e9133a85b4a520a56542c3a0 | 8a9f09c7048f79043280a3552a4f9b4ea950d167 | /service_venv/adserver/bin/easy_install | 72e7259f2b8cb09231c4d61f8f1525cc19696c94 | [] | no_license | ajinkyapathak/adservice | 6688e7216292f30574b2a673ab3bbeff40d0ae5c | 073cc35667e9afe0b0577ad8c906d7c29edc2185 | refs/heads/main | 2023-05-11T04:29:00.619932 | 2022-07-25T05:12:26 | 2022-07-25T05:12:26 | 229,237,294 | 0 | 1 | null | 2023-05-01T21:19:22 | 2019-12-20T09:51:58 | Python | UTF-8 | Python | false | false | 253 | #!/home/ajinkya/adserver/bin/python3.6
# -*- coding: utf-8 -*-
# Auto-generated setuptools console-script wrapper: delegates to easy_install's main().
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # setuptools may install this wrapper as 'easy_install-script.py(w)' (or
    # '.exe' on Windows); strip that suffix so argv[0] is the bare command name
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"ajinkya.pathak@agrostar.in"
] | ajinkya.pathak@agrostar.in | |
fbb590e9d0e3c97035b80b0a45b1ebc1aac74239 | 69b6f6b14c75c53da54c4c907bb02a77a7d0230d | /.ycm_extra_conf.py | c3a96c119fd1423139405fa9fe8329f7a2457a70 | [] | no_license | cerveka2/rir | 009aae4bc2ee24f406bb58b9fad0f0ab7a2b59df | 25c18f00c7cbd690f3d4450aa1441eecb8fa2a47 | refs/heads/master | 2020-03-26T15:25:13.708942 | 2018-08-16T08:49:08 | 2018-08-16T08:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,834 | py | # Generated by YCM Generator at 2016-11-01 15:50:59.501081
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
def DirectoryOfThisScript():
    """Return the absolute path of the directory holding this config file."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
# Static fallback flags, used only when no compilation database is found.
# Include paths are resolved relative to this script's own directory.
flags = [
'-x',
'c++',
'-Drir_EXPORTS',
'-I'+DirectoryOfThisScript()+'/external/custom-r/include',
'-I'+DirectoryOfThisScript()+'/rir/src',
'-Wall',
'-std=c++11',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# An empty path never exists, so 'database' stays None here and the static
# 'flags' list above is what FlagsForFile() falls back to.
if os.path.exists( compilation_database_folder ):
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None
# Source extensions probed when looking up flags for a header file.
SOURCE_EXTENSIONS = [ '.C', '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
  """Return a copy of *flags* with relative search paths rooted at
  *working_directory*.

  Both the two-token form (``-I <path>``) and the fused form
  (``-I<path>``) are handled for each known path flag.  When
  *working_directory* is falsy the flags are returned as a shallow copy,
  unchanged.  Empty flag strings are dropped from the result.
  """
  if not working_directory:
    return list(flags)
  path_prefixes = ['-isystem', '-I', '-iquote', '--sysroot=']
  absolute_flags = []
  expect_path = False
  for token in flags:
    rewritten = token
    if expect_path:
      expect_path = False
      # A path that is already absolute is left alone.
      if not token.startswith('/'):
        rewritten = os.path.join(working_directory, token)
    for prefix in path_prefixes:
      if token == prefix:
        # Two-token form: the *next* flag is the path to rewrite.
        expect_path = True
        break
      if token.startswith(prefix):
        # Fused form: rewrite the tail after the prefix.
        tail = token[len(prefix):]
        rewritten = prefix + os.path.join(working_directory, tail)
        break
    if rewritten:
      absolute_flags.append(rewritten)
  return absolute_flags
def IsHeaderFile(filename):
  """True when *filename* carries a C/C++ header extension (case-sensitive)."""
  _, ext = os.path.splitext(filename)
  return ext in ('.H', '.h', '.hxx', '.hpp', '.hh')
def GetCompilationInfoForFile(filename):
  """Look up compile flags for *filename* in the compilation database.

  The CMake-generated database has no entries for headers, so for a header
  we probe sibling source files (same basename, each extension in
  SOURCE_EXTENSIONS) and borrow the first usable flag set.  Returns None
  when nothing matches.
  """
  if not IsHeaderFile(filename):
    return database.GetCompilationInfoForFile(filename)
  stem = os.path.splitext(filename)[0]
  for ext in SOURCE_EXTENSIONS:
    candidate = stem + ext
    if not os.path.exists(candidate):
      continue
    info = database.GetCompilationInfoForFile(candidate)
    if info.compiler_flags_:
      return info
  return None
def FlagsForFile(filename, **kwargs):
  """YouCompleteMe entry point: produce the final flag set for *filename*."""
  if not database:
    # No compilation database: use the static flag list, with relative
    # paths rooted at this script's directory.
    resolved = MakeRelativePathsInFlagsAbsolute(flags, DirectoryOfThisScript())
    return {
      'flags': resolved,
      'do_cache': True
    }
  info = GetCompilationInfoForFile(filename)
  if not info:
    return None
  # compiler_flags_ is a "list-like" StringVec, not a real python list.
  resolved = MakeRelativePathsInFlagsAbsolute(
    info.compiler_flags_,
    info.compiler_working_dir_)
  return {
    'flags': resolved,
    'do_cache': True
  }
| [
"o@o1o.ch"
] | o@o1o.ch |
0b113dbee95e60808131df07d4c7d8ad9f011301 | 763b72993beb04681a28949d2ebcdc205d497bf6 | /parseex.py | 636b074ab13f13a5c185a3ab9564832f94d73b49 | [] | no_license | hamza07-w/img_type_converter | a7e1bf368f91309d3d8b80dec8447ad9386edab4 | 761b451729ea7c537cc1ac33a9da69e718df7a1d | refs/heads/main | 2023-08-15T14:01:49.706146 | 2021-09-16T19:09:24 | 2021-09-16T19:09:24 | 345,197,895 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | #!/usr/bin/env python
import optparse
import requests
# Command-line interface: -u/--url gives the source image URL, -i/--img the
# target file extension (e.g. "png").
# NOTE(review): the help texts contain typos ("past", "penter"); they are
# user-visible runtime strings, so they are deliberately left unchanged here.
parser = optparse.OptionParser()
parser.add_option("-u", "--url", dest="url", type="str", help="here you need to past url of img")
parser.add_option("-i", "--img", dest="img", type="str", help="here you need to penter type of img you want to convert to")
(opt, arg) = parser.parse_args()
def convert(url, toType):
    """Download the image at *url* and save it in the working directory
    as ``img.<toType>``.

    Always returns a human-readable status string (never raises for
    ordinary network or file-system failures).

    Fixes over the original: HTTP error statuses (404, 500, ...) no longer
    get saved to disk as if they were image data; network errors are
    reported instead of propagating; the bare ``except`` that blamed every
    failure on "type not found" is replaced with targeted handlers; typo
    in the success message corrected.
    """
    try:
        response = requests.get(url)
        response.raise_for_status()
        with open(f'img.{toType}', 'wb') as f:
            f.write(response.content)
        return "The picture converted successfully!"
    except requests.RequestException:
        return "error! could not download the image!"
    except OSError:
        return "error! could not write the output file!"
# Pull the parsed options and run the conversion, printing the status line.
l = opt.url
imgT = opt.img
print(convert(l, imgT))
"noreply@github.com"
] | hamza07-w.noreply@github.com |
4e727091b2fd22fb5add4c9b86864a3a44c97895 | 5459616d34a368031aa04ad437f5e87ae53d7d51 | /Fundamentals of supervised learning.py | 5d77cec8c7f604cc5f1f74faeb10c84b6f1ac4d0 | [] | no_license | eltonlanders/Supervised-Learning-Algorithms | 48fd924aadc161c32e3c7075326b3b8961a649ca | f027a4c79b2d149c9548388cfe34fd7eaf01aa84 | refs/heads/main | 2023-03-18T18:08:10.495039 | 2021-03-08T07:18:41 | 2021-03-08T07:18:41 | 345,564,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,776 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 27 08:27:48 2021
@author: elton
"""
import pandas as pd
import numpy as np
#Loading and Summarizing the Titanic Dataset
df = pd.read_csv(r'C:/Users/elton/Documents/Books/Packt workshops/The Supervised Learning Workshop/The-Supervised-Learning-Workshop-master/Chapter01/Datasets/titanic.csv')
#Indexing and Selecting Data
df['Age']
df.Age #when no spaces in col name
df[['Name', 'Parch', 'Sex']]
df.iloc[0] #first row
df.iloc[[0, 1, 2]] #first 3 rows
columns = df.columns
df[columns[1:4]]
len(df)
df.iloc[2]['Fare'] #row-centric method
df.iloc[2].Fare
df['Fare'][2] #column centric method
df.Fare[2]
#Advanced Indexing and Selection
child_passengers = df[df.Age < 21][['Name', 'Age']]
len(child_passengers)
young_adult_passengers = df.loc[(df.Age > 21) & (df.Age < 30)]
first_or_third_class = df[(df.Pclass == 3) | (df.Pclass == 1)]
not_first_or_third_class = df[~((df.Pclass == 3) | (df.Pclass == 1))]
del df['Unnamed: 0']
described_table = df.describe()
described_table_2 = df.describe(include='all')
# Splitting, Applying, and Combining Data Sources
embarked_group = df.groupby('Embarked')
len(embarked_group)
embarked_group.groups #it's a dictionary where keys are groups
#The values are the rows or indexes of the entries that belong to that group
df.iloc[1]
for name, group in embarked_group:
print(name, group.Age.mean())
embarked_group.agg(np.mean)
def first_val(x):
    """Return the first element of a pandas column/group via its .values array."""
    underlying = x.values
    return underlying[0]
# Creating Lambda Functions
embarked_group = df.groupby('Embarked')
embarked_group.agg(lambda x: x.values[0])
first_mean_std = embarked_group.agg([lambda x: x.values[0], np.mean, np.std])
embarked_group.agg({ #the agg method with a dictionary of different columns
'Fare': np.sum,
'Age': lambda x: x.values[0]
})
age_embarked_group = df.groupby(['Sex', 'Embarked'])
age_embarked_group.groups
# Managing Missing Data
len(df)
df.dropna()
len(df.dropna())
df.aggregate(lambda x: x.isna().sum())
df_valid = df.loc[(~df.Embarked.isna()) & (~df.Fare.isna())]
df_valid['Age'] = df_valid['Age'].fillna(df_valid.Age.mean()) #mean-imputing
df_valid.loc[df.Pclass == 1, 'Age'].mean() #imputing age by avg class age
df_valid.loc[df.Pclass == 2, 'Age'].mean()
df_valid.loc[df.Pclass == 3, 'Age'].mean()
for name, grp in df_valid.groupby(['Pclass', 'Sex']):
print('%i' % name[0], name[1], '%0.2f' % grp['Age'].mean())
mean_ages = df_valid.groupby(['Pclass', 'Sex'])['Age'].transform(lambda x: x.fillna(x.mean()))
df_valid.loc[:, 'Age'] = mean_ages
# Class imbalance
len(df.loc[df.Survived == 1])
len(df.loc[df.Survived == 0])
#copying the first row to the end of the DataFrame
df_oversample = df.append(df.iloc[0])
# Implementing Pandas Functions
df = pd.read_csv(r'C:/Users/elton/Documents/Books/Packt workshops/The Supervised Learning Workshop/The-Supervised-Learning-Workshop-master/Chapter01/Datasets/titanic.csv')
df_described = df.describe(include='all')
df.drop('Unnamed: 0',axis=1, inplace=True)
df.mean()
df.std()
df.min()
df.max()
df.quantile(0.33)
df.quantile(0.66)
df.quantile(0.99)
class_groups = df.groupby(['Pclass'])
for name, index in class_groups:
print(f'Class: {name}: {len(index)}')
third_class = df.loc[df.Pclass == 3]
age_max = third_class.loc[(third_class.Age == third_class.Age.max())]
fare_max = df.Fare.max()
age_max = df.Age.max()
df.agg({
'Fare': lambda x: x / fare_max,
'Age': lambda x: x / age_max,
}).head()
missing = df.loc[df['Fare'].isna() == True]
df_nan_fare = df.loc[(df.Fare.isna())]
embarked_class_groups = df.groupby(['Embarked', 'Pclass'])
indices = embarked_class_groups.groups[(df_nan_fare.Embarked.
values[0], df_nan_fare.Pclass.values[0])]
mean_fare = df.iloc[indices].Fare.mean()
df.loc[(df.index == 1043), 'Fare'] = mean_fare
df.iloc[1043]
"""
Notes:
1. Understand the source and type of the data, the means by which it is
collected, and any errors potentially resulting from the collection process.
2. Any function that can take a list or a similar iterable and compute a single
value as a result can be used with agg.
3. If you have clean data, in sufficient quantity, with a good correlation
between the input data type and the desired output, then the specifics regarding
the type and details of the selected supervised learning model become significantly
less important in achieving a good result.
4. Treating class imbalance: Oversample the under-represented class by randomly
copying samples from the under-represented class in the dataset to boost the
number of samples.
5. Dealing with low sample size: Transfer learning.
""" | [
"noreply@github.com"
] | eltonlanders.noreply@github.com |
21294b9a0632585a5d552d1ca8c0526639089b9a | 675ea71974e73d01c01972ae1211dab6c3273864 | /twisted_client_for_nimbusio/rest_api.py | 25a1b3b5e0003002032f0ba2577a112b826288ff | [] | no_license | SpiderOak/twisted_client_for_nimbusio | 1e8880a24b6852428d6d13c59d78e0df8a21386a | 8c70a46112f809780f725f778dce24f6ff08aaea | refs/heads/master | 2021-01-10T20:44:49.912621 | 2013-03-07T21:02:57 | 2013-03-07T21:02:57 | 7,900,092 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,436 | py | # -*- coding: utf-8 -*-
"""
rest_api.py
support the nimbus.io REST API
"""
from lumberyard.http_util import compute_uri as compute_uri_path
def compute_archive_path(key, *args, **kwargs):
    """Return the URI path used to archive *key* under the "data" collection."""
    path = compute_uri_path("data", key, *args, **kwargs)
    return path
def compute_head_path(key, *args, **kwargs):
    """Return the URI path used for a HEAD request on *key*."""
    path = compute_uri_path("data", key, *args, **kwargs)
    return path
def compute_list_keys_path(prefix=""):
    """Return the URI path listing all keys, optionally restricted to those
    beginning with *prefix*."""
    query = {}
    if prefix is not None and prefix != "":
        query["prefix"] = prefix
    return compute_uri_path("data/", **query)
def compute_list_versions_path(prefix=""):
    """Return the URI path listing every version of every key, optionally
    restricted to keys beginning with *prefix*."""
    query = {}
    if prefix is not None and prefix != "":
        query["prefix"] = prefix
    return compute_uri_path("/?versions", **query)
def compute_retrieve_path(key, version_id=None):
    """Return the URI path that retrieves the contents of *key*;
    *version_id* selects a specific stored version when given."""
    return compute_uri_path("data", key, version_identifier=version_id)
def compute_range_header_tuple(slice_offset, slice_size):
    """Return an HTTP ("Range", value) header tuple selecting a slice.

    With a size the range is closed (``bytes=<start>-<last>``, last byte
    inclusive) and a missing offset defaults to 0.  Without a size the
    range is open-ended and an offset must be supplied.
    """
    if slice_size is None:
        assert slice_offset is not None
        return "Range", "bytes=%d-" % (slice_offset, )
    start = 0 if slice_offset is None else slice_offset
    last = start + slice_size - 1
    return "Range", "bytes=%d-%d" % (start, last)
def compute_start_conjoined_path(key):
    """Return the URI path that starts a conjoined archive for *key*."""
    return compute_uri_path("conjoined", key, action="start")
def compute_abort_conjoined_path(conjoined_identifier, key):
    """
    abort an in-progress conjoined archive

    Bug fix: the computed URI was assigned to a local and never returned,
    so callers always received None.  Docstring also corrected (it said
    "start", copied from the sibling function).
    """
    kwargs = {"action" : "abort",
              "conjoined_identifier" : conjoined_identifier}
    return compute_uri_path("conjoined", key, **kwargs)
def compute_finish_conjoined_path(key, conjoined_identifier):
    """
    finish (commit) a conjoined archive for *key*
    """
    kwargs = {"action" : "finish",
              "conjoined_identifier" : conjoined_identifier}
    return compute_uri_path("conjoined", key, **kwargs)
| [
"dougfort@spideroak.com"
] | dougfort@spideroak.com |
6a138ba973cb0c3445c9e304eb69802cea8a51f1 | 34b76d94ff323e65e76be9bef71379e73046ad1f | /sacred_runs_final/_sources/run_sacred_926b2f1738101acc8665dff2324ae499.py | 44541df559402ca43e56054e8681d454cc6dacc7 | [
"MIT"
] | permissive | lorelupo/baselines | 5324e3f05615789608e6119ae7395b77973cbe8c | 8b6df664ecb714e77703f8fd9c7ea3841048bb28 | refs/heads/master | 2020-04-29T20:19:34.256241 | 2019-02-28T19:18:21 | 2019-02-28T19:18:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,135 | py | #!/usr/bin/env python3
# noinspection PyUnresolvedReferences
'''
This script runs rllab or gym environments. To run RLLAB, use the format
rllab.<env_name> as env name, otherwise gym will be used.
export SACRED_RUNS_DIRECTORY to log sacred to a directory
export SACRED_SLACK_CONFIG to use a slack plugin
'''
# Common imports
import sys, re, os, time, logging
from collections import defaultdict
# Framework imports
import gym
import tensorflow as tf
# Self imports: utils
from baselines.common import set_global_seeds
from baselines import logger
import baselines.common.tf_util as U
from baselines.common.rllab_utils import Rllab2GymWrapper, rllab_env_from_name
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.parallel_sampler import ParallelSampler
from baselines.common.cmd_util import get_env_type
# Self imports: algorithm
from baselines.policy.mlp_policy import MlpPolicy
from baselines.policy.cnn_policy import CnnPolicy
from baselines.pois import pois
# Sacred
from sacred import Experiment
from sacred.observers import FileStorageObserver, SlackObserver
# Create experiment, assign the name if provided in env variables
if os.environ.get('EXPERIMENT_NAME') is not None:
ex = Experiment(os.environ.get('EXPERIMENT_NAME'))
else:
ex = Experiment('POIS')
# Set a File Observer
if os.environ.get('SACRED_RUNS_DIRECTORY') is not None:
print("Sacred logging at:", os.environ.get('SACRED_RUNS_DIRECTORY'))
ex.observers.append(FileStorageObserver.create(os.environ.get('SACRED_RUNS_DIRECTORY')))
if os.environ.get('SACRED_SLACK_CONFIG') is not None:
print("Sacred is using slack.")
ex.observers.append(SlackObserver.from_config(os.environ.get('SACRED_SLACK_CONFIG')))
@ex.config
def custom_config():
    """Sacred config scope: every local name defined here becomes a config
    entry and is injected by name into main() below (its parameter list
    matches these names exactly), so do not rename these variables."""
    seed = 0
    env = 'rllab.cartpole'
    num_episodes = 100
    max_iters = 500
    horizon = 500
    iw_method = 'is'
    iw_norm = 'none'
    natural = False
    file_name = 'progress'
    logdir = 'logs'
    bound = 'max-d2'
    delta = 0.99
    njobs = -1
    save_weights = False
    policy = 'nn'
    policy_init = 'xavier'
    max_offline_iters = 10
    gamma = 1.0
    center = False
    clipping = False
    entropy = 'none'
    reward_clustering = 'none'
    positive_return = False
    experiment_name = None
    # ENTROPY can be of 4 schemes:
    # - 'none': no entropy bonus
    # - 'step:<height>:<duration>': step function which is <height> tall for <duration> iterations
    # - 'lin:<max>:<min>': linearly decreasing function from <max> to <min> over all iterations, clipped to 0 for negatives
    # - 'exp:<height>:<scale>': exponentially decreasing curve <height> tall, use <scale> to make it "spread" more
    # REWARD_CLUSTERING can be of 4 schemes:
    # - 'none': do nothing
    # - 'manual:<N>:<min>:<max>': N classes between min and max
    # - 'global:<N>': N classes over global min and max (as seen so far)
    # - 'batch:<N>': N classes over batch min and max (as seen so far)
    # TODO: quantiles discretization?
    # Create the filename
    # When left at the default 'progress', derive a unique filename from the
    # main hyper-parameters plus a timestamp; otherwise keep the user's value.
    if file_name == 'progress':
        file_name = '%s_iw=%s_bound=%s_delta=%s_gamma=%s_center=%s_entropy=%s_seed=%s_%s' % (env.upper(), iw_method, bound, delta, gamma, center, entropy, seed, time.time())
    else:
        file_name = file_name
def train(env, policy, policy_init, n_episodes, horizon, seed, njobs=1, save_weights=False, **alg_args):
    """Build the environment and policy factories, then run POIS learning.

    env         -- gym env id, or 'rllab.<name>' for an rllab environment
    policy      -- 'linear' / 'nn' (MlpPolicy) or 'cnn' (CnnPolicy)
    policy_init -- 'xavier' or 'zeros' weight initializer
    alg_args    -- forwarded verbatim to pois.learn
    """
    if env.startswith('rllab.'):
        # Get env name and class
        env_name = re.match('rllab.(\S+)', env).group(1)
        env_rllab_class = rllab_env_from_name(env_name)
        # Define env maker
        def make_env():
            env_rllab = env_rllab_class()
            _env = Rllab2GymWrapper(env_rllab)
            return _env
        # Used later
        env_type = 'rllab'
    else:
        # Normal gym, get if Atari or not.
        env_type = get_env_type(env)
        assert env_type is not None, "Env not recognized."
        # Define the correct env maker
        if env_type == 'atari':
            # Atari, custom env creation
            def make_env():
                _env = make_atari(env)
                return wrap_deepmind(_env)
        else:
            # Not atari, standard env creation
            def make_env():
                env_rllab = gym.make(env)
                return env_rllab
    # Network shape: a linear policy is an MLP with zero hidden layers.
    if policy == 'linear':
        hid_size = num_hid_layers = 0
    elif policy == 'nn':
        hid_size = [100, 50, 25]
        num_hid_layers = 3
    if policy_init == 'xavier':
        policy_initializer = tf.contrib.layers.xavier_initializer()
    elif policy_init == 'zeros':
        policy_initializer = U.normc_initializer(0.0)
    else:
        raise Exception('Unrecognized policy initializer.')
    if policy == 'linear' or policy == 'nn':
        def make_policy(name, ob_space, ac_space):
            return MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                hid_size=hid_size, num_hid_layers=num_hid_layers, gaussian_fixed_var=True, use_bias=False, use_critic=False,
                hidden_W_init=policy_initializer, output_W_init=policy_initializer)
    elif policy == 'cnn':
        def make_policy(name, ob_space, ac_space):
            return CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                gaussian_fixed_var=True, use_bias=False, use_critic=False,
                hidden_W_init=policy_initializer,
                output_W_init=policy_initializer)
    else:
        raise Exception('Unrecognized policy type.')
    sampler = ParallelSampler(make_policy, make_env, n_episodes, horizon, True, n_workers=njobs, seed=seed)
    # os.sched_getaffinity is Linux-only; fall back to njobs elsewhere.
    # NOTE(review): the bare except also hides unrelated errors.
    try:
        affinity = len(os.sched_getaffinity(0))
    except:
        affinity = njobs
    sess = U.make_session(affinity)
    sess.__enter__()
    set_global_seeds(seed)
    gym.logger.setLevel(logging.WARN)
    pois.learn(make_env, make_policy, n_episodes=n_episodes, horizon=horizon,
        sampler=sampler, save_weights=save_weights, **alg_args)
    sampler.close()
@ex.automain
def main(seed, env, num_episodes, horizon, iw_method, iw_norm, natural,
        file_name, logdir, bound, delta, njobs, save_weights, policy,
        policy_init, max_offline_iters, gamma, center, clipping, entropy,
        max_iters, positive_return, reward_clustering, _run):
    """Sacred entry point: every argument is injected by name from
    custom_config above; _run is the active sacred Run object, forwarded
    to the logger so metrics reach the sacred observers."""
    logger.configure(dir=logdir, format_strs=['stdout', 'csv', 'tensorboard', 'sacred'], file_name=file_name, run=_run)
    # Launch training; the keyword arguments past 'save_weights' travel
    # through train(**alg_args) straight into pois.learn.
    train(env=env,
          policy=policy,
          policy_init=policy_init,
          n_episodes=num_episodes,
          horizon=horizon,
          seed=seed,
          njobs=njobs,
          save_weights=save_weights,
          max_iters=max_iters,
          iw_method=iw_method,
          iw_norm=iw_norm,
          use_natural_gradient=natural,
          bound=bound,
          delta=delta,
          gamma=gamma,
          max_offline_iters=max_offline_iters,
          center_return=center,
          clipping=clipping,
          entropy=entropy,
          reward_clustering=reward_clustering)
| [
"nico.montali24@gmail.com"
] | nico.montali24@gmail.com |
67686cd35abc399e2bafd17dc1e5472c07dd21ea | b71a22a01e55b098ddbe08b7a4d9b1d423f6445e | /app/app/settings.py | 16f68180b087e3213abeb937b5eef4dcaecdea6a | [
"MIT"
] | permissive | DmitryBovsunovskyi/django-training-app | 4f26a8cb2b128066e0c612973e6739bcac6e7023 | 90b32a9572b6b6827a6b4622af3776c9cef0b0dc | refs/heads/main | 2023-03-29T22:09:18.164711 | 2021-03-29T12:47:39 | 2021-03-29T12:47:39 | 346,772,915 | 0 | 0 | MIT | 2021-03-31T10:51:31 | 2021-03-11T16:51:30 | Python | UTF-8 | Python | false | false | 4,323 | py | """
From new branch
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
import datetime
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', cast=bool)
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"rest_framework.authtoken",
'rest_framework_simplejwt.token_blacklist',
"django_nose",
"drf_yasg",
"core",
"user",
"body",
]
SWAGGER_SETTINGS = {
'SECURITY_DEFINITIONS': {
'Bearer': {
'type': 'apiKey',
'name': 'Authorization',
'in': 'header'
}
}
}
# Use nose to run all tests
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--with-coverage',
'--cover-package=core,user',
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "app.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "app.wsgi.application"
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
)
}
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': datetime.timedelta(minutes=10),
'REFRESH_TOKEN_LIFETIME': datetime.timedelta(days=1),
}
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# Connection parameters come from the process environment (DB_HOST, DB_NAME,
# DB_USER, DB_PASS); os.environ.get returns None for any that are unset.
DATABASES = {
    "default": {
        'ENGINE': 'django.db.backends.postgresql',
        'HOST': os.environ.get('DB_HOST'),
        'NAME': os.environ.get('DB_NAME'),
        'USER': os.environ.get('DB_USER'),
        'PASSWORD': os.environ.get('DB_PASS'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
AUTH_USER_MODEL = "core.User"
EMAIL_USE_TLS = config('EMAIL_USE_TLS', cast=bool)
EMAIL_PORT = config('EMAIL_PORT', cast=int)
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='localhost')
| [
"dmitrybovsunovskyi@gmail.com"
] | dmitrybovsunovskyi@gmail.com |
642f6e0e9b192453ad9d2a8d58de8557d8ad0ab5 | 7192a6e3e2debd26919390941f1a7bc05255f861 | /Code_fast/test_sdA.py | f6566f98913d299b22aaaf27a11c154d358f9da2 | [] | no_license | digirak/TIFR-code | bfff28bb8f85aebb0fb122c52cefe1c8a6be8985 | 5961828397e713f2a46b172d20b0cb0187f97a68 | refs/heads/master | 2021-01-10T12:06:20.992655 | 2016-02-29T07:47:33 | 2016-02-29T07:47:33 | 50,757,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,913 | py | from test_sdA import *
from stacked_dA import Stacked_dA
import theano.tensor as T
import numpy
from theano import function,pp
import timeit
import os
import sys
def test_dA(learning_rate, training_epochs,
            sig,sig_noise,chunks,batch_size=20,n_ins=441,n_hidden=1000):
    """
    Train a stacked denoising autoencoder on the given signals.
    (Python 2 / theano code: uses xrange and print statements.)
    :type learning_rate: float
    :param learning_rate: learning rate used for training the Denoising
                          AutoEncoder
    :type training_epochs: int
    :param training_epochs: number of epochs used for training
    :type sig: theano shared variable
    :param sig: clean pulsation signals (one row per sample)
    :type sig_noise: theano shared variable
    :param sig_noise: noisy observations used as the corrupted input
    :returns: (per-epoch mean costs, theano updates)
    """
    # datasets = load_data(training_dataset,validation_dataset)
    pulsations= sig
    observations=sig_noise
    # compute number of minibatches for training, validation and testing
    # NOTE(review): this value is overwritten by 'chunks' before use (see
    # the TRAINING section below), so this computation is dead code.
    n_train_batches = pulsations.get_value(borrow=True).shape[0] / batch_size
    print("Size of batch is %d"%batch_size)
    # start-snippet-2
    # allocate symbolic variables for the data
    index = T.lscalar()    # index to a [mini]batch
    x = T.matrix('pulsations')  # the data is presented as rasterized images
    # NOTE(review): 'y' is unused; the 'givens' entry that consumed it is
    # commented out below.
    y = T.matrix('Observations') # Noisy
    # end-snippet-2
    ####################################
    # BUILDING THE MODEL NO CORRUPTION #
    ####################################
    rng = numpy.random.RandomState(123)
    da = Stacked_dA(
        x,
        rng,
        n_visible=n_ins,
        n_hidden=n_hidden
    )
    # Cost is computed against the noisy slice selected by 'index'.
    cost, updates = da.get_cost_updates(corrupted_input=observations[index*batch_size:(index+1)*batch_size,:],learning_rate=learning_rate)
    train_da = function(
        inputs=[index],
        outputs=[cost],
        updates=updates,
        givens={
            #y: observations[index * batch_size: (index + 1) * batch_size,:],
            x: pulsations[index * batch_size: (index + 1) * batch_size,:]
        }
    )
    start_time = timeit.default_timer()
    ############
    # TRAINING #
    ############
    # 'chunks' caps how many samples are iterated per epoch.
    n_train_batches=chunks
    # go through training epochs
    cos=[]
    for epoch in xrange(training_epochs):
        # go through trainng set
        c = []
        for batch_index in xrange(int(n_train_batches/batch_size)):
           # st_time=timeit.default_timer()
            c.append(train_da(batch_index))
            #end_time=timeit.default_timer()
            #print("training for batch %d/%d takes %3.3f s"%(batch_index,n_train_batches/batch_size,(end_time-st_time)))
        cos.append(numpy.mean([item for item in c]))
        print 'Training epoch %d ' % epoch
    end_time = timeit.default_timer()
    training_time = (end_time - start_time)
    print >> sys.stderr, ('The no corruption code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %3.2f s' % ((training_time)))
    return (cos,updates)
#if __name__ == '__main__':
# test_dA()
| [
"raknath@gmail.com"
] | raknath@gmail.com |
b25ce7f623ec6fdde3d149c689911c96dd5e5206 | 471763d760e57f0487d5f032d261674c6fb732c8 | /pymoo/experimental/my_test.py | c374176b5538cd3516ee40931e823ed8ac6f23c1 | [
"Apache-2.0"
] | permissive | s-m-amin-ghasemi/pymoo | 7b583834d2f6dea26592001eb59e45472dadd490 | 74123484b0f72d601823bcda56f9526ad12e751a | refs/heads/master | 2020-05-02T09:55:31.641675 | 2019-03-04T19:24:37 | 2019-03-04T19:24:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover
from pymoo.operators.mutation.polynomial_mutation import PolynomialMutation
from pymoo.optimize import minimize
from pymoo.util import plotting
from pymoo.util.reference_direction import UniformReferenceDirectionFactory
from pymop.factory import get_problem
# DTLZ1 benchmark with 7 variables and 3 objectives.
problem = get_problem("dtlz1", n_var=7, n_obj=3)
# 91 uniformly spread reference directions in 3-objective space.
ref_dirs = UniformReferenceDirectionFactory(3, n_points=91).do()
# Analytical Pareto front evaluated at the reference directions.
pf = problem.pareto_front(ref_dirs)
# Run NSGA-III for 400 generations and plot the resulting objective values.
res = minimize(problem,
               method='nsga3',
               method_args={
                   'pop_size': 92,
                   'ref_dirs': ref_dirs},
               termination=('n_gen', 400),
               pf=pf,
               seed=1,
               disp=True)
plotting.plot(res.F)
| [
"jules89@arcor.de"
] | jules89@arcor.de |
fe89fc03e568e8325052551c739aedd539b5192b | af5a0681e651207ac7d6f1f03d7c49c1905fe47c | /src/outputs/TimelineAnimation.py | 8186101f5fe3aff2e82071bd51d5e290d4320395 | [] | no_license | rlui1/robo_blender | f03e8bfec5dc8cd6bc2eea661f026f9a400079a8 | 87072eea3810f099505061983c3cd75582d96203 | refs/heads/master | 2020-04-16T03:32:15.482466 | 2015-02-23T20:50:23 | 2015-02-23T20:50:23 | 165,235,053 | 0 | 0 | null | 2019-01-11T11:44:52 | 2019-01-11T11:44:52 | null | UTF-8 | Python | false | false | 1,523 | py | import rospy
import Utils
class FrameMap:
  """ Represents the position of an animation in the timeline. """
  def __iter__(self):
    # Iterate over the frame indices covered by this animation.
    # NOTE(review): requires frame_start/frame_end to be ints, but
    # from_string() passes the raw substrings unconverted -- confirm.
    return iter(range(self.frame_start, self.frame_end))
  def set_duration(self, val):
    # Store the playback duration used by get_frame_duration below.
    self.duration = val
  def get_frame_duration(self):
    # Duration of a single frame.
    # NOTE(review): frame_start - frame_end is negative whenever
    # frame_end > frame_start -- the operands look swapped; verify.
    return float(self.frame_start - self.frame_end)/self.duration
  @classmethod
  def from_string(cls, str):
    """ Alternative constructor method: parses "start:end:min_duration". """
    # 'cls' hold the FrameMap class
    # Asterix below means the list str.split(":") will be expanded into
    # arguments frame_start, frame_end, min_duration
    # NOTE(review): the split pieces stay strings; __iter__ and
    # get_frame_duration expect numbers -- conversion is presumably
    # intended but missing.
    return cls(*str.split(":"))
  def __init__(self, frame_start, frame_end, min_duration):
    self.frame_start = frame_start
    self.frame_end = frame_end
    self.min_duration = min_duration
class TimelineAnimation:
  """
  This output can build and send an animation out, if you give the
  animation's location (FrameMap instance).

  NOTE: this is scaffolding -- both methods currently raise
  NotImplementedError.
  """
  def send(self, frame_map):
    """
    Call this from your controller.
    Will iterate through frames, build trajectory messages and send them.
    """
    # __iter__ method in FrameMap will allow you to iterate like this:
    #
    #for frame in frame_map:
    #  print("Current frame: %s" % frame)
    raise NotImplementedError
  def __init__(self, confentry):
    # Motor config and categories can be saved here (outputs/anim_zeno.yaml)
    # until we find a better location for it.
    # NOTE(review): 'confentry' is accepted but never used -- confirm.
    self.config = Utils.read_yaml("anim_zeno.yaml", __file__)
    raise NotImplementedError
"gabrielius.m@gmail.com"
] | gabrielius.m@gmail.com |
3a1962855bf806f40e9e9f6b4bdb7b6754e65897 | fe2c4709aec40e2d6da8ff732d91b93fc172d2c6 | /setup.py | 09182f0e2ac6f7cd00d690640499fb82c7d115c5 | [
"MIT",
"CC-BY-NC-4.0",
"CC-BY-NC-SA-4.0"
] | permissive | swarmer/depchecker | 5c4ab3761d27e62956fbc44b79fc9269c4052a68 | 9b0daf8f2052d69da9496125db523449d0f1756b | refs/heads/master | 2023-08-31T07:05:23.591299 | 2018-11-17T15:37:22 | 2018-11-17T15:37:22 | 104,056,102 | 1 | 1 | MIT | 2023-09-11T20:51:15 | 2017-09-19T09:39:31 | Python | UTF-8 | Python | false | false | 2,182 | py | import codecs
import os
import re
from setuptools import find_packages, setup
# utility functions
def slurp(path):
    """Read the file at *path* and return its contents decoded as UTF-8."""
    with codecs.open(path, 'rb', 'utf-8') as handle:
        contents = handle.read()
    return contents
def find_meta(field):
    """Extract a dunder metadata value (e.g. ``version`` -> __version__)
    from the module-level META_FILE text.

    Raises RuntimeError when the assignment is not found.
    """
    pattern = r'^__{field}__ = [\'"]([^\'"]*)[\'"]'.format(field=field)
    match = re.search(pattern, META_FILE, re.M)
    if match:
        return match.group(1)
    raise RuntimeError('Unable to find __{field}__ string.'.format(field=field))
# utility constants
HERE = os.path.abspath(os.path.dirname(__file__))
META_FILE = slurp(os.path.join(HERE, 'src', 'depchecker', '__init__.py'))
# metadata
NAME = 'depchecker'
AUTHOR = 'swarmer'
AUTHOR_EMAIL = 'anton@swarmer.me'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python',
'Topic :: Software Development',
]
DESCRIPTION = 'A tool that checks python project dependencies'
INSTALL_REQUIRES = [
'click>=6.0,<7.0',
'requests>=2.7,<3.0',
'setuptools>=19.0',
'six>=1.10,<2.0',
]
KEYWORDS = ['dependency', 'package', 'vulnerability', 'requirements']
LICENSE = 'MIT'
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL
PACKAGES = find_packages(where='src')
URL = 'https://github.com/swarmer/depchecker'
VERSION = find_meta('version')
if __name__ == '__main__':
setup(
name=NAME,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
install_requires=INSTALL_REQUIRES,
keywords=KEYWORDS,
license=LICENSE,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
package_dir={'': 'src'},
packages=PACKAGES,
url=URL,
version=VERSION,
zip_safe=False,
)
| [
"anton@swarmer.me"
] | anton@swarmer.me |
5252e557a96623c7eb1aedce2f90affb3e6db048 | c6fa53212eb03017f9e72fad36dbf705b27cc797 | /RecoLocalTracker/SiPixelRecHits/test/readRecHits_cfg.py | 6de508131e53ef816856fae2633ab4039bcb12eb | [] | no_license | gem-sw/cmssw | a31fc4ef2233b2157e1e7cbe9a0d9e6c2795b608 | 5893ef29c12b2718b3c1385e821170f91afb5446 | refs/heads/CMSSW_6_2_X_SLHC | 2022-04-29T04:43:51.786496 | 2015-12-16T16:09:31 | 2015-12-16T16:09:31 | 12,892,177 | 2 | 4 | null | 2018-11-22T13:40:31 | 2013-09-17T10:10:26 | C++ | UTF-8 | Python | false | false | 1,987 | py | #
# RecHits are not persistent anymore, so one should run one of the CPEs
# on clusters to do the track fitting. 11/08 d.k.
#
import FWCore.ParameterSet.Config as cms
process = cms.Process("recHitsTest")
# Limit the run to 10 events.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(10)
)
# Only report errors from the ReadPixelRecHit module to stdout.
process.MessageLogger = cms.Service("MessageLogger",
    debugModules = cms.untracked.vstring('ReadPixelRecHit'),
    destinations = cms.untracked.vstring('cout'),
#    destinations = cms.untracked.vstring("log","cout"),
    cout = cms.untracked.PSet(
        threshold = cms.untracked.string('ERROR')
    )
#    log = cms.untracked.PSet(
#        threshold = cms.untracked.string('DEBUG')
#    )
)
process.source = cms.Source("PoolSource",
#    fileNames = cms.untracked.vstring('file:/scratch/dkotlins/digis.root')
    fileNames = cms.untracked.vstring('file:/scratch/dkotlins/promptrecoCosmics_1.root')
)
# a service to use root histos
process.TFileService = cms.Service("TFileService",
    fileName = cms.string('histo.root')
)
process.load("Configuration.StandardSequences.Geometry_cff")
process.load("Configuration.StandardSequences.MagneticField_38T_cff")
# what is this?
# process.load("Configuration.StandardSequences.Services_cff")
# what is this?
#process.load("SimTracker.Configuration.SimTracker_cff")
# needed for global transformation
process.load("Configuration.StandardSequences.FakeConditions_cff")
# Initialize magnetic field
# include "MagneticField/Engine/data/volumeBasedMagneticField.cfi"
# Tracker SimGeometryXML
# include "Geometry/TrackerSimData/data/trackerSimGeometryXML.cfi"
# Tracker Geometry Builder
# include "Geometry/TrackerGeometryBuilder/data/trackerGeometry.cfi"
# Tracker Numbering Builder
# include "Geometry/TrackerNumberingBuilder/data/trackerNumberingGeometry.cfi"
# The analyzer under test: reads siPixelRecHits and prints them verbosely.
process.analysis = cms.EDAnalyzer("ReadPixelRecHit",
    Verbosity = cms.untracked.bool(True),
    src = cms.InputTag("siPixelRecHits"),
)
process.p = cms.Path(process.analysis)
| [
"sha1-3a033e6407e48eb9fbe364eec828608ee1ba03ae@cern.ch"
] | sha1-3a033e6407e48eb9fbe364eec828608ee1ba03ae@cern.ch |
0e6876a1e8d88576b12e5ceb833274c7a9211072 | 2ce25c675171837f4fae787e3247310649449e75 | /scripts/pullcraftingdata.py | 4286aaa1f82fa27da52fca400b86ee11ad4c1b87 | [] | no_license | brandonwstiles/AlbionMarketOptimizer | 29767d630a3095cf98b5260fbe41dfb5db852b3e | 841ea18346030c1841b68bd370d9ac9174fd8c85 | refs/heads/master | 2020-12-02T23:23:10.797868 | 2020-01-20T06:05:38 | 2020-01-20T06:05:38 | 231,148,219 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | from bs4 import BeautifulSoup
from flask import Flask
import json
import requests
# Load the item list to price, then query the Albion Online data API for
# each item's minimum sell price per city and print one summary line per item.
with open('../json/TestItems.json') as jsonFile:
    items = json.load(jsonFile)
for item in items['items']:
    url = 'https://www.albion-online-data.com/api/v1/stats/Prices/' + item['name'] + '?'
    response = requests.get(url, timeout=10)
    objects = response.json()
    output = item['name'] + ": " + item['description'] + ": "
    for obj in objects:
        # if obj['city'] == "Black Market" or obj['city'] == "Caerleon":
        # skip every "* Cross" market, keep all the city markets
        if obj['city'] != "Mountain Cross" and obj['city'] != "Steppe Cross" and obj['city'] != "Forest Cross" and obj['city'] != "Highland Cross" and obj['city'] != "Swamp Cross":
            output2 = str(obj['sell_price_min']) + " " + obj['city'] + ": "
            output = output + output2
    print(output)
| [
"brandonwstiles@gmail.com"
] | brandonwstiles@gmail.com |
b68497fb4af46419ef3b81b78945f50158b80c22 | bf3b6607700a9eed936f043397ddb97f801c4339 | /RuleBasedControl/control_rules.py | 364b00639dc5534a3f065af28ee26ca1d7975b9c | [] | no_license | DavidChoi76/swmm_rl | 4548055f4542ec40acf3156d0a90d8a5ec8b2b8a | c260024fcd4d843d0e12a95d385de78096f25c35 | refs/heads/master | 2023-03-15T12:38:54.539578 | 2020-11-10T20:13:57 | 2020-11-10T20:13:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,300 | py | """
Written by Benjamin Bowes, 02-26-2020
This script contains functions to implement Rule-based Control (RBC) of SWMM simulations
"""
import numpy as np
import math
def find_nonzero_runs(a):
    """Return a (k, 2) array of [start, stop) index pairs, one per run of
    nonzero values in *a*."""
    # Nonzero mask with a zero sentinel on each side, so runs touching
    # either end of the input still produce both a start and a stop edge.
    mask = (np.asarray(a) != 0).view(np.int8)
    padded = np.concatenate(([0], mask, [0]))
    # Run boundaries appear as +/-1 steps in the padded mask.
    edges = np.flatnonzero(np.abs(np.diff(padded)) == 1)
    return edges.reshape(-1, 2)
def read_fcst(rain_gages, fcst_data, timestep, fcst_len=97):
    """Summarize the current rainfall forecast per rain gage.

    Slices the forecast row for `timestep` into one block of `fcst_len`
    columns per gage (presumably 15-min intervals over 24 h; only two gages
    are supported), merges rain events separated by a short gap, and totals
    the rainfall per merged event.

    Returns a dict with key "total" -> [gage1 24hr total, gage2 24hr total]
    plus one key per gage name -> list of per-event rainfall sums.
    NOTE(review): gage 2 is sliced as ``fcst_len:-fcst_len``, which implies
    the DataFrame carries a third trailing block of columns -- confirm
    against the forecast file layout.
    """
    # 1 determine number of events
    event_dict = {}  # dict to store number of events and volumes
    # get current forecast based on simulation timestep
    fcst_rain1 = fcst_data.iloc[timestep, :fcst_len]
    fcst_rain2 = fcst_data.iloc[timestep, fcst_len:-fcst_len]
    event_dict["total"] = [fcst_rain1.sum(), fcst_rain2.sum()]  # add 24hr totals to dict
    # find number of events for each rain gage
    for gage_idx, gage_name in enumerate(rain_gages):
        if gage_idx == 0:
            m = np.asarray(fcst_rain1 != 0)
        elif gage_idx == 1:
            m = np.asarray(fcst_rain2 != 0)
        # combine events if within 6hr of each other (24-interval look-ahead)
        for i in range(len(m)):
            if i < fcst_len - 1:
                if m[i]:  # if current value is true
                    if not m[i + 1]:  # if next value is false
                        if i + 25 > len(m):  # look-ahead window runs past the end
                            if np.any(m[i + 1:]):
                                m[i + 1] = True
                        else:
                            if np.any(m[i+1:i+25]):
                                m[i+1] = True
        # 2 calculate per event precip. totals
        events = find_nonzero_runs(m)
        event_sums = []
        for event in events:
            if gage_idx == 0:
                event_sums.append(fcst_rain1[event[0]:event[1]].sum())
            if gage_idx == 1:
                event_sums.append(fcst_rain2[event[0]:event[1]].sum())
        event_dict[gage_name] = event_sums
    # 3 track event start and end?
    return event_dict
def drain_time(flood_vol, stage_storage, storage_head, current_depth, diam=2., coeff=0.65, ctl_step=900):
    """Return the number of control steps needed to drain the expected flood volume.

    Uses the Torricelli tank drain-time relation with the valve assumed fully
    open and discharging to atmosphere
    (https://www.lmnoeng.com/Tank/TankTime.php).

    :param flood_vol: expected flood volume for the pond (cubic ft)
    :param stage_storage: linear stage-storage coefficient (volume per foot of
        depth, i.e. effective surface area in sq ft)
    :param storage_head: current head on the valve (ft)
    :param current_depth: current water depth at the valve (ft)
    :param diam: valve/orifice diameter (ft)
    :param coeff: orifice discharge coefficient
    :param ctl_step: control time step (s)
    :return: number of control steps (rounded up, plus one step of margin)
    """
    r = diam / 2.
    valve_area = round(math.pi * r ** 2, 4)  # full area of circle
    if current_depth < 2.:
        # Water surface below the orifice crown: only a circular segment of
        # height `current_depth` is submerged.  Segment area:
        #   A = r^2 * acos((r - h) / r) - (r - h) * sqrt(2*r*h - h^2)
        # (https://www.mathopenref.com/segmentareaht.html)
        # BUGFIX: the original set r = (diam/2)**2 and then used r**2 in the
        # formula, i.e. radius**4; r must be the radius itself.
        h = current_depth
        valve_area = round(r ** 2 * math.acos((r - h) / r) - (r - h) * math.sqrt(2 * r * h - h ** 2), 4)
    req_vol = flood_vol * 1.2  # drain flood volume + 20% safety factor
    # find depth occupied by flood volume
    flood_depth = req_vol / stage_storage
    # find change in head required
    req_head = abs(storage_head - flood_depth)
    # find time required to drain to required head, valve fully open, assuming discharge to atm
    time = (stage_storage / (valve_area * coeff)) * (math.sqrt(storage_head) - math.sqrt(req_head)) * math.sqrt(2 / 32.2)  # assuming english units, time in seconds
    drain_steps = math.ceil(time / ctl_step) + 1  # number of time steps required to drain (rounded up)
    return drain_steps
def valve_position(current_depth, stage_storage, drawdown_time=86400, target_depth=2., diam=2., coeff=0.65):
    """Return the minimum valve opening that drains back to ``target_depth``
    over ``drawdown_time`` seconds (default 86400 s = 24 hr).

    Returns 0 (valve closed) when the pond is already at or below the target.
    """
    if current_depth <= target_depth:  # nothing to drain: close the valve
        return 0
    else:
        full_valve_area = round(math.pi * (diam / 2) ** 2, 4)  # full area of circle
        # Orifice area needed to reach target_depth in drawdown_time, from the
        # same Torricelli relation used in drain_time():
        #   t = (S / (A * c)) * (sqrt(h1) - sqrt(h2)) * sqrt(2 / g)
        #   => A = (S / (t * c)) * (sqrt(h1) - sqrt(h2)) * sqrt(2 / g)
        # BUGFIX: the original divided by sqrt(2/32.2) instead of multiplying,
        # inconsistent with drain_time() and overstating the area ~16x.
        new_valve_area = (stage_storage / (drawdown_time * coeff)) \
            * (math.sqrt(current_depth) - math.sqrt(target_depth)) \
            * math.sqrt(2 / 32.2)  # assuming english units, time in seconds
        # NOTE(review): returns 1 - (required/full area); confirm the valve
        # controller's convention -- if 1 means fully open, the ratio itself
        # would be the opening fraction.
        valve_opening = 1 - round(new_valve_area / full_valve_area, 4)
        return valve_opening
| [
"noreply@github.com"
] | DavidChoi76.noreply@github.com |
bfef3942212fd8787ff9a982dc557466875180f2 | 18c0f7dc356db08027472243ab19ed5ab98f5dcc | /script.module.placenta/lib/resources/lib/modules/unjuice.py | 9bd6d75cf13480440c43b5f5d3c66d523db1b25b | [
"Beerware"
] | permissive | parser4life/tantrumrepo | 8656ac06f18aa3e76b4c279de61ec11ee6a88d60 | 3b37145f4772409e538cbddb0b7aa23be525772a | refs/heads/master | 2020-03-08T04:48:30.025644 | 2018-04-01T02:21:16 | 2018-04-01T02:21:16 | 127,931,630 | 1 | 2 | null | 2018-04-03T15:46:42 | 2018-04-03T15:46:42 | null | UTF-8 | Python | false | false | 4,541 | py | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: MuadDib
import re
import sys
from resources.lib.modules import jsunpack
Juice = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="
def test(e):
return True if re.search(r'JuicyCodes.Run\(', e, re.IGNORECASE) else False
def run(e):
    """Extract and decode a JuicyCodes.Run("...") payload from *e*.

    The argument chunks ("abc"+"def"+...) are stitched together, stripped to
    the base64 alphabet, hand-decoded against `Juice`, then unpacked with
    jsunpack.  Returns the unpacked source as unicode (Python 2 code: uses
    unicode()), or None if extraction, decoding or unpacking fails.
    """
    try:
        # pull the first Run(...) argument, join the "a"+"b" string chunks,
        # and drop anything outside the base64 alphabet
        e = re.findall(r'JuicyCodes.Run\(([^\)]+)', e, re.IGNORECASE)[0]
        e = re.sub(r'\"\s*\+\s*\"','', e)
        e = re.sub(r'[^A-Za-z0-9+\\/=]','', e)
    except:
        return None
    t = ""
    n=r=i=s=o=u=a=f=0
    # manual base64 decode: 4 symbols -> up to 3 bytes; index 64 ('=') is padding
    while f < len(e):
        try:
            s = Juice.index(e[f]);f+=1;
            o = Juice.index(e[f]);f+=1;
            u = Juice.index(e[f]);f+=1;
            a = Juice.index(e[f]);f+=1;
            n = s << 2 | o >> 4; r = (15 & o) << 4 | u >> 2; i = (3 & u) << 6 | a
            t += chr(n)
            if 64 != u: t += chr(r)
            if 64 != a: t += chr(i)
        except:
            # ran off the end of an incomplete final group; the while
            # condition terminates the loop on the next check
            continue
        pass
    try:
        # the decoded text is packed JS (eval(function(p,a,c,k,e,d)...)); unpack it
        t = jsunpack.unpack(t)
        t = unicode(t, 'utf-8')
    except:
        t = None
    return t
def main():
#for testing
codes = 'JuicyCodes.Run("ZXZhbChmdW5jdGlvbihwLGEsYyxrLGUsZCl7ZT1mdW5jdGlvbihj"+"KXtyZXR1cm4oYzxhPycnOmUocGFyc2VJbnQoYy9hKSkpKygoYz1j"+"JWEpPjM1P1N0cmluZy5mcm9tQ2hhckNvZGUoYysyOSk6Yy50b1N0"+"cmluZygzNikpfTtpZighJycucmVwbGFjZSgvXi8sU3RyaW5nKSl7"+"d2hpbGUoYy0tKXtkW2UoYyldPWtbY118fGUoYyl9az1bZnVuY3Rp"+"b24oZSl7cmV0dXJuIGRbZV19XTtlPWZ1bmN0aW9uKCl7cmV0dXJu"+"J1xcdysnfTtjPTF9O3doaWxlKGMtLSl7aWYoa1tjXSl7cD1wLnJl"+"cGxhY2UobmV3IFJlZ0V4cCgnXFxiJytlKGMpKydcXGInLCdnJyks"+"a1tjXSl9fXJldHVybiBwfSgnMyBqPXsiSCI6IlgiLCJKIjoiUC1G"+"IiwiSyI6bH07eS5NPVwnVj09XCc7MyAxPXkoXCd2LTFcJyk7MyBk"+"OzMgNzszIEksbT1sOzMgajskKHgpLncoMigpe2ouRT14LlI7JC5R"+"KHtOOlwnTzovL1Mudi5ZLzdcJyxXOlwnVVwnLDY6aixaOlwnTFwn"+"LEM6MihlKXtkPWUuZDs3PWUuNzt0KCl9LH0pOyQoXCcjQi04XCcp"+"LnMoMigpeyQoXCcjZi04XCcpLmMoXCd1XCcpOzEuQShhLmkoNi5i"+"KSl9KTskKFwnI0QtOFwnKS5zKDIoKXskKFwnI2YtOFwnKS5jKFwn"+"dVwnKTsxLnEoKX0pfSk7MiB0KCl7MyBwPXs3OjcsZDpkLEc6IlQl"+"IiwxaTpcJzE2OjlcJywxbzpsLDFuOnt9LDFtOnsxazpcJyMxbFwn"+"LDFxOjF3LDExOjAsMXY6XCcxdFwnLDFyOlwnMXVcJ30sfTsxLjFz"+"KHApOzEuNChcJ3FcJywyKCl7fSk7MS40KFwnd1wnLDIoKXt9KTsx"+"LjQoXCcxcFwnLDIoKXt9KTsxLjQoXCcxalwnLDIoKXsxOChtJiZh"+"LmkoNi5iKSYmYS5pKDYuYik+MTkpezEuMTcoKTttPTE1OyQoXCcj"+"NS04XCcpLjEyKHooYS5pKDYuYikpKTskKFwnI2YtOFwnKS5jKFwn"+"b1wnKX19KTsxLjQoXCc1XCcsMigpe2EuMTMoNi5iLDEuMTQoKSl9"+"KTsxLjQoXCduXCcsMigpeyQoXCcjZi1uXCcpLmMoXCdvXCcpfSk7"+"MS40KFwnMWFcJywyKCl7JChcJyNmLW5cJykuYyhcJ29cJyl9KX0y"+"IHoocil7MyA1PTFiIDFnKDAsMCwwKTs1LjFoKHIpOzMgZz01LjFm"+"KCk7MyBoPTUuMWUoKTszIGs9NS4xYygpOzFkKGc8MTA/KFwnMFwn"+"K2cpOmcpK1wnOlwnKyhoPDEwPyhcJzBcJytoKTpoKStcJzpcJyso"+"azwxMD8oXCcwXCcrayk6ayl9Jyw2Miw5NSwnfHBsYXllcnxmdW5j"+"dGlvbnx2YXJ8b258dGltZXxkYXRhfHNvdXJjZXN8cmVzdW1lfHxs"+"b2NhbFN0b3JhZ2V8aWR8bW9kYWx8dHJhY2tzfHxwb3B8dGltZV9o"+"fHRpbWVfbXxnZXRJdGVtfGRhdGFQT1NUfHRpbWVfc3x0cnVlfGZp"+"cnN0X2xvYWR8ZXJyb3J8c2hvd3xqd2NvbmZpZ3xwbGF5fF90aW1l"+"fGNsaWNrfGxvYWRQbGF5ZXJ8aGlkZXxzdHJlYW1kb3J8cmVhZHl8"+"ZG9jdW1lbnR8andwbGF5ZXJ8Y29udmVydF90aW1lfHNlZWt8eWVz"+"fHN1Y2Nlc3N8bm98cmVmZXJlcnxjOXZJek5CanVPRmRqcEtYcV
9f"+"WlF8d2lkdGh8ZXBpc29kZUlEfHBsYXlsaXN0fGZpbGV8c3VidGl0"+"bGV8anNvbnxrZXl8dXJsfGh0dHBzfFY0SUdfVGRxOFlPU2ZzWmlG"+"ZDFFc2xjeU9lSkIyUENZQ2hrXzRxcmkwX2lsTkE2TVpPX1BGcldX"+"REc1aHZkSGh8YWpheHxyZWZlcnJlcnxhcGl8MTAwfFBPU1R8SE92"+"OVlLNmVncFpnazVjY0JpWnBZZklBUXgzUTVib0dWN3RpR3d8bWV0"+"aG9kfDM2MDg0NXxjb3xkYXRhVHlwZXx8YmFja2dyb3VuZE9wYWNp"+"dHl8dGV4dHxzZXRJdGVtfGdldFBvc2l0aW9ufGZhbHNlfHxwYXVz"+"ZXxpZnwzMHxzZXR1cEVycm9yfG5ld3xnZXRTZWNvbmRzfHJldHVy"+"bnxnZXRNaW51dGVzfGdldEhvdXJzfERhdGV8c2V0U2Vjb25kc3xh"+"c3BlY3RyYXRpb3xmaXJzdEZyYW1lfGNvbG9yfGYzZjM3OHxjYXB0"+"aW9uc3xjYXN0fGF1dG9zdGFydHxjb21wbGV0ZXxmb250U2l6ZXxl"+"ZGdlU3R5bGV8c2V0dXB8SGVsdmV0aWNhfHJhaXNlZHxmb250ZmFt"+"aWx5fDIwJy5zcGxpdCgnfCcpLDAse30pKQo=")'
res = run(codes)
pass
if __name__ == "__main__":
sys.exit(int(main() or 0)) | [
"31803191+muaddibttv@users.noreply.github.com"
] | 31803191+muaddibttv@users.noreply.github.com |
192816a0aa4248471ba63ca120bc57733699c6ee | 4852046aed2588c7a359c4b805251fa953399b23 | /web/urls.py | bd18502d943190273fbe1e27349abd18c0f82e9d | [] | no_license | enasmohmed/Mobily-WebSite | 8cc11cc0e31d78da85029e8885c56b4ecc4d1e33 | dbab598ca36ccbadb15e37199b719b618b5c11f9 | refs/heads/master | 2020-08-08T12:08:23.169066 | 2019-10-26T20:24:51 | 2019-10-26T20:24:51 | 213,828,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,532 | py | """web URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
import Run
from Run import views
app_name = 'Run'
urlpatterns = [
    path('admin/', admin.site.urls),
    # exact-match root URL; other paths fall through to the includes below
    path('', Run.views.HomePageView, name='home'),
    path('', include('Run.urls', namespace='Run')),
    path('accounts/', include('accounts.urls', namespace='accounts')),
    path('NewProduct/', include('NewProduct.urls', namespace='NewProduct')),
    path('ckeditor/', include('ckeditor_uploader.urls')),
    path('contact', Run.views.ContactUs, name='contact'),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# NOTE(review): media files are already served unconditionally above; the
# DEBUG block adds the media route a second time -- confirm this is intended.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL ,document_root=settings.MEDIA_ROOT)
    urlpatterns += static(settings.STATIC_URL ,document_root=settings.STATIC_ROOT)
"enasm2477@gmail.com"
] | enasm2477@gmail.com |
1f4373cda3a1ff3ad29f59e43a73da09a5d9e4b8 | 49b4caa1dba9e0d692fa3af3992b79703224fc8b | /manage.py | f2d2f7cd212c9123f06a72cfe20e70db191780e2 | [] | no_license | Runpls/django_ITjobs | 8471b1239fc03956e8c5611db90007950b5d1979 | 1ea6e548d8cbf29725a6793f3e0db197bf97cebf | refs/heads/master | 2023-04-28T02:23:55.931387 | 2019-06-09T04:59:34 | 2019-06-09T04:59:34 | 190,504,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'getjobs.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"ngay292.5@gmail.com"
] | ngay292.5@gmail.com |
64e8e62c068ed15b021a558af13afb773d1d4574 | eb99e2e9eda8ed769bf9d27d0e1ece2f4df98302 | /nb classifier/Classifier_Star_Apply.py | dc6161463c7df1f705c079f0360b0553f029a078 | [] | no_license | AmbarDudhane/Hotel-Search-Engine | b33990d59d445ebcade0e212bc9b41a7ddf8f1ab | 8744a51461c67aec748815981be93f32d150aa34 | refs/heads/master | 2020-08-09T11:20:03.930810 | 2019-12-04T05:25:42 | 2019-12-04T05:25:42 | 214,076,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,722 | py | from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
import os
import pickle
import pandas as pd
import operator
import math
from nltk.stem import WordNetLemmatizer
class Classifier:
    """Multinomial naive-Bayes-style star-rating classifier for hotel reviews.

    Loads a previously trained vocabulary, class priors and conditional
    probabilities from pickle files and predicts a 1-5 rating for each
    review in a DataFrame.
    """

    def __init__(self):
        # BUGFIX: was misspelled __inti__, so it never ran as the constructor.
        print("in constructor")

    def load(self):
        """Load the trained model (vocabulary, priors, conditional probs) from disk."""
        print("in load function")
        f1 = open(r"C:\Users\Ambar\PycharmProjects\FlaskDemo\Classifier\vocab.pkl", "rb")
        f2 = open(r"C:\Users\Ambar\PycharmProjects\FlaskDemo\Classifier\prior.pkl", "rb")
        f3 = open(r"C:\Users\Ambar\PycharmProjects\FlaskDemo\Classifier\cond_prob.pkl", "rb")
        vocab = pickle.load(f1)
        prior = pickle.load(f2)
        cond_prob = pickle.load(f3)
        f1.close()
        f2.close()
        f3.close()
        return [vocab, prior, cond_prob]

    def apply_multinomial_nb(self, vocabulary, prior, cond_prob, df_test, flag):
        """Predict a rating for every row of df_test (review text read from row[2]).

        Adds a 'Prediction' column in place and returns df_test.  When
        flag == "test", predictions are also scored against 'reviews.rating'.
        """
        df_test["Prediction"] = ""
        lemmatizer = WordNetLemmatizer()
        stop_words = set(stopwords.words('english'))
        tokenizer = RegexpTokenizer(r'\w+')  # getting rid of punctuation while tokenizing
        for index, row in df_test.iterrows():
            tokens = list(tokenizer.tokenize(row[2].lower()))
            filtered_tokens = list()
            for term in tokens:
                if term not in stop_words:
                    filtered_tokens.append(lemmatizer.lemmatize(term))
            tokens = self.ExtractElements(vocabulary, filtered_tokens)
            score = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0}
            for c in [1, 2, 3, 4, 5]:
                # NOTE(review): scores are summed raw probabilities; the
                # commented lines show the usual log-probability variant.
                # score[c] = math.log2(prior[c])
                score[c] = prior[c]
                for term in tokens:
                    # score[c] = score[c] + math.log2(cond_prob[c][term])
                    score[c] = score[c] + cond_prob[c][term]
            df_test.at[index, 'Prediction'] = max(score.items(), key=operator.itemgetter(1))[0]
        # if called from test function, then execute evaluation
        if flag == "test":
            self.evaluation(df_test)
        return df_test

    def ExtractElements(self, vocab, tokens):
        """Return the tokens that appear in the model vocabulary (order not preserved)."""
        return list(set(vocab) & set(tokens))

    def evaluation(self, df_test):
        """Print MSE, RMSE and accuracy of 'Prediction' against 'reviews.rating'."""
        actual = df_test['reviews.rating'].tolist()
        predicted = df_test['Prediction'].tolist()
        classes = [1, 2, 3, 4, 5]  # kept for the commented-out reports below
        # print(confusion_matrix(actual, predicted, labels=classes))
        # print(classification_report(actual, predicted))
        from math import sqrt
        # Calculating MSE
        mse = mean_squared_error(actual, predicted)
        print("Mean Squared Error:", mse)
        print("RMSE:", sqrt(mse))
        # Calculating accuracy score
        print("Accuracy score:", "{:.0%}".format(accuracy_score(actual, predicted)))

    def test(self):
        """Manual smoke test: load the model and score a held-out 30% split."""
        print("Test")
        os.chdir(r"C:\Users\Ambar\PycharmProjects\FlaskDemo\Classifier")
        df = pd.read_csv(r"F:\UTA\1st sem\DM\hotel-reviews (1)\Hotel_Reviews_Jun19_reduced.csv",
                         usecols=['reviews.id', 'reviews.text', 'reviews.rating'])
        from sklearn.model_selection import train_test_split
        df_train, df_test = train_test_split(df, test_size=0.3, random_state=0)
        result = self.load()
        self.apply_multinomial_nb(vocabulary=result[0], prior=result[1], cond_prob=result[2], df_test=df_test,
                                  flag="test")
if __name__ == "__main__":
    # Manual smoke test over the local CSV: split, predict, and evaluate.
    inst = Classifier()
    inst.test()
| [
"noreply@github.com"
] | AmbarDudhane.noreply@github.com |
a33f00ae4c2d0a44e8d884798cff5199cbd63b9e | b4914b08ce57707a4f663403566b4e8e9b68d9a0 | /hofvideos/settings.py | cd8023eb92ad11c2702bd1d251e7218271c4a589 | [] | no_license | Harshvartak/halloffamevids | 9d47521ac9cafbcc1bbb8f049e64765d300bbf6c | 89bd7d3890feecd67ba293b0ab8d62ced491d025 | refs/heads/master | 2022-12-09T10:57:47.856072 | 2019-09-26T19:31:56 | 2019-09-26T19:31:56 | 211,171,960 | 0 | 0 | null | 2022-12-08T06:38:36 | 2019-09-26T20:02:38 | JavaScript | UTF-8 | Python | false | false | 3,326 | py | """
Django settings for hofvideos project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '198eoywsu)$@msv6jhixb$%tc3ruj83aq()oloy39(eiaw1za2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Auth flow: named URL patterns for login and post-login/logout redirects.
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL= 'dashboard'
LOGOUT_REDIRECT_URL= 'home'
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'halls',
    'widget_tweaks',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hofvideos.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'hofvideos.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT= os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT=os.path.join(BASE_DIR, 'media')
MEDIA_URL='/media/'
| [
"vartak.harsh@gmail.com"
] | vartak.harsh@gmail.com |
825e5b112be413802be4e582a733b67f276cf6ad | 1ceb35da7b1106a4da4e8a3a5620d23a326a68e4 | /corticalmapping/scripts/post_recording/00_old/movie_multi_plane_single_channel_deepscope/within_plane_folder/090_get_neuropil_subtracted_traces.py | 551768f06dbf818c36c61b57bb1068b0fc1d1578 | [] | no_license | zhuangjun1981/corticalmapping | c3870a3f31ed064d77f209a08e71f44c375676a3 | 0ddd261b3993f5ce5608adfbd98a588afc56d20c | refs/heads/master | 2022-11-14T03:24:53.443659 | 2020-07-13T23:48:50 | 2020-07-13T23:48:50 | 84,975,797 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | import sys
import os
import h5py
import numpy as np
import corticalmapping.HighLevel as hl
import corticalmapping.core.FileTools as ft
import matplotlib.pyplot as plt
lam = 1. # 100.
plot_chunk_size = 5000
def plot_traces_chunks(traces, labels, chunk_size, roi_ind):
    """
    Plot each trace type over consecutive time chunks, one subplot per chunk.

    :param traces: np.array, shape=[trace_type, t_num]
    :param labels: sequence of str, legend label for each trace type
    :param chunk_size: int, number of time points per subplot
    :param roi_ind: int, ROI index (used only in the figure title)
    :return: matplotlib figure with one stacked subplot per chunk, shared
             y-limits across all chunks
    """
    t_num = traces.shape[1]
    chunk_num = t_num // chunk_size
    chunks = []
    for chunk_ind in range(chunk_num):
        chunks.append([chunk_ind * chunk_size, (chunk_ind + 1) * chunk_size])
    # trailing partial chunk, if t_num is not a multiple of chunk_size
    if t_num % chunk_size != 0:
        chunks.append([chunk_num * chunk_size, t_num])
    # common y-range so chunks are visually comparable
    v_max = np.amax(traces)
    v_min = np.amin(traces)
    fig = plt.figure(figsize=(75, 20))
    fig.suptitle('neuropil subtraction for ROI: {}'.format(roi_ind))
    for chunk_ind, chunk in enumerate(chunks):
        curr_ax = fig.add_subplot(len(chunks), 1, chunk_ind + 1)
        for trace_ind in range(traces.shape[0]):
            curr_ax.plot(traces[trace_ind, chunk[0]: chunk[1]], label=labels[trace_ind])
        curr_ax.set_xlim([0, chunk_size])
        curr_ax.set_ylim([v_min, v_max * 1.2])
        curr_ax.legend()
    return fig
# Batch script (Python 2: print statement / raw_input below): neuropil-subtract
# all ROI traces in rois_and_traces.hdf5, plot the results per ROI, then save
# back to the same HDF5 file after interactive confirmation.
curr_folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(curr_folder)
data_f = h5py.File('rois_and_traces.hdf5')
traces_raw = data_f['traces_center_raw'].value
traces_srround = data_f['traces_surround_raw'].value
traces_subtracted = np.zeros(traces_raw.shape, np.float32)
ratio = np.zeros(traces_raw.shape[0], np.float32)
err = np.zeros(traces_raw.shape[0], np.float32)
# subtract the surround (neuropil) signal from each ROI's center trace
for i in range(traces_raw.shape[0]):
    curr_trace_c = traces_raw[i]
    curr_trace_s = traces_srround[i]
    curr_r, curr_err, curr_trace_sub = hl.neural_pil_subtraction(curr_trace_c, curr_trace_s, lam=lam)
    print "roi_%s \tr = %.4f; error = %.4f." % (ft.int2str(i, 5), curr_r, curr_err)
    traces_subtracted[i] = curr_trace_sub
    ratio[i] = curr_r
    err[i] = curr_err
print('\nplotting neuropil subtraction results ...')
figures_folder = 'figures/neuropil_subtraction_lam_{}'.format(lam)
if not os.path.isdir(figures_folder):
    os.makedirs(figures_folder)
# one figure per ROI: center, surround and subtracted traces in time chunks
for roi_ind in range(traces_raw.shape[0]):
    print('roi_{:04d}'.format(roi_ind))
    curr_traces = np.array([traces_raw[roi_ind], traces_srround[roi_ind], traces_subtracted[roi_ind]])
    curr_fig = plot_traces_chunks(traces=curr_traces,
                                  labels=['center', 'surround', 'subtracted'],
                                  chunk_size=plot_chunk_size,
                                  roi_ind=roi_ind)
    curr_fig.savefig(os.path.join(figures_folder, 'neuropil_subtraction_ROI_{:04d}.png'.format(roi_ind)))
    curr_fig.clear()
    plt.close(curr_fig)
# wait for keyboard abortion
msg = raw_input('Do you want to save? (y/n)\n')
while True:
    if msg == 'y':
        break
    elif msg == 'n':
        sys.exit('Stop process without saving.')
    else:
        msg = raw_input('Do you want to save? (y/n)\n')
# persist results as new datasets in the same HDF5 file
data_f['traces_center_subtracted'] = traces_subtracted
data_f['neuropil_r'] = ratio
data_f['neuropil_err'] = err
data_f.close()
"junz@alleninstitute.org"
] | junz@alleninstitute.org |
cba1f174f5c9d6f085d1d2144a177cac7730448a | 7034f75d6047be61cbd07a2854304a611ef53cee | /linear_regression/LinearReg/linear_reg.py | 460e393171ae3306923e707a526695b1447fad0c | [] | no_license | cjackie/machine_learning | c72e0fc8201bf74755e50c3f0b9933b863dc4195 | c78f8de693043eaf247399d0cb95c782329e2fd7 | refs/heads/master | 2021-01-01T16:21:11.875039 | 2016-12-27T03:50:57 | 2016-12-27T03:50:57 | 28,286,760 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,653 | py | import numpy as np
import matplotlib.pyplot as plt
class LinReg:
    """
    self.names: an array of names for each feature
    self.theta: np.matrix of linear coefficients, a vector
    """
    def __init__(self, data_path):
        # Fit by the normal equation: theta = (X^T X)^-1 X^T y.
        names, y, X = self.parse(data_path)
        X = self.__transform(X)
        self.names = names
        try:
            self.theta = (X.T*X).I*X.T*y
        except np.linalg.linalg.LinAlgError as e:
            # NOTE(review): exit() terminates the whole interpreter -- harsh
            # for library code; consider re-raising instead.
            print("no psudoinverse of the training data matrix, exiting")
            exit()
        self.__plot(X, y)
    """
    parse the csv file and construct matrix X and vector y
    @data_path: string, path to the csv file
    @return: (names ,y, X), y is the vector of results for each training set
             X is the matrix for all training set. Each row is a training
             set. names is an array of label for data
    """
    def parse(self,data_path):
        # NOTE(review): file handle is never closed; a `with` block would fix it.
        f = open(data_path)
        line = f.readline()
        names = line[:-1].split(",")
        X, y = [], []
        for line in f:
            row = [float(a) for a in line.split(",")]
            y.append([row.pop(0)])
            X.append([1]+row) #include the intercept term
        return names, np.matrix(y), np.matrix(X)
    """
    predict the value
    @x: 1d-array-like, a value of feature vector
    @return: int, a numerical result predicted by the model
    """
    def predict(self,x):
        # NOTE(review): theta includes the intercept term, so this length check
        # only passes when x also carries a leading 1 -- the docstring above
        # says "feature vector"; confirm the intended input format.
        x_vector = self.__transform_v(np.matrix(x).T)
        if len(x) != len(self.theta):
            print("data len is inconsistent")
            return ""  # NOTE(review): returns a str sentinel, not a number
        return (self.theta.T*x_vector).item(0,0)
    """
    plot the resulting model for the training data
    @y: the vector of results for each training set
    @X: the matrix for all training set. Each row is a training
        set.
    """
    def __plot(self,X,y):
        # Plotting is unimplemented for now; both branches return immediately.
        dimension = len(self.theta)-1
        if dimension == 1:
            return
            #TODO...
        elif dimension == 2:
            return
            #TODO..... use mplot3d?
    """
    tranform the training data. This function can be modified case by
    case to make a better model.
    @X: np.matrix, input data
    @return: np.matrix, the data after the transformation
    """
    def __transform(self,X):
        #customizable
        return np.matrix([self.__transform_v(x) for x in X.tolist()])
    """
    tranform a input vector.
    @x: np.matrix, a row, 1*n dimension
    @return: np.matrix, the data after the transformation
    """
    def __transform_v(self,x):
        #customizable (identity transform by default)
        return x
| [
"chaojie.keepgoing@gmail.com"
] | chaojie.keepgoing@gmail.com |
9b78264da22d980cf740f09fc3b9061cb67d48b2 | 5ae4d7a8d395feb3db091e978a12b645373fd0f4 | /project/com/controller/__init__.py | c8e71226661d4370e040a87a5a1e4771c8c33d87 | [] | no_license | Achyut4/intelligence_emp_engagement | e6b994b52ea29fba2b989c8ed0bfec00542b9e95 | 36deaa85f8e5fccb008441efe12d62d3d1964e7f | refs/heads/master | 2020-04-25T17:05:07.248007 | 2019-04-20T18:05:30 | 2019-04-20T18:05:30 | 172,934,146 | 0 | 0 | null | 2019-04-20T18:07:17 | 2019-02-27T14:50:34 | CSS | UTF-8 | Python | false | false | 563 | py | import project.com.controller.RegisterController
import project.com.controller.LoginController
import project.com.controller.DepartmentController
import project.com.controller.RoleController
import project.com.controller.DatasetController
import project.com.controller.StaffController
import project.com.controller.ComplainController
import project.com.controller.FeedbackController
import project.com.controller.AttendanceController
import project.com.controller.ReportController
import project.com.controller.TrackingController
| [
"noreply@github.com"
] | Achyut4.noreply@github.com |
bf50004145bd6d307ec066d1ad0794c4877ad04b | 849f05421d6becc6c9da70cb077dc356c3b4af0b | /addphoto/migrations/0002_auto_20200301_1602.py | 1aa115012e6672cc5efaab5d54635095ea376dff | [] | no_license | microStationCorp/faceshot | 63d632ff07b71c24b65577c926a28beb0e6ebd89 | 451e1a19f56a0da84f6290b2d6d15c0d8e60cb92 | refs/heads/master | 2021-02-06T20:08:35.427105 | 2020-03-03T07:16:25 | 2020-03-03T07:16:25 | 243,944,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | # Generated by Django 3.0.3 on 2020-03-01 10:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Alters UploadedPhoto.image in place on top of the initial migration.
    dependencies = [
        ('addphoto', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='uploadedphoto',
            name='image',
            # NOTE(review): max_length=1 leaves room for a one-character path,
            # and upload_to is the *string* 'get_image_path' rather than the
            # callable -- both look unintended; confirm against the model.
            field=models.ImageField(max_length=1, upload_to='get_image_path'),
        ),
    ]
| [
"sujanmondal916@gmail.com"
] | sujanmondal916@gmail.com |
7d503436d2d772f337fa170b88ce13e1e6d851f4 | d87483a2c0b50ed97c1515d49d62c6e9feaddbe0 | /.history/buy_top_fc_smart_20210204001749.py | e28c6f69964eb4311396f03581510b45098e4b0e | [
"MIT"
] | permissive | HopperKremer/hoptrader | 0d36b6e33922414003cf689fb81f924da076a54b | 406793c10bc888648290fd15c7c2af62cf8c6c67 | refs/heads/main | 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,730 | py | # Buy top tickers from Financhill
import requests
from tda import auth, client
from tda.orders.equities import equity_buy_market, equity_buy_limit
from tda.orders.common import Duration, Session
import tda
import os, sys
import time
from selenium import webdriver
import json
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config # stored in parent directory for security
# --- TDA auth + Financhill top-stock scrape (module-level script code) ---
token_path = "token"
DRIVER_PATH = "/home/hopper/chromedriver"
driver = webdriver.Chrome(DRIVER_PATH)
redirect_uri = "https://localhost"

# Ensure the Chrome process is always shut down, even if authentication or
# scraping raises — the original only quit the driver on the success path,
# leaking a headful browser on any error.
try:
    try:
        c = auth.client_from_token_file(token_path, config.api_key)
    except FileNotFoundError:
        # No cached token yet: run the interactive OAuth login flow in the
        # browser and persist the token for next time.
        c = auth.client_from_login_flow(driver, config.api_key, redirect_uri, token_path)

    # Scrape Financhill's stock-score screen, sorted by normalized score.
    driver.get("https://financhill.com/screen/stock-score")
    time.sleep(2)  # crude page-load wait; TODO(review): prefer WebDriverWait
    driver.find_element_by_css_selector(
        'span[data-sort-name="stock_score_normalized"]'
    ).click()
    time.sleep(2)

    # Table cells, 10 per row:
    # [0]=Ticker, [1]=Share Price, [2]=Rating, [3]=Score,
    # [4]=Rating Change Date, [5]=Price Change %
    tickers = driver.find_elements_by_tag_name("td")

    positions = c.get_account(config.tda_acct_num, c.Account.Fields.POSITIONS)
    print(positions)

    # Order-placement loop intentionally disabled (kept for reference):
    # i = 0
    # while i < 40:
    #     print(len(tickers))
    #     ticker = str(tickers[i].text)
    #     print(ticker)
    #     share_price = float(tickers[i + 1].text)
    #     # How many dollars of each stock to buy:
    #     desired_dollar_amount = 1000
    #     num_shares = round(desired_dollar_amount / share_price)
    #     print(num_shares)
    #     order = equity_buy_market(ticker, 1)
    #     r = c.place_order(config.tda_acct_num, order)
    #     time.sleep(2)
    #     print(r.status_code)
    #     i += 10
finally:
    driver.quit()
"hopperkremer@gmail.com"
] | hopperkremer@gmail.com |
2daad19369a486f1df1d13c62a3551ce498e6cfe | a17c00f7ff2481cc08cd75d90a17d38cded8fde6 | /16.py | 7e84fff6a574c27cd7e4ae28439204142c178618 | [] | no_license | robinspollak/ProjectEuler | c5013b9676d3e0a976f1f29fffbc5e48755aac26 | 41298d11ec3587c261e5c9c93494829e9b13716b | refs/heads/master | 2021-01-21T13:25:46.952417 | 2016-05-31T21:33:04 | 2016-05-31T21:33:04 | 44,571,945 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | bignumber = 2**1000
# Project Euler 16: sum of the decimal digits of 2**1000.
# Fixed: the original used the Python 2 `print` statement (a SyntaxError on
# Python 3) and an unnecessary map(lambda ...) — a comprehension is clearer.
bignumber = 2 ** 1000
list_of_digits = [int(digit) for digit in str(bignumber)]
print(sum(list_of_digits))
| [
"rpollak96@gmail.com"
] | rpollak96@gmail.com |
027bb69c50ae03f62d4973b82b01570ef3170e9b | 9892312f5543eafffbd86a084daf90c8b4628a59 | /DataAnalyticsWithPython-Training/student_files/ch05_more_pandas/03_imputing.py | bbc64526e612666f822e0943770906f5eca04588 | [] | no_license | jigarshah2811/Data_Analytics_ML | 1718a79f8f569a4946b56cc499b17546beb9c67d | 107197cfd3e258c1a73c6930951463392159c3ed | refs/heads/master | 2022-01-19T19:42:14.746580 | 2019-07-21T20:21:07 | 2019-07-21T20:21:07 | 197,888,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | """
03_imputing.py
Use fillna() to impute missing values.
Use drop() to remove columns.
Each of these is shown below...
"""
import pandas as pd

# Saturday High/Low temperatures for three Colorado cities.
sat_temps = pd.DataFrame(data=[(78, 50), (82, 52), (83, 53)],
                         index=['Colorado Springs', 'Canon City', 'Pueblo'],
                         columns=['High', 'Low'])
# Humidity for FOUR cities — the extra 'Denver' row has no temperature data.
sat_humidity = pd.DataFrame([22, 18, 19, 25],
                            index=['Colorado Springs', 'Canon City', 'Pueblo', 'Denver'],
                            columns=['% Hum'])

# Column-wise concat aligns on the index; Denver's High/Low come back as NaN.
temps_df = pd.concat([sat_temps, sat_humidity], axis=1)
print(temps_df)

# ------------------------------
# Imputing values...
# NOTE(review): fillna(inplace=True) on a column *selection* is chained
# assignment; whether it updates temps_df itself is pandas-version-dependent
# (SettingWithCopyWarning territory, and inplace on a selection is deprecated
# in pandas 2.x) — confirm, or assign back: temps_df['High'] = ...fillna(80).
temps_df['High'].fillna(value=80, inplace=True)
temps_df['Low'].fillna(52, inplace=True)
print(temps_df)

# ------------------------------
# Copying a DataFrame...
temps_df2 = temps_df.copy(deep=True)
temps_df3 = temps_df.copy()  # deep=True is the default

# ---------------------------------
# Removing one or more columns...
temps_df2.drop(['Low', '% Hum'], axis=1, inplace=True)  # drop by column label
print(temps_df2)
temps_df3.drop(temps_df3.columns[[1, 2]], axis=1, inplace=True)  # drop by position
print(temps_df3)
| [
"jigasha2@cisco.com"
] | jigasha2@cisco.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.