hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3b8c1a040c940ae6e79c7eb41f1a4ba4854214 | 16,712 | py | Python | src/Nvidia-StyleGAN/results/00013-sgan-custom_dataset-1gpu/src/train.py | vignesh-pagadala/phishing-detection-gan | 193154f71e7abb37314cbf4aba580062fa87a672 | [
"Apache-2.0"
] | 1 | 2022-03-09T07:40:31.000Z | 2022-03-09T07:40:31.000Z | src/Nvidia-StyleGAN/results/00014-sgan-custom_dataset-1gpu/src/train.py | vignesh-pagadala/phishing-detection-gan | 193154f71e7abb37314cbf4aba580062fa87a672 | [
"Apache-2.0"
] | null | null | null | src/Nvidia-StyleGAN/results/00014-sgan-custom_dataset-1gpu/src/train.py | vignesh-pagadala/phishing-detection-gan | 193154f71e7abb37314cbf4aba580062fa87a672 | [
"Apache-2.0"
] | 2 | 2021-07-02T20:16:05.000Z | 2022-03-09T07:40:39.000Z | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Main entry point for training StyleGAN and ProGAN networks."""
import copy
import dnnlib
from dnnlib import EasyDict
import config
from metrics import metric_base
#----------------------------------------------------------------------------
# Official training configs for StyleGAN, targeted mainly for FFHQ.
if 1:
desc = 'sgan' # Description string included in result subdir name.
train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop.
G = EasyDict(func_name='training.networks_stylegan.G_style') # Options for generator network.
D = EasyDict(func_name='training.networks_stylegan.D_basic') # Options for discriminator network.
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer.
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer.
G_loss = EasyDict(func_name='training.loss.G_logistic_nonsaturating') # Options for generator loss.
D_loss = EasyDict(func_name='training.loss.D_logistic_simplegp', r1_gamma=10.0) # Options for discriminator loss.
dataset = EasyDict() # Options for load_dataset().
sched = EasyDict() # Options for TrainingSchedule.
grid = EasyDict(size='4k', layout='random') # Options for setup_snapshot_image_grid().
metrics = [metric_base.fid50k] # Options for MetricGroup.
submit_config = dnnlib.SubmitConfig() # Options for dnnlib.submit_run().
tf_config = {'rnd.np_random_seed': 1000} # Options for tflib.init_tf().
# Dataset.
desc += '-custom_dataset'; dataset = EasyDict(tfrecord_dir='custom_dataset', resolution=128); train.mirror_augment = False
#desc += '-ffhq'; dataset = EasyDict(tfrecord_dir='ffhq'); train.mirror_augment = True
#desc += '-ffhq512'; dataset = EasyDict(tfrecord_dir='ffhq', resolution=512); train.mirror_augment = True
#desc += '-ffhq256'; dataset = EasyDict(tfrecord_dir='ffhq', resolution=256); train.mirror_augment = True
#desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True
#desc += '-bedroom'; dataset = EasyDict(tfrecord_dir='lsun-bedroom-full'); train.mirror_augment = False
#desc += '-car'; dataset = EasyDict(tfrecord_dir='lsun-car-512x384'); train.mirror_augment = False
#desc += '-cat'; dataset = EasyDict(tfrecord_dir='lsun-cat-full'); train.mirror_augment = False
# Number of GPUs.
desc += '-1gpu'; submit_config.num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}
#desc += '-2gpu'; submit_config.num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8}
#desc += '-4gpu'; submit_config.num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16}
#desc += '-8gpu'; submit_config.num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32}
# Default options.
train.total_kimg = 25000
sched.lod_initial_resolution = 8
sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
sched.D_lrate_dict = EasyDict(sched.G_lrate_dict)
# WGAN-GP loss for CelebA-HQ.
#desc += '-wgangp'; G_loss = EasyDict(func_name='training.loss.G_wgan'); D_loss = EasyDict(func_name='training.loss.D_wgan_gp'); sched.G_lrate_dict = {k: min(v, 0.002) for k, v in sched.G_lrate_dict.items()}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict)
# Table 1.
#desc += '-tuned-baseline'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 0; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-add-mapping-and-styles'; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-remove-traditional-input'; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-add-noise-inputs'; G.style_mixing_prob = 0.0
#desc += '-mixing-regularization' # default
# Table 2.
#desc += '-mix0'; G.style_mixing_prob = 0.0
#desc += '-mix50'; G.style_mixing_prob = 0.5
#desc += '-mix90'; G.style_mixing_prob = 0.9 # default
#desc += '-mix100'; G.style_mixing_prob = 1.0
# Table 4.
#desc += '-traditional-0'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 0; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-traditional-8'; G.use_styles = False; G.use_pixel_norm = True; G.use_instance_norm = False; G.mapping_layers = 8; G.truncation_psi = None; G.const_input_layer = False; G.style_mixing_prob = 0.0; G.use_noise = False
#desc += '-stylebased-0'; G.mapping_layers = 0
#desc += '-stylebased-1'; G.mapping_layers = 1
#desc += '-stylebased-2'; G.mapping_layers = 2
#desc += '-stylebased-8'; G.mapping_layers = 8 # default
#----------------------------------------------------------------------------
# Official training configs for Progressive GAN, targeted mainly for CelebA-HQ.
if 0:
desc = 'pgan' # Description string included in result subdir name.
train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop.
G = EasyDict(func_name='training.networks_progan.G_paper') # Options for generator network.
D = EasyDict(func_name='training.networks_progan.D_paper') # Options for discriminator network.
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for generator optimizer.
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8) # Options for discriminator optimizer.
G_loss = EasyDict(func_name='training.loss.G_wgan') # Options for generator loss.
D_loss = EasyDict(func_name='training.loss.D_wgan_gp') # Options for discriminator loss.
dataset = EasyDict() # Options for load_dataset().
sched = EasyDict() # Options for TrainingSchedule.
grid = EasyDict(size='1080p', layout='random') # Options for setup_snapshot_image_grid().
metrics = [metric_base.fid50k] # Options for MetricGroup.
submit_config = dnnlib.SubmitConfig() # Options for dnnlib.submit_run().
tf_config = {'rnd.np_random_seed': 1000} # Options for tflib.init_tf().
# Dataset (choose one).
desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True
#desc += '-celeba'; dataset = EasyDict(tfrecord_dir='celeba'); train.mirror_augment = True
#desc += '-cifar10'; dataset = EasyDict(tfrecord_dir='cifar10')
#desc += '-cifar100'; dataset = EasyDict(tfrecord_dir='cifar100')
#desc += '-svhn'; dataset = EasyDict(tfrecord_dir='svhn')
#desc += '-mnist'; dataset = EasyDict(tfrecord_dir='mnist')
#desc += '-mnistrgb'; dataset = EasyDict(tfrecord_dir='mnistrgb')
#desc += '-syn1024rgb'; dataset = EasyDict(class_name='training.dataset.SyntheticDataset', resolution=1024, num_channels=3)
#desc += '-lsun-airplane'; dataset = EasyDict(tfrecord_dir='lsun-airplane-100k'); train.mirror_augment = True
#desc += '-lsun-bedroom'; dataset = EasyDict(tfrecord_dir='lsun-bedroom-100k'); train.mirror_augment = True
#desc += '-lsun-bicycle'; dataset = EasyDict(tfrecord_dir='lsun-bicycle-100k'); train.mirror_augment = True
#desc += '-lsun-bird'; dataset = EasyDict(tfrecord_dir='lsun-bird-100k'); train.mirror_augment = True
#desc += '-lsun-boat'; dataset = EasyDict(tfrecord_dir='lsun-boat-100k'); train.mirror_augment = True
#desc += '-lsun-bottle'; dataset = EasyDict(tfrecord_dir='lsun-bottle-100k'); train.mirror_augment = True
#desc += '-lsun-bridge'; dataset = EasyDict(tfrecord_dir='lsun-bridge-100k'); train.mirror_augment = True
#desc += '-lsun-bus'; dataset = EasyDict(tfrecord_dir='lsun-bus-100k'); train.mirror_augment = True
#desc += '-lsun-car'; dataset = EasyDict(tfrecord_dir='lsun-car-100k'); train.mirror_augment = True
#desc += '-lsun-cat'; dataset = EasyDict(tfrecord_dir='lsun-cat-100k'); train.mirror_augment = True
#desc += '-lsun-chair'; dataset = EasyDict(tfrecord_dir='lsun-chair-100k'); train.mirror_augment = True
#desc += '-lsun-churchoutdoor'; dataset = EasyDict(tfrecord_dir='lsun-churchoutdoor-100k'); train.mirror_augment = True
#desc += '-lsun-classroom'; dataset = EasyDict(tfrecord_dir='lsun-classroom-100k'); train.mirror_augment = True
#desc += '-lsun-conferenceroom'; dataset = EasyDict(tfrecord_dir='lsun-conferenceroom-100k'); train.mirror_augment = True
#desc += '-lsun-cow'; dataset = EasyDict(tfrecord_dir='lsun-cow-100k'); train.mirror_augment = True
#desc += '-lsun-diningroom'; dataset = EasyDict(tfrecord_dir='lsun-diningroom-100k'); train.mirror_augment = True
#desc += '-lsun-diningtable'; dataset = EasyDict(tfrecord_dir='lsun-diningtable-100k'); train.mirror_augment = True
#desc += '-lsun-dog'; dataset = EasyDict(tfrecord_dir='lsun-dog-100k'); train.mirror_augment = True
#desc += '-lsun-horse'; dataset = EasyDict(tfrecord_dir='lsun-horse-100k'); train.mirror_augment = True
#desc += '-lsun-kitchen'; dataset = EasyDict(tfrecord_dir='lsun-kitchen-100k'); train.mirror_augment = True
#desc += '-lsun-livingroom'; dataset = EasyDict(tfrecord_dir='lsun-livingroom-100k'); train.mirror_augment = True
#desc += '-lsun-motorbike'; dataset = EasyDict(tfrecord_dir='lsun-motorbike-100k'); train.mirror_augment = True
#desc += '-lsun-person'; dataset = EasyDict(tfrecord_dir='lsun-person-100k'); train.mirror_augment = True
#desc += '-lsun-pottedplant'; dataset = EasyDict(tfrecord_dir='lsun-pottedplant-100k'); train.mirror_augment = True
#desc += '-lsun-restaurant'; dataset = EasyDict(tfrecord_dir='lsun-restaurant-100k'); train.mirror_augment = True
#desc += '-lsun-sheep'; dataset = EasyDict(tfrecord_dir='lsun-sheep-100k'); train.mirror_augment = True
#desc += '-lsun-sofa'; dataset = EasyDict(tfrecord_dir='lsun-sofa-100k'); train.mirror_augment = True
#desc += '-lsun-tower'; dataset = EasyDict(tfrecord_dir='lsun-tower-100k'); train.mirror_augment = True
#desc += '-lsun-train'; dataset = EasyDict(tfrecord_dir='lsun-train-100k'); train.mirror_augment = True
#desc += '-lsun-tvmonitor'; dataset = EasyDict(tfrecord_dir='lsun-tvmonitor-100k'); train.mirror_augment = True
# Conditioning & snapshot options.
#desc += '-cond'; dataset.max_label_size = 'full' # conditioned on full label
#desc += '-cond1'; dataset.max_label_size = 1 # conditioned on first component of the label
#desc += '-g4k'; grid.size = '4k'
#desc += '-grpc'; grid.layout = 'row_per_class'
# Config presets (choose one).
#desc += '-preset-v1-1gpu'; submit_config.num_gpus = 1; D.mbstd_group_size = 16; sched.minibatch_base = 16; sched.minibatch_dict = {256: 14, 512: 6, 1024: 3}; sched.lod_training_kimg = 800; sched.lod_transition_kimg = 800; train.total_kimg = 19000
desc += '-preset-v2-1gpu'; submit_config.num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}; sched.G_lrate_dict = {1024: 0.0015}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-preset-v2-2gpus'; submit_config.num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8}; sched.G_lrate_dict = {512: 0.0015, 1024: 0.002}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-preset-v2-4gpus'; submit_config.num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16}; sched.G_lrate_dict = {256: 0.0015, 512: 0.002, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
#desc += '-preset-v2-8gpus'; submit_config.num_gpus = 8; sched.minibatch_base = 32; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32}; sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
# Numerical precision (choose one).
desc += '-fp32'; sched.max_minibatch_per_gpu = {256: 16, 512: 8, 1024: 4}
#desc += '-fp16'; G.dtype = 'float16'; D.dtype = 'float16'; G.pixelnorm_epsilon=1e-4; G_opt.use_loss_scaling = True; D_opt.use_loss_scaling = True; sched.max_minibatch_per_gpu = {512: 16, 1024: 8}
# Disable individual features.
#desc += '-nogrowing'; sched.lod_initial_resolution = 1024; sched.lod_training_kimg = 0; sched.lod_transition_kimg = 0; train.total_kimg = 10000
#desc += '-nopixelnorm'; G.use_pixelnorm = False
#desc += '-nowscale'; G.use_wscale = False; D.use_wscale = False
#desc += '-noleakyrelu'; G.use_leakyrelu = False
#desc += '-nosmoothing'; train.G_smoothing_kimg = 0.0
#desc += '-norepeat'; train.minibatch_repeats = 1
#desc += '-noreset'; train.reset_opt_for_new_lod = False
# Special modes.
#desc += '-BENCHMARK'; sched.lod_initial_resolution = 4; sched.lod_training_kimg = 3; sched.lod_transition_kimg = 3; train.total_kimg = (8*2+1)*3; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000
#desc += '-BENCHMARK0'; sched.lod_initial_resolution = 1024; train.total_kimg = 10; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1000; train.network_snapshot_ticks = 1000
#desc += '-VERBOSE'; sched.tick_kimg_base = 1; sched.tick_kimg_dict = {}; train.image_snapshot_ticks = 1; train.network_snapshot_ticks = 100
#desc += '-GRAPH'; train.save_tf_graph = True
#desc += '-HIST'; train.save_weight_histograms = True
#----------------------------------------------------------------------------
# Main entry point for training.
# Calls the function indicated by 'train' using the selected options.
def main():
kwargs = EasyDict(train)
kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
kwargs.update(dataset_args=dataset, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
kwargs.submit_config = copy.deepcopy(submit_config)
kwargs.submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir)
kwargs.submit_config.run_dir_ignore += config.run_dir_ignore
kwargs.submit_config.run_desc = desc
dnnlib.submit_run(**kwargs)
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
| 86.14433 | 302 | 0.622487 |
import copy
import dnnlib
from dnnlib import EasyDict
import config
from metrics import metric_base
if 1:
desc = 'sgan'
train = EasyDict(run_func_name='training.training_loop.training_loop')
G = EasyDict(func_name='training.networks_stylegan.G_style')
D = EasyDict(func_name='training.networks_stylegan.D_basic')
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
G_loss = EasyDict(func_name='training.loss.G_logistic_nonsaturating')
D_loss = EasyDict(func_name='training.loss.D_logistic_simplegp', r1_gamma=10.0)
dataset = EasyDict()
sched = EasyDict()
grid = EasyDict(size='4k', layout='random')
metrics = [metric_base.fid50k]
submit_config = dnnlib.SubmitConfig()
tf_config = {'rnd.np_random_seed': 1000}
desc += '-custom_dataset'; dataset = EasyDict(tfrecord_dir='custom_dataset', resolution=128); train.mirror_augment = False
desc += '-1gpu'; submit_config.num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}
train.total_kimg = 25000
sched.lod_initial_resolution = 8
sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
sched.D_lrate_dict = EasyDict(sched.G_lrate_dict)
:
desc = 'pgan'
train = EasyDict(run_func_name='training.training_loop.training_loop')
G = EasyDict(func_name='training.networks_progan.G_paper')
D = EasyDict(func_name='training.networks_progan.D_paper')
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
G_loss = EasyDict(func_name='training.loss.G_wgan')
D_loss = EasyDict(func_name='training.loss.D_wgan_gp')
dataset = EasyDict()
sched = EasyDict()
grid = EasyDict(size='1080p', layout='random')
metrics = [metric_base.fid50k]
submit_config = dnnlib.SubmitConfig()
tf_config = {'rnd.np_random_seed': 1000}
desc += '-celebahq'; dataset = EasyDict(tfrecord_dir='celebahq'); train.mirror_augment = True
num_gpus = 1; sched.minibatch_base = 4; sched.minibatch_dict = {4: 128, 8: 128, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8, 512: 4}; sched.G_lrate_dict = {1024: 0.0015}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict); train.total_kimg = 12000
desc += '-fp32'; sched.max_minibatch_per_gpu = {256: 16, 512: 8, 1024: 4}
def main():
kwargs = EasyDict(train)
kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
kwargs.update(dataset_args=dataset, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
kwargs.submit_config = copy.deepcopy(submit_config)
kwargs.submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir)
kwargs.submit_config.run_dir_ignore += config.run_dir_ignore
kwargs.submit_config.run_desc = desc
dnnlib.submit_run(**kwargs)
if __name__ == "__main__":
main()
| true | true |
1c3b8ca5afcb581835c8a7ef7d06ea1ed3df160d | 539 | py | Python | cgbeacon2/constants/__init__.py | Clinical-Genomics/cgbeacon2 | 968771c80d65b265141a0a67c6c58c565536fa49 | [
"MIT"
] | null | null | null | cgbeacon2/constants/__init__.py | Clinical-Genomics/cgbeacon2 | 968771c80d65b265141a0a67c6c58c565536fa49 | [
"MIT"
] | 169 | 2020-04-08T08:34:17.000Z | 2022-03-31T12:25:30.000Z | cgbeacon2/constants/__init__.py | Clinical-Genomics/cgbeacon2 | 968771c80d65b265141a0a67c6c58c565536fa49 | [
"MIT"
] | 1 | 2020-10-26T15:50:56.000Z | 2020-10-26T15:50:56.000Z | from .consent_codes import CONSENT_CODES
from .variant_constants import CHROMOSOMES
from .query_errors import (
NO_MANDATORY_PARAMS,
NO_SECONDARY_PARAMS,
NO_POSITION_PARAMS,
INVALID_COORDINATES,
BUILD_MISMATCH,
)
from .oauth_errors import (
MISSING_PUBLIC_KEY,
MISSING_TOKEN_CLAIMS,
INVALID_TOKEN_CLAIMS,
EXPIRED_TOKEN_SIGNATURE,
INVALID_TOKEN_AUTH,
NO_GA4GH_USERDATA,
PASSPORTS_ERROR,
)
from .request_errors import MISSING_TOKEN, WRONG_SCHEME
from .response_objs import QUERY_PARAMS_API_V1
| 25.666667 | 55 | 0.795918 | from .consent_codes import CONSENT_CODES
from .variant_constants import CHROMOSOMES
from .query_errors import (
NO_MANDATORY_PARAMS,
NO_SECONDARY_PARAMS,
NO_POSITION_PARAMS,
INVALID_COORDINATES,
BUILD_MISMATCH,
)
from .oauth_errors import (
MISSING_PUBLIC_KEY,
MISSING_TOKEN_CLAIMS,
INVALID_TOKEN_CLAIMS,
EXPIRED_TOKEN_SIGNATURE,
INVALID_TOKEN_AUTH,
NO_GA4GH_USERDATA,
PASSPORTS_ERROR,
)
from .request_errors import MISSING_TOKEN, WRONG_SCHEME
from .response_objs import QUERY_PARAMS_API_V1
| true | true |
1c3b8d49c184ec6f47976fcb0921813004ec198e | 5,294 | py | Python | ethereum_private/init_chain_admin.py | alesiong/dpass-pc-backend | b787621133eb4d250380bfd394e8e6a5f1292640 | [
"MIT"
] | null | null | null | ethereum_private/init_chain_admin.py | alesiong/dpass-pc-backend | b787621133eb4d250380bfd394e8e6a5f1292640 | [
"MIT"
] | null | null | null | ethereum_private/init_chain_admin.py | alesiong/dpass-pc-backend | b787621133eb4d250380bfd394e8e6a5f1292640 | [
"MIT"
] | null | null | null | """
Init the ethereum private chain. This script should be run only once for each unique private chain.
This script is for administrator, as it will deploy the factory contract to the chain.
Chain initialization for end user is run when the server starts.
You need to have `geth` in PATH.
This script may not work on windows
If you need to compile the contracts, you need to also have `solc` in PATH and `solc` python package installed
"""
import argparse
import getpass
import json
import os
import subprocess
from pathlib import Path
import time
from web3 import Web3, IPCProvider
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def create_storage_factory(web3, account, abi, bytecode):
storage_factory = web3.eth.contract(abi=abi, bytecode=bytecode)
transaction_hash = storage_factory.deploy(transaction={'from': account})
while True:
time.sleep(0.1)
receipt = web3.eth.getTransactionReceipt(transaction_hash)
if receipt:
return receipt['contractAddress']
def compile_sol(filename, contract):
from solc import compile_files
compiled_contract = compile_files([filename])
compiled_contract = compiled_contract['%s:%s' % (filename, contract)]
return compiled_contract['abi'], compiled_contract['bin']
if __name__ == '__main__':
cwd = Path('.')
parser = argparse.ArgumentParser(
description='Admin tools to generate a new ethereum private chain and deploy the storage factory contract')
parser.add_argument('--compile', action='store_true', help='Use `solc` to compile the storage.sol')
args = parser.parse_args()
print('This tool is for administrator to generate a new ethereum private chain and then deploy'
' the storage factory contract, type YES if you want to continue:', end='')
inp = input()
if inp != 'YES':
exit()
if os.path.exists('data'):
print(bcolors.FAIL + 'A private chain exists, exiting...' + bcolors.ENDC)
exit()
if not (os.path.exists('genesis.json.template') and os.path.exists('static-nodes.json') and os.path.exists('contracts')):
print(bcolors.FAIL + 'genesis.json.template, static-nodes.json, and contracts/ should be in this folder'
+ bcolors.ENDC)
exit()
print(bcolors.OKGREEN + 'Creating the initial account, please remember the password' + bcolors.ENDC)
os.system('geth account new --datadir . --keystore .')
key_file_name = list(cwd.glob('UTC--*'))[0].name
init_address = key_file_name[key_file_name.rfind('--') + 2:]
inp = input('Type YES to confirm the initial account address ' + init_address + ':')
if inp != 'YES':
os.remove(key_file_name)
exit()
with open('tmp.json', 'w') as f:
print(open('./genesis.json.template').read().replace('<initial account>', init_address), file=f)
print('Initiating the private blockchain, will assign 10K ETH to the initial account')
os.system('geth --datadir ./data/ init ./tmp.json')
os.rename(key_file_name, './data/keystore/' + key_file_name)
os.remove('tmp.json')
print('Linking the static-nodes.json')
os.symlink('../../static-nodes.json', './data/geth/static-nodes.json')
print(bcolors.OKGREEN + 'Please edit the static-nodes.json to add more static peers later.' + bcolors.ENDC)
print('Start the geth process')
geth = subprocess.Popen(['geth',
'--datadir',
'./data/',
'--ethash.dagdir',
'./data/ethash',
'--networkid',
'1042',
'--targetgaslimit',
'4000000'
],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
# wait for geth to start
time.sleep(5)
try:
web3 = Web3(IPCProvider('./data/geth.ipc'))
web3.miner.start(1)
init_address = '0x' + init_address
password = getpass.getpass('Please input the password for the initial account:')
web3.personal.unlockAccount(init_address, password)
print('Deploying storage factory contract...')
print('Also start mining, it will take a lot of time to generate DAG (~3 min)...')
if args.compile:
storage_factory_abi, storage_factory_bin = compile_sol('./contracts/storage.sol', 'StorageFactory')
json.dump(storage_factory_abi, open('contracts/storage_factory.abi.json', 'w'))
json.dump(storage_factory_bin, open('contracts/storage_factory.bin', 'w'))
else:
storage_factory_abi = json.load(open('contracts/storage_factory.abi.json'))
storage_factory_bin = open('contracts/storage_factory.bin').read().strip()
factory_address = create_storage_factory(web3, init_address, storage_factory_abi, storage_factory_bin)
print(bcolors.OKGREEN + 'The storage factory address is ' + factory_address + bcolors.ENDC)
finally:
geth.terminate()
| 41.359375 | 125 | 0.635436 | import argparse
import getpass
import json
import os
import subprocess
from pathlib import Path
import time
from web3 import Web3, IPCProvider
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def create_storage_factory(web3, account, abi, bytecode):
storage_factory = web3.eth.contract(abi=abi, bytecode=bytecode)
transaction_hash = storage_factory.deploy(transaction={'from': account})
while True:
time.sleep(0.1)
receipt = web3.eth.getTransactionReceipt(transaction_hash)
if receipt:
return receipt['contractAddress']
def compile_sol(filename, contract):
from solc import compile_files
compiled_contract = compile_files([filename])
compiled_contract = compiled_contract['%s:%s' % (filename, contract)]
return compiled_contract['abi'], compiled_contract['bin']
if __name__ == '__main__':
cwd = Path('.')
parser = argparse.ArgumentParser(
description='Admin tools to generate a new ethereum private chain and deploy the storage factory contract')
parser.add_argument('--compile', action='store_true', help='Use `solc` to compile the storage.sol')
args = parser.parse_args()
print('This tool is for administrator to generate a new ethereum private chain and then deploy'
' the storage factory contract, type YES if you want to continue:', end='')
inp = input()
if inp != 'YES':
exit()
if os.path.exists('data'):
print(bcolors.FAIL + 'A private chain exists, exiting...' + bcolors.ENDC)
exit()
if not (os.path.exists('genesis.json.template') and os.path.exists('static-nodes.json') and os.path.exists('contracts')):
print(bcolors.FAIL + 'genesis.json.template, static-nodes.json, and contracts/ should be in this folder'
+ bcolors.ENDC)
exit()
print(bcolors.OKGREEN + 'Creating the initial account, please remember the password' + bcolors.ENDC)
os.system('geth account new --datadir . --keystore .')
key_file_name = list(cwd.glob('UTC--*'))[0].name
init_address = key_file_name[key_file_name.rfind('--') + 2:]
inp = input('Type YES to confirm the initial account address ' + init_address + ':')
if inp != 'YES':
os.remove(key_file_name)
exit()
with open('tmp.json', 'w') as f:
print(open('./genesis.json.template').read().replace('<initial account>', init_address), file=f)
print('Initiating the private blockchain, will assign 10K ETH to the initial account')
os.system('geth --datadir ./data/ init ./tmp.json')
os.rename(key_file_name, './data/keystore/' + key_file_name)
os.remove('tmp.json')
print('Linking the static-nodes.json')
os.symlink('../../static-nodes.json', './data/geth/static-nodes.json')
print(bcolors.OKGREEN + 'Please edit the static-nodes.json to add more static peers later.' + bcolors.ENDC)
print('Start the geth process')
geth = subprocess.Popen(['geth',
'--datadir',
'./data/',
'--ethash.dagdir',
'./data/ethash',
'--networkid',
'1042',
'--targetgaslimit',
'4000000'
],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
time.sleep(5)
try:
web3 = Web3(IPCProvider('./data/geth.ipc'))
web3.miner.start(1)
init_address = '0x' + init_address
password = getpass.getpass('Please input the password for the initial account:')
web3.personal.unlockAccount(init_address, password)
print('Deploying storage factory contract...')
print('Also start mining, it will take a lot of time to generate DAG (~3 min)...')
if args.compile:
storage_factory_abi, storage_factory_bin = compile_sol('./contracts/storage.sol', 'StorageFactory')
json.dump(storage_factory_abi, open('contracts/storage_factory.abi.json', 'w'))
json.dump(storage_factory_bin, open('contracts/storage_factory.bin', 'w'))
else:
storage_factory_abi = json.load(open('contracts/storage_factory.abi.json'))
storage_factory_bin = open('contracts/storage_factory.bin').read().strip()
factory_address = create_storage_factory(web3, init_address, storage_factory_abi, storage_factory_bin)
print(bcolors.OKGREEN + 'The storage factory address is ' + factory_address + bcolors.ENDC)
finally:
geth.terminate()
| true | true |
1c3b8e5da1c72d325d541aeb4ff0e38fc1ef4c49 | 1,278 | py | Python | PythonBaseDemo/fileIOdemo/12.7/test.py | CypHelp/TestNewWorldDemo | ee6f73df05756f191c1c56250fa290461fdd1b9a | [
"Apache-2.0"
] | null | null | null | PythonBaseDemo/fileIOdemo/12.7/test.py | CypHelp/TestNewWorldDemo | ee6f73df05756f191c1c56250fa290461fdd1b9a | [
"Apache-2.0"
] | null | null | null | PythonBaseDemo/fileIOdemo/12.7/test.py | CypHelp/TestNewWorldDemo | ee6f73df05756f191c1c56250fa290461fdd1b9a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import os, sys
# 打开文件
fd = os.open("f1.txt",os.O_RDWR|os.O_CREAT)
print(fd)
## 写入字符串
#ret = os.write(fd,"This is runoob.com site")
#
## 输入返回值
#print "写入的位数为: "
#print ret
#
#print "写入成功"
#
## 关闭文件
#os.close(fd)
#print "关闭文件成功!!" | 37.588235 | 73 | 0.241002 | true | true | |
1c3b8f17dd7de87e6422c0c380a155afbb757940 | 943 | py | Python | hippoapi/scripts/paramiko_transport.py | like-ycy/hippo_api | 708751a6bf9bfc69a4139dac90820f09e0ba042a | [
"MIT"
] | null | null | null | hippoapi/scripts/paramiko_transport.py | like-ycy/hippo_api | 708751a6bf9bfc69a4139dac90820f09e0ba042a | [
"MIT"
] | null | null | null | hippoapi/scripts/paramiko_transport.py | like-ycy/hippo_api | 708751a6bf9bfc69a4139dac90820f09e0ba042a | [
"MIT"
] | null | null | null | import paramiko
from paramiko.ssh_exception import AuthenticationException

# Transport-mode demo: keep one SSH connection open and run each
# user-supplied command on its own session channel until "exit" is typed.
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
    # 1. Password-based connection
    ssh.connect(hostname='192.168.174.23', username='root', password='123.com')
    # 2. Key-based connection (alternative)
    # pkey = RSAKey.from_private_key(StringIO(pkey))
    # ssh.connect(hostname='192.168.174.23', username='root', pkey=pkey)
    while True:
        # Open a fresh channel on the established transport for this command.
        cli = ssh.get_transport().open_session()
        # Per-channel timeout, in seconds.
        cli.settimeout(120)
        command = input("请输入您要发送的指令:")
        if command == "exit":
            break
        # Run the command on the remote host.
        cli.exec_command(command)
        # Read the output the remote host produced for this command.
        stdout = cli.makefile("rb", -1)
        content = stdout.read().decode()
        print(content)
except AuthenticationException as e:
    # BUG FIX: Python 3 exceptions have no .message attribute (removed per
    # PEP 352); printing the exception object itself gives the same
    # information without raising AttributeError inside the handler.
    print(e)
finally:
    # Always tear down the SSH connection.
    ssh.close()
| 24.179487 | 79 | 0.630965 | import paramiko
from paramiko.ssh_exception import AuthenticationException
# Transport-mode demo: one SSH connection, one channel per command.
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
    # Password-based connection.
    ssh.connect(hostname='192.168.174.23', username='root', password='123.com')
    while True:
        # Fresh session channel on the existing transport for each command.
        cli = ssh.get_transport().open_session()
        # Per-channel timeout, in seconds.
        cli.settimeout(120)
        command = input("请输入您要发送的指令:")
        if command == "exit":
            break
        cli.exec_command(command)
        # Read the remote command's output.
        stdout = cli.makefile("rb", -1)
        content = stdout.read().decode()
        print(content)
except AuthenticationException as e:
    # NOTE(review): Python 3 exceptions have no .message attribute -- this
    # line would raise AttributeError when authentication fails; confirm
    # and switch to print(e).
    print(e.message)
finally:
    ssh.close()
| true | true |
1c3b90aabb486e78ca185ec99144aefce7e93bab | 2,351 | py | Python | google/cloud/mediatranslation_v1beta1/services/speech_translation_service/transports/base.py | vam-google/python-media-translation | 6b7cf38268b8b42373db97d2f70d2089e4c57462 | [
"Apache-2.0"
] | null | null | null | google/cloud/mediatranslation_v1beta1/services/speech_translation_service/transports/base.py | vam-google/python-media-translation | 6b7cf38268b8b42373db97d2f70d2089e4c57462 | [
"Apache-2.0"
] | null | null | null | google/cloud/mediatranslation_v1beta1/services/speech_translation_service/transports/base.py | vam-google/python-media-translation | 6b7cf38268b8b42373db97d2f70d2089e4c57462 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
from google import auth
from google.auth import credentials # type: ignore
from google.cloud.mediatranslation_v1beta1.types import media_translation
class SpeechTranslationServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for SpeechTranslationService."""

    # OAuth scopes requested when falling back to application-default
    # credentials.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    def __init__(
        self,
        *,
        host: str = "mediatranslation.googleapis.com",
        credentials: credentials.Credentials = None,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): Hostname (with optional ``:port``) to
                connect to.
            credentials (Optional[google.auth.credentials.Credentials]):
                Authorization credentials attached to requests; when omitted,
                application-default credentials are resolved from the
                environment.
        """
        # Append the default HTTPS port when the caller gave a bare hostname.
        self._host = host if ":" in host else host + ":443"

        # Resolve application-default credentials only when none were given.
        if credentials is None:
            credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
        self._credentials = credentials

    @property
    def streaming_translate_speech(
        self
    ) -> typing.Callable[
        [media_translation.StreamingTranslateSpeechRequest],
        media_translation.StreamingTranslateSpeechResponse,
    ]:
        # Concrete transports must supply the RPC callable.
        raise NotImplementedError
__all__ = ("SpeechTranslationServiceTransport",)
| 32.652778 | 78 | 0.682263 |
import abc
import typing
from google import auth
from google.auth import credentials
from google.cloud.mediatranslation_v1beta1.types import media_translation
class SpeechTranslationServiceTransport(metaclass=abc.ABCMeta):
    """Abstract transport class for SpeechTranslationService."""
    # OAuth scopes requested when falling back to default credentials.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
    def __init__(
        self,
        *,
        host: str = "mediatranslation.googleapis.com",
        credentials: credentials.Credentials = None,
    ) -> None:
        """Instantiate the transport.

        host -- hostname (with optional port) to connect to
        credentials -- optional google.auth credentials; resolved from the
            environment via auth.default() when omitted
        """
        # Default to port 443 (HTTPS) when no port was given.
        if ":" not in host:
            host += ":443"
        self._host = host
        # Fall back to application-default credentials when none supplied.
        if credentials is None:
            credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
        self._credentials = credentials
    @property
    def streaming_translate_speech(
        self
    ) -> typing.Callable[
        [media_translation.StreamingTranslateSpeechRequest],
        media_translation.StreamingTranslateSpeechResponse,
    ]:
        """Abstract: concrete transports return the RPC callable here."""
        raise NotImplementedError
__all__ = ("SpeechTranslationServiceTransport",)
| true | true |
1c3b911492a60c6d7276ed012bdfeccdc209cf10 | 21,229 | py | Python | venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/GL/images.py | temelkirci/Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | 1 | 2022-03-02T17:07:20.000Z | 2022-03-02T17:07:20.000Z | venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/GL/images.py | temelkirci/RealTime_6DOF_Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | null | null | null | venv/Lib/site-packages/PyOpenGL-3.0.1/OpenGL/GL/images.py | temelkirci/RealTime_6DOF_Motion_Editor | a8b8d4c4d2dcc9be28385600f56066cef92a38ad | [
"MIT"
] | null | null | null | """Image-handling routines
### Unresolved:
Following methods are not yet resolved due to my not being sure how the
function should be wrapped:
glCompressedTexImage3D
glCompressedTexImage2D
glCompressedTexImage1D
glCompressedTexSubImage3D
glCompressedTexSubImage2D
glCompressedTexSubImage1D
"""
from OpenGL.raw import GL as simple
from OpenGL import images, arrays, wrapper, platform
import ctypes
def asInt( value ):
    """Coerce a float to the nearest int; pass any other value through."""
    return int(round(value, 0)) if isinstance(value, float) else value
## update the image tables with standard image types...
#images.FORMAT_BITS.update( {
# simple.GL_BITMAP : 1, # must be GL_UNSIGNED_BYTE
#
# simple.GL_RED : 8,
# simple.GL_GREEN : 8,
# simple.GL_BLUE : 8,
# simple.GL_ALPHA : 8,
# simple.GL_LUMINANCE : 8,
# simple.GL_LUMINANCE_ALPHA : 8,
# simple.GL_COLOR_INDEX : 8,
# simple.GL_STENCIL_INDEX : 8,
# simple.GL_DEPTH_COMPONENT : 8,
# simple.GL_RGB : 24,
# simple.GL_BGR : 24,
#
# simple.GL_RGBA : 32,
# simple.GL_BGRA : 32,
# simple.GL_ABGR_EXT : 32,
# simple.GL_CMYK_EXT : 32,
#
# simple.GL_CMYKA_EXT : 40,
#
# simple.GL_YCRCB_422_SGIX : 8, # must be GL_UNSIGNED_BYTE
# simple.GL_YCRCB_444_SGIX : 8, # must be GL_UNSIGNED_SHORT
#
# simple.GL_FORMAT_SUBSAMPLE_24_24_OML : 32, # must be GL_UNSIGNED_INT_10_10_10_2
# simple.GL_FORMAT_SUBSAMPLE_244_244_OML : 32, # must be GL_UNSIGNED_INT_10_10_10_2
#} )
# Number of components (channels) contributed by each pixel-format
# constant; used by the images module when sizing pixel buffers.
images.COMPONENT_COUNTS.update( {
    simple.GL_BITMAP : 1, # must be GL_UNSIGNED_BYTE
    simple.GL_RED : 1,
    simple.GL_GREEN : 1,
    simple.GL_BLUE : 1,
    simple.GL_ALPHA : 1,
    simple.GL_LUMINANCE : 1,
    simple.GL_LUMINANCE_ALPHA : 2,
    simple.GL_COLOR_INDEX : 1,
    simple.GL_STENCIL_INDEX : 1,
    simple.GL_DEPTH_COMPONENT : 1,
    simple.GL_RGB : 3,
    simple.GL_BGR : 3,
    simple.GL_RGBA : 4,
    simple.GL_BGRA : 4,
    simple.GL_ABGR_EXT : 4,
    simple.GL_CMYK_EXT : 4,
    simple.GL_CMYKA_EXT : 5,
    simple.GL_YCRCB_422_SGIX : 1, # must be GL_UNSIGNED_BYTE
    simple.GL_YCRCB_444_SGIX : 1, # must be GL_UNSIGNED_SHORT
    simple.GL_FORMAT_SUBSAMPLE_24_24_OML : 1, # must be GL_UNSIGNED_INT_10_10_10_2
    simple.GL_FORMAT_SUBSAMPLE_244_244_OML : 1, # must be GL_UNSIGNED_INT_10_10_10_2
} )
#images.TYPE_TO_BITS.update( {
# simple.GL_UNSIGNED_BYTE_3_3_2 : 8,
# simple.GL_UNSIGNED_BYTE_2_3_3_REV : 8,
# simple.GL_UNSIGNED_SHORT_4_4_4_4 : 16,
# simple.GL_UNSIGNED_SHORT_4_4_4_4_REV : 16,
# simple.GL_UNSIGNED_SHORT_5_5_5_1 : 16,
# simple.GL_UNSIGNED_SHORT_1_5_5_5_REV : 16,
# simple.GL_UNSIGNED_SHORT_5_6_5 : 16,
# simple.GL_UNSIGNED_SHORT_5_6_5_REV : 16,
# simple.GL_UNSIGNED_INT_8_8_8_8 : 32,
# simple.GL_UNSIGNED_INT_8_8_8_8_REV : 32,
# simple.GL_UNSIGNED_INT_10_10_10_2 : 32,
# simple.GL_UNSIGNED_INT_2_10_10_10_REV : 32,
# simple.GL_UNSIGNED_BYTE : ctypes.sizeof(simple.GLubyte) * 8,
# simple.GL_BYTE: ctypes.sizeof(simple.GLbyte) * 8,
# simple.GL_UNSIGNED_SHORT : ctypes.sizeof(simple.GLushort) * 8,
# simple.GL_SHORT : ctypes.sizeof(simple.GLshort) * 8,
# simple.GL_UNSIGNED_INT : ctypes.sizeof(simple.GLuint) * 8,
# simple.GL_INT : ctypes.sizeof(simple.GLint) * 8,
# simple.GL_FLOAT : ctypes.sizeof(simple.GLfloat) * 8,
# simple.GL_DOUBLE : ctypes.sizeof(simple.GLdouble) * 8,
#} )
# Map each GL data-type constant (including packed-pixel types) to the
# base array data-type used to store/transfer it.
images.TYPE_TO_ARRAYTYPE.update( {
    simple.GL_UNSIGNED_BYTE_3_3_2 : simple.GL_UNSIGNED_BYTE,
    simple.GL_UNSIGNED_BYTE_2_3_3_REV : simple.GL_UNSIGNED_BYTE,
    simple.GL_UNSIGNED_SHORT_4_4_4_4 : simple.GL_UNSIGNED_SHORT,
    simple.GL_UNSIGNED_SHORT_4_4_4_4_REV : simple.GL_UNSIGNED_SHORT,
    simple.GL_UNSIGNED_SHORT_5_5_5_1 : simple.GL_UNSIGNED_SHORT,
    simple.GL_UNSIGNED_SHORT_1_5_5_5_REV : simple.GL_UNSIGNED_SHORT,
    simple.GL_UNSIGNED_SHORT_5_6_5 : simple.GL_UNSIGNED_SHORT,
    simple.GL_UNSIGNED_SHORT_5_6_5_REV : simple.GL_UNSIGNED_SHORT,
    simple.GL_UNSIGNED_INT_8_8_8_8 : simple.GL_UNSIGNED_INT,
    simple.GL_UNSIGNED_INT_8_8_8_8_REV : simple.GL_UNSIGNED_INT,
    simple.GL_UNSIGNED_INT_10_10_10_2 : simple.GL_UNSIGNED_INT,
    simple.GL_UNSIGNED_INT_2_10_10_10_REV : simple.GL_UNSIGNED_INT,
    simple.GL_UNSIGNED_BYTE : simple.GL_UNSIGNED_BYTE,
    simple.GL_BYTE: simple.GL_BYTE,
    simple.GL_UNSIGNED_SHORT : simple.GL_UNSIGNED_SHORT,
    simple.GL_SHORT : simple.GL_SHORT,
    simple.GL_UNSIGNED_INT : simple.GL_UNSIGNED_INT,
    simple.GL_INT : simple.GL_INT,
    simple.GL_FLOAT : simple.GL_FLOAT,
    simple.GL_DOUBLE : simple.GL_DOUBLE,
    simple.GL_BITMAP : simple.GL_UNSIGNED_BYTE,
} )
# Packed-pixel types mapped to the number of components that are packed
# into a single storage unit (presumably used when computing tightly
# packed image sizes -- see images module for the consumer).
images.TIGHT_PACK_FORMATS.update({
    simple.GL_UNSIGNED_BYTE_3_3_2 : 3,
    simple.GL_UNSIGNED_BYTE_2_3_3_REV : 3,
    simple.GL_UNSIGNED_SHORT_4_4_4_4 : 4,
    simple.GL_UNSIGNED_SHORT_4_4_4_4_REV : 4,
    simple.GL_UNSIGNED_SHORT_5_5_5_1 : 4,
    simple.GL_UNSIGNED_SHORT_1_5_5_5_REV : 4,
    simple.GL_UNSIGNED_SHORT_5_6_5 : 3,
    simple.GL_UNSIGNED_SHORT_5_6_5_REV : 3,
    simple.GL_UNSIGNED_INT_8_8_8_8 : 4,
    simple.GL_UNSIGNED_INT_8_8_8_8_REV : 4,
    simple.GL_UNSIGNED_INT_10_10_10_2 : 4,
    simple.GL_UNSIGNED_INT_2_10_10_10_REV : 4,
    simple.GL_BITMAP: 8, # single bits, 8 of them...
})
# For each image rank (1D..4D), the list of (function, pname, value)
# glPixelStore settings applied before a pixel transfer of that rank.
# All ranks force GL_PACK_ALIGNMENT to 1 (tightly packed rows).
images.RANK_PACKINGS.update( {
    4: [
        (simple.glPixelStorei,simple.GL_PACK_SKIP_VOLUMES_SGIS, 0),
        (simple.glPixelStorei,simple.GL_PACK_IMAGE_DEPTH_SGIS, 0),
        (simple.glPixelStorei,simple.GL_PACK_ALIGNMENT, 1),
    ],
    3: [
        (simple.glPixelStorei,simple.GL_PACK_SKIP_IMAGES, 0),
        (simple.glPixelStorei,simple.GL_PACK_IMAGE_HEIGHT, 0),
        (simple.glPixelStorei,simple.GL_PACK_ALIGNMENT, 1),
    ],
    2: [
        (simple.glPixelStorei,simple.GL_PACK_ROW_LENGTH, 0),
        (simple.glPixelStorei,simple.GL_PACK_SKIP_ROWS, 0),
        (simple.glPixelStorei,simple.GL_PACK_ALIGNMENT, 1),
    ],
    1: [
        (simple.glPixelStorei,simple.GL_PACK_SKIP_PIXELS, 0),
        (simple.glPixelStorei,simple.GL_PACK_ALIGNMENT, 1),
    ],
} )
# Public API of this module; commented entries are not (yet) wrapped here.
__all__ = (
    'glReadPixels',
    'glReadPixelsb',
    'glReadPixelsd',
    'glReadPixelsf',
    'glReadPixelsi',
    'glReadPixelss',
    'glReadPixelsub',
    'glReadPixelsui',
    'glReadPixelsus',
    'glGetTexImage',
    'glDrawPixels',
    'glDrawPixelsb',
    'glDrawPixelsf',
    'glDrawPixelsi',
    'glDrawPixelss',
    'glDrawPixelsub',
    'glDrawPixelsui',
    'glDrawPixelsus',
    'glTexSubImage2D',
    'glTexSubImage1D',
    #'glTexSubImage3D',
    'glTexImage1D',
    'glTexImage2D',
    #'glTexImage3D',
    'glGetTexImageb',
    'glGetTexImaged',
    'glGetTexImagef',
    'glGetTexImagei',
    'glGetTexImages',
    'glGetTexImageub',
    'glGetTexImageui',
    'glGetTexImageus',
    'glTexImage1Db',
    'glTexImage2Db',
    #'glTexImage3Db',
    'glTexSubImage1Db',
    'glTexSubImage2Db',
    #'glTexSubImage3Db',
    'glTexImage1Df',
    'glTexImage2Df',
    #'glTexImage3Df',
    'glTexSubImage1Df',
    'glTexSubImage2Df',
    #'glTexSubImage3Df',
    'glTexImage1Di',
    'glTexImage2Di',
    #'glTexImage3Di',
    'glTexSubImage1Di',
    'glTexSubImage2Di',
    #'glTexSubImage3Di',
    'glTexImage1Ds',
    'glTexImage2Ds',
    #'glTexImage3Ds',
    'glTexSubImage1Ds',
    'glTexSubImage2Ds',
    #'glTexSubImage3Ds',
    'glTexImage1Dub',
    'glTexImage2Dub',
    #'glTexImage3Dub',
    'glTexSubImage1Dub',
    'glTexSubImage2Dub',
    #'glTexSubImage3Dub',
    'glTexImage1Dui',
    'glTexImage2Dui',
    #'glTexImage3Dui',
    'glTexSubImage1Dui',
    'glTexSubImage2Dui',
    #'glTexSubImage3Dui',
    'glTexImage1Dus',
    'glTexImage2Dus',
    #'glTexImage3Dus',
    'glTexSubImage1Dus',
    'glTexSubImage2Dus',
    #'glTexSubImage3Dus',
    #'glColorTable',
    #'glGetColorTable',
    #'glColorSubTable',
    #'glConvolutionFilter1D',
    #'glConvolutionFilter2D',
    #'glGetConvolutionFilter',
    #'glSeparableFilter2D',
    #'glGetSeparableFilter',
    #'glGetMinmax',
)
# Generate the typed convenience variants (glReadPixelsb, glGetTexImagef,
# ...) for each (suffix, GL type constant) pair and install them at module
# level via globals().
# NOTE: ``type=type`` in each signature deliberately binds the *current*
# loop value as the default argument (late-binding-closure workaround).
for suffix,type in [
    ('b',simple.GL_BYTE),
    ('d',simple.GL_DOUBLE),
    ('f',simple.GL_FLOAT),
    ('i',simple.GL_INT),
    ('s',simple.GL_SHORT),
    ('ub',simple.GL_UNSIGNED_BYTE),
    ('ui',simple.GL_UNSIGNED_INT),
    ('us',simple.GL_UNSIGNED_SHORT),
]:
    def glReadPixels( x,y,width,height,format,type=type, array=None ):
        """Read specified pixels from the current display buffer

        This typed version returns data in your specified default
        array data-type format, or in the passed array, which will
        be converted to the array-type required by the format.
        """
        # Dimension arguments may be floats; coerce to ints.
        x,y,width,height = asInt(x),asInt(y),asInt(width),asInt(height)
        arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ images.TYPE_TO_ARRAYTYPE.get(type,type) ]
        if array is None:
            # No target supplied: allocate one sized for the rectangle.
            array = images.SetupPixelRead( format, (width,height), type )
        else:
            # Coerce the caller-supplied target to the handler's array type.
            array = arrayType.asArray( array )
        imageData = arrayType.voidDataPointer( array )
        simple.glReadPixels(
            x,y,
            width, height,
            format,type,
            imageData
        )
        return array
    globals()["glReadPixels%s"%(suffix,)] = glReadPixels
    def glGetTexImage( target, level,format,type=type ):
        """Get a texture-level as an image"""
        # Local import; glget queries the texture's dimensions.
        from OpenGL.GL import glget
        # Dimension order is width[, height[, depth]].
        dims = [glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_WIDTH )]
        if target != simple.GL_TEXTURE_1D:
            dims.append( glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_HEIGHT ) )
        if target != simple.GL_TEXTURE_2D:
            dims.append( glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_DEPTH ) )
        array = images.SetupPixelRead( format, tuple(dims), type )
        arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ images.TYPE_TO_ARRAYTYPE.get(type,type) ]
        simple.glGetTexImage(
            target, level, format, type, ctypes.c_void_p( arrayType.dataPointer(array))
        )
        return array
    globals()["glGetTexImage%s"%(suffix,)] = glGetTexImage
## def glGetTexSubImage( target, level,format,type ):
##     """Get a texture-level as an image"""
##     from OpenGL.GL import glget
##     dims = [glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_WIDTH )]
##     if target != simple.GL_TEXTURE_1D:
##         dims.append( glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_HEIGHT ) )
##     if target != simple.GL_TEXTURE_2D:
##         dims.append( glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_DEPTH ) )
##     array = images.SetupPixelRead( format, tuple(dims), type )
##     arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ images.TYPE_TO_ARRAYTYPE.get(type,type) ]
##     simple.glGetTexImage(
##         target, level, format, type, ctypes.c_void_p( arrayType.dataPointer(array))
##     )
##     return array
##     "%s = glGetTexImage"%(suffix)
# Avoid leaking the loop variables into the module namespace.
del suffix,type
# Now the real glReadPixels...
def glReadPixels( x,y,width,height,format,type, array=None, outputType=str ):
    """Read a rectangle of pixels from the current display buffer.

    x,y,width,height -- location and dimensions of the image to read
        from the buffer
    format -- pixel format for the resulting data
    type -- data-format for the resulting data
    array -- optional array/offset into which to store the value
    outputType -- default (str) provides string output of the
        results iff OpenGL.UNSIGNED_BYTE_IMAGES_AS_STRING is True
        and type == GL_UNSIGNED_BYTE.  Any other value will cause
        output in the default array output format.

    returns the pixel data array in the format defined by the
    format, type and outputType
    """
    # Dimension arguments may arrive as floats; normalise them to ints.
    x, y, width, height = (asInt(v) for v in (x, y, width, height))
    handler = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ images.TYPE_TO_ARRAYTYPE.get(type,type) ]
    if array is None:
        # No target supplied: allocate one sized for the request.
        target = images.SetupPixelRead( format, (width,height), type )
    else:
        # Coerce the caller-supplied target to the handler's array type.
        target = handler.asArray( array )
    simple.glReadPixels(
        x, y, width, height, format, type,
        handler.voidDataPointer( target ),
    )
    if outputType is str:
        # Optionally collapse unsigned-byte data to a byte-string.
        return images.returnFormat( target, type )
    return target
def glGetTexImage( target, level,format,type, outputType=str ):
    """Get a texture-level as an image

    target -- enum constant for the texture engine to be read
    level -- the mip-map level to read
    format -- image format to read out the data
    type -- data-type into which to read the data
    outputType -- default (str) provides string output of the
        results iff OpenGL.UNSIGNED_BYTE_IMAGES_AS_STRING is True
        and type == GL_UNSIGNED_BYTE.  Any other value will cause
        output in the default array output format.

    returns the pixel data array in the format defined by the
    format, type and outputType
    """
    # Local import (presumably avoids a circular import at module load
    # time -- TODO confirm).
    from OpenGL.GL import glget
    # Query the texture's dimensions; order is width[, height[, depth]].
    dims = [glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_WIDTH )]
    if target != simple.GL_TEXTURE_1D:
        dims.append( glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_HEIGHT ) )
    if target != simple.GL_TEXTURE_2D:
        dims.append( glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_DEPTH ) )
    # Allocate an output array sized for the queried dimensions.
    array = images.SetupPixelRead( format, tuple(dims), type )
    arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ images.TYPE_TO_ARRAYTYPE.get(type,type) ]
    simple.glGetTexImage(
        target, level, format, type, ctypes.c_void_p( arrayType.dataPointer(array))
    )
    if outputType is str:
        # Optionally collapse unsigned-byte data to a byte-string.
        return images.returnFormat( array, type )
    else:
        return array
# Argument names treated as integral dimensions/offsets: arguments with
# these names are run through asIntConverter so callers may pass floats.
INT_DIMENSION_NAMES = [
    'width','height','depth','x','y','z',
    'xoffset','yoffset','zoffset',
    'start', 'count',
]
def asWrapper( value ):
    """Return value unchanged if already a wrapper.Wrapper, else wrap it."""
    if isinstance( value, wrapper.Wrapper ):
        return value
    return wrapper.wrapper( value )
def asIntConverter( value, *args ):
    """Argument converter: round floats to the nearest int, pass anything
    else through unchanged.  Extra positional args (supplied by the
    wrapper machinery) are ignored."""
    return int(round(value, 0)) if isinstance(value, float) else value
def setDimensionsAsInts( baseOperation ):
    """Set arguments with names in INT_DIMENSION_NAMES to asInt processing

    baseOperation -- raw function or wrapper.Wrapper; coerced to a Wrapper.
    Returns the (wrapped) operation for chaining.
    """
    baseOperation = asWrapper( baseOperation )
    # pyConverterNames takes precedence when the wrapper defines it.
    argNames = getattr( baseOperation, 'pyConverterNames', baseOperation.argNames )
    # Idiom fix: the original enumerate() bound an index that was never
    # used; iterate the names directly.
    for argName in argNames:
        if argName in INT_DIMENSION_NAMES:
            baseOperation.setPyConverter( argName, asIntConverter )
    return baseOperation
class ImageInputConverter( object ):
    """Converts a pixels argument to an array, configuring pixel-store
    packing for the given image rank before the transfer.  The GL type
    is read from another argument of the wrapped call (``typeName``)."""
    def __init__( self, rank, pixelsName=None, typeName='type' ):
        self.rank = rank
        self.pixelsName = pixelsName
        self.typeName = typeName
    def finalise( self, wrapper ):
        """Resolve the type and pixels argument indices from the wrapper."""
        self.typeIndex = wrapper.pyArgIndex( self.typeName )
        self.pixelsIndex = wrapper.pyArgIndex( self.pixelsName )
    def __call__( self, arg, baseOperation, pyArgs ):
        """pyConverter: set up transfer mode/packing, coerce arg to array."""
        images.setupDefaultTransferMode()
        images.rankPacking( self.rank )
        typeConstant = pyArgs[ self.typeIndex ]
        handler = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ images.TYPE_TO_ARRAYTYPE[ typeConstant ] ]
        return handler.asArray( arg )
class TypedImageInputConverter( ImageInputConverter ):
    """Image-input converter with a fixed array handler: dimensions (and
    optionally the GL type) are derived from the pixels argument rather
    than taken from the caller."""
    def __init__( self, rank, pixelsName, arrayType, typeName=None ):
        self.rank = rank
        self.pixelsName = pixelsName
        self.arrayType = arrayType
        self.typeName = typeName
    def finalise( self, wrapper ):
        """Resolve the pixels argument index from the wrapper."""
        self.pixelsIndex = wrapper.pyArgIndex( self.pixelsName )
    def __call__( self, arg, baseOperation, pyArgs ):
        """pyConverter: set up packing and coerce via the fixed handler."""
        images.setupDefaultTransferMode()
        images.rankPacking( self.rank )
        return self.arrayType.asArray( arg )
    def _dimension( self, pyArgs, axis ):
        """Shared helper: read one dimension of the pixels argument."""
        return self.arrayType.dimensions( pyArgs[self.pixelsIndex] )[axis]
    def width( self, pyArgs, index, wrappedOperation ):
        """Width (first dimension) of the pixels argument."""
        return self._dimension( pyArgs, 0 )
    def height( self, pyArgs, index, wrappedOperation ):
        """Height (second dimension) of the pixels argument."""
        return self._dimension( pyArgs, 1 )
    def depth( self, pyArgs, index, wrappedOperation ):
        """Depth (third dimension) of the pixels argument."""
        return self._dimension( pyArgs, 2 )
    def type( self, pyArgs, index, wrappedOperation ):
        """Supply the fixed GL type for pre-bound (typed) variants."""
        return self.typeName
class CompressedImageConverter( object ):
    """Computes the byte-size of a compressed image's data argument."""
    def finalise( self, wrapper ):
        """Resolve the index of the 'data' argument from the wrapper."""
        self.dataIndex = wrapper.pyArgIndex( 'data' )
    def __call__( self, pyArgs, index, wrappedOperation ):
        """Return the byte-count of the data argument."""
        data = pyArgs[ self.dataIndex ]
        return arrays.ArrayType.arrayByteCount( data )
# Argument names recognised by setImageInput as image dimensions.
DIMENSION_NAMES = (
    'width','height','depth'
)
# Names under which the pixel-data argument may appear.
PIXEL_NAMES = (
    'pixels', 'row', 'column',
)
# Names of arguments that carry a byte-count for compressed images.
DATA_SIZE_NAMES = (
    'imageSize',
)
def setImageInput(
    baseOperation, arrayType=None, dimNames=DIMENSION_NAMES,
    pixelName="pixels", typeName=None
):
    """Determine how to convert "pixels" into an image-compatible argument

    baseOperation -- raw GL entry point (or Wrapper) to configure
    arrayType -- when given, a fixed array handler: dimension (and
        optionally type) arguments are then derived from the pixels
        argument instead of being caller-supplied
    dimNames -- argument names treated as image dimensions
    pixelName -- name of the pixel-data argument
    typeName -- fixed GL type constant (typed variants) or the name of
        the type argument
    """
    baseOperation = asWrapper( baseOperation )
    # rank is the count of width,height,depth arguments...
    rank = len([
        # rank is the number of dims we want, not the number we give...
        argName for argName in baseOperation.argNames
        if argName in dimNames
    ]) + 1
    if arrayType:
        converter = TypedImageInputConverter( rank, pixelName, arrayType, typeName=typeName )
        for i,argName in enumerate(baseOperation.argNames):
            if argName in dimNames:
                # Dimension is computed from the pixels array itself.
                baseOperation.setPyConverter( argName )
                baseOperation.setCConverter( argName, getattr(converter,argName) )
            elif argName == 'type' and typeName is not None:
                # Type is implied by the typed variant, not user-supplied.
                baseOperation.setPyConverter( argName )
                baseOperation.setCConverter( argName, converter.type )
    else:
        converter = ImageInputConverter( rank, pixelsName=pixelName, typeName=typeName or 'type' )
        for argName in baseOperation.argNames:
            if argName in DATA_SIZE_NAMES:
                # NOTE(review): ImageInputConverter defines no imageDataSize
                # attribute in this module -- this path looks like it would
                # raise AttributeError if reached; confirm against the
                # OpenGL.wrapper machinery.
                baseOperation.setPyConverter( argName )
                baseOperation.setCConverter( argName, converter.imageDataSize )
    baseOperation.setPyConverter(
        pixelName, converter,
    )
#    baseOperation.setCResolver(
#        pixelName, converter.cResolver
#    )
    return baseOperation
# Wrap the raw image entry points so that dimension arguments accept
# floats and the pixels argument accepts any supported array type.
glDrawPixels = setDimensionsAsInts(
    setImageInput(
        simple.glDrawPixels
    )
)
glTexSubImage2D = setDimensionsAsInts(
    setImageInput(
        simple.glTexSubImage2D
    )
)
glTexSubImage1D = setDimensionsAsInts(
    setImageInput(
        simple.glTexSubImage1D
    )
)
glTexImage2D = setDimensionsAsInts(
    setImageInput(
        simple.glTexImage2D
    )
)
glTexImage1D = setDimensionsAsInts(
    setImageInput(
        simple.glTexImage1D
    )
)
def typedImageFunction( suffix, arrayConstant, baseFunction ):
    """Produce a typed variant (name, function) of the given image function

    suffix -- type suffix appended to the base function's name
    arrayConstant -- GL array-type constant implying the handler and type
    baseFunction -- raw entry point to wrap (may be falsy when missing)
    """
    functionName = '%s%s' % (baseFunction.__name__, suffix)
    if not baseFunction:
        # Entry point unavailable on this platform; pass it through.
        return functionName, baseFunction
    arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ arrayConstant ]
    wrapped = setDimensionsAsInts(
        setImageInput(
            baseFunction,
            arrayType,
            typeName = arrayConstant,
        )
    )
    return functionName, wrapped
def _setDataSize( baseFunction, argument='imageSize' ):
    """Make the named size argument be computed from the data argument."""
    if not baseFunction:
        # Entry point unavailable; nothing to configure.
        return baseFunction
    converter = CompressedImageConverter()
    wrapped = asWrapper( baseFunction ).setPyConverter( argument )
    return wrapped.setCConverter( argument, converter )
def compressedImageFunction( baseFunction ):
    """Apply imageSize derivation and int-dimension conversion to
    baseFunction; pass falsy (unavailable) entry points through."""
    if not baseFunction:
        return baseFunction
    return setDimensionsAsInts(
        _setDataSize( baseFunction, argument='imageSize' )
    )
# Generate the typed variants (glTexImage1Db, glDrawPixelsf, ...) for each
# (suffix, array-type constant) pair and install them at module level.
for suffix,arrayConstant in [
    ('b', simple.GL_BYTE),
    ('f', simple.GL_FLOAT),
    ('i', simple.GL_INT),
    ('s', simple.GL_SHORT),
    ('ub', simple.GL_UNSIGNED_BYTE),
    ('ui', simple.GL_UNSIGNED_INT),
    ('us', simple.GL_UNSIGNED_SHORT),
]:
    for functionName in (
        'glTexImage1D','glTexImage2D',
        'glTexSubImage1D','glTexSubImage2D',
        'glDrawPixels',
        #'glTexSubImage3D','glTexImage3D', # extension/1.2 standard
    ):
        # typedImageFunction returns the suffixed name and the wrapped
        # callable; install it into the module namespace.
        functionName, function = typedImageFunction(
            suffix, arrayConstant, getattr(simple,functionName),
        )
        globals()[functionName] = function
        del function, functionName
# Avoid leaking the loop variables into the module namespace.
del suffix,arrayConstant
| 34.801639 | 103 | 0.68218 | from OpenGL.raw import GL as simple
from OpenGL import images, arrays, wrapper, platform
import ctypes
def asInt( value ):
    """Coerce a float to the nearest int; pass other values through."""
    if isinstance( value, float ):
        return int(round(value,0))
    return value
_LUMINANCE : 1,
simple.GL_LUMINANCE_ALPHA : 2,
simple.GL_COLOR_INDEX : 1,
simple.GL_STENCIL_INDEX : 1,
simple.GL_DEPTH_COMPONENT : 1,
simple.GL_RGB : 3,
simple.GL_BGR : 3,
simple.GL_RGBA : 4,
simple.GL_BGRA : 4,
simple.GL_ABGR_EXT : 4,
simple.GL_CMYK_EXT : 4,
simple.GL_CMYKA_EXT : 5,
simple.GL_YCRCB_422_SGIX : 1,
simple.GL_YCRCB_444_SGIX : 1,
simple.GL_FORMAT_SUBSAMPLE_24_24_OML : 1,
simple.GL_FORMAT_SUBSAMPLE_244_244_OML : 1,
} )
images.TYPE_TO_ARRAYTYPE.update( {
simple.GL_UNSIGNED_BYTE_3_3_2 : simple.GL_UNSIGNED_BYTE,
simple.GL_UNSIGNED_BYTE_2_3_3_REV : simple.GL_UNSIGNED_BYTE,
simple.GL_UNSIGNED_SHORT_4_4_4_4 : simple.GL_UNSIGNED_SHORT,
simple.GL_UNSIGNED_SHORT_4_4_4_4_REV : simple.GL_UNSIGNED_SHORT,
simple.GL_UNSIGNED_SHORT_5_5_5_1 : simple.GL_UNSIGNED_SHORT,
simple.GL_UNSIGNED_SHORT_1_5_5_5_REV : simple.GL_UNSIGNED_SHORT,
simple.GL_UNSIGNED_SHORT_5_6_5 : simple.GL_UNSIGNED_SHORT,
simple.GL_UNSIGNED_SHORT_5_6_5_REV : simple.GL_UNSIGNED_SHORT,
simple.GL_UNSIGNED_INT_8_8_8_8 : simple.GL_UNSIGNED_INT,
simple.GL_UNSIGNED_INT_8_8_8_8_REV : simple.GL_UNSIGNED_INT,
simple.GL_UNSIGNED_INT_10_10_10_2 : simple.GL_UNSIGNED_INT,
simple.GL_UNSIGNED_INT_2_10_10_10_REV : simple.GL_UNSIGNED_INT,
simple.GL_UNSIGNED_BYTE : simple.GL_UNSIGNED_BYTE,
simple.GL_BYTE: simple.GL_BYTE,
simple.GL_UNSIGNED_SHORT : simple.GL_UNSIGNED_SHORT,
simple.GL_SHORT : simple.GL_SHORT,
simple.GL_UNSIGNED_INT : simple.GL_UNSIGNED_INT,
simple.GL_INT : simple.GL_INT,
simple.GL_FLOAT : simple.GL_FLOAT,
simple.GL_DOUBLE : simple.GL_DOUBLE,
simple.GL_BITMAP : simple.GL_UNSIGNED_BYTE,
} )
images.TIGHT_PACK_FORMATS.update({
simple.GL_UNSIGNED_BYTE_3_3_2 : 3,
simple.GL_UNSIGNED_BYTE_2_3_3_REV : 3,
simple.GL_UNSIGNED_SHORT_4_4_4_4 : 4,
simple.GL_UNSIGNED_SHORT_4_4_4_4_REV : 4,
simple.GL_UNSIGNED_SHORT_5_5_5_1 : 4,
simple.GL_UNSIGNED_SHORT_1_5_5_5_REV : 4,
simple.GL_UNSIGNED_SHORT_5_6_5 : 3,
simple.GL_UNSIGNED_SHORT_5_6_5_REV : 3,
simple.GL_UNSIGNED_INT_8_8_8_8 : 4,
simple.GL_UNSIGNED_INT_8_8_8_8_REV : 4,
simple.GL_UNSIGNED_INT_10_10_10_2 : 4,
simple.GL_UNSIGNED_INT_2_10_10_10_REV : 4,
simple.GL_BITMAP: 8,
})
images.RANK_PACKINGS.update( {
4: [
(simple.glPixelStorei,simple.GL_PACK_SKIP_VOLUMES_SGIS, 0),
(simple.glPixelStorei,simple.GL_PACK_IMAGE_DEPTH_SGIS, 0),
(simple.glPixelStorei,simple.GL_PACK_ALIGNMENT, 1),
],
3: [
(simple.glPixelStorei,simple.GL_PACK_SKIP_IMAGES, 0),
(simple.glPixelStorei,simple.GL_PACK_IMAGE_HEIGHT, 0),
(simple.glPixelStorei,simple.GL_PACK_ALIGNMENT, 1),
],
2: [
(simple.glPixelStorei,simple.GL_PACK_ROW_LENGTH, 0),
(simple.glPixelStorei,simple.GL_PACK_SKIP_ROWS, 0),
(simple.glPixelStorei,simple.GL_PACK_ALIGNMENT, 1),
],
1: [
(simple.glPixelStorei,simple.GL_PACK_SKIP_PIXELS, 0),
(simple.glPixelStorei,simple.GL_PACK_ALIGNMENT, 1),
],
} )
__all__ = (
'glReadPixels',
'glReadPixelsb',
'glReadPixelsd',
'glReadPixelsf',
'glReadPixelsi',
'glReadPixelss',
'glReadPixelsub',
'glReadPixelsui',
'glReadPixelsus',
'glGetTexImage',
'glDrawPixels',
'glDrawPixelsb',
'glDrawPixelsf',
'glDrawPixelsi',
'glDrawPixelss',
'glDrawPixelsub',
'glDrawPixelsui',
'glDrawPixelsus',
'glTexSubImage2D',
'glTexSubImage1D',
'glTexImage1D',
'glTexImage2D',
'glGetTexImageb',
'glGetTexImaged',
'glGetTexImagef',
'glGetTexImagei',
'glGetTexImages',
'glGetTexImageub',
'glGetTexImageui',
'glGetTexImageus',
'glTexImage1Db',
'glTexImage2Db',
'glTexSubImage1Db',
'glTexSubImage2Db',
'glTexImage1Df',
'glTexImage2Df',
'glTexSubImage1Df',
'glTexSubImage2Df',
'glTexImage1Di',
'glTexImage2Di',
'glTexSubImage1Di',
'glTexSubImage2Di',
'glTexImage1Ds',
'glTexImage2Ds',
'glTexSubImage1Ds',
'glTexSubImage2Ds',
'glTexImage1Dub',
'glTexImage2Dub',
'glTexSubImage1Dub',
'glTexSubImage2Dub',
'glTexImage1Dui',
'glTexImage2Dui',
'glTexSubImage1Dui',
'glTexSubImage2Dui',
'glTexImage1Dus',
'glTexImage2Dus',
'glTexSubImage1Dus',
'glTexSubImage2Dus',
)
for suffix,type in [
('b',simple.GL_BYTE),
('d',simple.GL_DOUBLE),
('f',simple.GL_FLOAT),
('i',simple.GL_INT),
('s',simple.GL_SHORT),
('ub',simple.GL_UNSIGNED_BYTE),
('ui',simple.GL_UNSIGNED_INT),
('us',simple.GL_UNSIGNED_SHORT),
]:
def glReadPixels( x,y,width,height,format,type=type, array=None ):
x,y,width,height = asInt(x),asInt(y),asInt(width),asInt(height)
arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ images.TYPE_TO_ARRAYTYPE.get(type,type) ]
if array is None:
array = images.SetupPixelRead( format, (width,height), type )
else:
array = arrayType.asArray( array )
imageData = arrayType.voidDataPointer( array )
simple.glReadPixels(
x,y,
width, height,
format,type,
imageData
)
return array
globals()["glReadPixels%s"%(suffix,)] = glReadPixels
def glGetTexImage( target, level,format,type=type ):
from OpenGL.GL import glget
dims = [glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_WIDTH )]
if target != simple.GL_TEXTURE_1D:
dims.append( glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_HEIGHT ) )
if target != simple.GL_TEXTURE_2D:
dims.append( glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_DEPTH ) )
array = images.SetupPixelRead( format, tuple(dims), type )
arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ images.TYPE_TO_ARRAYTYPE.get(type,type) ]
simple.glGetTexImage(
target, level, format, type, ctypes.c_void_p( arrayType.dataPointer(array))
)
return array
globals()["glGetTexImage%s"%(suffix,)] = glGetTexImage
glget
dims = [glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_WIDTH )]
if target != simple.GL_TEXTURE_1D:
dims.append( glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_HEIGHT ) )
if target != simple.GL_TEXTURE_2D:
dims.append( glget.glGetTexLevelParameteriv( target, level, simple.GL_TEXTURE_DEPTH ) )
array = images.SetupPixelRead( format, tuple(dims), type )
arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ images.TYPE_TO_ARRAYTYPE.get(type,type) ]
simple.glGetTexImage(
target, level, format, type, ctypes.c_void_p( arrayType.dataPointer(array))
)
if outputType is str:
return images.returnFormat( array, type )
else:
return array
INT_DIMENSION_NAMES = [
'width','height','depth','x','y','z',
'xoffset','yoffset','zoffset',
'start', 'count',
]
def asWrapper( value ):
    """Return value unchanged if already a wrapper.Wrapper, else wrap it."""
    if not isinstance( value, wrapper.Wrapper ):
        return wrapper.wrapper( value )
    return value
def asIntConverter( value, *args ):
    """Converter: round floats to int, pass other values through; extra
    positional args (from the wrapper machinery) are ignored."""
    if isinstance( value, float ):
        return int(round(value,0))
    return value
def setDimensionsAsInts( baseOperation ):
    """Attach asIntConverter to every dimension-like argument of the
    wrapped operation (names listed in INT_DIMENSION_NAMES)."""
    baseOperation = asWrapper( baseOperation )
    # pyConverterNames takes precedence when the wrapper defines it.
    argNames = getattr( baseOperation, 'pyConverterNames', baseOperation.argNames )
    # NOTE: the enumerate() index i is unused here.
    for i,argName in enumerate(argNames):
        if argName in INT_DIMENSION_NAMES:
            baseOperation.setPyConverter( argName, asIntConverter )
    return baseOperation
class ImageInputConverter( object ):
    """Converts a pixels argument to an array, configuring pixel-store
    packing for the given image rank; the GL type is read from another
    argument of the wrapped call (typeName)."""
    def __init__( self, rank, pixelsName=None, typeName='type' ):
        self.rank = rank
        self.typeName = typeName
        self.pixelsName = pixelsName
    def finalise( self, wrapper ):
        """Resolve the type and pixels argument indices from the wrapper."""
        self.typeIndex = wrapper.pyArgIndex( self.typeName )
        self.pixelsIndex = wrapper.pyArgIndex( self.pixelsName )
    def __call__( self, arg, baseOperation, pyArgs ):
        """pyConverter: set up transfer mode/packing, coerce arg to array."""
        images.setupDefaultTransferMode()
        images.rankPacking( self.rank )
        type = pyArgs[ self.typeIndex ]
        arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ images.TYPE_TO_ARRAYTYPE[ type ] ]
        return arrayType.asArray( arg )
class TypedImageInputConverter( ImageInputConverter ):
    """Image-input converter with a fixed array handler: dimensions (and
    optionally the GL type) are derived from the pixels argument."""
    def __init__( self, rank, pixelsName, arrayType, typeName=None ):
        self.rank = rank
        self.arrayType = arrayType
        self.pixelsName = pixelsName
        self.typeName = typeName
    def __call__( self, arg, baseOperation, pyArgs ):
        """pyConverter: set up packing and coerce via the fixed handler."""
        images.setupDefaultTransferMode()
        images.rankPacking( self.rank )
        return self.arrayType.asArray( arg )
    def finalise( self, wrapper ):
        """Resolve the pixels argument index from the wrapper."""
        self.pixelsIndex = wrapper.pyArgIndex( self.pixelsName )
    def width( self, pyArgs, index, wrappedOperation ):
        """Width (first dimension) of the pixels argument."""
        return self.arrayType.dimensions( pyArgs[self.pixelsIndex] )[0]
    def height( self, pyArgs, index, wrappedOperation ):
        """Height (second dimension) of the pixels argument."""
        return self.arrayType.dimensions( pyArgs[self.pixelsIndex] )[1]
    def depth( self, pyArgs, index, wrappedOperation ):
        """Depth (third dimension) of the pixels argument."""
        return self.arrayType.dimensions( pyArgs[self.pixelsIndex] )[2]
    def type( self, pyArgs, index, wrappedOperation ):
        """Supply the fixed GL type for pre-bound (typed) variants."""
        return self.typeName
class CompressedImageConverter( object ):
    """cConverter computing an imageSize value from the 'data' argument."""
    def finalise( self, wrapper ):
        # Remember where the data argument lives in the pyArgs tuple.
        self.dataIndex = wrapper.pyArgIndex( 'data' )
    def __call__( self, pyArgs, index, wrappedOperation ):
        data = pyArgs[ self.dataIndex ]
        return arrays.ArrayType.arrayByteCount( data )
# Argument names recognised as image dimensions when computing an
# operation's rank in setImageInput().
DIMENSION_NAMES = (
    'width','height','depth'
)
# Candidate names for the pixel-data argument.
PIXEL_NAMES = (
    'pixels', 'row', 'column',
)
# Argument names holding the byte size of compressed image data.
DATA_SIZE_NAMES = (
    'imageSize',
)
def setImageInput(
    baseOperation, arrayType=None, dimNames=DIMENSION_NAMES,
    pixelName="pixels", typeName=None
):
    """Determine how to convert the pixels argument of an image operation.

    Rank is the number of dimension arguments (from dimNames) plus one.
    With an explicit arrayType, dimension (and optionally type) arguments
    are dropped from the Python signature and derived from the pixel
    array itself; otherwise the runtime type argument selects the array
    type. Returns the wrapped operation.
    """
    baseOperation = asWrapper( baseOperation )
    # rank = number of dimension args + 1 (the format/component axis)
    rank = len([
        argName for argName in baseOperation.argNames
        if argName in dimNames
    ]) + 1
    if arrayType:
        converter = TypedImageInputConverter( rank, pixelName, arrayType, typeName=typeName )
        for i,argName in enumerate(baseOperation.argNames):
            if argName in dimNames:
                # dimension comes from the pixel array, not the caller
                baseOperation.setPyConverter( argName )
                baseOperation.setCConverter( argName, getattr(converter,argName) )
            elif argName == 'type' and typeName is not None:
                baseOperation.setPyConverter( argName )
                baseOperation.setCConverter( argName, converter.type )
    else:
        converter = ImageInputConverter( rank, pixelsName=pixelName, typeName=typeName or 'type' )
        for argName in baseOperation.argNames:
            if argName in DATA_SIZE_NAMES:
                # NOTE(review): ImageInputConverter defines no imageDataSize
                # attribute in this file -- confirm this branch is reachable
                # or that the attribute is provided elsewhere.
                baseOperation.setPyConverter( argName )
                baseOperation.setCConverter( argName, converter.imageDataSize )
    baseOperation.setPyConverter(
        pixelName, converter,
    )
    return baseOperation
# Wrap the raw image entry points so dimension arguments are coerced to
# ints and the pixels argument accepts array-compatible objects.
glDrawPixels = setDimensionsAsInts(
    setImageInput(
        simple.glDrawPixels
    )
)
glTexSubImage2D = setDimensionsAsInts(
    setImageInput(
        simple.glTexSubImage2D
    )
)
glTexSubImage1D = setDimensionsAsInts(
    setImageInput(
        simple.glTexSubImage1D
    )
)
glTexImage2D = setDimensionsAsInts(
    setImageInput(
        simple.glTexImage2D
    )
)
glTexImage1D = setDimensionsAsInts(
    setImageInput(
        simple.glTexImage1D
    )
)
def typedImageFunction( suffix, arrayConstant, baseFunction ):
    """Build a typed variant (e.g. glTexImage2Dub) of an image function.

    Returns (name, function); if baseFunction is falsy it is returned
    unwrapped under the suffixed name.
    """
    functionName = '%s%s' % (baseFunction.__name__, suffix)
    if not baseFunction:
        return functionName, baseFunction
    arrayType = arrays.GL_CONSTANT_TO_ARRAY_TYPE[ arrayConstant ]
    function = setDimensionsAsInts(
        setImageInput(
            baseFunction,
            arrayType,
            typeName = arrayConstant,
        )
    )
    return functionName, function
def _setDataSize( baseFunction, argument='imageSize' ):
    """Configure *argument* to be computed from the data pointer's size."""
    if not baseFunction:
        return baseFunction
    converter = CompressedImageConverter()
    wrapped = asWrapper( baseFunction )
    return wrapped.setPyConverter( argument ).setCConverter(
        argument, converter )
def compressedImageFunction( baseFunction ):
    """Wrap a compressed-image entry point: int dimensions, derived size."""
    if not baseFunction:
        return baseFunction
    return setDimensionsAsInts(
        _setDataSize( baseFunction, argument='imageSize' )
    )
# Generate typed variants (glTexImage2Db, glTexImage2Df, ...) of the image
# entry points, one per (suffix, GL type constant) pair, and install them
# in the module namespace.
# NOTE: the source text here was garbled (missing '):' closing the inner
# loop and a truncated 'functionName, function =' assignment); restored.
for suffix,arrayConstant in [
    ('b', simple.GL_BYTE),
    ('f', simple.GL_FLOAT),
    ('i', simple.GL_INT),
    ('s', simple.GL_SHORT),
    ('ub', simple.GL_UNSIGNED_BYTE),
    ('ui', simple.GL_UNSIGNED_INT),
    ('us', simple.GL_UNSIGNED_SHORT),
]:
    for functionName in (
        'glTexImage1D','glTexImage2D',
        'glTexSubImage1D','glTexSubImage2D',
        'glDrawPixels',
    ):
        functionName, function = typedImageFunction(
            suffix, arrayConstant, getattr(simple,functionName),
        )
        globals()[functionName] = function
        del function, functionName
del suffix,arrayConstant
| true | true |
1c3b911fd355be54f104f5f1f71cbc02840658dd | 40,479 | py | Python | nova/tests/compute/test_compute_mgr.py | bopopescu/nova-40 | d8d5e4c4e30d0e605001ebab9f19005d2ea96f99 | [
"Apache-2.0"
] | null | null | null | nova/tests/compute/test_compute_mgr.py | bopopescu/nova-40 | d8d5e4c4e30d0e605001ebab9f19005d2ea96f99 | [
"Apache-2.0"
] | null | null | null | nova/tests/compute/test_compute_mgr.py | bopopescu/nova-40 | d8d5e4c4e30d0e605001ebab9f19005d2ea96f99 | [
"Apache-2.0"
] | 1 | 2020-07-24T09:44:17.000Z | 2020-07-24T09:44:17.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for ComputeManager()."""
import time
import mox
from oslo.config import cfg
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import db
from nova import exception
from nova.network import model as network_model
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova.openstack.common import importutils
from nova.openstack.common import uuidutils
from nova import test
from nova.tests.compute import fake_resource_tracker
from nova.tests import fake_instance
from nova import utils
CONF = cfg.CONF
# compute_manager is declared in nova.service; import it so that
# CONF.compute_manager resolves in setUp().
CONF.import_opt('compute_manager', 'nova.service')
class ComputeManagerUnitTestCase(test.NoDBTestCase):
    def setUp(self):
        """Create the compute manager under test and a request context."""
        super(ComputeManagerUnitTestCase, self).setUp()
        self.compute = importutils.import_object(CONF.compute_manager)
        self.context = context.RequestContext('fake', 'fake')
    def test_allocate_network_succeeds_after_retries(self):
        """Failures are retried with backoff 1,2,4,8,16 then capped at 30s;
        the result of the eventual successful attempt is returned."""
        self.flags(network_allocate_retries=8)
        nwapi = self.compute.network_api
        self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
        self.mox.StubOutWithMock(time, 'sleep')
        instance = {}
        is_vpn = 'fake-is-vpn'
        req_networks = 'fake-req-networks'
        macs = 'fake-macs'
        sec_groups = 'fake-sec-groups'
        final_result = 'meow'
        dhcp_options = None
        expected_sleep_times = [1, 2, 4, 8, 16, 30, 30, 30]
        # Record eight failing attempts, each followed by its backoff sleep.
        for sleep_time in expected_sleep_times:
            nwapi.allocate_for_instance(
                self.context, instance, vpn=is_vpn,
                requested_networks=req_networks, macs=macs,
                security_groups=sec_groups,
                dhcp_options=dhcp_options).AndRaise(
                    test.TestingException())
            time.sleep(sleep_time)
        # The ninth attempt succeeds.
        nwapi.allocate_for_instance(
            self.context, instance, vpn=is_vpn,
            requested_networks=req_networks, macs=macs,
            security_groups=sec_groups,
            dhcp_options=dhcp_options).AndReturn(final_result)
        self.mox.ReplayAll()
        res = self.compute._allocate_network_async(self.context, instance,
                                                   req_networks,
                                                   macs,
                                                   sec_groups,
                                                   is_vpn,
                                                   dhcp_options)
        self.assertEqual(final_result, res)
    def test_allocate_network_fails(self):
        """With retries=0 a single allocation failure propagates."""
        self.flags(network_allocate_retries=0)
        nwapi = self.compute.network_api
        self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
        instance = {}
        is_vpn = 'fake-is-vpn'
        req_networks = 'fake-req-networks'
        macs = 'fake-macs'
        sec_groups = 'fake-sec-groups'
        dhcp_options = None
        nwapi.allocate_for_instance(
            self.context, instance, vpn=is_vpn,
            requested_networks=req_networks, macs=macs,
            security_groups=sec_groups,
            dhcp_options=dhcp_options).AndRaise(test.TestingException())
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.compute._allocate_network_async,
                          self.context, instance, req_networks, macs,
                          sec_groups, is_vpn, dhcp_options)
    def test_allocate_network_neg_conf_value_treated_as_zero(self):
        """A negative retry count behaves like zero: exactly one attempt."""
        self.flags(network_allocate_retries=-1)
        nwapi = self.compute.network_api
        self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
        instance = {}
        is_vpn = 'fake-is-vpn'
        req_networks = 'fake-req-networks'
        macs = 'fake-macs'
        sec_groups = 'fake-sec-groups'
        dhcp_options = None
        # Only attempted once.
        nwapi.allocate_for_instance(
            self.context, instance, vpn=is_vpn,
            requested_networks=req_networks, macs=macs,
            security_groups=sec_groups,
            dhcp_options=dhcp_options).AndRaise(test.TestingException())
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.compute._allocate_network_async,
                          self.context, instance, req_networks, macs,
                          sec_groups, is_vpn, dhcp_options)
    def test_init_host(self):
        """init_host initialises the driver and re-initialises each instance
        on this host; with defer_iptables_apply set, the work is bracketed
        by filter_defer_apply_on/off."""
        our_host = self.compute.host
        fake_context = 'fake-context'
        inst = fake_instance.fake_db_instance(
                vm_state=vm_states.ACTIVE,
                info_cache={'instance_uuid': 'fake-uuid',
                            'network_info': None},
                security_groups=None)
        startup_instances = [inst, inst, inst]
        def _do_mock_calls(defer_iptables_apply):
            # Expected call sequence for one init_host() invocation.
            self.compute.driver.init_host(host=our_host)
            context.get_admin_context().AndReturn(fake_context)
            db.instance_get_all_by_host(
                    fake_context, our_host, columns_to_join=['info_cache']
                    ).AndReturn(startup_instances)
            if defer_iptables_apply:
                self.compute.driver.filter_defer_apply_on()
            self.compute._destroy_evacuated_instances(fake_context)
            self.compute._init_instance(fake_context,
                                        mox.IsA(instance_obj.Instance))
            self.compute._init_instance(fake_context,
                                        mox.IsA(instance_obj.Instance))
            self.compute._init_instance(fake_context,
                                        mox.IsA(instance_obj.Instance))
            if defer_iptables_apply:
                self.compute.driver.filter_defer_apply_off()
        self.mox.StubOutWithMock(self.compute.driver, 'init_host')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'filter_defer_apply_on')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'filter_defer_apply_off')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(context, 'get_admin_context')
        self.mox.StubOutWithMock(self.compute,
                                 '_destroy_evacuated_instances')
        self.mox.StubOutWithMock(self.compute,
                                 '_init_instance')
        # Test with defer_iptables_apply
        self.flags(defer_iptables_apply=True)
        _do_mock_calls(True)
        self.mox.ReplayAll()
        self.compute.init_host()
        self.mox.VerifyAll()
        # Test without defer_iptables_apply
        self.mox.ResetAll()
        self.flags(defer_iptables_apply=False)
        _do_mock_calls(False)
        self.mox.ReplayAll()
        self.compute.init_host()
        # tearDown() uses context.get_admin_context(), so we have
        # to do the verification here and unstub it.
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
    def test_init_host_with_deleted_migration(self):
        """An instance still present on the driver but gone from the DB
        (e.g. left over from a failed migration) is destroyed on init."""
        our_host = self.compute.host
        not_our_host = 'not-' + our_host
        fake_context = 'fake-context'
        deleted_instance = {
            'name': 'fake-name',
            'host': not_our_host,
            'uuid': 'fake-uuid',
        }
        self.mox.StubOutWithMock(self.compute.driver, 'init_host')
        self.mox.StubOutWithMock(self.compute.driver, 'destroy')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(context, 'get_admin_context')
        self.mox.StubOutWithMock(self.compute, 'init_virt_events')
        self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
        self.mox.StubOutWithMock(self.compute, '_init_instance')
        self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
        self.compute.driver.init_host(host=our_host)
        context.get_admin_context().AndReturn(fake_context)
        db.instance_get_all_by_host(fake_context, our_host,
                                    columns_to_join=['info_cache']
                                    ).AndReturn([])
        self.compute.init_virt_events()
        # simulate failed instance
        self.compute._get_instances_on_driver(
            fake_context, {'deleted': False}).AndReturn([deleted_instance])
        self.compute._get_instance_nw_info(fake_context, deleted_instance
            ).AndRaise(exception.InstanceNotFound(
                instance_id=deleted_instance['uuid']))
        # ensure driver.destroy is called so that driver may
        # clean up any dangling files
        self.compute.driver.destroy(deleted_instance,
            mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.compute.init_host()
        # tearDown() uses context.get_admin_context(), so we have
        # to do the verification here and unstub it.
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
    def test_init_instance_failed_resume_sets_error(self):
        """If resume_state_on_host_boot raises during _init_instance, the
        instance is put into the ERROR state."""
        instance = {
            'uuid': 'fake-uuid',
            'info_cache': None,
            'power_state': power_state.RUNNING,
            'vm_state': vm_states.ACTIVE,
            'task_state': None,
        }
        self.flags(resume_guests_state_on_host_boot=True)
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'resume_state_on_host_boot')
        self.mox.StubOutWithMock(self.compute,
                                 '_get_instance_volume_block_device_info')
        self.mox.StubOutWithMock(self.compute,
                                 '_set_instance_error_state')
        self.compute._get_power_state(mox.IgnoreArg(),
                instance).AndReturn(power_state.SHUTDOWN)
        self.compute.driver.plug_vifs(instance, mox.IgnoreArg())
        self.compute._get_instance_volume_block_device_info(mox.IgnoreArg(),
                instance).AndReturn('fake-bdm')
        self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(),
                instance, mox.IgnoreArg(),
                'fake-bdm').AndRaise(test.TestingException)
        self.compute._set_instance_error_state(mox.IgnoreArg(),
                instance['uuid'])
        self.mox.ReplayAll()
        self.compute._init_instance('fake-context', instance)
    def _test_init_instance_reverts_crashed_migrations(self,
                                                       old_vm_state=None):
        """Helper: an instance stuck in RESIZE_MIGRATING is reverted during
        _init_instance; it is powered back on only when its pre-migration
        state was ACTIVE (or unrecorded)."""
        power_on = True if (not old_vm_state or
                            old_vm_state == vm_states.ACTIVE) else False
        sys_meta = {
            'old_vm_state': old_vm_state
        }
        instance = {
            'uuid': 'foo',
            'vm_state': vm_states.ERROR,
            'task_state': task_states.RESIZE_MIGRATING,
            'power_state': power_state.SHUTDOWN,
            'system_metadata': sys_meta
        }
        fixed = dict(instance, task_state=None)
        self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
        self.mox.StubOutWithMock(utils, 'instance_sys_meta')
        self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'finish_revert_migration')
        self.mox.StubOutWithMock(self.compute,
                                 '_get_instance_volume_block_device_info')
        self.mox.StubOutWithMock(self.compute.driver, 'get_info')
        self.mox.StubOutWithMock(self.compute, '_instance_update')
        compute_utils.get_nw_info_for_instance(instance).AndReturn(
            network_model.NetworkInfo())
        self.compute.driver.plug_vifs(instance, [])
        utils.instance_sys_meta(instance).AndReturn(sys_meta)
        self.compute._get_instance_volume_block_device_info(
            self.context, instance).AndReturn([])
        self.compute.driver.finish_revert_migration(instance, [], [], power_on)
        self.compute._instance_update(self.context, instance['uuid'],
                                      task_state=None).AndReturn(fixed)
        self.compute.driver.get_info(fixed).AndReturn(
            {'state': power_state.SHUTDOWN})
        self.mox.ReplayAll()
        self.compute._init_instance(self.context, instance)
    def test_init_instance_reverts_crashed_migration_from_active(self):
        """Previously ACTIVE instances are powered back on after revert."""
        self._test_init_instance_reverts_crashed_migrations(
            old_vm_state=vm_states.ACTIVE)
    def test_init_instance_reverts_crashed_migration_from_stopped(self):
        """Previously STOPPED instances stay powered off after revert."""
        self._test_init_instance_reverts_crashed_migrations(
            old_vm_state=vm_states.STOPPED)
    def test_init_instance_reverts_crashed_migration_no_old_state(self):
        """With no recorded pre-migration state, the instance is powered on."""
        self._test_init_instance_reverts_crashed_migrations(old_vm_state=None)
    def test_get_instances_on_driver(self):
        """_get_instances_on_driver resolves the driver's instance uuids via
        the conductor and returns the matching instances."""
        fake_context = context.get_admin_context()
        driver_instances = []
        for x in xrange(10):
            instance = dict(uuid=uuidutils.generate_uuid())
            driver_instances.append(instance)
        self.mox.StubOutWithMock(self.compute.driver,
                                 'list_instance_uuids')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'instance_get_all_by_filters')
        self.compute.driver.list_instance_uuids().AndReturn(
            [inst['uuid'] for inst in driver_instances])
        self.compute.conductor_api.instance_get_all_by_filters(
            fake_context,
            {'uuid': [inst['uuid'] for
                      inst in driver_instances]},
            columns_to_join=[]).AndReturn(
                driver_instances)
        self.mox.ReplayAll()
        result = self.compute._get_instances_on_driver(fake_context,
                                                       columns_to_join=[])
        self.assertEqual(driver_instances, result)
    def test_get_instances_on_driver_fallback(self):
        # Test getting instances when driver doesn't support
        # 'list_instance_uuids'
        """Falls back to list_instances() + name matching when the driver
        raises NotImplementedError from list_instance_uuids()."""
        self.compute.host = 'host'
        filters = {'host': self.compute.host}
        fake_context = context.get_admin_context()
        all_instances = []
        driver_instances = []
        for x in xrange(10):
            instance = dict(name=uuidutils.generate_uuid())
            if x % 2:
                # only every other instance is "known" to the driver
                driver_instances.append(instance)
            all_instances.append(instance)
        self.mox.StubOutWithMock(self.compute.driver,
                                 'list_instance_uuids')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'list_instances')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'instance_get_all_by_filters')
        self.compute.driver.list_instance_uuids().AndRaise(
            NotImplementedError())
        self.compute.driver.list_instances().AndReturn(
            [inst['name'] for inst in driver_instances])
        self.compute.conductor_api.instance_get_all_by_filters(
            fake_context, filters,
            columns_to_join=None).AndReturn(all_instances)
        self.mox.ReplayAll()
        result = self.compute._get_instances_on_driver(fake_context, filters)
        self.assertEqual(driver_instances, result)
    def test_instance_usage_audit(self):
        """_instance_usage_audit emits a usage-exists notification for each
        instance active in the audit window."""
        instances = [{'uuid': 'foo'}]
        self.flags(instance_usage_audit=True)
        self.stubs.Set(compute_utils, 'has_audit_been_run',
                       lambda *a, **k: False)
        self.stubs.Set(self.compute.conductor_api,
                       'instance_get_active_by_window_joined',
                       lambda *a, **k: instances)
        self.stubs.Set(compute_utils, 'start_instance_usage_audit',
                       lambda *a, **k: None)
        self.stubs.Set(compute_utils, 'finish_instance_usage_audit',
                       lambda *a, **k: None)
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'notify_usage_exists')
        self.compute.conductor_api.notify_usage_exists(
            self.context, instances[0], ignore_missing_network_data=False)
        self.mox.ReplayAll()
        self.compute._instance_usage_audit(self.context)
    def _get_sync_instance(self, power_state, vm_state, task_state=None):
        """Build an Instance on this host with stubbed refresh()/save()."""
        instance = instance_obj.Instance()
        instance.uuid = 'fake-uuid'
        instance.power_state = power_state
        instance.vm_state = vm_state
        instance.host = self.compute.host
        instance.task_state = task_state
        self.mox.StubOutWithMock(instance, 'refresh')
        self.mox.StubOutWithMock(instance, 'save')
        return instance
    def test_sync_instance_power_state_match(self):
        """No update is saved when DB and driver power states agree."""
        instance = self._get_sync_instance(power_state.RUNNING,
                                           vm_states.ACTIVE)
        instance.refresh()
        self.mox.ReplayAll()
        self.compute._sync_instance_power_state(self.context, instance,
                                                power_state.RUNNING)
    def test_sync_instance_power_state_running_stopped(self):
        """The DB power state is updated to match the driver's SHUTDOWN."""
        instance = self._get_sync_instance(power_state.RUNNING,
                                           vm_states.ACTIVE)
        instance.refresh()
        instance.save()
        self.mox.ReplayAll()
        self.compute._sync_instance_power_state(self.context, instance,
                                                power_state.SHUTDOWN)
        self.assertEqual(instance.power_state, power_state.SHUTDOWN)
    def _test_sync_to_stop(self, power_state, vm_state, driver_power_state,
                           stop=True, force=False):
        """Helper: verify whether _sync_instance_power_state (force-)stops
        the instance for the given DB vs. driver power-state combination."""
        instance = self._get_sync_instance(power_state, vm_state)
        instance.refresh()
        instance.save()
        self.mox.StubOutWithMock(self.compute.compute_api, 'stop')
        self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop')
        if stop:
            if force:
                self.compute.compute_api.force_stop(self.context, instance)
            else:
                self.compute.compute_api.stop(self.context, instance)
        self.mox.ReplayAll()
        self.compute._sync_instance_power_state(self.context, instance,
                                                driver_power_state)
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
    def test_sync_instance_power_state_to_stop(self):
        """ACTIVE instances found shutdown/crashed/suspended get stopped;
        a STOPPED instance found running gets force-stopped."""
        for ps in (power_state.SHUTDOWN, power_state.CRASHED,
                   power_state.SUSPENDED):
            self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps)
        self._test_sync_to_stop(power_state.SHUTDOWN, vm_states.STOPPED,
                                power_state.RUNNING, force=True)
    def test_sync_instance_power_state_to_no_stop(self):
        """Paused/unknown driver states and (soft-)deleted vm states must
        not trigger a stop."""
        for ps in (power_state.PAUSED, power_state.NOSTATE):
            self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps,
                                    stop=False)
        for vs in (vm_states.SOFT_DELETED, vm_states.DELETED):
            for ps in (power_state.NOSTATE, power_state.SHUTDOWN):
                self._test_sync_to_stop(power_state.RUNNING, vs, ps,
                                        stop=False)
def test_run_pending_deletes(self):
self.flags(instance_delete_interval=10)
class FakeInstance(object):
def __init__(self, uuid, name, smd):
self.uuid = uuid
self.name = name
self.system_metadata = smd
self.cleaned = False
def __getitem__(self, name):
return getattr(self, name)
def save(self, context):
pass
class FakeInstanceList(object):
def get_by_filters(self, *args, **kwargs):
return []
a = FakeInstance('123', 'apple', {'clean_attempts': '100'})
b = FakeInstance('456', 'orange', {'clean_attempts': '3'})
c = FakeInstance('789', 'banana', {})
self.mox.StubOutWithMock(instance_obj.InstanceList,
'get_by_filters')
instance_obj.InstanceList.get_by_filters(
{'read_deleted': 'yes'},
{'deleted': True, 'soft_deleted': False, 'host': 'fake-mini',
'cleaned': False},
expected_attrs=['info_cache', 'security_groups',
'system_metadata']).AndReturn([a, b, c])
self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files')
self.compute.driver.delete_instance_files(
mox.IgnoreArg()).AndReturn(True)
self.compute.driver.delete_instance_files(
mox.IgnoreArg()).AndReturn(False)
self.mox.ReplayAll()
self.compute._run_pending_deletes({})
self.assertFalse(a.cleaned)
self.assertEqual('100', a.system_metadata['clean_attempts'])
self.assertTrue(b.cleaned)
self.assertEqual('4', b.system_metadata['clean_attempts'])
self.assertFalse(c.cleaned)
self.assertEqual('1', c.system_metadata['clean_attempts'])
def test_swap_volume_volume_api_usage(self):
# This test ensures that volume_id arguments are passed to volume_api
# and that volume states are OK
volumes = {}
old_volume_id = uuidutils.generate_uuid()
volumes[old_volume_id] = {'id': old_volume_id,
'display_name': 'old_volume',
'status': 'detaching'}
new_volume_id = uuidutils.generate_uuid()
volumes[new_volume_id] = {'id': new_volume_id,
'display_name': 'new_volume',
'status': 'attaching'}
def fake_vol_api_func(context, volume, *args):
self.assertTrue(uuidutils.is_uuid_like(volume))
return {}
def fake_vol_get(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
return volumes[volume_id]
def fake_vol_attach(context, volume_id, instance_uuid, connector):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
self.assertIn(volumes[volume_id]['status'],
['available', 'attaching'])
volumes[volume_id]['status'] = 'in-use'
def fake_vol_unreserve(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'attaching':
volumes[volume_id]['status'] = 'available'
def fake_vol_detach(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
volumes[volume_id]['status'] = 'available'
def fake_vol_migrate_volume_completion(context, old_volume_id,
new_volume_id, error=False):
self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
return {'save_volume_id': new_volume_id}
def fake_func_exc(*args, **kwargs):
raise AttributeError # Random exception
self.stubs.Set(self.compute.volume_api, 'get', fake_vol_get)
self.stubs.Set(self.compute.volume_api, 'initialize_connection',
fake_vol_api_func)
self.stubs.Set(self.compute.volume_api, 'attach', fake_vol_attach)
self.stubs.Set(self.compute.volume_api, 'unreserve_volume',
fake_vol_unreserve)
self.stubs.Set(self.compute.volume_api, 'terminate_connection',
fake_vol_api_func)
self.stubs.Set(self.compute.volume_api, 'detach', fake_vol_detach)
self.stubs.Set(self.compute, '_get_instance_volume_bdm',
lambda x, y, z: {'device_name': '/dev/vdb',
'connection_info': '{"foo": "bar"}'})
self.stubs.Set(self.compute.driver, 'get_volume_connector',
lambda x: {})
self.stubs.Set(self.compute.driver, 'swap_volume',
lambda w, x, y, z: None)
self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion',
fake_vol_migrate_volume_completion)
self.stubs.Set(self.compute.conductor_api,
'block_device_mapping_update_or_create',
lambda x, y: None)
self.stubs.Set(self.compute.conductor_api,
'instance_fault_create',
lambda x, y: None)
# Good path
self.compute.swap_volume(self.context, old_volume_id, new_volume_id,
{'uuid': 'fake'})
self.assertEqual(volumes[old_volume_id]['status'], 'available')
self.assertEqual(volumes[new_volume_id]['status'], 'in-use')
# Error paths
volumes[old_volume_id]['status'] = 'detaching'
volumes[new_volume_id]['status'] = 'attaching'
self.stubs.Set(self.compute.driver, 'swap_volume', fake_func_exc)
self.assertRaises(AttributeError, self.compute.swap_volume,
self.context, old_volume_id, new_volume_id,
{'uuid': 'fake'})
self.assertEqual(volumes[old_volume_id]['status'], 'detaching')
self.assertEqual(volumes[new_volume_id]['status'], 'attaching')
volumes[old_volume_id]['status'] = 'detaching'
volumes[new_volume_id]['status'] = 'attaching'
self.stubs.Set(self.compute.volume_api, 'initialize_connection',
fake_func_exc)
self.assertRaises(AttributeError, self.compute.swap_volume,
self.context, old_volume_id, new_volume_id,
{'uuid': 'fake'})
self.assertEqual(volumes[old_volume_id]['status'], 'detaching')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
    def test_check_can_live_migrate_source(self):
        """The source check augments dest_check_data with is_volume_backed
        before delegating to the driver."""
        is_volume_backed = 'volume_backed'
        bdms = 'bdms'
        dest_check_data = dict(foo='bar')
        db_instance = fake_instance.fake_db_instance()
        instance = instance_obj.Instance._from_db_object(
                self.context, instance_obj.Instance(), db_instance)
        expected_dest_check_data = dict(dest_check_data,
                                        is_volume_backed=is_volume_backed)
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(self.compute.compute_api,
                                 'is_volume_backed_instance')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_can_live_migrate_source')
        instance_p = obj_base.obj_to_primitive(instance)
        self.compute.conductor_api.block_device_mapping_get_all_by_instance(
                self.context, instance_p, legacy=False).AndReturn(bdms)
        self.compute.compute_api.is_volume_backed_instance(
                self.context, instance, bdms).AndReturn(is_volume_backed)
        self.compute.driver.check_can_live_migrate_source(
                self.context, instance, expected_dest_check_data)
        self.mox.ReplayAll()
        self.compute.check_can_live_migrate_source(
                self.context, instance=instance,
                dest_check_data=dest_check_data)
    def _test_check_can_live_migrate_destination(self, do_raise=False,
                                                 has_mig_data=False):
        """Helper: destination check queries both compute infos, runs the
        driver check, calls back to the source, and always performs the
        destination cleanup -- even when the source check raises."""
        db_instance = fake_instance.fake_db_instance(host='fake-host')
        instance = instance_obj.Instance._from_db_object(
                self.context, instance_obj.Instance(), db_instance)
        instance.host = 'fake-host'
        block_migration = 'block_migration'
        disk_over_commit = 'disk_over_commit'
        src_info = 'src_info'
        dest_info = 'dest_info'
        dest_check_data = dict(foo='bar')
        mig_data = dict(cow='moo')
        expected_result = dict(mig_data)
        if has_mig_data:
            # migrate_data from the destination check is merged in
            dest_check_data['migrate_data'] = dict(cat='meow')
            expected_result.update(cat='meow')
        self.mox.StubOutWithMock(self.compute, '_get_compute_info')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_can_live_migrate_destination')
        self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                                 'check_can_live_migrate_source')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_can_live_migrate_destination_cleanup')
        self.compute._get_compute_info(self.context,
                                       'fake-host').AndReturn(src_info)
        self.compute._get_compute_info(self.context,
                                       CONF.host).AndReturn(dest_info)
        self.compute.driver.check_can_live_migrate_destination(
                self.context, instance, src_info, dest_info,
                block_migration, disk_over_commit).AndReturn(dest_check_data)
        mock_meth = self.compute.compute_rpcapi.check_can_live_migrate_source(
                self.context, instance, dest_check_data)
        if do_raise:
            mock_meth.AndRaise(test.TestingException())
        else:
            mock_meth.AndReturn(mig_data)
        self.compute.driver.check_can_live_migrate_destination_cleanup(
                self.context, dest_check_data)
        self.mox.ReplayAll()
        result = self.compute.check_can_live_migrate_destination(
                self.context, instance=instance,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
        self.assertEqual(expected_result, result)
    def test_check_can_live_migrate_destination_success(self):
        """Plain success path of the destination check."""
        self._test_check_can_live_migrate_destination()
    def test_check_can_live_migrate_destination_success_w_mig_data(self):
        """Success path where the driver supplies extra migrate_data."""
        self._test_check_can_live_migrate_destination(has_mig_data=True)
    def test_check_can_live_migrate_destination_fail(self):
        """A source-check failure propagates (after destination cleanup)."""
        self.assertRaises(
                test.TestingException,
                self._test_check_can_live_migrate_destination,
                do_raise=True)
class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
    def setUp(self):
        """Set up the compute manager, a fake instance and build inputs."""
        super(ComputeManagerBuildInstanceTestCase, self).setUp()
        self.compute = importutils.import_object(CONF.compute_manager)
        self.context = context.RequestContext('fake', 'fake')
        self.instance = fake_instance.fake_db_instance(
                vm_state=vm_states.ACTIVE)
        self.admin_pass = 'pass'
        self.injected_files = []
        self.image = {}
        self.node = 'fake-node'
        self.limits = {}
        # override tracker with a version that doesn't need the database:
        fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
                self.compute.driver, self.node)
        self.compute._resource_tracker_dict[self.node] = fake_rt
    def test_build_and_run_instance_called_with_proper_args(self):
        """build_and_run_instance delegates to _build_and_run_instance,
        wrapped in conductor action events."""
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'action_event_start')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'action_event_finish')
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass, self.node,
                self.limits)
        self.compute.conductor_api.action_event_start(self.context,
                mox.IgnoreArg())
        self.compute.conductor_api.action_event_finish(self.context,
                mox.IgnoreArg())
        self.mox.ReplayAll()
        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={}, filter_properties=[],
                injected_files=self.injected_files,
                admin_password=self.admin_pass, node=self.node,
                limits=self.limits)
    def test_build_abort_exception(self):
        """A BuildAbortException puts the instance into ERROR state and
        does not reschedule the build."""
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                                 'build_instances')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'action_event_start')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'action_event_finish')
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass, self.node,
                self.limits).AndRaise(exception.BuildAbortException(reason='',
                    instance_uuid=self.instance['uuid']))
        self.compute._set_instance_error_state(self.context,
                self.instance['uuid'])
        self.compute.conductor_api.action_event_start(self.context,
                mox.IgnoreArg())
        self.compute.conductor_api.action_event_finish(self.context,
                mox.IgnoreArg())
        self.mox.ReplayAll()
        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={}, filter_properties=[],
                injected_files=self.injected_files,
                admin_password=self.admin_pass, node=self.node,
                limits=self.limits)
    def test_rescheduled_exception(self):
        """A RescheduledException triggers a conductor build_instances call
        to reschedule the build elsewhere."""
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                                 'build_instances')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'action_event_start')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'action_event_finish')
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass, self.node,
                self.limits).AndRaise(exception.RescheduledException(reason='',
                    instance_uuid=self.instance['uuid']))
        self.compute.compute_task_api.build_instances(self.context,
                [self.instance], self.image, [], self.admin_pass,
                self.injected_files, None, None, None)
        self.compute.conductor_api.action_event_start(self.context,
                mox.IgnoreArg())
        self.compute.conductor_api.action_event_finish(self.context,
                mox.IgnoreArg())
        self.mox.ReplayAll()
        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={}, filter_properties=[],
                injected_files=self.injected_files,
                admin_password=self.admin_pass, node=self.node,
                limits=self.limits)
    def test_instance_not_found(self):
        """InstanceNotFound from the driver spawn aborts the build."""
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
                                 'instance_update')
        self.compute.driver.spawn(self.context, self.instance, self.image,
                self.injected_files, self.admin_pass).AndRaise(
                        exception.InstanceNotFound(instance_id=1))
        conductor_rpcapi.ConductorAPI.instance_update(
                self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor')
        self.mox.ReplayAll()
        self.assertRaises(exception.BuildAbortException,
                self.compute._build_and_run_instance, self.context,
                self.instance, self.image, self.injected_files,
                self.admin_pass, self.node, self.limits)
    def test_reschedule_on_exception(self):
        """An unclassified exception from driver.spawn is translated into
        RescheduledException so the build can be retried elsewhere.
        """
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
                'instance_update')
        self.compute.driver.spawn(self.context, self.instance, self.image,
                self.injected_files, self.admin_pass).AndRaise(
                        test.TestingException())
        conductor_rpcapi.ConductorAPI.instance_update(
            self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor')
        self.mox.ReplayAll()
        self.assertRaises(exception.RescheduledException,
                self.compute._build_and_run_instance, self.context,
                self.instance, self.image, self.injected_files,
                self.admin_pass, self.node, self.limits)
    def test_unexpected_task_state(self):
        """UnexpectedTaskStateError during spawn (e.g. the instance was
        deleted mid-build) aborts the build instead of rescheduling.
        """
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
                'instance_update')
        self.compute.driver.spawn(self.context, self.instance, self.image,
                self.injected_files, self.admin_pass).AndRaise(
                        exception.UnexpectedTaskStateError(expected=None,
                            actual='deleting'))
        conductor_rpcapi.ConductorAPI.instance_update(
            self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor')
        self.mox.ReplayAll()
        self.assertRaises(exception.BuildAbortException,
                self.compute._build_and_run_instance, self.context,
                self.instance, self.image, self.injected_files,
                self.admin_pass, self.node, self.limits)
    def test_reschedule_on_resources_unavailable(self):
        """A failed resource claim (ComputeResourcesUnavailable) must send the
        build back to the conductor for rescheduling.
        """
        class FakeResourceTracker(object):
            # Always refuses the claim to simulate a full host.
            def instance_claim(self, context, instance, limits):
                raise exception.ComputeResourcesUnavailable
        self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                'build_instances')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                'action_event_start')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                'action_event_finish')
        self.compute._get_resource_tracker('node').AndReturn(
            FakeResourceTracker())
        # Expect a reschedule through the conductor, not an error state.
        self.compute.compute_task_api.build_instances(self.context,
                [self.instance], self.image, [], self.admin_pass,
                self.injected_files, None, None, None)
        self.compute.conductor_api.action_event_start(self.context,
                mox.IgnoreArg())
        self.compute.conductor_api.action_event_finish(self.context,
                mox.IgnoreArg())
        self.mox.ReplayAll()
        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={}, filter_properties=[],
                injected_files=self.injected_files,
                admin_password=self.admin_pass, node=self.node,
                limits=self.limits)
| 45.635851 | 79 | 0.618148 |
import time
import mox
from oslo.config import cfg
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import db
from nova import exception
from nova.network import model as network_model
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova.openstack.common import importutils
from nova.openstack.common import uuidutils
from nova import test
from nova.tests.compute import fake_resource_tracker
from nova.tests import fake_instance
from nova import utils
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
class ComputeManagerUnitTestCase(test.NoDBTestCase):
    """Unit tests for individual ComputeManager methods.

    Uses mox record/replay mocking throughout, so the order of recorded
    expectations in each test mirrors the exact call order expected from
    the code under test.
    """
    def setUp(self):
        """Create a real compute manager instance and a fake request context."""
        super(ComputeManagerUnitTestCase, self).setUp()
        self.compute = importutils.import_object(CONF.compute_manager)
        self.context = context.RequestContext('fake', 'fake')
    def test_allocate_network_succeeds_after_retries(self):
        """Network allocation retries with capped exponential backoff and
        finally succeeds on the last configured attempt.
        """
        self.flags(network_allocate_retries=8)
        nwapi = self.compute.network_api
        self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
        self.mox.StubOutWithMock(time, 'sleep')
        instance = {}
        is_vpn = 'fake-is-vpn'
        req_networks = 'fake-req-networks'
        macs = 'fake-macs'
        sec_groups = 'fake-sec-groups'
        final_result = 'meow'
        dhcp_options = None
        # Doubling backoff, capped at 30 seconds.
        expected_sleep_times = [1, 2, 4, 8, 16, 30, 30, 30]
        for sleep_time in expected_sleep_times:
            nwapi.allocate_for_instance(
                self.context, instance, vpn=is_vpn,
                requested_networks=req_networks, macs=macs,
                security_groups=sec_groups,
                dhcp_options=dhcp_options).AndRaise(
                    test.TestingException())
            time.sleep(sleep_time)
        # Final attempt succeeds.
        nwapi.allocate_for_instance(
            self.context, instance, vpn=is_vpn,
            requested_networks=req_networks, macs=macs,
            security_groups=sec_groups,
            dhcp_options=dhcp_options).AndReturn(final_result)
        self.mox.ReplayAll()
        res = self.compute._allocate_network_async(self.context, instance,
                                                   req_networks,
                                                   macs,
                                                   sec_groups,
                                                   is_vpn,
                                                   dhcp_options)
        self.assertEqual(final_result, res)
    def test_allocate_network_fails(self):
        """With retries disabled, the first allocation failure propagates."""
        self.flags(network_allocate_retries=0)
        nwapi = self.compute.network_api
        self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
        instance = {}
        is_vpn = 'fake-is-vpn'
        req_networks = 'fake-req-networks'
        macs = 'fake-macs'
        sec_groups = 'fake-sec-groups'
        dhcp_options = None
        nwapi.allocate_for_instance(
            self.context, instance, vpn=is_vpn,
            requested_networks=req_networks, macs=macs,
            security_groups=sec_groups,
            dhcp_options=dhcp_options).AndRaise(test.TestingException())
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.compute._allocate_network_async,
                          self.context, instance, req_networks, macs,
                          sec_groups, is_vpn, dhcp_options)
    def test_allocate_network_neg_conf_value_treated_as_zero(self):
        """A negative network_allocate_retries behaves like zero retries."""
        self.flags(network_allocate_retries=-1)
        nwapi = self.compute.network_api
        self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
        instance = {}
        is_vpn = 'fake-is-vpn'
        req_networks = 'fake-req-networks'
        macs = 'fake-macs'
        sec_groups = 'fake-sec-groups'
        dhcp_options = None
        # Only one attempt is expected; no retry loop.
        nwapi.allocate_for_instance(
            self.context, instance, vpn=is_vpn,
            requested_networks=req_networks, macs=macs,
            security_groups=sec_groups,
            dhcp_options=dhcp_options).AndRaise(test.TestingException())
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.compute._allocate_network_async,
                          self.context, instance, req_networks, macs,
                          sec_groups, is_vpn, dhcp_options)
    def test_init_host(self):
        """init_host initializes each instance on the host, honoring the
        defer_iptables_apply flag (filter apply deferred around the loop).
        """
        our_host = self.compute.host
        fake_context = 'fake-context'
        inst = fake_instance.fake_db_instance(
                vm_state=vm_states.ACTIVE,
                info_cache={'instance_uuid': 'fake-uuid',
                            'network_info': None},
                security_groups=None)
        startup_instances = [inst, inst, inst]
        def _do_mock_calls(defer_iptables_apply):
            # Records the full expected call sequence for one init_host run.
            self.compute.driver.init_host(host=our_host)
            context.get_admin_context().AndReturn(fake_context)
            db.instance_get_all_by_host(
                fake_context, our_host, columns_to_join=['info_cache']
                ).AndReturn(startup_instances)
            if defer_iptables_apply:
                self.compute.driver.filter_defer_apply_on()
            self.compute._destroy_evacuated_instances(fake_context)
            self.compute._init_instance(fake_context,
                                        mox.IsA(instance_obj.Instance))
            self.compute._init_instance(fake_context,
                                        mox.IsA(instance_obj.Instance))
            self.compute._init_instance(fake_context,
                                        mox.IsA(instance_obj.Instance))
            if defer_iptables_apply:
                self.compute.driver.filter_defer_apply_off()
        self.mox.StubOutWithMock(self.compute.driver, 'init_host')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'filter_defer_apply_on')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'filter_defer_apply_off')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(context, 'get_admin_context')
        self.mox.StubOutWithMock(self.compute,
                                 '_destroy_evacuated_instances')
        self.mox.StubOutWithMock(self.compute,
                                 '_init_instance')
        # First run: with deferred iptables apply.
        self.flags(defer_iptables_apply=True)
        _do_mock_calls(True)
        self.mox.ReplayAll()
        self.compute.init_host()
        self.mox.VerifyAll()
        # Second run: without deferred iptables apply.
        self.mox.ResetAll()
        self.flags(defer_iptables_apply=False)
        _do_mock_calls(False)
        self.mox.ReplayAll()
        self.compute.init_host()
        self.mox.VerifyAll()
        # NOTE(review): stubs are unset explicitly, presumably because
        # tearDown needs the real context.get_admin_context() — confirm.
        self.mox.UnsetStubs()
    def test_init_host_with_deleted_migration(self):
        """An instance found on the hypervisor but owned by another host
        (evacuated away) is destroyed during init_host, even when its
        network info can no longer be looked up (InstanceNotFound).
        """
        our_host = self.compute.host
        not_our_host = 'not-' + our_host
        fake_context = 'fake-context'
        deleted_instance = {
            'name': 'fake-name',
            'host': not_our_host,
            'uuid': 'fake-uuid',
        }
        self.mox.StubOutWithMock(self.compute.driver, 'init_host')
        self.mox.StubOutWithMock(self.compute.driver, 'destroy')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(context, 'get_admin_context')
        self.mox.StubOutWithMock(self.compute, 'init_virt_events')
        self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
        self.mox.StubOutWithMock(self.compute, '_init_instance')
        self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
        self.compute.driver.init_host(host=our_host)
        context.get_admin_context().AndReturn(fake_context)
        db.instance_get_all_by_host(fake_context, our_host,
                                    columns_to_join=['info_cache']
                                    ).AndReturn([])
        self.compute.init_virt_events()
        self.compute._get_instances_on_driver(
            fake_context, {'deleted': False}).AndReturn([deleted_instance])
        self.compute._get_instance_nw_info(fake_context, deleted_instance
            ).AndRaise(exception.InstanceNotFound(
                instance_id=deleted_instance['uuid']))
        # Destroy must still be called despite the missing network info.
        self.compute.driver.destroy(deleted_instance,
            mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.compute.init_host()
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
    def test_init_instance_failed_resume_sets_error(self):
        """If resuming guest state on host boot fails, the instance is put
        into ERROR state.
        """
        instance = {
            'uuid': 'fake-uuid',
            'info_cache': None,
            'power_state': power_state.RUNNING,
            'vm_state': vm_states.ACTIVE,
            'task_state': None,
        }
        self.flags(resume_guests_state_on_host_boot=True)
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'resume_state_on_host_boot')
        self.mox.StubOutWithMock(self.compute,
                                 '_get_instance_volume_block_device_info')
        self.mox.StubOutWithMock(self.compute,
                                 '_set_instance_error_state')
        self.compute._get_power_state(mox.IgnoreArg(),
                instance).AndReturn(power_state.SHUTDOWN)
        self.compute.driver.plug_vifs(instance, mox.IgnoreArg())
        self.compute._get_instance_volume_block_device_info(mox.IgnoreArg(),
                instance).AndReturn('fake-bdm')
        self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(),
                instance, mox.IgnoreArg(),
                'fake-bdm').AndRaise(test.TestingException)
        self.compute._set_instance_error_state(mox.IgnoreArg(),
                instance['uuid'])
        self.mox.ReplayAll()
        self.compute._init_instance('fake-context', instance)
    def _test_init_instance_reverts_crashed_migrations(self,
                                                       old_vm_state=None):
        """Common body: an instance stuck in RESIZE_MIGRATING at host boot
        has its migration reverted; power-on depends on the pre-resize
        vm_state recorded in system_metadata.
        """
        power_on = True if (not old_vm_state or
                            old_vm_state == vm_states.ACTIVE) else False
        sys_meta = {
            'old_vm_state': old_vm_state
        }
        instance = {
            'uuid': 'foo',
            'vm_state': vm_states.ERROR,
            'task_state': task_states.RESIZE_MIGRATING,
            'power_state': power_state.SHUTDOWN,
            'system_metadata': sys_meta
        }
        fixed = dict(instance, task_state=None)
        self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
        self.mox.StubOutWithMock(utils, 'instance_sys_meta')
        self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'finish_revert_migration')
        self.mox.StubOutWithMock(self.compute,
                                 '_get_instance_volume_block_device_info')
        self.mox.StubOutWithMock(self.compute.driver, 'get_info')
        self.mox.StubOutWithMock(self.compute, '_instance_update')
        compute_utils.get_nw_info_for_instance(instance).AndReturn(
            network_model.NetworkInfo())
        self.compute.driver.plug_vifs(instance, [])
        utils.instance_sys_meta(instance).AndReturn(sys_meta)
        self.compute._get_instance_volume_block_device_info(
            self.context, instance).AndReturn([])
        self.compute.driver.finish_revert_migration(instance, [], [], power_on)
        # The task_state is cleared once the revert is done.
        self.compute._instance_update(self.context, instance['uuid'],
                                      task_state=None).AndReturn(fixed)
        self.compute.driver.get_info(fixed).AndReturn(
            {'state': power_state.SHUTDOWN})
        self.mox.ReplayAll()
        self.compute._init_instance(self.context, instance)
    def test_init_instance_reverts_crashed_migration_from_active(self):
        """Revert with old state ACTIVE -> instance powered back on."""
        self._test_init_instance_reverts_crashed_migrations(
            old_vm_state=vm_states.ACTIVE)
    def test_init_instance_reverts_crashed_migration_from_stopped(self):
        """Revert with old state STOPPED -> instance left powered off."""
        self._test_init_instance_reverts_crashed_migrations(
            old_vm_state=vm_states.STOPPED)
    def test_init_instance_reverts_crashed_migration_no_old_state(self):
        """Revert with no recorded old state -> treated like ACTIVE."""
        self._test_init_instance_reverts_crashed_migrations(old_vm_state=None)
    def test_get_instances_on_driver(self):
        """_get_instances_on_driver uses the driver's list_instance_uuids and
        filters DB instances by those uuids.
        """
        fake_context = context.get_admin_context()
        driver_instances = []
        for x in xrange(10):
            instance = dict(uuid=uuidutils.generate_uuid())
            driver_instances.append(instance)
        self.mox.StubOutWithMock(self.compute.driver,
                                 'list_instance_uuids')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'instance_get_all_by_filters')
        self.compute.driver.list_instance_uuids().AndReturn(
            [inst['uuid'] for inst in driver_instances])
        self.compute.conductor_api.instance_get_all_by_filters(
            fake_context,
            {'uuid': [inst['uuid'] for
                      inst in driver_instances]},
            columns_to_join=[]).AndReturn(
                driver_instances)
        self.mox.ReplayAll()
        result = self.compute._get_instances_on_driver(fake_context,
                                                       columns_to_join=[])
        self.assertEqual(driver_instances, result)
    def test_get_instances_on_driver_fallback(self):
        # Test getting instances when the driver does not support
        # 'list_instance_uuids': fall back to matching by instance name.
        self.compute.host = 'host'
        filters = {'host': self.compute.host}
        fake_context = context.get_admin_context()
        all_instances = []
        driver_instances = []
        for x in xrange(10):
            instance = dict(name=uuidutils.generate_uuid())
            if x % 2:
                driver_instances.append(instance)
            all_instances.append(instance)
        self.mox.StubOutWithMock(self.compute.driver,
                                 'list_instance_uuids')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'list_instances')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'instance_get_all_by_filters')
        self.compute.driver.list_instance_uuids().AndRaise(
            NotImplementedError())
        self.compute.driver.list_instances().AndReturn(
            [inst['name'] for inst in driver_instances])
        self.compute.conductor_api.instance_get_all_by_filters(
            fake_context, filters,
            columns_to_join=None).AndReturn(all_instances)
        self.mox.ReplayAll()
        result = self.compute._get_instances_on_driver(fake_context, filters)
        # Only instances whose names the driver reported are returned.
        self.assertEqual(driver_instances, result)
    def test_instance_usage_audit(self):
        """The usage audit notifies usage for each active instance in the
        audit window (with ignore_missing_network_data=False).
        """
        instances = [{'uuid': 'foo'}]
        self.flags(instance_usage_audit=True)
        self.stubs.Set(compute_utils, 'has_audit_been_run',
                       lambda *a, **k: False)
        self.stubs.Set(self.compute.conductor_api,
                       'instance_get_active_by_window_joined',
                       lambda *a, **k: instances)
        self.stubs.Set(compute_utils, 'start_instance_usage_audit',
                       lambda *a, **k: None)
        self.stubs.Set(compute_utils, 'finish_instance_usage_audit',
                       lambda *a, **k: None)
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'notify_usage_exists')
        self.compute.conductor_api.notify_usage_exists(
            self.context, instances[0], ignore_missing_network_data=False)
        self.mox.ReplayAll()
        self.compute._instance_usage_audit(self.context)
    def _get_sync_instance(self, power_state, vm_state, task_state=None):
        """Build an Instance object for power-state sync tests, with its
        refresh/save methods stubbed so expectations can be recorded.
        """
        instance = instance_obj.Instance()
        instance.uuid = 'fake-uuid'
        instance.power_state = power_state
        instance.vm_state = vm_state
        instance.host = self.compute.host
        instance.task_state = task_state
        self.mox.StubOutWithMock(instance, 'refresh')
        self.mox.StubOutWithMock(instance, 'save')
        return instance
    def test_sync_instance_power_state_match(self):
        """No save is expected when DB and hypervisor power states agree."""
        instance = self._get_sync_instance(power_state.RUNNING,
                                           vm_states.ACTIVE)
        instance.refresh()
        self.mox.ReplayAll()
        self.compute._sync_instance_power_state(self.context, instance,
                                                power_state.RUNNING)
    def test_sync_instance_power_state_running_stopped(self):
        """A RUNNING record synced against SHUTDOWN updates and saves the
        instance's power_state.
        """
        instance = self._get_sync_instance(power_state.RUNNING,
                                           vm_states.ACTIVE)
        instance.refresh()
        instance.save()
        self.mox.ReplayAll()
        self.compute._sync_instance_power_state(self.context, instance,
                                                power_state.SHUTDOWN)
        self.assertEqual(instance.power_state, power_state.SHUTDOWN)
    def _test_sync_to_stop(self, power_state, vm_state, driver_power_state,
                           stop=True, force=False):
        """Common body: verify whether sync triggers a (force_)stop call for
        the given DB vs hypervisor power-state combination.
        """
        instance = self._get_sync_instance(power_state, vm_state)
        instance.refresh()
        instance.save()
        self.mox.StubOutWithMock(self.compute.compute_api, 'stop')
        self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop')
        if stop:
            if force:
                self.compute.compute_api.force_stop(self.context, instance)
            else:
                self.compute.compute_api.stop(self.context, instance)
        self.mox.ReplayAll()
        self.compute._sync_instance_power_state(self.context, instance,
                                                driver_power_state)
        self.mox.VerifyAll()
        # Reset stubs so this helper can be called repeatedly in one test.
        self.mox.UnsetStubs()
    def test_sync_instance_power_state_to_stop(self):
        """ACTIVE instances found shutdown/crashed/suspended get stopped;
        a STOPPED instance found running gets force-stopped.
        """
        for ps in (power_state.SHUTDOWN, power_state.CRASHED,
                   power_state.SUSPENDED):
            self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps)
        self._test_sync_to_stop(power_state.SHUTDOWN, vm_states.STOPPED,
                                power_state.RUNNING, force=True)
    def test_sync_instance_power_state_to_no_stop(self):
        """Paused/unknown power states and (soft-)deleted vm states must not
        trigger a stop.
        """
        for ps in (power_state.PAUSED, power_state.NOSTATE):
            self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps,
                                    stop=False)
        for vs in (vm_states.SOFT_DELETED, vm_states.DELETED):
            for ps in (power_state.NOSTATE, power_state.SHUTDOWN):
                self._test_sync_to_stop(power_state.RUNNING, vs, ps,
                                        stop=False)
    def test_run_pending_deletes(self):
        """_run_pending_deletes marks instances cleaned when file deletion
        succeeds and increments clean_attempts when it fails; instances over
        the attempt threshold are skipped entirely.
        """
        self.flags(instance_delete_interval=10)
        class FakeInstance(object):
            def __init__(self, uuid, name, smd):
                self.uuid = uuid
                self.name = name
                self.system_metadata = smd
                self.cleaned = False
            def __getitem__(self, name):
                return getattr(self, name)
            def save(self, context):
                pass
        class FakeInstanceList(object):
            def get_by_filters(self, *args, **kwargs):
                return []
        # 'a' has exhausted its attempts, 'b' succeeds, 'c' fails once.
        a = FakeInstance('123', 'apple', {'clean_attempts': '100'})
        b = FakeInstance('456', 'orange', {'clean_attempts': '3'})
        c = FakeInstance('789', 'banana', {})
        self.mox.StubOutWithMock(instance_obj.InstanceList,
                                 'get_by_filters')
        instance_obj.InstanceList.get_by_filters(
            {'read_deleted': 'yes'},
            {'deleted': True, 'soft_deleted': False, 'host': 'fake-mini',
             'cleaned': False},
            expected_attrs=['info_cache', 'security_groups',
                            'system_metadata']).AndReturn([a, b, c])
        self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files')
        # Only two delete calls expected: 'a' is over the attempt limit.
        self.compute.driver.delete_instance_files(
            mox.IgnoreArg()).AndReturn(True)
        self.compute.driver.delete_instance_files(
            mox.IgnoreArg()).AndReturn(False)
        self.mox.ReplayAll()
        self.compute._run_pending_deletes({})
        self.assertFalse(a.cleaned)
        self.assertEqual('100', a.system_metadata['clean_attempts'])
        self.assertTrue(b.cleaned)
        self.assertEqual('4', b.system_metadata['clean_attempts'])
        self.assertFalse(c.cleaned)
        self.assertEqual('1', c.system_metadata['clean_attempts'])
    def test_swap_volume_volume_api_usage(self):
        # This test ensures that volume_id arguments are passed to volume_api
        # and that volume states are OK
        volumes = {}
        old_volume_id = uuidutils.generate_uuid()
        volumes[old_volume_id] = {'id': old_volume_id,
                                  'display_name': 'old_volume',
                                  'status': 'detaching'}
        new_volume_id = uuidutils.generate_uuid()
        volumes[new_volume_id] = {'id': new_volume_id,
                                  'display_name': 'new_volume',
                                  'status': 'attaching'}
        def fake_vol_api_func(context, volume, *args):
            self.assertTrue(uuidutils.is_uuid_like(volume))
            return {}
        def fake_vol_get(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            return volumes[volume_id]
        def fake_vol_attach(context, volume_id, instance_uuid, connector):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            self.assertIn(volumes[volume_id]['status'],
                          ['available', 'attaching'])
            volumes[volume_id]['status'] = 'in-use'
        def fake_vol_unreserve(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            if volumes[volume_id]['status'] == 'attaching':
                volumes[volume_id]['status'] = 'available'
        def fake_vol_detach(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            volumes[volume_id]['status'] = 'available'
        def fake_vol_migrate_volume_completion(context, old_volume_id,
                                               new_volume_id, error=False):
            self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
            self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
            return {'save_volume_id': new_volume_id}
        def fake_func_exc(*args, **kwargs):
            raise AttributeError # Random exception
        self.stubs.Set(self.compute.volume_api, 'get', fake_vol_get)
        self.stubs.Set(self.compute.volume_api, 'initialize_connection',
                       fake_vol_api_func)
        self.stubs.Set(self.compute.volume_api, 'attach', fake_vol_attach)
        self.stubs.Set(self.compute.volume_api, 'unreserve_volume',
                       fake_vol_unreserve)
        self.stubs.Set(self.compute.volume_api, 'terminate_connection',
                       fake_vol_api_func)
        self.stubs.Set(self.compute.volume_api, 'detach', fake_vol_detach)
        self.stubs.Set(self.compute, '_get_instance_volume_bdm',
                       lambda x, y, z: {'device_name': '/dev/vdb',
                                        'connection_info': '{"foo": "bar"}'})
        self.stubs.Set(self.compute.driver, 'get_volume_connector',
                       lambda x: {})
        self.stubs.Set(self.compute.driver, 'swap_volume',
                       lambda w, x, y, z: None)
        self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion',
                       fake_vol_migrate_volume_completion)
        self.stubs.Set(self.compute.conductor_api,
                       'block_device_mapping_update_or_create',
                       lambda x, y: None)
        self.stubs.Set(self.compute.conductor_api,
                       'instance_fault_create',
                       lambda x, y: None)
        # Good path: old volume released, new volume in use.
        self.compute.swap_volume(self.context, old_volume_id, new_volume_id,
                                 {'uuid': 'fake'})
        self.assertEqual(volumes[old_volume_id]['status'], 'available')
        self.assertEqual(volumes[new_volume_id]['status'], 'in-use')
        # Error paths: a failure in driver.swap_volume leaves both volumes in
        # their transitional states.
        volumes[old_volume_id]['status'] = 'detaching'
        volumes[new_volume_id]['status'] = 'attaching'
        self.stubs.Set(self.compute.driver, 'swap_volume', fake_func_exc)
        self.assertRaises(AttributeError, self.compute.swap_volume,
                          self.context, old_volume_id, new_volume_id,
                          {'uuid': 'fake'})
        self.assertEqual(volumes[old_volume_id]['status'], 'detaching')
        self.assertEqual(volumes[new_volume_id]['status'], 'attaching')
        # A failure in initialize_connection unreserves the new volume.
        volumes[old_volume_id]['status'] = 'detaching'
        volumes[new_volume_id]['status'] = 'attaching'
        self.stubs.Set(self.compute.volume_api, 'initialize_connection',
                       fake_func_exc)
        self.assertRaises(AttributeError, self.compute.swap_volume,
                          self.context, old_volume_id, new_volume_id,
                          {'uuid': 'fake'})
        self.assertEqual(volumes[old_volume_id]['status'], 'detaching')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
    def test_check_can_live_migrate_source(self):
        """The source-side check augments dest_check_data with the
        is_volume_backed flag before delegating to the driver.
        """
        is_volume_backed = 'volume_backed'
        bdms = 'bdms'
        dest_check_data = dict(foo='bar')
        db_instance = fake_instance.fake_db_instance()
        instance = instance_obj.Instance._from_db_object(
            self.context, instance_obj.Instance(), db_instance)
        expected_dest_check_data = dict(dest_check_data,
                                        is_volume_backed=is_volume_backed)
        self.mox.StubOutWithMock(self.compute.conductor_api,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(self.compute.compute_api,
                                 'is_volume_backed_instance')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_can_live_migrate_source')
        instance_p = obj_base.obj_to_primitive(instance)
        self.compute.conductor_api.block_device_mapping_get_all_by_instance(
            self.context, instance_p, legacy=False).AndReturn(bdms)
        self.compute.compute_api.is_volume_backed_instance(
            self.context, instance, bdms).AndReturn(is_volume_backed)
        self.compute.driver.check_can_live_migrate_source(
            self.context, instance, expected_dest_check_data)
        self.mox.ReplayAll()
        self.compute.check_can_live_migrate_source(
            self.context, instance=instance,
            dest_check_data=dest_check_data)
    def _test_check_can_live_migrate_destination(self, do_raise=False,
                                                 has_mig_data=False):
        """Common body: the destination check gathers compute info for both
        hosts, asks the driver, RPCs the source-side check, and always runs
        the driver cleanup — even when the source check raises.
        """
        db_instance = fake_instance.fake_db_instance(host='fake-host')
        instance = instance_obj.Instance._from_db_object(
            self.context, instance_obj.Instance(), db_instance)
        instance.host = 'fake-host'
        block_migration = 'block_migration'
        disk_over_commit = 'disk_over_commit'
        src_info = 'src_info'
        dest_info = 'dest_info'
        dest_check_data = dict(foo='bar')
        mig_data = dict(cow='moo')
        expected_result = dict(mig_data)
        if has_mig_data:
            dest_check_data['migrate_data'] = dict(cat='meow')
            expected_result.update(cat='meow')
        self.mox.StubOutWithMock(self.compute, '_get_compute_info')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_can_live_migrate_destination')
        self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                                 'check_can_live_migrate_source')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_can_live_migrate_destination_cleanup')
        self.compute._get_compute_info(self.context,
                                       'fake-host').AndReturn(src_info)
        self.compute._get_compute_info(self.context,
                                       CONF.host).AndReturn(dest_info)
        self.compute.driver.check_can_live_migrate_destination(
            self.context, instance, src_info, dest_info,
            block_migration, disk_over_commit).AndReturn(dest_check_data)
        mock_meth = self.compute.compute_rpcapi.check_can_live_migrate_source(
            self.context, instance, dest_check_data)
        if do_raise:
            mock_meth.AndRaise(test.TestingException())
        else:
            mock_meth.AndReturn(mig_data)
        # Cleanup is expected in both the success and the failure case.
        self.compute.driver.check_can_live_migrate_destination_cleanup(
            self.context, dest_check_data)
        self.mox.ReplayAll()
        result = self.compute.check_can_live_migrate_destination(
            self.context, instance=instance,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
        self.assertEqual(expected_result, result)
    def test_check_can_live_migrate_destination_success(self):
        """Happy path: migration data from the source check is returned."""
        self._test_check_can_live_migrate_destination()
    def test_check_can_live_migrate_destination_success_w_mig_data(self):
        """Existing migrate_data in dest_check_data is merged into result."""
        self._test_check_can_live_migrate_destination(has_mig_data=True)
    def test_check_can_live_migrate_destination_fail(self):
        """A source-check failure propagates (after cleanup runs)."""
        self.assertRaises(
            test.TestingException,
            self._test_check_can_live_migrate_destination,
            do_raise=True)
class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
    """Tests for build_and_run_instance / _build_and_run_instance error
    handling: which failures abort the build, which reschedule it, and how
    action events are recorded. Uses mox record/replay mocking.
    """
    def setUp(self):
        """Create the compute manager plus canned build arguments, and swap
        in a resource tracker that needs no database.
        """
        super(ComputeManagerBuildInstanceTestCase, self).setUp()
        self.compute = importutils.import_object(CONF.compute_manager)
        self.context = context.RequestContext('fake', 'fake')
        self.instance = fake_instance.fake_db_instance(
            vm_state=vm_states.ACTIVE)
        self.admin_pass = 'pass'
        self.injected_files = []
        self.image = {}
        self.node = 'fake-node'
        self.limits = {}
        # override tracker with a version that doesn't need the database:
        fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
                self.compute.driver, self.node)
        self.compute._resource_tracker_dict[self.node] = fake_rt
    def test_build_and_run_instance_called_with_proper_args(self):
        """build_and_run_instance forwards its arguments to
        _build_and_run_instance, wrapped in action start/finish events.
        """
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                'action_event_start')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                'action_event_finish')
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass, self.node,
                self.limits)
        self.compute.conductor_api.action_event_start(self.context,
                mox.IgnoreArg())
        self.compute.conductor_api.action_event_finish(self.context,
                mox.IgnoreArg())
        self.mox.ReplayAll()
        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={}, filter_properties=[],
                injected_files=self.injected_files,
                admin_password=self.admin_pass, node=self.node,
                limits=self.limits)
    def test_build_abort_exception(self):
        """A BuildAbortException puts the instance into ERROR state and does
        NOT reschedule (build_instances is stubbed but never expected).
        """
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                'build_instances')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                'action_event_start')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                'action_event_finish')
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass, self.node,
                self.limits).AndRaise(exception.BuildAbortException(reason='',
                    instance_uuid=self.instance['uuid']))
        self.compute._set_instance_error_state(self.context,
                self.instance['uuid'])
        self.compute.conductor_api.action_event_start(self.context,
                mox.IgnoreArg())
        self.compute.conductor_api.action_event_finish(self.context,
                mox.IgnoreArg())
        self.mox.ReplayAll()
        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={}, filter_properties=[],
                injected_files=self.injected_files,
                admin_password=self.admin_pass, node=self.node,
                limits=self.limits)
    def test_rescheduled_exception(self):
        """A RescheduledException hands the build back to the conductor via
        build_instances; no error state is set.
        """
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                'build_instances')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                'action_event_start')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                'action_event_finish')
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass, self.node,
                self.limits).AndRaise(exception.RescheduledException(reason='',
                    instance_uuid=self.instance['uuid']))
        self.compute.compute_task_api.build_instances(self.context,
                [self.instance], self.image, [], self.admin_pass,
                self.injected_files, None, None, None)
        self.compute.conductor_api.action_event_start(self.context,
                mox.IgnoreArg())
        self.compute.conductor_api.action_event_finish(self.context,
                mox.IgnoreArg())
        self.mox.ReplayAll()
        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={}, filter_properties=[],
                injected_files=self.injected_files,
                admin_password=self.admin_pass, node=self.node,
                limits=self.limits)
    def test_instance_not_found(self):
        """InstanceNotFound during spawn aborts the build."""
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
                'instance_update')
        self.compute.driver.spawn(self.context, self.instance, self.image,
                self.injected_files, self.admin_pass).AndRaise(
                        exception.InstanceNotFound(instance_id=1))
        conductor_rpcapi.ConductorAPI.instance_update(
            self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor')
        self.mox.ReplayAll()
        self.assertRaises(exception.BuildAbortException,
                self.compute._build_and_run_instance, self.context,
                self.instance, self.image, self.injected_files,
                self.admin_pass, self.node, self.limits)
    def test_reschedule_on_exception(self):
        """Any unclassified spawn failure becomes a RescheduledException."""
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
                'instance_update')
        self.compute.driver.spawn(self.context, self.instance, self.image,
                self.injected_files, self.admin_pass).AndRaise(
                        test.TestingException())
        conductor_rpcapi.ConductorAPI.instance_update(
            self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor')
        self.mox.ReplayAll()
        self.assertRaises(exception.RescheduledException,
                self.compute._build_and_run_instance, self.context,
                self.instance, self.image, self.injected_files,
                self.admin_pass, self.node, self.limits)
    def test_unexpected_task_state(self):
        """UnexpectedTaskStateError (instance deleted mid-build) aborts."""
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
                'instance_update')
        self.compute.driver.spawn(self.context, self.instance, self.image,
                self.injected_files, self.admin_pass).AndRaise(
                        exception.UnexpectedTaskStateError(expected=None,
                            actual='deleting'))
        conductor_rpcapi.ConductorAPI.instance_update(
            self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor')
        self.mox.ReplayAll()
        self.assertRaises(exception.BuildAbortException,
                self.compute._build_and_run_instance, self.context,
                self.instance, self.image, self.injected_files,
                self.admin_pass, self.node, self.limits)
    def test_reschedule_on_resources_unavailable(self):
        """A failed resource claim reschedules the build via the conductor."""
        class FakeResourceTracker(object):
            # Always refuses the claim to simulate a full host.
            def instance_claim(self, context, instance, limits):
                raise exception.ComputeResourcesUnavailable
        self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                'build_instances')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                'action_event_start')
        self.mox.StubOutWithMock(self.compute.conductor_api,
                'action_event_finish')
        self.compute._get_resource_tracker('node').AndReturn(
            FakeResourceTracker())
        self.compute.compute_task_api.build_instances(self.context,
                [self.instance], self.image, [], self.admin_pass,
                self.injected_files, None, None, None)
        self.compute.conductor_api.action_event_start(self.context,
                mox.IgnoreArg())
        self.compute.conductor_api.action_event_finish(self.context,
                mox.IgnoreArg())
        self.mox.ReplayAll()
        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={}, filter_properties=[],
                injected_files=self.injected_files,
                admin_password=self.admin_pass, node=self.node,
                limits=self.limits)
| true | true |
1c3b91be38d7610a2ed63edb3407d29e0df7c1fa | 2,801 | py | Python | rgw/v2/lib/nfs_ganesha/nfslib.py | rpratap-bot/ceph-qe-scripts | 8a7090d6707a8e7b927eabfc9c9212f343a35bc4 | [
"MIT"
] | null | null | null | rgw/v2/lib/nfs_ganesha/nfslib.py | rpratap-bot/ceph-qe-scripts | 8a7090d6707a8e7b927eabfc9c9212f343a35bc4 | [
"MIT"
] | null | null | null | rgw/v2/lib/nfs_ganesha/nfslib.py | rpratap-bot/ceph-qe-scripts | 8a7090d6707a8e7b927eabfc9c9212f343a35bc4 | [
"MIT"
] | null | null | null | import os, sys
import logging
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../")))
from v2.lib.nfs_ganesha.write_io_info import IOInfo
import v2.utils.utils as utils
import v2.lib.manage_data as manage_date
log = logging.getLogger()
dir_info = {'basedir': 0,
'files': 10,
'subdir': 20
}
NFS_CONVENTIONS = {'basedir': 'bucket',
'file': 'object',
'subdir': 'object'}
class DoIO(object):
    """Performs write IO (bucket directories / object files) on an
    NFS-Ganesha mount point and records each operation for later RGW
    verification via IOInfo.

    Args:
        rgw_user_info (dict): RGW user info; must contain 'access_key'.
        mnt_pont (str): path of the NFS mount point to operate on.
    """

    def __init__(self, rgw_user_info, mnt_pont):
        self.rgw_user_info = rgw_user_info
        self.mnt_point = mnt_pont

    def write(self, io_type, fname, size=0):
        """Create a directory or file on the mount point and record its IO info.

        Parameters:
            io_type: basedir | subdir | file
            fname(char): file (or directory) name, relative to the mount point
            size(int): size (used only when io_type == 'file')

        Returns:
            bool: True on success, False on failure.
        """
        # io_type should be: basedir | subdir | file
        log.info('io_type: %s' % io_type)
        log.info('fname: %s' % fname)
        log.info('size: %s' % size)
        s3_conv = NFS_CONVENTIONS.get(io_type)
        ioinfo = IOInfo()
        path = os.path.abspath(self.mnt_point)
        full_path = os.path.join(path, fname)
        log.info('abs_path: %s' % full_path)
        try:
            if io_type == 'basedir' or io_type == 'subdir':
                log.info('creating dir, type: %s' % io_type)
                os.makedirs(full_path)
                io_info = {'name': os.path.basename(fname),
                           'type': 'dir',
                           's3_convention': s3_conv,
                           'bucket': 'self' if s3_conv == 'bucket' else fname.split('/')[0],
                           'md5': None
                           }
                log.info('io_info: %s' % io_info)
                ioinfo.add_io_info(self.rgw_user_info['access_key'], io_info)
            if io_type == 'file':
                log.info('io_type is file: %s' % io_type)
                log.info('creating file with size: %s' % size)
                # NOTE: `manage_date` is the (misspelled) module alias bound
                # at import time for v2.lib.manage_data.
                finfo = manage_date.io_generator(full_path, size)
                io_info = {'name': os.path.basename(fname),
                           'type': 'file',
                           's3_convention': s3_conv,
                           'bucket': fname.split('/')[0],
                           'md5': finfo['md5']}
                log.info('io_info: %s' % io_info)
                ioinfo.add_io_info(self.rgw_user_info['access_key'], io_info)
            # Bug fix: the original returned None on success but False on
            # failure; return True so callers can reliably test the result.
            return True
        except Exception as e:
            log.error('Write IO Execution failed')
            log.error(e)
            return False

    def delete(self):
        # Not implemented yet.
        pass

    def modify(self):
        # Not implemented yet.
        pass
class Config(object):
    """Placeholder for NFS-Ganesha configuration handling (not implemented yet)."""
    def __init__(self):
        pass
| 26.67619 | 92 | 0.49875 | import os, sys
import logging
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../")))
from v2.lib.nfs_ganesha.write_io_info import IOInfo
import v2.utils.utils as utils
import v2.lib.manage_data as manage_date
log = logging.getLogger()
dir_info = {'basedir': 0,
'files': 10,
'subdir': 20
}
NFS_CONVENTIONS = {'basedir': 'bucket',
'file': 'object',
'subdir': 'object'}
class DoIO(object):
def __init__(self, rgw_user_info, mnt_pont):
self.rgw_user_info = rgw_user_info
self.mnt_point = mnt_pont
def write(self, io_type, fname, size=0):
log.info('io_type: %s' % io_type)
log.info('fname: %s' % fname)
log.info('size: %s' % size)
s3_conv = NFS_CONVENTIONS.get(io_type)
ioinfo = IOInfo()
path = os.path.abspath(self.mnt_point)
full_path = os.path.join(path, fname)
log.info('abs_path: %s' % full_path)
try:
if io_type == 'basedir' or io_type == 'subdir':
log.info('creating dir, type: %s' % io_type)
os.makedirs(full_path)
io_info = {'name': os.path.basename(fname),
'type': 'dir',
's3_convention': s3_conv,
'bucket': 'self' if s3_conv == 'bucket' else fname.split('/')[0],
'md5': None
}
log.info('io_info: %s' % io_info)
ioinfo.add_io_info(self.rgw_user_info['access_key'], io_info)
if io_type == 'file':
log.info('io_type is file: %s' % io_type)
log.info('creating file with size: %s' % size)
finfo = manage_date.io_generator(full_path, size)
io_info = {'name': os.path.basename(fname),
'type': 'file',
's3_convention': s3_conv,
'bucket': fname.split('/')[0],
'md5': finfo['md5']}
log.info('io_info: %s' % io_info)
ioinfo.add_io_info(self.rgw_user_info['access_key'], io_info)
except (Exception) as e:
log.error('Write IO Execution failed')
log.error(e)
return False
def delete(self):
pass
def modify(self):
pass
class Config(object):
def __init__(self):
pass
| true | true |
1c3b93a07647ff7f7f0a8577a0848b269271d644 | 3,507 | py | Python | temboo/core/Library/Labs/GoodCitizen/EcoByZip.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/Labs/GoodCitizen/EcoByZip.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | null | null | null | temboo/core/Library/Labs/GoodCitizen/EcoByZip.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | # -*- coding: utf-8 -*-
###############################################################################
#
# EcoByZip
# Returns a host of eco-conscious environmental information for a specified location based on zip code.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class EcoByZip(Choreography):
    """Choreo wrapper: returns eco-conscious environmental information for a
    location identified by zip code (see the file header for details)."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the EcoByZip Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(EcoByZip, self).__init__(temboo_session, '/Library/Labs/GoodCitizen/EcoByZip')
    def new_input_set(self):
        # Factory for the Choreo-specific InputSet.
        return EcoByZipInputSet()
    def _make_result_set(self, result, path):
        # Factory for the Choreo-specific ResultSet.
        return EcoByZipResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Factory for the Choreo-specific execution tracker.
        return EcoByZipChoreographyExecution(session, exec_id, path)
class EcoByZipInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the EcoByZip
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Generated Temboo-SDK setters: each method stores one named Choreo input.
    def set_APICredentials(self, value):
        """
        Set the value of the APICredentials input for this Choreo. ((optional, string) A JSON dictionary containing credentials for Genability. See Choreo documentation for formatting examples.)
        """
        super(EcoByZipInputSet, self)._set_input('APICredentials', value)
    def set_Limit(self, value):
        """
        Set the value of the Limit input for this Choreo. ((optional, integer) The number of facility records to search for in the Envirofacts database.)
        """
        super(EcoByZipInputSet, self)._set_input('Limit', value)
    def set_Zip(self, value):
        """
        Set the value of the Zip input for this Choreo. ((required, integer) The zip code for the user's current location.)
        """
        super(EcoByZipInputSet, self)._set_input('Zip', value)
class EcoByZipResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the EcoByZip Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): parameter name shadows the builtin `str`; kept as-is
        # because this is generated-SDK API surface.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from the Eco Choreo.)
        """
        return self._output.get('Response', None)
class EcoByZipChoreographyExecution(ChoreographyExecution):
    """Tracks an in-flight EcoByZip Choreo execution and builds its result set."""
    def _make_result_set(self, response, path):
        return EcoByZipResultSet(response, path)
| 37.709677 | 194 | 0.682635 | true | true | |
1c3b93e3b4d64425f75c0f00fe5ccfbefb2f3147 | 43 | py | Python | docs/source/plots/var_plot_irf.py | madhushree14/statsmodels | 04f00006a7aeb1c93d6894caa420698400da6c33 | [
"BSD-3-Clause"
] | 6,931 | 2015-01-01T11:41:55.000Z | 2022-03-31T17:03:24.000Z | docs/source/plots/var_plot_irf.py | madhushree14/statsmodels | 04f00006a7aeb1c93d6894caa420698400da6c33 | [
"BSD-3-Clause"
] | 6,137 | 2015-01-01T00:33:45.000Z | 2022-03-31T22:53:17.000Z | docs/source/plots/var_plot_irf.py | madhushree14/statsmodels | 04f00006a7aeb1c93d6894caa420698400da6c33 | [
"BSD-3-Clause"
] | 2,608 | 2015-01-02T21:32:31.000Z | 2022-03-31T07:38:30.000Z | from var_plots import plot_irf
plot_irf()
| 10.75 | 30 | 0.813953 | from var_plots import plot_irf
plot_irf()
| true | true |
1c3b953f96fe6a07f40d66ce24e0c53ff7627cbb | 687 | py | Python | tests/test_validators.py | rhblind/django-timberjack | 7a3208b8aa85d6c29f9978a26f6fa920363aa65d | [
"MIT"
] | null | null | null | tests/test_validators.py | rhblind/django-timberjack | 7a3208b8aa85d6c29f9978a26f6fa920363aa65d | [
"MIT"
] | 1 | 2020-02-11T23:41:38.000Z | 2020-02-11T23:41:38.000Z | tests/test_validators.py | rhblind/django-timberjack | 7a3208b8aa85d6c29f9978a26f6fa920363aa65d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from django.test import TestCase
from mongoengine import ValidationError
from timberjack.validators import validate_ip_address
class ValidateIPAddressTestCase(TestCase):
    """Tests for the ``validate_ip_address`` validator."""

    def test_validate_valid_ip_address(self):
        """Well-formed IPv4/IPv6 addresses are returned unchanged."""
        for addr in ('127.0.0.1', '1.1.1.1', '255.0.0.0', '0.0.0.0',
                     'fe80::1', '::1', '1:2:3:4:5:6:7:8'):
            self.assertEqual(validate_ip_address(addr), addr)

    def test_validate_invalid_ip_address(self):
        """Malformed addresses raise ValidationError."""
        for addr in ('256.1.1.1', '25.1.1.', '25,1,1,1', '25.1 .1.1',
                     '1:2', '::zzz', '12345::'):
            self.assertRaises(ValidationError, validate_ip_address, addr)
| 36.157895 | 102 | 0.646288 |
from django.test import TestCase
from mongoengine import ValidationError
from timberjack.validators import validate_ip_address
class ValidateIPAddressTestCase(TestCase):
def test_validate_valid_ip_address(self):
values = ['127.0.0.1', '1.1.1.1', '255.0.0.0', '0.0.0.0', 'fe80::1', '::1', '1:2:3:4:5:6:7:8']
for value in values:
self.assertEqual(validate_ip_address(value), value)
def test_validate_invalid_ip_address(self):
values = ['256.1.1.1', '25.1.1.', '25,1,1,1', '25.1 .1.1', '1:2', '::zzz', '12345::']
for value in values:
self.assertRaises(ValidationError, validate_ip_address, value)
| true | true |
1c3b9569eb9997c66926e02d8ea3de077756c3c3 | 976 | py | Python | out/fibo.py | FardaleM/metalang | 171557c540f3e2c051ec39ea150afb740c1f615f | [
"BSD-2-Clause"
] | 22 | 2017-04-24T10:00:45.000Z | 2021-04-01T10:11:05.000Z | out/fibo.py | FardaleM/metalang | 171557c540f3e2c051ec39ea150afb740c1f615f | [
"BSD-2-Clause"
] | 12 | 2017-03-26T18:34:21.000Z | 2019-03-21T19:13:03.000Z | out/fibo.py | FardaleM/metalang | 171557c540f3e2c051ec39ea150afb740c1f615f | [
"BSD-2-Clause"
] | 7 | 2017-10-14T13:33:33.000Z | 2021-03-18T15:18:50.000Z | import sys
char_ = None
def readchar_():
    """Return the current one-character lookahead from stdin.

    Lazily reads one character and caches it in the global ``char_`` until
    skipchar() consumes it.  Returns '' at end of input.
    """
    global char_
    if char_ == None:  # NOTE(review): `is None` would be more idiomatic
        char_ = sys.stdin.read(1)
    return char_
def skipchar():
    """Discard the cached lookahead so the next readchar_() call reads a
    fresh character from stdin."""
    global char_
    char_ = None
def stdinsep():
    """Skip any run of whitespace (newline, tab, carriage return, space) on stdin."""
    while True:
        c = readchar_()
        if c == '\n' or c == '\t' or c == '\r' or c == ' ':
            skipchar()
        else:
            return
def readint():
    """Parse an optionally-signed decimal integer from stdin.

    Consumes an optional leading '-' and then digits, stopping at the first
    non-digit (which is left in the lookahead).  Returns 0 when no digits
    are present (including at end of input, where readchar_() yields '').
    """
    c = readchar_()
    if c == '-':
        sign = -1
        skipchar()
    else:
        sign = 1
    out = 0
    while True:
        c = readchar_()
        if c <= '9' and c >= '0' :
            out = out * 10 + int(c)
            skipchar()
        else:
            return out * sign
"""La suite de fibonaci"""
def fibo0(a, b, i):
out0 = 0
a2 = a
b2 = b
for j in range(0, i + 2):
out0 += a2
tmp = b2
b2 += a2
a2 = tmp
return out0
# Read the two seed values and the term count from stdin (whitespace
# separated), then print the resulting sum without a trailing newline.
a = readint()
stdinsep()
b = readint()
stdinsep()
i = readint()
print("%d" % fibo0(a, b, i), end='')
| 16.827586 | 59 | 0.445697 | import sys
char_ = None
def readchar_():
global char_
if char_ == None:
char_ = sys.stdin.read(1)
return char_
def skipchar():
global char_
char_ = None
return
def stdinsep():
while True:
c = readchar_()
if c == '\n' or c == '\t' or c == '\r' or c == ' ':
skipchar()
else:
return
def readint():
c = readchar_()
if c == '-':
sign = -1
skipchar()
else:
sign = 1
out = 0
while True:
c = readchar_()
if c <= '9' and c >= '0' :
out = out * 10 + int(c)
skipchar()
else:
return out * sign
def fibo0(a, b, i):
    """Return the sum of the first i + 2 terms of the Fibonacci-like
    sequence starting a, b (each term is the sum of the two before it)."""
    out0 = 0
    a2 = a
    b2 = b
    for j in range(0, i + 2):
        out0 += a2
        tmp = b2
        b2 += a2
        a2 = tmp
    return out0
a = readint()
stdinsep()
b = readint()
stdinsep()
i = readint()
print("%d" % fibo0(a, b, i), end='')
| true | true |
1c3b962c436cc4031f7d176f27436338c96a5f33 | 109 | py | Python | raspy/components/potentiometers/__init__.py | cyrusbuilt/RasPy | 1e34840cc90ea7f19317e881162209d3d819eb09 | [
"MIT"
] | null | null | null | raspy/components/potentiometers/__init__.py | cyrusbuilt/RasPy | 1e34840cc90ea7f19317e881162209d3d819eb09 | [
"MIT"
] | null | null | null | raspy/components/potentiometers/__init__.py | cyrusbuilt/RasPy | 1e34840cc90ea7f19317e881162209d3d819eb09 | [
"MIT"
] | null | null | null | """This package provides objects for interfacing with potentiometers."""
__all__ = (
"potentiometer"
)
| 15.571429 | 72 | 0.715596 |
__all__ = (
    # Bug fix: the trailing comma makes this a one-element tuple; the
    # original was the plain string "potentiometer", which breaks
    # `from ... import *` (it iterates the string's characters).
    "potentiometer",
)
| true | true |
1c3b969f3970cd9cd64fcb8ab6a8f3889957e301 | 13,060 | py | Python | Utils/request_contributor_review.py | ryantoddtq/content | 50027658da7189e37e9514fc03057d1c1bc3209f | [
"MIT"
] | 2 | 2020-07-27T10:35:41.000Z | 2020-12-14T15:44:18.000Z | Utils/request_contributor_review.py | Axonius/content | e058add82b7422338015cf14591512b9aad4d3e9 | [
"MIT"
] | 30 | 2022-03-16T14:07:34.000Z | 2022-03-31T17:37:51.000Z | Utils/request_contributor_review.py | Axonius/content | e058add82b7422338015cf14591512b9aad4d3e9 | [
"MIT"
] | 1 | 2022-01-23T17:30:09.000Z | 2022-01-23T17:30:09.000Z | import argparse
import requests
import os
import sys
from pathlib import Path
import json
import sendgrid
from sendgrid.helpers.mail import *
REPO_OWNER = "demisto"
REPO_NAME = "content"
PACKS_FOLDER = "Packs"
CONTENT_REPO_FULL_PATH = os.environ.get('GITHUB_WORKSPACE') or os.path.abspath(
os.path.join(__file__, '../../../..'))
PACKS_FULL_PATH = os.path.join(CONTENT_REPO_FULL_PATH, PACKS_FOLDER)
PACK_METADATA = "pack_metadata.json"
XSOAR_SUPPORT = "xsoar"
PACK_METADATA_GITHUB_USER_FIELD = "githubUser"
PR_COMMENT_PREFIX = "pack has been modified on files:\n"
PACK_METADATA_SUPPORT_EMAIL_FIELD = "email"
PACK_METADATA_DEV_EMAIL_FIELD = "devEmail"
EMAIL_FROM = "do-not-reply@xsoar-contrib.pan.dev" # disable-secrets-detection
def check_if_user_exists(github_user, github_token=None, verify_ssl=True):
    """Return True when *github_user* exists on GitHub, False otherwise.

    Exits the process if the GitHub users API request fails.
    """
    url = f"https://api.github.com/users/{github_user}"
    headers = {'Authorization': 'Bearer ' + github_token} if github_token else {}
    response = requests.get(url, headers=headers, verify=verify_ssl)
    if response.status_code not in (200, 201):
        print(f"Failed in pulling user {github_user} data:\n{response.text}")
        sys.exit(1)
    # An existing user record always carries an 'id' field.
    return 'id' in response.json()
def get_pr_author(pr_number, github_token, verify_ssl):
    """Return the lower-cased GitHub login of the PR author.

    Exits the process if the GitHub pulls API request fails.
    """
    pr_endpoint = f"https://api.github.com/repos/{REPO_OWNER}/{REPO_NAME}/pulls/{pr_number}"
    headers = {"Authorization": "Bearer " + github_token} if github_token else {}
    response = requests.get(pr_endpoint, headers=headers, verify=verify_ssl)
    if response.status_code not in (200, 201):
        print(f"Failed in pulling PR {pr_number} data:\n{response.text}")
        sys.exit(1)
    return response.json().get('user', {}).get('login', '').lower()
def get_pr_modified_files_and_packs(pr_number, github_token, verify_ssl):
    """Return (set of modified pack names, list of modified file paths) for the PR.

    Exits the process if the GitHub pulls/files API request fails.
    """
    files_endpoint = f"https://api.github.com/repos/{REPO_OWNER}/{REPO_NAME}/pulls/{pr_number}/files"
    headers = {'Authorization': 'Bearer ' + github_token} if github_token else {}
    response = requests.get(files_endpoint, headers=headers, verify=verify_ssl)
    if response.status_code not in (200, 201):
        print(f"Failed in pulling PR {pr_number} data:\n{response.text}")
        sys.exit(1)
    pr_files = [changed.get('filename') for changed in response.json()]
    # A pack name is the path component right under the Packs/ folder.
    modified_packs = {Path(f).parts[1] for f in pr_files
                      if f.startswith(PACKS_FOLDER) and len(Path(f).parts) > 1}
    return modified_packs, pr_files
def tag_user_on_pr(reviewers: set, pr_number: str, pack: str, pack_files: set, github_token: str = None,
                   verify_ssl: bool = True):
    """Post a PR comment listing the pack's changed files and tagging each reviewer.

    Exits the process if the GitHub comments API call fails.
    """
    comments_endpoint = f"https://api.github.com/repos/{REPO_OWNER}/{REPO_NAME}/issues/{pr_number}/comments"
    headers = {"Authorization": "Bearer " + github_token} if github_token else {}
    # NOTE(review): `reviewers` and `pack_files` are sets, so the ordering of
    # the "- @user" lines and file list in the comment is not deterministic.
    reviewers_comment = "\n".join({f"- @{r}" for r in reviewers})
    pack_files_comment = "\n".join(pack_files)
    comment_body = {
        "body": f"### Your contributed {pack} {PR_COMMENT_PREFIX}\n"
                f"{pack_files_comment}\n"
                f" [Please review the changes here](https://github.com/demisto/content/pull/{pr_number}/files)\n"
                f"{reviewers_comment}"
    }
    response = requests.post(comments_endpoint, headers=headers, verify=verify_ssl, json=comment_body)
    if response.status_code not in [200, 201]:
        print(f"Failed posting comment on PR {pr_number}:\n{response.text}")
        sys.exit(1)
def get_pr_tagged_reviewers(pr_number, github_token, verify_ssl, pack):
    """Collect the reviewers already tagged on the PR for *pack*.

    Scans the github-actions[bot] comments previously posted by
    tag_user_on_pr() and extracts their "- @user" lines, so the same
    reviewer is not tagged twice.

    Returns:
        set: lower-cased GitHub logins already tagged for this pack.
    """
    result_tagged_reviewers = set()
    comments_endpoint = f"https://api.github.com/repos/{REPO_OWNER}/{REPO_NAME}/issues/{pr_number}/comments"
    headers = {"Authorization": "Bearer " + github_token} if github_token else {}
    response = requests.get(comments_endpoint, headers=headers, verify=verify_ssl)
    if response.status_code != requests.codes.ok:
        print(f"Failed requesting PR {pr_number} comments:\n{response.text}")
        sys.exit(1)
    comments_info = response.json()
    github_actions_bot_comments = [c.get('body', '') for c in comments_info if c.get('user', {}).get(
        'login') == "github-actions[bot]" and f"### Your contributed {pack} {PR_COMMENT_PREFIX}\n" in c.get('body', '')]
    for comment in github_actions_bot_comments:
        # NOTE(review): lstrip("- @") strips any leading run of '-', ' ' and
        # '@' characters, not the literal "- @" prefix; harmless here since
        # GitHub logins cannot start with those characters — confirm.
        tagged_reviewers = [line.lstrip("- @").rstrip("\n").lower() for line in comment.split('\n') if
                            line.startswith("- @")]
        result_tagged_reviewers.update(tagged_reviewers)
    return result_tagged_reviewers
def check_pack_and_request_review(pr_number, github_token=None, verify_ssl=True, email_api_token=None):
    """For every pack modified by the PR, notify the pack's contributors.

    Per pack (skipped for xsoar-supported packs and brand-new 1.0.0 packs):
    email the devEmail addresses, tag the githubUser reviewers on the PR,
    and fall back to the support email when neither channel succeeded.
    """
    modified_packs, modified_files = get_pr_modified_files_and_packs(pr_number=pr_number, github_token=github_token,
                                                                     verify_ssl=verify_ssl)
    pr_author = get_pr_author(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl)
    for pack in modified_packs:
        tagged_packs_reviewers = get_pr_tagged_reviewers(pr_number=pr_number, github_token=github_token,
                                                         verify_ssl=verify_ssl, pack=pack)
        reviewers = set()
        pack_metadata_path = os.path.join(PACKS_FULL_PATH, pack, PACK_METADATA)
        if not os.path.exists(pack_metadata_path):
            print(f"Not found {pack} {PACK_METADATA} file.")
            continue
        with open(pack_metadata_path, 'r') as pack_metadata_file:
            pack_metadata = json.load(pack_metadata_file)
        # Notify contributors if this is not new pack
        if pack_metadata.get('support') != XSOAR_SUPPORT and pack_metadata.get('currentVersion') != '1.0.0':
            notified_by_email = False
            # Bug fix: initialize notified_by_github here so the support-email
            # fallback below cannot raise NameError when the pack metadata has
            # no githubUser field.
            notified_by_github = False
            # Notify contributors by emailing them on dev email:
            if reviewers_emails := pack_metadata.get(PACK_METADATA_DEV_EMAIL_FIELD):
                reviewers_emails = reviewers_emails.split(',') if isinstance(reviewers_emails,
                                                                             str) else reviewers_emails
                notified_by_email = send_email_to_reviewers(
                    reviewers_emails=reviewers_emails,
                    api_token=email_api_token,
                    pack_name=pack,
                    pr_number=pr_number,
                    modified_files=modified_files
                )
            # Notify contributors by tagging them on github:
            if pack_reviewers := pack_metadata.get(PACK_METADATA_GITHUB_USER_FIELD):
                pack_reviewers = pack_reviewers if isinstance(pack_reviewers, list) else pack_reviewers.split(",")
                github_users = [u.lower() for u in pack_reviewers]
                for github_user in github_users:
                    user_exists = check_if_user_exists(github_user=github_user, github_token=github_token,
                                                       verify_ssl=verify_ssl)
                    if user_exists and github_user != pr_author and github_user not in tagged_packs_reviewers:
                        reviewers.add(github_user)
                        print(f"Found {github_user} default reviewer of pack {pack}")
                notified_by_github = check_reviewers(reviewers=reviewers, pr_author=pr_author,
                                                     version=pack_metadata.get('currentVersion'),
                                                     modified_files=modified_files, pack=pack, pr_number=pr_number,
                                                     github_token=github_token,
                                                     verify_ssl=verify_ssl)
            # Notify contributors by emailing them on support email:
            if (reviewers_emails := pack_metadata.get(
                    PACK_METADATA_SUPPORT_EMAIL_FIELD)) and not notified_by_github and not notified_by_email:
                reviewers_emails = reviewers_emails.split(',') if isinstance(reviewers_emails,
                                                                             str) else reviewers_emails
                send_email_to_reviewers(
                    reviewers_emails=reviewers_emails,
                    api_token=email_api_token,
                    pack_name=pack,
                    pr_number=pr_number,
                    modified_files=modified_files
                )
        elif pack_metadata.get('support') == XSOAR_SUPPORT:
            print(f"Skipping check of {pack} pack supported by {XSOAR_SUPPORT}")
        else:
            print(f"{pack} pack has no default github reviewer")
def check_reviewers(reviewers: set, pr_author: str, version: str, modified_files: list, pack: str,
                    pr_number: str, github_token: str, verify_ssl: bool) -> bool:
    """Tag the pack's reviewers on the PR and ask for a review.

    Args:
        reviewers: GitHub users that should review the changes.
        pr_author: login of the PR author.
        version: current pack version, from pack_metadata.
        modified_files: list of files modified by the PR.
        pack: pack name.
        pr_number: PR number on GitHub.
        github_token: GitHub token provided by the user.
        verify_ssl: whether to verify SSL certificates.

    Returns:
        True when contributors were notified via GitHub, else False.
    """
    if not reviewers:
        print(f'{pack} pack no reviewers were found.')
        return False
    # A 1.0.0 pack opened by xsoar-bot is a brand-new contribution:
    # there is nothing for previous contributors to review yet.
    if pr_author == 'xsoar-bot' and version == '1.0.0':
        return False
    pack_files = {file for file in modified_files
                  if file.startswith(PACKS_FOLDER) and Path(file).parts[1] == pack}
    tag_user_on_pr(
        reviewers=reviewers,
        pr_number=pr_number,
        pack=pack,
        pack_files=pack_files,
        github_token=github_token,
        verify_ssl=verify_ssl
    )
    return True
def send_email_to_reviewers(reviewers_emails: list, api_token: str, pack_name: str,
                            pr_number: str, modified_files: list) -> bool:
    """Compose a review-request mail and send it to the pack's contributors.

    Args:
        modified_files(list): modified files on pr
        reviewers_emails(list(str)): reviewers of the pack to send mail to them
        api_token(str): SendGrid API token used to send the mail
        pack_name(str): pack that was modified
        pr_number(str): github pr number

    Returns:
        True if the mail was sent, False on a non-2xx SendGrid response;
        exits the process if sending raises an exception.
    """
    pack_files = {file for file in modified_files if file.startswith(PACKS_FOLDER)
                  and Path(file).parts[1] == pack_name}
    modified_files_comment = ''.join([f'<li>{file}</li>' for file in pack_files])
    email_subject = f'Cortex XSOAR: Changes made to {pack_name} content pack'
    email_content = f"Hi,<br><br>Your contributed <b>{pack_name}</b> pack has been modified on files:<br>" \
                    f"<ul>{modified_files_comment}</ul>Please review the changes " \
                    f"<a href=\"https://github.com/demisto/content/pull/{pr_number}/files\">here</a>.<br><br>" \
                    f" Cortex XSOAR Content Team."
    sg = sendgrid.SendGridAPIClient(api_token)
    email_from = Email(EMAIL_FROM)
    to_email = reviewers_emails
    content = Content("text/html", email_content)
    mail = Mail(email_from, to_email, email_subject, content)
    try:
        response = sg.client.mail.send.post(request_body=mail.get())
        if response.status_code in range(200, 209):
            print(f'Email sent to {",".join(reviewers_emails)} contributors of pack {pack_name}')
            return True
        else:
            # Bug fix: this message was missing the f-prefix, so "{response}"
            # was printed literally instead of the actual response object.
            print(f'An error occurred during sending emails to contributors:\n{response}')
            return False
    except Exception as e:
        print(f'An error occurred during sending emails to contributors:\n{str(e)}')
        sys.exit(1)
def main():
    """CLI entry point: parse arguments and request contributor pack review."""
    parser = argparse.ArgumentParser(description='Requests contributor pack review.')
    parser.add_argument('-p', '--pr_number', help='Opened PR number')
    parser.add_argument('-g', '--github_token', help='Github token', required=False)
    parser.add_argument('-e', '--email_api_token', help='Email API Token', required=False)
    args = parser.parse_args()
    pr_number = args.pr_number
    github_token = args.github_token
    # Without a token, API calls are made with SSL verification disabled.
    verify_ssl = True if github_token else False
    email_api_token = args.email_api_token if args.email_api_token else ''
    if not verify_ssl:
        # Bug fix: `urllib3` was referenced without ever being imported,
        # raising NameError whenever no GitHub token was supplied; import it
        # locally where it is actually needed.
        import urllib3
        urllib3.disable_warnings()
    check_pack_and_request_review(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl,
                                  email_api_token=email_api_token)
if __name__ == "__main__":
main()
| 44.121622 | 120 | 0.638515 | import argparse
import requests
import os
import sys
from pathlib import Path
import json
import sendgrid
from sendgrid.helpers.mail import *
REPO_OWNER = "demisto"
REPO_NAME = "content"
PACKS_FOLDER = "Packs"
CONTENT_REPO_FULL_PATH = os.environ.get('GITHUB_WORKSPACE') or os.path.abspath(
os.path.join(__file__, '../../../..'))
PACKS_FULL_PATH = os.path.join(CONTENT_REPO_FULL_PATH, PACKS_FOLDER)
PACK_METADATA = "pack_metadata.json"
XSOAR_SUPPORT = "xsoar"
PACK_METADATA_GITHUB_USER_FIELD = "githubUser"
PR_COMMENT_PREFIX = "pack has been modified on files:\n"
PACK_METADATA_SUPPORT_EMAIL_FIELD = "email"
PACK_METADATA_DEV_EMAIL_FIELD = "devEmail"
EMAIL_FROM = "do-not-reply@xsoar-contrib.pan.dev"
def check_if_user_exists(github_user, github_token=None, verify_ssl=True):
user_endpoint = f"https://api.github.com/users/{github_user}"
headers = {'Authorization': 'Bearer ' + github_token} if github_token else {}
response = requests.get(user_endpoint, headers=headers, verify=verify_ssl)
if response.status_code not in [200, 201]:
print(f"Failed in pulling user {github_user} data:\n{response.text}")
sys.exit(1)
github_user_info = response.json()
if 'id' in github_user_info:
return True
else:
return False
def get_pr_author(pr_number, github_token, verify_ssl):
pr_endpoint = f"https://api.github.com/repos/{REPO_OWNER}/{REPO_NAME}/pulls/{pr_number}"
headers = {"Authorization": "Bearer " + github_token} if github_token else {}
response = requests.get(pr_endpoint, headers=headers, verify=verify_ssl)
if response.status_code not in [200, 201]:
print(f"Failed in pulling PR {pr_number} data:\n{response.text}")
sys.exit(1)
pr_info = response.json()
return pr_info.get('user', {}).get('login', '').lower()
def get_pr_modified_files_and_packs(pr_number, github_token, verify_ssl):
pr_endpoint = f"https://api.github.com/repos/{REPO_OWNER}/{REPO_NAME}/pulls/{pr_number}/files"
headers = {'Authorization': 'Bearer ' + github_token} if github_token else {}
response = requests.get(pr_endpoint, headers=headers, verify=verify_ssl)
if response.status_code not in [200, 201]:
print(f"Failed in pulling PR {pr_number} data:\n{response.text}")
sys.exit(1)
pr_changed_data = response.json()
pr_files = [f.get('filename') for f in pr_changed_data]
modified_packs = {Path(p).parts[1] for p in pr_files if p.startswith(PACKS_FOLDER) and len(Path(p).parts) > 1}
return modified_packs, pr_files
def tag_user_on_pr(reviewers: set, pr_number: str, pack: str, pack_files: set, github_token: str = None,
verify_ssl: bool = True):
comments_endpoint = f"https://api.github.com/repos/{REPO_OWNER}/{REPO_NAME}/issues/{pr_number}/comments"
headers = {"Authorization": "Bearer " + github_token} if github_token else {}
reviewers_comment = "\n".join({f"- @{r}" for r in reviewers})
pack_files_comment = "\n".join(pack_files)
comment_body = {
"body": f"### Your contributed {pack} {PR_COMMENT_PREFIX}\n"
f"{pack_files_comment}\n"
f" [Please review the changes here](https://github.com/demisto/content/pull/{pr_number}/files)\n"
f"{reviewers_comment}"
}
response = requests.post(comments_endpoint, headers=headers, verify=verify_ssl, json=comment_body)
if response.status_code not in [200, 201]:
print(f"Failed posting comment on PR {pr_number}:\n{response.text}")
sys.exit(1)
def get_pr_tagged_reviewers(pr_number, github_token, verify_ssl, pack):
result_tagged_reviewers = set()
comments_endpoint = f"https://api.github.com/repos/{REPO_OWNER}/{REPO_NAME}/issues/{pr_number}/comments"
headers = {"Authorization": "Bearer " + github_token} if github_token else {}
response = requests.get(comments_endpoint, headers=headers, verify=verify_ssl)
if response.status_code != requests.codes.ok:
print(f"Failed requesting PR {pr_number} comments:\n{response.text}")
sys.exit(1)
comments_info = response.json()
github_actions_bot_comments = [c.get('body', '') for c in comments_info if c.get('user', {}).get(
'login') == "github-actions[bot]" and f"### Your contributed {pack} {PR_COMMENT_PREFIX}\n" in c.get('body', '')]
for comment in github_actions_bot_comments:
tagged_reviewers = [line.lstrip("- @").rstrip("\n").lower() for line in comment.split('\n') if
line.startswith("- @")]
result_tagged_reviewers.update(tagged_reviewers)
return result_tagged_reviewers
def check_pack_and_request_review(pr_number, github_token=None, verify_ssl=True, email_api_token=None):
modified_packs, modified_files = get_pr_modified_files_and_packs(pr_number=pr_number, github_token=github_token,
verify_ssl=verify_ssl)
pr_author = get_pr_author(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl)
for pack in modified_packs:
tagged_packs_reviewers = get_pr_tagged_reviewers(pr_number=pr_number, github_token=github_token,
verify_ssl=verify_ssl, pack=pack)
reviewers = set()
pack_metadata_path = os.path.join(PACKS_FULL_PATH, pack, PACK_METADATA)
if not os.path.exists(pack_metadata_path):
print(f"Not found {pack} {PACK_METADATA} file.")
continue
with open(pack_metadata_path, 'r') as pack_metadata_file:
pack_metadata = json.load(pack_metadata_file)
if pack_metadata.get('support') != XSOAR_SUPPORT and pack_metadata.get('currentVersion') != '1.0.0':
notified_by_email = False
if reviewers_emails := pack_metadata.get(PACK_METADATA_DEV_EMAIL_FIELD):
reviewers_emails = reviewers_emails.split(',') if isinstance(reviewers_emails,
str) else reviewers_emails
notified_by_email = send_email_to_reviewers(
reviewers_emails=reviewers_emails,
api_token=email_api_token,
pack_name=pack,
pr_number=pr_number,
modified_files=modified_files
)
if pack_reviewers := pack_metadata.get(PACK_METADATA_GITHUB_USER_FIELD):
pack_reviewers = pack_reviewers if isinstance(pack_reviewers, list) else pack_reviewers.split(",")
github_users = [u.lower() for u in pack_reviewers]
for github_user in github_users:
user_exists = check_if_user_exists(github_user=github_user, github_token=github_token,
verify_ssl=verify_ssl)
if user_exists and github_user != pr_author and github_user not in tagged_packs_reviewers:
reviewers.add(github_user)
print(f"Found {github_user} default reviewer of pack {pack}")
notified_by_github = check_reviewers(reviewers=reviewers, pr_author=pr_author,
version=pack_metadata.get('currentVersion'),
modified_files=modified_files, pack=pack, pr_number=pr_number,
github_token=github_token,
verify_ssl=verify_ssl)
if (reviewers_emails := pack_metadata.get(
PACK_METADATA_SUPPORT_EMAIL_FIELD)) and not notified_by_github and not notified_by_email:
reviewers_emails = reviewers_emails.split(',') if isinstance(reviewers_emails,
str) else reviewers_emails
send_email_to_reviewers(
reviewers_emails=reviewers_emails,
api_token=email_api_token,
pack_name=pack,
pr_number=pr_number,
modified_files=modified_files
)
elif pack_metadata.get('support') == XSOAR_SUPPORT:
print(f"Skipping check of {pack} pack supported by {XSOAR_SUPPORT}")
else:
print(f"{pack} pack has no default github reviewer")
def check_reviewers(reviewers: set, pr_author: str, version: str, modified_files: list, pack: str,
pr_number: str, github_token: str, verify_ssl: bool) -> bool:
if reviewers:
if pr_author != 'xsoar-bot' or version != '1.0.0':
pack_files = {file for file in modified_files if file.startswith(PACKS_FOLDER)
and Path(file).parts[1] == pack}
tag_user_on_pr(
reviewers=reviewers,
pr_number=pr_number,
pack=pack,
pack_files=pack_files,
github_token=github_token,
verify_ssl=verify_ssl
)
return True
else:
return False
else:
print(f'{pack} pack no reviewers were found.')
return False
def send_email_to_reviewers(reviewers_emails: list, api_token: str, pack_name: str,
                            pr_number: str, modified_files: list) -> bool:
    """Email a pack's contributors about files modified in a PR.

    Args:
        reviewers_emails: Email addresses of the pack contributors to notify.
        api_token: SendGrid API token used to send the mail.
        pack_name: Name of the modified content pack.
        pr_number: Number of the PR that modified the pack.
        modified_files: All file paths modified in the PR.

    Returns:
        True if the email was sent successfully, False on an error response.
        Exits the process with status 1 if sending raises an exception.
    """
    # Restrict the notification to files that belong to this pack.
    pack_files = {file for file in modified_files if file.startswith(PACKS_FOLDER)
                  and Path(file).parts[1] == pack_name}
    modified_files_comment = ''.join([f'<li>{file}</li>' for file in pack_files])
    email_subject = f'Cortex XSOAR: Changes made to {pack_name} content pack'
    email_content = f"Hi,<br><br>Your contributed <b>{pack_name}</b> pack has been modified on files:<br>" \
                    f"<ul>{modified_files_comment}</ul>Please review the changes " \
                    f"<a href=\"https://github.com/demisto/content/pull/{pr_number}/files\">here</a>.<br><br>" \
                    f" Cortex XSOAR Content Team."
    sg = sendgrid.SendGridAPIClient(api_token)
    email_from = Email(EMAIL_FROM)
    to_email = reviewers_emails
    content = Content("text/html", email_content)
    mail = Mail(email_from, to_email, email_subject, content)
    try:
        response = sg.client.mail.send.post(request_body=mail.get())
        # NOTE(review): only 200-208 count as success here — confirm that is intended
        # (SendGrid normally answers 202 for accepted mail).
        if response.status_code in range(200, 209):
            print(f'Email sent to {",".join(reviewers_emails)} contributors of pack {pack_name}')
            return True
        else:
            # Bug fix: this message previously lacked the f-prefix, so the literal
            # text "{response}" was printed instead of the response details.
            print(f'An error occurred during sending emails to contributors:\n{response}')
            return False
    except Exception as e:
        print(f'An error occurred during sending emails to contributors:\n{str(e)}')
        sys.exit(1)
def main():
    """Parse CLI arguments and trigger the contributor pack review flow."""
    parser = argparse.ArgumentParser(description='Requests contributor pack review.')
    parser.add_argument('-p', '--pr_number', help='Opened PR number')
    parser.add_argument('-g', '--github_token', help='Github token', required=False)
    parser.add_argument('-e', '--email_api_token', help='Email API Token', required=False)
    args = parser.parse_args()
    pr_number = args.pr_number
    github_token = args.github_token
    # Only verify SSL when an authenticated (token-based) request is made.
    verify_ssl = bool(github_token)
    email_api_token = args.email_api_token or ''
    if not verify_ssl:
        # Suppress the noisy InsecureRequestWarning for unverified requests.
        urllib3.disable_warnings()
    check_pack_and_request_review(pr_number=pr_number, github_token=github_token, verify_ssl=verify_ssl,
                                  email_api_token=email_api_token)


if __name__ == "__main__":
    main()
| true | true |
1c3b9740aebd671c6731655fdc43f882dd7cb058 | 785 | py | Python | lesson-2/task5.py | GintoGloss/GeekUniversity-Python | b30da872bd5c68905ab66485ca06bdf3008b3995 | [
"Unlicense"
] | null | null | null | lesson-2/task5.py | GintoGloss/GeekUniversity-Python | b30da872bd5c68905ab66485ca06bdf3008b3995 | [
"Unlicense"
] | null | null | null | lesson-2/task5.py | GintoGloss/GeekUniversity-Python | b30da872bd5c68905ab66485ca06bdf3008b3995 | [
"Unlicense"
] | null | null | null | # 5. Реализовать структуру «Рейтинг», представляющую собой не возрастающий набор натуральных чисел. У пользователя
# необходимо запрашивать новый элемент рейтинга. Если в рейтинге существуют элементы с одинаковыми значениями,
# то новый элемент с тем же значением должен разместиться после них.
rating_list = [7, 5, 3, 3, 2]
rating_len = len(rating_list)
new_elem = input("Введите новый элемент рейтинга: ")
while (not new_elem.isdecimal()) or int(new_elem) == 0:
new_elem = input("Нужно ввести натуральное число! ")
new_elem = int(new_elem)
for index, elem in enumerate(rating_list[::-1]):
if elem >= new_elem:
rating_list.insert(rating_len - index, new_elem)
break
if len(rating_list) == rating_len:
rating_list.insert(0, new_elem)
print(rating_list)
| 37.380952 | 114 | 0.743949 |
rating_list = [7, 5, 3, 3, 2]
rating_len = len(rating_list)
new_elem = input("Введите новый элемент рейтинга: ")
while (not new_elem.isdecimal()) or int(new_elem) == 0:
new_elem = input("Нужно ввести натуральное число! ")
new_elem = int(new_elem)
for index, elem in enumerate(rating_list[::-1]):
if elem >= new_elem:
rating_list.insert(rating_len - index, new_elem)
break
if len(rating_list) == rating_len:
rating_list.insert(0, new_elem)
print(rating_list)
| true | true |
1c3b974fc9bd283a772dd33492c5eb9514f396f6 | 1,202 | py | Python | __init__.py | analyzeDFIR/analyzeEVTX-parser | b67f8698eee1223017f9d10e7bcdddb108324dd7 | [
"MIT"
] | null | null | null | __init__.py | analyzeDFIR/analyzeEVTX-parser | b67f8698eee1223017f9d10e7bcdddb108324dd7 | [
"MIT"
] | null | null | null | __init__.py | analyzeDFIR/analyzeEVTX-parser | b67f8698eee1223017f9d10e7bcdddb108324dd7 | [
"MIT"
] | null | null | null | ## -*- coding: UTF-8 -*-
## __init__.py
##
## Copyright (c) 2018 analyzeDFIR
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
from .evtx import EVTX, EVTXChunk, EVTXRecord
| 48.08 | 81 | 0.755408 | true | true | |
1c3b98dc295b3ff99d75165f474884a35f9f607a | 4,153 | py | Python | hardnet/pytorch_sift.py | empty16/hardnet.pytorch | 39242bc1db52ec13a3b07d92ff2559809ac0a557 | [
"BSD-3-Clause"
] | 3 | 2020-03-02T05:00:38.000Z | 2021-12-19T08:46:32.000Z | hardnet/pytorch_sift.py | empty16/hardnet.pytorch | 39242bc1db52ec13a3b07d92ff2559809ac0a557 | [
"BSD-3-Clause"
] | null | null | null | hardnet/pytorch_sift.py | empty16/hardnet.pytorch | 39242bc1db52ec13a3b07d92ff2559809ac0a557 | [
"BSD-3-Clause"
] | 1 | 2020-12-04T09:34:50.000Z | 2020-12-04T09:34:50.000Z | import torch
import math
import torch.nn.init
import torch.nn as nn
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import numpy as np
import torch.nn.functional as F
from Utils import L2Norm
def getPoolingKernel(kernel_size = 25):
    """Build a `kernel_size` x `kernel_size` triangular (bilinear) pooling kernel.

    The 1-D profile ramps linearly from the borders up to 1 at the centre;
    the 2-D kernel is its outer product, clamped to be non-negative.
    """
    half_width = float(np.floor(kernel_size / 2.))
    step = 1. / half_width
    ramp_up = np.arange(step / 2., 1., step)
    profile = np.hstack([ramp_up, [1], ramp_up[::-1]])
    return np.maximum(0, np.outer(profile.T, profile))
def get_bin_weight_kernel_size_and_stride(patch_size, num_spatial_bins):
    """Return (kernel_size, stride) for the spatial-bin pooling convolution."""
    usable_half = math.floor(patch_size / 2)
    stride = int(round(2.0 * usable_half / float(num_spatial_bins + 1)))
    return int(2 * stride - 1), stride
# PyTorch implementation of SIFT descriptor
class SIFTNet(nn.Module):
    """SIFT descriptor expressed with fixed-weight convolutions.

    Gradients are computed with hard-coded [-1, 0, 1] filters, weighted by a
    circular Gaussian centred on the patch, soft-assigned to `num_ang_bins`
    angular bins, pooled into spatial bins by a strided triangular-kernel
    convolution, then clipped and re-normalized as in classic SIFT.
    """

    def CircularGaussKernel(self, kernlen=21):
        """Return a `kernlen` x `kernlen` Gaussian weight masked to the inscribed circle."""
        halfSize = kernlen / 2
        r2 = float(halfSize*halfSize)
        sigma2 = 0.9 * r2
        disq = 0
        kernel = np.zeros((kernlen, kernlen))
        # Bug fix: `xrange` is Python 2 only; use `range` so this runs on Python 3.
        for y in range(kernlen):
            for x in range(kernlen):
                disq = (y - halfSize)*(y - halfSize) + (x - halfSize)*(x - halfSize)
                if disq < r2:
                    kernel[y,x] = math.exp(-disq / sigma2)
                else:
                    # Outside the inscribed circle the weight is zero.
                    kernel[y,x] = 0.
        return kernel

    def __init__(self, patch_size = 65, num_ang_bins = 8, num_spatial_bins = 4, clipval = 0.2):
        super(SIFTNet, self).__init__()
        gk = torch.from_numpy(self.CircularGaussKernel(kernlen=patch_size).astype(np.float32))
        self.bin_weight_kernel_size, self.bin_weight_stride = get_bin_weight_kernel_size_and_stride(patch_size, num_spatial_bins)
        self.gk = Variable(gk)
        self.num_ang_bins = num_ang_bins
        self.num_spatial_bins = num_spatial_bins
        self.clipval = clipval
        # Fixed (non-trainable) x- and y-gradient filters.
        self.gx = nn.Sequential(nn.Conv2d(1, 1, kernel_size=(1,3), bias = False))
        for l in self.gx:
            if isinstance(l, nn.Conv2d):
                l.weight.data = torch.from_numpy(np.array([[[[-1, 0, 1]]]], dtype=np.float32))
        self.gy = nn.Sequential(nn.Conv2d(1, 1, kernel_size=(3,1), bias = False))
        for l in self.gy:
            if isinstance(l, nn.Conv2d):
                l.weight.data = torch.from_numpy(np.array([[[[-1], [0], [1]]]], dtype=np.float32))
        # Strided conv with a triangular kernel performs the soft spatial-bin pooling.
        self.pk = nn.Sequential(nn.Conv2d(1, 1, kernel_size=(self.bin_weight_kernel_size, self.bin_weight_kernel_size),
                                          stride = (self.bin_weight_stride, self.bin_weight_stride),
                                          bias = False))
        for l in self.pk:
            if isinstance(l, nn.Conv2d):
                nw = getPoolingKernel(kernel_size = self.bin_weight_kernel_size)
                new_weights = np.array(nw.reshape((1, 1, self.bin_weight_kernel_size, self.bin_weight_kernel_size)))
                l.weight.data = torch.from_numpy(new_weights.astype(np.float32))

    def forward(self, x):
        # Gradient magnitude/orientation; 'replicate' padding keeps spatial size.
        gx = self.gx(F.pad(x, (1,1,0, 0), 'replicate'))
        gy = self.gy(F.pad(x, (0,0, 1,1), 'replicate'))
        mag = torch.sqrt(gx * gx + gy * gy + 1e-10)
        ori = torch.atan2(gy,gx + 1e-8)
        # Keep the Gaussian mask on the same device as the input.
        if x.is_cuda:
            self.gk = self.gk.cuda()
        else:
            self.gk = self.gk.cpu()
        # Down-weight gradients far from the patch centre.
        mag = mag * self.gk.expand_as(mag)
        # Soft-assign every orientation to its two neighbouring angular bins.
        o_big = (ori +2.0 * math.pi )/ (2.0 * math.pi) * float(self.num_ang_bins)
        bo0_big = torch.floor(o_big)
        wo1_big = o_big - bo0_big
        bo0_big = bo0_big % self.num_ang_bins
        bo1_big = (bo0_big + 1) % self.num_ang_bins
        wo0_big = (1.0 - wo1_big) * mag
        wo1_big = wo1_big * mag
        ang_bins = []
        for i in range(0, self.num_ang_bins):
            ang_bins.append(self.pk((bo0_big == i).float() * wo0_big + (bo1_big == i).float() * wo1_big))
        ang_bins = torch.cat(ang_bins,1)
        ang_bins = ang_bins.view(ang_bins.size(0), -1)
        # SIFT-style normalize -> clip -> renormalize.
        ang_bins = L2Norm()(ang_bins)
        ang_bins = torch.clamp(ang_bins, 0.,float(self.clipval))
        ang_bins = L2Norm()(ang_bins)
        return ang_bins
| 41.118812 | 129 | 0.601974 | import torch
import math
import torch.nn.init
import torch.nn as nn
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import numpy as np
import torch.nn.functional as F
from Utils import L2Norm
def getPoolingKernel(kernel_size = 25):
step = 1. / float(np.floor( kernel_size / 2.))
x_coef = np.arange(step/2., 1. ,step)
xc2 = np.hstack([x_coef,[1], x_coef[::-1]])
kernel = np.outer(xc2.T,xc2)
kernel = np.maximum(0,kernel)
return kernel
def get_bin_weight_kernel_size_and_stride(patch_size, num_spatial_bins):
bin_weight_stride = int(round(2.0 * math.floor(patch_size / 2) / float(num_spatial_bins + 1)))
bin_weight_kernel_size = int(2 * bin_weight_stride - 1)
return bin_weight_kernel_size, bin_weight_stride
class SIFTNet(nn.Module):
def CircularGaussKernel(self, kernlen=21):
halfSize = kernlen / 2
r2 = float(halfSize*halfSize)
sigma2 = 0.9 * r2
disq = 0
kernel = np.zeros((kernlen, kernlen))
for y in xrange(kernlen):
for x in xrange(kernlen):
disq = (y - halfSize)*(y - halfSize) + (x - halfSize)*(x - halfSize)
if disq < r2:
kernel[y,x] = math.exp(-disq / sigma2)
else:
kernel[y,x] = 0.
return kernel
def __init__(self, patch_size = 65, num_ang_bins = 8, num_spatial_bins = 4, clipval = 0.2):
super(SIFTNet, self).__init__()
gk = torch.from_numpy(self.CircularGaussKernel(kernlen=patch_size).astype(np.float32))
self.bin_weight_kernel_size, self.bin_weight_stride = get_bin_weight_kernel_size_and_stride(patch_size, num_spatial_bins)
self.gk = Variable(gk)
self.num_ang_bins = num_ang_bins
self.num_spatial_bins = num_spatial_bins
self.clipval = clipval
self.gx = nn.Sequential(nn.Conv2d(1, 1, kernel_size=(1,3), bias = False))
for l in self.gx:
if isinstance(l, nn.Conv2d):
l.weight.data = torch.from_numpy(np.array([[[[-1, 0, 1]]]], dtype=np.float32))
self.gy = nn.Sequential(nn.Conv2d(1, 1, kernel_size=(3,1), bias = False))
for l in self.gy:
if isinstance(l, nn.Conv2d):
l.weight.data = torch.from_numpy(np.array([[[[-1], [0], [1]]]], dtype=np.float32))
self.pk = nn.Sequential(nn.Conv2d(1, 1, kernel_size=(self.bin_weight_kernel_size, self.bin_weight_kernel_size),
stride = (self.bin_weight_stride, self.bin_weight_stride),
bias = False))
for l in self.pk:
if isinstance(l, nn.Conv2d):
nw = getPoolingKernel(kernel_size = self.bin_weight_kernel_size)
new_weights = np.array(nw.reshape((1, 1, self.bin_weight_kernel_size, self.bin_weight_kernel_size)))
l.weight.data = torch.from_numpy(new_weights.astype(np.float32))
def forward(self, x):
gx = self.gx(F.pad(x, (1,1,0, 0), 'replicate'))
gy = self.gy(F.pad(x, (0,0, 1,1), 'replicate'))
mag = torch.sqrt(gx * gx + gy * gy + 1e-10)
ori = torch.atan2(gy,gx + 1e-8)
if x.is_cuda:
self.gk = self.gk.cuda()
else:
self.gk = self.gk.cpu()
mag = mag * self.gk.expand_as(mag)
o_big = (ori +2.0 * math.pi )/ (2.0 * math.pi) * float(self.num_ang_bins)
bo0_big = torch.floor(o_big)
wo1_big = o_big - bo0_big
bo0_big = bo0_big % self.num_ang_bins
bo1_big = (bo0_big + 1) % self.num_ang_bins
wo0_big = (1.0 - wo1_big) * mag
wo1_big = wo1_big * mag
ang_bins = []
for i in range(0, self.num_ang_bins):
ang_bins.append(self.pk((bo0_big == i).float() * wo0_big + (bo1_big == i).float() * wo1_big))
ang_bins = torch.cat(ang_bins,1)
ang_bins = ang_bins.view(ang_bins.size(0), -1)
ang_bins = L2Norm()(ang_bins)
ang_bins = torch.clamp(ang_bins, 0.,float(self.clipval))
ang_bins = L2Norm()(ang_bins)
return ang_bins
| true | true |
1c3b9971b39385d358dbaa6778a6751566f1fb76 | 6,699 | py | Python | large_cohort/processes/q_ffl.py | garyxcheng/federated | ba7133ead6127af71ea9356e26bfd05c02f8324a | [
"Apache-2.0"
] | 330 | 2020-09-14T23:10:16.000Z | 2022-03-30T19:49:19.000Z | large_cohort/processes/q_ffl.py | garyxcheng/federated | ba7133ead6127af71ea9356e26bfd05c02f8324a | [
"Apache-2.0"
] | 52 | 2020-09-30T06:10:51.000Z | 2022-03-31T19:25:16.000Z | large_cohort/processes/q_ffl.py | garyxcheng/federated | ba7133ead6127af71ea9356e26bfd05c02f8324a | [
"Apache-2.0"
] | 119 | 2020-09-24T04:54:46.000Z | 2022-03-31T21:46:57.000Z | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the q-Fair Federated Learning (q-FFL) algorithm.
Based on the paper:
Fair Resource Allocation in Federated Learning.
Tian Li, Maziar Sanjabi, Ahmad Beirami, Virginia Smith. ICLR 2020.
https://arxiv.org/abs/1602.05629
Note that the primary distinction between this implementation and the algorithm
described in the paper above is that the paper weights each client by their loss
after training. This requires an extra pass over each client's dataset. In order
to reduce training time on clients, we use the loss computed as the client
trains to do the weighting in q-FFL.
"""
from typing import Any, Callable, Optional
import tensorflow as tf
import tensorflow_federated as tff
# Default server-side optimizer: plain SGD with learning rate 1.0, i.e. the
# server applies the averaged client update directly (standard FedAvg).
DEFAULT_SERVER_OPTIMIZER_FN = lambda: tf.keras.optimizers.SGD(learning_rate=1.0)
def build_keras_output_to_loss_fn(
    metric_builder: 'Callable[[], tf.keras.metrics.Metric]'):
  """Creates a function that computes the result of a `tf.keras` metric.

  Bug fix: `metric_builder` previously used the typing construct
  `Callable[[], tf.keras.metrics.Metric]` as a *default value* rather than a
  type annotation, so calling with no argument produced an unusable closure.
  It is now a required, annotated parameter.

  Args:
    metric_builder: No-arg callable returning a fresh `tf.keras.metrics.Metric`
      whose variable structure matches `output['loss']`.

  Returns:
    A function mapping a model output dict to the metric's scalar result.
  """

  def output_to_loss_fn(output):
    loss_variables = output['loss']
    metric = metric_builder()
    # Copy the aggregated loss variables into a fresh metric instance so the
    # metric itself can compute the final (scalar) result.
    tf.nest.map_structure(lambda a, b: a.assign(b), metric.variables,
                          loss_variables)
    return metric.result()

  return output_to_loss_fn
def build_q_ffl_process(
    model_fn: Callable[[], tff.learning.Model],
    fairness_parameter: tf.Tensor,
    client_optimizer_fn: Callable[[], tf.keras.optimizers.Optimizer],
    server_optimizer_fn: Callable[
        [], tf.keras.optimizers.Optimizer] = DEFAULT_SERVER_OPTIMIZER_FN,
    broadcast_process: Optional[tff.templates.MeasuredProcess] = None,
    model_update_aggregation_factory: Optional[
        tff.aggregators.WeightedAggregationFactory] = None,
    use_experimental_simulation_loop: bool = False,
    output_to_loss_fn: Optional[Callable[[Any], tf.Tensor]] = None,
) -> tff.templates.IterativeProcess:
  """Builds a `tff.templates.IterativeProcess` implementing q-FFL.

  This is federated averaging with one twist: each client's model update is
  weighted by its training loss raised to the power `fairness_parameter`
  (the "q" of q-FFL), following "Fair Resource Allocation in Federated
  Learning" (Li et al., ICLR 2020). Unlike the paper, the loss observed
  *during* client training is used for the weighting, avoiding an extra pass
  over each client dataset. The returned process has the usual `initialize`
  and `next` methods, plus `get_model_weights`.

  Args:
    model_fn: No-arg callable returning a fresh `tff.learning.Model` on every
      invocation; it must not capture and reuse TensorFlow state.
    fairness_parameter: Scalar (convertible to `tf.float32`) exponent applied
      to the client loss when computing client weights.
    client_optimizer_fn: No-arg callable returning a `tf.keras.Optimizer`.
    server_optimizer_fn: No-arg callable returning a `tf.keras.Optimizer`;
      defaults to SGD with a learning rate of 1.0.
    broadcast_process: Optional `tff.templates.MeasuredProcess` with signature
      `(input_values@SERVER -> output_values@CLIENT)` used to broadcast server
      weights; defaults to `tff.federated_broadcast`.
    model_update_aggregation_factory: Optional
      `tff.aggregators.WeightedAggregationFactory` used to aggregate client
      updates; defaults to `tff.aggregators.MeanFactory`.
    use_experimental_simulation_loop: Whether to use the experimental dataset
      reduce loop (currently needed for performant GPU simulation).
    output_to_loss_fn: Optional callable mapping the result of
      `model_fn().report_local_outputs()` to a scalar loss tensor; defaults to
      extracting the `'loss'` entry.

  Returns:
    A `tff.templates.IterativeProcess`.
  """
  loss_fn = output_to_loss_fn if output_to_loss_fn is not None else (
      lambda output: output['loss'])

  def client_weighting(client_output):
    # q-FFL weighting: the client's loss raised to the fairness exponent q.
    return tf.math.pow(loss_fn(client_output), fairness_parameter)

  return tff.learning.build_federated_averaging_process(
      model_fn=model_fn,
      client_optimizer_fn=client_optimizer_fn,
      server_optimizer_fn=server_optimizer_fn,
      client_weighting=client_weighting,
      broadcast_process=broadcast_process,
      model_update_aggregation_factory=model_update_aggregation_factory,
      use_experimental_simulation_loop=use_experimental_simulation_loop)
| 46.520833 | 80 | 0.753247 |
from typing import Any, Callable, Optional
import tensorflow as tf
import tensorflow_federated as tff
DEFAULT_SERVER_OPTIMIZER_FN = lambda: tf.keras.optimizers.SGD(learning_rate=1.0)
def build_keras_output_to_loss_fn(
metric_builder=Callable[[], tf.keras.metrics.Metric]):
def output_to_loss_fn(output):
loss_variables = output['loss']
metric = metric_builder()
tf.nest.map_structure(lambda a, b: a.assign(b), metric.variables,
loss_variables)
return metric.result()
return output_to_loss_fn
def build_q_ffl_process(
model_fn: Callable[[], tff.learning.Model],
fairness_parameter: tf.Tensor,
client_optimizer_fn: Callable[[], tf.keras.optimizers.Optimizer],
server_optimizer_fn: Callable[
[], tf.keras.optimizers.Optimizer] = DEFAULT_SERVER_OPTIMIZER_FN,
broadcast_process: Optional[tff.templates.MeasuredProcess] = None,
model_update_aggregation_factory: Optional[
tff.aggregators.WeightedAggregationFactory] = None,
use_experimental_simulation_loop: bool = False,
output_to_loss_fn: Optional[Callable[[Any], tf.Tensor]] = None,
) -> tff.templates.IterativeProcess:
if output_to_loss_fn is None:
output_to_loss_fn = lambda x: x['loss']
def client_weighting(client_output):
loss = output_to_loss_fn(client_output)
return tf.math.pow(loss, fairness_parameter)
return tff.learning.build_federated_averaging_process(
model_fn=model_fn,
client_optimizer_fn=client_optimizer_fn,
server_optimizer_fn=server_optimizer_fn,
client_weighting=client_weighting,
broadcast_process=broadcast_process,
model_update_aggregation_factory=model_update_aggregation_factory,
use_experimental_simulation_loop=use_experimental_simulation_loop)
| true | true |
1c3b9a147a8ab5ee9b8b9425ba2e1e83038eb05c | 59 | py | Python | detcon/__init__.py | isaaccorley/detcon-pytorch | a5e03faf0c27bdbe64b72625873c0b2d3a696f04 | [
"MIT"
] | 5 | 2021-10-30T05:10:42.000Z | 2022-03-26T08:44:17.000Z | detcon/__init__.py | isaaccorley/detcon-pytorch | a5e03faf0c27bdbe64b72625873c0b2d3a696f04 | [
"MIT"
] | 2 | 2021-12-20T08:52:50.000Z | 2021-12-28T15:14:48.000Z | detcon/__init__.py | isaaccorley/detcon-pytorch | a5e03faf0c27bdbe64b72625873c0b2d3a696f04 | [
"MIT"
] | 1 | 2022-02-01T11:02:40.000Z | 2022-02-01T11:02:40.000Z | from . import datasets, losses
from .models import DetConB
| 19.666667 | 30 | 0.79661 | from . import datasets, losses
from .models import DetConB
| true | true |
1c3b9a41211bbbc44d403ed53a0a4be53c4080f8 | 580 | py | Python | Set Perspective View.py | TSRChapman/LandArchTools | c8e2dd713e85c04a1d81ea959a7c8ce8ad8a7666 | [
"BSD-3-Clause"
] | null | null | null | Set Perspective View.py | TSRChapman/LandArchTools | c8e2dd713e85c04a1d81ea959a7c8ce8ad8a7666 | [
"BSD-3-Clause"
] | null | null | null | Set Perspective View.py | TSRChapman/LandArchTools | c8e2dd713e85c04a1d81ea959a7c8ce8ad8a7666 | [
"BSD-3-Clause"
] | null | null | null | '''
Copyright <2021> <Thomas Chapman>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import rhinoscriptsyntax as rs
# Run the Rhino command that switches the current (world) view to perspective.
rs.Command("_setview _w _p")
| 41.428571 | 118 | 0.768966 |
import rhinoscriptsyntax as rs
rs.Command("_setview _w _p")
| true | true |
1c3b9b2da3dc917c8474a8fa1acb20b854120495 | 490 | py | Python | lists/migrations/0001_initial.py | luizppa/tdd-project | 9ded60b138c45bc24f670096bfedad4364ed1212 | [
"MIT"
] | null | null | null | lists/migrations/0001_initial.py | luizppa/tdd-project | 9ded60b138c45bc24f670096bfedad4364ed1212 | [
"MIT"
] | null | null | null | lists/migrations/0001_initial.py | luizppa/tdd-project | 9ded60b138c45bc24f670096bfedad4364ed1212 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2022-01-26 21:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial Django migration: creates the `Item` table with
    # only its implicit auto-incrementing primary key.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
    ]
| 21.304348 | 114 | 0.597959 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| true | true |
1c3b9d7214b7179f554910b926d12afb4f426760 | 5,455 | py | Python | packages/python/plotly/plotly/validators/bar/_marker.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/bar/_marker.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/bar/_marker.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the `marker` property of `bar` traces.

    Auto-generated plotly.py code: delegates everything to
    `CompoundValidator`, wiring the property name, its parent trace, the
    compound data class (`Marker`) and the user-facing property docs.
    """

    def __init__(self, plotly_name="marker", parent_name="bar", **kwargs):
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Marker"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `marker.colorscale`. Has an
                effect only if in `marker.color` is set to a
                numerical array. In case `colorscale` is
                unspecified or `autocolorscale` is true, the
                default palette will be chosen according to
                whether numbers in the `color` array are all
                positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                in `marker.color`) or the bounds set in
                `marker.cmin` and `marker.cmax` Has an effect
                only if in `marker.color` is set to a numerical
                array. Defaults to `false` when `marker.cmin`
                and `marker.cmax` are set by the user.
            cmax
                Sets the upper bound of the color domain. Has
                an effect only if in `marker.color` is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `marker.cmin` and/or `marker.cmax` to
                be equidistant to this point. Has an effect
                only if in `marker.color` is set to a numerical
                array. Value should have the same units as in
                `marker.color`. Has no effect when
                `marker.cauto` is `false`.
            cmin
                Sets the lower bound of the color domain. Has
                an effect only if in `marker.color` is set to a
                numerical array. Value should have the same
                units as in `marker.color` and if set,
                `marker.cmax` must be set as well.
            color
                Sets the marker color. It accepts either a
                specific color or an array of numbers that are
                mapped to the colorscale relative to the max
                and min values of the array or relative to
                `marker.cmin` and `marker.cmax` if set.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorbar
                :class:`plotly.graph_objects.bar.marker.ColorBa
                r` instance or dict with compatible properties
            colorscale
                Sets the colorscale. Has an effect only if in
                `marker.color` is set to a numerical array. The
                colorscale must be an array containing arrays
                mapping a normalized value to an rgb, rgba,
                hex, hsl, hsv, or named color string. At
                minimum, a mapping for the lowest (0) and
                highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
                To control the bounds of the colorscale in
                color space, use `marker.cmin` and
                `marker.cmax`. Alternatively, `colorscale` may
                be a palette name string of the following list:
                Blackbody,Bluered,Blues,Cividis,Earth,Electric,
                Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
                Bu,Reds,Viridis,YlGnBu,YlOrRd.
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for `color`.
            line
                :class:`plotly.graph_objects.bar.marker.Line`
                instance or dict with compatible properties
            opacity
                Sets the opacity of the bars.
            opacitysrc
                Sets the source reference on Chart Studio Cloud
                for `opacity`.
            pattern
                Sets the pattern within the marker.
            reversescale
                Reverses the color mapping if true. Has an
                effect only if in `marker.color` is set to a
                numerical array. If true, `marker.cmin` will
                correspond to the last color in the array and
                `marker.cmax` will correspond to the first
                color.
            showscale
                Determines whether or not a colorbar is
                displayed for this trace. Has an effect only if
                in `marker.color` is set to a numerical array.
""",
            ),
            **kwargs,
        )
| 48.274336 | 74 | 0.550504 | import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="marker", parent_name="bar", **kwargs):
super(MarkerValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Marker"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color` is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color` is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if in `marker.color` is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.bar.marker.ColorBa
r` instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use `marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
line
:class:`plotly.graph_objects.bar.marker.Line`
instance or dict with compatible properties
opacity
Sets the opacity of the bars.
opacitysrc
Sets the source reference on Chart Studio Cloud
for `opacity`.
pattern
Sets the pattern within the marker.
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color` is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color` is set to a numerical array.
""",
),
**kwargs,
)
| true | true |
1c3b9e7b099769d60c76419175e7191bea797836 | 84 | py | Python | Dependencies/dcmtk/__init__.py | tclarke/coan | 3f3b179a98afaf5b0f16fea452dbe43fe18ab0ad | [
"MIT"
] | 1 | 2016-05-08T08:14:24.000Z | 2016-05-08T08:14:24.000Z | Dependencies/dcmtk/__init__.py | tclarke/coan | 3f3b179a98afaf5b0f16fea452dbe43fe18ab0ad | [
"MIT"
] | null | null | null | Dependencies/dcmtk/__init__.py | tclarke/coan | 3f3b179a98afaf5b0f16fea452dbe43fe18ab0ad | [
"MIT"
] | 2 | 2015-12-03T07:01:44.000Z | 2021-04-21T07:35:38.000Z | __doc__ = """DICOM I/O Library."""
package = {'downloadLocation':'dcmtk-3.5.4.zip'}
| 28 | 48 | 0.654762 | __doc__ = """DICOM I/O Library."""
package = {'downloadLocation':'dcmtk-3.5.4.zip'}
| true | true |
1c3ba0254b9dd006118668e7ac0f5ea277646eb6 | 2,189 | py | Python | uwsgi_tasks/utils.py | zanachka/uwsgi_tasks | cad3e72c8bd66979c6ac23a1cc68caff176435ab | [
"MIT"
] | 92 | 2015-01-21T06:25:18.000Z | 2022-02-23T22:29:05.000Z | uwsgi_tasks/utils.py | zanachka/uwsgi_tasks | cad3e72c8bd66979c6ac23a1cc68caff176435ab | [
"MIT"
] | 17 | 2015-04-30T21:02:05.000Z | 2021-03-09T17:30:00.000Z | uwsgi_tasks/utils.py | zanachka/uwsgi_tasks | cad3e72c8bd66979c6ac23a1cc68caff176435ab | [
"MIT"
] | 13 | 2015-04-30T20:54:36.000Z | 2022-02-12T17:06:15.000Z | # -*- coding: utf-8 -*-
import six
from importlib import import_module
def import_by_path(dotted_path):
    """Resolve *dotted_path* to the attribute/class named by its last
    component, importing the containing module along the way.

    A path with no dot at all is looked up on ``__main__``.  Every failure
    mode is surfaced as :exc:`ImportError`.  Adapted from Django 1.7.
    """
    try:
        if not dotted_path.count('.'):
            dotted_path = '.'.join(['__main__', dotted_path])
        module_path, class_name = dotted_path.rsplit('.', 1)
    except ValueError:
        raise ImportError(
            '"{}" doesn\'t look like a module path'.format(dotted_path))
    try:
        module = import_module(module_path)
    except ImportError as ex:
        raise ImportError('Failed to import "{}" - {}'.format(dotted_path, ex))
    # getattr with a sentinel default reports only a genuinely missing
    # attribute (the AttributeError case) as an ImportError.
    sentinel = object()
    attribute = getattr(module, class_name, sentinel)
    if attribute is sentinel:
        raise ImportError(
            'Module "{}" does not define a "{}" attribute/class'.format(
                dotted_path, class_name))
    return attribute
def get_function_path(function):
    """Return the dotted path of *function* (as a string) so it can be
    re-imported later with `import_string`.

    Strings are assumed to already be dotted paths and pass through
    unchanged; otherwise the path is built from ``__module__`` (falling
    back to ``__main__``) and ``__name__``.
    """
    if isinstance(function, six.string_types):
        return function
    module_name = getattr(function, '__module__', '__main__')
    if module_name:
        return '.'.join([module_name, function.__name__])
    return function.__name__
def django_setup(settings_module=None):
    """Bootstrap Django unless it is already configured.

    Must run before any task executes on a spooler or mule process.  When
    *settings_module* is given it is exported through Django's settings
    environment variable before setup.
    """
    from django.conf import settings, ENVIRONMENT_VARIABLE
    if settings.configured:
        return  # Django is already up, nothing to do
    if settings_module:
        import os
        os.environ[ENVIRONMENT_VARIABLE] = settings_module
    try:
        from django import setup  # django >= 1.7
    except ImportError:
        setup = settings._setup  # django < 1.7 fallback
    setup()
class ProxyDict(dict):
    """Dict that installs itself under *key* inside *dict_instance*.

    Any value previously stored under *key* is copied into the proxy,
    after which the proxy replaces it, so later mutations of the proxy are
    visible through the parent dictionary.
    """

    def __init__(self, dict_instance, key):
        super(ProxyDict, self).__init__()
        self.key = key
        # Sentinel lookup: copy whatever was stored before, even falsy values.
        missing = object()
        previous = dict_instance.get(self.key, missing)
        if previous is not missing:
            self.update(previous)
        dict_instance[self.key] = self
| 25.16092 | 79 | 0.638191 |
import six
from importlib import import_module
def import_by_path(dotted_path):
try:
if not dotted_path.count('.'):
dotted_path = '.'.join(['__main__', dotted_path])
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = '"{}" doesn\'t look like a module path'.format(dotted_path)
raise ImportError(msg)
try:
module = import_module(module_path)
except ImportError as ex:
raise ImportError('Failed to import "{}" - {}'.format(dotted_path, ex))
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "{}" does not define a "{}" attribute/class'.format(
dotted_path, class_name)
raise ImportError(msg)
def get_function_path(function):
if isinstance(function, six.string_types):
return function
func_path = []
module = getattr(function, '__module__', '__main__')
if module:
func_path.append(module)
func_path.append(function.__name__)
return '.'.join(func_path)
def django_setup(settings_module=None):
from django.conf import settings, ENVIRONMENT_VARIABLE
if settings.configured:
return
if settings_module:
import os
os.environ[ENVIRONMENT_VARIABLE] = settings_module
try:
# django > 1.7
from django import setup
except ImportError:
# django < 1.7
def setup():
settings._setup()
setup()
class ProxyDict(dict):
def __init__(self, dict_instance, key):
super(ProxyDict, self).__init__()
self.key = key
if self.key in dict_instance:
self.update(dict_instance[self.key])
dict_instance[self.key] = self
| true | true |
1c3ba176fbd1c0dd6f9f2e8c9fd3ac6065d5c80f | 8,368 | py | Python | nova/virt/libvirt/lvm.py | changbai1980/nova | 2739b6273161b82832f1ccdece1da3527097961c | [
"Apache-2.0"
] | 1 | 2019-05-11T22:40:48.000Z | 2019-05-11T22:40:48.000Z | nova/virt/libvirt/lvm.py | changbai1980/nova | 2739b6273161b82832f1ccdece1da3527097961c | [
"Apache-2.0"
] | null | null | null | nova/virt/libvirt/lvm.py | changbai1980/nova | 2739b6273161b82832f1ccdece1da3527097961c | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo.config import cfg
import six
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.openstack.common import units
from nova.virt.libvirt import utils
CONF = cfg.CONF
CONF.import_opt('instances_path', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
def create_volume(vg, lv, size, sparse=False):
    """Create LVM image.
    Creates a LVM image with given size.
    :param vg: existing volume group which should hold this image
    :param lv: name for this image (logical volume)
    :size: size of image in bytes
    :sparse: create sparse logical volume
    :raises RuntimeError: if the volume group cannot hold the request
    """
    vg_info = get_volume_group_info(vg)
    free_space = vg_info['free']
    def check_size(vg, lv, size):
        # Guard: refuse to allocate more than the VG currently has free.
        if size > free_space:
            raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.'
                                 ' Only %(free_space)db available,'
                                 ' but %(size)db required'
                                 ' by volume %(lv)s.') %
                               {'vg': vg,
                                'free_space': free_space,
                                'size': size,
                                'lv': lv})
    if sparse:
        # Sparse volumes only reserve a small preallocation (64 MiB) up
        # front; the virtual size may exceed current free space, which is
        # merely warned about, not rejected.
        preallocated_space = 64 * units.Mi
        check_size(vg, lv, preallocated_space)
        if free_space < size:
            LOG.warn(_LW('Volume group %(vg)s will not be able'
                         ' to hold sparse volume %(lv)s.'
                         ' Virtual volume size is %(size)db,'
                         ' but free space on volume group is'
                         ' only %(free_space)db.'),
                     {'vg': vg,
                      'free_space': free_space,
                      'size': size,
                      'lv': lv})
        cmd = ('lvcreate', '-L', '%db' % preallocated_space,
               '--virtualsize', '%db' % size, '-n', lv, vg)
    else:
        # Dense volume: the full size must fit in the VG right now.
        check_size(vg, lv, size)
        cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg)
    utils.execute(*cmd, run_as_root=True, attempts=3)
def get_volume_group_info(vg):
    """Report capacity figures (in bytes) for volume group *vg*.

    :param vg: volume group name
    :returns: dict with ``total``, ``free`` and ``used`` byte counts
    :raises RuntimeError: when the ``vgs`` output does not describe
        exactly one volume group
    """
    out, err = utils.execute('vgs', '--noheadings', '--nosuffix',
                             '--separator', '|',
                             '--units', 'b', '-o', 'vg_size,vg_free', vg,
                             run_as_root=True)
    fields = out.split('|')
    if len(fields) != 2:
        raise RuntimeError(_("vg %s must be LVM volume group") % vg)
    total = int(fields[0])
    free = int(fields[1])
    return {'total': total,
            'free': free,
            'used': total - free}
def list_volumes(vg):
    """Return the logical volume names belonging to volume group *vg*.

    Example result: ``['volume-aaa', 'volume-bbb', 'volume-ccc']``.
    """
    out, err = utils.execute('lvs', '--noheadings', '-o', 'lv_name', vg,
                             run_as_root=True)
    # One LV name per output line, padded by lvs; strip the whitespace.
    return [name.strip() for name in out.splitlines()]
def volume_info(path):
    """Describe the logical volume at *path*.

    Runs ``lvs`` with the full vg/lv field set and zips the header row
    with the value row into a dict, e.g.
    ``{'LV': 'volume-aaa', 'Path': '/dev/vg/volume-aaa', ...}``.

    :param path: logical volume path
    :raises RuntimeError: if *path* is not an LVM logical volume
    """
    out, err = utils.execute('lvs', '-o', 'vg_all,lv_all',
                             '--separator', '|', path, run_as_root=True)
    rows = [line.split('|') for line in out.splitlines()]
    # Expect exactly a header row plus a single value row.
    if len(rows) != 2:
        raise RuntimeError(_("Path %s must be LVM logical volume") % path)
    headers, values = rows
    return dict(zip(headers, values))
def get_volume_size(path):
    """Return the size in bytes of the logical volume at *path*."""
    stdout, _stderr = utils.execute('blockdev', '--getsize64', path,
                                    run_as_root=True)
    return int(stdout)
def _zero_volume(path, volume_size):
    """Write zeros over the specified path
    :param path: logical volume path
    :param size: number of zeros to write
    """
    bs = units.Mi  # initial dd block size: 1 MiB
    direct_flags = ('oflag=direct',)
    sync_flags = ()
    remaining_bytes = volume_size
    # The loop efficiently writes zeros using dd,
    # and caters for versions of dd that don't have
    # the easier to use iflag=count_bytes option.
    # NOTE(review): the block arithmetic relies on Python 2 integer
    # division ('/'); under Python 3 these would become floats -- confirm
    # before porting.
    while remaining_bytes:
        zero_blocks = remaining_bytes / bs
        seek_blocks = (volume_size - remaining_bytes) / bs
        zero_cmd = ('dd', 'bs=%s' % bs,
                    'if=/dev/zero', 'of=%s' % path,
                    'seek=%s' % seek_blocks, 'count=%s' % zero_blocks)
        zero_cmd += direct_flags
        zero_cmd += sync_flags
        if zero_blocks:
            utils.execute(*zero_cmd, run_as_root=True)
        # Carry only the remainder that did not fit in whole blocks, then
        # shrink the block size by a factor of 1024 for the next pass.
        remaining_bytes %= bs
        bs /= units.Ki  # Limit to 3 iterations
        # Use O_DIRECT with initial block size and fdatasync otherwise
        direct_flags = ()
        sync_flags = ('conv=fdatasync',)
def clear_volume(path):
    """Wipe the contents of the logical volume at *path*.

    The strategy ('none', 'zero' or 'shred') and an optional size cap come
    from ``CONF.libvirt``; unrecognized strategies are logged and demoted
    to 'zero'.
    """
    method = CONF.libvirt.volume_clear
    if method not in ('none', 'shred', 'zero'):
        LOG.error(_LE("ignoring unrecognized volume_clear='%s' value"),
                  method)
        method = 'zero'
    if method == 'none':
        return
    # Optionally cap the number of bytes wiped (0 means wipe everything).
    clear_cap = int(CONF.libvirt.volume_clear_size) * units.Mi
    wipe_size = get_volume_size(path)
    if clear_cap != 0 and clear_cap < wipe_size:
        wipe_size = clear_cap
    if method == 'zero':
        # NOTE(p-draigbrady): shred with -n0 -z could also zero, however
        # only versions >= 8.22 perform as well as dd
        _zero_volume(path, wipe_size)
    elif method == 'shred':
        utils.execute('shred', '-n3', '-s%d' % wipe_size, path,
                      run_as_root=True)
    else:
        raise exception.Invalid(_("volume_clear='%s' is not handled")
                                % method)
def remove_volumes(paths):
    """Clear and delete each logical volume in *paths*.

    All volumes are attempted even when some fail; the collected error
    messages are then raised together as a single VolumesNotRemoved.
    """
    failures = []
    for path in paths:
        clear_volume(path)
        try:
            utils.execute('lvremove', '-f', path,
                          attempts=3, run_as_root=True)
        except processutils.ProcessExecutionError as exp:
            failures.append(six.text_type(exp))
    if failures:
        raise exception.VolumesNotRemoved(reason=', '.join(failures))
| 35.012552 | 78 | 0.573136 |
from oslo.config import cfg
import six
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.openstack.common import units
from nova.virt.libvirt import utils
CONF = cfg.CONF
CONF.import_opt('instances_path', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
def create_volume(vg, lv, size, sparse=False):
vg_info = get_volume_group_info(vg)
free_space = vg_info['free']
def check_size(vg, lv, size):
if size > free_space:
raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.'
' Only %(free_space)db available,'
' but %(size)db required'
' by volume %(lv)s.') %
{'vg': vg,
'free_space': free_space,
'size': size,
'lv': lv})
if sparse:
preallocated_space = 64 * units.Mi
check_size(vg, lv, preallocated_space)
if free_space < size:
LOG.warn(_LW('Volume group %(vg)s will not be able'
' to hold sparse volume %(lv)s.'
' Virtual volume size is %(size)db,'
' but free space on volume group is'
' only %(free_space)db.'),
{'vg': vg,
'free_space': free_space,
'size': size,
'lv': lv})
cmd = ('lvcreate', '-L', '%db' % preallocated_space,
'--virtualsize', '%db' % size, '-n', lv, vg)
else:
check_size(vg, lv, size)
cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg)
utils.execute(*cmd, run_as_root=True, attempts=3)
def get_volume_group_info(vg):
out, err = utils.execute('vgs', '--noheadings', '--nosuffix',
'--separator', '|',
'--units', 'b', '-o', 'vg_size,vg_free', vg,
run_as_root=True)
info = out.split('|')
if len(info) != 2:
raise RuntimeError(_("vg %s must be LVM volume group") % vg)
return {'total': int(info[0]),
'free': int(info[1]),
'used': int(info[0]) - int(info[1])}
def list_volumes(vg):
out, err = utils.execute('lvs', '--noheadings', '-o', 'lv_name', vg,
run_as_root=True)
return [line.strip() for line in out.splitlines()]
def volume_info(path):
out, err = utils.execute('lvs', '-o', 'vg_all,lv_all',
'--separator', '|', path, run_as_root=True)
info = [line.split('|') for line in out.splitlines()]
if len(info) != 2:
raise RuntimeError(_("Path %s must be LVM logical volume") % path)
return dict(zip(*info))
def get_volume_size(path):
out, _err = utils.execute('blockdev', '--getsize64', path,
run_as_root=True)
return int(out)
def _zero_volume(path, volume_size):
bs = units.Mi
direct_flags = ('oflag=direct',)
sync_flags = ()
remaining_bytes = volume_size
# the easier to use iflag=count_bytes option.
while remaining_bytes:
zero_blocks = remaining_bytes / bs
seek_blocks = (volume_size - remaining_bytes) / bs
zero_cmd = ('dd', 'bs=%s' % bs,
'if=/dev/zero', 'of=%s' % path,
'seek=%s' % seek_blocks, 'count=%s' % zero_blocks)
zero_cmd += direct_flags
zero_cmd += sync_flags
if zero_blocks:
utils.execute(*zero_cmd, run_as_root=True)
remaining_bytes %= bs
bs /= units.Ki # Limit to 3 iterations
# Use O_DIRECT with initial block size and fdatasync otherwise
direct_flags = ()
sync_flags = ('conv=fdatasync',)
def clear_volume(path):
volume_clear = CONF.libvirt.volume_clear
if volume_clear not in ('none', 'shred', 'zero'):
LOG.error(_LE("ignoring unrecognized volume_clear='%s' value"),
volume_clear)
volume_clear = 'zero'
if volume_clear == 'none':
return
volume_clear_size = int(CONF.libvirt.volume_clear_size) * units.Mi
volume_size = get_volume_size(path)
if volume_clear_size != 0 and volume_clear_size < volume_size:
volume_size = volume_clear_size
if volume_clear == 'zero':
# NOTE(p-draigbrady): we could use shred to do the zeroing
# with -n0 -z, however only versions >= 8.22 perform as well as dd
_zero_volume(path, volume_size)
elif volume_clear == 'shred':
utils.execute('shred', '-n3', '-s%d' % volume_size, path,
run_as_root=True)
else:
raise exception.Invalid(_("volume_clear='%s' is not handled")
% volume_clear)
def remove_volumes(paths):
errors = []
for path in paths:
clear_volume(path)
lvremove = ('lvremove', '-f', path)
try:
utils.execute(*lvremove, attempts=3, run_as_root=True)
except processutils.ProcessExecutionError as exp:
errors.append(six.text_type(exp))
if errors:
raise exception.VolumesNotRemoved(reason=(', ').join(errors))
| true | true |
1c3ba1c6237da82256774388a9e211c332d00c98 | 1,843 | py | Python | src/max_damage_player.py | cjyu81/MuTrainer | 4bc456b2c920585fd7210105af8a79952d0ec6a5 | [
"MIT"
] | null | null | null | src/max_damage_player.py | cjyu81/MuTrainer | 4bc456b2c920585fd7210105af8a79952d0ec6a5 | [
"MIT"
] | null | null | null | src/max_damage_player.py | cjyu81/MuTrainer | 4bc456b2c920585fd7210105af8a79952d0ec6a5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import asyncio
import time
from poke_env.player.player import Player
from poke_env.player.random_player import RandomPlayer
class MaxDamagePlayer(Player):
    """Player whose policy is to always use its strongest available attack."""

    def choose_move(self, battle):
        """Order the highest base-power move, or fall back to a random
        action (e.g. a switch) when no attack is available."""
        available = battle.available_moves
        if not available:
            # No attack possible this turn: let the base class pick randomly.
            return self.choose_random_move(battle)
        strongest = max(available, key=lambda candidate: candidate.base_power)
        return self.create_order(strongest)
"""class MaxDamagePlayer(Player):
def choose_move(self, battle):
# If the player can attack, it will
if battle.available_moves:
# Finds the best move among available ones
best_move = max(battle.available_moves, key=lambda move: move.base_power)
return self.create_order(best_move)
# If no attack is available, a random switch will be made
else:
if battle.opponent_active_pokemon.item == "Choice Scarf":
self.player_message("Archlei", "I found a scarf")
return self.choose_random_move(battle)
"""
async def main():
    """Benchmark MaxDamagePlayer against a random baseline and report wins."""
    start = time.time()
    n_battles = 5
    # We create two players.
    random_player = RandomPlayer(battle_format="gen8randombattle")
    max_damage_player = MaxDamagePlayer(battle_format="gen8randombattle")
    # Now, let's evaluate our player.
    await max_damage_player.battle_against(random_player, n_battles=n_battles)
    # BUG FIX: the message previously hard-coded "/ 100 battles" although
    # only 5 battles were fought; report the actual battle count instead.
    print(
        "Max damage player won %d / %d battles [this took %f seconds]"
        % (max_damage_player.n_won_battles, n_battles, time.time() - start)
    )
if __name__ == "__main__":
    # Drive the async entry point to completion on the default event loop.
    asyncio.get_event_loop().run_until_complete(main())
| 34.12963 | 86 | 0.655453 |
import asyncio
import time
from poke_env.player.player import Player
from poke_env.player.random_player import RandomPlayer
class MaxDamagePlayer(Player):
def choose_move(self, battle):
if battle.available_moves:
best_move = max(battle.available_moves, key=lambda move: move.base_power)
return self.create_order(best_move)
else:
return self.choose_random_move(battle)
async def main():
start = time.time()
random_player = RandomPlayer(battle_format="gen8randombattle")
max_damage_player = MaxDamagePlayer(battle_format="gen8randombattle")
await max_damage_player.battle_against(random_player, n_battles=5)
print(
"Max damage player won %d / 100 battles [this took %f seconds]"
% (max_damage_player.n_won_battles, time.time() - start)
)
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(main())
| true | true |
1c3ba2386427f1f11255eabaa7bd9f3ff079e2ff | 604 | py | Python | exercises/fr/solution_02_06.py | tuanducdesign/spacy-course | f8d092c5fa2997fccb3f367d174dce8667932b3d | [
"MIT"
] | null | null | null | exercises/fr/solution_02_06.py | tuanducdesign/spacy-course | f8d092c5fa2997fccb3f367d174dce8667932b3d | [
"MIT"
] | null | null | null | exercises/fr/solution_02_06.py | tuanducdesign/spacy-course | f8d092c5fa2997fccb3f367d174dce8667932b3d | [
"MIT"
] | null | null | null | import spacy
nlp = spacy.blank("fr")
# Import the Doc and Span classes
from spacy.tokens import Doc, Span
words = ["Elle", "aime", "David", "Bowie"]
spaces = [True, True, True, False]
# Create a doc from the words and spaces
doc = Doc(nlp.vocab, words=words, spaces=spaces)
print(doc.text)
# Create a span for "David Bowie" from the doc
# and assign it the label "PER"
span = Span(doc, 2, 4, label="PER")
print(span.text, span.label_)
# Add the span to the doc's entities
doc.ents = [span]
# Print the texts and labels of the entities
print([(ent.text, ent.label_) for ent in doc.ents])
| 24.16 | 51 | 0.698675 | import spacy
nlp = spacy.blank("fr")
from spacy.tokens import Doc, Span
words = ["Elle", "aime", "David", "Bowie"]
spaces = [True, True, True, False]
doc = Doc(nlp.vocab, words=words, spaces=spaces)
print(doc.text)
span = Span(doc, 2, 4, label="PER")
print(span.text, span.label_)
doc.ents = [span]
print([(ent.text, ent.label_) for ent in doc.ents])
| true | true |
1c3ba262bbd3967017630478ef1f6306fe297aaa | 20,382 | py | Python | TF/esp_tf_utils.py | Rahul-chunduru/meanfield-theory-of-activation-functions | 97abc52b25d7a57dc75ce21dcccc419f58a393d4 | [
"Apache-2.0"
] | 6 | 2018-06-12T08:35:43.000Z | 2019-03-16T05:06:11.000Z | TF/esp_tf_utils.py | Rahul-chunduru/meanfield-theory-of-activation-functions | 97abc52b25d7a57dc75ce21dcccc419f58a393d4 | [
"Apache-2.0"
] | 1 | 2019-04-26T05:40:08.000Z | 2019-05-07T01:54:49.000Z | TF/esp_tf_utils.py | Rahul-chunduru/meanfield-theory-of-activation-functions | 97abc52b25d7a57dc75ce21dcccc419f58a393d4 | [
"Apache-2.0"
] | 2 | 2019-09-24T16:28:10.000Z | 2020-03-26T16:38:29.000Z | """
Helper functions for FFN with ESP
=================================================================
Author: Mirco Milletari <mirco@bambu.life> (2018)
Tensorflow implementation of a Feed Forward Deep network with ESP
activation, as defined in
"Expectation propagation: a probabilistic view of Deep Feed Forward Networks"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
#Math Libraries
import numpy as np
#Visualization libraries
import matplotlib.pyplot as plt
#Tensor Flow
import tensorflow as tf
from tensorflow.python.framework import ops
# ======================================
# Initialize the Computational Graph
# ======================================
#One hot encoding for multiclass classification
def one_hot_econding(vect, N_classes, N_ch):
    """One-hot encode a 1-indexed label vector.

    Each class label y in *vect* becomes a binary column I_i = delta[i, y],
    so the result has N_classes rows (axis=0).  When *N_ch* is non-zero a
    singleton channel dimension is inserted (used for feature vectors).

    :param vect: ground-truth label vector (labels start at 1)
    :param N_classes: number of distinct classes
    :param N_ch: number of channels; 0 means no channel axis
    :returns: the evaluated one-hot encoding (numpy array)
    """
    depth = tf.constant(N_classes, name="C")
    # Labels are 1-based, so shift to 0-based; axis=0 produces column vectors.
    encoded = tf.one_hot(vect - 1, depth, axis=0)
    if N_ch != 0:
        encoded = tf.expand_dims(encoded, 1)
    # Evaluate eagerly in a throwaway session; the context manager closes it.
    with tf.Session() as sess:
        vect_hot = sess.run(encoded)
    return vect_hot
#Place Holders for the input/output data
def create_placeholders(Nfeat, Nlab):
    """Build the graph input placeholders for the tensorflow session.

    :param Nfeat: number of features per example
    :param Nlab: number of labels per example
    :returns: (X, Y) float64 placeholders of shapes [Nfeat, None] and
        [Nlab, None]; the batch dimension is left open
    """
    X = tf.placeholder(shape=[Nfeat, None], dtype="float64")
    Y = tf.placeholder(shape=[Nlab, None], dtype="float64")
    return X, Y
#parameters initialization
def initialize_parameters(layers, activation, stbeta):
    '''
    Initialise the parameters of the model:
    Arguments:
    layers: Topology of the network. Array contaning number of layers and number of units in each layer.
    activation: list of activation functions, for each layer in the network.
    stbeta: standard deviation used to initialise the beta (inverse temperature) variables.
    Evaluate:
    L-- number of layers in the network (excluding the ouput)
    first-- activation of the first layer
    w-- weight matrix, dim: (l, l-1) initialized to a small number drawn from a standard normal distribution
    mean 0 and std 1.
    b-- bias vector, dim: (l,1)
    beta-- inverse "temperature". initialized by sampling from a normal distribution. We Initialise beta small, i.e.
    high temperature. Note that each unit has its own beta as it attains only local equilibrium.
    Another possible initialization of beta is to 1 for each unit.
    Note: If one uses relu as an activation, beta shold be initialized to one and be non trainable.
    initialization:
    Orthogonal weights: tf.initializers.orthogonal()
    Xavier : tf.contrib.layers.xavier_initializer(seed=1)
    '''
    tf.set_random_seed(1) # defines the seed of the random number generator
    parameters={}
    L = len(layers) # number of layers in the network
    first = activation[0] #Activation of the first layer
    # 'esp' units learn their own temperature: trainable beta sampled from a
    # normal distribution.  Other activations get a fixed beta of 1.
    if first == 'esp':
        train = True
        init = tf.random_normal_initializer(stddev= stbeta)
        #init = tf.ones_initializer()
    else:
        train= False
        init = tf.ones_initializer()
    # One weight matrix (Xavier init), bias vector and beta vector per layer.
    for l in range(1, L):
        parameters['w' + str(l)] = tf.get_variable('w' + str(l), [layers[l], layers[l-1]],dtype= 'float64' , initializer= tf.contrib.layers.xavier_initializer(seed=1) )
        parameters['b' + str(l)] = tf.get_variable('b' + str(l), [layers[l], 1],dtype= 'float64', initializer = tf.zeros_initializer())
        parameters['beta' + str(l)] = tf.get_variable('beta'+ str(l), [layers[l], 1], dtype= 'float64', initializer = init, trainable= train )
        # Sanity-check the declared variable shapes.
        assert(parameters['w' + str(l)].shape == (layers[l], layers[l-1]))
        assert(parameters['b' + str(l)].shape == (layers[l], 1))
        assert(parameters['beta'+ str(l)].shape == (layers[l], 1))
    return parameters
#Activation functions
def act(h, beta, activation):
    """Apply the unit activation function.

    esp/softmax -- finite-temperature message passing: h * sigmoid(beta*h)
    sigmoid     -- Fermi-Dirac distribution: sigmoid(beta*h)
    relu        -- zero-noise limit of esp

    :param h: pre-activation tensor
    :param beta: per-unit inverse-temperature tensor
    :param activation: one of 'esp', 'softmax', 'sigmoid', 'relu'
    :raises ValueError: for an unknown activation name (previously this
        fell through and crashed with a NameError on the undefined result)
    """
    if activation == "esp" or activation == "softmax":
        A = tf.multiply(h, tf.nn.sigmoid(tf.multiply(beta, h)))
    elif activation == "sigmoid":
        A = tf.nn.sigmoid(tf.multiply(beta, h))
    elif activation == "relu":
        A = tf.nn.relu(h)
    else:
        raise ValueError("unknown activation: %r" % (activation,))
    return A
#--------Forward propagation----------------------------------------------------------------
def FW_prop(X, parameters, activation):
    """Forward pass through the network.

    :param X: placeholder for the input batch
    :param parameters: dict of per-layer 'w<l>'/'b<l>'/'beta<l>' variables
    :param activation: list of per-layer activation names
    :returns: (an, hn, cache_linear, cache_act) -- last post- and
        pre-activation outputs plus the per-layer caches keyed
        'h<l>' / 'a<l>'
    """
    cache_linear = {}  # pre-activation outputs, keyed "h<l>"
    cache_act = {}     # post-activation outputs, keyed "a<l>"
    n_layers = len(activation) + 1
    a_prev = X
    for l in range(1, n_layers):
        h = tf.matmul(parameters["w" + str(l)], a_prev) + parameters["b" + str(l)]
        cache_linear["h" + str(l)] = h
        a_prev = act(h, parameters['beta' + str(l)], activation[l - 1])
        cache_act["a" + str(l)] = a_prev
    an = cache_act["a" + str(n_layers - 1)]
    hn = cache_linear['h' + str(n_layers - 1)]
    return an, hn, cache_linear, cache_act
#---------------cost function-----------------------------------------------------------
def obj(zn, betan, Y, activation):
    """Cost (energy) of the network.

    With a 'sigmoid'/'softmax' output layer the sigmoid cross-entropy is
    applied to the pre-activation output scaled by betan (TF folds the
    sigmoid into the loss); with 'esp'/'relu' the mean squared error of
    the post-activation output is used.

    :param zn: pair [an, hn] of last post- and pre-activation outputs
    :param betan: inverse temperature of the output layer
    :param Y: ground-truth placeholder, shape [n_labels, batch]
    :param activation: list of per-layer activation names
    :returns: scalar cost tensor
    """
    an, hn = zn[0], zn[1]
    last = activation[-1]  # activation of the output layer fixes the loss
    labels = tf.transpose(Y)
    if last == 'sigmoid' or last == 'softmax':
        logits = tf.transpose(betan * hn)
        cost = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(
            logits=logits, multi_class_labels=labels))
    elif last == 'esp' or last == 'relu':
        predictions = tf.transpose(an)
        cost = tf.reduce_mean(tf.squared_difference(predictions, labels)) / 2
    return cost
#------------Hessian-------------------
def flatten(tensor):
    """Flatten a list of rank-2 tensors into a single (1, N) tensor.

    Each element is reshaped to (1, d1*d2) and the pieces are concatenated
    along axis 1, preserving list order.

    :param tensor: list of rank-2 tensors (e.g. gradients)
    :returns: tensor of shape (1, total number of elements)
    """
    pieces = []
    for element in tensor:
        d1, d2 = element.get_shape()
        pieces.append(tf.reshape(element, [-1, d1 * d2]))
    return tf.concat(pieces, axis=1)
#Hessian
def hessian(grads, par):
    '''
    Evaluates the exact Hessian matrix.
    This function uses the same convention of the Autograd package.
    Inputs:
    grads --- the evaluated gradients of the cost function
    par --- the variables to differentiate with respect to
    Returns:
    hessian matrix: a (dim,dim) matrix of second derivatives, where 'dim' is the dimension of
    the flattened gradient tensor.
    Note: builds one tf.gradients call per parameter, i.e. O(dim) graph
    ops -- expensive for large networks.  Assumes 'dim' is statically
    known at graph-construction time (TODO confirm for dynamic shapes).
    '''
    flat_grads = flatten(grads)[0] #flat gradients
    dim = flat_grads.get_shape()[0] #get the dimensions of the flattened tensor
    hess = [] #list
    for i in range (dim):
        dg_i = tf.gradients(flat_grads[i], par) #for each element of grads evaluate the gradients
        dg_i_flat = flatten(dg_i) #flatten the resulting hessian onto a 1 d array
        hess.append(dg_i_flat) #store row by row
    return tf.reshape(hess,[dim, dim]) #returns the reshaped matrix
#=======================
# Main
#=======================
def Run_DNN(X_train, Y_train, X_test, Y_test, layers, activation, epoch_sample, stdbeta, starter_learning, num_iterations, with_hessian, save_model, Plot):
"""
Run the DNN to find the optimal set of paramters
Arguments:
X -- data, iput marix
Y -- true "label" vector
layers -- list containing the input size and each layer size
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
with_hessian -- if true evaluates the exact Hessian matrix at predefinite training intervals
stdbeta -- standard deviation of the noise paramters for initialization
Returns:
costs -- list contaning the value of the cost funciton (energy) at predefinite training intervals
Training metrics:
acc_train -- list containing the value of the task specific, training set accuracy at predefinite training intervals
acc_test -- list containing the value of the task specific, test set accuracy at predefinite training intervals
task and metrics:
1) Regression: Returns the R2 score
2) Binary Classification: Accuracy score
3) Multiclass Classification: Accuracy score
Other metrics can be easily implemented, but this is not important for this work.
gradients_and_par -- list containing the value of the gradients and the training parameters at predefinite training intervals
1) The format is: gradients_and_par[a][b][c]; [a] runs over the epochs, [c] in (0,1) selects the
gradienst and the parameters respectevely. e.g. gradients_and_par[5][2][0] returns the value of the gradient
of b1 at the 5th entry epoch. The epoch value is predetermined, e.g. one may want to store the results every
100 epochs, then [5] -- > 500 epochs.
2) [b] runs over the training parameters for each layer. e.g. for a 2 layer network with esp:
[0] --> w1, [1] --> b1, [2] --> beta1
[3] --> w2, [4] --> b2, [5] --> beta2
for Relu, there is no trainable beta, and the indexing [b] is adjusted accordingly.
Npar -- Total number of trainable unit-paramters in the network. This is printed out during training.
hessians -- list containing the value of the hessian matrix at predefinite training intervals. The format is
hessians[a][b][c], where [a] runs over the epoch. For fixed [a], hessians stores the value of the hessian matrix
evaluated at the critical points; this is a nxn matrix indexed by [b][c]. The size of the matrix is predetermined
by the number of parameters in the network.
residuals -- list containing the value of the residuals at predefinite training intervals. As we are only interested in the
sign of the residuals, we define it as the difference between the predicted output \hat{y} (an in the code)
and the training labels y (Y in the code).
"""
ops.reset_default_graph() # reset the computational graph
tf.set_random_seed(1) # to keep consistent results
#----------training/test set features-------------------------
X_tr = np.transpose(X_train) # the transpose is taken to adapt to TF convenntion. This is also
f , m = X_tr.shape # f: number of features, m: number of training examples
X_tst = np.transpose(X_test) # the transpose is taken to adapt to TF convenntion. This is also
_ , mt = X_tst.shape
#------------Initialise network-------------------------------
network = np.append(f, layers) # add the input layer to the list
L= len(activation)
actL = activation[L-1] # activation of the last layer. It determines the task
#-----------training/test set labels-------------------------------
if actL == 'softmax':
l= len(np.unique(Y_train))
Y_tr = one_hot_econding(Y_train, l,0 )
Y_tst = one_hot_econding(Y_test, l,0 )
else:
Y_tr = np.transpose(Y_train) # how we defined the placeholders.
Y_tst = np.transpose(Y_test)
l = Y_tr.shape[0]
#-----------------initialize parameters of the model--------------------------------------------------------
X, Y= create_placeholders(f, l) # Create Placeholders
parameters = initialize_parameters(network, activation, stdbeta)
betan = tf.identity(parameters['beta'+str(L)], name="betan") #add the output noise to the graph for later retrieval
an, hn, _ , _ = FW_prop(X, parameters, activation) #post and pre-activation output of the last layer
an = tf.identity(an, name= "an") #add the output post-activation value to the graph for later retrieval
hn = tf.identity(hn, name='hn') #add the output pre-activation value to the graph for later retrieval
#Create a saver for the Model
if save_model == True:
saver = tf.train.Saver()
#-----------------Initialize the cost and gradients---------------------------------------------------------
costs = [] #store the cost for different opochs
cost = obj([an,hn], betan, Y, activation)
#-----------------Initialize the optimizer-----------------------------------------------------------------
# Implement an exponential learning rate decay every 1000 epochs
#Implement a dynamical learning rate
global_step = tf.Variable(0., trainable=False)
rate = tf.train.exponential_decay(starter_learning, global_step, 500, 0.9) #exponential learning rate decay
#rate = starter_learning
tvars = tf.trainable_variables() #list of trainable variables
Npar= flatten(tvars).get_shape()[1] #total number of paramters in the network
print('there are:', Npar,'parameters in the network')
optimizer = tf.train.AdamOptimizer(learning_rate = rate) #Initialize Adam optimizer
grads_var = optimizer.compute_gradients(cost, tvars ) #Get gradients layer by layer. Note that this function returns the pair (grads, var)
grads = [grads_var[i][0] for i in range(len(grads_var))] #extract the gradients
min = optimizer.apply_gradients(grads_and_vars= grads_var, global_step= global_step) #Apply the gradients to look for critical points
gradients_and_par = [] #store gradients and training paramters for different epochs
hessians = [] #store the hessian for different epochs
residuals= [] #store the value of the residuals for different epochs
#gs = [] #store the value of the phase space factor for different epochs
if with_hessian == True: #if true, it evaluates
hess = hessian(grads, tvars) #Hessian matrix
res = tf.subtract(an, Y) #residual error
#---------------------------Initialize evaluation metrics----------------------------------------------------
e_len = len(epoch_sample)
acc_train = [] #store train accuracy for each epoch
acc_test = [] #store test accuracy for each epoch
if actL == 'sigmoid': #accuracy score for binary class classification
Yp = tf.greater(an , 0.5)
accuracy = tf.reduce_mean(tf.cast(tf.equal(Yp, tf.equal(Y,1.0)), "float"))
elif actL == 'esp' or actL == 'relu': #r2 score
norm= tf.reduce_mean( tf.squared_difference(Y,tf.reduce_mean(Y)) )
accuracy = 1 - tf.divide( tf.reduce_mean(tf.squared_difference(an, Y)), norm)
elif actL == 'softmax': #accuracy score for multiclass classification
Yp = tf.sigmoid(betan*hn)
correct = tf.equal(tf.argmax(Yp), tf.argmax(Y))
accuracy= tf.reduce_mean(tf.cast(correct, "float"))
#-----------------Initialize the graph and start the session-------------------------------------------------
init = tf.global_variables_initializer()
with tf.Session() as sess:
# Run the initialization
sess.run(init)
jj=0
for epoch in range(num_iterations):
_ , epoch_cost, epoch_grad, epoch_acc_train = sess.run([min, cost, grads_var, accuracy], feed_dict={X: X_tr, Y: Y_tr})
# Print the cost every interval epoch (here uses the inhomogenous interval but you can change it)
if jj< e_len and epoch % epoch_sample[jj] == 0:
#if epoch % 50 == 0:
print("Epoch %i, Cost: %f, Train accuracy: %f" % (epoch, epoch_cost,epoch_acc_train))
costs.append(epoch_cost) #store the costs
gradients_and_par.append(epoch_grad) #store grads and trainable parameters
#--------------Store the evaluation metrics------------------------------------
epoch_acc_test = sess.run(accuracy, feed_dict={X: X_tst, Y: Y_tst})
acc_test.append(epoch_acc_test)
acc_train.append(epoch_acc_train)
#------------------------------------------------------------------------------
jj+=1 #increase counter
#---------------------Evaluate and store the Hessian---------------------------
if with_hessian == True:
epoch_hess, epoch_res = sess.run([hess,res], feed_dict={X: X_tr, Y: Y_tr})
assert(epoch_hess.shape[1] == Npar) #check the dimensions of the hessian matrix
hessians.append(epoch_hess) #store the hessian
residuals.append(epoch_res) #store the residuals
#gs.append(epoch_g) #store the gs
else:
hessians.append(1) #returns just ones
residuals.append(1)
#gs.append(1)
# plot the cost at the end of training
if Plot== True:
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations')
plt.title("Learning rate =" + str(starter_learning))
plt.show()
print('Train accuracy', acc_train[jj-1])
print('Test accuracy', acc_test[jj-1])
accuracy = (acc_train, acc_test)
if save_model == True:
saver.save(sess, "saver/esp_model.ckpt")
sess.close()
return costs, accuracy, gradients_and_par, hessians, residuals
| 38.529301 | 168 | 0.61883 |
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
def one_hot_econding(vect, N_classes, N_ch):
    """One-hot encode a vector of 1-based class labels.

    vect -- array of integer labels in [1, N_classes].
    N_classes -- total number of classes (depth of the encoding).
    N_ch -- if non-zero, insert a singleton channel axis at position 1.
    Returns the encoded labels as a numpy array with classes on axis 0.
    """
    C = tf.constant(N_classes, name="C")
    # labels are 1-based, tf.one_hot expects 0-based indices
    one_hot_matrix = tf.one_hot(vect-1, C, axis=0)
    if N_ch != 0:
        one_hot_matrix = tf.expand_dims(one_hot_matrix, 1)
    # context manager closes the session even if run() raises
    with tf.Session() as sess:
        vect_hot = sess.run(one_hot_matrix)
    return vect_hot
def create_placeholders(Nfeat, Nlab):
    """Return float64 placeholders X (Nfeat x m) and Y (Nlab x m);
    the example dimension m is left unspecified."""
    X = tf.placeholder(dtype="float64", shape=[Nfeat, None])
    Y = tf.placeholder(dtype="float64", shape=[Nlab, None])
    return X, Y
def initialize_parameters(layers, activation, stbeta):
    """Create the TF variables (weights, biases, betas) for every layer.

    layers -- layer sizes, input dimension first.
    activation -- per-layer activation names; 'esp' in the first position
        makes the beta parameters trainable.
    stbeta -- stddev used to initialise beta when it is trainable.
    Returns a dict {'w1', 'b1', 'beta1', ...} of tf variables.
    """
    tf.set_random_seed(1)  # fixed seed for reproducible initialisation
    parameters={}
    L = len(layers)
    first = activation[0]
    if first == 'esp':
        # 'esp' networks learn the beta (slope) parameters
        train = True
        init = tf.random_normal_initializer(stddev= stbeta)
    else:
        # otherwise beta stays frozen at 1
        train= False
        init = tf.ones_initializer()
    for l in range(1, L):
        parameters['w' + str(l)] = tf.get_variable('w' + str(l), [layers[l], layers[l-1]],dtype= 'float64' , initializer= tf.contrib.layers.xavier_initializer(seed=1) )
        parameters['b' + str(l)] = tf.get_variable('b' + str(l), [layers[l], 1],dtype= 'float64', initializer = tf.zeros_initializer())
        parameters['beta' + str(l)] = tf.get_variable('beta'+ str(l), [layers[l], 1], dtype= 'float64', initializer = init, trainable= train )
        # sanity-check the declared variable shapes
        assert(parameters['w' + str(l)].shape == (layers[l], layers[l-1]))
        assert(parameters['b' + str(l)].shape == (layers[l], 1))
        assert(parameters['beta'+ str(l)].shape == (layers[l], 1))
    return parameters
def act(h, beta, activation):
    """Apply the named activation to pre-activation h with slope beta.

    'esp'/'softmax' -> swish-like h * sigmoid(beta*h)
    'sigmoid'       -> sigmoid(beta*h)
    'relu'          -> relu(h) (beta unused)
    Raises ValueError for an unknown name instead of the original's
    UnboundLocalError on `A`.
    """
    if activation == "esp" or activation == "softmax":
        return tf.multiply(h, tf.nn.sigmoid(tf.multiply(beta, h)))
    if activation == "sigmoid":
        return tf.nn.sigmoid(tf.multiply(beta, h))
    if activation == "relu":
        return tf.nn.relu(h)
    raise ValueError("unsupported activation: %r" % (activation,))
def FW_prop(X, parameters, activation):
    """Forward-propagate X through all layers.

    Returns (an, hn, cache_linear, cache_act): the post- and pre-activation
    output of the last layer plus the per-layer caches 'h<l>' / 'a<l>'.
    """
    cache_linear = {}
    cache_act = {}
    current = X
    for idx, fn in enumerate(activation, start=1):
        pre = tf.matmul(parameters["w" + str(idx)], current) + parameters["b" + str(idx)]
        post = act(pre, parameters["beta" + str(idx)], fn)
        cache_linear["h" + str(idx)] = pre
        cache_act["a" + str(idx)] = post
        current = post
    last = len(activation)
    return cache_act["a" + str(last)], cache_linear["h" + str(last)], cache_linear, cache_act
def obj(zn, betan, Y, activation):
    """Build the training objective selected by the last-layer activation.

    zn -- pair [an, hn]: post- and pre-activation output of the last layer.
    betan -- output-layer beta (slope) variable.
    Y -- labels placeholder, shape (n_labels, m).
    activation -- per-layer names; the last entry selects the loss:
        'sigmoid'/'softmax' -> sigmoid cross-entropy on betan*hn,
        'esp'/'relu'        -> half mean squared error on an.
    Raises ValueError for an unsupported output activation (the original
    fell through and hit an UnboundLocalError on `cost`).
    """
    last = activation[len(activation) - 1]
    labels = tf.transpose(Y)
    if last == 'sigmoid' or last == 'softmax':
        logits = tf.transpose(betan*zn[1])
        return tf.reduce_mean(tf.losses.sigmoid_cross_entropy(logits=logits, multi_class_labels=labels))
    if last == 'esp' or last == 'relu':
        out = tf.transpose(zn[0])
        return tf.reduce_mean(tf.squared_difference(out, labels))/2
    raise ValueError("unsupported output activation: %r" % (last,))
def flatten(tensor):
    """Reshape each 2-D tensor in `tensor` to a row and concatenate them
    into a single (1, total_params) tensor."""
    pieces = []
    for t in tensor:
        rows, cols = t.get_shape()
        pieces.append(tf.reshape(t, [-1, rows * cols]))
    return tf.concat(pieces, axis=1)
def hessian(grads, par):
    """Build the (dim x dim) Hessian of the cost as a TF op.

    grads -- list of gradient tensors of the cost w.r.t. each variable.
    par -- the list of trainable variables.
    NOTE: each flattened gradient component is differentiated separately,
    so graph construction is O(dim^2) -- only practical for tiny networks.
    """
    flat_grads = flatten(grads)[0]
    dim = flat_grads.get_shape()[0]
    hess = []
    for i in range (dim):
        # row i: gradient of the i-th gradient component w.r.t. all params
        dg_i = tf.gradients(flat_grads[i], par)
        dg_i_flat = flatten(dg_i)
        hess.append(dg_i_flat)
    return tf.reshape(hess,[dim, dim])
def Run_DNN(X_train, Y_train, X_test, Y_test, layers, activation, epoch_sample, stdbeta, starter_learning, num_iterations, with_hessian, save_model, Plot):
    """Build, train and probe the DNN defined by `layers`/`activation`.

    X_*/Y_* -- train/test data, one example per row (transposed internally).
    layers -- hidden/output layer sizes (the input size is prepended).
    activation -- per-layer activation names; the last one selects the task.
    epoch_sample -- epochs at which cost/accuracy/gradients are recorded.
    stdbeta -- stddev for the trainable beta initialisation.
    starter_learning -- initial learning rate (decays by 0.9 every 500 steps).
    with_hessian -- if True, also record the Hessian and the residuals an - Y.

    Returns (costs, (acc_train, acc_test), gradients_and_par, hessians,
    residuals); hessians/residuals hold the placeholder value 1 per sample
    when with_hessian is False.
    """
    ops.reset_default_graph()   # start from a clean graph
    tf.set_random_seed(1)       # reproducible initialisation
    # Features: transpose to the TF convention (features x examples).
    X_tr = np.transpose(X_train)
    f, m = X_tr.shape
    X_tst = np.transpose(X_test)
    _, mt = X_tst.shape
    network = np.append(f, layers)   # prepend the input layer size
    L = len(activation)
    actL = activation[L-1]           # last activation determines the task
    # Labels: one-hot for multiclass, plain transpose otherwise.
    if actL == 'softmax':
        l = len(np.unique(Y_train))
        Y_tr = one_hot_econding(Y_train, l, 0)
        Y_tst = one_hot_econding(Y_test, l, 0)
    else:
        Y_tr = np.transpose(Y_train)
        Y_tst = np.transpose(Y_test)
        l = Y_tr.shape[0]
    # Model: placeholders, parameters and forward pass.
    X, Y = create_placeholders(f, l)
    parameters = initialize_parameters(network, activation, stdbeta)
    betan = tf.identity(parameters['beta'+str(L)], name="betan")
    an, hn, _, _ = FW_prop(X, parameters, activation)
    an = tf.identity(an, name="an")   # expose output tensors for later retrieval
    hn = tf.identity(hn, name='hn')
    if save_model == True:
        saver = tf.train.Saver()
    costs = []
    cost = obj([an, hn], betan, Y, activation)
    # Optimizer with exponential learning-rate decay every 500 steps.
    global_step = tf.Variable(0., trainable=False)
    rate = tf.train.exponential_decay(starter_learning, global_step, 500, 0.9)
    tvars = tf.trainable_variables()
    Npar = flatten(tvars).get_shape()[1]
    print('there are:', Npar, 'parameters in the network')
    optimizer = tf.train.AdamOptimizer(learning_rate=rate)
    grads_var = optimizer.compute_gradients(cost, tvars)   # (grad, var) pairs
    grads = [gv[0] for gv in grads_var]
    # 'train_op' instead of 'min': avoid shadowing the builtin min().
    train_op = optimizer.apply_gradients(grads_and_vars=grads_var, global_step=global_step)
    gradients_and_par = []
    hessians = []
    residuals = []
    # Restored (these lines were garbled in this copy): build the Hessian and
    # residual ops once, outside the training loop.
    if with_hessian == True:
        hess = hessian(grads, tvars)
        res = tf.subtract(an, Y)
    # Evaluation metric matching the task of the last layer.
    e_len = len(epoch_sample)
    acc_train = []
    acc_test = []
    if actL == 'sigmoid':   # binary classification accuracy
        Yp = tf.greater(an, 0.5)
        accuracy = tf.reduce_mean(tf.cast(tf.equal(Yp, tf.equal(Y, 1.0)), "float"))
    elif actL == 'esp' or actL == 'relu':   # regression: R^2 score
        norm = tf.reduce_mean(tf.squared_difference(Y, tf.reduce_mean(Y)))
        accuracy = 1 - tf.divide(tf.reduce_mean(tf.squared_difference(an, Y)), norm)
    elif actL == 'softmax':   # multiclass accuracy
        Yp = tf.sigmoid(betan*hn)
        correct = tf.equal(tf.argmax(Yp), tf.argmax(Y))
        accuracy = tf.reduce_mean(tf.cast(correct, "float"))
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        jj = 0
        for epoch in range(num_iterations):
            _, epoch_cost, epoch_grad, epoch_acc_train = sess.run(
                [train_op, cost, grads_var, accuracy], feed_dict={X: X_tr, Y: Y_tr})
            # Record diagnostics only at the sampled epochs.
            if jj < e_len and epoch % epoch_sample[jj] == 0:
                print("Epoch %i, Cost: %f, Train accuracy: %f" % (epoch, epoch_cost, epoch_acc_train))
                costs.append(epoch_cost)
                gradients_and_par.append(epoch_grad)
                epoch_acc_test = sess.run(accuracy, feed_dict={X: X_tst, Y: Y_tst})
                acc_test.append(epoch_acc_test)
                acc_train.append(epoch_acc_train)
                jj += 1
                if with_hessian == True:
                    epoch_hess, epoch_res = sess.run([hess, res], feed_dict={X: X_tr, Y: Y_tr})
                    assert(epoch_hess.shape[1] == Npar)   # sanity-check dimensions
                    hessians.append(epoch_hess)
                    residuals.append(epoch_res)
                else:
                    hessians.append(1)   # placeholders keep list lengths aligned
                    residuals.append(1)
        if Plot == True:
            plt.plot(np.squeeze(costs))
            plt.ylabel('cost')
            plt.xlabel('iterations')
            plt.title("Learning rate =" + str(starter_learning))
            plt.show()
        print('Train accuracy', acc_train[jj-1])
        print('Test accuracy', acc_test[jj-1])
        accuracy = (acc_train, acc_test)   # repackage the history for the caller
        if save_model == True:
            saver.save(sess, "saver/esp_model.ckpt")
    return costs, accuracy, gradients_and_par, hessians, residuals
| true | true |
1c3ba32c797e288fa66e9565e66e9de634959d0c | 314 | py | Python | tests/test_rpc.py | laoshan-tech/xray-rpc | 95034a012c32b4c915a9612cd21970fd40f08bfd | [
"MIT"
] | 1 | 2021-05-27T22:04:09.000Z | 2021-05-27T22:04:09.000Z | tests/test_rpc.py | laoshan-tech/xray-rpc | 95034a012c32b4c915a9612cd21970fd40f08bfd | [
"MIT"
] | 1 | 2021-04-11T15:38:43.000Z | 2021-04-12T02:49:44.000Z | tests/test_rpc.py | laoshan-tech/xray-rpc | 95034a012c32b4c915a9612cd21970fd40f08bfd | [
"MIT"
] | null | null | null | class TestRPC(object):
def test_import(self):
from xray_rpc.app.stats.command import command_pb2_grpc as stats_command_pb2_grpc
from xray_rpc.core import config_pb2
assert issubclass(stats_command_pb2_grpc.StatsServiceStub, object)
assert issubclass(config_pb2.Config, object)
| 39.25 | 89 | 0.761146 | class TestRPC(object):
def test_import(self):
from xray_rpc.app.stats.command import command_pb2_grpc as stats_command_pb2_grpc
from xray_rpc.core import config_pb2
assert issubclass(stats_command_pb2_grpc.StatsServiceStub, object)
assert issubclass(config_pb2.Config, object)
| true | true |
1c3ba5e8ee78c0e5bcaca6f4cc049fae77128149 | 1,422 | py | Python | tools/scripts/dearimgui_update.py | mingw4/Cinder | 447f1366bb4c4a4821055043fc4945da4139c1c9 | [
"BSD-2-Clause"
] | 3,494 | 2015-01-02T08:42:09.000Z | 2022-03-31T14:16:23.000Z | tools/scripts/dearimgui_update.py | mingw4/Cinder | 447f1366bb4c4a4821055043fc4945da4139c1c9 | [
"BSD-2-Clause"
] | 1,284 | 2015-01-02T07:31:47.000Z | 2022-03-30T02:06:43.000Z | tools/scripts/dearimgui_update.py | mingw4/Cinder | 447f1366bb4c4a4821055043fc4945da4139c1c9 | [
"BSD-2-Clause"
] | 780 | 2015-01-02T22:14:29.000Z | 2022-03-30T00:16:56.000Z | import glob, os, shutil, sys
# Usage: dearimgui_update.py SOURCE DEST
im_path = os.path.abspath(sys.argv[1])  # SOURCE: Dear ImGui checkout root
ci_path = os.path.abspath(sys.argv[2])  # DEST: Cinder repository root
def copy_files(src_dir, target_dir, extension):
    """Copy every regular file in src_dir matching the glob pattern
    `extension` into target_dir, logging each copy to stdout."""
    pattern = os.path.join(src_dir, extension)
    for match in glob.iglob(pattern):
        if not os.path.isfile(match):
            continue  # skip directories that happen to match the pattern
        print(match + ' => ' + target_dir)
        shutil.copy2(match, target_dir)
if __name__ == '__main__':
    # Mirror the Dear ImGui headers/sources into Cinder's include/src trees.
    dearimgui_folders = ['', 'misc/cpp', 'misc/freetype']
    for src in dearimgui_folders:
        copy_files(os.path.join(im_path, src), os.path.join(ci_path, 'include/imgui'), '*.h')
        copy_files(os.path.join(im_path, src), os.path.join(ci_path, 'src/imgui'), '*.cpp')
    # Copy the OpenGL3 backend pair explicitly (deduplicated into one loop).
    for rel_src, rel_dest in (('examples/imgui_impl_opengl3.h', 'include/imgui'),
                              ('examples/imgui_impl_opengl3.cpp', 'src/imgui')):
        src = os.path.join(im_path, rel_src)
        dest = os.path.join(ci_path, rel_dest)
        shutil.copy2(src, dest)
        print(src + ' => ' + dest)
    # Fixes linking on platforms such as Angle (prevents auto-detection of
    # glad headers) by injecting Cinder's ImGui config after '#pragma once'.
    with open(os.path.join(ci_path, 'include/imgui/imgui_impl_opengl3.h'), 'r+') as file:
        data = file.read().replace('#pragma once', '#pragma once\n#include \"cinder/CinderImGuiConfig.h\"\n')
        file.seek(0)
        file.write(data)
        file.truncate()
| 39.5 | 109 | 0.642053 | import glob, os, shutil, sys
im_path = os.path.abspath(sys.argv[1])
ci_path = os.path.abspath(sys.argv[2])
def copy_files(src_dir, target_dir, extension):
files = glob.iglob(os.path.join(src_dir, extension))
for src in files:
if os.path.isfile(src):
print( src + ' => ' + target_dir )
shutil.copy2(src, target_dir)
if __name__ == '__main__':
dearimgui_folders = ['', 'misc/cpp', 'misc/freetype']
for src in dearimgui_folders:
copy_files( os.path.join(im_path, src), os.path.join(ci_path, 'include/imgui'), '*.h')
copy_files( os.path.join(im_path, src), os.path.join(ci_path, 'src/imgui'), '*.cpp')
src = os.path.join(im_path,'examples/imgui_impl_opengl3.h')
dest = os.path.join(ci_path,'include/imgui')
shutil.copy2(src, dest)
print( src + ' => ' + dest )
src = os.path.join(im_path,'examples/imgui_impl_opengl3.cpp')
dest = os.path.join(ci_path,'src/imgui')
shutil.copy2(src, dest)
print( src + ' => ' + dest )
with open( os.path.join(ci_path, 'include/imgui/imgui_impl_opengl3.h'), 'r+' ) as file:
data = file.read().replace('#pragma once', '#pragma once\n#include \"cinder/CinderImGuiConfig.h\"\n')
file.seek(0)
file.write(data)
file.truncate()
| true | true |
1c3ba6a84ec6f5569244fd31ac44b46e8d472e9d | 5,358 | py | Python | server/openslides/core/apps.py | danilobuerger/OpenSlides | 97950d5baa0e22027ea5c1f59a452f831c197cd2 | [
"MIT"
] | null | null | null | server/openslides/core/apps.py | danilobuerger/OpenSlides | 97950d5baa0e22027ea5c1f59a452f831c197cd2 | [
"MIT"
] | 2 | 2021-11-02T15:53:58.000Z | 2022-03-02T12:19:49.000Z | server/openslides/core/apps.py | danilobuerger/OpenSlides | 97950d5baa0e22027ea5c1f59a452f831c197cd2 | [
"MIT"
] | null | null | null | import sys
from typing import Any, Dict
from django.apps import AppConfig
from django.conf import settings
from django.db.models.signals import post_migrate, pre_delete
from openslides.utils import logging
from openslides.utils.schema_version import schema_version_handler
class CoreAppConfig(AppConfig):
    name = "openslides.core"
    verbose_name = "OpenSlides Core"
    def ready(self):
        """Wire up the core app once Django has loaded all apps: collect
        config variables, connect signals, register REST viewsets and, for
        runserver/changeconfig, run the startup hooks."""
        # Import all required stuff.
        # Let all client websocket message register
        from ..utils.rest_api import router
        from . import serializers  # noqa
        from .config import config
        from .signals import (
            autoupdate_for_many_to_many_relations,
            cleanup_unused_permissions,
            delete_django_app_permissions,
            get_permission_change_data,
            permission_change,
            post_permission_creation,
        )
        from .views import (
            ConfigViewSet,
            CountdownViewSet,
            ProjectionDefaultViewSet,
            ProjectorMessageViewSet,
            ProjectorViewSet,
            TagViewSet,
        )
        # Collect all config variables before getting the constants.
        config.collect_config_variables_from_apps()
        # Connect signals.
        post_permission_creation.connect(
            delete_django_app_permissions, dispatch_uid="delete_django_app_permissions"
        )
        post_permission_creation.connect(
            cleanup_unused_permissions, dispatch_uid="cleanup_unused_permissions"
        )
        permission_change.connect(
            get_permission_change_data, dispatch_uid="core_get_permission_change_data"
        )
        post_migrate.connect(
            manage_config, sender=self, dispatch_uid="core_manage_config"
        )
        pre_delete.connect(
            autoupdate_for_many_to_many_relations,
            dispatch_uid="core_autoupdate_for_many_to_many_relations",
        )
        # Register viewsets.
        router.register(
            self.get_model("Projector").get_collection_string(), ProjectorViewSet
        )
        router.register(
            self.get_model("Projectiondefault").get_collection_string(),
            ProjectionDefaultViewSet,
        )
        router.register(self.get_model("Tag").get_collection_string(), TagViewSet)
        router.register(
            self.get_model("ConfigStore").get_collection_string(),
            ConfigViewSet,
            "config",
        )
        router.register(
            self.get_model("ProjectorMessage").get_collection_string(),
            ProjectorMessageViewSet,
        )
        router.register(
            self.get_model("Countdown").get_collection_string(), CountdownViewSet
        )
        if "runserver" in sys.argv or "changeconfig" in sys.argv:
            from openslides.utils.startup import run_startup_hooks
            run_startup_hooks()
    def get_startup_hooks(self):
        """Return a mapping of startup priority -> hook callable."""
        from openslides.core.models import History
        from openslides.utils.cache import element_cache
        from openslides.utils.constants import set_constants_from_apps
        return {
            10: element_cache.ensure_schema_version,
            40: set_constants_from_apps,
            90: History.objects.build_history,
        }
    def get_config_variables(self):
        """Return the core app's config variable definitions."""
        from .config_variables import get_config_variables
        return get_config_variables()
    def get_startup_elements(self):
        """
        Yields all Cachables required on startup i. e. opening the websocket
        connection.
        """
        for model_name in (
            "Projector",
            "ProjectionDefault",
            "Tag",
            "ProjectorMessage",
            "Countdown",
            "ConfigStore",
        ):
            yield self.get_model(model_name)
    def get_angular_constants(self):
        """Collect constants exposed to the client: the whitelisted Django
        settings plus the current schema version."""
        constants: Dict[str, Any] = {}
        # Client settings: only these whitelisted keys reach the client.
        client_settings_keys = [
            "AUTOUPDATE_DELAY",
            "PRIORITIZED_GROUP_IDS",
            "PING_INTERVAL",
            "PING_TIMEOUT",
            "ENABLE_ELECTRONIC_VOTING",
            "JITSI_DOMAIN",
            "JITSI_ROOM_NAME",
            "JITSI_ROOM_PASSWORD",
            "DEMO_USERS",
        ]
        client_settings_dict = {}
        for key in client_settings_keys:
            try:
                client_settings_dict[key] = getattr(settings, key)
            except AttributeError:
                # Settings key does not exist. Do nothing. The client will
                # treat this as undefined.
                pass
        constants["Settings"] = client_settings_dict
        constants["SchemaVersion"] = schema_version_handler.get()
        return constants
def manage_config(**kwargs):
    """Post-migrate hook: persist default config values and drop stale ones.

    Saves default values for config variables missing from the db, deletes
    db entries whose config variable no longer exists, and increments the
    config version (with a log line) whenever either step changed data.
    """
    from .config import config
    saved_defaults = config.save_default_values()
    removed_stale = config.cleanup_old_config_values()
    if saved_defaults or removed_stale:
        config.increment_version()
        logging.getLogger(__name__).info("Updated config variables")
| 32.871166 | 87 | 0.636991 | import sys
from typing import Any, Dict
from django.apps import AppConfig
from django.conf import settings
from django.db.models.signals import post_migrate, pre_delete
from openslides.utils import logging
from openslides.utils.schema_version import schema_version_handler
class CoreAppConfig(AppConfig):
name = "openslides.core"
verbose_name = "OpenSlides Core"
def ready(self):
from ..utils.rest_api import router
from . import serializers
from .config import config
from .signals import (
autoupdate_for_many_to_many_relations,
cleanup_unused_permissions,
delete_django_app_permissions,
get_permission_change_data,
permission_change,
post_permission_creation,
)
from .views import (
ConfigViewSet,
CountdownViewSet,
ProjectionDefaultViewSet,
ProjectorMessageViewSet,
ProjectorViewSet,
TagViewSet,
)
config.collect_config_variables_from_apps()
post_permission_creation.connect(
delete_django_app_permissions, dispatch_uid="delete_django_app_permissions"
)
post_permission_creation.connect(
cleanup_unused_permissions, dispatch_uid="cleanup_unused_permissions"
)
permission_change.connect(
get_permission_change_data, dispatch_uid="core_get_permission_change_data"
)
post_migrate.connect(
manage_config, sender=self, dispatch_uid="core_manage_config"
)
pre_delete.connect(
autoupdate_for_many_to_many_relations,
dispatch_uid="core_autoupdate_for_many_to_many_relations",
)
router.register(
self.get_model("Projector").get_collection_string(), ProjectorViewSet
)
router.register(
self.get_model("Projectiondefault").get_collection_string(),
ProjectionDefaultViewSet,
)
router.register(self.get_model("Tag").get_collection_string(), TagViewSet)
router.register(
self.get_model("ConfigStore").get_collection_string(),
ConfigViewSet,
"config",
)
router.register(
self.get_model("ProjectorMessage").get_collection_string(),
ProjectorMessageViewSet,
)
router.register(
self.get_model("Countdown").get_collection_string(), CountdownViewSet
)
if "runserver" in sys.argv or "changeconfig" in sys.argv:
from openslides.utils.startup import run_startup_hooks
run_startup_hooks()
def get_startup_hooks(self):
from openslides.core.models import History
from openslides.utils.cache import element_cache
from openslides.utils.constants import set_constants_from_apps
return {
10: element_cache.ensure_schema_version,
40: set_constants_from_apps,
90: History.objects.build_history,
}
def get_config_variables(self):
from .config_variables import get_config_variables
return get_config_variables()
def get_startup_elements(self):
for model_name in (
"Projector",
"ProjectionDefault",
"Tag",
"ProjectorMessage",
"Countdown",
"ConfigStore",
):
yield self.get_model(model_name)
def get_angular_constants(self):
constants: Dict[str, Any] = {}
client_settings_keys = [
"AUTOUPDATE_DELAY",
"PRIORITIZED_GROUP_IDS",
"PING_INTERVAL",
"PING_TIMEOUT",
"ENABLE_ELECTRONIC_VOTING",
"JITSI_DOMAIN",
"JITSI_ROOM_NAME",
"JITSI_ROOM_PASSWORD",
"DEMO_USERS",
]
client_settings_dict = {}
for key in client_settings_keys:
try:
client_settings_dict[key] = getattr(settings, key)
except AttributeError:
pass
constants["Settings"] = client_settings_dict
constants["SchemaVersion"] = schema_version_handler.get()
return constants
def manage_config(**kwargs):
from .config import config
altered = config.save_default_values()
altered = config.cleanup_old_config_values() or altered
if altered:
config.increment_version()
logging.getLogger(__name__).info("Updated config variables")
| true | true |
1c3ba6e4bbe2444723334607b77450a25f2d8124 | 3,343 | py | Python | nizkctf/subrepo.py | MrMugiwara/2018 | 675a26bea30f67898d6ce00de5206476cb3d4146 | [
"MIT"
] | 1 | 2021-07-07T15:12:16.000Z | 2021-07-07T15:12:16.000Z | nizkctf/subrepo.py | MrMugiwara/2018 | 675a26bea30f67898d6ce00de5206476cb3d4146 | [
"MIT"
] | null | null | null | nizkctf/subrepo.py | MrMugiwara/2018 | 675a26bea30f67898d6ce00de5206476cb3d4146 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals, division, print_function,\
absolute_import
import os
import subprocess
import base64
import pysodium
from .settings import Settings
from .localsettings import LocalSettings
from .repohost import RepoHost
SUBREPO_NAME = 'submissions'
class SubRepo(object):
    """Wrapper around the local clone of the submissions repository.

    State (clone_into / path) is kept at class level; all repository
    operations shell out to git via subprocess.
    """
    @classmethod
    def set_clone_into(cls, clone_into):
        # Remember where the submissions repo lives (clone_into/submissions).
        cls.clone_into = clone_into
        cls.path = os.path.join(clone_into, SUBREPO_NAME)
    @classmethod
    def get_path(cls, subpath=''):
        """Return `subpath` inside the clone, failing if it was never cloned."""
        if os.path.exists(cls.path):
            return os.path.join(cls.path, subpath)
        raise EnvironmentError("The subrepository path ('%s') was not created "
                               "yet. Please call 'ctf login' to get it cloned "
                               "before performing any further actions." %
                               cls.path)
    @classmethod
    def clone(cls, fork=True):
        """Clone the submissions repo, optionally through a personal fork.

        With fork=True the fork becomes the 'origin' remote and the shared
        repository is added as 'upstream'.
        """
        repohost = RepoHost.instance()
        upstream_url = repohost.get_ssh_url(Settings.submissions_project)
        if fork:
            forked_project, origin_url = \
                repohost.fork(Settings.submissions_project)
            LocalSettings.forked_project = forked_project
        else:
            origin_url = upstream_url
        cls.git(['clone', origin_url, SUBREPO_NAME], cwd=cls.clone_into)
        cls.git(['remote', 'add', 'upstream', upstream_url])
        if fork:
            cls.git(['remote', 'set-url', 'origin',
                     repohost.get_ssh_url(forked_project)])
    @classmethod
    def pull(cls):
        """Rebase local master on top of upstream/master."""
        cls.git(['checkout', 'master'])
        cls.git(['pull', '--rebase', 'upstream', 'master'])
    @classmethod
    def push(cls, commit_message='commit', merge_request=True):
        """Commit everything and push; with merge_request=True, push a
        randomly named branch and open a merge request for it."""
        branch = 'master'
        if merge_request:
            branch = cls.random_branch()
            cls.git(['checkout', '-b', branch, 'master'])
        cls.git(['add', '-A'])
        cls.git(['commit', '-m', commit_message],
                returncodes={0, 1})  # do not fail on 'nothing to commit'
        cls.git(['push', '-u', 'origin', branch])
        if merge_request:
            repohost = RepoHost.instance()
            repohost.merge_request(LocalSettings.forked_project,
                                   Settings.submissions_project,
                                   source_branch=branch,
                                   title=commit_message)
    @staticmethod
    def random_branch():
        # 10 random bytes -> 16 lowercase base32 chars, safe as a branch name
        return base64.b32encode(pysodium.randombytes(10))\
               .decode('utf-8').lower()
    @classmethod
    def git(cls, args, **kwargs):
        """Run 'git <args>'; raise GitError unless the exit status is in
        kwargs['returncodes'] (default {0}). Returns the captured stdout
        when a 'stdout' argument was passed, else None."""
        returncodes = kwargs.pop('returncodes', {0})
        if 'cwd' not in kwargs:
            kwargs['cwd'] = cls.get_path()
        p = subprocess.Popen(['git'] + args, **kwargs)
        r = None
        if 'stdout' in kwargs:
            r = p.stdout.read()
        returncode = p.wait()
        if returncode not in returncodes:
            raise GitError(returncode)
        return r
class GitError(Exception):
    """Raised when a git subprocess exits with an unexpected return code."""

    def __init__(self, returncode, *args):
        super(GitError, self).__init__(*args)
        self.returncode = returncode
# Default clone location: the parent directory of this package.
thisdir = os.path.dirname(os.path.realpath(__file__))
SubRepo.set_clone_into(os.path.realpath(os.path.join(thisdir, os.pardir)))
| 31.242991 | 79 | 0.584505 |
from __future__ import unicode_literals, division, print_function,\
absolute_import
import os
import subprocess
import base64
import pysodium
from .settings import Settings
from .localsettings import LocalSettings
from .repohost import RepoHost
SUBREPO_NAME = 'submissions'
class SubRepo(object):
@classmethod
def set_clone_into(cls, clone_into):
cls.clone_into = clone_into
cls.path = os.path.join(clone_into, SUBREPO_NAME)
@classmethod
def get_path(cls, subpath=''):
if os.path.exists(cls.path):
return os.path.join(cls.path, subpath)
raise EnvironmentError("The subrepository path ('%s') was not created "
"yet. Please call 'ctf login' to get it cloned "
"before performing any further actions." %
cls.path)
@classmethod
def clone(cls, fork=True):
repohost = RepoHost.instance()
upstream_url = repohost.get_ssh_url(Settings.submissions_project)
if fork:
forked_project, origin_url = \
repohost.fork(Settings.submissions_project)
LocalSettings.forked_project = forked_project
else:
origin_url = upstream_url
cls.git(['clone', origin_url, SUBREPO_NAME], cwd=cls.clone_into)
cls.git(['remote', 'add', 'upstream', upstream_url])
if fork:
cls.git(['remote', 'set-url', 'origin',
repohost.get_ssh_url(forked_project)])
@classmethod
def pull(cls):
cls.git(['checkout', 'master'])
cls.git(['pull', '--rebase', 'upstream', 'master'])
@classmethod
def push(cls, commit_message='commit', merge_request=True):
branch = 'master'
if merge_request:
branch = cls.random_branch()
cls.git(['checkout', '-b', branch, 'master'])
cls.git(['add', '-A'])
cls.git(['commit', '-m', commit_message],
returncodes={0, 1})
cls.git(['push', '-u', 'origin', branch])
if merge_request:
repohost = RepoHost.instance()
repohost.merge_request(LocalSettings.forked_project,
Settings.submissions_project,
source_branch=branch,
title=commit_message)
@staticmethod
def random_branch():
return base64.b32encode(pysodium.randombytes(10))\
.decode('utf-8').lower()
@classmethod
def git(cls, args, **kwargs):
returncodes = kwargs.pop('returncodes', {0})
if 'cwd' not in kwargs:
kwargs['cwd'] = cls.get_path()
p = subprocess.Popen(['git'] + args, **kwargs)
r = None
if 'stdout' in kwargs:
r = p.stdout.read()
returncode = p.wait()
if returncode not in returncodes:
raise GitError(returncode)
return r
class GitError(Exception):
def __init__(self, returncode, *args):
self.returncode = returncode
super(GitError, self).__init__(*args)
thisdir = os.path.dirname(os.path.realpath(__file__))
SubRepo.set_clone_into(os.path.realpath(os.path.join(thisdir, os.pardir)))
| true | true |
1c3ba750dca70b3fc9c0e5f11c889842c4731140 | 829 | py | Python | design_pattern/abstract_factory.py | ischaojie/learn-py | b24ec70c776fbc7176bdffbbd1b9ce46e6a25916 | [
"MIT"
] | null | null | null | design_pattern/abstract_factory.py | ischaojie/learn-py | b24ec70c776fbc7176bdffbbd1b9ce46e6a25916 | [
"MIT"
] | null | null | null | design_pattern/abstract_factory.py | ischaojie/learn-py | b24ec70c776fbc7176bdffbbd1b9ce46e6a25916 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
抽象工厂模式
将组件的实现通过一个工厂暴露出来,从而统一行为。
"""
from typing import Type
class Pet:
    """
    Base pet class: stores a name; concrete subclasses implement speak().
    """
    def __init__(self, name: str):
        self.name = name
    def speak(self):
        # abstract -- concrete pets (Dog, Cat) override this
        raise NotImplementedError
    def __str__(self):
        return self.name
class Dog(Pet):
    """Concrete pet whose speak() barks."""
    def speak(self):
        return "woof {}".format(self.name)
class Cat(Pet):
    """Concrete pet whose speak() meows."""
    def speak(self):
        return "meow {}".format(self.name)
class PetShop:
    def __init__(self, pet_cls: Type[Pet]):
        """
        pet_cls -- the concrete Pet class (factory) this shop instantiates.
        """
        self.pet_cls = pet_cls
    def sale(self, name: str):
        """
        Create and return a pet of the configured class with the given name,
        so all Pet subclasses are constructed uniformly through the shop.
        """
        return self.pet_cls(name)
if __name__ == '__main__':
    # Demo: a shop configured with the Dog factory sells a pet named "tom".
    shop = PetShop(Dog)
    pet = shop.sale("tom")
    print(pet.speak())
| 15.351852 | 43 | 0.55006 |
from typing import Type
class Pet:
def __init__(self, name: str):
self.name = name
def speak(self):
raise NotImplementedError
def __str__(self):
return self.name
class Dog(Pet):
def speak(self):
return "woof {}".format(self.name)
class Cat(Pet):
def speak(self):
return "meow {}".format(self.name)
class PetShop:
def __init__(self, pet_cls: Type[Pet]):
self.pet_cls = pet_cls
def sale(self, name: str):
return self.pet_cls(name)
if __name__ == '__main__':
shop = PetShop(Dog)
pet = shop.sale("tom")
print(pet.speak())
| true | true |
1c3ba7ebb306911a1f12655bba992630fc3e9222 | 1,754 | py | Python | patronclient/tests/unit/v2/test_cloudpipe.py | casbin/openstack-patron | b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25 | [
"Apache-2.0"
] | null | null | null | patronclient/tests/unit/v2/test_cloudpipe.py | casbin/openstack-patron | b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25 | [
"Apache-2.0"
] | null | null | null | patronclient/tests/unit/v2/test_cloudpipe.py | casbin/openstack-patron | b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from patronclient.tests.unit.fixture_data import client
from patronclient.tests.unit.fixture_data import cloudpipe as data
from patronclient.tests.unit import utils
from patronclient.v2 import cloudpipe
class CloudpipeTest(utils.FixturedTestCase):
    # Fixture wiring: run every test against both client variants.
    data_fixture_class = data.Fixture
    scenarios = [('original', {'client_fixture_class': client.V1}),
                 ('session', {'client_fixture_class': client.SessionV1})]
    def test_list_cloudpipes(self):
        """Listing hits GET /os-cloudpipe and yields Cloudpipe objects."""
        cp = self.cs.cloudpipe.list()
        self.assert_called('GET', '/os-cloudpipe')
        # plain loop instead of a discarded list comprehension (the original
        # built a throwaway list purely for the assertion side effects)
        for c in cp:
            self.assertIsInstance(c, cloudpipe.Cloudpipe)
    def test_create(self):
        """Creating POSTs the project id and returns a string identifier."""
        project = "test"
        cp = self.cs.cloudpipe.create(project)
        body = {'cloudpipe': {'project_id': project}}
        self.assert_called('POST', '/os-cloudpipe', body)
        self.assertIsInstance(cp, six.string_types)
    def test_update(self):
        """Updating the VPN config PUTs ip/port to configure-project."""
        self.cs.cloudpipe.update("192.168.1.1", 2345)
        body = {'configure_project': {'vpn_ip': "192.168.1.1",
                                      'vpn_port': 2345}}
        self.assert_called('PUT', '/os-cloudpipe/configure-project', body)
| 38.130435 | 78 | 0.68016 |
import six
from patronclient.tests.unit.fixture_data import client
from patronclient.tests.unit.fixture_data import cloudpipe as data
from patronclient.tests.unit import utils
from patronclient.v2 import cloudpipe
class CloudpipeTest(utils.FixturedTestCase):
    """Exercises the os-cloudpipe API bindings against canned fixtures."""

    data_fixture_class = data.Fixture

    # Run every test once with the original client and once with the
    # session-based client.
    scenarios = [('original', {'client_fixture_class': client.V1}),
                 ('session', {'client_fixture_class': client.SessionV1})]

    def test_list_cloudpipes(self):
        cloudpipes = self.cs.cloudpipe.list()
        self.assert_called('GET', '/os-cloudpipe')
        for entry in cloudpipes:
            self.assertIsInstance(entry, cloudpipe.Cloudpipe)

    def test_create(self):
        project_id = "test"
        created = self.cs.cloudpipe.create(project_id)
        expected_body = {'cloudpipe': {'project_id': project_id}}
        self.assert_called('POST', '/os-cloudpipe', expected_body)
        self.assertIsInstance(created, six.string_types)

    def test_update(self):
        self.cs.cloudpipe.update("192.168.1.1", 2345)
        expected_body = {'configure_project': {'vpn_ip': "192.168.1.1",
                                               'vpn_port': 2345}}
        self.assert_called('PUT', '/os-cloudpipe/configure-project', expected_body)
| true | true |
1c3ba838ed53f1489f869377b5e50e8cfdf3e5bf | 4,379 | py | Python | ipodio/database.py | sourcery-ai-bot/ipodio | e32ab2d1928a2b47500dd0ce0cbd17f71102dbe2 | [
"BSD-3-Clause"
] | 9 | 2015-06-02T23:31:20.000Z | 2021-05-17T17:26:32.000Z | ipodio/database.py | sourcery-ai-bot/ipodio | e32ab2d1928a2b47500dd0ce0cbd17f71102dbe2 | [
"BSD-3-Clause"
] | null | null | null | ipodio/database.py | sourcery-ai-bot/ipodio | e32ab2d1928a2b47500dd0ce0cbd17f71102dbe2 | [
"BSD-3-Clause"
] | 3 | 2015-10-07T21:51:38.000Z | 2021-01-23T12:22:58.000Z | #-*- coding: utf-8 -*-
"""
Database
Stores all the tracks contained within the iPod indexed in a convenient way for
fast retrieval, iteration, addition and removal.
Intended as a very thin wrapper around gpod.Database in order to provide a
cleaner interface, extra functionality and resolve some flaws.
# Create the database without indexing its contents
database = ipodio.Database(gpod.Database('/ipod/mountpoint'))
    # Index all tracks by their hashes.
    # The chosen structure is a dictionary of sets,
    # which might be regarded as a multi-dict.
database.update_index()
    # The indexing allows getting a track by its content hash
    # and thus makes it easy to detect and avoid duplication.
database.get_by_hash('some_calculated_track_hash')
# Basic operations along with a ipodio.Track instance
database.get(track)
database.add(track)
database.remove(track)
# The gpod Database reference is public until those use cases which need it
# are sorted out and implemented as part of the class.
database.internal
# A flag is maintained and updated on database modification
# so unnecessary expensive closing work is spared by checking.
if database.updated:
database.copy_files() # Physically send track files if needed
database.save() # Save the current database state and store it
"""
import gpod
from collections import defaultdict
from .track import Track
def first(iterable):
    """Return the first element of *iterable*, or None when it is empty."""
    return next(iter(iterable), None)
class Playlist(object):
    """Thin wrapper around a gpod playlist.

    Exposes name access, master-playlist detection and track
    addition/removal without leaking gpod types to callers.
    """

    def __init__(self, playlist):
        self.__playlist = playlist

    @classmethod
    def create(cls, name, database, internal_class=gpod.Playlist):
        """Build a new playlist called *name* inside *database*."""
        return cls(internal_class(database.internal, name))

    @property
    def internal(self):
        """The wrapped gpod playlist object."""
        return self.__playlist

    @property
    def name(self):
        return self.__playlist.get_name()

    @name.setter
    def name(self, name):
        self.__playlist.set_name(name)

    @property
    def is_master(self):
        return self.__playlist.get_master()

    @property
    def tracks(self):
        """All tracks in this playlist, wrapped as ipodio Tracks."""
        return [Track(track) for track in self.__playlist]

    def append(self, track):
        self.__playlist.add(track.internal)

    def extend(self, tracks):
        # Explicit loop instead of map(): map() is lazy on Python 3, so
        # the previous map(self.append, tracks) silently did nothing there.
        for track in tracks:
            self.append(track)

    def remove(self, track):
        self.__playlist.remove(track.internal)

    def discard(self, tracks):
        # See extend(): avoid lazy map() so removal also runs on Python 3.
        for track in tracks:
            self.remove(track)
class Database(object):
    """Indexed wrapper around a gpod.Database (see module docstring)."""

    def __init__(self, database):
        self.__database = database
        # hash -> set of Track objects sharing that content hash.
        self.index = defaultdict(set)
        # True once anything changed, so callers can skip expensive saves.
        self.updated = False

    @classmethod
    def create(cls, mountpoint, internal_class=gpod.Database):
        """Open the iPod database found at *mountpoint*."""
        return cls(internal_class(mountpoint))

    @property
    def internal(self):
        """The wrapped gpod database object."""
        return self.__database

    @property
    def tracks(self):
        return [Track(track) for track in self.__database]

    @property
    def playlists(self):
        return [Playlist(playlist) for playlist in self.__database.Playlists]

    def __add_index(self, track):
        if not track.hash:
            # Hashing mutates the track, so the database needs saving.
            self.updated = True
            track.update_hash()
        self.index[track.hash].add(track)

    def update_index(self):
        """(Re)index every track by its content hash."""
        for track in self.tracks:
            self.__add_index(track)

    def get(self, track):
        return self.get_by_hash(track.hash)

    def get_by_hash(self, hash):
        return first(self.find_by_hash(hash))

    def find_by_hash(self, hash):
        return self.index[hash]

    def add(self, track):
        self.updated = True
        self.__add_index(track)
        self.__database.add(track.internal)
        self.__database.Master.add(track.internal)

    @property
    def duplicates(self):
        """Groups of tracks whose content hash collides."""
        # .values() instead of the Python-2-only .itervalues().
        return [group for group in self.index.values() if len(group) > 1]

    def remove(self, track):
        self.updated = True
        self.__database.remove(track.internal, quiet=True)

    def remove_playlist(self, playlist):
        self.updated = True
        # Avoid physically removing tracks from the iPod by setting ipod=False
        # This may orphan tracks if they were only in this playlist
        self.__database.remove(playlist.internal, quiet=True, ipod=False)

    def copy_files(self, progress=None):
        self.__database.copy_delayed_files(progress)

    def save(self):
        self.__database.close()
| 27.540881 | 79 | 0.675953 |
import gpod
from collections import defaultdict
from .track import Track
def first(iterable):
    """Return the first element of *iterable*, or None when it is empty."""
    return next(iter(iterable), None)
class Playlist(object):
    """Thin wrapper around a gpod playlist.

    Exposes name access, master-playlist detection and track
    addition/removal without leaking gpod types to callers.
    """

    def __init__(self, playlist):
        self.__playlist = playlist

    @classmethod
    def create(cls, name, database, internal_class=gpod.Playlist):
        """Build a new playlist called *name* inside *database*."""
        return cls(internal_class(database.internal, name))

    @property
    def internal(self):
        """The wrapped gpod playlist object."""
        return self.__playlist

    @property
    def name(self):
        return self.__playlist.get_name()

    @name.setter
    def name(self, name):
        self.__playlist.set_name(name)

    @property
    def is_master(self):
        return self.__playlist.get_master()

    @property
    def tracks(self):
        """All tracks in this playlist, wrapped as ipodio Tracks."""
        return [Track(track) for track in self.__playlist]

    def append(self, track):
        self.__playlist.add(track.internal)

    def extend(self, tracks):
        # Explicit loop instead of map(): map() is lazy on Python 3, so
        # the previous map(self.append, tracks) silently did nothing there.
        for track in tracks:
            self.append(track)

    def remove(self, track):
        self.__playlist.remove(track.internal)

    def discard(self, tracks):
        # See extend(): avoid lazy map() so removal also runs on Python 3.
        for track in tracks:
            self.remove(track)
class Database(object):
    """Indexed wrapper around a gpod.Database."""

    def __init__(self, database):
        self.__database = database
        # hash -> set of Track objects sharing that content hash.
        self.index = defaultdict(set)
        # True once anything changed, so callers can skip expensive saves.
        self.updated = False

    @classmethod
    def create(cls, mountpoint, internal_class=gpod.Database):
        """Open the iPod database found at *mountpoint*."""
        return cls(internal_class(mountpoint))

    @property
    def internal(self):
        """The wrapped gpod database object."""
        return self.__database

    @property
    def tracks(self):
        return [Track(track) for track in self.__database]

    @property
    def playlists(self):
        return [Playlist(playlist) for playlist in self.__database.Playlists]

    def __add_index(self, track):
        if not track.hash:
            # Hashing mutates the track, so the database needs saving.
            self.updated = True
            track.update_hash()
        self.index[track.hash].add(track)

    def update_index(self):
        """(Re)index every track by its content hash."""
        for track in self.tracks:
            self.__add_index(track)

    def get(self, track):
        return self.get_by_hash(track.hash)

    def get_by_hash(self, hash):
        return first(self.find_by_hash(hash))

    def find_by_hash(self, hash):
        return self.index[hash]

    def add(self, track):
        self.updated = True
        self.__add_index(track)
        self.__database.add(track.internal)
        self.__database.Master.add(track.internal)

    @property
    def duplicates(self):
        """Groups of tracks whose content hash collides."""
        # .values() instead of the Python-2-only .itervalues().
        return [group for group in self.index.values() if len(group) > 1]

    def remove(self, track):
        self.updated = True
        self.__database.remove(track.internal, quiet=True)

    def remove_playlist(self, playlist):
        self.updated = True
        # ipod=False keeps the files on the device: removing a playlist
        # must not delete tracks (it may only orphan them).
        self.__database.remove(playlist.internal, quiet=True, ipod=False)

    def copy_files(self, progress=None):
        self.__database.copy_delayed_files(progress)

    def save(self):
        self.__database.close()
| true | true |
1c3ba95195b1da46d7bee9e066627e9b38457b92 | 10,388 | py | Python | mqtt2influxdb/mqtt2influxdb.py | tvecera/bch-mqtt2influxdb | 6e3f6b898144ce89307e61858364d005f8f6a35d | [
"MIT"
] | 15 | 2018-01-14T16:27:11.000Z | 2020-06-08T00:44:16.000Z | mqtt2influxdb/mqtt2influxdb.py | tvecera/bch-mqtt2influxdb | 6e3f6b898144ce89307e61858364d005f8f6a35d | [
"MIT"
] | 11 | 2018-05-03T07:42:34.000Z | 2020-06-24T10:01:22.000Z | mqtt2influxdb/mqtt2influxdb.py | tvecera/bch-mqtt2influxdb | 6e3f6b898144ce89307e61858364d005f8f6a35d | [
"MIT"
] | 10 | 2020-08-07T11:22:24.000Z | 2022-01-13T15:54:17.000Z | #!/usr/bin/env python3
import os
import sys
import logging
import json
from datetime import datetime
import paho.mqtt.client
from paho.mqtt.client import topic_matches_sub
import influxdb
import jsonpath_ng
import requests
import base64
from requests.auth import HTTPBasicAuth
import http.client as http_client
import builtins
import py_expression_eval
import pycron
from .expr import variable_to_jsonpath
from .config import json_path
class Mqtt2InfluxDB:
    """Bridge MQTT topics into InfluxDB measurements.

    For every configured "point" the client subscribes to its topic,
    decodes each JSON payload and writes the extracted fields/tags to
    InfluxDB (optionally forwarding selected values over HTTP).
    """

    def __init__(self, config):
        # config: validated configuration dict with 'mqtt', 'influxdb'
        # and 'points' sections (see the config module).
        self._points = config['points']
        self._config = config
        self._influxdb = influxdb.InfluxDBClient(config['influxdb']['host'],
                                                 config['influxdb']['port'],
                                                 config['influxdb'].get('username', 'root'),
                                                 config['influxdb'].get('password', 'root'),
                                                 ssl=config['influxdb'].get('ssl', False))
        self._mqtt = paho.mqtt.client.Client()
        if config['mqtt'].get('username', None):
            self._mqtt.username_pw_set(config['mqtt']['username'],
                                       config['mqtt'].get('password', None))
        if config['mqtt'].get('cafile', None):
            # TLS is enabled only when a CA file is configured.
            self._mqtt.tls_set(config['mqtt']['cafile'],
                               config['mqtt'].get('certfile', None),
                               config['mqtt'].get('keyfile', None))
        self._mqtt.on_connect = self._on_mqtt_connect
        self._mqtt.on_disconnect = self._on_mqtt_disconnect
        self._mqtt.on_message = self._on_mqtt_message

    def run(self):
        """Create/select the databases, then block forever serving MQTT."""
        logging.debug('InfluxDB create database %s', self._config['influxdb']['database'])
        self._influxdb.create_database(self._config['influxdb']['database'])
        self._influxdb.switch_database(self._config['influxdb']['database'])
        for point in self._points:
            if 'database' in point:
                # A point may write to its own database instead of the default.
                logging.debug('InfluxDB create database %s', point['database'])
                self._influxdb.create_database(point['database'])
        logging.info('MQTT broker host: %s, port: %d, use tls: %s',
                     self._config['mqtt']['host'],
                     self._config['mqtt']['port'],
                     bool(self._config['mqtt'].get('cafile', None)))
        self._mqtt.connect_async(self._config['mqtt']['host'], self._config['mqtt']['port'], keepalive=10)
        self._mqtt.loop_forever()

    def _on_mqtt_connect(self, client, userdata, flags, rc):
        """Log the CONNACK result and subscribe to every configured topic."""
        logging.info('Connected to MQTT broker with code %s', rc)
        lut = {paho.mqtt.client.CONNACK_REFUSED_PROTOCOL_VERSION: 'incorrect protocol version',
               paho.mqtt.client.CONNACK_REFUSED_IDENTIFIER_REJECTED: 'invalid client identifier',
               paho.mqtt.client.CONNACK_REFUSED_SERVER_UNAVAILABLE: 'server unavailable',
               paho.mqtt.client.CONNACK_REFUSED_BAD_USERNAME_PASSWORD: 'bad username or password',
               paho.mqtt.client.CONNACK_REFUSED_NOT_AUTHORIZED: 'not authorised'}
        if rc != paho.mqtt.client.CONNACK_ACCEPTED:
            logging.error('Connection refused from reason: %s', lut.get(rc, 'unknown code'))
        if rc == paho.mqtt.client.CONNACK_ACCEPTED:
            for point in self._points:
                logging.info('subscribe %s', point['topic'])
                client.subscribe(point['topic'])

    def _on_mqtt_disconnect(self, client, userdata, rc):
        logging.info('Disconnect from MQTT broker with code %s', rc)

    def _on_mqtt_message(self, client, userdata, message):
        """Decode one MQTT message and write it for every matching point."""
        logging.debug('mqtt_on_message %s %s', message.topic, message.payload)
        msg = None
        for point in self._points:
            if topic_matches_sub(point['topic'], message.topic):
                if not msg:
                    # Parse the payload lazily, once, on the first match.
                    payload = message.payload.decode('utf-8')
                    if payload == '':
                        payload = 'null'
                    try:
                        payload = json.loads(payload)
                    except Exception as e:
                        logging.error('parse json: %s topic: %s payload: %s', e, message.topic, message.payload)
                        return
                    msg = {
                        "topic": message.topic.split('/'),
                        "payload": payload,
                        "timestamp": message.timestamp,
                        "qos": message.qos
                    }
                if 'schedule' in point:
                    # check if current time is valid in schedule
                    if not pycron.is_now(point['schedule']):
                        logging.info('Skipping %s due to schedule %s' % (message.topic, point['schedule']))
                        continue
                measurement = self._get_value_from_str_or_JSONPath(point['measurement'], msg)
                if measurement is None:
                    logging.warning('unknown measurement')
                    return
                record = {'measurement': measurement,
                          'time': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
                          'tags': {},
                          'fields': {}}
                if 'base64decode' in self._config:
                    # Expose the decoded payload under msg["base64decoded"],
                    # both as raw bytes and as a hex string.
                    data = self._get_value_from_str_or_JSONPath(self._config['base64decode']["source"], msg)
                    dataDecoded = base64.b64decode(data)
                    msg.update({"base64decoded": {self._config['base64decode']["target"]: {"raw": dataDecoded}}})
                    dataDecoded = dataDecoded.hex()
                    msg.update({"base64decoded": {self._config['base64decode']["target"]: {"hex": dataDecoded}}})
                if 'fields' in point:
                    if isinstance(point['fields'], jsonpath_ng.JSONPath):
                        record['fields'] = self._get_value_from_str_or_JSONPath(point['fields'], msg)
                    else:
                        for key in point['fields']:
                            if isinstance(point['fields'][key], dict):
                                # {value: ..., type: ...} form: extract, then
                                # cast through the named builtin (int, float, ...).
                                val = self._get_value_from_str_or_JSONPath(point['fields'][key]['value'], msg)
                                convFunc = getattr(builtins, point['fields'][key]['type'], None)
                                if val is None:
                                    continue
                                if convFunc:
                                    try:
                                        val = convFunc(val)
                                    except ValueError:
                                        val = None
                                        logging.warning('invalid conversion function key')
                            else:
                                val = self._get_value_from_str_or_JSONPath(point['fields'][key], msg)
                            if key == 'value':
                                if isinstance(val, bool):
                                    # booltoint: store booleans as 0/1.
                                    if 'type' in point['fields'] and point['fields']['type'] == 'booltoint':
                                        val = int(val)
                            elif key == 'type':
                                if val == 'booltoint':
                                    val = 'int'
                            if val is None:
                                logging.warning('Unable to get value for %s' % point['fields'][key])
                                continue
                            record['fields'][key] = val
                        if len(record['fields']) != len(point['fields']):
                            logging.warning('different number of fields')
                if not record['fields']:
                    # InfluxDB requires at least one field per record.
                    logging.warning('empty fields')
                    return
                if 'tags' in point:
                    for key in point['tags']:
                        val = self._get_value_from_str_or_JSONPath(point['tags'][key], msg)
                        if val is None:
                            logging.warning('Unable to get value for tag %s' % point['tags'][key])
                            continue
                        record['tags'][key] = val
                    if len(record['tags']) != len(point['tags']):
                        logging.warning('different number of tags')
                logging.debug('influxdb write %s', record)
                self._influxdb.write_points([record], database=point.get('database', None))
                if 'http' in self._config:
                    # Optional HTTP forwarding of selected payload values.
                    http_record = {}
                    for key in point['httpcontent']:
                        val = self._get_value_from_str_or_JSONPath(point['httpcontent'][key], msg)
                        if val is None:
                            continue
                        http_record.update({key: val})
                    action = getattr(requests, self._config['http']['action'], None)
                    if action:
                        r = action(url=self._config['http']['destination'], data=http_record, auth=HTTPBasicAuth(self._config['http']['username'], self._config['http']['password']))
                    else:
                        logging.error("Invalid HTTP method key!")

    def _get_value_from_str_or_JSONPath(self, param, msg):
        """Resolve a config parameter: literal str, JSONPath, or expression.

        Returns None when a JSONPath finds nothing (callers treat None as
        "value unavailable").
        """
        if isinstance(param, str):
            return param
        elif isinstance(param, jsonpath_ng.JSONPath):
            tmp = param.find(msg)
            if tmp:
                return tmp[0].value
        elif isinstance(param, py_expression_eval.Expression):
            vars = {}
            for var in param.variables():
                # must start with JSON__
                if var.startswith('JSON__'):
                    json_field = variable_to_jsonpath(var)
                    tmp = json_path(json_field).find(msg)
                    if tmp:
                        vars[var] = tmp[0].value
                    else:
                        logging.error('unable to find JSON field %s!' % json_field)
                else:
                    logging.error('unknown variable %s in parser expression %s!' % (var, param.toString()))
            logging.debug('evaluating expression %s using the variables %s' % (param.toString(), str(vars)))
            return param.evaluate(vars)
| 45.561404 | 181 | 0.503273 |
import os
import sys
import logging
import json
from datetime import datetime
import paho.mqtt.client
from paho.mqtt.client import topic_matches_sub
import influxdb
import jsonpath_ng
import requests
import base64
from requests.auth import HTTPBasicAuth
import http.client as http_client
import builtins
import py_expression_eval
import pycron
from .expr import variable_to_jsonpath
from .config import json_path
class Mqtt2InfluxDB:
    """Bridge MQTT topics into InfluxDB measurements.

    For every configured "point" the client subscribes to its topic,
    decodes each JSON payload and writes the extracted fields/tags to
    InfluxDB (optionally forwarding selected values over HTTP).
    """

    def __init__(self, config):
        # config: validated configuration dict with 'mqtt', 'influxdb'
        # and 'points' sections.
        self._points = config['points']
        self._config = config
        self._influxdb = influxdb.InfluxDBClient(config['influxdb']['host'],
                                                 config['influxdb']['port'],
                                                 config['influxdb'].get('username', 'root'),
                                                 config['influxdb'].get('password', 'root'),
                                                 ssl=config['influxdb'].get('ssl', False))
        self._mqtt = paho.mqtt.client.Client()
        if config['mqtt'].get('username', None):
            self._mqtt.username_pw_set(config['mqtt']['username'],
                                       config['mqtt'].get('password', None))
        if config['mqtt'].get('cafile', None):
            # TLS is enabled only when a CA file is configured.
            self._mqtt.tls_set(config['mqtt']['cafile'],
                               config['mqtt'].get('certfile', None),
                               config['mqtt'].get('keyfile', None))
        self._mqtt.on_connect = self._on_mqtt_connect
        self._mqtt.on_disconnect = self._on_mqtt_disconnect
        self._mqtt.on_message = self._on_mqtt_message

    def run(self):
        """Create/select the databases, then block forever serving MQTT."""
        logging.debug('InfluxDB create database %s', self._config['influxdb']['database'])
        self._influxdb.create_database(self._config['influxdb']['database'])
        self._influxdb.switch_database(self._config['influxdb']['database'])
        for point in self._points:
            if 'database' in point:
                # A point may write to its own database instead of the default.
                logging.debug('InfluxDB create database %s', point['database'])
                self._influxdb.create_database(point['database'])
        logging.info('MQTT broker host: %s, port: %d, use tls: %s',
                     self._config['mqtt']['host'],
                     self._config['mqtt']['port'],
                     bool(self._config['mqtt'].get('cafile', None)))
        self._mqtt.connect_async(self._config['mqtt']['host'], self._config['mqtt']['port'], keepalive=10)
        self._mqtt.loop_forever()

    def _on_mqtt_connect(self, client, userdata, flags, rc):
        """Log the CONNACK result and subscribe to every configured topic."""
        logging.info('Connected to MQTT broker with code %s', rc)
        lut = {paho.mqtt.client.CONNACK_REFUSED_PROTOCOL_VERSION: 'incorrect protocol version',
               paho.mqtt.client.CONNACK_REFUSED_IDENTIFIER_REJECTED: 'invalid client identifier',
               paho.mqtt.client.CONNACK_REFUSED_SERVER_UNAVAILABLE: 'server unavailable',
               paho.mqtt.client.CONNACK_REFUSED_BAD_USERNAME_PASSWORD: 'bad username or password',
               paho.mqtt.client.CONNACK_REFUSED_NOT_AUTHORIZED: 'not authorised'}
        if rc != paho.mqtt.client.CONNACK_ACCEPTED:
            logging.error('Connection refused from reason: %s', lut.get(rc, 'unknown code'))
        if rc == paho.mqtt.client.CONNACK_ACCEPTED:
            for point in self._points:
                logging.info('subscribe %s', point['topic'])
                client.subscribe(point['topic'])

    def _on_mqtt_disconnect(self, client, userdata, rc):
        logging.info('Disconnect from MQTT broker with code %s', rc)

    def _on_mqtt_message(self, client, userdata, message):
        """Decode one MQTT message and write it for every matching point."""
        logging.debug('mqtt_on_message %s %s', message.topic, message.payload)
        msg = None
        for point in self._points:
            if topic_matches_sub(point['topic'], message.topic):
                if not msg:
                    # Parse the payload lazily, once, on the first match.
                    payload = message.payload.decode('utf-8')
                    if payload == '':
                        payload = 'null'
                    try:
                        payload = json.loads(payload)
                    except Exception as e:
                        logging.error('parse json: %s topic: %s payload: %s', e, message.topic, message.payload)
                        return
                    msg = {
                        "topic": message.topic.split('/'),
                        "payload": payload,
                        "timestamp": message.timestamp,
                        "qos": message.qos
                    }
                if 'schedule' in point:
                    # Skip this point when the cron schedule excludes now.
                    if not pycron.is_now(point['schedule']):
                        logging.info('Skipping %s due to schedule %s' % (message.topic, point['schedule']))
                        continue
                measurement = self._get_value_from_str_or_JSONPath(point['measurement'], msg)
                if measurement is None:
                    logging.warning('unknown measurement')
                    return
                record = {'measurement': measurement,
                          'time': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
                          'tags': {},
                          'fields': {}}
                if 'base64decode' in self._config:
                    # Expose the decoded payload under msg["base64decoded"],
                    # both as raw bytes and as a hex string.
                    data = self._get_value_from_str_or_JSONPath(self._config['base64decode']["source"], msg)
                    dataDecoded = base64.b64decode(data)
                    msg.update({"base64decoded": {self._config['base64decode']["target"]: {"raw": dataDecoded}}})
                    dataDecoded = dataDecoded.hex()
                    msg.update({"base64decoded": {self._config['base64decode']["target"]: {"hex": dataDecoded}}})
                if 'fields' in point:
                    if isinstance(point['fields'], jsonpath_ng.JSONPath):
                        record['fields'] = self._get_value_from_str_or_JSONPath(point['fields'], msg)
                    else:
                        for key in point['fields']:
                            if isinstance(point['fields'][key], dict):
                                # {value: ..., type: ...} form: extract, then
                                # cast through the named builtin (int, float, ...).
                                val = self._get_value_from_str_or_JSONPath(point['fields'][key]['value'], msg)
                                convFunc = getattr(builtins, point['fields'][key]['type'], None)
                                if val is None:
                                    continue
                                if convFunc:
                                    try:
                                        val = convFunc(val)
                                    except ValueError:
                                        val = None
                                        logging.warning('invalid conversion function key')
                            else:
                                val = self._get_value_from_str_or_JSONPath(point['fields'][key], msg)
                            if key == 'value':
                                if isinstance(val, bool):
                                    # booltoint: store booleans as 0/1.
                                    if 'type' in point['fields'] and point['fields']['type'] == 'booltoint':
                                        val = int(val)
                            elif key == 'type':
                                if val == 'booltoint':
                                    val = 'int'
                            if val is None:
                                logging.warning('Unable to get value for %s' % point['fields'][key])
                                continue
                            record['fields'][key] = val
                        if len(record['fields']) != len(point['fields']):
                            logging.warning('different number of fields')
                if not record['fields']:
                    # InfluxDB requires at least one field per record.
                    logging.warning('empty fields')
                    return
                if 'tags' in point:
                    for key in point['tags']:
                        val = self._get_value_from_str_or_JSONPath(point['tags'][key], msg)
                        if val is None:
                            logging.warning('Unable to get value for tag %s' % point['tags'][key])
                            continue
                        record['tags'][key] = val
                    if len(record['tags']) != len(point['tags']):
                        logging.warning('different number of tags')
                logging.debug('influxdb write %s', record)
                self._influxdb.write_points([record], database=point.get('database', None))
                if 'http' in self._config:
                    # Optional HTTP forwarding of selected payload values.
                    http_record = {}
                    for key in point['httpcontent']:
                        val = self._get_value_from_str_or_JSONPath(point['httpcontent'][key], msg)
                        if val is None:
                            continue
                        http_record.update({key: val})
                    action = getattr(requests, self._config['http']['action'], None)
                    if action:
                        r = action(url=self._config['http']['destination'], data=http_record, auth=HTTPBasicAuth(self._config['http']['username'], self._config['http']['password']))
                    else:
                        logging.error("Invalid HTTP method key!")

    def _get_value_from_str_or_JSONPath(self, param, msg):
        """Resolve a config parameter: literal str, JSONPath, or expression.

        Returns None when a JSONPath finds nothing (callers treat None as
        "value unavailable").
        """
        if isinstance(param, str):
            return param
        elif isinstance(param, jsonpath_ng.JSONPath):
            tmp = param.find(msg)
            if tmp:
                return tmp[0].value
        elif isinstance(param, py_expression_eval.Expression):
            vars = {}
            for var in param.variables():
                # Expression variables must start with JSON__ to be resolved
                # against the message via JSONPath.
                if var.startswith('JSON__'):
                    json_field = variable_to_jsonpath(var)
                    tmp = json_path(json_field).find(msg)
                    if tmp:
                        vars[var] = tmp[0].value
                    else:
                        logging.error('unable to find JSON field %s!' % json_field)
                else:
                    logging.error('unknown variable %s in parser expression %s!' % (var, param.toString()))
            logging.debug('evaluating expression %s using the variables %s' % (param.toString(), str(vars)))
            return param.evaluate(vars)
| true | true |
1c3ba9851458fc1035a3f92f164555d9d272be19 | 1,176 | py | Python | astroplpython/data/PowerFrequencyMeasurement.py | brianthomas/astroplpython | 2bb3aad304b308a272916b3ef90db9d0fd1ce508 | [
"MIT"
] | null | null | null | astroplpython/data/PowerFrequencyMeasurement.py | brianthomas/astroplpython | 2bb3aad304b308a272916b3ef90db9d0fd1ce508 | [
"MIT"
] | null | null | null | astroplpython/data/PowerFrequencyMeasurement.py | brianthomas/astroplpython | 2bb3aad304b308a272916b3ef90db9d0fd1ce508 | [
"MIT"
] | null | null | null | '''
Created on Jul 11, 2014
@author: thomas
'''
from astroplpython.data.Measurement import x_y
import collections
class p_f(x_y):
    """An individual measurement of power P at frequency f; P(f)."""

    @property
    def power(self):
        return self.x

    @property
    def frequency(self):
        return self.y

    def __str__(self):
        return "p_f(p:" + str(self.power) + " f:" + str(self.frequency) + ")"

    def __init__(self, power, frequency):
        """Store the (power, frequency) pair."""
        super().__init__(power, frequency)

    @staticmethod
    def asTupleNumpyArrays(p_f_list):
        """Convert a list of p_f into a named pair of value lists.

        Returns a 'pf' namedtuple instance with .powers and .frequencies.
        """
        pf_type = collections.namedtuple('pf', ['powers', 'frequencies'])
        powers = []
        frequencies = []
        for val in p_f_list:
            powers.append(val.power)
            frequencies.append(val.frequency)
        # Return a namedtuple *instance* instead of assigning class
        # attributes on the namedtuple type: the old approach shared
        # mutable state between calls.
        return pf_type(powers=powers, frequencies=frequencies)

    @staticmethod
    def dbStrToArray(strarr):
        """Convert a (postgres) string representation of an array of values to p_f[]."""
        return x_y._dbStrToArray(p_f, strarr)
| 24.5 | 89 | 0.570578 | from astroplpython.data.Measurement import x_y
import collections
class p_f(x_y):
    """An individual measurement of power P at frequency f; P(f)."""

    @property
    def power(self):
        return self.x

    @property
    def frequency(self):
        return self.y

    def __str__(self):
        return "p_f(p:" + str(self.power) + " f:" + str(self.frequency) + ")"

    def __init__(self, power, frequency):
        """Store the (power, frequency) pair."""
        super().__init__(power, frequency)

    @staticmethod
    def asTupleNumpyArrays(p_f_list):
        """Convert a list of p_f into a named pair of value lists.

        Returns a 'pf' namedtuple instance with .powers and .frequencies.
        """
        pf_type = collections.namedtuple('pf', ['powers', 'frequencies'])
        powers = []
        frequencies = []
        for val in p_f_list:
            powers.append(val.power)
            frequencies.append(val.frequency)
        # Return a namedtuple *instance* instead of assigning class
        # attributes on the namedtuple type: the old approach shared
        # mutable state between calls.
        return pf_type(powers=powers, frequencies=frequencies)

    @staticmethod
    def dbStrToArray(strarr):
        """Convert a (postgres) string representation of an array of values to p_f[]."""
        return x_y._dbStrToArray(p_f, strarr)
| true | true |
1c3baaba6660298a8f8cc9c03fba5f4c5a2d7874 | 2,084 | py | Python | Codes/Mathematical/random_number_generator.py | datta-agni/Python-Codes | 4c887a81f06da4732762e35ca6c2bb34bf1d7d41 | [
"MIT"
] | null | null | null | Codes/Mathematical/random_number_generator.py | datta-agni/Python-Codes | 4c887a81f06da4732762e35ca6c2bb34bf1d7d41 | [
"MIT"
] | null | null | null | Codes/Mathematical/random_number_generator.py | datta-agni/Python-Codes | 4c887a81f06da4732762e35ca6c2bb34bf1d7d41 | [
"MIT"
] | null | null | null | # Program to generate a random number between 0 and 9
# importing the random module
import random
def randomization_generators(low=None, high=None, mean=None, sigma=None,
                             nbytes=None, nbits=None):
    """Demonstrate several stdlib random generators interactively.

    Any argument left as None is prompted for on stdin when the function
    is CALLED.  (The previous version used ``input()`` calls as default
    argument values; defaults are evaluated once, when the ``def``
    statement executes, so the prompts ran at import time and the
    answers were frozen for every later call.)

    Parameters
    ----------
    low, high : int, optional
        Inclusive range for the integer/uniform generators.
    mean, sigma : int, optional
        Mean and standard deviation for the distribution generators.
    nbytes, nbits : int, optional
        Sizes for randbytes()/getrandbits().
    """
    if low is None:
        low = int(input("Enter the starting point of generation: "))
    if high is None:
        high = int(input("Enter the end point for the generation: "))
    if mean is None:
        mean = int(input(
            "Enter the mean for generating random numbers from a probability distribution: "
        ))
    if sigma is None:
        sigma = int(input(
            "Enter the standard deviation for generating random numbers from a probability distribution: "
        ))
    if nbytes is None:
        nbytes = int(input("Enter the size for random byte generator: "))
    if nbits is None:
        nbits = int(input("Enter the number of bits for generation: "))
    print("Return a random integer N such that a <= N <= b")
    print(random.randint(low, high), "\n")
    print("Generate n random bytes.")
    print(random.randbytes(nbytes), "\n")
    print(
        "Returns a non-negative Python integer with k random bits. This method is supplied with the MersenneTwister Generator."
    )
    print(random.getrandbits(nbits), "\n")
    print(
        "Return a random floating point number N such that a <= N <= b for a <= b and b <= N <= a for b < a. The end-point value b may or may not be included in the range depending on floating-point rounding in the equation a + (b-a) * random()."
    )
    print(random.uniform(low, high), "\n")
    print(
        "Gaussian distribution. mu is the mean, and sigma is the standard deviation. This is slightly faster than the normalvariate() function defined below."
    )
    print(random.gauss(mean, sigma), "\n")
    print(
        "Log normal distribution. If you take the natural logarithm of this distribution, you’ll get a normal distribution with mean mu and standard deviation sigma. mu can have any value, and sigma must be greater than zero."
    )
    print(random.lognormvariate(mean, sigma), "\n")
    print("Normal distribution. mu is the mean, and sigma is the standard deviation.")
    print(random.normalvariate(mean, sigma))
if __name__ == "__main__":
    # Run the interactive demo only when executed as a script.
    randomization_generators()
| 36.561404 | 246 | 0.662188 |
import random
def randomization_generators(low=None, high=None, mean=None, sigma=None,
                             nbytes=None, nbits=None):
    """Demonstrate several stdlib random generators interactively.

    Any argument left as None is prompted for on stdin when the function
    is CALLED.  (The previous version used ``input()`` calls as default
    argument values; defaults are evaluated once, when the ``def``
    statement executes, so the prompts ran at import time and the
    answers were frozen for every later call.)

    Parameters
    ----------
    low, high : int, optional
        Inclusive range for the integer/uniform generators.
    mean, sigma : int, optional
        Mean and standard deviation for the distribution generators.
    nbytes, nbits : int, optional
        Sizes for randbytes()/getrandbits().
    """
    if low is None:
        low = int(input("Enter the starting point of generation: "))
    if high is None:
        high = int(input("Enter the end point for the generation: "))
    if mean is None:
        mean = int(input(
            "Enter the mean for generating random numbers from a probability distribution: "
        ))
    if sigma is None:
        sigma = int(input(
            "Enter the standard deviation for generating random numbers from a probability distribution: "
        ))
    if nbytes is None:
        nbytes = int(input("Enter the size for random byte generator: "))
    if nbits is None:
        nbits = int(input("Enter the number of bits for generation: "))
    print("Return a random integer N such that a <= N <= b")
    print(random.randint(low, high), "\n")
    print("Generate n random bytes.")
    print(random.randbytes(nbytes), "\n")
    print(
        "Returns a non-negative Python integer with k random bits. This method is supplied with the MersenneTwister Generator."
    )
    print(random.getrandbits(nbits), "\n")
    print(
        "Return a random floating point number N such that a <= N <= b for a <= b and b <= N <= a for b < a. The end-point value b may or may not be included in the range depending on floating-point rounding in the equation a + (b-a) * random()."
    )
    print(random.uniform(low, high), "\n")
    print(
        "Gaussian distribution. mu is the mean, and sigma is the standard deviation. This is slightly faster than the normalvariate() function defined below."
    )
    print(random.gauss(mean, sigma), "\n")
    print(
        "Log normal distribution. If you take the natural logarithm of this distribution, you’ll get a normal distribution with mean mu and standard deviation sigma. mu can have any value, and sigma must be greater than zero."
    )
    print(random.lognormvariate(mean, sigma), "\n")
    print("Normal distribution. mu is the mean, and sigma is the standard deviation.")
    print(random.normalvariate(mean, sigma))
if __name__ == "__main__":
    # Run the interactive demo only when executed as a script.
    randomization_generators()
| true | true |
1c3bab1eca9dc99992fd701b30452ebb318e68c1 | 20,440 | py | Python | pyathena/classic/rst_handler.py | changgoo/pyathena-1 | c461ac3390d773537ce52393e3ebf68a3282aa46 | [
"MIT"
] | 1 | 2019-10-03T13:59:14.000Z | 2019-10-03T13:59:14.000Z | pyathena/classic/rst_handler.py | changgoo/pyathena-1 | c461ac3390d773537ce52393e3ebf68a3282aa46 | [
"MIT"
] | 3 | 2020-09-23T23:36:17.000Z | 2022-01-11T06:16:56.000Z | pyathena/classic/rst_handler.py | changgoo/pyathena-1 | c461ac3390d773537ce52393e3ebf68a3282aa46 | [
"MIT"
] | 2 | 2019-06-10T04:26:16.000Z | 2019-12-04T22:27:02.000Z | import struct
import numpy as np
import glob
import os
import sys
# Writer routines: reassemble per-grid restart files from parsed data.
def parse_misc_info(rstfile):
    """Split an Athena restart file into its non-grid byte sections.

    Scans the file line by line; a line starting with N_STEP, DENSITY,
    STAR or USER opens the next section (the marker line belongs to the
    new section).  Returns a dict mapping 'par', 'time', 'star' and
    'user' to the raw bytes of each section ('data' is deliberately
    skipped since the grid arrays are handled elsewhere).
    """
    section_names = ['par', 'time', 'data', 'star', 'user']
    markers = (b'N_STEP', b'DENSITY', b'STAR', b'USER')
    offsets = {'par': 0}
    lengths = {}
    idx = 0
    fp = open(rstfile, 'rb')
    while True:
        name = section_names[idx]
        # Keep extending the current section up to the read position.
        lengths[name] = fp.tell() - offsets[name]
        line = fp.readline()
        if not line:
            break
        if line.startswith(markers):
            # The marker line starts the next section.
            idx += 1
            offsets[section_names[idx]] = offsets[name] + lengths[name]
    sections = {}
    for name in ('par', 'time', 'star', 'user'):
        if name in offsets:
            fp.seek(offsets[name])
            sections[name] = fp.read(lengths[name])
    fp.close()
    return sections
def write_onefile(newfile, data_part, data_par):
    """Write one Athena restart file from parsed pieces.

    data_part maps field names ('DENSITY', '1-MOMENTUM', ...) to numpy
    arrays; data_par maps section names ('par', 'time', 'star', 'user')
    to the raw byte blocks captured by parse_misc_info().
    """
    field_order = ['DENSITY', '1-MOMENTUM', '2-MOMENTUM', '3-MOMENTUM',
                   'ENERGY', 'POTENTIAL',
                   '1-FIELD', '2-FIELD', '3-FIELD',
                   'SCALAR 0', 'SCALAR 1', 'SCALAR 2', 'SCALAR 3', 'SCALAR 4',
                   'SCALAR 5', 'SCALAR 6', 'SCALAR 7', 'SCALAR 8', 'SCALAR 9']
    with open(newfile, 'wb') as fp:
        # Parameter and time blocks come first, verbatim.
        for section in ('par', 'time'):
            fp.write(data_par[section])
        # DENSITY is mandatory and opens the grid data.
        fp.write(b'DENSITY\n')
        fp.write(data_part['DENSITY'].flatten().tobytes('C'))
        # Remaining fields are optional; written in canonical order.
        for name in field_order[1:]:
            if name in data_part:
                fp.write('\n{}\n'.format(name).encode())
                fp.write(data_part[name].flatten().tobytes('C'))
        fp.write(b'\n')
        # Trailing star-particle and user blocks, when present.
        for section in ('star', 'user'):
            if section in data_par:
                fp.write(data_par[section])
    return
def write_allfile(pardata, rstdata, grids, grid_disp=None,
                  id='newrst', dname='/tigress/changgoo/rst/', itime=0,
                  verbose=False, scalar=0):
    """Split a monolithic restart dataset into per-grid restart files.

    Parameters
    ----------
    pardata : dict
        Byte sections ('par', 'time', 'star', 'user') from parse_misc_info().
    rstdata : dict
        Field name -> full-domain numpy array ([z, y, x] ordering).
    grids : list of dict
        Grid descriptors with 'id', 'is' (starting indices) and 'Nx'.
    grid_disp : ndarray, optional
        Index displacement subtracted from each grid's 'is'
        (defaults to zero; None sentinel avoids a mutable default).
    id, dname, itime : str, str, int, optional
        Output problem id, output directory, and restart number.
    verbose : bool, optional
        Print each output file name when True.
    scalar : int, optional
        Number of passive scalars to copy.
    """
    if grid_disp is None:
        grid_disp = np.array([0, 0, 0])
    cc_varnames = ['DENSITY', '1-MOMENTUM', '2-MOMENTUM', '3-MOMENTUM',
                   'ENERGY', 'POTENTIAL']
    fc_varnames = ['1-FIELD', '2-FIELD', '3-FIELD']
    fields = list(rstdata.keys())
    for g in grids:
        i = g['id']
        # Athena naming convention: grid 0 has no "-id#" suffix.
        if i == 0:
            fname = id + '.%4.4d.rst' % itime
        else:
            fname = id + '-id%d.%4.4d.rst' % (i, itime)
        gis = g['is'] - grid_disp
        gnx = g['Nx']
        gie = gis + gnx
        data = {}
        # Cell-centered fields: plain [z, y, x] sub-block.
        for f in cc_varnames:
            if f in fields:
                data[f] = rstdata[f][gis[2]:gie[2], gis[1]:gie[1], gis[0]:gie[0]]
        # Face-centered fields carry one extra layer along their own axis.
        for f in fc_varnames:
            ib, jb, kb = (0, 0, 0)
            if f in fields:
                if f.startswith('1'):
                    ib = 1
                if f.startswith('2'):
                    jb = 1
                if f.startswith('3'):
                    kb = 1
                data[f] = rstdata[f][gis[2]:gie[2] + kb,
                                     gis[1]:gie[1] + jb,
                                     gis[0]:gie[0] + ib]
        for ns in range(scalar):
            f = 'SCALAR %d' % ns
            if f in fields:
                data[f] = rstdata[f][gis[2]:gie[2], gis[1]:gie[1], gis[0]:gie[0]]
        if verbose:
            # Bug fix: the original evaluated `dname+fname` without printing it.
            print(dname + fname)
        write_onefile(dname + fname, data, pardata)
    return
def get_eint(rstdata,neg_correct=True):
    """Return internal energy derived from the total energy in rstdata.

    Subtracts kinetic energy (0.5*M_i**2/rho) for the three momentum
    components and magnetic energy (0.5*Bc**2) with face-centered fields
    averaged to cell centers.  Assumes all MOMENTUM and FIELD keys exist.

    If neg_correct is True, each negative cell is replaced by the mean of
    the non-negative values in its (boundary-clipped) 3x3x3 neighborhood;
    the process exits if any negative value survives.
    """
    eint=rstdata['ENERGY'].copy()
    for ax in '123':
        eint -= 0.5*rstdata[ax+'-MOMENTUM']**2/rstdata['DENSITY']
    for f in ['1-FIELD','2-FIELD','3-FIELD']:
        # bug fix: compare strings with '==', not 'is' (identity of interned
        # literals is an implementation detail; SyntaxWarning on Python 3.8+)
        if f == '1-FIELD': Bc=0.5*(rstdata[f][:,:,:-1]+rstdata[f][:,:,1:])
        elif f == '2-FIELD': Bc=0.5*(rstdata[f][:,:-1,:]+rstdata[f][:,1:,:])
        else: Bc=0.5*(rstdata[f][:-1,:,:]+rstdata[f][1:,:,:])
        eint -= 0.5*Bc**2
    if neg_correct:
        k_end,j_end,i_end = eint.shape
        k_str=j_str=i_str = 0
        k,j,i=np.where(eint<0)
        eavg=[]
        for kk,jj,ii in zip(k,j,i):
            # clip the averaging stencil at the domain boundaries
            kl=kk if kk==k_str else kk-1
            kh=kk+1 if kk==(k_end-1) else kk+2
            jl=jj if jj==j_str else jj-1
            jh=jj+1 if jj==(j_end-1) else jj+2
            il=ii if ii==i_str else ii-1
            ih=ii+1 if ii==(i_end-1) else ii+2
            epart=eint[kl:kh,jl:jh,il:ih]
            e_neg=epart[epart<0]
            # NOTE(review): divides by zero if the whole stencil is negative
            eavg.append((epart.sum()-e_neg.sum())/(epart.size-e_neg.size))
            print(kk,jj,ii,eint[kk,jj,ii],eavg[-1],epart.sum(),e_neg.sum())
        eint[k,j,i]=np.array(eavg)
        if len(eint[eint<0]) > 0: sys.exit("negative energy persist!")
    return eint
def to_etot(rstdata):
    """Return total energy from internal energy stored under 'ENERGY'.

    Inverse of get_eint (without the negativity correction): adds kinetic
    energy per momentum component and cell-centered magnetic energy.
    """
    etot=rstdata['ENERGY'].copy()
    for ax in '123':
        etot += 0.5*rstdata[ax+'-MOMENTUM']**2/rstdata['DENSITY']
    for f in ['1-FIELD','2-FIELD','3-FIELD']:
        # bug fix: '==' instead of 'is' for string comparison
        if f == '1-FIELD': Bc=0.5*(rstdata[f][:,:,:-1]+rstdata[f][:,:,1:])
        elif f == '2-FIELD': Bc=0.5*(rstdata[f][:,:-1,:]+rstdata[f][:,1:,:])
        else: Bc=0.5*(rstdata[f][:-1,:,:]+rstdata[f][1:,:,:])
        etot += 0.5*Bc**2
    return etot
def degrade(rstdata,scalar=0):
    """Coarsen restart data by a factor of two in each dimension.

    Cell-centered fields are averaged over each 2x2x2 block; face-centered
    fields are averaged over the four coarse-face-aligned fine faces.
    ENERGY is degraded as internal energy (via get_eint) and converted back
    to total energy at the end.  All dimensions must be even.
    """
    cc_varnames=['DENSITY','1-MOMENTUM','2-MOMENTUM','3-MOMENTUM','ENERGY']
    # consistency with refine(): only include POTENTIAL when present,
    # instead of unconditionally (which raised KeyError for hydro-only data)
    if 'POTENTIAL' in rstdata: cc_varnames += ['POTENTIAL']
    fc_varnames=['1-FIELD','2-FIELD','3-FIELD']
    for ns in range(scalar):
        cc_varnames.append('SCALAR %d' % ns)
    rstdata_new={}
    for f in cc_varnames:
        if f == 'ENERGY':  # bug fix: '==' instead of 'is'
            data=get_eint(rstdata)
        else:
            data=rstdata[f].copy()
        # bug fix: floor division -- np.zeros requires an integer shape
        # (true division '/2' yields floats and fails under Python 3)
        shape=np.array(data.shape)//2
        newdata=np.zeros(shape,dtype='d')
        for i in range(2):
            for j in range(2):
                for k in range(2):
                    newdata += data[k::2,j::2,i::2]
        rstdata_new[f]=newdata*0.125
    for f in fc_varnames:
        data=rstdata[f].copy()
        shape=np.array(data.shape)//2
        if f == '1-FIELD':
            newdata=np.zeros(shape+np.array([0,0,1]),dtype='d')
            for j in range(2):
                for k in range(2):
                    newdata += data[k::2,j::2,::2]
        elif f == '2-FIELD':
            newdata=np.zeros(shape+np.array([0,1,0]),dtype='d')
            for i in range(2):
                for k in range(2):
                    newdata += data[k::2,::2,i::2]
        else:
            newdata=np.zeros(shape+np.array([1,0,0]),dtype='d')
            for j in range(2):
                for i in range(2):
                    newdata += data[::2,j::2,i::2]
        rstdata_new[f]=newdata*0.25
    rstdata_new['ENERGY']=to_etot(rstdata_new)
    return rstdata_new
def refine(rstdata,scalar=0):
    """Refine restart data by a factor of two in each dimension.

    Cell-centered fields are injected (each coarse value copied into its
    eight children); face-centered fields copy coarse faces onto even fine
    faces and linearly interpolate the odd in-between faces.  ENERGY is
    refined as internal energy (via get_eint) and converted back to total
    energy at the end.
    """
    cc_varnames=['DENSITY','1-MOMENTUM','2-MOMENTUM','3-MOMENTUM','ENERGY']
    if 'POTENTIAL' in rstdata: cc_varnames += ['POTENTIAL']
    fc_varnames=['1-FIELD','2-FIELD','3-FIELD']
    for ns in range(scalar):
        cc_varnames.append('SCALAR %d' % ns)
    rstdata_new={}
    for f in cc_varnames:
        if f == 'ENERGY':  # bug fix: '==' instead of 'is' for strings
            data=get_eint(rstdata)
        else:
            data=rstdata[f]
        shape=np.array(data.shape)*2
        newdata=np.zeros(shape,dtype='d')
        for i in range(2):
            for j in range(2):
                for k in range(2):
                    newdata[k::2,j::2,i::2] = data.copy()
        rstdata_new[f]=newdata
    for f in fc_varnames:
        data=rstdata[f]
        shape=np.array(data.shape)*2
        if f == '1-FIELD':
            # doubled grid keeps one extra face along x: 2*(nx+1)-1 = 2nx+1
            newdata=np.zeros(shape-np.array([0,0,1]),dtype='d')
            idata = 0.5*(data[:,:,:-1]+data[:,:,1:])
            for j in range(2):
                for k in range(2):
                    newdata[k::2,j::2,::2] = data.copy()
                    newdata[k::2,j::2,1::2] = idata.copy()
        elif f == '2-FIELD':
            newdata=np.zeros(shape-np.array([0,1,0]),dtype='d')
            idata = 0.5*(data[:,:-1,:]+data[:,1:,:])
            for i in range(2):
                for k in range(2):
                    newdata[k::2,::2,i::2] = data.copy()
                    newdata[k::2,1::2,i::2] = idata.copy()
        else:
            newdata=np.zeros(shape-np.array([1,0,0]),dtype='d')
            idata = 0.5*(data[:-1,:,:]+data[1:,:,:])
            for j in range(2):
                for i in range(2):
                    newdata[::2,j::2,i::2] = data.copy()
                    newdata[1::2,j::2,i::2] = idata.copy()
        rstdata_new[f]=newdata
    rstdata_new['ENERGY']=to_etot(rstdata_new)
    return rstdata_new
def calculate_grid(Nx,NBx):
    """Decompose a global mesh of Nx cells into uniform blocks of NBx cells.

    Returns (grids, NGrids): grids is a flat list of dicts with keys 'id'
    (0-based, x-fastest ordering), 'is' (starting cell indices) and 'Nx'
    (block size); NGrids is the per-axis block count as an int array.
    """
    NGrids=(np.array(Nx)/np.array(NBx)).astype('int')
    NProcs=NGrids[0]*NGrids[1]*NGrids[2]
    print(Nx, NBx, NGrids, NProcs)
    grids=[]
    gid=0
    block_size=np.array(NBx).astype('int')
    for n in range(NGrids[2]):
        for m in range(NGrids[1]):
            for l in range(NGrids[0]):
                grids.append({
                    'id': gid,
                    'is': np.array([l*NBx[0],m*NBx[1],n*NBx[2]]).astype('int'),
                    'Nx': block_size.copy(),
                })
                gid += 1
    return grids,NGrids
# reader
def parse_par(rstfile):
    """Parse the ASCII parameter header of an Athena restart file.

    Scans '<block>' section markers up to '<par_end>'.  Lines in the
    'problem', 'domain1' and 'time' blocks ('name = value') are eval()'d
    into Python numbers; other blocks keep the raw value string with any
    trailing '#' comment stripped.  After the loop, par['par_end'] holds
    the byte offset just past the header, used later to seek to the
    binary data section.

    NOTE(review): eval() on file content executes arbitrary expressions --
    only use on trusted restart files.
    """
    fp=open(rstfile,'rb')
    par={}
    line=fp.readline().decode('utf-8')
    while 1:
        if line.startswith('<'):
            block=line[1:line.rfind('>')]
            if block == 'par_end': break  # end of the parameter header
            par[block]={}
        line=fp.readline().decode('utf-8')
        if block in ['problem','domain1','time']:
            # numeric blocks: 'name = value' with eval()'d value
            sp = line.strip().split()
            if len(sp) >= 3: par[block][sp[0]]=eval(sp[2])
        else:
            # generic blocks: keep raw value text
            sp=line.split('=')
            if len(sp) == 2: par[block][sp[0].strip()]=sp[1].split('#')[0].strip()
    # block == 'par_end' here: record the data-section offset
    par[block]=fp.tell()
    fp.close()
    return par
def parse_rst(var,par,fm):
    """Classify one restart-file record name and record its layout in fm.

    Given a record name (var) read from the file and the parsed parameter
    header (par), stores {'ndata','dtype','vtype'} -- plus 'nx' for grid
    variables -- under fm[var].  Returns 1 when var is recognized, 0
    otherwise.
    """
    conf=par['configure']
    # star particles are enabled unless configured as 'none' (or absent)
    starpar=('star particles' in conf) and (conf['star particles'] != 'none')
    dm=par['domain1']
    # per-process cell counts for this grid
    nx1=int(dm['Nx1']/dm['NGrid_x1'])
    nx2=int(dm['Nx2']/dm['NGrid_x2'])
    nx3=int(dm['Nx3']/dm['NGrid_x3'])
    cc_varnames=['DENSITY','1-MOMENTUM','2-MOMENTUM','3-MOMENTUM','ENERGY','POTENTIAL']
    fc_varnames=['1-FIELD','2-FIELD','3-FIELD']
    vtype='param'
    if var=='N_STEP':
        ndata,dtype=1,'i'
    elif var=='TIME':
        ndata,dtype=1,'d'
    elif var=='TIME_STEP':
        # with star particles the record carries an extra double
        ndata=2 if starpar else 1
        dtype='d'
    elif var in cc_varnames or var.startswith('SCALAR'):
        ndata=nx1*nx2*nx3
        dtype,vtype='d','ccvar'
    elif var in fc_varnames:
        # face-centered fields have one extra face along their own axis
        nx1 += int(var.startswith('1'))
        nx2 += int(var.startswith('2'))
        nx3 += int(var.startswith('3'))
        ndata=nx1*nx2*nx3
        dtype,vtype='d','fcvar'
    elif var.startswith('STAR PARTICLE LIST'):
        ndata,dtype,vtype=1,'i','star'
    else:
        return 0
    fm[var]={'ndata':ndata,'dtype':dtype,'vtype':vtype}
    if vtype in ('ccvar','fcvar'):
        fm[var]['nx']=(nx3,nx2,nx1)
    return 1
def read_star(fp,nscal=0,ghost=True):
    """Read one star-particle record from an open binary restart file.

    The field order below must match the star-particle C struct layout of
    the code version that wrote the file: four little-endian ints followed
    by a fixed list of doubles, optionally extended by per-scalar 'metal'
    fields, ghost mass/momentum fields and per-scalar ghost fields.

    Parameters:
        fp: binary file object positioned at the start of a record.
        nscal: number of passive scalars tracked per particle.
        ghost: include the ghost-cell deposit fields (newer file versions).

    Returns:
        dict mapping field name -> 1-tuple (the raw struct.unpack result).
    """
    # This works for MST_4pc
    # ivars=['id','merge_history','isnew','active']
    # dvars=['m','x1','x2','x3','v1','v2','v3','age','mage','mdot',\
    #        'x1_old','x2_old','x3_old',\
    #        'm_old','M1_old','M2_old','M3_old',\
    #        'navg','n2avg','v1avg','v2avg','v3avg',\
    #        'eavg','Vol','radius','SFUV','SNRate',\
    #'SNprob',\
    #        'x1sn','x2sn','x3sn',\
    #       ]
    # Latest restart file
    ivars=['id','merge_history','isnew','active']
    dvars=['m','x1','x2','x3','v1','v2','v3','age','mage','mdot',\
           'x1_old','x2_old','x3_old',\
          ]
    # additional fields depending on the version
    for i in range(nscal):
        dvars += ['metal{}'.format(i)]
    if ghost:
        dvars += ['mghost','M1ghost','M2ghost','M3ghost']
        for i in range(nscal):
            dvars += ['Sghost{}'.format(i)]
    star_dict={}
    # integer fields first, one value at a time (little-endian)
    dtype='i'
    for var in ivars:
        data=fp.read(struct.calcsize(dtype))
        tmp=struct.unpack('<'+dtype,data)
        star_dict[var]=tmp
    # then the double-precision fields
    dtype='d'
    for var in dvars:
        data=fp.read(struct.calcsize(dtype))
        tmp=struct.unpack('<'+dtype,data)
        #if var is 'm': print(var,tmp)
        star_dict[var]=tmp
    return star_dict
def read_rst_grid(rstfile,verbose=False,starghost=True):
    """Read all records of a single per-grid restart file.

    Parses the parameter header, seeks to the binary section and reads
    record-by-record, using parse_rst() to determine each record's size
    and layout.  Stops at the first unrecognized record name.

    Parameters:
        rstfile: path to one per-grid restart file.
        verbose: print record summaries while reading.
        starghost: passed through to read_star() (file-version dependent).

    Returns:
        (rst, data_array): rst maps record name -> layout info from
        parse_rst; data_array maps record name -> numpy array (grid
        variables, shaped (nz,ny,nx)) or list of star dicts.
    """
    par=parse_par(rstfile)
    fp=open(rstfile,'rb')
    fp.seek(par['par_end'])  # skip the ASCII parameter header
    rst={}
    data_array={}
    nscal=0  # counts SCALAR records seen; star records need it for layout
    while 1:
        l=fp.readline().decode('utf-8')
        var=l.strip()
        if parse_rst(var,par,rst):
            dtype=rst[var]['dtype']
            ndata=rst[var]['ndata']
            vtype=rst[var]['vtype']
            dsize=ndata*struct.calcsize(dtype)
            data=fp.read(dsize)
            if vtype == 'param':
                if verbose: print(var,struct.unpack('<'+ndata*dtype,data))
            elif vtype == 'star':
                # first int is the particle count; next read skips padding
                # before the particle records (same size as the count field)
                nstar,=struct.unpack('<'+ndata*dtype,data)
                data=fp.read(dsize)
                star_list=[]
                if nstar > 0:
                    for i in range(nstar):
                        star_list.append(read_star(fp,nscal=nscal,ghost=starghost))
                    if verbose:
                        print(var, nstar)
                        print(star_list[0])
                        print(star_list[nstar-1])
                data_array[var]=star_list
            else:
                # grid variable: unpack and reshape to (nz,ny,nx)
                arr=np.asarray(struct.unpack('<'+ndata*dtype,data))
                arr.shape = rst[var]['nx']
                data_array[var]=arr
                if verbose: print(var, arr.mean(), arr.shape)
                if var.startswith('SCALAR'): nscal += 1
            fp.readline()  # consume the trailing newline after the record
        else:
            break  # unknown record name: assume end of data section
    if verbose: print(l, fp.tell())
    fp.close()
    return rst,data_array
def read(rstfile,grids,NGrids,parfile=None,verbose=False,starghost=True):
    """Assemble the global restart data from all per-grid restart files.

    Reads the root file (rstfile) plus its 'id<i>' siblings, placing each
    grid's arrays into globally-shaped arrays using the grid index
    information from calculate_grid.

    Parameters:
        rstfile: path to the id-0 (root) restart file.
        grids: list of grid dicts ('id','is','Nx').
        NGrids: per-axis grid counts (used to size the global arrays).
        parfile: optional alternative file to parse parameters from.
        verbose: >1 prints per-grid shape info (truthy comparison).
        starghost: passed through to read_rst_grid/read_star.

    Returns:
        dict mapping field name -> global array (cell-centered shape
        (nz,ny,nx); face-centered with one extra face along its axis).
    """
    # NOTE(review): 'par' is parsed but not used below -- presumably kept
    # for side-effect-free validation; confirm before removing.
    if parfile==None: par=parse_par(rstfile)
    else: par=parse_par(parfile)
    nprocs=len(grids)#par['domain1']['AutoWithNProc']
    field_maps=[]
    rstdata={}
    # global cell count, reversed to numpy (z,y,x) ordering
    nx=NGrids*grids[0]['Nx']
    nx=nx[::-1]
    #nx=ds.domain['Nx'][::-1]
    print(nx,nprocs)
    dirname=os.path.dirname(rstfile)
    basename=os.path.basename(rstfile)
    # grid 0: allocate the global arrays and fill this grid's portion
    fm,data=read_rst_grid(rstfile,verbose=verbose,starghost=starghost)
    g=grids[0]
    gis=g['is']
    gnx=g['Nx']
    gie=gis+gnx
    print(fm['DENSITY']['nx'],gnx)
    for k in fm:
        ib,jb,kb=(0,0,0)
        if fm[k]['vtype'] == 'ccvar':
            rstdata[k]=np.empty(nx,dtype=fm[k]['dtype'])
            rstdata[k][gis[2]:gie[2],gis[1]:gie[1],gis[0]:gie[0]]=data[k]
        elif fm[k]['vtype'] == 'fcvar':
            # face-centered arrays carry one extra face along their own axis
            if k.startswith('1'): ib=1
            if k.startswith('2'): jb=1
            if k.startswith('3'): kb=1
            rstdata[k]=np.empty((nx[0]+kb,nx[1]+jb,nx[2]+ib),dtype=fm[k]['dtype'])
            rstdata[k][gis[2]:gie[2]+kb,gis[1]:gie[1]+jb,gis[0]:gie[0]+ib]=data[k]
    #for i in range(nprocs):
    # remaining grids: fill their portions of the preallocated arrays
    for i in range(1,nprocs):
        g=grids[i]
        gis=g['is']
        gnx=g['Nx']
        gie=gis+gnx
#        if i % 50 == 0:
#            print i,gis,gie
#            print rstfile,g['filename']
        # try flat layout first, then per-process id<i>/ subdirectories
        rstfname = '%s/%s-id%d%s' % (dirname,basename[:-9],i,basename[-9:])
        if not os.path.isfile(rstfname):
            rstfname = '%s/../id%d/%s-id%d%s' % (dirname,i,basename[:-9],i,basename[-9:])
        fm,data=read_rst_grid(rstfname,starghost=starghost)
        if verbose > 1: print(i,fm['DENSITY']['nx'],gnx)
        for k in fm:
            ib,jb,kb=(0,0,0)
            if fm[k]['vtype'] == 'ccvar':
                rstdata[k][gis[2]:gie[2],gis[1]:gie[1],gis[0]:gie[0]]=data[k]
            elif fm[k]['vtype'] == 'fcvar':
                if k.startswith('1'): ib=1
                if k.startswith('2'): jb=1
                if k.startswith('3'): kb=1
                rstdata[k][gis[2]:gie[2]+kb,gis[1]:gie[1]+jb,gis[0]:gie[0]+ib]=data[k]
    return rstdata
def read_part(rstfile,grids,nx,verbose=False):
    """Assemble restart data for a subset of grids (a z-slab).

    Like read(), but the caller supplies the output shape nx directly and
    the z index of the first grid (ks) is subtracted so the slab starts at
    index 0 in the returned arrays.

    Parameters:
        rstfile: path to the first grid's restart file.
        grids: list of grid dicts ('id','is','Nx') covering the slab.
        nx: output array shape (nz,ny,nx).
        verbose: >1 prints per-grid shape info.

    Returns:
        dict mapping field name -> slab array.
    """
    nprocs=len(grids)
    field_maps=[]
    rstdata={}
    print(nx,nprocs)
    basename=os.path.basename(rstfile)
    pid=basename[:-9]  # problem id (strip '.NNNN.rst')
    # read the first grid only to learn field layouts and allocate arrays
    fm,data=read_rst_grid(rstfile,verbose=verbose)
    g=grids[0]
    gis=g['is']
    gnx=g['Nx']
    gie=gis+gnx
    ks=gis[2]  # z offset of the slab; subtracted from all grid indices
    print(fm['DENSITY']['nx'],gnx)
    for k in fm:
        ib,jb,kb=(0,0,0)
        if fm[k]['vtype'] == 'ccvar':
            rstdata[k]=np.empty(nx,dtype=fm[k]['dtype'])
        elif fm[k]['vtype'] == 'fcvar':
            # face-centered arrays carry one extra face along their own axis
            if k.startswith('1'): ib=1
            if k.startswith('2'): jb=1
            if k.startswith('3'): kb=1
            rstdata[k]=np.empty((nx[0]+kb,nx[1]+jb,nx[2]+ib),dtype=fm[k]['dtype'])
    for i in range(nprocs):
        g=grids[i]
        gis=g['is']
        gnx=g['Nx']
        gie=gis+gnx
        gid=g['id']
        # derive this grid's file name from the root file's name
        if gid > 0:
            rstfname = rstfile.replace('{}.'.format(pid),'{}-id{}.'.format(pid,gid))
        else:
            rstfname = rstfile
        if not os.path.isfile(rstfname):
            # fall back to per-process id<gid>/ subdirectory layout
            rstfname = rstfile.replace('id{}/{}.'.format(gid,pid),
                                       'id{}/{}-id{}.'.format(gid,pid,gid))
        fm,data=read_rst_grid(rstfname)
        if verbose > 1: print(i,fm['DENSITY']['nx'],gnx)
        for k in fm:
            ib,jb,kb=(0,0,0)
            if fm[k]['vtype'] == 'ccvar':
                rstdata[k][gis[2]-ks:gie[2]-ks,gis[1]:gie[1],gis[0]:gie[0]]=data[k]
            elif fm[k]['vtype'] == 'fcvar':
                if k.startswith('1'): ib=1
                if k.startswith('2'): jb=1
                if k.startswith('3'): kb=1
                rstdata[k][gis[2]-ks:gie[2]-ks+kb,gis[1]:gie[1]+jb,gis[0]:gie[0]+ib]=data[k]
    return rstdata
def set_xpos_with_dm(dm):
    """Build face- and cell-centered coordinate arrays from a domain dict.

    dm must provide 'x<i>min', 'x<i>max' and 'Nx<i>' for i in 1..3.
    Returns (xf, xc): dicts keyed by 'x','y','z' holding face positions
    (including both edges) and cell-center positions respectively.
    """
    xf={}
    xc={}
    for n,ax in ((1,'x'),(2,'y'),(3,'z')):
        lo=dm['x%dmin' % n]
        hi=dm['x%dmax' % n]
        delta=(hi-lo)/dm['Nx%d' % n]
        xf[ax]=np.arange(lo,hi+delta,delta)
        xc[ax]=np.arange(lo,hi,delta)+0.5*delta
    return xf,xc
def set_xpos(ds):
    """Build face- and cell-centered coordinate arrays from a dataset.

    ds.domain must provide 'left_edge', 'right_edge' and 'dx' sequences in
    (x,y,z) order.  Returns (xf, xc) dicts keyed by 'x','y','z' with face
    positions (including both edges) and cell centers respectively.
    """
    dom=ds.domain
    lo,hi,delta=dom['left_edge'],dom['right_edge'],dom['dx']
    xf={}
    xc={}
    for axis,ax in enumerate(['x','y','z']):
        xf[ax]=np.arange(lo[axis],hi[axis]+delta[axis],delta[axis])
        xc[ax]=np.arange(lo[axis],hi[axis],delta[axis])+0.5*delta[axis]
    return xf,xc
def to_hdf5(h5file,rstdata,ds):
    """Save face-centered magnetic fields to an HDF5 file with coordinates.

    Creates (or reuses) groups 'Bfields', 'cell_centered_coord' and
    'face_centered_coord', stores Bx/By/Bz with axis dimension labels, and
    attaches dimension scales: the field's own axis uses face-centered
    coordinates, the other two use cell-centered coordinates.

    Parameters:
        h5file: output path; opened in append mode so existing entries
            are kept (name collisions are reported, not overwritten).
        rstdata: restart-data dict providing '1/2/3-FIELD' arrays.
        ds: dataset object consumed by set_xpos() for coordinates.
    """
    import h5py
    Bx=rstdata['1-FIELD']
    By=rstdata['2-FIELD']
    Bz=rstdata['3-FIELD']
    xf,xc=set_xpos(ds)
    f=h5py.File(h5file,'a')
    # create groups only if absent; print names that already exist
    for name in ['Bfields','cell_centered_coord','face_centered_coord']:
        if name in list(f.keys()):
            grp=f[name]
        else:
            grp=f.create_group(name)
            print(name)
    grp=f['Bfields']
    for name,B in zip(['Bx','By','Bz'],[Bx,By,Bz]):
        if name in list(grp.keys()):
            dset=grp[name]
        else:
            dset=grp.create_dataset(name,B.shape,data=B,dtype=B.dtype)
    # label array dimensions: numpy axis order is (z,y,x)
    for k in list(grp.keys()):
        for i,ax in enumerate(['z','y','x']):
            grp[k].dims[i].label=ax
    bfield=f['Bfields']
    ccoord=f['cell_centered_coord']
    fcoord=f['face_centered_coord']
    # write coordinate arrays unless already present
    for ax in ['x','y','z']:
        if ax in list(ccoord.keys()):
            print(ax)
        else:
            ccoord[ax] = xc[ax]
        if ax in list(fcoord.keys()):
            print(ax)
        else:
            fcoord[ax] = xf[ax]
    # attach dimension scales: a field is face-centered along its own axis
    # (last letter of its name), cell-centered along the other two
    for b in list(bfield.keys()):
        bax=b[-1]
        for i,ax in enumerate(['z','y','x']):
            if ax == bax:
                bfield[b].dims[i].attach_scale(fcoord[ax])
            else:
                bfield[b].dims[i].attach_scale(ccoord[ax])
    f.close()
def divB(rstdata):
    """Return the undivided divergence of B on cell centers.

    Sums the face-to-face differences of the three face-centered field
    components; zero (to round-off) for a divergence-free field.  The
    result has the cell-centered shape (nz,ny,nx).
    """
    dB = (np.diff(rstdata['1-FIELD'],axis=2)
          + np.diff(rstdata['2-FIELD'],axis=1)
          + np.diff(rstdata['3-FIELD'],axis=0))
    return dB
| 30.598802 | 92 | 0.509883 | import struct
import numpy as np
import glob
import os
import sys
def parse_misc_info(rstfile):
fp=open(rstfile,'rb')
search_block=['par','time','data','star','user']
start={}
size={}
start['par']=0
iblock=0
while 1:
block=search_block[iblock]
size[block]=fp.tell()-start[block]
l=fp.readline()
if not l: break
if l.startswith(b'N_STEP') or l.startswith(b'DENSITY') or \
l.startswith(b'STAR') or l.startswith(b'USER'):
iblock+=1
start[search_block[iblock]]=start[block]+size[block]
data={}
search_block=['par','time','star','user']
for block in search_block:
if block in start:
fp.seek(start[block])
data[block]=fp.read(size[block])
fp.close()
return data
def write_onefile(newfile,data_part,data_par):
fp=open(newfile,'wb')
fields=['DENSITY', '1-MOMENTUM', '2-MOMENTUM', '3-MOMENTUM', 'ENERGY','POTENTIAL',
'1-FIELD', '2-FIELD', '3-FIELD',
'SCALAR 0','SCALAR 1','SCALAR 2','SCALAR 3','SCALAR 4',
'SCALAR 5','SCALAR 6','SCALAR 7','SCALAR 8','SCALAR 9']
for block in ['par','time']: fp.write(data_par[block])
fp.write(b'DENSITY\n')
fp.write(data_part['DENSITY'].flatten().tobytes('C'))
for f in fields[1:]:
if f in list(data_part.keys()):
fp.write('\n{}\n'.format(f).encode())
fp.write(data_part[f].flatten().tobytes('C'))
fp.write(b'\n')
for block in ['star','user']:
if block in data_par: fp.write(data_par[block])
fp.close()
return
def write_allfile(pardata,rstdata,grids,grid_disp=np.array([0,0,0]),
id='newrst',dname='/tigress/changgoo/rst/',itime=0,verbose=False,scalar=0):
ngrids=len(grids)
fields = list(rstdata.keys())
cc_varnames=['DENSITY','1-MOMENTUM','2-MOMENTUM','3-MOMENTUM',\
'ENERGY','POTENTIAL']
fc_varnames=['1-FIELD','2-FIELD','3-FIELD']
for g in grids:
i=g['id']
if i == 0:
fname=id+'.%4.4d.rst' % itime
else:
fname=id+'-id%d.%4.4d.rst' % (i,itime)
gis=g['is']-grid_disp
gnx=g['Nx']
gie=gis+gnx
data={}
for f in cc_varnames:
if f in fields:
data[f]=rstdata[f][gis[2]:gie[2],gis[1]:gie[1],gis[0]:gie[0]]
for f in fc_varnames:
ib,jb,kb=(0,0,0)
if f in fields:
if f.startswith('1'): ib=1
if f.startswith('2'): jb=1
if f.startswith('3'): kb=1
data[f]=rstdata[f][gis[2]:gie[2]+kb,gis[1]:gie[1]+jb,gis[0]:gie[0]+ib]
for ns in range(scalar):
f='SCALAR %d' % ns
if f in fields:
data[f]=rstdata[f][gis[2]:gie[2],gis[1]:gie[1],gis[0]:gie[0]]
if verbose: dname+fname
write_onefile(dname+fname,data,pardata)
return
def get_eint(rstdata,neg_correct=True):
eint=rstdata['ENERGY'].copy()
eint -= 0.5*rstdata['1-MOMENTUM']**2/rstdata['DENSITY']
eint -= 0.5*rstdata['2-MOMENTUM']**2/rstdata['DENSITY']
eint -= 0.5*rstdata['3-MOMENTUM']**2/rstdata['DENSITY']
for i,f in enumerate(['1-FIELD','2-FIELD','3-FIELD']):
if f is '1-FIELD': Bc=0.5*(rstdata[f][:,:,:-1]+rstdata[f][:,:,1:])
elif f is '2-FIELD': Bc=0.5*(rstdata[f][:,:-1,:]+rstdata[f][:,1:,:])
elif f is '3-FIELD': Bc=0.5*(rstdata[f][:-1,:,:]+rstdata[f][1:,:,:])
eint -= 0.5*Bc**2
if neg_correct:
k_end,j_end,i_end = eint.shape
k_str=j_str=i_str = 0
k,j,i=np.where(eint<0)
eavg=[]
for kk,jj,ii in zip(k,j,i):
kl=kk if kk==k_str else kk-1
kh=kk+1 if kk==(k_end-1) else kk+2
jl=jj if jj==j_str else jj-1
jh=jj+1 if jj==(j_end-1) else jj+2
il=ii if ii==i_str else ii-1
ih=ii+1 if ii==(i_end-1) else ii+2
epart=eint[kl:kh,jl:jh,il:ih]
e_neg=epart[epart<0]
Nneg=len(e_neg)
eavg.append((epart.sum()-e_neg.sum())/(epart.size-e_neg.size))
print(kk,jj,ii,eint[kk,jj,ii],eavg[-1],epart.sum(),e_neg.sum())
eint[k,j,i]=np.array(eavg)
if len(eint[eint<0]) > 0: sys.exit("negative energy persist!")
return eint
def to_etot(rstdata):
eint=rstdata['ENERGY'].copy()
eint += 0.5*rstdata['1-MOMENTUM']**2/rstdata['DENSITY']
eint += 0.5*rstdata['2-MOMENTUM']**2/rstdata['DENSITY']
eint += 0.5*rstdata['3-MOMENTUM']**2/rstdata['DENSITY']
for i,f in enumerate(['1-FIELD','2-FIELD','3-FIELD']):
if f is '1-FIELD': Bc=0.5*(rstdata[f][:,:,:-1]+rstdata[f][:,:,1:])
elif f is '2-FIELD': Bc=0.5*(rstdata[f][:,:-1,:]+rstdata[f][:,1:,:])
elif f is '3-FIELD': Bc=0.5*(rstdata[f][:-1,:,:]+rstdata[f][1:,:,:])
eint += 0.5*Bc**2
return eint
def degrade(rstdata,scalar=0):
cc_varnames=['DENSITY','1-MOMENTUM','2-MOMENTUM','3-MOMENTUM',\
'ENERGY','POTENTIAL']
fc_varnames=['1-FIELD','2-FIELD','3-FIELD']
scalar_varnames=[]
for ns in range(scalar):
scalar_varnames.append('SCALAR %d' % ns)
if scalar: cc_varnames += scalar_varnames
rstdata_new={}
for f in cc_varnames:
if f is 'ENERGY':
data=get_eint(rstdata)
else:
data=rstdata[f].copy()
shape=np.array(data.shape)/2
newdata=np.zeros(shape,dtype='d')
for i in range(2):
for j in range(2):
for k in range(2):
newdata += data[k::2,j::2,i::2]
rstdata_new[f]=newdata*0.125
for f in fc_varnames:
data=rstdata[f].copy()
shape=np.array(data.shape)/2
if f is '1-FIELD':
newdata=np.zeros(shape+np.array([0,0,1]),dtype='d')
for j in range(2):
for k in range(2):
newdata += data[k::2,j::2,::2]
if f is '2-FIELD':
newdata=np.zeros(shape+np.array([0,1,0]),dtype='d')
for i in range(2):
for k in range(2):
newdata += data[k::2,::2,i::2]
if f is '3-FIELD':
newdata=np.zeros(shape+np.array([1,0,0]),dtype='d')
for j in range(2):
for i in range(2):
newdata += data[::2,j::2,i::2]
rstdata_new[f]=newdata*0.25
rstdata_new['ENERGY']=to_etot(rstdata_new)
return rstdata_new
def refine(rstdata,scalar=0):
cc_varnames=['DENSITY','1-MOMENTUM','2-MOMENTUM','3-MOMENTUM',\
'ENERGY']
if 'POTENTIAL' in rstdata: cc_varnames += ['POTENTIAL']
fc_varnames=['1-FIELD','2-FIELD','3-FIELD']
scalar_varnames=[]
for ns in range(scalar):
scalar_varnames.append('SCALAR %d' % ns)
if scalar: cc_varnames += scalar_varnames
rstdata_new={}
for f in cc_varnames:
if f is 'ENERGY':
data=get_eint(rstdata)
else:
data=rstdata[f]
shape=np.array(data.shape)*2
newdata=np.zeros(shape,dtype='d')
for i in range(2):
for j in range(2):
for k in range(2):
newdata[k::2,j::2,i::2] = data.copy()
rstdata_new[f]=newdata
for f in fc_varnames:
data=rstdata[f]
shape=np.array(data.shape)*2
if f is '1-FIELD':
newdata=np.zeros(shape-np.array([0,0,1]),dtype='d')
idata = 0.5*(data[:,:,:-1]+data[:,:,1:])
for j in range(2):
for k in range(2):
newdata[k::2,j::2,::2] = data.copy()
newdata[k::2,j::2,1::2] = idata.copy()
if f is '2-FIELD':
newdata=np.zeros(shape-np.array([0,1,0]),dtype='d')
idata = 0.5*(data[:,:-1,:]+data[:,1:,:])
for i in range(2):
for k in range(2):
newdata[k::2,::2,i::2] = data.copy()
newdata[k::2,1::2,i::2] = idata.copy()
if f is '3-FIELD':
newdata=np.zeros(shape-np.array([1,0,0]),dtype='d')
idata = 0.5*(data[:-1,:,:]+data[1:,:,:])
for j in range(2):
for i in range(2):
newdata[::2,j::2,i::2] = data.copy()
newdata[1::2,j::2,i::2] = idata.copy()
rstdata_new[f]=newdata
rstdata_new['ENERGY']=to_etot(rstdata_new)
return rstdata_new
def calculate_grid(Nx,NBx):
NGrids=(np.array(Nx)/np.array(NBx)).astype('int')
NProcs=NGrids[0]*NGrids[1]*NGrids[2]
grids=[]
i=0
print(Nx, NBx, NGrids, NProcs)
for n in range(NGrids[2]):
for m in range(NGrids[1]):
for l in range(NGrids[0]):
grid={}
grid['id']=i
grid['is']=np.array([l*NBx[0],m*NBx[1],n*NBx[2]]).astype('int')
grid['Nx']=np.array(NBx).astype('int')
grids.append(grid)
i += 1
return grids,NGrids
def parse_par(rstfile):
fp=open(rstfile,'rb')
par={}
line=fp.readline().decode('utf-8')
while 1:
if line.startswith('<'):
block=line[1:line.rfind('>')]
if block == 'par_end': break
par[block]={}
line=fp.readline().decode('utf-8')
if block in ['problem','domain1','time']:
sp = line.strip().split()
if len(sp) >= 3: par[block][sp[0]]=eval(sp[2])
else:
sp=line.split('=')
if len(sp) == 2: par[block][sp[0].strip()]=sp[1].split('#')[0].strip()
par[block]=fp.tell()
fp.close()
return par
def parse_rst(var,par,fm):
starpar=False
if 'star particles' in par['configure']:
if par['configure']['star particles'] == 'none':
starpar=False
else:
starpar=True
vtype='param'
cc_varnames=['DENSITY','1-MOMENTUM','2-MOMENTUM','3-MOMENTUM','ENERGY','POTENTIAL']
fc_varnames=['1-FIELD','2-FIELD','3-FIELD']
dm=par['domain1']
nx1=int(dm['Nx1']/dm['NGrid_x1'])
nx2=int(dm['Nx2']/dm['NGrid_x2'])
nx3=int(dm['Nx3']/dm['NGrid_x3'])
if var=='N_STEP':
ndata=1
dtype='i'
elif var=='TIME':
ndata=1
dtype='d'
elif var=='TIME_STEP':
ndata=1
if starpar: ndata+=1
dtype='d'
elif var in cc_varnames:
ndata=nx1*nx2*nx3
dtype='d'
vtype='ccvar'
elif var in fc_varnames:
if var.startswith('1'): nx1 += 1
if var.startswith('2'): nx2 += 1
if var.startswith('3'): nx3 += 1
ndata=nx1*nx2*nx3
dtype='d'
vtype='fcvar'
elif var.startswith('SCALAR'):
ndata=nx1*nx2*nx3
dtype='d'
vtype='ccvar'
elif var.startswith('STAR PARTICLE LIST'):
ndata=1
dtype='i'
vtype='star'
else:
return 0
fm[var]={}
fm[var]['ndata']=ndata
fm[var]['dtype']=dtype
fm[var]['vtype']=vtype
if vtype == 'ccvar' or vtype == 'fcvar':
fm[var]['nx']=(nx3,nx2,nx1)
return 1
def read_star(fp,nscal=0,ghost=True):
ivars=['id','merge_history','isnew','active']
dvars=['m','x1','x2','x3','v1','v2','v3','age','mage','mdot',\
'x1_old','x2_old','x3_old',\
]
for i in range(nscal):
dvars += ['metal{}'.format(i)]
if ghost:
dvars += ['mghost','M1ghost','M2ghost','M3ghost']
for i in range(nscal):
dvars += ['Sghost{}'.format(i)]
star_dict={}
dtype='i'
for var in ivars:
data=fp.read(struct.calcsize(dtype))
tmp=struct.unpack('<'+dtype,data)
star_dict[var]=tmp
dtype='d'
for var in dvars:
data=fp.read(struct.calcsize(dtype))
tmp=struct.unpack('<'+dtype,data)
star_dict[var]=tmp
return star_dict
def read_rst_grid(rstfile,verbose=False,starghost=True):
par=parse_par(rstfile)
fp=open(rstfile,'rb')
fp.seek(par['par_end'])
rst={}
data_array={}
nscal=0
while 1:
l=fp.readline().decode('utf-8')
var=l.strip()
if parse_rst(var,par,rst):
dtype=rst[var]['dtype']
ndata=rst[var]['ndata']
vtype=rst[var]['vtype']
dsize=ndata*struct.calcsize(dtype)
data=fp.read(dsize)
if vtype == 'param':
if verbose: print(var,struct.unpack('<'+ndata*dtype,data))
elif vtype == 'star':
nstar,=struct.unpack('<'+ndata*dtype,data)
data=fp.read(dsize)
star_list=[]
if nstar > 0:
for i in range(nstar):
star_list.append(read_star(fp,nscal=nscal,ghost=starghost))
if verbose:
print(var, nstar)
print(star_list[0])
print(star_list[nstar-1])
data_array[var]=star_list
else:
arr=np.asarray(struct.unpack('<'+ndata*dtype,data))
arr.shape = rst[var]['nx']
data_array[var]=arr
if verbose: print(var, arr.mean(), arr.shape)
if var.startswith('SCALAR'): nscal += 1
fp.readline()
else:
break
if verbose: print(l, fp.tell())
fp.close()
return rst,data_array
def read(rstfile,grids,NGrids,parfile=None,verbose=False,starghost=True):
if parfile==None: par=parse_par(rstfile)
else: par=parse_par(parfile)
nprocs=len(grids)
field_maps=[]
rstdata={}
nx=NGrids*grids[0]['Nx']
nx=nx[::-1]
print(nx,nprocs)
dirname=os.path.dirname(rstfile)
basename=os.path.basename(rstfile)
fm,data=read_rst_grid(rstfile,verbose=verbose,starghost=starghost)
g=grids[0]
gis=g['is']
gnx=g['Nx']
gie=gis+gnx
print(fm['DENSITY']['nx'],gnx)
for k in fm:
ib,jb,kb=(0,0,0)
if fm[k]['vtype'] == 'ccvar':
rstdata[k]=np.empty(nx,dtype=fm[k]['dtype'])
rstdata[k][gis[2]:gie[2],gis[1]:gie[1],gis[0]:gie[0]]=data[k]
elif fm[k]['vtype'] == 'fcvar':
if k.startswith('1'): ib=1
if k.startswith('2'): jb=1
if k.startswith('3'): kb=1
rstdata[k]=np.empty((nx[0]+kb,nx[1]+jb,nx[2]+ib),dtype=fm[k]['dtype'])
rstdata[k][gis[2]:gie[2]+kb,gis[1]:gie[1]+jb,gis[0]:gie[0]+ib]=data[k]
for i in range(1,nprocs):
g=grids[i]
gis=g['is']
gnx=g['Nx']
gie=gis+gnx
rstfname = '%s/%s-id%d%s' % (dirname,basename[:-9],i,basename[-9:])
if not os.path.isfile(rstfname):
rstfname = '%s/../id%d/%s-id%d%s' % (dirname,i,basename[:-9],i,basename[-9:])
fm,data=read_rst_grid(rstfname,starghost=starghost)
if verbose > 1: print(i,fm['DENSITY']['nx'],gnx)
for k in fm:
ib,jb,kb=(0,0,0)
if fm[k]['vtype'] == 'ccvar':
rstdata[k][gis[2]:gie[2],gis[1]:gie[1],gis[0]:gie[0]]=data[k]
elif fm[k]['vtype'] == 'fcvar':
if k.startswith('1'): ib=1
if k.startswith('2'): jb=1
if k.startswith('3'): kb=1
rstdata[k][gis[2]:gie[2]+kb,gis[1]:gie[1]+jb,gis[0]:gie[0]+ib]=data[k]
return rstdata
def read_part(rstfile,grids,nx,verbose=False):
nprocs=len(grids)
field_maps=[]
rstdata={}
print(nx,nprocs)
basename=os.path.basename(rstfile)
pid=basename[:-9]
fm,data=read_rst_grid(rstfile,verbose=verbose)
g=grids[0]
gis=g['is']
gnx=g['Nx']
gie=gis+gnx
ks=gis[2]
print(fm['DENSITY']['nx'],gnx)
for k in fm:
ib,jb,kb=(0,0,0)
if fm[k]['vtype'] == 'ccvar':
rstdata[k]=np.empty(nx,dtype=fm[k]['dtype'])
elif fm[k]['vtype'] == 'fcvar':
if k.startswith('1'): ib=1
if k.startswith('2'): jb=1
if k.startswith('3'): kb=1
rstdata[k]=np.empty((nx[0]+kb,nx[1]+jb,nx[2]+ib),dtype=fm[k]['dtype'])
for i in range(nprocs):
g=grids[i]
gis=g['is']
gnx=g['Nx']
gie=gis+gnx
gid=g['id']
if gid > 0:
rstfname = rstfile.replace('{}.'.format(pid),'{}-id{}.'.format(pid,gid))
else:
rstfname = rstfile
if not os.path.isfile(rstfname):
rstfname = rstfile.replace('id{}/{}.'.format(gid,pid),
'id{}/{}-id{}.'.format(gid,pid,gid))
fm,data=read_rst_grid(rstfname)
if verbose > 1: print(i,fm['DENSITY']['nx'],gnx)
for k in fm:
ib,jb,kb=(0,0,0)
if fm[k]['vtype'] == 'ccvar':
rstdata[k][gis[2]-ks:gie[2]-ks,gis[1]:gie[1],gis[0]:gie[0]]=data[k]
elif fm[k]['vtype'] == 'fcvar':
if k.startswith('1'): ib=1
if k.startswith('2'): jb=1
if k.startswith('3'): kb=1
rstdata[k][gis[2]-ks:gie[2]-ks+kb,gis[1]:gie[1]+jb,gis[0]:gie[0]+ib]=data[k]
return rstdata
def set_xpos_with_dm(dm):
le=np.array([dm['x1min'],dm['x2min'],dm['x3min']])
re=np.array([dm['x1max'],dm['x2max'],dm['x3max']])
Lx=re-le
Nx=np.array([dm['Nx1'],dm['Nx2'],dm['Nx3']])
dx=Lx/Nx
xc={}
xf={}
for i,ax in zip(list(range(3)),['x','y','z']):
xf[ax]=np.arange(le[i],re[i]+dx[i],dx[i])
xc[ax]=np.arange(le[i],re[i],dx[i])+0.5*dx[i]
return xf,xc
def set_xpos(ds):
le=ds.domain['left_edge']
re=ds.domain['right_edge']
dx=ds.domain['dx']
xc={}
xf={}
for i,ax in zip(list(range(3)),['x','y','z']):
xf[ax]=np.arange(le[i],re[i]+dx[i],dx[i])
xc[ax]=np.arange(le[i],re[i],dx[i])+0.5*dx[i]
return xf,xc
def to_hdf5(h5file,rstdata,ds):
import h5py
Bx=rstdata['1-FIELD']
By=rstdata['2-FIELD']
Bz=rstdata['3-FIELD']
xf,xc=set_xpos(ds)
f=h5py.File(h5file,'a')
for name in ['Bfields','cell_centered_coord','face_centered_coord']:
if name in list(f.keys()):
grp=f[name]
else:
grp=f.create_group(name)
print(name)
grp=f['Bfields']
for name,B in zip(['Bx','By','Bz'],[Bx,By,Bz]):
if name in list(grp.keys()):
dset=grp[name]
else:
dset=grp.create_dataset(name,B.shape,data=B,dtype=B.dtype)
for k in list(grp.keys()):
for i,ax in enumerate(['z','y','x']):
grp[k].dims[i].label=ax
bfield=f['Bfields']
ccoord=f['cell_centered_coord']
fcoord=f['face_centered_coord']
for ax in ['x','y','z']:
if ax in list(ccoord.keys()):
print(ax)
else:
ccoord[ax] = xc[ax]
if ax in list(fcoord.keys()):
print(ax)
else:
fcoord[ax] = xf[ax]
for b in list(bfield.keys()):
bax=b[-1]
for i,ax in enumerate(['z','y','x']):
if ax == bax:
bfield[b].dims[i].attach_scale(fcoord[ax])
else:
bfield[b].dims[i].attach_scale(ccoord[ax])
f.close()
def divB(rstdata):
Bx=rstdata['1-FIELD']
By=rstdata['2-FIELD']
Bz=rstdata['3-FIELD']
dBx=np.diff(Bx,axis=2)
dBy=np.diff(By,axis=1)
dBz=np.diff(Bz,axis=0)
dB = dBx+dBy+dBz
return dB
| true | true |
1c3bab33a17d606bd8b00a0e94074be567b1c6ca | 2,300 | py | Python | render/deleters.py | VCL3D/BlenderScripts | d9671801d2a7686226c9fcf297d89a4388158733 | [
"MIT"
] | 11 | 2021-05-11T17:26:59.000Z | 2022-03-25T08:13:59.000Z | render/deleters.py | VCL3D/BlenderScripts | d9671801d2a7686226c9fcf297d89a4388158733 | [
"MIT"
] | null | null | null | render/deleters.py | VCL3D/BlenderScripts | d9671801d2a7686226c9fcf297d89a4388158733 | [
"MIT"
] | 2 | 2021-05-15T01:56:01.000Z | 2021-05-15T13:49:57.000Z | import bpy
import os
def delete_cameras_lights():
    """Delete all camera and light objects and their data-blocks.

    Uses the Blender 2.80+ API: object type 'LIGHT' and bpy.data.lights
    (the pre-2.80 'LAMP'/bpy.data.lamps names were removed -- the original
    bpy.data.lamps loop raised AttributeError).  Matches the API already
    used by delete_all() in this module.
    """
    bpy.ops.object.mode_set(mode='OBJECT')
    bpy.ops.object.select_by_type(type='CAMERA')
    bpy.ops.object.delete(use_global=False)
    bpy.ops.object.select_by_type(type='LIGHT')
    bpy.ops.object.delete(use_global=False)
    # Remove orphaned data-blocks so a subsequent import starts clean.
    # (The original repeated the select/delete pair twice; deletion is
    # idempotent, so the duplicate pass is dropped.)
    for item in bpy.data.cameras:
        bpy.data.cameras.remove(item, do_unlink=True)
    for item in bpy.data.lights:
        bpy.data.lights.remove(item, do_unlink=True)
def delete_all():
    """Delete every mesh, armature, camera and light object in the scene,
    then purge their data-blocks (meshes, armatures, actions, cameras,
    lights) so nothing orphaned remains."""
    bpy.ops.object.mode_set(mode='OBJECT')
    bpy.ops.object.select_by_type(type='MESH')
    bpy.ops.object.delete(use_global=False)
    bpy.ops.object.select_by_type(type='ARMATURE')
    bpy.ops.object.delete(use_global=False)
    bpy.ops.object.select_by_type(type='CAMERA')
    bpy.ops.object.delete(use_global=False)
    # bpy.ops.object.select_by_type(type='LAMP')
    bpy.ops.object.select_by_type(type='LIGHT')
    bpy.ops.object.delete(use_global=False)
    # remove the now-orphaned data-blocks as well
    for item in bpy.data.meshes:
        bpy.data.meshes.remove(item, do_unlink=True)
    for item in bpy.data.armatures:
        bpy.data.armatures.remove(item, do_unlink=True)
    for item in bpy.data.actions:
        bpy.data.actions.remove(item, do_unlink=True)
    for item in bpy.data.cameras:
        bpy.data.cameras.remove(item, do_unlink=True)
    for item in bpy.data.lights:
        bpy.data.lights.remove(item, do_unlink=True)
def delete_textureless():
    """Delete mesh objects whose first material's first texture slot is
    empty or references a non-existent image file.

    NOTE(review): uses the Blender 2.7x API (material.texture_slots and
    object.select assignment, both removed in 2.80); also assumes the mesh
    has at least one material and that an object shares the mesh's name --
    confirm against the Blender version and scene conventions in use.
    """
    bpy.ops.object.select_all(action='DESELECT')
    for mesh in bpy.data.meshes:
        slot = mesh.materials[0].texture_slots[0]
        if slot is None or not os.path.isfile(mesh.materials[0].texture_slots[0].texture.image.filepath):
            bpy.data.objects[mesh.name].select = True
    bpy.ops.object.delete(use_global=False)
def delete_materials():
    """Remove all material data-blocks and clear material slots on every
    object that carries data."""
    # removing index 0 repeatedly drains the collection safely while mutating
    while len(bpy.data.materials):
        bpy.data.materials.remove(bpy.data.materials[0], do_unlink=True)
    for obj in bpy.data.objects:
        if obj.data is not None:
            obj.data.materials.clear()
| 36.507937 | 105 | 0.691304 | import bpy
import os
def delete_cameras_lights():
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_by_type(type='CAMERA')
bpy.ops.object.delete(use_global=False)
bpy.ops.object.select_by_type(type='LIGHT')
bpy.ops.object.delete(use_global=False)
bpy.ops.object.select_by_type(type='CAMERA')
bpy.ops.object.delete(use_global=False)
bpy.ops.object.select_by_type(type='LIGHT')
bpy.ops.object.delete(use_global=False)
for item in bpy.data.cameras:
bpy.data.cameras.remove(item, do_unlink=True)
for item in bpy.data.lamps:
bpy.data.lamps.remove(item, do_unlink=True)
def delete_all():
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_by_type(type='MESH')
bpy.ops.object.delete(use_global=False)
bpy.ops.object.select_by_type(type='ARMATURE')
bpy.ops.object.delete(use_global=False)
bpy.ops.object.select_by_type(type='CAMERA')
bpy.ops.object.delete(use_global=False)
bpy.ops.object.select_by_type(type='LIGHT')
bpy.ops.object.delete(use_global=False)
for item in bpy.data.meshes:
bpy.data.meshes.remove(item, do_unlink=True)
for item in bpy.data.armatures:
bpy.data.armatures.remove(item, do_unlink=True)
for item in bpy.data.actions:
bpy.data.actions.remove(item, do_unlink=True)
for item in bpy.data.cameras:
bpy.data.cameras.remove(item, do_unlink=True)
for item in bpy.data.lights:
bpy.data.lights.remove(item, do_unlink=True)
def delete_textureless():
bpy.ops.object.select_all(action='DESELECT')
for mesh in bpy.data.meshes:
slot = mesh.materials[0].texture_slots[0]
if slot is None or not os.path.isfile(mesh.materials[0].texture_slots[0].texture.image.filepath):
bpy.data.objects[mesh.name].select = True
bpy.ops.object.delete(use_global=False)
def delete_materials():
while len(bpy.data.materials):
bpy.data.materials.remove(bpy.data.materials[0], do_unlink=True)
for obj in bpy.data.objects:
if obj.data is not None:
obj.data.materials.clear()
| true | true |
1c3bac77eb0f911fbab4afc2653d0aed9119694d | 10,143 | py | Python | client/intelligent_collab/icollab.py | Rajpratik71/stacks-usecase | 19272c2d7f9ece306024b4bc9c97a0f9354c5f89 | [
"Apache-2.0"
] | 15 | 2019-09-19T09:32:11.000Z | 2021-03-25T15:45:11.000Z | client/intelligent_collab/icollab.py | Rajpratik71/stacks-usecase | 19272c2d7f9ece306024b4bc9c97a0f9354c5f89 | [
"Apache-2.0"
] | 8 | 2019-11-01T12:13:15.000Z | 2020-04-06T19:18:13.000Z | client/intelligent_collab/icollab.py | Rajpratik71/stacks-usecase | 19272c2d7f9ece306024b4bc9c97a0f9354c5f89 | [
"Apache-2.0"
] | 21 | 2019-09-19T16:57:41.000Z | 2021-09-23T04:57:25.000Z | #!/usr/bin/python3
"""Runner for Intelligent Collaboration App"""
import sys
import subprocess
import json
import requests
import re
import time
import yaml
import os
import tarfile
import shutil
import threading
MODELS_DIR = os.environ.get("MODELS_DIR", "./saved_models")
# Format for models lists is [(file_url, directory_name), ...]
TAR_MODELS = [("https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2\
?tf-hub-format=compressed", "ssd_mobilenet_v2_2"),
("https://tfhub.dev/google/magenta/arbitrary-image-stylization\
-v1-256/2?tf-hub-format=compressed",
"magenta_arbitrary-image-stylization-v1-256_2")]
PB_MODELS = [("https://github.com/IntelAI/models/raw/master/models/object_\
detection/tensorflow/ssd-mobilenet/inference/ssdmobilenet_preprocess.pb",
"ssdmobilenet_preprocess"),
("https://storage.googleapis.com/intel-optimized-tensorflow/\
models/v1_8/ssdmobilenet_int8_pretrained_model_combinedNMS_s8.pb",
"ssdmobilenet_int8_pretrained_model_combinedNMS_s8")]
INT8_MODELS_PATH = "/ssd_mobilenet_int8"
def check_folder(folder):
    """Return True when *folder* exists and is a directory."""
    return os.path.isdir(folder)
def wait_for_request(request):
    """Return True when *request* (a completed requests.Response) succeeded,
    else print an error and return False.

    Bug fix: the original polled ``request.ok`` in a 100 x 2 s sleep loop,
    but ``ok`` is a static property of an already-completed response — it
    can never flip from False to True, so the loop just burned ~200 s and
    then crashed with a NameError on the undefined ``file_path``.  The
    check is now done once and the message uses the response's own URL.
    """
    if request.ok:
        return True
    print("ERROR: Model file " + str(getattr(request, "url", "<unknown>"))
          + " could not be downloaded.")
    return False
def download_targz_model(url, base_dir, model_dir):
    # Download a .tar.gz model archive from *url*, save it as
    # base_dir/model_dir.tar.gz, and unpack it into base_dir/model_dir
    # (untar_model also deletes the archive afterwards).
    if not check_folder(base_dir + "/" + model_dir):
        os.makedirs(base_dir + "/" + model_dir)
    model = requests.get(url)
    completed = wait_for_request(model)
    if completed:
        with open(base_dir + "/" + model_dir + ".tar.gz", 'wb') as m:
            m.write(model.content)
        untar_model(base_dir + "/" + model_dir + ".tar.gz", base_dir + "/" +
                    model_dir)
def download_pb_model(url, base_dir, model_name):
    # Download a single frozen-graph (.pb) model into
    # base_dir + INT8_MODELS_PATH, creating the directory on demand.
    if not check_folder(base_dir + INT8_MODELS_PATH):
        os.makedirs(base_dir + INT8_MODELS_PATH)
    model = requests.get(url)
    completed = wait_for_request(model)
    if completed:
        with open(base_dir + INT8_MODELS_PATH + "/" + model_name + ".pb",
                  'wb') as m:
            m.write(model.content)
        print("INFO: Model is available at " + base_dir + INT8_MODELS_PATH)
def untar_model(file_path, target_path):
    """Extract the tar archive at *file_path* into *target_path*, delete
    the archive, and report where the model landed.

    SECURITY NOTE(review): extractall() trusts member paths from a
    downloaded archive; consider tarfile's ``filter="data"`` where the
    runtime supports it.
    """
    with tarfile.open(file_path) as archive:
        archive.extractall(target_path)
    os.remove(file_path)
    print("INFO: Model is available at " + target_path)
def download_models():
    """Fetch every configured model into MODELS_DIR, skipping models
    already present on disk.

    Bug fix: the original passed ``target=download_xxx_model(...)`` — i.e.
    it *called* the download synchronously on the main thread and handed
    the None return value to Thread, so the threads never did anything.
    The callables are now passed properly and the threads are joined, so
    the function still returns only after every download has finished.
    """
    if not check_folder(MODELS_DIR):
        os.makedirs(MODELS_DIR)
    threads = []
    for url, model_dir in TAR_MODELS:
        if os.path.exists(MODELS_DIR + "/" + model_dir + "/" + "saved_model.pb"):
            print("INFO: Model " + model_dir + " found locally. Skipping download.")
        else:
            threads.append(threading.Thread(target=download_targz_model,
                                            args=(url, MODELS_DIR, model_dir),
                                            daemon=True))
    for url, model_name in PB_MODELS:
        if os.path.exists(MODELS_DIR + INT8_MODELS_PATH + "/" + model_name + ".pb"):
            print("INFO: Model " + model_name + " found locally. Skipping download.")
        else:
            threads.append(threading.Thread(target=download_pb_model,
                                            args=(url, MODELS_DIR, model_name),
                                            daemon=True))
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
def clean_models():
    """Delete the MODELS_DIR cache directory (and everything inside it).

    Bug fix: ``shutil.rmtree`` raises ``OSError`` on failure —
    ``shutil.Error`` is only raised by copy operations such as copytree —
    so the original handler could never catch a real deletion failure.
    """
    if check_folder(MODELS_DIR):
        try:
            shutil.rmtree(MODELS_DIR)
        except OSError:
            print("WARNING: Could not remove temporary models directory. "
                  "You might need to remove it manually.")
    print("INFO: All Intelligent Collaboration Models were removed.")
compose_file = './docker-compose.yml'
def register_services(backend_port: int):
    """Read docker-compose.yml and POST each service's (name, ip, port)
    to the main controller's /services/ registry endpoint.

    Failures for individual services are printed and skipped (best-effort).
    """
    # Wait for main_controller API to be ready to accept requests
    time.sleep(2)
    with open(compose_file, 'r') as yml_file:
        compose = yaml.safe_load(yml_file)
    headers = {'Content-type': 'application/json',
               'accept': 'application/json'}
    # Static service IPs are declared under the compose file's (single)
    # network — assumes exactly one network is defined; TODO confirm.
    net_name = list(compose['networks'].keys())[0]
    for svc_name, config in compose['services'].items():
        name = config['container_name']
        ip = config['networks'][net_name]['ipv4_address']
        port = 8000
        if svc_name != 'effects_controller':
            # Backend-style services advertise their API port through an
            # API_PORT=<n> environment entry; default stays 8000 if absent.
            for env_var in config["environment"]:
                if env_var.startswith("API_PORT="):
                    port = int(env_var.split("=")[1])
                    break
        else:
            # The effects controller serves plain HTTP on port 80.
            port = 80
        data = {
            "name": name,
            "ip": ip,
            "port": port
        }
        # Register service
        try:
            response = requests.post(f"http://localhost:{backend_port}/services/",
                                     data=json.dumps(data), headers=headers)
        except Exception as e:
            # Best-effort: log the failure and move on to the next service.
            print(e)
            continue
def register_iccam(backend_port: int):
    """Ask the main controller to register the IC camera endpoint."""
    requests.post(
        f"http://localhost:{backend_port}/iccam/",
        headers={'Content-type': 'application/json',
                 'accept': 'application/json'},
    )
def get_port_fwd():
    """Scrape `docker-compose ps -a` output for the host ports forwarded
    to the UI and main-controller containers.

    Returns:
        (ui_port, backend_port): ints, or None for any container whose
        line was not found in the output.
    """
    ui_port = None
    backend_port = None
    services = subprocess.check_output(['docker-compose', 'ps', '-a'])
    lines = services.decode().split("\n")
    for line in lines:
        # The last column looks like "0.0.0.0:8080->8080/tcp"; take the
        # host-side port between ':' and '->'.
        if line.startswith('ic-ui-controller'):
            ui_port = int(line.split()[-1].split('->')[0].split(":")[1])
        if line.startswith('ic-main-controller'):
            backend_port = int(line.split()[-1].split('->')[0].split(":")[1])
        if ui_port and backend_port:
            break
    return ui_port, backend_port
def tear_down():
    # Roll back a partially-started deployment and abort the script.
    # NOTE(review): down_app() already calls stop_app() itself, so services
    # are stopped twice here (harmless but redundant).
    stop_app()
    down_app()
    print("ERROR: Something went wrong, services were rolled back")
    sys.exit(1)
def start_app():
    """Bring up the Intelligent Collaboration stack via docker-compose,
    register its services with the backend, and download the effect models.

    Any failure rolls the deployment back through tear_down(), which exits
    the process with status 1.
    """
    if not os.path.exists(MODELS_DIR):
        os.mkdir(MODELS_DIR)
    result = subprocess.call(['docker-compose up -d'],
                             shell=True, stderr=subprocess.STDOUT)
    if result != 0:
        print("ERROR: docker-compose failed")
        tear_down()
    ui_port, backend_port = get_port_fwd()
    if not ui_port or not backend_port:
        print("ERROR: backend service is not well defined")
        tear_down()
    # Bug fix: the bare `except:` clauses also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    try:
        register_services(backend_port)
        register_iccam(backend_port)
    except Exception:
        print("ERROR: backend service is not reachable")
        tear_down()
    try:
        download_models()
    except Exception:
        print("ERROR: effect models couldn't be downloaded")
        tear_down()
    print(f"INFO: Intelligent Collaboration App started. Effects Controller \
UI is at http://localhost:{ui_port}, and backend API at http://localhost:{backend_port}/docs.")
def stop_app():
    """Stop the running Intelligent Collaboration containers."""
    subprocess.call(['docker-compose stop'], shell=True,
                    stderr=subprocess.STDOUT)
    print("INFO: Intelligent Collaboration App stopped")
def down_app():
    """Stop, then remove, all Intelligent Collaboration containers.

    Downloaded models in MODELS_DIR are deliberately kept; the "clean"
    action removes them.
    """
    stop_app()
    subprocess.call(['docker-compose down'], shell=True,
                    stderr=subprocess.STDOUT)
    print("INFO: All Intelligent Collaboration Resources were removed. Downloa\
ded models are kept at " + MODELS_DIR + ". Use \"clean\" option to also remove\
 that directory.")
def list_services():
    """Print docker-compose's view of this project's containers."""
    subprocess.call(['docker-compose ps'], shell=True,
                    stderr=subprocess.STDOUT)
def display_help():
    """Print the usage text for the icollab.py wrapper script to stdout."""
    # The help text is one literal continued with backslashes; continuation
    # lines sit at column 0 so no stray indentation leaks into the string.
    print("\nThis script allows you to interact with Intelligent Collaboration\
.\n\nThere are 6 supported actions:\n\nstart - Initializes and start all Intel\
ligent Collaboration basic services.\n\nstop - Stops all running Intelligent C\
ollaboration services.\n\ndown - Stops and removes all Intelligent Collaborati\
on services and containers.\n\nlist - Lists all running services originated fr\
om current project directory.\n\nclean - Removes temporary directory where mo\
dels are saved.\n         By default, this script will look for the models loc\
ally and will\n         only attempt to download them if not found.\n\nhelp - \
Display this help.\n\nSyntaxis is as follows:\n\n    python icollab.py [start\
|stop|down|list|clean|help]\n\nAfter initializing the app, you can take a look\
 at the UI at\n\n    http://localhost:8080.\n\nBy default the port 8080 is ass\
igned to the UI and the port 8000 is assigned\nto the backend.\nIf you want to\
 use different ports use the environment variables UI_PORT and BACKEND_PORT r\
espectively.\n\nExample:\n\n    UI_PORT=8081 BACKEND_PORT=8001 python icollab.\
py start\n")
if __name__ == '__main__':
    # CLI dispatch: python icollab.py [start|stop|down|list|clean|help]
    if len(sys.argv) < 2:
        raise ValueError('ERROR: Please provide action to execute. Options are\
 \"start\", \"stop\", \"clean\", \"list\" and \"down\"')
    # Verify docker-compose is on PATH before doing anything destructive.
    dcompose = subprocess.call(['docker-compose -v'], shell=True,
                               stdout=subprocess.PIPE)
    if dcompose != 0:
        print('ERROR: \"docker-compose\" not found. Please ensure that it is \
appropriately installed. Aborting icollab.')
        sys.exit()
    if (sys.argv[1] == "start" or sys.argv[1] == "--start"):
        start_app()
    elif (sys.argv[1] == "stop" or sys.argv[1] == "--stop"):
        stop_app()
    elif (sys.argv[1] == "clean" or sys.argv[1] == "--clean"):
        clean_models()
    elif (sys.argv[1] == "down" or sys.argv[1] == "--down"):
        # NOTE(review): down_app() already calls stop_app(), so services
        # are stopped twice on this path (harmless).
        stop_app()
        down_app()
    elif (sys.argv[1] == "list" or sys.argv[1] == "--list"):
        list_services()
    elif (sys.argv[1] == "help" or sys.argv[1] == "-h" or
          sys.argv[1] == "--help"):
        display_help()
    else:
        # NOTE(review): this message omits the "clean" and "help" actions
        # accepted above.
        raise ValueError('ERROR: Action \"%s\" not recognized. Options are \
\"start\", \"stop\", \"down\" and \"list\".' % sys.argv[1])
    sys.exit()
| 34.5 | 95 | 0.618062 |
import sys
import subprocess
import json
import requests
import re
import time
import yaml
import os
import tarfile
import shutil
import threading
MODELS_DIR = os.environ.get("MODELS_DIR", "./saved_models")
TAR_MODELS = [("https://tfhub.dev/tensorflow/ssd_mobilenet_v2/2\
?tf-hub-format=compressed", "ssd_mobilenet_v2_2"),
("https://tfhub.dev/google/magenta/arbitrary-image-stylization\
-v1-256/2?tf-hub-format=compressed",
"magenta_arbitrary-image-stylization-v1-256_2")]
PB_MODELS = [("https://github.com/IntelAI/models/raw/master/models/object_\
detection/tensorflow/ssd-mobilenet/inference/ssdmobilenet_preprocess.pb",
"ssdmobilenet_preprocess"),
("https://storage.googleapis.com/intel-optimized-tensorflow/\
models/v1_8/ssdmobilenet_int8_pretrained_model_combinedNMS_s8.pb",
"ssdmobilenet_int8_pretrained_model_combinedNMS_s8")]
INT8_MODELS_PATH = "/ssd_mobilenet_int8"
def check_folder(folder):
exists = os.path.isdir(folder)
return exists
def wait_for_request(request):
count = 0
while request.ok is False and count < 100:
time.sleep(2)
count += 1
if count == 100:
print("ERROR: Model file " + file_path + " could not be downloaded.")
return False
return True
def download_targz_model(url, base_dir, model_dir):
if not check_folder(base_dir + "/" + model_dir):
os.makedirs(base_dir + "/" + model_dir)
model = requests.get(url)
completed = wait_for_request(model)
if completed:
with open(base_dir + "/" + model_dir + ".tar.gz", 'wb') as m:
m.write(model.content)
untar_model(base_dir + "/" + model_dir + ".tar.gz", base_dir + "/" +
model_dir)
def download_pb_model(url, base_dir, model_name):
if not check_folder(base_dir + INT8_MODELS_PATH):
os.makedirs(base_dir + INT8_MODELS_PATH)
model = requests.get(url)
completed = wait_for_request(model)
if completed:
with open(base_dir + INT8_MODELS_PATH + "/" + model_name + ".pb",
'wb') as m:
m.write(model.content)
print("INFO: Model is available at " + base_dir + INT8_MODELS_PATH)
def untar_model(file_path, target_path):
model_tar = tarfile.open(file_path)
model_tar.extractall(target_path)
model_tar.close()
os.remove(file_path)
print("INFO: Model is available at " + target_path)
def download_models():
if not check_folder(MODELS_DIR):
os.makedirs(MODELS_DIR)
for model in TAR_MODELS:
model_exists = os.path.exists(MODELS_DIR + "/" + model[1] +
"/" + "saved_model.pb")
if not model_exists:
thread = threading.Thread(target=download_targz_model(
model[0], MODELS_DIR,
model[1]))
thread.daemon = True
thread.start()
else:
print("INFO: Model " + model[1] + " found locally. Skipping \
download.")
for model in PB_MODELS:
model_exists = os.path.exists(MODELS_DIR + INT8_MODELS_PATH + "/" +
model[1] + ".pb")
if not model_exists:
thread = threading.Thread(target=download_pb_model(
model[0], MODELS_DIR,
model[1]))
thread.daemon = True
thread.start()
else:
print("INFO: Model " + model[1] + " found locally. Skipping \
download.")
def clean_models():
folder_exists = check_folder(MODELS_DIR)
if folder_exists:
try:
shutil.rmtree(MODELS_DIR)
except shutil.Error as e:
print("WARNING: Could not remove temporary models directory. \
You might need to remove it manually.")
print("INFO: All Intelligent Collaboration Models were removed.")
compose_file = './docker-compose.yml'
def register_services(backend_port: int):
time.sleep(2)
with open(compose_file, 'r') as yml_file:
compose = yaml.safe_load(yml_file)
headers = {'Content-type': 'application/json',
'accept': 'application/json'}
net_name = list(compose['networks'].keys())[0]
for svc_name, config in compose['services'].items():
name = config['container_name']
ip = config['networks'][net_name]['ipv4_address']
port = 8000
if svc_name != 'effects_controller':
for env_var in config["environment"]:
if env_var.startswith("API_PORT="):
port = int(env_var.split("=")[1])
break
else:
port = 80
data = {
"name": name,
"ip": ip,
"port": port
}
try:
response = requests.post(f"http://localhost:{backend_port}/services/",
data=json.dumps(data), headers=headers)
except Exception as e:
print(e)
continue
def register_iccam(backend_port: int):
headers = {'Content-type': 'application/json',
'accept': 'application/json'}
response = requests.post(f"http://localhost:{backend_port}/iccam/",
headers=headers)
def get_port_fwd():
ui_port = None
backend_port = None
services = subprocess.check_output(['docker-compose', 'ps', '-a'])
lines = services.decode().split("\n")
for line in lines:
if line.startswith('ic-ui-controller'):
ui_port = int(line.split()[-1].split('->')[0].split(":")[1])
if line.startswith('ic-main-controller'):
backend_port = int(line.split()[-1].split('->')[0].split(":")[1])
if ui_port and backend_port:
break
return ui_port, backend_port
def tear_down():
stop_app()
down_app()
print("ERROR: Something went wrong, services were rolled back")
sys.exit(1)
def start_app():
if not os.path.exists(MODELS_DIR):
os.mkdir(MODELS_DIR)
result = subprocess.call(['docker-compose up -d'],
shell=True, stderr=subprocess.STDOUT)
if result != 0:
print("ERROR: docker-compose failed")
tear_down()
ui_port, backend_port = get_port_fwd()
if not ui_port or not backend_port:
print("ERROR: backend service is not well defined")
tear_down()
try:
register_services(backend_port)
register_iccam(backend_port)
except:
print("ERROR: backend service is not reachable")
tear_down()
try:
download_models()
except:
print("ERROR: effect models couldn't be downloaded")
tear_down()
print(f"INFO: Intelligent Collaboration App started. Effects Controller \
UI is at http://localhost:{ui_port}, and backend API at http://localhost:{backend_port}/docs.")
def stop_app():
subprocess.call(['docker-compose stop'],
shell=True, stderr=subprocess.STDOUT)
print("INFO: Intelligent Collaboration App stopped")
def down_app():
stop_app()
subprocess.call(['docker-compose down'],
shell=True, stderr=subprocess.STDOUT)
print("INFO: All Intelligent Collaboration Resources were removed. Downloa\
ded models are kept at " + MODELS_DIR + ". Use \"clean\" option to also remove\
that directory.")
def list_services():
subprocess.call(['docker-compose ps'],
shell=True, stderr=subprocess.STDOUT)
def display_help():
print("\nThis script allows you to interact with Intelligent Collaboration\
.\n\nThere are 6 supported actions:\n\nstart - Initializes and start all Intel\
ligent Collaboration basic services.\n\nstop - Stops all running Intelligent C\
ollaboration services.\n\ndown - Stops and removes all Intelligent Collaborati\
on services and containers.\n\nlist - Lists all running services originated fr\
om current project directory.\n\nclean - Removes temporary directory where mo\
dels are saved.\n By default, this script will look for the models loc\
ally and will\n only attempt to download them if not found.\n\nhelp - \
Display this help.\n\nSyntaxis is as follows:\n\n python icollab.py [start\
|stop|down|list|clean|help]\n\nAfter initializing the app, you can take a look\
at the UI at\n\n http://localhost:8080.\n\nBy default the port 8080 is ass\
igned to the UI and the port 8000 is assigned\nto the backend.\nIf you want to\
use different ports use the environment variables UI_PORT and BACKEND_PORT r\
espectively.\n\nExample:\n\n UI_PORT=8081 BACKEND_PORT=8001 python icollab.\
py start\n")
if __name__ == '__main__':
if len(sys.argv) < 2:
raise ValueError('ERROR: Please provide action to execute. Options are\
\"start\", \"stop\", \"clean\", \"list\" and \"down\"')
dcompose = subprocess.call(['docker-compose -v'], shell=True,
stdout=subprocess.PIPE)
if dcompose != 0:
print('ERROR: \"docker-compose\" not found. Please ensure that it is \
appropriately installed. Aborting icollab.')
sys.exit()
if (sys.argv[1] == "start" or sys.argv[1] == "--start"):
start_app()
elif (sys.argv[1] == "stop" or sys.argv[1] == "--stop"):
stop_app()
elif (sys.argv[1] == "clean" or sys.argv[1] == "--clean"):
clean_models()
elif (sys.argv[1] == "down" or sys.argv[1] == "--down"):
stop_app()
down_app()
elif (sys.argv[1] == "list" or sys.argv[1] == "--list"):
list_services()
elif (sys.argv[1] == "help" or sys.argv[1] == "-h" or
sys.argv[1] == "--help"):
display_help()
else:
raise ValueError('ERROR: Action \"%s\" not recognized. Options are \
\"start\", \"stop\", \"down\" and \"list\".' % sys.argv[1])
sys.exit()
| true | true |
1c3bacb5a940d60298ac8eeb273dc21d6fefc628 | 9,048 | py | Python | scalability/generate_report.py | zhiiker/ic | 370af58ea1231cdbb8d2e4f593a089dfd1c803cd | [
"Apache-2.0"
] | 941 | 2021-05-10T08:14:14.000Z | 2022-03-31T11:40:24.000Z | scalability/generate_report.py | zhiiker/ic | 370af58ea1231cdbb8d2e4f593a089dfd1c803cd | [
"Apache-2.0"
] | 3 | 2022-02-16T12:24:20.000Z | 2022-03-23T12:05:41.000Z | scalability/generate_report.py | zhiiker/ic | 370af58ea1231cdbb8d2e4f593a089dfd1c803cd | [
"Apache-2.0"
] | 122 | 2021-05-10T08:21:23.000Z | 2022-03-25T20:34:12.000Z | #!/usr/bin/env python3
import json
import os
import sys
import pybars
import report
def add_file(base, path, alt):
    """Concatenate the contents of the files named in *path* (an iterable
    of names relative to *base*).

    On the first file that fails to open or read, *alt* is appended to
    whatever was read so far and reading stops (this preserves the
    original best-effort semantics).

    Bug fix: files are now opened with a context manager so handles are
    closed deterministically instead of leaking until garbage collection.
    """
    content = ""
    try:
        for p in path:
            with open(os.path.join(base, p)) as fh:
                content += fh.read()
    except Exception:
        content += alt
    return content
def generate_report(githash, timestamp):
    """Generate report for the given measurement.

    Renders <githash>/<timestamp>/report.html from the artifacts found in
    that directory's numbered iteration subfolders: workload-generator
    summaries, flamegraph SVGs, recorded workload-generator command lines,
    iteration.json, and prometheus.json.
    """
    source = open("templates/experiment.html.hb", mode="r").read()
    data = {
        "iterations": [],
        "timestamp": timestamp,
        "githash": githash,
    }
    base = "{}/{}".format(githash, timestamp)
    report_file = os.path.join(base, "report.html")
    http_request_duration = []
    wg_http_latency = []
    wg_failure_rate = []
    with open(report_file, "w") as outfile:
        # Iteration directories are purely numeric and processed in order.
        for i in sorted([int(i) for i in os.listdir(base) if i.isnumeric()]):
            path = os.path.join(base, str(i))
            if os.path.isdir(path):
                iter_data = {}
                print("Found measurement iteration {} in {}".format(i, path))
                # Workload generator summaries
                files = [os.path.join(path, f) for f in os.listdir(path) if f.startswith("summary_machine_")]
                print("Files: ", files)
                if len(files) > 0:
                    (failure_rate, t_median, t_average, t_max, t_min, total_requests, _, _) = report.evaluate_summaries(
                        files
                    )
                    # NOTE(review): `compiler` and `template` are only bound
                    # inside this conditional, but template(data) is used
                    # unconditionally at the end — a run where no iteration
                    # has summary files crashes with a NameError.
                    compiler = pybars.Compiler()
                    template = compiler.compile(source)
                    iter_data.update(
                        {
                            "header": i,
                            "failure_rate": "{:.2f}".format(failure_rate * 100),
                            "failure_rate_color": "green" if failure_rate < 0.01 else "red",
                            "t_median": "{:.2f}".format(t_median),
                            "t_average": "{:.2f}".format(t_average),
                            "t_max": "{:.2f}".format(t_max),
                            "t_min": "{:.2f}".format(t_min),
                            "total_requests": total_requests,
                        }
                    )
                    wg_http_latency.append(t_median)
                    wg_failure_rate.append(failure_rate * 100)
                # Search for flamegraph
                flamegraph = [os.path.join(str(i), f) for f in os.listdir(path) if f.startswith("flamegraph_")]
                print("Flamegraph is: ", flamegraph)
                if len(flamegraph) > 0:
                    iter_data.update({"flamegraph": flamegraph[0]})
                # Collect the recorded workload-generator command lines
                # (workload-generator-cmd-1, -2, ... until one is missing).
                wg_commands = ""
                # NOTE(review): this reuses `i` as the file counter,
                # clobbering the iteration number — later error messages in
                # this loop body print the wrong iteration index.
                i = 0
                run = True
                while run:
                    i += 1
                    try:
                        with open(os.path.join(path, f"workload-generator-cmd-{i}")) as wg_cmd_file:
                            wg_commands += wg_cmd_file.read() + "\n"
                    except Exception:
                        run = False
                iter_data["wg_commands"] = [
                    "./ic-workload-generator {}".format(c)
                    for c in wg_commands.split("./ic-workload-generator")
                    if len(c.strip()) > 0
                ]
                # Iteration configuration
                load_total = None
                try:
                    with open(os.path.join(path, "iteration.json")) as iteration_conf:
                        iter_data["configuration"] = json.loads(iteration_conf.read())
                        load_total = iter_data["configuration"]["configuration"]["load_total"]
                except Exception as err:
                    print("Failed to parse iteration.json file for iteration {} - {}".format(i, err))
                # Prometheus report
                try:
                    with open(os.path.join(path, "prometheus.json")) as prometheus_metrics:
                        metrics = json.loads(prometheus_metrics.read())
                        try:
                            http_request_duration.append(metrics["http_request_duration"][0])
                        except Exception as err:
                            print(
                                f"Failed to determine HTTP request duration for iteration {i} in file {path}/prometheus.json - {err}"
                            )
                        # Request-rate samples are (unix_time, value) pairs;
                        # x is rebased to seconds since the iteration start.
                        t_start = int(metrics["http_request_rate"][0][0][0])
                        xdata = [int(x) - t_start for x, _ in metrics["http_request_rate"][0]]
                        ydata = [float(y) for _, y in metrics["http_request_rate"][0]]
                        plots = [
                            {
                                "x": xdata,
                                "y": ydata,
                            }
                        ]
                        layout = {
                            "yaxis": {"title": "rate [requests / s]", "range": [0, 1.2 * max(ydata)]},
                            "xaxis": {"title": "iteration time [s]"},
                        }
                        # Draw the targeted total load as a horizontal
                        # reference line when iteration.json provided it.
                        if load_total is not None:
                            layout["shapes"] = [
                                {
                                    "type": "line",
                                    "x0": min(xdata),
                                    "y0": load_total,
                                    "x1": max(xdata),
                                    "y1": load_total,
                                    "line": {
                                        "color": "red",
                                    },
                                }
                            ]
                        metrics.update(
                            {
                                "http_request_rate_plot": plots,
                                "http_request_rate_layout": layout,
                            }
                        )
                        iter_data.update({"prometheus": metrics})
                except Exception as err:
                    print(f"Failed to parse prometheus.json file for iteration {i} - {err}")
                data["iterations"].append(iter_data)
        # Experiment details
        try:
            with open(os.path.join(base, "experiment.json")) as experiment_info:
                experiment = json.loads(experiment_info.read())
                data.update({"experiment": experiment})
        except Exception:
            print("Failed to parse experiment.json file")
            exit(1)
        experiment_name = data["experiment"]["experiment_name"]
        experiment_template_file = "templates/{}.html.hb".format(experiment_name)
        print("Experiment template file is: {}".format(experiment_template_file))
        experiment_source = open(experiment_template_file, mode="r").read()
        # NOTE(review): `compiler` comes from inside the iteration loop
        # above — same NameError hazard as `template`.
        experiment_template = compiler.compile(experiment_source)
        experiment_data = data["experiment"]
        experiment_data["experiment_details"]["rps_max"] = (
            "{:.1f}".format(experiment_data["experiment_details"]["rps_max"])
            if "rps_max" in experiment_data["experiment_details"]
            else "n.a."
        )
        print("Rendering experiment details with: ", experiment_data)
        experiment_details = experiment_template(experiment_data)
        data.update(
            {
                "experiment-details": experiment_details,
                "plot-http-latency": [{"y": [e[1] for e in http_request_duration], "x": data["experiment"]["xlabels"]}],
                "plot-wg-http-latency": [{"y": wg_http_latency, "x": data["experiment"]["xlabels"]}],
                "plot-wg-failure-rate": [{"y": wg_failure_rate, "x": data["experiment"]["xlabels"]}],
                "layout-http-latency": {
                    "yaxis": {"title": "latency [ms]"},
                    "xaxis": {"title": data["experiment"]["xtitle"]},
                },
                "layout-wg-http-latency": {
                    "yaxis": {"title": "latency [ms]"},
                    "xaxis": {"title": data["experiment"]["xtitle"]},
                },
                "layout-wg-failure-rate": {
                    "yaxis": {"title": "failure rate [%]"},
                    "xaxis": {"title": data["experiment"]["xtitle"]},
                },
            }
        )
        # Raw environment/topology dumps embedded verbatim in the report.
        data["lscpu"] = add_file(f"{githash}/{timestamp}", ["lscpu.stdout.txt"], "lscpu data missing")
        data["free"] = add_file(f"{githash}/{timestamp}", ["free.stdout.txt"], "free data missing")
        data["subnet_info"] = add_file(f"{githash}/{timestamp}", ["subnet_info.json"], "subnet info data missing")
        data["topology"] = add_file(f"{githash}/{timestamp}", ["topology.json"], "topology data missing")
        output = template(data)
        outfile.write(output)
    print("Report is at file://{}/{}".format(os.getcwd(), report_file))
if __name__ == "__main__":
    # Usage: generate_report.py <githash> <timestamp>
    generate_report(sys.argv[1], sys.argv[2])
| 41.695853 | 133 | 0.461649 |
import json
import os
import sys
import pybars
import report
def add_file(base, path, alt):
content = ""
try:
for p in path:
content += open(os.path.join(base, p)).read()
except Exception:
content += alt
return content
def generate_report(githash, timestamp):
source = open("templates/experiment.html.hb", mode="r").read()
data = {
"iterations": [],
"timestamp": timestamp,
"githash": githash,
}
base = "{}/{}".format(githash, timestamp)
report_file = os.path.join(base, "report.html")
http_request_duration = []
wg_http_latency = []
wg_failure_rate = []
with open(report_file, "w") as outfile:
for i in sorted([int(i) for i in os.listdir(base) if i.isnumeric()]):
path = os.path.join(base, str(i))
if os.path.isdir(path):
iter_data = {}
print("Found measurement iteration {} in {}".format(i, path))
files = [os.path.join(path, f) for f in os.listdir(path) if f.startswith("summary_machine_")]
print("Files: ", files)
if len(files) > 0:
(failure_rate, t_median, t_average, t_max, t_min, total_requests, _, _) = report.evaluate_summaries(
files
)
compiler = pybars.Compiler()
template = compiler.compile(source)
iter_data.update(
{
"header": i,
"failure_rate": "{:.2f}".format(failure_rate * 100),
"failure_rate_color": "green" if failure_rate < 0.01 else "red",
"t_median": "{:.2f}".format(t_median),
"t_average": "{:.2f}".format(t_average),
"t_max": "{:.2f}".format(t_max),
"t_min": "{:.2f}".format(t_min),
"total_requests": total_requests,
}
)
wg_http_latency.append(t_median)
wg_failure_rate.append(failure_rate * 100)
flamegraph = [os.path.join(str(i), f) for f in os.listdir(path) if f.startswith("flamegraph_")]
print("Flamegraph is: ", flamegraph)
if len(flamegraph) > 0:
iter_data.update({"flamegraph": flamegraph[0]})
wg_commands = ""
i = 0
run = True
while run:
i += 1
try:
with open(os.path.join(path, f"workload-generator-cmd-{i}")) as wg_cmd_file:
wg_commands += wg_cmd_file.read() + "\n"
except Exception:
run = False
iter_data["wg_commands"] = [
"./ic-workload-generator {}".format(c)
for c in wg_commands.split("./ic-workload-generator")
if len(c.strip()) > 0
]
load_total = None
try:
with open(os.path.join(path, "iteration.json")) as iteration_conf:
iter_data["configuration"] = json.loads(iteration_conf.read())
load_total = iter_data["configuration"]["configuration"]["load_total"]
except Exception as err:
print("Failed to parse iteration.json file for iteration {} - {}".format(i, err))
try:
with open(os.path.join(path, "prometheus.json")) as prometheus_metrics:
metrics = json.loads(prometheus_metrics.read())
try:
http_request_duration.append(metrics["http_request_duration"][0])
except Exception as err:
print(
f"Failed to determine HTTP request duration for iteration {i} in file {path}/prometheus.json - {err}"
)
t_start = int(metrics["http_request_rate"][0][0][0])
xdata = [int(x) - t_start for x, _ in metrics["http_request_rate"][0]]
ydata = [float(y) for _, y in metrics["http_request_rate"][0]]
plots = [
{
"x": xdata,
"y": ydata,
}
]
layout = {
"yaxis": {"title": "rate [requests / s]", "range": [0, 1.2 * max(ydata)]},
"xaxis": {"title": "iteration time [s]"},
}
if load_total is not None:
layout["shapes"] = [
{
"type": "line",
"x0": min(xdata),
"y0": load_total,
"x1": max(xdata),
"y1": load_total,
"line": {
"color": "red",
},
}
]
metrics.update(
{
"http_request_rate_plot": plots,
"http_request_rate_layout": layout,
}
)
iter_data.update({"prometheus": metrics})
except Exception as err:
print(f"Failed to parse prometheus.json file for iteration {i} - {err}")
data["iterations"].append(iter_data)
try:
with open(os.path.join(base, "experiment.json")) as experiment_info:
experiment = json.loads(experiment_info.read())
data.update({"experiment": experiment})
except Exception:
print("Failed to parse experiment.json file")
exit(1)
experiment_name = data["experiment"]["experiment_name"]
experiment_template_file = "templates/{}.html.hb".format(experiment_name)
print("Experiment template file is: {}".format(experiment_template_file))
experiment_source = open(experiment_template_file, mode="r").read()
experiment_template = compiler.compile(experiment_source)
experiment_data = data["experiment"]
experiment_data["experiment_details"]["rps_max"] = (
"{:.1f}".format(experiment_data["experiment_details"]["rps_max"])
if "rps_max" in experiment_data["experiment_details"]
else "n.a."
)
print("Rendering experiment details with: ", experiment_data)
experiment_details = experiment_template(experiment_data)
data.update(
{
"experiment-details": experiment_details,
"plot-http-latency": [{"y": [e[1] for e in http_request_duration], "x": data["experiment"]["xlabels"]}],
"plot-wg-http-latency": [{"y": wg_http_latency, "x": data["experiment"]["xlabels"]}],
"plot-wg-failure-rate": [{"y": wg_failure_rate, "x": data["experiment"]["xlabels"]}],
"layout-http-latency": {
"yaxis": {"title": "latency [ms]"},
"xaxis": {"title": data["experiment"]["xtitle"]},
},
"layout-wg-http-latency": {
"yaxis": {"title": "latency [ms]"},
"xaxis": {"title": data["experiment"]["xtitle"]},
},
"layout-wg-failure-rate": {
"yaxis": {"title": "failure rate [%]"},
"xaxis": {"title": data["experiment"]["xtitle"]},
},
}
)
data["lscpu"] = add_file(f"{githash}/{timestamp}", ["lscpu.stdout.txt"], "lscpu data missing")
data["free"] = add_file(f"{githash}/{timestamp}", ["free.stdout.txt"], "free data missing")
data["subnet_info"] = add_file(f"{githash}/{timestamp}", ["subnet_info.json"], "subnet info data missing")
data["topology"] = add_file(f"{githash}/{timestamp}", ["topology.json"], "topology data missing")
output = template(data)
outfile.write(output)
print("Report is at file://{}/{}".format(os.getcwd(), report_file))
if __name__ == "__main__":
generate_report(sys.argv[1], sys.argv[2])
| true | true |
1c3bae3498469302a5a347b215e67a7b1cc21b18 | 87 | py | Python | makahiki/apps/widgets/ask_admin/__init__.py | justinslee/Wai-Not-Makahiki | 4b7dd685012ec64758affe0ecee3103596d16aa7 | [
"MIT"
] | 1 | 2015-07-22T11:31:20.000Z | 2015-07-22T11:31:20.000Z | makahiki/apps/widgets/ask_admin/__init__.py | justinslee/Wai-Not-Makahiki | 4b7dd685012ec64758affe0ecee3103596d16aa7 | [
"MIT"
] | null | null | null | makahiki/apps/widgets/ask_admin/__init__.py | justinslee/Wai-Not-Makahiki | 4b7dd685012ec64758affe0ecee3103596d16aa7 | [
"MIT"
] | null | null | null | """The ask_admin widget implements a "send feedback" button and corresponding form."""
| 43.5 | 86 | 0.770115 | true | true | |
1c3baf888f5130998962f4ec6d561434293266db | 9,714 | py | Python | archive/scripts/STIPS_Input_from_Sims/wingtips.py | benw1/WINGS | 32d4bfd073da0b86d2340cde25a5601d0a1ec95e | [
"RSA-MD"
] | 4 | 2022-01-04T18:24:56.000Z | 2022-01-27T08:23:37.000Z | scratch/test_phat/wingtips.py | benw1/WINGS | 32d4bfd073da0b86d2340cde25a5601d0a1ec95e | [
"RSA-MD"
] | null | null | null | scratch/test_phat/wingtips.py | benw1/WINGS | 32d4bfd073da0b86d2340cde25a5601d0a1ec95e | [
"RSA-MD"
] | null | null | null | #! /usr/bin/env python
'''
WFIRST Infrared Nearby Galaxies Test Image Product Simulator
Produces input files for the WFIRST STIPS simulator
'''
import time
import numpy as np
from astropy import wcs
from astropy.io import fits, ascii
from astropy.table import Table
class WingTips:
    """A STIPS source catalog: a table of sources plus its sky center.

    Wraps a NumPy table with one row per source (optionally prefixed by an
    ID column and/or RA-DEC columns, optionally suffixed by a comment
    column) and provides helpers to manipulate coordinates, merge catalogs
    and read/write STIPS input files.
    """

    def __init__(self, infile=None, center=[0, 0]):
        """Read zero or more STIPS input files into one table.

        :param infile: a file name or list of file names; None/empty gives an empty catalog
        :param center: [ra, dec] fallback center used when no file is read
        """
        # BUG FIX: the default used to be 'infile=[]', a shared mutable default;
        # merge_with() appends to self.infile, which would have polluted every
        # later instance constructed with the default.
        if infile is None:
            infile = []
        if isinstance(infile, str):
            infile = [infile]
        if len(infile) == 0:
            self.tab = np.array([])
        else:
            self.tab = WingTips.read_stips(infile[0])
            for extra in infile[1:]:
                self.tab = np.vstack((self.tab, WingTips.read_stips(extra)))
            center = WingTips.get_center(self.tab[:, 0], self.tab[:, 1])
        self.center = center
        self.n = self.tab.shape[0]
        self.infile = infile

    def strip_radec(self, hasID=False):
        """Remove the RA and DEC columns from the table in place.

        :param hasID: True if column 0 is an ID column (RA-DEC then start at 1)
        """
        _i = int(hasID)
        self.tab = np.delete(self.tab, [_i, _i + 1], 1)
        return None

    def attach_radec(self, radec, hasID=False):
        """Insert the given RA-DEC array as table columns and update the center.

        :param radec: array with one RA-DEC pair per source (same length as the table)
        :param hasID: True if column 0 is an ID column
        """
        if self.n != radec.shape[0]:
            raise ValueError('Number of RA-DEC does not match sources')
        _i = int(hasID)
        self.tab = np.insert(self.tab, _i, radec.T, 1)
        self.center = WingTips.get_center(radec[:, 0 + _i], radec[:, 1 + _i])
        return None

    def replace_radec(self, radec, hasID=False):
        """Replace the table's RA-DEC columns with the given array."""
        self.strip_radec(hasID)
        self.attach_radec(radec, hasID)
        return None

    def random_radec_for(self, other, shape=(4096, 4096), sample=False, n=0, hasID=False):
        """Return random RA-DEC positions matched to 'other'.

        'other' may be a FITS file name (positions drawn inside its WCS) or
        another WingTips object (positions drawn around its center, or
        sampled from its coordinates when sample=True).

        :param other: FITS file name or WingTips object
        :param shape: pixel extent positions are drawn from
        :param sample: if True, draw from other's existing coordinates
        :param n: with sample=True, replace n of self's coordinates instead
        :param hasID: True if the tables carry a leading ID column
        """
        _i = int(hasID)
        try:
            if other.endswith('.fits'):
                return WingTips.random_radec(self.n, imfile=other)
        except AttributeError:
            # 'other' has no .endswith, i.e. it is a WingTips object.
            # NOTE(review): the slice _i:_i+1 selects a single column; a full
            # RA-DEC pair would need _i:_i+2 -- confirm against callers.
            if not sample:
                return WingTips.random_radec(self.n, center=other.center)
            elif not bool(n):
                return WingTips.sample_radec(n=self.n, radec1=False, radec2=other.tab[:, _i:_i + 1])
            else:
                return WingTips.sample_radec(n=n, radec1=self.tab[:, _i:_i + 1], radec2=other.tab[:, _i:_i + 1])

    def merge_with(self, other, hasRADEC=True, hasID=False):
        """Append another catalog's rows to this one in place.

        :param other: WingTips object with the same column layout
        :param hasRADEC: True if both tables carry RA-DEC columns
        :param hasID: True if both tables carry a leading ID column
        """
        if self.tab.shape[1] != other.tab.shape[1]:
            raise ValueError('Number of columns does not match', self.tab.shape[1], other.tab.shape[1])
        self.tab = np.vstack((self.tab, other.tab))
        self.n = self.tab.shape[0]
        self.infile.append(other.infile)
        _i = int(hasID)
        if hasRADEC:
            self.center = WingTips.get_center(self.tab[:, 0 + _i], self.tab[:, 1 + _i])
        return None

    def flux_to_Sb(self, hasRADEC=True, hasID=False):
        """Convert the flux column to surface brightness for sersic-profile
        galaxies, in place: Sb = 0.5*flux / (pi * re**2 * axial_ratio).
        """
        _i = int(hasID)
        if hasRADEC:
            _i = _i + 2
        _f = self.tab[:, _i].astype(float)
        _r = self.tab[:, _i + 3].astype(float)
        _a = self.tab[:, _i + 5].astype(float)
        _s = (0.5 * _f) / (np.pi * _r ** 2 * _a)
        self.tab = np.delete(self.tab, _i, 1)
        self.tab = np.insert(self.tab, _i, _s.T, 1)
        return None

    def write_stips(self, outfile='temp.txt', hasID=False, hasCmnt=False, saveID=False, ipac=False):
        """Write the catalog as a STIPS input file.

        :param outfile: destination path
        :param hasID: True if the table already has a leading ID column
        :param hasCmnt: True if the table already has a trailing comment column
        :param saveID: keep existing IDs instead of renumbering (requires hasID)
        :param ipac: write IPAC table format instead of fixed-width ascii
        """
        _tab = WingTips.get_tabular(self.tab, hasID, hasCmnt, saveID)
        _nms = ('id', 'ra', 'dec', 'flux', 'type', 'n', 're', 'phi', 'ratio', 'notes')
        _fmt = ('%10d', '%15.7f', '%15.7f', '%15.7f', '%8s', '%10.3f', '%15.7f', '%15.7f', '%15.7f', '%8s')
        _t = Table(_tab, names=_nms)
        if ipac:
            ascii.write(_t, outfile, format='ipac', formats=dict(zip(_nms, _fmt)))
        else:
            ascii.write(_t, outfile, format='fixed_width', delimiter='', formats=dict(zip(_nms, _fmt)))
        return print('Wrote out %s \n' % outfile)

    @staticmethod
    def from_scratch(flux, ra=[], dec=[], center=[], ID=[], Type=[], n=[], re=[], phi=[], ratio=[], notes=[], outfile=''):
        """Build a WingTips object (or write a STIPS file) from raw columns.

        :param flux: per-source fluxes (required)
        :param ra: optional RA coordinates; generated around 'center' when omitted
        :param dec: optional DEC coordinates
        :param center: [ra, dec] used when coordinates are generated
        :param ID: optional source IDs
        :param Type: 'point' (default), 'sersic', or a per-source sequence
        :param n: sersic index column
        :param re: effective radius column
        :param phi: position angle column
        :param ratio: axial ratio column
        :param notes: optional per-source comments
        :param outfile: when given, write a STIPS file and return None;
            otherwise return the new WingTips object
        """
        _temp = WingTips()
        _temp.n = len(flux)
        _temp.infile = ['fromScratch']
        if len(center) > 0:
            _temp.center = center
            if len(ra) == 0:
                radec = _temp.random_radec_for(_temp)
                ra, dec = radec[:, 0], radec[:, 1]
        elif (len(ra) == len(dec)) & (len(ra) > 0):
            _temp.center = WingTips.get_center(np.array(ra), np.array(dec))
        else:
            raise ValueError('Provide valid coordinate or center')
        # BUG FIX: the original tested "Type is 'point'" -- object identity
        # against a string literal, which relies on CPython interning and
        # raises a SyntaxWarning on modern Python. Use string equality,
        # guarded so sequence-valued Type never hits elementwise comparison.
        _is_str = isinstance(Type, str)
        if len(Type) == 0 or (_is_str and Type in ('point', 'sersic')):
            if len(Type) == 0 or Type == 'point':
                Type = np.repeat(np.array(['point']), len(flux))
                # Point sources carry dummy profile columns of ones.
                _ones = np.ones_like(flux)
                n, re, phi, ratio = _ones, _ones, _ones, _ones
            else:
                Type = np.repeat(np.array(['sersic']), len(flux))
        elif len(Type) == len(flux):
            Type = np.array(Type)
        _tab = np.array([ra, dec, flux, Type, n, re, phi, ratio]).T
        if len(ID) == len(flux):
            _tab = np.hstack((np.array(ID, ndmin=2).T, _tab))
        if len(notes) == len(flux):
            _tab = np.hstack((_tab, np.array(notes, ndmin=2).T))
        _temp.tab = np.array(_tab)
        # BUG FIX: "outfile is ''" relied on string interning; test emptiness.
        if outfile == '':
            return _temp
        _temp.write_stips(outfile, hasID=bool(ID), hasCmnt=bool(notes), saveID=bool(ID))
        return None

    @staticmethod
    def read_stips(infile, getRADEC=True, getID=False, getCmnt=False):
        """Read a STIPS ascii input file and return its columns as a NumPy array.

        :param infile: path to the ascii STIPS input file
        :param getRADEC: include the ra/dec columns
        :param getID: include the id column
        :param getCmnt: include the comment column
        """
        _tab = []
        _infile = ascii.read(infile)
        print('\nRead in %s \n' % infile)
        if getID:
            _tab.append(_infile['id'])
        if getRADEC:
            _tab.append(_infile['ra'])
            _tab.append(_infile['dec'])
        _tab.append(_infile['flux'])
        _tab.append(_infile['type'])
        _tab.append(_infile['n'])
        _tab.append(_infile['re'])
        _tab.append(_infile['phi'])
        _tab.append(_infile['ratio'])
        if getCmnt:
            _tab.append(_infile['comment'])
        return np.array(_tab).T

    @staticmethod
    def get_tabular(_tab, hasID=False, hasCmnt=False, saveID=False):
        """Split a catalog array into the ten STIPS column lists
        (id, ra, dec, flux, type, n, re, phi, ratio, comment).

        :param _tab: source table; columns [id?] ra dec flux type n re phi ratio [comment]
        :param hasID: True if column 0 already holds IDs
        :param hasCmnt: True if the last column already holds comments
        :param saveID: keep existing IDs instead of renumbering (requires hasID)
        """
        _i = int(hasID)
        # BUG FIX: the original tested "if ~saveID:"; bitwise NOT of a bool is
        # -1 or -2 -- both truthy -- so IDs were ALWAYS regenerated and
        # saveID=True was silently ignored. Same for "~hasCmnt" below.
        if not saveID:
            _n = _tab.shape[0]
            _ID = np.array(np.linspace(1, _n, _n), ndmin=2).T
            _tab = np.hstack((_ID, _tab[:, _i:]))
        if not hasCmnt:
            _cmnt = np.array(np.repeat(np.array(['comment']), _tab.shape[0], ), ndmin=2).T
            _tab = np.hstack((_tab, _cmnt))
        return [_tab[:, 0].astype(float), _tab[:, 1].astype(float), _tab[:, 2].astype(float),
                _tab[:, 3].astype(float), _tab[:, 4], _tab[:, 5].astype(float),
                _tab[:, 6].astype(float), _tab[:, 7].astype(float),
                _tab[:, 8].astype(float), _tab[:, 9]]

    @staticmethod
    def create_wcs(centers=[0, 0], crpix=[2048, 2048], cdelt=[-0.11 / 3600, 0.11 / 3600], cunit=['deg', 'deg'],
                   ctype=['RA---TAN', 'DEC--TAN'], lonpole=180, latpole=24.333335,
                   equinox=2000.0, radesys='ICRS'):
        """Build a TAN-projection WCS centered on 'centers' (0.11 arcsec/pixel default)."""
        _w = wcs.WCS()
        _w.wcs.cdelt = cdelt
        _w.wcs.crpix = crpix
        _w.wcs.crval = centers
        _w.wcs.cunit = cunit
        _w.wcs.ctype = ctype
        _w.wcs.lonpole = lonpole
        _w.wcs.latpole = latpole
        _w.wcs.radesys = radesys
        _w.wcs.equinox = equinox
        return _w

    @staticmethod
    def read_wcs(imfile):
        """Return the WCS read from extension 1 of the given FITS file."""
        print('Getting coordinates from %s \n' % imfile)
        return wcs.WCS(fits.open(imfile)[1].header)

    @staticmethod
    def random_radec(n=10, center=[0, 0], shape=(4096, 4096), imfile=''):
        """Return n random RA-DEC pairs drawn uniformly over 'shape' pixels.

        Coordinates come from the WCS of 'imfile' when given, otherwise from
        a default WCS centered on 'center'.
        """
        _xy = np.random.rand(n, 2) * shape
        # BUG FIX: "imfile is not ''" relied on string interning.
        if imfile != '':
            _w = WingTips.read_wcs(imfile)
        else:
            _w = WingTips.create_wcs(center)
        return _w.wcs_pix2world(_xy, 1)

    @staticmethod
    def sample_radec(n=10, radec1=False, radec2=[]):
        """Sample n coordinate rows from radec2, or splice them into radec1.

        :param radec1: False (default) to just sample; if an array, n of its
            rows are overwritten in place with random rows of radec2 and the
            array is returned
        :param radec2: array of candidate RA-DEC rows
        """
        in2 = np.random.randint(0, radec2.shape[0], n)
        # BUG FIX: "if ~radec1:" is bitwise NOT -- always truthy for a bool and
        # a ValueError for an ndarray, so the replacement branch was
        # unreachable. Test the 'no radec1 given' sentinel explicitly.
        if radec1 is False:
            return radec2[in2, :]
        in1 = np.random.randint(0, radec1.shape[0], n)
        radec1[in1, :] = radec2[in2, :]
        return radec1

    @staticmethod
    def get_center(ra, dec):
        """Return [mean(ra), mean(dec)] of the given coordinate arrays."""
        return [ra.astype(float).mean(), dec.astype(float).mean()]

    @staticmethod
    def get_counts(mag, ZP, dist=0, AB_Vega=0):
        """Convert magnitudes to WFI instrument counts: 10**((mag-ZP)/-2.5).

        Default input is apparent AB magnitude.

        :param mag: magnitude(s)
        :param ZP: instrument zero point
        :param dist: distance in Mpc; when nonzero, treats 'mag' as absolute
            and applies the distance modulus 25 + 5*log10(dist)
        :param AB_Vega: AB-minus-Vega offset; when nonzero, treats 'mag' as Vega
        """
        if bool(dist):
            print('\nDistance is d = %4.2f Mpc\n' % dist)
            u = 25 + 5 * np.log10(dist)
            mag = mag + u
        if bool(AB_Vega):
            mag = mag + AB_Vega
        return 10 ** ((mag - ZP) / (-2.5))
| 34.692857 | 122 | 0.561149 |
import time
import numpy as np
from astropy import wcs
from astropy.io import fits, ascii
from astropy.table import Table
class WingTips:
def __init__(self,infile=[],center=[0,0]):
if len(infile)==0:
self.tab = np.array([])
else:
if isinstance(infile,str):
infile = [infile]
self.tab = WingTips.read_stips(infile[0])
if len(infile)>1:
for i in range(1,len(infile)):
_tab = WingTips.read_stips(infile[i])
self.tab = np.vstack((self.tab,_tab))
center = WingTips.get_center(self.tab[:,0],self.tab[:,1])
self.center = center
self.n = self.tab.shape[0]
self.infile = infile
return None
def strip_radec(self,hasID=False):
_i = int(hasID)
self.tab = np.delete(self.tab,[_i,_i+1],1)
return None
def attach_radec(self,radec,hasID=False):
if self.n != radec.shape[0]:
raise ValueError('Number of RA-DEC does not match sources')
_i = int(hasID)
self.tab = np.insert(self.tab,_i,radec.T,1)
self.center = WingTips.get_center(radec[:,0+_i],radec[:,1+_i])
return None
def replace_radec(self,radec,hasID=False):
self.strip_radec(hasID)
self.attach_radec(radec,hasID)
return None
def random_radec_for(self,other,shape=(4096,4096),sample=False,n=0,hasID=False):
_i = int(hasID)
try:
if other.endswith('.fits'):
return WingTips.random_radec(self.n,imfile=other)
except AttributeError:
if not sample:
return WingTips.random_radec(self.n,center=other.center)
elif not bool(n):
return WingTips.sample_radec(n=self.n,radec1=False,radec2=other.tab[:,_i:_i+1])
else:
return WingTips.sample_radec(n=n,radec1=self.tab[:,_i:_i+1],radec2=other.tab[:,_i:_i+1])
def merge_with(self,other,hasRADEC=True,hasID=False):
if self.tab.shape[1]!=other.tab.shape[1]:
raise ValueError('Number of columns does not match',self.tab.shape[1],other.tab.shape[1])
self.tab = np.vstack((self.tab,other.tab))
self.n = self.tab.shape[0]
self.infile.append(other.infile)
_i = int(hasID)
if hasRADEC:
self.center = WingTips.get_center(self.tab[:,0+_i],self.tab[:,1+_i])
return None
def flux_to_Sb(self,hasRADEC=True,hasID=False):
_i = int(hasID)
if hasRADEC:
_i = _i+2
_f = self.tab[:,_i].astype(float)
_r = self.tab[:,_i+3].astype(float)
_a = self.tab[:,_i+5].astype(float)
_s = (0.5*_f) / (np.pi * _r**2 * _a)
self.tab = np.delete(self.tab,_i,1)
self.tab = np.insert(self.tab,_i,_s.T,1)
return None
def write_stips(self,outfile='temp.txt',hasID=False,hasCmnt=False,saveID=False,ipac=False):
_tab = WingTips.get_tabular(self.tab,hasID,hasCmnt,saveID)
_nms = ('id', 'ra', 'dec', 'flux', 'type', 'n', 're', 'phi', 'ratio', 'notes')
_fmt = ('%10d','%15.7f','%15.7f','%15.7f','%8s','%10.3f','%15.7f','%15.7f','%15.7f','%8s')
_t = Table(_tab, names=_nms)
if ipac:
ascii.write(_t, outfile, format='ipac', formats=dict(zip(_nms,_fmt)))
else:
ascii.write(_t, outfile, format='fixed_width', delimiter='', formats=dict(zip(_nms,_fmt)))
return print('Wrote out %s \n' % outfile)
@staticmethod
def from_scratch(flux, ra=[], dec=[], center=[], ID=[], Type=[], n=[], re=[], phi=[], ratio=[], notes=[], outfile=''):
_temp = WingTips()
_temp.n = len(flux)
_temp.infile = ['fromScratch']
if len(center)>0:
_temp.center = center
if len(ra)==0:
radec = _temp.random_radec_for(_temp)
ra,dec = radec[:,0],radec[:,1]
elif ((len(ra)==len(dec))&(len(ra)>0)):
_temp.center = WingTips.get_center(np.array(ra),np.array(dec))
else:
raise ValueError('Provide valid coordinate or center')
if ((len(Type)==0)|(Type is 'point')|(Type is 'sersic')):
if ((len(Type)==0)|(Type is 'point')):
Type = np.repeat(np.array(['point']),len(flux))
_ones = np.ones_like(flux)
n, re, phi, ratio = _ones, _ones, _ones, _ones
elif (Type=='sersic'):
Type = np.repeat(np.array(['sersic']),len(flux))
elif (len(Type)==len(flux)):
Type = np.array(Type)
_tab = np.array([ra,dec,flux,Type,n,re,phi,ratio]).T
if (len(ID)==len(flux)):
_tab=np.hstack((np.array(ID,ndmin=2).T,_tab))
if (len(notes)==len(flux)):
_tab=np.hstack((_tab,np.array(notes,ndmin=2).T))
_temp.tab = np.array(_tab)
if outfile is '':
return _temp
else:
_temp.write_stips(outfile,hasID=bool(ID),hasCmnt=bool(notes),saveID=bool(ID))
return None
@staticmethod
def read_stips(infile,getRADEC=True,getID=False,getCmnt=False):
_tab = []
_infile = ascii.read(infile)
print('\nRead in %s \n' % infile)
if getID:
_tab.append(_infile['id'])
if getRADEC:
_tab.append(_infile['ra'])
_tab.append(_infile['dec'])
_tab.append(_infile['flux'])
_tab.append(_infile['type'])
_tab.append(_infile['n'])
_tab.append(_infile['re'])
_tab.append(_infile['phi'])
_tab.append(_infile['ratio'])
if getCmnt:
_tab.append(_infile['comment'])
return np.array(_tab).T
@staticmethod
def get_tabular(_tab,hasID=False,hasCmnt=False,saveID=False):
_i = int(hasID)
if ~saveID:
_n = _tab.shape[0]
_ID = np.array(np.linspace(1,_n,_n),ndmin=2).T
_tab = np.hstack((_ID,_tab[:,_i:]))
if ~hasCmnt:
_cmnt = np.array(np.repeat(np.array(['comment']),_tab.shape[0],),ndmin=2).T
_tab = np.hstack((_tab,_cmnt))
return [_tab[:,0].astype(float), _tab[:,1].astype(float), _tab[:,2].astype(float), \
_tab[:,3].astype(float), _tab[:,4], _tab[:,5].astype(float), \
_tab[:,6].astype(float), _tab[:,7].astype(float), \
_tab[:,8].astype(float), _tab[:,9]]
@staticmethod
def create_wcs(centers=[0,0],crpix=[2048,2048],cdelt=[-0.11/3600,0.11/3600],cunit=['deg','deg'],\
ctype=['RA---TAN','DEC--TAN'],lonpole=180,latpole=24.333335,\
equinox=2000.0,radesys='ICRS'):
_w = wcs.WCS()
_w.wcs.cdelt = cdelt
_w.wcs.crpix = crpix
_w.wcs.crval = centers
_w.wcs.cunit = cunit
_w.wcs.ctype = ctype
_w.wcs.lonpole = lonpole
_w.wcs.latpole = latpole
_w.wcs.radesys = radesys
_w.wcs.equinox = equinox
return _w
@staticmethod
def read_wcs(imfile):
print('Getting coordinates from %s \n' % imfile)
return wcs.WCS(fits.open(imfile)[1].header)
@staticmethod
def random_radec(n=10,center=[0,0],shape=(4096,4096),imfile=''):
_xy = np.random.rand(n,2)*shape
if imfile is not '':
_w = WingTips.read_wcs(imfile)
else:
_w = WingTips.create_wcs(center)
return _w.wcs_pix2world(_xy,1)
@staticmethod
def sample_radec(n=10,radec1=False,radec2=[]):
in2 = np.random.randint(0,radec2.shape[0],n)
if ~radec1:
return radec2[in2,:]
else:
in1 = np.random.randint(0,radec1.shape[0],n)
radec1[in1,:] = radec2[in2,:]
return radec1
@staticmethod
def get_center(ra,dec):
return [ra.astype(float).mean(),dec.astype(float).mean()]
@staticmethod
def get_counts(mag,ZP,dist=0,AB_Vega=0):
if bool(dist):
print('\nDistance is d = %4.2f Mpc\n' % dist)
u = 25+5*np.log10(dist)
mag = mag+u
if bool(AB_Vega):
mag = mag + AB_Vega
return 10**((mag-ZP)/(-2.5))
| true | true |
1c3bb0e2e1d90d4d7e41b9417c279b9e455b1708 | 30,144 | py | Python | python-sc2/sc2/units.py | manaccac/sc2_bot | 3aa8b3711378b71fd0a44662cdd7148846e39530 | [
"MIT"
] | 9 | 2020-04-28T12:12:23.000Z | 2022-03-14T03:45:45.000Z | python-sc2/sc2/units.py | manaccac/sc2_bot | 3aa8b3711378b71fd0a44662cdd7148846e39530 | [
"MIT"
] | 5 | 2021-03-30T05:10:49.000Z | 2022-01-13T04:32:05.000Z | python-sc2/sc2/units.py | manaccac/sc2_bot | 3aa8b3711378b71fd0a44662cdd7148846e39530 | [
"MIT"
] | 4 | 2020-04-28T12:14:58.000Z | 2022-02-22T07:15:28.000Z | from __future__ import annotations
import random
import warnings
import math
from itertools import chain
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union, Generator, TYPE_CHECKING
from .ids.unit_typeid import UnitTypeId
from .position import Point2, Point3
from .unit import Unit
import numpy as np
# Emit each distinct warning only once per process to avoid log spam from
# repeated deprecation/user warnings raised inside the Units helpers.
warnings.simplefilter("once")
if TYPE_CHECKING:
    from .bot_ai import BotAI
class Units(list):
"""A collection of Unit objects. Makes it easy to select units by selectors."""
@classmethod
def from_proto(cls, units, bot_object: BotAI):
return cls((Unit(u, bot_object=bot_object) for u in units))
    def __init__(self, units, bot_object: BotAI):
        """
        :param units: iterable of Unit objects this collection will hold
        :param bot_object: owning BotAI instance, used by the distance helpers
        """
        super().__init__(units)
        self._bot_object = bot_object
    def __call__(self, *args, **kwargs):
        # Shorthand for .select(): filters the collection via UnitSelection,
        # e.g. self.units(UnitTypeId.MARINE).
        return UnitSelection(self, *args, **kwargs)
def __iter__(self) -> Generator[Unit, None, None]:
return (item for item in super().__iter__())
    def select(self, *args, **kwargs):
        # Explicit alias of __call__: filter this collection via UnitSelection.
        return UnitSelection(self, *args, **kwargs)
def copy(self):
return self.subgroup(self)
def __or__(self, other: Units) -> Units:
return Units(
chain(
iter(self),
(other_unit for other_unit in other if other_unit.tag not in (self_unit.tag for self_unit in self)),
),
self._bot_object,
)
def __add__(self, other: Units) -> Units:
return Units(
chain(
iter(self),
(other_unit for other_unit in other if other_unit.tag not in (self_unit.tag for self_unit in self)),
),
self._bot_object,
)
def __and__(self, other: Units) -> Units:
return Units(
(other_unit for other_unit in other if other_unit.tag in (self_unit.tag for self_unit in self)),
self._bot_object,
)
def __sub__(self, other: Units) -> Units:
return Units(
(self_unit for self_unit in self if self_unit.tag not in (other_unit.tag for other_unit in other)),
self._bot_object,
)
def __hash__(self):
return hash(unit.tag for unit in self)
    @property
    def amount(self) -> int:
        """Number of units in the collection (same as len(self))."""
        return len(self)
    @property
    def empty(self) -> bool:
        """True if the collection holds no units."""
        return not bool(self)
    @property
    def exists(self) -> bool:
        """True if the collection holds at least one unit (inverse of .empty)."""
        return bool(self)
def find_by_tag(self, tag) -> Optional[Unit]:
for unit in self:
if unit.tag == tag:
return unit
return None
def by_tag(self, tag):
unit = self.find_by_tag(tag)
if unit is None:
raise KeyError("Unit not found")
return unit
    @property
    def first(self) -> Unit:
        """The first unit in the collection; raises AssertionError if empty."""
        assert self, "Units object is empty"
        return self[0]
def take(self, n: int) -> Units:
if n >= self.amount:
return self
else:
return self.subgroup(self[:n])
    @property
    def random(self) -> Unit:
        """A uniformly random unit; raises AssertionError if the collection is empty."""
        assert self, "Units object is empty"
        return random.choice(self)
    def random_or(self, other: any) -> Unit:
        # Like .random, but falls back to 'other' instead of asserting when empty.
        return random.choice(self) if self else other
    def random_group_of(self, n: int) -> Units:
        """Return a uniform random sample (without replacement) of n units.

        Returns an empty collection for n < 1 and self itself if n >= self.amount.

        :param n:
        """
        if n < 1:
            return Units([], self._bot_object)
        elif n >= self.amount:
            return self
        else:
            return self.subgroup(random.sample(self, n))
# TODO: append, insert, remove, pop and extend functions should reset the cache for Units.positions because the number of units in the list has changed
# @property_immutable_cache
# def positions(self) -> np.ndarray:
# flat_units_positions = (coord for unit in self for coord in unit.position)
# unit_positions_np = np.fromiter(flat_units_positions, dtype=float, count=2 * len(self)).reshape((len(self), 2))
# return unit_positions_np
    def in_attack_range_of(self, unit: Unit, bonus_distance: Union[int, float] = 0) -> Units:
        """
        Filters units that are in attack range of the given unit.
        This uses the unit and target unit.radius when calculating the distance, so it should be accurate.
        Caution: This may not work well for static structures (bunker, sieged tank, planetary fortress, photon cannon, spine and spore crawler) because it seems attack ranges differ for static / immovable units.

        Example::

            enemy_zerglings = self.enemy_units(UnitTypeId.ZERGLING)
            my_marine = next((unit for unit in self.units if unit.type_id == UnitTypeId.MARINE), None)
            if my_marine:
                all_zerglings_my_marine_can_attack = enemy_zerglings.in_attack_range_of(my_marine)

        Air/ground reachability is decided by unit.target_in_range, so e.g.
        flying targets are excluded for an attacker that cannot shoot air.

        :param unit: the attacking unit
        :param bonus_distance: extra range added on top of the attacker's attack range
        """
        return self.filter(lambda x: unit.target_in_range(x, bonus_distance=bonus_distance))
    def closest_distance_to(self, position: Union[Unit, Point2, Point3]) -> float:
        """
        Returns the distance between the closest unit of this group and the target unit or position.

        Example::

            enemy_zerglings = self.enemy_units(UnitTypeId.ZERGLING)
            my_marine = next((unit for unit in self.units if unit.type_id == UnitTypeId.MARINE), None)
            if my_marine:
                closest_zergling_distance = enemy_zerglings.closest_distance_to(my_marine)

        :param position: a Unit (unit-to-unit distance) or a 2D/3D point
        """
        assert self, "Units object is empty"
        if isinstance(position, Unit):
            # Squared distances order identically, so the sqrt is taken once.
            return min(self._bot_object._distance_squared_unit_to_unit(unit, position) for unit in self) ** 0.5
        return min(self._bot_object._distance_units_to_pos(self, position))
    def furthest_distance_to(self, position: Union[Unit, Point2, Point3]) -> float:
        """
        Returns the distance between the furthest unit of this group and the target unit or position.

        Example::

            enemy_zerglings = self.enemy_units(UnitTypeId.ZERGLING)
            my_marine = next((unit for unit in self.units if unit.type_id == UnitTypeId.MARINE), None)
            if my_marine:
                furthest_zergling_distance = enemy_zerglings.furthest_distance_to(my_marine)

        :param position: a Unit (unit-to-unit distance) or a 2D/3D point
        """
        assert self, "Units object is empty"
        if isinstance(position, Unit):
            # Squared distances order identically, so the sqrt is taken once.
            return max(self._bot_object._distance_squared_unit_to_unit(unit, position) for unit in self) ** 0.5
        return max(self._bot_object._distance_units_to_pos(self, position))
    def closest_to(self, position: Union[Unit, Point2, Point3]) -> Unit:
        """
        Returns the closest unit (from this Units object) to the target unit or position.

        Example::

            enemy_zerglings = self.enemy_units(UnitTypeId.ZERGLING)
            my_marine = next((unit for unit in self.units if unit.type_id == UnitTypeId.MARINE), None)
            if my_marine:
                closest_zergling = enemy_zerglings.closest_to(my_marine)

        :param position: a Unit or a 2D/3D point
        """
        assert self, "Units object is empty"
        if isinstance(position, Unit):
            return min(
                (unit1 for unit1 in self),
                key=lambda unit2: self._bot_object._distance_squared_unit_to_unit(unit2, position),
            )
        # Position branch: distances are computed once in bulk, then each unit
        # is paired with its distance and the minimum pair is selected.
        distances = self._bot_object._distance_units_to_pos(self, position)
        return min(((unit, dist) for unit, dist in zip(self, distances)), key=lambda my_tuple: my_tuple[1])[0]
    def furthest_to(self, position: Union[Unit, Point2, Point3]) -> Unit:
        """
        Returns the furthest unit (from this Units object) to the target unit or position.

        Example::

            enemy_zerglings = self.enemy_units(UnitTypeId.ZERGLING)
            my_marine = next((unit for unit in self.units if unit.type_id == UnitTypeId.MARINE), None)
            if my_marine:
                furthest_zergling = enemy_zerglings.furthest_to(my_marine)

        :param position: a Unit or a 2D/3D point
        """
        assert self, "Units object is empty"
        if isinstance(position, Unit):
            return max(
                (unit1 for unit1 in self),
                key=lambda unit2: self._bot_object._distance_squared_unit_to_unit(unit2, position),
            )
        # Position branch: distances are computed once in bulk, then each unit
        # is paired with its distance and the maximum pair is selected.
        distances = self._bot_object._distance_units_to_pos(self, position)
        return max(((unit, dist) for unit, dist in zip(self, distances)), key=lambda my_tuple: my_tuple[1])[0]
    def closer_than(self, distance: Union[int, float], position: Union[Unit, Point2, Point3]) -> Units:
        """
        Returns all units (from this Units object) that are closer than 'distance' away from target unit or position
        (unit radii are not included in the calculation).

        Example::

            enemy_zerglings = self.enemy_units(UnitTypeId.ZERGLING)
            my_marine = next((unit for unit in self.units if unit.type_id == UnitTypeId.MARINE), None)
            if my_marine:
                close_zerglings = enemy_zerglings.closer_than(3, my_marine)

        :param distance: strict upper bound on the distance
        :param position: a Unit or a 2D/3D point
        """
        if not self:
            return self
        if isinstance(position, Unit):
            # Compare squared distances to avoid a sqrt per unit.
            distance_squared = distance ** 2
            return self.subgroup(
                unit
                for unit in self
                if self._bot_object._distance_squared_unit_to_unit(unit, position) < distance_squared
            )
        distances = self._bot_object._distance_units_to_pos(self, position)
        return self.subgroup(unit for unit, dist in zip(self, distances) if dist < distance)
    def further_than(self, distance: Union[int, float], position: Union[Unit, Point2, Point3]) -> Units:
        """
        Returns all units (from this Units object) that are further than 'distance' away from target unit or position
        (unit radii are not included in the calculation).

        Example::

            enemy_zerglings = self.enemy_units(UnitTypeId.ZERGLING)
            my_marine = next((unit for unit in self.units if unit.type_id == UnitTypeId.MARINE), None)
            if my_marine:
                far_zerglings = enemy_zerglings.further_than(3, my_marine)

        :param distance: strict lower bound on the distance
        :param position: a Unit or a 2D/3D point
        """
        if not self:
            return self
        if isinstance(position, Unit):
            # Compare squared distances to avoid a sqrt per unit.
            distance_squared = distance ** 2
            return self.subgroup(
                unit
                for unit in self
                if distance_squared < self._bot_object._distance_squared_unit_to_unit(unit, position)
            )
        distances = self._bot_object._distance_units_to_pos(self, position)
        return self.subgroup(unit for unit, dist in zip(self, distances) if distance < dist)
    def in_distance_between(
        self, position: Union[Unit, Point2, Tuple[float, float]], distance1: float, distance2: float
    ) -> Units:
        """
        Returns units that are further than distance1 and closer than distance2 to unit or position
        (both bounds strict; unit radii are not included in the calculation).

        Example::

            enemy_zerglings = self.enemy_units(UnitTypeId.ZERGLING)
            my_marine = next((unit for unit in self.units if unit.type_id == UnitTypeId.MARINE), None)
            if my_marine:
                zerglings_filtered = enemy_zerglings.in_distance_between(my_marine, 3, 5)

        :param position: a Unit or a 2D point
        :param distance1: strict lower bound
        :param distance2: strict upper bound
        """
        if not self:
            return self
        if isinstance(position, Unit):
            # Compare squared distances to avoid a sqrt per unit.
            distance1_squared = distance1 ** 2
            distance2_squared = distance2 ** 2
            return self.subgroup(
                unit
                for unit in self
                if distance1_squared
                < self._bot_object._distance_squared_unit_to_unit(unit, position)
                < distance2_squared
            )
        distances = self._bot_object._distance_units_to_pos(self, position)
        return self.subgroup(unit for unit, dist in zip(self, distances) if distance1 < dist < distance2)
    def closest_n_units(self, position: Union[Unit, Point2], n: int) -> Units:
        """
        Returns the n closest units in distance to position.

        Example::

            enemy_zerglings = self.enemy_units(UnitTypeId.ZERGLING)
            my_marine = next((unit for unit in self.units if unit.type_id == UnitTypeId.MARINE), None)
            if my_marine:
                zerglings_filtered = enemy_zerglings.closest_n_units(my_marine, 5)

        :param position: a Unit or a 2D point
        :param n: number of units to return (fewer if the collection is smaller)
        """
        if not self:
            return self
        return self.subgroup(self._list_sorted_by_distance_to(position)[:n])
    def furthest_n_units(self, position: Union[Unit, Point2, np.ndarray], n: int) -> Units:
        """
        Returns the n furthest units in distance to position.

        Example::

            enemy_zerglings = self.enemy_units(UnitTypeId.ZERGLING)
            my_marine = next((unit for unit in self.units if unit.type_id == UnitTypeId.MARINE), None)
            if my_marine:
                zerglings_filtered = enemy_zerglings.furthest_n_units(my_marine, 5)

        :param position: a Unit or a 2D point
        :param n: number of units to return (fewer if the collection is smaller)
        """
        if not self:
            return self
        return self.subgroup(self._list_sorted_by_distance_to(position)[-n:])
    def in_distance_of_group(self, other_units: Units, distance: float) -> Units:
        """ Returns units that are closer than distance from any unit in the other units object.

        :param other_units: non-empty Units collection to measure against
        :param distance: strict upper bound on the unit-to-unit distance
        """
        assert other_units, "Other units object is empty"
        # Return self because there are no enemies
        if not self:
            return self
        # Compare squared distances so no sqrt is needed per pair.
        distance_squared = distance ** 2
        if len(self) == 1:
            # Single-unit fast path: return self directly instead of building
            # a one-element subgroup when the unit qualifies.
            if any(
                self._bot_object._distance_squared_unit_to_unit(self[0], target) < distance_squared
                for target in other_units
            ):
                return self
            else:
                return self.subgroup([])
        return self.subgroup(
            self_unit
            for self_unit in self
            if any(
                self._bot_object._distance_squared_unit_to_unit(self_unit, other_unit) < distance_squared
                for other_unit in other_units
            )
        )
    def in_closest_distance_to_group(self, other_units: Units) -> Unit:
        """
        Returns unit in shortest distance from any unit in self to any unit in group.

        Loops over all units in self, then loops over all units in other_units and calculates the shortest distance. Returns the unit of self that is closest to any unit of 'other_units'.

        :param other_units: non-empty Units collection to measure against
        """
        assert self, "Units object is empty"
        assert other_units, "Given units object is empty"
        # Squared distances order identically to real distances, so no sqrt.
        return min(
            self,
            key=lambda self_unit: min(
                self._bot_object._distance_squared_unit_to_unit(self_unit, other_unit) for other_unit in other_units
            ),
        )
def _list_sorted_closest_to_distance(self, position: Union[Unit, Point2], distance: float) -> List[Unit]:
""" This function should be a bit faster than using units.sorted(key=lambda u: u.distance_to(position)) """
if isinstance(position, Unit):
return sorted(
self,
key=lambda unit: abs(self._bot_object._distance_squared_unit_to_unit(unit, position) - distance),
reverse=True,
)
distances = self._bot_object._distance_units_to_pos(self, position)
unit_dist_dict = {unit.tag: dist for unit, dist in zip(self, distances)}
return sorted(self, key=lambda unit2: abs(unit_dist_dict[unit2.tag] - distance), reverse=True)
    def n_closest_to_distance(self, position: Union[Point2, np.ndarray], distance: Union[int, float], n: int) -> Units:
        """ Returns n units that are the closest to distance away.
        For example if the distance is set to 5 and you want 3 units, from units with distance [3, 4, 5, 6, 7] to position,
        the units with distance [4, 5, 6] will be returned

        :param position:
        :param distance:
        :param n:
        """
        # NOTE(review): _list_sorted_closest_to_distance sorts with
        # reverse=True, so [:n] here takes the LARGEST deviations from
        # 'distance', not the smallest as documented above -- verify.
        return self.subgroup(self._list_sorted_closest_to_distance(position=position, distance=distance)[:n])
    def n_furthest_to_distance(self, position: Union[Point2, np.ndarray], distance: Union[int, float], n: int) -> Units:
        """ Inverse of the function 'n_closest_to_distance', returns the furthest units instead """
        # NOTE(review): with the helper's reverse=True sort, [-n:] here takes
        # the SMALLEST deviations from 'distance' -- verify against intent.
        return self.subgroup(self._list_sorted_closest_to_distance(position=position, distance=distance)[-n:])
    def subgroup(self, units):
        """
        Creates a new mutable Units object from a Units or list object,
        bound to the same bot object as this collection.

        :param units: iterable of Unit objects
        """
        return Units(units, self._bot_object)
    def filter(self, pred: callable) -> Units:
        """
        Filters the current Units object and returns a new Units object.

        Example::

            from sc2.ids.unit_typeid import UnitTypeId
            my_marines = self.units.filter(lambda unit: unit.type_id == UnitTypeId.MARINE)

            completed_structures = self.structures.filter(lambda structure: structure.is_ready)

            queens_with_energy_to_inject = self.units.filter(lambda unit: unit.type_id == UnitTypeId.QUEEN and unit.energy >= 25)

            orbitals_with_energy_to_mule = self.structures.filter(lambda structure: structure.type_id == UnitTypeId.ORBITALCOMMAND and structure.energy >= 50)

            my_units_that_can_shoot_up = self.units.filter(lambda unit: unit.can_attack_air)

        See more unit properties in unit.py

        :param pred: predicate called once per unit; truthy return keeps the unit
        """
        assert callable(pred), "Function is not callable"
        return self.subgroup(filter(pred, self))
def sorted(self, key: callable, reverse: bool = False) -> Units:
return self.subgroup(sorted(self, key=key, reverse=reverse))
    def _list_sorted_by_distance_to(self, position: Union[Unit, Point2], reverse: bool = False) -> List[Unit]:
        """ This function should be a bit faster than using units.sorted(key=lambda u: u.distance_to(position)) """
        if isinstance(position, Unit):
            # Squared distances order identically to real distances, so no
            # square root is needed for sorting.
            return sorted(
                self, key=lambda unit: self._bot_object._distance_squared_unit_to_unit(unit, position), reverse=reverse
            )
        # Position branch: compute all distances once in bulk, then sort by a
        # tag -> distance lookup instead of recomputing per comparison.
        distances = self._bot_object._distance_units_to_pos(self, position)
        unit_dist_dict = {unit.tag: dist for unit, dist in zip(self, distances)}
        return sorted(self, key=lambda unit2: unit_dist_dict[unit2.tag], reverse=reverse)
    def sorted_by_distance_to(self, position: Union[Unit, Point2], reverse: bool = False) -> Units:
        """ This function should be a bit faster than using units.sorted(key=lambda u: u.distance_to(position)) """
        return self.subgroup(self._list_sorted_by_distance_to(position, reverse=reverse))
    def tags_in(self, other: Union[Set[int], List[int], Dict[int, Any]]) -> Units:
        """ Filters all units that have their tags in the 'other' set/list/dict

        Example::

            my_inject_queens = self.units.tags_in(self.queen_tags_assigned_to_do_injects)

        :param other: container of tags; prefer a set/dict for O(1) membership tests
        """
        return self.filter(lambda unit: unit.tag in other)
    def tags_not_in(self, other: Union[Set[int], List[int], Dict[int, Any]]) -> Units:
        """
        Filters all units that have their tags not in the 'other' set/list/dict

        Example::

            my_non_inject_queens = self.units.tags_not_in(self.queen_tags_assigned_to_do_injects)

        :param other: container of tags; prefer a set/dict for O(1) membership tests
        """
        return self.filter(lambda unit: unit.tag not in other)
    def of_type(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> Units:
        """
        Filters all units that are of a specific type

        Example::

            # Use a set instead of lists in the argument
            some_attack_units = self.units.of_type({ZERGLING, ROACH, HYDRALISK, BROODLORD})

        :param other: a single UnitTypeId or a container of them
        """
        # Normalize scalar/list inputs to a set for O(1) membership tests.
        if isinstance(other, UnitTypeId):
            other = {other}
        elif isinstance(other, list):
            other = set(other)
        return self.filter(lambda unit: unit.type_id in other)
def exclude_type(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> Units:
"""
Filters all units that are not of a specific type
Example::
# Use a set instead of lists in the argument
ignore_units = self.enemy_units.exclude_type({LARVA, EGG, OVERLORD})
:param other: """
if isinstance(other, UnitTypeId):
other = {other}
elif isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.type_id not in other)
def same_tech(self, other: Set[UnitTypeId]) -> Units:
"""
Returns all structures that have the same base structure.
Untested: This should return the equivalents for WarpPrism, Observer, Overseer, SupplyDepot and others
Example::
# All command centers, flying command centers, orbital commands, flying orbital commands, planetary fortress
terran_townhalls = self.townhalls.same_tech(UnitTypeId.COMMANDCENTER)
# All hatcheries, lairs and hives
zerg_townhalls = self.townhalls.same_tech({UnitTypeId.HATCHERY})
# All spires and greater spires
spires = self.townhalls.same_tech({UnitTypeId.SPIRE})
# The following returns the same
spires = self.townhalls.same_tech({UnitTypeId.GREATERSPIRE})
# This also works with multiple unit types
zerg_townhalls_and_spires = self.structures.same_tech({UnitTypeId.HATCHERY, UnitTypeId.SPIRE})
:param other:
"""
assert isinstance(other, set), (
f"Please use a set as this filter function is already fairly slow. For example"
+ " 'self.units.same_tech({UnitTypeId.LAIR})'"
)
tech_alias_types: Set[int] = {u.value for u in other}
unit_data = self._bot_object._game_data.units
for unitType in other:
for same in unit_data[unitType.value]._proto.tech_alias:
tech_alias_types.add(same)
return self.filter(
lambda unit: unit._proto.unit_type in tech_alias_types
or any(same in tech_alias_types for same in unit._type_data._proto.tech_alias)
)
def same_unit(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> Units:
"""
Returns all units that have the same base unit while being in different modes.
Untested: This should return the equivalents for WarpPrism, Observer, Overseer, SupplyDepot and other units that have different modes but still act as the same unit
Example::
# All command centers on the ground and flying
ccs = self.townhalls.same_unit(UnitTypeId.COMMANDCENTER)
# All orbital commands on the ground and flying
ocs = self.townhalls.same_unit(UnitTypeId.ORBITALCOMMAND)
# All roaches and burrowed roaches
roaches = self.units.same_unit(UnitTypeId.ROACH)
# This is useful because roach has a different type id when burrowed
burrowed_roaches = self.units(UnitTypeId.ROACHBURROWED)
:param other:
"""
if isinstance(other, UnitTypeId):
other = {other}
unit_alias_types: Set[int] = {u.value for u in other}
unit_data = self._bot_object._game_data.units
for unitType in other:
unit_alias_types.add(unit_data[unitType.value]._proto.unit_alias)
unit_alias_types.discard(0)
return self.filter(
lambda unit: unit._proto.unit_type in unit_alias_types
or unit._type_data._proto.unit_alias in unit_alias_types
)
@property
def center(self) -> Point2:
""" Returns the central position of all units. """
assert self, f"Units object is empty"
amount = self.amount
return Point2(
(sum(unit._proto.pos.x for unit in self) / amount, sum(unit._proto.pos.y for unit in self) / amount,)
)
@property
def selected(self) -> Units:
""" Returns all units that are selected by the human player. """
return self.filter(lambda unit: unit.is_selected)
@property
def tags(self) -> Set[int]:
""" Returns all unit tags as a set. """
return {unit.tag for unit in self}
@property
def ready(self) -> Units:
""" Returns all structures that are ready (construction complete). """
return self.filter(lambda unit: unit.is_ready)
@property
def not_ready(self) -> Units:
""" Returns all structures that are not ready (construction not complete). """
return self.filter(lambda unit: not unit.is_ready)
@property
def idle(self) -> Units:
""" Returns all units or structures that are doing nothing (unit is standing still, structure is doing nothing). """
return self.filter(lambda unit: unit.is_idle)
@property
def owned(self) -> Units:
""" Deprecated: All your units. """
return self.filter(lambda unit: unit.is_mine)
@property
def enemy(self) -> Units:
""" Deprecated: All enemy units."""
return self.filter(lambda unit: unit.is_enemy)
@property
def flying(self) -> Units:
""" Returns all units that are flying. """
return self.filter(lambda unit: unit.is_flying)
@property
def not_flying(self) -> Units:
""" Returns all units that not are flying. """
return self.filter(lambda unit: not unit.is_flying)
@property
def structure(self) -> Units:
""" Deprecated: All structures. """
return self.filter(lambda unit: unit.is_structure)
@property
def not_structure(self) -> Units:
""" Deprecated: All units that are not structures. """
return self.filter(lambda unit: not unit.is_structure)
@property
def gathering(self) -> Units:
""" Returns all workers that are mining minerals or vespene (gather command). """
return self.filter(lambda unit: unit.is_gathering)
@property
def returning(self) -> Units:
""" Returns all workers that are carrying minerals or vespene and are returning to a townhall. """
return self.filter(lambda unit: unit.is_returning)
@property
def collecting(self) -> Units:
""" Returns all workers that are mining or returning resources. """
return self.filter(lambda unit: unit.is_collecting)
@property
def visible(self) -> Units:
""" Returns all units or structures that are visible.
TODO: add proper description on which units are exactly visible (not snapshots?) """
return self.filter(lambda unit: unit.is_visible)
@property
def mineral_field(self) -> Units:
""" Returns all units that are mineral fields. """
return self.filter(lambda unit: unit.is_mineral_field)
@property
def vespene_geyser(self) -> Units:
""" Returns all units that are vespene geysers. """
return self.filter(lambda unit: unit.is_vespene_geyser)
@property
def prefer_idle(self) -> Units:
""" Sorts units based on if they are idle. Idle units come first. """
return self.sorted(lambda unit: unit.is_idle, reverse=True)
class UnitSelection(Units):
def __init__(self, parent, selection=None):
if isinstance(selection, (UnitTypeId)):
super().__init__((unit for unit in parent if unit.type_id == selection), parent._bot_object)
elif isinstance(selection, set):
assert all(isinstance(t, UnitTypeId) for t in selection), f"Not all ids in selection are of type UnitTypeId"
super().__init__((unit for unit in parent if unit.type_id in selection), parent._bot_object)
elif selection is None:
super().__init__((unit for unit in parent), parent._bot_object)
else:
assert isinstance(
selection, (UnitTypeId, set)
), f"selection is not None or of type UnitTypeId or Set[UnitTypeId]"
| 41.236662 | 211 | 0.640293 | from __future__ import annotations
import random
import warnings
import math
from itertools import chain
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union, Generator, TYPE_CHECKING
from .ids.unit_typeid import UnitTypeId
from .position import Point2, Point3
from .unit import Unit
import numpy as np
warnings.simplefilter("once")
if TYPE_CHECKING:
from .bot_ai import BotAI
class Units(list):
@classmethod
def from_proto(cls, units, bot_object: BotAI):
return cls((Unit(u, bot_object=bot_object) for u in units))
def __init__(self, units, bot_object: BotAI):
super().__init__(units)
self._bot_object = bot_object
def __call__(self, *args, **kwargs):
return UnitSelection(self, *args, **kwargs)
def __iter__(self) -> Generator[Unit, None, None]:
return (item for item in super().__iter__())
def select(self, *args, **kwargs):
return UnitSelection(self, *args, **kwargs)
def copy(self):
return self.subgroup(self)
def __or__(self, other: Units) -> Units:
return Units(
chain(
iter(self),
(other_unit for other_unit in other if other_unit.tag not in (self_unit.tag for self_unit in self)),
),
self._bot_object,
)
def __add__(self, other: Units) -> Units:
return Units(
chain(
iter(self),
(other_unit for other_unit in other if other_unit.tag not in (self_unit.tag for self_unit in self)),
),
self._bot_object,
)
def __and__(self, other: Units) -> Units:
return Units(
(other_unit for other_unit in other if other_unit.tag in (self_unit.tag for self_unit in self)),
self._bot_object,
)
def __sub__(self, other: Units) -> Units:
return Units(
(self_unit for self_unit in self if self_unit.tag not in (other_unit.tag for other_unit in other)),
self._bot_object,
)
def __hash__(self):
return hash(unit.tag for unit in self)
@property
def amount(self) -> int:
return len(self)
@property
def empty(self) -> bool:
return not bool(self)
@property
def exists(self) -> bool:
return bool(self)
def find_by_tag(self, tag) -> Optional[Unit]:
for unit in self:
if unit.tag == tag:
return unit
return None
def by_tag(self, tag):
unit = self.find_by_tag(tag)
if unit is None:
raise KeyError("Unit not found")
return unit
@property
def first(self) -> Unit:
assert self, "Units object is empty"
return self[0]
def take(self, n: int) -> Units:
if n >= self.amount:
return self
else:
return self.subgroup(self[:n])
@property
def random(self) -> Unit:
assert self, "Units object is empty"
return random.choice(self)
def random_or(self, other: any) -> Unit:
return random.choice(self) if self else other
def random_group_of(self, n: int) -> Units:
if n < 1:
return Units([], self._bot_object)
elif n >= self.amount:
return self
else:
return self.subgroup(random.sample(self, n))
def in_attack_range_of(self, unit: Unit, bonus_distance: Union[int, float] = 0) -> Units:
return self.filter(lambda x: unit.target_in_range(x, bonus_distance=bonus_distance))
def closest_distance_to(self, position: Union[Unit, Point2, Point3]) -> float:
assert self, "Units object is empty"
if isinstance(position, Unit):
return min(self._bot_object._distance_squared_unit_to_unit(unit, position) for unit in self) ** 0.5
return min(self._bot_object._distance_units_to_pos(self, position))
def furthest_distance_to(self, position: Union[Unit, Point2, Point3]) -> float:
assert self, "Units object is empty"
if isinstance(position, Unit):
return max(self._bot_object._distance_squared_unit_to_unit(unit, position) for unit in self) ** 0.5
return max(self._bot_object._distance_units_to_pos(self, position))
def closest_to(self, position: Union[Unit, Point2, Point3]) -> Unit:
assert self, "Units object is empty"
if isinstance(position, Unit):
return min(
(unit1 for unit1 in self),
key=lambda unit2: self._bot_object._distance_squared_unit_to_unit(unit2, position),
)
distances = self._bot_object._distance_units_to_pos(self, position)
return min(((unit, dist) for unit, dist in zip(self, distances)), key=lambda my_tuple: my_tuple[1])[0]
def furthest_to(self, position: Union[Unit, Point2, Point3]) -> Unit:
assert self, "Units object is empty"
if isinstance(position, Unit):
return max(
(unit1 for unit1 in self),
key=lambda unit2: self._bot_object._distance_squared_unit_to_unit(unit2, position),
)
distances = self._bot_object._distance_units_to_pos(self, position)
return max(((unit, dist) for unit, dist in zip(self, distances)), key=lambda my_tuple: my_tuple[1])[0]
def closer_than(self, distance: Union[int, float], position: Union[Unit, Point2, Point3]) -> Units:
if not self:
return self
if isinstance(position, Unit):
distance_squared = distance ** 2
return self.subgroup(
unit
for unit in self
if self._bot_object._distance_squared_unit_to_unit(unit, position) < distance_squared
)
distances = self._bot_object._distance_units_to_pos(self, position)
return self.subgroup(unit for unit, dist in zip(self, distances) if dist < distance)
def further_than(self, distance: Union[int, float], position: Union[Unit, Point2, Point3]) -> Units:
if not self:
return self
if isinstance(position, Unit):
distance_squared = distance ** 2
return self.subgroup(
unit
for unit in self
if distance_squared < self._bot_object._distance_squared_unit_to_unit(unit, position)
)
distances = self._bot_object._distance_units_to_pos(self, position)
return self.subgroup(unit for unit, dist in zip(self, distances) if distance < dist)
def in_distance_between(
self, position: Union[Unit, Point2, Tuple[float, float]], distance1: float, distance2: float
) -> Units:
if not self:
return self
if isinstance(position, Unit):
distance1_squared = distance1 ** 2
distance2_squared = distance2 ** 2
return self.subgroup(
unit
for unit in self
if distance1_squared
< self._bot_object._distance_squared_unit_to_unit(unit, position)
< distance2_squared
)
distances = self._bot_object._distance_units_to_pos(self, position)
return self.subgroup(unit for unit, dist in zip(self, distances) if distance1 < dist < distance2)
def closest_n_units(self, position: Union[Unit, Point2], n: int) -> Units:
if not self:
return self
return self.subgroup(self._list_sorted_by_distance_to(position)[:n])
def furthest_n_units(self, position: Union[Unit, Point2, np.ndarray], n: int) -> Units:
if not self:
return self
return self.subgroup(self._list_sorted_by_distance_to(position)[-n:])
def in_distance_of_group(self, other_units: Units, distance: float) -> Units:
assert other_units, "Other units object is empty"
if not self:
return self
distance_squared = distance ** 2
if len(self) == 1:
if any(
self._bot_object._distance_squared_unit_to_unit(self[0], target) < distance_squared
for target in other_units
):
return self
else:
return self.subgroup([])
return self.subgroup(
self_unit
for self_unit in self
if any(
self._bot_object._distance_squared_unit_to_unit(self_unit, other_unit) < distance_squared
for other_unit in other_units
)
)
def in_closest_distance_to_group(self, other_units: Units) -> Unit:
assert self, "Units object is empty"
assert other_units, "Given units object is empty"
return min(
self,
key=lambda self_unit: min(
self._bot_object._distance_squared_unit_to_unit(self_unit, other_unit) for other_unit in other_units
),
)
def _list_sorted_closest_to_distance(self, position: Union[Unit, Point2], distance: float) -> List[Unit]:
if isinstance(position, Unit):
return sorted(
self,
key=lambda unit: abs(self._bot_object._distance_squared_unit_to_unit(unit, position) - distance),
reverse=True,
)
distances = self._bot_object._distance_units_to_pos(self, position)
unit_dist_dict = {unit.tag: dist for unit, dist in zip(self, distances)}
return sorted(self, key=lambda unit2: abs(unit_dist_dict[unit2.tag] - distance), reverse=True)
def n_closest_to_distance(self, position: Union[Point2, np.ndarray], distance: Union[int, float], n: int) -> Units:
return self.subgroup(self._list_sorted_closest_to_distance(position=position, distance=distance)[:n])
def n_furthest_to_distance(self, position: Union[Point2, np.ndarray], distance: Union[int, float], n: int) -> Units:
return self.subgroup(self._list_sorted_closest_to_distance(position=position, distance=distance)[-n:])
def subgroup(self, units):
return Units(units, self._bot_object)
def filter(self, pred: callable) -> Units:
assert callable(pred), "Function is not callable"
return self.subgroup(filter(pred, self))
def sorted(self, key: callable, reverse: bool = False) -> Units:
return self.subgroup(sorted(self, key=key, reverse=reverse))
def _list_sorted_by_distance_to(self, position: Union[Unit, Point2], reverse: bool = False) -> List[Unit]:
if isinstance(position, Unit):
return sorted(
self, key=lambda unit: self._bot_object._distance_squared_unit_to_unit(unit, position), reverse=reverse
)
distances = self._bot_object._distance_units_to_pos(self, position)
unit_dist_dict = {unit.tag: dist for unit, dist in zip(self, distances)}
return sorted(self, key=lambda unit2: unit_dist_dict[unit2.tag], reverse=reverse)
def sorted_by_distance_to(self, position: Union[Unit, Point2], reverse: bool = False) -> Units:
return self.subgroup(self._list_sorted_by_distance_to(position, reverse=reverse))
def tags_in(self, other: Union[Set[int], List[int], Dict[int, Any]]) -> Units:
return self.filter(lambda unit: unit.tag in other)
def tags_not_in(self, other: Union[Set[int], List[int], Dict[int, Any]]) -> Units:
return self.filter(lambda unit: unit.tag not in other)
def of_type(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> Units:
if isinstance(other, UnitTypeId):
other = {other}
elif isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.type_id in other)
def exclude_type(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> Units:
if isinstance(other, UnitTypeId):
other = {other}
elif isinstance(other, list):
other = set(other)
return self.filter(lambda unit: unit.type_id not in other)
def same_tech(self, other: Set[UnitTypeId]) -> Units:
assert isinstance(other, set), (
f"Please use a set as this filter function is already fairly slow. For example"
+ " 'self.units.same_tech({UnitTypeId.LAIR})'"
)
tech_alias_types: Set[int] = {u.value for u in other}
unit_data = self._bot_object._game_data.units
for unitType in other:
for same in unit_data[unitType.value]._proto.tech_alias:
tech_alias_types.add(same)
return self.filter(
lambda unit: unit._proto.unit_type in tech_alias_types
or any(same in tech_alias_types for same in unit._type_data._proto.tech_alias)
)
def same_unit(self, other: Union[UnitTypeId, Set[UnitTypeId], List[UnitTypeId], Dict[UnitTypeId, Any]]) -> Units:
if isinstance(other, UnitTypeId):
other = {other}
unit_alias_types: Set[int] = {u.value for u in other}
unit_data = self._bot_object._game_data.units
for unitType in other:
unit_alias_types.add(unit_data[unitType.value]._proto.unit_alias)
unit_alias_types.discard(0)
return self.filter(
lambda unit: unit._proto.unit_type in unit_alias_types
or unit._type_data._proto.unit_alias in unit_alias_types
)
@property
def center(self) -> Point2:
assert self, f"Units object is empty"
amount = self.amount
return Point2(
(sum(unit._proto.pos.x for unit in self) / amount, sum(unit._proto.pos.y for unit in self) / amount,)
)
@property
def selected(self) -> Units:
return self.filter(lambda unit: unit.is_selected)
@property
def tags(self) -> Set[int]:
return {unit.tag for unit in self}
@property
def ready(self) -> Units:
return self.filter(lambda unit: unit.is_ready)
@property
def not_ready(self) -> Units:
return self.filter(lambda unit: not unit.is_ready)
@property
def idle(self) -> Units:
return self.filter(lambda unit: unit.is_idle)
@property
def owned(self) -> Units:
return self.filter(lambda unit: unit.is_mine)
@property
def enemy(self) -> Units:
return self.filter(lambda unit: unit.is_enemy)
@property
def flying(self) -> Units:
return self.filter(lambda unit: unit.is_flying)
@property
def not_flying(self) -> Units:
return self.filter(lambda unit: not unit.is_flying)
@property
def structure(self) -> Units:
return self.filter(lambda unit: unit.is_structure)
@property
def not_structure(self) -> Units:
return self.filter(lambda unit: not unit.is_structure)
@property
def gathering(self) -> Units:
return self.filter(lambda unit: unit.is_gathering)
@property
def returning(self) -> Units:
return self.filter(lambda unit: unit.is_returning)
@property
def collecting(self) -> Units:
return self.filter(lambda unit: unit.is_collecting)
@property
def visible(self) -> Units:
return self.filter(lambda unit: unit.is_visible)
@property
def mineral_field(self) -> Units:
return self.filter(lambda unit: unit.is_mineral_field)
@property
def vespene_geyser(self) -> Units:
return self.filter(lambda unit: unit.is_vespene_geyser)
@property
def prefer_idle(self) -> Units:
return self.sorted(lambda unit: unit.is_idle, reverse=True)
class UnitSelection(Units):
def __init__(self, parent, selection=None):
if isinstance(selection, (UnitTypeId)):
super().__init__((unit for unit in parent if unit.type_id == selection), parent._bot_object)
elif isinstance(selection, set):
assert all(isinstance(t, UnitTypeId) for t in selection), f"Not all ids in selection are of type UnitTypeId"
super().__init__((unit for unit in parent if unit.type_id in selection), parent._bot_object)
elif selection is None:
super().__init__((unit for unit in parent), parent._bot_object)
else:
assert isinstance(
selection, (UnitTypeId, set)
), f"selection is not None or of type UnitTypeId or Set[UnitTypeId]"
| true | true |
1c3bb115a47bdc77dc8dabb9c45848ab2475a739 | 4,164 | py | Python | openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_om/Opengauss_Function_Tools_gs_om_Case0021.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_om/Opengauss_Function_Tools_gs_om_Case0021.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/TOOLS/SERVER_TOOLS/gs_om/Opengauss_Function_Tools_gs_om_Case0021.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 服务端工具
Case Name : 数据库启动时,指定日志存放路径 日志文件存放路径
Description :
1.查看数据库状态:gs_om -t status --detail
2.关闭数据库:gs_om -t stop;
3.启动时,指定日志文件存放路径不正确
4.启动时,指定日志文件存放路径正确
5.在指定路径下,查看日志:
6.查看数据库状态:gs_om -t status --detail
7.删除日志文件
Expect :
1.状态正常
2.关闭数据库
3.执行失败
4.日志生成在指定路径下
5.查看日志
6.状态正常
7.删除成功
History :
"""
import os
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from testcase.utils.CommonSH import CommonSH
class Tools(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info('---Opengauss_Function_Tools_gs_om_Case0021start---')
self.dbuser_node = Node('dbuser')
self.constant = Constant()
self.commonsh = CommonSH()
def test_server_tools1(self):
self.log.info('-----------------查看数据库状态-------------------')
status_cmd = f'source {macro.DB_ENV_PATH};' \
f'gs_om -t status --detail ;'
self.log.info(status_cmd)
status_msg = self.dbuser_node.sh(status_cmd).result()
self.log.info(status_msg)
self.assertTrue("Degraded" in status_msg or "Normal" in status_msg)
self.log.info('----------关闭数据库----------')
certificate_path = os.path.join(macro.DB_INSTANCE_PATH, 'server.*')
ls_cmd = f'ls -l {certificate_path}'
self.log.info(ls_cmd)
stop_cmd = f'source {macro.DB_ENV_PATH}; ' \
f'gs_om -t stop;'
self.log.info(stop_cmd)
stop_msg = self.dbuser_node.sh(stop_cmd).result()
self.log.info(stop_msg)
self.assertIn(self.constant.GS_OM_STOP_SUCCESS_MSG, stop_msg)
self.log.info('---启动时,指定日志文件存放路径不正确---')
start_cmd = f'source {macro.DB_ENV_PATH}; ' \
f'gs_om -t start -l {macro.DB_INSTANCE_PATH}/omlog.txt;'
self.log.info(start_cmd)
start_msg = self.dbuser_node.sh(start_cmd).result()
self.log.info(start_msg)
self.assertIn('/cluster/dn1/omlog.txt] should be \'.log\'', start_msg)
self.log.info('---启动时,指定日志文件存放路径正确---')
start_cmd = f'source {macro.DB_ENV_PATH}; ' \
f'gs_om -t start -l {macro.DB_INSTANCE_PATH}/omlog.log;'
self.log.info(start_cmd)
start_msg = self.dbuser_node.sh(start_cmd).result()
self.log.info(start_msg)
self.assertIn(self.constant.GS_OM_START_SUCCESS_MSG, start_msg)
self.log.info('---查看日志---')
cat_cmd = f'source {macro.DB_ENV_PATH}; ' \
f'cat {macro.DB_INSTANCE_PATH}/omlog*'
self.log.info(cat_cmd)
cat_msg = self.dbuser_node.sh(cat_cmd).result()
self.log.info(cat_msg)
self.assertIn('Operation succeeded: Start', cat_msg)
self.log.info('-----------------查看数据库状态-------------------')
status_cmd = f'source {macro.DB_ENV_PATH};' \
f'gs_om -t status --detail ;'
self.log.info(status_cmd)
status_msg = self.dbuser_node.sh(status_cmd).result()
self.log.info(status_msg)
self.assertTrue("Degraded" in status_msg or "Normal" in status_msg)
def tearDown(self):
self.log.info('-----------清理环境,启动数据库-------------')
rm_cmd = f'source {macro.DB_ENV_PATH};' \
f'rm -rf {macro.DB_INSTANCE_PATH}/omlog*'
self.log.info(rm_cmd)
rm_msg = self.dbuser_node.sh(rm_cmd).result()
self.log.info(rm_msg)
start_cmd = self.commonsh.start_db_cluster()
self.log.info(start_cmd)
self.log.info('--Opengauss_Function_Tools_gs_om_Case0021finish--')
| 35.896552 | 84 | 0.632565 |
import os
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from testcase.utils.CommonSH import CommonSH
class Tools(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info('---Opengauss_Function_Tools_gs_om_Case0021start---')
self.dbuser_node = Node('dbuser')
self.constant = Constant()
self.commonsh = CommonSH()
def test_server_tools1(self):
self.log.info('-----------------查看数据库状态-------------------')
status_cmd = f'source {macro.DB_ENV_PATH};' \
f'gs_om -t status --detail ;'
self.log.info(status_cmd)
status_msg = self.dbuser_node.sh(status_cmd).result()
self.log.info(status_msg)
self.assertTrue("Degraded" in status_msg or "Normal" in status_msg)
self.log.info('----------关闭数据库----------')
certificate_path = os.path.join(macro.DB_INSTANCE_PATH, 'server.*')
ls_cmd = f'ls -l {certificate_path}'
self.log.info(ls_cmd)
stop_cmd = f'source {macro.DB_ENV_PATH}; ' \
f'gs_om -t stop;'
self.log.info(stop_cmd)
stop_msg = self.dbuser_node.sh(stop_cmd).result()
self.log.info(stop_msg)
self.assertIn(self.constant.GS_OM_STOP_SUCCESS_MSG, stop_msg)
self.log.info('---启动时,指定日志文件存放路径不正确---')
start_cmd = f'source {macro.DB_ENV_PATH}; ' \
f'gs_om -t start -l {macro.DB_INSTANCE_PATH}/omlog.txt;'
self.log.info(start_cmd)
start_msg = self.dbuser_node.sh(start_cmd).result()
self.log.info(start_msg)
self.assertIn('/cluster/dn1/omlog.txt] should be \'.log\'', start_msg)
self.log.info('---启动时,指定日志文件存放路径正确---')
start_cmd = f'source {macro.DB_ENV_PATH}; ' \
f'gs_om -t start -l {macro.DB_INSTANCE_PATH}/omlog.log;'
self.log.info(start_cmd)
start_msg = self.dbuser_node.sh(start_cmd).result()
self.log.info(start_msg)
self.assertIn(self.constant.GS_OM_START_SUCCESS_MSG, start_msg)
self.log.info('---查看日志---')
cat_cmd = f'source {macro.DB_ENV_PATH}; ' \
f'cat {macro.DB_INSTANCE_PATH}/omlog*'
self.log.info(cat_cmd)
cat_msg = self.dbuser_node.sh(cat_cmd).result()
self.log.info(cat_msg)
self.assertIn('Operation succeeded: Start', cat_msg)
self.log.info('-----------------查看数据库状态-------------------')
status_cmd = f'source {macro.DB_ENV_PATH};' \
f'gs_om -t status --detail ;'
self.log.info(status_cmd)
status_msg = self.dbuser_node.sh(status_cmd).result()
self.log.info(status_msg)
self.assertTrue("Degraded" in status_msg or "Normal" in status_msg)
def tearDown(self):
self.log.info('-----------清理环境,启动数据库-------------')
rm_cmd = f'source {macro.DB_ENV_PATH};' \
f'rm -rf {macro.DB_INSTANCE_PATH}/omlog*'
self.log.info(rm_cmd)
rm_msg = self.dbuser_node.sh(rm_cmd).result()
self.log.info(rm_msg)
start_cmd = self.commonsh.start_db_cluster()
self.log.info(start_cmd)
self.log.info('--Opengauss_Function_Tools_gs_om_Case0021finish--')
| true | true |
1c3bb29f89d89209a3b9f7d405e87b11f19a59db | 16,143 | py | Python | annotation/typed.py | raabf/typeannotations | 7a3bffbb3b7e690b6f064f70a586820e3a741c5f | [
"Apache-2.0"
] | 84 | 2015-01-15T13:25:21.000Z | 2021-09-29T08:02:32.000Z | annotation/typed.py | raabf/typeannotations | 7a3bffbb3b7e690b6f064f70a586820e3a741c5f | [
"Apache-2.0"
] | 4 | 2015-04-05T18:16:37.000Z | 2017-02-11T11:59:43.000Z | annotation/typed.py | raabf/typeannotations | 7a3bffbb3b7e690b6f064f70a586820e3a741c5f | [
"Apache-2.0"
] | 4 | 2015-02-12T04:21:07.000Z | 2019-07-30T16:54:01.000Z | # Written by Manuel Cerón
# Copyright Manuel Cerón. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Tools for adding type annotations in Python.
This module provides a set of tools for type checking and annotations:
- typechecked() provides a decorator for checking the types in annotations.
- Interface provides a subclass to define structural interfaces.
- union() provides a group of types.
- predicate() provides type that checks a precondition.
"""
__author__ = ('Manuel Cerón <ceronman@gmail.com>')
__all__ = ['AnyType', 'Interface', 'only', 'optional', 'options', 'predicate',
'typechecked', 'typedef', 'union']
import functools
import inspect
EMPTY_ANNOTATION = inspect.Signature.empty
class UnionMeta(type):
"""Metaclass for union types.
An object is an instance of a union type if it is instance of any of the
members of the union.
>>> NumberOrString = union(int, str)
>>> isinstance(1, NumberOrString)
True
>>> isinstance('string', NumberOrString)
True
>>> issubclass(int, NumberOrString)
True
>>> issubclass(str, NumberOrString)
True
"""
def __new__(mcls, name, bases, namespace):
cls = super().__new__(mcls, name, bases, namespace)
types = getattr(cls, '__types__', None)
if not isinstance(types, set):
raise TypeError('Union requires a __types__ set')
if any(not isinstance(t, type) for t in types):
raise TypeError('Union __types__ elements must be type')
return cls
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
return any(isinstance(instance, t) for t in cls.__types__)
def __subclasscheck__(cls, subclass):
"""Override for isinstance(instance, cls)."""
if isinstance(subclass, UnionMeta):
return all(issubclass(t, cls) for t in subclass.__types__)
return any(issubclass(subclass, t) for t in cls.__types__)
def __repr__(cls):
return '<union {0}>'.format(repr(cls.__types__))
def union(*args):
"""A convenience function for creating unions. See UnionMeta."""
return UnionMeta('union', (), {'__types__': set(args)})
class AnyTypeMeta(type):
"""Metaclass for AnyType.
Any object is instance of AnyType and any type is sublcass of anytype.
>>> isinstance(1, AnyType)
True
>>> isinstance(None, AnyType)
True
>>> isinstance('string', AnyType)
True
>>> issubclass(int, AnyType)
True
>>> issubclass(str, AnyType)
True
>>> issubclass(None, AnyType)
True
"""
def __new__(mcls, name, bases, namespace):
return super().__new__(mcls, name, bases, namespace)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
return True
def __subclasscheck__(cls, subclass):
"""Override for isinstance(instance, cls)."""
return True
class AnyType(metaclass=AnyTypeMeta):
"""See AnyTypeMeta."""
pass
def _multi_instanceof(a, b, t):
return isinstance(a, t) and isinstance(b, t)
def _check_signature_constraint(instance, constraint):
if isinstance(constraint, type):
return issubclass(instance, constraint)
elif _multi_instanceof(instance, constraint, list) or _multi_instanceof(instance, constraint, set):
ret = True
for sub_type in instance:
ret &= any(_check_signature_constraint(sub_type, con) for con in constraint)
return ret
elif _multi_instanceof(instance, constraint, tuple) and len(constraint) == len(instance):
return all(_check_signature_constraint(sub, con) for sub, con in zip(instance, constraint))
elif _multi_instanceof(instance, constraint, dict):
ret = True
for sub_key, sub_val in instance.items():
ret &= any(
(_check_signature_constraint(sub_key, key_constraint) and
_check_signature_constraint(sub_val, value_constraint))
for key_constraint, value_constraint in constraint.items())
return ret
else:
return False
def _implements_signature(function, signature):
    """True if the given function implements the given inspect.Signature.

    Parameter names must match positionally; parameter annotations are
    checked contravariantly and the return annotation covariantly.
    """
    def _normalize(annotation):
        # A missing annotation is treated as AnyType (matches everything).
        return AnyType if annotation is EMPTY_ANNOTATION else annotation
    try:
        instance_signature = inspect.signature(function)
    except TypeError:
        return False
    except ValueError:  # we got a builtin.
        return True
    cls_params = signature.parameters.values()
    instance_params = instance_signature.parameters.values()
    if len(cls_params) != len(instance_params):
        return False
    for cls_param, instance_param in zip(cls_params, instance_params):
        if cls_param.name != instance_param.name:
            return False
        # Contravariant: the implementation must accept at least what the
        # declared signature accepts.
        if not _check_signature_constraint(_normalize(cls_param.annotation),
                                           _normalize(instance_param.annotation)):
            return False
    # Covariant: the implementation must return no more than the declared
    # signature promises.
    return _check_signature_constraint(
        _normalize(instance_signature.return_annotation),
        _normalize(signature.return_annotation))
class InterfaceMeta(type):
    """Metaclass for an Interface.

    An interface defines a set of methods and attributes that an object must
    implement. Any object implementing those will be considered an instance of
    the interface.

    >>> class IterableWithLen(Interface):
    ...     def __iter__():
    ...         pass
    ...     def __len__():
    ...         pass
    ...
    >>> isinstance([], IterableWithLen)
    True
    >>> isinstance({}, IterableWithLen)
    True
    >>> isinstance(1, IterableWithLen)
    False
    >>> isinstance(iter([]), IterableWithLen)
    False
    >>> issubclass(list, IterableWithLen)
    True
    >>> issubclass(int, IterableWithLen)
    False
    >>> class Person(Interface):
    ...     name = str
    ...     age = int
    ...     def say_hello(name: str) -> str:
    ...         pass
    ...
    >>> class Developer:
    ...     def __init__(self, name, age):
    ...         self.name = name
    ...         self.age = age
    ...     def say_hello(self, name: str) -> str:
    ...         return 'hello ' + name
    ...
    >>> isinstance(Developer('dave', 20), Person)
    True
    """
    def __new__(mcls, name, bases, namespace):
        """Collect the interface's method signatures and attribute types."""
        cls = super().__new__(mcls, name, bases, namespace)
        # TODO: check base classes, prevent multiple inheritance.
        cls.__signatures__ = {}
        cls.__attributes__ = {}
        # NOTE: this loop rebinds the 'name' parameter; the class name is
        # not needed past this point, so the shadowing is harmless.
        for name, value in namespace.items():
            if name in ('__qualname__', '__module__', '__doc__'):
                continue
            if inspect.isfunction(value):
                mcls.add_method(cls, value)
                continue
            # Anything that is not a function is treated as a typed attribute.
            mcls.add_attribute(cls, name, value)
        return cls
    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls).

        True when the instance has every declared attribute (of the declared
        type) and every declared method (with a compatible signature).
        """
        for name, type_ in cls.__attributes__.items():
            try:
                attribute = getattr(instance, name)
            except AttributeError:
                return False
            if not isinstance(attribute, type_):
                return False
        for name, signature in cls.__signatures__.items():
            # getattr on an instance yields a bound method, so 'self' is
            # already absorbed before the signature comparison.
            function = getattr(instance, name, None)
            if not _implements_signature(function, signature):
                return False
        return True
    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls).

        Checks that the candidate class declares every interface method with
        compatible parameter and return annotations.
        """
        if cls is subclass:
            return True
        # TODO: support attributes
        for name, signature in cls.__signatures__.items():
            try:
                # getattr_static avoids invoking descriptors on the class.
                function = inspect.getattr_static(subclass, name)
            except AttributeError:
                return False
            if isinstance(function, (staticmethod, classmethod)):
                return False
            try:
                subclass_signature = inspect.signature(function)
            except TypeError:
                return False
            except ValueError:  # we probably got a builtin
                return True
            cls_params = list(signature.parameters.values())
            subclass_params = list(subclass_signature.parameters.values())
            subclass_params.pop(0)  # remove 'self'
            if len(cls_params) != len(subclass_params):
                return False
            for cls_param, instance_param in zip(cls_params, subclass_params):
                if cls_param.name != instance_param.name:
                    return False
                cls_annotation = cls_param.annotation
                instance_annotation = instance_param.annotation
                # Missing annotations act as AnyType (match anything).
                if cls_annotation is EMPTY_ANNOTATION:
                    cls_annotation = AnyType
                if instance_annotation is EMPTY_ANNOTATION:
                    instance_annotation = AnyType
                # Parameters are contravariant: the implementation must
                # accept at least what the interface declares.
                if not issubclass(cls_annotation, instance_annotation):
                    return False
            cls_annotation = signature.return_annotation
            instance_annotation = subclass_signature.return_annotation
            if cls_annotation is EMPTY_ANNOTATION:
                cls_annotation = AnyType
            if instance_annotation is EMPTY_ANNOTATION:
                instance_annotation = AnyType
            # Return types are covariant: the implementation may return a
            # narrower type than the interface declares.
            if not issubclass(instance_annotation, cls_annotation):
                return False
        return True
    def add_method(cls, method):
        """Adds a new method to an Interface."""
        # TODO check that signatures contain only types as annotations.
        try:
            cls.__signatures__[method.__name__] = inspect.signature(method)
        except (TypeError, AttributeError):
            raise TypeError('Interface methods should have a signature')
        return method
    def add_attribute(cls, name, type_=AnyType):
        """Adds a new attribute to an Interface."""
        if not isinstance(type_, type):
            # TODO the error message below is incomplete.
            raise TypeError('Interface attributes should be type')
        cls.__attributes__[name] = type_
# Base class for user-defined interfaces; all checking lives in InterfaceMeta.
class Interface(metaclass=InterfaceMeta):
    """Base class for structural interfaces. See InterfaceMeta."""
    pass
class PredicateMeta(type):
    """Metaclass for a predicate.

    An object is an instance of a predicate if applying the predicate to the
    object returns True.

    >>> Positive = predicate(lambda x: x > 0)
    >>> isinstance(1, Positive)
    True
    >>> isinstance(0, Positive)
    False
    """
    # NOTE: the original also defined a __new__ that only delegated to
    # super().__new__; it added no behavior and has been removed.
    def __instancecheck__(cls, instance):
        """Apply the predicate to decide isinstance(instance, cls)."""
        # Look the predicate up explicitly instead of wrapping the *call* in
        # try/except AttributeError: the original also swallowed
        # AttributeErrors raised inside the predicate itself, silently
        # reporting False for buggy predicates.
        predicate_function = getattr(cls, '__predicate__', None)
        if predicate_function is None:
            return False
        return predicate_function(instance)
    def __subclasscheck__(cls, subclass):
        """Predicates apply to objects, not classes: never a subclass."""
        return False
def predicate(function, name=None):
    """Convenience function to create predicates. See PredicateMeta.

    >>> Even = predicate(lambda x: x % 2 == 0)
    >>> isinstance(2, Even)
    True
    >>> isinstance(1, Even)
    False
    """
    predicate_name = name if name else function.__name__
    namespace = {'__predicate__': function}
    return PredicateMeta(predicate_name, (), namespace)
def optional(type_):
    """Optional type predicate. An object can be None or the specified type.

    >>> isinstance(1, optional(int))
    True
    >>> isinstance(None, optional(int))
    True
    """
    def _is_optional(value):
        return value is None or isinstance(value, type_)
    return predicate(_is_optional, 'optional')
def typedef(function):
    """A type representing a given function signature.

    It should be used as decorator:

    >>> @typedef
    ... def callback(a: int) -> int:
    ...     pass
    ...
    >>> def handler(a: int) -> int:
    ...     return a
    ...
    >>> isinstance(handler, callback)
    True
    >>> isinstance(lambda x: x, callback)
    False
    """
    # Capture the signature once; the predicate closes over it.
    target_signature = inspect.signature(function)
    def _matches(candidate):
        return _implements_signature(candidate, target_signature)
    return predicate(_matches, 'typedef')
def options(*args):
    """A predicate type for a set of predefined values.

    >>> Days = options('mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun')
    >>> isinstance('mon', Days)
    True
    >>> isinstance('other', Days)
    False
    """
    def _is_option(value):
        return value in args
    return predicate(_is_option, 'options')
def only(type_):
    """A predicate requiring an exact type, not super classes.

    >>> isinstance(True, only(bool))
    True
    >>> isinstance(1, only(bool))
    False
    """
    def _is_exact(value):
        # Identity comparison on the type rules out subclasses.
        return type(value) is type_
    return predicate(_is_exact, 'only')
def _check_type_constraint(value, constraint):
    """Recursively check a runtime *value* against an annotation *constraint*.

    Plain types use isinstance; list/set, tuple and dict constraints are
    compared element-wise, with empty containers acting as wildcards.
    """
    if isinstance(constraint, type):
        return isinstance(value, constraint)
    if (_multi_instanceof(value, constraint, list)
            or _multi_instanceof(value, constraint, set)):
        if not constraint:
            # An empty container constraint accepts any element types.
            return True
        return all(
            any(_check_type_constraint(item, option) for option in constraint)
            for item in value)
    if (_multi_instanceof(value, constraint, tuple)
            and len(constraint) == len(value)):
        return all(_check_type_constraint(item, option)
                   for item, option in zip(value, constraint))
    if _multi_instanceof(value, constraint, dict):
        if not constraint:
            return True
        return all(
            any(_check_type_constraint(key, key_option) and
                _check_type_constraint(val, value_option)
                for key_option, value_option in constraint.items())
            for key, val in value.items())
    return False
def _check_argument_types(signature, *args, **kwargs):
    """Raise TypeError if *args/**kwargs do not satisfy *signature*."""
    bound = signature.bind(*args, **kwargs)
    for argument_name, argument_value in bound.arguments.items():
        annotation = signature.parameters[argument_name].annotation
        # A missing annotation matches anything.
        if annotation is EMPTY_ANNOTATION:
            annotation = AnyType
        if not _check_type_constraint(argument_value, annotation):
            raise TypeError('Incorrect type for "{0}"'.format(argument_name))
def _check_return_type(signature, return_value):
    """Validate *return_value* against *signature*; return it unchanged."""
    annotation = signature.return_annotation
    # A missing annotation matches anything.
    annotation = AnyType if annotation is EMPTY_ANNOTATION else annotation
    if not _check_type_constraint(return_value, annotation):
        raise TypeError('Incorrect return type')
    return return_value
def typechecked(target):
    """A decorator to make a function check its types at runtime.

    >>> @typechecked
    ... def test(a: int):
    ...     return a
    ...
    >>> test(1)
    1
    >>> test('string')
    Traceback (most recent call last):
        ...
    TypeError: Incorrect type for "a"
    """
    # Resolve the signature once, at decoration time.
    target_signature = inspect.signature(target)
    @functools.wraps(target)
    def checked_call(*args, **kwargs):
        _check_argument_types(target_signature, *args, **kwargs)
        result = target(*args, **kwargs)
        return _check_return_type(target_signature, result)
    return checked_call
if __name__ == '__main__':
    # Run the embedded doctest examples when executed directly.
    import doctest
    doctest.testmod()
| 31.966337 | 103 | 0.634702 |
__author__ = ('Manuel Cerón <ceronman@gmail.com>')
__all__ = ['AnyType', 'Interface', 'only', 'optional', 'options', 'predicate',
'typechecked', 'typedef', 'union']
import functools
import inspect
EMPTY_ANNOTATION = inspect.Signature.empty
class UnionMeta(type):
def __new__(mcls, name, bases, namespace):
cls = super().__new__(mcls, name, bases, namespace)
types = getattr(cls, '__types__', None)
if not isinstance(types, set):
raise TypeError('Union requires a __types__ set')
if any(not isinstance(t, type) for t in types):
raise TypeError('Union __types__ elements must be type')
return cls
def __instancecheck__(cls, instance):
return any(isinstance(instance, t) for t in cls.__types__)
def __subclasscheck__(cls, subclass):
if isinstance(subclass, UnionMeta):
return all(issubclass(t, cls) for t in subclass.__types__)
return any(issubclass(subclass, t) for t in cls.__types__)
def __repr__(cls):
return '<union {0}>'.format(repr(cls.__types__))
def union(*args):
return UnionMeta('union', (), {'__types__': set(args)})
class AnyTypeMeta(type):
def __new__(mcls, name, bases, namespace):
return super().__new__(mcls, name, bases, namespace)
def __instancecheck__(cls, instance):
return True
def __subclasscheck__(cls, subclass):
return True
class AnyType(metaclass=AnyTypeMeta):
pass
def _multi_instanceof(a, b, t):
return isinstance(a, t) and isinstance(b, t)
def _check_signature_constraint(instance, constraint):
if isinstance(constraint, type):
return issubclass(instance, constraint)
elif _multi_instanceof(instance, constraint, list) or _multi_instanceof(instance, constraint, set):
ret = True
for sub_type in instance:
ret &= any(_check_signature_constraint(sub_type, con) for con in constraint)
return ret
elif _multi_instanceof(instance, constraint, tuple) and len(constraint) == len(instance):
return all(_check_signature_constraint(sub, con) for sub, con in zip(instance, constraint))
elif _multi_instanceof(instance, constraint, dict):
ret = True
for sub_key, sub_val in instance.items():
ret &= any(
(_check_signature_constraint(sub_key, key_constraint) and
_check_signature_constraint(sub_val, value_constraint))
for key_constraint, value_constraint in constraint.items())
return ret
else:
return False
def _implements_signature(function, signature):
try:
instance_signature = inspect.signature(function)
except TypeError:
return False
except ValueError:
return True
cls_params = signature.parameters.values()
instance_params = instance_signature.parameters.values()
if len(cls_params) != len(instance_params):
return False
for cls_param, instance_param in zip(cls_params, instance_params):
if cls_param.name != instance_param.name:
return False
cls_annotation = cls_param.annotation
instance_annotation = instance_param.annotation
if cls_annotation is EMPTY_ANNOTATION:
cls_annotation = AnyType
if instance_annotation is EMPTY_ANNOTATION:
instance_annotation = AnyType
if not _check_signature_constraint(cls_annotation, instance_annotation):
return False
cls_annotation = signature.return_annotation
instance_annotation = instance_signature.return_annotation
if cls_annotation is EMPTY_ANNOTATION:
cls_annotation = AnyType
if instance_annotation is EMPTY_ANNOTATION:
instance_annotation = AnyType
if not _check_signature_constraint(instance_annotation, cls_annotation):
return False
return True
class InterfaceMeta(type):
def __new__(mcls, name, bases, namespace):
cls = super().__new__(mcls, name, bases, namespace)
cls.__signatures__ = {}
cls.__attributes__ = {}
for name, value in namespace.items():
if name in ('__qualname__', '__module__', '__doc__'):
continue
if inspect.isfunction(value):
mcls.add_method(cls, value)
continue
mcls.add_attribute(cls, name, value)
return cls
def __instancecheck__(cls, instance):
for name, type_ in cls.__attributes__.items():
try:
attribute = getattr(instance, name)
except AttributeError:
return False
if not isinstance(attribute, type_):
return False
for name, signature in cls.__signatures__.items():
function = getattr(instance, name, None)
if not _implements_signature(function, signature):
return False
return True
def __subclasscheck__(cls, subclass):
if cls is subclass:
return True
for name, signature in cls.__signatures__.items():
try:
function = inspect.getattr_static(subclass, name)
except AttributeError:
return False
if isinstance(function, (staticmethod, classmethod)):
return False
try:
subclass_signature = inspect.signature(function)
except TypeError:
return False
except ValueError:
return True
cls_params = list(signature.parameters.values())
subclass_params = list(subclass_signature.parameters.values())
subclass_params.pop(0)
if len(cls_params) != len(subclass_params):
return False
for cls_param, instance_param in zip(cls_params, subclass_params):
if cls_param.name != instance_param.name:
return False
cls_annotation = cls_param.annotation
instance_annotation = instance_param.annotation
if cls_annotation is EMPTY_ANNOTATION:
cls_annotation = AnyType
if instance_annotation is EMPTY_ANNOTATION:
instance_annotation = AnyType
if not issubclass(cls_annotation, instance_annotation):
return False
cls_annotation = signature.return_annotation
instance_annotation = subclass_signature.return_annotation
if cls_annotation is EMPTY_ANNOTATION:
cls_annotation = AnyType
if instance_annotation is EMPTY_ANNOTATION:
instance_annotation = AnyType
if not issubclass(instance_annotation, cls_annotation):
return False
return True
def add_method(cls, method):
try:
cls.__signatures__[method.__name__] = inspect.signature(method)
except (TypeError, AttributeError):
raise TypeError('Interface methods should have a signature')
return method
def add_attribute(cls, name, type_=AnyType):
if not isinstance(type_, type):
raise TypeError('Interface attributes should be type')
cls.__attributes__[name] = type_
class Interface(metaclass=InterfaceMeta):
pass
class PredicateMeta(type):
def __new__(mcls, name, bases, namespace):
return super().__new__(mcls, name, bases, namespace)
def __instancecheck__(cls, instance):
try:
return cls.__predicate__(instance)
except AttributeError:
return False
def __subclasscheck__(cls, subclass):
return False
def predicate(function, name=None):
name = name or function.__name__
return PredicateMeta(name, (), {'__predicate__': function})
def optional(type_):
return predicate(lambda x: (x is None or isinstance(x, type_)), 'optional')
def typedef(function):
signature = inspect.signature(function)
return predicate(lambda x: _implements_signature(x, signature), 'typedef')
def options(*args):
return predicate(lambda x: x in args, 'options')
def only(type_):
return predicate(lambda x: type(x) is type_, 'only')
def _check_type_constraint(value, constraint):
if isinstance(constraint, type):
return isinstance(value, constraint)
elif _multi_instanceof(value, constraint, list) or _multi_instanceof(value, constraint, set):
if len(constraint):
ret = True
for sub_val in value:
ret &= any(_check_type_constraint(sub_val, con) for con in constraint)
return ret
else:
return True
elif _multi_instanceof(value, constraint, tuple) and len(constraint) == len(value):
return all(_check_type_constraint(sub, con) for sub, con in zip(value, constraint))
elif _multi_instanceof(value, constraint, dict):
if len(constraint):
ret = True
for sub_key, sub_val in value.items():
ret &= any(
(_check_type_constraint(sub_key, key_constraint) and
_check_type_constraint(sub_val, value_constraint))
for key_constraint, value_constraint in constraint.items())
return ret
else:
return True
else:
return False
def _check_argument_types(signature, *args, **kwargs):
bound_arguments = signature.bind(*args, **kwargs)
parameters = signature.parameters
for name, value in bound_arguments.arguments.items():
annotation = parameters[name].annotation
if annotation is EMPTY_ANNOTATION:
annotation = AnyType
if not _check_type_constraint(value, annotation):
raise TypeError('Incorrect type for "{0}"'.format(name))
def _check_return_type(signature, return_value):
annotation = signature.return_annotation
if annotation is EMPTY_ANNOTATION:
annotation = AnyType
if not _check_type_constraint(return_value, annotation):
raise TypeError('Incorrect return type')
return return_value
def typechecked(target):
signature = inspect.signature(target)
@functools.wraps(target)
def wrapper(*args, **kwargs):
_check_argument_types(signature, *args, **kwargs)
return _check_return_type(signature, target(*args, **kwargs))
return wrapper
if __name__ == '__main__':
import doctest
doctest.testmod()
| true | true |
1c3bb400a73babd821204f687e96422e40b4cfe7 | 6,137 | py | Python | tests/test_api.py | FollowTheProcess/pytoil | b13acb14f015ae5399d7697bdc3e0e475dff03ec | [
"Apache-2.0"
] | 6 | 2021-05-08T20:31:03.000Z | 2022-03-08T01:25:43.000Z | tests/test_api.py | FollowTheProcess/pytoil | b13acb14f015ae5399d7697bdc3e0e475dff03ec | [
"Apache-2.0"
] | 116 | 2021-07-08T11:21:22.000Z | 2022-03-30T14:04:51.000Z | tests/test_api.py | FollowTheProcess/pytoil | b13acb14f015ae5399d7697bdc3e0e475dff03ec | [
"Apache-2.0"
] | null | null | null | """
Tests for the API module.
Author: Tom Fleet
Created: 19/06/2021
"""
import httpx
import pytest
from pytest_httpx import HTTPXMock
from pytoil.api import API
from pytoil.api.models import Repository, RepoSummaryInfo
# Constructing the API wrapper should store credentials and derive the
# standard GitHub v3 headers.
def test_api_init():
    api = API(username="me", token="sometoken")
    assert api.username == "me"
    assert api.token == "sometoken"
    assert api.baseurl == "https://api.github.com/"
    assert api.headers == {
        "Accept": "application/vnd.github.v3+json",
        "Authorization": f"token {api.token}",
    }
# repr should reproduce the constructor call for debuggability.
def test_api_repr():
    api = API(username="me", token="sometoken")
    assert repr(api) == "API(username='me', token='sometoken')"
# Any 4xx/5xx from GitHub should surface as httpx.HTTPStatusError.
@pytest.mark.parametrize("bad_status_code", [400, 401, 403, 404, 500, 504, 505])
def test_get_raises_on_bad_status(httpx_mock: HTTPXMock, bad_status_code):
    httpx_mock.add_response(
        url="https://api.github.com/user/repos", status_code=bad_status_code
    )
    api = API(token="definitelynotatoken", username="me")
    with pytest.raises(httpx.HTTPStatusError):
        api.get("user/repos")
# On success, get() should return the decoded JSON payload verbatim.
def test_get_returns_correct_response(httpx_mock: HTTPXMock, fake_repos_response):
    httpx_mock.add_response(
        url="https://api.github.com/user/repos",
        json=fake_repos_response,
        status_code=200,
    )
    api = API(token="definitelynotatoken", username="me")
    r = api.get("user/repos")
    assert r == fake_repos_response
# post() mirrors get(): errors raise, success returns the JSON body.
@pytest.mark.parametrize("bad_status_code", [400, 401, 403, 404, 500, 504, 505])
def test_post_raises_on_bad_status(httpx_mock: HTTPXMock, bad_status_code):
    httpx_mock.add_response(
        url="https://api.github.com/user/repos", status_code=bad_status_code
    )
    api = API(token="definitelynotatoken", username="me")
    with pytest.raises(httpx.HTTPStatusError):
        api.post("user/repos")
def test_post_returns_correct_response(httpx_mock: HTTPXMock, fake_repos_response):
    httpx_mock.add_response(
        url="https://api.github.com/user/repos",
        json=fake_repos_response,
        status_code=200,
    )
    api = API(token="definitelynotatoken", username="me")
    r = api.post("user/repos")
    assert r == fake_repos_response
# get_repo() should parse the JSON payload into a Repository model.
def test_get_repo_returns_correct_response(httpx_mock: HTTPXMock, fake_repo_response):
    httpx_mock.add_response(
        url="https://api.github.com/repos/me/pytoil",
        json=fake_repo_response,
        status_code=200,
    )
    api = API(token="definitelynotatoken", username="me")
    assert api.get_repo(repo="pytoil") == Repository(**fake_repo_response)
# get_repos() should parse each payload entry into a Repository model.
def test_get_repos_returns_correct_response(httpx_mock: HTTPXMock, fake_repos_response):
    httpx_mock.add_response(
        url="https://api.github.com/user/repos?type=owner",
        json=fake_repos_response,
        status_code=200,
    )
    api = API(token="definitelynotatoken", username="me")
    assert api.get_repos() == [Repository(**repo) for repo in fake_repos_response]
# get_repo_names() should return just the names, as a set.
def test_get_repo_names_returns_correct_names(
    httpx_mock: HTTPXMock, fake_repos_response
):
    httpx_mock.add_response(
        url="https://api.github.com/user/repos?type=owner",
        json=fake_repos_response,
        status_code=200,
    )
    api = API(token="definitelynotatoken", username="me")
    # These are the names in the fake_repos_response fixture
    wanted_names = {
        "aircraft_crashes",
        "cookie_pypackage",
        "eu_energy_analysis",
        "FollowTheProcess",
        "followtheprocess.github.io",
        "goignore",
        "gotoil",
        "go_cookie",
        "lightweight_ds_cookie",
        "msc_project",
        "nox",
        "poetry_pypackage",
        "pymechtest",
        "pytoil",
        "testygo",
    }
    assert api.get_repo_names() == wanted_names
# get_repo_info() should condense the raw response into a RepoSummaryInfo.
def test_get_repo_info_returns_correct_info(httpx_mock: HTTPXMock, fake_repo_response):
    httpx_mock.add_response(
        url="https://api.github.com/repos/me/pytoil",
        json=fake_repo_response,
        status_code=200,
    )
    api = API(token="definitelynotatoken", username="me")
    # What we want out
    # This comes from an actual gh response for pytoil
    want = {
        "name": "pytoil",
        "description": "CLI to automate the development workflow :robot:",
        "created_at": "2021-02-04T15:05:23Z",
        "updated_at": "2021-09-01T15:29:56Z",
        "size": 4443,
        "license": "Apache License 2.0",
    }
    assert api.get_repo_info("pytoil") == RepoSummaryInfo(**want)
# A repo without a license field should yield license=None, not an error.
def test_get_repo_info_correctly_handles_missing_license(
    httpx_mock: HTTPXMock, fake_repo_response_no_license
):
    httpx_mock.add_response(
        url="https://api.github.com/repos/me/repo",
        json=fake_repo_response_no_license,
        status_code=200,
    )
    api = API(token="definitelynotatoken", username="me")
    assert api.get_repo_info(repo="repo").license is None
# Forking returns 202 Accepted from GitHub; the body still parses.
def test_create_fork(httpx_mock: HTTPXMock, fake_repo_response):
    httpx_mock.add_response(
        url="https://api.github.com/repos/someone/project/forks",
        json=fake_repo_response,
        status_code=202,
    )
    api = API(token="definitelynotatoken", username="me")
    assert api.create_fork(owner="someone", repo="project") == Repository(
        **fake_repo_response
    )
# get_forks() should filter the repo list down to fork == True entries.
def test_get_forks(httpx_mock: HTTPXMock, fake_repos_response):
    httpx_mock.add_response(
        url="https://api.github.com/user/repos?type=owner",
        json=fake_repos_response,
        status_code=200,
    )
    api = API(username="me", token="definitelynotatoken")
    # Should only return the ones where fork is True
    assert api.get_forks() == [
        Repository(**repo) for repo in fake_repos_response if repo["fork"]
    ]
def test_get_fork_names(httpx_mock: HTTPXMock, fake_repos_response):
    httpx_mock.add_response(
        url="https://api.github.com/user/repos?type=owner",
        json=fake_repos_response,
        status_code=200,
    )
    api = API(username="me", token="definitelynotatoken")
    # Should only return the ones where fork is True
    assert api.get_fork_names() == ["nox"]
| 26.226496 | 88 | 0.675411 |
import httpx
import pytest
from pytest_httpx import HTTPXMock
from pytoil.api import API
from pytoil.api.models import Repository, RepoSummaryInfo
def test_api_init():
api = API(username="me", token="sometoken")
assert api.username == "me"
assert api.token == "sometoken"
assert api.baseurl == "https://api.github.com/"
assert api.headers == {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"token {api.token}",
}
def test_api_repr():
api = API(username="me", token="sometoken")
assert repr(api) == "API(username='me', token='sometoken')"
@pytest.mark.parametrize("bad_status_code", [400, 401, 403, 404, 500, 504, 505])
def test_get_raises_on_bad_status(httpx_mock: HTTPXMock, bad_status_code):
httpx_mock.add_response(
url="https://api.github.com/user/repos", status_code=bad_status_code
)
api = API(token="definitelynotatoken", username="me")
with pytest.raises(httpx.HTTPStatusError):
api.get("user/repos")
def test_get_returns_correct_response(httpx_mock: HTTPXMock, fake_repos_response):
httpx_mock.add_response(
url="https://api.github.com/user/repos",
json=fake_repos_response,
status_code=200,
)
api = API(token="definitelynotatoken", username="me")
r = api.get("user/repos")
assert r == fake_repos_response
@pytest.mark.parametrize("bad_status_code", [400, 401, 403, 404, 500, 504, 505])
def test_post_raises_on_bad_status(httpx_mock: HTTPXMock, bad_status_code):
httpx_mock.add_response(
url="https://api.github.com/user/repos", status_code=bad_status_code
)
api = API(token="definitelynotatoken", username="me")
with pytest.raises(httpx.HTTPStatusError):
api.post("user/repos")
def test_post_returns_correct_response(httpx_mock: HTTPXMock, fake_repos_response):
httpx_mock.add_response(
url="https://api.github.com/user/repos",
json=fake_repos_response,
status_code=200,
)
api = API(token="definitelynotatoken", username="me")
r = api.post("user/repos")
assert r == fake_repos_response
def test_get_repo_returns_correct_response(httpx_mock: HTTPXMock, fake_repo_response):
httpx_mock.add_response(
url="https://api.github.com/repos/me/pytoil",
json=fake_repo_response,
status_code=200,
)
api = API(token="definitelynotatoken", username="me")
assert api.get_repo(repo="pytoil") == Repository(**fake_repo_response)
def test_get_repos_returns_correct_response(httpx_mock: HTTPXMock, fake_repos_response):
httpx_mock.add_response(
url="https://api.github.com/user/repos?type=owner",
json=fake_repos_response,
status_code=200,
)
api = API(token="definitelynotatoken", username="me")
assert api.get_repos() == [Repository(**repo) for repo in fake_repos_response]
def test_get_repo_names_returns_correct_names(
httpx_mock: HTTPXMock, fake_repos_response
):
httpx_mock.add_response(
url="https://api.github.com/user/repos?type=owner",
json=fake_repos_response,
status_code=200,
)
api = API(token="definitelynotatoken", username="me")
wanted_names = {
"aircraft_crashes",
"cookie_pypackage",
"eu_energy_analysis",
"FollowTheProcess",
"followtheprocess.github.io",
"goignore",
"gotoil",
"go_cookie",
"lightweight_ds_cookie",
"msc_project",
"nox",
"poetry_pypackage",
"pymechtest",
"pytoil",
"testygo",
}
assert api.get_repo_names() == wanted_names
def test_get_repo_info_returns_correct_info(httpx_mock: HTTPXMock, fake_repo_response):
httpx_mock.add_response(
url="https://api.github.com/repos/me/pytoil",
json=fake_repo_response,
status_code=200,
)
api = API(token="definitelynotatoken", username="me")
want = {
"name": "pytoil",
"description": "CLI to automate the development workflow :robot:",
"created_at": "2021-02-04T15:05:23Z",
"updated_at": "2021-09-01T15:29:56Z",
"size": 4443,
"license": "Apache License 2.0",
}
assert api.get_repo_info("pytoil") == RepoSummaryInfo(**want)
def test_get_repo_info_correctly_handles_missing_license(
httpx_mock: HTTPXMock, fake_repo_response_no_license
):
httpx_mock.add_response(
url="https://api.github.com/repos/me/repo",
json=fake_repo_response_no_license,
status_code=200,
)
api = API(token="definitelynotatoken", username="me")
assert api.get_repo_info(repo="repo").license is None
def test_create_fork(httpx_mock: HTTPXMock, fake_repo_response):
httpx_mock.add_response(
url="https://api.github.com/repos/someone/project/forks",
json=fake_repo_response,
status_code=202,
)
api = API(token="definitelynotatoken", username="me")
assert api.create_fork(owner="someone", repo="project") == Repository(
**fake_repo_response
)
def test_get_forks(httpx_mock: HTTPXMock, fake_repos_response):
httpx_mock.add_response(
url="https://api.github.com/user/repos?type=owner",
json=fake_repos_response,
status_code=200,
)
api = API(username="me", token="definitelynotatoken")
assert api.get_forks() == [
Repository(**repo) for repo in fake_repos_response if repo["fork"]
]
def test_get_fork_names(httpx_mock: HTTPXMock, fake_repos_response):
httpx_mock.add_response(
url="https://api.github.com/user/repos?type=owner",
json=fake_repos_response,
status_code=200,
)
api = API(username="me", token="definitelynotatoken")
assert api.get_fork_names() == ["nox"]
| true | true |
1c3bb4a8700b7b6ceeb2282618fdb0a9777458d7 | 7,392 | py | Python | util/esum_cxx.py | yut23/Microphysics | 3c4985213c5e5b1ad2602b0bba2ce164b847361a | [
"BSD-3-Clause"
] | 16 | 2017-08-17T11:12:01.000Z | 2021-06-10T23:11:08.000Z | util/esum_cxx.py | Youhichka/Microphysics | 6f28333d40c9e15fdfbb1c4dc208e887fb5549c3 | [
"BSD-3-Clause"
] | 533 | 2017-06-08T13:52:11.000Z | 2022-01-28T16:13:29.000Z | util/esum_cxx.py | Youhichka/Microphysics | 6f28333d40c9e15fdfbb1c4dc208e887fb5549c3 | [
"BSD-3-Clause"
] | 34 | 2017-08-16T16:29:20.000Z | 2021-09-09T16:19:15.000Z | import os
import re
import argparse
module_start = """
// NOTE: THIS FILE IS AUTOMATICALLY GENERATED
// DO NOT EDIT BY HAND
// Re-run esum_cxx.py to update this file
// Fortran 2003 implementation of the msum routine
// provided by Raymond Hettinger:
// https://code.activestate.com/recipes/393090/
// This routine calculates the sum of N numbers
// exactly to within double precision arithmetic.
// Ported to C++.
// For perfomance reasons we implement a specialized
// version of esum for each possible value of N >= 3.
// Also for performance reasons, we explicitly unroll
// the outer loop of the msum method into groups of 3
// (and a group of 4 at the end, for even N). This seems
// to be significantly faster, but should still be exact
// to within the arithmetic because each one of the
// individual msums is (although this does not necessarily
// mean that the result is the same).
// This routine is called "esum" for generality
// because in principle we could add implementations
// other than msum that do exact arithmetic, without
// changing the interface as seen in the networks.
#ifndef _esum_H_
#define _esum_H_
#include <AMReX_REAL.H>
#include <AMReX_Array.H>
#include <ArrayUtilities.H>
using namespace amrex;
"""
module_end = """
#endif
"""
esum_base_template_start = """
template<int n, class T>
AMREX_GPU_HOST_DEVICE AMREX_INLINE
Real esum(T const& array)
{
// return value
Real sum = 0.0_rt;
switch (n) {
case 1:
sum = array(1);
break;
case 2:
sum = array(1) + array(2);
break;
"""
esum_base_template_end = """
default:
sum = 0.0_rt;
break;
}
return sum;
}
"""
esum_template_start = """
template<class T>
AMREX_GPU_HOST_DEVICE AMREX_INLINE
Real esum@NUM@(T const& array)
{
// return value
Real esum;
"""
esum_template_end = """
return esum;
}
"""
sum_template = """
esum = ArrayUtil::Math::sum(array, 1, @NUM@);
"""
kahan_template = """
esum = array(1);
Real x = 0._rt;
for (int i = 2; i <= @NUM@; ++i) {
Real y = array(i) - x;
Real z = esum + y;
x = (z - esum) - y;
esum = z;
}
"""
msum_template_start = """
// Indices for tracking the partials array.
// j keeps track of how many entries in partials are actually used.
// The algorithm we model this off of, written in Python, simply
// deletes array entries at the end of every outer loop iteration.
// The Fortran equivalent to this might be to just zero them out,
// but this results in a huge performance hit given how often
// this routine is called during in a burn. So we opt instead to
// just track how many of the values are meaningful, which j does
// automatically, and ignore any data in the remaining slots.
int i, j, k, km;
// Note that for performance reasons we are not
// initializing any unused values in this array.
Array1D<Real, 0, @NUMPARTIALS@> partials;
// Some temporary variables for holding intermediate data.
Real x, y, z;
// These temporary variables need to be explicitly
// constructed for the algorithm to make sense.
// If the compiler optimizes away the statement
// lo = y - (hi - x), the approach fails. This could
// be avoided with the volatile keyword, but at the
// expense of forcing additional memory usage
// which would slow down the calculation. Instead
// we will rely on the compiler not to optimize
// the statement away. This should be true for gcc
// by default but is not necessarily true for all
// compilers. In particular, Intel does not do this
// by default, so you must use the -assume-protect-parens
// flag for ifort.
Real hi, lo;
// The first partial is just the first term.
esum = array(1);
"""
msum_template = """
j = 0;
partials(0) = esum;
for (i = 2; i <= @NUM@; ++i) {
km = j;
j = 0;
x = array(i+@START@);
for (k = 0; k <= km; ++k) {
y = partials(k);
if (std::abs(x) < std::abs(y)) {
// Swap x, y
z = y;
y = x;
x = z;
}
hi = x + y;
lo = y - (hi - x);
if (lo != 0.0_rt) {
partials(j) = lo;
j++;
}
x = hi;
}
partials(j) = x;
}
esum = ArrayUtil::Math::sum(partials, 0, j);
"""
if __name__ == "__main__":
    # Summation strategy: -1 == plain sum(), 0 == msum (exact), 1 == Kahan.
    sum_method = 0
    # For msum, unroll the accumulation into groups of 3 terms (closing with
    # a group of 4) instead of emitting one big loop.
    unroll = True

    parser = argparse.ArgumentParser()
    parser.add_argument('-s', help='summation method: -1 == sum(); 0 == msum; 1 == Kahan')
    parser.add_argument('--unroll', help='For msum, should we explicitly unroll the loop?')
    args = parser.parse_args()

    # Identity comparison with None per PEP 8 (was `!= None`).
    if args.s is not None:
        sum_method = int(args.s)
    if args.unroll is not None:
        if args.unroll == "True":
            unroll = True
        elif args.unroll == "False":
            unroll = False
        else:
            raise ValueError("--unroll can only be True or False.")

    with open("esum.H", "w") as ef:
        ef.write(module_start)

        # Emit one specialized esum<N>() implementation per supported length.
        first_esum = 3
        last_esum = 30
        for num in range(first_esum, last_esum + 1):
            ef.write(esum_template_start.replace("@NUM@", str(num)))
            if sum_method == -1:
                # ArrayUtil::Math::sum is just a sequential loop
                ef.write(sum_template.replace("@NUM@", str(num)))
            elif sum_method == 0:
                # msum (exact summation)
                if unroll:
                    ef.write(msum_template_start.replace("@NUM@", str(num)).replace("@NUMPARTIALS@", str(4)))
                    i = 1
                    while i < num:
                        # i starts at 1 and only grows, so the group's offset
                        # into the input array is simply i - 1 (the previous
                        # `if i > 0` guard was unreachable dead code).
                        offset = i - 1
                        if i == num - 3:
                            # Final group of 4 consumes the remaining terms so
                            # that exactly `num` terms are accumulated.
                            ef.write(msum_template.replace("@START@", str(offset)).replace("@NUM@", str(4)))
                            break
                        ef.write(msum_template.replace("@START@", str(offset)).replace("@NUM@", str(3)))
                        i += 2
                else:
                    # Single msum pass over the whole array.
                    ef.write(msum_template_start.replace("@NUM@", str(num)).replace("@NUMPARTIALS@", str(num - 1)))
                    ef.write(msum_template.replace("@START@", str(0)).replace("@NUM@", str(num)))
            elif sum_method == 1:
                # Kahan compensated summation
                ef.write(kahan_template.replace("@NUM@", str(num)))
            else:
                raise ValueError("Unknown summation method.")
            ef.write(esum_template_end.replace("@NUM@", str(num)))
            ef.write("\n")

        # Now write out the base template that dispatches to all the others.
        ef.write(esum_base_template_start)
        for num in range(first_esum, last_esum + 1):
            ef.write(" case {}:\n".format(num))
            ef.write(" sum = esum{}(array);\n".format(num))
            ef.write(" break;\n")
        ef.write(esum_base_template_end)
        ef.write("\n")
        ef.write(module_end)
| 24.805369 | 113 | 0.552489 | import os
import re
import argparse
module_start = """
// NOTE: THIS FILE IS AUTOMATICALLY GENERATED
// DO NOT EDIT BY HAND
// Re-run esum_cxx.py to update this file
// Fortran 2003 implementation of the msum routine
// provided by Raymond Hettinger:
// https://code.activestate.com/recipes/393090/
// This routine calculates the sum of N numbers
// exactly to within double precision arithmetic.
// Ported to C++.
// For perfomance reasons we implement a specialized
// version of esum for each possible value of N >= 3.
// Also for performance reasons, we explicitly unroll
// the outer loop of the msum method into groups of 3
// (and a group of 4 at the end, for even N). This seems
// to be significantly faster, but should still be exact
// to within the arithmetic because each one of the
// individual msums is (although this does not necessarily
// mean that the result is the same).
// This routine is called "esum" for generality
// because in principle we could add implementations
// other than msum that do exact arithmetic, without
// changing the interface as seen in the networks.
#ifndef _esum_H_
#define _esum_H_
#include <AMReX_REAL.H>
#include <AMReX_Array.H>
#include <ArrayUtilities.H>
using namespace amrex;
"""
module_end = """
#endif
"""
esum_base_template_start = """
template<int n, class T>
AMREX_GPU_HOST_DEVICE AMREX_INLINE
Real esum(T const& array)
{
// return value
Real sum = 0.0_rt;
switch (n) {
case 1:
sum = array(1);
break;
case 2:
sum = array(1) + array(2);
break;
"""
esum_base_template_end = """
default:
sum = 0.0_rt;
break;
}
return sum;
}
"""
esum_template_start = """
template<class T>
AMREX_GPU_HOST_DEVICE AMREX_INLINE
Real esum@NUM@(T const& array)
{
// return value
Real esum;
"""
esum_template_end = """
return esum;
}
"""
sum_template = """
esum = ArrayUtil::Math::sum(array, 1, @NUM@);
"""
kahan_template = """
esum = array(1);
Real x = 0._rt;
for (int i = 2; i <= @NUM@; ++i) {
Real y = array(i) - x;
Real z = esum + y;
x = (z - esum) - y;
esum = z;
}
"""
msum_template_start = """
// Indices for tracking the partials array.
// j keeps track of how many entries in partials are actually used.
// The algorithm we model this off of, written in Python, simply
// deletes array entries at the end of every outer loop iteration.
// The Fortran equivalent to this might be to just zero them out,
// but this results in a huge performance hit given how often
// this routine is called during in a burn. So we opt instead to
// just track how many of the values are meaningful, which j does
// automatically, and ignore any data in the remaining slots.
int i, j, k, km;
// Note that for performance reasons we are not
// initializing any unused values in this array.
Array1D<Real, 0, @NUMPARTIALS@> partials;
// Some temporary variables for holding intermediate data.
Real x, y, z;
// These temporary variables need to be explicitly
// constructed for the algorithm to make sense.
// If the compiler optimizes away the statement
// lo = y - (hi - x), the approach fails. This could
// be avoided with the volatile keyword, but at the
// expense of forcing additional memory usage
// which would slow down the calculation. Instead
// we will rely on the compiler not to optimize
// the statement away. This should be true for gcc
// by default but is not necessarily true for all
// compilers. In particular, Intel does not do this
// by default, so you must use the -assume-protect-parens
// flag for ifort.
Real hi, lo;
// The first partial is just the first term.
esum = array(1);
"""
msum_template = """
j = 0;
partials(0) = esum;
for (i = 2; i <= @NUM@; ++i) {
km = j;
j = 0;
x = array(i+@START@);
for (k = 0; k <= km; ++k) {
y = partials(k);
if (std::abs(x) < std::abs(y)) {
// Swap x, y
z = y;
y = x;
x = z;
}
hi = x + y;
lo = y - (hi - x);
if (lo != 0.0_rt) {
partials(j) = lo;
j++;
}
x = hi;
}
partials(j) = x;
}
esum = ArrayUtil::Math::sum(partials, 0, j);
"""
if __name__ == "__main__":
sum_method = 0
unroll = True
parser = argparse.ArgumentParser()
parser.add_argument('-s', help='summation method: -1 == sum(); 0 == msum; 1 == Kahan')
parser.add_argument('--unroll', help='For msum, should we explicitly unroll the loop?')
args = parser.parse_args()
if args.s != None:
sum_method = int(args.s)
if args.unroll != None:
if args.unroll == "True":
unroll = True
elif args.unroll == "False":
unroll = False
else:
raise ValueError("--unroll can only be True or False.")
with open("esum.H", "w") as ef:
ef.write(module_start)
first_esum = 3;
last_esum = 30;
for num in range(first_esum, last_esum + 1):
ef.write(esum_template_start.replace("@NUM@", str(num)))
if sum_method == -1:
ef.write(sum_template.replace("@NUM@", str(num)))
elif sum_method == 0:
if unroll:
ef.write(msum_template_start.replace("@NUM@", str(num)).replace("@NUMPARTIALS@", str(4)))
i = 1
while (i < num):
if (i == num - 3):
if (i > 0):
offset = i-1
else:
offset = 0
ef.write(msum_template.replace("@START@", str(offset)).replace("@NUM@", str(4)))
break
else:
if (i > 0):
offset = i-1
else:
offset = 0
ef.write(msum_template.replace("@START@", str(offset)).replace("@NUM@", str(3)))
i += 2
else:
ef.write(msum_template_start.replace("@NUM@", str(num)).replace("@NUMPARTIALS@", str(num-1)))
ef.write(msum_template.replace("@START@", str(0)).replace("@NUM@", str(num)))
elif sum_method == 1:
ef.write(kahan_template.replace("@NUM@", str(num)))
else:
raise ValueError("Unknown summation method.")
ef.write(esum_template_end.replace("@NUM@", str(num)))
ef.write("\n")
ef.write(esum_base_template_start)
for num in range(first_esum, last_esum + 1):
ef.write(" case {}:\n".format(num))
ef.write(" sum = esum{}(array);\n".format(num))
ef.write(" break;\n")
ef.write(esum_base_template_end)
ef.write("\n")
ef.write(module_end)
| true | true |
1c3bb4c51056c6275e8d40ff8c2a85830d4c3984 | 8,686 | py | Python | tools/xctoolrunner/xctoolrunner.py | michaeleisel/rules_apple | 424c73847eba4d2a093fa59df1aa22b5629b0fda | [
"Apache-2.0"
] | null | null | null | tools/xctoolrunner/xctoolrunner.py | michaeleisel/rules_apple | 424c73847eba4d2a093fa59df1aa22b5629b0fda | [
"Apache-2.0"
] | null | null | null | tools/xctoolrunner/xctoolrunner.py | michaeleisel/rules_apple | 424c73847eba4d2a093fa59df1aa22b5629b0fda | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for "xcrun" tools.
This script only runs on Darwin and you must have Xcode installed.
Usage:
xctoolrunner [SUBCOMMAND] [<args>...]
Subcommands:
actool [<args>...]
coremlc [<args>...]
ibtool [<args>...]
mapc [<args>...]
momc [<args>...]
"""
import argparse
import os
import re
import sys
from build_bazel_rules_apple.tools.wrapper_common import execute
# This prefix is set for rules_apple rules in:
# apple/internal/utils/xctoolrunner.bzl
_PATH_PREFIX = "[ABSOLUTE]"
_PATH_PREFIX_LEN = len(_PATH_PREFIX)
def _apply_realpath(argv):
"""Run "realpath" on any path-related arguments.
Paths passed into the tool will be prefixed with the contents of _PATH_PREFIX.
If we find an argument with this prefix, we strip out the prefix and run
"realpath".
Args:
argv: A list of command line arguments.
"""
for i, arg in enumerate(argv):
if arg.startswith(_PATH_PREFIX):
arg = arg[_PATH_PREFIX_LEN:]
argv[i] = os.path.realpath(arg)
def ibtool_filtering(tool_exit_status, raw_stdout, raw_stderr):
  """Filter messages from ibtool.

  Args:
    tool_exit_status: The exit status of "xcrun ibtool".
    raw_stdout: The unmodified stdout captured from "xcrun ibtool".
    raw_stderr: The unmodified stderr captured from "xcrun ibtool".

  Returns:
    A tuple of the filtered stdout and stderr.
  """
  noise_patterns = [
      re.compile(r"WARNING: Unhandled destination metrics: \(null\)"),
  ]

  def _is_noise(message):
    return any(pattern.search(message) is not None
               for pattern in noise_patterns)

  kept = [line + "\n"
          for line in raw_stdout.splitlines()
          if not _is_noise(line)]

  # On successful runs ibtool sometimes prints internal assertions on stderr
  # ("Please file a bug report with Apple") that do not indicate a real
  # problem. Real diagnostics (asset warnings, etc.) go to stdout, so stderr
  # is dropped entirely when the tool succeeded.
  filtered_stderr = raw_stderr if tool_exit_status != 0 else None
  return ("".join(kept), filtered_stderr)
def ibtool(_, toolargs):
  """Assemble the call to "xcrun ibtool"."""
  command = [
      "xcrun",
      "ibtool",
      "--errors",
      "--warnings",
      "--notices",
      "--auto-activate-custom-fonts",
      "--output-format",
      "human-readable-text",
  ]
  _apply_realpath(toolargs)
  command.extend(toolargs)

  # Debugging aid: setting BOTH of the environment variables
  #   IBToolDebugLogFile=<OUTPUT FILE PATH>
  #   IBToolDebugLogLevel=4
  # makes ibtool emit a detailed log; IBToolNeverDeque=1 may also help.
  return_code, _, _ = execute.execute_and_filter_output(
      command,
      trim_paths=True,
      filtering=ibtool_filtering,
      print_output=True)
  return return_code
def actool_filtering(tool_exit_status, raw_stdout, raw_stderr):
  """Filter the stdout messages from "actool".

  actool groups its stdout into "/* section */" blocks. Sections known to
  carry no useful information are dropped, as are a handful of known-spurious
  notices; section headers are only re-emitted for sections that still have
  content after filtering.

  Args:
    tool_exit_status: The exit status of "xcrun actool".
    raw_stdout: The unmodified stdout captured from "xcrun actool".
    raw_stderr: The unmodified stderr captured from "xcrun actool".

  Returns:
    A tuple of the filtered stdout and stderr.
  """
  section_header = re.compile("^/\\* ([^ ]+) \\*/$")
  excluded_sections = ["com.apple.actool.compilation-results"]

  noise_patterns = [
      re.compile(x) for x in [
          r"\[\]\[ipad\]\[76x76\]\[\]\[\]\[1x\]\[\]\[\]: notice: \(null\)",
          r"\[\]\[ipad\]\[76x76\]\[\]\[\]\[1x\]\[\]\[\]: notice: 76x76@1x app "
          r"icons only apply to iPad apps targeting releases of iOS prior to "
          r"10\.0\.",
      ]
  ]

  def _is_noise(line):
    return any(p.search(line) is not None for p in noise_patterns)

  filtered = []
  section = None
  header_emitted = False
  for line in raw_stdout.splitlines():
    match = section_header.search(line)
    if match:
      section = match.group(1)
      header_emitted = False
      continue

    if section is None:
      # Preamble before the first section header passes through untouched.
      filtered.append(line + "\n")
    elif section not in excluded_sections:
      if _is_noise(line):
        continue
      if not header_emitted:
        # Lazily re-emit the header so fully-filtered sections vanish.
        header_emitted = True
        filtered.append("/* %s */\n" % section)
      filtered.append(line + "\n")

  # On successful runs actool sometimes prints internal assertions on stderr
  # ("Please file a bug report with Apple") that do not indicate a real
  # problem. Real diagnostics go to stdout, so drop stderr on success.
  if tool_exit_status == 0:
    raw_stderr = None

  return ("".join(filtered), raw_stderr)
def actool(_, toolargs):
  """Assemble the call to "xcrun actool"."""
  command = [
      "xcrun",
      "actool",
      "--errors",
      "--warnings",
      "--notices",
      "--output-format",
      "human-readable-text",
  ]
  _apply_realpath(toolargs)
  command += toolargs

  # "actool" requires the directory named right after "--compile" (its
  # output location) to already exist, so create an empty one if needed.
  for position, argument in enumerate(toolargs):
    if argument == "--compile":
      compile_dir = toolargs[position + 1]
      if not os.path.exists(compile_dir):
        os.makedirs(compile_dir)
      break

  # Debugging aid: setting BOTH of the environment variables
  #   IBToolDebugLogFile=<OUTPUT FILE PATH>
  #   IBToolDebugLogLevel=4
  # makes the tool emit a detailed log; IBToolNeverDeque=1 may also help.
  # The IBTOOL prefix is correct here: "actool" and "ibtool" are based on
  # the same codebase.
  return_code, _, _ = execute.execute_and_filter_output(
      command,
      trim_paths=True,
      filtering=actool_filtering,
      print_output=True)
  return return_code
def coremlc(_, toolargs):
  """Assemble the call to "xcrun coremlc"."""
  command = ["xcrun", "coremlc"]
  _apply_realpath(toolargs)
  command.extend(toolargs)

  return_code, _, _ = execute.execute_and_filter_output(
      command, print_output=True)
  return return_code
def momc(_, toolargs):
  """Assemble the call to "xcrun momc"."""
  command = ["xcrun", "momc"]
  _apply_realpath(toolargs)
  command.extend(toolargs)

  return_code, _, _ = execute.execute_and_filter_output(
      command, print_output=True)
  return return_code
def mapc(_, toolargs):
  """Assemble the call to "xcrun mapc"."""
  command = ["xcrun", "mapc"]
  _apply_realpath(toolargs)
  command.extend(toolargs)

  return_code, _, _ = execute.execute_and_filter_output(
      command, print_output=True)
  return return_code
def main(argv):
  """Parse the subcommand from argv and dispatch to the matching tool wrapper.

  Exits the process with the wrapped tool's return code.

  Args:
    argv: Command line arguments excluding the program name. The first entry
        selects the subcommand (ibtool, actool, coremlc, momc, mapc); all
        arguments argparse does not recognize are forwarded to the tool.
  """
  parser = argparse.ArgumentParser()
  subparsers = parser.add_subparsers()

  # IBTOOL Argument Parser
  ibtool_parser = subparsers.add_parser("ibtool")
  ibtool_parser.set_defaults(func=ibtool)

  # ACTOOL Argument Parser
  actool_parser = subparsers.add_parser("actool")
  actool_parser.set_defaults(func=actool)

  # COREMLC Argument Parser (previously misassigned to `mapc_parser`, which
  # made the code misleading; each subparser now has its own name).
  coremlc_parser = subparsers.add_parser("coremlc")
  coremlc_parser.set_defaults(func=coremlc)

  # MOMC Argument Parser
  momc_parser = subparsers.add_parser("momc")
  momc_parser.set_defaults(func=momc)

  # MAPC Argument Parser
  mapc_parser = subparsers.add_parser("mapc")
  mapc_parser.set_defaults(func=mapc)

  # Parse the command line and execute the selected subcommand, propagating
  # its return code as the process exit status.
  args, toolargs = parser.parse_known_args(argv)
  sys.exit(args.func(args, toolargs))


if __name__ == "__main__":
  main(sys.argv[1:])
| 28.019355 | 80 | 0.677872 |
import argparse
import os
import re
import sys
from build_bazel_rules_apple.tools.wrapper_common import execute
_PATH_PREFIX = "[ABSOLUTE]"
_PATH_PREFIX_LEN = len(_PATH_PREFIX)
def _apply_realpath(argv):
for i, arg in enumerate(argv):
if arg.startswith(_PATH_PREFIX):
arg = arg[_PATH_PREFIX_LEN:]
argv[i] = os.path.realpath(arg)
def ibtool_filtering(tool_exit_status, raw_stdout, raw_stderr):
spurious_patterns = [
re.compile(x)
for x in [r"WARNING: Unhandled destination metrics: \(null\)"]
]
def is_spurious_message(line):
for pattern in spurious_patterns:
match = pattern.search(line)
if match is not None:
return True
return False
stdout = []
for line in raw_stdout.splitlines():
if not is_spurious_message(line):
stdout.append(line + "\n")
# (warnings about assets, etc.) is reported on stdout, just drop stderr
# on successful runs.
if tool_exit_status == 0:
raw_stderr = None
return ("".join(stdout), raw_stderr)
def ibtool(_, toolargs):
xcrunargs = ["xcrun",
"ibtool",
"--errors",
"--warnings",
"--notices",
"--auto-activate-custom-fonts",
"--output-format",
"human-readable-text"]
_apply_realpath(toolargs)
xcrunargs += toolargs
# If we are running into problems figuring out "ibtool" issues, there are a
# couple of environment variables that may help. Both of the following must be
# set to work.
# IBToolDebugLogFile=<OUTPUT FILE PATH>
# IBToolDebugLogLevel=4
# You may also see if
# IBToolNeverDeque=1
# helps.
return_code, _, _ = execute.execute_and_filter_output(
xcrunargs,
trim_paths=True,
filtering=ibtool_filtering,
print_output=True)
return return_code
def actool_filtering(tool_exit_status, raw_stdout, raw_stderr):
section_header = re.compile("^/\\* ([^ ]+) \\*/$")
excluded_sections = ["com.apple.actool.compilation-results"]
spurious_patterns = [
re.compile(x) for x in [
r"\[\]\[ipad\]\[76x76\]\[\]\[\]\[1x\]\[\]\[\]: notice: \(null\)",
r"\[\]\[ipad\]\[76x76\]\[\]\[\]\[1x\]\[\]\[\]: notice: 76x76@1x app "
r"icons only apply to iPad apps targeting releases of iOS prior to "
r"10\.0\.",
]
]
def is_spurious_message(line):
for pattern in spurious_patterns:
match = pattern.search(line)
if match is not None:
return True
return False
output = []
current_section = None
data_in_section = False
for line in raw_stdout.splitlines():
header_match = section_header.search(line)
if header_match:
data_in_section = False
current_section = header_match.group(1)
continue
if not current_section:
output.append(line + "\n")
elif current_section not in excluded_sections:
if is_spurious_message(line):
continue
if not data_in_section:
data_in_section = True
output.append("/* %s */\n" % current_section)
output.append(line + "\n")
# Some of the time, in a successful run, actool reports on stderr some
# internal assertions and ask "Please file a bug report with Apple", but
# it isn't clear that there is really a problem. Since everything else
if tool_exit_status == 0:
raw_stderr = None
return ("".join(output), raw_stderr)
def actool(_, toolargs):
xcrunargs = ["xcrun",
"actool",
"--errors",
"--warnings",
"--notices",
"--output-format",
"human-readable-text"]
_apply_realpath(toolargs)
xcrunargs += toolargs
for idx, arg in enumerate(toolargs):
if arg == "--compile":
output_dir = toolargs[idx + 1]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
break
# If we are running into problems figuring out "actool" issues, there are a
# couple of environment variables that may help. Both of the following must be
# set to work.
# IBToolDebugLogFile=<OUTPUT FILE PATH>
# IBToolDebugLogLevel=4
# You may also see if
# IBToolNeverDeque=1
# helps.
# Yes, IBTOOL appears to be correct here due to "actool" and "ibtool" being
# based on the same codebase.
return_code, _, _ = execute.execute_and_filter_output(
xcrunargs,
trim_paths=True,
filtering=actool_filtering,
print_output=True)
return return_code
def coremlc(_, toolargs):
xcrunargs = ["xcrun", "coremlc"]
_apply_realpath(toolargs)
xcrunargs += toolargs
return_code, _, _ = execute.execute_and_filter_output(
xcrunargs,
print_output=True)
return return_code
def momc(_, toolargs):
xcrunargs = ["xcrun", "momc"]
_apply_realpath(toolargs)
xcrunargs += toolargs
return_code, _, _ = execute.execute_and_filter_output(
xcrunargs,
print_output=True)
return return_code
def mapc(_, toolargs):
xcrunargs = ["xcrun", "mapc"]
_apply_realpath(toolargs)
xcrunargs += toolargs
return_code, _, _ = execute.execute_and_filter_output(
xcrunargs,
print_output=True)
return return_code
def main(argv):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
# IBTOOL Argument Parser
ibtool_parser = subparsers.add_parser("ibtool")
ibtool_parser.set_defaults(func=ibtool)
# ACTOOL Argument Parser
actool_parser = subparsers.add_parser("actool")
actool_parser.set_defaults(func=actool)
# COREMLC Argument Parser
mapc_parser = subparsers.add_parser("coremlc")
mapc_parser.set_defaults(func=coremlc)
# MOMC Argument Parser
momc_parser = subparsers.add_parser("momc")
momc_parser.set_defaults(func=momc)
# MAPC Argument Parser
mapc_parser = subparsers.add_parser("mapc")
mapc_parser.set_defaults(func=mapc)
# Parse the command line and execute subcommand
args, toolargs = parser.parse_known_args(argv)
sys.exit(args.func(args, toolargs))
if __name__ == "__main__":
main(sys.argv[1:])
| true | true |
1c3bb5d7b8ef35ba870c07dc260cf6862e87d7c9 | 544 | py | Python | backend/todo/migrations/0006_auto_20190403_0603.py | Bhunesh2000/todoWithDjango | e5fa52a087180b66ae283e6b36fe790323d7b920 | [
"MIT"
] | null | null | null | backend/todo/migrations/0006_auto_20190403_0603.py | Bhunesh2000/todoWithDjango | e5fa52a087180b66ae283e6b36fe790323d7b920 | [
"MIT"
] | 11 | 2019-04-03T09:49:17.000Z | 2022-02-10T08:23:26.000Z | backend/todo/migrations/0006_auto_20190403_0603.py | Bhunesh2000/todoWithDjango | e5fa52a087180b66ae283e6b36fe790323d7b920 | [
"MIT"
] | 1 | 2019-10-21T19:26:29.000Z | 2019-10-21T19:26:29.000Z | # Generated by Django 2.2 on 2019-04-03 06:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``completed`` flag to the Todo model and make ``timeTaskl`` nullable."""
    dependencies = [
        ('todo', '0005_auto_20190403_0445'),
    ]
    operations = [
        # New per-item completion flag. default=False backfills existing
        # rows; null=True additionally permits NULL at the database level.
        migrations.AddField(
            model_name='todo',
            name='completed',
            field=models.BooleanField(default=False, null=True),
        ),
        # Relax the existing time field so it may be left empty.
        migrations.AlterField(
            model_name='todo',
            name='timeTaskl',
            field=models.TimeField(null=True),
        ),
    ]
| 22.666667 | 64 | 0.573529 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('todo', '0005_auto_20190403_0445'),
]
operations = [
migrations.AddField(
model_name='todo',
name='completed',
field=models.BooleanField(default=False, null=True),
),
migrations.AlterField(
model_name='todo',
name='timeTaskl',
field=models.TimeField(null=True),
),
]
| true | true |
1c3bb5ef3ecbbd63192d2619e582be5c16041aec | 569 | py | Python | asgi_tortoise_orm/middleware.py | erm/asgi-tortoise-orm | 6f192c6418d6f0d7144e7fd7c2f01f0498ff4020 | [
"MIT"
] | 1 | 2019-05-27T08:51:18.000Z | 2019-05-27T08:51:18.000Z | asgi_tortoise_orm/middleware.py | erm/asgi-tortoise-orm | 6f192c6418d6f0d7144e7fd7c2f01f0498ff4020 | [
"MIT"
] | null | null | null | asgi_tortoise_orm/middleware.py | erm/asgi-tortoise-orm | 6f192c6418d6f0d7144e7fd7c2f01f0498ff4020 | [
"MIT"
] | 1 | 2018-11-14T10:15:42.000Z | 2018-11-14T10:15:42.000Z | from tortoise import Tortoise
class TortoiseMiddleware:
    """ASGI middleware factory that wraps each connection in a Tortoise responder."""

    def __init__(self, app):
        self.app = app

    def __call__(self, scope):
        # One responder per connection scope (ASGI two-callable style).
        responder = _TortoiseResponder(self.app, scope)
        return responder
class _TortoiseResponder:
def __init__(self, app, scope):
self.app = app
self.scope = scope
async def __call__(self, receive, send):
await Tortoise.init(
db_url="postgres://localhost:5432/tortoise",
modules={"models": ["myapp.models"]},
)
asgi = self.app(self.scope)
await asgi(receive, send)
| 23.708333 | 56 | 0.615114 | from tortoise import Tortoise
class TortoiseMiddleware:
def __init__(self, app):
self.app = app
def __call__(self, scope):
return _TortoiseResponder(self.app, scope)
class _TortoiseResponder:
def __init__(self, app, scope):
self.app = app
self.scope = scope
async def __call__(self, receive, send):
await Tortoise.init(
db_url="postgres://localhost:5432/tortoise",
modules={"models": ["myapp.models"]},
)
asgi = self.app(self.scope)
await asgi(receive, send)
| true | true |
1c3bb64903af96d443a57b5692c22d59027102c2 | 2,892 | py | Python | tests/unit/modules/test_win_certutil.py | nevins-b/salt | 56363bc41ca36e757103df3504d1bb07e3a7251b | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_win_certutil.py | nevins-b/salt | 56363bc41ca36e757103df3504d1bb07e3a7251b | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_win_certutil.py | nevins-b/salt | 56363bc41ca36e757103df3504d1bb07e3a7251b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Libs
import salt.modules.win_certutil as certutil
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import (
MagicMock,
patch
)
class CertUtilTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {certutil: {}}
def test_get_serial(self):
'''
Test getting the serial number from a certificate
'''
expected = 'XYZABC'
mock = MagicMock(return_value='CertInfo\r\nSerial: XYZABC\r\nOtherStuff')
with patch.dict(certutil.__salt__, {'cmd.run': mock}):
out = certutil.get_cert_serial('/path/to/cert.cer')
mock.assert_called_once_with('certutil.exe -verify /path/to/cert.cer')
self.assertEqual(expected, out)
def test_get_serials(self):
'''
Test getting the all the serial numbers from a store
'''
expected = ['XYZABC', '123456']
mock = MagicMock(return_value='CertInfo\r\nSerial Number: XYZABC\r\nSerial Number: 123456\r\n')
with patch.dict(certutil.__salt__, {'cmd.run': mock}):
out = certutil.get_stored_cert_serials('TrustedPublisher')
mock.assert_called_once_with('certutil.exe -store TrustedPublisher')
self.assertEqual(expected, out)
def test_add_store(self):
'''
Test adding a certificate to a specific store
'''
cmd_mock = MagicMock(return_value='CertInfo\r\nSerial: XYZABC\r\nOtherStuff')
cache_mock = MagicMock(return_value='/tmp/cert.cer')
with patch.dict(certutil.__salt__, {'cmd.run': cmd_mock,
'cp.cache_file': cache_mock}):
certutil.add_store('salt://path/to/file', 'TrustedPublisher')
cmd_mock.assert_called_once_with('certutil.exe -addstore TrustedPublisher /tmp/cert.cer')
cache_mock.assert_called_once_with('salt://path/to/file', 'base')
@patch('salt.modules.win_certutil.get_cert_serial')
def test_del_store(self, cert_serial_mock):
'''
Test removing a certificate to a specific store
'''
cmd_mock = MagicMock(return_value='CertInfo\r\nSerial: XYZABC\r\nOtherStuff')
cache_mock = MagicMock(return_value='/tmp/cert.cer')
cert_serial_mock.return_value = "ABCDEF"
with patch.dict(certutil.__salt__, {'cmd.run': cmd_mock,
'cp.cache_file': cache_mock}):
certutil.del_store('salt://path/to/file', 'TrustedPublisher')
cmd_mock.assert_called_once_with('certutil.exe -delstore TrustedPublisher ABCDEF')
cache_mock.assert_called_once_with('salt://path/to/file', 'base')
| 41.314286 | 103 | 0.649032 |
from __future__ import absolute_import
import salt.modules.win_certutil as certutil
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase
from tests.support.mock import (
MagicMock,
patch
)
class CertUtilTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {certutil: {}}
def test_get_serial(self):
expected = 'XYZABC'
mock = MagicMock(return_value='CertInfo\r\nSerial: XYZABC\r\nOtherStuff')
with patch.dict(certutil.__salt__, {'cmd.run': mock}):
out = certutil.get_cert_serial('/path/to/cert.cer')
mock.assert_called_once_with('certutil.exe -verify /path/to/cert.cer')
self.assertEqual(expected, out)
def test_get_serials(self):
expected = ['XYZABC', '123456']
mock = MagicMock(return_value='CertInfo\r\nSerial Number: XYZABC\r\nSerial Number: 123456\r\n')
with patch.dict(certutil.__salt__, {'cmd.run': mock}):
out = certutil.get_stored_cert_serials('TrustedPublisher')
mock.assert_called_once_with('certutil.exe -store TrustedPublisher')
self.assertEqual(expected, out)
def test_add_store(self):
cmd_mock = MagicMock(return_value='CertInfo\r\nSerial: XYZABC\r\nOtherStuff')
cache_mock = MagicMock(return_value='/tmp/cert.cer')
with patch.dict(certutil.__salt__, {'cmd.run': cmd_mock,
'cp.cache_file': cache_mock}):
certutil.add_store('salt://path/to/file', 'TrustedPublisher')
cmd_mock.assert_called_once_with('certutil.exe -addstore TrustedPublisher /tmp/cert.cer')
cache_mock.assert_called_once_with('salt://path/to/file', 'base')
@patch('salt.modules.win_certutil.get_cert_serial')
def test_del_store(self, cert_serial_mock):
cmd_mock = MagicMock(return_value='CertInfo\r\nSerial: XYZABC\r\nOtherStuff')
cache_mock = MagicMock(return_value='/tmp/cert.cer')
cert_serial_mock.return_value = "ABCDEF"
with patch.dict(certutil.__salt__, {'cmd.run': cmd_mock,
'cp.cache_file': cache_mock}):
certutil.del_store('salt://path/to/file', 'TrustedPublisher')
cmd_mock.assert_called_once_with('certutil.exe -delstore TrustedPublisher ABCDEF')
cache_mock.assert_called_once_with('salt://path/to/file', 'base')
| true | true |
1c3bb65200aaa36dc133fe48545071a401c25379 | 13,259 | py | Python | ORIGINAL/discord/ext/tasks/__init__.py | DiEVeXx/discord.py | 812e5eb5ee611689dab272534a5c65d0102e5957 | [
"MIT"
] | null | null | null | ORIGINAL/discord/ext/tasks/__init__.py | DiEVeXx/discord.py | 812e5eb5ee611689dab272534a5c65d0102e5957 | [
"MIT"
] | null | null | null | ORIGINAL/discord/ext/tasks/__init__.py | DiEVeXx/discord.py | 812e5eb5ee611689dab272534a5c65d0102e5957 | [
"MIT"
] | null | null | null | import asyncio
import datetime
import aiohttp
import websockets
from ORIGINAL import discord
import inspect
import logging
from ORIGINAL.discord.backoff import ExponentialBackoff
log = logging.getLogger(__name__)
class Loop:
"""A background task helper that abstracts the loop and reconnection logic for you.
The main interface to create this is through :func:`loop`.
"""
    def __init__(self, coro, seconds, hours, minutes, count, reconnect, loop):
        # Coroutine function executed once per iteration.
        self.coro = coro
        # If True, exceptions listed in _valid_exception trigger an
        # exponential-backoff retry instead of killing the task.
        self.reconnect = reconnect
        self.loop = loop or asyncio.get_event_loop()
        # Total iterations to run; None means loop forever.
        self.count = count
        self._current_loop = 0
        self._task = None
        # Owning instance captured by __get__ (when accessed as an
        # attribute); forwarded to the before_loop/after_loop hooks.
        self._injected = None
        # Transient, network-style errors treated as retryable by _loop.
        self._valid_exception = (
            OSError,
            discord.HTTPException,
            discord.GatewayNotFound,
            discord.ConnectionClosed,
            aiohttp.ClientError,
            asyncio.TimeoutError,
            websockets.InvalidHandshake,
            websockets.WebSocketProtocolError,
        )
        self._before_loop = None
        self._after_loop = None
        self._is_being_cancelled = False
        self._has_failed = False
        self._stop_next_iteration = False
        if self.count is not None and self.count <= 0:
            raise ValueError('count must be greater than 0 or None.')
        # Validates/stores the interval; presumably also sets self._sleep
        # (read by next_iteration) — definition not visible here.
        self.change_interval(seconds=seconds, minutes=minutes, hours=hours)
        self._last_iteration = None
        self._next_iteration = None
        if not inspect.iscoroutinefunction(self.coro):
            raise TypeError('Expected coroutine function, not {0.__name__!r}.'.format(type(self.coro)))
async def _call_loop_function(self, name):
coro = getattr(self, '_' + name)
if coro is None:
return
if self._injected is not None:
await coro(self._injected)
else:
await coro()
async def _loop(self, *args, **kwargs):
backoff = ExponentialBackoff()
await self._call_loop_function('before_loop')
sleep_until = ORIGINAL.discord.utils.sleep_until
self._next_iteration = datetime.datetime.now(datetime.timezone.utc)
try:
while True:
self._last_iteration = self._next_iteration
self._next_iteration = self._get_next_sleep_time()
try:
await self.coro(*args, **kwargs)
now = datetime.datetime.now(datetime.timezone.utc)
if now > self._next_iteration:
self._next_iteration = now
except self._valid_exception as exc:
if not self.reconnect:
raise
await asyncio.sleep(backoff.delay())
else:
if self._stop_next_iteration:
return
self._current_loop += 1
if self._current_loop == self.count:
break
await sleep_until(self._next_iteration)
except asyncio.CancelledError:
self._is_being_cancelled = True
raise
except Exception:
self._has_failed = True
log.exception('Internal background task failed.')
raise
finally:
await self._call_loop_function('after_loop')
self._is_being_cancelled = False
self._current_loop = 0
self._stop_next_iteration = False
self._has_failed = False
def __get__(self, obj, objtype):
if obj is None:
return self
self._injected = obj
return self
@property
def current_loop(self):
""":class:`int`: The current iteration of the loop."""
return self._current_loop
@property
def next_iteration(self):
"""Optional[:class:`datetime.datetime`]: When the next iteration of the loop will occur.
.. versionadded:: 1.3
"""
if self._task is None and self._sleep:
return None
elif self._task and self._task.done() or self._stop_next_iteration:
return None
return self._next_iteration
def start(self, *args, **kwargs):
r"""Starts the internal task in the event loop.
Parameters
------------
\*args
The arguments to to use.
\*\*kwargs
The keyword arguments to use.
Raises
--------
RuntimeError
A task has already been launched and is running.
Returns
---------
:class:`asyncio.Task`
The task that has been created.
"""
if self._task is not None and not self._task.done():
raise RuntimeError('Task is already launched and is not completed.')
if self._injected is not None:
args = (self._injected, *args)
self._task = self.loop.create_task(self._loop(*args, **kwargs))
return self._task
def stop(self):
r"""Gracefully stops the task from running.
Unlike :meth:`cancel`\, this allows the task to finish its
current iteration before gracefully exiting.
.. note::
If the internal function raises an error that can be
handled before finishing then it will retry until
it succeeds.
If this is undesirable, either remove the error handling
before stopping via :meth:`clear_exception_types` or
use :meth:`cancel` instead.
.. versionadded:: 1.2
"""
if self._task and not self._task.done():
self._stop_next_iteration = True
def _can_be_cancelled(self):
return not self._is_being_cancelled and self._task and not self._task.done()
def cancel(self):
"""Cancels the internal task, if it is running."""
if self._can_be_cancelled():
self._task.cancel()
def restart(self, *args, **kwargs):
r"""A convenience method to restart the internal task.
.. note::
Due to the way this function works, the task is not
returned like :meth:`start`.
Parameters
------------
\*args
The arguments to to use.
\*\*kwargs
The keyword arguments to use.
"""
def restart_when_over(fut, *, args=args, kwargs=kwargs):
self._task.remove_done_callback(restart_when_over)
self.start(*args, **kwargs)
if self._can_be_cancelled():
self._task.add_done_callback(restart_when_over)
self._task.cancel()
def add_exception_type(self, *exceptions):
r"""Adds exception types to be handled during the reconnect logic.
By default the exception types handled are those handled by
:meth:`discord.Client.connect`\, which includes a lot of internet disconnection
errors.
This function is useful if you're interacting with a 3rd party library that
raises its own set of exceptions.
Parameters
------------
\*exceptions: Type[:class:`BaseException`]
An argument list of exception classes to handle.
Raises
--------
TypeError
An exception passed is either not a class or not inherited from :class:`BaseException`.
"""
for exc in exceptions:
if not inspect.isclass(exc):
raise TypeError('{0!r} must be a class.'.format(exc))
if not issubclass(exc, BaseException):
raise TypeError('{0!r} must inherit from BaseException.'.format(exc))
self._valid_exception = (*self._valid_exception, *exceptions)
def clear_exception_types(self):
"""Removes all exception types that are handled.
.. note::
This operation obviously cannot be undone!
"""
self._valid_exception = tuple()
def remove_exception_type(self, *exceptions):
r"""Removes exception types from being handled during the reconnect logic.
Parameters
------------
\*exceptions: Type[:class:`BaseException`]
An argument list of exception classes to handle.
Returns
---------
:class:`bool`
Whether all exceptions were successfully removed.
"""
old_length = len(self._valid_exception)
self._valid_exception = tuple(x for x in self._valid_exception if x not in exceptions)
return len(self._valid_exception) == old_length - len(exceptions)
def get_task(self):
"""Optional[:class:`asyncio.Task`]: Fetches the internal task or ``None`` if there isn't one running."""
return self._task
def is_being_cancelled(self):
"""Whether the task is being cancelled."""
return self._is_being_cancelled
def failed(self):
""":class:`bool`: Whether the internal task has failed.
.. versionadded:: 1.2
"""
return self._has_failed
def before_loop(self, coro):
"""A decorator that registers a coroutine to be called before the loop starts running.
This is useful if you want to wait for some bot state before the loop starts,
such as :meth:`discord.Client.wait_until_ready`.
The coroutine must take no arguments (except ``self`` in a class context).
Parameters
------------
coro: :ref:`coroutine <coroutine>`
The coroutine to register before the loop runs.
Raises
-------
TypeError
The function was not a coroutine.
"""
if not inspect.iscoroutinefunction(coro):
raise TypeError('Expected coroutine function, received {0.__name__!r}.'.format(type(coro)))
self._before_loop = coro
return coro
def after_loop(self, coro):
"""A decorator that register a coroutine to be called after the loop finished running.
The coroutine must take no arguments (except ``self`` in a class context).
.. note::
This coroutine is called even during cancellation. If it is desirable
to tell apart whether something was cancelled or not, check to see
whether :meth:`is_being_cancelled` is ``True`` or not.
Parameters
------------
coro: :ref:`coroutine <coroutine>`
The coroutine to register after the loop finishes.
Raises
-------
TypeError
The function was not a coroutine.
"""
if not inspect.iscoroutinefunction(coro):
raise TypeError('Expected coroutine function, received {0.__name__!r}.'.format(type(coro)))
self._after_loop = coro
return coro
def _get_next_sleep_time(self):
return self._last_iteration + datetime.timedelta(seconds=self._sleep)
def change_interval(self, *, seconds=0, minutes=0, hours=0):
"""Changes the interval for the sleep time.
.. note::
This only applies on the next loop iteration. If it is desirable for the change of interval
to be applied right away, cancel the task with :meth:`cancel`.
.. versionadded:: 1.2
Parameters
------------
seconds: :class:`float`
The number of seconds between every iteration.
minutes: :class:`float`
The number of minutes between every iteration.
hours: :class:`float`
The number of hours between every iteration.
Raises
-------
ValueError
An invalid value was given.
"""
sleep = seconds + (minutes * 60.0) + (hours * 3600.0)
if sleep < 0:
raise ValueError('Total number of seconds cannot be less than zero.')
self._sleep = sleep
self.seconds = seconds
self.hours = hours
self.minutes = minutes
def loop(*, seconds=0, minutes=0, hours=0, count=None, reconnect=True, loop=None):
    """Decorator factory that wraps a coroutine function in a :class:`Loop`.

    The decorated coroutine is scheduled to run repeatedly in the background
    with the given interval, with optional automatic reconnect handling.

    Parameters
    ------------
    seconds: :class:`float`
        The number of seconds between every iteration.
    minutes: :class:`float`
        The number of minutes between every iteration.
    hours: :class:`float`
        The number of hours between every iteration.
    count: Optional[:class:`int`]
        The number of loops to do, ``None`` if it should be an
        infinite loop.
    reconnect: :class:`bool`
        Whether to handle errors and restart the task
        using an exponential back-off algorithm similar to the
        one used in :meth:`discord.Client.connect`.
    loop: :class:`asyncio.AbstractEventLoop`
        The loop to use to register the task, if not given
        defaults to :func:`asyncio.get_event_loop`.

    Raises
    --------
    ValueError
        An invalid value was given.
    TypeError
        The function was not a coroutine.
    """
    options = {
        'seconds': seconds,
        'minutes': minutes,
        'hours': hours,
        'count': count,
        'reconnect': reconnect,
        'loop': loop,
    }

    def decorator(func):
        # All validation (interval sign, count > 0, coroutine check) happens
        # inside Loop.__init__.
        return Loop(func, **options)

    return decorator
| 32.900744 | 112 | 0.598386 | import asyncio
import datetime
import aiohttp
import websockets
from ORIGINAL import discord
import inspect
import logging
from ORIGINAL.discord.backoff import ExponentialBackoff
log = logging.getLogger(__name__)
class Loop:
def __init__(self, coro, seconds, hours, minutes, count, reconnect, loop):
self.coro = coro
self.reconnect = reconnect
self.loop = loop or asyncio.get_event_loop()
self.count = count
self._current_loop = 0
self._task = None
self._injected = None
self._valid_exception = (
OSError,
discord.HTTPException,
discord.GatewayNotFound,
discord.ConnectionClosed,
aiohttp.ClientError,
asyncio.TimeoutError,
websockets.InvalidHandshake,
websockets.WebSocketProtocolError,
)
self._before_loop = None
self._after_loop = None
self._is_being_cancelled = False
self._has_failed = False
self._stop_next_iteration = False
if self.count is not None and self.count <= 0:
raise ValueError('count must be greater than 0 or None.')
self.change_interval(seconds=seconds, minutes=minutes, hours=hours)
self._last_iteration = None
self._next_iteration = None
if not inspect.iscoroutinefunction(self.coro):
raise TypeError('Expected coroutine function, not {0.__name__!r}.'.format(type(self.coro)))
async def _call_loop_function(self, name):
coro = getattr(self, '_' + name)
if coro is None:
return
if self._injected is not None:
await coro(self._injected)
else:
await coro()
async def _loop(self, *args, **kwargs):
backoff = ExponentialBackoff()
await self._call_loop_function('before_loop')
sleep_until = ORIGINAL.discord.utils.sleep_until
self._next_iteration = datetime.datetime.now(datetime.timezone.utc)
try:
while True:
self._last_iteration = self._next_iteration
self._next_iteration = self._get_next_sleep_time()
try:
await self.coro(*args, **kwargs)
now = datetime.datetime.now(datetime.timezone.utc)
if now > self._next_iteration:
self._next_iteration = now
except self._valid_exception as exc:
if not self.reconnect:
raise
await asyncio.sleep(backoff.delay())
else:
if self._stop_next_iteration:
return
self._current_loop += 1
if self._current_loop == self.count:
break
await sleep_until(self._next_iteration)
except asyncio.CancelledError:
self._is_being_cancelled = True
raise
except Exception:
self._has_failed = True
log.exception('Internal background task failed.')
raise
finally:
await self._call_loop_function('after_loop')
self._is_being_cancelled = False
self._current_loop = 0
self._stop_next_iteration = False
self._has_failed = False
def __get__(self, obj, objtype):
if obj is None:
return self
self._injected = obj
return self
@property
def current_loop(self):
return self._current_loop
@property
def next_iteration(self):
if self._task is None and self._sleep:
return None
elif self._task and self._task.done() or self._stop_next_iteration:
return None
return self._next_iteration
def start(self, *args, **kwargs):
if self._task is not None and not self._task.done():
raise RuntimeError('Task is already launched and is not completed.')
if self._injected is not None:
args = (self._injected, *args)
self._task = self.loop.create_task(self._loop(*args, **kwargs))
return self._task
def stop(self):
if self._task and not self._task.done():
self._stop_next_iteration = True
def _can_be_cancelled(self):
return not self._is_being_cancelled and self._task and not self._task.done()
def cancel(self):
if self._can_be_cancelled():
self._task.cancel()
def restart(self, *args, **kwargs):
def restart_when_over(fut, *, args=args, kwargs=kwargs):
self._task.remove_done_callback(restart_when_over)
self.start(*args, **kwargs)
if self._can_be_cancelled():
self._task.add_done_callback(restart_when_over)
self._task.cancel()
def add_exception_type(self, *exceptions):
for exc in exceptions:
if not inspect.isclass(exc):
raise TypeError('{0!r} must be a class.'.format(exc))
if not issubclass(exc, BaseException):
raise TypeError('{0!r} must inherit from BaseException.'.format(exc))
self._valid_exception = (*self._valid_exception, *exceptions)
def clear_exception_types(self):
self._valid_exception = tuple()
def remove_exception_type(self, *exceptions):
old_length = len(self._valid_exception)
self._valid_exception = tuple(x for x in self._valid_exception if x not in exceptions)
return len(self._valid_exception) == old_length - len(exceptions)
def get_task(self):
return self._task
def is_being_cancelled(self):
return self._is_being_cancelled
def failed(self):
return self._has_failed
def before_loop(self, coro):
if not inspect.iscoroutinefunction(coro):
raise TypeError('Expected coroutine function, received {0.__name__!r}.'.format(type(coro)))
self._before_loop = coro
return coro
def after_loop(self, coro):
if not inspect.iscoroutinefunction(coro):
raise TypeError('Expected coroutine function, received {0.__name__!r}.'.format(type(coro)))
self._after_loop = coro
return coro
def _get_next_sleep_time(self):
return self._last_iteration + datetime.timedelta(seconds=self._sleep)
def change_interval(self, *, seconds=0, minutes=0, hours=0):
sleep = seconds + (minutes * 60.0) + (hours * 3600.0)
if sleep < 0:
raise ValueError('Total number of seconds cannot be less than zero.')
self._sleep = sleep
self.seconds = seconds
self.hours = hours
self.minutes = minutes
def loop(*, seconds=0, minutes=0, hours=0, count=None, reconnect=True, loop=None):
def decorator(func):
return Loop(func, seconds=seconds, minutes=minutes, hours=hours,
count=count, reconnect=reconnect, loop=loop)
return decorator
| true | true |
1c3bb731700e025c1b8f1ddb272dc096b4d17c18 | 4,309 | py | Python | mlflow/store/gcs_artifact_repo.py | 0wu/mlflow | 2b5a21af05defcfa80255c081b5d9f07443f3f64 | [
"Apache-2.0"
] | null | null | null | mlflow/store/gcs_artifact_repo.py | 0wu/mlflow | 2b5a21af05defcfa80255c081b5d9f07443f3f64 | [
"Apache-2.0"
] | null | null | null | mlflow/store/gcs_artifact_repo.py | 0wu/mlflow | 2b5a21af05defcfa80255c081b5d9f07443f3f64 | [
"Apache-2.0"
] | null | null | null | import os
from six.moves import urllib
from mlflow.entities.file_info import FileInfo
from mlflow.store.artifact_repo import ArtifactRepository
from mlflow.utils.file_utils import build_path, get_relative_path, TempDir
class GCSArtifactRepository(ArtifactRepository):
    """
    Stores artifacts on Google Cloud Storage.
    Assumes the google credentials are available in the environment,
    see https://google-cloud.readthedocs.io/en/latest/core/auth.html.
    """
    def __init__(self, artifact_uri, client=None):
        # ``client`` exists for dependency injection in tests; by default the
        # ``google.cloud.storage`` module itself is stored and a fresh
        # ``Client()`` is created per operation.
        if client:
            self.gcs = client
        else:
            from google.cloud import storage as gcs_storage
            self.gcs = gcs_storage
        super(GCSArtifactRepository, self).__init__(artifact_uri)
    @staticmethod
    def parse_gcs_uri(uri):
        """Parse a GCS URI, returning (bucket, path)."""
        parsed = urllib.parse.urlparse(uri)
        if parsed.scheme != "gs":
            raise Exception("Not a GCS URI: %s" % uri)
        path = parsed.path
        if path.startswith('/'):
            # Drop the leading '/' that urlparse keeps on the path component.
            path = path[1:]
        return parsed.netloc, path
    def log_artifact(self, local_file, artifact_path=None):
        """Upload a single local file under the repository's artifact URI,
        optionally nested below ``artifact_path``."""
        (bucket, dest_path) = self.parse_gcs_uri(self.artifact_uri)
        if artifact_path:
            dest_path = build_path(dest_path, artifact_path)
        dest_path = build_path(dest_path, os.path.basename(local_file))
        gcs_bucket = self.gcs.Client().get_bucket(bucket)
        blob = gcs_bucket.blob(dest_path)
        blob.upload_from_filename(local_file)
    def log_artifacts(self, local_dir, artifact_path=None):
        """Recursively upload every file in ``local_dir``, preserving the
        directory layout relative to ``local_dir``."""
        (bucket, dest_path) = self.parse_gcs_uri(self.artifact_uri)
        if artifact_path:
            dest_path = build_path(dest_path, artifact_path)
        gcs_bucket = self.gcs.Client().get_bucket(bucket)
        local_dir = os.path.abspath(local_dir)
        for (root, _, filenames) in os.walk(local_dir):
            upload_path = dest_path
            if root != local_dir:
                rel_path = get_relative_path(local_dir, root)
                upload_path = build_path(dest_path, rel_path)
            for f in filenames:
                path = build_path(upload_path, f)
                gcs_bucket.blob(path).upload_from_filename(build_path(root, f))
    def list_artifacts(self, path=None):
        """Return sorted ``FileInfo`` entries (first-level sub-folders plus
        blobs) directly under ``path`` within the artifact root."""
        (bucket, artifact_path) = self.parse_gcs_uri(self.artifact_uri)
        dest_path = artifact_path
        if path:
            dest_path = build_path(dest_path, path)
        prefix = dest_path + "/"
        bkt = self.gcs.Client().get_bucket(bucket)
        infos = self._list_folders(bkt, prefix, artifact_path)
        # delimiter="/" restricts the listing to blobs at this level only.
        results = bkt.list_blobs(prefix=prefix, delimiter="/")
        for result in results:
            # Path relative to the artifact root (strip root prefix and '/').
            blob_path = result.name[len(artifact_path)+1:]
            infos.append(FileInfo(blob_path, False, result.size))
        return sorted(infos, key=lambda f: f.path)
    def _list_folders(self, bkt, prefix, artifact_path):
        """Return ``FileInfo`` entries for the "directories" directly under
        ``prefix`` (GCS exposes them as page prefixes when a delimiter is set)."""
        results = bkt.list_blobs(prefix=prefix, delimiter="/")
        dir_paths = set()
        for page in results.pages:
            dir_paths.update(page.prefixes)
        # The [:-1] slice drops the trailing '/' each prefix carries.
        return [FileInfo(path[len(artifact_path)+1:-1], True, None)for path in dir_paths]
    def download_artifacts(self, artifact_path):
        """Download ``artifact_path`` (file or directory) into a fresh temp
        directory and return the local path. ``remove_on_exit=False`` is
        deliberate: the caller needs the files after this method returns,
        so the temp directory is never cleaned up by this class."""
        with TempDir(remove_on_exit=False) as tmp:
            return self._download_artifacts_into(artifact_path, tmp.path())
    def _download_artifacts_into(self, artifact_path, dest_dir):
        """Private version of download_artifacts that takes a destination directory."""
        basename = os.path.basename(artifact_path)
        local_path = build_path(dest_dir, basename)
        # A non-empty listing means artifact_path names a "directory" in GCS.
        listing = self.list_artifacts(artifact_path)
        if len(listing) > 0:
            # Artifact_path is a directory, so make a directory for it and download everything
            os.mkdir(local_path)
            for file_info in listing:
                self._download_artifacts_into(file_info.path, local_path)
        else:
            (bucket, remote_path) = self.parse_gcs_uri(self.artifact_uri)
            remote_path = build_path(remote_path, artifact_path)
            gcs_bucket = self.gcs.Client().get_bucket(bucket)
            gcs_bucket.get_blob(remote_path).download_to_filename(local_path)
        return local_path
| 39.53211 | 94 | 0.660246 | import os
from six.moves import urllib
from mlflow.entities.file_info import FileInfo
from mlflow.store.artifact_repo import ArtifactRepository
from mlflow.utils.file_utils import build_path, get_relative_path, TempDir
class GCSArtifactRepository(ArtifactRepository):
def __init__(self, artifact_uri, client=None):
if client:
self.gcs = client
else:
from google.cloud import storage as gcs_storage
self.gcs = gcs_storage
super(GCSArtifactRepository, self).__init__(artifact_uri)
@staticmethod
def parse_gcs_uri(uri):
parsed = urllib.parse.urlparse(uri)
if parsed.scheme != "gs":
raise Exception("Not a GCS URI: %s" % uri)
path = parsed.path
if path.startswith('/'):
path = path[1:]
return parsed.netloc, path
def log_artifact(self, local_file, artifact_path=None):
(bucket, dest_path) = self.parse_gcs_uri(self.artifact_uri)
if artifact_path:
dest_path = build_path(dest_path, artifact_path)
dest_path = build_path(dest_path, os.path.basename(local_file))
gcs_bucket = self.gcs.Client().get_bucket(bucket)
blob = gcs_bucket.blob(dest_path)
blob.upload_from_filename(local_file)
def log_artifacts(self, local_dir, artifact_path=None):
(bucket, dest_path) = self.parse_gcs_uri(self.artifact_uri)
if artifact_path:
dest_path = build_path(dest_path, artifact_path)
gcs_bucket = self.gcs.Client().get_bucket(bucket)
local_dir = os.path.abspath(local_dir)
for (root, _, filenames) in os.walk(local_dir):
upload_path = dest_path
if root != local_dir:
rel_path = get_relative_path(local_dir, root)
upload_path = build_path(dest_path, rel_path)
for f in filenames:
path = build_path(upload_path, f)
gcs_bucket.blob(path).upload_from_filename(build_path(root, f))
def list_artifacts(self, path=None):
(bucket, artifact_path) = self.parse_gcs_uri(self.artifact_uri)
dest_path = artifact_path
if path:
dest_path = build_path(dest_path, path)
prefix = dest_path + "/"
bkt = self.gcs.Client().get_bucket(bucket)
infos = self._list_folders(bkt, prefix, artifact_path)
results = bkt.list_blobs(prefix=prefix, delimiter="/")
for result in results:
blob_path = result.name[len(artifact_path)+1:]
infos.append(FileInfo(blob_path, False, result.size))
return sorted(infos, key=lambda f: f.path)
def _list_folders(self, bkt, prefix, artifact_path):
results = bkt.list_blobs(prefix=prefix, delimiter="/")
dir_paths = set()
for page in results.pages:
dir_paths.update(page.prefixes)
return [FileInfo(path[len(artifact_path)+1:-1], True, None)for path in dir_paths]
def download_artifacts(self, artifact_path):
with TempDir(remove_on_exit=False) as tmp:
return self._download_artifacts_into(artifact_path, tmp.path())
def _download_artifacts_into(self, artifact_path, dest_dir):
basename = os.path.basename(artifact_path)
local_path = build_path(dest_dir, basename)
listing = self.list_artifacts(artifact_path)
if len(listing) > 0:
os.mkdir(local_path)
for file_info in listing:
self._download_artifacts_into(file_info.path, local_path)
else:
(bucket, remote_path) = self.parse_gcs_uri(self.artifact_uri)
remote_path = build_path(remote_path, artifact_path)
gcs_bucket = self.gcs.Client().get_bucket(bucket)
gcs_bucket.get_blob(remote_path).download_to_filename(local_path)
return local_path
| true | true |
1c3bb868be6fb7c76207db8e6603eead6f143d93 | 221 | py | Python | Exercise 028.py | sayan-datapython/Learn_Python_Full_Course_for_Beginners | 1cca3df62cfd7ae5a0c85c019b0f5fe4f6f4740c | [
"MIT"
] | null | null | null | Exercise 028.py | sayan-datapython/Learn_Python_Full_Course_for_Beginners | 1cca3df62cfd7ae5a0c85c019b0f5fe4f6f4740c | [
"MIT"
] | null | null | null | Exercise 028.py | sayan-datapython/Learn_Python_Full_Course_for_Beginners | 1cca3df62cfd7ae5a0c85c019b0f5fe4f6f4740c | [
"MIT"
] | 1 | 2020-09-30T14:25:51.000Z | 2020-09-30T14:25:51.000Z | import random
# Simple guessing game: the player picks a number 1-5 and must match the
# computer's random draw.
player = int(input('Enter any number between 1 to 5: '))
# Draw the number once so the win check and the loss message refer to the
# same value. randint(1, 5) matches the advertised range — the original
# randint(0, 5) could draw 0, which the player was never allowed to guess.
secret = random.randint(1, 5)
if player == secret:
    print('Your Win, right number {}'.format(player))
else:
    # Show the computer's actual number, not the player's losing guess.
    print('Not the number, PC win {}'.format(secret))
| 24.555556 | 56 | 0.665158 | import random
player = int(input('Enter any number between 1 to 5: '))
if player == random.randint(0,5):
print('Your Win, right number {}'.format(player))
else:
print('Not the number, PC win {}'.format(player))
| true | true |
1c3bb8db1a9efd72cb6495569f020bdb0f2f5985 | 1,358 | py | Python | dolphin/task_manager/resources.py | ThisIsClark/dolphin | 204cffd3faa1c83fde90942537737fe441406cd1 | [
"Apache-2.0"
] | null | null | null | dolphin/task_manager/resources.py | ThisIsClark/dolphin | 204cffd3faa1c83fde90942537737fe441406cd1 | [
"Apache-2.0"
] | null | null | null | dolphin/task_manager/resources.py | ThisIsClark/dolphin | 204cffd3faa1c83fde90942537737fe441406cd1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
LOG = log.getLogger(__name__)
class StoragePoolTask:
    """Synchronizes storage-pool information for a single storage backend."""

    def __init__(self, context, storage_id):
        """Remember the request context and the backend this task targets."""
        self.context = context
        self.storage_id = storage_id

    def sync(self):
        """Refresh pool records (placeholder: driver call not wired up yet)."""
        LOG.info('Pool sync func...')
        # TODO: fetch pools via driver.list_pools(context) and persist them to the DB.

    def remove(self):
        """Placeholder for pool clean-up logic."""
class StorageVolumeTask:
    """Synchronizes storage-volume information for a single storage backend."""

    def __init__(self, context, storage_id):
        """Remember the request context and the backend this task targets."""
        self.context = context
        self.storage_id = storage_id

    def sync(self):
        """Refresh volume records (placeholder: driver call not wired up yet)."""
        LOG.info('Volume sync func...')
        # TODO: fetch volumes via driver.list_volumes(context) and persist them to the DB.

    def remove(self):
        """Placeholder for volume clean-up logic."""
| 27.16 | 74 | 0.669367 |
from oslo_log import log
LOG = log.getLogger(__name__)
class StoragePoolTask:
def __init__(self, context, storage_id):
self.storage_id = storage_id
self.context = context
def sync(self):
LOG.info('Pool sync func...')
pass
def remove(self):
pass
class StorageVolumeTask:
def __init__(self, context, storage_id):
self.storage_id = storage_id
self.context = context
def sync(self):
LOG.info('Volume sync func...')
pass
def remove(self):
pass
| true | true |
1c3bb9b1864c1ec80b70dee70f1b782a1a0c9ca5 | 397 | py | Python | packages/grid/backend/grid/core/celery_config.py | leosole/PySyft | 01606f08f5ec5510840644e198301cd25c3ccfa5 | [
"Apache-1.1"
] | null | null | null | packages/grid/backend/grid/core/celery_config.py | leosole/PySyft | 01606f08f5ec5510840644e198301cd25c3ccfa5 | [
"Apache-1.1"
] | null | null | null | packages/grid/backend/grid/core/celery_config.py | leosole/PySyft | 01606f08f5ec5510840644e198301cd25c3ccfa5 | [
"Apache-1.1"
] | null | null | null | worker_send_task_event = False
task_ignore_result = True
task_time_limit = 600 # Rasswanth: should modify after optimizing PC
task_acks_late = True
broker_pool_limit = 500
worker_prefetch_multiplier = 1
task_routes = {
"grid.worker.msg_without_reply": "main-queue",
"delivery_mode": "transient",
}
accept_content = ["application/syft"]
task_serializer = "syft"
result_serializer = "syft"
| 28.357143 | 69 | 0.775819 | worker_send_task_event = False
task_ignore_result = True
task_time_limit = 600
task_acks_late = True
broker_pool_limit = 500
worker_prefetch_multiplier = 1
task_routes = {
"grid.worker.msg_without_reply": "main-queue",
"delivery_mode": "transient",
}
accept_content = ["application/syft"]
task_serializer = "syft"
result_serializer = "syft"
| true | true |
1c3bb9f80d17be7bc27d33f99a830fdbfd84454a | 3,921 | py | Python | equivalence/tables/node.py | Shurik412/equivalence_rastr_win3 | c37119f9cc59dcf96d236cd8f3ec090dc65a5db6 | [
"MIT"
] | null | null | null | equivalence/tables/node.py | Shurik412/equivalence_rastr_win3 | c37119f9cc59dcf96d236cd8f3ec090dc65a5db6 | [
"MIT"
] | null | null | null | equivalence/tables/node.py | Shurik412/equivalence_rastr_win3 | c37119f9cc59dcf96d236cd8f3ec090dc65a5db6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Модуль переменных таблицы узлов RastrWin3
from dataclasses import dataclass
@dataclass(frozen=True)
class Node:
    """
    Field names of the RastrWin3 "node" (bus) table.
    """
    table: str = 'node'
    table_name: str = '"Узлы"'  # display name of the table ("Nodes")
    sel: str = 'sel'  # node selection mark
    sta: str = 'sta'  # S, node state (on/off)
    tip: str = 'tip'  # node type
    ny: str = 'ny'  # node number
    name: str = 'name'  # node name
    nsx: str = 'nsx'  # static load characteristic (SLC) number
    na: str = 'na'  # area (district) number
    npa: str = 'npa'  # territory number
    sta_r: str = 'sra_r'  # reactor state; NOTE(review): 'sra_r' looks like a typo for 'sta_r' — confirm against the RastrWin3 schema
    nga: str = 'nga'  # load group number
    na_name: str = 'na_name'  # area name
    base_area: str = 'base_area'  # number of the slack (base) node in the island
    dnsx: str = 'dnsx'  # dynamic SLC number
    Id: str = 'Id'  # record number
    nrk: str = 'nrk'  # number of reactors
    supernode: str = 'supernode'  # topological node
    na_no: str = 'na_no'  # (undocumented in the original source)
    tip0: str = 'tip0'  # whether the node is shorted to ground in the zero sequence
    # Admittance / shunts
    bsh: str = 'bsh'  # shunt susceptance
    bshr: str = 'bshr'  # total (computed) shunt susceptance
    psh: str = 'psh'  # shunt active power P
    qsh: str = 'qsh'  # shunt reactive power Q
    bsh0: str = 'bsh0'  # zero-sequence shunt susceptance to ground
    brk: str = 'brk'  # susceptance of a single reactor
    grk: str = 'grk'  # conductance of a single reactor (G)
    gsh: str = 'gsh'  # shunt conductance G
    gshr: str = 'gshr'  # total (computed) shunt conductance (G)
    gsh0: str = 'gsh0'  # zero-sequence shunt conductance to ground
    Ysh: str = 'Ysh'  # complex shunt admittance
    # Voltage
    vzd: str = 'vzd'  # voltage magnitude setpoint
    uhom: str = 'uhom'  # nominal voltage
    vras: str = 'vras'  # computed voltage magnitude
    delta: str = 'delta'  # computed voltage angle
    umax: str = 'umax'  # maximum allowed voltage
    umin: str = 'umin'  # minimum allowed voltage
    uc: str = 'uc'  # complex voltage
    U_izm: str = 'U_izm'  # voltage at the control measurement (snapshot)
    vras0: str = 'vras0'  # computed zero-sequence voltage magnitude
    vras1: str = 'vras1'  # computed positive-sequence voltage magnitude
    vras2: str = 'vras2'  # computed negative-sequence voltage magnitude
    # Q — reactive power
    qn: str = 'qn'  # reactive load Q
    qn_max: str = 'qn_max'  # maximum reactive load
    qn_min: str = 'qn_min'  # minimum reactive load
    nebal_q: str = 'nebal_q'  # reactive power imbalance
    qg: str = 'qg'  # reactive generation Q
    qmin: str = 'qmin'  # minimum reactive generation
    qmax: str = 'qmax'  # maximum reactive generation
    qgr: str = 'qgr'  # computed reactive generation
    dqmin: str = 'dqmin'  # lower Q limit for generators modelled as shunts (for the "Mustang" software)
    dqmax: str = 'dqmax'  # upper Q limit for generators modelled as shunts (for the "Mustang" software)
    # P — active power
    pn: str = 'pn'  # active load P
    pn_max: str = 'pn_max'  # maximum active load
    pn_min: str = 'pn_min'  # minimum active load
    pg_max: str = 'pg_max'  # maximum generation P
    pg_min: str = 'pg_min'  # minimum generation P
    pg_nom: str = 'pg_nom'  # nominal generation P
    nebal: str = 'nebal'  # active power imbalance
    pg: str = 'pg'  # active generation P
    dpg: str = 'dpg'  # difference Pg_computed - Pg
    dpn: str = 'dpn'  # difference P_computed - P_nominal
    dqn: str = 'dqn'  # difference Q_computed - Q_nominal
    # S — apparent power
    sg: str = 'sg'  # complex generated power
    exist_load: str = 'exist_load'  # flag: node has load attached
    exist_gen: str = 'exist_gen'  # flag: node has generation attached
| 44.05618 | 115 | 0.644223 |
from dataclasses import dataclass
@dataclass(frozen=True)
class Node:
table: str = 'node'
table_name: str = '"Узлы"'
sel: str = 'sel'
sta: str = 'sta'
tip: str = 'tip'
ny: str = 'ny'
name: str = 'name'
nsx: str = 'nsx'
na: str = 'na'
npa: str = 'npa'
sta_r: str = 'sra_r'
nga: str = 'nga'
na_name: str = 'na_name'
base_area: str = 'base_area'
dnsx: str = 'dnsx'
Id: str = 'Id'
nrk: str = 'nrk'
supernode: str = 'supernode'
na_no: str = 'na_no'
tip0: str = 'tip0'
bsh: str = 'bsh'
bshr: str = 'bshr'
psh: str = 'psh'
qsh: str = 'qsh'
bsh0: str = 'bsh0'
brk: str = 'brk'
grk: str = 'grk'
gsh: str = 'gsh'
gshr: str = 'gshr'
gsh0: str = 'gsh0'
Ysh: str = 'Ysh'
vzd: str = 'vzd'
uhom: str = 'uhom'
vras: str = 'vras'
delta: str = 'delta'
umax: str = 'umax'
umin: str = 'umin'
uc: str = 'uc'
U_izm: str = 'U_izm'
vras0: str = 'vras0'
vras1: str = 'vras1'
vras2: str = 'vras2'
qn: str = 'qn'
qn_max: str = 'qn_max'
qn_min: str = 'qn_min'
nebal_q: str = 'nebal_q'
qg: str = 'qg'
qmin: str = 'qmin'
qmax: str = 'qmax'
qgr: str = 'qgr'
dqmin: str = 'dqmin'
dqmax: str = 'dqmax'
pn: str = 'pn'
pn_max: str = 'pn_max'
pn_min: str = 'pn_min'
pg_max: str = 'pg_max'
pg_min: str = 'pg_min'
pg_nom: str = 'pg_nom'
nebal: str = 'nebal'
pg: str = 'pg'
dpg: str = 'dpg'
dpn: str = 'dpn'
dqn: str = 'dqn'
sg: str = 'sg'
exist_load: str = 'exist_load'
exist_gen: str = 'exist_gen'
| true | true |
1c3bba37f01fac3d2024abcc3b5f5d44e94bdb92 | 370 | py | Python | tests/config.py | balexander85/tweet_capture | 81caf672c4ac47de84f0aa2ce5a8d89b69e5349c | [
"MIT"
] | null | null | null | tests/config.py | balexander85/tweet_capture | 81caf672c4ac47de84f0aa2ce5a8d89b69e5349c | [
"MIT"
] | 2 | 2022-02-14T23:56:05.000Z | 2022-02-23T02:10:48.000Z | tests/config.py | balexander85/tweet_capture | 81caf672c4ac47de84f0aa2ce5a8d89b69e5349c | [
"MIT"
] | null | null | null | """config.py
Module containing the paths for chromedriver and conf directory
"""
from configparser import ConfigParser
from pathlib import Path
# Load tests/conf/config.ini (resolved against the current working
# directory) once at import time.
config = ConfigParser()
BASE_DIR = Path.cwd()
CONFIG_PATH = str(BASE_DIR / "tests" / "conf" / "config.ini")
config.read(CONFIG_PATH)

# Selenium config
CHROME_DRIVER_PATH = config.get("default", "CHROME_DRIVER_PATH")
| 23.125 | 67 | 0.772973 | from configparser import ConfigParser
from pathlib import Path
BASE_DIR = Path.cwd()
CONFIG_PATH = str(BASE_DIR.joinpath("tests", "conf", "config.ini"))
config = ConfigParser()
config.read(CONFIG_PATH)
CHROME_DRIVER_PATH = config.get("default", "CHROME_DRIVER_PATH")
| true | true |
1c3bbb7ca9541a8bea721a842cf1ed28423a4111 | 9,768 | py | Python | pkgcore/test/test_plugin.py | pombreda/pkgcore | b438fc573af1a031d7ce12adbbf299bab5338451 | [
"BSD-3-Clause"
] | 1 | 2021-07-05T13:10:18.000Z | 2021-07-05T13:10:18.000Z | pkgcore/test/test_plugin.py | vapier/pkgcore | 35a7e4f4f0fc61dd9c4dc72d35a57e2e9d5b832f | [
"BSD-3-Clause"
] | 8 | 2015-03-24T14:21:44.000Z | 2015-03-24T14:21:44.000Z | pkgcore/test/test_plugin.py | vapier/pkgcore | 35a7e4f4f0fc61dd9c4dc72d35a57e2e9d5b832f | [
"BSD-3-Clause"
] | null | null | null | # Copyright: 2011 Brian Harring <ferringb@gmail.com>
# Copyright: 2006 Marien Zwart <marienz@gentoo.org>
# License: BSD/GPL2
import logging
import os
import shutil
import sys
import tempfile
from snakeoil import lists
from snakeoil.osutils import pjoin
from pkgcore import plugin
from pkgcore.test import silence_logging, TestCase
class LowPlug(object):
    """Plugin stub; referenced by its dotted path from the generated
    test package ('pkgcore.test.test_plugin.LowPlug') in setUp below."""
    priority = 1
class ModulesTest(TestCase):

    """Plugin discovery, caching and priority tests for ``pkgcore.plugin``.

    setUp materialises two throwaway ``mod_testplug`` packages on disk and
    prepends their parent directories to ``sys.path``; the tests then
    exercise cache generation, corruption recovery, priority handling and
    cache invalidation.  All file handles now use ``with`` blocks, matching
    the style setUp already used (previously a few were open()/try/finally).
    """

    def setUp(self):
        """Create two on-disk plugin packages and put them on sys.path."""
        # Set up some test modules for our use.
        self.dir = tempfile.mkdtemp()
        self.dir2 = tempfile.mkdtemp()
        self.packdir = pjoin(self.dir, 'mod_testplug')
        self.packdir2 = pjoin(self.dir2, 'mod_testplug')
        os.mkdir(self.packdir)
        os.mkdir(self.packdir2)
        with open(pjoin(self.packdir, '__init__.py'), 'w') as init:
            init.write('''
from pkgcore.plugins import extend_path
extend_path(__path__, __name__)
''')
        filename = pjoin(self.packdir, 'plug.py')
        with open(filename, 'w') as plug:
            plug.write('''
class DisabledPlug(object):
    disabled = True
class HighPlug(object):
    priority = 7
class LowPlug(object):
    priority = 1
low_plug = LowPlug()
high_plug = HighPlug()
pkgcore_plugins = {
    'plugtest': [
        DisabledPlug,
        high_plug,
        'pkgcore.test.test_plugin.LowPlug',
    ]
}
''')
        # Move the mtime 2 seconds into the past so the .pyc file has
        # a different mtime.
        st = os.stat(filename)
        os.utime(filename, (st.st_atime, st.st_mtime - 2))
        with open(pjoin(self.packdir, 'plug2.py'), 'w') as plug2:
            plug2.write('# I do not have any pkgcore_plugins for you!\n')
        with open(pjoin(self.packdir2, 'plug.py'), 'w') as plug:
            plug.write('''
# This file is later on sys.path than the plug.py in packdir, so it should
# not have any effect on the tests.
class HiddenPlug(object):
    priority = 8
pkgcore_plugins = {'plugtest': [HiddenPlug]}
''')
        # Append it to the path
        sys.path.insert(0, self.dir2)
        sys.path.insert(0, self.dir)

    def tearDown(self):
        """Remove the temp packages and purge their sys.modules entries."""
        # pop the test module dir from path
        sys.path.pop(0)
        sys.path.pop(0)
        # and kill it
        shutil.rmtree(self.dir)
        shutil.rmtree(self.dir2)
        # make sure we don't keep the sys.modules entries around
        sys.modules.pop('mod_testplug', None)
        sys.modules.pop('mod_testplug.plug', None)
        sys.modules.pop('mod_testplug.plug2', None)

    def test_extend_path(self):
        """Both on-disk package dirs should appear in the package __path__."""
        import mod_testplug
        expected = lists.stable_unique(
            pjoin(p, 'mod_testplug')
            for p in sys.path if os.path.isdir(p))
        self.assertEqual(
            expected, mod_testplug.__path__,
            set(expected) ^ set(mod_testplug.__path__))

    def _runit(self, method):
        """Run *method* with cold and warm caches; the cache file must be
        written once and then left untouched."""
        plugin._global_cache.clear()
        method()
        mtime = os.path.getmtime(pjoin(self.packdir, plugin.CACHE_FILENAME))
        method()
        plugin._global_cache.clear()
        method()
        method()
        self.assertEqual(
            mtime,
            os.path.getmtime(pjoin(self.packdir, plugin.CACHE_FILENAME)))
        # We cannot write this since it contains an unimportable plugin.
        self.assertFalse(
            os.path.exists(pjoin(self.packdir2, plugin.CACHE_FILENAME)))

    def _test_plug(self):
        """Check plugin resolution and the exact cache file contents."""
        import mod_testplug
        self.assertIdentical(None, plugin.get_plugin('spork', mod_testplug))
        plugins = list(plugin.get_plugins('plugtest', mod_testplug))
        self.assertEqual(2, len(plugins), plugins)
        plugin.get_plugin('plugtest', mod_testplug)
        self.assertEqual(
            'HighPlug',
            plugin.get_plugin('plugtest', mod_testplug).__class__.__name__)
        with open(pjoin(self.packdir, plugin.CACHE_FILENAME)) as f:
            lines = f.readlines()
        self.assertEqual(3, len(lines))
        self.assertEqual(plugin.CACHE_HEADER + "\n", lines[0])
        lines.pop(0)
        lines.sort()
        mtime = int(os.path.getmtime(pjoin(self.packdir, 'plug2.py')))
        self.assertEqual('plug2:%s:\n' % (mtime,), lines[0])
        mtime = int(os.path.getmtime(pjoin(self.packdir, 'plug.py')))
        self.assertEqual(
            'plug:%s:plugtest,7,1:plugtest,1,pkgcore.test.test_plugin.LowPlug:'
            'plugtest,0,0\n' % (mtime,),
            lines[1])

    def test_plug(self):
        self._runit(self._test_plug)

    def _test_no_unneeded_import(self):
        import mod_testplug
        list(plugin.get_plugins('spork', mod_testplug))
        sys.modules.pop('mod_testplug.plug')
        # This one is not loaded if we are testing with a good cache.
        sys.modules.pop('mod_testplug.plug2', None)
        list(plugin.get_plugins('plugtest', mod_testplug))
        # Extra messages since getting all of sys.modules printed is annoying.
        self.assertIn('mod_testplug.plug', sys.modules, 'plug not loaded')
        self.assertNotIn('mod_testplug.plug2', sys.modules, 'plug2 loaded')

    def test_no_unneeded_import(self):
        self._runit(self._test_no_unneeded_import)

    @silence_logging(logging.root)
    def test_cache_corruption(self):
        """A corrupted cache file must be detected and rewritten."""
        import mod_testplug
        list(plugin.get_plugins('spork', mod_testplug))
        filename = pjoin(self.packdir, plugin.CACHE_FILENAME)
        with open(filename, 'a') as cachefile:
            cachefile.write('corruption\n')
        # Shift the file into the past a little or the rewritten file
        # will occasionally have the same mtime as the corrupt one.
        st = os.stat(filename)
        corrupt_mtime = st.st_mtime - 2
        os.utime(filename, (st.st_atime, corrupt_mtime))
        plugin._global_cache.clear()
        self._test_plug()
        good_mtime = os.path.getmtime(
            pjoin(self.packdir, plugin.CACHE_FILENAME))
        plugin._global_cache.clear()
        self._test_plug()
        self.assertEqual(good_mtime, os.path.getmtime(
            pjoin(self.packdir, plugin.CACHE_FILENAME)))
        self.assertNotEqual(good_mtime, corrupt_mtime)

    def test_rewrite_on_remove(self):
        """Removing a plugin file must cause the cache to be rewritten."""
        filename = pjoin(self.packdir, 'extra.py')
        with open(filename, 'w') as plug:
            plug.write('pkgcore_plugins = {"plugtest": [object()]}\n')
        plugin._global_cache.clear()
        import mod_testplug
        self.assertEqual(
            3, len(list(plugin.get_plugins('plugtest', mod_testplug))))
        os.unlink(filename)
        plugin._global_cache.clear()
        self._test_plug()

    @silence_logging(logging.root)
    def test_priority_caching(self):
        """Only the modules needed to find the best plugin get imported."""
        with open(pjoin(self.packdir, 'plug3.py'), 'w') as plug3:
            plug3.write('''
class LowPlug(object):
    priority = 6
pkgcore_plugins = {
    'plugtest': [LowPlug()],
}
''')
        with open(pjoin(self.packdir, 'plug4.py'), 'w') as plug4:
            plug4.write('''
# First file tried, only a disabled plugin.
class HighDisabledPlug(object):
    priority = 15
    disabled = True
pkgcore_plugins = {
    'plugtest': [HighDisabledPlug()],
}
''')
        with open(pjoin(self.packdir, 'plug5.py'), 'w') as plug5:
            plug5.write('''
# Second file tried, with a skipped low priority plugin.
class HighDisabledPlug(object):
    priority = 12
    disabled = True
class LowPlug(object):
    priority = 6
pkgcore_plugins = {
    'plugtest': [HighDisabledPlug(), LowPlug()],
}
''')
        with open(pjoin(self.packdir, 'plug6.py'), 'w') as plug6:
            plug6.write('''
# Not tried, bogus priority.
class BogusPlug(object):
    priority = 'spoon'
pkgcore_plugins = {
    'plugtest': [BogusPlug()],
}
''')
        self._runit(self._test_priority_caching)

    def _test_priority_caching(self):
        import mod_testplug
        list(plugin.get_plugins('spork', mod_testplug))
        sys.modules.pop('mod_testplug.plug', None)
        sys.modules.pop('mod_testplug.plug2', None)
        sys.modules.pop('mod_testplug.plug3', None)
        sys.modules.pop('mod_testplug.plug4', None)
        sys.modules.pop('mod_testplug.plug5', None)
        sys.modules.pop('mod_testplug.plug6', None)
        best_plug = plugin.get_plugin('plugtest', mod_testplug)
        from mod_testplug import plug
        self.assertEqual(plug.high_plug, best_plug)
        # Extra messages since getting all of sys.modules printed is annoying.
        self.assertIn('mod_testplug.plug', sys.modules, 'plug not loaded')
        self.assertNotIn('mod_testplug.plug2', sys.modules, 'plug2 loaded')
        self.assertNotIn('mod_testplug.plug3', sys.modules, 'plug3 loaded')
        self.assertIn('mod_testplug.plug4', sys.modules, 'plug4 not loaded')
        self.assertIn('mod_testplug.plug5', sys.modules, 'plug4 not loaded')
        self.assertNotIn('mod_testplug.plug6', sys.modules, 'plug6 loaded')

    @silence_logging(logging.root)
    def test_header_change_invalidates_cache(self):
        """A cache whose header line changed must be ignored and rewritten."""
        # Write the cache
        plugin._global_cache.clear()
        import mod_testplug
        list(plugin.get_plugins('testplug', mod_testplug))
        # Modify the cache.
        filename = pjoin(self.packdir, plugin.CACHE_FILENAME)
        with open(filename) as f:
            cache = f.readlines()
        cache[0] = 'not really a pkgcore plugin cache\n'
        with open(filename, 'w') as f:
            f.write(''.join(cache))
        # And test if it is properly rewritten.
        plugin._global_cache.clear()
        self._test_plug()
| 32.778523 | 79 | 0.627662 |
import logging
import os
import shutil
import sys
import tempfile
from snakeoil import lists
from snakeoil.osutils import pjoin
from pkgcore import plugin
from pkgcore.test import silence_logging, TestCase
class LowPlug(object):
priority = 1
class ModulesTest(TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
self.dir2 = tempfile.mkdtemp()
self.packdir = pjoin(self.dir, 'mod_testplug')
self.packdir2 = pjoin(self.dir2, 'mod_testplug')
os.mkdir(self.packdir)
os.mkdir(self.packdir2)
with open(pjoin(self.packdir, '__init__.py'), 'w') as init:
init.write('''
from pkgcore.plugins import extend_path
extend_path(__path__, __name__)
''')
filename = pjoin(self.packdir, 'plug.py')
with open(filename, 'w') as plug:
plug.write('''
class DisabledPlug(object):
disabled = True
class HighPlug(object):
priority = 7
class LowPlug(object):
priority = 1
low_plug = LowPlug()
high_plug = HighPlug()
pkgcore_plugins = {
'plugtest': [
DisabledPlug,
high_plug,
'pkgcore.test.test_plugin.LowPlug',
]
}
''')
st = os.stat(filename)
os.utime(filename, (st.st_atime, st.st_mtime - 2))
with open(pjoin(self.packdir, 'plug2.py'), 'w') as plug2:
plug2.write('# I do not have any pkgcore_plugins for you!\n')
with open(pjoin(self.packdir2, 'plug.py'), 'w') as plug:
plug.write('''
# This file is later on sys.path than the plug.py in packdir, so it should
# not have any effect on the tests.
class HiddenPlug(object):
priority = 8
pkgcore_plugins = {'plugtest': [HiddenPlug]}
''')
sys.path.insert(0, self.dir2)
sys.path.insert(0, self.dir)
def tearDown(self):
sys.path.pop(0)
sys.path.pop(0)
shutil.rmtree(self.dir)
shutil.rmtree(self.dir2)
sys.modules.pop('mod_testplug', None)
sys.modules.pop('mod_testplug.plug', None)
sys.modules.pop('mod_testplug.plug2', None)
def test_extend_path(self):
import mod_testplug
expected = lists.stable_unique(
pjoin(p, 'mod_testplug')
for p in sys.path if os.path.isdir(p))
self.assertEqual(
expected, mod_testplug.__path__,
set(expected) ^ set(mod_testplug.__path__))
def _runit(self, method):
plugin._global_cache.clear()
method()
mtime = os.path.getmtime(pjoin(self.packdir, plugin.CACHE_FILENAME))
method()
plugin._global_cache.clear()
method()
method()
self.assertEqual(
mtime,
os.path.getmtime(pjoin(self.packdir, plugin.CACHE_FILENAME)))
# We cannot write this since it contains an unimportable plugin.
self.assertFalse(
os.path.exists(pjoin(self.packdir2, plugin.CACHE_FILENAME)))
def _test_plug(self):
import mod_testplug
self.assertIdentical(None, plugin.get_plugin('spork', mod_testplug))
plugins = list(plugin.get_plugins('plugtest', mod_testplug))
self.assertEqual(2, len(plugins), plugins)
plugin.get_plugin('plugtest', mod_testplug)
self.assertEqual(
'HighPlug',
plugin.get_plugin('plugtest', mod_testplug).__class__.__name__)
with open(pjoin(self.packdir, plugin.CACHE_FILENAME)) as f:
lines = f.readlines()
self.assertEqual(3, len(lines))
self.assertEqual(plugin.CACHE_HEADER + "\n", lines[0])
lines.pop(0)
lines.sort()
mtime = int(os.path.getmtime(pjoin(self.packdir, 'plug2.py')))
self.assertEqual('plug2:%s:\n' % (mtime,), lines[0])
mtime = int(os.path.getmtime(pjoin(self.packdir, 'plug.py')))
self.assertEqual(
'plug:%s:plugtest,7,1:plugtest,1,pkgcore.test.test_plugin.LowPlug:'
'plugtest,0,0\n' % (mtime,),
lines[1])
def test_plug(self):
self._runit(self._test_plug)
def _test_no_unneeded_import(self):
import mod_testplug
list(plugin.get_plugins('spork', mod_testplug))
sys.modules.pop('mod_testplug.plug')
# This one is not loaded if we are testing with a good cache.
sys.modules.pop('mod_testplug.plug2', None)
list(plugin.get_plugins('plugtest', mod_testplug))
# Extra messages since getting all of sys.modules printed is annoying.
self.assertIn('mod_testplug.plug', sys.modules, 'plug not loaded')
self.assertNotIn('mod_testplug.plug2', sys.modules, 'plug2 loaded')
def test_no_unneeded_import(self):
self._runit(self._test_no_unneeded_import)
@silence_logging(logging.root)
def test_cache_corruption(self):
import mod_testplug
list(plugin.get_plugins('spork', mod_testplug))
filename = pjoin(self.packdir, plugin.CACHE_FILENAME)
cachefile = open(filename, 'a')
try:
cachefile.write('corruption\n')
finally:
cachefile.close()
# Shift the file into the past a little or the rewritten file
# will occasionally have the same mtime as the corrupt one.
st = os.stat(filename)
corrupt_mtime = st.st_mtime - 2
os.utime(filename, (st.st_atime, corrupt_mtime))
plugin._global_cache.clear()
self._test_plug()
good_mtime = os.path.getmtime(
pjoin(self.packdir, plugin.CACHE_FILENAME))
plugin._global_cache.clear()
self._test_plug()
self.assertEqual(good_mtime, os.path.getmtime(
pjoin(self.packdir, plugin.CACHE_FILENAME)))
self.assertNotEqual(good_mtime, corrupt_mtime)
def test_rewrite_on_remove(self):
filename = pjoin(self.packdir, 'extra.py')
plug = open(filename, 'w')
try:
plug.write('pkgcore_plugins = {"plugtest": [object()]}\n')
finally:
plug.close()
plugin._global_cache.clear()
import mod_testplug
self.assertEqual(
3, len(list(plugin.get_plugins('plugtest', mod_testplug))))
os.unlink(filename)
plugin._global_cache.clear()
self._test_plug()
@silence_logging(logging.root)
def test_priority_caching(self):
plug3 = open(pjoin(self.packdir, 'plug3.py'), 'w')
try:
plug3.write('''
class LowPlug(object):
priority = 6
pkgcore_plugins = {
'plugtest': [LowPlug()],
}
''')
finally:
plug3.close()
plug4 = open(pjoin(self.packdir, 'plug4.py'), 'w')
try:
plug4.write('''
# First file tried, only a disabled plugin.
class HighDisabledPlug(object):
priority = 15
disabled = True
pkgcore_plugins = {
'plugtest': [HighDisabledPlug()],
}
''')
finally:
plug4.close()
plug5 = open(pjoin(self.packdir, 'plug5.py'), 'w')
try:
plug5.write('''
# Second file tried, with a skipped low priority plugin.
class HighDisabledPlug(object):
priority = 12
disabled = True
class LowPlug(object):
priority = 6
pkgcore_plugins = {
'plugtest': [HighDisabledPlug(), LowPlug()],
}
''')
finally:
plug5.close()
plug6 = open(pjoin(self.packdir, 'plug6.py'), 'w')
try:
plug6.write('''
# Not tried, bogus priority.
class BogusPlug(object):
priority = 'spoon'
pkgcore_plugins = {
'plugtest': [BogusPlug()],
}
''')
finally:
plug6.close()
self._runit(self._test_priority_caching)
def _test_priority_caching(self):
import mod_testplug
list(plugin.get_plugins('spork', mod_testplug))
sys.modules.pop('mod_testplug.plug', None)
sys.modules.pop('mod_testplug.plug2', None)
sys.modules.pop('mod_testplug.plug3', None)
sys.modules.pop('mod_testplug.plug4', None)
sys.modules.pop('mod_testplug.plug5', None)
sys.modules.pop('mod_testplug.plug6', None)
best_plug = plugin.get_plugin('plugtest', mod_testplug)
from mod_testplug import plug
self.assertEqual(plug.high_plug, best_plug)
# Extra messages since getting all of sys.modules printed is annoying.
self.assertIn('mod_testplug.plug', sys.modules, 'plug not loaded')
self.assertNotIn('mod_testplug.plug2', sys.modules, 'plug2 loaded')
self.assertNotIn('mod_testplug.plug3', sys.modules, 'plug3 loaded')
self.assertIn('mod_testplug.plug4', sys.modules, 'plug4 not loaded')
self.assertIn('mod_testplug.plug5', sys.modules, 'plug4 not loaded')
self.assertNotIn('mod_testplug.plug6', sys.modules, 'plug6 loaded')
@silence_logging(logging.root)
def test_header_change_invalidates_cache(self):
# Write the cache
plugin._global_cache.clear()
import mod_testplug
list(plugin.get_plugins('testplug', mod_testplug))
# Modify the cache.
filename = pjoin(self.packdir, plugin.CACHE_FILENAME)
with open(filename) as f:
cache = f.readlines()
cache[0] = 'not really a pkgcore plugin cache\n'
with open(filename, 'w') as f:
f.write(''.join(cache))
# And test if it is properly rewritten.
plugin._global_cache.clear()
self._test_plug()
| true | true |
1c3bbb94848eda13cf7e65df610d7865e1377e94 | 1,825 | py | Python | column_self_define.py | xiaojinchuan/flask-sqlalchemy-usage | db7f41351ca06d7007cb87ef78beaaf631ae4e21 | [
"MIT"
] | null | null | null | column_self_define.py | xiaojinchuan/flask-sqlalchemy-usage | db7f41351ca06d7007cb87ef78beaaf631ae4e21 | [
"MIT"
] | null | null | null | column_self_define.py | xiaojinchuan/flask-sqlalchemy-usage | db7f41351ca06d7007cb87ef78beaaf631ae4e21 | [
"MIT"
] | null | null | null | """
在mysql5.7中使用JSON列
为JSON列中某些内容创建虚拟列,并为此虚拟列建立索引,以方便查询
"""
from sqlalchemy.schema import CreateColumn
from sqlalchemy.ext.compiler import compiles
@compiles(CreateColumn, 'mysql')
def mysql_genereted_column(element, compiler, **kwargs):
    """Emit MySQL DDL for generated (virtual) columns.

    If the column definition carries a ``generated_with`` expression in its
    ``info`` dict, the default CREATE COLUMN clause is suffixed with
    ``GENERATED ALWAYS AS (<expr>) VIRTUAL``; otherwise the standard
    compilation is returned unchanged.  Registered for the 'mysql' dialect
    only (the ``@compiles`` second argument).

    :param element: the CreateColumn DDL element being compiled
    :param compiler: the active DDL compiler
    :param kwargs: forwarded to the default column compilation
    :return: DDL fragment for this column
    """
    column = element.element
    if 'generated_with' not in column.info:
        return compiler.visit_create_column(element, **kwargs)
    # NOTE(review): the expression is interpolated into the DDL verbatim;
    # only trusted, developer-supplied expressions should be used here.
    column_def = f'{compiler.visit_create_column(element, **kwargs)} ' \
                 f'GENERATED ALWAYS AS ({column.info["generated_with"]}) VIRTUAL'
    return column_def
if __name__ == "__main__":
    # Demo: create a table with a MySQL virtual column generated from a
    # JSON field, insert a row, and read the value back via the index.
    from flask import Flask
    from flask_sqlalchemy import SQLAlchemy, orm
    from sqlalchemy import func, text, Column, FetchedValue
    app = Flask(__name__)
    # app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
    app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:qwer1234@127.0.0.1:3306/test_db?charset=utf8mb4'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config['SQLALCHEMY_RECORD_QUERIES'] = True
    app.config['SQLALCHEMY_ECHO'] = True
    db = SQLAlchemy(app, session_options={'autocommit': False})
    class TestModel(db.Model):
        id = Column(db.Integer, primary_key=True)
        name = Column(db.String(64))
        data = Column(db.JSON)
        # Declare a generated column: its value is taken from the 'K' field
        # of the JSON column `data`, and an index is created on it.
        # FetchedValue() marks the value as computed by the server.
        K = Column(db.Integer, FetchedValue(), index=True, info={'generated_with': "json_extract(data, '$.K')"})
    db.create_all()
    db.session.add(TestModel(id=1, name='name', data={'K': 3}))
    db.session.commit()
    k = db.session.query(TestModel.K).filter(TestModel.id == 1).first()[0]
    assert k == 3
from sqlalchemy.schema import CreateColumn
from sqlalchemy.ext.compiler import compiles
@compiles(CreateColumn, 'mysql')
def mysql_genereted_column(element, compiler, **kwargs):
column = element.element
if 'generated_with' not in column.info:
return compiler.visit_create_column(element, **kwargs)
column_def = f'{compiler.visit_create_column(element, **kwargs)} ' \
f'GENERATED ALWAYS AS ({column.info["generated_with"]}) VIRTUAL'
return column_def
if __name__ == "__main__":
from flask import Flask
from flask_sqlalchemy import SQLAlchemy, orm
from sqlalchemy import func, text, Column, FetchedValue
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:qwer1234@127.0.0.1:3306/test_db?charset=utf8mb4'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_RECORD_QUERIES'] = True
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app, session_options={'autocommit': False})
class TestModel(db.Model):
id = Column(db.Integer, primary_key=True)
name = Column(db.String(64))
data = Column(db.JSON)
K = Column(db.Integer, FetchedValue(), index=True, info={'generated_with': "json_extract(data, '$.K')"})
db.create_all()
db.session.add(TestModel(id=1, name='name', data={'K': 3}))
db.session.commit()
k = db.session.query(TestModel.K).filter(TestModel.id == 1).first()[0]
assert k == 3
| true | true |
1c3bbbe31941967bac1f78f34bcb108e8f5aa6f8 | 237 | py | Python | tests/apps/page_parts/pages/without.py | T4rk1n/dazzler | 69c49422dc19c910445ab265b1d3481041de8f43 | [
"MIT"
] | 15 | 2019-12-19T11:57:30.000Z | 2021-11-15T23:34:41.000Z | tests/apps/page_parts/pages/without.py | T4rk1n/dazzler | 69c49422dc19c910445ab265b1d3481041de8f43 | [
"MIT"
] | 196 | 2019-09-21T15:10:14.000Z | 2022-03-31T11:07:48.000Z | tests/apps/page_parts/pages/without.py | T4rk1n/dazzler | 69c49422dc19c910445ab265b1d3481041de8f43 | [
"MIT"
] | 7 | 2019-10-30T19:38:15.000Z | 2021-12-01T04:54:16.000Z | from dazzler.components import core
from dazzler.system import Page
page = Page(
__name__,
core.Container([
core.Text('Without', identity='without'),
]),
include_app_footer=False,
include_app_header=False,
)
| 19.75 | 49 | 0.687764 | from dazzler.components import core
from dazzler.system import Page
page = Page(
__name__,
core.Container([
core.Text('Without', identity='without'),
]),
include_app_footer=False,
include_app_header=False,
)
| true | true |
1c3bbc27ea91e63e86a79cac8a61c37bf33b24ed | 17,777 | py | Python | tacker/vnfm/monitor_drivers/zabbix/zabbix.py | qubitn1nja/tacker | 5b40d877a7050cbbec6bdf8ccd77eb7cf224f1b1 | [
"Apache-2.0"
] | 1 | 2020-10-13T05:20:38.000Z | 2020-10-13T05:20:38.000Z | tacker/vnfm/monitor_drivers/zabbix/zabbix.py | qubitn1nja/tacker | 5b40d877a7050cbbec6bdf8ccd77eb7cf224f1b1 | [
"Apache-2.0"
] | null | null | null | tacker/vnfm/monitor_drivers/zabbix/zabbix.py | qubitn1nja/tacker | 5b40d877a7050cbbec6bdf8ccd77eb7cf224f1b1 | [
"Apache-2.0"
] | 1 | 2020-02-08T08:15:53.000Z | 2020-02-08T08:15:53.000Z | # All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import requests
import time
import copy
from oslo_log import log as logging
from tacker.vnfm.monitor_drivers import abstract_driver
from tacker.vnfm.monitor_drivers.zabbix import zabbix_api as zapi
LOG = logging.getLogger(__name__)
class VNFMonitorZabbix(abstract_driver.VNFMonitorAbstractDriver):
params = ['application', 'OS']
    def __init__(self):
        """Initialize empty per-monitor state.

        The attributes are (re)populated when a VNF is registered for
        monitoring (see set_vdu_info / set_zbx_info below).
        """
        self.kwargs = None    # raw monitoring kwargs handed in by the VNFM
        self.vnf = None       # hosting VNF dict currently being monitored
        self.vduname = []     # names of VDUs that qualify for monitoring
        self.URL = None       # Zabbix JSON-RPC endpoint, set on first auth
        self.hostinfo = {}    # per-VDU state: tokens, template/host ids, ...
        self.tenant_id = None  # not assigned anywhere in this chunk -- TODO confirm use
def get_type(self):
"""Return one of predefined type of the hosting vnf drivers."""
plugin_type = 'zabbix'
return plugin_type
def get_name(self):
"""Return a symbolic name for the VNF Monitor plugin."""
plugin_name = 'zabbix'
return plugin_name
def get_description(self):
"""Return description of VNF Monitor plugin."""
plugin_descript = 'Tacker VNFMonitor Zabbix Driver'
return plugin_descript
def monitor_get_config(self, plugin, context, vnf):
"""Return dict of monitor configuration data.
:param plugin:
:param context:
:param vnf:
:returns: dict
:returns: dict of monitor configuration data
"""
return {}
    def monitor_url(self, plugin, context, vnf):
        """Return the url of vnf to monitor.

        Intentionally a no-op for the Zabbix driver (returns None):
        targets are registered with the Zabbix server via its API rather
        than polled by URL.

        :param plugin: VNFM plugin instance (unused)
        :param context: request context (unused)
        :param vnf: hosting VNF dict (unused)
        :returns: None
        """
        pass
def send_post(self, query):
response = requests.post(self.URL, headers=zapi.HEADERS,
data=json.dumps(query))
return dict(response.json())
@staticmethod
def check_error(response):
try:
if 'result' not in response:
raise ValueError
except ValueError:
LOG.error('Cannot request error : %s', response['error']['data'])
def create_graph(self, itemid, name, nodename):
temp_graph_api = copy.deepcopy(zapi.dGRAPH_CREATE_API)
gitems = [{'itemid': itemid, 'color': '00AA00'}]
temp_graph_api['auth'] = \
self.hostinfo[nodename]['zbx_info']['zabbix_token']
temp_graph_api['params']['gitems'] = gitems
temp_graph_api['params']['name'] = name
response = self.send_post(temp_graph_api)
VNFMonitorZabbix.check_error(response)
    def create_action(self):
        """Register one Zabbix action per trigger recorded by _create_trigger().

        Each entry of ``hostinfo[vdu]['actioninfo']`` pairs a trigger id
        with a remediation action.  Plain 'cmd' actions run on the Zabbix
        host entry; for the 'os_agent_info' item the command is instead
        configured with SSH credentials from the application parameters.
        """
        for vdu in self.vduname:
            temp_action_api = copy.deepcopy(zapi.dACTION_CREATE_API)
            temp_action_api['auth'] = \
                self.hostinfo[vdu]['zbx_info']['zabbix_token']
            # Aliases into the (copied) request body; mutations below
            # accumulate across loop iterations for the same VDU.
            tempname_api = temp_action_api['params']['operations'][0]
            temp_filter = temp_action_api['params']['filter']
            for info in (self.hostinfo[vdu]['actioninfo']):
                tempname_api['opcommand_hst'][0]['hostid'] = \
                    self.hostinfo[vdu]['hostid']
                # Time-of-day suffix keeps action names unique per run.
                now = time.localtime()
                rtime = str(now.tm_hour) + str(now.tm_min) + str(now.tm_sec)
                temp_name = "Trigger Action " + \
                            str(
                                vdu + rtime + " " +
                                info['item'] + " " + info['action']
                            )
                temp_action_api['params']['name'] = temp_name
                if (info['action'] == 'cmd') and \
                        (info['item'] != 'os_agent_info'):
                    tempname_api['opcommand']['command'] = info['cmd-action']
                elif (info['item'] == 'os_agent_info') \
                        and (info['action'] == 'cmd'):
                    # Configure the command with the VDU's SSH credentials
                    # (opcommand type 2, port 22).
                    tempname_api['opcommand']['authtype'] = 0
                    tempname_api['opcommand']['username'] = \
                        self.hostinfo[vdu]['appinfo']['ssh_username']
                    tempname_api['opcommand']['password'] = \
                        self.hostinfo[vdu]['appinfo']['ssh_password']
                    tempname_api['opcommand']['type'] = 2
                    tempname_api['opcommand']['command'] = info['cmd-action']
                    tempname_api['opcommand']['port'] = 22
                    temp_filter['conditions'][0]['value'] = info['trigger_id']
                    response = self.send_post(temp_action_api)
                    VNFMonitorZabbix.check_error(response)
                    continue
                temp_filter['conditions'][0]['value'] = info['trigger_id']
                response = self.send_post(temp_action_api)
                VNFMonitorZabbix.check_error(response)
def create_vdu_host(self):
for vdu in self.vduname:
temp_host_api = zapi.dHOST_CREATE_API
temp_group_api = zapi.dGROUP_GET_API
temp_host_api['auth'] = \
self.hostinfo[vdu]['zbx_info']['zabbix_token']
temp_group_api['auth'] = \
self.hostinfo[vdu]['zbx_info']['zabbix_token']
response = self.send_post(temp_group_api)
gid = response['result'][0]['groupid']
temp_host_api['params']['host'] = str(vdu)
if type(self.hostinfo[vdu]['mgmt_ip']) is list:
for vduip in (self.hostinfo[vdu]['mgmt_ip']):
temp_host_api['params']['interfaces'][0]['ip'] = vduip
temp_host_api['params']['templates'][0]['templateid'] = \
self.hostinfo[vdu]['template_id'][0]
temp_host_api['params']['groups'][0]['groupid'] = gid
response = self.send_post(temp_host_api)
else:
temp_host_api['params']['interfaces'][0]['ip'] = \
self.hostinfo[vdu]['mgmt_ip']
temp_host_api['params']['templates'][0]['templateid'] = \
self.hostinfo[vdu]['template_id'][0]
temp_host_api['params']['groups'][0]['groupid'] = gid
response = self.send_post(temp_host_api)
if 'error' in response:
now = time.localtime()
rtime = str(now.tm_hour) + str(now.tm_min) + str(now.tm_sec)
temp_host_api['params']['host'] = str(vdu) + rtime
response = self.send_post(temp_host_api)
self.hostinfo[vdu]['hostid'] = response['result']['hostids'][0]
    def create_trigger(self, trigger_params, vduname):
        """POST a trigger-creation request for *vduname* and return its result.

        :param trigger_params: list of trigger definitions sent as 'params'
        :param vduname: VDU whose auth token and template id are used
        :returns: the 'result' member of the Zabbix response
        """
        temp_trigger_api = copy.deepcopy(zapi.dTRIGGER_CREATE_API)
        temp_trigger_api['auth'] = \
            self.hostinfo[vduname]['zbx_info']['zabbix_token']
        temp_trigger_api['params'] = trigger_params
        # NOTE(review): 'templateid' is set on the top level of the JSON-RPC
        # envelope rather than inside 'params' -- confirm the Zabbix API
        # actually consumes it there.
        temp_trigger_api['templateid'] = \
            str(
                self.hostinfo[vduname]['template_id'][0])
        response = self.send_post(temp_trigger_api)
        VNFMonitorZabbix.check_error(response)
        return response['result']
    def _create_trigger(self):
        """Build trigger expressions from the VDU parameters and create them.

        For each monitored item (everything except the app/ssh credential
        keys) a Zabbix trigger expression is assembled from the template
        name, the item key pattern in ``zapi.dITEM_KEY_COMP`` (with '*'
        substituted by app name or port) and the configured comparison.
        Items that define 'actionname' are created immediately, one at a
        time, so their trigger id can be recorded for create_action();
        the rest are batched per VDU at the end of the loop.
        """
        trigger_params = []   # batched triggers without an action
        trig_act_pa = []      # single-trigger batch for actioned items
        for vdu in self.vduname:
            temp_trigger_list = copy.deepcopy(zapi.dTRIGGER_LIST)
            temp_vdu_name = self.hostinfo[vdu]['appinfo']['app_name']
            temp_vdu_port = self.hostinfo[vdu]['appinfo']['app_port']
            for para in VNFMonitorZabbix.params:
                for item in self.hostinfo[vdu]['parameters'][para].keys():
                    action_list = copy.deepcopy(zapi.dACTION_LIST)
                    temp_item = self.hostinfo[vdu]['parameters'][para][item]
                    if ('app_name' != item)\
                            and ('app_port' != item) \
                            and ('ssh_username' != item) \
                            and ('ssh_password' != item):
                        if 'condition' \
                                in temp_item.keys():
                            temp_con = temp_item['condition']
                            if len(temp_con) == 2:
                                # [comparison, threshold]: key is keyed on
                                # the application name.
                                temp_comparrision = temp_con[0]
                                temp_comparrision_value = temp_con[1]
                                temp_trigger_list[item][0]['expression'] += \
                                    self.hostinfo[vdu]['template_name'] + ':'\
                                    + str(
                                        zapi.dITEM_KEY_COMP[item].replace(
                                            '*', str(temp_vdu_name))) \
                                    + str(
                                        zapi.COMP_VALUE[temp_comparrision]) \
                                    + str(
                                        temp_comparrision_value)
                            else:
                                # Single-element condition: comparison only.
                                temp_comparrision = temp_con[0]
                                if 'os_agent_info' == item:
                                    temp_trigger_list[item][0]['expression'] += \
                                        self.hostinfo[vdu]['template_name'] + ':' \
                                        + str(zapi.dITEM_KEY_COMP[item])
                                else:
                                    temp_trigger_list[item][0]['expression'] += \
                                        self.hostinfo[vdu]['template_name'] + ':' \
                                        + str(
                                            zapi.dITEM_KEY_COMP[item].replace(
                                                '*', str(temp_vdu_port))) \
                                        + str(
                                            zapi.COMP_VALUE[temp_comparrision])
                            if 'actionname' in \
                                    temp_item.keys():
                                # Create immediately to capture trigger_id
                                # for create_action().
                                trig_act_pa.append(temp_trigger_list[item][0])
                                response = self.create_trigger(trig_act_pa, vdu)
                                del trig_act_pa[:]
                                action_list['action'] = \
                                    temp_item['actionname']
                                action_list['trigger_id'] = \
                                    response['triggerids'][0]
                                action_list['item'] = item
                                if 'cmd' == \
                                        temp_item['actionname']:
                                    action_list['cmd-action'] = \
                                        temp_item['cmd-action']
                                self.hostinfo[vdu]['actioninfo'].append(
                                    action_list)
                            else:
                                trigger_params.append(
                                    temp_trigger_list[item][0])
            if len(trigger_params) != 0:
                self.create_trigger(trigger_params, vdu)
                del trigger_params[:]
    def create_item(self):
        """Create the Zabbix items (and a graph each) for every VDU.

        Copies the application parameters into ``hostinfo[vdu]['appinfo']``,
        then creates one item per monitored parameter.  The item key for
        'app_status' embeds the application port and the key for
        'app_memory' embeds the application name (the '*' placeholder in
        ``zapi.dITEM_KEY_INFO``); all other items use their key verbatim.
        """
        for vdu in self.vduname:
            temp_item_api = copy.deepcopy(zapi.dITEM_CREATE_API)
            temp_item_api['auth'] = \
                self.hostinfo[vdu]['zbx_info']['zabbix_token']
            self.hostinfo[vdu]['appinfo'] = \
                copy.deepcopy(zapi.dAPP_INFO)
            temp_app = self.hostinfo[vdu]['parameters']['application']
            # Items are attached to the VDU's template, not the host.
            temp_item_api['params']['hostid'] = \
                self.hostinfo[vdu]['template_id'][0]
            for para in VNFMonitorZabbix.params:
                if 'application' == para:
                    # Capture app name/port/ssh credentials for later use.
                    for app_info in \
                            temp_app.keys():
                        self.hostinfo[vdu]['appinfo'][app_info] = \
                            temp_app[app_info]
                for item in (self.hostinfo[vdu]['parameters'][para].keys()):
                    if ('app_name' != item) and ('app_port' != item) \
                            and ('ssh_username' != item) \
                            and ('ssh_password' != item):
                        temp_item_api['params']['name'] = \
                            zapi.dITEM_KEY_INFO[item]['name']
                        temp_item_api['params']['value_type'] = \
                            zapi.dITEM_KEY_INFO[item]['value_type']
                        if item == 'app_status':
                            temp = zapi.dITEM_KEY_INFO[item]['key_']
                            temp_item_api['params']['key_'] = temp.replace(
                                '*', str(
                                    self.hostinfo[vdu]['appinfo']['app_port']))
                        elif item == 'app_memory':
                            temp = zapi.dITEM_KEY_INFO[item]['key_']
                            temp_item_api['params']['key_'] = temp.replace(
                                '*',
                                str(
                                    self.hostinfo[vdu]['appinfo']['app_name']))
                        else:
                            temp_item_api['params']['key_'] = \
                                zapi.dITEM_KEY_INFO[item]['key_']
                        response = self.send_post(temp_item_api)
                        self.create_graph(
                            response['result']['itemids'][0],
                            temp_item_api['params']['name'], vdu)
                        VNFMonitorZabbix.check_error(response)
def create_template(self):
temp_template_api = copy.deepcopy(zapi.dTEMPLATE_CREATE_API)
for vdu in self.vduname:
temp_template_api['params']['host'] = "Tacker Template " + str(vdu)
temp_template_api['auth'] = \
self.hostinfo[vdu]['zbx_info']['zabbix_token']
response = self.send_post(temp_template_api)
if 'error' in response:
if "already exists." in response['error']['data']:
now = time.localtime()
rtime = str(now.tm_hour) + str(now.tm_min) + str(
now.tm_sec)
temp_template_api['params']['host'] = \
"Tacker Template " + str(vdu) + rtime
response = self.send_post(temp_template_api)
VNFMonitorZabbix.check_error(response)
self.hostinfo[vdu]['template_id'] = \
response['result']['templateids']
self.hostinfo[vdu]['template_name'] =\
temp_template_api['params']['host']
    def add_host_to_zabbix(self):
        """Run the full Zabbix provisioning sequence for all VDUs.

        The order matters: templates must exist before items/triggers can be
        attached to them, hosts link to the templates, and actions reference
        the created triggers and hosts.
        """
        self.create_template()
        self.create_item()
        self._create_trigger()
        self.create_vdu_host()
        self.create_action()
def get_token_from_zbxserver(self, node):
temp_auth_api = copy.deepcopy(zapi.dAUTH_API)
temp_auth_api['params']['user'] = \
self.hostinfo[node]['zbx_info']['zabbix_user']
temp_auth_api['params']['password'] = \
self.hostinfo[node]['zbx_info']['zabbix_pass']
zabbixip = \
self.hostinfo[node]['zbx_info']['zabbix_ip']
zabbixport = \
self.hostinfo[node]['zbx_info']['zabbix_port']
self.URL = "http://" + zabbixip + ":" + \
str(zabbixport) + zapi.URL_
response = requests.post(
self.URL,
headers=zapi.HEADERS,
data=json.dumps(temp_auth_api)
)
response_dict = dict(response.json())
VNFMonitorZabbix.check_error(response_dict)
LOG.info('Success Connect Zabbix Server')
return response_dict['result']
def set_zbx_info(self, node):
self.hostinfo[node]['zbx_info'] = \
copy.deepcopy(zapi.dZBX_INFO)
self.hostinfo[node]['zbx_info']['zabbix_user'] = \
self.kwargs['vdus'][node]['zabbix_username']
self.hostinfo[node]['zbx_info']['zabbix_pass'] = \
self.kwargs['vdus'][node]['zabbix_password']
self.hostinfo[node]['zbx_info']['zabbix_ip'] = \
self.kwargs['vdus'][node]['zabbix_server_ip']
self.hostinfo[node]['zbx_info']['zabbix_port'] = \
self.kwargs['vdus'][node]['zabbix_server_port']
self.hostinfo[node]['zbx_info']['zabbix_token'] = \
self.get_token_from_zbxserver(node)
def set_vdu_info(self):
temp_vduname = self.kwargs['vdus'].keys()
for node in temp_vduname:
if 'application' in \
self.kwargs['vdus'][node]['parameters'].keys() \
and 'OS'\
in self.kwargs['vdus'][node]['parameters'].keys():
self.vduname.append(node)
self.hostinfo[node] = copy.deepcopy(zapi.dVDU_INFO)
self.set_zbx_info(node)
self.hostinfo[node]['mgmt_ip'] = \
self.kwargs['vdus'][node]['mgmt_ip']
self.hostinfo[node]['parameters'] = \
self.kwargs['vdus'][node]['parameters']
self.hostinfo[node]['vdu_id'] = self.vnf['id']
    def add_to_appmonitor(self, vnf, kwargs):
        """Entry point: register a VNF's VDUs with the Zabbix server.

        Args:
            vnf: VNF dict (must provide ``id`` and ``vnfd.tenant_id``).
            kwargs: monitoring policy dict with per-VDU parameters under
                ``kwargs['vdus']``.
        """
        # NOTE: calling __init__ directly resets all instance state so the
        # same driver object can be reused across VNFs.
        self.__init__()
        self.kwargs = kwargs
        self.vnf = vnf
        self.set_vdu_info()
        self.tenant_id = self.vnf['vnfd']['tenant_id']
        self.add_host_to_zabbix()
    def monitor_call(self, vnf, kwargs):
        """Abstract-driver hook; intentionally a no-op for this driver
        (monitoring is event-driven via Zabbix actions, not polled)."""
        pass
    def monitor_app_driver(self, plugin, context, vnf, service_instance):
        """Return the app-monitor driver name ('zabbix')."""
        return self.get_name()
| 42.630695 | 79 | 0.504922 |
import json
import requests
import time
import copy
from oslo_log import log as logging
from tacker.vnfm.monitor_drivers import abstract_driver
from tacker.vnfm.monitor_drivers.zabbix import zabbix_api as zapi
LOG = logging.getLogger(__name__)
class VNFMonitorZabbix(abstract_driver.VNFMonitorAbstractDriver):
    """Tacker VNF monitor driver that provisions application/OS monitoring
    on an external Zabbix server via its JSON-RPC API.

    For each monitorable VDU it creates (in order) a template, items,
    triggers, a host, and trigger actions, keeping all bookkeeping in
    ``self.hostinfo[vdu]``.
    """

    # Parameter groups a VDU must declare to be monitored.
    params = ['application', 'OS']

    def __init__(self):
        # All state is per add_to_appmonitor() invocation; see that method.
        self.kwargs = None
        self.vnf = None
        self.vduname = []
        self.URL = None
        self.hostinfo = {}
        self.tenant_id = None

    def get_type(self):
        """Return the driver type identifier."""
        plugin_type = 'zabbix'
        return plugin_type

    def get_name(self):
        """Return the driver name."""
        plugin_name = 'zabbix'
        return plugin_name

    def get_description(self):
        """Return a human-readable driver description."""
        plugin_descript = 'Tacker VNFMonitor Zabbix Driver'
        return plugin_descript

    def monitor_get_config(self, plugin, context, vnf):
        """No extra monitoring config for this driver."""
        return {}

    def monitor_url(self, plugin, context, vnf):
        """Not used: the Zabbix endpoint is derived from VDU parameters."""
        pass

    def send_post(self, query):
        """POST a JSON-RPC query to the cached Zabbix endpoint and return
        the decoded response as a plain dict."""
        response = requests.post(self.URL, headers=zapi.HEADERS,
                                 data=json.dumps(query))
        return dict(response.json())

    @staticmethod
    def check_error(response):
        """Log (but do not raise) when a Zabbix reply carries no 'result'.

        NOTE(review): errors are swallowed after logging, so callers keep
        going with a failed response — presumably best-effort by design.
        """
        try:
            if 'result' not in response:
                raise ValueError
        except ValueError:
            LOG.error('Cannot request error : %s', response['error']['data'])

    def create_graph(self, itemid, name, nodename):
        """Create a single-item graph on the Zabbix server for ``itemid``."""
        temp_graph_api = copy.deepcopy(zapi.dGRAPH_CREATE_API)
        gitems = [{'itemid': itemid, 'color': '00AA00'}]
        temp_graph_api['auth'] = \
            self.hostinfo[nodename]['zbx_info']['zabbix_token']
        temp_graph_api['params']['gitems'] = gitems
        temp_graph_api['params']['name'] = name
        response = self.send_post(temp_graph_api)
        VNFMonitorZabbix.check_error(response)

    def create_action(self):
        """Create Zabbix trigger actions collected in hostinfo[vdu]['actioninfo'].

        'cmd' actions execute a remote command; for the special
        'os_agent_info' item the command is sent over SSH (type 2) using the
        VDU's configured credentials.
        """
        for vdu in self.vduname:
            temp_action_api = copy.deepcopy(zapi.dACTION_CREATE_API)
            temp_action_api['auth'] = \
                self.hostinfo[vdu]['zbx_info']['zabbix_token']
            tempname_api = temp_action_api['params']['operations'][0]
            temp_filter = temp_action_api['params']['filter']
            for info in (self.hostinfo[vdu]['actioninfo']):
                tempname_api['opcommand_hst'][0]['hostid'] = \
                    self.hostinfo[vdu]['hostid']
                # HHMMSS suffix keeps action names unique across retries.
                now = time.localtime()
                rtime = str(now.tm_hour) + str(now.tm_min) + str(now.tm_sec)
                temp_name = "Trigger Action " + \
                            str(
                                vdu + rtime + " " +
                                info['item'] + " " + info['action']
                            )
                temp_action_api['params']['name'] = temp_name
                if (info['action'] == 'cmd') and \
                        (info['item'] != 'os_agent_info'):
                    tempname_api['opcommand']['command'] = info['cmd-action']
                elif (info['item'] == 'os_agent_info') \
                        and (info['action'] == 'cmd'):
                    # SSH-executed command (Zabbix opcommand type 2).
                    tempname_api['opcommand']['authtype'] = 0
                    tempname_api['opcommand']['username'] = \
                        self.hostinfo[vdu]['appinfo']['ssh_username']
                    tempname_api['opcommand']['password'] = \
                        self.hostinfo[vdu]['appinfo']['ssh_password']
                    tempname_api['opcommand']['type'] = 2
                    tempname_api['opcommand']['command'] = info['cmd-action']
                    tempname_api['opcommand']['port'] = 22
                    temp_filter['conditions'][0]['value'] = info['trigger_id']
                    response = self.send_post(temp_action_api)
                    VNFMonitorZabbix.check_error(response)
                    continue
                temp_filter['conditions'][0]['value'] = info['trigger_id']
                response = self.send_post(temp_action_api)
                VNFMonitorZabbix.check_error(response)

    def create_vdu_host(self):
        """Create one Zabbix host per VDU, linked to its template and the
        default host group; records the host id in hostinfo[vdu]['hostid'].

        NOTE(review): unlike the other builders this uses the shared
        zapi.dHOST_CREATE_API dict without deepcopy, mutating it in place —
        confirm this is intentional.
        """
        for vdu in self.vduname:
            temp_host_api = zapi.dHOST_CREATE_API
            temp_group_api = zapi.dGROUP_GET_API
            temp_host_api['auth'] = \
                self.hostinfo[vdu]['zbx_info']['zabbix_token']
            temp_group_api['auth'] = \
                self.hostinfo[vdu]['zbx_info']['zabbix_token']
            response = self.send_post(temp_group_api)
            gid = response['result'][0]['groupid']
            temp_host_api['params']['host'] = str(vdu)
            if type(self.hostinfo[vdu]['mgmt_ip']) is list:
                # NOTE(review): with several mgmt IPs each iteration reuses
                # the same host name, so only the last create can succeed.
                for vduip in (self.hostinfo[vdu]['mgmt_ip']):
                    temp_host_api['params']['interfaces'][0]['ip'] = vduip
                    temp_host_api['params']['templates'][0]['templateid'] = \
                        self.hostinfo[vdu]['template_id'][0]
                    temp_host_api['params']['groups'][0]['groupid'] = gid
                    response = self.send_post(temp_host_api)
            else:
                temp_host_api['params']['interfaces'][0]['ip'] = \
                    self.hostinfo[vdu]['mgmt_ip']
                temp_host_api['params']['templates'][0]['templateid'] = \
                    self.hostinfo[vdu]['template_id'][0]
                temp_host_api['params']['groups'][0]['groupid'] = gid
                response = self.send_post(temp_host_api)
            if 'error' in response:
                # Host name collision: retry once with an HHMMSS suffix.
                now = time.localtime()
                rtime = str(now.tm_hour) + str(now.tm_min) + str(now.tm_sec)
                temp_host_api['params']['host'] = str(vdu) + rtime
                response = self.send_post(temp_host_api)
            self.hostinfo[vdu]['hostid'] = response['result']['hostids'][0]

    def create_trigger(self, trigger_params, vduname):
        """Send a trigger.create request for ``vduname`` and return its
        'result' payload (contains 'triggerids')."""
        temp_trigger_api = copy.deepcopy(zapi.dTRIGGER_CREATE_API)
        temp_trigger_api['auth'] = \
            self.hostinfo[vduname]['zbx_info']['zabbix_token']
        temp_trigger_api['params'] = trigger_params
        temp_trigger_api['templateid'] = \
            str(
                self.hostinfo[vduname]['template_id'][0])
        response = self.send_post(temp_trigger_api)
        VNFMonitorZabbix.check_error(response)
        return response['result']

    def _create_trigger(self):
        """Build trigger expressions from the VDU's monitoring conditions.

        Triggers with an 'actionname' are created immediately (one by one)
        so their trigger ids can be recorded for create_action(); plain
        triggers are batched per VDU and created at the end of the loop.
        """
        trigger_params = []
        trig_act_pa = []
        for vdu in self.vduname:
            temp_trigger_list = copy.deepcopy(zapi.dTRIGGER_LIST)
            temp_vdu_name = self.hostinfo[vdu]['appinfo']['app_name']
            temp_vdu_port = self.hostinfo[vdu]['appinfo']['app_port']
            for para in VNFMonitorZabbix.params:
                for item in self.hostinfo[vdu]['parameters'][para].keys():
                    action_list = copy.deepcopy(zapi.dACTION_LIST)
                    temp_item = self.hostinfo[vdu]['parameters'][para][item]
                    # Credential/identity keys are config, not monitored items.
                    if ('app_name' != item)\
                            and ('app_port' != item) \
                            and ('ssh_username' != item) \
                            and ('ssh_password' != item):
                        if 'condition' \
                                in temp_item.keys():
                            temp_con = temp_item['condition']
                            if len(temp_con) == 2:
                                # [comparator, threshold] pair.
                                temp_comparrision = temp_con[0]
                                temp_comparrision_value = temp_con[1]
                                temp_trigger_list[item][0]['expression'] += \
                                    self.hostinfo[vdu]['template_name'] + ':'\
                                    + str(
                                        zapi.dITEM_KEY_COMP[item].replace(
                                            '*', str(temp_vdu_name))) \
                                    + str(
                                        zapi.COMP_VALUE[temp_comparrision]) \
                                    + str(
                                        temp_comparrision_value)
                            else:
                                temp_comparrision = temp_con[0]
                                if 'os_agent_info' == item:
                                    temp_trigger_list[item][0]['expression'] += \
                                        self.hostinfo[vdu]['template_name'] + ':' \
                                        + str(zapi.dITEM_KEY_COMP[item])
                                else:
                                    temp_trigger_list[item][0]['expression'] += \
                                        self.hostinfo[vdu]['template_name'] + ':' \
                                        + str(
                                            zapi.dITEM_KEY_COMP[item].replace(
                                                '*', str(temp_vdu_port))) \
                                        + str(
                                            zapi.COMP_VALUE[temp_comparrision])
                            if 'actionname' in \
                                    temp_item.keys():
                                # Create immediately to capture the trigger id
                                # for the matching action.
                                trig_act_pa.append(temp_trigger_list[item][0])
                                response = self.create_trigger(trig_act_pa, vdu)
                                del trig_act_pa[:]
                                action_list['action'] = \
                                    temp_item['actionname']
                                action_list['trigger_id'] = \
                                    response['triggerids'][0]
                                action_list['item'] = item
                                if 'cmd' == \
                                        temp_item['actionname']:
                                    action_list['cmd-action'] = \
                                        temp_item['cmd-action']
                                self.hostinfo[vdu]['actioninfo'].append(
                                    action_list)
                            else:
                                trigger_params.append(
                                    temp_trigger_list[item][0])
            if len(trigger_params) != 0:
                self.create_trigger(trigger_params, vdu)
                del trigger_params[:]

    def create_item(self):
        """Create monitoring items (and one graph per item) on each VDU's
        template; also copies the 'application' parameters into
        hostinfo[vdu]['appinfo'] for later use."""
        for vdu in self.vduname:
            temp_item_api = copy.deepcopy(zapi.dITEM_CREATE_API)
            temp_item_api['auth'] = \
                self.hostinfo[vdu]['zbx_info']['zabbix_token']
            self.hostinfo[vdu]['appinfo'] = \
                copy.deepcopy(zapi.dAPP_INFO)
            temp_app = self.hostinfo[vdu]['parameters']['application']
            temp_item_api['params']['hostid'] = \
                self.hostinfo[vdu]['template_id'][0]
            for para in VNFMonitorZabbix.params:
                if 'application' == para:
                    for app_info in \
                            temp_app.keys():
                        self.hostinfo[vdu]['appinfo'][app_info] = \
                            temp_app[app_info]
                for item in (self.hostinfo[vdu]['parameters'][para].keys()):
                    if ('app_name' != item) and ('app_port' != item) \
                            and ('ssh_username' != item) \
                            and ('ssh_password' != item):
                        temp_item_api['params']['name'] = \
                            zapi.dITEM_KEY_INFO[item]['name']
                        temp_item_api['params']['value_type'] = \
                            zapi.dITEM_KEY_INFO[item]['value_type']
                        # Item keys contain a '*' placeholder: app_status
                        # substitutes the port, app_memory the process name.
                        if item == 'app_status':
                            temp = zapi.dITEM_KEY_INFO[item]['key_']
                            temp_item_api['params']['key_'] = temp.replace(
                                '*', str(
                                    self.hostinfo[vdu]['appinfo']['app_port']))
                        elif item == 'app_memory':
                            temp = zapi.dITEM_KEY_INFO[item]['key_']
                            temp_item_api['params']['key_'] = temp.replace(
                                '*',
                                str(
                                    self.hostinfo[vdu]['appinfo']['app_name']))
                        else:
                            temp_item_api['params']['key_'] = \
                                zapi.dITEM_KEY_INFO[item]['key_']
                        response = self.send_post(temp_item_api)
                        self.create_graph(
                            response['result']['itemids'][0],
                            temp_item_api['params']['name'], vdu)
                        VNFMonitorZabbix.check_error(response)

    def create_template(self):
        """Create one Zabbix template per VDU; records template id/name."""
        temp_template_api = copy.deepcopy(zapi.dTEMPLATE_CREATE_API)
        for vdu in self.vduname:
            temp_template_api['params']['host'] = "Tacker Template " + str(vdu)
            temp_template_api['auth'] = \
                self.hostinfo[vdu]['zbx_info']['zabbix_token']
            response = self.send_post(temp_template_api)
            if 'error' in response:
                if "already exists." in response['error']['data']:
                    # Name collision: retry once with an HHMMSS suffix.
                    now = time.localtime()
                    rtime = str(now.tm_hour) + str(now.tm_min) + str(
                        now.tm_sec)
                    temp_template_api['params']['host'] = \
                        "Tacker Template " + str(vdu) + rtime
                    response = self.send_post(temp_template_api)
            VNFMonitorZabbix.check_error(response)
            self.hostinfo[vdu]['template_id'] = \
                response['result']['templateids']
            self.hostinfo[vdu]['template_name'] =\
                temp_template_api['params']['host']

    def add_host_to_zabbix(self):
        """Run the full provisioning sequence; order matters (templates ->
        items -> triggers -> hosts -> actions)."""
        self.create_template()
        self.create_item()
        self._create_trigger()
        self.create_vdu_host()
        self.create_action()

    def get_token_from_zbxserver(self, node):
        """Log in to the node's Zabbix server; caches the endpoint in
        ``self.URL`` and returns the auth token."""
        temp_auth_api = copy.deepcopy(zapi.dAUTH_API)
        temp_auth_api['params']['user'] = \
            self.hostinfo[node]['zbx_info']['zabbix_user']
        temp_auth_api['params']['password'] = \
            self.hostinfo[node]['zbx_info']['zabbix_pass']
        zabbixip = \
            self.hostinfo[node]['zbx_info']['zabbix_ip']
        zabbixport = \
            self.hostinfo[node]['zbx_info']['zabbix_port']
        self.URL = "http://" + zabbixip + ":" + \
                   str(zabbixport) + zapi.URL_
        response = requests.post(
            self.URL,
            headers=zapi.HEADERS,
            data=json.dumps(temp_auth_api)
        )
        response_dict = dict(response.json())
        VNFMonitorZabbix.check_error(response_dict)
        LOG.info('Success Connect Zabbix Server')
        return response_dict['result']

    def set_zbx_info(self, node):
        """Populate hostinfo[node]['zbx_info'] from the VDU parameters and
        fetch an API token (which needs the fields set just before)."""
        self.hostinfo[node]['zbx_info'] = \
            copy.deepcopy(zapi.dZBX_INFO)
        self.hostinfo[node]['zbx_info']['zabbix_user'] = \
            self.kwargs['vdus'][node]['zabbix_username']
        self.hostinfo[node]['zbx_info']['zabbix_pass'] = \
            self.kwargs['vdus'][node]['zabbix_password']
        self.hostinfo[node]['zbx_info']['zabbix_ip'] = \
            self.kwargs['vdus'][node]['zabbix_server_ip']
        self.hostinfo[node]['zbx_info']['zabbix_port'] = \
            self.kwargs['vdus'][node]['zabbix_server_port']
        self.hostinfo[node]['zbx_info']['zabbix_token'] = \
            self.get_token_from_zbxserver(node)

    def set_vdu_info(self):
        """Register every VDU that defines both 'application' and 'OS'
        parameter groups."""
        temp_vduname = self.kwargs['vdus'].keys()
        for node in temp_vduname:
            if 'application' in \
                    self.kwargs['vdus'][node]['parameters'].keys() \
                    and 'OS'\
                    in self.kwargs['vdus'][node]['parameters'].keys():
                self.vduname.append(node)
                self.hostinfo[node] = copy.deepcopy(zapi.dVDU_INFO)
                self.set_zbx_info(node)
                self.hostinfo[node]['mgmt_ip'] = \
                    self.kwargs['vdus'][node]['mgmt_ip']
                self.hostinfo[node]['parameters'] = \
                    self.kwargs['vdus'][node]['parameters']
                self.hostinfo[node]['vdu_id'] = self.vnf['id']

    def add_to_appmonitor(self, vnf, kwargs):
        """Entry point: reset state (via __init__) and provision monitoring
        for all of this VNF's VDUs."""
        self.__init__()
        self.kwargs = kwargs
        self.vnf = vnf
        self.set_vdu_info()
        self.tenant_id = self.vnf['vnfd']['tenant_id']
        self.add_host_to_zabbix()

    def monitor_call(self, vnf, kwargs):
        """No-op: monitoring is event-driven via Zabbix actions."""
        pass

    def monitor_app_driver(self, plugin, context, vnf, service_instance):
        """Return the app-monitor driver name ('zabbix')."""
        return self.get_name()
| true | true |
1c3bbd283dbda4b1f40e22b50b367a2c196bb93f | 278 | py | Python | examples/advanced/convexHull.py | CommanderPho/vedo | a0784a47d1b1a92f71b81f153c12856ccf7ec242 | [
"MIT"
] | 1 | 2022-03-22T21:49:29.000Z | 2022-03-22T21:49:29.000Z | examples/advanced/convexHull.py | CommanderPho/vedo | a0784a47d1b1a92f71b81f153c12856ccf7ec242 | [
"MIT"
] | null | null | null | examples/advanced/convexHull.py | CommanderPho/vedo | a0784a47d1b1a92f71b81f153c12856ccf7ec242 | [
"MIT"
] | null | null | null | """Create the Convex Hull of a Mesh or a set of input points"""
from vedo import *
settings.defaultFont = 'Bongas'
settings.useDepthPeeling = True
spid = Mesh(dataurl+"spider.ply").c("brown")
ch = ConvexHull(spid.points()).alpha(0.2)
show(spid, ch, __doc__, axes=1).close()
| 23.166667 | 63 | 0.708633 | from vedo import *
# Relies on `from vedo import *` executed earlier in this script.
settings.defaultFont = 'Bongas'
settings.useDepthPeeling = True  # better rendering of the translucent hull
spid = Mesh(dataurl+"spider.ply").c("brown")  # demo mesh from vedo's data URL
ch = ConvexHull(spid.points()).alpha(0.2)  # hull of the mesh vertices, 20% opaque
show(spid, ch, __doc__, axes=1).close()
| true | true |
1c3bbf598e20b126cffd55cf727b8673e654f30c | 499 | py | Python | tests/contrib/test_uv.py | ZacheryGuan/python-pinyin | c16f4bc72ebbb4deabb31f867ec6db913a8a6327 | [
"MIT"
] | 1 | 2020-12-06T20:48:01.000Z | 2020-12-06T20:48:01.000Z | tests/contrib/test_uv.py | ZacheryGuan/python-pinyin | c16f4bc72ebbb4deabb31f867ec6db913a8a6327 | [
"MIT"
] | null | null | null | tests/contrib/test_uv.py | ZacheryGuan/python-pinyin | c16f4bc72ebbb4deabb31f867ec6db913a8a6327 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pypinyin import lazy_pinyin
from pypinyin.contrib.uv import V2UMixin
from pypinyin.converter import DefaultConverter
from pypinyin.core import Pinyin
class MyConverter(V2UMixin, DefaultConverter):
    """Converter that renders the vowel 'v' as 'ü' (via V2UMixin)."""
    pass
my_pinyin = Pinyin(MyConverter())
def test_v2u():
    """Default output spells ü as 'v'; the mixin or v_to_u=True yields 'ü'."""
    assert lazy_pinyin('战略') == ['zhan', 'lve']
    assert my_pinyin.lazy_pinyin('战略') == ['zhan', 'lüe']
    assert lazy_pinyin('战略', v_to_u=True) == ['zhan', 'lüe']
| 23.761905 | 60 | 0.719439 |
from __future__ import unicode_literals
from pypinyin import lazy_pinyin
from pypinyin.contrib.uv import V2UMixin
from pypinyin.converter import DefaultConverter
from pypinyin.core import Pinyin
class MyConverter(V2UMixin, DefaultConverter):
    """Converter that renders the vowel 'v' as 'ü' (via V2UMixin)."""
    pass
my_pinyin = Pinyin(MyConverter())
def test_v2u():
    """Default output spells ü as 'v'; the mixin or v_to_u=True yields 'ü'."""
    assert lazy_pinyin('战略') == ['zhan', 'lve']
    assert my_pinyin.lazy_pinyin('战略') == ['zhan', 'lüe']
    assert lazy_pinyin('战略', v_to_u=True) == ['zhan', 'lüe']
| true | true |
1c3bbf68330d376d9e95e148de4b54a17025e527 | 57,123 | py | Python | monai/transforms/croppad/array.py | davidiommi/MONAI_0_7_0 | c288dd065ab18aaf018ea01b54f3ec515e6444dd | [
"Apache-2.0"
] | null | null | null | monai/transforms/croppad/array.py | davidiommi/MONAI_0_7_0 | c288dd065ab18aaf018ea01b54f3ec515e6444dd | [
"Apache-2.0"
] | null | null | null | monai/transforms/croppad/array.py | davidiommi/MONAI_0_7_0 | c288dd065ab18aaf018ea01b54f3ec515e6444dd | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of "vanilla" transforms for crop and pad operations
https://github.com/Project-MONAI/MONAI/wiki/MONAI_Design
"""
from itertools import chain
from math import ceil
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from torch.nn.functional import pad as pad_pt
from monai.config import IndexSelection
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.utils import get_random_patch, get_valid_patch_size
from monai.transforms.transform import Randomizable, Transform
from monai.transforms.utils import (
compute_divisible_spatial_size,
convert_pad_mode,
generate_label_classes_crop_centers,
generate_pos_neg_label_crop_centers,
generate_spatial_bounding_box,
is_positive,
map_binary_to_indices,
map_classes_to_indices,
weighted_patch_samples,
)
from monai.transforms.utils_pytorch_numpy_unification import floor_divide, maximum
from monai.utils import (
Method,
NumpyPadMode,
PytorchPadMode,
ensure_tuple,
ensure_tuple_rep,
fall_back_tuple,
look_up_option,
)
from monai.utils.enums import TransformBackends
from monai.utils.type_conversion import convert_data_type
# Public API of this module; names not listed here are implementation details.
__all__ = [
    "SpatialPad",
    "BorderPad",
    "DivisiblePad",
    "SpatialCrop",
    "CenterSpatialCrop",
    "CenterScaleCrop",
    "RandSpatialCrop",
    "RandScaleCrop",
    "RandSpatialCropSamples",
    "CropForeground",
    "RandWeightedCrop",
    "RandCropByPosNegLabel",
    "RandCropByLabelClasses",
    "ResizeWithPadOrCrop",
    "BoundingRect",
]
class Pad(Transform):
    """
    Perform padding for a given an amount of padding in each dimension.
    If input is `torch.Tensor`, `torch.nn.functional.pad` will be used, otherwise, `np.pad` will be used.

    Args:
        to_pad: the amount to be padded in each dimension [(low_H, high_H), (low_W, high_W), ...].
        mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
            ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
            available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"constant"``.
            See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
            https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        kwargs: other arguments for the `np.pad` or `torch.pad` function.
            note that `np.pad` treats channel dimension as the first dimension.

    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(
        self,
        to_pad: List[Tuple[int, int]],
        mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.CONSTANT,
        **kwargs,
    ) -> None:
        self.to_pad = to_pad
        self.mode = mode
        self.kwargs = kwargs

    @staticmethod
    def _np_pad(img: np.ndarray, all_pad_width, mode, **kwargs) -> np.ndarray:
        # numpy takes (before, after) pairs per dimension, first dim first.
        return np.pad(img, all_pad_width, mode=mode, **kwargs)  # type: ignore

    @staticmethod
    def _pt_pad(img: torch.Tensor, all_pad_width, mode, **kwargs) -> torch.Tensor:
        # torch.nn.functional.pad wants a flat list ordered LAST dim first:
        # drop the channel pair ([1:]) and reverse both within and across dims.
        pt_pad_width = [val for sublist in all_pad_width[1:] for val in sublist[::-1]][::-1]
        # torch.pad expects `[B, C, H, W, [D]]` shape, hence the temporary batch dim
        return pad_pt(img.unsqueeze(0), pt_pad_width, mode=mode, **kwargs).squeeze(0)

    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: data to be transformed, assuming `img` is channel-first and
                padding doesn't apply to the channel dim.
            mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
                ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
                available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"`` or ``"circular"``}.
                One of the listed string values or a user supplied function. Defaults to `self.mode`.
                See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
                https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        """
        if not np.asarray(self.to_pad).any():
            # all zeros, skip padding
            return img
        # translate the mode name to the backend-specific spelling
        mode = convert_pad_mode(dst=img, mode=mode or self.mode).value
        pad = self._pt_pad if isinstance(img, torch.Tensor) else self._np_pad
        return pad(img, self.to_pad, mode, **self.kwargs)  # type: ignore
class SpatialPad(Transform):
    """
    Performs padding to the data, symmetric for all sides or all on one side for each dimension.

    If input is `torch.Tensor` and mode is `constant`, `torch.nn.functional.pad` will be used.
    Otherwise, `np.pad` will be used (input converted to `np.ndarray` if necessary).

    Uses np.pad so in practice, a mode needs to be provided. See numpy.lib.arraypad.pad
    for additional details.

    Args:
        spatial_size: the spatial size of output data after padding, if a dimension of the input
            data size is bigger than the pad size, will not pad that dimension.
            If its components have non-positive values, the corresponding size of input image will be used
            (no padding). for example: if the spatial size of input data is [30, 30, 30] and
            `spatial_size=[32, 25, -1]`, the spatial size of output data will be [32, 30, 30].
        method: {``"symmetric"``, ``"end"``}
            Pad image symmetrically on every side or only pad at the end sides. Defaults to ``"symmetric"``.
        mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
            ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
            available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"constant"``.
            See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
            https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        kwargs: other arguments for the `np.pad` or `torch.pad` function.
            note that `np.pad` treats channel dimension as the first dimension.

    """

    backend = Pad.backend

    def __init__(
        self,
        spatial_size: Union[Sequence[int], int],
        method: Union[Method, str] = Method.SYMMETRIC,
        mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.CONSTANT,
        **kwargs,
    ) -> None:
        self.spatial_size = spatial_size
        self.method: Method = look_up_option(method, Method)
        self.mode = mode
        self.kwargs = kwargs

    def _determine_data_pad_width(self, data_shape: Sequence[int]) -> List[Tuple[int, int]]:
        """Return (before, after) pad amounts per spatial dim; non-positive
        target sizes fall back to the input size (no padding there)."""
        spatial_size = fall_back_tuple(self.spatial_size, data_shape)
        if self.method == Method.SYMMETRIC:
            pad_width = []
            for i, sp_i in enumerate(spatial_size):
                width = max(sp_i - data_shape[i], 0)
                # split evenly; any odd leftover goes to the "after" side
                pad_width.append((width // 2, width - (width // 2)))
            return pad_width
        # Method.END: all padding appended at the end of each dimension
        return [(0, max(sp_i - data_shape[i], 0)) for i, sp_i in enumerate(spatial_size)]

    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: data to be transformed, assuming `img` is channel-first and
                padding doesn't apply to the channel dim.
            mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
                ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
                available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
                One of the listed string values or a user supplied function. Defaults to `self.mode`.
                See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
                https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        """
        data_pad_width = self._determine_data_pad_width(img.shape[1:])
        # prepend a zero pair so the channel dimension is never padded
        all_pad_width = [(0, 0)] + data_pad_width
        if not np.asarray(all_pad_width).any():
            # all zeros, skip padding
            return img

        padder = Pad(all_pad_width, mode or self.mode, **self.kwargs)
        return padder(img)
class BorderPad(Transform):
    """
    Pad the input data by adding specified borders to every dimension.

    Args:
        spatial_border: specified size for every spatial border. Any -ve values will be set to 0. It can be 3 shapes:

            - single int number, pad all the borders with the same size.
            - length equals the length of image shape, pad every spatial dimension separately.
              for example, image shape(CHW) is [1, 4, 4], spatial_border is [2, 1],
              pad every border of H dim with 2, pad every border of W dim with 1, result shape is [1, 8, 6].
            - length equals 2 x (length of image shape), pad every border of every dimension separately.
              for example, image shape(CHW) is [1, 4, 4], spatial_border is [1, 2, 3, 4], pad top of H dim with 1,
              pad bottom of H dim with 2, pad left of W dim with 3, pad right of W dim with 4.
              the result shape is [1, 7, 11].
        mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
            ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
            available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
            One of the listed string values or a user supplied function. Defaults to ``"constant"``.
            See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
            https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        kwargs: other arguments for the `np.pad` or `torch.pad` function.
            note that `np.pad` treats channel dimension as the first dimension.

    """

    backend = Pad.backend

    def __init__(
        self,
        spatial_border: Union[Sequence[int], int],
        mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.CONSTANT,
        **kwargs,
    ) -> None:
        self.spatial_border = spatial_border
        self.mode = mode
        self.kwargs = kwargs

    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: data to be transformed, assuming `img` is channel-first and
                padding doesn't apply to the channel dim.
            mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
                ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
                available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
                One of the listed string values or a user supplied function. Defaults to `self.mode`.
                See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
                https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html

        Raises:
            ValueError: When ``self.spatial_border`` does not contain ints.
            ValueError: When ``self.spatial_border`` length is not one of
                [1, len(spatial_shape), 2*len(spatial_shape)].

        """
        spatial_shape = img.shape[1:]
        spatial_border = ensure_tuple(self.spatial_border)
        if not all(isinstance(b, int) for b in spatial_border):
            raise ValueError(f"self.spatial_border must contain only ints, got {spatial_border}.")
        # negative border sizes are clipped to zero (documented behavior)
        spatial_border = tuple(max(0, b) for b in spatial_border)

        if len(spatial_border) == 1:
            # one value: same pad on both sides of every spatial dim
            data_pad_width = [(spatial_border[0], spatial_border[0]) for _ in spatial_shape]
        elif len(spatial_border) == len(spatial_shape):
            # one value per dim: same pad on both sides of that dim
            data_pad_width = [(sp, sp) for sp in spatial_border[: len(spatial_shape)]]
        elif len(spatial_border) == len(spatial_shape) * 2:
            # explicit (before, after) per dim, flattened
            data_pad_width = [(spatial_border[2 * i], spatial_border[2 * i + 1]) for i in range(len(spatial_shape))]
        else:
            raise ValueError(
                f"Unsupported spatial_border length: {len(spatial_border)}, available options are "
                f"[1, len(spatial_shape)={len(spatial_shape)}, 2*len(spatial_shape)={2*len(spatial_shape)}]."
            )

        # prepend a zero pair so the channel dimension is never padded
        all_pad_width = [(0, 0)] + data_pad_width
        padder = Pad(all_pad_width, mode or self.mode, **self.kwargs)
        return padder(img)
class DivisiblePad(Transform):
    """
    Pad the input data, so that the spatial sizes are divisible by `k`.
    """

    backend = SpatialPad.backend

    def __init__(
        self,
        k: Union[Sequence[int], int],
        mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.CONSTANT,
        method: Union[Method, str] = Method.SYMMETRIC,
        **kwargs,
    ) -> None:
        """
        Args:
            k: the target k for each spatial dimension.
                if `k` is negative or 0, the original size is preserved.
                if `k` is an int, the same `k` be applied to all the input spatial dimensions.
            mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
                ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
                available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
                One of the listed string values or a user supplied function. Defaults to ``"constant"``.
                See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
                https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
            method: {``"symmetric"``, ``"end"``}
                Pad image symmetrically on every side or only pad at the end sides. Defaults to ``"symmetric"``.
            kwargs: other arguments for the `np.pad` or `torch.pad` function.
                note that `np.pad` treats channel dimension as the first dimension.

        See also :py:class:`monai.transforms.SpatialPad`
        """
        self.k = k
        self.mode: NumpyPadMode = NumpyPadMode(mode)
        self.method: Method = Method(method)
        self.kwargs = kwargs

    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
    ) -> NdarrayOrTensor:
        """
        Args:
            img: data to be transformed, assuming `img` is channel-first
                and padding doesn't apply to the channel dim.
            mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
                ``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
                available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
                One of the listed string values or a user supplied function. Defaults to `self.mode`.
                See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
                https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
        """
        # round each spatial size up to the next multiple of k, then delegate
        # the actual padding to SpatialPad
        new_size = compute_divisible_spatial_size(spatial_shape=img.shape[1:], k=self.k)
        spatial_pad = SpatialPad(
            spatial_size=new_size,
            method=self.method,
            mode=mode or self.mode,
            **self.kwargs,
        )

        return spatial_pad(img)
class SpatialCrop(Transform):
    """
    General purpose cropper to produce sub-volume region of interest (ROI).
    If a dimension of the expected ROI size is bigger than the input image size, will not crop that dimension.
    So the cropped result may be smaller than the expected ROI, and the cropped results of several images may
    not have exactly the same shape.
    It can support to crop ND spatial (channel-first) data.

    The cropped region can be parameterised in various ways:
        - a list of slices for each spatial dimension (allows for use of -ve indexing and `None`)
        - a spatial center and size
        - the start and end coordinates of the ROI
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(
        self,
        roi_center: Union[Sequence[int], NdarrayOrTensor, None] = None,
        roi_size: Union[Sequence[int], NdarrayOrTensor, None] = None,
        roi_start: Union[Sequence[int], NdarrayOrTensor, None] = None,
        roi_end: Union[Sequence[int], NdarrayOrTensor, None] = None,
        roi_slices: Optional[Sequence[slice]] = None,
    ) -> None:
        """
        Args:
            roi_center: voxel coordinates for center of the crop ROI.
            roi_size: size of the crop ROI, if a dimension of ROI size is bigger than image size,
                will not crop that dimension of the image.
            roi_start: voxel coordinates for start of the crop ROI.
            roi_end: voxel coordinates for end of the crop ROI, if a coordinate is out of image,
                use the end coordinate of image.
            roi_slices: list of slices for each of the spatial dimensions.
        """
        roi_start_torch: torch.Tensor

        if roi_slices:
            if not all(s.step is None or s.step == 1 for s in roi_slices):
                raise ValueError("Only slice steps of 1/None are currently supported")
            self.slices = list(roi_slices)
        else:
            if roi_center is not None and roi_size is not None:
                # use int64 (not int16) so coordinates beyond 32767 -- e.g. in
                # whole-slide or very large volumes -- don't silently overflow
                roi_center = torch.as_tensor(roi_center, dtype=torch.int64)
                roi_size = torch.as_tensor(roi_size, dtype=torch.int64, device=roi_center.device)
                # clamp the start at 0 so a centered ROI never goes negative
                roi_start_torch = maximum(  # type: ignore
                    roi_center - floor_divide(roi_size, 2),
                    torch.zeros_like(roi_center),
                )
                roi_end_torch = maximum(roi_start_torch + roi_size, roi_start_torch)
            else:
                if roi_start is None or roi_end is None:
                    raise ValueError("Please specify either roi_center, roi_size or roi_start, roi_end.")
                roi_start_torch = torch.as_tensor(roi_start, dtype=torch.int64)
                roi_start_torch = maximum(roi_start_torch, torch.zeros_like(roi_start_torch))  # type: ignore
                # ensure end >= start so every slice is non-negative in length
                roi_end_torch = maximum(torch.as_tensor(roi_end, dtype=torch.int64), roi_start_torch)
            # convert to slices (accounting for 1d)
            if roi_start_torch.numel() == 1:
                self.slices = [slice(int(roi_start_torch.item()), int(roi_end_torch.item()))]
            else:
                self.slices = [slice(int(s.item()), int(e.item())) for s, e in zip(roi_start_torch, roi_end_torch)]

    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Apply the transform to `img`, assuming `img` is channel-first and
        slicing doesn't apply to the channel dim.
        """
        sd = min(len(self.slices), len(img.shape[1:]))  # spatial dims
        # full slice first so the channel dimension is kept intact
        slices = [slice(None)] + self.slices[:sd]
        return img[tuple(slices)]
class CenterSpatialCrop(Transform):
    """
    Crop the center of an image with the given ROI size.

    If a dimension of the requested ROI is larger than the corresponding image
    dimension, that dimension is left uncropped, so results can be smaller than
    the requested ROI and may differ in shape between images.

    Args:
        roi_size: the spatial size of the crop region e.g. [224,224,128]
            if a dimension of ROI size is bigger than image size, will not crop that dimension of the image.
            If its components have non-positive values, the corresponding size of input image will be used.
            for example: if the spatial size of input data is [40, 40, 40] and `roi_size=[32, 64, -1]`,
            the spatial size of output data will be [32, 40, 40].
    """

    backend = SpatialCrop.backend

    def __init__(self, roi_size: Union[Sequence[int], int]) -> None:
        self.roi_size = roi_size

    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Apply the transform to `img`, assuming `img` is channel-first and
        slicing doesn't apply to the channel dim.
        """
        spatial_shape = img.shape[1:]
        # non-positive ROI components fall back to the corresponding image size
        crop_size = fall_back_tuple(self.roi_size, spatial_shape)
        crop_center = [dim // 2 for dim in spatial_shape]
        return SpatialCrop(roi_center=crop_center, roi_size=crop_size)(img)
class CenterScaleCrop(Transform):
    """
    Crop the center of an image, with the ROI size given as a fraction of the image size.

    Args:
        roi_scale: specifies the expected scale of image size to crop. e.g. [0.3, 0.4, 0.5] or a number for all dims.
            If its components have non-positive values, will use `1.0` instead, which means the input image size.
    """

    backend = CenterSpatialCrop.backend

    def __init__(self, roi_scale: Union[Sequence[float], float]):
        self.roi_scale = roi_scale

    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        spatial_shape = img.shape[1:]
        scales = ensure_tuple_rep(self.roi_scale, len(spatial_shape))
        # round the fractional sizes up so a non-zero scale never yields an empty dim
        roi_size = [ceil(scale * dim) for scale, dim in zip(scales, spatial_shape)]
        return CenterSpatialCrop(roi_size=roi_size)(img)
class RandSpatialCrop(Randomizable, Transform):
    """
    Crop an image with a random-size or fixed-size ROI, centered either at a random
    position or at the image center. Minimum and maximum sizes bound the randomly
    generated ROI.

    Note: even `random_size=False`, if a dimension of the expected ROI size is bigger than the input image size,
    will not crop that dimension. So the cropped result may be smaller than the expected ROI, and the cropped results
    of several images may not have exactly the same shape.

    Args:
        roi_size: if `random_size` is True, it specifies the minimum crop region.
            if `random_size` is False, it specifies the expected ROI size to crop. e.g. [224, 224, 128]
            if a dimension of ROI size is bigger than image size, will not crop that dimension of the image.
            If its components have non-positive values, the corresponding size of input image will be used.
            for example: if the spatial size of input data is [40, 40, 40] and `roi_size=[32, 64, -1]`,
            the spatial size of output data will be [32, 40, 40].
        max_roi_size: if `random_size` is True and `roi_size` specifies the min crop region size, `max_roi_size`
            can specify the max crop region size. if None, defaults to the input image size.
            if its components have non-positive values, the corresponding size of input image will be used.
        random_center: crop at random position as center or the image center.
        random_size: crop with random size or specific size ROI.
            if True, the actual size is sampled from `randint(roi_size, max_roi_size + 1)`.
    """

    backend = CenterSpatialCrop.backend

    def __init__(
        self,
        roi_size: Union[Sequence[int], int],
        max_roi_size: Optional[Union[Sequence[int], int]] = None,
        random_center: bool = True,
        random_size: bool = True,
    ) -> None:
        self.roi_size = roi_size
        self.max_roi_size = max_roi_size
        self.random_center = random_center
        self.random_size = random_size
        # resolved per-image by `randomize`
        self._size: Optional[Sequence[int]] = None
        self._slices: Optional[Tuple[slice, ...]] = None

    def randomize(self, img_size: Sequence[int]) -> None:
        self._size = fall_back_tuple(self.roi_size, img_size)
        if self.random_size:
            max_size = img_size if self.max_roi_size is None else fall_back_tuple(self.max_roi_size, img_size)
            if any(lo > hi for lo, hi in zip(self._size, max_size)):
                raise ValueError(f"min ROI size: {self._size} is bigger than max ROI size: {max_size}.")
            # draw each spatial dimension independently from [min, max] (inclusive)
            self._size = tuple(self.R.randint(low=lo, high=hi + 1) for lo, hi in zip(self._size, max_size))
        if self.random_center:
            valid_size = get_valid_patch_size(img_size, self._size)
            self._slices = (slice(None),) + get_random_patch(img_size, valid_size, self.R)

    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Apply the transform to `img`, assuming `img` is channel-first and
        slicing doesn't apply to the channel dim.
        """
        self.randomize(img.shape[1:])
        if self._size is None:
            raise RuntimeError("self._size not specified.")
        if self.random_center:
            return img[self._slices]
        return CenterSpatialCrop(self._size)(img)
class RandScaleCrop(RandSpatialCrop):
    """
    Subclass of :py:class:`monai.transforms.RandSpatialCrop` that expresses the
    crop region as a fraction of the image size instead of absolute voxels.
    It can crop at a random position as center or at the image center, with
    minimum and maximum scales bounding the randomly generated ROI.

    Args:
        roi_scale: if `random_size` is True, it specifies the minimum crop size: `roi_scale * image spatial size`.
            if `random_size` is False, it specifies the expected scale of image size to crop. e.g. [0.3, 0.4, 0.5].
            If its components have non-positive values, will use `1.0` instead, which means the input image size.
        max_roi_scale: if `random_size` is True and `roi_scale` specifies the min crop region size, `max_roi_scale`
            can specify the max crop region size: `max_roi_scale * image spatial size`.
            if None, defaults to the input image size. if its components have non-positive values,
            will use `1.0` instead, which means the input image size.
        random_center: crop at random position as center or the image center.
        random_size: crop with random size or specified size ROI by `roi_scale * image spatial size`.
            if True, the actual size is sampled from
            `randint(roi_scale * image spatial size, max_roi_scale * image spatial size + 1)`.
    """

    def __init__(
        self,
        roi_scale: Union[Sequence[float], float],
        max_roi_scale: Optional[Union[Sequence[float], float]] = None,
        random_center: bool = True,
        random_size: bool = True,
    ) -> None:
        # roi_size/max_roi_size are recomputed from the scales on every call,
        # so initialise the parent with placeholders only.
        super().__init__(roi_size=-1, max_roi_size=None, random_center=random_center, random_size=random_size)
        self.roi_scale = roi_scale
        self.max_roi_scale = max_roi_scale

    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Apply the transform to `img`, assuming `img` is channel-first and
        slicing doesn't apply to the channel dim.
        """
        spatial_shape = img.shape[1:]
        ndim = len(spatial_shape)

        def _scaled(scales: Union[Sequence[float], float]) -> List[int]:
            # per-dimension ROI size, rounded up from the fractional scale
            return [ceil(s * d) for s, d in zip(ensure_tuple_rep(scales, ndim), spatial_shape)]

        self.roi_size = _scaled(self.roi_scale)
        self.max_roi_size = None if self.max_roi_scale is None else _scaled(self.max_roi_scale)
        return super().__call__(img=img)
class RandSpatialCropSamples(Randomizable, Transform):
    """
    Produce a list of `num_samples` crops from one image, each taken with a
    random-size or fixed-size ROI at a random position or at the image center.
    The minimum size limits the randomly generated ROI.

    Note: even `random_size=False`, if a dimension of the expected ROI size is bigger than the input image size,
    will not crop that dimension. So the cropped result may be smaller than the expected ROI, and the cropped
    results of several images may not have exactly the same shape.

    Args:
        roi_size: if `random_size` is True, it specifies the minimum crop region.
            if `random_size` is False, it specifies the expected ROI size to crop. e.g. [224, 224, 128]
            if a dimension of ROI size is bigger than image size, will not crop that dimension of the image.
            If its components have non-positive values, the corresponding size of input image will be used.
            for example: if the spatial size of input data is [40, 40, 40] and `roi_size=[32, 64, -1]`,
            the spatial size of output data will be [32, 40, 40].
        num_samples: number of samples (crop regions) to take in the returned list.
        max_roi_size: if `random_size` is True and `roi_size` specifies the min crop region size, `max_roi_size`
            can specify the max crop region size. if None, defaults to the input image size.
            if its components have non-positive values, the corresponding size of input image will be used.
        random_center: crop at random position as center or the image center.
        random_size: crop with random size or specific size ROI.
            The actual size is sampled from `randint(roi_size, img_size)`.

    Raises:
        ValueError: When ``num_samples`` is nonpositive.
    """

    backend = RandScaleCrop.backend

    def __init__(
        self,
        roi_size: Union[Sequence[int], int],
        num_samples: int,
        max_roi_size: Optional[Union[Sequence[int], int]] = None,
        random_center: bool = True,
        random_size: bool = True,
    ) -> None:
        if num_samples < 1:
            raise ValueError(f"num_samples must be positive, got {num_samples}.")
        self.num_samples = num_samples
        # one shared inner cropper performs every individual crop
        self.cropper = RandSpatialCrop(roi_size, max_roi_size, random_center, random_size)

    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "Randomizable":
        # keep the inner cropper's RNG in sync with this transform's RNG
        super().set_random_state(seed=seed, state=state)
        self.cropper.set_random_state(state=self.R)
        return self

    def randomize(self, data: Optional[Any] = None) -> None:
        # randomization is delegated to the inner cropper on every crop
        pass

    def __call__(self, img: NdarrayOrTensor) -> List[NdarrayOrTensor]:
        """
        Apply the transform to `img`, assuming `img` is channel-first and
        cropping doesn't change the channel dim.
        """
        crop = self.cropper
        return [crop(img) for _ in range(self.num_samples)]
class CropForeground(Transform):
    """
    Crop an image using a bounding box. The bounding box is generated by selecting foreground using select_fn
    at channels channel_indices. margin is added in each spatial dimension of the bounding box.
    The typical usage is to help training and evaluation if the valid part is small in the whole medical image.
    Users can define arbitrary function to select expected foreground from the whole image or specified channels.
    And it can also add margin to every dim of the bounding box of foreground object.
    For example:

    .. code-block:: python

        image = np.array(
            [[[0, 0, 0, 0, 0],
              [0, 1, 2, 1, 0],
              [0, 1, 3, 2, 0],
              [0, 1, 2, 1, 0],
              [0, 0, 0, 0, 0]]])  # 1x5x5, single channel 5x5 image

        def threshold_at_one(x):
            # threshold at 1
            return x > 1

        cropper = CropForeground(select_fn=threshold_at_one, margin=0)
        print(cropper(image))

        [[[2, 1],
          [3, 2],
          [2, 1]]]
    """

    def __init__(
        self,
        select_fn: Callable = is_positive,
        channel_indices: Optional[IndexSelection] = None,
        margin: Union[Sequence[int], int] = 0,
        return_coords: bool = False,
        k_divisible: Union[Sequence[int], int] = 1,
        mode: Union[NumpyPadMode, str] = NumpyPadMode.CONSTANT,
        **np_kwargs,
    ) -> None:
        """
        Args:
            select_fn: function to select expected foreground, default is to select values > 0.
            channel_indices: if defined, select foreground only on the specified channels
                of image. if None, select foreground on the whole image.
            margin: add margin value to spatial dims of the bounding box, if only 1 value provided, use it for all dims.
            return_coords: whether return the coordinates of spatial bounding box for foreground.
            k_divisible: make each spatial dimension to be divisible by k, default to 1.
                if `k_divisible` is an int, the same `k` be applied to all the input spatial dimensions.
            mode: padding mode {``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``,
                ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
                one of the listed string values or a user supplied function. Defaults to ``"constant"``.
                see also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
            np_kwargs: other args for `np.pad` API, note that `np.pad` treats channel dimension as the first dimension.
                more details: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
        """
        self.select_fn = select_fn
        self.channel_indices = ensure_tuple(channel_indices) if channel_indices is not None else None
        self.margin = margin
        self.return_coords = return_coords
        self.k_divisible = k_divisible
        self.mode: NumpyPadMode = look_up_option(mode, NumpyPadMode)
        self.np_kwargs = np_kwargs

    def compute_bounding_box(self, img: np.ndarray):
        """
        Compute the start points and end points of bounding box to crop.
        And adjust bounding box coords to be divisible by `k`.
        """
        box_start, box_end = generate_spatial_bounding_box(img, self.select_fn, self.channel_indices, self.margin)
        # compute in int64: int16 silently overflows for coordinates beyond 32767,
        # which large volumes / whole-slide images can exceed.
        box_start_ = np.asarray(box_start, dtype=np.int64)
        box_end_ = np.asarray(box_end, dtype=np.int64)
        orig_spatial_size = box_end_ - box_start_
        # make the spatial size divisible by `k`
        spatial_size = np.asarray(compute_divisible_spatial_size(spatial_shape=orig_spatial_size, k=self.k_divisible))
        # grow the box symmetrically so the enlarged region stays centered on the original
        box_start_ = box_start_ - np.floor_divide(np.asarray(spatial_size) - orig_spatial_size, 2)
        box_end_ = box_start_ + spatial_size
        return box_start_, box_end_

    def crop_pad(
        self,
        img: np.ndarray,
        box_start: np.ndarray,
        box_end: np.ndarray,
        mode: Optional[Union[NumpyPadMode, str]] = None,
    ):
        """
        Crop and pad based on the bounding box.
        """
        cropped = SpatialCrop(roi_start=box_start, roi_end=box_end)(img)
        # any part of the box lying outside the image becomes padding instead
        pad_to_start = np.maximum(-box_start, 0)
        pad_to_end = np.maximum(box_end - np.asarray(img.shape[1:]), 0)
        # interleave (start, end) pairs per spatial dim, as BorderPad expects
        pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist())))
        return BorderPad(spatial_border=pad, mode=mode or self.mode, **self.np_kwargs)(cropped)

    def __call__(self, img: np.ndarray, mode: Optional[Union[NumpyPadMode, str]] = None):
        """
        Apply the transform to `img`, assuming `img` is channel-first and
        slicing doesn't change the channel dim.
        """
        img, *_ = convert_data_type(img, np.ndarray)  # type: ignore
        box_start, box_end = self.compute_bounding_box(img)
        cropped = self.crop_pad(img, box_start, box_end, mode)

        if self.return_coords:
            return cropped, box_start, box_end
        return cropped
class RandWeightedCrop(Randomizable, Transform):
    """
    Sample `num_samples` patches from an image, with patch centers drawn according
    to the provided `weight_map`.

    Args:
        spatial_size: the spatial size of the image patch e.g. [224, 224, 128].
            If its components have non-positive values, the corresponding size of `img` will be used.
        num_samples: number of samples (image patches) to take in the returned list.
        weight_map: weight map used to generate patch samples. The weights must be non-negative.
            Each element denotes a sampling weight of the spatial location. 0 indicates no sampling.
            It should be a single-channel array in shape, for example, `(1, spatial_dim_0, spatial_dim_1, ...)`.
    """

    def __init__(
        self, spatial_size: Union[Sequence[int], int], num_samples: int = 1, weight_map: Optional[np.ndarray] = None
    ):
        self.spatial_size = ensure_tuple(spatial_size)
        self.num_samples = int(num_samples)
        self.weight_map = weight_map
        self.centers: List[np.ndarray] = []

    def randomize(self, weight_map: np.ndarray) -> None:
        # only the first channel is used as the sampling weight
        self.centers = weighted_patch_samples(
            spatial_size=self.spatial_size, w=weight_map[0], n_samples=self.num_samples, r_state=self.R
        )

    def __call__(self, img: np.ndarray, weight_map: Optional[np.ndarray] = None) -> List[np.ndarray]:
        """
        Args:
            img: input image to sample patches from. assuming `img` is a channel-first array.
            weight_map: weight map used to generate patch samples. The weights must be non-negative.
                Each element denotes a sampling weight of the spatial location. 0 indicates no sampling.
                It should be a single-channel array in shape, for example, `(1, spatial_dim_0, spatial_dim_1, ...)`

        Returns:
            A list of image patches
        """
        img, *_ = convert_data_type(img, np.ndarray)  # type: ignore
        # fall back to the map given at construction time
        weight_map = self.weight_map if weight_map is None else weight_map
        if weight_map is None:
            raise ValueError("weight map must be provided for weighted patch sampling.")
        if img.shape[1:] != weight_map.shape[1:]:
            raise ValueError(f"image and weight map spatial shape mismatch: {img.shape[1:]} vs {weight_map.shape[1:]}.")
        weight_map, *_ = convert_data_type(weight_map, np.ndarray)  # type: ignore

        self.randomize(weight_map)
        patch_size = fall_back_tuple(self.spatial_size, weight_map.shape[1:])
        return [SpatialCrop(roi_center=center, roi_size=patch_size)(img) for center in self.centers]
class RandCropByPosNegLabel(Randomizable, Transform):
    """
    Crop random fixed sized regions whose centers are foreground or background voxels,
    chosen according to the Pos/Neg ratio, and return a list of the cropped arrays.
    For example, crop two (3 x 3) arrays from (5 x 5) array with pos/neg=1::

        [[[0, 0, 0, 0, 0],
          [0, 1, 2, 1, 0],            [[0, 1, 2],     [[2, 1, 0],
          [0, 1, 3, 0, 0],     -->     [0, 1, 3],      [3, 0, 0],
          [0, 0, 0, 0, 0],             [0, 0, 0]]      [0, 0, 0]]
          [0, 0, 0, 0, 0]]]

    If a dimension of the expected spatial size is bigger than the input image size,
    will not crop that dimension. So the cropped result may be smaller than expected size, and the cropped
    results of several images may not have exactly same shape.

    Args:
        spatial_size: the spatial size of the crop region e.g. [224, 224, 128].
            if a dimension of ROI size is bigger than image size, will not crop that dimension of the image.
            if its components have non-positive values, the corresponding size of `label` will be used.
            for example: if the spatial size of input data is [40, 40, 40] and `spatial_size=[32, 64, -1]`,
            the spatial size of output data will be [32, 40, 40].
        label: the label image that is used for finding foreground/background, if None, must set at
            `self.__call__`.  Non-zero indicates foreground, zero indicates background.
        pos: used with `neg` together to calculate the ratio ``pos / (pos + neg)`` for the probability
            to pick a foreground voxel as a center rather than a background voxel.
        neg: used with `pos` together to calculate the ratio ``pos / (pos + neg)`` for the probability
            to pick a foreground voxel as a center rather than a background voxel.
        num_samples: number of samples (crop regions) to take in each list.
        image: optional image data to help select valid area, can be same as `img` or another image array.
            if not None, use ``label == 0 & image > image_threshold`` to select the negative
            sample (background) center. So the crop center will only come from the valid image areas.
        image_threshold: if enabled `image`, use ``image > image_threshold`` to determine
            the valid image content areas.
        fg_indices: if provided pre-computed foreground indices of `label`, will ignore above `image` and
            `image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices`
            and `bg_indices` together, expect to be 1 dim array of spatial indices after flattening.
            a typical usage is to call `FgBgToIndices` transform first and cache the results.
        bg_indices: if provided pre-computed background indices of `label`, will ignore above `image` and
            `image_threshold`, and randomly select crop centers based on them, need to provide `fg_indices`
            and `bg_indices` together, expect to be 1 dim array of spatial indices after flattening.
            a typical usage is to call `FgBgToIndices` transform first and cache the results.

    Raises:
        ValueError: When ``pos`` or ``neg`` are negative.
        ValueError: When ``pos=0`` and ``neg=0``. Incompatible values.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(
        self,
        spatial_size: Union[Sequence[int], int],
        label: Optional[NdarrayOrTensor] = None,
        pos: float = 1.0,
        neg: float = 1.0,
        num_samples: int = 1,
        image: Optional[NdarrayOrTensor] = None,
        image_threshold: float = 0.0,
        fg_indices: Optional[NdarrayOrTensor] = None,
        bg_indices: Optional[NdarrayOrTensor] = None,
    ) -> None:
        self.spatial_size = ensure_tuple(spatial_size)
        self.label = label
        if pos < 0 or neg < 0:
            raise ValueError(f"pos and neg must be nonnegative, got pos={pos} neg={neg}.")
        if pos + neg == 0:
            raise ValueError("Incompatible values: pos=0 and neg=0.")
        # probability of picking a foreground voxel as the crop center
        self.pos_ratio = pos / (pos + neg)
        self.num_samples = num_samples
        self.image = image
        self.image_threshold = image_threshold
        # crop centers computed by `randomize`, consumed by `__call__`
        self.centers: Optional[List[List[int]]] = None
        self.fg_indices = fg_indices
        self.bg_indices = bg_indices

    def randomize(
        self,
        label: NdarrayOrTensor,
        fg_indices: Optional[NdarrayOrTensor] = None,
        bg_indices: Optional[NdarrayOrTensor] = None,
        image: Optional[NdarrayOrTensor] = None,
    ) -> None:
        self.spatial_size = fall_back_tuple(self.spatial_size, default=label.shape[1:])
        # precedence: call-time indices, then construction-time indices, then a fresh computation
        if fg_indices is not None and bg_indices is not None:
            fg_indices_, bg_indices_ = fg_indices, bg_indices
        elif self.fg_indices is not None and self.bg_indices is not None:
            fg_indices_, bg_indices_ = self.fg_indices, self.bg_indices
        else:
            fg_indices_, bg_indices_ = map_binary_to_indices(label, image, self.image_threshold)
        self.centers = generate_pos_neg_label_crop_centers(
            self.spatial_size, self.num_samples, self.pos_ratio, label.shape[1:], fg_indices_, bg_indices_, self.R
        )

    def __call__(
        self,
        img: NdarrayOrTensor,
        label: Optional[NdarrayOrTensor] = None,
        image: Optional[NdarrayOrTensor] = None,
        fg_indices: Optional[NdarrayOrTensor] = None,
        bg_indices: Optional[NdarrayOrTensor] = None,
    ) -> List[NdarrayOrTensor]:
        """
        Args:
            img: input data to crop samples from based on the pos/neg ratio of `label` and `image`.
                Assumes `img` is a channel-first array.
            label: the label image that is used for finding foreground/background, if None, use `self.label`.
            image: optional image data to help select valid area, can be same as `img` or another image array.
                use ``label == 0 & image > image_threshold`` to select the negative sample(background) center.
                so the crop center will only exist on valid image area. if None, use `self.image`.
            fg_indices: foreground indices to randomly select crop centers,
                need to provide `fg_indices` and `bg_indices` together.
            bg_indices: background indices to randomly select crop centers,
                need to provide `fg_indices` and `bg_indices` together.
        """
        label = self.label if label is None else label
        if label is None:
            raise ValueError("label should be provided.")
        image = self.image if image is None else image
        self.randomize(label, fg_indices, bg_indices, image)

        results: List[NdarrayOrTensor] = []
        if self.centers is not None:
            results = [
                SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size)(img) for center in self.centers
            ]
        return results
class RandCropByLabelClasses(Randomizable, Transform):
    """
    Crop random fixed sized regions whose centers belong to a class, chosen according
    to the specified per-class ratios. The label data can be One-Hot format array or
    Argmax data, and a list of arrays is returned for all the cropped images.
    For example, crop two (3 x 3) arrays from (5 x 5) array with `ratios=[1, 2, 3, 1]`::

        image = np.array([
            [[0.0, 0.3, 0.4, 0.2, 0.0],
            [0.0, 0.1, 0.2, 0.1, 0.4],
            [0.0, 0.3, 0.5, 0.2, 0.0],
            [0.1, 0.2, 0.1, 0.1, 0.0],
            [0.0, 0.1, 0.2, 0.1, 0.0]]
        ])
        label = np.array([
            [[0, 0, 0, 0, 0],
            [0, 1, 2, 1, 0],
            [0, 1, 3, 0, 0],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0]]
        ])
        cropper = RandCropByLabelClasses(
            spatial_size=[3, 3],
            ratios=[1, 2, 3, 1],
            num_classes=4,
            num_samples=2,
        )
        label_samples = cropper(img=label, label=label, image=image)

        The 2 randomly cropped samples of `label` can be:
        [[0, 1, 2],     [[0, 0, 0],
         [0, 1, 3],      [1, 2, 1],
         [0, 0, 0]]      [1, 3, 0]]

    If a dimension of the expected spatial size is bigger than the input image size,
    will not crop that dimension. So the cropped result may be smaller than expected size, and the cropped
    results of several images may not have exactly same shape.

    Args:
        spatial_size: the spatial size of the crop region e.g. [224, 224, 128].
            if a dimension of ROI size is bigger than image size, will not crop that dimension of the image.
            if its components have non-positive values, the corresponding size of `label` will be used.
            for example: if the spatial size of input data is [40, 40, 40] and `spatial_size=[32, 64, -1]`,
            the spatial size of output data will be [32, 40, 40].
        ratios: specified ratios of every class in the label to generate crop centers, including background class.
            if None, every class will have the same ratio to generate crop centers.
        label: the label image that is used for finding every classes, if None, must set at `self.__call__`.
        num_classes: number of classes for argmax label, not necessary for One-Hot label.
        num_samples: number of samples (crop regions) to take in each list.
        image: if image is not None, only return the indices of every class that are within the valid
            region of the image (``image > image_threshold``).
        image_threshold: if enabled `image`, use ``image > image_threshold`` to
            determine the valid image content area and select class indices only in this area.
        indices: if provided pre-computed indices of every class, will ignore above `image` and
            `image_threshold`, and randomly select crop centers based on them, expect to be 1 dim array
            of spatial indices after flattening. a typical usage is to call `ClassesToIndices` transform first
            and cache the results for better performance.
    """

    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]

    def __init__(
        self,
        spatial_size: Union[Sequence[int], int],
        ratios: Optional[List[Union[float, int]]] = None,
        label: Optional[NdarrayOrTensor] = None,
        num_classes: Optional[int] = None,
        num_samples: int = 1,
        image: Optional[NdarrayOrTensor] = None,
        image_threshold: float = 0.0,
        indices: Optional[List[NdarrayOrTensor]] = None,
    ) -> None:
        self.spatial_size = ensure_tuple(spatial_size)
        self.ratios = ratios
        self.label = label
        self.num_classes = num_classes
        self.num_samples = num_samples
        self.image = image
        self.image_threshold = image_threshold
        # crop centers computed by `randomize`, consumed by `__call__`
        self.centers: Optional[List[List[int]]] = None
        self.indices = indices

    def randomize(
        self,
        label: NdarrayOrTensor,
        indices: Optional[List[NdarrayOrTensor]] = None,
        image: Optional[NdarrayOrTensor] = None,
    ) -> None:
        self.spatial_size = fall_back_tuple(self.spatial_size, default=label.shape[1:])
        # precedence: call-time indices, then construction-time indices, then a fresh computation
        indices_: Sequence[NdarrayOrTensor]
        if indices is not None:
            indices_ = indices
        elif self.indices is not None:
            indices_ = self.indices
        else:
            indices_ = map_classes_to_indices(label, self.num_classes, image, self.image_threshold)
        self.centers = generate_label_classes_crop_centers(
            self.spatial_size, self.num_samples, label.shape[1:], indices_, self.ratios, self.R
        )

    def __call__(
        self,
        img: NdarrayOrTensor,
        label: Optional[NdarrayOrTensor] = None,
        image: Optional[NdarrayOrTensor] = None,
        indices: Optional[List[NdarrayOrTensor]] = None,
    ) -> List[NdarrayOrTensor]:
        """
        Args:
            img: input data to crop samples from based on the ratios of every class, assumes `img` is a
                channel-first array.
            label: the label image that is used for finding indices of every class, if None, use `self.label`.
            image: optional image data to help select valid area, can be same as `img` or another image array.
                use ``image > image_threshold`` to select the centers only in valid region. if None, use `self.image`.
            indices: list of indices for every class in the image, used to randomly select crop centers.
        """
        label = self.label if label is None else label
        if label is None:
            raise ValueError("label should be provided.")
        image = self.image if image is None else image
        self.randomize(label, indices, image)

        results: List[NdarrayOrTensor] = []
        if self.centers is not None:
            results = [
                SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size)(img) for center in self.centers
            ]
        return results
class ResizeWithPadOrCrop(Transform):
    """
    Resize an image to a target spatial size: dimensions smaller than the target are
    padded symmetrically with a user-specified mode, dimensions larger than the target
    are cropped centrally.

    Args:
        spatial_size: the spatial size of output data after padding or crop.
            If has non-positive values, the corresponding size of input image will be used (no padding).
        mode: {``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``,
            ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
            One of the listed string values or a user supplied function for padding. Defaults to ``"constant"``.
            See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
        method: {``"symmetric"``, ``"end"``}
            Pad image symmetrically on every side or only pad at the end sides. Defaults to ``"symmetric"``.
        np_kwargs: other args for `np.pad` API, note that `np.pad` treats channel dimension as the first dimension.
            more details: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
    """

    # only backends supported by both the padder and the cropper
    backend = list(set(SpatialPad.backend) & set(CenterSpatialCrop.backend))

    def __init__(
        self,
        spatial_size: Union[Sequence[int], int],
        mode: Union[NumpyPadMode, str] = NumpyPadMode.CONSTANT,
        method: Union[Method, str] = Method.SYMMETRIC,
        **np_kwargs,
    ):
        self.padder = SpatialPad(spatial_size=spatial_size, method=method, mode=mode, **np_kwargs)
        self.cropper = CenterSpatialCrop(roi_size=spatial_size)

    def __call__(self, img: NdarrayOrTensor, mode: Optional[Union[NumpyPadMode, str]] = None) -> NdarrayOrTensor:
        """
        Args:
            img: data to pad or crop, assuming `img` is channel-first and
                padding or cropping doesn't apply to the channel dim.
            mode: {``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``,
                ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
                One of the listed string values or a user supplied function for padding.
                If None, defaults to the ``mode`` in construction.
                See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
        """
        # crop down any oversized dims first, then pad up any undersized dims
        cropped = self.cropper(img)
        return self.padder(cropped, mode=mode)  # type: ignore
class BoundingRect(Transform):
    """
    Compute coordinates of axis-aligned bounding rectangles from input image `img`.
    The output format of the coordinates is (shape is [channel, 2 * spatial dims]):

        [[1st_spatial_dim_start, 1st_spatial_dim_end,
         2nd_spatial_dim_start, 2nd_spatial_dim_end,
         ...,
         Nth_spatial_dim_start, Nth_spatial_dim_end],

         ...

         [1st_spatial_dim_start, 1st_spatial_dim_end,
         2nd_spatial_dim_start, 2nd_spatial_dim_end,
         ...,
         Nth_spatial_dim_start, Nth_spatial_dim_end]]

    The bounding boxes edges are aligned with the input image edges.
    This function returns [-1, -1, ...] if there's no positive intensity.

    Args:
        select_fn: function to select expected foreground, default is to select values > 0.
    """

    def __init__(self, select_fn: Callable = is_positive) -> None:
        self.select_fn = select_fn

    def __call__(self, img: np.ndarray) -> np.ndarray:
        """
        See also: :py:class:`monai.transforms.utils.generate_spatial_bounding_box`.
        """
        img, *_ = convert_data_type(img, np.ndarray)  # type: ignore
        boxes = []
        for ch in range(img.shape[0]):
            start, end = generate_spatial_bounding_box(img, select_fn=self.select_fn, channel_indices=ch)
            # interleave per-dim (start, end) pairs into a flat row
            boxes.append(list(chain.from_iterable(zip(start, end))))
        return np.stack(boxes, axis=0)
| 47.761706 | 120 | 0.633825 |
from itertools import chain
from math import ceil
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from torch.nn.functional import pad as pad_pt
from monai.config import IndexSelection
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.utils import get_random_patch, get_valid_patch_size
from monai.transforms.transform import Randomizable, Transform
from monai.transforms.utils import (
compute_divisible_spatial_size,
convert_pad_mode,
generate_label_classes_crop_centers,
generate_pos_neg_label_crop_centers,
generate_spatial_bounding_box,
is_positive,
map_binary_to_indices,
map_classes_to_indices,
weighted_patch_samples,
)
from monai.transforms.utils_pytorch_numpy_unification import floor_divide, maximum
from monai.utils import (
Method,
NumpyPadMode,
PytorchPadMode,
ensure_tuple,
ensure_tuple_rep,
fall_back_tuple,
look_up_option,
)
from monai.utils.enums import TransformBackends
from monai.utils.type_conversion import convert_data_type
__all__ = [
"SpatialPad",
"BorderPad",
"DivisiblePad",
"SpatialCrop",
"CenterSpatialCrop",
"CenterScaleCrop",
"RandSpatialCrop",
"RandScaleCrop",
"RandSpatialCropSamples",
"CropForeground",
"RandWeightedCrop",
"RandCropByPosNegLabel",
"RandCropByLabelClasses",
"ResizeWithPadOrCrop",
"BoundingRect",
]
class Pad(Transform):
    """
    Pad an image with the exact per-dimension widths given in ``to_pad``.

    ``to_pad`` lists ``(before, after)`` widths for every dimension, including
    the channel dimension. Torch tensors are padded via
    ``torch.nn.functional.pad``; all other inputs via ``np.pad``.
    """
    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
    def __init__(
        self,
        to_pad: List[Tuple[int, int]],
        mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.CONSTANT,
        **kwargs,
    ) -> None:
        self.to_pad = to_pad
        self.mode = mode
        self.kwargs = kwargs
    @staticmethod
    def _np_pad(img: np.ndarray, all_pad_width, mode, **kwargs) -> np.ndarray:
        # Numpy takes the widths in natural (first-dim-first) order.
        return np.pad(img, all_pad_width, mode=mode, **kwargs)
    @staticmethod
    def _pt_pad(img: torch.Tensor, all_pad_width, mode, **kwargs) -> torch.Tensor:
        # torch.nn.functional.pad expects widths for the *last* dims first and
        # flattened, so drop the channel entry and reverse the ordering.
        pt_pad_width = [val for sublist in all_pad_width[1:] for val in sublist[::-1]][::-1]
        return pad_pt(img.unsqueeze(0), pt_pad_width, mode=mode, **kwargs).squeeze(0)
    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
    ) -> NdarrayOrTensor:
        """Pad ``img``; ``mode`` (if given) overrides the constructor's mode."""
        # No-op when every pad width is zero.
        if not np.asarray(self.to_pad).any():
            return img
        mode = convert_pad_mode(dst=img, mode=mode or self.mode).value
        pad = self._pt_pad if isinstance(img, torch.Tensor) else self._np_pad
        return pad(img, self.to_pad, mode, **self.kwargs)
class SpatialPad(Transform):
    """
    Pad a channel-first image so its spatial shape reaches ``spatial_size``.

    Dimensions already at least as large as ``spatial_size`` are left
    unchanged. ``method`` controls whether the padding is split evenly
    (``SYMMETRIC``) or appended at the end (``END``) of each dimension.
    """
    backend = Pad.backend
    def __init__(
        self,
        spatial_size: Union[Sequence[int], int],
        method: Union[Method, str] = Method.SYMMETRIC,
        mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.CONSTANT,
        **kwargs,
    ) -> None:
        self.spatial_size = spatial_size
        self.method: Method = look_up_option(method, Method)
        self.mode = mode
        self.kwargs = kwargs
    def _determine_data_pad_width(self, data_shape: Sequence[int]) -> List[Tuple[int, int]]:
        # fall_back_tuple lets callers pass -1 (or similar) to keep a dim as-is.
        spatial_size = fall_back_tuple(self.spatial_size, data_shape)
        if self.method == Method.SYMMETRIC:
            pad_width = []
            for i, sp_i in enumerate(spatial_size):
                width = max(sp_i - data_shape[i], 0)
                # Split the pad evenly; the extra voxel (odd width) goes after.
                pad_width.append((width // 2, width - (width // 2)))
            return pad_width
        return [(0, max(sp_i - data_shape[i], 0)) for i, sp_i in enumerate(spatial_size)]
    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
    ) -> NdarrayOrTensor:
        """Pad ``img`` (channel dim excluded); ``mode`` overrides the default."""
        data_pad_width = self._determine_data_pad_width(img.shape[1:])
        all_pad_width = [(0, 0)] + data_pad_width
        if not np.asarray(all_pad_width).any():
            return img
        padder = Pad(all_pad_width, mode or self.mode, **self.kwargs)
        return padder(img)
class BorderPad(Transform):
    """
    Pad a channel-first image with the given borders on every spatial side.

    ``spatial_border`` accepts: a single int (same border on all sides), one
    int per spatial dim (same before/after per dim), or two ints per spatial
    dim as ``(dim1_before, dim1_after, dim2_before, dim2_after, ...)``.
    Negative values are clamped to 0.
    """
    backend = Pad.backend
    def __init__(
        self,
        spatial_border: Union[Sequence[int], int],
        mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.CONSTANT,
        **kwargs,
    ) -> None:
        self.spatial_border = spatial_border
        self.mode = mode
        self.kwargs = kwargs
    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
    ) -> NdarrayOrTensor:
        """Pad ``img``; raises ValueError on non-int or wrong-length borders."""
        spatial_shape = img.shape[1:]
        spatial_border = ensure_tuple(self.spatial_border)
        if not all(isinstance(b, int) for b in spatial_border):
            raise ValueError(f"self.spatial_border must contain only ints, got {spatial_border}.")
        # Negative borders are treated as 0 (no shrinking here).
        spatial_border = tuple(max(0, b) for b in spatial_border)
        if len(spatial_border) == 1:
            data_pad_width = [(spatial_border[0], spatial_border[0]) for _ in spatial_shape]
        elif len(spatial_border) == len(spatial_shape):
            data_pad_width = [(sp, sp) for sp in spatial_border[: len(spatial_shape)]]
        elif len(spatial_border) == len(spatial_shape) * 2:
            data_pad_width = [(spatial_border[2 * i], spatial_border[2 * i + 1]) for i in range(len(spatial_shape))]
        else:
            raise ValueError(
                f"Unsupported spatial_border length: {len(spatial_border)}, available options are "
                f"[1, len(spatial_shape)={len(spatial_shape)}, 2*len(spatial_shape)={2*len(spatial_shape)}]."
            )
        # Channel dim is never padded.
        all_pad_width = [(0, 0)] + data_pad_width
        padder = Pad(all_pad_width, mode or self.mode, **self.kwargs)
        return padder(img)
class DivisiblePad(Transform):
    """
    Pad a channel-first image so every spatial dimension becomes divisible
    by the corresponding value of ``k`` (a single int or one int per dim).
    """
    backend = SpatialPad.backend
    def __init__(
        self,
        k: Union[Sequence[int], int],
        mode: Union[NumpyPadMode, PytorchPadMode, str] = NumpyPadMode.CONSTANT,
        method: Union[Method, str] = Method.SYMMETRIC,
        **kwargs,
    ) -> None:
        self.k = k
        self.mode: NumpyPadMode = NumpyPadMode(mode)
        self.method: Method = Method(method)
        self.kwargs = kwargs
    def __call__(
        self,
        img: NdarrayOrTensor,
        mode: Optional[Union[NumpyPadMode, PytorchPadMode, str]] = None,
    ) -> NdarrayOrTensor:
        """Compute the next divisible spatial size and delegate to SpatialPad."""
        new_size = compute_divisible_spatial_size(spatial_shape=img.shape[1:], k=self.k)
        spatial_pad = SpatialPad(
            spatial_size=new_size,
            method=self.method,
            mode=mode or self.mode,
            **self.kwargs,
        )
        return spatial_pad(img)
class SpatialCrop(Transform):
    """
    General-purpose cropper for channel-first ND spatial data.

    The crop region may be specified by exactly one of:
      - ``roi_center`` and ``roi_size`` (region centre and size),
      - ``roi_start`` and ``roi_end`` (inclusive start / exclusive end),
      - ``roi_slices`` (a list of slices; step must be 1 or None).
    Starts are clamped to 0 and ends are clamped to be >= start.

    Raises:
        ValueError: if a slice step other than 1/None is given, or if neither
            centre/size nor start/end coordinates are provided.
    """
    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
    def __init__(
        self,
        roi_center: Union[Sequence[int], NdarrayOrTensor, None] = None,
        roi_size: Union[Sequence[int], NdarrayOrTensor, None] = None,
        roi_start: Union[Sequence[int], NdarrayOrTensor, None] = None,
        roi_end: Union[Sequence[int], NdarrayOrTensor, None] = None,
        roi_slices: Optional[Sequence[slice]] = None,
    ) -> None:
        roi_start_torch: torch.Tensor
        if roi_slices:
            if not all(s.step is None or s.step == 1 for s in roi_slices):
                raise ValueError("Only slice steps of 1/None are currently supported")
            self.slices = list(roi_slices)
        else:
            if roi_center is not None and roi_size is not None:
                # int64 (not int16) so coordinates of large volumes (> 32767
                # voxels along a dim) do not silently overflow.
                roi_center = torch.as_tensor(roi_center, dtype=torch.int64)
                roi_size = torch.as_tensor(roi_size, dtype=torch.int64, device=roi_center.device)
                roi_start_torch = maximum(
                    roi_center - floor_divide(roi_size, 2),
                    torch.zeros_like(roi_center),
                )
                roi_end_torch = maximum(roi_start_torch + roi_size, roi_start_torch)
            else:
                if roi_start is None or roi_end is None:
                    raise ValueError("Please specify either roi_center, roi_size or roi_start, roi_end.")
                roi_start_torch = torch.as_tensor(roi_start, dtype=torch.int64)
                roi_start_torch = maximum(roi_start_torch, torch.zeros_like(roi_start_torch))
                roi_end_torch = maximum(torch.as_tensor(roi_end, dtype=torch.int64), roi_start_torch)
            # Convert the start/end coordinates into per-dimension slices.
            if roi_start_torch.numel() == 1:
                self.slices = [slice(int(roi_start_torch.item()), int(roi_end_torch.item()))]
            else:
                self.slices = [slice(int(s.item()), int(e.item())) for s, e in zip(roi_start_torch, roi_end_torch)]
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Crop ``img`` (channel dim untouched); extra trailing spatial dims
        beyond the configured slices are left uncropped.
        """
        sd = min(len(self.slices), len(img.shape[1:]))  # spatial dims actually cropped
        slices = [slice(None)] + self.slices[:sd]
        return img[tuple(slices)]
class CenterSpatialCrop(Transform):
    """
    Crop a region of ``roi_size`` centred on the image centre
    (channel-first input; the channel dim is not cropped).
    """
    backend = SpatialCrop.backend
    def __init__(self, roi_size: Union[Sequence[int], int]) -> None:
        self.roi_size = roi_size
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        # fall_back_tuple allows -1 style values to mean "keep this dim".
        roi_size = fall_back_tuple(self.roi_size, img.shape[1:])
        center = [i // 2 for i in img.shape[1:]]
        cropper = SpatialCrop(roi_center=center, roi_size=roi_size)
        return cropper(img)
class CenterScaleCrop(Transform):
    """
    Centre crop whose size is a per-dimension *fraction* (``roi_scale``)
    of the input spatial size.
    """
    backend = CenterSpatialCrop.backend
    def __init__(self, roi_scale: Union[Sequence[float], float]):
        self.roi_scale = roi_scale
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        img_size = img.shape[1:]
        ndim = len(img_size)
        # Scale each spatial dim and round up to a whole number of voxels.
        roi_size = [ceil(r * s) for r, s in zip(ensure_tuple_rep(self.roi_scale, ndim), img_size)]
        sp_crop = CenterSpatialCrop(roi_size=roi_size)
        return sp_crop(img=img)
class RandSpatialCrop(Randomizable, Transform):
    """
    Crop a patch of ``roi_size`` at a random centre (``random_center``) and/or
    with a random size between ``roi_size`` and ``max_roi_size``
    (``random_size``). Input is channel-first; the channel dim is untouched.
    """
    backend = CenterSpatialCrop.backend
    def __init__(
        self,
        roi_size: Union[Sequence[int], int],
        max_roi_size: Optional[Union[Sequence[int], int]] = None,
        random_center: bool = True,
        random_size: bool = True,
    ) -> None:
        self.roi_size = roi_size
        self.max_roi_size = max_roi_size
        self.random_center = random_center
        self.random_size = random_size
        # Populated by randomize(): the sampled patch size and slices.
        self._size: Optional[Sequence[int]] = None
        self._slices: Optional[Tuple[slice, ...]] = None
    def randomize(self, img_size: Sequence[int]) -> None:
        """Sample the patch size and (if random_center) its location."""
        self._size = fall_back_tuple(self.roi_size, img_size)
        if self.random_size:
            max_size = img_size if self.max_roi_size is None else fall_back_tuple(self.max_roi_size, img_size)
            if any(i > j for i, j in zip(self._size, max_size)):
                raise ValueError(f"min ROI size: {self._size} is bigger than max ROI size: {max_size}.")
            # randint high is exclusive, hence the +1 to include max_size.
            self._size = tuple(self.R.randint(low=self._size[i], high=max_size[i] + 1) for i in range(len(img_size)))
        if self.random_center:
            valid_size = get_valid_patch_size(img_size, self._size)
            self._slices = (slice(None),) + get_random_patch(img_size, valid_size, self.R)
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """Randomize then crop; falls back to a centre crop when not random_center."""
        self.randomize(img.shape[1:])
        if self._size is None:
            raise RuntimeError("self._size not specified.")
        if self.random_center:
            return img[self._slices]
        cropper = CenterSpatialCrop(self._size)
        return cropper(img)
class RandScaleCrop(RandSpatialCrop):
    """
    RandSpatialCrop whose (min/max) patch size is expressed as a fraction
    of the input spatial size (``roi_scale`` / ``max_roi_scale``).
    """
    def __init__(
        self,
        roi_scale: Union[Sequence[float], float],
        max_roi_scale: Optional[Union[Sequence[float], float]] = None,
        random_center: bool = True,
        random_size: bool = True,
    ) -> None:
        # roi_size is computed per-call from the image size; -1 is a placeholder.
        super().__init__(roi_size=-1, max_roi_size=None, random_center=random_center, random_size=random_size)
        self.roi_scale = roi_scale
        self.max_roi_scale = max_roi_scale
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        # Translate the scales into concrete voxel sizes for this image,
        # then reuse the parent's random cropping.
        img_size = img.shape[1:]
        ndim = len(img_size)
        self.roi_size = [ceil(r * s) for r, s in zip(ensure_tuple_rep(self.roi_scale, ndim), img_size)]
        if self.max_roi_scale is not None:
            self.max_roi_size = [ceil(r * s) for r, s in zip(ensure_tuple_rep(self.max_roi_scale, ndim), img_size)]
        else:
            self.max_roi_size = None
        return super().__call__(img=img)
class RandSpatialCropSamples(Randomizable, Transform):
    """
    Apply :class:`RandSpatialCrop` ``num_samples`` times to the same image
    and return the list of cropped patches.
    """
    backend = RandScaleCrop.backend
    def __init__(
        self,
        roi_size: Union[Sequence[int], int],
        num_samples: int,
        max_roi_size: Optional[Union[Sequence[int], int]] = None,
        random_center: bool = True,
        random_size: bool = True,
    ) -> None:
        if num_samples < 1:
            raise ValueError(f"num_samples must be positive, got {num_samples}.")
        self.num_samples = num_samples
        self.cropper = RandSpatialCrop(roi_size, max_roi_size, random_center, random_size)
    def set_random_state(
        self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
    ) -> "Randomizable":
        # Keep the inner cropper on the same RNG so sampling is reproducible.
        super().set_random_state(seed=seed, state=state)
        self.cropper.set_random_state(state=self.R)
        return self
    def randomize(self, data: Optional[Any] = None) -> None:
        # Randomization happens inside the inner cropper on each call.
        pass
    def __call__(self, img: NdarrayOrTensor) -> List[NdarrayOrTensor]:
        """Return ``num_samples`` independently randomized crops of ``img``."""
        return [self.cropper(img) for _ in range(self.num_samples)]
class CropForeground(Transform):
    """
    Crop an image down to the bounding box of its foreground (as selected by
    ``select_fn``), optionally expanded by ``margin`` and rounded up so each
    spatial dim is divisible by ``k_divisible``. Regions extending past the
    image border are filled by padding.
    """
    def __init__(
        self,
        select_fn: Callable = is_positive,
        channel_indices: Optional[IndexSelection] = None,
        margin: Union[Sequence[int], int] = 0,
        return_coords: bool = False,
        k_divisible: Union[Sequence[int], int] = 1,
        mode: Union[NumpyPadMode, str] = NumpyPadMode.CONSTANT,
        **np_kwargs,
    ) -> None:
        self.select_fn = select_fn
        self.channel_indices = ensure_tuple(channel_indices) if channel_indices is not None else None
        self.margin = margin
        self.return_coords = return_coords
        self.k_divisible = k_divisible
        self.mode: NumpyPadMode = look_up_option(mode, NumpyPadMode)
        self.np_kwargs = np_kwargs
    def compute_bounding_box(self, img: np.ndarray):
        """Return (start, end) spatial coordinates of the (k-divisible) foreground box."""
        box_start, box_end = generate_spatial_bounding_box(img, self.select_fn, self.channel_indices, self.margin)
        box_start_ = np.asarray(box_start, dtype=np.int16)
        box_end_ = np.asarray(box_end, dtype=np.int16)
        orig_spatial_size = box_end_ - box_start_
        # Round the box size up to the nearest multiple of k_divisible...
        spatial_size = np.asarray(compute_divisible_spatial_size(spatial_shape=orig_spatial_size, k=self.k_divisible))
        # ...and grow the box symmetrically to that size (start may go negative).
        box_start_ = box_start_ - np.floor_divide(np.asarray(spatial_size) - orig_spatial_size, 2)
        box_end_ = box_start_ + spatial_size
        return box_start_, box_end_
    def crop_pad(
        self,
        img: np.ndarray,
        box_start: np.ndarray,
        box_end: np.ndarray,
        mode: Optional[Union[NumpyPadMode, str]] = None,
    ):
        """Crop to the box; pad any part of the box that fell outside the image."""
        cropped = SpatialCrop(roi_start=box_start, roi_end=box_end)(img)
        pad_to_start = np.maximum(-box_start, 0)
        pad_to_end = np.maximum(box_end - np.asarray(img.shape[1:]), 0)
        # Interleave (before, after) pads per dim for BorderPad.
        pad = list(chain(*zip(pad_to_start.tolist(), pad_to_end.tolist())))
        return BorderPad(spatial_border=pad, mode=mode or self.mode, **self.np_kwargs)(cropped)
    def __call__(self, img: np.ndarray, mode: Optional[Union[NumpyPadMode, str]] = None):
        """Crop/pad ``img``; also return (box_start, box_end) when return_coords is set."""
        img, *_ = convert_data_type(img, np.ndarray)
        box_start, box_end = self.compute_bounding_box(img)
        cropped = self.crop_pad(img, box_start, box_end, mode)
        if self.return_coords:
            return cropped, box_start, box_end
        return cropped
class RandWeightedCrop(Randomizable, Transform):
    """
    Sample ``num_samples`` patches of ``spatial_size`` whose centres are
    drawn with probability proportional to the given weight map (same
    spatial shape as the image).
    """
    def __init__(
        self, spatial_size: Union[Sequence[int], int], num_samples: int = 1, weight_map: Optional[np.ndarray] = None
    ):
        self.spatial_size = ensure_tuple(spatial_size)
        self.num_samples = int(num_samples)
        self.weight_map = weight_map
        self.centers: List[np.ndarray] = []
    def randomize(self, weight_map: np.ndarray) -> None:
        # Only the first channel of the weight map drives the sampling.
        self.centers = weighted_patch_samples(
            spatial_size=self.spatial_size, w=weight_map[0], n_samples=self.num_samples, r_state=self.R
        )
    def __call__(self, img: np.ndarray, weight_map: Optional[np.ndarray] = None) -> List[np.ndarray]:
        """Return the sampled crops; raises if no weight map is available or shapes mismatch."""
        img, *_ = convert_data_type(img, np.ndarray)
        if weight_map is None:
            weight_map = self.weight_map
        if weight_map is None:
            raise ValueError("weight map must be provided for weighted patch sampling.")
        if img.shape[1:] != weight_map.shape[1:]:
            raise ValueError(f"image and weight map spatial shape mismatch: {img.shape[1:]} vs {weight_map.shape[1:]}.")
        weight_map, *_ = convert_data_type(weight_map, np.ndarray)
        self.randomize(weight_map)
        _spatial_size = fall_back_tuple(self.spatial_size, weight_map.shape[1:])
        results = []
        for center in self.centers:
            cropper = SpatialCrop(roi_center=center, roi_size=_spatial_size)
            cropped: np.ndarray = cropper(img)
            results.append(cropped)
        return results
class RandCropByPosNegLabel(Randomizable, Transform):
    """
    Sample ``num_samples`` patches whose centres land on foreground vs
    background voxels of ``label`` with probability ``pos / (pos + neg)``.
    ``image``/``image_threshold`` restrict candidate voxels; precomputed
    ``fg_indices``/``bg_indices`` can be supplied to skip the index scan.
    """
    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
    def __init__(
        self,
        spatial_size: Union[Sequence[int], int],
        label: Optional[NdarrayOrTensor] = None,
        pos: float = 1.0,
        neg: float = 1.0,
        num_samples: int = 1,
        image: Optional[NdarrayOrTensor] = None,
        image_threshold: float = 0.0,
        fg_indices: Optional[NdarrayOrTensor] = None,
        bg_indices: Optional[NdarrayOrTensor] = None,
    ) -> None:
        self.spatial_size = ensure_tuple(spatial_size)
        self.label = label
        if pos < 0 or neg < 0:
            raise ValueError(f"pos and neg must be nonnegative, got pos={pos} neg={neg}.")
        if pos + neg == 0:
            raise ValueError("Incompatible values: pos=0 and neg=0.")
        self.pos_ratio = pos / (pos + neg)
        self.num_samples = num_samples
        self.image = image
        self.image_threshold = image_threshold
        # Patch centres, populated by randomize().
        self.centers: Optional[List[List[int]]] = None
        self.fg_indices = fg_indices
        self.bg_indices = bg_indices
    def randomize(
        self,
        label: NdarrayOrTensor,
        fg_indices: Optional[NdarrayOrTensor] = None,
        bg_indices: Optional[NdarrayOrTensor] = None,
        image: Optional[NdarrayOrTensor] = None,
    ) -> None:
        """Resolve fg/bg candidate indices and sample the patch centres."""
        self.spatial_size = fall_back_tuple(self.spatial_size, default=label.shape[1:])
        if fg_indices is None or bg_indices is None:
            if self.fg_indices is not None and self.bg_indices is not None:
                fg_indices_ = self.fg_indices
                bg_indices_ = self.bg_indices
            else:
                # Fall back to scanning the label (and optional image mask).
                fg_indices_, bg_indices_ = map_binary_to_indices(label, image, self.image_threshold)
        else:
            fg_indices_ = fg_indices
            bg_indices_ = bg_indices
        self.centers = generate_pos_neg_label_crop_centers(
            self.spatial_size, self.num_samples, self.pos_ratio, label.shape[1:], fg_indices_, bg_indices_, self.R
        )
    def __call__(
        self,
        img: NdarrayOrTensor,
        label: Optional[NdarrayOrTensor] = None,
        image: Optional[NdarrayOrTensor] = None,
        fg_indices: Optional[NdarrayOrTensor] = None,
        bg_indices: Optional[NdarrayOrTensor] = None,
    ) -> List[NdarrayOrTensor]:
        """Return ``num_samples`` crops of ``img``; call-time args override the constructor's."""
        if label is None:
            label = self.label
        if label is None:
            raise ValueError("label should be provided.")
        if image is None:
            image = self.image
        self.randomize(label, fg_indices, bg_indices, image)
        results: List[NdarrayOrTensor] = []
        if self.centers is not None:
            for center in self.centers:
                cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size)
                results.append(cropper(img))
        return results
class RandCropByLabelClasses(Randomizable, Transform):
    """
    Sample ``num_samples`` patches whose centres are drawn from the voxels of
    each label class according to ``ratios`` (one weight per class).
    Precomputed per-class ``indices`` can be supplied to skip the scan;
    ``image``/``image_threshold`` restrict candidate voxels.
    """
    backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
    def __init__(
        self,
        spatial_size: Union[Sequence[int], int],
        ratios: Optional[List[Union[float, int]]] = None,
        label: Optional[NdarrayOrTensor] = None,
        num_classes: Optional[int] = None,
        num_samples: int = 1,
        image: Optional[NdarrayOrTensor] = None,
        image_threshold: float = 0.0,
        indices: Optional[List[NdarrayOrTensor]] = None,
    ) -> None:
        self.spatial_size = ensure_tuple(spatial_size)
        self.ratios = ratios
        self.label = label
        self.num_classes = num_classes
        self.num_samples = num_samples
        self.image = image
        self.image_threshold = image_threshold
        # Patch centres, populated by randomize().
        self.centers: Optional[List[List[int]]] = None
        self.indices = indices
    def randomize(
        self,
        label: NdarrayOrTensor,
        indices: Optional[List[NdarrayOrTensor]] = None,
        image: Optional[NdarrayOrTensor] = None,
    ) -> None:
        """Resolve per-class candidate indices and sample the patch centres."""
        self.spatial_size = fall_back_tuple(self.spatial_size, default=label.shape[1:])
        indices_: Sequence[NdarrayOrTensor]
        if indices is None:
            if self.indices is not None:
                indices_ = self.indices
            else:
                # Fall back to scanning the label (and optional image mask).
                indices_ = map_classes_to_indices(label, self.num_classes, image, self.image_threshold)
        else:
            indices_ = indices
        self.centers = generate_label_classes_crop_centers(
            self.spatial_size, self.num_samples, label.shape[1:], indices_, self.ratios, self.R
        )
    def __call__(
        self,
        img: NdarrayOrTensor,
        label: Optional[NdarrayOrTensor] = None,
        image: Optional[NdarrayOrTensor] = None,
        indices: Optional[List[NdarrayOrTensor]] = None,
    ) -> List[NdarrayOrTensor]:
        """Return ``num_samples`` crops of ``img``; call-time args override the constructor's."""
        if label is None:
            label = self.label
        if label is None:
            raise ValueError("label should be provided.")
        if image is None:
            image = self.image
        self.randomize(label, indices, image)
        results: List[NdarrayOrTensor] = []
        if self.centers is not None:
            for center in self.centers:
                cropper = SpatialCrop(roi_center=tuple(center), roi_size=self.spatial_size)
                results.append(cropper(img))
        return results
class ResizeWithPadOrCrop(Transform):
    """
    Force a channel-first image to exactly ``spatial_size`` by centre-cropping
    oversized dims and padding undersized ones.
    """
    backend = list(set(SpatialPad.backend) & set(CenterSpatialCrop.backend))
    def __init__(
        self,
        spatial_size: Union[Sequence[int], int],
        mode: Union[NumpyPadMode, str] = NumpyPadMode.CONSTANT,
        method: Union[Method, str] = Method.SYMMETRIC,
        **np_kwargs,
    ):
        self.padder = SpatialPad(spatial_size=spatial_size, method=method, mode=mode, **np_kwargs)
        self.cropper = CenterSpatialCrop(roi_size=spatial_size)
    def __call__(self, img: NdarrayOrTensor, mode: Optional[Union[NumpyPadMode, str]] = None) -> NdarrayOrTensor:
        # Crop first, then pad: each step is a no-op on dims it doesn't affect.
        return self.padder(self.cropper(img), mode=mode)
class BoundingRect(Transform):
    """
    Compute one axis-aligned bounding rectangle per channel; output shape is
    [channel, 2 * spatial dims] as [d1_start, d1_end, d2_start, d2_end, ...].
    """
    def __init__(self, select_fn: Callable = is_positive) -> None:
        self.select_fn = select_fn
    def __call__(self, img: np.ndarray) -> np.ndarray:
        img, *_ = convert_data_type(img, np.ndarray)
        bbox = []
        for channel in range(img.shape[0]):
            start_, end_ = generate_spatial_bounding_box(img, select_fn=self.select_fn, channel_indices=channel)
            # Interleave start/end coordinates per spatial dim.
            bbox.append([i for k in zip(start_, end_) for i in k])
        return np.stack(bbox, axis=0)
| true | true |
1c3bc0ca4ddaa841443813539b0429a144605411 | 320 | py | Python | audit_tools/users/apps.py | diegoauyon/audit_tools | c92e24b038a240222e275041585112c55a388587 | [
"MIT"
] | null | null | null | audit_tools/users/apps.py | diegoauyon/audit_tools | c92e24b038a240222e275041585112c55a388587 | [
"MIT"
] | null | null | null | audit_tools/users/apps.py | diegoauyon/audit_tools | c92e24b038a240222e275041585112c55a388587 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class UsersConfig(AppConfig):
    """Django app configuration for ``audit_tools.users``."""
    name = "audit_tools.users"
    verbose_name = _("Users")
    def ready(self):
        # Import the signals module for its handler-registration side effects;
        # the app must still load when the module is absent.
        try:
            import audit_tools.users.signals  # noqa F401
        except ImportError:
            pass
| 22.857143 | 57 | 0.6625 | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class UsersConfig(AppConfig):
    """Django app configuration for ``audit_tools.users``."""
    name = "audit_tools.users"
    verbose_name = _("Users")
    def ready(self):
        # Import the signals module for its handler-registration side effects;
        # the app must still load when the module is absent.
        try:
            import audit_tools.users.signals
        except ImportError:
            pass
| true | true |
1c3bc0eb348601d75e6d8e8eee9dd1263abeeef7 | 2,553 | py | Python | tests/functional/test_help.py | kpinc/pip | 9ab837e30dc11be72a4cbbf103e6b4cdfbb3cbb0 | [
"MIT"
] | 1 | 2018-07-24T14:56:11.000Z | 2018-07-24T14:56:11.000Z | tests/functional/test_help.py | kpinc/pip | 9ab837e30dc11be72a4cbbf103e6b4cdfbb3cbb0 | [
"MIT"
] | null | null | null | tests/functional/test_help.py | kpinc/pip | 9ab837e30dc11be72a4cbbf103e6b4cdfbb3cbb0 | [
"MIT"
] | 1 | 2019-06-28T05:23:31.000Z | 2019-06-28T05:23:31.000Z | import pytest
from mock import Mock
from pip._internal.basecommand import ERROR, SUCCESS
from pip._internal.commands import commands_dict as commands
from pip._internal.commands.help import HelpCommand
from pip._internal.exceptions import CommandError
def test_run_method_should_return_success_when_finds_command_name():
    """
    Test HelpCommand.run for existing command
    """
    options_mock = Mock()
    args = ('freeze',)  # 'freeze' is a built-in pip command, so lookup succeeds
    help_cmd = HelpCommand()
    status = help_cmd.run(options_mock, args)
    assert status == SUCCESS
def test_run_method_should_return_success_when_command_name_not_specified():
    """
    Test HelpCommand.run when there are no args
    """
    options_mock = Mock()
    args = ()  # no command name: help prints the general usage and succeeds
    help_cmd = HelpCommand()
    status = help_cmd.run(options_mock, args)
    assert status == SUCCESS
def test_run_method_should_raise_command_error_when_command_does_not_exist():
    """
    Test HelpCommand.run for non-existing command
    """
    options_mock = Mock()
    args = ('mycommand',)  # not a registered pip command
    help_cmd = HelpCommand()
    with pytest.raises(CommandError):
        help_cmd.run(options_mock, args)
def test_help_command_should_exit_status_ok_when_command_exists(script):
    """
    Test `help` command for existing command
    """
    result = script.pip('help', 'freeze')
    assert result.returncode == SUCCESS
def test_help_command_should_exit_status_ok_when_no_cmd_is_specified(script):
    """
    Test `help` command for no command
    """
    result = script.pip('help')
    assert result.returncode == SUCCESS
def test_help_command_should_exit_status_error_when_cmd_does_not_exist(script):
    """
    Test `help` command for non-existing command
    """
    result = script.pip('help', 'mycommand', expect_error=True)
    assert result.returncode == ERROR
def test_help_commands_equally_functional(in_memory_pip):
    """
    Test if `pip help` and 'pip --help' behave the same way.
    """
    results = list(map(in_memory_pip.pip, ('help', '--help')))
    results.append(in_memory_pip.pip())
    # Use lists, not `map(...)` iterators: `out` is consumed twice below and,
    # on Python 3, an exhausted map would make the final `all(...)` check
    # vacuously true.
    out = [x.stdout for x in results]
    ret = [x.returncode for x in results]
    msg = '"pip --help" != "pip help" != "pip"'
    assert len(set(out)) == 1, 'output of: ' + msg
    assert sum(ret) == 0, 'exit codes of: ' + msg
    assert all(len(o) > 0 for o in out)
    # Every visible subcommand must produce identical, non-empty help text via
    # `pip help <cmd>` and `pip <cmd> --help`.
    for name, cls in commands.items():
        if cls.hidden:
            continue
        assert (
            in_memory_pip.pip('help', name).stdout ==
            in_memory_pip.pip(name, '--help').stdout != ""
        )
| 28.054945 | 79 | 0.680376 | import pytest
from mock import Mock
from pip._internal.basecommand import ERROR, SUCCESS
from pip._internal.commands import commands_dict as commands
from pip._internal.commands.help import HelpCommand
from pip._internal.exceptions import CommandError
def test_run_method_should_return_success_when_finds_command_name():
    """HelpCommand.run succeeds for an existing command name."""
    options_mock = Mock()
    args = ('freeze',)  # 'freeze' is a built-in pip command
    help_cmd = HelpCommand()
    status = help_cmd.run(options_mock, args)
    assert status == SUCCESS
def test_run_method_should_return_success_when_command_name_not_specified():
    """HelpCommand.run succeeds when no command name is given."""
    options_mock = Mock()
    args = ()
    help_cmd = HelpCommand()
    status = help_cmd.run(options_mock, args)
    assert status == SUCCESS
def test_run_method_should_raise_command_error_when_command_does_not_exist():
    """HelpCommand.run raises CommandError for an unknown command."""
    options_mock = Mock()
    args = ('mycommand',)
    help_cmd = HelpCommand()
    with pytest.raises(CommandError):
        help_cmd.run(options_mock, args)
def test_help_command_should_exit_status_ok_when_command_exists(script):
    """`pip help <existing command>` exits with SUCCESS."""
    result = script.pip('help', 'freeze')
    assert result.returncode == SUCCESS
def test_help_command_should_exit_status_ok_when_no_cmd_is_specified(script):
    """`pip help` with no command exits with SUCCESS."""
    result = script.pip('help')
    assert result.returncode == SUCCESS
def test_help_command_should_exit_status_error_when_cmd_does_not_exist(script):
    """`pip help <unknown command>` exits with ERROR."""
    result = script.pip('help', 'mycommand', expect_error=True)
    assert result.returncode == ERROR
def test_help_commands_equally_functional(in_memory_pip):
    """`pip help`, `pip --help` and bare `pip` must all print the same help."""
    results = list(map(in_memory_pip.pip, ('help', '--help')))
    results.append(in_memory_pip.pip())
    # Use lists, not `map(...)` iterators: `out` is consumed twice below and,
    # on Python 3, an exhausted map would make the final `all(...)` check
    # vacuously true.
    out = [x.stdout for x in results]
    ret = [x.returncode for x in results]
    msg = '"pip --help" != "pip help" != "pip"'
    assert len(set(out)) == 1, 'output of: ' + msg
    assert sum(ret) == 0, 'exit codes of: ' + msg
    assert all(len(o) > 0 for o in out)
    # Every visible subcommand must produce identical, non-empty help text via
    # `pip help <cmd>` and `pip <cmd> --help`.
    for name, cls in commands.items():
        if cls.hidden:
            continue
        assert (
            in_memory_pip.pip('help', name).stdout ==
            in_memory_pip.pip(name, '--help').stdout != ""
        )
| true | true |
1c3bc2d48eb7393dec3dbdb31c36e7909183d803 | 1,644 | py | Python | helpers/decorators.py | mfmviip/Arabic | 6687885cc795817677f892939860e4f196d11433 | [
"Apache-2.0"
] | null | null | null | helpers/decorators.py | mfmviip/Arabic | 6687885cc795817677f892939860e4f196d11433 | [
"Apache-2.0"
] | null | null | null | helpers/decorators.py | mfmviip/Arabic | 6687885cc795817677f892939860e4f196d11433 | [
"Apache-2.0"
] | null | null | null | # Calls Music 1 - Telegram bot for streaming audio in group calls
# Copyright (C) 2021 MFMVIP
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from typing import Callable
from pyrogram import Client
from pyrogram.types import Message
from helpers.admins import get_administrators
from config import SUDO_USERS
def errors(func: Callable) -> Callable:
    """Decorator for handlers: on any exception, reply to the triggering
    message with ``ExceptionType: message`` instead of letting the handler
    crash; returns None in that case."""
    from functools import wraps  # local import: keep module deps unchanged
    @wraps(func)  # preserve the wrapped handler's name/docstring for debugging
    async def decorator(client: "Client", message: "Message"):
        try:
            return await func(client, message)
        except Exception as e:
            await message.reply(f"{type(e).__name__}: {e}")
    return decorator
def authorized_users_only(func: Callable) -> Callable:
    """Decorator: run the handler only for SUDO_USERS or administrators of
    the chat; other senders are silently ignored (decorator returns None)."""
    from functools import wraps  # local import: keep module deps unchanged
    @wraps(func)  # preserve the wrapped handler's name/docstring for debugging
    async def decorator(client: "Client", message: "Message"):
        user_id = message.from_user.id
        if user_id in SUDO_USERS:
            return await func(client, message)
        administrators = await get_administrators(message.chat)
        # Membership test replaces the original manual equality loop.
        if user_id in administrators:
            return await func(client, message)
    return decorator
| 33.55102 | 74 | 0.725061 |
from typing import Callable
from pyrogram import Client
from pyrogram.types import Message
from helpers.admins import get_administrators
from config import SUDO_USERS
def errors(func: Callable) -> Callable:
    """Decorator: reply with ``ExceptionType: message`` instead of crashing."""
    async def decorator(client: Client, message: Message):
        try:
            return await func(client, message)
        except Exception as e:
            # Surface the failure to the chat rather than the log only.
            await message.reply(f"{type(e).__name__}: {e}")
    return decorator
def authorized_users_only(func: Callable) -> Callable:
    """Decorator: only run the handler for SUDO_USERS or chat admins;
    everyone else is silently ignored (returns None)."""
    async def decorator(client: Client, message: Message):
        if message.from_user.id in SUDO_USERS:
            return await func(client, message)
        administrators = await get_administrators(message.chat)
        for administrator in administrators:
            if administrator == message.from_user.id:
                return await func(client, message)
    return decorator
| true | true |
1c3bc40597cbe01e2e381244860e4ccd246e1bc1 | 7,468 | py | Python | hyou/view.py | endast/hyou | e95455fde1c56707529283b17a333a2b8596edae | [
"Apache-2.0"
] | 1 | 2021-07-06T23:38:10.000Z | 2021-07-06T23:38:10.000Z | hyou/view.py | endast/hyou | e95455fde1c56707529283b17a333a2b8596edae | [
"Apache-2.0"
] | null | null | null | hyou/view.py | endast/hyou | e95455fde1c56707529283b17a333a2b8596edae | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import six
from . import py3
from . import util
class View(util.CustomMutableFixedList):
    """A rectangular, lazily-fetched view of a worksheet.

    Reads are served from a local cache populated on first access via the
    Sheets API; writes are queued locally and sent in one batch by commit().
    Rows are exposed as ViewRow objects, so the view behaves like a fixed-size
    list of lists.
    """
    def __init__(self, worksheet, api, start_row, end_row, start_col, end_col):
        self._worksheet = worksheet
        self._api = api
        self._start_row = start_row
        self._end_row = end_row
        self._start_col = start_col
        self._end_col = end_col
        self._view_rows = [
            ViewRow(self, row, start_col, end_col)
            for row in py3.range(start_row, end_row)]
        # (row, col) -> formatted cell value cache.
        self._input_value_map = {}
        self._cells_fetched = False
        # Pending (row, col, value) writes, flushed by commit().
        self._queued_updates = []
    def refresh(self):
        """Drop the cache and any pending (uncommitted) writes."""
        self._input_value_map.clear()
        self._cells_fetched = False
        del self._queued_updates[:]
    def _ensure_cells_fetched(self):
        """Fetch the whole view's values from the API once and cache them."""
        if self._cells_fetched:
            return
        range_str = util.format_range_a1_notation(
            self._worksheet.title, self._start_row, self._end_row,
            self._start_col, self._end_col)
        response = self._api.sheets.spreadsheets().values().get(
            spreadsheetId=self._worksheet._spreadsheet.key,
            range=py3.str_to_native_str(range_str),
            majorDimension='ROWS',
            valueRenderOption='FORMATTED_VALUE',
            dateTimeRenderOption='FORMATTED_STRING').execute()
        self._input_value_map = {}
        for i, row in enumerate(response.get('values', [])):
            index_row = self._start_row + i
            for j, value in enumerate(row):
                index_col = self._start_col + j
                # setdefault: don't clobber values written since the fetch started.
                self._input_value_map.setdefault((index_row, index_col), value)
        self._cells_fetched = True
    def commit(self):
        """Send all queued cell writes to the API in a single batch update."""
        if not self._queued_updates:
            return
        request = {
            'data': [
                {
                    # One single-cell range per queued write.
                    'range': util.format_range_a1_notation(
                        self._worksheet.title, row, row + 1, col, col + 1),
                    'majorDimension': 'ROWS',
                    'values': [[value]],
                }
                for row, col, value in self._queued_updates
            ],
            'valueInputOption': 'USER_ENTERED',
            'includeValuesInResponse': False,
        }
        self._api.sheets.spreadsheets().values().batchUpdate(
            spreadsheetId=self._worksheet._spreadsheet.key,
            body=request).execute()
        del self._queued_updates[:]
    def __getitem__(self, index):
        return self._view_rows[index]
    def __setitem__(self, index, new_value):
        # Slice assignment distributes row-by-row; step slicing is unsupported.
        if isinstance(index, slice):
            start, stop, step = index.indices(len(self))
            assert step == 1, 'slicing with step is not supported'
            if stop < start:
                stop = start
            if len(new_value) != stop - start:
                raise ValueError(
                    'Tried to assign %d values to %d element slice' %
                    (len(new_value), stop - start))
            for i, new_value_one in py3.zip(py3.range(start, stop), new_value):
                self[i] = new_value_one
            return
        # Single row: delegate to the ViewRow's slice assignment.
        self._view_rows[index][:] = new_value
    def __len__(self):
        return self.rows
    def __iter__(self):
        return iter(self._view_rows)
    def __repr__(self):
        return str('View(%r)') % (self._view_rows,)
    @property
    def rows(self):
        # Number of rows in this view.
        return self._end_row - self._start_row
    @property
    def cols(self):
        # Number of columns in this view.
        return self._end_col - self._start_col
    @property
    def start_row(self):
        return self._start_row
    @property
    def end_row(self):
        return self._end_row
    @property
    def start_col(self):
        return self._start_col
    @property
    def end_col(self):
        return self._end_col
class ViewRow(util.CustomMutableFixedList):
    """One row of a :class:`View`; reads and writes go through the parent view.

    Holds no cell data itself -- only the parent view reference, the absolute
    row index and the half-open column bounds [start_col, end_col).
    """

    def __init__(self, view, row, start_col, end_col):
        self._view = view
        self._row = row
        self._start_col = start_col
        self._end_col = end_col

    def __getitem__(self, index):
        """Return a cell value, or a sub-ViewRow for a slice (step 1 only)."""
        if isinstance(index, slice):
            start, stop, step = index.indices(len(self))
            assert step == 1, 'slicing with step is not supported'
            if stop < start:
                stop = start
            # A slice yields another live view, not a copied list.
            return ViewRow(
                self._view, self._row,
                self._start_col + start, self._start_col + stop)
        assert isinstance(index, six.integer_types)
        # Negative indices count back from end_col, mirroring list semantics.
        if index < 0:
            col = self._end_col + index
        else:
            col = self._start_col + index
        if not (self._start_col <= col < self._end_col):
            raise IndexError('Column %d is out of range.' % col)
        # Lazily fetch the whole range the first time a cell is missing.
        if (self._row, col) not in self._view._input_value_map:
            self._view._ensure_cells_fetched()
        # Cells absent from the API response read as the empty string.
        return self._view._input_value_map.get((self._row, col), '')

    def __setitem__(self, index, new_value):
        """Write a cell (or a slice of cells), normalizing values to text."""
        if isinstance(index, slice):
            start, stop, step = index.indices(len(self))
            assert step == 1, 'slicing with step is not supported'
            if stop < start:
                stop = start
            if len(new_value) != stop - start:
                raise ValueError(
                    'Tried to assign %d values to %d element slice' %
                    (len(new_value), stop - start))
            # Delegate each cell assignment to the non-slice branch below.
            for i, new_value_one in py3.zip(py3.range(start, stop), new_value):
                self[i] = new_value_one
            return
        assert isinstance(index, six.integer_types)
        if index < 0:
            col = self._end_col + index
        else:
            col = self._start_col + index
        if not (self._start_col <= col < self._end_col):
            raise IndexError('Column %d is out of range.' % col)
        # Normalize to text: None -> '', ints via %d, floats via %.20e,
        # bytes via ASCII decode, everything else via str().
        if new_value is None:
            new_value = ''
        elif isinstance(new_value, six.integer_types):
            new_value = '%d' % new_value
        elif isinstance(new_value, float):
            # Do best not to lose precision...
            new_value = '%.20e' % new_value
        elif isinstance(new_value, py3.bytes):
            # May raise UnicodeDecodeError.
            new_value = new_value.decode('ascii')
        elif not isinstance(new_value, py3.str):
            new_value = py3.str(new_value)
        assert isinstance(new_value, py3.str)
        # Update the shared cache and queue the write for View.commit().
        self._view._input_value_map[(self._row, col)] = new_value
        self._view._queued_updates.append((self._row, col, new_value))

    def __len__(self):
        return self._end_col - self._start_col

    def __iter__(self):
        # Iteration always needs the full range, so force the fetch up front.
        self._view._ensure_cells_fetched()
        for col in py3.range(self._start_col, self._end_col):
            yield self._view._input_value_map.get((self._row, col), '')

    def __repr__(self):
        return repr(list(self))
| 35.226415 | 79 | 0.595474 |
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import six
from . import py3
from . import util
class View(util.CustomMutableFixedList):
    """A fixed-size 2-D view onto a rectangular cell range of a worksheet.

    Rows are exposed as :class:`ViewRow` objects, so ``view[i][j]`` addresses a
    single cell.  Cell values are fetched lazily from the Sheets API (and
    cached) on first read; writes are queued locally and sent in one batch by
    :meth:`commit`.
    """

    def __init__(self, worksheet, api, start_row, end_row, start_col, end_col):
        # Bounds are half-open: rows [start_row, end_row), cols [start_col, end_col).
        self._worksheet = worksheet
        self._api = api
        self._start_row = start_row
        self._end_row = end_row
        self._start_col = start_col
        self._end_col = end_col
        # One ViewRow per row; rows share this View's cache and update queue.
        self._view_rows = [
            ViewRow(self, row, start_col, end_col)
            for row in py3.range(start_row, end_row)]
        # (row, col) -> formatted cell value; filled by _ensure_cells_fetched()
        # and by local writes.
        self._input_value_map = {}
        self._cells_fetched = False
        # Pending writes as (row, col, value) tuples, flushed by commit().
        self._queued_updates = []

    def refresh(self):
        """Drop the local cell cache and discard any uncommitted writes."""
        self._input_value_map.clear()
        self._cells_fetched = False
        del self._queued_updates[:]

    def _ensure_cells_fetched(self):
        """Fetch all cell values of this range once and cache them."""
        if self._cells_fetched:
            return
        range_str = util.format_range_a1_notation(
            self._worksheet.title, self._start_row, self._end_row,
            self._start_col, self._end_col)
        response = self._api.sheets.spreadsheets().values().get(
            spreadsheetId=self._worksheet._spreadsheet.key,
            range=py3.str_to_native_str(range_str),
            majorDimension='ROWS',
            valueRenderOption='FORMATTED_VALUE',
            dateTimeRenderOption='FORMATTED_STRING').execute()
        # NOTE(review): rebuilding the map here drops values written locally
        # before the first fetch (they remain queued for commit()) -- confirm
        # this is intended.
        self._input_value_map = {}
        for i, row in enumerate(response.get('values', [])):
            index_row = self._start_row + i
            for j, value in enumerate(row):
                index_col = self._start_col + j
                self._input_value_map.setdefault((index_row, index_col), value)
        self._cells_fetched = True

    def commit(self):
        """Send all queued writes to the API as a single batchUpdate call."""
        if not self._queued_updates:
            return
        request = {
            'data': [
                {
                    # Each queued write becomes a 1x1 range in the batch body.
                    'range': util.format_range_a1_notation(
                        self._worksheet.title, row, row + 1, col, col + 1),
                    'majorDimension': 'ROWS',
                    'values': [[value]],
                }
                for row, col, value in self._queued_updates
            ],
            'valueInputOption': 'USER_ENTERED',
            'includeValuesInResponse': False,
        }
        self._api.sheets.spreadsheets().values().batchUpdate(
            spreadsheetId=self._worksheet._spreadsheet.key,
            body=request).execute()
        del self._queued_updates[:]

    def __getitem__(self, index):
        """Return the ViewRow (or list of rows for a slice) at *index*."""
        return self._view_rows[index]

    def __setitem__(self, index, new_value):
        """Assign a sequence of row values; slices need matching lengths."""
        if isinstance(index, slice):
            start, stop, step = index.indices(len(self))
            assert step == 1, 'slicing with step is not supported'
            if stop < start:
                stop = start
            if len(new_value) != stop - start:
                raise ValueError(
                    'Tried to assign %d values to %d element slice' %
                    (len(new_value), stop - start))
            # Delegate each row assignment to the non-slice branch below.
            for i, new_value_one in py3.zip(py3.range(start, stop), new_value):
                self[i] = new_value_one
            return
        # Replace the row's cells in place via ViewRow's slice assignment.
        self._view_rows[index][:] = new_value

    def __len__(self):
        return self.rows

    def __iter__(self):
        return iter(self._view_rows)

    def __repr__(self):
        return str('View(%r)') % (self._view_rows,)

    @property
    def rows(self):
        """Number of rows covered by this view."""
        return self._end_row - self._start_row

    @property
    def cols(self):
        """Number of columns covered by this view."""
        return self._end_col - self._start_col

    @property
    def start_row(self):
        return self._start_row

    @property
    def end_row(self):
        return self._end_row

    @property
    def start_col(self):
        return self._start_col

    @property
    def end_col(self):
        return self._end_col
class ViewRow(util.CustomMutableFixedList):
    """One row of a :class:`View`; reads and writes go through the parent view.

    Holds no cell data itself -- only the parent view reference, the absolute
    row index and the half-open column bounds [start_col, end_col).
    """

    def __init__(self, view, row, start_col, end_col):
        self._view = view
        self._row = row
        self._start_col = start_col
        self._end_col = end_col

    def __getitem__(self, index):
        """Return a cell value, or a sub-ViewRow for a slice (step 1 only)."""
        if isinstance(index, slice):
            start, stop, step = index.indices(len(self))
            assert step == 1, 'slicing with step is not supported'
            if stop < start:
                stop = start
            # A slice yields another live view, not a copied list.
            return ViewRow(
                self._view, self._row,
                self._start_col + start, self._start_col + stop)
        assert isinstance(index, six.integer_types)
        # Negative indices count back from end_col, mirroring list semantics.
        if index < 0:
            col = self._end_col + index
        else:
            col = self._start_col + index
        if not (self._start_col <= col < self._end_col):
            raise IndexError('Column %d is out of range.' % col)
        # Lazily fetch the whole range the first time a cell is missing.
        if (self._row, col) not in self._view._input_value_map:
            self._view._ensure_cells_fetched()
        # Cells absent from the API response read as the empty string.
        return self._view._input_value_map.get((self._row, col), '')

    def __setitem__(self, index, new_value):
        """Write a cell (or a slice of cells), normalizing values to text."""
        if isinstance(index, slice):
            start, stop, step = index.indices(len(self))
            assert step == 1, 'slicing with step is not supported'
            if stop < start:
                stop = start
            if len(new_value) != stop - start:
                raise ValueError(
                    'Tried to assign %d values to %d element slice' %
                    (len(new_value), stop - start))
            # Delegate each cell assignment to the non-slice branch below.
            for i, new_value_one in py3.zip(py3.range(start, stop), new_value):
                self[i] = new_value_one
            return
        assert isinstance(index, six.integer_types)
        if index < 0:
            col = self._end_col + index
        else:
            col = self._start_col + index
        if not (self._start_col <= col < self._end_col):
            raise IndexError('Column %d is out of range.' % col)
        # Normalize to text: None -> '', ints via %d, floats via %.20e
        # (to preserve precision), bytes via ASCII decode (may raise
        # UnicodeDecodeError), everything else via str().
        if new_value is None:
            new_value = ''
        elif isinstance(new_value, six.integer_types):
            new_value = '%d' % new_value
        elif isinstance(new_value, float):
            new_value = '%.20e' % new_value
        elif isinstance(new_value, py3.bytes):
            new_value = new_value.decode('ascii')
        elif not isinstance(new_value, py3.str):
            new_value = py3.str(new_value)
        assert isinstance(new_value, py3.str)
        # Update the shared cache and queue the write for View.commit().
        self._view._input_value_map[(self._row, col)] = new_value
        self._view._queued_updates.append((self._row, col, new_value))

    def __len__(self):
        return self._end_col - self._start_col

    def __iter__(self):
        # Iteration always needs the full range, so force the fetch up front.
        self._view._ensure_cells_fetched()
        for col in py3.range(self._start_col, self._end_col):
            yield self._view._input_value_map.get((self._row, col), '')

    def __repr__(self):
        return repr(list(self))
| true | true |
1c3bc42d40ce6c3bf59f9f7448c64b5915065d1e | 1,245 | py | Python | tests/operators/test_zype_operator.py | Dminor7/airflow-provider-zype | 949b2665509ac12c3691ebd47e8ac3f641bd73de | [
"Apache-2.0"
] | null | null | null | tests/operators/test_zype_operator.py | Dminor7/airflow-provider-zype | 949b2665509ac12c3691ebd47e8ac3f641bd73de | [
"Apache-2.0"
] | null | null | null | tests/operators/test_zype_operator.py | Dminor7/airflow-provider-zype | 949b2665509ac12c3691ebd47e8ac3f641bd73de | [
"Apache-2.0"
] | null | null | null | """
Unittest module to test Operators.
Requires the unittest, pytest, and requests-mock Python libraries.
Run test:
python -m unittest tests.operators.test_zype_operator.TestZypeOperator
"""
import json
import logging
import os
import unittest
from unittest import mock
# Import Operator
from zype_provider.operators.zype_operator import ZypeOperator
log = logging.getLogger(__name__)
# Mock the `conn_zype` Airflow connection
@mock.patch.dict("os.environ", AIRFLOW_CONN_CONN_ZYPE="zype://:<YOUR_API_KEY>@")
class TestZypeOperator(unittest.TestCase):
    """Exercise the Zype operator end to end through its execute() method."""

    def test_operator(self):
        expected_count = 30
        operator_kwargs = {
            "task_id": "run_operator",
            "zype_conn_id": "conn_zype",
            "resource": "list_videos",
            "max_pages": 1,
            "request_kwargs": {"params": {"per_page": expected_count}},
        }
        operator = ZypeOperator(**operator_kwargs)
        # Airflow supplies the task context at runtime; an empty dict suffices here.
        data = operator.execute(context={})
        log.info("Execution Complete")
        # One page of `expected_count` videos should yield exactly that many records.
        assert len(data) == expected_count
# Allow running the tests in this module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 23.055556 | 111 | 0.682731 |
import json
import logging
import os
import unittest
from unittest import mock
from zype_provider.operators.zype_operator import ZypeOperator
log = logging.getLogger(__name__)
@mock.patch.dict("os.environ", AIRFLOW_CONN_CONN_ZYPE="zype://:<YOUR_API_KEY>@")
class TestZypeOperator(unittest.TestCase):
    """Exercise the Zype operator end to end through its execute() method."""

    def test_operator(self):
        # Request a single page of 30 videos and expect exactly that many records.
        per_page = 30
        operator = ZypeOperator(
            task_id="run_operator",
            zype_conn_id="conn_zype",
            resource="list_videos",
            max_pages=1,
            request_kwargs={"params": {"per_page": per_page}},
        )
        # Airflow supplies the task context at runtime; an empty dict suffices here.
        data = operator.execute(context={})
        log.info("Execution Complete")
        assert len(data) == per_page
# Allow running the tests in this module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| true | true |
1c3bc49c607f819f24310097910301e35f2df79c | 2,032 | py | Python | pykeyvi/tests/dictionary_compiler_test.py | jsdelivrbot/keyvi-1 | c56fb92da5eed655bf20e46d56f1e117b2556e3e | [
"Apache-2.0"
] | 147 | 2015-10-06T19:10:01.000Z | 2021-08-19T07:52:02.000Z | pykeyvi/tests/dictionary_compiler_test.py | jsdelivrbot/keyvi-1 | c56fb92da5eed655bf20e46d56f1e117b2556e3e | [
"Apache-2.0"
] | 148 | 2015-10-06T09:24:56.000Z | 2018-12-08T08:42:54.000Z | pykeyvi/tests/dictionary_compiler_test.py | jsdelivrbot/keyvi-1 | c56fb92da5eed655bf20e46d56f1e117b2556e3e | [
"Apache-2.0"
] | 34 | 2015-10-09T06:55:52.000Z | 2021-01-05T18:43:57.000Z | # -*- coding: utf-8 -*-
# Usage: py.test tests
import os
import pykeyvi
import shutil
import tempfile
import test_tools
from pytest import raises
def test_compiler_no_compile_edge_case():
    """Destroying a compiler that has keys but was never compiled must not crash."""
    compiler = pykeyvi.KeyOnlyDictionaryCompiler({"memory_limit_mb": "10"})
    for key in ("abc", "abd"):
        compiler.Add(key)
    del compiler
def test_compiler_no_compile_edge_case_empty():
    """Creating and discarding an empty, never-compiled compiler must not crash."""
    c = pykeyvi.KeyOnlyDictionaryCompiler({"memory_limit_mb":"10"})
    del c
def test_compiler_empty():
    """An empty key-only compiler produces a dictionary with zero entries."""
    c = pykeyvi.KeyOnlyDictionaryCompiler({"memory_limit_mb":"10"})
    with test_tools.tmp_dictionary(c, 'empty.kv') as d:
        assert len(d) == 0
def test_compiler_empty_json():
    """An empty JSON compiler produces a dictionary with zero entries."""
    c = pykeyvi.JsonDictionaryCompiler({"memory_limit_mb":"10"})
    with test_tools.tmp_dictionary(c, 'empty_json.kv') as d:
        assert len(d) == 0
def test_tmp_dir():
    """The compiler must not leave temporary files in the working directory."""
    cwd = os.getcwd()
    os.chdir(tempfile.gettempdir())
    try:
        # Work inside a fresh, empty directory so any stray file is detectable.
        os.mkdir("tmp_dir_test")
        os.chdir(os.path.join(tempfile.gettempdir(), "tmp_dir_test"))
        c = pykeyvi.JsonDictionaryCompiler({"memory_limit_mb":"10"})
        c.Add("abc", "{'a':2}")
        # The cwd must stay empty through add, compile and destruction.
        assert os.listdir('.') == []
        c.Compile()
        assert os.listdir('.') == []
        del c
        assert os.listdir('.') == []
    finally:
        # Leave the test directory before removing it (it must still be empty).
        os.chdir(cwd)
        os.rmdir(os.path.join(tempfile.gettempdir(), "tmp_dir_test"))
def test_tmp_dir_defined():
    """With temporary_path set, the compiler writes its scratch files there."""
    def run_compile(tmpdir):
        c = pykeyvi.JsonDictionaryCompiler({"memory_limit_mb":"10", "temporary_path": tmpdir})
        c.Add("abc", "{'a':2}")
        c.Compile()
        # The configured directory must now contain the compiler's scratch files.
        assert os.listdir(test_dir) != []
    test_dir = os.path.join(tempfile.gettempdir(), "tmp_dir_test_defined")
    try:
        os.mkdir(test_dir)
        run_compile(test_dir)
    finally:
        # NOTE(review): this compiler is created and immediately discarded --
        # looks like leftover code; confirm whether it is needed before the
        # rmtree below or can be removed.
        pykeyvi.JsonDictionaryCompiler({"memory_limit_mb":"10"})
        shutil.rmtree(test_dir)
def test_compile_step_missing():
    """Writing a dictionary to disk before Compile() must raise RuntimeError."""
    compiler = pykeyvi.KeyOnlyDictionaryCompiler()
    for key in ("abc", "abd"):
        compiler.Add(key)
    with raises(RuntimeError):
        compiler.WriteToFile("compile_step_missing.kv")
| 27.459459 | 94 | 0.643701 |
import os
import pykeyvi
import shutil
import tempfile
import test_tools
from pytest import raises
def test_compiler_no_compile_edge_case():
    """Destroying a compiler that has keys but was never compiled must not crash."""
    c = pykeyvi.KeyOnlyDictionaryCompiler({"memory_limit_mb":"10"})
    c.Add("abc")
    c.Add("abd")
    del c
def test_compiler_no_compile_edge_case_empty():
    """Creating and discarding an empty, never-compiled compiler must not crash."""
    c = pykeyvi.KeyOnlyDictionaryCompiler({"memory_limit_mb":"10"})
    del c
def test_compiler_empty():
    """An empty key-only compiler produces a dictionary with zero entries."""
    c = pykeyvi.KeyOnlyDictionaryCompiler({"memory_limit_mb":"10"})
    with test_tools.tmp_dictionary(c, 'empty.kv') as d:
        assert len(d) == 0
def test_compiler_empty_json():
    """An empty JSON compiler produces a dictionary with zero entries."""
    c = pykeyvi.JsonDictionaryCompiler({"memory_limit_mb":"10"})
    with test_tools.tmp_dictionary(c, 'empty_json.kv') as d:
        assert len(d) == 0
def test_tmp_dir():
    """The compiler must not leave temporary files in the working directory."""
    cwd = os.getcwd()
    os.chdir(tempfile.gettempdir())
    try:
        # Work inside a fresh, empty directory so any stray file is detectable.
        os.mkdir("tmp_dir_test")
        os.chdir(os.path.join(tempfile.gettempdir(), "tmp_dir_test"))
        c = pykeyvi.JsonDictionaryCompiler({"memory_limit_mb":"10"})
        c.Add("abc", "{'a':2}")
        # The cwd must stay empty through add, compile and destruction.
        assert os.listdir('.') == []
        c.Compile()
        assert os.listdir('.') == []
        del c
        assert os.listdir('.') == []
    finally:
        # Leave the test directory before removing it (it must still be empty).
        os.chdir(cwd)
        os.rmdir(os.path.join(tempfile.gettempdir(), "tmp_dir_test"))
def test_tmp_dir_defined():
    """With temporary_path set, the compiler writes its scratch files there."""
    def run_compile(tmpdir):
        c = pykeyvi.JsonDictionaryCompiler({"memory_limit_mb":"10", "temporary_path": tmpdir})
        c.Add("abc", "{'a':2}")
        c.Compile()
        # The configured directory must now contain the compiler's scratch files.
        assert os.listdir(test_dir) != []
    test_dir = os.path.join(tempfile.gettempdir(), "tmp_dir_test_defined")
    try:
        os.mkdir(test_dir)
        run_compile(test_dir)
    finally:
        # NOTE(review): this compiler is created and immediately discarded --
        # looks like leftover code; confirm whether it can be removed.
        pykeyvi.JsonDictionaryCompiler({"memory_limit_mb":"10"})
        shutil.rmtree(test_dir)
def test_compile_step_missing():
    """Writing a dictionary to disk before Compile() must raise RuntimeError."""
    c = pykeyvi.KeyOnlyDictionaryCompiler()
    c.Add("abc")
    c.Add("abd")
    with raises(RuntimeError):
        c.WriteToFile("compile_step_missing.kv")
| true | true |
1c3bc5a85717b0debece6004bd124e39021b7f9a | 1,504 | py | Python | rdmo/views/views.py | m6121/rdmo | db3990c7525138c6ce9634fc3e5b6b8ee9b915c8 | [
"Apache-2.0"
] | 77 | 2016-08-09T11:40:20.000Z | 2022-03-06T11:03:26.000Z | rdmo/views/views.py | m6121/rdmo | db3990c7525138c6ce9634fc3e5b6b8ee9b915c8 | [
"Apache-2.0"
] | 377 | 2016-07-01T13:59:36.000Z | 2022-03-30T13:53:19.000Z | rdmo/views/views.py | m6121/rdmo | db3990c7525138c6ce9634fc3e5b6b8ee9b915c8 | [
"Apache-2.0"
] | 47 | 2016-06-23T11:32:19.000Z | 2022-03-01T11:34:37.000Z | import logging
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.views.generic import ListView, TemplateView
from rdmo.core.exports import XMLResponse
from rdmo.core.utils import get_model_field_meta, render_to_format
from rdmo.core.views import CSRFViewMixin, ModelPermissionMixin
from .models import View
from .renderers import ViewRenderer
from .serializers.export import ViewExportSerializer
log = logging.getLogger(__name__)
class ViewsView(ModelPermissionMixin, CSRFViewMixin, TemplateView):
    """Management page listing the configured views (requires view permission)."""

    template_name = 'views/views.html'
    permission_required = 'views.view_view'

    def get_context_data(self, **kwargs):
        """Add the export formats and View model metadata to the context."""
        context = super().get_context_data(**kwargs)
        context.update({
            'export_formats': settings.EXPORT_FORMATS,
            'meta': {'View': get_model_field_meta(View)},
        })
        return context
class ViewsExportView(ModelPermissionMixin, ListView):
    """Export all views as RDMO XML, or through a rendered template format."""

    model = View
    context_object_name = 'views'
    permission_required = 'views.view_view'

    def render_to_response(self, context, **response_kwargs):
        """Serialize the queryset to XML for 'xml'; delegate everything else."""
        export_format = self.kwargs.get('format')
        if export_format != 'xml':
            return render_to_format(
                self.request, export_format, _('Views'),
                'views/views_export.html', context)
        serializer = ViewExportSerializer(context['views'], many=True)
        xml = ViewRenderer().render(serializer.data)
        return XMLResponse(xml, name='views')
| 34.181818 | 105 | 0.724734 | import logging
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.views.generic import ListView, TemplateView
from rdmo.core.exports import XMLResponse
from rdmo.core.utils import get_model_field_meta, render_to_format
from rdmo.core.views import CSRFViewMixin, ModelPermissionMixin
from .models import View
from .renderers import ViewRenderer
from .serializers.export import ViewExportSerializer
log = logging.getLogger(__name__)
class ViewsView(ModelPermissionMixin, CSRFViewMixin, TemplateView):
    """Management page listing the configured views (requires view permission)."""

    template_name = 'views/views.html'
    permission_required = 'views.view_view'

    def get_context_data(self, **kwargs):
        # Expose the available export formats and View model metadata to the template.
        context = super(ViewsView, self).get_context_data(**kwargs)
        context['export_formats'] = settings.EXPORT_FORMATS
        context['meta'] = {
            'View': get_model_field_meta(View)
        }
        return context
class ViewsExportView(ModelPermissionMixin, ListView):
    """Export all views as RDMO XML, or through a rendered template format."""

    model = View
    context_object_name = 'views'
    permission_required = 'views.view_view'

    def render_to_response(self, context, **response_kwargs):
        # 'xml' yields the canonical RDMO export; any other format is delegated
        # to the generic format renderer.
        format = self.kwargs.get('format')
        if format == 'xml':
            serializer = ViewExportSerializer(context['views'], many=True)
            xml = ViewRenderer().render(serializer.data)
            return XMLResponse(xml, name='views')
        else:
            return render_to_format(self.request, format, _('Views'), 'views/views_export.html', context)
| true | true |
1c3bc63ed82243be89e16b8f67010dc1fd4599a0 | 21,647 | py | Python | pyIndego/indego_async_client.py | MagaliDB/pyIndego | 3764eb01b5ef6a2903c7eaede578208b32e755a1 | [
"MIT"
] | null | null | null | pyIndego/indego_async_client.py | MagaliDB/pyIndego | 3764eb01b5ef6a2903c7eaede578208b32e755a1 | [
"MIT"
] | null | null | null | pyIndego/indego_async_client.py | MagaliDB/pyIndego | 3764eb01b5ef6a2903c7eaede578208b32e755a1 | [
"MIT"
] | null | null | null | """API for Bosch API server for Indego lawn mower."""
import asyncio
import logging
from socket import error as SocketError
from typing import Any
import aiohttp
from aiohttp import (
ClientOSError,
ClientResponseError,
ServerTimeoutError,
TooManyRedirects,
)
from aiohttp.helpers import BasicAuth
from aiohttp.web_exceptions import HTTPGatewayTimeout
from . import __version__
from .const import (
COMMANDS,
CONTENT_TYPE_JSON,
DEFAULT_BODY,
DEFAULT_CALENDAR,
DEFAULT_HEADER,
DEFAULT_URL,
Methods,
)
from .indego_base_client import IndegoBaseClient
from .states import Calendar
_LOGGER = logging.getLogger(__name__)
class IndegoAsyncClient(IndegoBaseClient):
"""Class for Indego Async Client."""
    def __init__(
        self,
        username: str,
        password: str,
        serial: str = None,
        map_filename: str = None,
        api_url: str = DEFAULT_URL,
        session: aiohttp.ClientSession = None,
    ):
        """Initialize the Async Client.

        Args:
            username (str): username for Indego Account
            password (str): password for Indego Account
            serial (str): serial number of the mower
            map_filename (str, optional): Filename to store maps in. Defaults to None.
            api_url (str, optional): url for the api, defaults to DEFAULT_URL.
            session (aiohttp.ClientSession, optional): existing session to reuse;
                when omitted a new one is created. Note that close() closes the
                session in either case.

        """
        super().__init__(username, password, serial, map_filename, api_url)
        if session:
            self._session = session
        else:
            # raise_for_status is off because _request() inspects the HTTP
            # status codes itself and maps them to retries/log messages.
            self._session = aiohttp.ClientSession(raise_for_status=False)
    async def __aenter__(self):
        """Enter for async with: log in when needed and return the client."""
        await self.start()
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        """Exit for async with: close the underlying HTTP session."""
        await self.close()

    async def start(self):
        """Login if not done."""
        if not self._logged_in:
            await self.login()

    async def close(self):
        """Close the aiohttp session.

        Note: this also closes a session that was supplied by the caller
        in __init__.
        """
        await self._session.close()
async def delete_alert(self, alert_index: int):
"""Delete the alert with the specified index.
Args:
alert_index (int): index of alert to be deleted, should be in range or length of alerts.
"""
if not self._alerts_loaded:
raise ValueError("Alerts not loaded, please run update_alerts first.")
alert_id = self._get_alert_by_index(alert_index)
if alert_id:
return await self._request(Methods.DELETE, f"alerts/{alert_id}/")
async def delete_all_alerts(self):
"""Delete all the alert."""
if not self._alerts_loaded:
raise ValueError("Alerts not loaded, please run update_alerts first.")
if self.alerts_count > 0:
return await asyncio.gather(
*[
self._request(Methods.DELETE, f"alerts/{alert.alert_id}")
for alert in self.alerts
]
)
_LOGGER.info("No alerts to delete")
return None
async def download_map(self, filename: str = None):
"""Download the map.
Args:
filename (str, optional): Filename for the map. Defaults to None, can also be filled by the filename set in init.
"""
if not self.serial:
return
if filename:
self.map_filename = filename
if not self.map_filename:
raise ValueError("No map filename defined.")
lawn_map = await self.get(f"alms/{self.serial}/map")
if lawn_map:
with open(self.map_filename, "wb") as file:
file.write(lawn_map)
async def put_alert_read(self, alert_index: int):
"""Set the alert to read.
Args:
alert_index (int): index of alert to be deleted, should be in range or length of alerts.
"""
if not self._alerts_loaded:
raise ValueError("Alerts not loaded, please run update_alerts first.")
alert_id = self._get_alert_by_index(alert_index)
if alert_id:
return await self._request(
Methods.PUT, f"alerts/{alert_id}", data={"read_status": "read"}
)
async def put_all_alerts_read(self):
"""Set to read the read_status of all alerts."""
if not self._alerts_loaded:
raise ValueError("Alerts not loaded, please run update_alerts first.")
if self.alerts_count > 0:
return await asyncio.gather(
*[
self._request(
Methods.PUT,
f"alerts/{alert.alert_id}",
data={"read_status": "read"},
)
for alert in self.alerts
]
)
_LOGGER.info("No alerts to set to read")
return None
    async def put_command(self, command: str):
        """Send a command to the mower.

        Args:
            command (str): one of "mow", "pause", "returnToDock" (see COMMANDS).

        Returns:
            the API response, or None when no serial is known.

        Raises:
            ValueError: if the command is not a known command.
        """
        if command in COMMANDS:
            if not self.serial:
                return
            return await self.put(f"alms/{self.serial}/state", {"state": command})
        raise ValueError("Wrong Command, use one of 'mow', 'pause', 'returnToDock'")
async def put_mow_mode(self, command: Any):
"""Set the mower to mode manual (false-ish) or predictive (true-ish).
Args:
command (str/bool): should be str that is bool-ish (true, True, false, False) or a bool.
Returns:
str: either result of the call or 'Wrong Command'
"""
if command in ("true", "false", "True", "False") or isinstance(command, bool):
if not self.serial:
return
return await self.put(
f"alms/{self.serial}/predictive", {"enabled": command}
)
raise ValueError("Wrong Command, use one True or False")
async def put_predictive_cal(self, calendar: dict = DEFAULT_CALENDAR):
"""Set the predictive calendar."""
try:
Calendar(**calendar["cals"][0])
except TypeError as exc:
raise ValueError("Value for calendar is not valid") from exc
if not self.serial:
return
return await self.put(f"alms/{self.serial}/predictive/calendar", calendar)
    async def update_alerts(self):
        """Fetch the alerts and cache them.

        Unlike the alms/ endpoints, alerts are account-level and are requested
        without a mower serial.
        """
        self._update_alerts(await self.get("alerts"))

    async def get_alerts(self):
        """Update alerts and return the cached list."""
        await self.update_alerts()
        return self.alerts
async def update_all(self):
"""Update all states."""
update_list = [
self.update_alerts(),
self.update_calendar(),
self.update_config(),
self.update_generic_data(),
self.update_last_completed_mow(),
self.update_location(),
self.update_network(),
self.update_next_mow(),
self.update_operating_data(),
self.update_predictive_calendar(),
self.update_predictive_schedule(),
self.update_security(),
self.update_setup(),
self.update_state(),
self.update_updates_available(),
self.update_user(),
]
results = await asyncio.gather(*update_list, return_exceptions=True)
for res in results:
if res:
_LOGGER.warning(res)
    # --- Paired fetch/cache helpers -------------------------------------
    # Each update_X() fetches from the API and caches via the base class;
    # the matching get_X() refreshes and returns the cached value.
    # update_X() is a silent no-op while no serial is known.

    async def update_calendar(self):
        """Update calendar."""
        if not self.serial:
            return
        self._update_calendar(await self.get(f"alms/{self.serial}/calendar"))

    async def get_calendar(self):
        """Update calendar and return them."""
        await self.update_calendar()
        return self.calendar

    async def update_config(self):
        """Update config."""
        if not self.serial:
            return
        self._update_config(await self.get(f"alms/{self.serial}/config"))

    async def get_config(self):
        """Update config and return it."""
        await self.update_config()
        return self.config

    async def update_generic_data(self):
        """Update generic data."""
        if not self.serial:
            return
        self._update_generic_data(await self.get(f"alms/{self.serial}"))

    async def get_generic_data(self):
        """Update generic_data and return it."""
        await self.update_generic_data()
        return self.generic_data
    # Paired fetch/cache helpers (continued); update_X() is a no-op
    # without a serial, get_X() refreshes then returns the cached value.

    async def update_last_completed_mow(self):
        """Update last completed mow."""
        if not self.serial:
            return
        self._update_last_completed_mow(
            await self.get(f"alms/{self.serial}/predictive/lastcutting")
        )

    async def get_last_completed_mow(self):
        """Update last_completed_mow and return it."""
        await self.update_last_completed_mow()
        return self.last_completed_mow

    async def update_location(self):
        """Update location."""
        if not self.serial:
            return
        self._update_location(await self.get(f"alms/{self.serial}/predictive/location"))

    async def get_location(self):
        """Update location and return it."""
        await self.update_location()
        return self.location

    async def update_network(self):
        """Update network."""
        if not self.serial:
            return
        self._update_network(await self.get(f"alms/{self.serial}/network"))

    async def get_network(self):
        """Update network and return it."""
        await self.update_network()
        return self.network

    async def update_next_mow(self):
        """Update next mow datetime."""
        if not self.serial:
            return
        self._update_next_mow(
            await self.get(f"alms/{self.serial}/predictive/nextcutting")
        )

    async def get_next_mow(self):
        """Update next_mow and return it."""
        await self.update_next_mow()
        return self.next_mow

    async def update_operating_data(self):
        """Update operating data."""
        if not self.serial:
            return
        self._update_operating_data(await self.get(f"alms/{self.serial}/operatingData"))

    async def get_operating_data(self):
        """Update operating_data and return it."""
        await self.update_operating_data()
        return self.operating_data
    # Paired fetch/cache helpers (continued); update_X() is a no-op
    # without a serial, get_X() refreshes then returns the cached value.

    async def update_predictive_calendar(self):
        """Update predictive_calendar."""
        if not self.serial:
            return
        self._update_predictive_calendar(
            await self.get(f"alms/{self.serial}/predictive/calendar")
        )

    async def get_predictive_calendar(self):
        """Update predictive_calendar and return it."""
        await self.update_predictive_calendar()
        return self.predictive_calendar

    async def update_predictive_schedule(self):
        """Update predictive_schedule."""
        if not self.serial:
            return
        self._update_predictive_schedule(
            await self.get(f"alms/{self.serial}/predictive/schedule")
        )

    async def get_predictive_schedule(self):
        """Update predictive_schedule and return it."""
        await self.update_predictive_schedule()
        return self.predictive_schedule

    async def update_security(self):
        """Update security."""
        if not self.serial:
            return
        self._update_security(await self.get(f"alms/{self.serial}/security"))

    async def get_security(self):
        """Update security and return it."""
        await self.update_security()
        return self.security

    async def update_setup(self):
        """Update setup."""
        if not self.serial:
            return
        self._update_setup(await self.get(f"alms/{self.serial}/setup"))

    async def get_setup(self):
        """Update setup and return it."""
        await self.update_setup()
        return self.setup
    async def update_state(self, force=False, longpoll=False, longpoll_timeout=120):
        """Update state. Can be both forced and with longpoll.

        Args:
            force (bool, optional): Force the state refresh, wakes up the mower. Defaults to False.
            longpoll (bool, optional): Do a longpoll. Defaults to False.
            longpoll_timeout (int, optional): Timeout of the longpoll. Defaults to 120, maximum is 300.

        Raises:
            ValueError: when the longpoll timeout is longer than 300 seconds.

        """
        if not self.serial:
            return
        path = f"alms/{self.serial}/state"
        if longpoll:
            if longpoll_timeout > 300:
                raise ValueError(
                    "Longpoll timeout must be less than or equal 300 seconds."
                )
            # "last" is the state value we already hold (0 when unknown);
            # presumably the server holds the request until it changes --
            # confirm against the Bosch API docs.
            last_state = 0
            if self.state:
                if self.state.state:
                    last_state = self.state.state
            path = f"{path}?longpoll=true&timeout={longpoll_timeout}&last={last_state}"
        if force:
            # Use "&" when the longpoll branch already started the query
            # string with "?", otherwise start it here.
            if longpoll:
                path = f"{path}&forceRefresh=true"
            else:
                path = f"{path}?forceRefresh=true"
        # Give the HTTP request 30s of slack beyond the server-side poll window.
        self._update_state(await self.get(path, timeout=longpoll_timeout + 30))
    async def get_state(self, force=False, longpoll=False, longpoll_timeout=120):
        """Update state and return it.

        Args:
            force (bool, optional): Force the state refresh, wakes up the mower. Defaults to False.
            longpoll (bool, optional): Do a longpoll. Defaults to False.
            longpoll_timeout (int, optional): Timeout of the longpoll. Defaults to 120, maximum is 300.

        Returns:
            the cached state after the refresh.

        Raises:
            ValueError: when the longpoll timeout is longer than 300 seconds.

        """
        await self.update_state(force, longpoll, longpoll_timeout)
        return self.state
    async def update_updates_available(self):
        """Update updates available."""
        if not self.serial:
            return
        # Only query the updates endpoint while the mower is known to be online.
        if self._online:
            self._update_updates_available(
                await self.get(f"alms/{self.serial}/updates")
            )

    async def get_updates_available(self):
        """Update updates_available and return it."""
        await self.update_updates_available()
        return self.update_available

    async def update_user(self):
        """Fetch the user record for the logged-in account and cache it."""
        self._update_user(await self.get(f"users/{self._userid}"))

    async def get_user(self):
        """Update user and return it."""
        await self.update_user()
        return self.user
async def login(self, attempts: int = 0):
"""Login to the api and store the context."""
response = await self._request(
method=Methods.GET,
path="authenticate/check",
data=DEFAULT_BODY,
headers=DEFAULT_HEADER,
auth=BasicAuth(self._username, self._password),
timeout=30,
attempts=attempts,
)
self._login(response)
if response is not None:
_LOGGER.debug("Logged in")
if not self._serial:
list_of_mowers = await self.get("alms")
self._serial = list_of_mowers[0].get("alm_sn")
_LOGGER.debug("Serial added")
return True
return False
    async def _request(  # noqa: C901
        self,
        method: Methods,
        path: str,
        data: dict = None,
        headers: dict = None,
        auth: BasicAuth = None,
        timeout: int = 30,
        attempts: int = 0,
    ):
        """Perform one HTTP request against the API, with retry bookkeeping.

        Args:
            method (Methods): HTTP method to be executed.
            path (str): url to call on top of base_url.
            data (dict, optional): if applicable, data to be sent, defaults to None.
            headers (dict, optional): headers to be included, defaults to None,
                which should be filled by the method.
            auth (BasicAuth or HTTPBasicAuth, optional): login specific attribute,
                defaults to None.
            timeout (int, optional): Timeout for the api call. Defaults to 30.
            attempts (int, optional): Number to keep track of retries; after three
                starts delaying, after five quits.

        Returns:
            Parsed JSON (or raw bytes) on HTTP 200, otherwise None.
        """
        # Crude backoff: attempts 3 and 4 wait 30s, attempt 5 gives up.
        if 3 <= attempts < 5:
            _LOGGER.info("Three or four attempts done, waiting 30 seconds")
            await asyncio.sleep(30)
        if attempts == 5:
            _LOGGER.warning("Five attempts done, please try again later")
            return None
        url = f"{self._api_url}{path}"
        # When the caller supplies no headers, attach the stored session context id.
        if not headers:
            headers = DEFAULT_HEADER.copy()
            headers["x-im-context-id"] = self._contextid
        _LOGGER.debug("Sending %s to %s", method.value, url)
        try:
            async with self._session.request(
                method=method.value,
                url=url,
                json=data if data else DEFAULT_BODY,
                headers=headers,
                auth=auth,
                timeout=timeout,
            ) as response:
                status = response.status
                _LOGGER.debug("status: %s", status)
                if status == 200:
                    # JSON responses are parsed; anything else is returned raw.
                    if response.content_type == CONTENT_TYPE_JSON:
                        resp = await response.json()
                        _LOGGER.debug("Response: %s", resp)
                        return resp  # await response.json()
                    return await response.content.read()
                if status == 204:
                    _LOGGER.debug("204: No content in response from server")
                    return None
                if status == 400:
                    _LOGGER.warning(
                        "400: Bad Request: won't retry. Message: %s",
                        (await response.content.read()).decode("UTF-8"),
                    )
                    return None
                if status == 401:
                    # 401 on the login endpoint itself means bad credentials;
                    # elsewhere the context expired, so log in again and retry once.
                    if path == "authenticate/check":
                        _LOGGER.info(
                            "401: Unauthorized, credentials are wrong, won't retry"
                        )
                        return None
                    _LOGGER.info("401: Unauthorized: logging in again")
                    login_result = await self.login()
                    if login_result:
                        return await self._request(
                            method=method,
                            path=path,
                            data=data,
                            timeout=timeout,
                            attempts=attempts + 1,
                        )
                    return None
                if status == 403:
                    _LOGGER.error("403: Forbidden: won't retry")
                    return None
                if status == 405:
                    _LOGGER.error(
                        "405: Method not allowed: %s is used but not allowed, try a different method for path %s, won't retry",
                        method,
                        path,
                    )
                    return None
                if status == 500:
                    _LOGGER.debug("500: Internal Server Error")
                    return None
                if status == 501:
                    _LOGGER.debug("501: Not implemented yet")
                    return None
                if status == 504:
                    # A gateway timeout on a long-poll URL just means "no updates".
                    if url.find("longpoll=true") > 0:
                        _LOGGER.debug("504: longpoll stopped, no updates")
                        return None
                # Any other status is surfaced as an aiohttp exception below.
                response.raise_for_status()
        except (asyncio.TimeoutError, ServerTimeoutError, HTTPGatewayTimeout) as exc:
            _LOGGER.info("%s: Timeout on Bosch servers, retrying", exc)
            return await self._request(
                method=method,
                path=path,
                data=data,
                timeout=timeout,
                attempts=attempts + 1,
            )
        except ClientOSError as exc:
            _LOGGER.debug("%s: Failed to update Indego status, longpoll timeout", exc)
            return None
        except (TooManyRedirects, ClientResponseError, SocketError) as exc:
            _LOGGER.error("%s: Failed %s to Indego, won't retry", exc, method.value)
            return None
        except asyncio.CancelledError:
            _LOGGER.debug("Task cancelled by task runner")
            return None
        except Exception as exc:  # top-level boundary: log and swallow
            _LOGGER.error("Request to %s gave a unhandled error: %s", url, exc)
            return None
async def get(self, path: str, timeout: int = 30):
"""Get implemented by the subclasses either synchronously or asynchronously.
Args:
path (str): url to call on top of base_url
timeout (int, optional): Timeout for the api call. Defaults to 30.
"""
return await self._request(method=Methods.GET, path=path, timeout=timeout)
async def put(self, path: str, data: dict, timeout: int = 30):
"""Put implemented by the subclasses either synchronously or asynchronously.
Args:
path (str): url to call on top of base_url
data (dict): data to put
timeout (int, optional): Timeout for the api call. Defaults to 30.
"""
return await self._request(
method=Methods.PUT, path=path, data=data, timeout=timeout
)
| 35.603618 | 127 | 0.571396 | import asyncio
import logging
from socket import error as SocketError
from typing import Any
import aiohttp
from aiohttp import (
ClientOSError,
ClientResponseError,
ServerTimeoutError,
TooManyRedirects,
)
from aiohttp.helpers import BasicAuth
from aiohttp.web_exceptions import HTTPGatewayTimeout
from . import __version__
from .const import (
COMMANDS,
CONTENT_TYPE_JSON,
DEFAULT_BODY,
DEFAULT_CALENDAR,
DEFAULT_HEADER,
DEFAULT_URL,
Methods,
)
from .indego_base_client import IndegoBaseClient
from .states import Calendar
_LOGGER = logging.getLogger(__name__)
class IndegoAsyncClient(IndegoBaseClient):
def __init__(
self,
username: str,
password: str,
serial: str = None,
map_filename: str = None,
api_url: str = DEFAULT_URL,
session: aiohttp.ClientSession = None,
):
super().__init__(username, password, serial, map_filename, api_url)
if session:
self._session = session
else:
self._session = aiohttp.ClientSession(raise_for_status=False)
async def __aenter__(self):
await self.start()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.close()
async def start(self):
if not self._logged_in:
await self.login()
async def close(self):
await self._session.close()
async def delete_alert(self, alert_index: int):
if not self._alerts_loaded:
raise ValueError("Alerts not loaded, please run update_alerts first.")
alert_id = self._get_alert_by_index(alert_index)
if alert_id:
return await self._request(Methods.DELETE, f"alerts/{alert_id}/")
async def delete_all_alerts(self):
if not self._alerts_loaded:
raise ValueError("Alerts not loaded, please run update_alerts first.")
if self.alerts_count > 0:
return await asyncio.gather(
*[
self._request(Methods.DELETE, f"alerts/{alert.alert_id}")
for alert in self.alerts
]
)
_LOGGER.info("No alerts to delete")
return None
async def download_map(self, filename: str = None):
if not self.serial:
return
if filename:
self.map_filename = filename
if not self.map_filename:
raise ValueError("No map filename defined.")
lawn_map = await self.get(f"alms/{self.serial}/map")
if lawn_map:
with open(self.map_filename, "wb") as file:
file.write(lawn_map)
async def put_alert_read(self, alert_index: int):
if not self._alerts_loaded:
raise ValueError("Alerts not loaded, please run update_alerts first.")
alert_id = self._get_alert_by_index(alert_index)
if alert_id:
return await self._request(
Methods.PUT, f"alerts/{alert_id}", data={"read_status": "read"}
)
async def put_all_alerts_read(self):
if not self._alerts_loaded:
raise ValueError("Alerts not loaded, please run update_alerts first.")
if self.alerts_count > 0:
return await asyncio.gather(
*[
self._request(
Methods.PUT,
f"alerts/{alert.alert_id}",
data={"read_status": "read"},
)
for alert in self.alerts
]
)
_LOGGER.info("No alerts to set to read")
return None
async def put_command(self, command: str):
if command in COMMANDS:
if not self.serial:
return
return await self.put(f"alms/{self.serial}/state", {"state": command})
raise ValueError("Wrong Command, use one of 'mow', 'pause', 'returnToDock'")
async def put_mow_mode(self, command: Any):
if command in ("true", "false", "True", "False") or isinstance(command, bool):
if not self.serial:
return
return await self.put(
f"alms/{self.serial}/predictive", {"enabled": command}
)
raise ValueError("Wrong Command, use one True or False")
async def put_predictive_cal(self, calendar: dict = DEFAULT_CALENDAR):
try:
Calendar(**calendar["cals"][0])
except TypeError as exc:
raise ValueError("Value for calendar is not valid") from exc
if not self.serial:
return
return await self.put(f"alms/{self.serial}/predictive/calendar", calendar)
async def update_alerts(self):
self._update_alerts(await self.get("alerts"))
async def get_alerts(self):
await self.update_alerts()
return self.alerts
async def update_all(self):
update_list = [
self.update_alerts(),
self.update_calendar(),
self.update_config(),
self.update_generic_data(),
self.update_last_completed_mow(),
self.update_location(),
self.update_network(),
self.update_next_mow(),
self.update_operating_data(),
self.update_predictive_calendar(),
self.update_predictive_schedule(),
self.update_security(),
self.update_setup(),
self.update_state(),
self.update_updates_available(),
self.update_user(),
]
results = await asyncio.gather(*update_list, return_exceptions=True)
for res in results:
if res:
_LOGGER.warning(res)
async def update_calendar(self):
if not self.serial:
return
self._update_calendar(await self.get(f"alms/{self.serial}/calendar"))
async def get_calendar(self):
await self.update_calendar()
return self.calendar
async def update_config(self):
if not self.serial:
return
self._update_config(await self.get(f"alms/{self.serial}/config"))
async def get_config(self):
await self.update_config()
return self.config
async def update_generic_data(self):
if not self.serial:
return
self._update_generic_data(await self.get(f"alms/{self.serial}"))
async def get_generic_data(self):
await self.update_generic_data()
return self.generic_data
async def update_last_completed_mow(self):
if not self.serial:
return
self._update_last_completed_mow(
await self.get(f"alms/{self.serial}/predictive/lastcutting")
)
async def get_last_completed_mow(self):
await self.update_last_completed_mow()
return self.last_completed_mow
async def update_location(self):
if not self.serial:
return
self._update_location(await self.get(f"alms/{self.serial}/predictive/location"))
async def get_location(self):
await self.update_location()
return self.location
async def update_network(self):
if not self.serial:
return
self._update_network(await self.get(f"alms/{self.serial}/network"))
async def get_network(self):
await self.update_network()
return self.network
async def update_next_mow(self):
if not self.serial:
return
self._update_next_mow(
await self.get(f"alms/{self.serial}/predictive/nextcutting")
)
async def get_next_mow(self):
await self.update_next_mow()
return self.next_mow
async def update_operating_data(self):
if not self.serial:
return
self._update_operating_data(await self.get(f"alms/{self.serial}/operatingData"))
async def get_operating_data(self):
await self.update_operating_data()
return self.operating_data
async def update_predictive_calendar(self):
if not self.serial:
return
self._update_predictive_calendar(
await self.get(f"alms/{self.serial}/predictive/calendar")
)
async def get_predictive_calendar(self):
await self.update_predictive_calendar()
return self.predictive_calendar
async def update_predictive_schedule(self):
if not self.serial:
return
self._update_predictive_schedule(
await self.get(f"alms/{self.serial}/predictive/schedule")
)
async def get_predictive_schedule(self):
await self.update_predictive_schedule()
return self.predictive_schedule
async def update_security(self):
if not self.serial:
return
self._update_security(await self.get(f"alms/{self.serial}/security"))
async def get_security(self):
await self.update_security()
return self.security
async def update_setup(self):
if not self.serial:
return
self._update_setup(await self.get(f"alms/{self.serial}/setup"))
async def get_setup(self):
await self.update_setup()
return self.setup
async def update_state(self, force=False, longpoll=False, longpoll_timeout=120):
if not self.serial:
return
path = f"alms/{self.serial}/state"
if longpoll:
if longpoll_timeout > 300:
raise ValueError(
"Longpoll timeout must be less than or equal 300 seconds."
)
last_state = 0
if self.state:
if self.state.state:
last_state = self.state.state
path = f"{path}?longpoll=true&timeout={longpoll_timeout}&last={last_state}"
if force:
if longpoll:
path = f"{path}&forceRefresh=true"
else:
path = f"{path}?forceRefresh=true"
self._update_state(await self.get(path, timeout=longpoll_timeout + 30))
async def get_state(self, force=False, longpoll=False, longpoll_timeout=120):
await self.update_state(force, longpoll, longpoll_timeout)
return self.state
async def update_updates_available(self):
if not self.serial:
return
if self._online:
self._update_updates_available(
await self.get(f"alms/{self.serial}/updates")
)
async def get_updates_available(self):
await self.update_updates_available()
return self.update_available
async def update_user(self):
self._update_user(await self.get(f"users/{self._userid}"))
async def get_user(self):
await self.update_user()
return self.user
async def login(self, attempts: int = 0):
response = await self._request(
method=Methods.GET,
path="authenticate/check",
data=DEFAULT_BODY,
headers=DEFAULT_HEADER,
auth=BasicAuth(self._username, self._password),
timeout=30,
attempts=attempts,
)
self._login(response)
if response is not None:
_LOGGER.debug("Logged in")
if not self._serial:
list_of_mowers = await self.get("alms")
self._serial = list_of_mowers[0].get("alm_sn")
_LOGGER.debug("Serial added")
return True
return False
async def _request(
self,
method: Methods,
path: str,
data: dict = None,
headers: dict = None,
auth: BasicAuth = None,
timeout: int = 30,
attempts: int = 0,
):
if 3 <= attempts < 5:
_LOGGER.info("Three or four attempts done, waiting 30 seconds")
await asyncio.sleep(30)
if attempts == 5:
_LOGGER.warning("Five attempts done, please try again later")
return None
url = f"{self._api_url}{path}"
if not headers:
headers = DEFAULT_HEADER.copy()
headers["x-im-context-id"] = self._contextid
_LOGGER.debug("Sending %s to %s", method.value, url)
try:
async with self._session.request(
method=method.value,
url=url,
json=data if data else DEFAULT_BODY,
headers=headers,
auth=auth,
timeout=timeout,
) as response:
status = response.status
_LOGGER.debug("status: %s", status)
if status == 200:
if response.content_type == CONTENT_TYPE_JSON:
resp = await response.json()
_LOGGER.debug("Response: %s", resp)
return resp
return await response.content.read()
if status == 204:
_LOGGER.debug("204: No content in response from server")
return None
if status == 400:
_LOGGER.warning(
"400: Bad Request: won't retry. Message: %s",
(await response.content.read()).decode("UTF-8"),
)
return None
if status == 401:
if path == "authenticate/check":
_LOGGER.info(
"401: Unauthorized, credentials are wrong, won't retry"
)
return None
_LOGGER.info("401: Unauthorized: logging in again")
login_result = await self.login()
if login_result:
return await self._request(
method=method,
path=path,
data=data,
timeout=timeout,
attempts=attempts + 1,
)
return None
if status == 403:
_LOGGER.error("403: Forbidden: won't retry")
return None
if status == 405:
_LOGGER.error(
"405: Method not allowed: %s is used but not allowed, try a different method for path %s, won't retry",
method,
path,
)
return None
if status == 500:
_LOGGER.debug("500: Internal Server Error")
return None
if status == 501:
_LOGGER.debug("501: Not implemented yet")
return None
if status == 504:
if url.find("longpoll=true") > 0:
_LOGGER.debug("504: longpoll stopped, no updates")
return None
response.raise_for_status()
except (asyncio.TimeoutError, ServerTimeoutError, HTTPGatewayTimeout) as exc:
_LOGGER.info("%s: Timeout on Bosch servers, retrying", exc)
return await self._request(
method=method,
path=path,
data=data,
timeout=timeout,
attempts=attempts + 1,
)
except ClientOSError as exc:
_LOGGER.debug("%s: Failed to update Indego status, longpoll timeout", exc)
return None
except (TooManyRedirects, ClientResponseError, SocketError) as exc:
_LOGGER.error("%s: Failed %s to Indego, won't retry", exc, method.value)
return None
except asyncio.CancelledError:
_LOGGER.debug("Task cancelled by task runner")
return None
except Exception as exc:
_LOGGER.error("Request to %s gave a unhandled error: %s", url, exc)
return None
async def get(self, path: str, timeout: int = 30):
return await self._request(method=Methods.GET, path=path, timeout=timeout)
async def put(self, path: str, data: dict, timeout: int = 30):
return await self._request(
method=Methods.PUT, path=path, data=data, timeout=timeout
)
| true | true |
1c3bc64c5a559fb9951515855548dfdd9a4e1a71 | 207 | py | Python | 2014-1-bcc-ip-L5-DanilloMoraes/2014-1-bcc-ip-LISTA5/Q5.py | DanilloMLS/lista_de_exercicios | 4eb9ecfafec6804472218c9020d2a3715760a5a5 | [
"MIT"
] | null | null | null | 2014-1-bcc-ip-L5-DanilloMoraes/2014-1-bcc-ip-LISTA5/Q5.py | DanilloMLS/lista_de_exercicios | 4eb9ecfafec6804472218c9020d2a3715760a5a5 | [
"MIT"
] | null | null | null | 2014-1-bcc-ip-L5-DanilloMoraes/2014-1-bcc-ip-LISTA5/Q5.py | DanilloMLS/lista_de_exercicios | 4eb9ecfafec6804472218c9020d2a3715760a5a5 | [
"MIT"
] | null | null | null | def inserir(v,valor,p):
u=[]
pos=p-1
if pos<0:
pos=0
for i in range(0,len(v)):
if i==pos:
u.append(valor)
u.append(v[i])
return u
| 15.923077 | 30 | 0.400966 | def inserir(v,valor,p):
u=[]
pos=p-1
if pos<0:
pos=0
for i in range(0,len(v)):
if i==pos:
u.append(valor)
u.append(v[i])
return u
| true | true |
1c3bc73806d3e8bea7d8c6d8f48e0c8eb5dfd69f | 2,185 | bzl | Python | apple/bundling/clang_support.bzl | pwnartist/rules_apple | cc8a1f2b576c2b757940f93388d04a7789d9824e | [
"Apache-2.0"
] | null | null | null | apple/bundling/clang_support.bzl | pwnartist/rules_apple | cc8a1f2b576c2b757940f93388d04a7789d9824e | [
"Apache-2.0"
] | null | null | null | apple/bundling/clang_support.bzl | pwnartist/rules_apple | cc8a1f2b576c2b757940f93388d04a7789d9824e | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supporting functions for Clang libraries."""
load("@build_bazel_rules_apple//apple/bundling:bundling_support.bzl",
"bundling_support")
load("@build_bazel_rules_apple//apple/bundling:binary_support.bzl",
"binary_support")
load("@build_bazel_rules_apple//apple/bundling:file_support.bzl",
"file_support")
load("@build_bazel_rules_apple//apple/bundling:platform_support.bzl",
"platform_support")
load("@build_bazel_rules_apple//apple:utils.bzl", "xcrun_action")
def _should_package_clang_runtime(ctx):
"""Returns whether the Clang runtime should be bundled."""
return ctx.var.get("apple_bundle_clang_rt") == "1"
def _register_runtime_lib_actions(ctx, binary_artifact):
"""Creates an archive with Clang runtime libraries.
Args:
ctx: The Skylark context.
binary_artifact: The bundle binary to be processed with clang's runtime
tool.
Returns:
A `File` object representing the ZIP file containing runtime libraries.
"""
zip_file = file_support.intermediate(ctx, "%{name}.clang_rt_libs.zip")
platform_support.xcode_env_action(
ctx,
inputs=[binary_artifact],
outputs=[zip_file],
executable=ctx.executable._clangrttool,
arguments=[
binary_artifact.path,
zip_file.path,
],
mnemonic="ClangRuntimeLibsCopy",
# This action needs to read the contents of the Xcode bundle.
no_sandbox=True,
)
return zip_file
clang_support = struct(
register_runtime_lib_actions=_register_runtime_lib_actions,
should_package_clang_runtime=_should_package_clang_runtime
)
| 34.140625 | 75 | 0.748741 |
load("@build_bazel_rules_apple//apple/bundling:bundling_support.bzl",
"bundling_support")
load("@build_bazel_rules_apple//apple/bundling:binary_support.bzl",
"binary_support")
load("@build_bazel_rules_apple//apple/bundling:file_support.bzl",
"file_support")
load("@build_bazel_rules_apple//apple/bundling:platform_support.bzl",
"platform_support")
load("@build_bazel_rules_apple//apple:utils.bzl", "xcrun_action")
def _should_package_clang_runtime(ctx):
return ctx.var.get("apple_bundle_clang_rt") == "1"
def _register_runtime_lib_actions(ctx, binary_artifact):
zip_file = file_support.intermediate(ctx, "%{name}.clang_rt_libs.zip")
platform_support.xcode_env_action(
ctx,
inputs=[binary_artifact],
outputs=[zip_file],
executable=ctx.executable._clangrttool,
arguments=[
binary_artifact.path,
zip_file.path,
],
mnemonic="ClangRuntimeLibsCopy",
no_sandbox=True,
)
return zip_file
clang_support = struct(
register_runtime_lib_actions=_register_runtime_lib_actions,
should_package_clang_runtime=_should_package_clang_runtime
)
| true | true |
1c3bc7749ef232c26ea63157494ddbc0cefeeb54 | 69,606 | py | Python | nipype/interfaces/afni/preprocess.py | dmordom/nipype | e815741ad68d63b7134b6db6225aabb0c38511ac | [
"BSD-3-Clause"
] | 1 | 2018-04-18T12:13:37.000Z | 2018-04-18T12:13:37.000Z | nipype/interfaces/afni/preprocess.py | ito-takuya/nipype | 9099a5809487b55868cdec82a719030419cbd6ba | [
"BSD-3-Clause"
] | null | null | null | nipype/interfaces/afni/preprocess.py | ito-takuya/nipype | 9099a5809487b55868cdec82a719030419cbd6ba | [
"BSD-3-Clause"
] | 1 | 2021-09-08T14:31:47.000Z | 2021-09-08T14:31:47.000Z | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft = python sts = 4 ts = 4 sw = 4 et:
"""Afni preprocessing interfaces
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import warnings
import os
import re
from ..base import (Directory, TraitedSpec,
traits, isdefined, File, InputMultiPath, Undefined)
from ...utils.filemanip import (load_json, save_json, split_filename)
from nipype.utils.filemanip import fname_presuffix
from .base import AFNICommand, AFNICommandInputSpec,\
AFNICommandOutputSpec
from nipype.interfaces.base import CommandLineInputSpec, CommandLine,\
OutputMultiPath
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class To3DInputSpec(AFNICommandInputSpec):
    """Input specification for the AFNI ``to3d`` DICOM-to-3D converter."""

    out_file = File(name_template="%s", desc='output image file name',
                    argstr='-prefix %s', name_source=["in_folder"])
    in_folder = Directory(desc='folder with DICOM images to convert',
                          argstr='%s/*.dcm',
                          position=-1,
                          mandatory=True,
                          exists=True)
    # AFNI dataset type, passed to to3d as a bare flag (e.g. -anat, -fim).
    filetype = traits.Enum('spgr', 'fse', 'epan', 'anat', 'ct', 'spct',
                           'pet', 'mra', 'bmap', 'diff',
                           'omri', 'abuc', 'fim', 'fith', 'fico', 'fitt', 'fift',
                           'fizt', 'fict', 'fibt',
                           'fibn', 'figt', 'fipt',
                           'fbuc', argstr='-%s', desc='type of datafile being converted')
    skipoutliers = traits.Bool(desc='skip the outliers check',
                               argstr='-skip_outliers')
    assumemosaic = traits.Bool(desc='assume that Siemens image is mosaic',
                               argstr='-assume_dicom_mosaic')
    datatype = traits.Enum('short', 'float', 'byte', 'complex',
                           desc='set output file datatype', argstr='-datum %s')
    # Timing parameters interpolated into to3d's -time:zt option.
    funcparams = traits.Str(desc='parameters for functional data',
                            argstr='-time:zt %s alt+z2')
class To3D(AFNICommand):
    """Create a 3D dataset from 2D image files using AFNI to3d command
    For complete details, see the `to3d Documentation
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/to3d.html>`_
    Examples
    ========
    >>> from nipype.interfaces import afni
    >>> To3D = afni.To3D()
    >>> To3D.inputs.datatype = 'float'
    >>> To3D.inputs.in_folder = '.'
    >>> To3D.inputs.out_file = 'dicomdir.nii'
    >>> To3D.inputs.filetype = "anat"
    >>> To3D.cmdline #doctest: +ELLIPSIS
    'to3d -datum float -anat -prefix dicomdir.nii ./*.dcm'
    >>> res = To3D.run() #doctest: +SKIP
    """
    # AFNI executable wrapped by this interface.
    _cmd = 'to3d'
    input_spec = To3DInputSpec
    output_spec = AFNICommandOutputSpec
class TShiftInputSpec(AFNICommandInputSpec):
    """Input specification for 3dTshift (slice-timing correction)."""

    in_file = File(desc='input file to 3dTShift',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_tshift", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
    # FIX: the original concatenated help fragments without punctuation,
    # rendering as "...manually set the TRYou can attach suffix...".
    tr = traits.Str(desc='manually set the TR. ' +
                    'You can attach suffix "s" for seconds or "ms" for milliseconds.',
                    argstr='-TR %s')
    # tzero and tslice are mutually exclusive alignment targets.
    tzero = traits.Float(desc='align each slice to given time offset',
                         argstr='-tzero %s',
                         xor=['tslice'])
    tslice = traits.Int(desc='align each slice to time offset of given slice',
                        argstr='-slice %s',
                        xor=['tzero'])
    ignore = traits.Int(desc='ignore the first set of points specified',
                        argstr='-ignore %s')
    interp = traits.Enum(('Fourier', 'linear', 'cubic', 'quintic', 'heptic'),
                         desc='different interpolation methods (see 3dTShift for details)' +
                         ' default = Fourier', argstr='-%s')
    tpattern = traits.Enum(('alt+z', 'alt+z2', 'alt-z',
                            'alt-z2', 'seq+z', 'seq-z'),
                           desc='use specified slice time pattern rather than one in header',
                           argstr='-tpattern %s')
    rlt = traits.Bool(desc='Before shifting, remove the mean and linear trend',
                      argstr="-rlt")
    rltplus = traits.Bool(desc='Before shifting,' +
                          ' remove the mean and linear trend and ' +
                          'later put back the mean',
                          argstr="-rlt+")
class TShift(AFNICommand):
    """Shifts voxel time series from input
    so that separate slices are aligned to the same
    temporal origin
    For complete details, see the `3dTshift Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTshift.html>
    Examples
    ========
    >>> from nipype.interfaces import afni as afni
    >>> tshift = afni.TShift()
    >>> tshift.inputs.in_file = 'functional.nii'
    >>> tshift.inputs.tpattern = 'alt+z'
    >>> tshift.inputs.tzero = 0.0
    >>> tshift.cmdline #doctest:
    '3dTshift -prefix functional_tshift -tpattern alt+z -tzero 0.0 functional.nii'
    >>> res = tshift.run() # doctest: +SKIP
    """
    # AFNI slice-timing correction program.
    _cmd = '3dTshift'
    input_spec = TShiftInputSpec
    output_spec = AFNICommandOutputSpec
class RefitInputSpec(CommandLineInputSpec):
    """Input specification for 3drefit (in-place header editing)."""

    # copyfile=True: 3drefit rewrites the dataset header in place, so the
    # interface must operate on a working copy of the input.
    in_file = File(desc='input file to 3drefit',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=True)
    deoblique = traits.Bool(desc='replace current transformation' +
                            ' matrix with cardinal matrix',
                            argstr='-deoblique')
    xorigin = traits.Str(desc='x distance for edge voxel offset',
                         argstr='-xorigin %s')
    yorigin = traits.Str(desc='y distance for edge voxel offset',
                         argstr='-yorigin %s')
    zorigin = traits.Str(desc='z distance for edge voxel offset',
                         argstr='-zorigin %s')
class Refit(CommandLine):
    """Changes some of the information inside a 3D dataset's header
    For complete details, see the `3drefit Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3drefit.html>
    Examples
    ========
    >>> from nipype.interfaces import afni as afni
    >>> refit = afni.Refit()
    >>> refit.inputs.in_file = 'structural.nii'
    >>> refit.inputs.deoblique = True
    >>> refit.cmdline
    '3drefit -deoblique structural.nii'
    >>> res = refit.run() # doctest: +SKIP
    """
    _cmd = '3drefit'
    input_spec = RefitInputSpec
    output_spec = AFNICommandOutputSpec

    def _list_outputs(self):
        """Report the modified input file as the interface output."""
        outputs = self.output_spec().get()
        # 3drefit edits the dataset in place (the spec copies the input first),
        # so the output file is simply the absolute path of the input.
        outputs["out_file"] = os.path.abspath(self.inputs.in_file)
        return outputs
class WarpInputSpec(AFNICommandInputSpec):
    """Input specification for 3dWarp (spatial transformation)."""

    in_file = File(desc='input file to 3dWarp',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_warp", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
    tta2mni = traits.Bool(desc='transform dataset from Talairach to MNI152',
                          argstr='-tta2mni')
    # FIX: help text previously misspelled "Talairach" as "Talaraich".
    mni2tta = traits.Bool(desc='transform dataset from MNI152 to Talairach',
                          argstr='-mni2tta')
    matparent = File(desc="apply transformation from 3dWarpDrive",
                     argstr="-matparent %s",
                     exists=True)
    deoblique = traits.Bool(desc='transform dataset from oblique to cardinal',
                            argstr='-deoblique')
    interp = traits.Enum(('linear', 'cubic', 'NN', 'quintic'),
                         desc='spatial interpolation methods [default = linear]',
                         argstr='-%s')
    gridset = File(desc="copy grid of specified dataset",
                   argstr="-gridset %s",
                   exists=True)
    zpad = traits.Int(desc="pad input dataset with N planes" +
                      " of zero on all sides.",
                      argstr="-zpad %d")
class Warp(AFNICommand):
    """Use 3dWarp for spatially transforming a dataset
    For complete details, see the `3dWarp Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dWarp.html>`_
    Examples
    ========
    >>> from nipype.interfaces import afni as afni
    >>> warp = afni.Warp()
    >>> warp.inputs.in_file = 'structural.nii'
    >>> warp.inputs.deoblique = True
    >>> warp.inputs.out_file = "trans.nii.gz"
    >>> warp.cmdline
    '3dWarp -deoblique -prefix trans.nii.gz structural.nii'
    >>> res = warp.run() # doctest: +SKIP
    """
    # AFNI spatial-transformation program.
    _cmd = '3dWarp'
    input_spec = WarpInputSpec
    output_spec = AFNICommandOutputSpec
class ResampleInputSpec(AFNICommandInputSpec):
    """Input specification for 3dresample (regridding/reorientation)."""

    in_file = File(desc='input file to 3dresample',
                   argstr='-inset %s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_resample", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
    orientation = traits.Str(desc='new orientation code',
                             argstr='-orient %s')
    resample_mode = traits.Enum('NN', 'Li', 'Cu', 'Bk',
                                argstr='-rmode %s',
                                desc="resampling method from set {'NN', 'Li', 'Cu', 'Bk'}. These are for 'Nearest Neighbor', 'Linear', 'Cubic' and 'Blocky' interpolation, respectively. Default is NN.")
    voxel_size = traits.Tuple(*[traits.Float()]*3,
                              argstr='-dxyz %f %f %f',
                              desc="resample to new dx, dy and dz")
    # NOTE(review): uses traits.File (no exists=True) unlike the other file
    # traits in this module — confirm whether the reference file should be
    # required to exist.
    master = traits.File(argstr='-master %s',
                         desc='align dataset grid to a reference file')
class Resample(AFNICommand):
    """Resample or reorient an image using AFNI 3dresample command
    For complete details, see the `3dresample Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dresample.html>`_
    Examples
    ========
    >>> from nipype.interfaces import afni as afni
    >>> resample = afni.Resample()
    >>> resample.inputs.in_file = 'functional.nii'
    >>> resample.inputs.orientation= 'RPI'
    >>> resample.inputs.outputtype = "NIFTI"
    >>> resample.cmdline
    '3dresample -orient RPI -prefix functional_resample.nii -inset functional.nii'
    >>> res = resample.run() # doctest: +SKIP
    """
    # AFNI regridding/reorientation program.
    _cmd = '3dresample'
    input_spec = ResampleInputSpec
    output_spec = AFNICommandOutputSpec
class AutoTcorrelateInputSpec(AFNICommandInputSpec):
    """Input specification for 3dAutoTcorrelate (voxel-pair correlation)."""

    in_file = File(desc='timeseries x space (volume or surface) file',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    # FIX: help text previously misspelled "polynomial" as "polynomical".
    polort = traits.Int(
        desc='Remove polynomial trend of order m or -1 for no detrending',
        argstr="-polort %d")
    eta2 = traits.Bool(desc='eta^2 similarity',
                       argstr="-eta2")
    mask = File(exists=True, desc="mask of voxels",
                argstr="-mask %s")
    # mask_only_targets and mask_source are mutually exclusive options.
    mask_only_targets = traits.Bool(desc="use mask only on targets voxels",
                                    argstr="-mask_only_targets",
                                    xor=['mask_source'])
    mask_source = File(exists=True,
                       desc="mask for source voxels",
                       argstr="-mask_source %s",
                       xor=['mask_only_targets'])
    out_file = File(name_template="%s_similarity_matrix.1D", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
class AutoTcorrelate(AFNICommand):
    """Computes the correlation coefficient between the time series of each
    pair of voxels in the input dataset, and stores the output into a
    new anatomical bucket dataset [scaled to shorts to save memory space].
    Examples
    ========
    >>> from nipype.interfaces import afni as afni
    >>> corr = afni.AutoTcorrelate()
    >>> corr.inputs.in_file = 'functional.nii'
    >>> corr.inputs.polort = -1
    >>> corr.inputs.eta2 = True
    >>> corr.inputs.mask = 'mask.nii'
    >>> corr.inputs.mask_only_targets = True
    >>> corr.cmdline # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    '3dAutoTcorrelate -eta2 -mask mask.nii -mask_only_targets -prefix functional_similarity_matrix.1D -polort -1 functional.nii'
    >>> res = corr.run() # doctest: +SKIP
    """
    input_spec = AutoTcorrelateInputSpec
    output_spec = AFNICommandOutputSpec
    _cmd = '3dAutoTcorrelate'

    def _overload_extension(self, value, name=None):
        """Force a .1D extension unless the name already ends in .1D/.nii/.nii.gz."""
        path, base, ext = split_filename(value)
        # Only 1D and NIfTI extensions are kept; anything else gets ".1D"
        # appended so the similarity matrix is written as a 1D file.
        if ext.lower() not in [".1d", ".nii.gz", ".nii"]:
            ext = ext + ".1D"
        return os.path.join(path, base + ext)
class TStatInputSpec(AFNICommandInputSpec):
    """Input specification for 3dTstat (voxel-wise time-series statistics)."""

    in_file = File(desc='input file to 3dTstat',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_tstat", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
    mask = File(desc='mask file',
                argstr='-mask %s',
                exists=True)
    # Free-form statistic selection flags passed straight to 3dTstat.
    options = traits.Str(desc='selected statistical output',
                         argstr='%s')
class TStat(AFNICommand):
    """Compute voxel-wise statistics using AFNI 3dTstat command
    For complete details, see the `3dTstat Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTstat.html>`_
    Examples
    ========
    >>> from nipype.interfaces import afni as afni
    >>> tstat = afni.TStat()
    >>> tstat.inputs.in_file = 'functional.nii'
    >>> tstat.inputs.args= '-mean'
    >>> tstat.inputs.out_file = "stats"
    >>> tstat.cmdline
    '3dTstat -mean -prefix stats functional.nii'
    >>> res = tstat.run() # doctest: +SKIP
    """
    # AFNI time-series statistics program.
    _cmd = '3dTstat'
    input_spec = TStatInputSpec
    output_spec = AFNICommandOutputSpec
class DetrendInputSpec(AFNICommandInputSpec):
    """Input specification for 3dDetrend (least-squares detrending)."""

    in_file = File(desc='input file to 3dDetrend',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_detrend", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
class Detrend(AFNICommand):
    """This program removes components from voxel time series using
    linear least squares

    For complete details, see the `3dDetrend Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dDetrend.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> detrend = afni.Detrend()
    >>> detrend.inputs.in_file = 'functional.nii'
    >>> detrend.inputs.args = '-polort 2'
    >>> detrend.inputs.outputtype = "AFNI"
    >>> detrend.cmdline
    '3dDetrend -polort 2 -prefix functional_detrend functional.nii'
    >>> res = detrend.run()  # doctest: +SKIP

    """

    _cmd = '3dDetrend'  # AFNI executable invoked by this interface
    input_spec = DetrendInputSpec
    output_spec = AFNICommandOutputSpec
class DespikeInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`Despike` (AFNI 3dDespike)."""

    in_file = File(desc='input file to 3dDespike',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_despike", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
class Despike(AFNICommand):
    """Removes 'spikes' from the 3D+time input dataset

    For complete details, see the `3dDespike Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dDespike.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> despike = afni.Despike()
    >>> despike.inputs.in_file = 'functional.nii'
    >>> despike.cmdline
    '3dDespike -prefix functional_despike functional.nii'
    >>> res = despike.run()  # doctest: +SKIP

    """

    _cmd = '3dDespike'  # AFNI executable invoked by this interface
    input_spec = DespikeInputSpec
    output_spec = AFNICommandOutputSpec
class AutomaskInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`Automask` (AFNI 3dAutomask)."""

    in_file = File(desc='input file to 3dAutomask',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_mask", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
    # -apply_prefix additionally writes the input with the mask applied
    # (i.e. a skull-stripped brain image).
    brain_file = File(name_template="%s_masked",
                      desc="output file from 3dAutomask",
                      argstr='-apply_prefix %s',
                      name_source="in_file")
    clfrac = traits.Float(desc='sets the clip level fraction' +
                          ' (must be 0.1-0.9). ' +
                          'A small value will tend to make the mask larger [default = 0.5].',
                          argstr="-clfrac %s")
    dilate = traits.Int(desc='dilate the mask outwards',
                        argstr="-dilate %s")
    erode = traits.Int(desc='erode the mask inwards',
                       argstr="-erode %s")
class AutomaskOutputSpec(TraitedSpec):
    """Output specification for :class:`Automask`."""

    out_file = File(desc='mask file',
                    exists=True)
    brain_file = File(desc='brain file (skull stripped)', exists=True)
class Automask(AFNICommand):
    """Create a brain-only mask of the image using AFNI 3dAutomask command

    For complete details, see the `3dAutomask Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dAutomask.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> automask = afni.Automask()
    >>> automask.inputs.in_file = 'functional.nii'
    >>> automask.inputs.dilate = 1
    >>> automask.inputs.outputtype = "NIFTI"
    >>> automask.cmdline  # doctest: +ELLIPSIS
    '3dAutomask -apply_prefix functional_masked.nii -dilate 1 -prefix functional_mask.nii functional.nii'
    >>> res = automask.run()  # doctest: +SKIP

    """

    _cmd = '3dAutomask'  # AFNI executable invoked by this interface
    input_spec = AutomaskInputSpec
    output_spec = AutomaskOutputSpec
class VolregInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`Volreg` (AFNI 3dvolreg)."""

    in_file = File(desc='input file to 3dvolreg',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_volreg", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
    basefile = File(desc='base file for registration',
                    argstr='-base %s',
                    position=-6,
                    exists=True)
    zpad = traits.Int(desc='Zeropad around the edges' +
                      ' by \'n\' voxels during rotations',
                      argstr='-zpad %d',
                      position=-5)
    # 1D text outputs produced alongside the registered image.
    md1d_file = File(name_template='%s_md.1D', desc='max displacement output file',
                     argstr='-maxdisp1D %s', name_source="in_file",
                     keep_extension=True, position=-4)
    oned_file = File(name_template='%s.1D', desc='1D movement parameters output file',
                     argstr='-1Dfile %s',
                     name_source="in_file",
                     keep_extension=True)
    verbose = traits.Bool(desc='more detailed description of the process',
                          argstr='-verbose')
    timeshift = traits.Bool(desc='time shift to mean slice time offset',
                            argstr='-tshift 0')
    copyorigin = traits.Bool(desc='copy base file origin coords to output',
                             argstr='-twodup')
class VolregOutputSpec(TraitedSpec):
    """Output specification for :class:`Volreg`."""

    out_file = File(desc='registered file', exists=True)
    md1d_file = File(desc='max displacement info file', exists=True)
    oned_file = File(desc='movement parameters info file', exists=True)
class Volreg(AFNICommand):
    """Register input volumes to a base volume using AFNI 3dvolreg command

    For complete details, see the `3dvolreg Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dvolreg.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> volreg = afni.Volreg()
    >>> volreg.inputs.in_file = 'functional.nii'
    >>> volreg.inputs.args = '-Fourier -twopass'
    >>> volreg.inputs.zpad = 4
    >>> volreg.inputs.outputtype = "NIFTI"
    >>> volreg.cmdline  # doctest: +ELLIPSIS
    '3dvolreg -Fourier -twopass -1Dfile functional.1D -prefix functional_volreg.nii -zpad 4 -maxdisp1D functional_md.1D functional.nii'
    >>> res = volreg.run()  # doctest: +SKIP

    """

    _cmd = '3dvolreg'  # AFNI executable invoked by this interface
    input_spec = VolregInputSpec
    output_spec = VolregOutputSpec
class MergeInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`Merge` (AFNI 3dmerge)."""

    in_files = InputMultiPath(
        File(desc='input file to 3dmerge', exists=True),
        argstr='%s',
        position=-1,
        mandatory=True,
        copyfile=False)
    # BUG FIX: name_source previously referenced "in_file", a trait that
    # does not exist on this spec (the input trait is "in_files"), so the
    # automatic output filename could never be generated.
    out_file = File(name_template="%s_merge", desc='output image file name',
                    argstr='-prefix %s', name_source="in_files")
    doall = traits.Bool(desc='apply options to all sub-bricks in dataset',
                        argstr='-doall')
    blurfwhm = traits.Int(desc='FWHM blur value (mm)',
                          argstr='-1blur_fwhm %d',
                          units='mm')
class Merge(AFNICommand):
    """Merge or edit volumes using AFNI 3dmerge command

    For complete details, see the `3dmerge Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dmerge.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> merge = afni.Merge()
    >>> merge.inputs.in_files = ['functional.nii', 'functional2.nii']
    >>> merge.inputs.blurfwhm = 4
    >>> merge.inputs.doall = True
    >>> merge.inputs.out_file = 'e7.nii'
    >>> res = merge.run()  # doctest: +SKIP

    """

    _cmd = '3dmerge'  # AFNI executable invoked by this interface
    input_spec = MergeInputSpec
    output_spec = AFNICommandOutputSpec
class CopyInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`Copy` (AFNI 3dcopy)."""

    in_file = File(desc='input file to 3dcopy',
                   argstr='%s',
                   position=-2,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_copy", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
class Copy(AFNICommand):
    """Copies an image of one type to an image of the same
    or different type using 3dcopy command

    For complete details, see the `3dcopy Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcopy.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> copy = afni.Copy()
    >>> copy.inputs.in_file = 'functional.nii'
    >>> copy.inputs.out_file = 'new_func.nii'
    >>> res = copy.run()  # doctest: +SKIP

    """

    _cmd = '3dcopy'  # AFNI executable invoked by this interface
    input_spec = CopyInputSpec
    output_spec = AFNICommandOutputSpec
class FourierInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`Fourier` (AFNI 3dFourier)."""

    in_file = File(desc='input file to 3dFourier',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_fourier", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
    # Band edges in Hz; both are mandatory for 3dFourier.
    lowpass = traits.Float(desc='lowpass',
                           argstr='-lowpass %f',
                           position=0,
                           mandatory=True)
    highpass = traits.Float(desc='highpass',
                            argstr='-highpass %f',
                            position=1,
                            mandatory=True)
class Fourier(AFNICommand):
    """Program to lowpass and/or highpass each voxel time series in a
    dataset, via the FFT

    For complete details, see the `3dFourier Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dfourier.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> fourier = afni.Fourier()
    >>> fourier.inputs.in_file = 'functional.nii'
    >>> fourier.inputs.args = '-retrend'
    >>> fourier.inputs.highpass = 0.005
    >>> fourier.inputs.lowpass = 0.1
    >>> res = fourier.run()  # doctest: +SKIP

    """

    _cmd = '3dFourier'  # AFNI executable invoked by this interface
    input_spec = FourierInputSpec
    output_spec = AFNICommandOutputSpec
class BandpassInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`Bandpass` (AFNI 3dBandpass)."""

    in_file = File(
        desc='input file to 3dBandpass',
        argstr='%s',
        position=-1,
        mandatory=True,
        exists=True,
        copyfile=False)
    out_file = File(
        name_template='%s_bp',
        desc='output file from 3dBandpass',
        argstr='-prefix %s',
        position=1,
        name_source='in_file',
        genfile=True)
    # Band edges in Hz; 3dBandpass takes them as positional trailing args
    # in the order: highpass lowpass input.
    lowpass = traits.Float(
        desc='lowpass',
        argstr='%f',
        position=-2,
        mandatory=True)
    highpass = traits.Float(
        desc='highpass',
        argstr='%f',
        position=-3,
        mandatory=True)
    mask = File(
        desc='mask file',
        position=2,
        argstr='-mask %s',
        exists=True)
    despike = traits.Bool(
        argstr='-despike',
        desc="""Despike each time series before other processing.
        ++ Hopefully, you don't actually need to do this,
        which is why it is optional.""")
    orthogonalize_file = InputMultiPath(
        File(exists=True),
        argstr="-ort %s",
        desc="""Also orthogonalize input to columns in f.1D
        ++ Multiple '-ort' options are allowed.""")
    orthogonalize_dset = File(
        exists=True,
        argstr="-dsort %s",
        desc="""Orthogonalize each voxel to the corresponding
        voxel time series in dataset 'fset', which must
        have the same spatial and temporal grid structure
        as the main input dataset.
        ++ At present, only one '-dsort' option is allowed.""")
    no_detrend = traits.Bool(
        argstr='-nodetrend',
        desc="""Skip the quadratic detrending of the input that
        occurs before the FFT-based bandpassing.
        ++ You would only want to do this if the dataset
        had been detrended already in some other program.""")
    tr = traits.Float(
        argstr="-dt %f",
        desc="set time step (TR) in sec [default=from dataset header]")
    nfft = traits.Int(
        argstr='-nfft %d',
        desc="set the FFT length [must be a legal value]")
    normalize = traits.Bool(
        argstr='-norm',
        desc="""Make all output time series have L2 norm = 1
        ++ i.e., sum of squares = 1""")
    automask = traits.Bool(
        argstr='-automask',
        desc="Create a mask from the input dataset")
    blur = traits.Float(
        argstr='-blur %f',
        desc="""Blur (inside the mask only) with a filter
        width (FWHM) of 'fff' millimeters.""")
    localPV = traits.Float(
        argstr='-localPV %f',
        desc="""Replace each vector by the local Principal Vector
        (AKA first singular vector) from a neighborhood
        of radius 'rrr' millimiters.
        ++ Note that the PV time series is L2 normalized.
        ++ This option is mostly for Bob Cox to have fun with.""")
    notrans = traits.Bool(
        argstr='-notrans',
        desc="""Don't check for initial positive transients in the data:
        ++ The test is a little slow, so skipping it is OK,
        if you KNOW the data time series are transient-free.""")
class Bandpass(AFNICommand):
    """Program to lowpass and/or highpass each voxel time series in a
    dataset, offering more/different options than Fourier

    For complete details, see the `3dBandpass Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dbandpass.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> from nipype.testing import example_data
    >>> bandpass = afni.Bandpass()
    >>> bandpass.inputs.in_file = example_data('functional.nii')
    >>> bandpass.inputs.highpass = 0.005
    >>> bandpass.inputs.lowpass = 0.1
    >>> res = bandpass.run()  # doctest: +SKIP

    """

    _cmd = '3dBandpass'  # AFNI executable invoked by this interface
    input_spec = BandpassInputSpec
    output_spec = AFNICommandOutputSpec
class ZCutUpInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`ZCutUp` (AFNI 3dZcutup).

    NOTE(review): the name_template "%s_zcupup" looks like a typo for
    "%s_zcutup", but changing it would alter generated filenames that
    downstream code may rely on — confirm before renaming.
    """

    in_file = File(desc='input file to 3dZcutup',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_zcupup", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
    keep = traits.Str(desc='slice range to keep in output',
                      argstr='-keep %s')
class ZCutUp(AFNICommand):
    """Cut z-slices from a volume using AFNI 3dZcutup command

    For complete details, see the `3dZcutup Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dZcutup.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> zcutup = afni.ZCutUp()
    >>> zcutup.inputs.in_file = 'functional.nii'
    >>> zcutup.inputs.out_file = 'functional_zcutup.nii'
    >>> zcutup.inputs.keep= '0 10'
    >>> res = zcutup.run()  # doctest: +SKIP

    """

    _cmd = '3dZcutup'  # AFNI executable invoked by this interface
    input_spec = ZCutUpInputSpec
    output_spec = AFNICommandOutputSpec
class AllineateInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`Allineate` (AFNI 3dAllineate)."""

    in_file = File(desc='input file to 3dAllineate',
                   argstr='-source %s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    reference = File(
        exists=True,
        argstr='-base %s',
        desc="""file to be used as reference, the first volume will be used
        if not given the reference will be the first volume of in_file.""")
    # BUG FIX: name_source previously held the template string
    # '%s_allineate' instead of the name of the trait to derive the
    # filename from; the template belongs in name_template and the
    # source trait is 'in_file'.
    out_file = File(
        desc='output file from 3dAllineate',
        argstr='-prefix %s',
        position=-2,
        name_template='%s_allineate',
        name_source='in_file',
        genfile=True)
    out_param_file = File(
        argstr='-1Dparam_save %s',
        desc='Save the warp parameters in ASCII (.1D) format.')
    in_param_file = File(
        exists=True,
        argstr='-1Dparam_apply %s',
        desc="""Read warp parameters from file and apply them to
        the source dataset, and produce a new dataset""")
    out_matrix = File(
        argstr='-1Dmatrix_save %s',
        desc='Save the transformation matrix for each volume.')
    in_matrix = File(desc='matrix to align input file',
                     argstr='-1Dmatrix_apply %s',
                     position=-3)

    # Cost functionals accepted by 3dAllineate (long and short names).
    _cost_funcs = [
        'leastsq', 'ls',
        'mutualinfo', 'mi',
        'corratio_mul', 'crM',
        'norm_mutualinfo', 'nmi',
        'hellinger', 'hel',
        'corratio_add', 'crA',
        'corratio_uns', 'crU']

    cost = traits.Enum(
        *_cost_funcs, argstr='-cost %s',
        desc="""Defines the 'cost' function that defines the matching
        between the source and the base""")
    _interp_funcs = [
        'nearestneighbour', 'linear', 'cubic', 'quintic', 'wsinc5']
    # 'wsinc5' is valid only for the final resampling, hence the slice.
    interpolation = traits.Enum(
        *_interp_funcs[:-1], argstr='-interp %s',
        desc='Defines interpolation method to use during matching')
    final_interpolation = traits.Enum(
        *_interp_funcs, argstr='-final %s',
        desc='Defines interpolation method used to create the output dataset')

    # TECHNICAL OPTIONS (used for fine control of the program):
    nmatch = traits.Int(
        argstr='-nmatch %d',
        desc='Use at most n scattered points to match the datasets.')
    no_pad = traits.Bool(
        argstr='-nopad',
        desc='Do not use zero-padding on the base image.')
    zclip = traits.Bool(
        argstr='-zclip',
        desc='Replace negative values in the input datasets (source & base) with zero.')
    convergence = traits.Float(
        argstr='-conv %f',
        desc='Convergence test in millimeters (default 0.05mm).')
    usetemp = traits.Bool(argstr='-usetemp', desc='temporary file use')
    check = traits.List(
        traits.Enum(*_cost_funcs), argstr='-check %s',
        desc="""After cost functional optimization is done, start at the
        final parameters and RE-optimize using this new cost functions.
        If the results are too different, a warning message will be
        printed. However, the final parameters from the original
        optimization will be used to create the output dataset.""")

    # ** PARAMETERS THAT AFFECT THE COST OPTIMIZATION STRATEGY **
    one_pass = traits.Bool(
        argstr='-onepass',
        desc="""Use only the refining pass -- do not try a coarse
        resolution pass first.  Useful if you know that only
        small amounts of image alignment are needed.""")
    two_pass = traits.Bool(
        argstr='-twopass',
        desc="""Use a two pass alignment strategy for all volumes, searching
        for a large rotation+shift and then refining the alignment.""")
    two_blur = traits.Float(
        argstr='-twoblur',
        desc='Set the blurring radius for the first pass in mm.')
    two_first = traits.Bool(
        argstr='-twofirst',
        desc="""Use -twopass on the first image to be registered, and
        then on all subsequent images from the source dataset,
        use results from the first image's coarse pass to start
        the fine pass.""")
    two_best = traits.Int(
        argstr='-twobest %d',
        desc="""In the coarse pass, use the best 'bb' set of initial
        points to search for the starting point for the fine
        pass.  If bb==0, then no search is made for the best
        starting point, and the identity transformation is
        used as the starting point.  [Default=5; min=0 max=11]""")
    fine_blur = traits.Float(
        argstr='-fineblur %f',
        desc="""Set the blurring radius to use in the fine resolution
        pass to 'x' mm.  A small amount (1-2 mm?) of blurring at
        the fine step may help with convergence, if there is
        some problem, especially if the base volume is very noisy.
        [Default == 0 mm = no blurring at the final alignment pass]""")
    center_of_mass = traits.Str(
        argstr='-cmass%s',
        desc='Use the center-of-mass calculation to bracket the shifts.')
    autoweight = traits.Str(
        argstr='-autoweight%s',
        desc="""Compute a weight function using the 3dAutomask
        algorithm plus some blurring of the base image.""")
    automask = traits.Int(
        argstr='-automask+%d',
        desc="""Compute a mask function, set a value for dilation or 0.""")
    autobox = traits.Bool(
        argstr='-autobox',
        desc="""Expand the -automask function to enclose a rectangular
        box that holds the irregular mask.""")
    nomask = traits.Bool(
        argstr='-nomask',
        desc="""Don't compute the autoweight/mask; if -weight is not
        also used, then every voxel will be counted equally.""")
    weight_file = File(
        argstr='-weight %s', exists=True,
        desc="""Set the weighting for each voxel in the base dataset;
        larger weights mean that voxel count more in the cost function.
        Must be defined on the same grid as the base dataset""")
    out_weight_file = traits.File(
        argstr='-wtprefix %s',
        desc="""Write the weight volume to disk as a dataset""")
    source_mask = File(
        exists=True, argstr='-source_mask %s',
        desc='mask the input dataset')
    source_automask = traits.Int(
        argstr='-source_automask+%d',
        desc='Automatically mask the source dataset with dilation or 0.')
    warp_type = traits.Enum(
        'shift_only', 'shift_rotate', 'shift_rotate_scale', 'affine_general',
        argstr='-warp %s',
        desc='Set the warp type.')
    warpfreeze = traits.Bool(
        argstr='-warpfreeze',
        desc='Freeze the non-rigid body parameters after first volume.')
    replacebase = traits.Bool(
        argstr='-replacebase',
        desc="""If the source has more than one volume, then after the first
        volume is aligned to the base""")
    replacemeth = traits.Enum(
        *_cost_funcs,
        argstr='-replacemeth %s',
        desc="""After first volume is aligned, switch method for later volumes.
        For use with '-replacebase'.""")
    epi = traits.Bool(
        argstr='-EPI',
        desc="""Treat the source dataset as being composed of warped
        EPI slices, and the base as comprising anatomically
        'true' images.  Only phase-encoding direction image
        shearing and scaling will be allowed with this option.""")
    master = File(
        exists=True, argstr='-master %s',
        desc='Write the output dataset on the same grid as this file')
    newgrid = traits.Float(
        argstr='-newgrid %f',
        desc='Write the output dataset using isotropic grid spacing in mm')

    # Non-linear experimental
    _nwarp_types = ['bilinear',
                    'cubic', 'quintic', 'heptic', 'nonic',
                    'poly3', 'poly5', 'poly7', 'poly9']  # same non-hellenistic
    nwarp = traits.Enum(
        *_nwarp_types, argstr='-nwarp %s',
        desc='Experimental nonlinear warping: bilinear or legendre poly.')
    _dirs = ['X', 'Y', 'Z', 'I', 'J', 'K']
    # Rendered one flag per axis by Allineate._format_arg.
    nwarp_fixmot = traits.List(
        traits.Enum(*_dirs),
        argstr='-nwarp_fixmot%s',
        desc='To fix motion along directions.')
    nwarp_fixdep = traits.List(
        traits.Enum(*_dirs),
        argstr='-nwarp_fixdep%s',
        desc='To fix non-linear warp dependency along directions.')
class AllineateOutputSpec(TraitedSpec):
    """Output specification for :class:`Allineate`."""

    out_file = File(desc='output image file name')
    # NOTE(review): 'matrix' is declared here but Allineate._list_outputs
    # never populates it — confirm whether it should mirror out_matrix.
    matrix = File(desc='matrix to align input file')
class Allineate(AFNICommand):
    """Program to align one dataset (the 'source') to a base dataset

    For complete details, see the `3dAllineate Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dAllineate.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> allineate = afni.Allineate()
    >>> allineate.inputs.in_file = 'functional.nii'
    >>> allineate.inputs.out_file= 'functional_allineate.nii'
    >>> allineate.inputs.in_matrix= 'cmatrix.mat'
    >>> res = allineate.run()  # doctest: +SKIP

    """

    _cmd = '3dAllineate'
    input_spec = AllineateInputSpec
    output_spec = AllineateOutputSpec

    def _format_arg(self, name, trait_spec, value):
        """Render list-valued nwarp traits as one flag per axis
        (e.g. ['X', 'Y'] -> '-nwarp_fixmotX -nwarp_fixmotY')."""
        if name in ('nwarp_fixmot', 'nwarp_fixdep'):
            return ' '.join([trait_spec.argstr % v for v in value])
        return super(Allineate, self)._format_arg(name, trait_spec, value)

    def _list_outputs(self):
        """Resolve the absolute path of the registered output image."""
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file):
            # BUG FIX: this branch previously read self.inputs.suffix,
            # a trait that AllineateInputSpec does not define, so every
            # run without an explicit out_file crashed here.  Use the
            # interface's standard '_allineate' suffix instead.
            outputs['out_file'] = self._gen_fname(self.inputs.in_file,
                                                  suffix='_allineate')
        else:
            outputs['out_file'] = os.path.abspath(self.inputs.out_file)
        return outputs

    def _gen_filename(self, name):
        if name == 'out_file':
            return self._list_outputs()[name]
        # Any other trait has no generated filename.
        return None
class MaskaveInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`Maskave` (AFNI 3dmaskave).

    The output is captured via shell redirection ('> file'), not an
    AFNI -prefix option.
    """

    in_file = File(desc='input file to 3dmaskave',
                   argstr='%s',
                   position=-2,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_maskave.1D", desc='output image file name',
                    keep_extension=True,
                    argstr="> %s", name_source="in_file", position=-1)
    # NOTE(review): the desc strings for mask and quiet look copy-pasted
    # from another trait ('matrix to align input file') — confirm intent.
    mask = File(desc='matrix to align input file',
                argstr='-mask %s',
                position=1,
                exists=True)
    quiet = traits.Bool(desc='matrix to align input file',
                        argstr='-quiet',
                        position=2)
class Maskave(AFNICommand):
    """Computes average of all voxels in the input dataset
    which satisfy the criterion in the options list

    For complete details, see the `3dmaskave Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dmaskave.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> maskave = afni.Maskave()
    >>> maskave.inputs.in_file = 'functional.nii'
    >>> maskave.inputs.mask= 'seed_mask.nii'
    >>> maskave.inputs.quiet= True
    >>> maskave.cmdline  # doctest: +ELLIPSIS
    '3dmaskave -mask seed_mask.nii -quiet functional.nii > functional_maskave.1D'
    >>> res = maskave.run()  # doctest: +SKIP

    """

    _cmd = '3dmaskave'  # AFNI executable invoked by this interface
    input_spec = MaskaveInputSpec
    output_spec = AFNICommandOutputSpec
class SkullStripInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`SkullStrip` (AFNI 3dSkullStrip)."""

    in_file = File(desc='input file to 3dSkullStrip',
                   argstr='-input %s',
                   position=1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_skullstrip", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
class SkullStrip(AFNICommand):
    """A program to extract the brain from surrounding
    tissue from MRI T1-weighted images

    For complete details, see the `3dSkullStrip Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dSkullStrip.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> skullstrip = afni.SkullStrip()
    >>> skullstrip.inputs.in_file = 'functional.nii'
    >>> skullstrip.inputs.args = '-o_ply'
    >>> res = skullstrip.run()  # doctest: +SKIP

    """

    _cmd = '3dSkullStrip'  # AFNI executable invoked by this interface
    input_spec = SkullStripInputSpec
    output_spec = AFNICommandOutputSpec
class TCatInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`TCat` (AFNI 3dTcat)."""

    in_files = InputMultiPath(
        File(exists=True),
        desc='input file to 3dTcat',
        argstr=' %s',
        position=-1,
        mandatory=True,
        copyfile=False)
    # BUG FIX: name_source previously referenced "in_file", a trait that
    # does not exist on this spec (the input trait is "in_files"), so the
    # automatic output filename could never be generated.
    out_file = File(name_template="%s_tcat", desc='output image file name',
                    argstr='-prefix %s', name_source="in_files")
    rlt = traits.Str(desc='options', argstr='-rlt%s', position=1)
class TCat(AFNICommand):
    """Concatenate sub-bricks from input datasets into
    one big 3D+time dataset

    For complete details, see the `3dTcat Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcat.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> tcat = afni.TCat()
    >>> tcat.inputs.in_files = ['functional.nii', 'functional2.nii']
    >>> tcat.inputs.out_file= 'functional_tcat.nii'
    >>> tcat.inputs.rlt = '+'
    >>> res = tcat.run()  # doctest: +SKIP

    """

    _cmd = '3dTcat'  # AFNI executable invoked by this interface
    input_spec = TCatInputSpec
    output_spec = AFNICommandOutputSpec
class FimInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`Fim` (AFNI 3dfim+)."""

    in_file = File(desc='input file to 3dfim+',
                   argstr=' -input %s',
                   position=1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_fim", desc='output image file name',
                    argstr='-bucket %s', name_source="in_file")
    ideal_file = File(desc='ideal time series file name',
                      argstr='-ideal_file %s',
                      position=2,
                      mandatory=True,
                      exists=True)
    fim_thr = traits.Float(desc='fim internal mask threshold value',
                           argstr='-fim_thr %f', position=3)
    # Which functional parameter(s) to write out, e.g. 'Correlation'.
    out = traits.Str(desc='Flag to output the specified parameter',
                     argstr='-out %s', position=4)
class Fim(AFNICommand):
    """Program to calculate the cross-correlation of
    an ideal reference waveform with the measured FMRI
    time series for each voxel

    For complete details, see the `3dfim+ Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dfim+.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> fim = afni.Fim()
    >>> fim.inputs.in_file = 'functional.nii'
    >>> fim.inputs.ideal_file= 'seed.1D'
    >>> fim.inputs.out_file = 'functional_corr.nii'
    >>> fim.inputs.out = 'Correlation'
    >>> fim.inputs.fim_thr = 0.0009
    >>> res = fim.run()  # doctest: +SKIP

    """

    _cmd = '3dfim+'  # AFNI executable invoked by this interface
    input_spec = FimInputSpec
    output_spec = AFNICommandOutputSpec
class TCorrelateInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`TCorrelate` (AFNI 3dTcorrelate)."""

    xset = File(desc='input xset',
                argstr=' %s',
                position=-2,
                mandatory=True,
                exists=True,
                copyfile=False)
    yset = File(desc='input yset',
                argstr=' %s',
                position=-1,
                mandatory=True,
                exists=True,
                copyfile=False)
    # Output name is derived from the first input dataset (xset).
    out_file = File(name_template="%s_tcorr", desc='output image file name',
                    argstr='-prefix %s', name_source="xset")
    pearson = traits.Bool(desc='Correlation is the normal' +
                          ' Pearson correlation coefficient',
                          argstr='-pearson',
                          position=1)
    polort = traits.Int(desc='Remove polynomical trend of order m',
                        argstr='-polort %d', position=2)
class TCorrelate(AFNICommand):
    """Computes the correlation coefficient between corresponding voxel
    time series in two input 3D+time datasets 'xset' and 'yset'

    For complete details, see the `3dTcorrelate Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcorrelate.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> tcorrelate = afni.TCorrelate()
    >>> tcorrelate.inputs.xset= 'u_rc1s1_Template.nii'
    >>> tcorrelate.inputs.yset = 'u_rc1s2_Template.nii'
    >>> tcorrelate.inputs.out_file = 'functional_tcorrelate.nii.gz'
    >>> tcorrelate.inputs.polort = -1
    >>> tcorrelate.inputs.pearson = True
    >>> res = tcorrelate.run()  # doctest: +SKIP

    """

    _cmd = '3dTcorrelate'  # AFNI executable invoked by this interface
    input_spec = TCorrelateInputSpec
    output_spec = AFNICommandOutputSpec
class TCorr1DInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`TCorr1D` (AFNI 3dTcorr1D).

    Exactly one correlation flavor (pearson / spearman / quadrant /
    ktaub) may be selected; the four traits are mutually exclusive.
    """

    xset = File(desc='3d+time dataset input',
                argstr=' %s',
                position=-2,
                mandatory=True,
                exists=True,
                copyfile=False)
    y_1d = File(desc='1D time series file input',
                argstr=' %s',
                position=-1,
                mandatory=True,
                exists=True)
    out_file = File(desc='output filename prefix',
                    name_template='%s_correlation.nii.gz',
                    argstr='-prefix %s',
                    name_source='xset',
                    keep_extension=True)
    pearson = traits.Bool(desc='Correlation is the normal'
                          ' Pearson correlation coefficient',
                          argstr=' -pearson',
                          xor=['spearman', 'quadrant', 'ktaub'],
                          position=1)
    spearman = traits.Bool(desc='Correlation is the'
                           ' Spearman (rank) correlation coefficient',
                           argstr=' -spearman',
                           xor=['pearson', 'quadrant', 'ktaub'],
                           position=1)
    quadrant = traits.Bool(desc='Correlation is the'
                           ' quadrant correlation coefficient',
                           argstr=' -quadrant',
                           xor=['pearson', 'spearman', 'ktaub'],
                           position=1)
    ktaub = traits.Bool(desc='Correlation is the'
                        ' Kendall\'s tau_b correlation coefficient',
                        argstr=' -ktaub',
                        xor=['pearson', 'spearman', 'quadrant'],
                        position=1)
class TCorr1DOutputSpec(TraitedSpec):
    """Output specification for :class:`TCorr1D`."""

    out_file = File(desc='output file containing correlations',
                    exists=True)
class TCorr1D(AFNICommand):
    """Computes the correlation coefficient between each voxel time series
    in the input 3D+time dataset.

    For complete details, see the `3dTcorr1D Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcorr1D.html>`_

    >>> from nipype.interfaces import afni as afni
    >>> tcorr1D = afni.TCorr1D()
    >>> tcorr1D.inputs.xset= 'u_rc1s1_Template.nii'
    >>> tcorr1D.inputs.y_1d = 'seed.1D'
    >>> tcorr1D.cmdline
    '3dTcorr1D -prefix u_rc1s1_Template_correlation.nii.gz  u_rc1s1_Template.nii  seed.1D'
    >>> res = tcorr1D.run()  # doctest: +SKIP

    """

    _cmd = '3dTcorr1D'  # AFNI executable invoked by this interface
    input_spec = TCorr1DInputSpec
    output_spec = TCorr1DOutputSpec
class BrickStatInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`BrickStat` (AFNI 3dBrickStat)."""

    in_file = File(desc='input file to 3dmaskave',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True)
    mask = File(desc='-mask dset = use dset as mask to include/exclude voxels',
                argstr='-mask %s',
                position=2,
                exists=True)
    # Trait intentionally named 'min' to match the AFNI flag; it shadows
    # the builtin only within this spec's namespace.
    min = traits.Bool(desc='print the minimum value in dataset',
                      argstr='-min',
                      position=1)
class BrickStatOutputSpec(TraitedSpec):
    """Output specification for :class:`BrickStat`."""

    min_val = traits.Float(desc='output')
class BrickStat(AFNICommand):
    """Compute maximum and/or minimum voxel values of an input dataset

    For complete details, see the `3dBrickStat Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dBrickStat.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> brickstat = afni.BrickStat()
    >>> brickstat.inputs.in_file = 'functional.nii'
    >>> brickstat.inputs.mask = 'skeleton_mask.nii.gz'
    >>> brickstat.inputs.min = True
    >>> res = brickstat.run()  # doctest: +SKIP

    """

    _cmd = '3dBrickStat'
    input_spec = BrickStatInputSpec
    output_spec = BrickStatOutputSpec

    def aggregate_outputs(self, runtime=None, needed_outputs=None):
        """Parse the value(s) 3dBrickStat printed to stdout.

        When called without a runtime (e.g. when re-collecting outputs),
        the result is read back from the cached stat_result.json; if that
        file is missing, the command is re-run.
        """
        outputs = self._outputs()

        outfile = os.path.join(os.getcwd(), 'stat_result.json')

        if runtime is None:
            try:
                # Reuse the cached result from a previous run.
                min_val = load_json(outfile)['stat']
            except IOError:
                return self.run().outputs
        else:
            min_val = []
            for line in runtime.stdout.split('\n'):
                if line:
                    values = line.split()
                    if len(values) > 1:
                        # Multi-column row: keep it as a nested list.
                        min_val.append([float(val) for val in values])
                    else:
                        min_val.extend([float(val) for val in values])

            if len(min_val) == 1:
                # Collapse a single scalar out of the list wrapper.
                min_val = min_val[0]
            save_json(outfile, dict(stat=min_val))
        outputs.min_val = min_val

        return outputs
class ROIStatsInputSpec(CommandLineInputSpec):
    """Input specification for :class:`ROIStats` (AFNI 3dROIstats)."""

    in_file = File(desc='input file to 3dROIstats',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True)
    mask = File(desc='input mask',
                argstr='-mask %s',
                position=3,
                exists=True)
    mask_f2short = traits.Bool(
        desc='Tells the program to convert a float mask ' +
        'to short integers, by simple rounding.',
        argstr='-mask_f2short',
        position=2)
    quiet = traits.Bool(desc='execute quietly',
                        argstr='-quiet',
                        position=1)
    # Forced to 'allatonce' because aggregate_outputs needs the complete
    # stdout of the command in one piece.
    terminal_output = traits.Enum('allatonce',
                                  desc=('Control terminal output:'
                                        '`allatonce` - waits till command is '
                                        'finished to display output'),
                                  nohash=True, mandatory=True, usedefault=True)
class ROIStatsOutputSpec(TraitedSpec):
    """Output specification for :class:`ROIStats`."""

    stats = File(desc='output tab separated values file', exists=True)
class ROIStats(CommandLine):
    """Display statistics over masked regions

    For complete details, see the `3dROIstats Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dROIstats.html>`_

    Examples
    ========

    >>> from nipype.interfaces import afni as afni
    >>> roistats = afni.ROIStats()
    >>> roistats.inputs.in_file = 'functional.nii'
    >>> roistats.inputs.mask = 'skeleton_mask.nii.gz'
    >>> roistats.inputs.quiet=True
    >>> res = roistats.run()  # doctest: +SKIP

    """

    _cmd = '3dROIstats'
    input_spec = ROIStatsInputSpec
    output_spec = ROIStatsOutputSpec

    def aggregate_outputs(self, runtime=None, needed_outputs=None):
        """Write the stats table 3dROIstats printed to stdout into
        roi_stats.csv (in the current working directory) and expose its
        absolute path as the 'stats' output."""
        outputs = self._outputs()

        output_filename = "roi_stats.csv"
        # Context manager guarantees the handle is closed even if the
        # write fails (the original open/write/close leaked it on error).
        with open(output_filename, "w") as f:
            f.write(runtime.stdout)

        outputs.stats = os.path.abspath(output_filename)
        return outputs
class CalcInputSpec(AFNICommandInputSpec):
    """Input specification for :class:`Calc` (AFNI 3dcalc)."""

    in_file_a = File(desc='input file to 3dcalc',
                     argstr='-a %s', position=0, mandatory=True, exists=True)
    in_file_b = File(desc='operand file to 3dcalc',
                     argstr=' -b %s', position=1, exists=True)
    in_file_c = File(desc='operand file to 3dcalc',
                     argstr=' -c %s', position=2, exists=True)
    out_file = File(name_template="%s_calc", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file_a")
    expr = traits.Str(desc='expr', argstr='-expr "%s"', position=3,
                      mandatory=True)
    # Sub-brick selection on in_file_a; rendered as an index suffix
    # (e.g. 'file.nii[2..5]') by Calc._format_arg, not as flags.
    start_idx = traits.Int(desc='start index for in_file_a',
                           requires=['stop_idx'])
    stop_idx = traits.Int(desc='stop index for in_file_a',
                          requires=['start_idx'])
    single_idx = traits.Int(desc='volume index for in_file_a')
    other = File(desc='other options', argstr='')
class Calc(AFNICommand):
    """This program does voxel-by-voxel arithmetic on 3D datasets
    For complete details, see the `3dcalc Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_
    Examples
    ========
    >>> from nipype.interfaces import afni as afni
    >>> calc = afni.Calc()
    >>> calc.inputs.in_file_a = 'functional.nii'
    >>> calc.inputs.in_file_b = 'functional2.nii'
    >>> calc.inputs.expr='a*b'
    >>> calc.inputs.out_file = 'functional_calc.nii.gz'
    >>> calc.inputs.outputtype = "NIFTI"
    >>> calc.cmdline #doctest: +ELLIPSIS
    '3dcalc -a functional.nii -b functional2.nii -expr "a*b" -prefix functional_calc.nii.gz'
    """
    _cmd = '3dcalc'
    input_spec = CalcInputSpec
    output_spec = AFNICommandOutputSpec

    def _format_arg(self, name, trait_spec, value):
        # Append AFNI sub-brick selectors ([start..stop] or [idx]) directly
        # to the -a argument; AFNI parses them as part of the dataset name.
        if name == 'in_file_a':
            arg = trait_spec.argstr % value
            if isdefined(self.inputs.start_idx):
                arg += '[%d..%d]' % (self.inputs.start_idx,
                                     self.inputs.stop_idx)
            if isdefined(self.inputs.single_idx):
                arg += '[%d]' % (self.inputs.single_idx)
            return arg
        return super(Calc, self)._format_arg(name, trait_spec, value)

    def _parse_inputs(self, skip=None):
        """Skip the arguments without argstr metadata.

        Bug fix: the caller-supplied ``skip`` sequence was previously
        discarded; it is now merged with the locally handled fields.
        """
        local_skip = ('start_idx', 'stop_idx', 'other')
        if skip is not None:
            local_skip = tuple(skip) + local_skip
        return super(Calc, self)._parse_inputs(skip=local_skip)
class BlurInMaskInputSpec(AFNICommandInputSpec):
    """Inputs for 3dBlurInMask (spatial smoothing restricted to a mask)."""
    # NOTE: desc says '3dSkullStrip' but this is the 3dBlurInMask input --
    # presumably a copy/paste slip in the help text; the argstr is correct.
    in_file = File(
        desc='input file to 3dSkullStrip',
        argstr='-input %s',
        position=1,
        mandatory=True,
        exists=True,
        copyfile=False)
    out_file = File(name_template='%s_blur', desc='output to the file', argstr='-prefix %s',
                    name_source='in_file', position=-1)
    mask = File(
        desc='Mask dataset, if desired. Blurring will occur only within the mask. Voxels NOT in the mask will be set to zero in the output.',
        argstr='-mask %s')
    multimask = File(
        desc='Multi-mask dataset -- each distinct nonzero value in dataset will be treated as a separate mask for blurring purposes.',
        argstr='-Mmask %s')
    automask = traits.Bool(
        desc='Create an automask from the input dataset.',
        argstr='-automask')
    fwhm = traits.Float(
        desc='fwhm kernel size',
        argstr='-FWHM %f',
        mandatory=True)
    preserve = traits.Bool(
        desc='Normally, voxels not in the mask will be set to zero in the output. If you want the original values in the dataset to be preserved in the output, use this option.',
        argstr='-preserve')
    float_out = traits.Bool(
        desc='Save dataset as floats, no matter what the input data type is.',
        argstr='-float')
    # Free-form passthrough of extra 3dBlurInMask flags.
    options = traits.Str(desc='options', argstr='%s', position=2)
class BlurInMask(AFNICommand):
    """ Blurs a dataset spatially inside a mask. That's all. Experimental.
    For complete details, see the `3dBlurInMask Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dBlurInMask.html>
    Examples
    ========
    >>> from nipype.interfaces import afni as afni
    >>> bim = afni.BlurInMask()
    >>> bim.inputs.in_file = 'functional.nii'
    >>> bim.inputs.mask = 'mask.nii'
    >>> bim.inputs.fwhm = 5.0
    >>> bim.cmdline #doctest: +ELLIPSIS
    '3dBlurInMask -input functional.nii -FWHM 5.000000 -mask mask.nii -prefix functional_blur'
    >>> res = bim.run() # doctest: +SKIP
    """
    # Thin wrapper: all behavior comes from AFNICommand plus the trait spec.
    _cmd = '3dBlurInMask'
    input_spec = BlurInMaskInputSpec
    output_spec = AFNICommandOutputSpec
class TCorrMapInputSpec(AFNICommandInputSpec):
    """Inputs for 3dTcorrMap (whole-brain voxelwise correlation maps)."""
    in_file = File(exists=True, argstr='-input %s', mandatory=True,
                   copyfile=False)
    # Bug fix: xor must be a sequence of trait names.  ('seeds_width') is
    # just the string 'seeds_width' -- a one-element tuple needs a trailing
    # comma -- so traits iterated the metadata character by character.
    seeds = File(exists=True, argstr='-seed %s', xor=('seeds_width',))
    mask = File(exists=True, argstr='-mask %s')
    automask = traits.Bool(argstr='-automask')
    polort = traits.Int(argstr='-polort %d')
    bandpass = traits.Tuple((traits.Float(), traits.Float()),
                            argstr='-bpass %f %f')
    regress_out_timeseries = traits.File(exists=True, argstr='-ort %s')
    blur_fwhm = traits.Float(argstr='-Gblur %f')
    # Same one-element-tuple fix as for `seeds` above.
    seeds_width = traits.Float(argstr='-Mseed %f', xor=('seeds',))
    # outputs
    mean_file = File(argstr='-Mean %s', suffix='_mean', name_source="in_file")
    zmean = File(argstr='-Zmean %s', suffix='_zmean', name_source="in_file")
    qmean = File(argstr='-Qmean %s', suffix='_qmean', name_source="in_file")
    pmean = File(argstr='-Pmean %s', suffix='_pmean', name_source="in_file")
    # Threshold outputs are mutually exclusive; their argstr placeholders
    # are filled by TCorrMap._format_arg from `thresholds` + the filename.
    _thresh_opts = ('absolute_threshold',
                    'var_absolute_threshold',
                    'var_absolute_threshold_normalize')
    thresholds = traits.List(traits.Int())
    absolute_threshold = File(
        argstr='-Thresh %f %s', suffix='_thresh',
        name_source="in_file", xor=_thresh_opts)
    var_absolute_threshold = File(
        argstr='-VarThresh %f %f %f %s', suffix='_varthresh',
        name_source="in_file", xor=_thresh_opts)
    var_absolute_threshold_normalize = File(
        argstr='-VarThreshN %f %f %f %s', suffix='_varthreshn',
        name_source="in_file", xor=_thresh_opts)
    correlation_maps = File(
        argstr='-CorrMap %s', name_source="in_file")
    correlation_maps_masked = File(
        argstr='-CorrMask %s', name_source="in_file")
    # Expression outputs are mutually exclusive; argstr is filled by
    # TCorrMap._format_arg from `expr` + the filename.
    _expr_opts = ('average_expr', 'average_expr_nonzero', 'sum_expr')
    expr = traits.Str()
    average_expr = File(
        argstr='-Aexpr %s %s', suffix='_aexpr',
        name_source='in_file', xor=_expr_opts)
    average_expr_nonzero = File(
        argstr='-Cexpr %s %s', suffix='_cexpr',
        name_source='in_file', xor=_expr_opts)
    sum_expr = File(
        argstr='-Sexpr %s %s', suffix='_sexpr',
        name_source='in_file', xor=_expr_opts)
    histogram_bin_numbers = traits.Int()
    histogram = File(
        name_source='in_file', argstr='-Hist %d %s', suffix='_hist')
class TCorrMapOutputSpec(TraitedSpec):
    # One optional output per map type that 3dTcorrMap can emit; only the
    # maps requested via the corresponding inputs are produced.
    mean_file = File()
    zmean = File()
    qmean = File()
    pmean = File()
    absolute_threshold = File()
    var_absolute_threshold = File()
    var_absolute_threshold_normalize = File()
    correlation_maps = File()
    correlation_maps_masked = File()
    average_expr = File()
    average_expr_nonzero = File()
    sum_expr = File()
    histogram = File()
class TCorrMap(AFNICommand):
    """ For each voxel time series, computes the correlation between it
    and all other voxels, and combines this set of values into the
    output dataset(s) in some way.
    For complete details, see the `3dTcorrMap Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcorrMap.html>
    Examples
    ========
    >>> from nipype.interfaces import afni as afni
    >>> tcm = afni.TCorrMap()
    >>> tcm.inputs.in_file = 'functional.nii'
    >>> tcm.inputs.mask = 'mask.nii'
    >>> tcm.mean_file = '%s_meancorr.nii'
    >>> res = tcm.run() # doctest: +SKIP
    """
    _cmd = '3dTcorrMap'
    input_spec = TCorrMapInputSpec
    output_spec = TCorrMapOutputSpec
    _additional_metadata = ['suffix']

    def _format_arg(self, name, trait_spec, value):
        if name in self.inputs._thresh_opts:
            # Bug fix: '%' binds tighter than '+', so the original evaluated
            # (argstr % list) + [value] -- a TypeError both at the format
            # and at the string+list addition.  The threshold values and
            # the output filename must be merged into one tuple first.
            return trait_spec.argstr % tuple(self.inputs.thresholds + [value])
        elif name in self.inputs._expr_opts:
            # Expression outputs interpolate the shared `expr` plus filename.
            return trait_spec.argstr % (self.inputs.expr, value)
        elif name == 'histogram':
            return trait_spec.argstr % (self.inputs.histogram_bin_numbers,
                                        value)
        else:
            return super(TCorrMap, self)._format_arg(name, trait_spec, value)
class AutoboxInputSpec(AFNICommandInputSpec):
    """Inputs for 3dAutobox (find/crop to the bounding box of a volume)."""
    in_file = File(exists=True, mandatory=True, argstr='-input %s',
                   desc='input file', copyfile=False)
    padding = traits.Int(
        argstr='-npad %d',
        desc='Number of extra voxels to pad on each side of box')
    # Optional: without out_file, 3dAutobox only reports the box extents.
    out_file = File(argstr="-prefix %s", name_source="in_file")
    no_clustering = traits.Bool(
        argstr='-noclust',
        desc="""Don't do any clustering to find box. Any non-zero
        voxel will be preserved in the cropped volume.
        The default method uses some clustering to find the
        cropping box, and will clip off small isolated blobs.""")
class AutoboxOuputSpec(TraitedSpec):  # out_file not mandatory
    # NOTE: the class name keeps the historical "Ouput" misspelling for
    # backward compatibility with existing callers.
    # Bounding-box voxel extents parsed from 3dAutobox's stderr.
    x_min = traits.Int()
    x_max = traits.Int()
    y_min = traits.Int()
    y_max = traits.Int()
    z_min = traits.Int()
    z_max = traits.Int()
    out_file = File(desc='output file')
class Autobox(AFNICommand):
    """ Computes size of a box that fits around the volume.
    Also can be used to crop the volume to that box.
    For complete details, see the `3dAutobox Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dAutobox.html>
    Examples
    ========
    >>> from nipype.interfaces import afni as afni
    >>> abox = afni.Autobox()
    >>> abox.inputs.in_file = 'structural.nii'
    >>> abox.inputs.padding = 5
    >>> res = abox.run() # doctest: +SKIP
    """
    _cmd = '3dAutobox'
    input_spec = AutoboxInputSpec
    output_spec = AutoboxOuputSpec

    def aggregate_outputs(self, runtime=None, needed_outputs=None):
        """Parse the box extents that 3dAutobox prints to stderr."""
        outputs = self._outputs()
        # Raw strings: '\d' and '\.' are regex escapes, not string escapes
        # (the original non-raw literal raises invalid-escape warnings on
        # modern Python).  Compiled once instead of re-parsed per line.
        pattern = re.compile(
            r'x=(?P<x_min>-?\d+)\.\.(?P<x_max>-?\d+) '
            r'y=(?P<y_min>-?\d+)\.\.(?P<y_max>-?\d+) '
            r'z=(?P<z_min>-?\d+)\.\.(?P<z_max>-?\d+)')
        for line in runtime.stderr.split('\n'):
            m = pattern.search(line)
            if m:
                # All captured extents are integers.
                outputs.set(**{k: int(v) for k, v in m.groupdict().items()})
        outputs.set(out_file=self._gen_filename('out_file'))
        return outputs

    def _gen_filename(self, name):
        # out_file is optional: if the user did not request a cropped
        # volume, report Undefined instead of inventing a filename.
        if name == 'out_file' and (not isdefined(self.inputs.out_file)):
            return Undefined
        return super(Autobox, self)._gen_filename(name)
class RetroicorInputSpec(AFNICommandInputSpec):
    """Inputs for 3dretroicor (physiological noise correction).

    Negative `position` values place the flags before the trailing input
    dataset argument on the command line.
    """
    in_file = File(desc='input file to 3dretroicor',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(desc='output image file name', argstr='-prefix %s', mandatory=True, position=1)
    card = File(desc='1D cardiac data file for cardiac correction',
                argstr='-card %s',
                position=-2,
                exists=True)
    resp = File(desc='1D respiratory waveform data for correction',
                argstr='-resp %s',
                position=-3,
                exists=True)
    threshold = traits.Int(desc='Threshold for detection of R-wave peaks in input (Make sure it is above the background noise level, Try 3/4 or 4/5 times range plus minimum)',
                           argstr='-threshold %d',
                           position=-4)
    order = traits.Int(desc='The order of the correction (2 is typical)',
                       argstr='-order %s',
                       position=-5)
    # hash_files=False: these are output paths, so they must not take part
    # in input hashing.
    cardphase = File(desc='Filename for 1D cardiac phase output',
                     argstr='-cardphase %s',
                     position=-6,
                     hash_files=False)
    respphase = File(desc='Filename for 1D resp phase output',
                     argstr='-respphase %s',
                     position=-7,
                     hash_files=False)
class Retroicor(AFNICommand):
    """Performs Retrospective Image Correction for physiological
    motion effects, using a slightly modified version of the
    RETROICOR algorithm
    The durations of the physiological inputs are assumed to equal
    the duration of the dataset. Any constant sampling rate may be
    used, but 40 Hz seems to be acceptable. This program's cardiac
    peak detection algorithm is rather simplistic, so you might try
    using the scanner's cardiac gating output (transform it to a
    spike wave if necessary).
    This program uses slice timing information embedded in the
    dataset to estimate the proper cardiac/respiratory phase for
    each slice. It makes sense to run this program before any
    program that may destroy the slice timings (e.g. 3dvolreg for
    motion correction).
    For complete details, see the `3dretroicor Documentation.
    <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dretroicor.html>`_
    Examples
    ========
    >>> from nipype.interfaces import afni as afni
    >>> ret = afni.Retroicor()
    >>> ret.inputs.in_file = 'functional.nii'
    >>> ret.inputs.card = 'mask.1D'
    >>> ret.inputs.resp = 'resp.1D'
    >>> res = ret.run() # doctest: +SKIP
    """
    # Thin wrapper: all behavior comes from AFNICommand plus the trait spec.
    _cmd = '3dretroicor'
    input_spec = RetroicorInputSpec
    output_spec = AFNICommandOutputSpec
class AFNItoNIFTIInputSpec(AFNICommandInputSpec):
    """Inputs for 3dAFNItoNIFTI (AFNI BRIK/HEAD -> NIfTI conversion)."""
    in_file = File(desc='input file to 3dAFNItoNIFTI',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s.nii", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
    # NOTE(review): plain class attribute, not trait metadata -- presumably
    # intended as hash_files=False on out_file; confirm before relying on it.
    hash_files = False
class AFNItoNIFTI(AFNICommand):
    """Changes AFNI format files to NIFTI format using 3dAFNItoNIFTI
    see AFNI Documentation: <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dAFNItoNIFTI.html>
    this can also convert 2D or 1D data, which you can numpy.squeeze() to remove extra dimensions
    Examples
    ========
    >>> from nipype.interfaces import afni as afni
    >>> a2n = afni.AFNItoNIFTI()
    >>> a2n.inputs.in_file = 'afni_output.3D'
    >>> a2n.inputs.out_file = 'afni_output.nii'
    >>> a2n.cmdline
    '3dAFNItoNIFTI -prefix afni_output.nii afni_output.3D'
    """
    _cmd = '3dAFNItoNIFTI'
    input_spec = AFNItoNIFTIInputSpec
    output_spec = AFNICommandOutputSpec

    def _overload_extension(self, value, name=None):
        """Append '.nii' unless the filename is already 1D/NIfTI.

        Bug fix: the original whitelist was [".1d", ".nii.gz", ".1D"] --
        ".1D" can never match the lowercased extension, and ".nii" was
        missing, so 'x.nii' became 'x.nii.nii' (contradicting the doctest
        above and the sibling AutoTcorrelate whitelist).
        """
        path, base, ext = split_filename(value)
        if ext.lower() not in ('.1d', '.nii.gz', '.nii'):
            ext = ext + '.nii'
        return os.path.join(path, base + ext)

    def _gen_filename(self, name):
        # Always resolve the generated output name to an absolute path.
        return os.path.abspath(super(AFNItoNIFTI, self)._gen_filename(name))
| 35.934951 | 202 | 0.592061 |
import warnings
import os
import re
from ..base import (Directory, TraitedSpec,
traits, isdefined, File, InputMultiPath, Undefined)
from ...utils.filemanip import (load_json, save_json, split_filename)
from nipype.utils.filemanip import fname_presuffix
from .base import AFNICommand, AFNICommandInputSpec,\
AFNICommandOutputSpec
from nipype.interfaces.base import CommandLineInputSpec, CommandLine,\
OutputMultiPath
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class To3DInputSpec(AFNICommandInputSpec):
out_file = File(name_template="%s", desc='output image file name',
argstr='-prefix %s', name_source=["in_folder"])
in_folder = Directory(desc='folder with DICOM images to convert',
argstr='%s/*.dcm',
position=-1,
mandatory=True,
exists=True)
filetype = traits.Enum('spgr', 'fse', 'epan', 'anat', 'ct', 'spct',
'pet', 'mra', 'bmap', 'diff',
'omri', 'abuc', 'fim', 'fith', 'fico', 'fitt', 'fift',
'fizt', 'fict', 'fibt',
'fibn', 'figt', 'fipt',
'fbuc', argstr='-%s', desc='type of datafile being converted')
skipoutliers = traits.Bool(desc='skip the outliers check',
argstr='-skip_outliers')
assumemosaic = traits.Bool(desc='assume that Siemens image is mosaic',
argstr='-assume_dicom_mosaic')
datatype = traits.Enum('short', 'float', 'byte', 'complex',
desc='set output file datatype', argstr='-datum %s')
funcparams = traits.Str(desc='parameters for functional data',
argstr='-time:zt %s alt+z2')
class To3D(AFNICommand):
_cmd = 'to3d'
input_spec = To3DInputSpec
output_spec = AFNICommandOutputSpec
class TShiftInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dTShift',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_tshift", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
tr = traits.Str(desc='manually set the TR' +
'You can attach suffix "s" for seconds or "ms" for milliseconds.',
argstr='-TR %s')
tzero = traits.Float(desc='align each slice to given time offset',
argstr='-tzero %s',
xor=['tslice'])
tslice = traits.Int(desc='align each slice to time offset of given slice',
argstr='-slice %s',
xor=['tzero'])
ignore = traits.Int(desc='ignore the first set of points specified',
argstr='-ignore %s')
interp = traits.Enum(('Fourier', 'linear', 'cubic', 'quintic', 'heptic'),
desc='different interpolation methods (see 3dTShift for details)' +
' default = Fourier', argstr='-%s')
tpattern = traits.Enum(('alt+z', 'alt+z2', 'alt-z',
'alt-z2', 'seq+z', 'seq-z'),
desc='use specified slice time pattern rather than one in header',
argstr='-tpattern %s')
rlt = traits.Bool(desc='Before shifting, remove the mean and linear trend',
argstr="-rlt")
rltplus = traits.Bool(desc='Before shifting,' +
' remove the mean and linear trend and ' +
'later put back the mean',
argstr="-rlt+")
class TShift(AFNICommand):
_cmd = '3dTshift'
input_spec = TShiftInputSpec
output_spec = AFNICommandOutputSpec
class RefitInputSpec(CommandLineInputSpec):
in_file = File(desc='input file to 3drefit',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=True)
deoblique = traits.Bool(desc='replace current transformation' +
' matrix with cardinal matrix',
argstr='-deoblique')
xorigin = traits.Str(desc='x distance for edge voxel offset',
argstr='-xorigin %s')
yorigin = traits.Str(desc='y distance for edge voxel offset',
argstr='-yorigin %s')
zorigin = traits.Str(desc='z distance for edge voxel offset',
argstr='-zorigin %s')
class Refit(CommandLine):
_cmd = '3drefit'
input_spec = RefitInputSpec
output_spec = AFNICommandOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["out_file"] = os.path.abspath(self.inputs.in_file)
return outputs
class WarpInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dWarp',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_warp", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
tta2mni = traits.Bool(desc='transform dataset from Talairach to MNI152',
argstr='-tta2mni')
mni2tta = traits.Bool(desc='transform dataset from MNI152 to Talaraich',
argstr='-mni2tta')
matparent = File(desc="apply transformation from 3dWarpDrive",
argstr="-matparent %s",
exists=True)
deoblique = traits.Bool(desc='transform dataset from oblique to cardinal',
argstr='-deoblique')
interp = traits.Enum(('linear', 'cubic', 'NN', 'quintic'),
desc='spatial interpolation methods [default = linear]',
argstr='-%s')
gridset = File(desc="copy grid of specified dataset",
argstr="-gridset %s",
exists=True)
zpad = traits.Int(desc="pad input dataset with N planes" +
" of zero on all sides.",
argstr="-zpad %d")
class Warp(AFNICommand):
_cmd = '3dWarp'
input_spec = WarpInputSpec
output_spec = AFNICommandOutputSpec
class ResampleInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dresample',
argstr='-inset %s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_resample", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
orientation = traits.Str(desc='new orientation code',
argstr='-orient %s')
resample_mode = traits.Enum('NN', 'Li', 'Cu', 'Bk',
argstr='-rmode %s',
desc="resampling method from set {'NN', 'Li', 'Cu', 'Bk'}. These are for 'Nearest Neighbor', 'Linear', 'Cubic' and 'Blocky' interpolation, respectively. Default is NN.")
voxel_size = traits.Tuple(*[traits.Float()]*3,
argstr='-dxyz %f %f %f',
desc="resample to new dx, dy and dz")
master = traits.File(argstr='-master %s',
desc='align dataset grid to a reference file')
class Resample(AFNICommand):
_cmd = '3dresample'
input_spec = ResampleInputSpec
output_spec = AFNICommandOutputSpec
class AutoTcorrelateInputSpec(AFNICommandInputSpec):
in_file = File(desc='timeseries x space (volume or surface) file',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
polort = traits.Int(
desc='Remove polynomical trend of order m or -1 for no detrending',
argstr="-polort %d")
eta2 = traits.Bool(desc='eta^2 similarity',
argstr="-eta2")
mask = File(exists=True, desc="mask of voxels",
argstr="-mask %s")
mask_only_targets = traits.Bool(desc="use mask only on targets voxels",
argstr="-mask_only_targets",
xor=['mask_source'])
mask_source = File(exists=True,
desc="mask for source voxels",
argstr="-mask_source %s",
xor=['mask_only_targets'])
out_file = File(name_template="%s_similarity_matrix.1D", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
class AutoTcorrelate(AFNICommand):
input_spec = AutoTcorrelateInputSpec
output_spec = AFNICommandOutputSpec
_cmd = '3dAutoTcorrelate'
def _overload_extension(self, value, name=None):
path, base, ext = split_filename(value)
if ext.lower() not in [".1d", ".nii.gz", ".nii"]:
ext = ext + ".1D"
return os.path.join(path, base + ext)
class TStatInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dTstat',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_tstat", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
mask = File(desc='mask file',
argstr='-mask %s',
exists=True)
options = traits.Str(desc='selected statistical output',
argstr='%s')
class TStat(AFNICommand):
_cmd = '3dTstat'
input_spec = TStatInputSpec
output_spec = AFNICommandOutputSpec
class DetrendInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dDetrend',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_detrend", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
class Detrend(AFNICommand):
_cmd = '3dDetrend'
input_spec = DetrendInputSpec
output_spec = AFNICommandOutputSpec
class DespikeInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dDespike',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_despike", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
class Despike(AFNICommand):
_cmd = '3dDespike'
input_spec = DespikeInputSpec
output_spec = AFNICommandOutputSpec
class AutomaskInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dAutomask',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_mask", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
brain_file = File(name_template="%s_masked",
desc="output file from 3dAutomask",
argstr='-apply_prefix %s',
name_source="in_file")
clfrac = traits.Float(desc='sets the clip level fraction' +
' (must be 0.1-0.9). ' +
'A small value will tend to make the mask larger [default = 0.5].',
argstr="-clfrac %s")
dilate = traits.Int(desc='dilate the mask outwards',
argstr="-dilate %s")
erode = traits.Int(desc='erode the mask inwards',
argstr="-erode %s")
class AutomaskOutputSpec(TraitedSpec):
out_file = File(desc='mask file',
exists=True)
brain_file = File(desc='brain file (skull stripped)', exists=True)
class Automask(AFNICommand):
_cmd = '3dAutomask'
input_spec = AutomaskInputSpec
output_spec = AutomaskOutputSpec
class VolregInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dvolreg',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_volreg", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
basefile = File(desc='base file for registration',
argstr='-base %s',
position=-6,
exists=True)
zpad = traits.Int(desc='Zeropad around the edges' +
' by \'n\' voxels during rotations',
argstr='-zpad %d',
position=-5)
md1d_file = File(name_template='%s_md.1D', desc='max displacement output file',
argstr='-maxdisp1D %s', name_source="in_file",
keep_extension=True, position=-4)
oned_file = File(name_template='%s.1D', desc='1D movement parameters output file',
argstr='-1Dfile %s',
name_source="in_file",
keep_extension=True)
verbose = traits.Bool(desc='more detailed description of the process',
argstr='-verbose')
timeshift = traits.Bool(desc='time shift to mean slice time offset',
argstr='-tshift 0')
copyorigin = traits.Bool(desc='copy base file origin coords to output',
argstr='-twodup')
class VolregOutputSpec(TraitedSpec):
out_file = File(desc='registered file', exists=True)
md1d_file = File(desc='max displacement info file', exists=True)
oned_file = File(desc='movement parameters info file', exists=True)
class Volreg(AFNICommand):
_cmd = '3dvolreg'
input_spec = VolregInputSpec
output_spec = VolregOutputSpec
class MergeInputSpec(AFNICommandInputSpec):
in_files = InputMultiPath(
File(desc='input file to 3dmerge', exists=True),
argstr='%s',
position=-1,
mandatory=True,
copyfile=False)
out_file = File(name_template="%s_merge", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
doall = traits.Bool(desc='apply options to all sub-bricks in dataset',
argstr='-doall')
blurfwhm = traits.Int(desc='FWHM blur value (mm)',
argstr='-1blur_fwhm %d',
units='mm')
class Merge(AFNICommand):
_cmd = '3dmerge'
input_spec = MergeInputSpec
output_spec = AFNICommandOutputSpec
class CopyInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dcopy',
argstr='%s',
position=-2,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_copy", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
class Copy(AFNICommand):
_cmd = '3dcopy'
input_spec = CopyInputSpec
output_spec = AFNICommandOutputSpec
class FourierInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dFourier',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_fourier", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
lowpass = traits.Float(desc='lowpass',
argstr='-lowpass %f',
position=0,
mandatory=True)
highpass = traits.Float(desc='highpass',
argstr='-highpass %f',
position=1,
mandatory=True)
class Fourier(AFNICommand):
_cmd = '3dFourier'
input_spec = FourierInputSpec
output_spec = AFNICommandOutputSpec
class BandpassInputSpec(AFNICommandInputSpec):
in_file = File(
desc='input file to 3dBandpass',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(
name_template='%s_bp',
desc='output file from 3dBandpass',
argstr='-prefix %s',
position=1,
name_source='in_file',
genfile=True)
lowpass = traits.Float(
desc='lowpass',
argstr='%f',
position=-2,
mandatory=True)
highpass = traits.Float(
desc='highpass',
argstr='%f',
position=-3,
mandatory=True)
mask = File(
desc='mask file',
position=2,
argstr='-mask %s',
exists=True)
despike = traits.Bool(
argstr='-despike',
desc="""Despike each time series before other processing.
++ Hopefully, you don't actually need to do this,
which is why it is optional.""")
orthogonalize_file = InputMultiPath(
File(exists=True),
argstr="-ort %s",
desc="""Also orthogonalize input to columns in f.1D
++ Multiple '-ort' options are allowed.""")
orthogonalize_dset = File(
exists=True,
argstr="-dsort %s",
desc="""Orthogonalize each voxel to the corresponding
voxel time series in dataset 'fset', which must
have the same spatial and temporal grid structure
as the main input dataset.
++ At present, only one '-dsort' option is allowed.""")
no_detrend = traits.Bool(
argstr='-nodetrend',
desc="""Skip the quadratic detrending of the input that
occurs before the FFT-based bandpassing.
++ You would only want to do this if the dataset
had been detrended already in some other program.""")
tr = traits.Float(
argstr="-dt %f",
desc="set time step (TR) in sec [default=from dataset header]")
nfft = traits.Int(
argstr='-nfft %d',
desc="set the FFT length [must be a legal value]")
normalize = traits.Bool(
argstr='-norm',
desc="""Make all output time series have L2 norm = 1
++ i.e., sum of squares = 1""")
automask = traits.Bool(
argstr='-automask',
desc="Create a mask from the input dataset")
blur = traits.Float(
argstr='-blur %f',
desc="""Blur (inside the mask only) with a filter
width (FWHM) of 'fff' millimeters.""")
localPV = traits.Float(
argstr='-localPV %f',
desc="""Replace each vector by the local Principal Vector
(AKA first singular vector) from a neighborhood
of radius 'rrr' millimiters.
++ Note that the PV time series is L2 normalized.
++ This option is mostly for Bob Cox to have fun with.""")
notrans = traits.Bool(
argstr='-notrans',
desc="""Don't check for initial positive transients in the data:
++ The test is a little slow, so skipping it is OK,
if you KNOW the data time series are transient-free.""")
class Bandpass(AFNICommand):
_cmd = '3dBandpass'
input_spec = BandpassInputSpec
output_spec = AFNICommandOutputSpec
class ZCutUpInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dZcutup',
argstr='%s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
out_file = File(name_template="%s_zcupup", desc='output image file name',
argstr='-prefix %s', name_source="in_file")
keep = traits.Str(desc='slice range to keep in output',
argstr='-keep %s')
class ZCutUp(AFNICommand):
_cmd = '3dZcutup'
input_spec = ZCutUpInputSpec
output_spec = AFNICommandOutputSpec
class AllineateInputSpec(AFNICommandInputSpec):
in_file = File(desc='input file to 3dAllineate',
argstr='-source %s',
position=-1,
mandatory=True,
exists=True,
copyfile=False)
reference = File(
exists=True,
argstr='-base %s',
desc="""file to be used as reference, the first volume will be used
if not given the reference will be the first volume of in_file.""")
out_file = File(
desc='output file from 3dAllineate',
argstr='-prefix %s',
position=-2,
name_source='%s_allineate',
genfile=True)
out_param_file = File(
argstr='-1Dparam_save %s',
desc='Save the warp parameters in ASCII (.1D) format.')
in_param_file = File(
exists=True,
argstr='-1Dparam_apply %s',
desc="""Read warp parameters from file and apply them to
the source dataset, and produce a new dataset""")
out_matrix = File(
argstr='-1Dmatrix_save %s',
desc='Save the transformation matrix for each volume.')
in_matrix = File(desc='matrix to align input file',
argstr='-1Dmatrix_apply %s',
position=-3)
_cost_funcs = [
'leastsq', 'ls',
'mutualinfo', 'mi',
'corratio_mul', 'crM',
'norm_mutualinfo', 'nmi',
'hellinger', 'hel',
'corratio_add', 'crA',
'corratio_uns', 'crU']
cost = traits.Enum(
*_cost_funcs, argstr='-cost %s',
desc="""Defines the 'cost' function that defines the matching
between the source and the base""")
_interp_funcs = [
'nearestneighbour', 'linear', 'cubic', 'quintic', 'wsinc5']
interpolation = traits.Enum(
*_interp_funcs[:-1], argstr='-interp %s',
desc='Defines interpolation method to use during matching')
final_interpolation = traits.Enum(
*_interp_funcs, argstr='-final %s',
desc='Defines interpolation method used to create the output dataset')
nmatch = traits.Int(
argstr='-nmatch %d',
desc='Use at most n scattered points to match the datasets.')
no_pad = traits.Bool(
argstr='-nopad',
desc='Do not use zero-padding on the base image.')
zclip = traits.Bool(
argstr='-zclip',
desc='Replace negative values in the input datasets (source & base) with zero.')
convergence = traits.Float(
argstr='-conv %f',
desc='Convergence test in millimeters (default 0.05mm).')
usetemp = traits.Bool(argstr='-usetemp', desc='temporary file use')
check = traits.List(
traits.Enum(*_cost_funcs), argstr='-check %s',
desc="""After cost functional optimization is done, start at the
final parameters and RE-optimize using this new cost functions.
If the results are too different, a warning message will be
printed. However, the final parameters from the original
optimization will be used to create the output dataset.""")
one_pass = traits.Bool(
argstr='-onepass',
desc="""Use only the refining pass -- do not try a coarse
resolution pass first. Useful if you know that only
small amounts of image alignment are needed.""")
two_pass = traits.Bool(
argstr='-twopass',
desc="""Use a two pass alignment strategy for all volumes, searching
for a large rotation+shift and then refining the alignment.""")
two_blur = traits.Float(
argstr='-twoblur',
desc='Set the blurring radius for the first pass in mm.')
two_first = traits.Bool(
argstr='-twofirst',
desc="""Use -twopass on the first image to be registered, and
then on all subsequent images from the source dataset,
use results from the first image's coarse pass to start
the fine pass.""")
two_best = traits.Int(
argstr='-twobest %d',
desc="""In the coarse pass, use the best 'bb' set of initial
points to search for the starting point for the fine
pass. If bb==0, then no search is made for the best
starting point, and the identity transformation is
used as the starting point. [Default=5; min=0 max=11]""")
fine_blur = traits.Float(
argstr='-fineblur %f',
desc="""Set the blurring radius to use in the fine resolution
pass to 'x' mm. A small amount (1-2 mm?) of blurring at
the fine step may help with convergence, if there is
some problem, especially if the base volume is very noisy.
[Default == 0 mm = no blurring at the final alignment pass]""")
center_of_mass = traits.Str(
argstr='-cmass%s',
desc='Use the center-of-mass calculation to bracket the shifts.')
autoweight = traits.Str(
argstr='-autoweight%s',
desc="""Compute a weight function using the 3dAutomask
algorithm plus some blurring of the base image.""")
automask = traits.Int(
argstr='-automask+%d',
desc="""Compute a mask function, set a value for dilation or 0.""")
autobox = traits.Bool(
argstr='-autobox',
desc="""Expand the -automask function to enclose a rectangular
box that holds the irregular mask.""")
nomask = traits.Bool(
argstr='-nomask',
desc="""Don't compute the autoweight/mask; if -weight is not
also used, then every voxel will be counted equally.""")
weight_file = File(
argstr='-weight %s', exists=True,
desc="""Set the weighting for each voxel in the base dataset;
larger weights mean that voxel count more in the cost function.
Must be defined on the same grid as the base dataset""")
out_weight_file = traits.File(
argstr='-wtprefix %s',
desc="""Write the weight volume to disk as a dataset""")
source_mask = File(
exists=True, argstr='-source_mask %s',
desc='mask the input dataset')
source_automask = traits.Int(
argstr='-source_automask+%d',
desc='Automatically mask the source dataset with dilation or 0.')
warp_type = traits.Enum(
'shift_only', 'shift_rotate', 'shift_rotate_scale', 'affine_general',
argstr='-warp %s',
desc='Set the warp type.')
warpfreeze = traits.Bool(
argstr='-warpfreeze',
desc='Freeze the non-rigid body parameters after first volume.')
replacebase = traits.Bool(
argstr='-replacebase',
desc="""If the source has more than one volume, then after the first
volume is aligned to the base""")
replacemeth = traits.Enum(
*_cost_funcs,
argstr='-replacemeth %s',
desc="""After first volume is aligned, switch method for later volumes.
For use with '-replacebase'.""")
epi = traits.Bool(
argstr='-EPI',
desc="""Treat the source dataset as being composed of warped
EPI slices, and the base as comprising anatomically
'true' images. Only phase-encoding direction image
shearing and scaling will be allowed with this option.""")
master = File(
exists=True, argstr='-master %s',
desc='Write the output dataset on the same grid as this file')
newgrid = traits.Float(
argstr='-newgrid %f',
desc='Write the output dataset using isotropic grid spacing in mm')
_nwarp_types = ['bilinear',
'cubic', 'quintic', 'heptic', 'nonic',
'poly3', 'poly5', 'poly7', 'poly9']
nwarp = traits.Enum(
*_nwarp_types, argstr='-nwarp %s',
desc='Experimental nonlinear warping: bilinear or legendre poly.')
_dirs = ['X', 'Y', 'Z', 'I', 'J', 'K']
nwarp_fixmot = traits.List(
traits.Enum(*_dirs),
argstr='-nwarp_fixmot%s',
desc='To fix motion along directions.')
nwarp_fixdep = traits.List(
traits.Enum(*_dirs),
argstr='-nwarp_fixdep%s',
desc='To fix non-linear warp dependency along directions.')
class AllineateOutputSpec(TraitedSpec):
    """Outputs of AFNI 3dAllineate: the aligned image and the affine matrix."""
    out_file = File(desc='output image file name')
    matrix = File(desc='matrix to align input file')
class Allineate(AFNICommand):
    """Align a source dataset to a base dataset using AFNI 3dAllineate."""
    _cmd = '3dAllineate'
    input_spec = AllineateInputSpec
    output_spec = AllineateOutputSpec
    def _format_arg(self, name, trait_spec, value):
        # nwarp_fixmot/nwarp_fixdep are lists of axis letters; each element
        # is appended to the flag (argstr '-nwarp_fixmot%s'), space-joined.
        if name == 'nwarp_fixmot' or name == 'nwarp_fixdep':
            arg = ' '.join([trait_spec.argstr % v for v in value])
            return arg
        return super(Allineate, self)._format_arg(name, trait_spec, value)
    def _list_outputs(self):
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file):
            # NOTE(review): relies on a 'suffix' trait on the input spec; it is
            # not visible in this view of AllineateInputSpec -- confirm it exists.
            outputs['out_file'] = self._gen_fname(self.inputs.in_file,
                                                  suffix=self.inputs.suffix)
        else:
            outputs['out_file'] = os.path.abspath(self.inputs.out_file)
        return outputs
    def _gen_filename(self, name):
        # Only 'out_file' is auto-generated; other names fall through to None.
        if name == 'out_file':
            return self._list_outputs()[name]
class MaskaveInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dmaskave (average of voxel values within a mask)."""
    in_file = File(desc='input file to 3dmaskave',
                   argstr='%s',
                   position=-2,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    # Output is a 1D text file; stdout is redirected into it ('> %s').
    out_file = File(name_template="%s_maskave.1D", desc='output image file name',
                    keep_extension=True,
                    argstr="> %s", name_source="in_file", position=-1)
    # Help-text fix: 'mask' and 'quiet' previously reused the unrelated
    # copy-pasted description 'matrix to align input file'.
    mask = File(desc='mask dataset restricting which voxels are averaged',
                argstr='-mask %s',
                position=1,
                exists=True)
    quiet = traits.Bool(desc='print only the averages, suppressing other output',
                        argstr='-quiet',
                        position=2)
class Maskave(AFNICommand):
    """Compute the average of voxel values within a mask (AFNI 3dmaskave)."""
    _cmd = '3dmaskave'
    input_spec = MaskaveInputSpec
    output_spec = AFNICommandOutputSpec
class SkullStripInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dSkullStrip."""
    # Anatomical volume to strip; not copied into the working directory.
    in_file = File(desc='input file to 3dSkullStrip',
                   argstr='-input %s',
                   position=1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    # Output name auto-generated from in_file with a '_skullstrip' suffix.
    out_file = File(name_template="%s_skullstrip", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
class SkullStrip(AFNICommand):
    """Remove non-brain tissue from an anatomical image (AFNI 3dSkullStrip)."""
    _cmd = '3dSkullStrip'
    input_spec = SkullStripInputSpec
    output_spec = AFNICommandOutputSpec
class TCatInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dTcat (concatenate datasets along time)."""
    in_files = InputMultiPath(
        File(exists=True),
        desc='input file to 3dTcat',
        argstr=' %s',
        position=-1,
        mandatory=True,
        copyfile=False)
    # BUG FIX: name_source referenced a nonexistent trait 'in_file'; the
    # multi-file input trait above is named 'in_files'.
    out_file = File(name_template="%s_tcat", desc='output image file name',
                    argstr='-prefix %s', name_source="in_files")
    rlt = traits.Str(desc='options', argstr='-rlt%s', position=1)
class TCat(AFNICommand):
    """Concatenate sub-bricks from multiple datasets (AFNI 3dTcat)."""
    _cmd = '3dTcat'
    input_spec = TCatInputSpec
    output_spec = AFNICommandOutputSpec
class FimInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dfim+ (functional intensity map)."""
    in_file = File(desc='input file to 3dfim+',
                   argstr=' -input %s',
                   position=1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s_fim", desc='output image file name',
                    argstr='-bucket %s', name_source="in_file")
    # Reference waveform the voxel time series are correlated against.
    ideal_file = File(desc='ideal time series file name',
                      argstr='-ideal_file %s',
                      position=2,
                      mandatory=True,
                      exists=True)
    fim_thr = traits.Float(desc='fim internal mask threshold value',
                           argstr='-fim_thr %f', position=3)
    out = traits.Str(desc='Flag to output the specified parameter',
                     argstr='-out %s', position=4)
class Fim(AFNICommand):
    """Correlate an ideal reference waveform with a 3D+time dataset (3dfim+)."""
    _cmd = '3dfim+'
    input_spec = FimInputSpec
    output_spec = AFNICommandOutputSpec
class TCorrelateInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dTcorrelate (voxel-wise correlation of two
    3D+time datasets)."""
    xset = File(desc='input xset',
                argstr=' %s',
                position=-2,
                mandatory=True,
                exists=True,
                copyfile=False)
    yset = File(desc='input yset',
                argstr=' %s',
                position=-1,
                mandatory=True,
                exists=True,
                copyfile=False)
    out_file = File(name_template="%s_tcorr", desc='output image file name',
                    argstr='-prefix %s', name_source="xset")
    pearson = traits.Bool(desc='Correlation is the normal'
                               ' Pearson correlation coefficient',
                          argstr='-pearson',
                          position=1)
    # Help-text typo fixed: 'polynomical' -> 'polynomial'.
    polort = traits.Int(desc='Remove polynomial trend of order m',
                        argstr='-polort %d', position=2)
class TCorrelate(AFNICommand):
    """Voxel-by-voxel correlation of two 3D+time datasets (3dTcorrelate)."""
    _cmd = '3dTcorrelate'
    input_spec = TCorrelateInputSpec
    output_spec = AFNICommandOutputSpec
class TCorr1DInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dTcorr1D.

    The four correlation-type flags are mutually exclusive (xor metadata).
    PEP 8 cleanup: removed spaces around '=' in keyword arguments.
    """
    xset = File(desc='3d+time dataset input',
                argstr=' %s',
                position=-2,
                mandatory=True,
                exists=True,
                copyfile=False)
    y_1d = File(desc='1D time series file input',
                argstr=' %s',
                position=-1,
                mandatory=True,
                exists=True)
    out_file = File(desc='output filename prefix',
                    name_template='%s_correlation.nii.gz',
                    argstr='-prefix %s',
                    name_source='xset',
                    keep_extension=True)
    pearson = traits.Bool(desc='Correlation is the normal'
                               ' Pearson correlation coefficient',
                          argstr=' -pearson',
                          xor=['spearman', 'quadrant', 'ktaub'],
                          position=1)
    spearman = traits.Bool(desc='Correlation is the'
                                ' Spearman (rank) correlation coefficient',
                           argstr=' -spearman',
                           xor=['pearson', 'quadrant', 'ktaub'],
                           position=1)
    quadrant = traits.Bool(desc='Correlation is the'
                                ' quadrant correlation coefficient',
                           argstr=' -quadrant',
                           xor=['pearson', 'spearman', 'ktaub'],
                           position=1)
    ktaub = traits.Bool(desc='Correlation is the'
                             " Kendall's tau_b correlation coefficient",
                        argstr=' -ktaub',
                        xor=['pearson', 'spearman', 'quadrant'],
                        position=1)
class TCorr1DOutputSpec(TraitedSpec):
    """Output spec for AFNI 3dTcorr1D."""
    # PEP 8 cleanup: removed spaces around '=' in keyword arguments.
    out_file = File(desc='output file containing correlations',
                    exists=True)
class TCorr1D(AFNICommand):
    """Correlate each voxel time series with a 1D reference (3dTcorr1D)."""
    _cmd = '3dTcorr1D'
    input_spec = TCorr1DInputSpec
    output_spec = TCorr1DOutputSpec
class BrickStatInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dBrickStat."""
    # Help-text fix: description previously said '3dmaskave' (copy-paste).
    in_file = File(desc='input file to 3dBrickStat',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True)
    mask = File(desc='-mask dset = use dset as mask to include/exclude voxels',
                argstr='-mask %s',
                position=2,
                exists=True)
    # 'min' shadows the builtin, but the trait name is part of the public
    # interface and must be kept unchanged.
    min = traits.Bool(desc='print the minimum value in dataset',
                      argstr='-min',
                      position=1)
class BrickStatOutputSpec(TraitedSpec):
    """Output spec for AFNI 3dBrickStat."""
    # Value parsed from the command's stdout (a scalar or a nested list).
    min_val = traits.Float(desc='output')
class BrickStat(AFNICommand):
    """Compute statistics of an input dataset via AFNI 3dBrickStat."""
    _cmd = '3dBrickStat'
    input_spec = BrickStatInputSpec
    output_spec = BrickStatOutputSpec
    def aggregate_outputs(self, runtime=None, needed_outputs=None):
        """Parse the statistic printed on stdout and cache it as JSON.

        With no runtime, try the cached 'stat_result.json'; if absent,
        (re-)run the command. Otherwise parse stdout line by line: rows
        with several numbers become lists, single numbers are flattened.
        """
        outputs = self._outputs()
        outfile = os.path.join(os.getcwd(), 'stat_result.json')
        if runtime is None:
            try:
                min_val = load_json(outfile)['stat']
            except IOError:
                # No cached result yet: execute the command and use its outputs.
                return self.run().outputs
        else:
            min_val = []
            for line in runtime.stdout.split('\n'):
                if line:
                    values = line.split()
                    if len(values) > 1:
                        min_val.append([float(val) for val in values])
                    else:
                        min_val.extend([float(val) for val in values])
            # A single entry is unwrapped to a plain scalar/list.
            if len(min_val) == 1:
                min_val = min_val[0]
            save_json(outfile, dict(stat=min_val))
        outputs.min_val = min_val
        return outputs
class ROIStatsInputSpec(CommandLineInputSpec):
    """Input spec for AFNI 3dROIstats."""
    in_file = File(desc='input file to 3dROIstats',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True)
    mask = File(desc='input mask',
                argstr='-mask %s',
                position=3,
                exists=True)
    mask_f2short = traits.Bool(
        desc='Tells the program to convert a float mask ' +
        'to short integers, by simple rounding.',
        argstr='-mask_f2short',
        position=2)
    quiet = traits.Bool(desc='execute quietly',
                        argstr='-quiet',
                        position=1)
    # Output must be captured in one piece: aggregate_outputs writes the
    # whole stdout to a file, so streaming output is disallowed.
    terminal_output = traits.Enum('allatonce',
                                  desc=('Control terminal output:'
                                        '`allatonce` - waits till command is '
                                        'finished to display output'),
                                  nohash=True, mandatory=True, usedefault=True)
class ROIStatsOutputSpec(TraitedSpec):
    """Output spec for AFNI 3dROIstats."""
    stats = File(desc='output tab separated values file', exists=True)
class ROIStats(CommandLine):
    """Display statistics over masked regions (AFNI 3dROIstats).

    The command's stdout (a tab-separated table) is captured and written
    to ``roi_stats.csv`` in the current working directory.
    """
    _cmd = '3dROIstats'
    input_spec = ROIStatsInputSpec
    output_spec = ROIStatsOutputSpec

    def aggregate_outputs(self, runtime=None, needed_outputs=None):
        """Persist captured stdout to a file and expose it as 'stats'."""
        outputs = self._outputs()
        output_filename = "roi_stats.csv"
        # Context manager so the handle is closed even if the write fails
        # (the original open/write/close leaked the handle on error).
        with open(output_filename, "w") as f:
            f.write(runtime.stdout)
        outputs.stats = os.path.abspath(output_filename)
        return outputs
class CalcInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dcalc (voxel-wise calculator)."""
    in_file_a = File(desc='input file to 3dcalc',
                     argstr='-a %s', position=0, mandatory=True, exists=True)
    in_file_b = File(desc='operand file to 3dcalc',
                     argstr=' -b %s', position=1, exists=True)
    in_file_c = File(desc='operand file to 3dcalc',
                     argstr=' -c %s', position=2, exists=True)
    out_file = File(name_template="%s_calc", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file_a")
    expr = traits.Str(desc='expr', argstr='-expr "%s"', position=3,
                      mandatory=True)
    # Sub-brick selectors for in_file_a; they carry no argstr of their own
    # because Calc._format_arg folds them into the '-a' argument.
    start_idx = traits.Int(desc='start index for in_file_a',
                           requires=['stop_idx'])
    stop_idx = traits.Int(desc='stop index for in_file_a',
                          requires=['start_idx'])
    single_idx = traits.Int(desc='volume index for in_file_a')
    other = File(desc='other options', argstr='')
class Calc(AFNICommand):
    """Voxel-wise arithmetic on AFNI datasets (3dcalc)."""
    _cmd = '3dcalc'
    input_spec = CalcInputSpec
    output_spec = AFNICommandOutputSpec

    def _format_arg(self, name, trait_spec, value):
        """Append optional sub-brick selectors to the '-a' file argument."""
        if name == 'in_file_a':
            arg = trait_spec.argstr % value
            # start_idx requires stop_idx (enforced by the input spec), so
            # both are defined whenever the range selector is emitted.
            if isdefined(self.inputs.start_idx):
                arg += '[%d..%d]' % (self.inputs.start_idx,
                                     self.inputs.stop_idx)
            if isdefined(self.inputs.single_idx):
                arg += '[%d]' % (self.inputs.single_idx)
            return arg
        return super(Calc, self)._format_arg(name, trait_spec, value)

    def _parse_inputs(self, skip=None):
        # BUG FIX: the caller-supplied ``skip`` sequence was silently
        # discarded; merge it with the traits handled manually above.
        if skip is None:
            skip = ()
        return super(Calc, self)._parse_inputs(
            skip=tuple(skip) + ('start_idx', 'stop_idx', 'other'))
class BlurInMaskInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dBlurInMask."""
    # Help-text fix: description previously said '3dSkullStrip' (copy-paste).
    in_file = File(
        desc='input file to 3dBlurInMask',
        argstr='-input %s',
        position=1,
        mandatory=True,
        exists=True,
        copyfile=False)
    out_file = File(name_template='%s_blur', desc='output to the file', argstr='-prefix %s',
                    name_source='in_file', position=-1)
    mask = File(
        desc='Mask dataset, if desired. Blurring will occur only within the mask. Voxels NOT in the mask will be set to zero in the output.',
        argstr='-mask %s')
    multimask = File(
        desc='Multi-mask dataset -- each distinct nonzero value in dataset will be treated as a separate mask for blurring purposes.',
        argstr='-Mmask %s')
    automask = traits.Bool(
        desc='Create an automask from the input dataset.',
        argstr='-automask')
    fwhm = traits.Float(
        desc='fwhm kernel size',
        argstr='-FWHM %f',
        mandatory=True)
    preserve = traits.Bool(
        desc='Normally, voxels not in the mask will be set to zero in the output. If you want the original values in the dataset to be preserved in the output, use this option.',
        argstr='-preserve')
    float_out = traits.Bool(
        desc='Save dataset as floats, no matter what the input data type is.',
        argstr='-float')
    options = traits.Str(desc='options', argstr='%s', position=2)
class BlurInMask(AFNICommand):
    """Blur a dataset only within a mask (AFNI 3dBlurInMask)."""
    _cmd = '3dBlurInMask'
    input_spec = BlurInMaskInputSpec
    output_spec = AFNICommandOutputSpec
class TCorrMapInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dTcorrMap."""
    in_file = File(exists=True, argstr='-input %s', mandatory=True, copyfile=False)
    # BUG FIX: xor must be a sequence of trait names; ('name') is just the
    # string 'name', which silently disabled the mutual-exclusion check.
    seeds = File(exists=True, argstr='-seed %s', xor=('seeds_width',))
    mask = File(exists=True, argstr='-mask %s')
    automask = traits.Bool(argstr='-automask')
    polort = traits.Int(argstr='-polort %d')
    bandpass = traits.Tuple((traits.Float(), traits.Float()),
                            argstr='-bpass %f %f')
    regress_out_timeseries = traits.File(exists=True, argstr='-ort %s')
    blur_fwhm = traits.Float(argstr='-Gblur %f')
    seeds_width = traits.Float(argstr='-Mseed %f', xor=('seeds',))
    # outputs
    mean_file = File(argstr='-Mean %s', suffix='_mean', name_source="in_file")
    zmean = File(argstr='-Zmean %s', suffix='_zmean', name_source="in_file")
    qmean = File(argstr='-Qmean %s', suffix='_qmean', name_source="in_file")
    pmean = File(argstr='-Pmean %s', suffix='_pmean', name_source="in_file")
    _thresh_opts = ('absolute_threshold',
                    'var_absolute_threshold',
                    'var_absolute_threshold_normalize')
    thresholds = traits.List(traits.Int())
    absolute_threshold = File(
        argstr='-Thresh %f %s', suffix='_thresh',
        name_source="in_file", xor=_thresh_opts)
    var_absolute_threshold = File(
        argstr='-VarThresh %f %f %f %s', suffix='_varthresh',
        name_source="in_file", xor=_thresh_opts)
    var_absolute_threshold_normalize = File(
        argstr='-VarThreshN %f %f %f %s', suffix='_varthreshn',
        name_source="in_file", xor=_thresh_opts)
    correlation_maps = File(
        argstr='-CorrMap %s', name_source="in_file")
    correlation_maps_masked = File(
        argstr='-CorrMask %s', name_source="in_file")
    _expr_opts = ('average_expr', 'average_expr_nonzero', 'sum_expr')
    expr = traits.Str()
    average_expr = File(
        argstr='-Aexpr %s %s', suffix='_aexpr',
        name_source='in_file', xor=_expr_opts)
    average_expr_nonzero = File(
        argstr='-Cexpr %s %s', suffix='_cexpr',
        name_source='in_file', xor=_expr_opts)
    sum_expr = File(
        argstr='-Sexpr %s %s', suffix='_sexpr',
        name_source='in_file', xor=_expr_opts)
    histogram_bin_numbers = traits.Int()
    histogram = File(
        name_source='in_file', argstr='-Hist %d %s', suffix='_hist')
class TCorrMapOutputSpec(TraitedSpec):
    """Output spec for AFNI 3dTcorrMap: one optional file per requested map."""
    mean_file = File()
    zmean = File()
    qmean = File()
    pmean = File()
    absolute_threshold = File()
    var_absolute_threshold = File()
    var_absolute_threshold_normalize = File()
    correlation_maps = File()
    correlation_maps_masked = File()
    average_expr = File()
    average_expr_nonzero = File()
    sum_expr = File()
    histogram = File()
class TCorrMap(AFNICommand):
    """Average correlation of each voxel with every other voxel (3dTcorrMap)."""
    _cmd = '3dTcorrMap'
    input_spec = TCorrMapInputSpec
    output_spec = TCorrMapOutputSpec
    _additional_metadata = ['suffix']

    def _format_arg(self, name, trait_spec, value):
        """Interpolate multi-value options into their argstr templates."""
        if name in self.inputs._thresh_opts:
            # BUG FIX: '%' binds tighter than '+', so the original computed
            # (argstr % thresholds) + [value], i.e. str + list -> TypeError.
            # The argstr expects the threshold values followed by the filename.
            return trait_spec.argstr % tuple(self.inputs.thresholds + [value])
        elif name in self.inputs._expr_opts:
            return trait_spec.argstr % (self.inputs.expr, value)
        elif name == 'histogram':
            return trait_spec.argstr % (self.inputs.histogram_bin_numbers,
                                        value)
        else:
            return super(TCorrMap, self)._format_arg(name, trait_spec, value)
class AutoboxInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dAutobox."""
    in_file = File(exists=True, mandatory=True, argstr='-input %s',
                   desc='input file', copyfile=False)
    padding = traits.Int(
        argstr='-npad %d',
        desc='Number of extra voxels to pad on each side of box')
    out_file = File(argstr="-prefix %s", name_source="in_file")
    no_clustering = traits.Bool(
        argstr='-noclust',
        desc="""Don't do any clustering to find box. Any non-zero
             voxel will be preserved in the cropped volume.
             The default method uses some clustering to find the
             cropping box, and will clip off small isolated blobs.""")
class AutoboxOuputSpec(TraitedSpec):  # N.B.: 'Ouput' misspelling kept for API compatibility
    """Output spec for 3dAutobox: bounding-box extents plus the cropped file."""
    x_min = traits.Int()
    x_max = traits.Int()
    y_min = traits.Int()
    y_max = traits.Int()
    z_min = traits.Int()
    z_max = traits.Int()
    out_file = File(desc='output file')
class Autobox(AFNICommand):
    """Crop an image to the smallest box containing its non-zero voxels
    (AFNI 3dAutobox)."""
    _cmd = '3dAutobox'
    input_spec = AutoboxInputSpec
    output_spec = AutoboxOuputSpec

    def aggregate_outputs(self, runtime=None, needed_outputs=None):
        """Parse the bounding-box extents printed on stderr."""
        # Raw strings avoid invalid-escape warnings for '\d'/'\.'; the
        # pattern itself is unchanged.
        pattern = (r'x=(?P<x_min>-?\d+)\.\.(?P<x_max>-?\d+) '
                   r'y=(?P<y_min>-?\d+)\.\.(?P<y_max>-?\d+) '
                   r'z=(?P<z_min>-?\d+)\.\.(?P<z_max>-?\d+)')
        outputs = self._outputs()
        for line in runtime.stderr.split('\n'):
            m = re.search(pattern, line)
            if m:
                # Group values are strings; convert to ints before assigning.
                outputs.set(**{k: int(v) for k, v in m.groupdict().items()})
        outputs.set(out_file=self._gen_filename('out_file'))
        return outputs

    def _gen_filename(self, name):
        # No explicit out_file means none was produced by this node.
        if name == 'out_file' and (not isdefined(self.inputs.out_file)):
            return Undefined
        return super(Autobox, self)._gen_filename(name)
class RetroicorInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dretroicor (cardiac/respiratory correction)."""
    in_file = File(desc='input file to 3dretroicor',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    # NOTE: unlike most specs here, out_file is mandatory (no name_template).
    out_file = File(desc='output image file name', argstr='-prefix %s', mandatory=True, position=1)
    card = File(desc='1D cardiac data file for cardiac correction',
                argstr='-card %s',
                position=-2,
                exists=True)
    resp = File(desc='1D respiratory waveform data for correction',
                argstr='-resp %s',
                position=-3,
                exists=True)
    threshold = traits.Int(desc='Threshold for detection of R-wave peaks in input (Make sure it is above the background noise level, Try 3/4 or 4/5 times range plus minimum)',
                           argstr='-threshold %d',
                           position=-4)
    order = traits.Int(desc='The order of the correction (2 is typical)',
                       argstr='-order %s',
                       position=-5)
    cardphase = File(desc='Filename for 1D cardiac phase output',
                     argstr='-cardphase %s',
                     position=-6,
                     hash_files=False)
    respphase = File(desc='Filename for 1D resp phase output',
                     argstr='-respphase %s',
                     position=-7,
                     hash_files=False)
class Retroicor(AFNICommand):
    """Retrospective cardiac/respiratory artifact correction (3dretroicor)."""
    _cmd = '3dretroicor'
    input_spec = RetroicorInputSpec
    output_spec = AFNICommandOutputSpec
class AFNItoNIFTIInputSpec(AFNICommandInputSpec):
    """Input spec for AFNI 3dAFNItoNIFTI."""
    in_file = File(desc='input file to 3dAFNItoNIFTI',
                   argstr='%s',
                   position=-1,
                   mandatory=True,
                   exists=True,
                   copyfile=False)
    out_file = File(name_template="%s.nii", desc='output image file name',
                    argstr='-prefix %s', name_source="in_file")
    # NOTE(review): plain class attribute, not trait metadata -- presumably
    # intended as hash_files=False on out_file; confirm before relying on it.
    hash_files = False
class AFNItoNIFTI(AFNICommand):
    """Convert an AFNI dataset to NIfTI format (3dAFNItoNIFTI)."""
    _cmd = '3dAFNItoNIFTI'
    input_spec = AFNItoNIFTIInputSpec
    output_spec = AFNICommandOutputSpec

    def _overload_extension(self, value, name=None):
        """Append '.nii' unless the name already has a recognized extension."""
        path, base, ext = split_filename(value)
        # ext is lowercased before the membership test, so the original
        # ".1D" entry could never match and has been dropped.
        if ext.lower() not in (".1d", ".nii.gz"):
            ext = ext + ".nii"
        return os.path.join(path, base + ext)

    def _gen_filename(self, name):
        # Return an absolute path so downstream nodes resolve it correctly.
        return os.path.abspath(super(AFNItoNIFTI, self)._gen_filename(name))
| true | true |
1c3bcce09af9641bcb3f62c534a49d3b56c740a5 | 3,282 | py | Python | models/.ipynb_checkpoints/position_encoding-checkpoint.py | jaehyek/deformable-DETR-2 | 930f36b9c491e35ce7870f2711c28243152ee058 | [
"MIT"
] | 12 | 2021-03-16T15:33:06.000Z | 2022-03-03T00:31:52.000Z | models/position_encoding.py | ver0z/Deformable-DETR | 7f1f4ffd1d716f681c7cbb2570e2c7a3d4bcf417 | [
"Apache-2.0"
] | 3 | 2021-07-15T20:55:13.000Z | 2022-01-20T11:56:05.000Z | models/position_encoding.py | ver0z/Deformable-DETR | 7f1f4ffd1d716f681c7cbb2570e2c7a3d4bcf417 | [
"Apache-2.0"
] | 6 | 2021-03-16T15:26:15.000Z | 2021-12-29T01:55:15.000Z |
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from util.misc import NestedTensor
class PositionEmbeddingSine(nn.Module):
    """Fixed sine/cosine positional encoding over a 2-D image grid.

    Generalizes the 'Attention Is All You Need' encoding to images by
    producing an independent sinusoidal code per spatial axis, computed
    only over unpadded (valid) positions of each sample in the batch.
    """

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        # A custom scale only makes sense for normalized coordinates.
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, tensor_list: NestedTensor):
        x = tensor_list.tensors
        mask = tensor_list.mask
        assert mask is not None
        valid = ~mask
        # Cumulative sums assign each pixel its (row, col) index among the
        # valid positions of its sample.
        y_embed = valid.cumsum(1, dtype=torch.float32)
        x_embed = valid.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
        # Per-channel frequencies: pairs of channels share one frequency.
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin (even channels) and cos (odd channels).
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        # (B, H, W, C) -> (B, C, H, W) with y-features first.
        return torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
class PositionEmbeddingLearned(nn.Module):
    """Learned absolute positional embedding with separate row/col tables."""

    def __init__(self, num_pos_feats=256):
        super().__init__()
        # 50 entries per axis: assumes feature maps no larger than 50x50 -- TODO confirm
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        # Same initialization order as before: rows, then columns.
        for table in (self.row_embed, self.col_embed):
            nn.init.uniform_(table.weight)

    def forward(self, tensor_list: NestedTensor):
        x = tensor_list.tensors
        h, w = x.shape[-2:]
        cols = torch.arange(w, device=x.device)
        rows = torch.arange(h, device=x.device)
        col_feats = self.col_embed(cols)
        row_feats = self.row_embed(rows)
        # Broadcast the per-column and per-row codes over the full grid and
        # concatenate them along the feature dimension.
        grid = torch.cat([
            col_feats.unsqueeze(0).repeat(h, 1, 1),
            row_feats.unsqueeze(1).repeat(1, w, 1),
        ], dim=-1)
        # (H, W, C) -> (B, C, H, W), replicated across the batch.
        return grid.permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
def build_position_encoding(args):
    """Instantiate the positional-encoding module named by ``args``.

    Half the hidden dimension is used per spatial axis; the two axis
    encodings are concatenated back to ``hidden_dim`` channels.
    """
    N_steps = args.hidden_dim // 2
    kind = args.position_embedding
    if kind in ('v2', 'sine'):
        # TODO find a better way of exposing other arguments
        return PositionEmbeddingSine(N_steps, normalize=True)
    if kind in ('v3', 'learned'):
        return PositionEmbeddingLearned(N_steps)
    raise ValueError(f"not supported {args.position_embedding}")
| 36.466667 | 103 | 0.618221 |
import math
import torch
from torch import nn
from util.misc import NestedTensor
class PositionEmbeddingSine(nn.Module):
    """Fixed sine/cosine positional encoding over a 2-D image grid,
    generalizing the Transformer encoding to images (one code per axis)."""
    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        # A custom scale only makes sense for normalized coordinates.
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale
    def forward(self, tensor_list: NestedTensor):
        x = tensor_list.tensors
        mask = tensor_list.mask
        assert mask is not None
        not_mask = ~mask
        # Cumulative sums give each pixel its (row, col) coordinate among
        # the unpadded positions of its sample.
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale
        # Per-channel frequencies: pairs of channels share one frequency.
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin (even channels) and cos (odd channels).
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        # (B, H, W, C) -> (B, C, H, W), y-features first.
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos
class PositionEmbeddingLearned(nn.Module):
    """Learned absolute positional embedding with separate row/col tables."""
    def __init__(self, num_pos_feats=256):
        super().__init__()
        # 50 entries per axis: assumes feature maps no larger than 50x50 -- TODO confirm
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()
    def reset_parameters(self):
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)
    def forward(self, tensor_list: NestedTensor):
        x = tensor_list.tensors
        h, w = x.shape[-2:]
        i = torch.arange(w, device=x.device)
        j = torch.arange(h, device=x.device)
        x_emb = self.col_embed(i)
        y_emb = self.row_embed(j)
        # Broadcast per-column and per-row codes over the grid, concatenate
        # along features, then reshape to (B, C, H, W) across the batch.
        pos = torch.cat([
            x_emb.unsqueeze(0).repeat(h, 1, 1),
            y_emb.unsqueeze(1).repeat(1, w, 1),
        ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
        return pos
def build_position_encoding(args):
    """Instantiate the positional-encoding module named by ``args``."""
    # Half the hidden dimension per spatial axis; the concatenated result
    # is hidden_dim channels wide.
    N_steps = args.hidden_dim // 2
    if args.position_embedding in ('v2', 'sine'):
        position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
    elif args.position_embedding in ('v3', 'learned'):
        position_embedding = PositionEmbeddingLearned(N_steps)
    else:
        raise ValueError(f"not supported {args.position_embedding}")
    return position_embedding
| true | true |
1c3bcd602197858f1701e139a57d06f0ab74f495 | 62,326 | py | Python | rcnn/lib/python3.6/site-packages/tensorflow/compiler/xla/xla_data_pb2.py | dreamingweaver/making_passportImage | 68f23411780ff82abe934dfae5fc04acb80f2c49 | [
"MIT"
] | 1 | 2019-01-12T13:17:32.000Z | 2019-01-12T13:17:32.000Z | rcnn/lib/python3.6/site-packages/tensorflow/compiler/xla/xla_data_pb2.py | dreamingweaver/making_passportImage | 68f23411780ff82abe934dfae5fc04acb80f2c49 | [
"MIT"
] | null | null | null | rcnn/lib/python3.6/site-packages/tensorflow/compiler/xla/xla_data_pb2.py | dreamingweaver/making_passportImage | 68f23411780ff82abe934dfae5fc04acb80f2c49 | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/compiler/xla/xla_data.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/compiler/xla/xla_data.proto',
package='xla',
syntax='proto3',
serialized_options=_b('\370\001\001'),
serialized_pb=_b('\n&tensorflow/compiler/xla/xla_data.proto\x12\x03xla\"\xb7\x01\n\rPaddingConfig\x12=\n\ndimensions\x18\x01 \x03(\x0b\x32).xla.PaddingConfig.PaddingConfigDimension\x1ag\n\x16PaddingConfigDimension\x12\x18\n\x10\x65\x64ge_padding_low\x18\x01 \x01(\x03\x12\x19\n\x11\x65\x64ge_padding_high\x18\x02 \x01(\x03\x12\x18\n\x10interior_padding\x18\x03 \x01(\x03\"\x9f\x01\n\x06Layout\x12\x1b\n\x06\x66ormat\x18\x04 \x01(\x0e\x32\x0b.xla.Format\x12\x16\n\x0eminor_to_major\x18\x01 \x03(\x03\x12\x19\n\x11padded_dimensions\x18\x02 \x03(\x03\x12(\n\rpadding_value\x18\x03 \x01(\x0e\x32\x11.xla.PaddingValue\x12\x1b\n\x13max_sparse_elements\x18\x05 \x01(\x03\"\x90\x01\n\x05Shape\x12(\n\x0c\x65lement_type\x18\x02 \x01(\x0e\x32\x12.xla.PrimitiveType\x12\x12\n\ndimensions\x18\x03 \x03(\x03\x12 \n\x0ctuple_shapes\x18\x04 \x03(\x0b\x32\n.xla.Shape\x12\x1b\n\x06layout\x18\x05 \x01(\x0b\x32\x0b.xla.LayoutJ\x04\x08\x01\x10\x02R\x04rank\"c\n\x0cProgramShape\x12\x1e\n\nparameters\x18\x01 \x03(\x0b\x32\n.xla.Shape\x12\x1a\n\x06result\x18\x02 \x01(\x0b\x32\n.xla.Shape\x12\x17\n\x0fparameter_names\x18\x03 \x03(\t\"D\n\x10\x43omputationStats\x12\x12\n\nflop_count\x18\x01 \x01(\x01\x12\x1c\n\x14transcendental_count\x18\x02 \x01(\x01\"X\n\nOpMetadata\x12\x0f\n\x07op_type\x18\x01 \x01(\t\x12\x0f\n\x07op_name\x18\x02 \x01(\t\x12\x13\n\x0bsource_file\x18\x03 \x01(\t\x12\x13\n\x0bsource_line\x18\x04 \x01(\x05\"\xc8\x01\n\x10\x45xecutionProfile\x12\x1d\n\x15\x63ompilation_cache_hit\x18\x01 \x01(\x08\x12\x17\n\x0f\x63ompile_time_ms\x18\x02 \x01(\x03\x12\x1b\n\x13\x63ompute_cycle_count\x18\x03 \x01(\x03\x12\x17\n\x0f\x63ompute_time_ns\x18\x04 \x01(\x03\x12$\n\x1c\x63ompute_and_transfer_time_ns\x18\x05 \x01(\x03\x12 \n\x18\x65xecutable_size_in_bytes\x18\x06 \x01(\x03\"!\n\x0f\x45xecutionHandle\x12\x0e\n\x06handle\x18\x01 \x01(\x03\"\"\n\x10GlobalDataHandle\x12\x0e\n\x06handle\x18\x01 \x01(\x03\"4\n\x0c\x44\x65viceHandle\x12\x0e\n\x06handle\x18\x01 
\x01(\x03\x12\x14\n\x0c\x64\x65vice_count\x18\x02 \x01(\x03\"\x1f\n\rChannelHandle\x12\x0e\n\x06handle\x18\x01 \x01(\x03\"\xc5\x01\n\x15\x44\x65viceAssignmentProto\x12\x15\n\rreplica_count\x18\x01 \x01(\x05\x12\x19\n\x11\x63omputation_count\x18\x02 \x01(\x05\x12I\n\x13\x63omputation_devices\x18\x03 \x03(\x0b\x32,.xla.DeviceAssignmentProto.ComputationDevice\x1a/\n\x11\x43omputationDevice\x12\x1a\n\x12replica_device_ids\x18\x01 \x03(\x05\"\x87\x02\n\x0cLiteralProto\x12\x19\n\x05shape\x18\x01 \x01(\x0b\x32\n.xla.Shape\x12\r\n\x05preds\x18\x02 \x03(\x08\x12\x0b\n\x03u8s\x18\x03 \x01(\x0c\x12\x0c\n\x04s32s\x18\x04 \x03(\x05\x12\x0c\n\x04s64s\x18\x05 \x03(\x03\x12\x0c\n\x04u32s\x18\x06 \x03(\r\x12\x0c\n\x04u64s\x18\x07 \x03(\x04\x12\x0c\n\x04\x66\x33\x32s\x18\x08 \x03(\x02\x12\x0c\n\x04\x66\x36\x34s\x18\t \x03(\x01\x12\x0c\n\x04\x63\x36\x34s\x18\x0c \x03(\x02\x12)\n\x0etuple_literals\x18\n \x03(\x0b\x32\x11.xla.LiteralProto\x12\x0c\n\x04\x66\x31\x36s\x18\x0b \x01(\x0c\x12\r\n\x05\x62\x66\x31\x36s\x18\r \x01(\x0c\x12\x16\n\x0esparse_indices\x18\x0e \x03(\x03\"\xa3\x01\n\x0fWindowDimension\x12\x0c\n\x04size\x18\x01 \x01(\x03\x12\x0e\n\x06stride\x18\x02 \x01(\x03\x12\x13\n\x0bpadding_low\x18\x03 \x01(\x03\x12\x14\n\x0cpadding_high\x18\x04 \x01(\x03\x12\x17\n\x0fwindow_dilation\x18\x05 \x01(\x03\x12\x15\n\rbase_dilation\x18\x06 \x01(\x03\x12\x17\n\x0fwindow_reversal\x18\x07 \x01(\x08\"2\n\x06Window\x12(\n\ndimensions\x18\x01 \x03(\x0b\x32\x14.xla.WindowDimension\"\x8f\x01\n\x16GatherDimensionNumbers\x12\x1a\n\x12output_window_dims\x18\x01 \x03(\x03\x12\x1a\n\x12\x65lided_window_dims\x18\x02 \x03(\x03\x12#\n\x1bgather_dims_to_operand_dims\x18\x03 \x03(\x03\x12\x18\n\x10index_vector_dim\x18\x04 \x01(\x03\"\xd8\x02\n\x1b\x43onvolutionDimensionNumbers\x12\x1d\n\x15input_batch_dimension\x18\x07 \x01(\x03\x12\x1f\n\x17input_feature_dimension\x18\x08 \x01(\x03\x12 \n\x18input_spatial_dimensions\x18\x0b \x03(\x03\x12&\n\x1ekernel_input_feature_dimension\x18\x03 
\x01(\x03\x12\'\n\x1fkernel_output_feature_dimension\x18\x04 \x01(\x03\x12!\n\x19kernel_spatial_dimensions\x18\x06 \x03(\x03\x12\x1e\n\x16output_batch_dimension\x18\t \x01(\x03\x12 \n\x18output_feature_dimension\x18\n \x01(\x03\x12!\n\x19output_spatial_dimensions\x18\x0c \x03(\x03\"\x99\x01\n\x13\x44otDimensionNumbers\x12\"\n\x1alhs_contracting_dimensions\x18\x01 \x03(\x03\x12\"\n\x1arhs_contracting_dimensions\x18\x02 \x03(\x03\x12\x1c\n\x14lhs_batch_dimensions\x18\x03 \x03(\x03\x12\x1c\n\x14rhs_batch_dimensions\x18\x04 \x03(\x03\"\xfa\x01\n\nOpSharding\x12\"\n\x04type\x18\x01 \x01(\x0e\x32\x14.xla.OpSharding.Type\x12\x1e\n\ntile_shape\x18\x02 \x01(\x0b\x32\n.xla.Shape\x12\"\n\x1atile_assignment_dimensions\x18\x03 \x03(\x03\x12\x1f\n\x17tile_assignment_devices\x18\x04 \x03(\x03\x12(\n\x0ftuple_shardings\x18\x05 \x03(\x0b\x32\x0f.xla.OpSharding\"9\n\x04Type\x12\x0e\n\nREPLICATED\x10\x00\x12\x0b\n\x07MAXIMAL\x10\x01\x12\t\n\x05TUPLE\x10\x02\x12\t\n\x05OTHER\x10\x03*\xcb\x01\n\rPrimitiveType\x12\x1a\n\x16PRIMITIVE_TYPE_INVALID\x10\x00\x12\x08\n\x04PRED\x10\x01\x12\x06\n\x02S8\x10\x02\x12\x07\n\x03S16\x10\x03\x12\x07\n\x03S32\x10\x04\x12\x07\n\x03S64\x10\x05\x12\x06\n\x02U8\x10\x06\x12\x07\n\x03U16\x10\x07\x12\x07\n\x03U32\x10\x08\x12\x07\n\x03U64\x10\t\x12\x07\n\x03\x46\x31\x36\x10\n\x12\x07\n\x03\x46\x33\x32\x10\x0b\x12\x08\n\x04\x42\x46\x31\x36\x10\x10\x12\x07\n\x03\x46\x36\x34\x10\x0c\x12\x07\n\x03\x43\x36\x34\x10\x0f\x12\t\n\x05TUPLE\x10\r\x12\n\n\x06OPAQUE\x10\x0e\x12\t\n\x05TOKEN\x10\x11*l\n\x0cPaddingValue\x12\x0f\n\x0bINVALID_PAD\x10\x00\x12\x0c\n\x08ZERO_PAD\x10\x01\x12\x0b\n\x07ONE_PAD\x10\x02\x12\x0e\n\nLOWEST_PAD\x10\x03\x12\x0f\n\x0bHIGHEST_PAD\x10\x04\x12\x0f\n\x0bUNKNOWN_PAD\x10\x05*3\n\x06\x46ormat\x12\x12\n\x0eINVALID_FORMAT\x10\x00\x12\t\n\x05\x44\x45NSE\x10\x01\x12\n\n\x06SPARSE\x10\x02*1\n\x07\x46\x66tType\x12\x07\n\x03\x46\x46T\x10\x00\x12\x08\n\x04IFFT\x10\x01\x12\x08\n\x04RFFT\x10\x02\x12\t\n\x05IRFFT\x10\x03*F\n\x12RandomDistribution\x12\x0f\n\x
0bRNG_INVALID\x10\x00\x12\x0f\n\x0bRNG_UNIFORM\x10\x01\x12\x0e\n\nRNG_NORMAL\x10\x02\x42\x03\xf8\x01\x01\x62\x06proto3')
)
_PRIMITIVETYPE = _descriptor.EnumDescriptor(
name='PrimitiveType',
full_name='xla.PrimitiveType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PRIMITIVE_TYPE_INVALID', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PRED', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='S8', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='S16', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='S32', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='S64', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='U8', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='U16', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='U32', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='U64', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='F16', index=10, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='F32', index=11, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BF16', index=12, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='F64', index=13, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='C64', index=14, number=15,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TUPLE', index=15, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OPAQUE', index=16, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TOKEN', index=17, number=17,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2751,
serialized_end=2954,
)
_sym_db.RegisterEnumDescriptor(_PRIMITIVETYPE)
PrimitiveType = enum_type_wrapper.EnumTypeWrapper(_PRIMITIVETYPE)
_PADDINGVALUE = _descriptor.EnumDescriptor(
name='PaddingValue',
full_name='xla.PaddingValue',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INVALID_PAD', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ZERO_PAD', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ONE_PAD', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LOWEST_PAD', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HIGHEST_PAD', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN_PAD', index=5, number=5,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2956,
serialized_end=3064,
)
_sym_db.RegisterEnumDescriptor(_PADDINGVALUE)
PaddingValue = enum_type_wrapper.EnumTypeWrapper(_PADDINGVALUE)
_FORMAT = _descriptor.EnumDescriptor(
name='Format',
full_name='xla.Format',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='INVALID_FORMAT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DENSE', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SPARSE', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=3066,
serialized_end=3117,
)
_sym_db.RegisterEnumDescriptor(_FORMAT)
Format = enum_type_wrapper.EnumTypeWrapper(_FORMAT)
_FFTTYPE = _descriptor.EnumDescriptor(
name='FftType',
full_name='xla.FftType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FFT', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IFFT', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RFFT', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IRFFT', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=3119,
serialized_end=3168,
)
_sym_db.RegisterEnumDescriptor(_FFTTYPE)
FftType = enum_type_wrapper.EnumTypeWrapper(_FFTTYPE)
_RANDOMDISTRIBUTION = _descriptor.EnumDescriptor(
name='RandomDistribution',
full_name='xla.RandomDistribution',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='RNG_INVALID', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RNG_UNIFORM', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RNG_NORMAL', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=3170,
serialized_end=3240,
)
_sym_db.RegisterEnumDescriptor(_RANDOMDISTRIBUTION)
RandomDistribution = enum_type_wrapper.EnumTypeWrapper(_RANDOMDISTRIBUTION)
PRIMITIVE_TYPE_INVALID = 0
PRED = 1
S8 = 2
S16 = 3
S32 = 4
S64 = 5
U8 = 6
U16 = 7
U32 = 8
U64 = 9
F16 = 10
F32 = 11
BF16 = 16
F64 = 12
C64 = 15
TUPLE = 13
OPAQUE = 14
TOKEN = 17
INVALID_PAD = 0
ZERO_PAD = 1
ONE_PAD = 2
LOWEST_PAD = 3
HIGHEST_PAD = 4
UNKNOWN_PAD = 5
INVALID_FORMAT = 0
DENSE = 1
SPARSE = 2
FFT = 0
IFFT = 1
RFFT = 2
IRFFT = 3
RNG_INVALID = 0
RNG_UNIFORM = 1
RNG_NORMAL = 2
_OPSHARDING_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='xla.OpSharding.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='REPLICATED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MAXIMAL', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TUPLE', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OTHER', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2691,
serialized_end=2748,
)
_sym_db.RegisterEnumDescriptor(_OPSHARDING_TYPE)
_PADDINGCONFIG_PADDINGCONFIGDIMENSION = _descriptor.Descriptor(
name='PaddingConfigDimension',
full_name='xla.PaddingConfig.PaddingConfigDimension',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='edge_padding_low', full_name='xla.PaddingConfig.PaddingConfigDimension.edge_padding_low', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='edge_padding_high', full_name='xla.PaddingConfig.PaddingConfigDimension.edge_padding_high', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='interior_padding', full_name='xla.PaddingConfig.PaddingConfigDimension.interior_padding', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=128,
serialized_end=231,
)
_PADDINGCONFIG = _descriptor.Descriptor(
name='PaddingConfig',
full_name='xla.PaddingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dimensions', full_name='xla.PaddingConfig.dimensions', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PADDINGCONFIG_PADDINGCONFIGDIMENSION, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=48,
serialized_end=231,
)
_LAYOUT = _descriptor.Descriptor(
name='Layout',
full_name='xla.Layout',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='format', full_name='xla.Layout.format', index=0,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='minor_to_major', full_name='xla.Layout.minor_to_major', index=1,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='padded_dimensions', full_name='xla.Layout.padded_dimensions', index=2,
number=2, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='padding_value', full_name='xla.Layout.padding_value', index=3,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_sparse_elements', full_name='xla.Layout.max_sparse_elements', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=234,
serialized_end=393,
)
_SHAPE = _descriptor.Descriptor(
name='Shape',
full_name='xla.Shape',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='element_type', full_name='xla.Shape.element_type', index=0,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dimensions', full_name='xla.Shape.dimensions', index=1,
number=3, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tuple_shapes', full_name='xla.Shape.tuple_shapes', index=2,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='layout', full_name='xla.Shape.layout', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=396,
serialized_end=540,
)
_PROGRAMSHAPE = _descriptor.Descriptor(
name='ProgramShape',
full_name='xla.ProgramShape',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameters', full_name='xla.ProgramShape.parameters', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='result', full_name='xla.ProgramShape.result', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parameter_names', full_name='xla.ProgramShape.parameter_names', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=542,
serialized_end=641,
)
_COMPUTATIONSTATS = _descriptor.Descriptor(
name='ComputationStats',
full_name='xla.ComputationStats',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='flop_count', full_name='xla.ComputationStats.flop_count', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transcendental_count', full_name='xla.ComputationStats.transcendental_count', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=643,
serialized_end=711,
)
_OPMETADATA = _descriptor.Descriptor(
name='OpMetadata',
full_name='xla.OpMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='op_type', full_name='xla.OpMetadata.op_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='op_name', full_name='xla.OpMetadata.op_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_file', full_name='xla.OpMetadata.source_file', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_line', full_name='xla.OpMetadata.source_line', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=713,
serialized_end=801,
)
_EXECUTIONPROFILE = _descriptor.Descriptor(
name='ExecutionProfile',
full_name='xla.ExecutionProfile',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='compilation_cache_hit', full_name='xla.ExecutionProfile.compilation_cache_hit', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='compile_time_ms', full_name='xla.ExecutionProfile.compile_time_ms', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='compute_cycle_count', full_name='xla.ExecutionProfile.compute_cycle_count', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='compute_time_ns', full_name='xla.ExecutionProfile.compute_time_ns', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='compute_and_transfer_time_ns', full_name='xla.ExecutionProfile.compute_and_transfer_time_ns', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='executable_size_in_bytes', full_name='xla.ExecutionProfile.executable_size_in_bytes', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=804,
serialized_end=1004,
)
_EXECUTIONHANDLE = _descriptor.Descriptor(
name='ExecutionHandle',
full_name='xla.ExecutionHandle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='handle', full_name='xla.ExecutionHandle.handle', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1006,
serialized_end=1039,
)
_GLOBALDATAHANDLE = _descriptor.Descriptor(
name='GlobalDataHandle',
full_name='xla.GlobalDataHandle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='handle', full_name='xla.GlobalDataHandle.handle', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1041,
serialized_end=1075,
)
_DEVICEHANDLE = _descriptor.Descriptor(
name='DeviceHandle',
full_name='xla.DeviceHandle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='handle', full_name='xla.DeviceHandle.handle', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='device_count', full_name='xla.DeviceHandle.device_count', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1077,
serialized_end=1129,
)
_CHANNELHANDLE = _descriptor.Descriptor(
name='ChannelHandle',
full_name='xla.ChannelHandle',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='handle', full_name='xla.ChannelHandle.handle', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1131,
serialized_end=1162,
)
_DEVICEASSIGNMENTPROTO_COMPUTATIONDEVICE = _descriptor.Descriptor(
name='ComputationDevice',
full_name='xla.DeviceAssignmentProto.ComputationDevice',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='replica_device_ids', full_name='xla.DeviceAssignmentProto.ComputationDevice.replica_device_ids', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1315,
serialized_end=1362,
)
_DEVICEASSIGNMENTPROTO = _descriptor.Descriptor(
name='DeviceAssignmentProto',
full_name='xla.DeviceAssignmentProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='replica_count', full_name='xla.DeviceAssignmentProto.replica_count', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='computation_count', full_name='xla.DeviceAssignmentProto.computation_count', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='computation_devices', full_name='xla.DeviceAssignmentProto.computation_devices', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEVICEASSIGNMENTPROTO_COMPUTATIONDEVICE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1165,
serialized_end=1362,
)
# Machine-generated protobuf Descriptor for the xla.LiteralProto message
# (protoc output for tensorflow/compiler/xla/xla_data.proto; see the
# @@protoc_insertion_point markers below).  Do not hand-edit: the field
# numbers, type/cpp_type/label codes, and the serialized_start/serialized_end
# byte offsets must stay in sync with serialized_pb in DESCRIPTOR.
# Per-field comments give the equivalent .proto declaration (type code 11 =
# message, 8 = bool, 12 = bytes, 5/3 = int32/int64, 13/4 = uint32/uint64,
# 2/1 = float/double; label 3 = repeated).  message_type references are None
# here and are resolved after all Descriptors exist.
_LITERALPROTO = _descriptor.Descriptor(
  name='LiteralProto',
  full_name='xla.LiteralProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # Shape shape = 1;
    _descriptor.FieldDescriptor(
      name='shape', full_name='xla.LiteralProto.shape', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated bool preds = 2;
    _descriptor.FieldDescriptor(
      name='preds', full_name='xla.LiteralProto.preds', index=1,
      number=2, type=8, cpp_type=7, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # bytes u8s = 3;
    _descriptor.FieldDescriptor(
      name='u8s', full_name='xla.LiteralProto.u8s', index=2,
      number=3, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int32 s32s = 4;
    _descriptor.FieldDescriptor(
      name='s32s', full_name='xla.LiteralProto.s32s', index=3,
      number=4, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int64 s64s = 5;
    _descriptor.FieldDescriptor(
      name='s64s', full_name='xla.LiteralProto.s64s', index=4,
      number=5, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated uint32 u32s = 6;
    _descriptor.FieldDescriptor(
      name='u32s', full_name='xla.LiteralProto.u32s', index=5,
      number=6, type=13, cpp_type=3, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated uint64 u64s = 7;
    _descriptor.FieldDescriptor(
      name='u64s', full_name='xla.LiteralProto.u64s', index=6,
      number=7, type=4, cpp_type=4, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated float f32s = 8;
    _descriptor.FieldDescriptor(
      name='f32s', full_name='xla.LiteralProto.f32s', index=7,
      number=8, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated double f64s = 9;
    _descriptor.FieldDescriptor(
      name='f64s', full_name='xla.LiteralProto.f64s', index=8,
      number=9, type=1, cpp_type=5, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated float c64s = 12;  (note: field number 12, out of declaration order)
    _descriptor.FieldDescriptor(
      name='c64s', full_name='xla.LiteralProto.c64s', index=9,
      number=12, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated LiteralProto tuple_literals = 10;  (self-referential; linked post-hoc)
    _descriptor.FieldDescriptor(
      name='tuple_literals', full_name='xla.LiteralProto.tuple_literals', index=10,
      number=10, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # bytes f16s = 11;
    _descriptor.FieldDescriptor(
      name='f16s', full_name='xla.LiteralProto.f16s', index=11,
      number=11, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # bytes bf16s = 13;
    _descriptor.FieldDescriptor(
      name='bf16s', full_name='xla.LiteralProto.bf16s', index=12,
      number=13, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int64 sparse_indices = 14;
    _descriptor.FieldDescriptor(
      name='sparse_indices', full_name='xla.LiteralProto.sparse_indices', index=13,
      number=14, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside serialized_pb.
  serialized_start=1365,
  serialized_end=1628,
)
# Machine-generated protobuf Descriptor for the xla.WindowDimension message
# (protoc output; do not hand-edit -- offsets and type codes must match
# serialized_pb).  Per-field comments give the equivalent .proto declaration
# (type code 3 = int64, 8 = bool; label 1 = optional/singular).
_WINDOWDIMENSION = _descriptor.Descriptor(
  name='WindowDimension',
  full_name='xla.WindowDimension',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # int64 size = 1;
    _descriptor.FieldDescriptor(
      name='size', full_name='xla.WindowDimension.size', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # int64 stride = 2;
    _descriptor.FieldDescriptor(
      name='stride', full_name='xla.WindowDimension.stride', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # int64 padding_low = 3;
    _descriptor.FieldDescriptor(
      name='padding_low', full_name='xla.WindowDimension.padding_low', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # int64 padding_high = 4;
    _descriptor.FieldDescriptor(
      name='padding_high', full_name='xla.WindowDimension.padding_high', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # int64 window_dilation = 5;
    _descriptor.FieldDescriptor(
      name='window_dilation', full_name='xla.WindowDimension.window_dilation', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # int64 base_dilation = 6;
    _descriptor.FieldDescriptor(
      name='base_dilation', full_name='xla.WindowDimension.base_dilation', index=5,
      number=6, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # bool window_reversal = 7;
    _descriptor.FieldDescriptor(
      name='window_reversal', full_name='xla.WindowDimension.window_reversal', index=6,
      number=7, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside serialized_pb.
  serialized_start=1631,
  serialized_end=1794,
)
# Machine-generated protobuf Descriptor for the xla.Window message -- a thin
# wrapper holding `repeated WindowDimension dimensions = 1;`.  The
# message_type for the field is None here and is wired to _WINDOWDIMENSION
# after all Descriptors are constructed.  Do not hand-edit.
_WINDOW = _descriptor.Descriptor(
  name='Window',
  full_name='xla.Window',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # repeated WindowDimension dimensions = 1;
    _descriptor.FieldDescriptor(
      name='dimensions', full_name='xla.Window.dimensions', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside serialized_pb.
  serialized_start=1796,
  serialized_end=1846,
)
# Machine-generated protobuf Descriptor for the xla.GatherDimensionNumbers
# message (protoc output; do not hand-edit).  Per-field comments give the
# equivalent .proto declaration (type code 3 = int64; label 3 = repeated,
# label 1 = singular).
_GATHERDIMENSIONNUMBERS = _descriptor.Descriptor(
  name='GatherDimensionNumbers',
  full_name='xla.GatherDimensionNumbers',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # repeated int64 output_window_dims = 1;
    _descriptor.FieldDescriptor(
      name='output_window_dims', full_name='xla.GatherDimensionNumbers.output_window_dims', index=0,
      number=1, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int64 elided_window_dims = 2;
    _descriptor.FieldDescriptor(
      name='elided_window_dims', full_name='xla.GatherDimensionNumbers.elided_window_dims', index=1,
      number=2, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int64 gather_dims_to_operand_dims = 3;
    _descriptor.FieldDescriptor(
      name='gather_dims_to_operand_dims', full_name='xla.GatherDimensionNumbers.gather_dims_to_operand_dims', index=2,
      number=3, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # int64 index_vector_dim = 4;
    _descriptor.FieldDescriptor(
      name='index_vector_dim', full_name='xla.GatherDimensionNumbers.index_vector_dim', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside serialized_pb.
  serialized_start=1849,
  serialized_end=1992,
)
# Machine-generated protobuf Descriptor for the
# xla.ConvolutionDimensionNumbers message (protoc output; do not hand-edit).
# Field numbers are intentionally non-sequential relative to declaration
# order (index) -- they must match the .proto source.  Per-field comments
# give the equivalent .proto declaration (type 3 = int64; label 3 = repeated).
_CONVOLUTIONDIMENSIONNUMBERS = _descriptor.Descriptor(
  name='ConvolutionDimensionNumbers',
  full_name='xla.ConvolutionDimensionNumbers',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # int64 input_batch_dimension = 7;
    _descriptor.FieldDescriptor(
      name='input_batch_dimension', full_name='xla.ConvolutionDimensionNumbers.input_batch_dimension', index=0,
      number=7, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # int64 input_feature_dimension = 8;
    _descriptor.FieldDescriptor(
      name='input_feature_dimension', full_name='xla.ConvolutionDimensionNumbers.input_feature_dimension', index=1,
      number=8, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int64 input_spatial_dimensions = 11;
    _descriptor.FieldDescriptor(
      name='input_spatial_dimensions', full_name='xla.ConvolutionDimensionNumbers.input_spatial_dimensions', index=2,
      number=11, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # int64 kernel_input_feature_dimension = 3;
    _descriptor.FieldDescriptor(
      name='kernel_input_feature_dimension', full_name='xla.ConvolutionDimensionNumbers.kernel_input_feature_dimension', index=3,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # int64 kernel_output_feature_dimension = 4;
    _descriptor.FieldDescriptor(
      name='kernel_output_feature_dimension', full_name='xla.ConvolutionDimensionNumbers.kernel_output_feature_dimension', index=4,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int64 kernel_spatial_dimensions = 6;
    _descriptor.FieldDescriptor(
      name='kernel_spatial_dimensions', full_name='xla.ConvolutionDimensionNumbers.kernel_spatial_dimensions', index=5,
      number=6, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # int64 output_batch_dimension = 9;
    _descriptor.FieldDescriptor(
      name='output_batch_dimension', full_name='xla.ConvolutionDimensionNumbers.output_batch_dimension', index=6,
      number=9, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # int64 output_feature_dimension = 10;
    _descriptor.FieldDescriptor(
      name='output_feature_dimension', full_name='xla.ConvolutionDimensionNumbers.output_feature_dimension', index=7,
      number=10, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int64 output_spatial_dimensions = 12;
    _descriptor.FieldDescriptor(
      name='output_spatial_dimensions', full_name='xla.ConvolutionDimensionNumbers.output_spatial_dimensions', index=8,
      number=12, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside serialized_pb.
  serialized_start=1995,
  serialized_end=2339,
)
# Machine-generated protobuf Descriptor for the xla.DotDimensionNumbers
# message (protoc output; do not hand-edit).  All four fields are
# `repeated int64` (type 3, label 3), numbered 1-4.
_DOTDIMENSIONNUMBERS = _descriptor.Descriptor(
  name='DotDimensionNumbers',
  full_name='xla.DotDimensionNumbers',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # repeated int64 lhs_contracting_dimensions = 1;
    _descriptor.FieldDescriptor(
      name='lhs_contracting_dimensions', full_name='xla.DotDimensionNumbers.lhs_contracting_dimensions', index=0,
      number=1, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int64 rhs_contracting_dimensions = 2;
    _descriptor.FieldDescriptor(
      name='rhs_contracting_dimensions', full_name='xla.DotDimensionNumbers.rhs_contracting_dimensions', index=1,
      number=2, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int64 lhs_batch_dimensions = 3;
    _descriptor.FieldDescriptor(
      name='lhs_batch_dimensions', full_name='xla.DotDimensionNumbers.lhs_batch_dimensions', index=2,
      number=3, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int64 rhs_batch_dimensions = 4;
    _descriptor.FieldDescriptor(
      name='rhs_batch_dimensions', full_name='xla.DotDimensionNumbers.rhs_batch_dimensions', index=3,
      number=4, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside serialized_pb.
  serialized_start=2342,
  serialized_end=2495,
)
# Machine-generated protobuf Descriptor for the xla.OpSharding message
# (protoc output; do not hand-edit).  The `type` field is an enum (type
# code 14) backed by the nested _OPSHARDING_TYPE descriptor listed in
# enum_types below; the enum_type/message_type references on the fields are
# None here and are resolved after all Descriptors are constructed.
_OPSHARDING = _descriptor.Descriptor(
  name='OpSharding',
  full_name='xla.OpSharding',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    # OpSharding.Type type = 1;  (enum; linked to _OPSHARDING_TYPE post-hoc)
    _descriptor.FieldDescriptor(
      name='type', full_name='xla.OpSharding.type', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # Shape tile_shape = 2;  (linked to _SHAPE post-hoc)
    _descriptor.FieldDescriptor(
      name='tile_shape', full_name='xla.OpSharding.tile_shape', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int64 tile_assignment_dimensions = 3;
    _descriptor.FieldDescriptor(
      name='tile_assignment_dimensions', full_name='xla.OpSharding.tile_assignment_dimensions', index=2,
      number=3, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated int64 tile_assignment_devices = 4;
    _descriptor.FieldDescriptor(
      name='tile_assignment_devices', full_name='xla.OpSharding.tile_assignment_devices', index=3,
      number=4, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    # repeated OpSharding tuple_shardings = 5;  (self-referential; linked post-hoc)
    _descriptor.FieldDescriptor(
      name='tuple_shardings', full_name='xla.OpSharding.tuple_shardings', index=4,
      number=5, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _OPSHARDING_TYPE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message's definition inside serialized_pb.
  serialized_start=2498,
  serialized_end=2748,
)
# ---------------------------------------------------------------------------
# Post-construction linking (machine-generated).  Message/enum cross
# references are deliberately left as None inside the Descriptor literals
# above (some are self- or forward-referential, e.g. LiteralProto contains
# repeated LiteralProto) and are patched in here, after every Descriptor
# object exists.  Then all messages and enums are registered on the file
# DESCRIPTOR and the default symbol database.
# ---------------------------------------------------------------------------
_PADDINGCONFIG_PADDINGCONFIGDIMENSION.containing_type = _PADDINGCONFIG
_PADDINGCONFIG.fields_by_name['dimensions'].message_type = _PADDINGCONFIG_PADDINGCONFIGDIMENSION
_LAYOUT.fields_by_name['format'].enum_type = _FORMAT
_LAYOUT.fields_by_name['padding_value'].enum_type = _PADDINGVALUE
_SHAPE.fields_by_name['element_type'].enum_type = _PRIMITIVETYPE
_SHAPE.fields_by_name['tuple_shapes'].message_type = _SHAPE
_SHAPE.fields_by_name['layout'].message_type = _LAYOUT
_PROGRAMSHAPE.fields_by_name['parameters'].message_type = _SHAPE
_PROGRAMSHAPE.fields_by_name['result'].message_type = _SHAPE
_DEVICEASSIGNMENTPROTO_COMPUTATIONDEVICE.containing_type = _DEVICEASSIGNMENTPROTO
_DEVICEASSIGNMENTPROTO.fields_by_name['computation_devices'].message_type = _DEVICEASSIGNMENTPROTO_COMPUTATIONDEVICE
_LITERALPROTO.fields_by_name['shape'].message_type = _SHAPE
_LITERALPROTO.fields_by_name['tuple_literals'].message_type = _LITERALPROTO
_WINDOW.fields_by_name['dimensions'].message_type = _WINDOWDIMENSION
_OPSHARDING.fields_by_name['type'].enum_type = _OPSHARDING_TYPE
_OPSHARDING.fields_by_name['tile_shape'].message_type = _SHAPE
_OPSHARDING.fields_by_name['tuple_shardings'].message_type = _OPSHARDING
_OPSHARDING_TYPE.containing_type = _OPSHARDING
# Register every top-level message type on the file descriptor.
DESCRIPTOR.message_types_by_name['PaddingConfig'] = _PADDINGCONFIG
DESCRIPTOR.message_types_by_name['Layout'] = _LAYOUT
DESCRIPTOR.message_types_by_name['Shape'] = _SHAPE
DESCRIPTOR.message_types_by_name['ProgramShape'] = _PROGRAMSHAPE
DESCRIPTOR.message_types_by_name['ComputationStats'] = _COMPUTATIONSTATS
DESCRIPTOR.message_types_by_name['OpMetadata'] = _OPMETADATA
DESCRIPTOR.message_types_by_name['ExecutionProfile'] = _EXECUTIONPROFILE
DESCRIPTOR.message_types_by_name['ExecutionHandle'] = _EXECUTIONHANDLE
DESCRIPTOR.message_types_by_name['GlobalDataHandle'] = _GLOBALDATAHANDLE
DESCRIPTOR.message_types_by_name['DeviceHandle'] = _DEVICEHANDLE
DESCRIPTOR.message_types_by_name['ChannelHandle'] = _CHANNELHANDLE
DESCRIPTOR.message_types_by_name['DeviceAssignmentProto'] = _DEVICEASSIGNMENTPROTO
DESCRIPTOR.message_types_by_name['LiteralProto'] = _LITERALPROTO
DESCRIPTOR.message_types_by_name['WindowDimension'] = _WINDOWDIMENSION
DESCRIPTOR.message_types_by_name['Window'] = _WINDOW
DESCRIPTOR.message_types_by_name['GatherDimensionNumbers'] = _GATHERDIMENSIONNUMBERS
DESCRIPTOR.message_types_by_name['ConvolutionDimensionNumbers'] = _CONVOLUTIONDIMENSIONNUMBERS
DESCRIPTOR.message_types_by_name['DotDimensionNumbers'] = _DOTDIMENSIONNUMBERS
DESCRIPTOR.message_types_by_name['OpSharding'] = _OPSHARDING
# Register the file-level enum types.
DESCRIPTOR.enum_types_by_name['PrimitiveType'] = _PRIMITIVETYPE
DESCRIPTOR.enum_types_by_name['PaddingValue'] = _PADDINGVALUE
DESCRIPTOR.enum_types_by_name['Format'] = _FORMAT
DESCRIPTOR.enum_types_by_name['FftType'] = _FFTTYPE
DESCRIPTOR.enum_types_by_name['RandomDistribution'] = _RANDOMDISTRIBUTION
# Make the file descriptor resolvable through the default symbol database.
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# ---------------------------------------------------------------------------
# Concrete message classes (machine-generated).  GeneratedProtocolMessageType
# is a metaclass call: it synthesizes a _message.Message subclass whose
# fields/serialization come entirely from the DESCRIPTOR passed in the class
# dict.  Each class is then registered with the default symbol database so it
# can be looked up by full name.  The @@protoc_insertion_point comments are
# protoc plugin markers -- leave them in place.
# ---------------------------------------------------------------------------
PaddingConfig = _reflection.GeneratedProtocolMessageType('PaddingConfig', (_message.Message,), dict(
  # Nested message: xla.PaddingConfig.PaddingConfigDimension.
  PaddingConfigDimension = _reflection.GeneratedProtocolMessageType('PaddingConfigDimension', (_message.Message,), dict(
    DESCRIPTOR = _PADDINGCONFIG_PADDINGCONFIGDIMENSION,
    __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
    # @@protoc_insertion_point(class_scope:xla.PaddingConfig.PaddingConfigDimension)
    ))
  ,
  DESCRIPTOR = _PADDINGCONFIG,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.PaddingConfig)
  ))
_sym_db.RegisterMessage(PaddingConfig)
_sym_db.RegisterMessage(PaddingConfig.PaddingConfigDimension)
Layout = _reflection.GeneratedProtocolMessageType('Layout', (_message.Message,), dict(
  DESCRIPTOR = _LAYOUT,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.Layout)
  ))
_sym_db.RegisterMessage(Layout)
Shape = _reflection.GeneratedProtocolMessageType('Shape', (_message.Message,), dict(
  DESCRIPTOR = _SHAPE,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.Shape)
  ))
_sym_db.RegisterMessage(Shape)
ProgramShape = _reflection.GeneratedProtocolMessageType('ProgramShape', (_message.Message,), dict(
  DESCRIPTOR = _PROGRAMSHAPE,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.ProgramShape)
  ))
_sym_db.RegisterMessage(ProgramShape)
ComputationStats = _reflection.GeneratedProtocolMessageType('ComputationStats', (_message.Message,), dict(
  DESCRIPTOR = _COMPUTATIONSTATS,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.ComputationStats)
  ))
_sym_db.RegisterMessage(ComputationStats)
OpMetadata = _reflection.GeneratedProtocolMessageType('OpMetadata', (_message.Message,), dict(
  DESCRIPTOR = _OPMETADATA,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.OpMetadata)
  ))
_sym_db.RegisterMessage(OpMetadata)
ExecutionProfile = _reflection.GeneratedProtocolMessageType('ExecutionProfile', (_message.Message,), dict(
  DESCRIPTOR = _EXECUTIONPROFILE,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.ExecutionProfile)
  ))
_sym_db.RegisterMessage(ExecutionProfile)
ExecutionHandle = _reflection.GeneratedProtocolMessageType('ExecutionHandle', (_message.Message,), dict(
  DESCRIPTOR = _EXECUTIONHANDLE,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.ExecutionHandle)
  ))
_sym_db.RegisterMessage(ExecutionHandle)
GlobalDataHandle = _reflection.GeneratedProtocolMessageType('GlobalDataHandle', (_message.Message,), dict(
  DESCRIPTOR = _GLOBALDATAHANDLE,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.GlobalDataHandle)
  ))
_sym_db.RegisterMessage(GlobalDataHandle)
DeviceHandle = _reflection.GeneratedProtocolMessageType('DeviceHandle', (_message.Message,), dict(
  DESCRIPTOR = _DEVICEHANDLE,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.DeviceHandle)
  ))
_sym_db.RegisterMessage(DeviceHandle)
ChannelHandle = _reflection.GeneratedProtocolMessageType('ChannelHandle', (_message.Message,), dict(
  DESCRIPTOR = _CHANNELHANDLE,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.ChannelHandle)
  ))
_sym_db.RegisterMessage(ChannelHandle)
DeviceAssignmentProto = _reflection.GeneratedProtocolMessageType('DeviceAssignmentProto', (_message.Message,), dict(
  # Nested message: xla.DeviceAssignmentProto.ComputationDevice.
  ComputationDevice = _reflection.GeneratedProtocolMessageType('ComputationDevice', (_message.Message,), dict(
    DESCRIPTOR = _DEVICEASSIGNMENTPROTO_COMPUTATIONDEVICE,
    __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
    # @@protoc_insertion_point(class_scope:xla.DeviceAssignmentProto.ComputationDevice)
    ))
  ,
  DESCRIPTOR = _DEVICEASSIGNMENTPROTO,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.DeviceAssignmentProto)
  ))
_sym_db.RegisterMessage(DeviceAssignmentProto)
_sym_db.RegisterMessage(DeviceAssignmentProto.ComputationDevice)
LiteralProto = _reflection.GeneratedProtocolMessageType('LiteralProto', (_message.Message,), dict(
  DESCRIPTOR = _LITERALPROTO,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.LiteralProto)
  ))
_sym_db.RegisterMessage(LiteralProto)
WindowDimension = _reflection.GeneratedProtocolMessageType('WindowDimension', (_message.Message,), dict(
  DESCRIPTOR = _WINDOWDIMENSION,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.WindowDimension)
  ))
_sym_db.RegisterMessage(WindowDimension)
Window = _reflection.GeneratedProtocolMessageType('Window', (_message.Message,), dict(
  DESCRIPTOR = _WINDOW,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.Window)
  ))
_sym_db.RegisterMessage(Window)
GatherDimensionNumbers = _reflection.GeneratedProtocolMessageType('GatherDimensionNumbers', (_message.Message,), dict(
  DESCRIPTOR = _GATHERDIMENSIONNUMBERS,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.GatherDimensionNumbers)
  ))
_sym_db.RegisterMessage(GatherDimensionNumbers)
ConvolutionDimensionNumbers = _reflection.GeneratedProtocolMessageType('ConvolutionDimensionNumbers', (_message.Message,), dict(
  DESCRIPTOR = _CONVOLUTIONDIMENSIONNUMBERS,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.ConvolutionDimensionNumbers)
  ))
_sym_db.RegisterMessage(ConvolutionDimensionNumbers)
DotDimensionNumbers = _reflection.GeneratedProtocolMessageType('DotDimensionNumbers', (_message.Message,), dict(
  DESCRIPTOR = _DOTDIMENSIONNUMBERS,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.DotDimensionNumbers)
  ))
_sym_db.RegisterMessage(DotDimensionNumbers)
OpSharding = _reflection.GeneratedProtocolMessageType('OpSharding', (_message.Message,), dict(
  DESCRIPTOR = _OPSHARDING,
  __module__ = 'tensorflow.compiler.xla.xla_data_pb2'
  # @@protoc_insertion_point(class_scope:xla.OpSharding)
  ))
_sym_db.RegisterMessage(OpSharding)
# Clear the file-level options cache so they are re-parsed lazily on access.
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 39.723391 | 6,055 | 0.744794 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/compiler/xla/xla_data.proto',
package='xla',
syntax='proto3',
serialized_options=_b('\370\001\001'),
serialized_pb=_b('\n&tensorflow/compiler/xla/xla_data.proto\x12\x03xla\"\xb7\x01\n\rPaddingConfig\x12=\n\ndimensions\x18\x01 \x03(\x0b\x32).xla.PaddingConfig.PaddingConfigDimension\x1ag\n\x16PaddingConfigDimension\x12\x18\n\x10\x65\x64ge_padding_low\x18\x01 \x01(\x03\x12\x19\n\x11\x65\x64ge_padding_high\x18\x02 \x01(\x03\x12\x18\n\x10interior_padding\x18\x03 \x01(\x03\"\x9f\x01\n\x06Layout\x12\x1b\n\x06\x66ormat\x18\x04 \x01(\x0e\x32\x0b.xla.Format\x12\x16\n\x0eminor_to_major\x18\x01 \x03(\x03\x12\x19\n\x11padded_dimensions\x18\x02 \x03(\x03\x12(\n\rpadding_value\x18\x03 \x01(\x0e\x32\x11.xla.PaddingValue\x12\x1b\n\x13max_sparse_elements\x18\x05 \x01(\x03\"\x90\x01\n\x05Shape\x12(\n\x0c\x65lement_type\x18\x02 \x01(\x0e\x32\x12.xla.PrimitiveType\x12\x12\n\ndimensions\x18\x03 \x03(\x03\x12 \n\x0ctuple_shapes\x18\x04 \x03(\x0b\x32\n.xla.Shape\x12\x1b\n\x06layout\x18\x05 \x01(\x0b\x32\x0b.xla.LayoutJ\x04\x08\x01\x10\x02R\x04rank\"c\n\x0cProgramShape\x12\x1e\n\nparameters\x18\x01 \x03(\x0b\x32\n.xla.Shape\x12\x1a\n\x06result\x18\x02 \x01(\x0b\x32\n.xla.Shape\x12\x17\n\x0fparameter_names\x18\x03 \x03(\t\"D\n\x10\x43omputationStats\x12\x12\n\nflop_count\x18\x01 \x01(\x01\x12\x1c\n\x14transcendental_count\x18\x02 \x01(\x01\"X\n\nOpMetadata\x12\x0f\n\x07op_type\x18\x01 \x01(\t\x12\x0f\n\x07op_name\x18\x02 \x01(\t\x12\x13\n\x0bsource_file\x18\x03 \x01(\t\x12\x13\n\x0bsource_line\x18\x04 \x01(\x05\"\xc8\x01\n\x10\x45xecutionProfile\x12\x1d\n\x15\x63ompilation_cache_hit\x18\x01 \x01(\x08\x12\x17\n\x0f\x63ompile_time_ms\x18\x02 \x01(\x03\x12\x1b\n\x13\x63ompute_cycle_count\x18\x03 \x01(\x03\x12\x17\n\x0f\x63ompute_time_ns\x18\x04 \x01(\x03\x12$\n\x1c\x63ompute_and_transfer_time_ns\x18\x05 \x01(\x03\x12 \n\x18\x65xecutable_size_in_bytes\x18\x06 \x01(\x03\"!\n\x0f\x45xecutionHandle\x12\x0e\n\x06handle\x18\x01 \x01(\x03\"\"\n\x10GlobalDataHandle\x12\x0e\n\x06handle\x18\x01 \x01(\x03\"4\n\x0c\x44\x65viceHandle\x12\x0e\n\x06handle\x18\x01 
\x01(\x03\x12\x14\n\x0c\x64\x65vice_count\x18\x02 \x01(\x03\"\x1f\n\rChannelHandle\x12\x0e\n\x06handle\x18\x01 \x01(\x03\"\xc5\x01\n\x15\x44\x65viceAssignmentProto\x12\x15\n\rreplica_count\x18\x01 \x01(\x05\x12\x19\n\x11\x63omputation_count\x18\x02 \x01(\x05\x12I\n\x13\x63omputation_devices\x18\x03 \x03(\x0b\x32,.xla.DeviceAssignmentProto.ComputationDevice\x1a/\n\x11\x43omputationDevice\x12\x1a\n\x12replica_device_ids\x18\x01 \x03(\x05\"\x87\x02\n\x0cLiteralProto\x12\x19\n\x05shape\x18\x01 \x01(\x0b\x32\n.xla.Shape\x12\r\n\x05preds\x18\x02 \x03(\x08\x12\x0b\n\x03u8s\x18\x03 \x01(\x0c\x12\x0c\n\x04s32s\x18\x04 \x03(\x05\x12\x0c\n\x04s64s\x18\x05 \x03(\x03\x12\x0c\n\x04u32s\x18\x06 \x03(\r\x12\x0c\n\x04u64s\x18\x07 \x03(\x04\x12\x0c\n\x04\x66\x33\x32s\x18\x08 \x03(\x02\x12\x0c\n\x04\x66\x36\x34s\x18\t \x03(\x01\x12\x0c\n\x04\x63\x36\x34s\x18\x0c \x03(\x02\x12)\n\x0etuple_literals\x18\n \x03(\x0b\x32\x11.xla.LiteralProto\x12\x0c\n\x04\x66\x31\x36s\x18\x0b \x01(\x0c\x12\r\n\x05\x62\x66\x31\x36s\x18\r \x01(\x0c\x12\x16\n\x0esparse_indices\x18\x0e \x03(\x03\"\xa3\x01\n\x0fWindowDimension\x12\x0c\n\x04size\x18\x01 \x01(\x03\x12\x0e\n\x06stride\x18\x02 \x01(\x03\x12\x13\n\x0bpadding_low\x18\x03 \x01(\x03\x12\x14\n\x0cpadding_high\x18\x04 \x01(\x03\x12\x17\n\x0fwindow_dilation\x18\x05 \x01(\x03\x12\x15\n\rbase_dilation\x18\x06 \x01(\x03\x12\x17\n\x0fwindow_reversal\x18\x07 \x01(\x08\"2\n\x06Window\x12(\n\ndimensions\x18\x01 \x03(\x0b\x32\x14.xla.WindowDimension\"\x8f\x01\n\x16GatherDimensionNumbers\x12\x1a\n\x12output_window_dims\x18\x01 \x03(\x03\x12\x1a\n\x12\x65lided_window_dims\x18\x02 \x03(\x03\x12#\n\x1bgather_dims_to_operand_dims\x18\x03 \x03(\x03\x12\x18\n\x10index_vector_dim\x18\x04 \x01(\x03\"\xd8\x02\n\x1b\x43onvolutionDimensionNumbers\x12\x1d\n\x15input_batch_dimension\x18\x07 \x01(\x03\x12\x1f\n\x17input_feature_dimension\x18\x08 \x01(\x03\x12 \n\x18input_spatial_dimensions\x18\x0b \x03(\x03\x12&\n\x1ekernel_input_feature_dimension\x18\x03 
\x01(\x03\x12\'\n\x1fkernel_output_feature_dimension\x18\x04 \x01(\x03\x12!\n\x19kernel_spatial_dimensions\x18\x06 \x03(\x03\x12\x1e\n\x16output_batch_dimension\x18\t \x01(\x03\x12 \n\x18output_feature_dimension\x18\n \x01(\x03\x12!\n\x19output_spatial_dimensions\x18\x0c \x03(\x03\"\x99\x01\n\x13\x44otDimensionNumbers\x12\"\n\x1alhs_contracting_dimensions\x18\x01 \x03(\x03\x12\"\n\x1arhs_contracting_dimensions\x18\x02 \x03(\x03\x12\x1c\n\x14lhs_batch_dimensions\x18\x03 \x03(\x03\x12\x1c\n\x14rhs_batch_dimensions\x18\x04 \x03(\x03\"\xfa\x01\n\nOpSharding\x12\"\n\x04type\x18\x01 \x01(\x0e\x32\x14.xla.OpSharding.Type\x12\x1e\n\ntile_shape\x18\x02 \x01(\x0b\x32\n.xla.Shape\x12\"\n\x1atile_assignment_dimensions\x18\x03 \x03(\x03\x12\x1f\n\x17tile_assignment_devices\x18\x04 \x03(\x03\x12(\n\x0ftuple_shardings\x18\x05 \x03(\x0b\x32\x0f.xla.OpSharding\"9\n\x04Type\x12\x0e\n\nREPLICATED\x10\x00\x12\x0b\n\x07MAXIMAL\x10\x01\x12\t\n\x05TUPLE\x10\x02\x12\t\n\x05OTHER\x10\x03*\xcb\x01\n\rPrimitiveType\x12\x1a\n\x16PRIMITIVE_TYPE_INVALID\x10\x00\x12\x08\n\x04PRED\x10\x01\x12\x06\n\x02S8\x10\x02\x12\x07\n\x03S16\x10\x03\x12\x07\n\x03S32\x10\x04\x12\x07\n\x03S64\x10\x05\x12\x06\n\x02U8\x10\x06\x12\x07\n\x03U16\x10\x07\x12\x07\n\x03U32\x10\x08\x12\x07\n\x03U64\x10\t\x12\x07\n\x03\x46\x31\x36\x10\n\x12\x07\n\x03\x46\x33\x32\x10\x0b\x12\x08\n\x04\x42\x46\x31\x36\x10\x10\x12\x07\n\x03\x46\x36\x34\x10\x0c\x12\x07\n\x03\x43\x36\x34\x10\x0f\x12\t\n\x05TUPLE\x10\r\x12\n\n\x06OPAQUE\x10\x0e\x12\t\n\x05TOKEN\x10\x11*l\n\x0cPaddingValue\x12\x0f\n\x0bINVALID_PAD\x10\x00\x12\x0c\n\x08ZERO_PAD\x10\x01\x12\x0b\n\x07ONE_PAD\x10\x02\x12\x0e\n\nLOWEST_PAD\x10\x03\x12\x0f\n\x0bHIGHEST_PAD\x10\x04\x12\x0f\n\x0bUNKNOWN_PAD\x10\x05*3\n\x06\x46ormat\x12\x12\n\x0eINVALID_FORMAT\x10\x00\x12\t\n\x05\x44\x45NSE\x10\x01\x12\n\n\x06SPARSE\x10\x02*1\n\x07\x46\x66tType\x12\x07\n\x03\x46\x46T\x10\x00\x12\x08\n\x04IFFT\x10\x01\x12\x08\n\x04RFFT\x10\x02\x12\t\n\x05IRFFT\x10\x03*F\n\x12RandomDistribution\x12\x0f\n\x
0bRNG_INVALID\x10\x00\x12\x0f\n\x0bRNG_UNIFORM\x10\x01\x12\x0e\n\nRNG_NORMAL\x10\x02\x42\x03\xf8\x01\x01\x62\x06proto3')
)
# Generated EnumDescriptor for the xla.PrimitiveType proto enum: XLA element
# types (PRED, signed/unsigned ints, floats, complex, TUPLE/OPAQUE/TOKEN).
# Note the wire numbers are not sequential with the index (e.g. BF16 is
# number 16, F64 is 12, C64 is 15). Generated by protoc from
# tensorflow/compiler/xla/xla_data.proto — regenerate instead of hand-editing.
_PRIMITIVETYPE = _descriptor.EnumDescriptor(
  name='PrimitiveType',
  full_name='xla.PrimitiveType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='PRIMITIVE_TYPE_INVALID', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PRED', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='S8', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='S16', index=3, number=3,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='S32', index=4, number=4,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='S64', index=5, number=5,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='U8', index=6, number=6,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='U16', index=7, number=7,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='U32', index=8, number=8,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='U64', index=9, number=9,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='F16', index=10, number=10,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='F32', index=11, number=11,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='BF16', index=12, number=16,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='F64', index=13, number=12,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='C64', index=14, number=15,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TUPLE', index=15, number=13,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='OPAQUE', index=16, number=14,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TOKEN', index=17, number=17,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2751,
  serialized_end=2954,
)
# Register the descriptor and expose the user-facing enum wrapper.
_sym_db.RegisterEnumDescriptor(_PRIMITIVETYPE)
PrimitiveType = enum_type_wrapper.EnumTypeWrapper(_PRIMITIVETYPE)
# Generated EnumDescriptor for xla.PaddingValue (ZERO/ONE/LOWEST/HIGHEST/
# UNKNOWN pad kinds). protoc output — do not edit by hand.
_PADDINGVALUE = _descriptor.EnumDescriptor(
  name='PaddingValue',
  full_name='xla.PaddingValue',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='INVALID_PAD', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ZERO_PAD', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ONE_PAD', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='LOWEST_PAD', index=3, number=3,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='HIGHEST_PAD', index=4, number=4,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='UNKNOWN_PAD', index=5, number=5,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2956,
  serialized_end=3064,
)
_sym_db.RegisterEnumDescriptor(_PADDINGVALUE)
PaddingValue = enum_type_wrapper.EnumTypeWrapper(_PADDINGVALUE)
# Generated EnumDescriptor for xla.Format (layout format: DENSE or SPARSE).
# protoc output — do not edit by hand.
_FORMAT = _descriptor.EnumDescriptor(
  name='Format',
  full_name='xla.Format',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='INVALID_FORMAT', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='DENSE', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SPARSE', index=2, number=2,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=3066,
  serialized_end=3117,
)
_sym_db.RegisterEnumDescriptor(_FORMAT)
Format = enum_type_wrapper.EnumTypeWrapper(_FORMAT)
# Generated EnumDescriptor for xla.FftType (FFT/IFFT/RFFT/IRFFT transform
# kinds). protoc output — do not edit by hand.
_FFTTYPE = _descriptor.EnumDescriptor(
  name='FftType',
  full_name='xla.FftType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='FFT', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='IFFT', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='RFFT', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='IRFFT', index=3, number=3,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=3119,
  serialized_end=3168,
)
_sym_db.RegisterEnumDescriptor(_FFTTYPE)
FftType = enum_type_wrapper.EnumTypeWrapper(_FFTTYPE)
# Generated EnumDescriptor for xla.RandomDistribution (RNG_UNIFORM /
# RNG_NORMAL). protoc output — do not edit by hand.
_RANDOMDISTRIBUTION = _descriptor.EnumDescriptor(
  name='RandomDistribution',
  full_name='xla.RandomDistribution',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='RNG_INVALID', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='RNG_UNIFORM', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='RNG_NORMAL', index=2, number=2,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=3170,
  serialized_end=3240,
)
_sym_db.RegisterEnumDescriptor(_RANDOMDISTRIBUTION)
RandomDistribution = enum_type_wrapper.EnumTypeWrapper(_RANDOMDISTRIBUTION)
# Module-level aliases for every enum value above, mirroring each value's
# proto wire number (protoc emits these so callers can write e.g.
# xla_data_pb2.F32 instead of xla_data_pb2.PrimitiveType.Value('F32')).
PRIMITIVE_TYPE_INVALID = 0
PRED = 1
S8 = 2
S16 = 3
S32 = 4
S64 = 5
U8 = 6
U16 = 7
U32 = 8
U64 = 9
F16 = 10
F32 = 11
BF16 = 16
F64 = 12
C64 = 15
TUPLE = 13
OPAQUE = 14
TOKEN = 17
INVALID_PAD = 0
ZERO_PAD = 1
ONE_PAD = 2
LOWEST_PAD = 3
HIGHEST_PAD = 4
UNKNOWN_PAD = 5
INVALID_FORMAT = 0
DENSE = 1
SPARSE = 2
FFT = 0
IFFT = 1
RFFT = 2
IRFFT = 3
RNG_INVALID = 0
RNG_UNIFORM = 1
RNG_NORMAL = 2
# Generated EnumDescriptor for the nested enum xla.OpSharding.Type
# (REPLICATED/MAXIMAL/TUPLE/OTHER). protoc output — do not edit by hand.
_OPSHARDING_TYPE = _descriptor.EnumDescriptor(
  name='Type',
  full_name='xla.OpSharding.Type',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='REPLICATED', index=0, number=0,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='MAXIMAL', index=1, number=1,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='TUPLE', index=2, number=2,
      serialized_options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='OTHER', index=3, number=3,
      serialized_options=None,
      type=None),
  ],
  containing_type=None,
  serialized_options=None,
  serialized_start=2691,
  serialized_end=2748,
)
_sym_db.RegisterEnumDescriptor(_OPSHARDING_TYPE)
# Generated Descriptor for the nested message
# xla.PaddingConfig.PaddingConfigDimension: per-dimension edge/interior
# padding amounts (all int64). protoc output — do not edit by hand.
_PADDINGCONFIG_PADDINGCONFIGDIMENSION = _descriptor.Descriptor(
  name='PaddingConfigDimension',
  full_name='xla.PaddingConfig.PaddingConfigDimension',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='edge_padding_low', full_name='xla.PaddingConfig.PaddingConfigDimension.edge_padding_low', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='edge_padding_high', full_name='xla.PaddingConfig.PaddingConfigDimension.edge_padding_high', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='interior_padding', full_name='xla.PaddingConfig.PaddingConfigDimension.interior_padding', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=128,
  serialized_end=231,
)
# Generated Descriptor for xla.PaddingConfig: a repeated list of the
# per-dimension padding configs above.
_PADDINGCONFIG = _descriptor.Descriptor(
  name='PaddingConfig',
  full_name='xla.PaddingConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='dimensions', full_name='xla.PaddingConfig.dimensions', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_PADDINGCONFIG_PADDINGCONFIGDIMENSION, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=48,
  serialized_end=231,
)
# Generated Descriptor for xla.Layout: format enum, minor_to_major dimension
# order, padding info, and sparse-element cap. protoc output — do not edit.
_LAYOUT = _descriptor.Descriptor(
  name='Layout',
  full_name='xla.Layout',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='format', full_name='xla.Layout.format', index=0,
      number=4, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='minor_to_major', full_name='xla.Layout.minor_to_major', index=1,
      number=1, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='padded_dimensions', full_name='xla.Layout.padded_dimensions', index=2,
      number=2, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='padding_value', full_name='xla.Layout.padding_value', index=3,
      number=3, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='max_sparse_elements', full_name='xla.Layout.max_sparse_elements', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=234,
  serialized_end=393,
)
# Generated Descriptor for xla.Shape: element type, dimension sizes,
# recursive tuple_shapes, and an optional Layout. protoc output — do not edit.
_SHAPE = _descriptor.Descriptor(
  name='Shape',
  full_name='xla.Shape',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='element_type', full_name='xla.Shape.element_type', index=0,
      number=2, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='dimensions', full_name='xla.Shape.dimensions', index=1,
      number=3, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='tuple_shapes', full_name='xla.Shape.tuple_shapes', index=2,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='layout', full_name='xla.Shape.layout', index=3,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=396,
  serialized_end=540,
)
# Generated Descriptor for xla.ProgramShape: parameter shapes, result shape,
# and parallel parameter names. protoc output — do not edit by hand.
_PROGRAMSHAPE = _descriptor.Descriptor(
  name='ProgramShape',
  full_name='xla.ProgramShape',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='parameters', full_name='xla.ProgramShape.parameters', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='result', full_name='xla.ProgramShape.result', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='parameter_names', full_name='xla.ProgramShape.parameter_names', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=542,
  serialized_end=641,
)
# Generated Descriptor for xla.ComputationStats: double-valued flop and
# transcendental-op counts. protoc output — do not edit by hand.
_COMPUTATIONSTATS = _descriptor.Descriptor(
  name='ComputationStats',
  full_name='xla.ComputationStats',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='flop_count', full_name='xla.ComputationStats.flop_count', index=0,
      number=1, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='transcendental_count', full_name='xla.ComputationStats.transcendental_count', index=1,
      number=2, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=643,
  serialized_end=711,
)
# Generated Descriptor for xla.OpMetadata: op type/name and source
# file/line provenance strings. protoc output — do not edit by hand.
_OPMETADATA = _descriptor.Descriptor(
  name='OpMetadata',
  full_name='xla.OpMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='op_type', full_name='xla.OpMetadata.op_type', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='op_name', full_name='xla.OpMetadata.op_name', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='source_file', full_name='xla.OpMetadata.source_file', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='source_line', full_name='xla.OpMetadata.source_line', index=3,
      number=4, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=713,
  serialized_end=801,
)
# Generated Descriptor for xla.ExecutionProfile: compile/compute timing
# counters and executable size. protoc output — do not edit by hand.
_EXECUTIONPROFILE = _descriptor.Descriptor(
  name='ExecutionProfile',
  full_name='xla.ExecutionProfile',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='compilation_cache_hit', full_name='xla.ExecutionProfile.compilation_cache_hit', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='compile_time_ms', full_name='xla.ExecutionProfile.compile_time_ms', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='compute_cycle_count', full_name='xla.ExecutionProfile.compute_cycle_count', index=2,
      number=3, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='compute_time_ns', full_name='xla.ExecutionProfile.compute_time_ns', index=3,
      number=4, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='compute_and_transfer_time_ns', full_name='xla.ExecutionProfile.compute_and_transfer_time_ns', index=4,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='executable_size_in_bytes', full_name='xla.ExecutionProfile.executable_size_in_bytes', index=5,
      number=6, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=804,
  serialized_end=1004,
)
# Generated Descriptor for xla.ExecutionHandle: a single int64 handle.
# protoc output — do not edit by hand.
_EXECUTIONHANDLE = _descriptor.Descriptor(
  name='ExecutionHandle',
  full_name='xla.ExecutionHandle',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='handle', full_name='xla.ExecutionHandle.handle', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1006,
  serialized_end=1039,
)
# Generated Descriptor for xla.GlobalDataHandle: a single int64 handle.
# protoc output — do not edit by hand.
_GLOBALDATAHANDLE = _descriptor.Descriptor(
  name='GlobalDataHandle',
  full_name='xla.GlobalDataHandle',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='handle', full_name='xla.GlobalDataHandle.handle', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1041,
  serialized_end=1075,
)
# Generated Descriptor for xla.DeviceHandle: int64 handle plus device_count.
# protoc output — do not edit by hand.
_DEVICEHANDLE = _descriptor.Descriptor(
  name='DeviceHandle',
  full_name='xla.DeviceHandle',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='handle', full_name='xla.DeviceHandle.handle', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='device_count', full_name='xla.DeviceHandle.device_count', index=1,
      number=2, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1077,
  serialized_end=1129,
)
# Generated Descriptor for xla.ChannelHandle: a single int64 handle.
# protoc output — do not edit by hand.
_CHANNELHANDLE = _descriptor.Descriptor(
  name='ChannelHandle',
  full_name='xla.ChannelHandle',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='handle', full_name='xla.ChannelHandle.handle', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1131,
  serialized_end=1162,
)
# Generated Descriptor for the nested message
# xla.DeviceAssignmentProto.ComputationDevice: repeated int32 replica device
# ids. protoc output — do not edit by hand.
_DEVICEASSIGNMENTPROTO_COMPUTATIONDEVICE = _descriptor.Descriptor(
  name='ComputationDevice',
  full_name='xla.DeviceAssignmentProto.ComputationDevice',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='replica_device_ids', full_name='xla.DeviceAssignmentProto.ComputationDevice.replica_device_ids', index=0,
      number=1, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1315,
  serialized_end=1362,
)
# Generated Descriptor for xla.DeviceAssignmentProto: replica/computation
# counts and the per-computation device lists above.
_DEVICEASSIGNMENTPROTO = _descriptor.Descriptor(
  name='DeviceAssignmentProto',
  full_name='xla.DeviceAssignmentProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='replica_count', full_name='xla.DeviceAssignmentProto.replica_count', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='computation_count', full_name='xla.DeviceAssignmentProto.computation_count', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='computation_devices', full_name='xla.DeviceAssignmentProto.computation_devices', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_DEVICEASSIGNMENTPROTO_COMPUTATIONDEVICE, ],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1165,
  serialized_end=1362,
)
_LITERALPROTO = _descriptor.Descriptor(
name='LiteralProto',
full_name='xla.LiteralProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='shape', full_name='xla.LiteralProto.shape', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='preds', full_name='xla.LiteralProto.preds', index=1,
number=2, type=8, cpp_type=7, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='u8s', full_name='xla.LiteralProto.u8s', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s32s', full_name='xla.LiteralProto.s32s', index=3,
number=4, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s64s', full_name='xla.LiteralProto.s64s', index=4,
number=5, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='u32s', full_name='xla.LiteralProto.u32s', index=5,
number=6, type=13, cpp_type=3, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='u64s', full_name='xla.LiteralProto.u64s', index=6,
number=7, type=4, cpp_type=4, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='f32s', full_name='xla.LiteralProto.f32s', index=7,
number=8, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='f64s', full_name='xla.LiteralProto.f64s', index=8,
number=9, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='c64s', full_name='xla.LiteralProto.c64s', index=9,
number=12, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tuple_literals', full_name='xla.LiteralProto.tuple_literals', index=10,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='f16s', full_name='xla.LiteralProto.f16s', index=11,
number=11, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bf16s', full_name='xla.LiteralProto.bf16s', index=12,
number=13, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sparse_indices', full_name='xla.LiteralProto.sparse_indices', index=13,
number=14, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1365,
serialized_end=1628,
)
_WINDOWDIMENSION = _descriptor.Descriptor(
name='WindowDimension',
full_name='xla.WindowDimension',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='size', full_name='xla.WindowDimension.size', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stride', full_name='xla.WindowDimension.stride', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='padding_low', full_name='xla.WindowDimension.padding_low', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='padding_high', full_name='xla.WindowDimension.padding_high', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='window_dilation', full_name='xla.WindowDimension.window_dilation', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='base_dilation', full_name='xla.WindowDimension.base_dilation', index=5,
number=6, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='window_reversal', full_name='xla.WindowDimension.window_reversal', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1631,
serialized_end=1794,
)
_WINDOW = _descriptor.Descriptor(
name='Window',
full_name='xla.Window',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dimensions', full_name='xla.Window.dimensions', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1796,
serialized_end=1846,
)
_GATHERDIMENSIONNUMBERS = _descriptor.Descriptor(
name='GatherDimensionNumbers',
full_name='xla.GatherDimensionNumbers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='output_window_dims', full_name='xla.GatherDimensionNumbers.output_window_dims', index=0,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='elided_window_dims', full_name='xla.GatherDimensionNumbers.elided_window_dims', index=1,
number=2, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gather_dims_to_operand_dims', full_name='xla.GatherDimensionNumbers.gather_dims_to_operand_dims', index=2,
number=3, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='index_vector_dim', full_name='xla.GatherDimensionNumbers.index_vector_dim', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1849,
serialized_end=1992,
)
_CONVOLUTIONDIMENSIONNUMBERS = _descriptor.Descriptor(
name='ConvolutionDimensionNumbers',
full_name='xla.ConvolutionDimensionNumbers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input_batch_dimension', full_name='xla.ConvolutionDimensionNumbers.input_batch_dimension', index=0,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_feature_dimension', full_name='xla.ConvolutionDimensionNumbers.input_feature_dimension', index=1,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_spatial_dimensions', full_name='xla.ConvolutionDimensionNumbers.input_spatial_dimensions', index=2,
number=11, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_input_feature_dimension', full_name='xla.ConvolutionDimensionNumbers.kernel_input_feature_dimension', index=3,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_output_feature_dimension', full_name='xla.ConvolutionDimensionNumbers.kernel_output_feature_dimension', index=4,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_spatial_dimensions', full_name='xla.ConvolutionDimensionNumbers.kernel_spatial_dimensions', index=5,
number=6, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_batch_dimension', full_name='xla.ConvolutionDimensionNumbers.output_batch_dimension', index=6,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_feature_dimension', full_name='xla.ConvolutionDimensionNumbers.output_feature_dimension', index=7,
number=10, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_spatial_dimensions', full_name='xla.ConvolutionDimensionNumbers.output_spatial_dimensions', index=8,
number=12, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1995,
serialized_end=2339,
)
_DOTDIMENSIONNUMBERS = _descriptor.Descriptor(
name='DotDimensionNumbers',
full_name='xla.DotDimensionNumbers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lhs_contracting_dimensions', full_name='xla.DotDimensionNumbers.lhs_contracting_dimensions', index=0,
number=1, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rhs_contracting_dimensions', full_name='xla.DotDimensionNumbers.rhs_contracting_dimensions', index=1,
number=2, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lhs_batch_dimensions', full_name='xla.DotDimensionNumbers.lhs_batch_dimensions', index=2,
number=3, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rhs_batch_dimensions', full_name='xla.DotDimensionNumbers.rhs_batch_dimensions', index=3,
number=4, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2342,
serialized_end=2495,
)
_OPSHARDING = _descriptor.Descriptor(
name='OpSharding',
full_name='xla.OpSharding',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='xla.OpSharding.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tile_shape', full_name='xla.OpSharding.tile_shape', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tile_assignment_dimensions', full_name='xla.OpSharding.tile_assignment_dimensions', index=2,
number=3, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tile_assignment_devices', full_name='xla.OpSharding.tile_assignment_devices', index=3,
number=4, type=3, cpp_type=2, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tuple_shardings', full_name='xla.OpSharding.tuple_shardings', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_OPSHARDING_TYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2498,
serialized_end=2748,
)
_PADDINGCONFIG_PADDINGCONFIGDIMENSION.containing_type = _PADDINGCONFIG
_PADDINGCONFIG.fields_by_name['dimensions'].message_type = _PADDINGCONFIG_PADDINGCONFIGDIMENSION
_LAYOUT.fields_by_name['format'].enum_type = _FORMAT
_LAYOUT.fields_by_name['padding_value'].enum_type = _PADDINGVALUE
_SHAPE.fields_by_name['element_type'].enum_type = _PRIMITIVETYPE
_SHAPE.fields_by_name['tuple_shapes'].message_type = _SHAPE
_SHAPE.fields_by_name['layout'].message_type = _LAYOUT
_PROGRAMSHAPE.fields_by_name['parameters'].message_type = _SHAPE
_PROGRAMSHAPE.fields_by_name['result'].message_type = _SHAPE
_DEVICEASSIGNMENTPROTO_COMPUTATIONDEVICE.containing_type = _DEVICEASSIGNMENTPROTO
_DEVICEASSIGNMENTPROTO.fields_by_name['computation_devices'].message_type = _DEVICEASSIGNMENTPROTO_COMPUTATIONDEVICE
_LITERALPROTO.fields_by_name['shape'].message_type = _SHAPE
_LITERALPROTO.fields_by_name['tuple_literals'].message_type = _LITERALPROTO
_WINDOW.fields_by_name['dimensions'].message_type = _WINDOWDIMENSION
_OPSHARDING.fields_by_name['type'].enum_type = _OPSHARDING_TYPE
_OPSHARDING.fields_by_name['tile_shape'].message_type = _SHAPE
_OPSHARDING.fields_by_name['tuple_shardings'].message_type = _OPSHARDING
_OPSHARDING_TYPE.containing_type = _OPSHARDING
DESCRIPTOR.message_types_by_name['PaddingConfig'] = _PADDINGCONFIG
DESCRIPTOR.message_types_by_name['Layout'] = _LAYOUT
DESCRIPTOR.message_types_by_name['Shape'] = _SHAPE
DESCRIPTOR.message_types_by_name['ProgramShape'] = _PROGRAMSHAPE
DESCRIPTOR.message_types_by_name['ComputationStats'] = _COMPUTATIONSTATS
DESCRIPTOR.message_types_by_name['OpMetadata'] = _OPMETADATA
DESCRIPTOR.message_types_by_name['ExecutionProfile'] = _EXECUTIONPROFILE
DESCRIPTOR.message_types_by_name['ExecutionHandle'] = _EXECUTIONHANDLE
DESCRIPTOR.message_types_by_name['GlobalDataHandle'] = _GLOBALDATAHANDLE
DESCRIPTOR.message_types_by_name['DeviceHandle'] = _DEVICEHANDLE
DESCRIPTOR.message_types_by_name['ChannelHandle'] = _CHANNELHANDLE
DESCRIPTOR.message_types_by_name['DeviceAssignmentProto'] = _DEVICEASSIGNMENTPROTO
DESCRIPTOR.message_types_by_name['LiteralProto'] = _LITERALPROTO
DESCRIPTOR.message_types_by_name['WindowDimension'] = _WINDOWDIMENSION
DESCRIPTOR.message_types_by_name['Window'] = _WINDOW
DESCRIPTOR.message_types_by_name['GatherDimensionNumbers'] = _GATHERDIMENSIONNUMBERS
DESCRIPTOR.message_types_by_name['ConvolutionDimensionNumbers'] = _CONVOLUTIONDIMENSIONNUMBERS
DESCRIPTOR.message_types_by_name['DotDimensionNumbers'] = _DOTDIMENSIONNUMBERS
DESCRIPTOR.message_types_by_name['OpSharding'] = _OPSHARDING
DESCRIPTOR.enum_types_by_name['PrimitiveType'] = _PRIMITIVETYPE
DESCRIPTOR.enum_types_by_name['PaddingValue'] = _PADDINGVALUE
DESCRIPTOR.enum_types_by_name['Format'] = _FORMAT
DESCRIPTOR.enum_types_by_name['FftType'] = _FFTTYPE
DESCRIPTOR.enum_types_by_name['RandomDistribution'] = _RANDOMDISTRIBUTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PaddingConfig = _reflection.GeneratedProtocolMessageType('PaddingConfig', (_message.Message,), dict(
PaddingConfigDimension = _reflection.GeneratedProtocolMessageType('PaddingConfigDimension', (_message.Message,), dict(
DESCRIPTOR = _PADDINGCONFIG_PADDINGCONFIGDIMENSION,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.PaddingConfig.PaddingConfigDimension)
))
,
DESCRIPTOR = _PADDINGCONFIG,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.PaddingConfig)
))
_sym_db.RegisterMessage(PaddingConfig)
_sym_db.RegisterMessage(PaddingConfig.PaddingConfigDimension)
Layout = _reflection.GeneratedProtocolMessageType('Layout', (_message.Message,), dict(
DESCRIPTOR = _LAYOUT,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.Layout)
))
_sym_db.RegisterMessage(Layout)
Shape = _reflection.GeneratedProtocolMessageType('Shape', (_message.Message,), dict(
DESCRIPTOR = _SHAPE,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.Shape)
))
_sym_db.RegisterMessage(Shape)
ProgramShape = _reflection.GeneratedProtocolMessageType('ProgramShape', (_message.Message,), dict(
DESCRIPTOR = _PROGRAMSHAPE,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.ProgramShape)
))
_sym_db.RegisterMessage(ProgramShape)
ComputationStats = _reflection.GeneratedProtocolMessageType('ComputationStats', (_message.Message,), dict(
DESCRIPTOR = _COMPUTATIONSTATS,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.ComputationStats)
))
_sym_db.RegisterMessage(ComputationStats)
OpMetadata = _reflection.GeneratedProtocolMessageType('OpMetadata', (_message.Message,), dict(
DESCRIPTOR = _OPMETADATA,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.OpMetadata)
))
_sym_db.RegisterMessage(OpMetadata)
ExecutionProfile = _reflection.GeneratedProtocolMessageType('ExecutionProfile', (_message.Message,), dict(
DESCRIPTOR = _EXECUTIONPROFILE,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.ExecutionProfile)
))
_sym_db.RegisterMessage(ExecutionProfile)
ExecutionHandle = _reflection.GeneratedProtocolMessageType('ExecutionHandle', (_message.Message,), dict(
DESCRIPTOR = _EXECUTIONHANDLE,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.ExecutionHandle)
))
_sym_db.RegisterMessage(ExecutionHandle)
GlobalDataHandle = _reflection.GeneratedProtocolMessageType('GlobalDataHandle', (_message.Message,), dict(
DESCRIPTOR = _GLOBALDATAHANDLE,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.GlobalDataHandle)
))
_sym_db.RegisterMessage(GlobalDataHandle)
DeviceHandle = _reflection.GeneratedProtocolMessageType('DeviceHandle', (_message.Message,), dict(
DESCRIPTOR = _DEVICEHANDLE,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.DeviceHandle)
))
_sym_db.RegisterMessage(DeviceHandle)
ChannelHandle = _reflection.GeneratedProtocolMessageType('ChannelHandle', (_message.Message,), dict(
DESCRIPTOR = _CHANNELHANDLE,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.ChannelHandle)
))
_sym_db.RegisterMessage(ChannelHandle)
DeviceAssignmentProto = _reflection.GeneratedProtocolMessageType('DeviceAssignmentProto', (_message.Message,), dict(
ComputationDevice = _reflection.GeneratedProtocolMessageType('ComputationDevice', (_message.Message,), dict(
DESCRIPTOR = _DEVICEASSIGNMENTPROTO_COMPUTATIONDEVICE,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.DeviceAssignmentProto.ComputationDevice)
))
,
DESCRIPTOR = _DEVICEASSIGNMENTPROTO,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.DeviceAssignmentProto)
))
_sym_db.RegisterMessage(DeviceAssignmentProto)
_sym_db.RegisterMessage(DeviceAssignmentProto.ComputationDevice)
LiteralProto = _reflection.GeneratedProtocolMessageType('LiteralProto', (_message.Message,), dict(
DESCRIPTOR = _LITERALPROTO,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.LiteralProto)
))
_sym_db.RegisterMessage(LiteralProto)
WindowDimension = _reflection.GeneratedProtocolMessageType('WindowDimension', (_message.Message,), dict(
DESCRIPTOR = _WINDOWDIMENSION,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.WindowDimension)
))
_sym_db.RegisterMessage(WindowDimension)
Window = _reflection.GeneratedProtocolMessageType('Window', (_message.Message,), dict(
DESCRIPTOR = _WINDOW,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.Window)
))
_sym_db.RegisterMessage(Window)
GatherDimensionNumbers = _reflection.GeneratedProtocolMessageType('GatherDimensionNumbers', (_message.Message,), dict(
DESCRIPTOR = _GATHERDIMENSIONNUMBERS,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.GatherDimensionNumbers)
))
_sym_db.RegisterMessage(GatherDimensionNumbers)
ConvolutionDimensionNumbers = _reflection.GeneratedProtocolMessageType('ConvolutionDimensionNumbers', (_message.Message,), dict(
DESCRIPTOR = _CONVOLUTIONDIMENSIONNUMBERS,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.ConvolutionDimensionNumbers)
))
_sym_db.RegisterMessage(ConvolutionDimensionNumbers)
DotDimensionNumbers = _reflection.GeneratedProtocolMessageType('DotDimensionNumbers', (_message.Message,), dict(
DESCRIPTOR = _DOTDIMENSIONNUMBERS,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.DotDimensionNumbers)
))
_sym_db.RegisterMessage(DotDimensionNumbers)
OpSharding = _reflection.GeneratedProtocolMessageType('OpSharding', (_message.Message,), dict(
DESCRIPTOR = _OPSHARDING,
__module__ = 'tensorflow.compiler.xla.xla_data_pb2'
# @@protoc_insertion_point(class_scope:xla.OpSharding)
))
_sym_db.RegisterMessage(OpSharding)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
1c3bce0e68d6cb50bf6de6908d0001125204b025 | 817 | py | Python | v2/goalnet/core/modules/logger.py | DaniloZZZ/GoalNet | d24102a5f0700fbf42c0868b321d6f6a097ee97b | [
"Apache-2.0"
] | null | null | null | v2/goalnet/core/modules/logger.py | DaniloZZZ/GoalNet | d24102a5f0700fbf42c0868b321d6f6a097ee97b | [
"Apache-2.0"
] | null | null | null | v2/goalnet/core/modules/logger.py | DaniloZZZ/GoalNet | d24102a5f0700fbf42c0868b321d6f6a097ee97b | [
"Apache-2.0"
] | null | null | null | """
Created by Danil Lykov @danlkv on 13/02/19
"""
from pprint import pprint
import multiprocessing as prc
from goalnet.helpers.log_init import log
from goalnet.utils import get_network_config
from .AsyncBaseModule import AsyncModule
class LoggerModule(AsyncModule):
"""
A simple module that logs everything and does nothing afterwards
"""
def __init__(self, netconf,name='logger'):
super().__init__(netconf, name=name)
async def node_fun(self,message,drain):
log.info('message: %s'%message)
return message
def launch_logger(name,netconf):
logm = LoggerModule(netconf,name=name)
logm.start()
def main():
log.info("Starting logger module node...")
netconf = get_network_config()
launch_logger('logger',netconf)
if __name__=='__main__':
main()
| 24.029412 | 68 | 0.709914 |
from pprint import pprint
import multiprocessing as prc
from goalnet.helpers.log_init import log
from goalnet.utils import get_network_config
from .AsyncBaseModule import AsyncModule
class LoggerModule(AsyncModule):
def __init__(self, netconf,name='logger'):
super().__init__(netconf, name=name)
async def node_fun(self,message,drain):
log.info('message: %s'%message)
return message
def launch_logger(name,netconf):
logm = LoggerModule(netconf,name=name)
logm.start()
def main():
log.info("Starting logger module node...")
netconf = get_network_config()
launch_logger('logger',netconf)
if __name__=='__main__':
main()
| true | true |
1c3bce42ea29a20fbb907c8b16ed4eb577176d89 | 4,185 | py | Python | pypy/rlib/test/test_rweakkeydict.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | 1 | 2020-01-21T11:10:51.000Z | 2020-01-21T11:10:51.000Z | pypy/rlib/test/test_rweakkeydict.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null | pypy/rlib/test/test_rweakkeydict.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null | import py
from pypy.rlib import rgc
from pypy.rlib.rweakref import RWeakKeyDictionary
from pypy.rpython.test.test_llinterp import interpret
class KX(object):
pass
class KY(KX):
pass
class VX(object):
pass
class VY(VX):
pass
def make_test(loop=100, prebuilt=None):
def g(d):
assert d.get(KX()) is None
assert d.get(KY()) is None
k1 = KX(); k2 = KX(); k3 = KX()
v1 = VX(); v2 = VX(); v3 = VX()
d.set(k1, v1)
d.set(k2, v2)
d.set(k3, v3)
assert d.get(k1) is v1
assert d.get(k2) is v2
assert d.get(k3) is v3
assert d.get(KX()) is None
assert d.length() == 3
return k1, k3, v1, v2, v3 # k2 dies
def f():
d = prebuilt
if d is None:
d = RWeakKeyDictionary(KX, VX)
k1, k3, v1, v2, v3 = g(d)
rgc.collect(); rgc.collect()
assert d.get(k1) is v1
assert d.get(k3) is v3
assert d.get(k1) is not v2
assert d.get(k3) is not v2
assert d.length() == 2
d.set(k1, None)
assert d.get(k1) is None
assert d.get(k3) is v3
assert d.length() == 1
# resizing should also work
lots_of_keys = [KX() for i in range(loop)]
for k in lots_of_keys:
d.set(k, v1)
for k in lots_of_keys:
assert d.get(k) is v1
assert d.get(k1) is None
assert d.get(k3) is v3
assert d.length() == loop + 1
# a subclass
ky = KY()
vy = VY()
d.set(ky, vy)
assert d.get(ky) is vy
assert d.length() == loop + 2
# deleting by storing Nones
for k in lots_of_keys:
d.set(k, None)
for k in lots_of_keys:
assert d.get(k) is None
assert d.get(k1) is None
assert d.get(k3) is v3
assert d.get(ky) is vy
assert d.length() == 2
return f
def test_RWeakKeyDictionary():
make_test()()
def test_rpython_RWeakKeyDictionary():
interpret(make_test(loop=12), [])
def test_rpython_prebuilt():
f = make_test(loop=12, prebuilt=RWeakKeyDictionary(KX, VX))
interpret(f, [])
def test_rpython_merge_RWeakKeyDictionary():
empty = RWeakKeyDictionary(KX, VX)
def f(n):
k = KX()
v = VX()
if n:
d = empty
else:
d = RWeakKeyDictionary(KX, VX)
d.set(k, v)
return d.get(k) is v
assert f(0)
assert interpret(f, [0])
assert not f(1)
assert not interpret(f, [1])
def test_rpython_merge_RWeakKeyDictionary2():
class A(object):
def __init__(self):
self.d = RWeakKeyDictionary(KX, A)
def f(self, key):
a = A()
self.d.set(key, a)
return a
empty = A()
def f(x):
a = A()
if x:
a = empty
k = KX()
a2 = a.f(k)
assert a.d.get(k) is a2
f(0)
interpret(f, [0])
f(1)
interpret(f, [1])
def g(x):
if x:
d = RWeakKeyDictionary(KX, VX)
else:
d = RWeakKeyDictionary(KY, VX)
d.set(KX(), VX())
py.test.raises(Exception, interpret, g, [1])
def g(x):
if x:
d = RWeakKeyDictionary(KX, VX)
else:
d = RWeakKeyDictionary(KX, VY)
d.set(KX(), VX())
py.test.raises(Exception, interpret, g, [1])
def test_rpython_free_values():
import py; py.test.skip("XXX not implemented, messy")
class VXDel:
def __del__(self):
state.freed.append(1)
class State:
pass
state = State()
state.freed = []
#
def add_me():
k = KX()
v = VXDel()
d = RWeakKeyDictionary(KX, VXDel)
d.set(k, v)
return d
def f():
del state.freed[:]
d = add_me()
rgc.collect()
# we want the dictionary to be really empty here. It's hard to
# ensure in the current implementation after just one collect(),
# but at least two collects should be enough.
rgc.collect()
return len(state.freed)
assert f() == 1
assert interpret(f, []) == 1
| 25.05988 | 72 | 0.519235 | import py
from pypy.rlib import rgc
from pypy.rlib.rweakref import RWeakKeyDictionary
from pypy.rpython.test.test_llinterp import interpret
class KX(object):
pass
class KY(KX):
pass
class VX(object):
pass
class VY(VX):
pass
def make_test(loop=100, prebuilt=None):
def g(d):
assert d.get(KX()) is None
assert d.get(KY()) is None
k1 = KX(); k2 = KX(); k3 = KX()
v1 = VX(); v2 = VX(); v3 = VX()
d.set(k1, v1)
d.set(k2, v2)
d.set(k3, v3)
assert d.get(k1) is v1
assert d.get(k2) is v2
assert d.get(k3) is v3
assert d.get(KX()) is None
assert d.length() == 3
return k1, k3, v1, v2, v3
def f():
d = prebuilt
if d is None:
d = RWeakKeyDictionary(KX, VX)
k1, k3, v1, v2, v3 = g(d)
rgc.collect(); rgc.collect()
assert d.get(k1) is v1
assert d.get(k3) is v3
assert d.get(k1) is not v2
assert d.get(k3) is not v2
assert d.length() == 2
d.set(k1, None)
assert d.get(k1) is None
assert d.get(k3) is v3
assert d.length() == 1
lots_of_keys = [KX() for i in range(loop)]
for k in lots_of_keys:
d.set(k, v1)
for k in lots_of_keys:
assert d.get(k) is v1
assert d.get(k1) is None
assert d.get(k3) is v3
assert d.length() == loop + 1
ky = KY()
vy = VY()
d.set(ky, vy)
assert d.get(ky) is vy
assert d.length() == loop + 2
for k in lots_of_keys:
d.set(k, None)
for k in lots_of_keys:
assert d.get(k) is None
assert d.get(k1) is None
assert d.get(k3) is v3
assert d.get(ky) is vy
assert d.length() == 2
return f
def test_RWeakKeyDictionary():
make_test()()
def test_rpython_RWeakKeyDictionary():
interpret(make_test(loop=12), [])
def test_rpython_prebuilt():
f = make_test(loop=12, prebuilt=RWeakKeyDictionary(KX, VX))
interpret(f, [])
def test_rpython_merge_RWeakKeyDictionary():
empty = RWeakKeyDictionary(KX, VX)
def f(n):
k = KX()
v = VX()
if n:
d = empty
else:
d = RWeakKeyDictionary(KX, VX)
d.set(k, v)
return d.get(k) is v
assert f(0)
assert interpret(f, [0])
assert not f(1)
assert not interpret(f, [1])
def test_rpython_merge_RWeakKeyDictionary2():
    # A class that stores a weak dict whose declared value type is the
    # class itself (self-referential value type).
    class A(object):
        def __init__(self):
            self.d = RWeakKeyDictionary(KX, A)
        def f(self, key):
            a = A()
            self.d.set(key, a)
            return a
    empty = A()
    # The translated function may use either the prebuilt instance or a
    # fresh one; both paths must annotate/translate.
    def f(x):
        a = A()
        if x:
            a = empty
        k = KX()
        a2 = a.f(k)
        assert a.d.get(k) is a2
    f(0)
    interpret(f, [0])
    f(1)
    interpret(f, [1])
    # NOTE: g is deliberately defined twice below.  Each version builds
    # dictionaries with *different* declared types on the two branches,
    # which must make the translation (interpret) fail.
    # First: key classes differ (KX vs KY).
    def g(x):
        if x:
            d = RWeakKeyDictionary(KX, VX)
        else:
            d = RWeakKeyDictionary(KY, VX)
        d.set(KX(), VX())
    py.test.raises(Exception, interpret, g, [1])
    # Second: value classes differ (VX vs VY).
    def g(x):
        if x:
            d = RWeakKeyDictionary(KX, VX)
        else:
            d = RWeakKeyDictionary(KX, VY)
        d.set(KX(), VX())
    py.test.raises(Exception, interpret, g, [1])
def test_rpython_free_values():
    """Check that dropping a weak dict frees its values (runs __del__).

    Currently skipped: the feature is not implemented (see skip message).
    """
    import py; py.test.skip("XXX not implemented, messy")
    # Value class with a destructor that records each collection.
    class VXDel:
        def __del__(self):
            state.freed.append(1)
    class State:
        pass
    state = State()
    state.freed = []
    def add_me():
        # The dict, its key and its value are all local here, so nothing
        # keeps the value alive once this returns except the dict itself.
        k = KX()
        v = VXDel()
        d = RWeakKeyDictionary(KX, VXDel)
        d.set(k, v)
        return d
    def f():
        del state.freed[:]
        d = add_me()
        rgc.collect()
        # NOTE(review): the original comment here looked garbled;
        # presumably freeing is not ensured after just one collect(),
        # but at least two collects should be enough.
        rgc.collect()
        return len(state.freed)
    assert f() == 1
    assert interpret(f, []) == 1
| true | true |
1c3bcea52be1a62af86608ca35e65fa11993e1bb | 547,683 | py | Python | HLTrigger/Configuration/test/OnLine_HLT_PRef.py | DenkMybu/cmssw | 70b770f8d71f81469ef7f171c16d4d8958826d6e | [
"Apache-2.0"
] | null | null | null | HLTrigger/Configuration/test/OnLine_HLT_PRef.py | DenkMybu/cmssw | 70b770f8d71f81469ef7f171c16d4d8958826d6e | [
"Apache-2.0"
] | null | null | null | HLTrigger/Configuration/test/OnLine_HLT_PRef.py | DenkMybu/cmssw | 70b770f8d71f81469ef7f171c16d4d8958826d6e | [
"Apache-2.0"
] | null | null | null | # hltGetConfiguration --full --data /dev/CMSSW_12_3_0/PRef --type PRef --unprescale --process HLTPRef --globaltag auto:run3_hlt_PRef --input file:RelVal_Raw_PRef_DATA.root
# /dev/CMSSW_12_3_0/PRef/V23 (CMSSW_12_3_0_pre4)
import FWCore.ParameterSet.Config as cms
from HeterogeneousCore.CUDACore.SwitchProducerCUDA import SwitchProducerCUDA
process = cms.Process( "HLTPRef" )
process.HLTConfigVersion = cms.PSet(
tableName = cms.string('/dev/CMSSW_12_3_0/PRef/V23')
)
process.transferSystem = cms.PSet(
destinations = cms.vstring( 'Tier0',
'DQM',
'ECAL',
'EventDisplay',
'Lustre',
'None' ),
transferModes = cms.vstring( 'default',
'test',
'emulator' ),
streamA = cms.PSet(
default = cms.vstring( 'Tier0' ),
test = cms.vstring( 'Lustre' ),
emulator = cms.vstring( 'Lustre' )
),
streamCalibration = cms.PSet(
default = cms.vstring( 'Tier0' ),
test = cms.vstring( 'Lustre' ),
emulator = cms.vstring( 'None' )
),
streamDQM = cms.PSet(
default = cms.vstring( 'DQM' ),
test = cms.vstring( 'DQM',
'Lustre' ),
emulator = cms.vstring( 'None' )
),
streamDQMCalibration = cms.PSet(
default = cms.vstring( 'DQM' ),
test = cms.vstring( 'DQM',
'Lustre' ),
emulator = cms.vstring( 'None' )
),
streamEcalCalibration = cms.PSet(
default = cms.vstring( 'ECAL' ),
test = cms.vstring( 'ECAL' ),
emulator = cms.vstring( 'None' )
),
streamEventDisplay = cms.PSet(
default = cms.vstring( 'EventDisplay',
'Tier0' ),
test = cms.vstring( 'EventDisplay',
'Lustre' ),
emulator = cms.vstring( 'None' )
),
streamExpressCosmics = cms.PSet(
default = cms.vstring( 'Tier0' ),
test = cms.vstring( 'Lustre' ),
emulator = cms.vstring( 'Lustre' )
),
streamNanoDST = cms.PSet(
default = cms.vstring( 'Tier0' ),
test = cms.vstring( 'Lustre' ),
emulator = cms.vstring( 'None' )
),
streamRPCMON = cms.PSet(
default = cms.vstring( 'Tier0' ),
test = cms.vstring( 'Lustre' ),
emulator = cms.vstring( 'None' )
),
streamTrackerCalibration = cms.PSet(
default = cms.vstring( 'Tier0' ),
test = cms.vstring( 'Lustre' ),
emulator = cms.vstring( 'None' )
),
default = cms.PSet(
default = cms.vstring( 'Tier0' ),
test = cms.vstring( 'Lustre' ),
emulator = cms.vstring( 'Lustre' ),
streamLookArea = cms.PSet( )
),
streamLookArea = cms.PSet(
default = cms.vstring( 'DQM' ),
test = cms.vstring( 'DQM',
'Lustre' ),
emulator = cms.vstring( 'None' )
)
)
process.HLTPSetInitialCkfTrajectoryFilterForHI = cms.PSet(
minimumNumberOfHits = cms.int32( 6 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.9 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTIter0PSetTrajectoryBuilderIT = cms.PSet(
ComponentType = cms.string( "CkfTrajectoryBuilder" ),
lostHitPenalty = cms.double( 30.0 ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter0PSetTrajectoryFilterIT" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
maxCand = cms.int32( 2 ),
alwaysUseInvalidHits = cms.bool( False ),
estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9" ),
intermediateCleaning = cms.bool( True ),
updator = cms.string( "hltESPKFUpdator" ),
seedAs5DHit = cms.bool( False )
)
process.HLTIter4PSetTrajectoryBuilderIT = cms.PSet(
ComponentType = cms.string( "CkfTrajectoryBuilder" ),
lostHitPenalty = cms.double( 30.0 ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter4PSetTrajectoryFilterIT" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
maxCand = cms.int32( 1 ),
alwaysUseInvalidHits = cms.bool( False ),
estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
intermediateCleaning = cms.bool( True ),
updator = cms.string( "hltESPKFUpdator" ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetTobTecStepInOutTrajectoryFilterBase = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.1 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 0 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTIter0GroupedCkfTrajectoryBuilderIT = cms.PSet(
keepOriginalIfRebuildFails = cms.bool( False ),
lockHits = cms.bool( True ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter0PSetTrajectoryFilterIT" ) ),
maxCand = cms.int32( 2 ),
estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9" ),
intermediateCleaning = cms.bool( True ),
bestHitOnly = cms.bool( True ),
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
lostHitPenalty = cms.double( 30.0 ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
alwaysUseInvalidHits = cms.bool( False ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter0PSetTrajectoryFilterIT" ) ),
foundHitBonus = cms.double( 5.0 ),
updator = cms.string( "hltESPKFUpdator" ),
seedAs5DHit = cms.bool( False )
)
# "Tiny" working point of the SiStrip cluster-charge cut referenced by the
# trajectory-filter PSets via minGoodStripCharge (threshold presumably in
# electrons -- TODO confirm units).
process.HLTSiStripClusterChargeCutTiny = cms.PSet(  value = cms.double( 800.0 ) )
process.HLTPSetTrajectoryFilterIT = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.3 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 1 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTIter4PSetTrajectoryFilterIT = cms.PSet(
minimumNumberOfHits = cms.int32( 6 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.3 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 0 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetTrajectoryBuilderForElectrons = cms.PSet(
ComponentType = cms.string( "CkfTrajectoryBuilder" ),
lostHitPenalty = cms.double( 90.0 ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
propagatorOpposite = cms.string( "hltESPBwdElectronPropagator" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetTrajectoryFilterForElectrons" ) ),
propagatorAlong = cms.string( "hltESPFwdElectronPropagator" ),
maxCand = cms.int32( 5 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
intermediateCleaning = cms.bool( False ),
updator = cms.string( "hltESPKFUpdator" ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetPvClusterComparerForIT = cms.PSet(
track_chi2_max = cms.double( 20.0 ),
track_pt_max = cms.double( 20.0 ),
track_prob_min = cms.double( -1.0 ),
track_pt_min = cms.double( 1.0 )
)
process.HLTPSetMixedStepTrajectoryFilter = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.1 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.4 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetInitialCkfTrajectoryBuilderForHI = cms.PSet(
ComponentType = cms.string( "CkfTrajectoryBuilder" ),
lostHitPenalty = cms.double( 30.0 ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOppositeForHI" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialCkfTrajectoryFilterForHI" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialForHI" ),
maxCand = cms.int32( 5 ),
alwaysUseInvalidHits = cms.bool( False ),
estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
intermediateCleaning = cms.bool( False ),
updator = cms.string( "hltESPKFUpdator" ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetMuonCkfTrajectoryBuilder = cms.PSet(
rescaleErrorIfFail = cms.double( 1.0 ),
ComponentType = cms.string( "MuonCkfTrajectoryBuilder" ),
lostHitPenalty = cms.double( 30.0 ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMuonCkfTrajectoryFilter" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
maxCand = cms.int32( 5 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
intermediateCleaning = cms.bool( False ),
propagatorProximity = cms.string( "SteppingHelixPropagatorAny" ),
updator = cms.string( "hltESPKFUpdator" ),
deltaEta = cms.double( -1.0 ),
useSeedLayer = cms.bool( False ),
deltaPhi = cms.double( -1.0 ),
seedAs5DHit = cms.bool( False )
)
process.HLTIter0HighPtTkMuPSetTrajectoryFilterIT = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.3 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 1 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetPvClusterComparerForBTag = cms.PSet(
track_chi2_max = cms.double( 20.0 ),
track_pt_max = cms.double( 20.0 ),
track_prob_min = cms.double( -1.0 ),
track_pt_min = cms.double( 0.1 )
)
process.HLTSeedFromConsecutiveHitsTripletOnlyCreator = cms.PSet(
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
SeedMomentumForBOFF = cms.double( 5.0 ),
propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
forceKinematicWithRegionDirection = cms.bool( False ),
magneticField = cms.string( "ParabolicMf" ),
OriginTransverseErrorMultiplier = cms.double( 1.0 ),
ComponentName = cms.string( "SeedFromConsecutiveHitsTripletOnlyCreator" ),
MinOneOverPtError = cms.double( 1.0 )
)
process.HLTIter2GroupedCkfTrajectoryBuilderIT = cms.PSet(
keepOriginalIfRebuildFails = cms.bool( False ),
lockHits = cms.bool( True ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter2PSetTrajectoryFilterIT" ) ),
maxCand = cms.int32( 2 ),
estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
intermediateCleaning = cms.bool( True ),
bestHitOnly = cms.bool( True ),
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
lostHitPenalty = cms.double( 30.0 ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
alwaysUseInvalidHits = cms.bool( False ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter2PSetTrajectoryFilterIT" ) ),
foundHitBonus = cms.double( 5.0 ),
updator = cms.string( "hltESPKFUpdator" ),
seedAs5DHit = cms.bool( False )
)
process.HLTIter3PSetTrajectoryBuilderIT = cms.PSet(
ComponentType = cms.string( "CkfTrajectoryBuilder" ),
lostHitPenalty = cms.double( 30.0 ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter3PSetTrajectoryFilterIT" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
maxCand = cms.int32( 1 ),
alwaysUseInvalidHits = cms.bool( False ),
estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
intermediateCleaning = cms.bool( True ),
updator = cms.string( "hltESPKFUpdator" ),
seedAs5DHit = cms.bool( False )
)
# "Tight" working point of the SiStrip cluster-charge cut (threshold
# presumably in electrons -- TODO confirm units).
process.HLTSiStripClusterChargeCutTight = cms.PSet(  value = cms.double( 1945.0 ) )
process.HLTPSetCkf3HitTrajectoryFilter = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.9 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( -1 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 1 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetDetachedStepTrajectoryFilterBase = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 2 ),
minPt = cms.double( 0.075 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetMuonTrackingRegionBuilder8356 = cms.PSet(
Rescale_Dz = cms.double( 3.0 ),
Pt_fixed = cms.bool( False ),
Eta_fixed = cms.bool( False ),
Eta_min = cms.double( 0.1 ),
DeltaZ = cms.double( 15.9 ),
maxRegions = cms.int32( 2 ),
EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
UseVertex = cms.bool( False ),
Z_fixed = cms.bool( True ),
PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
Rescale_phi = cms.double( 3.0 ),
DeltaEta = cms.double( 0.2 ),
precise = cms.bool( True ),
OnDemand = cms.int32( -1 ),
EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
MeasurementTrackerName = cms.InputTag( "hltESPMeasurementTracker" ),
vertexCollection = cms.InputTag( "pixelVertices" ),
Pt_min = cms.double( 1.5 ),
beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
Phi_fixed = cms.bool( False ),
DeltaR = cms.double( 0.2 ),
input = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
DeltaPhi = cms.double( 0.2 ),
Phi_min = cms.double( 0.1 ),
Rescale_eta = cms.double( 3.0 )
)
process.HLTPSetDetachedCkfTrajectoryFilterForHI = cms.PSet(
minimumNumberOfHits = cms.int32( 6 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.3 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 0.701 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 1 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTIter3PSetTrajectoryFilterIT = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.3 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 0 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetJetCoreStepTrajectoryFilter = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.1 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTIter2PSetTrajectoryFilterIT = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 1 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.3 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 1 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetMuTrackJpsiTrajectoryBuilder = cms.PSet(
ComponentType = cms.string( "CkfTrajectoryBuilder" ),
lostHitPenalty = cms.double( 30.0 ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMuTrackJpsiTrajectoryFilter" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterial" ),
maxCand = cms.int32( 1 ),
alwaysUseInvalidHits = cms.bool( False ),
estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
intermediateCleaning = cms.bool( True ),
updator = cms.string( "hltESPKFUpdator" ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetTrajectoryBuilderForGsfElectrons = cms.PSet(
ComponentType = cms.string( "CkfTrajectoryBuilder" ),
lostHitPenalty = cms.double( 90.0 ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
propagatorOpposite = cms.string( "hltESPBwdElectronPropagator" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetTrajectoryFilterForElectrons" ) ),
propagatorAlong = cms.string( "hltESPFwdElectronPropagator" ),
maxCand = cms.int32( 5 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator2000" ),
intermediateCleaning = cms.bool( False ),
updator = cms.string( "hltESPKFUpdator" ),
seedAs5DHit = cms.bool( False )
)
# "None" working point: a negative value presumably disables the
# cluster-charge cut entirely (this PSet is the default minGoodStripCharge
# reference in most trajectory filters above).
process.HLTSiStripClusterChargeCutNone = cms.PSet(  value = cms.double( -1.0 ) )
process.HLTPSetTobTecStepTrajectoryFilterBase = cms.PSet(
minimumNumberOfHits = cms.int32( 5 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.1 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 0 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetMuonCkfTrajectoryFilter = cms.PSet(
minimumNumberOfHits = cms.int32( 5 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.9 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( -1 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 1 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetbJetRegionalTrajectoryFilter = cms.PSet(
minimumNumberOfHits = cms.int32( 5 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 1.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 8 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 1 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetDetachedStepTrajectoryFilter = cms.PSet(
ComponentType = cms.string( "CompositeTrajectoryFilter" ),
filters = cms.VPSet(
cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedStepTrajectoryFilterBase" ) )
)
)
process.HLTIter1PSetTrajectoryFilterIT = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.2 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 1 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetDetachedCkfTrajectoryFilterForHIGlobalPt8 = cms.PSet(
minimumNumberOfHits = cms.int32( 6 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 8.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 0.701 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 1 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetMixedStepTrajectoryBuilder = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialForMixedStepOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMixedStepTrajectoryFilter" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialForMixedStep" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 2 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPChi2ChargeTightMeasurementEstimator16" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMixedStepTrajectoryFilter" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 5.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetMixedStepTrajectoryFilterBase = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.05 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 0 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetCkfTrajectoryFilter = cms.PSet(
minimumNumberOfHits = cms.int32( 5 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.9 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( -1 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 1 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTSeedFromProtoTracks = cms.PSet(
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
SeedMomentumForBOFF = cms.double( 5.0 ),
propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
forceKinematicWithRegionDirection = cms.bool( False ),
magneticField = cms.string( "ParabolicMf" ),
OriginTransverseErrorMultiplier = cms.double( 1.0 ),
ComponentName = cms.string( "SeedFromConsecutiveHitsCreator" ),
MinOneOverPtError = cms.double( 1.0 )
)
process.HLTPSetInitialStepTrajectoryFilterBase = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 2 ),
minPt = cms.double( 0.2 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTIter2PSetTrajectoryBuilderIT = cms.PSet(
ComponentType = cms.string( "CkfTrajectoryBuilder" ),
lostHitPenalty = cms.double( 30.0 ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter2PSetTrajectoryFilterIT" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
maxCand = cms.int32( 2 ),
alwaysUseInvalidHits = cms.bool( False ),
estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
intermediateCleaning = cms.bool( True ),
updator = cms.string( "hltESPKFUpdator" ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetMuTrackJpsiTrajectoryFilter = cms.PSet(
minimumNumberOfHits = cms.int32( 5 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 10.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 8 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 1 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTSeedFromConsecutiveHitsCreatorIT = cms.PSet(
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
SeedMomentumForBOFF = cms.double( 5.0 ),
propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
forceKinematicWithRegionDirection = cms.bool( False ),
magneticField = cms.string( "ParabolicMf" ),
OriginTransverseErrorMultiplier = cms.double( 1.0 ),
ComponentName = cms.string( "SeedFromConsecutiveHitsCreator" ),
MinOneOverPtError = cms.double( 1.0 )
)
process.HLTPSetTrajectoryFilterL3 = cms.PSet(
minimumNumberOfHits = cms.int32( 5 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.5 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 1000000000 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 999.0 ),
maxLostHits = cms.int32( 1 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder parameters; filter by reference from
# HLTPSetDetachedStepTrajectoryFilter (maxCand = 3, invalid hits not forced).
process.HLTPSetDetachedStepTrajectoryBuilder = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedStepTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedStepTrajectoryFilter" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter parameters: minimumNumberOfHits = 6, minPt = 8.0,
# maxLostHits = 100.
process.HLTPSetPixelPairCkfTrajectoryFilterForHIGlobalPt8 = cms.PSet( 
  minimumNumberOfHits = cms.int32( 6 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 8.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 100 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CkfBaseTrajectoryFilter parameters: minPt = 0.3, strict maxCCCLostHits = 0.
process.HLTIter0PSetTrajectoryFilterIT = cms.PSet( 
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CkfBaseTrajectoryFilter parameters: minimumNumberOfHits = 5, minPt = 0.3,
# tolerates up to 3 consecutive lost hits.
process.HLTIter2HighPtTkMuPSetTrajectoryFilterIT = cms.PSet( 
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 3 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CkfBaseTrajectoryFilter parameters: minPt = 1.0, maxNumberOfHits = 9.
process.HLTPSetMuTrackJpsiEffTrajectoryFilter = cms.PSet( 
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 1.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 9 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder parameters using the ForHI propagators; filter
# by reference from HLTPSetPixelPairCkfTrajectoryFilterForHIGlobalPt8.
process.HLTPSetPixelPairCkfTrajectoryBuilderForHIGlobalPt8 = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOppositeForHI" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPixelPairCkfTrajectoryFilterForHIGlobalPt8" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForHI" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9ForHI" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPixelPairCkfTrajectoryFilterForHIGlobalPt8" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter parameters: minPt = 0.1, maxCCCLostHits = 2,
# strip-charge cut taken from HLTSiStripClusterChargeCutLoose.
process.HLTPSetPixelPairStepTrajectoryFilterBase = cms.PSet( 
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 2 ),
  minPt = cms.double( 0.1 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CkfBaseTrajectoryFilter parameters: minPt = 0.075, maxCCCLostHits = 1,
# strip-charge cut taken from HLTSiStripClusterChargeCutLoose.
process.HLTPSetLowPtStepTrajectoryFilter = cms.PSet( 
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 1 ),
  minPt = cms.double( 0.075 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# SeedFromConsecutiveHitsCreator parameters; magneticField is the empty string
# here (full field) rather than "ParabolicMf" as in the IT variant above.
process.HLTSeedFromConsecutiveHitsCreator = cms.PSet( 
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  SeedMomentumForBOFF = cms.double( 5.0 ),
  propagator = cms.string( "PropagatorWithMaterial" ),
  forceKinematicWithRegionDirection = cms.bool( False ),
  magneticField = cms.string( "" ),
  OriginTransverseErrorMultiplier = cms.double( 1.0 ),
  ComponentName = cms.string( "SeedFromConsecutiveHitsCreator" ),
  MinOneOverPtError = cms.double( 1.0 )
)
# GroupedCkfTrajectoryBuilder parameters (ForHI propagators); filter by
# reference from HLTPSetPixelPairCkfTrajectoryFilterForHI.
process.HLTPSetPixelPairCkfTrajectoryBuilderForHI = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOppositeForHI" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPixelPairCkfTrajectoryFilterForHI" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForHI" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9ForHI" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPixelPairCkfTrajectoryFilterForHI" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# GroupedCkfTrajectoryBuilder parameters; looper-reconstruction cutoffs set to
# 0.0 here (unlike the 2.0 / 0.7 used by most builders in this menu).
process.HLTPSetDetachedCkfTrajectoryBuilderForHI = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 0.0 ),
  maxPtForLooperReconstruction = cms.double( 0.0 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOppositeForHI" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedCkfTrajectoryFilterForHI" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForHI" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedCkfTrajectoryFilterForHI" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# CkfTrajectoryBuilder (non-grouped) parameters; filter by reference from
# HLTIter1PSetTrajectoryFilterIT.
process.HLTIter1PSetTrajectoryBuilderIT = cms.PSet( 
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter1PSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# GroupedCkfTrajectoryBuilder parameters; same shape as the ForHI builder
# above but referencing the GlobalPt8 filter variant.
process.HLTPSetDetachedCkfTrajectoryBuilderForHIGlobalPt8 = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 0.0 ),
  maxPtForLooperReconstruction = cms.double( 0.0 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOppositeForHI" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedCkfTrajectoryFilterForHIGlobalPt8" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForHI" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedCkfTrajectoryFilterForHIGlobalPt8" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Single-value PSet: strip cluster charge threshold (2069.0) referenced by
# other PSets via refToPSet_.
process.HLTSiStripClusterChargeCutForHI = cms.PSet(  value = cms.double( 2069.0 ) )
# GroupedCkfTrajectoryBuilder parameters; maxCand = 4, filter by reference
# from HLTPSetLowPtStepTrajectoryFilter.
process.HLTPSetLowPtStepTrajectoryBuilder = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetLowPtStepTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 4 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetLowPtStepTrajectoryFilter" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# CkfTrajectoryBuilder (non-grouped) parameters; single candidate (maxCand = 1).
process.HLTPSetMuTrackJpsiEffTrajectoryBuilder = cms.PSet( 
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetMuTrackJpsiEffTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  maxCand = cms.int32( 1 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter parameters: minPt = 2.0, unbounded hit count
# (maxNumberOfHits = -1) and minHitsMinPt disabled (-1).
process.HLTPSetTrajectoryFilterForElectrons = cms.PSet( 
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 2.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( -1 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( -1 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder parameters; notably wide candidate window
# (maxCand = 50), filter by reference from HLTPSetJetCoreStepTrajectoryFilter.
process.HLTPSetJetCoreStepTrajectoryBuilder = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetJetCoreStepTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 50 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetJetCoreStepTrajectoryFilter" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Track-quality cut PSet (chi2/pt/probability thresholds); presumably used to
# rank tracks for primary-vertex clustering — confirm against the consumer.
process.HLTPSetPvClusterComparer = cms.PSet( 
  track_chi2_max = cms.double( 9999999.0 ),
  track_pt_max = cms.double( 10.0 ),
  track_prob_min = cms.double( -1.0 ),
  track_pt_min = cms.double( 2.5 )
)
# CkfTrajectoryBuilder (non-grouped) parameters; maxCand = 4 and invalid hits
# always kept, unlike the iter1/iter2 builders above.
process.HLTIter0HighPtTkMuPSetTrajectoryBuilderIT = cms.PSet( 
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter0HighPtTkMuPSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  maxCand = cms.int32( 4 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter parameters: minimumNumberOfHits = 4, minPt = 0.05,
# no lost hits allowed (maxLostHits = 0).
process.HLTPSetPixelLessStepTrajectoryFilterBase = cms.PSet( 
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.05 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder parameters; no looper-reconstruction keys here,
# and inOutTrajectoryFilter is listed last (generated parameter order varies).
process.HLTIter1GroupedCkfTrajectoryBuilderIT = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter1PSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter1PSetTrajectoryFilterIT" ) )
)
# MuonCkfTrajectoryBuilder parameters: muon-specific extras (rescaleErrorIfFail,
# propagatorProximity, deltaEta/deltaPhi, useSeedLayer); maxCand = 5 and
# intermediateCleaning disabled.
process.HLTPSetMuonCkfTrajectoryBuilderSeedHit = cms.PSet( 
  rescaleErrorIfFail = cms.double( 1.0 ),
  ComponentType = cms.string( "MuonCkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetMuonCkfTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  maxCand = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( False ),
  propagatorProximity = cms.string( "SteppingHelixPropagatorAny" ),
  updator = cms.string( "hltESPKFUpdator" ),
  deltaEta = cms.double( -1.0 ),
  useSeedLayer = cms.bool( True ),
  deltaPhi = cms.double( -1.0 ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter parameters: minimumNumberOfHits = 6, minPt = 1.0,
# maxLostHits = 100 (cf. the GlobalPt8 variant with minPt = 8.0).
process.HLTPSetPixelPairCkfTrajectoryFilterForHI = cms.PSet( 
  minimumNumberOfHits = cms.int32( 6 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 1.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 100 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder parameters: keepOriginalIfRebuildFails = True and
# minNrOfHitsForRebuild = 1 (looser rebuild policy than the other builders);
# foundHitBonus = 10.0.
process.HLTPSetInitialStepTrajectoryBuilder = cms.PSet( 
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 3 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( True ),
  estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 1 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter parameters: same values as
# HLTPSetInitialStepTrajectoryFilterBase above, listed in a different order.
process.HLTPSetInitialStepTrajectoryFilter = cms.PSet( 
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 3 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.2 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder parameters; maxCand = 4, filter by reference
# from HLTPSetLowPtQuadStepTrajectoryFilter.
process.HLTPSetLowPtQuadStepTrajectoryBuilder = cms.PSet( 
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetLowPtQuadStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetLowPtQuadStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 4 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPLowPtQuadStepChi2ChargeMeasurementEstimator9" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter parameters: minPt = 0.075, maxCCCLostHits = 0,
# strip-charge cut taken from HLTSiStripClusterChargeCutLoose.
process.HLTPSetLowPtQuadStepTrajectoryFilter = cms.PSet( 
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 3 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.075 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetHighPtTripletStepTrajectoryBuilder = cms.PSet(
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
bestHitOnly = cms.bool( True ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetHighPtTripletStepTrajectoryFilter" ) ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetHighPtTripletStepTrajectoryFilter" ) ),
useSameTrajFilter = cms.bool( True ),
maxCand = cms.int32( 3 ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 ),
foundHitBonus = cms.double( 10.0 ),
lockHits = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
keepOriginalIfRebuildFails = cms.bool( False ),
estimator = cms.string( "hltESPHighPtTripletStepChi2ChargeMeasurementEstimator30" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetHighPtTripletStepTrajectoryFilter = cms.PSet(
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
minimumNumberOfHits = cms.int32( 3 ),
seedPairPenalty = cms.int32( 5 ),
chargeSignificance = cms.double( -1.0 ),
minPt = cms.double( 0.2 ),
nSigmaMinPt = cms.double( 5.0 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHits = cms.int32( 999 ),
maxConsecLostHits = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 100 ),
maxLostHitsFraction = cms.double( 0.1 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedExtension = cms.int32( 0 ),
strictSeedExtension = cms.bool( False ),
pixelSeedExtension = cms.bool( False ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
maxCCCLostHits = cms.int32( 0 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetLowPtTripletStepTrajectoryBuilder = cms.PSet(
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
bestHitOnly = cms.bool( True ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtTripletStepTrajectoryFilter" ) ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtTripletStepTrajectoryFilter" ) ),
useSameTrajFilter = cms.bool( True ),
maxCand = cms.int32( 4 ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 ),
foundHitBonus = cms.double( 10.0 ),
lockHits = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
keepOriginalIfRebuildFails = cms.bool( False ),
estimator = cms.string( "hltESPLowPtTripletStepChi2ChargeMeasurementEstimator9" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetLowPtTripletStepTrajectoryFilter = cms.PSet(
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
minimumNumberOfHits = cms.int32( 3 ),
seedPairPenalty = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
minPt = cms.double( 0.075 ),
nSigmaMinPt = cms.double( 5.0 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHits = cms.int32( 999 ),
maxConsecLostHits = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 100 ),
maxLostHitsFraction = cms.double( 0.1 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedExtension = cms.int32( 0 ),
strictSeedExtension = cms.bool( False ),
pixelSeedExtension = cms.bool( False ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
maxCCCLostHits = cms.int32( 0 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder settings for the detached quadruplet tracking step.
process.HLTPSetDetachedQuadStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedQuadStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedQuadStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 3 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPDetachedQuadStepChi2ChargeMeasurementEstimator9" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter cuts for the detached quadruplet step.
process.HLTPSetDetachedQuadStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 3 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.075 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder settings for the detached triplet tracking step.
process.HLTPSetDetachedTripletStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedTripletStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedTripletStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 3 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPDetachedTripletStepChi2ChargeMeasurementEstimator9" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter cuts for the detached triplet step.
process.HLTPSetDetachedTripletStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 3 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.075 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder settings for the mixed triplet tracking step;
# note this step uses the dedicated "ForMixedStep" propagators.
process.HLTPSetMixedTripletStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForMixedStep" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetMixedTripletStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetMixedTripletStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 2 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPMixedTripletStepChi2ChargeMeasurementEstimator16" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialForMixedStepOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter cuts for the mixed triplet step; unlike the pixel-seeded
# steps above, the cluster-charge cut is disabled (ChargeCutNone, maxCCCLostHits 9999).
process.HLTPSetMixedTripletStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 3 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.1 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.4 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder settings for the pixel-less tracking step
# (invalid hits are not kept: alwaysUseInvalidHits = False).
process.HLTPSetPixelLessStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPixelLessStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPixelLessStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 2 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPPixelLessStepChi2ChargeMeasurementEstimator16" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 4 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter cuts for the pixel-less step: at least 4 hits and
# no lost hits allowed (maxLostHits = 0).
process.HLTPSetPixelLessStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 4 ),
  seedPairPenalty = cms.int32( 1 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.1 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 0 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Out-in CkfBaseTrajectoryFilter for the TOB/TEC step: 5-hit minimum, no lost hits.
process.HLTPSetTobTecStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 5 ),
  seedPairPenalty = cms.int32( 1 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.1 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 0 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# In-out CkfBaseTrajectoryFilter for the TOB/TEC step — identical to the out-in
# filter except the hit minimum is relaxed to 4.
process.HLTPSetTobTecStepInOutTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 4 ),
  seedPairPenalty = cms.int32( 1 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.1 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 0 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder for the TOB/TEC step; the only builder in this group
# that uses distinct out-in and in-out filters (useSameTrajFilter = False).
process.HLTPSetTobTecStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetTobTecStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetTobTecStepInOutTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( False ),
  maxCand = cms.int32( 2 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPTobTecStepChi2ChargeMeasurementEstimator16" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 4 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# GroupedCkfTrajectoryBuilder for the outside-in (OI) iteration of L3 muon
# reconstruction; carries extra OI-specific knobs (rescaleErrorIfFail, deltaEta/
# deltaPhi, useSeedLayer, propagatorProximity) on top of the standard builder set.
process.HLTPSetGroupedCkfTrajectoryBuilderIterL3ForOI = cms.PSet(
  rescaleErrorIfFail = cms.double( 1.0 ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lockHits = cms.bool( True ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetCkfTrajectoryFilterIterL3OI" ) ),
  maxCand = cms.int32( 5 ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  bestHitOnly = cms.bool( True ),
  deltaEta = cms.double( -1.0 ),
  useSeedLayer = cms.bool( False ),
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  requireSeedHitsInRebuild = cms.bool( False ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( True ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetCkfTrajectoryFilterIterL3OI" ) ),
  foundHitBonus = cms.double( 1000.0 ),
  propagatorProximity = cms.string( "SteppingHelixPropagatorAny" ),
  updator = cms.string( "hltESPKFUpdator" ),
  deltaPhi = cms.double( -1.0 ),
  seedAs5DHit = cms.bool( False )
)
# Iter0 (inside-out, IT) GroupedCkfTrajectoryBuilder for IterL3 muons seeded
# from L2; note the low lostHitPenalty (1.0) and high foundHitBonus (1000.0).
process.HLTIter0IterL3MuonPSetGroupedCkfTrajectoryBuilderIT = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( True ),
  lostHitPenalty = cms.double( 1.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter0IterL3MuonGroupedCkfTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  minNrOfHitsForRebuild = cms.int32( 2 ),
  maxCand = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter0IterL3MuonGroupedCkfTrajectoryFilterIT" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 1000.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Iter0 trajectory-filter cuts for IterL3 muons seeded from L1 (minPt 0.9).
process.HLTIter0IterL3FromL1MuonGroupedCkfTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.9 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 10.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Iter0 (IT) GroupedCkfTrajectoryBuilder for IterL3 muons seeded from L1; same
# settings as the L2-seeded Iter0 builder but pointing at the FromL1Muon filter.
process.HLTIter0IterL3FromL1MuonPSetGroupedCkfTrajectoryBuilderIT = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( True ),
  lostHitPenalty = cms.double( 1.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter0IterL3FromL1MuonGroupedCkfTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  minNrOfHitsForRebuild = cms.int32( 2 ),
  maxCand = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter0IterL3FromL1MuonGroupedCkfTrajectoryFilterIT" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 1000.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Iter0 trajectory-filter cuts for IterL3 muons seeded from L2 (same values as
# the FromL1Muon variant above; kept separate for independent tuning).
process.HLTIter0IterL3MuonGroupedCkfTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.9 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 10.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Plain (non-grouped) CkfTrajectoryBuilder for the Iter2 high-pT tracker-muon
# reconstruction; has no rebuild-related parameters.
process.HLTIter2HighPtTkMuPSetTrajectoryBuilderIT = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter2HighPtTkMuPSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# Iter2 trajectory-filter cuts for IterL3 muons seeded from L1:
# 5-hit minimum, minPt 0.3, at most 1 lost hit.
process.HLTIter2IterL3FromL1MuonPSetTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 3 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Iter2 (IT) GroupedCkfTrajectoryBuilder for IterL3 muons seeded from L1.
process.HLTIter2IterL3FromL1MuonPSetGroupedCkfTrajectoryBuilderIT = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( False ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter2IterL3FromL1MuonPSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 1000.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter2IterL3FromL1MuonPSetTrajectoryFilterIT" ) )
)
# Iter2 trajectory-filter cuts for IterL3 muons seeded from L2 (same values as
# the FromL1Muon variant above).
process.HLTIter2IterL3MuonPSetTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 3 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Iter2 (IT) GroupedCkfTrajectoryBuilder for IterL3 muons seeded from L2.
process.HLTIter2IterL3MuonPSetGroupedCkfTrajectoryBuilderIT = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( False ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter2IterL3MuonPSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 1000.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter2IterL3MuonPSetTrajectoryFilterIT" ) )
)
# Trajectory-filter cuts for the outside-in IterL3 muon iteration: hard minPt of
# 3.0 and unbounded hit count (maxNumberOfHits = -1).
process.HLTPSetCkfTrajectoryFilterIterL3OI = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 3.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 10.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( -1 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Out-in CkfBaseTrajectoryFilter cuts for the pixel-pair tracking step.
process.HLTPSetPixelPairStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 4 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.1 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# In-out CkfBaseTrajectoryFilter for the pixel-pair step — identical to the
# out-in filter except seedExtension is 1.
process.HLTPSetPixelPairStepTrajectoryFilterInOut = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 4 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.1 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 1 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder for the pixel-pair step; uses distinct out-in and
# in-out filters (useSameTrajFilter = False).
process.HLTPSetPixelPairStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilterInOut" ) ),
  useSameTrajFilter = cms.bool( False ),
  maxCand = cms.int32( 3 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPPixelPairStepChi2ChargeMeasurementEstimator9" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# TOB/TEC step GroupedCkfTrajectoryBuilder variant for the PPOnAA (heavy-ion /
# pp reference) menu; references the PPOnAA-specific filter PSets.
process.HLTPSetTobTecStepTrajectoryBuilderPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( False ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetTobTecStepTrajectoryFilterPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 4 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPTobTecStepChi2ChargeMeasurementEstimator16" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetTobTecStepInOutTrajectoryFilterPPOnAA" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Low-pT triplet step trajectory-filter cuts for the PPOnAA menu; differs from
# the standard low-pT triplet filter mainly in the higher minPt (0.49 vs 0.075).
process.HLTPSetLowPtTripletStepTrajectoryFilterPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 0.49 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetDetachedQuadStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.9 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetCkfBaseTrajectoryFilter_block = cms.PSet(
minimumNumberOfHits = cms.int32( 5 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.9 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetInitialStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 1 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetLowPtTripletStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtTripletStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 4 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPLowPtTripletStepChi2ChargeMeasurementEstimator9" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetTobTecStepInOutTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 2.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 0 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetInitialStepTrajectoryFilterBasePreSplittingPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.2 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetDetachedTripletStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedTripletStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPDetachedTripletStepChi2ChargeMeasurementEstimator9" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetHighPtTripletStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetHighPtTripletStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPHighPtTripletStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetDetachedTripletStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.9 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetInitialStepTrajectoryBuilderPreSplittingPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterPreSplittingPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetLowPtQuadStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.49 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetJetCoreStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetJetCoreStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 50 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetMixedTripletStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.4 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.4 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetJetCoreStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 5.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetInitialStepTrajectoryFilterPreSplittingPPOnAA = cms.PSet(
ComponentType = cms.string( "CompositeTrajectoryFilter" ),
filters = cms.VPSet(
cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterBasePreSplittingPPOnAA" ) ),
cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterShapePreSplittingPPOnAA" ) )
)
)
process.HLTPSetMixedTripletStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialForMixedStepOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMixedTripletStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialForMixedStep" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 2 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPMixedTripletStepChi2ChargeMeasurementEstimator16" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetHighPtTripletStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.7 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetLowPtQuadStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtQuadStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 4 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPLowPtQuadStepChi2ChargeMeasurementEstimator9" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetPixelLessStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelLessStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 4 ),
maxCand = cms.int32( 2 ),
alwaysUseInvalidHits = cms.bool( False ),
estimator = cms.string( "hltESPPixelLessStepChi2ChargeMeasurementEstimator16" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTSiStripClusterChargeCutLoose = cms.PSet( value = cms.double( 1620.0 ) )
process.HLTPSetDetachedQuadStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedQuadStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPDetachedQuadStepChi2ChargeMeasurementEstimator9" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetInitialStepTrajectoryFilterShapePreSplittingPPOnAA = cms.PSet(
ComponentType = cms.string( "StripSubClusterShapeTrajectoryFilter" ),
subclusterCutSN = cms.double( 12.0 ),
trimMaxADC = cms.double( 30.0 ),
seedCutMIPs = cms.double( 0.35 ),
subclusterCutMIPs = cms.double( 0.45 ),
subclusterWindow = cms.double( 0.7 ),
maxNSat = cms.uint32( 3 ),
trimMaxFracNeigh = cms.double( 0.25 ),
maxTrimmedSizeDiffNeg = cms.double( 1.0 ),
seedCutSN = cms.double( 7.0 ),
layerMask = cms.PSet(
TOB = cms.bool( False ),
TIB = cms.vuint32( 1, 2 ),
TID = cms.vuint32( 1, 2 ),
TEC = cms.bool( False )
),
maxTrimmedSizeDiffPos = cms.double( 0.7 ),
trimMaxFracTotal = cms.double( 0.15 )
)
process.HLTPSetInitialStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.6 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetTobTecStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 5 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 2.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 0 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetPixelLessStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 2.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 0 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetPixelPairStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.1 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetPixelPairStepTrajectoryFilterInOutPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 1 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.1 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetPixelPairStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( False ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPPixelPairStepChi2ChargeMeasurementEstimator9" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilterInOutPPOnAA" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetInitialStepTrajectoryFilterBasePreSplittingForFullTrackingPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
maxCCCLostHits = cms.int32( 0 ),
nSigmaMinPt = cms.double( 5.0 ),
minPt = cms.double( 1.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetInitialStepTrajectoryBuilderPreSplittingForFullTrackingPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterPreSplittingForFullTrackingPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetInitialStepTrajectoryFilterPreSplittingForFullTrackingPPOnAA = cms.PSet(
ComponentType = cms.string( "CompositeTrajectoryFilter" ),
filters = cms.VPSet(
cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterBasePreSplittingForFullTrackingPPOnAA" ) ),
cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterShapePreSplittingPPOnAA" ) )
)
)
process.HLTPSetInitialStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
maxCCCLostHits = cms.int32( 0 ),
nSigmaMinPt = cms.double( 5.0 ),
minPt = cms.double( 1.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetInitialStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 1 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# Trajectory filter for the low-pT quadruplet seeding step (>= 3 hits, pT > 1).
process.HLTPSetLowPtQuadStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 1.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the low-pT quadruplet step (up to 4 candidates per seed,
# step-specific chi2/charge estimator).
process.HLTPSetLowPtQuadStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtQuadStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 4 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPLowPtQuadStepChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for the high-pT triplet seeding step (>= 3 hits, pT > 1).
process.HLTPSetHighPtTripletStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 1.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the high-pT triplet step (3 candidates, looser chi2 estimator 30).
process.HLTPSetHighPtTripletStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetHighPtTripletStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPHighPtTripletStepChi2ChargeMeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for the low-pT triplet step; note the higher pT cut (2.8)
# than the quad/high-pT-triplet steps above.
process.HLTPSetLowPtTripletStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 2.8 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the low-pT triplet step (4 candidates, tight chi2 estimator 9).
process.HLTPSetLowPtTripletStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtTripletStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 4 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPLowPtTripletStepChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for the detached (displaced) quadruplet step (pT > 5).
process.HLTPSetDetachedQuadStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Trajectory filter for the detached triplet step (same cuts as detached quad).
process.HLTPSetDetachedTripletStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Trajectory filter for the pixel-pair step (>= 4 hits, pT > 5).
process.HLTPSetPixelPairStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the pixel-pair step; uses a distinct in-out filter (hence
# useSameTrajFilter = False), referenced below in this file.
process.HLTPSetPixelPairStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( False ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPPixelPairStepChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilterInOutForFullTrackingPPOnAA" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for the mixed-triplet step; CCC cut effectively disabled
# (maxCCCLostHits 9999, charge cut "None") and a lower lost-hits-fraction
# constant (1.4) than the pixel-seeded steps.
process.HLTPSetMixedTripletStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.4 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Trajectory filter for the pixel-less (strip-seeded) step: no lost hits
# allowed (maxLostHits 0) and seed-pair penalty enabled.
process.HLTPSetPixelLessStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the pixel-less step: only 2 candidates, no invalid hits kept,
# rebuild threshold lowered to 4 hits.
process.HLTPSetPixelLessStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelLessStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 4 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPPixelLessStepChi2ChargeMeasurementEstimator16" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for the TOB/TEC step (>= 5 hits, no lost hits).
process.HLTPSetTobTecStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# In-out variant of the TOB/TEC filter, relaxed to >= 4 hits.
process.HLTPSetTobTecStepInOutTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the TOB/TEC step; uses the dedicated in-out filter above
# (useSameTrajFilter = False), 2 candidates, no invalid hits kept.
process.HLTPSetTobTecStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( False ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetTobTecStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 4 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPTobTecStepChi2ChargeMeasurementEstimator16" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetTobTecStepInOutTrajectoryFilterForFullTrackingPPOnAA" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for the jet-core step (dense-environment tracking in jets).
process.HLTPSetJetCoreStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the jet-core step; very wide combinatorics (maxCand 50) to cope
# with overlapping hits inside jet cores, generic chi2 estimator 30.
process.HLTPSetJetCoreStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetJetCoreStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 50 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# In-out trajectory filter for the pixel-pair step (seedExtension 1);
# referenced by the pixel-pair builder earlier in this file.
process.HLTPSetPixelPairStepTrajectoryFilterInOutForFullTrackingPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 1 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the mixed-triplet step; note the dedicated mixed-step
# propagators instead of the parabolic-field ones used by the other steps.
process.HLTPSetMixedTripletStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialForMixedStepOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMixedTripletStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForMixedStep" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPMixedTripletStepChi2ChargeMeasurementEstimator16" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Builder for the detached quadruplet step.
process.HLTPSetDetachedQuadStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedQuadStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPDetachedQuadStepChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Builder for the detached triplet step (mirrors the detached quad builder,
# with its own filter and estimator).
process.HLTPSetDetachedTripletStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedTripletStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPDetachedTripletStepChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
process.HLTPSetInitialStepTrajectoryFilterBasePreSplittingForDmesonPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
maxCCCLostHits = cms.int32( 0 ),
nSigmaMinPt = cms.double( 5.0 ),
minPt = cms.double( 3.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetInitialStepTrajectoryFilterForDmesonPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
maxCCCLostHits = cms.int32( 0 ),
nSigmaMinPt = cms.double( 5.0 ),
minPt = cms.double( 3.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetInitialStepTrajectoryBuilderForDmesonPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterForDmesonPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 1 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetLowPtQuadStepTrajectoryFilterForDmesonPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 2.8 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder configuration for the low-pT quad step of the
# D-meson pp-on-AA tracking; points at the trajectory filter PSet above and
# at the step-specific chi2/charge measurement estimator.
process.HLTPSetLowPtQuadStepTrajectoryBuilderForDmesonPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtQuadStepTrajectoryFilterForDmesonPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  # maxCand = 4 here vs 3 in the HighPtTriplet builder variant
  maxCand = cms.int32( 4 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPLowPtQuadStepChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter parameters for the high-pT triplet step of the
# D-meson pp-on-AA tracking; identical to the LowPtQuad filter above except
# for minPt (3.5 instead of 2.8).  Referenced via refToPSet_ by the builder
# PSet that follows.
process.HLTPSetHighPtTripletStepTrajectoryFilterForDmesonPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 3.5 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder configuration for the high-pT triplet step of
# the D-meson pp-on-AA tracking; pairs with the filter PSet directly above.
process.HLTPSetHighPtTripletStepTrajectoryBuilderForDmesonPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetHighPtTripletStepTrajectoryFilterForDmesonPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPHighPtTripletStepChi2ChargeMeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Composite trajectory filter for the initial-step pre-splitting iteration:
# logical combination of a base CkfBaseTrajectoryFilter and a strip
# sub-cluster shape filter, both referenced by name.  The shape filter PSet
# (HLTPSetInitialStepTrajectoryFilterShapePreSplittingPPOnAA) is defined
# elsewhere in this dump.
process.HLTPSetInitialStepTrajectoryFilterPreSplittingForDmesonPPOnAA = cms.PSet(
  ComponentType = cms.string( "CompositeTrajectoryFilter" ),
  filters = cms.VPSet(
    cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterBasePreSplittingForDmesonPPOnAA" ) ),
    cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterShapePreSplittingPPOnAA" ) )
  )
)
# GroupedCkfTrajectoryBuilder configuration for the initial step
# (pre-cluster-splitting pass) of the D-meson pp-on-AA tracking; uses the
# composite pre-splitting trajectory filter defined directly above.
process.HLTPSetInitialStepTrajectoryBuilderPreSplittingForDmesonPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterPreSplittingForDmesonPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Output-stream definitions of this HLT menu: each attribute names a stream
# and lists the primary datasets it carries (dataset -> path mapping is in
# process.datasets below).
process.streams = cms.PSet(
  ALCALumiPixelCountsExpress = cms.vstring( 'AlCaLumiPixelCountsExpress' ),
  ALCALumiPixelCountsPrompt = cms.vstring( 'AlCaLumiPixelCountsPrompt' ),
  ALCAP0 = cms.vstring( 'AlCaP0' ),
  ALCAPHISYM = cms.vstring( 'AlCaPhiSym' ),
  Calibration = cms.vstring( 'TestEnablesEcalHcal' ),
  DQM = cms.vstring( 'OnlineMonitor' ),
  DQMCalibration = cms.vstring( 'TestEnablesEcalHcalDQM' ),
  DQMOnlineBeamspot = cms.vstring( 'DQMOnlineBeamspot' ),
  EcalCalibration = cms.vstring( 'EcalLaser' ),
  Express = cms.vstring( 'ExpressPhysics' ),
  ExpressAlignment = cms.vstring( 'ExpressAlignment' ),
  NanoDST = cms.vstring( 'L1Accept' ),
  PhysicsCommissioning = cms.vstring( 'HLTPhysics',
    'ZeroBias' ),
  PhysicsEndOfFill = cms.vstring( 'EmptyBX' ),
  # HI zero-bias datasets are spread over six physics streams, two each
  PhysicsHIZeroBias1 = cms.vstring( 'HIZeroBias1',
    'HIZeroBias2' ),
  PhysicsHIZeroBias2 = cms.vstring( 'HIZeroBias3',
    'HIZeroBias4' ),
  PhysicsHIZeroBias3 = cms.vstring( 'HIZeroBias5',
    'HIZeroBias6' ),
  PhysicsHIZeroBias4 = cms.vstring( 'HIZeroBias7',
    'HIZeroBias8' ),
  PhysicsHIZeroBias5 = cms.vstring( 'HIZeroBias10',
    'HIZeroBias9' ),
  PhysicsHIZeroBias6 = cms.vstring( 'HIZeroBias11',
    'HIZeroBias12' ),
  RPCMON = cms.vstring( 'RPCMonitor' )
)
# Primary-dataset definitions: each attribute names a dataset and lists the
# versioned HLT/AlCa/DST paths whose accepted events it contains.  Dataset
# names here must match the entries of process.streams above.
process.datasets = cms.PSet(
  AlCaLumiPixelCountsExpress = cms.vstring( 'AlCa_LumiPixelsCounts_Random_v2' ),
  AlCaLumiPixelCountsPrompt = cms.vstring( 'AlCa_LumiPixelsCounts_ZeroBias_v2' ),
  AlCaP0 = cms.vstring( 'AlCa_HIEcalEtaEBonly_v1',
    'AlCa_HIEcalEtaEEonly_v1',
    'AlCa_HIEcalPi0EBonly_v1',
    'AlCa_HIEcalPi0EEonly_v1' ),
  AlCaPhiSym = cms.vstring( 'AlCa_EcalPhiSym_v9' ),
  DQMOnlineBeamspot = cms.vstring( 'HLT_HIHT80_Beamspot_ppRef5TeV_v3',
    'HLT_ZeroBias_Beamspot_v4' ),
  EcalLaser = cms.vstring( 'HLT_EcalCalibration_v4' ),
  EmptyBX = cms.vstring( 'HLT_HIL1NotBptxORForPPRef_v2',
    'HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2',
    'HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2' ),
  ExpressAlignment = cms.vstring( 'HLT_HIHT80_Beamspot_ppRef5TeV_v3',
    'HLT_ZeroBias_Beamspot_v4' ),
  ExpressPhysics = cms.vstring( 'HLT_Physics_v7',
    'HLT_Random_v3',
    'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
    'HLT_ZeroBias_v6' ),
  # one dataset per HIZeroBias partition path (part0..part11)
  HIZeroBias1 = cms.vstring( 'HLT_HIZeroBias_part0_v6' ),
  HIZeroBias10 = cms.vstring( 'HLT_HIZeroBias_part9_v6' ),
  HIZeroBias11 = cms.vstring( 'HLT_HIZeroBias_part10_v6' ),
  HIZeroBias12 = cms.vstring( 'HLT_HIZeroBias_part11_v6' ),
  HIZeroBias2 = cms.vstring( 'HLT_HIZeroBias_part1_v6' ),
  HIZeroBias3 = cms.vstring( 'HLT_HIZeroBias_part2_v6' ),
  HIZeroBias4 = cms.vstring( 'HLT_HIZeroBias_part3_v6' ),
  HIZeroBias5 = cms.vstring( 'HLT_HIZeroBias_part4_v6' ),
  HIZeroBias6 = cms.vstring( 'HLT_HIZeroBias_part5_v6' ),
  HIZeroBias7 = cms.vstring( 'HLT_HIZeroBias_part6_v6' ),
  HIZeroBias8 = cms.vstring( 'HLT_HIZeroBias_part7_v6' ),
  HIZeroBias9 = cms.vstring( 'HLT_HIZeroBias_part8_v6' ),
  HLTPhysics = cms.vstring( 'HLT_Physics_v7' ),
  L1Accept = cms.vstring( 'DST_Physics_v7' ),
  OnlineMonitor = cms.vstring( 'HLT_HIL1NotBptxORForPPRef_v2',
    'HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2',
    'HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2',
    'HLT_HIZeroBias_part0_v6',
    'HLT_HIZeroBias_part10_v6',
    'HLT_HIZeroBias_part11_v6',
    'HLT_HIZeroBias_part1_v6',
    'HLT_HIZeroBias_part2_v6',
    'HLT_HIZeroBias_part3_v6',
    'HLT_HIZeroBias_part4_v6',
    'HLT_HIZeroBias_part5_v6',
    'HLT_HIZeroBias_part6_v6',
    'HLT_HIZeroBias_part7_v6',
    'HLT_HIZeroBias_part8_v6',
    'HLT_HIZeroBias_part9_v6',
    'HLT_Physics_v7',
    'HLT_Random_v3',
    'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
    'HLT_ZeroBias_v6' ),
  RPCMonitor = cms.vstring( 'AlCa_HIRPCMuonNormalisation_v1' ),
  TestEnablesEcalHcal = cms.vstring( 'HLT_EcalCalibration_v4',
    'HLT_HcalCalibration_v5' ),
  TestEnablesEcalHcalDQM = cms.vstring( 'HLT_EcalCalibration_v4',
    'HLT_HcalCalibration_v5' ),
  ZeroBias = cms.vstring( 'HLT_Random_v3',
    'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
    'HLT_ZeroBias_v6' )
)
# EmptyESSource instances: they provide no payload themselves, only a valid
# run-based IOV (run >= 1) for the named record, so that the corresponding
# ESProducer can be invoked.
process.CSCChannelMapperESSource = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "CSCChannelMapperRecord" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
# NOTE(review): "CSCINdexer" capitalization is as emitted by the menu
# generator; the label is only used locally, so it is left untouched.
process.CSCINdexerESSource = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "CSCIndexerRecord" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
process.GlobalParametersRcdSource = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "L1TGlobalParametersRcd" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
# Conditions access: PoolDBESSource reading the "103X_dataRun2_HLT_v1"
# global tag from the production Frontier instance.  toGet is empty, i.e.
# no per-record tag overrides are applied.
process.GlobalTag = cms.ESSource( "PoolDBESSource",
  DBParameters = cms.PSet(
    connectionRetrialTimeOut = cms.untracked.int32( 60 ),
    idleConnectionCleanupPeriod = cms.untracked.int32( 10 ),
    enableReadOnlySessionOnUpdateConnection = cms.untracked.bool( False ),
    enablePoolAutomaticCleanUp = cms.untracked.bool( False ),
    messageLevel = cms.untracked.int32( 0 ),
    authenticationPath = cms.untracked.string( "." ),
    connectionRetrialPeriod = cms.untracked.int32( 10 ),
    connectionTimeOut = cms.untracked.int32( 0 ),
    enableConnectionSharing = cms.untracked.bool( True )
  ),
  connect = cms.string( "frontier://FrontierProd/CMS_CONDITIONS" ),
  globaltag = cms.string( "103X_dataRun2_HLT_v1" ),
  snapshotTime = cms.string( "" ),
  toGet = cms.VPSet(
  ),
  DumpStat = cms.untracked.bool( False ),
  ReconnectEachRun = cms.untracked.bool( False ),
  RefreshAlways = cms.untracked.bool( False ),
  RefreshEachRun = cms.untracked.bool( False ),
  RefreshOpenIOVs = cms.untracked.bool( False ),
  pfnPostfix = cms.untracked.string( "None" )
)
# HCAL time-slew parameterisations for the "HBHE" label: three parameter
# sets for the Method-2 model (slope/tmax/tzero) and four for the Method-3
# model (tspar0..2 plus SiPM variants and a cap).  Values are frozen menu
# constants from the generator.
process.HcalTimeSlewEP = cms.ESSource( "HcalTimeSlewEP",
  appendToDataLabel = cms.string( "HBHE" ),
  timeSlewParametersM2 = cms.VPSet(
    cms.PSet( slope = cms.double( -3.178648 ),
      tmax = cms.double( 16.0 ),
      tzero = cms.double( 23.960177 )
    ),
    cms.PSet( slope = cms.double( -1.5610227 ),
      tmax = cms.double( 10.0 ),
      tzero = cms.double( 11.977461 )
    ),
    cms.PSet( slope = cms.double( -1.075824 ),
      tmax = cms.double( 6.25 ),
      tzero = cms.double( 9.109694 )
    )
  ),
  timeSlewParametersM3 = cms.VPSet(
    cms.PSet( tspar2 = cms.double( 0.0 ),
      tspar0 = cms.double( 12.2999 ),
      tspar1 = cms.double( -2.19142 ),
      tspar1_siPM = cms.double( 0.0 ),
      cap = cms.double( 6.0 ),
      tspar0_siPM = cms.double( 0.0 ),
      tspar2_siPM = cms.double( 0.0 )
    ),
    # second entry is the only one with non-default tspar0/1/2
    cms.PSet( tspar2 = cms.double( 32.0 ),
      tspar0 = cms.double( 15.5 ),
      tspar1 = cms.double( -3.2 ),
      tspar1_siPM = cms.double( 0.0 ),
      cap = cms.double( 6.0 ),
      tspar0_siPM = cms.double( 0.0 ),
      tspar2_siPM = cms.double( 0.0 )
    ),
    cms.PSet( tspar2 = cms.double( 0.0 ),
      tspar0 = cms.double( 12.2999 ),
      tspar1 = cms.double( -2.19142 ),
      tspar1_siPM = cms.double( 0.0 ),
      cap = cms.double( 6.0 ),
      tspar0_siPM = cms.double( 0.0 ),
      tspar2_siPM = cms.double( 0.0 )
    ),
    cms.PSet( tspar2 = cms.double( 0.0 ),
      tspar0 = cms.double( 12.2999 ),
      tspar1 = cms.double( -2.19142 ),
      tspar1_siPM = cms.double( 0.0 ),
      cap = cms.double( 6.0 ),
      tspar0_siPM = cms.double( 0.0 ),
      tspar2_siPM = cms.double( 0.0 )
    )
  )
)
# Particle data table read from the packaged Pythia particle list.
process.HepPDTESSource = cms.ESSource( "HepPDTESSource",
  pdtFileName = cms.FileInPath( "SimGeneral/HepPDTESSource/data/pythiaparticle.tbl" )
)
# IOV provider for the ECAL electronics mapping record.
process.eegeom = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "EcalMappingRcd" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
# Hard-coded HCAL calibrations, restricted to gain widths only.
process.es_hardcode = cms.ESSource( "HcalHardcodeCalibrations",
  fromDDD = cms.untracked.bool( False ),
  toGet = cms.untracked.vstring( 'GainWidths' )
)
# IOV providers for the b-tag computer and the ECAL/HCAL severity-level
# records used by the ESProducers further below.
process.hltESSBTagRecord = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "JetTagComputerRecord" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
process.hltESSEcalSeverityLevel = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "EcalSeverityLevelAlgoRcd" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
process.hltESSHcalSeverityLevel = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "HcalSeverityLevelComputerRcd" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
# PPS pixel detector topology constants (Run-3 geometry, dimensions in mm
# presumably -- units not stated in the dump).
process.ppsPixelTopologyESSource = cms.ESSource( "PPSPixelTopologyESSource",
  RunType = cms.string( "Run3" ),
  PitchSimY = cms.double( 0.15 ),
  PitchSimX = cms.double( 0.1 ),
  thickness = cms.double( 0.23 ),
  noOfPixelSimX = cms.int32( 160 ),
  noOfPixelSimY = cms.int32( 104 ),
  noOfPixels = cms.int32( 16640 ),
  simXWidth = cms.double( 16.6 ),
  simYWidth = cms.double( 16.2 ),
  deadEdgeWidth = cms.double( 0.2 ),
  activeEdgeSigma = cms.double( 0.02 ),
  physActiveEdgeDist = cms.double( 0.15 ),
  appendToDataLabel = cms.string( "" )
)
# --- EventSetup producers: propagators, CSC/calo geometry, cluster shape ---
process.AnyDirectionAnalyticalPropagator = cms.ESProducer( "AnalyticalPropagatorESProducer",
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "AnyDirectionAnalyticalPropagator" ),
  PropagationDirection = cms.string( "anyDirection" )
)
process.CSCChannelMapperESProducer = cms.ESProducer( "CSCChannelMapperESProducer",
  AlgoName = cms.string( "CSCChannelMapperPostls1" )
)
# CSC geometry built from DB payloads (fromDDD/fromDD4hep both False),
# with alignment applied and post-LS1 unganged ME1/a strips.
process.CSCGeometryESModule = cms.ESProducer( "CSCGeometryESModule",
  fromDDD = cms.bool( False ),
  fromDD4hep = cms.bool( False ),
  alignmentsLabel = cms.string( "" ),
  appendToDataLabel = cms.string( "" ),
  useRealWireGeometry = cms.bool( True ),
  useOnlyWiresInME1a = cms.bool( False ),
  useGangedStripsInME1a = cms.bool( False ),
  useCentreTIOffsets = cms.bool( False ),
  applyAlignment = cms.bool( True ),
  debugV = cms.untracked.bool( False )
)
process.CSCIndexerESProducer = cms.ESProducer( "CSCIndexerESProducer",
  AlgoName = cms.string( "CSCIndexerPostls1" )
)
process.CSCObjectMapESProducer = cms.ESProducer( "CSCObjectMapESProducer",
  appendToDataLabel = cms.string( "" )
)
# Full calorimeter geometry assembled from the listed subdetectors.
process.CaloGeometryBuilder = cms.ESProducer( "CaloGeometryBuilder",
  SelectedCalos = cms.vstring( 'HCAL',
    'ZDC',
    'EcalBarrel',
    'EcalEndcap',
    'EcalPreshower',
    'TOWER' )
)
process.CaloTopologyBuilder = cms.ESProducer( "CaloTopologyBuilder" )
process.CaloTowerConstituentsMapBuilder = cms.ESProducer( "CaloTowerConstituentsMapBuilder",
  MapFile = cms.untracked.string( "Geometry/CaloTopology/data/CaloTowerEEGeometric.map.gz" ),
  MapAuto = cms.untracked.bool( False ),
  SkipHE = cms.untracked.bool( False ),
  appendToDataLabel = cms.string( "" )
)
process.CaloTowerGeometryFromDBEP = cms.ESProducer( "CaloTowerGeometryFromDBEP",
  applyAlignment = cms.bool( False )
)
process.CaloTowerTopologyEP = cms.ESProducer( "CaloTowerTopologyEP",
  appendToDataLabel = cms.string( "" )
)
process.CastorDbProducer = cms.ESProducer( "CastorDbProducer",
  appendToDataLabel = cms.string( "" )
)
# Pixel/strip cluster shape filter; the charge cut is referenced by name
# ("...CutNone", i.e. effectively disabled here).
process.ClusterShapeHitFilterESProducer = cms.ESProducer( "ClusterShapeHitFilterESProducer",
  ComponentName = cms.string( "ClusterShapeHitFilter" ),
  PixelShapeFile = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_noL1.par" ),
  PixelShapeFileL1 = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_loose.par" ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) )
)
# --- EventSetup producers: muon/ECAL/GEM geometry, L1 global parameters,
# --- HCAL geometry and topology ---
process.DTGeometryESModule = cms.ESProducer( "DTGeometryESModule",
  fromDDD = cms.bool( False ),
  fromDD4hep = cms.bool( False ),
  DDDetector = cms.ESInputTag( "","" ),
  alignmentsLabel = cms.string( "" ),
  appendToDataLabel = cms.string( "" ),
  attribute = cms.string( "MuStructure" ),
  value = cms.string( "MuonBarrelDT" ),
  applyAlignment = cms.bool( True )
)
process.DTObjectMapESProducer = cms.ESProducer( "DTObjectMapESProducer",
  appendToDataLabel = cms.string( "" )
)
process.EcalBarrelGeometryFromDBEP = cms.ESProducer( "EcalBarrelGeometryFromDBEP",
  applyAlignment = cms.bool( True )
)
process.EcalElectronicsMappingBuilder = cms.ESProducer( "EcalElectronicsMappingBuilder" )
process.EcalEndcapGeometryFromDBEP = cms.ESProducer( "EcalEndcapGeometryFromDBEP",
  applyAlignment = cms.bool( True )
)
# maxExtrapolationTimeInSec = 0: no extrapolation of laser corrections
# beyond their IOV.
process.EcalLaserCorrectionService = cms.ESProducer( "EcalLaserCorrectionService",
  maxExtrapolationTimeInSec = cms.uint32( 0 ),
  appendToDataLabel = cms.string( "" )
)
process.EcalPreshowerGeometryFromDBEP = cms.ESProducer( "EcalPreshowerGeometryFromDBEP",
  applyAlignment = cms.bool( True )
)
process.GEMGeometryESModule = cms.ESProducer( "GEMGeometryESModule",
  fromDDD = cms.bool( False ),
  fromDD4hep = cms.bool( False ),
  applyAlignment = cms.bool( False ),
  alignmentsLabel = cms.string( "" ),
  appendToDataLabel = cms.string( "" )
)
# L1 trigger global parameters (object counts, chip layout, word sizes).
process.GlobalParameters = cms.ESProducer( "StableParametersTrivialProducer",
  TotalBxInEvent = cms.int32( 5 ),
  NumberPhysTriggers = cms.uint32( 512 ),
  NumberL1Muon = cms.uint32( 8 ),
  NumberL1EGamma = cms.uint32( 12 ),
  NumberL1Jet = cms.uint32( 12 ),
  NumberL1Tau = cms.uint32( 12 ),
  NumberChips = cms.uint32( 1 ),
  PinsOnChip = cms.uint32( 512 ),
  OrderOfChip = cms.vint32( 1 ),
  NumberL1IsoEG = cms.uint32( 4 ),
  NumberL1JetCounts = cms.uint32( 12 ),
  UnitLength = cms.int32( 8 ),
  NumberL1ForJet = cms.uint32( 4 ),
  IfCaloEtaNumberBits = cms.uint32( 4 ),
  IfMuEtaNumberBits = cms.uint32( 6 ),
  NumberL1TauJet = cms.uint32( 4 ),
  NumberL1Mu = cms.uint32( 4 ),
  NumberConditionChips = cms.uint32( 1 ),
  NumberPsbBoards = cms.int32( 7 ),
  NumberL1CenJet = cms.uint32( 4 ),
  PinsOnConditionChip = cms.uint32( 512 ),
  NumberL1NoIsoEG = cms.uint32( 4 ),
  NumberTechnicalTriggers = cms.uint32( 64 ),
  NumberPhysTriggersExtended = cms.uint32( 64 ),
  WordLength = cms.int32( 64 ),
  OrderConditionChip = cms.vint32( 1 ),
  appendToDataLabel = cms.string( "" )
)
process.HcalGeometryFromDBEP = cms.ESProducer( "HcalGeometryFromDBEP",
  applyAlignment = cms.bool( False )
)
process.HcalTopologyIdealEP = cms.ESProducer( "HcalTopologyIdealEP",
  Exclude = cms.untracked.string( "" ),
  MergePosition = cms.untracked.bool( True ),
  appendToDataLabel = cms.string( "" )
)
# --- Material propagators: along/opposite momentum, optionally on the
# --- parabolic parametrised field; Mass is the assumed particle mass in
# --- GeV (0.105 ~ muon, 0.139 ~ pion for the HI variants) ---
process.MaterialPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterial" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.MaterialPropagatorForHI = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialForHI" ),
  Mass = cms.double( 0.139 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.MaterialPropagatorParabolicMF = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialParabolicMf" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.OppositeMaterialPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialOpposite" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.OppositeMaterialPropagatorForHI = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialOppositeForHI" ),
  Mass = cms.double( 0.139 ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.OppositeMaterialPropagatorParabolicMF = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
# Mixed-step variant uses a positive ptMin cut (0.1) unlike the others (-1).
process.OppositePropagatorWithMaterialForMixedStep = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialForMixedStepOpposite" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( 0.1 )
)
# Parabolic parametrised magnetic field, labelled "ParabolicMf"; current
# taken from the run (valueOverride = -1 means no override).
process.ParametrizedMagneticFieldProducer = cms.ESProducer( "AutoParametrizedMagneticFieldProducer",
  version = cms.string( "Parabolic" ),
  label = cms.untracked.string( "ParabolicMf" ),
  valueOverride = cms.int32( -1 )
)
# Fast pixel cluster-parameter-estimator; per-layer hit-error tables
# (xerr_*/yerr_*) are in cm -- TODO confirm units against PixelCPEFast docs.
process.PixelCPEFastESProducer = cms.ESProducer( "PixelCPEFastESProducer",
  LoadTemplatesFromDB = cms.bool( True ),
  Alpha2Order = cms.bool( True ),
  ClusterProbComputationFlag = cms.int32( 0 ),
  useLAWidthFromDB = cms.bool( True ),
  lAOffset = cms.double( 0.0 ),
  lAWidthBPix = cms.double( 0.0 ),
  lAWidthFPix = cms.double( 0.0 ),
  doLorentzFromAlignment = cms.bool( False ),
  useLAFromDB = cms.bool( True ),
  xerr_barrel_l1 = cms.vdouble( 0.00115, 0.0012, 8.8E-4 ),
  yerr_barrel_l1 = cms.vdouble( 0.00375, 0.0023, 0.0025, 0.0025, 0.0023, 0.0023, 0.0021, 0.0021, 0.0024 ),
  xerr_barrel_ln = cms.vdouble( 0.00115, 0.0012, 8.8E-4 ),
  yerr_barrel_ln = cms.vdouble( 0.00375, 0.0023, 0.0025, 0.0025, 0.0023, 0.0023, 0.0021, 0.0021, 0.0024 ),
  xerr_endcap = cms.vdouble( 0.002, 0.002 ),
  yerr_endcap = cms.vdouble( 0.0021 ),
  xerr_barrel_l1_def = cms.double( 0.0103 ),
  yerr_barrel_l1_def = cms.double( 0.0021 ),
  xerr_barrel_ln_def = cms.double( 0.0103 ),
  yerr_barrel_ln_def = cms.double( 0.0021 ),
  xerr_endcap_def = cms.double( 0.002 ),
  yerr_endcap_def = cms.double( 7.5E-4 ),
  isPhase2 = cms.bool( False ),
  EdgeClusterErrorX = cms.double( 50.0 ),
  EdgeClusterErrorY = cms.double( 85.0 ),
  UseErrorsFromTemplates = cms.bool( True ),
  TruncatePixelCharge = cms.bool( True ),
  ComponentName = cms.string( "PixelCPEFast" ),
  MagneticFieldRecord = cms.ESInputTag( "","" ),
  appendToDataLabel = cms.string( "" )
)
# Looper propagator: wider MaxDPhi (4.0) and pion mass hypothesis.
process.PropagatorWithMaterialForLoopers = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 4.0 ),
  ComponentName = cms.string( "PropagatorWithMaterialForLoopers" ),
  Mass = cms.double( 0.1396 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.PropagatorWithMaterialForMixedStep = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialForMixedStep" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( 0.1 )
)
# --- RPC geometry, SiStrip conditions/quality, and secondary-vertex
# --- computer ---
process.RPCGeometryESModule = cms.ESProducer( "RPCGeometryESModule",
  fromDDD = cms.untracked.bool( False ),
  fromDD4hep = cms.untracked.bool( False ),
  appendToDataLabel = cms.string( "" )
)
process.SiStripClusterizerConditionsESProducer = cms.ESProducer( "SiStripClusterizerConditionsESProducer",
  QualityLabel = cms.string( "" ),
  Label = cms.string( "" ),
  appendToDataLabel = cms.string( "" )
)
# Combines the two APV gain records (G1 and G2) without renormalisation.
process.SiStripGainESProducer = cms.ESProducer( "SiStripGainESProducer",
  appendToDataLabel = cms.string( "" ),
  printDebug = cms.untracked.bool( False ),
  AutomaticNormalization = cms.bool( False ),
  APVGain = cms.VPSet(
    cms.PSet( NormalizationFactor = cms.untracked.double( 1.0 ),
      Label = cms.untracked.string( "" ),
      Record = cms.string( "SiStripApvGainRcd" )
    ),
    cms.PSet( NormalizationFactor = cms.untracked.double( 1.0 ),
      Label = cms.untracked.string( "" ),
      Record = cms.string( "SiStripApvGain2Rcd" )
    )
  )
)
# Merges the listed bad-component records into one SiStripQuality payload.
process.SiStripQualityESProducer = cms.ESProducer( "SiStripQualityESProducer",
  appendToDataLabel = cms.string( "" ),
  ListOfRecordToMerge = cms.VPSet(
    cms.PSet( record = cms.string( "SiStripDetVOffRcd" ),
      tag = cms.string( "" )
    ),
    cms.PSet( record = cms.string( "SiStripDetCablingRcd" ),
      tag = cms.string( "" )
    ),
    cms.PSet( record = cms.string( "SiStripBadChannelRcd" ),
      tag = cms.string( "" )
    ),
    cms.PSet( record = cms.string( "SiStripBadFiberRcd" ),
      tag = cms.string( "" )
    ),
    cms.PSet( record = cms.string( "SiStripBadModuleRcd" ),
      tag = cms.string( "" )
    )
  ),
  ReduceGranularity = cms.bool( False ),
  ThresholdForReducedGranularity = cms.double( 0.3 ),
  PrintDebugOutput = cms.bool( False ),
  UseEmptyRunInfo = cms.bool( False )
)
process.SiStripRecHitMatcherESProducer = cms.ESProducer( "SiStripRecHitMatcherESProducer",
  ComponentName = cms.string( "StandardMatcher" ),
  NSigmaInside = cms.double( 3.0 ),
  PreFilter = cms.bool( False )
)
process.SiStripRegionConnectivity = cms.ESProducer( "SiStripRegionConnectivity",
  EtaDivisions = cms.untracked.uint32( 20 ),
  PhiDivisions = cms.untracked.uint32( 20 ),
  EtaMax = cms.untracked.double( 2.5 )
)
# Simple SV b-tag computer: 3D flight-distance significance, >= 3 tracks.
process.SimpleSecondaryVertex3TrkComputer = cms.ESProducer( "SimpleSecondaryVertexESProducer",
  use3d = cms.bool( True ),
  unBoost = cms.bool( False ),
  useSignificance = cms.bool( True ),
  minTracks = cms.uint32( 3 ),
  minVertices = cms.uint32( 1 )
)
# --- Stepping-helix propagator, tracker geometry, transient tracks,
# --- volume-based B-field, ZDC geometry ---
process.SteppingHelixPropagatorAny = cms.ESProducer( "SteppingHelixPropagatorESProducer",
  ComponentName = cms.string( "SteppingHelixPropagatorAny" ),
  NoErrorPropagation = cms.bool( False ),
  PropagationDirection = cms.string( "anyDirection" ),
  useTuningForL2Speed = cms.bool( False ),
  useIsYokeFlag = cms.bool( True ),
  endcapShiftInZNeg = cms.double( 0.0 ),
  SetVBFPointer = cms.bool( False ),
  AssumeNoMaterial = cms.bool( False ),
  endcapShiftInZPos = cms.double( 0.0 ),
  useInTeslaFromMagField = cms.bool( False ),
  VBFName = cms.string( "VolumeBasedMagneticField" ),
  useEndcapShiftsInZ = cms.bool( False ),
  sendLogWarning = cms.bool( False ),
  useMatVolumes = cms.bool( True ),
  debug = cms.bool( False ),
  ApplyRadX0Correction = cms.bool( True ),
  useMagVolumes = cms.bool( True ),
  returnTangentPlane = cms.bool( True )
)
process.TrackerAdditionalParametersPerDetESModule = cms.ESProducer( "TrackerAdditionalParametersPerDetESModule",
  appendToDataLabel = cms.string( "" )
)
process.TrackerDigiGeometryESModule = cms.ESProducer( "TrackerDigiGeometryESModule",
  appendToDataLabel = cms.string( "" ),
  fromDDD = cms.bool( False ),
  applyAlignment = cms.bool( True ),
  alignmentsLabel = cms.string( "" )
)
process.TrackerGeometricDetESModule = cms.ESProducer( "TrackerGeometricDetESModule",
  fromDDD = cms.bool( False ),
  fromDD4hep = cms.bool( False ),
  appendToDataLabel = cms.string( "" )
)
process.TransientTrackBuilderESProducer = cms.ESProducer( "TransientTrackBuilderESProducer",
  ComponentName = cms.string( "TransientTrackBuilder" )
)
# Full volume-based magnetic-field map from the DB; no current override.
process.VolumeBasedMagneticFieldESProducer = cms.ESProducer( "VolumeBasedMagneticFieldESProducerFromDB",
  label = cms.untracked.string( "" ),
  debugBuilder = cms.untracked.bool( False ),
  valueOverride = cms.int32( -1 )
)
process.ZdcGeometryFromDBEP = cms.ESProducer( "ZdcGeometryFromDBEP",
  applyAlignment = cms.bool( False )
)
# --- DetId associators (eta-phi lookup maps), navigation school, and
# --- CTPPS geometry/optics ---
process.caloDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "CaloDetIdAssociator" ),
  etaBinSize = cms.double( 0.087 ),
  nEta = cms.int32( 70 ),
  nPhi = cms.int32( 72 ),
  hcalRegion = cms.int32( 2 ),
  includeBadChambers = cms.bool( False ),
  includeGEM = cms.bool( False ),
  includeME0 = cms.bool( False )
)
process.cosmicsNavigationSchoolESProducer = cms.ESProducer( "NavigationSchoolESProducer",
  ComponentName = cms.string( "CosmicNavigationSchool" ),
  SimpleMagneticField = cms.string( "" )
)
process.ctppsGeometryESModule = cms.ESProducer( "CTPPSGeometryESModule",
  verbosity = cms.untracked.uint32( 1 ),
  buildMisalignedGeometry = cms.bool( False ),
  isRun2 = cms.bool( False ),
  dbTag = cms.string( "" ),
  compactViewTag = cms.string( "" ),
  fromPreprocessedDB = cms.untracked.bool( True ),
  fromDD4hep = cms.untracked.bool( False ),
  appendToDataLabel = cms.string( "" )
)
process.ctppsInterpolatedOpticalFunctionsESSource = cms.ESProducer( "CTPPSInterpolatedOpticalFunctionsESSource",
  lhcInfoLabel = cms.string( "" ),
  opticsLabel = cms.string( "" ),
  appendToDataLabel = cms.string( "" )
)
# ECAL associator uses a finer eta grid (0.02 x 300 bins) than the
# calo/hcal associators (0.087 x 70 bins).
process.ecalDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "EcalDetIdAssociator" ),
  etaBinSize = cms.double( 0.02 ),
  nEta = cms.int32( 300 ),
  nPhi = cms.int32( 360 ),
  hcalRegion = cms.int32( 2 ),
  includeBadChambers = cms.bool( False ),
  includeGEM = cms.bool( False ),
  includeME0 = cms.bool( False )
)
# ECAL severity-level classification: maps rec-hit flags (flagMask) and
# channel-status DB bits (dbstatusMask) to the kGood..kTime severity
# categories; timeThresh applies to the kTime category.
process.ecalSeverityLevel = cms.ESProducer( "EcalSeverityLevelESProducer",
  flagMask = cms.PSet(
    kBad = cms.vstring( 'kFaultyHardware',
      'kDead',
      'kKilled' ),
    kGood = cms.vstring( 'kGood' ),
    kRecovered = cms.vstring( 'kLeadingEdgeRecovered',
      'kTowerRecovered' ),
    kProblematic = cms.vstring( 'kPoorReco',
      'kPoorCalib',
      'kNoisy',
      'kSaturated' ),
    kWeird = cms.vstring( 'kWeird',
      'kDiWeird' ),
    kTime = cms.vstring( 'kOutOfTime' )
  ),
  dbstatusMask = cms.PSet(
    kBad = cms.vstring( 'kNonRespondingIsolated',
      'kDeadVFE',
      'kDeadFE',
      'kNoDataNoTP' ),
    kGood = cms.vstring( 'kOk' ),
    kRecovered = cms.vstring( ),
    kProblematic = cms.vstring( 'kDAC',
      'kNoLaser',
      'kNoisy',
      'kNNoisy',
      'kNNNoisy',
      'kNNNNoisy',
      'kNNNNNoisy',
      'kFixedG6',
      'kFixedG1',
      'kFixedG0' ),
    kWeird = cms.vstring( ),
    kTime = cms.vstring( )
  ),
  timeThresh = cms.double( 2.0 )
)
process.hcalChannelPropertiesESProd = cms.ESProducer( "HcalChannelPropertiesEP" )
process.hcalDDDRecConstants = cms.ESProducer( "HcalDDDRecConstantsESModule",
  appendToDataLabel = cms.string( "" )
)
process.hcalDDDSimConstants = cms.ESProducer( "HcalDDDSimConstantsESModule",
  appendToDataLabel = cms.string( "" )
)
process.hcalDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "HcalDetIdAssociator" ),
  etaBinSize = cms.double( 0.087 ),
  nEta = cms.int32( 70 ),
  nPhi = cms.int32( 72 ),
  hcalRegion = cms.int32( 2 ),
  includeBadChambers = cms.bool( False ),
  includeGEM = cms.bool( False ),
  includeME0 = cms.bool( False )
)
# HCAL severity-level computer (phase 1): each VPSet entry maps channel
# status bits and/or rec-hit flags to an integer severity Level (higher =
# worse); DropChannelStatusBits lists status bits to ignore entirely.
process.hcalRecAlgos = cms.ESProducer( "HcalRecAlgoESProducer",
  phase = cms.uint32( 1 ),
  RecoveredRecHitBits = cms.vstring( ),
  SeverityLevels = cms.VPSet(
    cms.PSet( ChannelStatus = cms.vstring( ),
      RecHitFlags = cms.vstring( 'TimingFromTDC' ),
      Level = cms.int32( 0 )
    ),
    cms.PSet( ChannelStatus = cms.vstring( 'HcalCellCaloTowerProb' ),
      RecHitFlags = cms.vstring( ),
      Level = cms.int32( 1 )
    ),
    cms.PSet( ChannelStatus = cms.vstring( 'HcalCellExcludeFromHBHENoiseSummary' ),
      RecHitFlags = cms.vstring( ),
      Level = cms.int32( 5 )
    ),
    cms.PSet( ChannelStatus = cms.vstring( ),
      RecHitFlags = cms.vstring( 'HBHEHpdHitMultiplicity',
        'HBHEIsolatedNoise',
        'HBHEFlatNoise',
        'HBHESpikeNoise',
        'HBHETS4TS5Noise',
        'HBHENegativeNoise',
        'HBHEPulseFitBit',
        'HBHEOOTPU' ),
      Level = cms.int32( 8 )
    ),
    cms.PSet( ChannelStatus = cms.vstring( ),
      RecHitFlags = cms.vstring( 'HFLongShort',
        'HFS8S1Ratio',
        'HFPET',
        'HFSignalAsymmetry' ),
      Level = cms.int32( 11 )
    ),
    cms.PSet( ChannelStatus = cms.vstring( 'HcalCellHot' ),
      RecHitFlags = cms.vstring( ),
      Level = cms.int32( 15 )
    ),
    cms.PSet( ChannelStatus = cms.vstring( 'HcalCellOff',
      'HcalCellDead' ),
      RecHitFlags = cms.vstring( ),
      Level = cms.int32( 20 )
    )
  ),
  DropChannelStatusBits = cms.vstring( 'HcalCellMask',
    'HcalCellOff',
    'HcalCellDead' ),
  appendToDataLabel = cms.string( "" )
)
process.hcal_db_producer = cms.ESProducer( "HcalDbProducer" )
# Boosted double-SV tagger for AK8 jets, read from the packaged BDT
# weight file (not from the conditions DB).
process.hltBoostedDoubleSecondaryVertexAK8Computer = cms.ESProducer( "CandidateBoostedDoubleSecondaryVertexESProducer",
  useCondDB = cms.bool( False ),
  weightFile = cms.FileInPath( "RecoBTag/SecondaryVertex/data/BoostedDoubleSV_AK8_BDT_v4.weights.xml.gz" ),
  useGBRForest = cms.bool( True ),
  useAdaBoost = cms.bool( False )
)
# Combined secondary-vertex (CSV v1) b-tag computer.  trackPseudoSelection
# and trackSelection are identical except for sip2dSigMin (2.0 vs -99999.9,
# i.e. the pseudo-vertex track set additionally requires a significant
# positive 2D impact parameter).
process.hltCombinedSecondaryVertex = cms.ESProducer( "CombinedSecondaryVertexESProducer",
  trackPseudoSelection = cms.PSet(
    maxDistToAxis = cms.double( 0.07 ),
    totalHitsMin = cms.uint32( 0 ),
    ptMin = cms.double( 0.0 ),
    sip2dSigMax = cms.double( 99999.9 ),
    sip2dValMax = cms.double( 99999.9 ),
    sip3dSigMax = cms.double( 99999.9 ),
    sip3dValMax = cms.double( 99999.9 ),
    maxDecayLen = cms.double( 5.0 ),
    qualityClass = cms.string( "any" ),
    jetDeltaRMax = cms.double( 0.3 ),
    normChi2Max = cms.double( 99999.9 ),
    pixelHitsMin = cms.uint32( 0 ),
    sip2dSigMin = cms.double( 2.0 ),
    sip2dValMin = cms.double( -99999.9 ),
    sip3dSigMin = cms.double( -99999.9 ),
    sip3dValMin = cms.double( -99999.9 )
  ),
  trackSelection = cms.PSet(
    maxDistToAxis = cms.double( 0.07 ),
    totalHitsMin = cms.uint32( 0 ),
    ptMin = cms.double( 0.0 ),
    sip2dSigMax = cms.double( 99999.9 ),
    sip2dValMax = cms.double( 99999.9 ),
    sip3dSigMax = cms.double( 99999.9 ),
    sip3dValMax = cms.double( 99999.9 ),
    maxDecayLen = cms.double( 5.0 ),
    qualityClass = cms.string( "any" ),
    jetDeltaRMax = cms.double( 0.3 ),
    normChi2Max = cms.double( 99999.9 ),
    pixelHitsMin = cms.uint32( 0 ),
    sip2dSigMin = cms.double( -99999.9 ),
    sip2dValMin = cms.double( -99999.9 ),
    sip3dSigMin = cms.double( -99999.9 ),
    sip3dValMin = cms.double( -99999.9 )
  ),
  trackFlip = cms.bool( False ),
  vertexFlip = cms.bool( False ),
  SoftLeptonFlip = cms.bool( False ),
  useTrackWeights = cms.bool( True ),
  pseudoMultiplicityMin = cms.uint32( 2 ),
  correctVertexMass = cms.bool( True ),
  trackPairV0Filter = cms.PSet( k0sMassWindow = cms.double( 0.03 ) ),
  charmCut = cms.double( 1.5 ),
  minimumTrackWeight = cms.double( 0.5 ),
  pseudoVertexV0Filter = cms.PSet( k0sMassWindow = cms.double( 0.05 ) ),
  trackMultiplicityMin = cms.uint32( 3 ),
  trackSort = cms.string( "sip2dSig" ),
  useCategories = cms.bool( True ),
  calibrationRecords = cms.vstring( 'CombinedSVRecoVertex',
    'CombinedSVPseudoVertex',
    'CombinedSVNoVertex' ),
  recordLabel = cms.string( "HLT" ),
  categoryVariableName = cms.string( "vertexCategory" )
)
# CSVv2 (IVF-based) b-tag computer. Same pseudo-vs-full track-selection pattern as
# the v1 computer above (pseudo selection tightens sip2dSigMin to 2.0); calibration
# is read from the CombinedSVIVFV2* records with the "HLT" label.
process.hltCombinedSecondaryVertexV2 = cms.ESProducer( "CombinedSecondaryVertexESProducer",
  trackPseudoSelection = cms.PSet( 
    max_pT_dRcut = cms.double( 0.1 ),
    b_dR = cms.double( 0.6263 ),
    min_pT = cms.double( 120.0 ),
    b_pT = cms.double( 0.3684 ),
    ptMin = cms.double( 0.0 ),
    max_pT_trackPTcut = cms.double( 3.0 ),
    max_pT = cms.double( 500.0 ),
    useVariableJTA = cms.bool( False ),
    maxDecayLen = cms.double( 5.0 ),
    qualityClass = cms.string( "any" ),
    normChi2Max = cms.double( 99999.9 ),
    sip2dValMin = cms.double( -99999.9 ),
    sip3dValMin = cms.double( -99999.9 ),
    a_dR = cms.double( -0.001053 ),
    maxDistToAxis = cms.double( 0.07 ),
    totalHitsMin = cms.uint32( 0 ),
    a_pT = cms.double( 0.005263 ),
    sip2dSigMax = cms.double( 99999.9 ),
    sip2dValMax = cms.double( 99999.9 ),
    sip3dSigMax = cms.double( 99999.9 ),
    sip3dValMax = cms.double( 99999.9 ),
    min_pT_dRcut = cms.double( 0.5 ),
    jetDeltaRMax = cms.double( 0.3 ),
    pixelHitsMin = cms.uint32( 0 ),
    sip3dSigMin = cms.double( -99999.9 ),
    sip2dSigMin = cms.double( 2.0 )
  ),
  trackSelection = cms.PSet( 
    max_pT_dRcut = cms.double( 0.1 ),
    b_dR = cms.double( 0.6263 ),
    min_pT = cms.double( 120.0 ),
    b_pT = cms.double( 0.3684 ),
    ptMin = cms.double( 0.0 ),
    max_pT_trackPTcut = cms.double( 3.0 ),
    max_pT = cms.double( 500.0 ),
    useVariableJTA = cms.bool( False ),
    maxDecayLen = cms.double( 5.0 ),
    qualityClass = cms.string( "any" ),
    normChi2Max = cms.double( 99999.9 ),
    sip2dValMin = cms.double( -99999.9 ),
    sip3dValMin = cms.double( -99999.9 ),
    a_dR = cms.double( -0.001053 ),
    maxDistToAxis = cms.double( 0.07 ),
    totalHitsMin = cms.uint32( 0 ),
    a_pT = cms.double( 0.005263 ),
    sip2dSigMax = cms.double( 99999.9 ),
    sip2dValMax = cms.double( 99999.9 ),
    sip3dSigMax = cms.double( 99999.9 ),
    sip3dValMax = cms.double( 99999.9 ),
    min_pT_dRcut = cms.double( 0.5 ),
    jetDeltaRMax = cms.double( 0.3 ),
    pixelHitsMin = cms.uint32( 0 ),
    sip3dSigMin = cms.double( -99999.9 ),
    sip2dSigMin = cms.double( -99999.9 )
  ),
  trackFlip = cms.bool( False ),
  vertexFlip = cms.bool( False ),
  SoftLeptonFlip = cms.bool( False ),
  useTrackWeights = cms.bool( True ),
  pseudoMultiplicityMin = cms.uint32( 2 ),
  correctVertexMass = cms.bool( True ),
  trackPairV0Filter = cms.PSet( k0sMassWindow = cms.double( 0.03 ) ),
  charmCut = cms.double( 1.5 ),
  minimumTrackWeight = cms.double( 0.5 ),
  pseudoVertexV0Filter = cms.PSet( k0sMassWindow = cms.double( 0.05 ) ),
  trackMultiplicityMin = cms.uint32( 3 ),
  trackSort = cms.string( "sip2dSig" ),
  useCategories = cms.bool( True ),
  calibrationRecords = cms.vstring( 'CombinedSVIVFV2RecoVertex',
    'CombinedSVIVFV2PseudoVertex',
    'CombinedSVIVFV2NoVertex' ),
  recordLabel = cms.string( "HLT" ),
  categoryVariableName = cms.string( "vertexCategory" )
)
# Displaced-dijet tagging: counts prompt tracks (|IP| <= 0.1 cm) per jet;
# nthTrack = -1 means every passing track contributes to the discriminator.
process.hltDisplacedDijethltESPPromptTrackCountingESProducer = cms.ESProducer( "PromptTrackCountingESProducer",
  impactParameterType = cms.int32( 1 ),
  minimumImpactParameter = cms.double( -1.0 ),
  useSignedImpactParameterSig = cms.bool( True ),
  maximumDistanceToJetAxis = cms.double( 999999.0 ),
  deltaR = cms.double( -1.0 ),
  deltaRmin = cms.double( 0.0 ),
  maximumDecayLength = cms.double( 999999.0 ),
  maxImpactParameter = cms.double( 0.1 ),
  maxImpactParameterSig = cms.double( 999999.0 ),
  trackQualityClass = cms.string( "any" ),
  nthTrack = cms.int32( -1 )
)
# Displaced-dijet tagging: track-counting discriminator from the 1st track with
# 2D impact parameter above 0.05 cm (unsigned; impactParameterType 1 = 2D).
process.hltDisplacedDijethltESPTrackCounting2D1st = cms.ESProducer( "TrackCountingESProducer",
  a_dR = cms.double( -0.001053 ),
  b_dR = cms.double( 0.6263 ),
  a_pT = cms.double( 0.005263 ),
  b_pT = cms.double( 0.3684 ),
  min_pT = cms.double( 120.0 ),
  max_pT = cms.double( 500.0 ),
  min_pT_dRcut = cms.double( 0.5 ),
  max_pT_dRcut = cms.double( 0.1 ),
  max_pT_trackPTcut = cms.double( 3.0 ),
  minimumImpactParameter = cms.double( 0.05 ),
  useSignedImpactParameterSig = cms.bool( False ),
  impactParameterType = cms.int32( 1 ),
  maximumDistanceToJetAxis = cms.double( 9999999.0 ),
  deltaR = cms.double( -1.0 ),
  maximumDecayLength = cms.double( 999999.0 ),
  nthTrack = cms.int32( 1 ),
  trackQualityClass = cms.string( "any" ),
  useVariableJTA = cms.bool( False )
)
# Forward analytical propagator (no material effects) for trajectory extrapolation.
process.hltESPAnalyticalPropagator = cms.ESProducer( "AnalyticalPropagatorESProducer",
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "hltESPAnalyticalPropagator" ),
  PropagationDirection = cms.string( "alongMomentum" )
)
# Backward (opposite-to-momentum) counterpart of the propagator above.
process.hltESPBwdAnalyticalPropagator = cms.ESProducer( "AnalyticalPropagatorESProducer",
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "hltESPBwdAnalyticalPropagator" ),
  PropagationDirection = cms.string( "oppositeToMomentum" )
)
# Backward propagator with material effects using the electron mass (0.511 MeV).
process.hltESPBwdElectronPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "hltESPBwdElectronPropagator" ),
  Mass = cms.double( 5.11E-4 ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
# Chi2 hit-compatibility estimators with a strip cluster-charge cut. The variants
# below differ in MaxChi2, charge-cut working point and pT threshold for the cut
# (pTChargeCutThreshold = -1 applies the cut at all pT).
process.hltESPChi2ChargeLooseMeasurementEstimator16 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeLooseMeasurementEstimator16" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPChi2ChargeMeasurementEstimator16 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  appendToDataLabel = cms.string( "" )
)
# Very loose chi2 (2000) with no charge cut — used where almost any hit should be kept.
process.hltESPChi2ChargeMeasurementEstimator2000 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 2000.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeMeasurementEstimator2000" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPChi2ChargeMeasurementEstimator30 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 30.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  appendToDataLabel = cms.string( "" )
)
# Chi2 < 9 with loose charge cut applied only below 15 GeV.
process.hltESPChi2ChargeMeasurementEstimator9 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeMeasurementEstimator9" ),
  pTChargeCutThreshold = cms.double( 15.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  appendToDataLabel = cms.string( "" )
)
# Heavy-ion variant: dedicated (tighter) charge-cut working point.
process.hltESPChi2ChargeMeasurementEstimator9ForHI = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeMeasurementEstimator9ForHI" ),
  pTChargeCutThreshold = cms.double( 15.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutForHI" ) ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPChi2ChargeTightMeasurementEstimator16 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeTightMeasurementEstimator16" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
# Plain chi2 hit-compatibility estimators (no cluster-charge cut).
# NOTE(review): despite the "100" in its name this instance is configured with
# MaxChi2 = 40.0 / nSigma = 4.0 — values come from the generated HLT menu; confirm
# against the confdb entry before relying on the name.
process.hltESPChi2MeasurementEstimator100 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 40.0 ),
  nSigma = cms.double( 4.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1.0E12 ),
  ComponentName = cms.string( "hltESPChi2MeasurementEstimator100" ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPChi2MeasurementEstimator16 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2MeasurementEstimator16" ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPChi2MeasurementEstimator30 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 30.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2MeasurementEstimator30" ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPChi2MeasurementEstimator9 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2MeasurementEstimator9" ),
  appendToDataLabel = cms.string( "" )
)
# Merges GSF trajectory-state components down to at most 12, ranked by the
# Kullback-Leibler distance defined further below.
process.hltESPCloseComponentsMerger5D = cms.ESProducer( "CloseComponentsMergerESProducer5D",
  ComponentName = cms.string( "hltESPCloseComponentsMerger5D" ),
  MaxComponents = cms.int32( 12 ),
  DistanceMeasure = cms.string( "hltESPKullbackLeiblerDistance5D" )
)
# Detached-quad iteration: chi2<9 estimator with tight strip charge cut.
process.hltESPDetachedQuadStepChi2ChargeMeasurementEstimator9 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPDetachedQuadStepChi2ChargeMeasurementEstimator9" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
# Trajectory cleaners: drop trajectory candidates sharing > fractionShared of hits.
process.hltESPDetachedQuadStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPDetachedQuadStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.13 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
process.hltESPDetachedStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPDetachedStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.13 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
process.hltESPDetachedTripletStepChi2ChargeMeasurementEstimator9 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPDetachedTripletStepChi2ChargeMeasurementEstimator9" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
# Same shared-hit cleaner settings as the detached-quad/step cleaners above.
process.hltESPDetachedTripletStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPDetachedTripletStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.13 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# Displaced-dijet prompt-track counters: the "Long" variant loosens the prompt
# window to |IP| <= 0.2 cm (vs 0.1 cm); otherwise identical.
process.hltESPDisplacedDijethltPromptTrackCountingESProducer = cms.ESProducer( "PromptTrackCountingESProducer",
  impactParameterType = cms.int32( 1 ),
  minimumImpactParameter = cms.double( -1.0 ),
  useSignedImpactParameterSig = cms.bool( True ),
  maximumDistanceToJetAxis = cms.double( 999999.0 ),
  deltaR = cms.double( -1.0 ),
  deltaRmin = cms.double( 0.0 ),
  maximumDecayLength = cms.double( 999999.0 ),
  maxImpactParameter = cms.double( 0.1 ),
  maxImpactParameterSig = cms.double( 999999.0 ),
  trackQualityClass = cms.string( "any" ),
  nthTrack = cms.int32( -1 )
)
process.hltESPDisplacedDijethltPromptTrackCountingESProducerLong = cms.ESProducer( "PromptTrackCountingESProducer",
  impactParameterType = cms.int32( 1 ),
  minimumImpactParameter = cms.double( -1.0 ),
  useSignedImpactParameterSig = cms.bool( True ),
  maximumDistanceToJetAxis = cms.double( 999999.0 ),
  deltaR = cms.double( -1.0 ),
  deltaRmin = cms.double( 0.0 ),
  maximumDecayLength = cms.double( 999999.0 ),
  maxImpactParameter = cms.double( 0.2 ),
  maxImpactParameterSig = cms.double( 999999.0 ),
  trackQualityClass = cms.string( "any" ),
  nthTrack = cms.int32( -1 )
)
# Track-counting taggers: 1st track above 0.05 cm (unsigned) and, for the "Long"
# variant, 2nd track above 0.2 cm with signed IP significance.
process.hltESPDisplacedDijethltTrackCounting2D1st = cms.ESProducer( "TrackCountingESProducer",
  a_dR = cms.double( -0.001053 ),
  b_dR = cms.double( 0.6263 ),
  a_pT = cms.double( 0.005263 ),
  b_pT = cms.double( 0.3684 ),
  min_pT = cms.double( 120.0 ),
  max_pT = cms.double( 500.0 ),
  min_pT_dRcut = cms.double( 0.5 ),
  max_pT_dRcut = cms.double( 0.1 ),
  max_pT_trackPTcut = cms.double( 3.0 ),
  minimumImpactParameter = cms.double( 0.05 ),
  useSignedImpactParameterSig = cms.bool( False ),
  impactParameterType = cms.int32( 1 ),
  maximumDistanceToJetAxis = cms.double( 9999999.0 ),
  deltaR = cms.double( -1.0 ),
  maximumDecayLength = cms.double( 999999.0 ),
  nthTrack = cms.int32( 1 ),
  trackQualityClass = cms.string( "any" ),
  useVariableJTA = cms.bool( False )
)
process.hltESPDisplacedDijethltTrackCounting2D2ndLong = cms.ESProducer( "TrackCountingESProducer",
  a_dR = cms.double( -0.001053 ),
  b_dR = cms.double( 0.6263 ),
  a_pT = cms.double( 0.005263 ),
  b_pT = cms.double( 0.3684 ),
  min_pT = cms.double( 120.0 ),
  max_pT = cms.double( 500.0 ),
  min_pT_dRcut = cms.double( 0.5 ),
  max_pT_dRcut = cms.double( 0.1 ),
  max_pT_trackPTcut = cms.double( 3.0 ),
  minimumImpactParameter = cms.double( 0.2 ),
  useSignedImpactParameterSig = cms.bool( True ),
  impactParameterType = cms.int32( 1 ),
  maximumDistanceToJetAxis = cms.double( 9999999.0 ),
  deltaR = cms.double( -1.0 ),
  maximumDecayLength = cms.double( 999999.0 ),
  nthTrack = cms.int32( 2 ),
  trackQualityClass = cms.string( "any" ),
  useVariableJTA = cms.bool( False )
)
# Placeholder detector-layer geometry used by fitters that do not need real layers.
process.hltESPDummyDetLayerGeometry = cms.ESProducer( "DetLayerGeometryESProducer",
  ComponentName = cms.string( "hltESPDummyDetLayerGeometry" )
)
# ECAL trigger-tower mapping (endcap tower map file from the release).
process.hltESPEcalTrigTowerConstituentsMapBuilder = cms.ESProducer( "EcalTrigTowerConstituentsMapBuilder",
  MapFile = cms.untracked.string( "Geometry/EcalMapping/data/EndCap_TTMap.txt" )
)
# GSF material effects for electrons: Bethe-Heitler energy loss (6-component,
# 5th-order parametrization) plus multiple scattering; electron mass in GeV.
process.hltESPElectronMaterialEffects = cms.ESProducer( "GsfMaterialEffectsESProducer",
  BetheHeitlerParametrization = cms.string( "BetheHeitler_cdfmom_nC6_O5.par" ),
  EnergyLossUpdator = cms.string( "GsfBetheHeitlerUpdator" ),
  ComponentName = cms.string( "hltESPElectronMaterialEffects" ),
  MultipleScatteringUpdator = cms.string( "MultipleScatteringUpdator" ),
  Mass = cms.double( 5.11E-4 ),
  BetheHeitlerCorrection = cms.int32( 2 )
)
# Stepping-helix propagators tuned for L2 muon speed; the two instances differ
# only in propagation direction (anyDirection vs oppositeToMomentum).
process.hltESPFastSteppingHelixPropagatorAny = cms.ESProducer( "SteppingHelixPropagatorESProducer",
  ComponentName = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
  NoErrorPropagation = cms.bool( False ),
  PropagationDirection = cms.string( "anyDirection" ),
  useTuningForL2Speed = cms.bool( True ),
  useIsYokeFlag = cms.bool( True ),
  endcapShiftInZNeg = cms.double( 0.0 ),
  SetVBFPointer = cms.bool( False ),
  AssumeNoMaterial = cms.bool( False ),
  endcapShiftInZPos = cms.double( 0.0 ),
  useInTeslaFromMagField = cms.bool( False ),
  VBFName = cms.string( "VolumeBasedMagneticField" ),
  useEndcapShiftsInZ = cms.bool( False ),
  sendLogWarning = cms.bool( False ),
  useMatVolumes = cms.bool( True ),
  debug = cms.bool( False ),
  ApplyRadX0Correction = cms.bool( True ),
  useMagVolumes = cms.bool( True ),
  returnTangentPlane = cms.bool( True )
)
process.hltESPFastSteppingHelixPropagatorOpposite = cms.ESProducer( "SteppingHelixPropagatorESProducer",
  ComponentName = cms.string( "hltESPFastSteppingHelixPropagatorOpposite" ),
  NoErrorPropagation = cms.bool( False ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useTuningForL2Speed = cms.bool( True ),
  useIsYokeFlag = cms.bool( True ),
  endcapShiftInZNeg = cms.double( 0.0 ),
  SetVBFPointer = cms.bool( False ),
  AssumeNoMaterial = cms.bool( False ),
  endcapShiftInZPos = cms.double( 0.0 ),
  useInTeslaFromMagField = cms.bool( False ),
  VBFName = cms.string( "VolumeBasedMagneticField" ),
  useEndcapShiftsInZ = cms.bool( False ),
  sendLogWarning = cms.bool( False ),
  useMatVolumes = cms.bool( True ),
  debug = cms.bool( False ),
  ApplyRadX0Correction = cms.bool( True ),
  useMagVolumes = cms.bool( True ),
  returnTangentPlane = cms.bool( True )
)
# KF fit+smooth combinations wrapping the RK fitter/smoother. "IT" (iterative
# tracking) accepts shorter tracks (MinNumberOfHits 3 vs 5) and breaks trajectories
# on two consecutive missing hits.
process.hltESPFittingSmootherIT = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPFittingSmootherIT" ),
  Fitter = cms.string( "hltESPTrajectoryFitterRK" ),
  Smoother = cms.string( "hltESPTrajectorySmootherRK" ),
  EstimateCut = cms.double( -1.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 3 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( True ),
  NoInvalidHitsBeginEnd = cms.bool( True ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPFittingSmootherRK = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPFittingSmootherRK" ),
  Fitter = cms.string( "hltESPTrajectoryFitterRK" ),
  Smoother = cms.string( "hltESPTrajectorySmootherRK" ),
  EstimateCut = cms.double( -1.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 5 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
# Dispatches between the standard and looper-aware fitters by trajectory topology.
process.hltESPFlexibleKFFittingSmoother = cms.ESProducer( "FlexibleKFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPFlexibleKFFittingSmoother" ),
  standardFitter = cms.string( "hltESPKFFittingSmootherWithOutliersRejectionAndRK" ),
  looperFitter = cms.string( "hltESPKFFittingSmootherForLoopers" ),
  appendToDataLabel = cms.string( "" )
)
# Forward counterpart of hltESPBwdElectronPropagator (electron mass, with material).
process.hltESPFwdElectronPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "hltESPFwdElectronPropagator" ),
  Mass = cms.double( 5.11E-4 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.hltESPGlobalDetLayerGeometry = cms.ESProducer( "GlobalDetLayerGeometryESProducer",
  ComponentName = cms.string( "hltESPGlobalDetLayerGeometry" )
)
process.hltESPGlobalTrackingGeometryESProducer = cms.ESProducer( "GlobalTrackingGeometryESProducer" )
# GSF electron fit+smooth wrapper over the GSF fitter/smoother defined below.
process.hltESPGsfElectronFittingSmoother = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPGsfElectronFittingSmoother" ),
  Fitter = cms.string( "hltESPGsfTrajectoryFitter" ),
  Smoother = cms.string( "hltESPGsfTrajectorySmoother" ),
  EstimateCut = cms.double( -1.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 5 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( True ),
  NoInvalidHitsBeginEnd = cms.bool( True ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
# GSF (Gaussian-sum filter) trajectory fitter/smoother pair for electrons; both use
# the component merger and electron material effects defined earlier, differing in
# propagation direction and the smoother's error rescaling.
process.hltESPGsfTrajectoryFitter = cms.ESProducer( "GsfTrajectoryFitterESProducer",
  Merger = cms.string( "hltESPCloseComponentsMerger5D" ),
  ComponentName = cms.string( "hltESPGsfTrajectoryFitter" ),
  MaterialEffectsUpdator = cms.string( "hltESPElectronMaterialEffects" ),
  GeometricalPropagator = cms.string( "hltESPAnalyticalPropagator" ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" )
)
process.hltESPGsfTrajectorySmoother = cms.ESProducer( "GsfTrajectorySmootherESProducer",
  Merger = cms.string( "hltESPCloseComponentsMerger5D" ),
  ComponentName = cms.string( "hltESPGsfTrajectorySmoother" ),
  MaterialEffectsUpdator = cms.string( "hltESPElectronMaterialEffects" ),
  ErrorRescaling = cms.double( 100.0 ),
  GeometricalPropagator = cms.string( "hltESPBwdAnalyticalPropagator" ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" )
)
# Per-iteration chi2(+charge) estimators for the pixel-seeded tracking steps.
process.hltESPHighPtTripletStepChi2ChargeMeasurementEstimator30 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 30.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPHighPtTripletStepChi2ChargeMeasurementEstimator30" ),
  pTChargeCutThreshold = cms.double( 15.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPInitialStepChi2ChargeMeasurementEstimator30 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 30.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
  pTChargeCutThreshold = cms.double( 15.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPInitialStepChi2MeasurementEstimator36 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 36.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPInitialStepChi2MeasurementEstimator36" ),
  appendToDataLabel = cms.string( "" )
)
# KF fit+smooth wrappers. EstimateCut < 0 disables outlier rejection; the Loopers
# and WithOutliersRejectionAndRK variants enable it (cut 20) and accept 3-hit tracks.
process.hltESPKFFittingSmoother = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPKFFittingSmoother" ),
  Fitter = cms.string( "hltESPKFTrajectoryFitter" ),
  Smoother = cms.string( "hltESPKFTrajectorySmoother" ),
  EstimateCut = cms.double( -1.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 5 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPKFFittingSmootherForL2Muon = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPKFFittingSmootherForL2Muon" ),
  Fitter = cms.string( "hltESPKFTrajectoryFitterForL2Muon" ),
  Smoother = cms.string( "hltESPKFTrajectorySmootherForL2Muon" ),
  EstimateCut = cms.double( -1.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 5 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPKFFittingSmootherForLoopers = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPKFFittingSmootherForLoopers" ),
  Fitter = cms.string( "hltESPKFTrajectoryFitterForLoopers" ),
  Smoother = cms.string( "hltESPKFTrajectorySmootherForLoopers" ),
  EstimateCut = cms.double( 20.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 3 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( True ),
  NoInvalidHitsBeginEnd = cms.bool( True ),
  LogPixelProbabilityCut = cms.double( -14.0 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPKFFittingSmootherWithOutliersRejectionAndRK = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPKFFittingSmootherWithOutliersRejectionAndRK" ),
  Fitter = cms.string( "hltESPRKTrajectoryFitter" ),
  Smoother = cms.string( "hltESPRKTrajectorySmoother" ),
  EstimateCut = cms.double( 20.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 3 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( True ),
  NoInvalidHitsBeginEnd = cms.bool( True ),
  LogPixelProbabilityCut = cms.double( -14.0 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPKFTrajectoryFitter = cms.ESProducer( "KFTrajectoryFitterESProducer",
ComponentName = cms.string( "hltESPKFTrajectoryFitter" ),
Propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
Updator = cms.string( "hltESPKFUpdator" ),
Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
minHits = cms.int32( 3 ),
appendToDataLabel = cms.string( "" )
)
process.hltESPKFTrajectoryFitterForL2Muon = cms.ESProducer( "KFTrajectoryFitterESProducer",
ComponentName = cms.string( "hltESPKFTrajectoryFitterForL2Muon" ),
Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
Updator = cms.string( "hltESPKFUpdator" ),
Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
minHits = cms.int32( 3 ),
appendToDataLabel = cms.string( "" )
)
process.hltESPKFTrajectoryFitterForLoopers = cms.ESProducer( "KFTrajectoryFitterESProducer",
ComponentName = cms.string( "hltESPKFTrajectoryFitterForLoopers" ),
Propagator = cms.string( "PropagatorWithMaterialForLoopers" ),
Updator = cms.string( "hltESPKFUpdator" ),
Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" ),
minHits = cms.int32( 3 ),
appendToDataLabel = cms.string( "" )
)
# --- Kalman-filter trajectory smoothers/fitters and their shared updator ---
# Each ESProducer registers a named component (ComponentName) that HLT track
# reconstruction modules look up by string; the Propagator/Updator/Estimator
# fields wire the smoother to other components defined in this configuration.

# Generic KF smoother using the parabolic-magnetic-field material propagator.
process.hltESPKFTrajectorySmoother = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPKFTrajectorySmoother" ),
  Propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  errorRescaling = cms.double( 100.0 ),  # rescale factor applied to the initial-state errors
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
# KF smoother variant for L2 (standalone) muons, using the fast stepping-helix
# propagator in the opposite-to-momentum direction.
process.hltESPKFTrajectorySmootherForL2Muon = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPKFTrajectorySmootherForL2Muon" ),
  Propagator = cms.string( "hltESPFastSteppingHelixPropagatorOpposite" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  errorRescaling = cms.double( 100.0 ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
# KF smoother variant for looping tracks (smaller error rescaling, looper-aware
# propagator and global detector-layer geometry).
process.hltESPKFTrajectorySmootherForLoopers = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPKFTrajectorySmootherForLoopers" ),
  Propagator = cms.string( "PropagatorWithMaterialForLoopers" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" ),
  errorRescaling = cms.double( 10.0 ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
# KF smoother used by the muon track loader (smart propagator, any direction,
# opposite to momentum).
process.hltESPKFTrajectorySmootherForMuonTrackLoader = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" ),
  Propagator = cms.string( "hltESPSmartPropagatorAnyOpposite" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  errorRescaling = cms.double( 10.0 ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
# Standard Kalman-filter state updator shared by the fitters/smoothers above.
process.hltESPKFUpdator = cms.ESProducer( "KFUpdatorESProducer",
  ComponentName = cms.string( "hltESPKFUpdator" )
)
# Kullback-Leibler distance between 5D Gaussian state components (used by
# Gaussian-sum-filter component merging).
process.hltESPKullbackLeiblerDistance5D = cms.ESProducer( "DistanceBetweenComponentsESProducer5D",
  ComponentName = cms.string( "hltESPKullbackLeiblerDistance5D" ),
  DistanceMeasure = cms.string( "KullbackLeibler" )
)
# KF trajectory fitter for L3 (tracker-confirmed) muons.
process.hltESPL3MuKFTrajectoryFitter = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
  Propagator = cms.string( "hltESPSmartPropagatorAny" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
# --- Low-pT quad/triplet iterative-tracking steps: hit chi2 estimators and
# --- trajectory cleaners ---

# Chi2 estimator (max chi2 = 9) with a strip cluster-charge cut for the
# low-pT quadruplet step; hit recovery in glued detectors effectively disabled
# via the very large MinPtForHitRecoveryInGluedDet threshold.
process.hltESPLowPtQuadStepChi2ChargeMeasurementEstimator9 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPLowPtQuadStepChi2ChargeMeasurementEstimator9" ),
  pTChargeCutThreshold = cms.double( -1.0 ),  # negative: charge cut applied regardless of pT
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
# Duplicate-trajectory cleaner for the low-pT quadruplet step: drops the worse
# of two trajectories sharing more than fractionShared of their hits.
process.hltESPLowPtQuadStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPLowPtQuadStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.16 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# Same cleaner settings for the generic low-pT step.
process.hltESPLowPtStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPLowPtStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.16 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# Chi2+charge estimator for the low-pT triplet step (same cuts as the quad step).
process.hltESPLowPtTripletStepChi2ChargeMeasurementEstimator9 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPLowPtTripletStepChi2ChargeMeasurementEstimator9" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
# Trajectory cleaner for the low-pT triplet step.
process.hltESPLowPtTripletStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPLowPtTripletStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.16 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# Measurement tracker: central access point to pixel/strip hit measurements.
# Wires the pixel and strip cluster-parameter estimators (CPEs), enables the
# module/fiber/strip/ROC quality databases, and sets per-partition bad-strip
# cuts (TIB/TID/TOB/TEC all share the same limits here).
process.hltESPMeasurementTracker = cms.ESProducer( "MeasurementTrackerESProducer",
  ComponentName = cms.string( "hltESPMeasurementTracker" ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  StripCPE = cms.string( "hltESPStripCPEfromTrackAngle" ),
  HitMatcher = cms.string( "StandardMatcher" ),
  Phase2StripCPE = cms.string( "" ),  # empty: Phase-2 outer tracker not in use
  SiStripQualityLabel = cms.string( "" ),
  UseStripModuleQualityDB = cms.bool( True ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  # Maximum tolerated bad strips per module, total and consecutive, per
  # tracker partition.
  badStripCuts = cms.PSet(
    TOB = cms.PSet(
      maxBad = cms.uint32( 4 ),
      maxConsecutiveBad = cms.uint32( 2 )
    ),
    TIB = cms.PSet(
      maxBad = cms.uint32( 4 ),
      maxConsecutiveBad = cms.uint32( 2 )
    ),
    TID = cms.PSet(
      maxBad = cms.uint32( 4 ),
      maxConsecutiveBad = cms.uint32( 2 )
    ),
    TEC = cms.PSet(
      maxBad = cms.uint32( 4 ),
      maxConsecutiveBad = cms.uint32( 2 )
    )
  ),
  UsePixelModuleQualityDB = cms.bool( True ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  appendToDataLabel = cms.string( "" )
)
# --- Mixed-triplet iterative-tracking step components ---

# Cluster-shape hit filter for the mixed step; Phase-1 pixel shape parameter
# files, with a looser set for barrel layer 1.
process.hltESPMixedStepClusterShapeHitFilter = cms.ESProducer( "ClusterShapeHitFilterESProducer",
  ComponentName = cms.string( "hltESPMixedStepClusterShapeHitFilter" ),
  PixelShapeFile = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_noL1.par" ),
  PixelShapeFileL1 = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_loose.par" ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) )
)
# Trajectory cleaner for the mixed step (shared-hit fraction 0.11).
process.hltESPMixedStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPMixedStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.11 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# Chi2+charge estimator (max chi2 = 16) for the mixed-triplet step.
process.hltESPMixedTripletStepChi2ChargeMeasurementEstimator16 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPMixedTripletStepChi2ChargeMeasurementEstimator16" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
# Trajectory cleaner for the mixed-triplet step.
process.hltESPMixedTripletStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPMixedTripletStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.11 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# Muon detector-layer geometry and muon transient rec-hit builder.
process.hltESPMuonDetLayerGeometryESProducer = cms.ESProducer( "MuonDetLayerGeometryESProducer" )
process.hltESPMuonTransientTrackingRecHitBuilder = cms.ESProducer( "MuonTransientTrackingRecHitBuilderESProducer",
  ComponentName = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
)
# Generic pixel cluster-parameter estimator: converts pixel clusters into
# local positions/errors, with hard-coded error tables (x/y, barrel layer 1,
# other barrel layers, endcap) and per-axis defaults used as fallbacks.
process.hltESPPixelCPEGeneric = cms.ESProducer( "PixelCPEGenericESProducer",
  LoadTemplatesFromDB = cms.bool( True ),
  Alpha2Order = cms.bool( True ),
  ClusterProbComputationFlag = cms.int32( 0 ),
  useLAWidthFromDB = cms.bool( False ),
  lAOffset = cms.double( 0.0 ),  # Lorentz-angle overrides; 0.0 means not overridden
  lAWidthBPix = cms.double( 0.0 ),
  lAWidthFPix = cms.double( 0.0 ),
  doLorentzFromAlignment = cms.bool( False ),
  useLAFromDB = cms.bool( True ),
  xerr_barrel_l1 = cms.vdouble( 0.00115, 0.0012, 8.8E-4 ),
  yerr_barrel_l1 = cms.vdouble( 0.00375, 0.0023, 0.0025, 0.0025, 0.0023, 0.0023, 0.0021, 0.0021, 0.0024 ),
  xerr_barrel_ln = cms.vdouble( 0.00115, 0.0012, 8.8E-4 ),
  yerr_barrel_ln = cms.vdouble( 0.00375, 0.0023, 0.0025, 0.0025, 0.0023, 0.0023, 0.0021, 0.0021, 0.0024 ),
  xerr_endcap = cms.vdouble( 0.002, 0.002 ),
  yerr_endcap = cms.vdouble( 0.0021 ),
  xerr_barrel_l1_def = cms.double( 0.0103 ),
  yerr_barrel_l1_def = cms.double( 0.0021 ),
  xerr_barrel_ln_def = cms.double( 0.0103 ),
  yerr_barrel_ln_def = cms.double( 0.0021 ),
  xerr_endcap_def = cms.double( 0.002 ),
  yerr_endcap_def = cms.double( 7.5E-4 ),
  eff_charge_cut_highX = cms.double( 1.0 ),
  eff_charge_cut_highY = cms.double( 1.0 ),
  eff_charge_cut_lowX = cms.double( 0.0 ),
  eff_charge_cut_lowY = cms.double( 0.0 ),
  size_cutX = cms.double( 3.0 ),
  size_cutY = cms.double( 3.0 ),
  EdgeClusterErrorX = cms.double( 50.0 ),
  EdgeClusterErrorY = cms.double( 85.0 ),
  inflate_errors = cms.bool( False ),
  inflate_all_errors_no_trk_angle = cms.bool( False ),
  NoTemplateErrorsWhenNoTrkAngles = cms.bool( False ),
  UseErrorsFromTemplates = cms.bool( True ),
  TruncatePixelCharge = cms.bool( True ),
  IrradiationBiasCorrection = cms.bool( True ),
  DoCosmics = cms.bool( False ),
  Upgrade = cms.bool( False ),
  SmallPitch = cms.bool( False ),
  ComponentName = cms.string( "hltESPPixelCPEGeneric" ),
  MagneticFieldRecord = cms.ESInputTag( "","" ),
  appendToDataLabel = cms.string( "" )
)
# Template-based pixel CPE used for the final (refit) hit positions.
process.hltESPPixelCPETemplateReco = cms.ESProducer( "PixelCPETemplateRecoESProducer",
  LoadTemplatesFromDB = cms.bool( True ),
  Alpha2Order = cms.bool( True ),
  ClusterProbComputationFlag = cms.int32( 0 ),
  useLAWidthFromDB = cms.bool( True ),
  lAOffset = cms.double( 0.0 ),
  lAWidthBPix = cms.double( 0.0 ),
  lAWidthFPix = cms.double( 0.0 ),
  doLorentzFromAlignment = cms.bool( False ),
  useLAFromDB = cms.bool( True ),
  barrelTemplateID = cms.int32( 0 ),  # 0: template IDs taken from the DB payload
  forwardTemplateID = cms.int32( 0 ),
  directoryWithTemplates = cms.int32( 0 ),
  speed = cms.int32( -2 ),
  UseClusterSplitter = cms.bool( False ),
  ComponentName = cms.string( "hltESPPixelCPETemplateReco" ),
  appendToDataLabel = cms.string( "" )
)
# --- PixelLess and PixelPair iterative-tracking step components ---

# Chi2+charge estimator (max chi2 = 16) for the pixel-less step.
process.hltESPPixelLessStepChi2ChargeMeasurementEstimator16 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPPixelLessStepChi2ChargeMeasurementEstimator16" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
# Cluster-shape hit filter for the pixel-less step.
process.hltESPPixelLessStepClusterShapeHitFilter = cms.ESProducer( "ClusterShapeHitFilterESProducer",
  ComponentName = cms.string( "hltESPPixelLessStepClusterShapeHitFilter" ),
  PixelShapeFile = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_noL1.par" ),
  PixelShapeFileL1 = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_loose.par" ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) )
)
# Trajectory cleaner for the pixel-less step.
process.hltESPPixelLessStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPPixelLessStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.11 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# Chi2+charge estimator for the pixel-pair step; note the looser strip charge
# cut and the pT threshold above which the charge cut is skipped.
process.hltESPPixelPairStepChi2ChargeMeasurementEstimator9 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1.0E12 ),
  ComponentName = cms.string( "hltESPPixelPairStepChi2ChargeMeasurementEstimator9" ),
  pTChargeCutThreshold = cms.double( 15.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  appendToDataLabel = cms.string( "" )
)
# Plain chi2 estimator (no charge cut) with looser geometric tolerances for
# the pixel-pair step.
process.hltESPPixelPairStepChi2MeasurementEstimator25 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 25.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPPixelPairStepChi2MeasurementEstimator25" ),
  appendToDataLabel = cms.string( "" )
)
# Trajectory cleaner for the pixel-pair step (shared-hit fraction 0.19).
process.hltESPPixelPairTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPPixelPairTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.19 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# --- Runge-Kutta fitter/smoother and smart propagators ---

# KF trajectory fitter driven by the Runge-Kutta tracker propagator.
process.hltESPRKTrajectoryFitter = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPRKTrajectoryFitter" ),
  Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
# Matching KF smoother with the same RK propagator.
process.hltESPRKTrajectorySmoother = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPRKTrajectorySmoother" ),
  Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" ),
  errorRescaling = cms.double( 100.0 ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
# Runge-Kutta material propagator for tracker volumes (muon mass hypothesis,
# along momentum).
process.hltESPRungeKuttaTrackerPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
  Mass = cms.double( 0.105 ),  # GeV; muon mass hypothesis
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( True ),
  ptMin = cms.double( -1.0 )  # negative: no minimum-pT cutoff
)
# Smart propagators: use the tracker propagator inside the tracker and the
# stepping-helix propagator in the muon system, switching at radius Epsilon.
process.hltESPSmartPropagator = cms.ESProducer( "SmartPropagatorESProducer",
  ComponentName = cms.string( "hltESPSmartPropagator" ),
  TrackerPropagator = cms.string( "PropagatorWithMaterial" ),
  MuonPropagator = cms.string( "hltESPSteppingHelixPropagatorAlong" ),
  PropagationDirection = cms.string( "alongMomentum" ),
  Epsilon = cms.double( 5.0 )
)
process.hltESPSmartPropagatorAny = cms.ESProducer( "SmartPropagatorESProducer",
  ComponentName = cms.string( "hltESPSmartPropagatorAny" ),
  TrackerPropagator = cms.string( "PropagatorWithMaterial" ),
  MuonPropagator = cms.string( "SteppingHelixPropagatorAny" ),
  PropagationDirection = cms.string( "alongMomentum" ),
  Epsilon = cms.double( 5.0 )
)
process.hltESPSmartPropagatorAnyOpposite = cms.ESProducer( "SmartPropagatorESProducer",
  ComponentName = cms.string( "hltESPSmartPropagatorAnyOpposite" ),
  TrackerPropagator = cms.string( "PropagatorWithMaterialOpposite" ),
  MuonPropagator = cms.string( "SteppingHelixPropagatorAny" ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  Epsilon = cms.double( 5.0 )
)
# Soft-lepton b-tagger component based on lepton-jet distance.
process.hltESPSoftLeptonByDistance = cms.ESProducer( "LeptonTaggerByDistanceESProducer",
  distance = cms.double( 0.5 )
)
# Stepping-helix propagators for the muon system (along / opposite to
# momentum); identical settings except PropagationDirection.
process.hltESPSteppingHelixPropagatorAlong = cms.ESProducer( "SteppingHelixPropagatorESProducer",
  ComponentName = cms.string( "hltESPSteppingHelixPropagatorAlong" ),
  NoErrorPropagation = cms.bool( False ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useTuningForL2Speed = cms.bool( False ),
  useIsYokeFlag = cms.bool( True ),
  endcapShiftInZNeg = cms.double( 0.0 ),
  SetVBFPointer = cms.bool( False ),
  AssumeNoMaterial = cms.bool( False ),
  endcapShiftInZPos = cms.double( 0.0 ),
  useInTeslaFromMagField = cms.bool( False ),
  VBFName = cms.string( "VolumeBasedMagneticField" ),
  useEndcapShiftsInZ = cms.bool( False ),
  sendLogWarning = cms.bool( False ),
  useMatVolumes = cms.bool( True ),
  debug = cms.bool( False ),
  ApplyRadX0Correction = cms.bool( True ),
  useMagVolumes = cms.bool( True ),
  returnTangentPlane = cms.bool( True )
)
process.hltESPSteppingHelixPropagatorOpposite = cms.ESProducer( "SteppingHelixPropagatorESProducer",
  ComponentName = cms.string( "hltESPSteppingHelixPropagatorOpposite" ),
  NoErrorPropagation = cms.bool( False ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useTuningForL2Speed = cms.bool( False ),
  useIsYokeFlag = cms.bool( True ),
  endcapShiftInZNeg = cms.double( 0.0 ),
  SetVBFPointer = cms.bool( False ),
  AssumeNoMaterial = cms.bool( False ),
  endcapShiftInZPos = cms.double( 0.0 ),
  useInTeslaFromMagField = cms.bool( False ),
  VBFName = cms.string( "VolumeBasedMagneticField" ),
  useEndcapShiftsInZ = cms.bool( False ),
  sendLogWarning = cms.bool( False ),
  useMatVolumes = cms.bool( True ),
  debug = cms.bool( False ),
  ApplyRadX0Correction = cms.bool( True ),
  useMagVolumes = cms.bool( True ),
  returnTangentPlane = cms.bool( True )
)
# Strip cluster-parameter estimator using the track angle; the parameters PSet
# holds the per-partition (TIB/TID/TOB/TEC) and cluster-length (mLC_*)
# parametrization constants.
process.hltESPStripCPEfromTrackAngle = cms.ESProducer( "StripCPEESProducer",
  ComponentName = cms.string( "hltESPStripCPEfromTrackAngle" ),
  ComponentType = cms.string( "StripCPEfromTrackAngle" ),
  parameters = cms.PSet(
    mTIB_P1 = cms.double( 0.202 ),
    maxChgOneMIP = cms.double( 6000.0 ),
    mTEC_P0 = cms.double( -1.885 ),
    mTOB_P1 = cms.double( 0.253 ),
    mTEC_P1 = cms.double( 0.471 ),
    mLC_P2 = cms.double( 0.3 ),
    mLC_P1 = cms.double( 0.618 ),
    mTOB_P0 = cms.double( -1.026 ),
    mLC_P0 = cms.double( -0.326 ),
    useLegacyError = cms.bool( False ),
    mTIB_P0 = cms.double( -0.742 ),
    mTID_P1 = cms.double( 0.433 ),
    mTID_P0 = cms.double( -1.427 )
  )
)
# --- Transient tracking rec-hit (TTRH) builders ---
# Each builder pairs a strip CPE and a pixel CPE; the "Fake" strip CPE marks
# pixel-only builders.

# Strip + generic pixel CPE builder.
process.hltESPTTRHBWithTrackAngle = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
  StripCPE = cms.string( "hltESPStripCPEfromTrackAngle" ),
  ComponentName = cms.string( "hltESPTTRHBWithTrackAngle" ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  Matcher = cms.string( "StandardMatcher" ),
  ComputeCoarseLocalPositionFromDisk = cms.bool( False )
)
# Strip + template pixel CPE builder (used for final track fits).
process.hltESPTTRHBuilderAngleAndTemplate = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
  StripCPE = cms.string( "hltESPStripCPEfromTrackAngle" ),
  ComponentName = cms.string( "hltESPTTRHBuilderAngleAndTemplate" ),
  PixelCPE = cms.string( "hltESPPixelCPETemplateReco" ),
  Matcher = cms.string( "StandardMatcher" ),
  ComputeCoarseLocalPositionFromDisk = cms.bool( False )
)
# Pixel-only builder (strip CPE disabled via "Fake").
process.hltESPTTRHBuilderPixelOnly = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
  StripCPE = cms.string( "Fake" ),
  ComponentName = cms.string( "hltESPTTRHBuilderPixelOnly" ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  Matcher = cms.string( "StandardMatcher" ),
  ComputeCoarseLocalPositionFromDisk = cms.bool( False )
)
# Pixel-only builder used for pixel-triplet seeding (no track angle).
process.hltESPTTRHBuilderWithoutAngle4PixelTriplets = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
  StripCPE = cms.string( "Fake" ),
  ComponentName = cms.string( "hltESPTTRHBuilderWithoutAngle4PixelTriplets" ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  Matcher = cms.string( "StandardMatcher" ),
  ComputeCoarseLocalPositionFromDisk = cms.bool( False )
)
# --- TobTec iterative-tracking step: estimator, shape filter, fitters,
# --- smoothers, and trajectory cleaner. Note the step-specific minimum of
# --- 7 hits in the fitters/smoothers.

# Chi2+charge estimator (max chi2 = 16) for the TobTec step.
process.hltESPTobTecStepChi2ChargeMeasurementEstimator16 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPTobTecStepChi2ChargeMeasurementEstimator16" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
# Cluster-shape hit filter for the TobTec step.
process.hltESPTobTecStepClusterShapeHitFilter = cms.ESProducer( "ClusterShapeHitFilterESProducer",
  ComponentName = cms.string( "hltESPTobTecStepClusterShapeHitFilter" ),
  PixelShapeFile = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_noL1.par" ),
  PixelShapeFileL1 = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_loose.par" ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) )
)
# Combined fit+smooth with outlier rejection. NOTE: the python attribute is
# "FittingSmoother" while the registered ComponentName is "FitterSmoother";
# the string name is what other modules reference.
process.hltESPTobTecStepFittingSmoother = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPTobTecStepFitterSmoother" ),
  Fitter = cms.string( "hltESPTobTecStepRKFitter" ),
  Smoother = cms.string( "hltESPTobTecStepRKSmoother" ),
  EstimateCut = cms.double( 30.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 7 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
# Looper variant of the fitting smoother (same cuts, looper fitters).
process.hltESPTobTecStepFittingSmootherForLoopers = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPTobTecStepFitterSmootherForLoopers" ),
  Fitter = cms.string( "hltESPTobTecStepRKFitterForLoopers" ),
  Smoother = cms.string( "hltESPTobTecStepRKSmootherForLoopers" ),
  EstimateCut = cms.double( 30.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 7 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
# Dispatcher that chooses the standard or looper fitter per trajectory.
process.hltESPTobTecStepFlexibleKFFittingSmoother = cms.ESProducer( "FlexibleKFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPTobTecStepFlexibleKFFittingSmoother" ),
  standardFitter = cms.string( "hltESPTobTecStepFitterSmoother" ),
  looperFitter = cms.string( "hltESPTobTecStepFitterSmootherForLoopers" ),
  appendToDataLabel = cms.string( "" )
)
# TobTec RK fitters/smoothers referenced by the fitting smoothers above.
process.hltESPTobTecStepRKTrajectoryFitter = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPTobTecStepRKFitter" ),
  Propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  minHits = cms.int32( 7 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTobTecStepRKTrajectoryFitterForLoopers = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPTobTecStepRKFitterForLoopers" ),
  Propagator = cms.string( "PropagatorWithMaterialForLoopers" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  minHits = cms.int32( 7 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTobTecStepRKTrajectorySmoother = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPTobTecStepRKSmoother" ),
  Propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  errorRescaling = cms.double( 10.0 ),
  minHits = cms.int32( 7 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTobTecStepRKTrajectorySmootherForLoopers = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPTobTecStepRKSmootherForLoopers" ),
  Propagator = cms.string( "PropagatorWithMaterialForLoopers" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  errorRescaling = cms.double( 10.0 ),
  minHits = cms.int32( 7 ),
  appendToDataLabel = cms.string( "" )
)
# Trajectory cleaner for the TobTec step (shared-hit fraction 0.09).
process.hltESPTobTecStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPTobTecStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.09 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# --- Miscellaneous tracking ES producers ---

# Track-algorithm priority order used when merging track collections
# (empty algoOrder: default ordering).
process.hltESPTrackAlgoPriorityOrder = cms.ESProducer( "TrackAlgoPriorityOrderESProducer",
  ComponentName = cms.string( "hltESPTrackAlgoPriorityOrder" ),
  algoOrder = cms.vstring( ),
  appendToDataLabel = cms.string( "" )
)
# Tracker reconstruction geometry (Phase-1, no stacked modules).
process.hltESPTrackerRecoGeometryESProducer = cms.ESProducer( "TrackerRecoGeometryESProducer",
  usePhase2Stacks = cms.bool( False ),
  trackerGeometryLabel = cms.untracked.string( "" ),
  appendToDataLabel = cms.string( "" )
)
# Default trajectory cleaner: higher shared-hit tolerance, large valid-hit
# bonus, no missing-hit penalty, first hit may not be shared.
process.hltESPTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.5 ),
  ValidHitBonus = cms.double( 100.0 ),
  MissingHitPenalty = cms.double( 0.0 ),
  allowSharedFirstHit = cms.bool( False )
)
# Default RK trajectory fitter/smoother pair.
process.hltESPTrajectoryFitterRK = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPTrajectoryFitterRK" ),
  Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTrajectorySmootherRK = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPTrajectorySmootherRK" ),
  Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  errorRescaling = cms.double( 100.0 ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
# Pixel-track duplicate cleaner based on shared hits.
process.hltPixelTracksCleanerBySharedHits = cms.ESProducer( "PixelTrackCleanerBySharedHitsESProducer",
  ComponentName = cms.string( "hltPixelTracksCleanerBySharedHits" ),
  useQuadrupletAlgo = cms.bool( False ),
  appendToDataLabel = cms.string( "" )
)
# Generic track cleaner component.
process.hltTrackCleaner = cms.ESProducer( "TrackCleanerESProducer",
  ComponentName = cms.string( "hltTrackCleaner" ),
  appendToDataLabel = cms.string( "" )
)
# --- DetId associators (map track positions to detector elements on an
# --- eta-phi grid) and related navigation/quality producers ---

# HO (outer hadron calorimeter) associator.
process.hoDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "HODetIdAssociator" ),
  etaBinSize = cms.double( 0.087 ),
  nEta = cms.int32( 30 ),
  nPhi = cms.int32( 72 ),
  hcalRegion = cms.int32( 2 ),
  includeBadChambers = cms.bool( False ),
  includeGEM = cms.bool( False ),
  includeME0 = cms.bool( False )
)
# Multiple-scattering parametrisation tables used by seeding regions.
process.multipleScatteringParametrisationMakerESProducer = cms.ESProducer( "MultipleScatteringParametrisationMakerESProducer",
  appendToDataLabel = cms.string( "" )
)
# Muon-system associator (GEM/ME0 chambers excluded).
process.muonDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "MuonDetIdAssociator" ),
  etaBinSize = cms.double( 0.125 ),
  nEta = cms.int32( 48 ),
  nPhi = cms.int32( 48 ),
  hcalRegion = cms.int32( 2 ),
  includeBadChambers = cms.bool( False ),
  includeGEM = cms.bool( False ),
  includeME0 = cms.bool( False )
)
# Trajectory cleaner for muon-seeded tracking (very large valid-hit bonus,
# tiny missing-hit penalty: strongly favors the longer trajectory).
process.muonSeededTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "muonSeededTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.1 ),
  ValidHitBonus = cms.double( 1000.0 ),
  MissingHitPenalty = cms.double( 1.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# Tracker layer-navigation school for pattern recognition.
process.navigationSchoolESProducer = cms.ESProducer( "NavigationSchoolESProducer",
  ComponentName = cms.string( "SimpleNavigationSchool" ),
  SimpleMagneticField = cms.string( "ParabolicMf" )
)
# Preshower associator.
process.preshowerDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "PreshowerDetIdAssociator" ),
  etaBinSize = cms.double( 0.1 ),
  nEta = cms.int32( 60 ),
  nPhi = cms.int32( 30 ),
  hcalRegion = cms.int32( 2 ),
  includeBadChambers = cms.bool( False ),
  includeGEM = cms.bool( False ),
  includeME0 = cms.bool( False )
)
# Pixel quality conditions (default label).
process.siPixelQualityESProducer = cms.ESProducer( "SiPixelQualityESProducer",
  siPixelQualityLabel = cms.string( "" )
)
# --- Pixel/strip condition producers and tracker topology ---

# Pixel template payloads from the conditions DB (used by the template CPE).
process.siPixelTemplateDBObjectESProducer = cms.ESProducer( "SiPixelTemplateDBObjectESProducer" )
# Strip back-plane correction: selects the peak- or deconvolution-mode payload
# depending on the current readout latency record.
process.siStripBackPlaneCorrectionDepESProducer = cms.ESProducer( "SiStripBackPlaneCorrectionDepESProducer",
  LatencyRecord = cms.PSet(
    label = cms.untracked.string( "" ),
    record = cms.string( "SiStripLatencyRcd" )
  ),
  BackPlaneCorrectionPeakMode = cms.PSet(
    label = cms.untracked.string( "peak" ),
    record = cms.string( "SiStripBackPlaneCorrectionRcd" )
  ),
  BackPlaneCorrectionDeconvMode = cms.PSet(
    label = cms.untracked.string( "deconvolution" ),
    record = cms.string( "SiStripBackPlaneCorrectionRcd" )
  )
)
# Strip Lorentz angle: same latency-dependent peak/deconvolution selection.
process.siStripLorentzAngleDepESProducer = cms.ESProducer( "SiStripLorentzAngleDepESProducer",
  LatencyRecord = cms.PSet(
    label = cms.untracked.string( "" ),
    record = cms.string( "SiStripLatencyRcd" )
  ),
  LorentzAnglePeakMode = cms.PSet(
    label = cms.untracked.string( "peak" ),
    record = cms.string( "SiStripLorentzAngleRcd" )
  ),
  LorentzAngleDeconvMode = cms.PSet(
    label = cms.untracked.string( "deconvolution" ),
    record = cms.string( "SiStripLorentzAngleRcd" )
  )
)
# Strip cabling/connectivity and tracker topology.
process.sistripconn = cms.ESProducer( "SiStripConnectivity" )
process.trackerTopology = cms.ESProducer( "TrackerTopologyEP",
  appendToDataLabel = cms.string( "" )
)
# --- Framework services ---

# GPU offloading service; -1 limits mean "leave the CUDA default unchanged",
# empty preallocation vectors disable warm-up allocations.
process.CUDAService = cms.Service( "CUDAService",
  enabled = cms.untracked.bool( True ),
  verbose = cms.untracked.bool( False ),
  limits = cms.untracked.PSet(
    cudaLimitDevRuntimePendingLaunchCount = cms.untracked.int32( -1 ),
    cudaLimitDevRuntimeSyncDepth = cms.untracked.int32( -1 ),
    cudaLimitStackSize = cms.untracked.int32( -1 ),
    cudaLimitPrintfFifoSize = cms.untracked.int32( -1 ),
    cudaLimitMallocHeapSize = cms.untracked.int32( -1 )
  ),
  allocator = cms.untracked.PSet(
    hostPreallocate = cms.untracked.vuint32( ),
    devicePreallocate = cms.untracked.vuint32( )
  )
)
# Per-module/path timing and memory monitoring; histogram ranges/resolutions
# are in milliseconds (time) and kilobytes (memory), published under the
# HLT/TimerService DQM folder.
process.FastTimerService = cms.Service( "FastTimerService",
  printEventSummary = cms.untracked.bool( False ),
  printRunSummary = cms.untracked.bool( True ),
  printJobSummary = cms.untracked.bool( True ),
  writeJSONSummary = cms.untracked.bool( False ),
  jsonFileName = cms.untracked.string( "resources.json" ),
  enableDQM = cms.untracked.bool( True ),
  enableDQMbyModule = cms.untracked.bool( False ),
  enableDQMbyPath = cms.untracked.bool( False ),
  enableDQMbyLumiSection = cms.untracked.bool( True ),
  enableDQMbyProcesses = cms.untracked.bool( True ),
  enableDQMTransitions = cms.untracked.bool( False ),
  dqmTimeRange = cms.untracked.double( 2000.0 ),
  dqmTimeResolution = cms.untracked.double( 5.0 ),
  dqmMemoryRange = cms.untracked.double( 1000000.0 ),
  dqmMemoryResolution = cms.untracked.double( 5000.0 ),
  dqmPathTimeRange = cms.untracked.double( 100.0 ),
  dqmPathTimeResolution = cms.untracked.double( 0.5 ),
  dqmPathMemoryRange = cms.untracked.double( 1000000.0 ),
  dqmPathMemoryResolution = cms.untracked.double( 5000.0 ),
  dqmModuleTimeRange = cms.untracked.double( 40.0 ),
  dqmModuleTimeResolution = cms.untracked.double( 0.2 ),
  dqmModuleMemoryRange = cms.untracked.double( 100000.0 ),
  dqmModuleMemoryResolution = cms.untracked.double( 500.0 ),
  dqmLumiSectionsRange = cms.untracked.uint32( 2500 ),
  dqmPath = cms.untracked.string( "HLT/TimerService" ),
)
# Message logging: suppresses known-noisy warnings/errors from specific HLT
# modules and routes INFO-level output to cerr with per-category limits.
process.MessageLogger = cms.Service( "MessageLogger",
  suppressWarning = cms.untracked.vstring( 'hltOnlineBeamSpot',
    'hltCtf3HitL1SeededWithMaterialTracks',
    'hltL3MuonsOIState',
    'hltPixelTracksForHighMult',
    'hltHITPixelTracksHE',
    'hltHITPixelTracksHB',
    'hltCtfL1SeededWithMaterialTracks',
    'hltRegionalTracksForL3MuonIsolation',
    'hltSiPixelClusters',
    'hltActivityStartUpElectronPixelSeeds',
    'hltLightPFTracks',
    'hltPixelVertices3DbbPhi',
    'hltL3MuonsIOHit',
    'hltPixelTracks',
    'hltSiPixelDigis',
    'hltL3MuonsOIHit',
    'hltL1SeededElectronGsfTracks',
    'hltL1SeededStartUpElectronPixelSeeds',
    'hltBLifetimeRegionalCtfWithMaterialTracksbbPhiL1FastJetFastPV',
    'hltCtfActivityWithMaterialTracks' ),
  suppressFwkInfo = cms.untracked.vstring( ),
  suppressInfo = cms.untracked.vstring( ),
  suppressDebug = cms.untracked.vstring( ),
  debugModules = cms.untracked.vstring( ),
  cerr = cms.untracked.PSet(
    INFO = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),  # limit 0: category silenced
    noTimeStamps = cms.untracked.bool( False ),
    FwkReport = cms.untracked.PSet(
      reportEvery = cms.untracked.int32( 1 ),
      limit = cms.untracked.int32( 0 )
    ),
    default = cms.untracked.PSet( limit = cms.untracked.int32( 10000000 ) ),
    Root_NoDictionary = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),
    FwkJob = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),
    FwkSummary = cms.untracked.PSet(
      reportEvery = cms.untracked.int32( 1 ),
      limit = cms.untracked.int32( 10000000 )
    ),
    threshold = cms.untracked.string( "INFO" ),
  ),
  suppressError = cms.untracked.vstring( 'hltOnlineBeamSpot',
    'hltL3MuonCandidates',
    'hltL3TkTracksFromL2OIState',
    'hltPFJetCtfWithMaterialTracks',
    'hltL3TkTracksFromL2IOHit',
    'hltL3TkTracksFromL2OIHit' )
)
# Event-throughput monitoring published under HLT/Throughput; time range and
# resolution are in seconds.
process.ThroughputService = cms.Service( "ThroughputService",
  eventRange = cms.untracked.uint32( 10000 ),
  eventResolution = cms.untracked.uint32( 1 ),
  printEventSummary = cms.untracked.bool( False ),
  enableDQM = cms.untracked.bool( True ),
  dqmPathByProcesses = cms.untracked.bool( False ),
  dqmPath = cms.untracked.string( "HLT/Throughput" ),
  timeRange = cms.untracked.double( 60000.0 ),
  timeResolution = cms.untracked.double( 5.828 )
)
process.hltGetConditions = cms.EDAnalyzer( "EventSetupRecordDataGetter",
verbose = cms.untracked.bool( False ),
toGet = cms.VPSet(
)
)
process.hltGetRaw = cms.EDAnalyzer( "HLTGetRaw",
RawDataCollection = cms.InputTag( "rawDataCollector" )
)
process.hltPSetMap = cms.EDProducer( "ParameterSetBlobProducer" )
process.hltBoolFalse = cms.EDFilter( "HLTBool",
result = cms.bool( False )
)
process.statusOnGPUFilter = cms.EDFilter( "BooleanFilter",
src = cms.InputTag( "statusOnGPU" )
)
process.hltTriggerType = cms.EDFilter( "HLTTriggerTypeFilter",
SelectedTriggerType = cms.int32( 1 )
)
# Unpacks the Stage-2 Level-1 Global Trigger raw data (FED 1404) into L1T
# digi collections (Muon, EGamma, Tau, Jet, EtSum, algorithm/external blocks).
process.hltGtStage2Digis = cms.EDProducer( "L1TRawToDigi",
    FedIds = cms.vint32( 1404 ),
    Setup = cms.string( "stage2::GTSetup" ),
    FWId = cms.uint32( 0 ),
    DmxFWId = cms.uint32( 0 ),
    FWOverride = cms.bool( False ),
    TMTCheck = cms.bool( True ),
    CTP7 = cms.untracked.bool( False ),
    MTF7 = cms.untracked.bool( False ),
    InputLabel = cms.InputTag( "rawDataCollector" ),
    lenSlinkHeader = cms.untracked.int32( 8 ),
    lenSlinkTrailer = cms.untracked.int32( 8 ),
    lenAMCHeader = cms.untracked.int32( 8 ),
    lenAMCTrailer = cms.untracked.int32( 0 ),
    lenAMC13Header = cms.untracked.int32( 8 ),
    lenAMC13Trailer = cms.untracked.int32( 8 ),
    debug = cms.untracked.bool( False ),
    MinFeds = cms.uint32( 0 )
)
# Re-emulates the L1 Global Trigger decision from the unpacked Stage-2 digis,
# unprescaled and unmasked, producing the L1 object maps used by HLT seeding.
process.hltGtStage2ObjectMap = cms.EDProducer( "L1TGlobalProducer",
    MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    MuonShowerInputTag = cms.InputTag( "" ),
    EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' ),
    ExtInputTag = cms.InputTag( "hltGtStage2Digis" ),
    AlgoBlkInputTag = cms.InputTag( "hltGtStage2Digis" ),
    GetPrescaleColumnFromData = cms.bool( False ),
    AlgorithmTriggersUnprescaled = cms.bool( True ),
    RequireMenuToMatchAlgoBlkInput = cms.bool( True ),
    AlgorithmTriggersUnmasked = cms.bool( True ),
    useMuonShowers = cms.bool( False ),
    ProduceL1GtDaqRecord = cms.bool( True ),
    ProduceL1GtObjectMapRecord = cms.bool( True ),
    EmulateBxInEvent = cms.int32( 1 ),
    L1DataBxInEvent = cms.int32( 5 ),
    AlternativeNrBxBoardDaq = cms.uint32( 0 ),
    BstLengthBytes = cms.int32( -1 ),
    PrescaleSet = cms.uint32( 1 ),
    Verbosity = cms.untracked.int32( 0 ),
    PrintL1Menu = cms.untracked.bool( False ),
    TriggerMenuLuminosity = cms.string( "startup" ),
    PrescaleCSVFile = cms.string( "prescale_L1TGlobal.csv" )
)
# Unpacks the trigger scalers from the raw data.
process.hltScalersRawToDigi = cms.EDProducer( "ScalersRawToDigi",
    scalersInputTag = cms.InputTag( "rawDataCollector" )
)
# Unpacks online metadata (e.g. run-time conditions records) from the raw data.
process.hltOnlineMetaDataDigis = cms.EDProducer( "OnlineMetaDataRawToDigi",
    onlineMetaDataInputLabel = cms.InputTag( "rawDataCollector" )
)
# Produces the online beam spot from the unpacked scalers, within |z| < 40 cm
# and radius < 2 cm; does not read the transient EventSetup record.
process.hltOnlineBeamSpot = cms.EDProducer( "BeamSpotOnlineProducer",
    changeToCMSCoordinates = cms.bool( False ),
    maxZ = cms.double( 40.0 ),
    setSigmaZ = cms.double( 0.0 ),
    beamMode = cms.untracked.uint32( 11 ),
    src = cms.InputTag( "hltScalersRawToDigi" ),
    gtEvmLabel = cms.InputTag( "" ),
    maxRadius = cms.double( 2.0 ),
    useTransientRecord = cms.bool( False )
)
# L1 seed filter: requires the L1_ZeroBias algorithm to have fired.
process.hltL1sZeroBias = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_ZeroBias" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
# HLT prescaler for the ZeroBias beamspot path (offset 0).
process.hltPreZeroBiasBeamspot = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Legacy (CPU) ECAL unpacker: converts raw data from FEDs 601-654 into ECAL
# digis, with FE/TCC/SRP/header/mem unpacking enabled and sync/FE-id checks on.
process.hltEcalDigisLegacy = cms.EDProducer( "EcalRawToDigi",
    tccUnpacking = cms.bool( True ),
    FedLabel = cms.InputTag( "listfeds" ),
    srpUnpacking = cms.bool( True ),
    syncCheck = cms.bool( True ),
    feIdCheck = cms.bool( True ),
    silentMode = cms.untracked.bool( True ),
    InputLabel = cms.InputTag( "rawDataCollector" ),
    orderedFedList = cms.vint32( 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654 ),
    eventPut = cms.bool( True ),
    numbTriggerTSamples = cms.int32( 1 ),
    numbXtalTSamples = cms.int32( 10 ),
    orderedDCCIdList = cms.vint32( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54 ),
    FEDs = cms.vint32( 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654 ),
    DoRegional = cms.bool( False ),
    feUnpacking = cms.bool( True ),
    forceToKeepFRData = cms.bool( False ),
    headerUnpacking = cms.bool( True ),
    memUnpacking = cms.bool( True )
)
# Collects ECAL detector-ids / front-end units flagged with integrity errors by
# the unpacker ("hltEcalDigis") so the rechit step can attempt recovery.
process.hltEcalDetIdToBeRecovered = cms.EDProducer( "EcalDetIdToBeRecoveredProducer",
    ebIntegrityChIdErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityChIdErrors' ),
    ebDetIdToBeRecovered = cms.string( "ebDetId" ),
    integrityTTIdErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityTTIdErrors' ),
    eeIntegrityGainErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityGainErrors' ),
    ebFEToBeRecovered = cms.string( "ebFE" ),
    ebIntegrityGainErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityGainErrors' ),
    eeDetIdToBeRecovered = cms.string( "eeDetId" ),
    eeIntegrityGainSwitchErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityGainSwitchErrors' ),
    eeIntegrityChIdErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityChIdErrors' ),
    ebIntegrityGainSwitchErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityGainSwitchErrors' ),
    ebSrFlagCollection = cms.InputTag( "hltEcalDigis" ),
    eeFEToBeRecovered = cms.string( "eeFE" ),
    integrityBlockSizeErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityBlockSizeErrors' ),
    eeSrFlagCollection = cms.InputTag( "hltEcalDigis" )
)
# Unpacks HCAL raw data into digis (ZDC, calibration and uMNio channels
# included; TTP unpacking disabled).
process.hltHcalDigis = cms.EDProducer( "HcalRawToDigi",
    HcalFirstFED = cms.untracked.int32( 700 ),
    firstSample = cms.int32( 0 ),
    lastSample = cms.int32( 9 ),
    FilterDataQuality = cms.bool( True ),
    FEDs = cms.untracked.vint32(  ),
    UnpackZDC = cms.untracked.bool( True ),
    UnpackCalib = cms.untracked.bool( True ),
    UnpackUMNio = cms.untracked.bool( True ),
    UnpackTTP = cms.untracked.bool( False ),
    silent = cms.untracked.bool( True ),
    saveQIE10DataNSamples = cms.untracked.vint32(  ),
    saveQIE10DataTags = cms.untracked.vstring(  ),
    saveQIE11DataNSamples = cms.untracked.vint32(  ),
    saveQIE11DataTags = cms.untracked.vstring(  ),
    ComplainEmptyData = cms.untracked.bool( False ),
    UnpackerMode = cms.untracked.int32( 0 ),
    ExpectedOrbitMessageTime = cms.untracked.int32( -1 ),
    InputLabel = cms.InputTag( "rawDataCollector" ),
    ElectronicsMap = cms.string( "" )
)
# First step of HF reconstruction: pre-rechits from the HCAL digis, with the
# signal-of-interest taken per-channel (forceSOI = -1) rather than forced.
process.hltHfprereco = cms.EDProducer( "HFPreReconstructor",
    digiLabel = cms.InputTag( "hltHcalDigis" ),
    dropZSmarkedPassed = cms.bool( True ),
    tsFromDB = cms.bool( False ),
    sumAllTimeSlices = cms.bool( False ),
    forceSOI = cms.int32( -1 ),
    soiShift = cms.int32( 0 )
)
# Phase-1 HF rechit reconstruction from the pre-rechits, using the
# HFFlexibleTimeCheck algorithm; noise flags (S9S1/S8S1/PET) are computed but
# the HF strip filter is disabled.
process.hltHfreco = cms.EDProducer( "HFPhase1Reconstructor",
    inputLabel = cms.InputTag( "hltHfprereco" ),
    useChannelQualityFromDB = cms.bool( False ),
    checkChannelQualityForDepth3and4 = cms.bool( False ),
    algorithm = cms.PSet( 
      tfallIfNoTDC = cms.double( -101.0 ),
      triseIfNoTDC = cms.double( -100.0 ),
      rejectAllFailures = cms.bool( True ),
      energyWeights = cms.vdouble( 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 1.0 ),
      soiPhase = cms.uint32( 1 ),
      timeShift = cms.double( 0.0 ),
      tlimits = cms.vdouble( -1000.0, 1000.0, -1000.0, 1000.0 ),
      Class = cms.string( "HFFlexibleTimeCheck" )
    ),
    algoConfigClass = cms.string( "HFPhase1PMTParams" ),
    setNoiseFlags = cms.bool( True ),
    runHFStripFilter = cms.bool( False ),
    # Noise-flag parameter sets below are per-|ieta| calibration constants.
    S9S1stat = cms.PSet( 
      shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),
      shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      long_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),
      isS8S1 = cms.bool( False ),
      longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),
      short_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),
      HcalAcceptSeverityLevel = cms.int32( 9 )
    ),
    S8S1stat = cms.PSet( 
      shortEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),
      shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      long_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),
      isS8S1 = cms.bool( True ),
      longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      longEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),
      short_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),
      HcalAcceptSeverityLevel = cms.int32( 9 )
    ),
    PETstat = cms.PSet( 
      shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),
      shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      long_R_29 = cms.vdouble( 0.8 ),
      longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),
      short_R_29 = cms.vdouble( 0.8 ),
      long_R = cms.vdouble( 0.98 ),
      short_R = cms.vdouble( 0.8 ),
      HcalAcceptSeverityLevel = cms.int32( 9 )
    ),
    # Configured but inactive since runHFStripFilter is False above.
    HFStripFilter = cms.PSet( 
      timeMax = cms.double( 6.0 ),
      seedHitIetaMax = cms.int32( 35 ),
      gap = cms.int32( 2 ),
      verboseLevel = cms.untracked.int32( 10 ),
      wedgeCut = cms.double( 0.05 ),
      stripThreshold = cms.double( 40.0 ),
      maxStripTime = cms.double( 10.0 ),
      maxThreshold = cms.double( 100.0 ),
      lstrips = cms.int32( 2 )
    )
)
# HO rechit reconstruction from the HCAL digis; reco parameters and timing
# corrections are taken from the DB, all noise/flag setting disabled.
process.hltHoreco = cms.EDProducer( "HcalHitReconstructor",
    correctForPhaseContainment = cms.bool( True ),
    correctionPhaseNS = cms.double( 13.0 ),
    digiLabel = cms.InputTag( "hltHcalDigis" ),
    Subdetector = cms.string( "HO" ),
    correctForTimeslew = cms.bool( True ),
    dropZSmarkedPassed = cms.bool( True ),
    firstSample = cms.int32( 4 ),
    samplesToAdd = cms.int32( 4 ),
    tsFromDB = cms.bool( True ),
    recoParamsFromDB = cms.bool( True ),
    useLeakCorrection = cms.bool( False ),
    dataOOTCorrectionName = cms.string( "" ),
    dataOOTCorrectionCategory = cms.string( "Data" ),
    mcOOTCorrectionName = cms.string( "" ),
    mcOOTCorrectionCategory = cms.string( "MC" ),
    correctTiming = cms.bool( False ),
    firstAuxTS = cms.int32( 4 ),
    setNoiseFlags = cms.bool( False ),
    digiTimeFromDB = cms.bool( True ),
    setHSCPFlags = cms.bool( False ),
    setSaturationFlags = cms.bool( False ),
    setTimingTrustFlags = cms.bool( False ),
    setPulseShapeFlags = cms.bool( False ),
    setNegativeFlags = cms.bool( False ),
    digistat = cms.PSet(  ),
    HFInWindowStat = cms.PSet(  ),
    S9S1stat = cms.PSet(  ),
    S8S1stat = cms.PSet(  ),
    PETstat = cms.PSet(  ),
    saturationParameters = cms.PSet(  maxADCvalue = cms.int32( 127 ) ),
    hfTimingTrustParameters = cms.PSet(  )
)
# Builds calorimeter towers from ECAL rechits (EB/EE) plus HBHE and HF rechits;
# HO is effectively excluded (UseHO False, HOWeight ~ 0). Rechits flagged as
# kTime/kWeird/kBad in ECAL, or above HCAL severity 9, are excluded.
process.hltTowerMakerForAll = cms.EDProducer( "CaloTowersCreator",
    EBSumThreshold = cms.double( 0.2 ),
    HF2Weight = cms.double( 1.0 ),
    EBWeight = cms.double( 1.0 ),
    hfInput = cms.InputTag( "hltHfreco" ),
    EESumThreshold = cms.double( 0.45 ),
    HOThreshold0 = cms.double( 3.5 ),
    HOThresholdPlus1 = cms.double( 3.5 ),
    HOThresholdMinus1 = cms.double( 3.5 ),
    HOThresholdPlus2 = cms.double( 3.5 ),
    HOThresholdMinus2 = cms.double( 3.5 ),
    HBGrid = cms.vdouble(  ),
    HBThreshold1 = cms.double( 0.1 ),
    HBThreshold2 = cms.double( 0.2 ),
    HBThreshold = cms.double( 0.3 ),
    EEWeights = cms.vdouble(  ),
    HF1Threshold = cms.double( 0.5 ),
    HF2Weights = cms.vdouble(  ),
    HOWeights = cms.vdouble(  ),
    EEGrid = cms.vdouble(  ),
    HEDWeight = cms.double( 1.0 ),
    EEWeight = cms.double( 1.0 ),
    UseHO = cms.bool( False ),
    HBWeights = cms.vdouble(  ),
    HESWeight = cms.double( 1.0 ),
    HF1Weight = cms.double( 1.0 ),
    HF2Grid = cms.vdouble(  ),
    HEDWeights = cms.vdouble(  ),
    HF1Grid = cms.vdouble(  ),
    EBWeights = cms.vdouble(  ),
    # Tiny-but-nonzero weight keeps HO formally present while suppressing it.
    HOWeight = cms.double( 1.0E-99 ),
    EBThreshold = cms.double( 0.07 ),
    EEThreshold = cms.double( 0.3 ),
    UseEtEBTreshold = cms.bool( False ),
    UseSymEBTreshold = cms.bool( False ),
    UseEtEETreshold = cms.bool( False ),
    UseSymEETreshold = cms.bool( False ),
    hbheInput = cms.InputTag( "hltHbhereco" ),
    HcalThreshold = cms.double( -1000.0 ),
    HF2Threshold = cms.double( 0.85 ),
    HESThreshold1 = cms.double( 0.1 ),
    HESThreshold = cms.double( 0.2 ),
    HF1Weights = cms.vdouble(  ),
    hoInput = cms.InputTag( "hltHoreco" ),
    HESGrid = cms.vdouble(  ),
    HESWeights = cms.vdouble(  ),
    HEDThreshold1 = cms.double( 0.1 ),
    HEDThreshold = cms.double( 0.2 ),
    EcutTower = cms.double( -1000.0 ),
    HEDGrid = cms.vdouble(  ),
    ecalInputs = cms.VInputTag( 'hltEcalRecHit:EcalRecHitsEB','hltEcalRecHit:EcalRecHitsEE' ),
    HBWeight = cms.double( 1.0 ),
    HOGrid = cms.vdouble(  ),
    EBGrid = cms.vdouble(  ),
    MomConstrMethod = cms.int32( 1 ),
    MomHBDepth = cms.double( 0.2 ),
    MomHEDepth = cms.double( 0.4 ),
    MomEBDepth = cms.double( 0.3 ),
    MomEEDepth = cms.double( 0.0 ),
    HcalAcceptSeverityLevel = cms.uint32( 9 ),
    EcalRecHitSeveritiesToBeExcluded = cms.vstring( 'kTime',
      'kWeird',
      'kBad' ),
    UseHcalRecoveredHits = cms.bool( False ),
    UseEcalRecoveredHits = cms.bool( False ),
    UseRejectedHitsOnly = cms.bool( False ),
    HcalAcceptSeverityLevelForRejectedHit = cms.uint32( 9999 ),
    EcalSeveritiesToBeUsedInBadTowers = cms.vstring(  ),
    UseRejectedRecoveredHcalHits = cms.bool( False ),
    UseRejectedRecoveredEcalHits = cms.bool( False ),
    missingHcalRescaleFactorForEcal = cms.double( 0.0 ),
    AllowMissingInputs = cms.bool( False ),
    HcalPhase = cms.int32( 1 )
)
# Clusters the calo towers into anti-kT R=0.4 calo jets (for PF seeding);
# all grooming/substructure options are disabled, deterministic fastjet seed.
process.hltAK4CaloJetsPF = cms.EDProducer( "FastjetJetProducer",
    useMassDropTagger = cms.bool( False ),
    useFiltering = cms.bool( False ),
    useDynamicFiltering = cms.bool( False ),
    useTrimming = cms.bool( False ),
    usePruning = cms.bool( False ),
    useCMSBoostedTauSeedingAlgorithm = cms.bool( False ),
    useKtPruning = cms.bool( False ),
    useConstituentSubtraction = cms.bool( False ),
    useSoftDrop = cms.bool( False ),
    correctShape = cms.bool( False ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( False ),
    # Negative values below mean "unused" for the disabled grooming options.
    muCut = cms.double( -1.0 ),
    yCut = cms.double( -1.0 ),
    rFilt = cms.double( -1.0 ),
    rFiltFactor = cms.double( -1.0 ),
    trimPtFracMin = cms.double( -1.0 ),
    zcut = cms.double( -1.0 ),
    rcut_factor = cms.double( -1.0 ),
    csRho_EtaMax = cms.double( -1.0 ),
    csRParam = cms.double( -1.0 ),
    beta = cms.double( -1.0 ),
    R0 = cms.double( -1.0 ),
    gridMaxRapidity = cms.double( -1.0 ),
    gridSpacing = cms.double( -1.0 ),
    DzTrVtxMax = cms.double( 0.0 ),
    DxyTrVtxMax = cms.double( 0.0 ),
    MaxVtxZ = cms.double( 15.0 ),
    subjetPtMin = cms.double( -1.0 ),
    muMin = cms.double( -1.0 ),
    muMax = cms.double( -1.0 ),
    yMin = cms.double( -1.0 ),
    yMax = cms.double( -1.0 ),
    dRMin = cms.double( -1.0 ),
    dRMax = cms.double( -1.0 ),
    maxDepth = cms.int32( -1 ),
    nFilt = cms.int32( -1 ),
    MinVtxNdof = cms.int32( 5 ),
    src = cms.InputTag( "hltTowerMakerForAll" ),
    srcPVs = cms.InputTag( "NotUsed" ),
    jetType = cms.string( "CaloJet" ),
    jetAlgorithm = cms.string( "AntiKt" ),
    rParam = cms.double( 0.4 ),
    inputEtMin = cms.double( 0.3 ),
    inputEMin = cms.double( 0.0 ),
    jetPtMin = cms.double( 1.0 ),
    doPVCorrection = cms.bool( False ),
    doAreaFastjet = cms.bool( False ),
    doRhoFastjet = cms.bool( False ),
    doPUOffsetCorr = cms.bool( False ),
    puPtMin = cms.double( 10.0 ),
    nSigmaPU = cms.double( 1.0 ),
    radiusPU = cms.double( 0.4 ),
    subtractorName = cms.string( "" ),
    useExplicitGhosts = cms.bool( False ),
    doAreaDiskApprox = cms.bool( False ),
    voronoiRfact = cms.double( -9.0 ),
    Rho_EtaMax = cms.double( 4.4 ),
    Ghost_EtaMax = cms.double( 6.0 ),
    Active_Area_Repeats = cms.int32( 5 ),
    GhostArea = cms.double( 0.01 ),
    restrictInputs = cms.bool( False ),
    maxInputs = cms.uint32( 1 ),
    writeCompound = cms.bool( False ),
    writeJetsWithConst = cms.bool( False ),
    doFastJetNonUniform = cms.bool( False ),
    useDeterministicSeed = cms.bool( True ),
    minSeed = cms.uint32( 0 ),
    verbosity = cms.int32( 0 ),
    puWidth = cms.double( 0.0 ),
    nExclude = cms.uint32( 0 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    puCenters = cms.vdouble(  ),
    applyWeight = cms.bool( False ),
    srcWeights = cms.InputTag( "" ),
    minimumTowersFraction = cms.double( 0.0 ),
    jetCollInstanceName = cms.string( "" ),
    sumRecHits = cms.bool( False )
)
# Non-filtering selector: keeps calo jets with Et > 5 GeV (filter=False, so the
# path never fails here; only the output collection is reduced).
process.hltAK4CaloJetsPFEt5 = cms.EDFilter( "EtMinCaloJetSelector",
    src = cms.InputTag( "hltAK4CaloJetsPF" ),
    filter = cms.bool( False ),
    etMin = cms.double( 5.0 )
)
# Unpacks DT (drift tube) raw data into digis.
process.hltMuonDTDigis = cms.EDProducer( "DTuROSRawToDigi",
    inputLabel = cms.InputTag( "rawDataCollector" ),
    debug = cms.untracked.bool( False )
)
# DT 1D rechit reconstruction using the linear-drift-from-DB algorithm, with
# TOF, t0 and wire-propagation corrections from the DB synchronization.
process.hltDt1DRecHits = cms.EDProducer( "DTRecHitProducer",
    recAlgoConfig = cms.PSet( 
      maxTime = cms.double( 420.0 ),
      debug = cms.untracked.bool( False ),
      stepTwoFromDigi = cms.bool( False ),
      tTrigModeConfig = cms.PSet( 
        debug = cms.untracked.bool( False ),
        tofCorrType = cms.int32( 0 ),
        tTrigLabel = cms.string( "" ),
        wirePropCorrType = cms.int32( 0 ),
        doTOFCorrection = cms.bool( True ),
        vPropWire = cms.double( 24.4 ),
        doT0Correction = cms.bool( True ),
        doWirePropCorrection = cms.bool( True ),
        t0Label = cms.string( "" )
      ),
      useUncertDB = cms.bool( True ),
      doVdriftCorr = cms.bool( True ),
      minTime = cms.double( -3.0 ),
      tTrigMode = cms.string( "DTTTrigSyncFromDB" ),
      readLegacyTTrigDB = cms.bool( True ),
      readLegacyVDriftDB = cms.bool( True )
    ),
    recAlgo = cms.string( "DTLinearDriftFromDBAlgo" ),
    debug = cms.untracked.bool( False ),
    dtDigiLabel = cms.InputTag( "hltMuonDTDigis" )
)
# Builds DT 4D segments (combined phi/theta) from the 1D rechits via the
# combinatorial pattern-reco algorithm; the same drift-time algorithm config
# is repeated at the 2D and 4D levels as required by the producer interface.
process.hltDt4DSegments = cms.EDProducer( "DTRecSegment4DProducer",
    Reco4DAlgoName = cms.string( "DTCombinatorialPatternReco4D" ),
    Reco4DAlgoConfig = cms.PSet( 
      Reco2DAlgoConfig = cms.PSet( 
        AlphaMaxPhi = cms.double( 1.0 ),
        debug = cms.untracked.bool( False ),
        segmCleanerMode = cms.int32( 2 ),
        AlphaMaxTheta = cms.double( 0.9 ),
        hit_afterT0_resolution = cms.double( 0.03 ),
        performT0_vdriftSegCorrection = cms.bool( False ),
        recAlgo = cms.string( "DTLinearDriftFromDBAlgo" ),
        recAlgoConfig = cms.PSet( 
          maxTime = cms.double( 420.0 ),
          debug = cms.untracked.bool( False ),
          stepTwoFromDigi = cms.bool( False ),
          tTrigModeConfig = cms.PSet( 
            debug = cms.untracked.bool( False ),
            tofCorrType = cms.int32( 0 ),
            tTrigLabel = cms.string( "" ),
            wirePropCorrType = cms.int32( 0 ),
            doTOFCorrection = cms.bool( True ),
            vPropWire = cms.double( 24.4 ),
            doT0Correction = cms.bool( True ),
            doWirePropCorrection = cms.bool( True ),
            t0Label = cms.string( "" )
          ),
          useUncertDB = cms.bool( True ),
          doVdriftCorr = cms.bool( True ),
          minTime = cms.double( -3.0 ),
          tTrigMode = cms.string( "DTTTrigSyncFromDB" ),
          readLegacyTTrigDB = cms.bool( True ),
          readLegacyVDriftDB = cms.bool( True )
        ),
        MaxAllowedHits = cms.uint32( 50 ),
        nUnSharedHitsMin = cms.int32( 2 ),
        nSharedHitsMax = cms.int32( 2 ),
        performT0SegCorrection = cms.bool( False ),
        perform_delta_rejecting = cms.bool( False )
      ),
      Reco2DAlgoName = cms.string( "DTCombinatorialPatternReco" ),
      debug = cms.untracked.bool( False ),
      segmCleanerMode = cms.int32( 2 ),
      AllDTRecHits = cms.bool( True ),
      hit_afterT0_resolution = cms.double( 0.03 ),
      performT0_vdriftSegCorrection = cms.bool( False ),
      recAlgo = cms.string( "DTLinearDriftFromDBAlgo" ),
      recAlgoConfig = cms.PSet( 
        maxTime = cms.double( 420.0 ),
        debug = cms.untracked.bool( False ),
        stepTwoFromDigi = cms.bool( False ),
        tTrigModeConfig = cms.PSet( 
          debug = cms.untracked.bool( False ),
          tofCorrType = cms.int32( 0 ),
          tTrigLabel = cms.string( "" ),
          wirePropCorrType = cms.int32( 0 ),
          doTOFCorrection = cms.bool( True ),
          vPropWire = cms.double( 24.4 ),
          doT0Correction = cms.bool( True ),
          doWirePropCorrection = cms.bool( True ),
          t0Label = cms.string( "" )
        ),
        useUncertDB = cms.bool( True ),
        doVdriftCorr = cms.bool( True ),
        minTime = cms.double( -3.0 ),
        tTrigMode = cms.string( "DTTTrigSyncFromDB" ),
        readLegacyTTrigDB = cms.bool( True ),
        readLegacyVDriftDB = cms.bool( True )
      ),
      nUnSharedHitsMin = cms.int32( 2 ),
      nSharedHitsMax = cms.int32( 2 ),
      performT0SegCorrection = cms.bool( False ),
      perform_delta_rejecting = cms.bool( False )
    ),
    debug = cms.untracked.bool( False ),
    recHits1DLabel = cms.InputTag( "hltDt1DRecHits" ),
    recHits2DLabel = cms.InputTag( "dt2DSegments" )
)
# Unpacks CSC DCC raw data into wire/strip digis, with the data examiner
# enabled and selective unpacking; status digis and GEM/RPC/shower payloads off.
process.hltMuonCSCDigis = cms.EDProducer( "CSCDCCUnpacker",
    InputObjects = cms.InputTag( "rawDataCollector" ),
    UseExaminer = cms.bool( True ),
    ExaminerMask = cms.uint32( 535558134 ),
    UseSelectiveUnpacking = cms.bool( True ),
    ErrorMask = cms.uint32( 0 ),
    UnpackStatusDigis = cms.bool( False ),
    UseFormatStatus = cms.bool( True ),
    useRPCs = cms.bool( False ),
    useGEMs = cms.bool( False ),
    useCSCShowers = cms.bool( False ),
    Debug = cms.untracked.bool( False ),
    PrintEventNumber = cms.untracked.bool( False ),
    runDQM = cms.untracked.bool( False ),
    VisualFEDInspect = cms.untracked.bool( False ),
    VisualFEDShort = cms.untracked.bool( False ),
    FormatedEventDump = cms.untracked.bool( False ),
    SuppressZeroLCT = cms.untracked.bool( True ),
    DisableMappingCheck = cms.untracked.bool( False ),
    B904Setup = cms.untracked.bool( False )
)
# CSC 2D rechit reconstruction from the wire/strip digis; the per-chamber
# XTasymmetry/ConstSyst/NoiseLevel values are calibration constants per
# chamber type (ME1a ... ME41).
process.hltCsc2DRecHits = cms.EDProducer( "CSCRecHitDProducer",
    CSCStripPeakThreshold = cms.double( 10.0 ),
    CSCStripClusterChargeCut = cms.double( 25.0 ),
    CSCStripxtalksOffset = cms.double( 0.03 ),
    UseAverageTime = cms.bool( False ),
    UseParabolaFit = cms.bool( False ),
    UseFivePoleFit = cms.bool( True ),
    CSCWireClusterDeltaT = cms.int32( 1 ),
    CSCUseCalibrations = cms.bool( True ),
    CSCUseStaticPedestals = cms.bool( False ),
    CSCNoOfTimeBinsForDynamicPedestal = cms.int32( 2 ),
    wireDigiTag = cms.InputTag( 'hltMuonCSCDigis','MuonCSCWireDigi' ),
    stripDigiTag = cms.InputTag( 'hltMuonCSCDigis','MuonCSCStripDigi' ),
    readBadChannels = cms.bool( False ),
    readBadChambers = cms.bool( True ),
    CSCUseTimingCorrections = cms.bool( True ),
    CSCUseGasGainCorrections = cms.bool( False ),
    CSCDebug = cms.untracked.bool( False ),
    CSCstripWireDeltaTime = cms.int32( 8 ),
    XTasymmetry_ME1a = cms.double( 0.0 ),
    XTasymmetry_ME1b = cms.double( 0.0 ),
    XTasymmetry_ME12 = cms.double( 0.0 ),
    XTasymmetry_ME13 = cms.double( 0.0 ),
    XTasymmetry_ME21 = cms.double( 0.0 ),
    XTasymmetry_ME22 = cms.double( 0.0 ),
    XTasymmetry_ME31 = cms.double( 0.0 ),
    XTasymmetry_ME32 = cms.double( 0.0 ),
    XTasymmetry_ME41 = cms.double( 0.0 ),
    ConstSyst_ME1a = cms.double( 0.022 ),
    ConstSyst_ME1b = cms.double( 0.007 ),
    ConstSyst_ME12 = cms.double( 0.0 ),
    ConstSyst_ME13 = cms.double( 0.0 ),
    ConstSyst_ME21 = cms.double( 0.0 ),
    ConstSyst_ME22 = cms.double( 0.0 ),
    ConstSyst_ME31 = cms.double( 0.0 ),
    ConstSyst_ME32 = cms.double( 0.0 ),
    ConstSyst_ME41 = cms.double( 0.0 ),
    NoiseLevel_ME1a = cms.double( 7.0 ),
    NoiseLevel_ME1b = cms.double( 8.0 ),
    NoiseLevel_ME12 = cms.double( 9.0 ),
    NoiseLevel_ME13 = cms.double( 8.0 ),
    NoiseLevel_ME21 = cms.double( 9.0 ),
    NoiseLevel_ME22 = cms.double( 9.0 ),
    NoiseLevel_ME31 = cms.double( 9.0 ),
    NoiseLevel_ME32 = cms.double( 9.0 ),
    NoiseLevel_ME41 = cms.double( 9.0 ),
    CSCUseReducedWireTimeWindow = cms.bool( False ),
    CSCWireTimeWindowLow = cms.int32( 0 ),
    CSCWireTimeWindowHigh = cms.int32( 15 )
)
# Builds CSC segments from the 2D rechits using the "RU" (road usage) segment
# algorithm; parameters_per_chamber_type maps each of the ten chamber types
# listed below to one of the six parameter PSets (1-indexed).
process.hltCscSegments = cms.EDProducer( "CSCSegmentProducer",
    inputObjects = cms.InputTag( "hltCsc2DRecHits" ),
    algo_type = cms.int32( 1 ),
    algo_psets = cms.VPSet( 
      cms.PSet(  parameters_per_chamber_type = cms.vint32( 1, 2, 3, 4, 5, 6, 5, 6, 5, 6 ),
        algo_psets = cms.VPSet( 
          cms.PSet(  wideSeg = cms.double( 3.0 ),
            chi2Norm_2D_ = cms.double( 35.0 ),
            dRIntMax = cms.double( 2.0 ),
            doCollisions = cms.bool( True ),
            dPhiMax = cms.double( 0.006 ),
            dRMax = cms.double( 1.5 ),
            dPhiIntMax = cms.double( 0.005 ),
            minLayersApart = cms.int32( 1 ),
            chi2Max = cms.double( 100.0 ),
            chi2_str = cms.double( 50.0 ),
            enlarge = cms.bool( False )
          ),
          cms.PSet(  wideSeg = cms.double( 3.0 ),
            chi2Norm_2D_ = cms.double( 35.0 ),
            dRIntMax = cms.double( 2.0 ),
            doCollisions = cms.bool( True ),
            dPhiMax = cms.double( 0.005 ),
            dRMax = cms.double( 1.5 ),
            dPhiIntMax = cms.double( 0.004 ),
            minLayersApart = cms.int32( 1 ),
            chi2Max = cms.double( 100.0 ),
            chi2_str = cms.double( 50.0 ),
            enlarge = cms.bool( False )
          ),
          cms.PSet(  wideSeg = cms.double( 3.0 ),
            chi2Norm_2D_ = cms.double( 35.0 ),
            dRIntMax = cms.double( 2.0 ),
            doCollisions = cms.bool( True ),
            dPhiMax = cms.double( 0.004 ),
            dRMax = cms.double( 1.5 ),
            dPhiIntMax = cms.double( 0.003 ),
            minLayersApart = cms.int32( 1 ),
            chi2Max = cms.double( 100.0 ),
            chi2_str = cms.double( 50.0 ),
            enlarge = cms.bool( False )
          ),
          cms.PSet(  wideSeg = cms.double( 3.0 ),
            chi2Norm_2D_ = cms.double( 20.0 ),
            dRIntMax = cms.double( 2.0 ),
            doCollisions = cms.bool( True ),
            dPhiMax = cms.double( 0.003 ),
            dRMax = cms.double( 1.5 ),
            dPhiIntMax = cms.double( 0.002 ),
            minLayersApart = cms.int32( 1 ),
            chi2Max = cms.double( 60.0 ),
            chi2_str = cms.double( 30.0 ),
            enlarge = cms.bool( False )
          ),
          cms.PSet(  wideSeg = cms.double( 3.0 ),
            chi2Norm_2D_ = cms.double( 60.0 ),
            dRIntMax = cms.double( 2.0 ),
            doCollisions = cms.bool( True ),
            dPhiMax = cms.double( 0.007 ),
            dRMax = cms.double( 1.5 ),
            dPhiIntMax = cms.double( 0.005 ),
            minLayersApart = cms.int32( 1 ),
            chi2Max = cms.double( 180.0 ),
            chi2_str = cms.double( 80.0 ),
            enlarge = cms.bool( False )
          ),
          cms.PSet(  wideSeg = cms.double( 3.0 ),
            chi2Norm_2D_ = cms.double( 35.0 ),
            dRIntMax = cms.double( 2.0 ),
            doCollisions = cms.bool( True ),
            dPhiMax = cms.double( 0.006 ),
            dRMax = cms.double( 1.5 ),
            dPhiIntMax = cms.double( 0.004 ),
            minLayersApart = cms.int32( 1 ),
            chi2Max = cms.double( 100.0 ),
            chi2_str = cms.double( 50.0 ),
            enlarge = cms.bool( False )
          )
        ),
        algo_name = cms.string( "CSCSegAlgoRU" ),
        chamber_types = cms.vstring( 'ME1/a',
          'ME1/b',
          'ME1/2',
          'ME1/3',
          'ME2/1',
          'ME2/2',
          'ME3/1',
          'ME3/2',
          'ME4/1',
          'ME4/2' )
      )
    )
)
# Unpacks RPC raw data into digis.
process.hltMuonRPCDigis = cms.EDProducer( "RPCUnpackingModule",
    InputLabel = cms.InputTag( "rawDataCollector" ),
    doSynchro = cms.bool( False )
)
# RPC rechit reconstruction; mask/dead-channel lists are read from the
# packaged data files rather than the conditions DB.
process.hltRpcRecHits = cms.EDProducer( "RPCRecHitProducer",
    recAlgoConfig = cms.PSet(  ),
    recAlgo = cms.string( "RPCRecHitStandardAlgo" ),
    rpcDigiLabel = cms.InputTag( "hltMuonRPCDigis" ),
    maskSource = cms.string( "File" ),
    maskvecfile = cms.FileInPath( "RecoLocalMuon/RPCRecHit/data/RPCMaskVec.dat" ),
    deadSource = cms.string( "File" ),
    deadvecfile = cms.FileInPath( "RecoLocalMuon/RPCRecHit/data/RPCDeadVec.dat" )
)
# Unpacks GEM raw data (FED ids 1467-1478) into digis.
process.hltMuonGEMDigis = cms.EDProducer( "GEMRawToDigiModule",
    InputLabel = cms.InputTag( "rawDataCollector" ),
    useDBEMap = cms.bool( False ),
    keepDAQStatus = cms.bool( False ),
    readMultiBX = cms.bool( False ),
    fedIdStart = cms.uint32( 1467 ),
    fedIdEnd = cms.uint32( 1478 )
)
# GEM rechit reconstruction from the GEM digis (no channel masking applied).
process.hltGemRecHits = cms.EDProducer( "GEMRecHitProducer",
    recAlgoConfig = cms.PSet(  ),
    recAlgo = cms.string( "GEMRecHitStandardAlgo" ),
    gemDigiLabel = cms.InputTag( "hltMuonGEMDigis" ),
    applyMasking = cms.bool( False )
)
# Builds GEM segments from the rechits; ge0_pset configures the GE0 algorithm,
# algo_pset the standard GEM clustering/chaining segment builder.
process.hltGemSegments = cms.EDProducer( "GEMSegmentProducer",
    gemRecHitLabel = cms.InputTag( "hltGemRecHits" ),
    ge0_name = cms.string( "GE0SegAlgoRU" ),
    algo_name = cms.string( "GEMSegmentAlgorithm" ),
    ge0_pset = cms.PSet( 
      maxChi2GoodSeg = cms.double( 50.0 ),
      maxChi2Prune = cms.double( 50.0 ),
      maxNumberOfHitsPerLayer = cms.uint32( 100 ),
      maxETASeeds = cms.double( 0.1 ),
      maxPhiAdditional = cms.double( 0.001096605744 ),
      minNumberOfHits = cms.uint32( 4 ),
      doCollisions = cms.bool( True ),
      maxPhiSeeds = cms.double( 0.001096605744 ),
      requireCentralBX = cms.bool( True ),
      maxChi2Additional = cms.double( 100.0 ),
      allowWideSegments = cms.bool( True ),
      maxNumberOfHits = cms.uint32( 300 ),
      maxTOFDiff = cms.double( 25.0 )
    ),
    algo_pset = cms.PSet( 
      dYclusBoxMax = cms.double( 5.0 ),
      dXclusBoxMax = cms.double( 1.0 ),
      maxRecHitsInCluster = cms.int32( 4 ),
      preClustering = cms.bool( True ),
      preClusteringUseChaining = cms.bool( True ),
      dEtaChainBoxMax = cms.double( 0.05 ),
      clusterOnlySameBXRecHits = cms.bool( True ),
      minHitsPerSegment = cms.uint32( 2 ),
      dPhiChainBoxMax = cms.double( 0.02 )
    )
)
# hltL2OfflineMuonSeeds: MuonSeedGenerator building offline-style seeds for the
# L2 (standalone) muon reconstruction from DT ("hltDt4DSegments") and CSC
# ("hltCscSegments") segments; ME0 measurements are explicitly disabled.
# NOTE(review): the CSC_*/DT_*/OL_*/SME_*/SMB_* vdoubles appear to be
# per-station-pair parameterization constants and the matching *_scale vdoubles
# their error scales — confirm against the MuonSeedGenerator plugin docs before
# editing.  This file is an auto-generated HLT dump; do not hand-tune values.
process.hltL2OfflineMuonSeeds = cms.EDProducer( "MuonSeedGenerator",
    beamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    scaleDT = cms.bool( True ),
    CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
    DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
    ME0RecSegmentLabel = cms.InputTag( "me0Segments" ),
    EnableDTMeasurement = cms.bool( True ),
    EnableCSCMeasurement = cms.bool( True ),
    EnableME0Measurement = cms.bool( False ),
    # eta regions around detector cracks get a widened search window
    crackEtas = cms.vdouble( 0.2, 1.6, 1.7 ),
    crackWindow = cms.double( 0.04 ),
    deltaPhiSearchWindow = cms.double( 0.25 ),
    deltaEtaSearchWindow = cms.double( 0.2 ),
    deltaEtaCrackSearchWindow = cms.double( 0.25 ),
    CSC_01 = cms.vdouble( 0.166, 0.0, 0.0, 0.031, 0.0, 0.0 ),
    CSC_12 = cms.vdouble( -0.161, 0.254, -0.047, 0.042, -0.007, 0.0 ),
    CSC_02 = cms.vdouble( 0.612, -0.207, 0.0, 0.067, -0.001, 0.0 ),
    CSC_13 = cms.vdouble( 0.901, -1.302, 0.533, 0.045, 0.005, 0.0 ),
    CSC_03 = cms.vdouble( 0.787, -0.338, 0.029, 0.101, -0.008, 0.0 ),
    CSC_14 = cms.vdouble( 0.606, -0.181, -0.002, 0.111, -0.003, 0.0 ),
    CSC_23 = cms.vdouble( -0.081, 0.113, -0.029, 0.015, 0.008, 0.0 ),
    CSC_24 = cms.vdouble( 0.004, 0.021, -0.002, 0.053, 0.0, 0.0 ),
    CSC_34 = cms.vdouble( 0.062, -0.067, 0.019, 0.021, 0.003, 0.0 ),
    DT_12 = cms.vdouble( 0.183, 0.054, -0.087, 0.028, 0.002, 0.0 ),
    DT_13 = cms.vdouble( 0.315, 0.068, -0.127, 0.051, -0.002, 0.0 ),
    DT_14 = cms.vdouble( 0.359, 0.052, -0.107, 0.072, -0.004, 0.0 ),
    DT_23 = cms.vdouble( 0.13, 0.023, -0.057, 0.028, 0.004, 0.0 ),
    DT_24 = cms.vdouble( 0.176, 0.014, -0.051, 0.051, 0.003, 0.0 ),
    DT_34 = cms.vdouble( 0.044, 0.004, -0.013, 0.029, 0.003, 0.0 ),
    OL_1213 = cms.vdouble( 0.96, -0.737, 0.0, 0.052, 0.0, 0.0 ),
    OL_1222 = cms.vdouble( 0.848, -0.591, 0.0, 0.062, 0.0, 0.0 ),
    OL_1232 = cms.vdouble( 0.184, 0.0, 0.0, 0.066, 0.0, 0.0 ),
    OL_2213 = cms.vdouble( 0.117, 0.0, 0.0, 0.044, 0.0, 0.0 ),
    OL_2222 = cms.vdouble( 0.107, 0.0, 0.0, 0.04, 0.0, 0.0 ),
    SME_11 = cms.vdouble( 3.295, -1.527, 0.112, 0.378, 0.02, 0.0 ),
    SME_12 = cms.vdouble( 0.102, 0.599, 0.0, 0.38, 0.0, 0.0 ),
    SME_13 = cms.vdouble( -1.286, 1.711, 0.0, 0.356, 0.0, 0.0 ),
    SME_21 = cms.vdouble( -0.529, 1.194, -0.358, 0.472, 0.086, 0.0 ),
    SME_22 = cms.vdouble( -1.207, 1.491, -0.251, 0.189, 0.243, 0.0 ),
    SME_31 = cms.vdouble( -1.594, 1.482, -0.317, 0.487, 0.097, 0.0 ),
    SME_32 = cms.vdouble( -0.901, 1.333, -0.47, 0.41, 0.073, 0.0 ),
    SME_41 = cms.vdouble( -0.003, 0.005, 0.005, 0.608, 0.076, 0.0 ),
    SME_42 = cms.vdouble( -0.003, 0.005, 0.005, 0.608, 0.076, 0.0 ),
    SMB_10 = cms.vdouble( 1.387, -0.038, 0.0, 0.19, 0.0, 0.0 ),
    SMB_11 = cms.vdouble( 1.247, 0.72, -0.802, 0.229, -0.075, 0.0 ),
    SMB_12 = cms.vdouble( 2.128, -0.956, 0.0, 0.199, 0.0, 0.0 ),
    SMB_20 = cms.vdouble( 1.011, -0.052, 0.0, 0.188, 0.0, 0.0 ),
    SMB_21 = cms.vdouble( 1.043, -0.124, 0.0, 0.183, 0.0, 0.0 ),
    SMB_22 = cms.vdouble( 1.474, -0.758, 0.0, 0.185, 0.0, 0.0 ),
    SMB_30 = cms.vdouble( 0.505, -0.022, 0.0, 0.215, 0.0, 0.0 ),
    SMB_31 = cms.vdouble( 0.549, -0.145, 0.0, 0.207, 0.0, 0.0 ),
    SMB_32 = cms.vdouble( 0.67, -0.327, 0.0, 0.22, 0.0, 0.0 ),
    CSC_01_1_scale = cms.vdouble( -1.915329, 0.0 ),
    CSC_12_1_scale = cms.vdouble( -6.434242, 0.0 ),
    CSC_12_2_scale = cms.vdouble( -1.63622, 0.0 ),
    CSC_12_3_scale = cms.vdouble( -1.63622, 0.0 ),
    CSC_13_2_scale = cms.vdouble( -6.077936, 0.0 ),
    CSC_13_3_scale = cms.vdouble( -1.701268, 0.0 ),
    CSC_14_3_scale = cms.vdouble( -1.969563, 0.0 ),
    CSC_23_1_scale = cms.vdouble( -19.084285, 0.0 ),
    CSC_23_2_scale = cms.vdouble( -6.079917, 0.0 ),
    CSC_24_1_scale = cms.vdouble( -6.055701, 0.0 ),
    CSC_34_1_scale = cms.vdouble( -11.520507, 0.0 ),
    OL_1213_0_scale = cms.vdouble( -4.488158, 0.0 ),
    OL_1222_0_scale = cms.vdouble( -5.810449, 0.0 ),
    OL_1232_0_scale = cms.vdouble( -5.964634, 0.0 ),
    OL_2213_0_scale = cms.vdouble( -7.239789, 0.0 ),
    OL_2222_0_scale = cms.vdouble( -7.667231, 0.0 ),
    DT_12_1_scale = cms.vdouble( -3.692398, 0.0 ),
    DT_12_2_scale = cms.vdouble( -3.518165, 0.0 ),
    DT_13_1_scale = cms.vdouble( -4.520923, 0.0 ),
    DT_13_2_scale = cms.vdouble( -4.257687, 0.0 ),
    DT_14_1_scale = cms.vdouble( -5.644816, 0.0 ),
    DT_14_2_scale = cms.vdouble( -4.808546, 0.0 ),
    DT_23_1_scale = cms.vdouble( -5.320346, 0.0 ),
    DT_23_2_scale = cms.vdouble( -5.117625, 0.0 ),
    DT_24_1_scale = cms.vdouble( -7.490909, 0.0 ),
    DT_24_2_scale = cms.vdouble( -6.63094, 0.0 ),
    DT_34_1_scale = cms.vdouble( -13.783765, 0.0 ),
    DT_34_2_scale = cms.vdouble( -11.901897, 0.0 ),
    SMB_10_0_scale = cms.vdouble( 2.448566, 0.0 ),
    SMB_11_0_scale = cms.vdouble( 2.56363, 0.0 ),
    SMB_12_0_scale = cms.vdouble( 2.283221, 0.0 ),
    SMB_20_0_scale = cms.vdouble( 1.486168, 0.0 ),
    SMB_21_0_scale = cms.vdouble( 1.58384, 0.0 ),
    SMB_22_0_scale = cms.vdouble( 1.346681, 0.0 ),
    SMB_30_0_scale = cms.vdouble( -3.629838, 0.0 ),
    SMB_31_0_scale = cms.vdouble( -3.323768, 0.0 ),
    SMB_32_0_scale = cms.vdouble( -3.054156, 0.0 ),
    SME_11_0_scale = cms.vdouble( 1.325085, 0.0 ),
    SME_12_0_scale = cms.vdouble( 2.279181, 0.0 ),
    SME_13_0_scale = cms.vdouble( 0.104905, 0.0 ),
    SME_21_0_scale = cms.vdouble( -0.040862, 0.0 ),
    SME_22_0_scale = cms.vdouble( -3.457901, 0.0 )
)
# hltL2MuonSeeds: converts L1 trigger muons ('hltGtStage2Digis','Muon') into
# seeds for the L2 reconstruction, optionally matched to the offline-style
# seeds produced above (UseOfflineSeed = True, OfflineSeedLabel).
process.hltL2MuonSeeds = cms.EDProducer( "L2MuonSeedGeneratorFromL1T",
    GMTReadoutCollection = cms.InputTag( "" ),
    InputObjects = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    Propagator = cms.string( "SteppingHelixPropagatorAny" ),
    # L1 candidate acceptance cuts
    L1MinPt = cms.double( 0.0 ),
    L1MaxEta = cms.double( 2.5 ),
    L1MinQuality = cms.uint32( 7 ),
    # floors applied to the seed pT by detector region
    SetMinPtBarrelTo = cms.double( 3.5 ),
    SetMinPtEndcapTo = cms.double( 1.0 ),
    UseOfflineSeed = cms.untracked.bool( True ),
    UseUnassociatedL1 = cms.bool( False ),
    MatchDR = cms.vdouble( 0.3 ),
    EtaMatchingBins = cms.vdouble( 0.0, 2.5 ),
    CentralBxOnly = cms.bool( True ),
    MatchType = cms.uint32( 0 ),
    SortType = cms.uint32( 0 ),
    OfflineSeedLabel = cms.untracked.InputTag( "hltL2OfflineMuonSeeds" ),
    ServiceParameters = cms.PSet(
        RPCLayers = cms.bool( True ),
        UseMuonNavigation = cms.untracked.bool( True ),
        Propagators = cms.untracked.vstring( 'SteppingHelixPropagatorAny' )
    )
)
# hltL2Muons: the L2 (standalone, muon-system-only) muon track producer.
# Builds trajectories from hltL2MuonSeeds using an inside-out filter plus a
# backward (outside-in) filter, both consuming DT/CSC/RPC/GEM segments.
# Note: DoSeedRefit appears twice on purpose — once inside
# L2TrajBuilderParameters and once at top level; they configure different
# components of the producer.
process.hltL2Muons = cms.EDProducer( "L2MuonProducer",
    ServiceParameters = cms.PSet(
        RPCLayers = cms.bool( True ),
        UseMuonNavigation = cms.untracked.bool( True ),
        Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny',
            'hltESPFastSteppingHelixPropagatorOpposite' )
    ),
    InputObjects = cms.InputTag( "hltL2MuonSeeds" ),
    SeedTransformerParameters = cms.PSet(
        Fitter = cms.string( "hltESPKFFittingSmootherForL2Muon" ),
        NMinRecHits = cms.uint32( 2 ),
        RescaleError = cms.double( 100.0 ),
        Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
        UseSubRecHits = cms.bool( False ),
        MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
    ),
    L2TrajBuilderParameters = cms.PSet(
        # backward (outside-in) filter: tighter MaxChi2 than the forward filter
        BWFilterParameters = cms.PSet(
            DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
            CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
            BWSeedType = cms.string( "fromGenerator" ),
            GEMRecSegmentLabel = cms.InputTag( "hltGemRecHits" ),
            RPCRecSegmentLabel = cms.InputTag( "hltRpcRecHits" ),
            EnableGEMMeasurement = cms.bool( True ),
            EnableRPCMeasurement = cms.bool( True ),
            MuonTrajectoryUpdatorParameters = cms.PSet(
                ExcludeRPCFromFit = cms.bool( False ),
                Granularity = cms.int32( 0 ),
                MaxChi2 = cms.double( 25.0 ),
                RescaleError = cms.bool( False ),
                RescaleErrorFactor = cms.double( 100.0 ),
                UseInvalidHits = cms.bool( True )
            ),
            EnableCSCMeasurement = cms.bool( True ),
            MaxChi2 = cms.double( 100.0 ),
            FitDirection = cms.string( "outsideIn" ),
            Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
            NumberOfSigma = cms.double( 3.0 ),
            EnableDTMeasurement = cms.bool( True )
        ),
        DoSeedRefit = cms.bool( False ),
        # forward (inside-out) filter
        FilterParameters = cms.PSet(
            DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
            CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
            GEMRecSegmentLabel = cms.InputTag( "hltGemRecHits" ),
            RPCRecSegmentLabel = cms.InputTag( "hltRpcRecHits" ),
            EnableGEMMeasurement = cms.bool( True ),
            EnableRPCMeasurement = cms.bool( True ),
            MuonTrajectoryUpdatorParameters = cms.PSet(
                ExcludeRPCFromFit = cms.bool( False ),
                Granularity = cms.int32( 0 ),
                MaxChi2 = cms.double( 25.0 ),
                RescaleError = cms.bool( False ),
                RescaleErrorFactor = cms.double( 100.0 ),
                UseInvalidHits = cms.bool( True )
            ),
            EnableCSCMeasurement = cms.bool( True ),
            MaxChi2 = cms.double( 1000.0 ),
            FitDirection = cms.string( "insideOut" ),
            Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
            NumberOfSigma = cms.double( 3.0 ),
            EnableDTMeasurement = cms.bool( True )
        ),
        SeedPosition = cms.string( "in" ),
        DoBackwardFilter = cms.bool( True ),
        DoRefit = cms.bool( False ),
        NavigationType = cms.string( "Standard" ),
        SeedTransformerParameters = cms.PSet(
            Fitter = cms.string( "hltESPKFFittingSmootherForL2Muon" ),
            NMinRecHits = cms.uint32( 2 ),
            RescaleError = cms.double( 100.0 ),
            Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
            UseSubRecHits = cms.bool( False ),
            MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
        ),
        SeedPropagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" )
    ),
    DoSeedRefit = cms.bool( False ),
    TrackLoaderParameters = cms.PSet(
        TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
        beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
        DoSmoothing = cms.bool( False ),
        # L2 tracks are constrained to the beam-spot vertex when loaded
        VertexConstraint = cms.bool( True ),
        MuonUpdatorAtVertexParameters = cms.PSet(
            MaxChi2 = cms.double( 1000000.0 ),
            BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 ),
            BeamSpotPosition = cms.vdouble( 0.0, 0.0, 0.0 ),
            Propagator = cms.string( "hltESPFastSteppingHelixPropagatorOpposite" )
        ),
        Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
    ),
    MuonTrajectoryBuilder = cms.string( "Exhaustive" )
)
# hltL2MuonCandidates: wraps the vertex-updated L2 tracks as RecoChargedCandidates.
process.hltL2MuonCandidates = cms.EDProducer( "L2MuonCandidateProducer",
    InputObjects = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' )
)
# --- Pixel and strip local reconstruction feeding the muon tracking below ---
# hltSiPixelDigisLegacy: raw-to-digi unpacking of the Phase-1 pixel detector.
process.hltSiPixelDigisLegacy = cms.EDProducer( "SiPixelRawToDigi",
    IncludeErrors = cms.bool( True ),
    UseQualityInfo = cms.bool( False ),
    ErrorList = cms.vint32( 29 ),
    UserErrorList = cms.vint32( ),
    InputLabel = cms.InputTag( "rawDataCollector" ),
    Regions = cms.PSet( ),
    UsePilotBlade = cms.bool( False ),
    UsePhase1 = cms.bool( True ),
    CablingMapLabel = cms.string( "" ),
    SiPixelQualityLabel = cms.string( "" )
)
# hltSiPixelClustersLegacy: threshold-based pixel clustering of the digis above.
process.hltSiPixelClustersLegacy = cms.EDProducer( "SiPixelClusterProducer",
    src = cms.InputTag( "hltSiPixelDigisLegacy" ),
    ClusterMode = cms.string( "PixelThresholdClusterizer" ),
    maxNumberOfClusters = cms.int32( 40000 ),
    payloadType = cms.string( "HLT" ),
    ChannelThreshold = cms.int32( 10 ),
    MissCalibrate = cms.bool( True ),
    SplitClusters = cms.bool( False ),
    VCaltoElectronGain = cms.int32( 1 ),
    VCaltoElectronGain_L1 = cms.int32( 1 ),
    VCaltoElectronOffset = cms.int32( 0 ),
    VCaltoElectronOffset_L1 = cms.int32( 0 ),
    SeedThreshold = cms.int32( 1000 ),
    ClusterThreshold_L1 = cms.int32( 4000 ),
    ClusterThreshold = cms.int32( 4000 ),
    ElectronPerADCGain = cms.double( 135.0 ),
    Phase2Calibration = cms.bool( False ),
    Phase2ReadoutMode = cms.int32( -1 ),
    Phase2DigiBaseline = cms.double( 1200.0 ),
    Phase2KinkADC = cms.int32( 8 )
)
# hltSiPixelClustersCache: cluster-shape cache consumed by the seed comparitor
# of the CA quadruplet producer further below.
process.hltSiPixelClustersCache = cms.EDProducer( "SiPixelClusterShapeCacheProducer",
    src = cms.InputTag( "hltSiPixelClusters" ),
    onDemand = cms.bool( False )
)
# hltSiPixelRecHitSoA: converts legacy pixel clusters to the SoA rec-hit format
# (and back to legacy, convertToLegacy = True) using the fast CPE.
process.hltSiPixelRecHitSoA = cms.EDProducer( "SiPixelRecHitSoAFromLegacy",
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    src = cms.InputTag( "hltSiPixelClusters" ),
    CPE = cms.string( "PixelCPEFast" ),
    convertToLegacy = cms.bool( True ),
    isPhase2 = cms.bool( False )
)
# hltSiStripExcludedFEDListProducer: list of strip FEDs absent from the raw data.
process.hltSiStripExcludedFEDListProducer = cms.EDProducer( "SiStripExcludedFEDListProducer",
    ProductLabel = cms.InputTag( "rawDataCollector" )
)
# hltSiStripRawToClustersFacility: on-demand strip raw-to-cluster facility using
# the three-threshold clustering algorithm.
process.hltSiStripRawToClustersFacility = cms.EDProducer( "SiStripClusterizerFromRaw",
    onDemand = cms.bool( True ),
    Clusterizer = cms.PSet(
        ConditionsLabel = cms.string( "" ),
        ClusterThreshold = cms.double( 5.0 ),
        SeedThreshold = cms.double( 3.0 ),
        Algorithm = cms.string( "ThreeThresholdAlgorithm" ),
        ChannelThreshold = cms.double( 2.0 ),
        MaxAdjacentBad = cms.uint32( 0 ),
        setDetId = cms.bool( True ),
        MaxSequentialHoles = cms.uint32( 0 ),
        RemoveApvShots = cms.bool( True ),
        clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
        MaxSequentialBad = cms.uint32( 1 )
    ),
    Algorithms = cms.PSet(
        Use10bitsTruncation = cms.bool( False ),
        CommonModeNoiseSubtractionMode = cms.string( "Median" ),
        useCMMeanMap = cms.bool( False ),
        TruncateInSuppressor = cms.bool( True ),
        doAPVRestore = cms.bool( False ),
        SiStripFedZeroSuppressionMode = cms.uint32( 4 ),
        PedestalSubtractionFedMode = cms.bool( True )
    ),
    DoAPVEmulatorCheck = cms.bool( False ),
    HybridZeroSuppressed = cms.bool( False ),
    ProductLabel = cms.InputTag( "rawDataCollector" )
)
# hltSiStripClusters: assembles the MeasurementTrackerEvent (pixel + strip
# clusters plus inactive-module information) used by all track building below.
process.hltSiStripClusters = cms.EDProducer( "MeasurementTrackerEventProducer",
    measurementTracker = cms.string( "hltESPMeasurementTracker" ),
    skipClusters = cms.InputTag( "" ),
    pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
    stripClusterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
    Phase2TrackerCluster1DProducer = cms.string( "" ),
    vectorHits = cms.InputTag( "" ),
    vectorHitsRej = cms.InputTag( "" ),
    inactivePixelDetectorLabels = cms.VInputTag( 'hltSiPixelDigis' ),
    badPixelFEDChannelCollectionLabels = cms.VInputTag( 'hltSiPixelDigis' ),
    pixelCablingMapLabel = cms.string( "" ),
    inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
    switchOffPixelsIfEmpty = cms.bool( True )
)
# --- Outside-in (OI) leg of the iterative L3 muon tracking ---
# hltIterL3OISeedsFromL2Muons: builds tracker seeds from the vertex-updated L2
# muons (TSGForOIFromL2), producing both hit-based and hitless seeds; error
# rescaling for hitless seeds is adjusted dynamically.  The pT*/eta*/SF* values
# parameterize the rescale factor vs the L2 candidate kinematics.
process.hltIterL3OISeedsFromL2Muons = cms.EDProducer( "TSGForOIFromL2",
    src = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
    layersToTry = cms.int32( 2 ),
    fixedErrorRescaleFactorForHitless = cms.double( 2.0 ),
    hitsToTry = cms.int32( 1 ),
    adjustErrorsDynamicallyForHits = cms.bool( False ),
    adjustErrorsDynamicallyForHitless = cms.bool( True ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" ),
    UseHitLessSeeds = cms.bool( True ),
    estimator = cms.string( "hltESPChi2MeasurementEstimator100" ),
    maxEtaForTOB = cms.double( 1.8 ),
    minEtaForTEC = cms.double( 0.7 ),
    debug = cms.untracked.bool( False ),
    fixedErrorRescaleFactorForHits = cms.double( 1.0 ),
    maxSeeds = cms.uint32( 20 ),
    maxHitlessSeeds = cms.uint32( 5 ),
    maxHitSeeds = cms.uint32( 1 ),
    numL2ValidHitsCutAllEta = cms.uint32( 20 ),
    numL2ValidHitsCutAllEndcap = cms.uint32( 30 ),
    pT1 = cms.double( 13.0 ),
    pT2 = cms.double( 30.0 ),
    pT3 = cms.double( 70.0 ),
    eta1 = cms.double( 0.2 ),
    eta2 = cms.double( 0.3 ),
    eta3 = cms.double( 1.0 ),
    eta4 = cms.double( 1.2 ),
    eta5 = cms.double( 1.6 ),
    eta6 = cms.double( 1.4 ),
    eta7 = cms.double( 2.1 ),
    SF1 = cms.double( 3.0 ),
    SF2 = cms.double( 4.0 ),
    SF3 = cms.double( 5.0 ),
    SF4 = cms.double( 7.0 ),
    SF5 = cms.double( 10.0 ),
    SF6 = cms.double( 2.0 ),
    tsosDiff1 = cms.double( 0.2 ),
    tsosDiff2 = cms.double( 0.02 ),
    propagatorName = cms.string( "PropagatorWithMaterialParabolicMf" )
)
# hltIterL3OITrackCandidates: CKF pattern recognition on the OI seeds
# (reverseTrajectories = True, i.e. trajectories are built outside-in).
process.hltIterL3OITrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( False ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( True ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" ),
    src = cms.InputTag( "hltIterL3OISeedsFromL2Muons" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    TrajectoryBuilderPSet = cms.PSet( refToPSet_ = cms.string( "HLTPSetMuonCkfTrajectoryBuilder" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet(
        propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
        numberMeasurementsForFit = cms.int32( 4 ),
        propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    TrajectoryCleaner = cms.string( "muonSeededTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 500000 ),
    maxSeedsBeforeCleaning = cms.uint32( 5000 )
)
# hltIterL3OIMuCtfWithMaterialTracks: final Kalman fit of the OI track candidates.
process.hltIterL3OIMuCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( False ),
    SimpleMagneticField = cms.string( "" ),
    src = cms.InputTag( "hltIterL3OITrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPKFFittingSmootherWithOutliersRejectionAndRK" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "iter10" ),
    Propagator = cms.string( "PropagatorWithMaterial" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    MeasurementTracker = cms.string( "hltESPMeasurementTracker" ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" )
)
# hltIterL3OIMuonTrackCutClassifier: cut-based track quality classification of
# the fitted OI tracks; vertices are ignored (beam-spot-only selection).
# The 3-element vectors are the (loose, medium, tight) working-point cuts.
process.hltIterL3OIMuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIterL3OIMuCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "Notused" ),
    ignoreVertices = cms.bool( True ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet(
        minPixelHits = cms.vint32( 0, 0, 1 ),
        maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
        dr_par = cms.PSet(
            d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
            dr_par2 = cms.vdouble( 0.3, 0.3, 3.40282346639E38 ),
            dr_par1 = cms.vdouble( 0.4, 0.4, 3.40282346639E38 ),
            dr_exp = cms.vint32( 4, 4, 2147483647 ),
            d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
        ),
        maxLostLayers = cms.vint32( 4, 3, 2 ),
        min3DLayers = cms.vint32( 1, 2, 1 ),
        dz_par = cms.PSet(
            dz_par1 = cms.vdouble( 0.4, 0.4, 3.40282346639E38 ),
            dz_par2 = cms.vdouble( 0.35, 0.35, 3.40282346639E38 ),
            dz_exp = cms.vint32( 4, 4, 2147483647 )
        ),
        minNVtxTrk = cms.int32( 3 ),
        maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
        minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
        maxChi2 = cms.vdouble( 3.40282346639E38, 3.40282346639E38, 3.40282346639E38 ),
        maxChi2n = cms.vdouble( 10.0, 1.0, 0.4 ),
        maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
        minLayers = cms.vint32( 3, 5, 5 )
    )
)
# hltIterL3OIMuonTrackSelectionHighPurity: keeps only OI tracks classified
# highPurity by the classifier above.
process.hltIterL3OIMuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIterL3OIMuCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIterL3OIMuonTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIterL3OIMuonTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# hltL3MuonsIterL3OI: global L3 muon producer combining the vertex-updated L2
# muons with the high-purity OI tracker tracks (tkTrajLabel), including a global
# refit (GlbRefitterParameters) with per-subsystem chi2 cuts.
process.hltL3MuonsIterL3OI = cms.EDProducer( "L3MuonProducer",
    ServiceParameters = cms.PSet(
        RPCLayers = cms.bool( True ),
        UseMuonNavigation = cms.untracked.bool( True ),
        Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',
            'SteppingHelixPropagatorAny',
            'hltESPSmartPropagator',
            'hltESPSteppingHelixPropagatorOpposite' )
    ),
    MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
    TrackLoaderParameters = cms.PSet(
        MuonSeededTracksInstance = cms.untracked.string( "L2Seeded" ),
        TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
        beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
        DoSmoothing = cms.bool( True ),
        SmoothTkTrack = cms.untracked.bool( False ),
        VertexConstraint = cms.bool( False ),
        MuonUpdatorAtVertexParameters = cms.PSet(
            MaxChi2 = cms.double( 1000000.0 ),
            BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 ),
            Propagator = cms.string( "hltESPSteppingHelixPropagatorOpposite" )
        ),
        PutTkTrackIntoEvent = cms.untracked.bool( False ),
        Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
    ),
    L3TrajBuilderParameters = cms.PSet(
        PtCut = cms.double( 1.0 ),
        TrackerPropagator = cms.string( "SteppingHelixPropagatorAny" ),
        # L2 <-> tracker-track matching cuts, in three quality tiers
        GlobalMuonTrackMatcher = cms.PSet(
            Chi2Cut_3 = cms.double( 200.0 ),
            DeltaDCut_2 = cms.double( 10.0 ),
            Eta_threshold = cms.double( 1.2 ),
            Quality_2 = cms.double( 15.0 ),
            DeltaDCut_1 = cms.double( 40.0 ),
            Quality_3 = cms.double( 7.0 ),
            DeltaDCut_3 = cms.double( 15.0 ),
            Quality_1 = cms.double( 20.0 ),
            Pt_threshold1 = cms.double( 0.0 ),
            DeltaRCut_2 = cms.double( 0.2 ),
            DeltaRCut_1 = cms.double( 0.1 ),
            Pt_threshold2 = cms.double( 9.99999999E8 ),
            Chi2Cut_1 = cms.double( 50.0 ),
            Chi2Cut_2 = cms.double( 50.0 ),
            DeltaRCut_3 = cms.double( 1.0 ),
            LocChi2Cut = cms.double( 0.001 ),
            Propagator = cms.string( "hltESPSmartPropagator" ),
            MinPt = cms.double( 1.0 ),
            MinP = cms.double( 2.5 )
        ),
        ScaleTECxFactor = cms.double( -1.0 ),
        tkTrajUseVertex = cms.bool( False ),
        # tracking region around each L2 muon (eta/phi fixed-size windows)
        MuonTrackingRegionBuilder = cms.PSet(
            Rescale_Dz = cms.double( 4.0 ),
            Pt_fixed = cms.bool( False ),
            Eta_fixed = cms.bool( True ),
            Eta_min = cms.double( 0.1 ),
            DeltaZ = cms.double( 24.2 ),
            maxRegions = cms.int32( 2 ),
            EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
            UseVertex = cms.bool( False ),
            Z_fixed = cms.bool( False ),
            PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
            PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
            Rescale_phi = cms.double( 3.0 ),
            DeltaEta = cms.double( 0.2 ),
            precise = cms.bool( True ),
            OnDemand = cms.int32( -1 ),
            EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
            MeasurementTrackerName = cms.InputTag( "hltESPMeasurementTracker" ),
            vertexCollection = cms.InputTag( "pixelVertices" ),
            Pt_min = cms.double( 3.0 ),
            beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
            Phi_fixed = cms.bool( True ),
            DeltaR = cms.double( 0.025 ),
            input = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
            DeltaPhi = cms.double( 0.15 ),
            Phi_min = cms.double( 0.1 ),
            Rescale_eta = cms.double( 3.0 )
        ),
        TrackTransformer = cms.PSet(
            Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
            RefitDirection = cms.string( "insideOut" ),
            RefitRPCHits = cms.bool( True ),
            Propagator = cms.string( "hltESPSmartPropagatorAny" ),
            DoPredictionsOnly = cms.bool( False ),
            TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
            MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
            Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
        ),
        tkTrajBeamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
        RefitRPCHits = cms.bool( True ),
        tkTrajVertex = cms.InputTag( "Notused" ),
        # global refit: per-subsystem hit chi2 cuts (DT/CSC/RPC/GEM)
        GlbRefitterParameters = cms.PSet(
            Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
            DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
            RefitFlag = cms.bool( True ),
            SkipStation = cms.int32( -1 ),
            Chi2CutRPC = cms.double( 1.0 ),
            PropDirForCosmics = cms.bool( False ),
            CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
            GEMRecHitLabel = cms.InputTag( "hltGemRecHits" ),
            HitThreshold = cms.int32( 1 ),
            Chi2CutGEM = cms.double( 1.0 ),
            DYTthrs = cms.vint32( 30, 15 ),
            TrackerSkipSystem = cms.int32( -1 ),
            RefitDirection = cms.string( "insideOut" ),
            Chi2CutCSC = cms.double( 150.0 ),
            Chi2CutDT = cms.double( 10.0 ),
            RefitRPCHits = cms.bool( True ),
            TrackerSkipSection = cms.int32( -1 ),
            Propagator = cms.string( "hltESPSmartPropagatorAny" ),
            DoPredictionsOnly = cms.bool( False ),
            TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
            MuonHitsOption = cms.int32( 1 ),
            MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
        ),
        PCut = cms.double( 2.5 ),
        tkTrajMaxDXYBeamSpot = cms.double( 9999.0 ),
        TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
        tkTrajMaxChi2 = cms.double( 9999.0 ),
        MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
        ScaleTECyFactor = cms.double( -1.0 ),
        tkTrajLabel = cms.InputTag( "hltIterL3OIMuonTrackSelectionHighPurity" )
    )
)
# Combiners merging the (single) OI collection into the standard output names.
process.hltIterL3OIL3MuonsLinksCombination = cms.EDProducer( "L3TrackLinksCombiner",
    labels = cms.VInputTag( 'hltL3MuonsIterL3OI' )
)
process.hltIterL3OIL3Muons = cms.EDProducer( "L3TrackCombiner",
    labels = cms.VInputTag( 'hltL3MuonsIterL3OI' )
)
# hltIterL3OIL3MuonCandidates: candidates from the combined OI L3 muons, with
# the pT taken from the tracker track.
process.hltIterL3OIL3MuonCandidates = cms.EDProducer( "L3MuonCandidateProducer",
    InputObjects = cms.InputTag( "hltIterL3OIL3Muons" ),
    InputLinksObjects = cms.InputTag( "hltIterL3OIL3MuonsLinksCombination" ),
    MuonPtOption = cms.string( "Tracker" )
)
# --- Inside-out (IO) leg: pixel-track-based seeding around selected L2 muons ---
# hltL2SelectorForL3IO: selects which L2 muons still need the inside-out
# iteration, comparing against the OI L3 candidates already built.
process.hltL2SelectorForL3IO = cms.EDProducer( "HLTMuonL2SelectorForL3IO",
    l2Src = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
    l3OISrc = cms.InputTag( "hltIterL3OIL3MuonCandidates" ),
    InputLinks = cms.InputTag( "hltIterL3OIL3MuonsLinksCombination" ),
    applyL3Filters = cms.bool( False ),
    MinNhits = cms.int32( 1 ),
    MaxNormalizedChi2 = cms.double( 20.0 ),
    MinNmuonHits = cms.int32( 1 ),
    MaxPtDifference = cms.double( 0.3 )
)
# hltIterL3MuonPixelTracksFilter: loose kinematic filter applied to the pixel
# tracks of this iteration.
process.hltIterL3MuonPixelTracksFilter = cms.EDProducer( "PixelTrackFilterByKinematicsProducer",
    ptMin = cms.double( 0.1 ),
    nSigmaInvPtTolerance = cms.double( 0.0 ),
    tipMax = cms.double( 1.0 ),
    nSigmaTipMaxTolerance = cms.double( 0.0 ),
    chi2 = cms.double( 1000.0 )
)
# hltIterL3MuonPixelTracksFitter: helix-projection fitter for the pixel tracks.
process.hltIterL3MuonPixelTracksFitter = cms.EDProducer( "PixelFitterByHelixProjectionsProducer",
    scaleErrorsForBPix1 = cms.bool( False ),
    scaleFactor = cms.double( 0.65 )
)
# hltIterL3MuonPixelTracksTrackingRegions: fixed-size eta/phi/z regions around
# the L2 muons selected by hltL2SelectorForL3IO.
process.hltIterL3MuonPixelTracksTrackingRegions = cms.EDProducer( "MuonTrackingRegionEDProducer",
    EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
    DeltaR = cms.double( 0.025 ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    OnDemand = cms.int32( -1 ),
    vertexCollection = cms.InputTag( "notUsed" ),
    Rescale_phi = cms.double( 3.0 ),
    Eta_fixed = cms.bool( True ),
    Rescale_eta = cms.double( 3.0 ),
    PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
    Eta_min = cms.double( 0.0 ),
    Phi_fixed = cms.bool( True ),
    Phi_min = cms.double( 0.0 ),
    PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
    EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
    MeasurementTrackerName = cms.InputTag( "" ),
    UseVertex = cms.bool( False ),
    Rescale_Dz = cms.double( 4.0 ),
    Pt_fixed = cms.bool( True ),
    Z_fixed = cms.bool( True ),
    Pt_min = cms.double( 2.0 ),
    DeltaZ = cms.double( 24.2 ),
    DeltaEta = cms.double( 0.2 ),
    DeltaPhi = cms.double( 0.15 ),
    maxRegions = cms.int32( 5 ),
    precise = cms.bool( True ),
    input = cms.InputTag( "hltL2SelectorForL3IO" )
)
# hltIterL3MuonPixelLayerQuadruplets: seeding-layer sets (4-layer combinations
# of barrel BPix and forward FPix pixel layers) for the IO pixel tracks.
process.hltIterL3MuonPixelLayerQuadruplets = cms.EDProducer( "SeedingLayersEDProducer",
    layerList = cms.vstring( 'BPix1+BPix2+BPix3+BPix4',
        'BPix1+BPix2+BPix3+FPix1_pos',
        'BPix1+BPix2+BPix3+FPix1_neg',
        'BPix1+BPix2+FPix1_pos+FPix2_pos',
        'BPix1+BPix2+FPix1_neg+FPix2_neg',
        'BPix1+FPix1_pos+FPix2_pos+FPix3_pos',
        'BPix1+FPix1_neg+FPix2_neg+FPix3_neg' ),
    BPix = cms.PSet(
        hitErrorRPhi = cms.double( 0.0027 ),
        TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
        useErrorsFromParam = cms.bool( True ),
        hitErrorRZ = cms.double( 0.006 ),
        HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    FPix = cms.PSet(
        hitErrorRPhi = cms.double( 0.0051 ),
        TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
        useErrorsFromParam = cms.bool( True ),
        hitErrorRZ = cms.double( 0.0036 ),
        HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    # strip sub-detectors unused for pixel-only seeding
    TIB = cms.PSet( ),
    TID = cms.PSet( ),
    TOB = cms.PSet( ),
    TEC = cms.PSet( ),
    MTIB = cms.PSet( ),
    MTID = cms.PSet( ),
    MTOB = cms.PSet( ),
    MTEC = cms.PSet( )
)
# hltIterL3MuonPixelTracksHitDoublets: hit-pair generation inside the muon
# tracking regions, feeding the CA quadruplet builder.
process.hltIterL3MuonPixelTracksHitDoublets = cms.EDProducer( "HitPairEDProducer",
    seedingLayers = cms.InputTag( "hltIterL3MuonPixelLayerQuadruplets" ),
    trackingRegions = cms.InputTag( "hltIterL3MuonPixelTracksTrackingRegions" ),
    trackingRegionsSeedingLayers = cms.InputTag( "" ),
    clusterCheck = cms.InputTag( "" ),
    produceSeedingHitSets = cms.bool( False ),
    produceIntermediateHitDoublets = cms.bool( True ),
    maxElement = cms.uint32( 0 ),
    maxElementTotal = cms.uint32( 50000000 ),
    layerPairs = cms.vuint32( 0, 1, 2 )
)
# hltIterL3MuonPixelTracksHitQuadruplets: cellular-automaton (CA) quadruplet
# finding with cluster-shape-based seed filtering.
process.hltIterL3MuonPixelTracksHitQuadruplets = cms.EDProducer( "CAHitQuadrupletEDProducer",
    doublets = cms.InputTag( "hltIterL3MuonPixelTracksHitDoublets" ),
    extraHitRPhitolerance = cms.double( 0.032 ),
    fitFastCircle = cms.bool( True ),
    fitFastCircleChi2Cut = cms.bool( True ),
    useBendingCorrection = cms.bool( True ),
    CAThetaCut = cms.double( 0.005 ),
    CAPhiCut = cms.double( 0.2 ),
    CAThetaCut_byTriplets = cms.VPSet(
        cms.PSet( seedingLayers = cms.string( "" ),
            cut = cms.double( -1.0 )
        )
    ),
    CAPhiCut_byTriplets = cms.VPSet(
        cms.PSet( seedingLayers = cms.string( "" ),
            cut = cms.double( -1.0 )
        )
    ),
    CAHardPtCut = cms.double( 0.0 ),
    maxChi2 = cms.PSet(
        value2 = cms.double( 50.0 ),
        value1 = cms.double( 200.0 ),
        pt1 = cms.double( 0.7 ),
        enabled = cms.bool( True ),
        pt2 = cms.double( 2.0 )
    ),
    SeedComparitorPSet = cms.PSet(
        clusterShapeHitFilter = cms.string( "ClusterShapeHitFilter" ),
        ComponentName = cms.string( "LowPtClusterShapeSeedComparitor" ),
        clusterShapeCacheSrc = cms.InputTag( "hltSiPixelClustersCache" )
    )
)
# hltIterL3MuonPixelTracks: fits the CA quadruplets into pixel tracks using the
# fitter/filter producers configured above.
process.hltIterL3MuonPixelTracks = cms.EDProducer( "PixelTrackProducer",
    passLabel = cms.string( "" ),
    SeedingHitSets = cms.InputTag( "hltIterL3MuonPixelTracksHitQuadruplets" ),
    Fitter = cms.InputTag( "hltIterL3MuonPixelTracksFitter" ),
    Filter = cms.InputTag( "hltIterL3MuonPixelTracksFilter" ),
    Cleaner = cms.string( "hltPixelTracksCleanerBySharedHits" )
)
# hltIterL3MuonPixelVertices: divisive vertex finding on the IO pixel tracks.
process.hltIterL3MuonPixelVertices = cms.EDProducer( "PixelVertexProducer",
    WtAverage = cms.bool( True ),
    ZOffset = cms.double( 5.0 ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Verbosity = cms.int32( 0 ),
    UseError = cms.bool( True ),
    TrackCollection = cms.InputTag( "hltIterL3MuonPixelTracks" ),
    ZSeparation = cms.double( 0.05 ),
    NTrkMin = cms.int32( 2 ),
    Method2 = cms.bool( True ),
    Finder = cms.string( "DivisiveVertexFinder" ),
    PtMin = cms.double( 1.0 ),
    PVcomparer = cms.PSet( refToPSet_ = cms.string( "HLTPSetPvClusterComparerForIT" ) )
)
# hltIterL3MuonTrimmedPixelVertices: keeps at most maxVtx vertices ranked by
# the configured comparer.
process.hltIterL3MuonTrimmedPixelVertices = cms.EDProducer( "PixelVertexCollectionTrimmer",
    src = cms.InputTag( "hltIterL3MuonPixelVertices" ),
    maxVtx = cms.uint32( 100 ),
    fractionSumPt2 = cms.double( 0.3 ),
    minSumPt2 = cms.double( 0.0 ),
    PVcomparer = cms.PSet( refToPSet_ = cms.string( "HLTPSetPvClusterComparerForIT" ) )
)
process.hltIter0IterL3MuonPixelSeedsFromPixelTracks = cms.EDProducer( "SeedGeneratorFromProtoTracksEDProducer",
InputCollection = cms.InputTag( "hltIterL3MuonPixelTracks" ),
InputVertexCollection = cms.InputTag( "hltIterL3MuonTrimmedPixelVertices" ),
originHalfLength = cms.double( 0.3 ),
originRadius = cms.double( 0.1 ),
useProtoTrackKinematics = cms.bool( False ),
useEventsWithNoVertex = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
usePV = cms.bool( False ),
includeFourthHit = cms.bool( False ),
SeedCreatorPSet = cms.PSet( refToPSet_ = cms.string( "HLTSeedFromProtoTracks" ) )
)
process.hltIter0IterL3MuonCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
cleanTrajectoryAfterInOut = cms.bool( False ),
doSeedingRegionRebuilding = cms.bool( True ),
onlyPixelHitsForSeedCleaner = cms.bool( False ),
reverseTrajectories = cms.bool( False ),
useHitsSplitting = cms.bool( True ),
MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" ),
src = cms.InputTag( "hltIter0IterL3MuonPixelSeedsFromPixelTracks" ),
clustersToSkip = cms.InputTag( "" ),
phase2clustersToSkip = cms.InputTag( "" ),
TrajectoryBuilderPSet = cms.PSet( refToPSet_ = cms.string( "HLTIter0IterL3MuonPSetGroupedCkfTrajectoryBuilderIT" ) ),
TransientInitialStateEstimatorParameters = cms.PSet(
propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
numberMeasurementsForFit = cms.int32( 4 ),
propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
),
numHitsForSeedCleaner = cms.int32( 4 ),
NavigationSchool = cms.string( "SimpleNavigationSchool" ),
RedundantSeedCleaner = cms.string( "none" ),
TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
maxNSeeds = cms.uint32( 100000 ),
maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
process.hltIter0IterL3MuonCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
useSimpleMF = cms.bool( True ),
SimpleMagneticField = cms.string( "ParabolicMf" ),
src = cms.InputTag( "hltIter0IterL3MuonCkfTrackCandidates" ),
clusterRemovalInfo = cms.InputTag( "" ),
beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
Fitter = cms.string( "hltESPFittingSmootherIT" ),
useHitsSplitting = cms.bool( False ),
alias = cms.untracked.string( "ctfWithMaterialTracks" ),
TrajectoryInEvent = cms.bool( False ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
AlgorithmName = cms.string( "hltIter0" ),
Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
GeometricInnerState = cms.bool( True ),
NavigationSchool = cms.string( "" ),
MeasurementTracker = cms.string( "" ),
MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" )
)
# ---------------------------------------------------------------------------
# NOTE(review): this is an auto-generated CMS HLT configuration dump
# (hltGetConfiguration-style). Parameter values are machine-written; do not
# hand-tune individual cuts here -- regenerate from the upstream menu instead.
# ---------------------------------------------------------------------------
# Cut-based quality classifier for the iteration-0 IterL3-muon tracks: grades
# each track against three tiers (thresholds in qualityCuts) using beamspot /
# trimmed-pixel-vertex compatibility and hit-pattern requirements in `mva`.
# The 3.40282346639E38 / 2147483647 entries are float/int32 max, i.e. "no cut".
process.hltIter0IterL3MuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIter0IterL3MuonCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "hltIterL3MuonTrimmedPixelVertices" ),
    ignoreVertices = cms.bool( False ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet(
      minPixelHits = cms.vint32( 0, 3, 4 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
      dr_par = cms.PSet(
        d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
        dr_par2 = cms.vdouble( 0.3, 0.3, 3.40282346639E38 ),
        dr_par1 = cms.vdouble( 0.4, 0.4, 3.40282346639E38 ),
        dr_exp = cms.vint32( 4, 4, 2147483647 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
      ),
      maxLostLayers = cms.vint32( 1, 1, 1 ),
      min3DLayers = cms.vint32( 0, 3, 4 ),
      dz_par = cms.PSet(
        dz_par1 = cms.vdouble( 0.4, 0.4, 3.40282346639E38 ),
        dz_par2 = cms.vdouble( 0.35, 0.35, 3.40282346639E38 ),
        dz_exp = cms.vint32( 4, 4, 2147483647 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 3.40282346639E38, 3.40282346639E38, 3.40282346639E38 ),
      maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 3, 4 )
    )
)
# Keeps only the tracks that the classifier above marked "highPurity"
# (extras copied, trajectories dropped).
process.hltIter0IterL3MuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIter0IterL3MuonCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIter0IterL3MuonTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIter0IterL3MuonTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# ---------------------------------------------------------------------------
# Iteration-2 IterL3-muon seeding chain: mask clusters used by iteration 0,
# then build pixel-triplet seeds on the remaining hits.
# ---------------------------------------------------------------------------
# Flags pixel/strip clusters already consumed by the iter0 high-purity tracks
# so later steps in this iteration skip them.
process.hltIter2IterL3MuonClustersRefRemoval = cms.EDProducer( "TrackClusterRemover",
    trajectories = cms.InputTag( "hltIter0IterL3MuonTrackSelectionHighPurity" ),
    trackClassifier = cms.InputTag( '','QualityMasks' ),
    pixelClusters = cms.InputTag( "hltSiPixelClusters" ),
    stripClusters = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    oldClusterRemovalInfo = cms.InputTag( "" ),
    TrackQuality = cms.string( "highPurity" ),
    maxChi2 = cms.double( 16.0 ),
    minNumberOfLayersWithMeasBeforeFiltering = cms.int32( 0 ),
    overrideTrkQuals = cms.InputTag( "" )
)
# MeasurementTrackerEvent with the masked clusters above excluded.
process.hltIter2IterL3MuonMaskedMeasurementTrackerEvent = cms.EDProducer( "MaskedMeasurementTrackerEventProducer",
    src = cms.InputTag( "hltSiStripClusters" ),
    OnDemand = cms.bool( False ),
    clustersToSkip = cms.InputTag( "hltIter2IterL3MuonClustersRefRemoval" )
)
# Pixel-layer triplet combinations used for seeding; each layer PSet skips the
# clusters masked by hltIter2IterL3MuonClustersRefRemoval.
process.hltIter2IterL3MuonPixelLayerTriplets = cms.EDProducer( "SeedingLayersEDProducer",
    layerList = cms.vstring( 'BPix1+BPix2+BPix3',
      'BPix2+BPix3+BPix4',
      'BPix1+BPix3+BPix4',
      'BPix1+BPix2+BPix4',
      'BPix2+BPix3+FPix1_pos',
      'BPix2+BPix3+FPix1_neg',
      'BPix1+BPix2+FPix1_pos',
      'BPix1+BPix2+FPix1_neg',
      'BPix2+FPix1_pos+FPix2_pos',
      'BPix2+FPix1_neg+FPix2_neg',
      'BPix1+FPix1_pos+FPix2_pos',
      'BPix1+FPix1_neg+FPix2_neg',
      'FPix1_pos+FPix2_pos+FPix3_pos',
      'FPix1_neg+FPix2_neg+FPix3_neg',
      'BPix1+BPix3+FPix1_pos',
      'BPix1+BPix2+FPix2_pos',
      'BPix1+BPix3+FPix1_neg',
      'BPix1+BPix2+FPix2_neg',
      'BPix1+FPix2_neg+FPix3_neg',
      'BPix1+FPix1_neg+FPix3_neg',
      'BPix1+FPix2_pos+FPix3_pos',
      'BPix1+FPix1_pos+FPix3_pos' ),
    BPix = cms.PSet(
      hitErrorRPhi = cms.double( 0.0027 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      skipClusters = cms.InputTag( "hltIter2IterL3MuonClustersRefRemoval" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.006 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    FPix = cms.PSet(
      hitErrorRPhi = cms.double( 0.0051 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      skipClusters = cms.InputTag( "hltIter2IterL3MuonClustersRefRemoval" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.0036 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    TIB = cms.PSet(  ),
    TID = cms.PSet(  ),
    TOB = cms.PSet(  ),
    TEC = cms.PSet(  ),
    MTIB = cms.PSet(  ),
    MTID = cms.PSet(  ),
    MTOB = cms.PSet(  ),
    MTEC = cms.PSet(  )
)
# Occupancy guard; the check itself is disabled (doClusterCheck = False), so
# the limits below are not applied.
process.hltIter2IterL3MuonPixelClusterCheck = cms.EDProducer( "ClusterCheckerEDProducer",
    doClusterCheck = cms.bool( False ),
    MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
    ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
    MaxNumberOfPixelClusters = cms.uint32( 10000 ),
    PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
    cut = cms.string( "" ),
    silentClusterCheck = cms.untracked.bool( False )
)
# Hit doublets inside the muon-seeded tracking regions; emits intermediate
# doublets (not final hit sets) for the CA triplet step below.
process.hltIter2IterL3MuonPixelHitDoublets = cms.EDProducer( "HitPairEDProducer",
    seedingLayers = cms.InputTag( "hltIter2IterL3MuonPixelLayerTriplets" ),
    trackingRegions = cms.InputTag( "hltIterL3MuonPixelTracksTrackingRegions" ),
    trackingRegionsSeedingLayers = cms.InputTag( "" ),
    clusterCheck = cms.InputTag( "hltIter2IterL3MuonPixelClusterCheck" ),
    produceSeedingHitSets = cms.bool( False ),
    produceIntermediateHitDoublets = cms.bool( True ),
    maxElement = cms.uint32( 0 ),
    maxElementTotal = cms.uint32( 50000000 ),
    layerPairs = cms.vuint32( 0, 1 )
)
# Cellular-automaton triplet builder on the doublets above.
process.hltIter2IterL3MuonPixelHitTriplets = cms.EDProducer( "CAHitTripletEDProducer",
    doublets = cms.InputTag( "hltIter2IterL3MuonPixelHitDoublets" ),
    extraHitRPhitolerance = cms.double( 0.032 ),
    useBendingCorrection = cms.bool( True ),
    CAThetaCut = cms.double( 0.015 ),
    CAPhiCut = cms.double( 0.1 ),
    CAThetaCut_byTriplets = cms.VPSet(
      cms.PSet( seedingLayers = cms.string( "" ),
        cut = cms.double( -1.0 )
      )
    ),
    CAPhiCut_byTriplets = cms.VPSet(
      cms.PSet( seedingLayers = cms.string( "" ),
        cut = cms.double( -1.0 )
      )
    ),
    CAHardPtCut = cms.double( 0.3 ),
    maxChi2 = cms.PSet(
      value2 = cms.double( 6.0 ),
      value1 = cms.double( 100.0 ),
      pt1 = cms.double( 0.8 ),
      enabled = cms.bool( True ),
      pt2 = cms.double( 8.0 )
    ),
    SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
)
# Converts the CA triplets into trajectory seeds for iteration 2.
process.hltIter2IterL3MuonPixelSeeds = cms.EDProducer( "SeedCreatorFromRegionConsecutiveHitsTripletOnlyEDProducer",
    seedingHitSets = cms.InputTag( "hltIter2IterL3MuonPixelHitTriplets" ),
    propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
    SeedMomentumForBOFF = cms.double( 5.0 ),
    OriginTransverseErrorMultiplier = cms.double( 1.0 ),
    MinOneOverPtError = cms.double( 1.0 ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    magneticField = cms.string( "ParabolicMf" ),
    forceKinematicWithRegionDirection = cms.bool( False ),
    SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
)
# ---------------------------------------------------------------------------
# Iteration-2 IterL3-muon track building, fitting, selection, and the merge of
# iteration-0 and iteration-2 track collections.
# ---------------------------------------------------------------------------
# Combinatorial Kalman-filter pattern recognition on the iter2 pixel seeds,
# using the cluster-masked measurement-tracker event.
process.hltIter2IterL3MuonCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( False ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTrackerEvent = cms.InputTag( "hltIter2IterL3MuonMaskedMeasurementTrackerEvent" ),
    src = cms.InputTag( "hltIter2IterL3MuonPixelSeeds" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter2IterL3MuonPSetGroupedCkfTrajectoryBuilderIT" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 100000 ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final Kalman fit of the iter2 candidates; tags tracks with algo "hltIter2".
process.hltIter2IterL3MuonCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( True ),
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    src = cms.InputTag( "hltIter2IterL3MuonCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "hltIter2" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "" ),
    MeasurementTracker = cms.string( "" ),
    MeasurementTrackerEvent = cms.InputTag( "hltIter2IterL3MuonMaskedMeasurementTrackerEvent" )
)
# Quality classifier for iter2 tracks; looser hit requirements than iter0
# (minPixelHits / min3DLayers all zero) but a real chi2 cut (maxChi2 tier 25).
process.hltIter2IterL3MuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIter2IterL3MuonCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "hltIterL3MuonTrimmedPixelVertices" ),
    ignoreVertices = cms.bool( False ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet(
      minPixelHits = cms.vint32( 0, 0, 0 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
      dr_par = cms.PSet(
        d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
        dr_par2 = cms.vdouble( 3.40282346639E38, 0.3, 3.40282346639E38 ),
        dr_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
        dr_exp = cms.vint32( 4, 4, 2147483647 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
      ),
      maxLostLayers = cms.vint32( 1, 1, 1 ),
      min3DLayers = cms.vint32( 0, 0, 0 ),
      dz_par = cms.PSet(
        dz_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
        dz_par2 = cms.vdouble( 3.40282346639E38, 0.35, 3.40282346639E38 ),
        dz_exp = cms.vint32( 4, 4, 2147483647 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 9999.0, 25.0, 3.40282346639E38 ),
      maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 3, 3 )
    )
)
# highPurity selection of the iter2 tracks.
process.hltIter2IterL3MuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIter2IterL3MuonCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIter2IterL3MuonTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIter2IterL3MuonTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# Duplicate-resolving merge of the iter0 and iter2 high-purity collections.
process.hltIter2IterL3MuonMerged = cms.EDProducer( "TrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    FoundHitBonus = cms.double( 5.0 ),
    LostHitPenalty = cms.double( 20.0 ),
    MinPT = cms.double( 0.05 ),
    Epsilon = cms.double( -0.001 ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    MinFound = cms.int32( 3 ),
    TrackProducers = cms.VInputTag( 'hltIter0IterL3MuonTrackSelectionHighPurity','hltIter2IterL3MuonTrackSelectionHighPurity' ),
    hasSelector = cms.vint32( 0, 0 ),
    indivShareFrac = cms.vdouble( 1.0, 1.0 ),
    selectedTrackQuals = cms.VInputTag( 'hltIter0IterL3MuonTrackSelectionHighPurity','hltIter2IterL3MuonTrackSelectionHighPurity' ),
    setsToMerge = cms.VPSet(
      cms.PSet( pQual = cms.bool( False ),
        tLists = cms.vint32( 0, 1 )
      )
    ),
    trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    copyExtras = cms.untracked.bool( True ),
    writeOnlyTrkQuals = cms.bool( False ),
    copyMVA = cms.bool( False )
)
# ---------------------------------------------------------------------------
# Iteration-3 IterL3-muon seeding chain: mask clusters used by iteration 2
# (chained onto the iter2 removal info), then build pixel *pair* seeds around
# the selected L2 muons. Unlike iter2 there is no CA step: the doublets are
# emitted directly as seeding hit sets.
# ---------------------------------------------------------------------------
# Masks clusters consumed by the iter2 high-purity tracks, on top of the
# clusters already removed for iter2 (oldClusterRemovalInfo).
process.hltIter3IterL3MuonClustersRefRemoval = cms.EDProducer( "TrackClusterRemover",
    trajectories = cms.InputTag( "hltIter2IterL3MuonTrackSelectionHighPurity" ),
    trackClassifier = cms.InputTag( '','QualityMasks' ),
    pixelClusters = cms.InputTag( "hltSiPixelClusters" ),
    stripClusters = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    oldClusterRemovalInfo = cms.InputTag( "hltIter2IterL3MuonClustersRefRemoval" ),
    TrackQuality = cms.string( "highPurity" ),
    maxChi2 = cms.double( 16.0 ),
    minNumberOfLayersWithMeasBeforeFiltering = cms.int32( 0 ),
    overrideTrkQuals = cms.InputTag( "" )
)
# MeasurementTrackerEvent with the iter3 mask applied.
process.hltIter3IterL3MuonMaskedMeasurementTrackerEvent = cms.EDProducer( "MaskedMeasurementTrackerEventProducer",
    src = cms.InputTag( "hltSiStripClusters" ),
    OnDemand = cms.bool( False ),
    clustersToSkip = cms.InputTag( "hltIter3IterL3MuonClustersRefRemoval" )
)
# Pixel-layer *pair* combinations (iter3 recovers tracks with fewer hits).
process.hltIter3IterL3MuonPixelLayerPairs = cms.EDProducer( "SeedingLayersEDProducer",
    layerList = cms.vstring( 'BPix1+BPix2',
      'BPix1+BPix3',
      'BPix1+BPix4',
      'BPix2+BPix3',
      'BPix2+BPix4',
      'BPix3+BPix4',
      'BPix1+FPix1_pos',
      'BPix1+FPix1_neg',
      'BPix1+FPix2_pos',
      'BPix1+FPix2_neg',
      'BPix1+FPix3_pos',
      'BPix1+FPix3_neg',
      'BPix2+FPix1_pos',
      'BPix2+FPix1_neg',
      'BPix2+FPix2_pos',
      'BPix2+FPix2_neg',
      'BPix3+FPix1_pos',
      'BPix3+FPix1_neg',
      'FPix1_pos+FPix2_pos',
      'FPix1_neg+FPix2_neg',
      'FPix1_pos+FPix3_pos',
      'FPix1_neg+FPix3_neg',
      'FPix2_pos+FPix3_pos',
      'FPix2_neg+FPix3_neg' ),
    BPix = cms.PSet(
      hitErrorRPhi = cms.double( 0.0027 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      skipClusters = cms.InputTag( "hltIter3IterL3MuonClustersRefRemoval" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.006 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    FPix = cms.PSet(
      hitErrorRPhi = cms.double( 0.0051 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      skipClusters = cms.InputTag( "hltIter3IterL3MuonClustersRefRemoval" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.0036 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    TIB = cms.PSet(  ),
    TID = cms.PSet(  ),
    TOB = cms.PSet(  ),
    TEC = cms.PSet(  ),
    MTIB = cms.PSet(  ),
    MTID = cms.PSet(  ),
    MTOB = cms.PSet(  ),
    MTEC = cms.PSet(  )
)
# Wraps the selected L2 muons as charged candidates to seed the iter3 regions.
process.hltIter3IterL3MuonL2Candidates = cms.EDProducer( "ConcreteChargedCandidateProducer",
    src = cms.InputTag( "hltL2SelectorForL3IO" ),
    particleType = cms.string( "mu+" )
)
# Narrow tracking regions (deltaEta/deltaPhi = 0.1) around each L2 candidate.
# NOTE(review): "zErrorVetex" is the actual (misspelled) parameter name of this
# producer in the framework -- do not "correct" it here.
process.hltIter3IterL3MuonTrackingRegions = cms.EDProducer( "CandidateSeededTrackingRegionsEDProducer",
    RegionPSet = cms.PSet(
      vertexCollection = cms.InputTag( "notUsed" ),
      zErrorVetex = cms.double( 0.2 ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      zErrorBeamSpot = cms.double( 24.2 ),
      maxNVertices = cms.int32( 1 ),
      maxNRegions = cms.int32( 2 ),
      nSigmaZVertex = cms.double( 3.0 ),
      nSigmaZBeamSpot = cms.double( 4.0 ),
      ptMin = cms.double( 2.0 ),
      mode = cms.string( "BeamSpotSigma" ),
      input = cms.InputTag( "hltIter3IterL3MuonL2Candidates" ),
      searchOpt = cms.bool( False ),
      whereToUseMeasurementTracker = cms.string( "Never" ),
      originRadius = cms.double( 0.015 ),
      measurementTrackerName = cms.InputTag( "" ),
      precise = cms.bool( True ),
      deltaEta = cms.double( 0.1 ),
      deltaPhi = cms.double( 0.1 )
    )
)
# Occupancy guard; disabled (doClusterCheck = False).
process.hltIter3IterL3MuonPixelClusterCheck = cms.EDProducer( "ClusterCheckerEDProducer",
    doClusterCheck = cms.bool( False ),
    MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
    ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
    MaxNumberOfPixelClusters = cms.uint32( 10000 ),
    PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
    cut = cms.string( "" ),
    silentClusterCheck = cms.untracked.bool( False )
)
# Hit doublets emitted directly as seeding hit sets (produceSeedingHitSets =
# True) -- no intermediate-doublet/CA stage in this iteration.
process.hltIter3IterL3MuonPixelHitDoublets = cms.EDProducer( "HitPairEDProducer",
    seedingLayers = cms.InputTag( "hltIter3IterL3MuonPixelLayerPairs" ),
    trackingRegions = cms.InputTag( "hltIter3IterL3MuonTrackingRegions" ),
    trackingRegionsSeedingLayers = cms.InputTag( "" ),
    clusterCheck = cms.InputTag( "hltIter3IterL3MuonPixelClusterCheck" ),
    produceSeedingHitSets = cms.bool( True ),
    produceIntermediateHitDoublets = cms.bool( False ),
    maxElement = cms.uint32( 0 ),
    maxElementTotal = cms.uint32( 50000000 ),
    layerPairs = cms.vuint32( 0 )
)
# Converts the hit pairs into trajectory seeds for iteration 3.
process.hltIter3IterL3MuonPixelSeeds = cms.EDProducer( "SeedCreatorFromRegionConsecutiveHitsEDProducer",
    seedingHitSets = cms.InputTag( "hltIter3IterL3MuonPixelHitDoublets" ),
    propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
    SeedMomentumForBOFF = cms.double( 5.0 ),
    OriginTransverseErrorMultiplier = cms.double( 1.0 ),
    MinOneOverPtError = cms.double( 1.0 ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    magneticField = cms.string( "ParabolicMf" ),
    forceKinematicWithRegionDirection = cms.bool( False ),
    SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
)
# ---------------------------------------------------------------------------
# Iteration-3 IterL3-muon track building, fitting, selection, and the final
# merge of the iter0+iter2 collection with iteration 3.
# ---------------------------------------------------------------------------
# CKF pattern recognition on the iter3 pair seeds. Note it deliberately reuses
# the *iter2* trajectory-builder PSet (HLTIter2IterL3MuonPSet...).
process.hltIter3IterL3MuonCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( False ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTrackerEvent = cms.InputTag( "hltIter3IterL3MuonMaskedMeasurementTrackerEvent" ),
    src = cms.InputTag( "hltIter3IterL3MuonPixelSeeds" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter2IterL3MuonPSetGroupedCkfTrajectoryBuilderIT" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 100000 ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final Kalman fit of the iter3 candidates; tags tracks with algo "hltIter3".
process.hltIter3IterL3MuonCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( True ),
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    src = cms.InputTag( "hltIter3IterL3MuonCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "hltIter3" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "" ),
    MeasurementTracker = cms.string( "" ),
    MeasurementTrackerEvent = cms.InputTag( "hltIter3IterL3MuonMaskedMeasurementTrackerEvent" )
)
# Quality classifier for iter3 tracks (same cut structure as the iter2 one).
process.hltIter3IterL3MuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIter3IterL3MuonCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "hltIterL3MuonTrimmedPixelVertices" ),
    ignoreVertices = cms.bool( False ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet(
      minPixelHits = cms.vint32( 0, 0, 0 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
      dr_par = cms.PSet(
        d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
        dr_par2 = cms.vdouble( 3.40282346639E38, 0.3, 3.40282346639E38 ),
        dr_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
        dr_exp = cms.vint32( 4, 4, 2147483647 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
      ),
      maxLostLayers = cms.vint32( 1, 1, 1 ),
      min3DLayers = cms.vint32( 0, 0, 0 ),
      dz_par = cms.PSet(
        dz_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
        dz_par2 = cms.vdouble( 3.40282346639E38, 0.35, 3.40282346639E38 ),
        dz_exp = cms.vint32( 4, 4, 2147483647 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 9999.0, 25.0, 3.40282346639E38 ),
      maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 3, 3 )
    )
)
# highPurity selection of the iter3 tracks.
process.hltIter3IterL3MuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIter3IterL3MuonCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIter3IterL3MuonTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIter3IterL3MuonTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# Final merge: (iter0+iter2 merged) + iter3 high-purity tracks.
process.hltIter3IterL3MuonMerged = cms.EDProducer( "TrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    FoundHitBonus = cms.double( 5.0 ),
    LostHitPenalty = cms.double( 20.0 ),
    MinPT = cms.double( 0.05 ),
    Epsilon = cms.double( -0.001 ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    MinFound = cms.int32( 3 ),
    TrackProducers = cms.VInputTag( 'hltIter2IterL3MuonMerged','hltIter3IterL3MuonTrackSelectionHighPurity' ),
    hasSelector = cms.vint32( 0, 0 ),
    indivShareFrac = cms.vdouble( 1.0, 1.0 ),
    selectedTrackQuals = cms.VInputTag( 'hltIter2IterL3MuonMerged','hltIter3IterL3MuonTrackSelectionHighPurity' ),
    setsToMerge = cms.VPSet(
      cms.PSet( pQual = cms.bool( False ),
        tLists = cms.vint32( 0, 1 )
      )
    ),
    trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    copyExtras = cms.untracked.bool( True ),
    writeOnlyTrkQuals = cms.bool( False ),
    copyMVA = cms.bool( False )
)
# ---------------------------------------------------------------------------
# Inside-out L3 muon reconstruction: matches the merged tracker tracks
# (hltIter3IterL3MuonMerged, via tkTrajLabel) to the vertex-updated L2 muons
# and refits the combined trajectory.
# ---------------------------------------------------------------------------
process.hltL3MuonsIterL3IO = cms.EDProducer( "L3MuonProducer",
    # Propagators made available to the muon service framework.
    ServiceParameters = cms.PSet(
      RPCLayers = cms.bool( True ),
      UseMuonNavigation = cms.untracked.bool( True ),
      Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',
        'SteppingHelixPropagatorAny',
        'hltESPSmartPropagator',
        'hltESPSteppingHelixPropagatorOpposite' )
    ),
    # Seed muons: L2 muons with the vertex-constrained ("UpdatedAtVtx") fit.
    MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
    # How the produced muon tracks are written to the event (no smoothing,
    # no vertex constraint; tracker track not stored separately).
    TrackLoaderParameters = cms.PSet(
      MuonSeededTracksInstance = cms.untracked.string( "L2Seeded" ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      DoSmoothing = cms.bool( False ),
      SmoothTkTrack = cms.untracked.bool( False ),
      VertexConstraint = cms.bool( False ),
      MuonUpdatorAtVertexParameters = cms.PSet(
        MaxChi2 = cms.double( 1000000.0 ),
        BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 ),
        Propagator = cms.string( "hltESPSteppingHelixPropagatorOpposite" )
      ),
      PutTkTrackIntoEvent = cms.untracked.bool( False ),
      Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
    ),
    # Core trajectory-building configuration: L2->tracker matching, regional
    # seeding, refit settings.
    L3TrajBuilderParameters = cms.PSet(
      PtCut = cms.double( 1.0 ),
      TrackerPropagator = cms.string( "SteppingHelixPropagatorAny" ),
      # chi2 / deltaR / deltaD windows for matching tracker tracks to L2 muons.
      GlobalMuonTrackMatcher = cms.PSet(
        Chi2Cut_3 = cms.double( 200.0 ),
        DeltaDCut_2 = cms.double( 10.0 ),
        Eta_threshold = cms.double( 1.2 ),
        Quality_2 = cms.double( 15.0 ),
        DeltaDCut_1 = cms.double( 40.0 ),
        Quality_3 = cms.double( 7.0 ),
        DeltaDCut_3 = cms.double( 15.0 ),
        Quality_1 = cms.double( 20.0 ),
        Pt_threshold1 = cms.double( 0.0 ),
        DeltaRCut_2 = cms.double( 0.2 ),
        DeltaRCut_1 = cms.double( 0.1 ),
        Pt_threshold2 = cms.double( 9.99999999E8 ),
        Chi2Cut_1 = cms.double( 50.0 ),
        Chi2Cut_2 = cms.double( 50.0 ),
        DeltaRCut_3 = cms.double( 1.0 ),
        LocChi2Cut = cms.double( 0.001 ),
        Propagator = cms.string( "hltESPSmartPropagator" ),
        MinPt = cms.double( 1.0 ),
        MinP = cms.double( 2.5 )
      ),
      ScaleTECxFactor = cms.double( -1.0 ),
      tkTrajUseVertex = cms.bool( False ),
      # Fixed-size (Pt/Eta/Phi/Z fixed) tracking regions around the L2 seeds.
      MuonTrackingRegionBuilder = cms.PSet(
        Rescale_Dz = cms.double( 4.0 ),
        Pt_fixed = cms.bool( True ),
        Eta_fixed = cms.bool( True ),
        Eta_min = cms.double( 0.1 ),
        DeltaZ = cms.double( 24.2 ),
        maxRegions = cms.int32( 2 ),
        EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
        UseVertex = cms.bool( False ),
        Z_fixed = cms.bool( True ),
        PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
        PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
        Rescale_phi = cms.double( 3.0 ),
        DeltaEta = cms.double( 0.04 ),
        precise = cms.bool( True ),
        OnDemand = cms.int32( -1 ),
        EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
        MeasurementTrackerName = cms.InputTag( "hltESPMeasurementTracker" ),
        vertexCollection = cms.InputTag( "pixelVertices" ),
        Pt_min = cms.double( 3.0 ),
        beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
        Phi_fixed = cms.bool( True ),
        DeltaR = cms.double( 0.025 ),
        input = cms.InputTag( "hltL2SelectorForL3IO" ),
        DeltaPhi = cms.double( 0.15 ),
        Phi_min = cms.double( 0.1 ),
        Rescale_eta = cms.double( 3.0 )
      ),
      # Inside-out refit of the tracker part, including RPC hits.
      TrackTransformer = cms.PSet(
        Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
        RefitDirection = cms.string( "insideOut" ),
        RefitRPCHits = cms.bool( True ),
        Propagator = cms.string( "hltESPSmartPropagatorAny" ),
        DoPredictionsOnly = cms.bool( False ),
        TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
        MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
        Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
      ),
      tkTrajBeamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      RefitRPCHits = cms.bool( True ),
      tkTrajVertex = cms.InputTag( "hltIterL3MuonPixelVertices" ),
      # Global (tracker+muon-system) refit with per-subsystem chi2 hit cuts.
      GlbRefitterParameters = cms.PSet(
        Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
        DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
        RefitFlag = cms.bool( True ),
        SkipStation = cms.int32( -1 ),
        Chi2CutRPC = cms.double( 1.0 ),
        PropDirForCosmics = cms.bool( False ),
        CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
        GEMRecHitLabel = cms.InputTag( "hltGemRecHits" ),
        HitThreshold = cms.int32( 1 ),
        Chi2CutGEM = cms.double( 1.0 ),
        DYTthrs = cms.vint32( 30, 15 ),
        TrackerSkipSystem = cms.int32( -1 ),
        RefitDirection = cms.string( "insideOut" ),
        Chi2CutCSC = cms.double( 150.0 ),
        Chi2CutDT = cms.double( 10.0 ),
        RefitRPCHits = cms.bool( True ),
        TrackerSkipSection = cms.int32( -1 ),
        Propagator = cms.string( "hltESPSmartPropagatorAny" ),
        DoPredictionsOnly = cms.bool( False ),
        TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
        MuonHitsOption = cms.int32( 1 ),
        MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
      ),
      PCut = cms.double( 2.5 ),
      tkTrajMaxDXYBeamSpot = cms.double( 9999.0 ),
      TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      matchToSeeds = cms.bool( True ),
      tkTrajMaxChi2 = cms.double( 9999.0 ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
      ScaleTECyFactor = cms.double( -1.0 ),
      # Tracker tracks to match: output of the 3-iteration merge above.
      tkTrajLabel = cms.InputTag( "hltIter3IterL3MuonMerged" )
    )
)
# Combines the outside-in (IterL3OI) and inside-out (IterL3IO) L3 muon
# track-link collections into one.
process.hltIterL3MuonsFromL2LinksCombination = cms.EDProducer( "L3TrackLinksCombiner",
    labels = cms.VInputTag( 'hltL3MuonsIterL3OI','hltL3MuonsIterL3IO' )
)
# ---------------------------------------------------------------------------
# L1-seeded ("FromL1Muon") tracking branch: recover muons directly from L1
# candidates when no L2 seed is available.
# ---------------------------------------------------------------------------
# Selects L1 muons (quality >= 7, |eta| < 5, no pT cut) from the uGT digis.
process.hltL1MuonsPt0 = cms.EDProducer( "HLTL1TMuonSelector",
    InputObjects = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1MinPt = cms.double( -1.0 ),
    L1MaxEta = cms.double( 5.0 ),
    L1MinQuality = cms.uint32( 7 ),
    CentralBxOnly = cms.bool( True )
)
# Tracking regions around the selected L1 muons (wider than the L2-seeded
# regions: deltaEta 0.35 / deltaPhi 0.2, ptMin 10).
# NOTE(review): "zErrorVetex" is the producer's actual (misspelled) parameter
# name -- do not "correct" it here.
process.hltIterL3FromL1MuonPixelTracksTrackingRegions = cms.EDProducer( "CandidateSeededTrackingRegionsEDProducer",
    RegionPSet = cms.PSet(
      vertexCollection = cms.InputTag( "notUsed" ),
      zErrorVetex = cms.double( 0.2 ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      zErrorBeamSpot = cms.double( 24.2 ),
      maxNVertices = cms.int32( 1 ),
      maxNRegions = cms.int32( 2 ),
      nSigmaZVertex = cms.double( 3.0 ),
      nSigmaZBeamSpot = cms.double( 4.0 ),
      ptMin = cms.double( 10.0 ),
      mode = cms.string( "BeamSpotSigma" ),
      input = cms.InputTag( "hltL1MuonsPt0" ),
      searchOpt = cms.bool( False ),
      whereToUseMeasurementTracker = cms.string( "Never" ),
      originRadius = cms.double( 0.2 ),
      measurementTrackerName = cms.InputTag( "" ),
      precise = cms.bool( True ),
      deltaEta = cms.double( 0.35 ),
      deltaPhi = cms.double( 0.2 )
    )
)
# Pixel-layer quadruplet combinations for the L1-seeded pixel tracks.
process.hltIterL3FromL1MuonPixelLayerQuadruplets = cms.EDProducer( "SeedingLayersEDProducer",
    layerList = cms.vstring( 'BPix1+BPix2+BPix3+BPix4',
      'BPix1+BPix2+BPix3+FPix1_pos',
      'BPix1+BPix2+BPix3+FPix1_neg',
      'BPix1+BPix2+FPix1_pos+FPix2_pos',
      'BPix1+BPix2+FPix1_neg+FPix2_neg',
      'BPix1+FPix1_pos+FPix2_pos+FPix3_pos',
      'BPix1+FPix1_neg+FPix2_neg+FPix3_neg' ),
    BPix = cms.PSet(
      hitErrorRPhi = cms.double( 0.0027 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.006 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    FPix = cms.PSet(
      hitErrorRPhi = cms.double( 0.0051 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.0036 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    TIB = cms.PSet(  ),
    TID = cms.PSet(  ),
    TOB = cms.PSet(  ),
    TEC = cms.PSet(  ),
    MTIB = cms.PSet(  ),
    MTID = cms.PSet(  ),
    MTOB = cms.PSet(  ),
    MTEC = cms.PSet(  )
)
# Hit doublets (intermediate, for the CA quadruplet step) in the L1 regions.
process.hltIterL3FromL1MuonPixelTracksHitDoublets = cms.EDProducer( "HitPairEDProducer",
    seedingLayers = cms.InputTag( "hltIterL3FromL1MuonPixelLayerQuadruplets" ),
    trackingRegions = cms.InputTag( "hltIterL3FromL1MuonPixelTracksTrackingRegions" ),
    trackingRegionsSeedingLayers = cms.InputTag( "" ),
    clusterCheck = cms.InputTag( "" ),
    produceSeedingHitSets = cms.bool( False ),
    produceIntermediateHitDoublets = cms.bool( True ),
    maxElement = cms.uint32( 0 ),
    maxElementTotal = cms.uint32( 50000000 ),
    layerPairs = cms.vuint32( 0, 1, 2 )
)
# ---------------------------------------------------------------------------
# L1-seeded pixel tracks, pixel vertices, and iteration-0 seeds from them.
# ---------------------------------------------------------------------------
# Cellular-automaton quadruplet builder with a low-pT cluster-shape filter.
process.hltIterL3FromL1MuonPixelTracksHitQuadruplets = cms.EDProducer( "CAHitQuadrupletEDProducer",
    doublets = cms.InputTag( "hltIterL3FromL1MuonPixelTracksHitDoublets" ),
    extraHitRPhitolerance = cms.double( 0.032 ),
    fitFastCircle = cms.bool( True ),
    fitFastCircleChi2Cut = cms.bool( True ),
    useBendingCorrection = cms.bool( True ),
    CAThetaCut = cms.double( 0.005 ),
    CAPhiCut = cms.double( 0.2 ),
    CAThetaCut_byTriplets = cms.VPSet(
      cms.PSet( seedingLayers = cms.string( "" ),
        cut = cms.double( -1.0 )
      )
    ),
    CAPhiCut_byTriplets = cms.VPSet(
      cms.PSet( seedingLayers = cms.string( "" ),
        cut = cms.double( -1.0 )
      )
    ),
    CAHardPtCut = cms.double( 0.0 ),
    maxChi2 = cms.PSet(
      value2 = cms.double( 50.0 ),
      value1 = cms.double( 200.0 ),
      pt1 = cms.double( 0.7 ),
      enabled = cms.bool( True ),
      pt2 = cms.double( 2.0 )
    ),
    SeedComparitorPSet = cms.PSet(
      clusterShapeHitFilter = cms.string( "ClusterShapeHitFilter" ),
      ComponentName = cms.string( "LowPtClusterShapeSeedComparitor" ),
      clusterShapeCacheSrc = cms.InputTag( "hltSiPixelClustersCache" )
    )
)
# Pixel tracks from the CA quadruplets; fitter/filter modules are shared with
# the L2-seeded (hltIterL3Muon*) branch.
process.hltIterL3FromL1MuonPixelTracks = cms.EDProducer( "PixelTrackProducer",
    passLabel = cms.string( "" ),
    SeedingHitSets = cms.InputTag( "hltIterL3FromL1MuonPixelTracksHitQuadruplets" ),
    Fitter = cms.InputTag( "hltIterL3MuonPixelTracksFitter" ),
    Filter = cms.InputTag( "hltIterL3MuonPixelTracksFilter" ),
    Cleaner = cms.string( "hltPixelTracksCleanerBySharedHits" )
)
# Pixel vertices for the FromL1 branch.
# NOTE(review): TrackCollection points at "hltIterL3MuonPixelTracks" (the
# L2-seeded collection), not "hltIterL3FromL1MuonPixelTracks" -- confirm
# against the upstream menu whether this cross-branch input is intentional.
process.hltIterL3FromL1MuonPixelVertices = cms.EDProducer( "PixelVertexProducer",
    WtAverage = cms.bool( True ),
    ZOffset = cms.double( 5.0 ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Verbosity = cms.int32( 0 ),
    UseError = cms.bool( True ),
    TrackCollection = cms.InputTag( "hltIterL3MuonPixelTracks" ),
    ZSeparation = cms.double( 0.05 ),
    NTrkMin = cms.int32( 2 ),
    Method2 = cms.bool( True ),
    Finder = cms.string( "DivisiveVertexFinder" ),
    PtMin = cms.double( 1.0 ),
    PVcomparer = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPvClusterComparerForIT" ) )
)
# Keeps at most 100 vertices, ranked by sumPt2 (fraction threshold 0.3).
process.hltIterL3FromL1MuonTrimmedPixelVertices = cms.EDProducer( "PixelVertexCollectionTrimmer",
    src = cms.InputTag( "hltIterL3FromL1MuonPixelVertices" ),
    maxVtx = cms.uint32( 100 ),
    fractionSumPt2 = cms.double( 0.3 ),
    minSumPt2 = cms.double( 0.0 ),
    PVcomparer = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPvClusterComparerForIT" ) )
)
# Iteration-0 trajectory seeds built from the FromL1 pixel tracks.
process.hltIter0IterL3FromL1MuonPixelSeedsFromPixelTracks = cms.EDProducer( "SeedGeneratorFromProtoTracksEDProducer",
    InputCollection = cms.InputTag( "hltIterL3FromL1MuonPixelTracks" ),
    InputVertexCollection = cms.InputTag( "hltIterL3FromL1MuonTrimmedPixelVertices" ),
    originHalfLength = cms.double( 0.3 ),
    originRadius = cms.double( 0.1 ),
    useProtoTrackKinematics = cms.bool( False ),
    useEventsWithNoVertex = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    usePV = cms.bool( False ),
    includeFourthHit = cms.bool( False ),
    SeedCreatorPSet = cms.PSet(  refToPSet_ = cms.string( "HLTSeedFromProtoTracks" ) )
)
# ---------------------------------------------------------------------------
# Iteration-0 track building/fit/selection for the L1-seeded branch (mirrors
# the L2-seeded iter0 chain, with FromL1 inputs).
# ---------------------------------------------------------------------------
# CKF pattern recognition on the FromL1 pixel-track seeds (with seeding-region
# rebuilding and hit splitting, as in iter0).
process.hltIter0IterL3FromL1MuonCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( True ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( False ),
    useHitsSplitting = cms.bool( True ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" ),
    src = cms.InputTag( "hltIter0IterL3FromL1MuonPixelSeedsFromPixelTracks" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter0IterL3FromL1MuonPSetGroupedCkfTrajectoryBuilderIT" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "none" ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 100000 ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final Kalman fit; tags tracks with algo "hltIter0".
process.hltIter0IterL3FromL1MuonCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( True ),
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    src = cms.InputTag( "hltIter0IterL3FromL1MuonCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "hltIter0" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "" ),
    MeasurementTracker = cms.string( "" ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" )
)
# Quality classifier; identical cut structure to the L2-seeded iter0
# classifier, but vertices come from the FromL1 trimmed collection.
process.hltIter0IterL3FromL1MuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIter0IterL3FromL1MuonCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "hltIterL3FromL1MuonTrimmedPixelVertices" ),
    ignoreVertices = cms.bool( False ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet(
      minPixelHits = cms.vint32( 0, 3, 4 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
      dr_par = cms.PSet(
        d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
        dr_par2 = cms.vdouble( 0.3, 0.3, 3.40282346639E38 ),
        dr_par1 = cms.vdouble( 0.4, 0.4, 3.40282346639E38 ),
        dr_exp = cms.vint32( 4, 4, 2147483647 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
      ),
      maxLostLayers = cms.vint32( 1, 1, 1 ),
      min3DLayers = cms.vint32( 0, 3, 4 ),
      dz_par = cms.PSet(
        dz_par1 = cms.vdouble( 0.4, 0.4, 3.40282346639E38 ),
        dz_par2 = cms.vdouble( 0.35, 0.35, 3.40282346639E38 ),
        dz_exp = cms.vint32( 4, 4, 2147483647 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 3.40282346639E38, 3.40282346639E38, 3.40282346639E38 ),
      maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 3, 4 )
    )
)
# highPurity selection of the FromL1 iter0 tracks.
process.hltIter0IterL3FromL1MuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIter0IterL3FromL1MuonCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIter0IterL3FromL1MuonTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIter0IterL3FromL1MuonTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# --- Iteration 2 (L1-muon-seeded chain): pixel-triplet seeding, tracking and merge with iter0 ---

# Masks pixel/strip clusters already used by the iter0 high-purity tracks so
# the next iteration seeds only from the remaining hits.
process.hltIter2IterL3FromL1MuonClustersRefRemoval = cms.EDProducer( "TrackClusterRemover",
  trajectories = cms.InputTag( "hltIter0IterL3FromL1MuonTrackSelectionHighPurity" ),
  trackClassifier = cms.InputTag( '','QualityMasks' ),
  pixelClusters = cms.InputTag( "hltSiPixelClusters" ),
  stripClusters = cms.InputTag( "hltSiStripRawToClustersFacility" ),
  oldClusterRemovalInfo = cms.InputTag( "" ),
  TrackQuality = cms.string( "highPurity" ),
  maxChi2 = cms.double( 16.0 ),
  minNumberOfLayersWithMeasBeforeFiltering = cms.int32( 0 ),
  overrideTrkQuals = cms.InputTag( "" )
)
# MeasurementTrackerEvent with the clusters above skipped.
process.hltIter2IterL3FromL1MuonMaskedMeasurementTrackerEvent = cms.EDProducer( "MaskedMeasurementTrackerEventProducer",
  src = cms.InputTag( "hltSiStripClusters" ),
  OnDemand = cms.bool( False ),
  clustersToSkip = cms.InputTag( "hltIter2IterL3FromL1MuonClustersRefRemoval" )
)
# Pixel-layer triplet combinations used for iter2 seeding; hit errors are
# fixed parametrized values rather than taken from cluster shape.
process.hltIter2IterL3FromL1MuonPixelLayerTriplets = cms.EDProducer( "SeedingLayersEDProducer",
  layerList = cms.vstring( 'BPix1+BPix2+BPix3',
    'BPix2+BPix3+BPix4',
    'BPix1+BPix3+BPix4',
    'BPix1+BPix2+BPix4',
    'BPix2+BPix3+FPix1_pos',
    'BPix2+BPix3+FPix1_neg',
    'BPix1+BPix2+FPix1_pos',
    'BPix1+BPix2+FPix1_neg',
    'BPix2+FPix1_pos+FPix2_pos',
    'BPix2+FPix1_neg+FPix2_neg',
    'BPix1+FPix1_pos+FPix2_pos',
    'BPix1+FPix1_neg+FPix2_neg',
    'FPix1_pos+FPix2_pos+FPix3_pos',
    'FPix1_neg+FPix2_neg+FPix3_neg',
    'BPix1+BPix3+FPix1_pos',
    'BPix1+BPix2+FPix2_pos',
    'BPix1+BPix3+FPix1_neg',
    'BPix1+BPix2+FPix2_neg',
    'BPix1+FPix2_neg+FPix3_neg',
    'BPix1+FPix1_neg+FPix3_neg',
    'BPix1+FPix2_pos+FPix3_pos',
    'BPix1+FPix1_pos+FPix3_pos' ),
  BPix = cms.PSet(
    hitErrorRPhi = cms.double( 0.0027 ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter2IterL3FromL1MuonClustersRefRemoval" ),
    useErrorsFromParam = cms.bool( True ),
    hitErrorRZ = cms.double( 0.006 ),
    HitProducer = cms.string( "hltSiPixelRecHits" )
  ),
  FPix = cms.PSet(
    hitErrorRPhi = cms.double( 0.0051 ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter2IterL3FromL1MuonClustersRefRemoval" ),
    useErrorsFromParam = cms.bool( True ),
    hitErrorRZ = cms.double( 0.0036 ),
    HitProducer = cms.string( "hltSiPixelRecHits" )
  ),
  TIB = cms.PSet(  ),
  TID = cms.PSet(  ),
  TOB = cms.PSet(  ),
  TEC = cms.PSet(  ),
  MTIB = cms.PSet(  ),
  MTID = cms.PSet(  ),
  MTOB = cms.PSet(  ),
  MTEC = cms.PSet(  )
)
# Occupancy guard (disabled here: doClusterCheck = False).
process.hltIter2IterL3FromL1MuonPixelClusterCheck = cms.EDProducer( "ClusterCheckerEDProducer",
  doClusterCheck = cms.bool( False ),
  MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
  ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
  MaxNumberOfPixelClusters = cms.uint32( 10000 ),
  PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
  cut = cms.string( "" ),
  silentClusterCheck = cms.untracked.bool( False )
)
# Hit doublets feeding the CA triplet finder (intermediate doublets only).
# NOTE(review): reuses the pixel-track tracking regions of the L1-muon chain
# (hltIterL3FromL1MuonPixelTracksTrackingRegions) — confirm intentional.
process.hltIter2IterL3FromL1MuonPixelHitDoublets = cms.EDProducer( "HitPairEDProducer",
  seedingLayers = cms.InputTag( "hltIter2IterL3FromL1MuonPixelLayerTriplets" ),
  trackingRegions = cms.InputTag( "hltIterL3FromL1MuonPixelTracksTrackingRegions" ),
  trackingRegionsSeedingLayers = cms.InputTag( "" ),
  clusterCheck = cms.InputTag( "hltIter2IterL3FromL1MuonPixelClusterCheck" ),
  produceSeedingHitSets = cms.bool( False ),
  produceIntermediateHitDoublets = cms.bool( True ),
  maxElement = cms.uint32( 0 ),
  maxElementTotal = cms.uint32( 50000000 ),
  layerPairs = cms.vuint32( 0, 1 )
)
# Cellular-automaton triplet building from the doublets above.
process.hltIter2IterL3FromL1MuonPixelHitTriplets = cms.EDProducer( "CAHitTripletEDProducer",
  doublets = cms.InputTag( "hltIter2IterL3FromL1MuonPixelHitDoublets" ),
  extraHitRPhitolerance = cms.double( 0.032 ),
  useBendingCorrection = cms.bool( True ),
  CAThetaCut = cms.double( 0.015 ),
  CAPhiCut = cms.double( 0.1 ),
  CAThetaCut_byTriplets = cms.VPSet(
    cms.PSet(  seedingLayers = cms.string( "" ),
      cut = cms.double( -1.0 )
    )
  ),
  CAPhiCut_byTriplets = cms.VPSet(
    cms.PSet(  seedingLayers = cms.string( "" ),
      cut = cms.double( -1.0 )
    )
  ),
  CAHardPtCut = cms.double( 0.3 ),
  maxChi2 = cms.PSet(
    value2 = cms.double( 6.0 ),
    value1 = cms.double( 100.0 ),
    pt1 = cms.double( 0.8 ),
    enabled = cms.bool( True ),
    pt2 = cms.double( 8.0 )
  ),
  SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
)
# Converts the triplet hit sets into trajectory seeds.
process.hltIter2IterL3FromL1MuonPixelSeeds = cms.EDProducer( "SeedCreatorFromRegionConsecutiveHitsTripletOnlyEDProducer",
  seedingHitSets = cms.InputTag( "hltIter2IterL3FromL1MuonPixelHitTriplets" ),
  propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
  SeedMomentumForBOFF = cms.double( 5.0 ),
  OriginTransverseErrorMultiplier = cms.double( 1.0 ),
  MinOneOverPtError = cms.double( 1.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  magneticField = cms.string( "ParabolicMf" ),
  forceKinematicWithRegionDirection = cms.bool( False ),
  SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
)
# CKF pattern recognition for iter2, run on the masked measurement-tracker event.
process.hltIter2IterL3FromL1MuonCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
  cleanTrajectoryAfterInOut = cms.bool( False ),
  doSeedingRegionRebuilding = cms.bool( False ),
  onlyPixelHitsForSeedCleaner = cms.bool( False ),
  reverseTrajectories = cms.bool( False ),
  useHitsSplitting = cms.bool( False ),
  MeasurementTrackerEvent = cms.InputTag( "hltIter2IterL3FromL1MuonMaskedMeasurementTrackerEvent" ),
  src = cms.InputTag( "hltIter2IterL3FromL1MuonPixelSeeds" ),
  clustersToSkip = cms.InputTag( "" ),
  phase2clustersToSkip = cms.InputTag( "" ),
  TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter2IterL3FromL1MuonPSetGroupedCkfTrajectoryBuilderIT" ) ),
  TransientInitialStateEstimatorParameters = cms.PSet(
    propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
    numberMeasurementsForFit = cms.int32( 4 ),
    propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
  ),
  numHitsForSeedCleaner = cms.int32( 4 ),
  NavigationSchool = cms.string( "SimpleNavigationSchool" ),
  RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
  TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
  maxNSeeds = cms.uint32( 100000 ),
  maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final Kalman fit of the iter2 candidates (algorithm label "hltIter2").
process.hltIter2IterL3FromL1MuonCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
  useSimpleMF = cms.bool( True ),
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  src = cms.InputTag( "hltIter2IterL3FromL1MuonCkfTrackCandidates" ),
  clusterRemovalInfo = cms.InputTag( "" ),
  beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
  Fitter = cms.string( "hltESPFittingSmootherIT" ),
  useHitsSplitting = cms.bool( False ),
  alias = cms.untracked.string( "ctfWithMaterialTracks" ),
  TrajectoryInEvent = cms.bool( False ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  AlgorithmName = cms.string( "hltIter2" ),
  Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
  GeometricInnerState = cms.bool( True ),
  NavigationSchool = cms.string( "" ),
  MeasurementTracker = cms.string( "" ),
  MeasurementTrackerEvent = cms.InputTag( "hltIter2IterL3FromL1MuonMaskedMeasurementTrackerEvent" )
)
# Cut-based classifier for the iter2 tracks (looser hit requirements than iter0:
# minPixelHits / min3DLayers are 0 at all three working points).
process.hltIter2IterL3FromL1MuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
  src = cms.InputTag( "hltIter2IterL3FromL1MuonCtfWithMaterialTracks" ),
  beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
  vertices = cms.InputTag( "hltIterL3FromL1MuonTrimmedPixelVertices" ),
  ignoreVertices = cms.bool( False ),
  qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
  mva = cms.PSet(
    minPixelHits = cms.vint32( 0, 0, 0 ),
    maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
    dr_par = cms.PSet(
      d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
      dr_par2 = cms.vdouble( 3.40282346639E38, 0.3, 3.40282346639E38 ),
      dr_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
      dr_exp = cms.vint32( 4, 4, 2147483647 ),
      d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
    ),
    maxLostLayers = cms.vint32( 1, 1, 1 ),
    min3DLayers = cms.vint32( 0, 0, 0 ),
    dz_par = cms.PSet(
      dz_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
      dz_par2 = cms.vdouble( 3.40282346639E38, 0.35, 3.40282346639E38 ),
      dz_exp = cms.vint32( 4, 4, 2147483647 )
    ),
    minNVtxTrk = cms.int32( 3 ),
    maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
    minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
    maxChi2 = cms.vdouble( 9999.0, 25.0, 3.40282346639E38 ),
    maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
    maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
    minLayers = cms.vint32( 3, 3, 3 )
  )
)
# High-purity selection for iter2.
process.hltIter2IterL3FromL1MuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
  originalSource = cms.InputTag( "hltIter2IterL3FromL1MuonCtfWithMaterialTracks" ),
  originalMVAVals = cms.InputTag( 'hltIter2IterL3FromL1MuonTrackCutClassifier','MVAValues' ),
  originalQualVals = cms.InputTag( 'hltIter2IterL3FromL1MuonTrackCutClassifier','QualityMasks' ),
  minQuality = cms.string( "highPurity" ),
  copyExtras = cms.untracked.bool( True ),
  copyTrajectories = cms.untracked.bool( False )
)
# Merge of the iter0 and iter2 high-purity collections (duplicate removal by
# shared-hit fraction; merged tracks promoted to "confirmed" quality).
process.hltIter2IterL3FromL1MuonMerged = cms.EDProducer( "TrackListMerger",
  ShareFrac = cms.double( 0.19 ),
  FoundHitBonus = cms.double( 5.0 ),
  LostHitPenalty = cms.double( 20.0 ),
  MinPT = cms.double( 0.05 ),
  Epsilon = cms.double( -0.001 ),
  MaxNormalizedChisq = cms.double( 1000.0 ),
  MinFound = cms.int32( 3 ),
  TrackProducers = cms.VInputTag( 'hltIter0IterL3FromL1MuonTrackSelectionHighPurity','hltIter2IterL3FromL1MuonTrackSelectionHighPurity' ),
  hasSelector = cms.vint32( 0, 0 ),
  indivShareFrac = cms.vdouble( 1.0, 1.0 ),
  selectedTrackQuals = cms.VInputTag( 'hltIter0IterL3FromL1MuonTrackSelectionHighPurity','hltIter2IterL3FromL1MuonTrackSelectionHighPurity' ),
  setsToMerge = cms.VPSet(
    cms.PSet(  pQual = cms.bool( False ),
      tLists = cms.vint32( 0, 1 )
    )
  ),
  trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
  allowFirstHitShare = cms.bool( True ),
  newQuality = cms.string( "confirmed" ),
  copyExtras = cms.untracked.bool( True ),
  writeOnlyTrkQuals = cms.bool( False ),
  copyMVA = cms.bool( False )
)
# --- Iteration 3 (L1-muon-seeded chain): pixel-pair seeding, tracking and final merge ---

# Masks clusters already used by iter2 high-purity tracks, chaining onto the
# iter2 removal info so iter0+iter2 hits are all excluded.
process.hltIter3IterL3FromL1MuonClustersRefRemoval = cms.EDProducer( "TrackClusterRemover",
  trajectories = cms.InputTag( "hltIter2IterL3FromL1MuonTrackSelectionHighPurity" ),
  trackClassifier = cms.InputTag( '','QualityMasks' ),
  pixelClusters = cms.InputTag( "hltSiPixelClusters" ),
  stripClusters = cms.InputTag( "hltSiStripRawToClustersFacility" ),
  oldClusterRemovalInfo = cms.InputTag( "hltIter2IterL3FromL1MuonClustersRefRemoval" ),
  TrackQuality = cms.string( "highPurity" ),
  maxChi2 = cms.double( 16.0 ),
  minNumberOfLayersWithMeasBeforeFiltering = cms.int32( 0 ),
  overrideTrkQuals = cms.InputTag( "" )
)
# MeasurementTrackerEvent with the iter3 cluster mask applied.
process.hltIter3IterL3FromL1MuonMaskedMeasurementTrackerEvent = cms.EDProducer( "MaskedMeasurementTrackerEventProducer",
  src = cms.InputTag( "hltSiStripClusters" ),
  OnDemand = cms.bool( False ),
  clustersToSkip = cms.InputTag( "hltIter3IterL3FromL1MuonClustersRefRemoval" )
)
# Pixel-layer *pair* combinations for iter3 seeding (pairs rather than the
# triplets used in iter2, to recover lower-efficiency topologies).
process.hltIter3IterL3FromL1MuonPixelLayerPairs = cms.EDProducer( "SeedingLayersEDProducer",
  layerList = cms.vstring( 'BPix1+BPix2',
    'BPix1+BPix3',
    'BPix1+BPix4',
    'BPix2+BPix3',
    'BPix2+BPix4',
    'BPix3+BPix4',
    'BPix1+FPix1_pos',
    'BPix1+FPix1_neg',
    'BPix1+FPix2_pos',
    'BPix1+FPix2_neg',
    'BPix1+FPix3_pos',
    'BPix1+FPix3_neg',
    'BPix2+FPix1_pos',
    'BPix2+FPix1_neg',
    'BPix2+FPix2_pos',
    'BPix2+FPix2_neg',
    'BPix3+FPix1_pos',
    'BPix3+FPix1_neg',
    'FPix1_pos+FPix2_pos',
    'FPix1_neg+FPix2_neg',
    'FPix1_pos+FPix3_pos',
    'FPix1_neg+FPix3_neg',
    'FPix2_pos+FPix3_pos',
    'FPix2_neg+FPix3_neg' ),
  BPix = cms.PSet(
    hitErrorRPhi = cms.double( 0.0027 ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter3IterL3FromL1MuonClustersRefRemoval" ),
    useErrorsFromParam = cms.bool( True ),
    hitErrorRZ = cms.double( 0.006 ),
    HitProducer = cms.string( "hltSiPixelRecHits" )
  ),
  FPix = cms.PSet(
    hitErrorRPhi = cms.double( 0.0051 ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    skipClusters = cms.InputTag( "hltIter3IterL3FromL1MuonClustersRefRemoval" ),
    useErrorsFromParam = cms.bool( True ),
    hitErrorRZ = cms.double( 0.0036 ),
    HitProducer = cms.string( "hltSiPixelRecHits" )
  ),
  TIB = cms.PSet(  ),
  TID = cms.PSet(  ),
  TOB = cms.PSet(  ),
  TEC = cms.PSet(  ),
  MTIB = cms.PSet(  ),
  MTID = cms.PSet(  ),
  MTOB = cms.PSet(  ),
  MTEC = cms.PSet(  )
)
# Narrow tracking regions around the pt>0 L1 muons (hltL1MuonsPt0), sized by
# the beam-spot sigma; up to 2 regions, ptMin 10 GeV.
process.hltIter3IterL3FromL1MuonTrackingRegions = cms.EDProducer( "CandidateSeededTrackingRegionsEDProducer",
  RegionPSet = cms.PSet(
    vertexCollection = cms.InputTag( "notUsed" ),
    # NOTE(review): 'zErrorVetex' (sic) — parameter name kept as spelled here;
    # confirm against the producer's fillDescriptions before renaming.
    zErrorVetex = cms.double( 0.2 ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    zErrorBeamSpot = cms.double( 24.2 ),
    maxNVertices = cms.int32( 1 ),
    maxNRegions = cms.int32( 2 ),
    nSigmaZVertex = cms.double( 3.0 ),
    nSigmaZBeamSpot = cms.double( 4.0 ),
    ptMin = cms.double( 10.0 ),
    mode = cms.string( "BeamSpotSigma" ),
    input = cms.InputTag( "hltL1MuonsPt0" ),
    searchOpt = cms.bool( False ),
    whereToUseMeasurementTracker = cms.string( "Never" ),
    originRadius = cms.double( 0.015 ),
    measurementTrackerName = cms.InputTag( "" ),
    precise = cms.bool( True ),
    deltaEta = cms.double( 0.2 ),
    deltaPhi = cms.double( 0.1 )
  )
)
# Occupancy guard (disabled here: doClusterCheck = False).
process.hltIter3IterL3FromL1MuonPixelClusterCheck = cms.EDProducer( "ClusterCheckerEDProducer",
  doClusterCheck = cms.bool( False ),
  MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
  ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
  MaxNumberOfPixelClusters = cms.uint32( 10000 ),
  PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
  cut = cms.string( "" ),
  silentClusterCheck = cms.untracked.bool( False )
)
# Hit doublets used directly as seeding hit sets (no triplet step in iter3).
process.hltIter3IterL3FromL1MuonPixelHitDoublets = cms.EDProducer( "HitPairEDProducer",
  seedingLayers = cms.InputTag( "hltIter3IterL3FromL1MuonPixelLayerPairs" ),
  trackingRegions = cms.InputTag( "hltIter3IterL3FromL1MuonTrackingRegions" ),
  trackingRegionsSeedingLayers = cms.InputTag( "" ),
  clusterCheck = cms.InputTag( "hltIter3IterL3FromL1MuonPixelClusterCheck" ),
  produceSeedingHitSets = cms.bool( True ),
  produceIntermediateHitDoublets = cms.bool( False ),
  maxElement = cms.uint32( 0 ),
  maxElementTotal = cms.uint32( 50000000 ),
  layerPairs = cms.vuint32( 0 )
)
# Seed creation from the pair hit sets.
process.hltIter3IterL3FromL1MuonPixelSeeds = cms.EDProducer( "SeedCreatorFromRegionConsecutiveHitsEDProducer",
  seedingHitSets = cms.InputTag( "hltIter3IterL3FromL1MuonPixelHitDoublets" ),
  propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
  SeedMomentumForBOFF = cms.double( 5.0 ),
  OriginTransverseErrorMultiplier = cms.double( 1.0 ),
  MinOneOverPtError = cms.double( 1.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  magneticField = cms.string( "ParabolicMf" ),
  forceKinematicWithRegionDirection = cms.bool( False ),
  SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
)
# CKF pattern recognition for iter3.
process.hltIter3IterL3FromL1MuonCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
  cleanTrajectoryAfterInOut = cms.bool( False ),
  doSeedingRegionRebuilding = cms.bool( False ),
  onlyPixelHitsForSeedCleaner = cms.bool( False ),
  reverseTrajectories = cms.bool( False ),
  useHitsSplitting = cms.bool( False ),
  MeasurementTrackerEvent = cms.InputTag( "hltIter3IterL3FromL1MuonMaskedMeasurementTrackerEvent" ),
  src = cms.InputTag( "hltIter3IterL3FromL1MuonPixelSeeds" ),
  clustersToSkip = cms.InputTag( "" ),
  phase2clustersToSkip = cms.InputTag( "" ),
  # NOTE(review): references the HLTIter2 *IterL3Muon* (non-FromL1) builder PSet,
  # not an iter3/FromL1 one — confirm this reuse is intentional.
  TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter2IterL3MuonPSetGroupedCkfTrajectoryBuilderIT" ) ),
  TransientInitialStateEstimatorParameters = cms.PSet(
    propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
    numberMeasurementsForFit = cms.int32( 4 ),
    propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
  ),
  numHitsForSeedCleaner = cms.int32( 4 ),
  NavigationSchool = cms.string( "SimpleNavigationSchool" ),
  RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
  TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
  maxNSeeds = cms.uint32( 100000 ),
  maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final Kalman fit of the iter3 candidates (algorithm label "hltIter3").
process.hltIter3IterL3FromL1MuonCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
  useSimpleMF = cms.bool( True ),
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  src = cms.InputTag( "hltIter3IterL3FromL1MuonCkfTrackCandidates" ),
  clusterRemovalInfo = cms.InputTag( "" ),
  beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
  Fitter = cms.string( "hltESPFittingSmootherIT" ),
  useHitsSplitting = cms.bool( False ),
  alias = cms.untracked.string( "ctfWithMaterialTracks" ),
  TrajectoryInEvent = cms.bool( False ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  AlgorithmName = cms.string( "hltIter3" ),
  Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
  GeometricInnerState = cms.bool( True ),
  NavigationSchool = cms.string( "" ),
  MeasurementTracker = cms.string( "" ),
  MeasurementTrackerEvent = cms.InputTag( "hltIter3IterL3FromL1MuonMaskedMeasurementTrackerEvent" )
)
# Cut-based classifier for iter3 (same cut values as the iter2 classifier).
process.hltIter3IterL3FromL1MuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
  src = cms.InputTag( "hltIter3IterL3FromL1MuonCtfWithMaterialTracks" ),
  beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
  vertices = cms.InputTag( "hltIterL3FromL1MuonTrimmedPixelVertices" ),
  ignoreVertices = cms.bool( False ),
  qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
  mva = cms.PSet(
    minPixelHits = cms.vint32( 0, 0, 0 ),
    maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
    dr_par = cms.PSet(
      d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
      dr_par2 = cms.vdouble( 3.40282346639E38, 0.3, 3.40282346639E38 ),
      dr_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
      dr_exp = cms.vint32( 4, 4, 2147483647 ),
      d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
    ),
    maxLostLayers = cms.vint32( 1, 1, 1 ),
    min3DLayers = cms.vint32( 0, 0, 0 ),
    dz_par = cms.PSet(
      dz_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
      dz_par2 = cms.vdouble( 3.40282346639E38, 0.35, 3.40282346639E38 ),
      dz_exp = cms.vint32( 4, 4, 2147483647 )
    ),
    minNVtxTrk = cms.int32( 3 ),
    maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
    minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
    maxChi2 = cms.vdouble( 9999.0, 25.0, 3.40282346639E38 ),
    maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
    maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
    minLayers = cms.vint32( 3, 3, 3 )
  )
)
# High-purity selection for iter3.
process.hltIter3IterL3FromL1MuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
  originalSource = cms.InputTag( "hltIter3IterL3FromL1MuonCtfWithMaterialTracks" ),
  originalMVAVals = cms.InputTag( 'hltIter3IterL3FromL1MuonTrackCutClassifier','MVAValues' ),
  originalQualVals = cms.InputTag( 'hltIter3IterL3FromL1MuonTrackCutClassifier','QualityMasks' ),
  minQuality = cms.string( "highPurity" ),
  copyExtras = cms.untracked.bool( True ),
  copyTrajectories = cms.untracked.bool( False )
)
# Merge of the (iter0+iter2) merged collection with the iter3 high-purity tracks:
# the complete L1-muon-seeded inside-out track collection.
process.hltIter3IterL3FromL1MuonMerged = cms.EDProducer( "TrackListMerger",
  ShareFrac = cms.double( 0.19 ),
  FoundHitBonus = cms.double( 5.0 ),
  LostHitPenalty = cms.double( 20.0 ),
  MinPT = cms.double( 0.05 ),
  Epsilon = cms.double( -0.001 ),
  MaxNormalizedChisq = cms.double( 1000.0 ),
  MinFound = cms.int32( 3 ),
  TrackProducers = cms.VInputTag( 'hltIter2IterL3FromL1MuonMerged','hltIter3IterL3FromL1MuonTrackSelectionHighPurity' ),
  hasSelector = cms.vint32( 0, 0 ),
  indivShareFrac = cms.vdouble( 1.0, 1.0 ),
  selectedTrackQuals = cms.VInputTag( 'hltIter2IterL3FromL1MuonMerged','hltIter3IterL3FromL1MuonTrackSelectionHighPurity' ),
  setsToMerge = cms.VPSet(
    cms.PSet(  pQual = cms.bool( False ),
      tLists = cms.vint32( 0, 1 )
    )
  ),
  trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
  allowFirstHitShare = cms.bool( True ),
  newQuality = cms.string( "confirmed" ),
  copyExtras = cms.untracked.bool( True ),
  writeOnlyTrkQuals = cms.bool( False ),
  copyMVA = cms.bool( False )
)
# --- Final merges and global (tracker + muon-system) refit ---

# Merge of the outside-in (OI) high-purity tracks with the L2-muon-seeded
# inside-out iterations (hltIter3IterL3MuonMerged, the non-FromL1 chain).
process.hltIterL3MuonMerged = cms.EDProducer( "TrackListMerger",
  ShareFrac = cms.double( 0.19 ),
  FoundHitBonus = cms.double( 5.0 ),
  LostHitPenalty = cms.double( 20.0 ),
  MinPT = cms.double( 0.05 ),
  Epsilon = cms.double( -0.001 ),
  MaxNormalizedChisq = cms.double( 1000.0 ),
  MinFound = cms.int32( 3 ),
  TrackProducers = cms.VInputTag( 'hltIterL3OIMuonTrackSelectionHighPurity','hltIter3IterL3MuonMerged' ),
  hasSelector = cms.vint32( 0, 0 ),
  indivShareFrac = cms.vdouble( 1.0, 1.0 ),
  selectedTrackQuals = cms.VInputTag( 'hltIterL3OIMuonTrackSelectionHighPurity','hltIter3IterL3MuonMerged' ),
  setsToMerge = cms.VPSet(
    cms.PSet(  pQual = cms.bool( False ),
      tLists = cms.vint32( 0, 1 )
    )
  ),
  trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
  allowFirstHitShare = cms.bool( True ),
  newQuality = cms.string( "confirmed" ),
  copyExtras = cms.untracked.bool( True ),
  writeOnlyTrkQuals = cms.bool( False ),
  copyMVA = cms.bool( False )
)
# Union of the L2-seeded and L1-seeded track collections: the full IterL3
# tracker-track input for the global muon refit below.
process.hltIterL3MuonAndMuonFromL1Merged = cms.EDProducer( "TrackListMerger",
  ShareFrac = cms.double( 0.19 ),
  FoundHitBonus = cms.double( 5.0 ),
  LostHitPenalty = cms.double( 20.0 ),
  MinPT = cms.double( 0.05 ),
  Epsilon = cms.double( -0.001 ),
  MaxNormalizedChisq = cms.double( 1000.0 ),
  MinFound = cms.int32( 3 ),
  TrackProducers = cms.VInputTag( 'hltIterL3MuonMerged','hltIter3IterL3FromL1MuonMerged' ),
  hasSelector = cms.vint32( 0, 0 ),
  indivShareFrac = cms.vdouble( 1.0, 1.0 ),
  selectedTrackQuals = cms.VInputTag( 'hltIterL3MuonMerged','hltIter3IterL3FromL1MuonMerged' ),
  setsToMerge = cms.VPSet(
    cms.PSet(  pQual = cms.bool( False ),
      tLists = cms.vint32( 0, 1 )
    )
  ),
  trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
  allowFirstHitShare = cms.bool( True ),
  newQuality = cms.string( "confirmed" ),
  copyExtras = cms.untracked.bool( True ),
  writeOnlyTrkQuals = cms.bool( False ),
  copyMVA = cms.bool( False )
)
# L3 global muon producer: matches the merged tracker tracks to the
# vertex-updated L2 muons and performs a combined tracker+muon-system refit.
process.hltIterL3GlbMuon = cms.EDProducer( "L3MuonProducer",
  ServiceParameters = cms.PSet(
    RPCLayers = cms.bool( True ),
    UseMuonNavigation = cms.untracked.bool( True ),
    Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',
      'SteppingHelixPropagatorAny',
      'hltESPSmartPropagator',
      'hltESPSteppingHelixPropagatorOpposite' )
  ),
  MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
  # Controls how the refitted tracks are smoothed and stored in the event.
  TrackLoaderParameters = cms.PSet(
    MuonSeededTracksInstance = cms.untracked.string( "L2Seeded" ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    DoSmoothing = cms.bool( True ),
    SmoothTkTrack = cms.untracked.bool( False ),
    VertexConstraint = cms.bool( False ),
    MuonUpdatorAtVertexParameters = cms.PSet(
      MaxChi2 = cms.double( 1000000.0 ),
      BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 ),
      Propagator = cms.string( "hltESPSteppingHelixPropagatorOpposite" )
    ),
    PutTkTrackIntoEvent = cms.untracked.bool( False ),
    Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
  ),
  # Trajectory building: L2-muon / tracker-track matching cuts, tracking-region
  # definition around the L2 muon, and the global refit configuration.
  L3TrajBuilderParameters = cms.PSet(
    PtCut = cms.double( 1.0 ),
    TrackerPropagator = cms.string( "SteppingHelixPropagatorAny" ),
    GlobalMuonTrackMatcher = cms.PSet(
      Chi2Cut_3 = cms.double( 200.0 ),
      DeltaDCut_2 = cms.double( 10.0 ),
      Eta_threshold = cms.double( 1.2 ),
      Quality_2 = cms.double( 15.0 ),
      DeltaDCut_1 = cms.double( 40.0 ),
      Quality_3 = cms.double( 7.0 ),
      DeltaDCut_3 = cms.double( 15.0 ),
      Quality_1 = cms.double( 20.0 ),
      Pt_threshold1 = cms.double( 0.0 ),
      DeltaRCut_2 = cms.double( 0.2 ),
      DeltaRCut_1 = cms.double( 0.1 ),
      Pt_threshold2 = cms.double( 9.99999999E8 ),
      Chi2Cut_1 = cms.double( 50.0 ),
      Chi2Cut_2 = cms.double( 50.0 ),
      DeltaRCut_3 = cms.double( 1.0 ),
      LocChi2Cut = cms.double( 0.001 ),
      Propagator = cms.string( "hltESPSmartPropagator" ),
      MinPt = cms.double( 1.0 ),
      MinP = cms.double( 2.5 )
    ),
    ScaleTECxFactor = cms.double( -1.0 ),
    tkTrajUseVertex = cms.bool( False ),
    MuonTrackingRegionBuilder = cms.PSet(
      Rescale_Dz = cms.double( 4.0 ),
      Pt_fixed = cms.bool( False ),
      Eta_fixed = cms.bool( True ),
      Eta_min = cms.double( 0.1 ),
      DeltaZ = cms.double( 24.2 ),
      maxRegions = cms.int32( 2 ),
      EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
      UseVertex = cms.bool( False ),
      Z_fixed = cms.bool( False ),
      PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
      PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
      Rescale_phi = cms.double( 3.0 ),
      DeltaEta = cms.double( 0.2 ),
      precise = cms.bool( True ),
      OnDemand = cms.int32( -1 ),
      EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
      MeasurementTrackerName = cms.InputTag( "hltESPMeasurementTracker" ),
      vertexCollection = cms.InputTag( "pixelVertices" ),
      Pt_min = cms.double( 3.0 ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      Phi_fixed = cms.bool( True ),
      DeltaR = cms.double( 0.025 ),
      input = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
      DeltaPhi = cms.double( 0.15 ),
      Phi_min = cms.double( 0.1 ),
      Rescale_eta = cms.double( 3.0 )
    ),
    TrackTransformer = cms.PSet(
      Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
      RefitDirection = cms.string( "insideOut" ),
      RefitRPCHits = cms.bool( True ),
      Propagator = cms.string( "hltESPSmartPropagatorAny" ),
      DoPredictionsOnly = cms.bool( False ),
      TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
      Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
    ),
    tkTrajBeamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    RefitRPCHits = cms.bool( True ),
    tkTrajVertex = cms.InputTag( "Notused" ),
    # Global refit: per-subsystem chi2 cuts for DT/CSC/RPC/GEM hit selection.
    GlbRefitterParameters = cms.PSet(
      Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
      DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
      RefitFlag = cms.bool( True ),
      SkipStation = cms.int32( -1 ),
      Chi2CutRPC = cms.double( 1.0 ),
      PropDirForCosmics = cms.bool( False ),
      CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
      GEMRecHitLabel = cms.InputTag( "hltGemRecHits" ),
      HitThreshold = cms.int32( 1 ),
      Chi2CutGEM = cms.double( 1.0 ),
      DYTthrs = cms.vint32( 30, 15 ),
      TrackerSkipSystem = cms.int32( -1 ),
      RefitDirection = cms.string( "insideOut" ),
      Chi2CutCSC = cms.double( 150.0 ),
      Chi2CutDT = cms.double( 10.0 ),
      RefitRPCHits = cms.bool( True ),
      TrackerSkipSection = cms.int32( -1 ),
      Propagator = cms.string( "hltESPSmartPropagatorAny" ),
      DoPredictionsOnly = cms.bool( False ),
      TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      MuonHitsOption = cms.int32( 1 ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
    ),
    PCut = cms.double( 2.5 ),
    tkTrajMaxDXYBeamSpot = cms.double( 9999.0 ),
    TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    tkTrajMaxChi2 = cms.double( 9999.0 ),
    MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
    ScaleTECyFactor = cms.double( -1.0 ),
    tkTrajLabel = cms.InputTag( "hltIterL3MuonAndMuonFromL1Merged" )
  )
)
process.hltIterL3MuonsNoID = cms.EDProducer( "MuonIdProducer",
MuonCaloCompatibility = cms.PSet(
delta_eta = cms.double( 0.02 ),
delta_phi = cms.double( 0.02 ),
allSiPMHO = cms.bool( False ),
MuonTemplateFileName = cms.FileInPath( "RecoMuon/MuonIdentification/data/MuID_templates_muons_lowPt_3_1_norm.root" ),
PionTemplateFileName = cms.FileInPath( "RecoMuon/MuonIdentification/data/MuID_templates_pions_lowPt_3_1_norm.root" )
),
TrackAssociatorParameters = cms.PSet(
useMuon = cms.bool( True ),
truthMatch = cms.bool( False ),
usePreshower = cms.bool( False ),
dRPreshowerPreselection = cms.double( 0.2 ),
muonMaxDistanceSigmaY = cms.double( 0.0 ),
useEcal = cms.bool( False ),
muonMaxDistanceSigmaX = cms.double( 0.0 ),
dRMuon = cms.double( 9999.0 ),
dREcal = cms.double( 9999.0 ),
CSCSegmentCollectionLabel = cms.InputTag( "hltCscSegments" ),
DTRecSegment4DCollectionLabel = cms.InputTag( "hltDt4DSegments" ),
EBRecHitCollectionLabel = cms.InputTag( "Notused" ),
useGEM = cms.bool( True ),
GEMSegmentCollectionLabel = cms.InputTag( "hltGemSegments" ),
CaloTowerCollectionLabel = cms.InputTag( "Notused" ),
propagateAllDirections = cms.bool( True ),
muonMaxDistanceY = cms.double( 5.0 ),
useHO = cms.bool( False ),
muonMaxDistanceX = cms.double( 5.0 ),
trajectoryUncertaintyTolerance = cms.double( -1.0 ),
useHcal = cms.bool( False ),
HBHERecHitCollectionLabel = cms.InputTag( "Notused" ),
accountForTrajectoryChangeCalo = cms.bool( False ),
dREcalPreselection = cms.double( 0.05 ),
useCalo = cms.bool( False ),
dRMuonPreselection = cms.double( 0.2 ),
EERecHitCollectionLabel = cms.InputTag( "Notused" ),
dRHcal = cms.double( 9999.0 ),
dRHcalPreselection = cms.double( 0.2 ),
HORecHitCollectionLabel = cms.InputTag( "Notused" )
),
CaloExtractorPSet = cms.PSet(
DR_Veto_H = cms.double( 0.1 ),
CenterConeOnCalIntersection = cms.bool( False ),
NoiseTow_EE = cms.double( 0.15 ),
Noise_EB = cms.double( 0.025 ),
Noise_HE = cms.double( 0.2 ),
DR_Veto_E = cms.double( 0.07 ),
NoiseTow_EB = cms.double( 0.04 ),
Noise_EE = cms.double( 0.1 ),
UseRecHitsFlag = cms.bool( False ),
DR_Max = cms.double( 1.0 ),
DepositLabel = cms.untracked.string( "Cal" ),
Noise_HO = cms.double( 0.2 ),
DR_Veto_HO = cms.double( 0.1 ),
Threshold_H = cms.double( 0.5 ),
PrintTimeReport = cms.untracked.bool( False ),
Threshold_E = cms.double( 0.2 ),
PropagatorName = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
ComponentName = cms.string( "CaloExtractorByAssociator" ),
Threshold_HO = cms.double( 0.5 ),
DepositInstanceLabels = cms.vstring( 'ecal',
'hcal',
'ho' ),
ServiceParameters = cms.PSet(
RPCLayers = cms.bool( False ),
UseMuonNavigation = cms.untracked.bool( False ),
Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' )
),
TrackAssociatorParameters = cms.PSet(
useMuon = cms.bool( False ),
truthMatch = cms.bool( False ),
usePreshower = cms.bool( False ),
dRPreshowerPreselection = cms.double( 0.2 ),
muonMaxDistanceSigmaY = cms.double( 0.0 ),
useEcal = cms.bool( False ),
muonMaxDistanceSigmaX = cms.double( 0.0 ),
dRMuon = cms.double( 9999.0 ),
dREcal = cms.double( 1.0 ),
CSCSegmentCollectionLabel = cms.InputTag( "hltCscSegments" ),
DTRecSegment4DCollectionLabel = cms.InputTag( "hltDt4DSegments" ),
EBRecHitCollectionLabel = cms.InputTag( "Notused" ),
CaloTowerCollectionLabel = cms.InputTag( "Notused" ),
propagateAllDirections = cms.bool( True ),
muonMaxDistanceY = cms.double( 5.0 ),
useHO = cms.bool( False ),
muonMaxDistanceX = cms.double( 5.0 ),
trajectoryUncertaintyTolerance = cms.double( -1.0 ),
useHcal = cms.bool( False ),
HBHERecHitCollectionLabel = cms.InputTag( "Notused" ),
accountForTrajectoryChangeCalo = cms.bool( False ),
dREcalPreselection = cms.double( 1.0 ),
useCalo = cms.bool( True ),
dRMuonPreselection = cms.double( 0.2 ),
EERecHitCollectionLabel = cms.InputTag( "Notused" ),
dRHcal = cms.double( 1.0 ),
dRHcalPreselection = cms.double( 1.0 ),
HORecHitCollectionLabel = cms.InputTag( "Notused" )
),
Noise_HB = cms.double( 0.2 )
),
TrackExtractorPSet = cms.PSet(
Diff_z = cms.double( 0.2 ),
inputTrackCollection = cms.InputTag( "hltIter3IterL3FromL1MuonMerged" ),
Chi2Ndof_Max = cms.double( 1.0E64 ),
BeamSpotLabel = cms.InputTag( "hltOnlineBeamSpot" ),
DR_Veto = cms.double( 0.01 ),
Pt_Min = cms.double( -1.0 ),
DR_Max = cms.double( 1.0 ),
NHits_Min = cms.uint32( 0 ),
Chi2Prob_Min = cms.double( -1.0 ),
Diff_r = cms.double( 0.1 ),
BeamlineOption = cms.string( "BeamSpotFromEvent" ),
ComponentName = cms.string( "TrackExtractor" )
),
JetExtractorPSet = cms.PSet(
JetCollectionLabel = cms.InputTag( "Notused" ),
DR_Veto = cms.double( 0.1 ),
DR_Max = cms.double( 1.0 ),
ExcludeMuonVeto = cms.bool( True ),
PrintTimeReport = cms.untracked.bool( False ),
PropagatorName = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
ComponentName = cms.string( "JetExtractor" ),
ServiceParameters = cms.PSet(
RPCLayers = cms.bool( False ),
UseMuonNavigation = cms.untracked.bool( False ),
Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' )
),
TrackAssociatorParameters = cms.PSet(
useMuon = cms.bool( False ),
truthMatch = cms.bool( False ),
usePreshower = cms.bool( False ),
dRPreshowerPreselection = cms.double( 0.2 ),
muonMaxDistanceSigmaY = cms.double( 0.0 ),
useEcal = cms.bool( False ),
muonMaxDistanceSigmaX = cms.double( 0.0 ),
dRMuon = cms.double( 9999.0 ),
dREcal = cms.double( 0.5 ),
CSCSegmentCollectionLabel = cms.InputTag( "hltCscSegments" ),
DTRecSegment4DCollectionLabel = cms.InputTag( "hltDt4DSegments" ),
EBRecHitCollectionLabel = cms.InputTag( "Notused" ),
CaloTowerCollectionLabel = cms.InputTag( "Notused" ),
propagateAllDirections = cms.bool( True ),
muonMaxDistanceY = cms.double( 5.0 ),
useHO = cms.bool( False ),
muonMaxDistanceX = cms.double( 5.0 ),
trajectoryUncertaintyTolerance = cms.double( -1.0 ),
useHcal = cms.bool( False ),
HBHERecHitCollectionLabel = cms.InputTag( "Notused" ),
accountForTrajectoryChangeCalo = cms.bool( False ),
dREcalPreselection = cms.double( 0.5 ),
useCalo = cms.bool( True ),
dRMuonPreselection = cms.double( 0.2 ),
EERecHitCollectionLabel = cms.InputTag( "Notused" ),
dRHcal = cms.double( 0.5 ),
dRHcalPreselection = cms.double( 0.5 ),
HORecHitCollectionLabel = cms.InputTag( "Notused" )
),
Threshold = cms.double( 5.0 )
),
trackDepositName = cms.string( "tracker" ),
ecalDepositName = cms.string( "ecal" ),
hcalDepositName = cms.string( "hcal" ),
hoDepositName = cms.string( "ho" ),
jetDepositName = cms.string( "jets" ),
TimingFillerParameters = cms.PSet(
DTTimingParameters = cms.PSet(
HitError = cms.double( 6.0 ),
MatchParameters = cms.PSet(
TightMatchDT = cms.bool( False ),
DTradius = cms.double( 0.01 ),
TightMatchCSC = cms.bool( True ),
CSCsegments = cms.InputTag( "hltCscSegments" ),
DTsegments = cms.InputTag( "hltDt4DSegments" )
),
debug = cms.bool( False ),
DoWireCorr = cms.bool( False ),
RequireBothProjections = cms.bool( False ),
DTTimeOffset = cms.double( 2.7 ),
PruneCut = cms.double( 10000.0 ),
DTsegments = cms.InputTag( "hltDt4DSegments" ),
UseSegmentT0 = cms.bool( False ),
HitsMin = cms.int32( 5 ),
DropTheta = cms.bool( True ),
ServiceParameters = cms.PSet(
RPCLayers = cms.bool( True ),
Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' )
)
),
UseCSC = cms.bool( True ),
CSCTimingParameters = cms.PSet(
MatchParameters = cms.PSet(
TightMatchDT = cms.bool( False ),
DTradius = cms.double( 0.01 ),
TightMatchCSC = cms.bool( True ),
CSCsegments = cms.InputTag( "hltCscSegments" ),
DTsegments = cms.InputTag( "hltDt4DSegments" )
),
debug = cms.bool( False ),
CSCWireTimeOffset = cms.double( 0.0 ),
CSCStripError = cms.double( 7.0 ),
CSCTimeOffset = cms.double( 0.0 ),
CSCWireError = cms.double( 8.6 ),
PruneCut = cms.double( 100.0 ),
CSCsegments = cms.InputTag( "hltCscSegments" ),
UseStripTime = cms.bool( True ),
CSCStripTimeOffset = cms.double( 0.0 ),
UseWireTime = cms.bool( True ),
ServiceParameters = cms.PSet(
RPCLayers = cms.bool( True ),
Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' )
)
),
ErrorDT = cms.double( 6.0 ),
EcalEnergyCut = cms.double( 0.4 ),
UseECAL = cms.bool( True ),
ErrorEB = cms.double( 2.085 ),
UseDT = cms.bool( True ),
ErrorEE = cms.double( 6.95 ),
ErrorCSC = cms.double( 7.4 )
),
ShowerDigiFillerParameters = cms.PSet(
cscDigiCollectionLabel = cms.InputTag( 'muonCSCDigis','MuonCSCStripDigi' ),
digiMaxDistanceX = cms.double( 25.0 ),
dtDigiCollectionLabel = cms.InputTag( "muonDTDigis" )
),
TrackerKinkFinderParameters = cms.PSet(
usePosition = cms.bool( False ),
diagonalOnly = cms.bool( False )
),
fillEnergy = cms.bool( False ),
storeCrossedHcalRecHits = cms.bool( False ),
maxAbsPullX = cms.double( 4.0 ),
maxAbsEta = cms.double( 3.0 ),
minPt = cms.double( 2.0 ),
inputCollectionTypes = cms.vstring( 'inner tracks',
'links',
'outer tracks' ),
addExtraSoftMuons = cms.bool( False ),
fillGlobalTrackRefits = cms.bool( False ),
debugWithTruthMatching = cms.bool( False ),
inputCollectionLabels = cms.VInputTag( 'hltIterL3MuonAndMuonFromL1Merged','hltIterL3GlbMuon','hltL2Muons:UpdatedAtVtx' ),
fillCaloCompatibility = cms.bool( False ),
maxAbsPullY = cms.double( 9999.0 ),
maxAbsDy = cms.double( 9999.0 ),
minP = cms.double( 0.0 ),
minPCaloMuon = cms.double( 1.0E9 ),
maxAbsDx = cms.double( 3.0 ),
fillIsolation = cms.bool( False ),
writeIsoDeposits = cms.bool( False ),
minNumberOfMatches = cms.int32( 1 ),
fillMatching = cms.bool( True ),
fillShowerDigis = cms.bool( False ),
ptThresholdToFillCandidateP4WithGlobalFit = cms.double( 200.0 ),
sigmaThresholdToFillCandidateP4WithGlobalFit = cms.double( 2.0 ),
fillGlobalTrackQuality = cms.bool( False ),
globalTrackQualityInputTag = cms.InputTag( "glbTrackQual" ),
fillTrackerKink = cms.bool( False ),
minCaloCompatibility = cms.double( 0.6 ),
runArbitrationCleaner = cms.bool( False ),
arbitrationCleanerOptions = cms.PSet(
OverlapDTheta = cms.double( 0.02 ),
Overlap = cms.bool( True ),
Clustering = cms.bool( True ),
ME1a = cms.bool( True ),
ClusterDTheta = cms.double( 0.02 ),
ClusterDPhi = cms.double( 0.6 ),
OverlapDPhi = cms.double( 0.0786 )
),
arbitrateTrackerMuons = cms.bool( True )
)
# --- IterL3 muon identification and derived collections ---------------------
# Keeps IterL3 muons passing the "trigger id loose" selection; every explicit
# cut parameter is set to its pass-through value (0 / 9999.0), so only the
# applyTriggerIdLoose flag is effective here.
process.hltIterL3Muons = cms.EDProducer( "MuonIDFilterProducerForHLT",
    inputMuonCollection = cms.InputTag( "hltIterL3MuonsNoID" ),
    applyTriggerIdLoose = cms.bool( True ),
    typeMuon = cms.uint32( 0 ),
    allowedTypeMask = cms.uint32( 0 ),
    requiredTypeMask = cms.uint32( 0 ),
    minNMuonHits = cms.int32( 0 ),
    minNMuonStations = cms.int32( 0 ),
    minNTrkLayers = cms.int32( 0 ),
    minTrkHits = cms.int32( 0 ),
    minPixLayer = cms.int32( 0 ),
    minPixHits = cms.int32( 0 ),
    minPt = cms.double( 0.0 ),
    maxNormalizedChi2 = cms.double( 9999.0 )
)
# Track-to-muon link objects for the identified IterL3 muons.
process.hltL3MuonsIterL3Links = cms.EDProducer( "MuonLinksProducer",
    inputCollection = cms.InputTag( "hltIterL3Muons" )
)
# Selects, from the merged L3 track collection, the tracks associated with the
# identified IterL3 muons.  No MVA values are copied (originalMVAVals="none").
process.hltIterL3MuonTracks = cms.EDProducer( "HLTMuonTrackSelector",
    track = cms.InputTag( "hltIterL3MuonAndMuonFromL1Merged" ),
    muon = cms.InputTag( "hltIterL3Muons" ),
    originalMVAVals = cms.InputTag( "none" ),
    copyMVA = cms.bool( False ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# Charged candidates built from the identified IterL3 muons (used by muon
# filters downstream).
process.hltIterL3MuonCandidates = cms.EDProducer( "L3MuonCandidateProducerFromMuons",
    InputObjects = cms.InputTag( "hltIterL3Muons" )
)
# --- Pixel track and pixel vertex reconstruction ----------------------------
# Helix-projection fitter used for pixel tracks.
process.hltPixelTracksFitter = cms.EDProducer( "PixelFitterByHelixProjectionsProducer",
    scaleErrorsForBPix1 = cms.bool( False ),
    scaleFactor = cms.double( 0.65 )
)
# Kinematic filter for pixel tracks (pT > 0.1 GeV, |tip| < 1 cm, chi2 < 1000).
process.hltPixelTracksFilter = cms.EDProducer( "PixelTrackFilterByKinematicsProducer",
    ptMin = cms.double( 0.1 ),
    nSigmaInvPtTolerance = cms.double( 0.0 ),
    tipMax = cms.double( 1.0 ),
    nSigmaTipMaxTolerance = cms.double( 0.0 ),
    chi2 = cms.double( 1000.0 )
)
# Global tracking region centred on the online beam spot (pT > 0.8 GeV,
# origin radius 0.02 cm, +-4 sigma in z).
process.hltPixelTracksTrackingRegions = cms.EDProducer( "GlobalTrackingRegionFromBeamSpotEDProducer",
    RegionPSet = cms.PSet(
      nSigmaZ = cms.double( 4.0 ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      ptMin = cms.double( 0.8 ),
      originRadius = cms.double( 0.02 ),
      precise = cms.bool( True )
    )
)
# Converts the SoA pixel-track product into legacy reco::Tracks, keeping
# tracks of at least "loose" quality.
process.hltPixelTracks = cms.EDProducer( "PixelTrackProducerFromSoA",
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    trackSrc = cms.InputTag( "hltPixelTracksSoA" ),
    pixelRecHitLegacySrc = cms.InputTag( "hltSiPixelRecHits" ),
    minNumberOfHits = cms.int32( 0 ),
    minQuality = cms.string( "loose" )
)
# Converts the SoA pixel-vertex product into legacy vertices.
process.hltPixelVertices = cms.EDProducer( "PixelVertexProducerFromSoA",
    TrackCollection = cms.InputTag( "hltPixelTracks" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    src = cms.InputTag( "hltPixelVerticesSoA" )
)
# Trims the pixel-vertex collection to at most 100 vertices, keeping those
# whose sumPt2 is at least 30% of the leading vertex's (fractionSumPt2=0.3).
process.hltTrimmedPixelVertices = cms.EDProducer( "PixelVertexCollectionTrimmer",
    src = cms.InputTag( "hltPixelVertices" ),
    maxVtx = cms.uint32( 100 ),
    fractionSumPt2 = cms.double( 0.3 ),
    minSumPt2 = cms.double( 0.0 ),
    PVcomparer = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPvClusterComparerForIT" ) )
)
# --- Iteration-0 full tracking: seeds -> candidates -> final fit ------------
# Seeds built from pixel (proto)tracks around the trimmed pixel vertices.
process.hltIter0PFLowPixelSeedsFromPixelTracks = cms.EDProducer( "SeedGeneratorFromProtoTracksEDProducer",
    InputCollection = cms.InputTag( "hltPixelTracks" ),
    InputVertexCollection = cms.InputTag( "hltTrimmedPixelVertices" ),
    originHalfLength = cms.double( 0.3 ),
    originRadius = cms.double( 0.1 ),
    useProtoTrackKinematics = cms.bool( False ),
    useEventsWithNoVertex = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    usePV = cms.bool( False ),
    includeFourthHit = cms.bool( True ),
    SeedCreatorPSet = cms.PSet(  refToPSet_ = cms.string( "HLTSeedFromProtoTracks" ) )
)
# CKF pattern recognition producing track candidates from the iter0 seeds;
# trajectory-building parameters come from the shared
# HLTIter0GroupedCkfTrajectoryBuilderIT PSet.
process.hltIter0PFlowCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( False ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" ),
    src = cms.InputTag( "hltIter0PFLowPixelSeedsFromPixelTracks" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter0GroupedCkfTrajectoryBuilderIT" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 100000 ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final track fit (Kalman fit/smooth) for the iter0 candidates, using the
# simplified parabolic magnetic field.
process.hltIter0PFlowCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( True ),
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    src = cms.InputTag( "hltIter0PFlowCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "hltIter0" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "" ),
    MeasurementTracker = cms.string( "" ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" )
)
# --- Track quality classification and merging -------------------------------
# Cut-based classifier for the iter0 tracks; the three entries in each vector
# correspond to the loose / tight / high-purity working points.
process.hltIter0PFlowTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIter0PFlowCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "hltTrimmedPixelVertices" ),
    ignoreVertices = cms.bool( False ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet(
      minPixelHits = cms.vint32( 0, 0, 0 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 15.0 ),
      dr_par = cms.PSet(
        d0err = cms.vdouble( 0.003, 0.003, 0.003 ),
        dr_par2 = cms.vdouble( 3.40282346639E38, 0.6, 0.6 ),
        dr_par1 = cms.vdouble( 3.40282346639E38, 0.8, 0.8 ),
        dr_exp = cms.vint32( 4, 4, 4 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 0.001 )
      ),
      maxLostLayers = cms.vint32( 1, 1, 1 ),
      min3DLayers = cms.vint32( 0, 0, 0 ),
      dz_par = cms.PSet(
        dz_par1 = cms.vdouble( 3.40282346639E38, 0.75, 0.75 ),
        dz_par2 = cms.vdouble( 3.40282346639E38, 0.5, 0.5 ),
        dz_exp = cms.vint32( 4, 4, 4 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 9999.0, 25.0, 16.0 ),
      maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 3, 3 )
    )
)
# Keeps only the iter0 tracks classified as highPurity.
process.hltMergedTracks = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIter0PFlowCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIter0PFlowTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIter0PFlowTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# Merges the IterL3 muon tracks with the general hltMergedTracks, resolving
# duplicates via shared-hit fraction (ShareFrac=0.19) with hit bonus/penalty
# weighting; merged tracks get the "confirmed" quality flag.
process.hltPFMuonMerging = cms.EDProducer( "TrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    FoundHitBonus = cms.double( 5.0 ),
    LostHitPenalty = cms.double( 20.0 ),
    MinPT = cms.double( 0.05 ),
    Epsilon = cms.double( -0.001 ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    MinFound = cms.int32( 3 ),
    TrackProducers = cms.VInputTag( 'hltIterL3MuonTracks','hltMergedTracks' ),
    hasSelector = cms.vint32( 0, 0 ),
    indivShareFrac = cms.vdouble( 1.0, 1.0 ),
    selectedTrackQuals = cms.VInputTag( 'hltIterL3MuonTracks','hltMergedTracks' ),
    setsToMerge = cms.VPSet(
      cms.PSet(  pQual = cms.bool( False ),
        tLists = cms.vint32( 0, 1 )
      )
    ),
    trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    copyExtras = cms.untracked.bool( True ),
    writeOnlyTrkQuals = cms.bool( False ),
    copyMVA = cms.bool( False )
)
# --- Primary vertices from the merged track collection ----------------------
# Runs the adaptive vertex fitter twice: once unconstrained (label "") and
# once with the beam-spot constraint (label "WithBS").  Track clustering in z
# uses the deterministic-annealing algorithm (DA_vect).
process.hltVerticesPF = cms.EDProducer( "PrimaryVertexProducer",
    vertexCollections = cms.VPSet(
      cms.PSet(  chi2cutoff = cms.double( 3.0 ),
        label = cms.string( "" ),
        useBeamConstraint = cms.bool( False ),
        minNdof = cms.double( 0.0 ),
        maxDistanceToBeam = cms.double( 1.0 ),
        algorithm = cms.string( "AdaptiveVertexFitter" )
      ),
      cms.PSet(  chi2cutoff = cms.double( 3.0 ),
        label = cms.string( "WithBS" ),
        useBeamConstraint = cms.bool( True ),
        minNdof = cms.double( 0.0 ),
        maxDistanceToBeam = cms.double( 1.0 ),
        algorithm = cms.string( "AdaptiveVertexFitter" )
      )
    ),
    verbose = cms.untracked.bool( False ),
    TkFilterParameters = cms.PSet(
      maxEta = cms.double( 100.0 ),
      minPt = cms.double( 0.0 ),
      minSiliconLayersWithHits = cms.int32( 5 ),
      minPixelLayersWithHits = cms.int32( 2 ),
      maxNormalizedChi2 = cms.double( 20.0 ),
      trackQuality = cms.string( "any" ),
      algorithm = cms.string( "filter" ),
      maxD0Significance = cms.double( 999.0 )
    ),
    beamSpotLabel = cms.InputTag( "hltOnlineBeamSpot" ),
    TrackLabel = cms.InputTag( "hltPFMuonMerging" ),
    TrackTimeResosLabel = cms.InputTag( "dummy_default" ),
    TrackTimesLabel = cms.InputTag( "dummy_default" ),
    TkClusParameters = cms.PSet(
      TkDAClusParameters = cms.PSet(
        zmerge = cms.double( 0.01 ),
        Tstop = cms.double( 0.5 ),
        d0CutOff = cms.double( 999.0 ),
        dzCutOff = cms.double( 4.0 ),
        vertexSize = cms.double( 0.15 ),
        coolingFactor = cms.double( 0.6 ),
        Tpurge = cms.double( 2.0 ),
        Tmin = cms.double( 2.4 ),
        uniquetrkweight = cms.double( 0.9 )
      ),
      algorithm = cms.string( "DA_vect" )
    ),
    isRecoveryIteration = cms.bool( False ),
    recoveryVtxCollection = cms.InputTag( "" )
)
# Selects vertices with |z| < 24 cm, rho < 2 cm and ndof >= 4.
process.hltVerticesPFSelector = cms.EDFilter( "PrimaryVertexObjectFilter",
    filterParams = cms.PSet(
      maxZ = cms.double( 24.0 ),
      minNdof = cms.double( 4.0 ),
      maxRho = cms.double( 2.0 ),
      pvSrc = cms.InputTag( "hltVerticesPF" )
    ),
    src = cms.InputTag( "hltVerticesPF" )
)
# Requires at least one non-fake selected vertex (filter=True rejects the
# event otherwise).
process.hltVerticesPFFilter = cms.EDFilter( "VertexSelector",
    src = cms.InputTag( "hltVerticesPFSelector" ),
    cut = cms.string( "!isFake" ),
    filter = cms.bool( True )
)
# Always-pass filter used to terminate paths.
process.hltBoolEnd = cms.EDFilter( "HLTBool",
    result = cms.bool( True )
)
# --- L1 seed filters and path prescalers ------------------------------------
# Accepts one event out of every 107 based on the TCDS event number ("fat"
# events used for full raw-data payloads).
process.hltL1EventNumberL1Fat = cms.EDFilter( "HLTL1NumberFilter",
    rawInput = cms.InputTag( "rawDataCollector" ),
    period = cms.uint32( 107 ),
    invert = cms.bool( False ),
    fedId = cms.int32( 1024 ),
    useTCDSEventNumber = cms.bool( True )
)
# Prescaler for the Physics path.
process.hltPrePhysics = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Prescaler for the DST_Physics path.
process.hltPreDSTPhysics = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Selects random-trigger events (trigger type 3).
process.hltRandomEventsFilter = cms.EDFilter( "HLTTriggerTypeFilter",
    SelectedTriggerType = cms.int32( 3 )
)
process.hltPreRandom = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreZeroBias = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# L1 seed: unpaired bunch, BPTX minus side.
process.hltL1sL1UnpairedBunchBptxMinus = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_UnpairedBunchBptxMinus" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreHIL1UnpairedBunchBptxMinusForPPRef = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# L1 seed: unpaired bunch, BPTX plus side.
process.hltL1sL1UnpairedBunchBptxPlus = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_UnpairedBunchBptxPlus" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreHIL1UnpairedBunchBptxPlusForPPRef = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# L1 seed: no BPTX coincidence (cosmics / beam-gap events).
process.hltL1sNotBptxOR = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_NotBptxOR" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreHIL1NotBptxORForPPRef = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# L1 seed (OR of jet triggers) feeding the 5 TeV pp beam-spot HT path.
process.hltL1sHTTForBeamSpotPP5TeV = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_DoubleJet40er2p7 OR L1_DoubleJet50er2p7 OR L1_DoubleJet60er2p7 OR L1_DoubleJet80er2p7 OR L1_DoubleJet100er2p7 OR L1_DoubleJet112er2p7 OR L1_DoubleJet120er2p7 OR L1_DoubleJet150er2p7 OR L1_SingleJet80 OR L1_SingleJet90 OR L1_SingleJet120 OR L1_SingleJet140 OR L1_SingleJet150 OR L1_SingleJet160 OR L1_SingleJet170 OR L1_SingleJet180 OR L1_SingleJet200" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreHIHT80BeamspotppRef5TeV = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# --- AK4 calorimeter jets ----------------------------------------------------
# Anti-kT (R=0.4) calo jets clustered from calo towers with a deterministic
# fastjet seed.  All grooming/substructure options are switched off; the many
# -1.0 values are the producer's disabled-parameter convention.
process.hltAK4CaloJets = cms.EDProducer( "FastjetJetProducer",
    useMassDropTagger = cms.bool( False ),
    useFiltering = cms.bool( False ),
    useDynamicFiltering = cms.bool( False ),
    useTrimming = cms.bool( False ),
    usePruning = cms.bool( False ),
    useCMSBoostedTauSeedingAlgorithm = cms.bool( False ),
    useKtPruning = cms.bool( False ),
    useConstituentSubtraction = cms.bool( False ),
    useSoftDrop = cms.bool( False ),
    correctShape = cms.bool( False ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( False ),
    muCut = cms.double( -1.0 ),
    yCut = cms.double( -1.0 ),
    rFilt = cms.double( -1.0 ),
    rFiltFactor = cms.double( -1.0 ),
    trimPtFracMin = cms.double( -1.0 ),
    zcut = cms.double( -1.0 ),
    rcut_factor = cms.double( -1.0 ),
    csRho_EtaMax = cms.double( -1.0 ),
    csRParam = cms.double( -1.0 ),
    beta = cms.double( -1.0 ),
    R0 = cms.double( -1.0 ),
    gridMaxRapidity = cms.double( -1.0 ),
    gridSpacing = cms.double( -1.0 ),
    DzTrVtxMax = cms.double( 0.0 ),
    DxyTrVtxMax = cms.double( 0.0 ),
    MaxVtxZ = cms.double( 15.0 ),
    subjetPtMin = cms.double( -1.0 ),
    muMin = cms.double( -1.0 ),
    muMax = cms.double( -1.0 ),
    yMin = cms.double( -1.0 ),
    yMax = cms.double( -1.0 ),
    dRMin = cms.double( -1.0 ),
    dRMax = cms.double( -1.0 ),
    maxDepth = cms.int32( -1 ),
    nFilt = cms.int32( -1 ),
    MinVtxNdof = cms.int32( 5 ),
    src = cms.InputTag( "hltTowerMakerForAll" ),
    srcPVs = cms.InputTag( "NotUsed" ),
    jetType = cms.string( "CaloJet" ),
    jetAlgorithm = cms.string( "AntiKt" ),
    rParam = cms.double( 0.4 ),
    inputEtMin = cms.double( 0.3 ),
    inputEMin = cms.double( 0.0 ),
    jetPtMin = cms.double( 1.0 ),
    doPVCorrection = cms.bool( False ),
    doAreaFastjet = cms.bool( False ),
    doRhoFastjet = cms.bool( False ),
    doPUOffsetCorr = cms.bool( False ),
    puPtMin = cms.double( 10.0 ),
    nSigmaPU = cms.double( 1.0 ),
    radiusPU = cms.double( 0.4 ),
    subtractorName = cms.string( "" ),
    useExplicitGhosts = cms.bool( False ),
    doAreaDiskApprox = cms.bool( True ),
    voronoiRfact = cms.double( 0.9 ),
    Rho_EtaMax = cms.double( 4.4 ),
    Ghost_EtaMax = cms.double( 6.0 ),
    Active_Area_Repeats = cms.int32( 5 ),
    GhostArea = cms.double( 0.01 ),
    restrictInputs = cms.bool( False ),
    maxInputs = cms.uint32( 1 ),
    writeCompound = cms.bool( False ),
    writeJetsWithConst = cms.bool( False ),
    doFastJetNonUniform = cms.bool( False ),
    useDeterministicSeed = cms.bool( True ),
    minSeed = cms.uint32( 14327 ),
    verbosity = cms.int32( 0 ),
    puWidth = cms.double( 0.0 ),
    nExclude = cms.uint32( 0 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    puCenters = cms.vdouble(  ),
    applyWeight = cms.bool( False ),
    srcWeights = cms.InputTag( "" ),
    minimumTowersFraction = cms.double( 0.0 ),
    jetCollInstanceName = cms.string( "" ),
    sumRecHits = cms.bool( False )
)
# --- Calo jet ID and jet energy corrections ---------------------------------
# Calo-jet ID based on rec-hit information (N90hits >= 2, EMF window).
process.hltAK4CaloJetsIDPassed = cms.EDProducer( "HLTCaloJetIDProducer",
    min_N90 = cms.int32( -2 ),
    min_N90hits = cms.int32( 2 ),
    min_EMF = cms.double( 1.0E-6 ),
    max_EMF = cms.double( 999.0 ),
    jetsInput = cms.InputTag( "hltAK4CaloJets" ),
    JetIDParams = cms.PSet(
      hfRecHitsColl = cms.InputTag( "hltHfreco" ),
      hoRecHitsColl = cms.InputTag( "hltHoreco" ),
      ebRecHitsColl = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
      hbheRecHitsColl = cms.InputTag( "hltHbhereco" ),
      useRecHits = cms.bool( True ),
      eeRecHitsColl = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' )
    )
)
# Event pile-up density (rho) computed from calo towers on a fixed grid.
process.hltFixedGridRhoFastjetAllCalo = cms.EDProducer( "FixedGridRhoProducerFastjet",
    pfCandidatesTag = cms.InputTag( "hltTowerMakerForAll" ),
    maxRapidity = cms.double( 5.0 ),
    gridSpacing = cms.double( 0.55 )
)
# Individual JEC levels for AK4 calo jets (L1FastJet, L2Relative, L3Absolute,
# L2L3Residual), then chained into a single corrector.
process.hltAK4CaloFastJetCorrector = cms.EDProducer( "L1FastjetCorrectorProducer",
    level = cms.string( "L1FastJet" ),
    algorithm = cms.string( "AK4CaloHLT" ),
    srcRho = cms.InputTag( "hltFixedGridRhoFastjetAllCalo" )
)
process.hltAK4CaloRelativeCorrector = cms.EDProducer( "LXXXCorrectorProducer",
    level = cms.string( "L2Relative" ),
    algorithm = cms.string( "AK4CaloHLT" )
)
process.hltAK4CaloAbsoluteCorrector = cms.EDProducer( "LXXXCorrectorProducer",
    level = cms.string( "L3Absolute" ),
    algorithm = cms.string( "AK4CaloHLT" )
)
process.hltAK4CaloResidualCorrector = cms.EDProducer( "LXXXCorrectorProducer",
    level = cms.string( "L2L3Residual" ),
    algorithm = cms.string( "AK4CaloHLT" )
)
process.hltAK4CaloCorrector = cms.EDProducer( "ChainedJetCorrectorProducer",
    correctors = cms.VInputTag( 'hltAK4CaloFastJetCorrector','hltAK4CaloRelativeCorrector','hltAK4CaloAbsoluteCorrector','hltAK4CaloResidualCorrector' )
)
# Corrected jet collections: all jets, and the ID-passed subset.
process.hltAK4CaloJetsCorrected = cms.EDProducer( "CorrectedCaloJetProducer",
    src = cms.InputTag( "hltAK4CaloJets" ),
    correctors = cms.VInputTag( 'hltAK4CaloCorrector' )
)
process.hltAK4CaloJetsCorrectedIDPassed = cms.EDProducer( "CorrectedCaloJetProducer",
    src = cms.InputTag( "hltAK4CaloJetsIDPassed" ),
    correctors = cms.VInputTag( 'hltAK4CaloCorrector' )
)
# --- HT/MHT computation, HT80 filter and zero-bias prescalers ---------------
# HT from corrected calo jets with pT > 40 GeV, |eta| < 2.5; MHT from jets
# with pT > 30 GeV, |eta| < 5.0.
process.hltHtMht = cms.EDProducer( "HLTHtMhtProducer",
    usePt = cms.bool( False ),
    excludePFMuons = cms.bool( False ),
    minNJetHt = cms.int32( 0 ),
    minNJetMht = cms.int32( 0 ),
    minPtJetHt = cms.double( 40.0 ),
    minPtJetMht = cms.double( 30.0 ),
    maxEtaJetHt = cms.double( 2.5 ),
    maxEtaJetMht = cms.double( 5.0 ),
    jetsLabel = cms.InputTag( "hltAK4CaloJetsCorrected" ),
    pfCandidatesLabel = cms.InputTag( "" )
)
# Requires HT > 80 GeV (no MHT or Meff requirement).
process.hltHT80 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( True ),
    htLabels = cms.VInputTag( 'hltHtMht' ),
    mhtLabels = cms.VInputTag( 'hltHtMht' ),
    minHt = cms.vdouble( 80.0 ),
    minMht = cms.vdouble( 0.0 ),
    minMeff = cms.vdouble( 0.0 ),
    meffSlope = cms.vdouble( 1.0 )
)
# Prescalers for the twelve HIZeroBias path partitions; they differ only in
# the prescale-counter offset (0..11), which staggers the accepted events.
process.hltPreHIZeroBiaspart0 = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreHIZeroBiaspart1 = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 1 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreHIZeroBiaspart2 = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 2 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreHIZeroBiaspart3 = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 3 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreHIZeroBiaspart4 = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 4 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreHIZeroBiaspart5 = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 5 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreHIZeroBiaspart6 = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 6 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreHIZeroBiaspart7 = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 7 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreHIZeroBiaspart8 = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 8 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreHIZeroBiaspart9 = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 9 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreHIZeroBiaspart10 = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 10 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreHIZeroBiaspart11 = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 11 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# --- AlCa ECAL pi0/eta calibration stream: seed, preshower, 3x3 clusters ----
# L1 seed (broad OR of EG/jet/always-true bits) for the HI ECAL pi0/eta
# calibration paths.
process.hltL1sAlCaHIEcalPi0Eta = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_AlwaysTrue OR L1_DoubleEG_15_10 OR L1_DoubleEG_18_17 OR L1_DoubleEG_20_18 OR L1_DoubleEG_22_10 OR L1_DoubleEG_22_12 OR L1_DoubleEG_22_15 OR L1_DoubleEG_23_10 OR L1_DoubleEG_24_17 OR L1_DoubleEG_25_12 OR L1_DoubleJet100er2p7 OR L1_DoubleJet112er2p7 OR L1_DoubleJet120er2p7 OR L1_DoubleJet40er2p7 OR L1_DoubleJet50er2p7 OR L1_DoubleJet60er2p7 OR L1_DoubleJet80er2p7 OR L1_IsolatedBunch OR L1_SingleEG10 OR L1_SingleEG15 OR L1_SingleEG18 OR L1_SingleEG24 OR L1_SingleEG26 OR L1_SingleEG28 OR L1_SingleEG30 OR L1_SingleEG32 OR L1_SingleEG34 OR L1_SingleEG36 OR L1_SingleEG38 OR L1_SingleEG40 OR L1_SingleEG42 OR L1_SingleEG45 OR L1_SingleEG5 OR L1_SingleIsoEG18 OR L1_SingleIsoEG20 OR L1_SingleIsoEG22 OR L1_SingleIsoEG24 OR L1_SingleIsoEG26 OR L1_SingleIsoEG28 OR L1_SingleIsoEG30 OR L1_SingleIsoEG32 OR L1_SingleIsoEG34 OR L1_SingleIsoEG36 OR L1_SingleJet120 OR L1_SingleJet140 OR L1_SingleJet150 OR L1_SingleJet16 OR L1_SingleJet160 OR L1_SingleJet170 OR L1_SingleJet180 OR L1_SingleJet20 OR L1_SingleJet200 OR L1_SingleJet35 OR L1_SingleJet60 OR L1_SingleJet90" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
# Prescaler for the EB-only pi0 calibration path.
process.hltPreAlCaHIEcalPi0EBonly = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# ECAL preshower unpacking and local reconstruction.
process.hltEcalPreshowerDigis = cms.EDProducer( "ESRawToDigi",
    sourceTag = cms.InputTag( "rawDataCollector" ),
    debugMode = cms.untracked.bool( False ),
    InstanceES = cms.string( "" ),
    LookupTable = cms.FileInPath( "EventFilter/ESDigiToRaw/data/ES_lookup_table.dat" ),
    ESdigiCollection = cms.string( "" )
)
process.hltEcalPreshowerRecHit = cms.EDProducer( "ESRecHitProducer",
    ESrechitCollection = cms.string( "EcalRecHitsES" ),
    ESdigiCollection = cms.InputTag( "hltEcalPreshowerDigis" ),
    algo = cms.string( "ESRecHitWorker" ),
    ESRecoAlgo = cms.int32( 0 )
)
# Simple 3x3 crystal clusters in EB and EE, seeded at 0.5 (EB) / 1.0 (EE) GeV;
# rec-hit selection uses the DB channel-status flags (useDBStatus=True).
process.hltSimple3x3Clusters = cms.EDProducer( "EgammaHLTNxNClusterProducer",
    doBarrel = cms.bool( True ),
    doEndcaps = cms.bool( True ),
    barrelHitProducer = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
    endcapHitProducer = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),
    clusEtaSize = cms.int32( 3 ),
    clusPhiSize = cms.int32( 3 ),
    barrelClusterCollection = cms.string( "Simple3x3ClustersBarrel" ),
    endcapClusterCollection = cms.string( "Simple3x3ClustersEndcap" ),
    clusSeedThr = cms.double( 0.5 ),
    clusSeedThrEndCap = cms.double( 1.0 ),
    useRecoFlag = cms.bool( False ),
    flagLevelRecHitsToUse = cms.int32( 1 ),
    useDBStatus = cms.bool( True ),
    statusLevelRecHitsToUse = cms.int32( 1 ),
    posCalcParameters = cms.PSet(
      T0_barl = cms.double( 7.4 ),
      T0_endcPresh = cms.double( 1.2 ),
      LogWeighted = cms.bool( True ),
      T0_endc = cms.double( 3.1 ),
      X0 = cms.double( 0.89 ),
      W0 = cms.double( 4.2 )
    ),
    maxNumberofSeeds = cms.int32( 700 ),
    maxNumberofClusters = cms.int32( 300 ),
    debugLevel = cms.int32( 0 )
)
# --- AlCaP0 pi0, EB-only chain -------------------------------------------
# Select pi0 -> gamma gamma candidates from barrel 3x3 clusters and keep the
# associated rechits; endcap selection is disabled (doSelEndcap = False).
# The *_region1/_region2 cuts apply below/above |eta| = region1_Barrel.
process.hltAlCaPi0RecHitsFilterEBonlyRegional = cms.EDFilter( "HLTRegionalEcalResonanceFilter",
    barrelHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
    endcapHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),
    preshRecHitProducer = cms.InputTag( 'hltEcalPreshowerRecHit','EcalRecHitsES' ),
    barrelClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersBarrel' ),
    endcapClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersEndcap' ),
    useRecoFlag = cms.bool( False ),
    flagLevelRecHitsToUse = cms.int32( 1 ),
    useDBStatus = cms.bool( True ),
    statusLevelRecHitsToUse = cms.int32( 1 ),
    doSelBarrel = cms.bool( True ),
    barrelSelection = cms.PSet( 
      massHighPi0Cand = cms.double( 0.163 ),
      ptMinForIsolation = cms.double( 1.0 ),
      seleMinvMaxBarrel = cms.double( 0.22 ),
      massLowPi0Cand = cms.double( 0.104 ),
      seleS9S25Gamma = cms.double( 0.0 ),
      seleBeltDeta = cms.double( 0.05 ),
      seleS4S9GammaBarrel_region2 = cms.double( 0.9 ),
      barrelHitCollection = cms.string( "pi0EcalRecHitsEB" ),
      removePi0CandidatesForEta = cms.bool( False ),
      seleMinvMinBarrel = cms.double( 0.06 ),
      seleS4S9GammaBarrel_region1 = cms.double( 0.88 ),
      selePtPairBarrel_region1 = cms.double( 2.0 ),
      selePtPairBarrel_region2 = cms.double( 1.75 ),
      seleBeltDR = cms.double( 0.2 ),
      region1_Barrel = cms.double( 1.0 ),
      seleIsoBarrel_region1 = cms.double( 0.5 ),
      selePtGammaBarrel_region1 = cms.double( 0.65 ),
      seleIsoBarrel_region2 = cms.double( 0.5 ),
      selePtGammaBarrel_region2 = cms.double( 0.65 ),
      store5x5RecHitEB = cms.bool( False ),
      seleNxtalBarrel_region2 = cms.uint32( 6 ),
      seleNxtalBarrel_region1 = cms.uint32( 6 )
    ),
    doSelEndcap = cms.bool( False ),
    # Endcap PSet is still required by the module even though endcap
    # selection is off for this EB-only path.
    endcapSelection = cms.PSet( 
      seleBeltDetaEndCap = cms.double( 0.05 ),
      selePtPairMaxEndCap_region3 = cms.double( 2.5 ),
      seleS4S9GammaEndCap_region2 = cms.double( 0.65 ),
      seleS4S9GammaEndCap_region1 = cms.double( 0.65 ),
      seleNxtalEndCap_region2 = cms.uint32( 6 ),
      seleNxtalEndCap_region3 = cms.uint32( 6 ),
      ptMinForIsolationEndCap = cms.double( 0.5 ),
      selePtPairEndCap_region1 = cms.double( 1.5 ),
      endcapHitCollection = cms.string( "pi0EcalRecHitsEE" ),
      selePtPairEndCap_region2 = cms.double( 1.5 ),
      seleS4S9GammaEndCap_region3 = cms.double( 0.65 ),
      selePtGammaEndCap_region3 = cms.double( 0.5 ),
      selePtGammaEndCap_region2 = cms.double( 0.5 ),
      selePtGammaEndCap_region1 = cms.double( 0.5 ),
      region1_EndCap = cms.double( 1.8 ),
      region2_EndCap = cms.double( 2.0 ),
      store5x5RecHitEE = cms.bool( False ),
      seleIsoEndCap_region3 = cms.double( 0.5 ),
      seleIsoEndCap_region2 = cms.double( 0.5 ),
      seleMinvMinEndCap = cms.double( 0.05 ),
      selePtPairEndCap_region3 = cms.double( 99.0 ),
      seleIsoEndCap_region1 = cms.double( 0.5 ),
      seleBeltDREndCap = cms.double( 0.2 ),
      seleMinvMaxEndCap = cms.double( 0.3 ),
      seleNxtalEndCap_region1 = cms.uint32( 6 ),
      seleS9S25GammaEndCap = cms.double( 0.0 )
    ),
    storeRecHitES = cms.bool( False ),
    preshowerSelection = cms.PSet( 
      preshClusterEnergyCut = cms.double( 0.0 ),
      debugLevelES = cms.string( "" ),
      ESCollection = cms.string( "pi0EcalRecHitsES" ),
      preshNclust = cms.int32( 4 ),
      preshStripEnergyCut = cms.double( 0.0 ),
      preshCalibPlaneY = cms.double( 0.7 ),
      preshSeededNstrip = cms.int32( 15 ),
      preshCalibGamma = cms.double( 0.024 ),
      preshCalibPlaneX = cms.double( 1.0 ),
      preshCalibMIP = cms.double( 9.0E-5 )
    ),
    debugLevel = cms.int32( 0 )
)
# Recalibration pass-through: every do* flag is False, so rechits are copied
# to the output collections unchanged.
# NOTE(review): EERecHitCollection deliberately points at the EB product —
# only the barrel collection is meaningful on this EB-only path; confirm
# against the ConfDB master if this file is ever edited by hand.
process.hltAlCaPi0EBUncalibrator = cms.EDProducer( "EcalRecalibRecHitProducer",
    doEnergyScale = cms.bool( False ),
    doEnergyScaleInverse = cms.bool( False ),
    doIntercalib = cms.bool( False ),
    doIntercalibInverse = cms.bool( False ),
    EERecHitCollection = cms.InputTag( 'hltAlCaPi0RecHitsFilterEBonlyRegional','pi0EcalRecHitsEB' ),
    EBRecHitCollection = cms.InputTag( 'hltAlCaPi0RecHitsFilterEBonlyRegional','pi0EcalRecHitsEB' ),
    doLaserCorrections = cms.bool( False ),
    doLaserCorrectionsInverse = cms.bool( False ),
    EBRecalibRecHitCollection = cms.string( "pi0EcalRecHitsEB" ),
    EERecalibRecHitCollection = cms.string( "pi0EcalRecHitsEE" )
)
# Map the selected pi0 rechits back onto their original barrel digis (and
# selective-readout flags) so the AlCa stream can ship compact digis.
process.hltAlCaPi0EBRechitsToDigis = cms.EDProducer( "HLTRechitsToDigis",
    region = cms.string( "barrel" ),
    digisIn = cms.InputTag( 'hltEcalDigis','ebDigis' ),
    digisOut = cms.string( "pi0EBDigis" ),
    recHits = cms.InputTag( 'hltAlCaPi0EBUncalibrator','pi0EcalRecHitsEB' ),
    srFlagsIn = cms.InputTag( "hltEcalDigis" ),
    srFlagsOut = cms.string( "pi0EBSrFlags" )
)
# --- AlCaP0 pi0, EE-only chain -------------------------------------------
# HLT prescaler for the heavy-ion EE-only pi0 calibration path.
process.hltPreAlCaHIEcalPi0EEonly = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Mirror of the EB-only filter above, but with barrel selection off and
# endcap selection on; preshower rechits are kept (storeRecHitES = True).
process.hltAlCaPi0RecHitsFilterEEonlyRegional = cms.EDFilter( "HLTRegionalEcalResonanceFilter",
    barrelHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
    endcapHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),
    preshRecHitProducer = cms.InputTag( 'hltEcalPreshowerRecHit','EcalRecHitsES' ),
    barrelClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersBarrel' ),
    endcapClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersEndcap' ),
    useRecoFlag = cms.bool( False ),
    flagLevelRecHitsToUse = cms.int32( 1 ),
    useDBStatus = cms.bool( True ),
    statusLevelRecHitsToUse = cms.int32( 1 ),
    doSelBarrel = cms.bool( False ),
    # Barrel PSet is required by the module even though barrel selection is
    # disabled for this EE-only path.
    barrelSelection = cms.PSet( 
      massHighPi0Cand = cms.double( 0.163 ),
      ptMinForIsolation = cms.double( 1.0 ),
      seleMinvMaxBarrel = cms.double( 0.22 ),
      massLowPi0Cand = cms.double( 0.104 ),
      seleS9S25Gamma = cms.double( 0.0 ),
      seleBeltDeta = cms.double( 0.05 ),
      seleS4S9GammaBarrel_region2 = cms.double( 0.65 ),
      barrelHitCollection = cms.string( "pi0EcalRecHitsEB" ),
      removePi0CandidatesForEta = cms.bool( False ),
      seleMinvMinBarrel = cms.double( 0.06 ),
      seleS4S9GammaBarrel_region1 = cms.double( 0.65 ),
      selePtPairBarrel_region1 = cms.double( 1.5 ),
      selePtPairBarrel_region2 = cms.double( 1.5 ),
      seleBeltDR = cms.double( 0.2 ),
      region1_Barrel = cms.double( 1.0 ),
      seleIsoBarrel_region1 = cms.double( 0.5 ),
      selePtGammaBarrel_region1 = cms.double( 0.5 ),
      seleIsoBarrel_region2 = cms.double( 0.5 ),
      selePtGammaBarrel_region2 = cms.double( 0.5 ),
      store5x5RecHitEB = cms.bool( False ),
      seleNxtalBarrel_region2 = cms.uint32( 6 ),
      seleNxtalBarrel_region1 = cms.uint32( 6 )
    ),
    doSelEndcap = cms.bool( True ),
    # Endcap regions are split at |eta| = 1.8 and 2.0 (region1/2_EndCap).
    endcapSelection = cms.PSet( 
      seleBeltDetaEndCap = cms.double( 0.05 ),
      selePtPairMaxEndCap_region3 = cms.double( 999.0 ),
      seleS4S9GammaEndCap_region2 = cms.double( 0.92 ),
      seleS4S9GammaEndCap_region1 = cms.double( 0.85 ),
      seleNxtalEndCap_region2 = cms.uint32( 6 ),
      seleNxtalEndCap_region3 = cms.uint32( 6 ),
      ptMinForIsolationEndCap = cms.double( 0.5 ),
      selePtPairEndCap_region1 = cms.double( 3.75 ),
      endcapHitCollection = cms.string( "pi0EcalRecHitsEE" ),
      selePtPairEndCap_region2 = cms.double( 2.0 ),
      seleS4S9GammaEndCap_region3 = cms.double( 0.92 ),
      selePtGammaEndCap_region3 = cms.double( 0.95 ),
      selePtGammaEndCap_region2 = cms.double( 0.95 ),
      selePtGammaEndCap_region1 = cms.double( 1.1 ),
      region1_EndCap = cms.double( 1.8 ),
      region2_EndCap = cms.double( 2.0 ),
      store5x5RecHitEE = cms.bool( False ),
      seleIsoEndCap_region3 = cms.double( 0.5 ),
      seleIsoEndCap_region2 = cms.double( 0.5 ),
      seleMinvMinEndCap = cms.double( 0.05 ),
      selePtPairEndCap_region3 = cms.double( 2.0 ),
      seleIsoEndCap_region1 = cms.double( 0.5 ),
      seleBeltDREndCap = cms.double( 0.2 ),
      seleMinvMaxEndCap = cms.double( 0.3 ),
      seleNxtalEndCap_region1 = cms.uint32( 6 ),
      seleS9S25GammaEndCap = cms.double( 0.0 )
    ),
    storeRecHitES = cms.bool( True ),
    preshowerSelection = cms.PSet( 
      preshClusterEnergyCut = cms.double( 0.0 ),
      debugLevelES = cms.string( "" ),
      ESCollection = cms.string( "pi0EcalRecHitsES" ),
      preshNclust = cms.int32( 4 ),
      preshStripEnergyCut = cms.double( 0.0 ),
      preshCalibPlaneY = cms.double( 0.7 ),
      preshSeededNstrip = cms.int32( 15 ),
      preshCalibGamma = cms.double( 0.024 ),
      preshCalibPlaneX = cms.double( 1.0 ),
      preshCalibMIP = cms.double( 9.0E-5 )
    ),
    debugLevel = cms.int32( 0 )
)
# Identity recalibration pass-through (all do* flags False); both inputs
# intentionally reference the EE product on this EE-only path.
process.hltAlCaPi0EEUncalibrator = cms.EDProducer( "EcalRecalibRecHitProducer",
    doEnergyScale = cms.bool( False ),
    doEnergyScaleInverse = cms.bool( False ),
    doIntercalib = cms.bool( False ),
    doIntercalibInverse = cms.bool( False ),
    EERecHitCollection = cms.InputTag( 'hltAlCaPi0RecHitsFilterEEonlyRegional','pi0EcalRecHitsEE' ),
    EBRecHitCollection = cms.InputTag( 'hltAlCaPi0RecHitsFilterEEonlyRegional','pi0EcalRecHitsEE' ),
    doLaserCorrections = cms.bool( False ),
    doLaserCorrectionsInverse = cms.bool( False ),
    EBRecalibRecHitCollection = cms.string( "pi0EcalRecHitsEB" ),
    EERecalibRecHitCollection = cms.string( "pi0EcalRecHitsEE" )
)
# Convert the selected endcap rechits back to digis for the AlCa stream.
process.hltAlCaPi0EERechitsToDigis = cms.EDProducer( "HLTRechitsToDigis",
    region = cms.string( "endcap" ),
    digisIn = cms.InputTag( 'hltEcalDigis','eeDigis' ),
    digisOut = cms.string( "pi0EEDigis" ),
    recHits = cms.InputTag( 'hltAlCaPi0EEUncalibrator','pi0EcalRecHitsEE' ),
    srFlagsIn = cms.InputTag( "hltEcalDigis" ),
    srFlagsOut = cms.string( "pi0EESrFlags" )
)
# --- AlCaEta eta, EB-only chain ------------------------------------------
# HLT prescaler for the heavy-ion EB-only eta calibration path.
process.hltPreAlCaHIEcalEtaEBonly = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Same module type as the pi0 filters, tuned for the eta meson instead:
# wider invariant-mass window (0.2-0.8), pi0 candidates are vetoed
# (removePi0CandidatesForEta = True), and 5x5 rechit matrices are stored.
process.hltAlCaEtaRecHitsFilterEBonlyRegional = cms.EDFilter( "HLTRegionalEcalResonanceFilter",
    barrelHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
    endcapHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),
    preshRecHitProducer = cms.InputTag( 'hltEcalPreshowerRecHit','EcalRecHitsES' ),
    barrelClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersBarrel' ),
    endcapClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersEndcap' ),
    useRecoFlag = cms.bool( False ),
    flagLevelRecHitsToUse = cms.int32( 1 ),
    useDBStatus = cms.bool( True ),
    statusLevelRecHitsToUse = cms.int32( 1 ),
    doSelBarrel = cms.bool( True ),
    barrelSelection = cms.PSet( 
      massHighPi0Cand = cms.double( 0.156 ),
      ptMinForIsolation = cms.double( 1.0 ),
      seleMinvMaxBarrel = cms.double( 0.8 ),
      massLowPi0Cand = cms.double( 0.084 ),
      seleS9S25Gamma = cms.double( 0.8 ),
      seleBeltDeta = cms.double( 0.1 ),
      seleS4S9GammaBarrel_region2 = cms.double( 0.9 ),
      barrelHitCollection = cms.string( "etaEcalRecHitsEB" ),
      removePi0CandidatesForEta = cms.bool( True ),
      seleMinvMinBarrel = cms.double( 0.2 ),
      seleS4S9GammaBarrel_region1 = cms.double( 0.9 ),
      selePtPairBarrel_region1 = cms.double( 3.0 ),
      selePtPairBarrel_region2 = cms.double( 3.0 ),
      seleBeltDR = cms.double( 0.3 ),
      region1_Barrel = cms.double( 1.0 ),
      seleIsoBarrel_region1 = cms.double( 0.5 ),
      selePtGammaBarrel_region1 = cms.double( 0.65 ),
      seleIsoBarrel_region2 = cms.double( 0.5 ),
      selePtGammaBarrel_region2 = cms.double( 1.4 ),
      store5x5RecHitEB = cms.bool( True ),
      seleNxtalBarrel_region2 = cms.uint32( 6 ),
      seleNxtalBarrel_region1 = cms.uint32( 6 )
    ),
    doSelEndcap = cms.bool( False ),
    # Endcap PSet required by the module although unused on this EB-only path.
    endcapSelection = cms.PSet( 
      seleBeltDetaEndCap = cms.double( 0.05 ),
      selePtPairMaxEndCap_region3 = cms.double( 2.5 ),
      seleS4S9GammaEndCap_region2 = cms.double( 0.65 ),
      seleS4S9GammaEndCap_region1 = cms.double( 0.65 ),
      seleNxtalEndCap_region2 = cms.uint32( 6 ),
      seleNxtalEndCap_region3 = cms.uint32( 6 ),
      ptMinForIsolationEndCap = cms.double( 0.5 ),
      selePtPairEndCap_region1 = cms.double( 1.5 ),
      endcapHitCollection = cms.string( "etaEcalRecHitsEE" ),
      selePtPairEndCap_region2 = cms.double( 1.5 ),
      seleS4S9GammaEndCap_region3 = cms.double( 0.65 ),
      selePtGammaEndCap_region3 = cms.double( 0.5 ),
      selePtGammaEndCap_region2 = cms.double( 0.5 ),
      selePtGammaEndCap_region1 = cms.double( 0.5 ),
      region1_EndCap = cms.double( 1.8 ),
      region2_EndCap = cms.double( 2.0 ),
      store5x5RecHitEE = cms.bool( False ),
      seleIsoEndCap_region3 = cms.double( 0.5 ),
      seleIsoEndCap_region2 = cms.double( 0.5 ),
      seleMinvMinEndCap = cms.double( 0.05 ),
      selePtPairEndCap_region3 = cms.double( 99.0 ),
      seleIsoEndCap_region1 = cms.double( 0.5 ),
      seleBeltDREndCap = cms.double( 0.2 ),
      seleMinvMaxEndCap = cms.double( 0.3 ),
      seleNxtalEndCap_region1 = cms.uint32( 6 ),
      seleS9S25GammaEndCap = cms.double( 0.0 )
    ),
    storeRecHitES = cms.bool( False ),
    preshowerSelection = cms.PSet( 
      preshClusterEnergyCut = cms.double( 0.0 ),
      debugLevelES = cms.string( "" ),
      ESCollection = cms.string( "etaEcalRecHitsES" ),
      preshNclust = cms.int32( 4 ),
      preshStripEnergyCut = cms.double( 0.0 ),
      preshCalibPlaneY = cms.double( 0.7 ),
      preshSeededNstrip = cms.int32( 15 ),
      preshCalibGamma = cms.double( 0.024 ),
      preshCalibPlaneX = cms.double( 1.0 ),
      preshCalibMIP = cms.double( 9.0E-5 )
    ),
    debugLevel = cms.int32( 0 )
)
# Identity recalibration pass-through (all do* flags False); both inputs
# intentionally reference the EB product on this EB-only path.
process.hltAlCaEtaEBUncalibrator = cms.EDProducer( "EcalRecalibRecHitProducer",
    doEnergyScale = cms.bool( False ),
    doEnergyScaleInverse = cms.bool( False ),
    doIntercalib = cms.bool( False ),
    doIntercalibInverse = cms.bool( False ),
    EERecHitCollection = cms.InputTag( 'hltAlCaEtaRecHitsFilterEBonlyRegional','etaEcalRecHitsEB' ),
    EBRecHitCollection = cms.InputTag( 'hltAlCaEtaRecHitsFilterEBonlyRegional','etaEcalRecHitsEB' ),
    doLaserCorrections = cms.bool( False ),
    doLaserCorrectionsInverse = cms.bool( False ),
    EBRecalibRecHitCollection = cms.string( "etaEcalRecHitsEB" ),
    EERecalibRecHitCollection = cms.string( "etaEcalRecHitsEE" )
)
# Convert the selected barrel rechits back to digis for the AlCa stream.
process.hltAlCaEtaEBRechitsToDigis = cms.EDProducer( "HLTRechitsToDigis",
    region = cms.string( "barrel" ),
    digisIn = cms.InputTag( 'hltEcalDigis','ebDigis' ),
    digisOut = cms.string( "etaEBDigis" ),
    recHits = cms.InputTag( 'hltAlCaEtaEBUncalibrator','etaEcalRecHitsEB' ),
    srFlagsIn = cms.InputTag( "hltEcalDigis" ),
    srFlagsOut = cms.string( "etaEBSrFlags" )
)
# --- AlCaEta eta, EE-only chain ------------------------------------------
# HLT prescaler for the heavy-ion EE-only eta calibration path.
process.hltPreAlCaHIEcalEtaEEonly = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Eta-meson selection in the endcaps only: mass window 0.2-0.8, 5x5 rechit
# matrices stored, preshower rechits kept (storeRecHitES = True).
process.hltAlCaEtaRecHitsFilterEEonlyRegional = cms.EDFilter( "HLTRegionalEcalResonanceFilter",
    barrelHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
    endcapHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),
    preshRecHitProducer = cms.InputTag( 'hltEcalPreshowerRecHit','EcalRecHitsES' ),
    barrelClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersBarrel' ),
    endcapClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersEndcap' ),
    useRecoFlag = cms.bool( False ),
    flagLevelRecHitsToUse = cms.int32( 1 ),
    useDBStatus = cms.bool( True ),
    statusLevelRecHitsToUse = cms.int32( 1 ),
    doSelBarrel = cms.bool( False ),
    # Barrel PSet required by the module although unused on this EE-only path.
    barrelSelection = cms.PSet( 
      massHighPi0Cand = cms.double( 0.163 ),
      ptMinForIsolation = cms.double( 1.0 ),
      seleMinvMaxBarrel = cms.double( 0.8 ),
      massLowPi0Cand = cms.double( 0.104 ),
      seleS9S25Gamma = cms.double( 0.0 ),
      seleBeltDeta = cms.double( 0.05 ),
      seleS4S9GammaBarrel_region2 = cms.double( 0.65 ),
      barrelHitCollection = cms.string( "etaEcalRecHitsEB" ),
      removePi0CandidatesForEta = cms.bool( False ),
      seleMinvMinBarrel = cms.double( 0.3 ),
      seleS4S9GammaBarrel_region1 = cms.double( 0.65 ),
      selePtPairBarrel_region1 = cms.double( 1.5 ),
      selePtPairBarrel_region2 = cms.double( 1.5 ),
      seleBeltDR = cms.double( 0.2 ),
      region1_Barrel = cms.double( 1.0 ),
      seleIsoBarrel_region1 = cms.double( 0.5 ),
      selePtGammaBarrel_region1 = cms.double( 1.0 ),
      seleIsoBarrel_region2 = cms.double( 0.5 ),
      selePtGammaBarrel_region2 = cms.double( 0.5 ),
      store5x5RecHitEB = cms.bool( False ),
      seleNxtalBarrel_region2 = cms.uint32( 6 ),
      seleNxtalBarrel_region1 = cms.uint32( 6 )
    ),
    doSelEndcap = cms.bool( True ),
    endcapSelection = cms.PSet( 
      seleBeltDetaEndCap = cms.double( 0.1 ),
      selePtPairMaxEndCap_region3 = cms.double( 999.0 ),
      seleS4S9GammaEndCap_region2 = cms.double( 0.9 ),
      seleS4S9GammaEndCap_region1 = cms.double( 0.9 ),
      seleNxtalEndCap_region2 = cms.uint32( 6 ),
      seleNxtalEndCap_region3 = cms.uint32( 6 ),
      ptMinForIsolationEndCap = cms.double( 0.5 ),
      selePtPairEndCap_region1 = cms.double( 3.0 ),
      endcapHitCollection = cms.string( "etaEcalRecHitsEE" ),
      selePtPairEndCap_region2 = cms.double( 3.0 ),
      seleS4S9GammaEndCap_region3 = cms.double( 0.9 ),
      selePtGammaEndCap_region3 = cms.double( 1.0 ),
      selePtGammaEndCap_region2 = cms.double( 1.0 ),
      selePtGammaEndCap_region1 = cms.double( 1.0 ),
      region1_EndCap = cms.double( 1.8 ),
      region2_EndCap = cms.double( 2.0 ),
      store5x5RecHitEE = cms.bool( True ),
      seleIsoEndCap_region3 = cms.double( 0.5 ),
      seleIsoEndCap_region2 = cms.double( 0.5 ),
      seleMinvMinEndCap = cms.double( 0.2 ),
      selePtPairEndCap_region3 = cms.double( 3.0 ),
      seleIsoEndCap_region1 = cms.double( 0.5 ),
      seleBeltDREndCap = cms.double( 0.3 ),
      seleMinvMaxEndCap = cms.double( 0.8 ),
      seleNxtalEndCap_region1 = cms.uint32( 6 ),
      seleS9S25GammaEndCap = cms.double( 0.85 )
    ),
    storeRecHitES = cms.bool( True ),
    preshowerSelection = cms.PSet( 
      preshClusterEnergyCut = cms.double( 0.0 ),
      debugLevelES = cms.string( "" ),
      ESCollection = cms.string( "etaEcalRecHitsES" ),
      preshNclust = cms.int32( 4 ),
      preshStripEnergyCut = cms.double( 0.0 ),
      preshCalibPlaneY = cms.double( 0.7 ),
      preshSeededNstrip = cms.int32( 15 ),
      preshCalibGamma = cms.double( 0.024 ),
      preshCalibPlaneX = cms.double( 1.0 ),
      preshCalibMIP = cms.double( 9.0E-5 )
    ),
    debugLevel = cms.int32( 0 )
)
# Identity recalibration pass-through (all do* flags False); both inputs
# intentionally reference the EE product on this EE-only path.
process.hltAlCaEtaEEUncalibrator = cms.EDProducer( "EcalRecalibRecHitProducer",
    doEnergyScale = cms.bool( False ),
    doEnergyScaleInverse = cms.bool( False ),
    doIntercalib = cms.bool( False ),
    doIntercalibInverse = cms.bool( False ),
    EERecHitCollection = cms.InputTag( 'hltAlCaEtaRecHitsFilterEEonlyRegional','etaEcalRecHitsEE' ),
    EBRecHitCollection = cms.InputTag( 'hltAlCaEtaRecHitsFilterEEonlyRegional','etaEcalRecHitsEE' ),
    doLaserCorrections = cms.bool( False ),
    doLaserCorrectionsInverse = cms.bool( False ),
    EBRecalibRecHitCollection = cms.string( "etaEcalRecHitsEB" ),
    EERecalibRecHitCollection = cms.string( "etaEcalRecHitsEE" )
)
# Convert the selected endcap rechits back to digis for the AlCa stream.
process.hltAlCaEtaEERechitsToDigis = cms.EDProducer( "HLTRechitsToDigis",
    region = cms.string( "endcap" ),
    digisIn = cms.InputTag( 'hltEcalDigis','eeDigis' ),
    digisOut = cms.string( "etaEEDigis" ),
    recHits = cms.InputTag( 'hltAlCaEtaEEUncalibrator','etaEcalRecHitsEE' ),
    srFlagsIn = cms.InputTag( "hltEcalDigis" ),
    srFlagsOut = cms.string( "etaEESrFlags" )
)
# --- Calibration-event paths ---------------------------------------------
# Accept only events of trigger type 2 (calibration events).
process.hltCalibrationEventsFilter = cms.EDFilter( "HLTTriggerTypeFilter",
    SelectedTriggerType = cms.int32( 2 )
)
process.hltPreEcalCalibration = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Keep only the ECAL FEDs (601-654) plus FED 1024 from the raw data.
process.hltEcalCalibrationRaw = cms.EDProducer( "EvFFEDSelector",
    inputTag = cms.InputTag( "rawDataCollector" ),
    fedList = cms.vuint32( 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 1024 )
)
process.hltPreHcalCalibration = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Select events carrying one of the listed HCAL calibration types.
process.hltHcalCalibTypeFilter = cms.EDFilter( "HLTHcalCalibTypeFilter",
    InputTag = cms.InputTag( "rawDataCollector" ),
    CalibTypes = cms.vint32( 1, 2, 3, 4, 5, 6 )
)
# Keep only the HCAL FEDs (700-731, 1100-1199) plus FED 1024.
process.hltHcalCalibrationRaw = cms.EDProducer( "EvFFEDSelector",
    inputTag = cms.InputTag( "rawDataCollector" ),
    fedList = cms.vuint32( 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 1024, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199 )
)
# --- AlCaPhiSym path -----------------------------------------------------
# L1 seed: fire on zero-bias, always-true, or isolated-bunch algorithms.
process.hltL1sZeroBiasIorAlwaysTrueIorIsolatedBunch = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_ZeroBias OR L1_AlwaysTrue OR L1_IsolatedBunch" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreAlCaEcalPhiSym = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Phi-symmetry selection: keep ECAL digis above per-ring amplitude
# thresholds (one entry per eta ring in each ampCut_* vector) for the
# phi-symmetry intercalibration stream.
process.hltEcalPhiSymFilter = cms.EDFilter( "HLTEcalPhiSymFilter",
    barrelDigiCollection = cms.InputTag( 'hltEcalDigis','ebDigis' ),
    endcapDigiCollection = cms.InputTag( 'hltEcalDigis','eeDigis' ),
    barrelUncalibHitCollection = cms.InputTag( 'hltEcalUncalibRecHit','EcalUncalibRecHitsEB' ),
    endcapUncalibHitCollection = cms.InputTag( 'hltEcalUncalibRecHit','EcalUncalibRecHitsEE' ),
    barrelHitCollection = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
    endcapHitCollection = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),
    statusThreshold = cms.uint32( 3 ),
    useRecoFlag = cms.bool( False ),
    ampCut_barrelP = cms.vdouble( 14.31759, 14.33355, 14.34853, 14.36281, 14.37667, 14.39011, 14.40334, 14.41657, 14.42994, 14.44359, 14.45759, 14.47222, 14.48748, 14.50358, 14.52052, 14.53844, 14.55755, 14.57778, 14.59934, 14.62216, 14.64645, 14.67221, 14.69951, 14.72849, 14.75894, 14.79121, 14.82502, 14.86058, 14.89796, 14.93695, 14.97783, 15.02025, 15.06442, 15.11041, 15.15787, 15.20708, 15.25783, 15.31026, 15.36409, 15.41932, 15.47602, 15.53384, 15.5932, 15.65347, 15.715, 15.77744, 15.84086, 15.90505, 15.97001, 16.03539, 16.10147, 16.16783, 16.23454, 16.30146, 16.36824, 16.43502, 16.50159, 16.56781, 16.63354, 16.69857, 16.76297, 16.82625, 16.88862, 16.94973, 17.00951, 17.06761, 17.12403, 17.1787, 17.23127, 17.28167, 17.32955, 17.37491, 17.41754, 17.45723, 17.49363, 17.52688, 17.55642, 17.58218, 17.60416, 17.62166, 17.63468, 17.64315, 17.64665, 17.6449, 17.6379 ),
    ampCut_barrelM = cms.vdouble( 17.6379, 17.6449, 17.64665, 17.64315, 17.63468, 17.62166, 17.60416, 17.58218, 17.55642, 17.52688, 17.49363, 17.45723, 17.41754, 17.37491, 17.32955, 17.28167, 17.23127, 17.1787, 17.12403, 17.06761, 17.00951, 16.94973, 16.88862, 16.82625, 16.76297, 16.69857, 16.63354, 16.56781, 16.50159, 16.43502, 16.36824, 16.30146, 16.23454, 16.16783, 16.10147, 16.03539, 15.97001, 15.90505, 15.84086, 15.77744, 15.715, 15.65347, 15.5932, 15.53384, 15.47602, 15.41932, 15.36409, 15.31026, 15.25783, 15.20708, 15.15787, 15.11041, 15.06442, 15.02025, 14.97783, 14.93695, 14.89796, 14.86058, 14.82502, 14.79121, 14.75894, 14.72849, 14.69951, 14.67221, 14.64645, 14.62216, 14.59934, 14.57778, 14.55755, 14.53844, 14.52052, 14.50358, 14.48748, 14.47222, 14.45759, 14.44359, 14.42994, 14.41657, 14.40334, 14.39011, 14.37667, 14.36281, 14.34853, 14.33355, 14.31759 ),
    ampCut_endcapP = cms.vdouble( 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5 ),
    ampCut_endcapM = cms.vdouble( 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0 ),
    phiSymBarrelDigiCollection = cms.string( "phiSymEcalDigisEB" ),
    phiSymEndcapDigiCollection = cms.string( "phiSymEcalDigisEE" )
)
# --- ZeroBias first-collision-after-abort-gap path -----------------------
process.hltL1sL1ZeroBiasFirstCollisionAfterAbortGap = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_FirstCollisionInOrbit" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreZeroBiasFirstCollisionAfterAbortGap = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# --- AlCa RPC muon-normalisation path ------------------------------------
# L1 seed: OR of single-muon triggers from 7 to 30 GeV.
process.hltL1sSingleMu7to30 = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_SingleMu7 OR L1_SingleMu12 OR L1_SingleMu16 OR L1_SingleMu18 OR L1_SingleMu20 OR L1_SingleMu22 OR L1_SingleMu25 OR L1_SingleMu30" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreAlCaHIRPCMuonNormalisation = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Require at least one L1 muon within |eta| < 1.6 (RPC barrel coverage),
# no pT cut, central bunch crossing only.
process.hltHIRPCMuonNormaL1Filtered0 = cms.EDFilter( "HLTMuonL1TFilter",
    saveTags = cms.bool( True ),
    CandTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    PreviousCandTag = cms.InputTag( "hltL1sSingleMu7to30" ),
    MaxEta = cms.double( 1.6 ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    CentralBxOnly = cms.bool( True ),
    SelectQualities = cms.vint32(  )
)
# --- AlCa lumi-pixel-counts paths ----------------------------------------
process.hltPreAlCaLumiPixelsCountsRandom = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Gate on the pixel-tracker high voltage being on (from DCS information).
process.hltPixelTrackerHVOn = cms.EDFilter( "DetectorStateFilter",
    DebugOn = cms.untracked.bool( False ),
    DetectorType = cms.untracked.string( "pixel" ),
    DcsStatusLabel = cms.untracked.InputTag( "hltScalersRawToDigi" ),
    DCSRecordLabel = cms.untracked.InputTag( "hltOnlineMetaDataDigis" )
)
# Per-event pixel-cluster counts for the PCC luminosity measurement.
process.hltAlcaPixelClusterCounts = cms.EDProducer( "AlcaPCCEventProducer",
    pixelClusterLabel = cms.InputTag( "hltSiPixelClusters" ),
    trigstring = cms.untracked.string( "alcaPCCEvent" )
)
process.hltPreAlCaLumiPixelsCountsZeroBias = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Keep only the DAQ/TCDS FEDs 1023 and 1024 for the NanoDST content.
process.hltFEDSelector = cms.EDProducer( "EvFFEDSelector",
    inputTag = cms.InputTag( "rawDataCollector" ),
    fedList = cms.vuint32( 1023, 1024 )
)
# --- Trigger summaries and end-path bookkeeping --------------------------
# Collect filter objects from all 'hlt*' modules into the AOD trigger summary.
process.hltTriggerSummaryAOD = cms.EDProducer( "TriggerSummaryProducerAOD",
    throw = cms.bool( False ),
    processName = cms.string( "@" ),
    moduleLabelPatternsToMatch = cms.vstring( 'hlt*' ),
    moduleLabelPatternsToSkip = cms.vstring(  )
)
# RAW-format trigger summary for the current process ("@").
process.hltTriggerSummaryRAW = cms.EDProducer( "TriggerSummaryProducerRAW",
    processName = cms.string( "@" )
)
process.hltPreHLTAnalyzerEndpath = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# End-of-job summary of L1 Global Trigger decisions (central BX only).
process.hltL1TGlobalSummary = cms.EDAnalyzer( "L1TGlobalSummary",
    AlgInputTag = cms.InputTag( "hltGtStage2Digis" ),
    ExtInputTag = cms.InputTag( "hltGtStage2Digis" ),
    MinBx = cms.int32( 0 ),
    MaxBx = cms.int32( 0 ),
    DumpTrigResults = cms.bool( False ),
    DumpRecord = cms.bool( False ),
    DumpTrigSummary = cms.bool( True ),
    ReadPrescalesFromFile = cms.bool( False ),
    psFileName = cms.string( "prescale_L1TGlobal.csv" ),
    psColumn = cms.int32( 0 )
)
# End-of-job HLT path accept/reject report.
process.hltTrigReport = cms.EDAnalyzer( "HLTrigReport",
    HLTriggerResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    reportBy = cms.untracked.string( "job" ),
    resetBy = cms.untracked.string( "never" ),
    serviceBy = cms.untracked.string( "never" ),
    ReferencePath = cms.untracked.string( "HLTriggerFinalPath" ),
    ReferenceRate = cms.untracked.double( 100.0 )
)
# --- Output-stream prescalers (physics streams) --------------------------
# One HLTPrescaler per output stream; all run at offset 0.
process.hltPrePhysicsCommissioningOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsEGammaOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsEndOfFillOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHadronsTausOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsMuonsOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsTracksOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsForwardOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreDQMOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# "Smart" stream filter: select events for the DQM stream by path name,
# with per-condition prescales written as 'PATH / N'.
process.hltPreDQMOutputSmart = cms.EDFilter( "TriggerResultsFilter",
    usePathStatus = cms.bool( False ),
    hltResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    l1tResults = cms.InputTag( "" ),
    l1tIgnoreMaskAndPrescale = cms.bool( False ),
    throw = cms.bool( True ),
    triggerConditions = cms.vstring( '( HLT_Random_v3 OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE ) / 10',
      'HLT_Physics_v7',
      'HLT_Random_v3 / 3',
      'HLT_ZeroBias_v6 / 3',
      'HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2 / 3',
      'HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2 / 3',
      'HLT_HIL1NotBptxORForPPRef_v2 / 3',
      'HLT_HIZeroBias_part0_v6 / 3',
      'HLT_HIZeroBias_part1_v6 / 3',
      'HLT_HIZeroBias_part2_v6 / 3',
      'HLT_HIZeroBias_part3_v6 / 3',
      'HLT_HIZeroBias_part4_v6 / 3',
      'HLT_HIZeroBias_part5_v6 / 3',
      'HLT_HIZeroBias_part6_v6 / 3',
      'HLT_HIZeroBias_part7_v6 / 3',
      'HLT_HIZeroBias_part8_v6 / 3',
      'HLT_HIZeroBias_part9_v6 / 3',
      'HLT_HIZeroBias_part10_v6 / 3',
      'HLT_HIZeroBias_part11_v6 / 3',
      'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5 / 3' )
)
# --- Output-stream prescalers and smart filters (DQM / monitoring) -------
process.hltPreDQMOnlineBeamspotOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Select events for the online-beamspot DQM stream from the two beamspot paths.
process.hltPreDQMOnlineBeamspotOutputSmart = cms.EDFilter( "TriggerResultsFilter",
    usePathStatus = cms.bool( False ),
    hltResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    l1tResults = cms.InputTag( "" ),
    l1tIgnoreMaskAndPrescale = cms.bool( False ),
    throw = cms.bool( True ),
    triggerConditions = cms.vstring( 'HLT_ZeroBias_Beamspot_v4',
      'HLT_HIHT80_Beamspot_ppRef5TeV_v3' )
)
process.hltPreDQMCalibrationOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreDQMEventDisplayOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Empty condition list: no paths feed the event-display stream in this menu.
process.hltPreDQMEventDisplayOutputSmart = cms.EDFilter( "TriggerResultsFilter",
    usePathStatus = cms.bool( False ),
    hltResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    l1tResults = cms.InputTag( "" ),
    l1tIgnoreMaskAndPrescale = cms.bool( False ),
    throw = cms.bool( True ),
    triggerConditions = cms.vstring(  )
)
process.hltPreHLTMonitorOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Empty condition list: no paths feed the HLT-monitor stream in this menu.
process.hltPreHLTMonitorOutputSmart = cms.EDFilter( "TriggerResultsFilter",
    usePathStatus = cms.bool( False ),
    hltResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    l1tResults = cms.InputTag( "" ),
    l1tIgnoreMaskAndPrescale = cms.bool( False ),
    throw = cms.bool( True ),
    triggerConditions = cms.vstring(  )
)
# ------------------------------------------------------------------------------
# Per-stream output prescaler filters (auto-generated HLT menu dump -- do not
# hand-edit values; regenerate from ConfDB instead).
# Each "hltPre<Stream>Output" module is an HLTPrescaler gating one output path;
# all instances here use prescale offset 0 and read the L1 GT readout record
# produced by hltGtStage2Digis.  The "*Smart" modules are TriggerResultsFilter
# instances that select events by the HLT path decisions of the current process.
# ------------------------------------------------------------------------------
process.hltPreRPCMONOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreCalibrationOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreEcalCalibrationOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreALCAPHISYMOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreALCALumiPixelCountsExpressOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreALCALumiPixelCountsPromptOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreALCAP0Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreExpressOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Express stream "smart" selection: accept only events that passed one of the
# listed paths (throw = True presumably makes unknown path names a hard error
# -- confirm against TriggerResultsFilter documentation).
process.hltPreExpressOutputSmart = cms.EDFilter( "TriggerResultsFilter",
    usePathStatus = cms.bool( False ),
    hltResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    l1tResults = cms.InputTag( "" ),
    l1tIgnoreMaskAndPrescale = cms.bool( False ),
    throw = cms.bool( True ),
    triggerConditions = cms.vstring( 'HLT_Physics_v7',
      'HLT_Random_v3',
      'HLT_ZeroBias_v6',
      'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5' )
)
process.hltPreExpressAlignmentOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# ExpressAlignment stream "smart" selection: beamspot paths only.
process.hltPreExpressAlignmentOutputSmart = cms.EDFilter( "TriggerResultsFilter",
    usePathStatus = cms.bool( False ),
    hltResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    l1tResults = cms.InputTag( "" ),
    l1tIgnoreMaskAndPrescale = cms.bool( False ),
    throw = cms.bool( True ),
    triggerConditions = cms.vstring( 'HLT_ZeroBias_Beamspot_v4',
      'HLT_HIHT80_Beamspot_ppRef5TeV_v3' )
)
process.hltPreNanoDSTOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# One prescaler per PhysicsHIZeroBias output partition (1-6).
process.hltPrePhysicsHIZeroBias1Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHIZeroBias2Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHIZeroBias3Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHIZeroBias4Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHIZeroBias5Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHIZeroBias6Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# GPU-availability flag: this SwitchProducerCUDA declares only a "cpu" branch,
# which hard-wires the boolean product to False.
process.statusOnGPU = SwitchProducerCUDA(
 cpu = cms.EDProducer( "BooleanProducer",
    value = cms.bool( False )
 ),
)
# ECAL digis: the "cpu" branch is an EDAlias re-exporting the listed product
# types from the legacy unpacker module hltEcalDigisLegacy, some keyed by a
# specific product-instance label.
process.hltEcalDigis = SwitchProducerCUDA(
 cpu = cms.EDAlias(
   hltEcalDigisLegacy = cms.VPSet(
     cms.PSet( type = cms.string( "EBDigiCollection" ) ),
     cms.PSet( type = cms.string( "EEDigiCollection" ) ),
     cms.PSet( type = cms.string( "EBDetIdedmEDCollection" ) ),
     cms.PSet( type = cms.string( "EEDetIdedmEDCollection" ) ),
     cms.PSet( type = cms.string( "EBSrFlagsSorted" ) ),
     cms.PSet( type = cms.string( "EESrFlagsSorted" ) ),
     cms.PSet( type = cms.string( "EcalElectronicsIdedmEDCollection" ),
       fromProductInstance = cms.string( "EcalIntegrityBlockSizeErrors" )
     ),
     cms.PSet( type = cms.string( "EcalElectronicsIdedmEDCollection" ),
       fromProductInstance = cms.string( "EcalIntegrityTTIdErrors" )
     ),
     cms.PSet( type = cms.string( "EcalElectronicsIdedmEDCollection" ),
       fromProductInstance = cms.string( "EcalIntegrityZSXtalIdErrors" )
     ),
     cms.PSet( type = cms.string( "EcalPnDiodeDigisSorted" ) ),
     cms.PSet( type = cms.string( "EcalPseudoStripInputDigisSorted" ),
       fromProductInstance = cms.string( "EcalPseudoStripInputs" )
     ),
     cms.PSet( type = cms.string( "EcalTriggerPrimitiveDigisSorted" ),
       fromProductInstance = cms.string( "EcalTriggerPrimitives" )
     )
   )
 ),
)
# ECAL uncalibrated rechits (CPU branch): multi-fit amplitude reconstruction
# ("EcalUncalibRecHitWorkerMultiFit") from the EB/EE digis produced above.
# All numeric constants below are menu-tuned fit/noise parameters taken from
# the generated configuration -- do not hand-tune.
process.hltEcalUncalibRecHit = SwitchProducerCUDA(
 cpu = cms.EDProducer( "EcalUncalibRecHitProducer",
   EBdigiCollection = cms.InputTag( 'hltEcalDigis','ebDigis' ),
   EEhitCollection = cms.string( "EcalUncalibRecHitsEE" ),
   EEdigiCollection = cms.InputTag( 'hltEcalDigis','eeDigis' ),
   EBhitCollection = cms.string( "EcalUncalibRecHitsEB" ),
   algo = cms.string( "EcalUncalibRecHitWorkerMultiFit" ),
   algoPSet = cms.PSet(
     ebSpikeThreshold = cms.double( 1.042 ),
     EBtimeFitLimits_Upper = cms.double( 1.4 ),
     EEtimeFitLimits_Lower = cms.double( 0.2 ),
     # timing reconstruction disabled ("None")
     timealgo = cms.string( "None" ),
     EBtimeNconst = cms.double( 28.5 ),
     prefitMaxChiSqEE = cms.double( 10.0 ),
     outOfTimeThresholdGain12mEB = cms.double( 5.0 ),
     outOfTimeThresholdGain12mEE = cms.double( 1000.0 ),
     EEtimeFitParameters = cms.vdouble( -2.390548, 3.553628, -17.62341, 67.67538, -133.213, 140.7432, -75.41106, 16.20277 ),
     prefitMaxChiSqEB = cms.double( 25.0 ),
     simplifiedNoiseModelForGainSwitch = cms.bool( True ),
     EBtimeFitParameters = cms.vdouble( -2.015452, 3.130702, -12.3473, 41.88921, -82.83944, 91.01147, -50.35761, 11.05621 ),
     selectiveBadSampleCriteriaEB = cms.bool( False ),
     dynamicPedestalsEB = cms.bool( False ),
     useLumiInfoRunHeader = cms.bool( False ),
     EBamplitudeFitParameters = cms.vdouble( 1.138, 1.652 ),
     doPrefitEE = cms.bool( False ),
     dynamicPedestalsEE = cms.bool( False ),
     selectiveBadSampleCriteriaEE = cms.bool( False ),
     outOfTimeThresholdGain61pEE = cms.double( 1000.0 ),
     outOfTimeThresholdGain61pEB = cms.double( 5.0 ),
     # bunch crossings considered by the multi-fit
     activeBXs = cms.vint32( -5, -4, -3, -2, -1, 0, 1, 2, 3, 4 ),
     # Pulse-shape templates, covariances and noise-correlation matrices per
     # gain; values are detector calibration constants from the menu dump.
     EcalPulseShapeParameters = cms.PSet(
       EEPulseShapeTemplate = cms.vdouble( 0.116442, 0.756246, 1.0, 0.897182, 0.686831, 0.491506, 0.344111, 0.245731, 0.174115, 0.123361, 0.0874288, 0.061957 ),
       EEdigiCollection = cms.string( "" ),
       EcalPreMixStage2 = cms.bool( False ),
       EcalPreMixStage1 = cms.bool( False ),
       EBPulseShapeCovariance = cms.vdouble( 3.001E-6, 1.233E-5, 0.0, -4.416E-6, -4.571E-6, -3.614E-6, -2.636E-6, -1.286E-6, -8.41E-7, -5.296E-7, 0.0, 0.0, 1.233E-5, 6.154E-5, 0.0, -2.2E-5, -2.309E-5, -1.838E-5, -1.373E-5, -7.334E-6, -5.088E-6, -3.745E-6, -2.428E-6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.416E-6, -2.2E-5, 0.0, 8.319E-6, 8.545E-6, 6.792E-6, 5.059E-6, 2.678E-6, 1.816E-6, 1.223E-6, 8.245E-7, 5.589E-7, -4.571E-6, -2.309E-5, 0.0, 8.545E-6, 9.182E-6, 7.219E-6, 5.388E-6, 2.853E-6, 1.944E-6, 1.324E-6, 9.083E-7, 6.335E-7, -3.614E-6, -1.838E-5, 0.0, 6.792E-6, 7.219E-6, 6.016E-6, 4.437E-6, 2.385E-6, 1.636E-6, 1.118E-6, 7.754E-7, 5.556E-7, -2.636E-6, -1.373E-5, 0.0, 5.059E-6, 5.388E-6, 4.437E-6, 3.602E-6, 1.917E-6, 1.322E-6, 9.079E-7, 6.529E-7, 4.752E-7, -1.286E-6, -7.334E-6, 0.0, 2.678E-6, 2.853E-6, 2.385E-6, 1.917E-6, 1.375E-6, 9.1E-7, 6.455E-7, 4.693E-7, 3.657E-7, -8.41E-7, -5.088E-6, 0.0, 1.816E-6, 1.944E-6, 1.636E-6, 1.322E-6, 9.1E-7, 9.115E-7, 6.062E-7, 4.436E-7, 3.422E-7, -5.296E-7, -3.745E-6, 0.0, 1.223E-6, 1.324E-6, 1.118E-6, 9.079E-7, 6.455E-7, 6.062E-7, 7.217E-7, 4.862E-7, 3.768E-7, 0.0, -2.428E-6, 0.0, 8.245E-7, 9.083E-7, 7.754E-7, 6.529E-7, 4.693E-7, 4.436E-7, 4.862E-7, 6.509E-7, 4.418E-7, 0.0, 0.0, 0.0, 5.589E-7, 6.335E-7, 5.556E-7, 4.752E-7, 3.657E-7, 3.422E-7, 3.768E-7, 4.418E-7, 6.142E-7 ),
       ESdigiCollection = cms.string( "" ),
       EBdigiCollection = cms.string( "" ),
       EBCorrNoiseMatrixG01 = cms.vdouble( 1.0, 0.73354, 0.64442, 0.58851, 0.55425, 0.53082, 0.51916, 0.51097, 0.50732, 0.50409 ),
       EBCorrNoiseMatrixG12 = cms.vdouble( 1.0, 0.71073, 0.55721, 0.46089, 0.40449, 0.35931, 0.33924, 0.32439, 0.31581, 0.30481 ),
       EBCorrNoiseMatrixG06 = cms.vdouble( 1.0, 0.70946, 0.58021, 0.49846, 0.45006, 0.41366, 0.39699, 0.38478, 0.37847, 0.37055 ),
       EEPulseShapeCovariance = cms.vdouble( 3.941E-5, 3.333E-5, 0.0, -1.449E-5, -1.661E-5, -1.424E-5, -1.183E-5, -6.842E-6, -4.915E-6, -3.411E-6, 0.0, 0.0, 3.333E-5, 2.862E-5, 0.0, -1.244E-5, -1.431E-5, -1.233E-5, -1.032E-5, -5.883E-6, -4.154E-6, -2.902E-6, -2.128E-6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.449E-5, -1.244E-5, 0.0, 5.84E-6, 6.649E-6, 5.72E-6, 4.812E-6, 2.708E-6, 1.869E-6, 1.33E-6, 9.186E-7, 6.446E-7, -1.661E-5, -1.431E-5, 0.0, 6.649E-6, 7.966E-6, 6.898E-6, 5.794E-6, 3.157E-6, 2.184E-6, 1.567E-6, 1.084E-6, 7.575E-7, -1.424E-5, -1.233E-5, 0.0, 5.72E-6, 6.898E-6, 6.341E-6, 5.347E-6, 2.859E-6, 1.991E-6, 1.431E-6, 9.839E-7, 6.886E-7, -1.183E-5, -1.032E-5, 0.0, 4.812E-6, 5.794E-6, 5.347E-6, 4.854E-6, 2.628E-6, 1.809E-6, 1.289E-6, 9.02E-7, 6.146E-7, -6.842E-6, -5.883E-6, 0.0, 2.708E-6, 3.157E-6, 2.859E-6, 2.628E-6, 1.863E-6, 1.296E-6, 8.882E-7, 6.108E-7, 4.283E-7, -4.915E-6, -4.154E-6, 0.0, 1.869E-6, 2.184E-6, 1.991E-6, 1.809E-6, 1.296E-6, 1.217E-6, 8.669E-7, 5.751E-7, 3.882E-7, -3.411E-6, -2.902E-6, 0.0, 1.33E-6, 1.567E-6, 1.431E-6, 1.289E-6, 8.882E-7, 8.669E-7, 9.522E-7, 6.717E-7, 4.293E-7, 0.0, -2.128E-6, 0.0, 9.186E-7, 1.084E-6, 9.839E-7, 9.02E-7, 6.108E-7, 5.751E-7, 6.717E-7, 7.911E-7, 5.493E-7, 0.0, 0.0, 0.0, 6.446E-7, 7.575E-7, 6.886E-7, 6.146E-7, 4.283E-7, 3.882E-7, 4.293E-7, 5.493E-7, 7.027E-7 ),
       EBPulseShapeTemplate = cms.vdouble( 0.0113979, 0.758151, 1.0, 0.887744, 0.673548, 0.474332, 0.319561, 0.215144, 0.147464, 0.101087, 0.0693181, 0.0475044 ),
       EECorrNoiseMatrixG01 = cms.vdouble( 1.0, 0.72698, 0.62048, 0.55691, 0.51848, 0.49147, 0.47813, 0.47007, 0.46621, 0.46265 ),
       EECorrNoiseMatrixG12 = cms.vdouble( 1.0, 0.71373, 0.44825, 0.30152, 0.21609, 0.14786, 0.11772, 0.10165, 0.09465, 0.08098 ),
       UseLCcorrection = cms.untracked.bool( True ),
       EECorrNoiseMatrixG06 = cms.vdouble( 1.0, 0.71217, 0.47464, 0.34056, 0.26282, 0.20287, 0.17734, 0.16256, 0.15618, 0.14443 )
     ),
     doPrefitEB = cms.bool( False ),
     addPedestalUncertaintyEE = cms.double( 0.0 ),
     addPedestalUncertaintyEB = cms.double( 0.0 ),
     gainSwitchUseMaxSampleEB = cms.bool( True ),
     EEtimeNconst = cms.double( 31.8 ),
     EEamplitudeFitParameters = cms.vdouble( 1.89, 1.4 ),
     chi2ThreshEE_ = cms.double( 50.0 ),
     eePulseShape = cms.vdouble( 5.2E-5, -5.26E-5, 6.66E-5, 0.1168, 0.7575, 1.0, 0.8876, 0.6732, 0.4741, 0.3194 ),
     outOfTimeThresholdGain12pEB = cms.double( 5.0 ),
     gainSwitchUseMaxSampleEE = cms.bool( False ),
     mitigateBadSamplesEB = cms.bool( False ),
     outOfTimeThresholdGain12pEE = cms.double( 1000.0 ),
     ebPulseShape = cms.vdouble( 5.2E-5, -5.26E-5, 6.66E-5, 0.1168, 0.7575, 1.0, 0.8876, 0.6732, 0.4741, 0.3194 ),
     ampErrorCalculation = cms.bool( False ),
     mitigateBadSamplesEE = cms.bool( False ),
     amplitudeThresholdEB = cms.double( 10.0 ),
     kPoorRecoFlagEB = cms.bool( True ),
     amplitudeThresholdEE = cms.double( 10.0 ),
     EBtimeFitLimits_Lower = cms.double( 0.2 ),
     kPoorRecoFlagEE = cms.bool( False ),
     EEtimeFitLimits_Upper = cms.double( 1.4 ),
     outOfTimeThresholdGain61mEE = cms.double( 1000.0 ),
     EEtimeConstantTerm = cms.double( 1.0 ),
     EBtimeConstantTerm = cms.double( 0.6 ),
     chi2ThreshEB_ = cms.double( 65.0 ),
     outOfTimeThresholdGain61mEB = cms.double( 5.0 )
   )
 ),
)
# ECAL calibrated rechits (CPU branch): turns the uncalibrated rechits above
# into EB/EE rechit collections, with laser corrections enabled, dead-channel
# killing/recovery for trigger towers (recoverEBFE/recoverEEFE = True), and
# spike-cleaning thresholds in cleaningConfig.
process.hltEcalRecHit = SwitchProducerCUDA(
 cpu = cms.EDProducer( "EcalRecHitProducer",
   recoverEEVFE = cms.bool( False ),
   EErechitCollection = cms.string( "EcalRecHitsEE" ),
   recoverEBIsolatedChannels = cms.bool( False ),
   recoverEBVFE = cms.bool( False ),
   laserCorrection = cms.bool( True ),
   EBLaserMIN = cms.double( 0.5 ),
   killDeadChannels = cms.bool( True ),
   # channel-status DB codes excluded from reconstruction
   dbStatusToBeExcludedEB = cms.vint32( 14, 78, 142 ),
   EEuncalibRecHitCollection = cms.InputTag( 'hltEcalUncalibRecHit','EcalUncalibRecHitsEE' ),
   dbStatusToBeExcludedEE = cms.vint32( 14, 78, 142 ),
   EELaserMIN = cms.double( 0.5 ),
   ebFEToBeRecovered = cms.InputTag( 'hltEcalDetIdToBeRecovered','ebFE' ),
   # topological spike/anomalous-deposit cleaning thresholds (menu-tuned)
   cleaningConfig = cms.PSet(
     e6e2thresh = cms.double( 0.04 ),
     tightenCrack_e6e2_double = cms.double( 3.0 ),
     e4e1Threshold_endcap = cms.double( 0.3 ),
     tightenCrack_e4e1_single = cms.double( 3.0 ),
     tightenCrack_e1_double = cms.double( 2.0 ),
     cThreshold_barrel = cms.double( 4.0 ),
     e4e1Threshold_barrel = cms.double( 0.08 ),
     tightenCrack_e1_single = cms.double( 2.0 ),
     e4e1_b_barrel = cms.double( -0.024 ),
     e4e1_a_barrel = cms.double( 0.04 ),
     ignoreOutOfTimeThresh = cms.double( 1.0E9 ),
     cThreshold_endcap = cms.double( 15.0 ),
     e4e1_b_endcap = cms.double( -0.0125 ),
     e4e1_a_endcap = cms.double( 0.02 ),
     cThreshold_double = cms.double( 10.0 )
   ),
   logWarningEtThreshold_EE_FE = cms.double( 50.0 ),
   eeDetIdToBeRecovered = cms.InputTag( 'hltEcalDetIdToBeRecovered','eeDetId' ),
   recoverEBFE = cms.bool( True ),
   eeFEToBeRecovered = cms.InputTag( 'hltEcalDetIdToBeRecovered','eeFE' ),
   ebDetIdToBeRecovered = cms.InputTag( 'hltEcalDetIdToBeRecovered','ebDetId' ),
   singleChannelRecoveryThreshold = cms.double( 8.0 ),
   sum8ChannelRecoveryThreshold = cms.double( 0.0 ),
   bdtWeightFileNoCracks = cms.FileInPath( "RecoLocalCalo/EcalDeadChannelRecoveryAlgos/data/BDTWeights/bdtgAllRH_8GT700MeV_noCracks_ZskimData2017_v1.xml" ),
   bdtWeightFileCracks = cms.FileInPath( "RecoLocalCalo/EcalDeadChannelRecoveryAlgos/data/BDTWeights/bdtgAllRH_8GT700MeV_onlyCracks_ZskimData2017_v1.xml" ),
   ChannelStatusToBeExcluded = cms.vstring( ),
   EBrechitCollection = cms.string( "EcalRecHitsEB" ),
   triggerPrimitiveDigiCollection = cms.InputTag( 'hltEcalDigisLegacy','EcalTriggerPrimitives' ),
   recoverEEFE = cms.bool( True ),
   singleChannelRecoveryMethod = cms.string( "NeuralNetworks" ),
   EBLaserMAX = cms.double( 3.0 ),
   # mapping from DB channel-status flags to rechit quality flags
   flagsMapDBReco = cms.PSet(
     kGood = cms.vstring( 'kOk',
       'kDAC',
       'kNoLaser',
       'kNoisy' ),
     kNeighboursRecovered = cms.vstring( 'kFixedG0',
       'kNonRespondingIsolated',
       'kDeadVFE' ),
     kDead = cms.vstring( 'kNoDataNoTP' ),
     kNoisy = cms.vstring( 'kNNoisy',
       'kFixedG6',
       'kFixedG1' ),
     kTowerRecovered = cms.vstring( 'kDeadFE' )
   ),
   EBuncalibRecHitCollection = cms.InputTag( 'hltEcalUncalibRecHit','EcalUncalibRecHitsEB' ),
   algoRecover = cms.string( "EcalRecHitWorkerRecover" ),
   algo = cms.string( "EcalRecHitWorkerSimple" ),
   EELaserMAX = cms.double( 8.0 ),
   logWarningEtThreshold_EB_FE = cms.double( 50.0 ),
   recoverEEIsolatedChannels = cms.bool( False ),
   skipTimeCalib = cms.bool( True )
 ),
)
# HCAL barrel/endcap (HBHE) Phase-1 reconstruction (CPU branch): processes
# QIE11 digis only (processQIE8 = False) with the MAHI fit enabled
# (useMahi = True, useM2/useM3 = False).  All flag-setting options are off.
process.hltHbhereco = SwitchProducerCUDA(
 cpu = cms.EDProducer( "HBHEPhase1Reconstructor",
   digiLabelQIE8 = cms.InputTag( "hltHcalDigis" ),
   processQIE8 = cms.bool( False ),
   digiLabelQIE11 = cms.InputTag( "hltHcalDigis" ),
   processQIE11 = cms.bool( True ),
   tsFromDB = cms.bool( False ),
   recoParamsFromDB = cms.bool( True ),
   saveEffectivePedestal = cms.bool( True ),
   dropZSmarkedPassed = cms.bool( True ),
   makeRecHits = cms.bool( True ),
   saveInfos = cms.bool( False ),
   saveDroppedInfos = cms.bool( False ),
   use8ts = cms.bool( True ),
   sipmQTSShift = cms.int32( 0 ),
   sipmQNTStoSum = cms.int32( 3 ),
   # pulse-fit (MAHI / M2 / M3) configuration; values are menu-tuned
   algorithm = cms.PSet(
     ts4Thresh = cms.double( 0.0 ),
     meanTime = cms.double( 0.0 ),
     nnlsThresh = cms.double( 1.0E-11 ),
     nMaxItersMin = cms.int32( 50 ),
     timeSigmaSiPM = cms.double( 2.5 ),
     applyTimeSlew = cms.bool( True ),
     timeSlewParsType = cms.int32( 3 ),
     ts4Max = cms.vdouble( 100.0, 20000.0, 30000.0 ),
     samplesToAdd = cms.int32( 2 ),
     deltaChiSqThresh = cms.double( 0.001 ),
     applyTimeConstraint = cms.bool( False ),
     timeSigmaHPD = cms.double( 5.0 ),
     useMahi = cms.bool( True ),
     correctForPhaseContainment = cms.bool( True ),
     respCorrM3 = cms.double( 1.0 ),
     pulseJitter = cms.double( 1.0 ),
     applyPedConstraint = cms.bool( False ),
     fitTimes = cms.int32( 1 ),
     nMaxItersNNLS = cms.int32( 500 ),
     applyTimeSlewM3 = cms.bool( True ),
     meanPed = cms.double( 0.0 ),
     ts4Min = cms.double( 0.0 ),
     applyPulseJitter = cms.bool( False ),
     useM2 = cms.bool( False ),
     timeMin = cms.double( -12.5 ),
     useM3 = cms.bool( False ),
     chiSqSwitch = cms.double( -1.0 ),
     dynamicPed = cms.bool( False ),
     tdcTimeShift = cms.double( 0.0 ),
     correctionPhaseNS = cms.double( 6.0 ),
     firstSampleShift = cms.int32( 0 ),
     activeBXs = cms.vint32( -3, -2, -1, 0, 1, 2, 3, 4 ),
     ts4chi2 = cms.vdouble( 15.0, 15.0 ),
     timeMax = cms.double( 12.5 ),
     Class = cms.string( "SimpleHBHEPhase1Algo" ),
     calculateArrivalTime = cms.bool( False ),
     applyLegacyHBMCorrection = cms.bool( False )
   ),
   algoConfigClass = cms.string( "" ),
   setNegativeFlagsQIE8 = cms.bool( False ),
   setNegativeFlagsQIE11 = cms.bool( False ),
   setNoiseFlagsQIE8 = cms.bool( False ),
   setNoiseFlagsQIE11 = cms.bool( False ),
   setPulseShapeFlagsQIE8 = cms.bool( False ),
   setPulseShapeFlagsQIE11 = cms.bool( False ),
   setLegacyFlagsQIE8 = cms.bool( False ),
   setLegacyFlagsQIE11 = cms.bool( False ),
   # QIE8 noise-flag parameters (unused while setNoiseFlagsQIE8 is False)
   flagParametersQIE8 = cms.PSet(
     hitEnergyMinimum = cms.double( 1.0 ),
     pulseShapeParameterSets = cms.VPSet(
       cms.PSet( pulseShapeParameters = cms.vdouble( 0.0, 100.0, -50.0, 0.0, -15.0, 0.15 ) ),
       cms.PSet( pulseShapeParameters = cms.vdouble( 100.0, 2000.0, -50.0, 0.0, -5.0, 0.05 ) ),
       cms.PSet( pulseShapeParameters = cms.vdouble( 2000.0, 1000000.0, -50.0, 0.0, 95.0, 0.0 ) ),
       cms.PSet( pulseShapeParameters = cms.vdouble( -1000000.0, 1000000.0, 45.0, 0.1, 1000000.0, 0.0 ) )
     ),
     nominalPedestal = cms.double( 3.0 ),
     hitMultiplicityThreshold = cms.int32( 17 )
   ),
   flagParametersQIE11 = cms.PSet( ),
   # QIE8 pulse-shape-flag parameters (unused while setPulseShapeFlagsQIE8 is False)
   pulseShapeParametersQIE8 = cms.PSet(
     UseDualFit = cms.bool( True ),
     LinearCut = cms.vdouble( -3.0, -0.054, -0.054 ),
     TriangleIgnoreSlow = cms.bool( False ),
     TS4TS5LowerThreshold = cms.vdouble( 100.0, 120.0, 160.0, 200.0, 300.0, 500.0 ),
     LinearThreshold = cms.vdouble( 20.0, 100.0, 100000.0 ),
     RightSlopeSmallCut = cms.vdouble( 1.08, 1.16, 1.16 ),
     TS4TS5UpperThreshold = cms.vdouble( 70.0, 90.0, 100.0, 400.0 ),
     TS3TS4ChargeThreshold = cms.double( 70.0 ),
     R45PlusOneRange = cms.double( 0.2 ),
     TS4TS5LowerCut = cms.vdouble( -1.0, -0.7, -0.5, -0.4, -0.3, 0.1 ),
     RightSlopeThreshold = cms.vdouble( 250.0, 400.0, 100000.0 ),
     TS3TS4UpperChargeThreshold = cms.double( 20.0 ),
     MinimumChargeThreshold = cms.double( 20.0 ),
     RightSlopeCut = cms.vdouble( 5.0, 4.15, 4.15 ),
     RMS8MaxThreshold = cms.vdouble( 20.0, 100.0, 100000.0 ),
     MinimumTS4TS5Threshold = cms.double( 100.0 ),
     LeftSlopeThreshold = cms.vdouble( 250.0, 500.0, 100000.0 ),
     TS5TS6ChargeThreshold = cms.double( 70.0 ),
     TrianglePeakTS = cms.uint32( 10000 ),
     TS5TS6UpperChargeThreshold = cms.double( 20.0 ),
     RightSlopeSmallThreshold = cms.vdouble( 150.0, 200.0, 100000.0 ),
     RMS8MaxCut = cms.vdouble( -13.5, -11.5, -11.5 ),
     TS4TS5ChargeThreshold = cms.double( 70.0 ),
     R45MinusOneRange = cms.double( 0.2 ),
     LeftSlopeCut = cms.vdouble( 5.0, 2.55, 2.55 ),
     TS4TS5UpperCut = cms.vdouble( 1.0, 0.8, 0.75, 0.72 )
   ),
   pulseShapeParametersQIE11 = cms.PSet( )
 ),
)
# Pixel digis/clusters/rechits: CPU branches are EDAliases re-exporting the
# listed product types from the legacy (or SoA) producer modules.
process.hltSiPixelDigis = SwitchProducerCUDA(
 cpu = cms.EDAlias(
   hltSiPixelDigisLegacy = cms.VPSet(
     cms.PSet( type = cms.string( "DetIdedmEDCollection" ) ),
     cms.PSet( type = cms.string( "SiPixelRawDataErroredmDetSetVector" ) ),
     cms.PSet( type = cms.string( "PixelFEDChanneledmNewDetSetVector" ) )
   )
 ),
)
process.hltSiPixelClusters = SwitchProducerCUDA(
 cpu = cms.EDAlias(
   hltSiPixelClustersLegacy = cms.VPSet(
     cms.PSet( type = cms.string( "SiPixelClusteredmNewDetSetVector" ) )
   )
 ),
)
process.hltSiPixelRecHits = SwitchProducerCUDA(
 cpu = cms.EDAlias(
   hltSiPixelRecHitSoA = cms.VPSet(
     cms.PSet( type = cms.string( "SiPixelRecHitedmNewDetSetVector" ) ),
     cms.PSet( type = cms.string( "uintAsHostProduct" ) )
   )
 ),
)
# Patatrack pixel-track building (CPU branch of the CUDA producer, onGPU =
# False): cellular-automaton ntuplet finding from the SoA pixel rechits.
# Cut values are menu-tuned; trackQualityCuts holds the per-category
# (triplet/quadruplet) selection thresholds.
process.hltPixelTracksSoA = SwitchProducerCUDA(
 cpu = cms.EDProducer( "CAHitNtupletCUDA",
   onGPU = cms.bool( False ),
   pixelRecHitSrc = cms.InputTag( "hltSiPixelRecHitSoA" ),
   ptmin = cms.double( 0.899999976158 ),
   CAThetaCutBarrel = cms.double( 0.00200000009499 ),
   CAThetaCutForward = cms.double( 0.00300000002608 ),
   hardCurvCut = cms.double( 0.0328407224959 ),
   dcaCutInnerTriplet = cms.double( 0.15000000596 ),
   dcaCutOuterTriplet = cms.double( 0.25 ),
   earlyFishbone = cms.bool( True ),
   lateFishbone = cms.bool( False ),
   idealConditions = cms.bool( False ),
   fillStatistics = cms.bool( False ),
   minHitsPerNtuplet = cms.uint32( 3 ),
   maxNumberOfDoublets = cms.uint32( 524288 ),
   minHitsForSharingCut = cms.uint32( 10 ),
   includeJumpingForwardDoublets = cms.bool( True ),
   fitNas4 = cms.bool( False ),
   doClusterCut = cms.bool( True ),
   doZ0Cut = cms.bool( True ),
   doPtCut = cms.bool( True ),
   useRiemannFit = cms.bool( False ),
   doSharedHitCut = cms.bool( True ),
   dupPassThrough = cms.bool( False ),
   useSimpleTripletCleaner = cms.bool( True ),
   trackQualityCuts = cms.PSet(
     chi2MaxPt = cms.double( 10.0 ),
     tripletMaxTip = cms.double( 0.3 ),
     chi2Scale = cms.double( 8.0 ),
     quadrupletMaxTip = cms.double( 0.5 ),
     quadrupletMinPt = cms.double( 0.3 ),
     quadrupletMaxZip = cms.double( 12.0 ),
     tripletMaxZip = cms.double( 12.0 ),
     tripletMinPt = cms.double( 0.5 ),
     chi2Coeff = cms.vdouble( 0.9, 1.8 )
   )
 ),
)
# Patatrack pixel vertexing (CPU branch, onGPU = False): density-based
# clustering of the SoA pixel tracks (useDensity = True; DBSCAN and the
# iterative variant are disabled).
process.hltPixelVerticesSoA = SwitchProducerCUDA(
 cpu = cms.EDProducer( "PixelVertexProducerCUDA",
   onGPU = cms.bool( False ),
   oneKernel = cms.bool( True ),
   useDensity = cms.bool( True ),
   useDBSCAN = cms.bool( False ),
   useIterative = cms.bool( False ),
   minT = cms.int32( 2 ),
   eps = cms.double( 0.07 ),
   errmax = cms.double( 0.01 ),
   chi2max = cms.double( 9.0 ),
   PtMin = cms.double( 0.5 ),
   pixelTrackSrc = cms.InputTag( "hltPixelTracksSoA" )
 ),
)
# ------------------------------------------------------------------------------
# Output (stream) modules: each PoolOutputModule writes one output file with
# the event content listed in outputCommands, for events accepted by the paths
# in SelectEvents.  dataTier is "RAW" for all streams in this dump.
# ------------------------------------------------------------------------------
# PhysicsCommissioning stream: raw FED data + trigger results/summary.
process.hltOutputPhysicsCommissioning = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsCommissioning.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_Physics_v7',
  'HLT_Random_v3',
  'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
  'HLT_ZeroBias_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# PhysicsEndOfFill stream: same raw content for the unpaired/no-BPTX paths.
process.hltOutputPhysicsEndOfFill = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsEndOfFill.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_HIL1NotBptxORForPPRef_v2',
  'HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2',
  'HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# DQM stream: raw data plus a broad set of HLT reconstruction products for
# online data-quality monitoring.
process.hltOutputDQM = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputDQM.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_HIL1NotBptxORForPPRef_v2',
  'HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2',
  'HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2',
  'HLT_HIZeroBias_part0_v6',
  'HLT_HIZeroBias_part10_v6',
  'HLT_HIZeroBias_part11_v6',
  'HLT_HIZeroBias_part1_v6',
  'HLT_HIZeroBias_part2_v6',
  'HLT_HIZeroBias_part3_v6',
  'HLT_HIZeroBias_part4_v6',
  'HLT_HIZeroBias_part5_v6',
  'HLT_HIZeroBias_part6_v6',
  'HLT_HIZeroBias_part7_v6',
  'HLT_HIZeroBias_part8_v6',
  'HLT_HIZeroBias_part9_v6',
  'HLT_Physics_v7',
  'HLT_Random_v3',
  'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
  'HLT_ZeroBias_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltDeepCombinedSecondaryVertexBJetTagsCalo_*_*',
      'keep *_hltDeepCombinedSecondaryVertexBJetTagsPF_*_*',
      'keep *_hltEcalRecHit_*_*',
      'keep *_hltEgammaCandidates_*_*',
      'keep *_hltEgammaGsfElectrons_*_*',
      'keep *_hltFullIterativeTrackingMergedForRefPP_*_*',
      'keep *_hltHbhereco_*_*',
      'keep *_hltHfreco_*_*',
      'keep *_hltHoreco_*_*',
      'keep *_hltIter0HighPtTkMuPixelTracks_*_*',
      'keep *_hltIter0HighPtTkMuTrackSelectionHighPurity_*_*',
      'keep *_hltIter2HighPtTkMuMerged_*_*',
      'keep *_hltIter2HighPtTkMuTrackSelectionHighPurity_*_*',
      'keep *_hltIter2Merged_*_*',
      'keep *_hltL3NoFiltersNoVtxMuonCandidates_*_*',
      'keep *_hltMergedTracks_*_*',
      'keep *_hltOnlineBeamSpot_*_*',
      'keep *_hltPFJetForBtag_*_*',
      'keep *_hltPixelTracks_*_*',
      'keep *_hltPixelVertices_*_*',
      'keep *_hltSelector8CentralJetsL1FastJet_*_*',
      'keep *_hltSiPixelClustersCache_*_*',
      'keep *_hltSiPixelClusters_*_*',
      'keep *_hltSiStripRawToClustersFacility_*_*',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# DQMOnlineBeamspot stream: tracks/vertices needed for the online beamspot fit.
process.hltOutputDQMOnlineBeamspot = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputDQMOnlineBeamspot.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_HIHT80_Beamspot_ppRef5TeV_v3',
  'HLT_ZeroBias_Beamspot_v4' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep edmTriggerResults_*_*_*',
      'keep recoTracks_hltPFMuonMerging_*_*',
      'keep recoVertexs_hltVerticesPFFilter_*_*' )
)
# DQMCalibration stream: ECAL/HCAL calibration raw data for monitoring.
process.hltOutputDQMCalibration = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputDQMCalibration.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_EcalCalibration_v4',
  'HLT_HcalCalibration_v5' ) ),
    outputCommands = cms.untracked.vstring( 'drop *_hlt*_*_*',
      'keep *_hltEcalCalibrationRaw_*_*',
      'keep *_hltHcalCalibrationRaw_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# RPCMON stream: muon-detector digis/segments for RPC monitoring.
process.hltOutputRPCMON = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputRPCMON.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'AlCa_HIRPCMuonNormalisation_v1' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltCscSegments_*_*',
      'keep *_hltDt4DSegments_*_*',
      'keep *_hltGtStage2Digis_*_*',
      'keep *_hltMuonCSCDigis_MuonCSCStripDigi_*',
      'keep *_hltMuonCSCDigis_MuonCSCWireDigi_*',
      'keep *_hltMuonDTDigis_*_*',
      'keep *_hltMuonRPCDigis_*_*',
      'keep *_hltRpcRecHits_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# Calibration stream: ECAL + HCAL calibration raw data.
process.hltOutputCalibration = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputCalibration.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_EcalCalibration_v4',
  'HLT_HcalCalibration_v5' ) ),
    outputCommands = cms.untracked.vstring( 'drop *_hlt*_*_*',
      'keep *_hltEcalCalibrationRaw_*_*',
      'keep *_hltHcalCalibrationRaw_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# EcalCalibration stream: ECAL calibration raw data only.
process.hltOutputEcalCalibration = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputEcalCalibration.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_EcalCalibration_v4' ) ),
    outputCommands = cms.untracked.vstring( 'drop *_hlt*_*_*',
      'keep *_hltEcalCalibrationRaw_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# ALCAPHISYM stream: ECAL phi-symmetry calibration products.
process.hltOutputALCAPHISYM = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputALCAPHISYM.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'AlCa_EcalPhiSym_v9' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltEcalPhiSymFilter_*_*',
      'keep *_hltGtStage2Digis_*_*',
      'keep edmTriggerResults_*_*_*' )
)
# ALCALumiPixelCountsExpress stream: pixel-cluster counts (random triggers).
process.hltOutputALCALumiPixelCountsExpress = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputALCALumiPixelCountsExpress.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'AlCa_LumiPixelsCounts_Random_v2' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltAlcaPixelClusterCounts_*_*',
      'keep edmTriggerResults_*_*_*' )
)
# ALCALumiPixelCountsPrompt stream: pixel-cluster counts (zero-bias triggers).
process.hltOutputALCALumiPixelCountsPrompt = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputALCALumiPixelCountsPrompt.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'AlCa_LumiPixelsCounts_ZeroBias_v2' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltAlcaPixelClusterCounts_*_*',
      'keep edmTriggerResults_*_*_*' )
)
# ALCAP0 stream: pi0/eta ECAL calibration digis and ES rechits.
process.hltOutputALCAP0 = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputALCAP0.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'AlCa_HIEcalEtaEBonly_v1',
  'AlCa_HIEcalEtaEEonly_v1',
  'AlCa_HIEcalPi0EBonly_v1',
  'AlCa_HIEcalPi0EEonly_v1' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltAlCaEtaEBRechitsToDigisLowPU_*_*',
      'keep *_hltAlCaEtaEBRechitsToDigis_*_*',
      'keep *_hltAlCaEtaEERechitsToDigisLowPU_*_*',
      'keep *_hltAlCaEtaEERechitsToDigis_*_*',
      'keep *_hltAlCaEtaRecHitsFilterEEonlyRegionalLowPU_etaEcalRecHitsES_*',
      'keep *_hltAlCaEtaRecHitsFilterEEonlyRegional_etaEcalRecHitsES_*',
      'keep *_hltAlCaPi0EBRechitsToDigisLowPU_*_*',
      'keep *_hltAlCaPi0EBRechitsToDigis_*_*',
      'keep *_hltAlCaPi0EERechitsToDigisLowPU_*_*',
      'keep *_hltAlCaPi0EERechitsToDigis_*_*',
      'keep *_hltAlCaPi0RecHitsFilterEEonlyRegionalLowPU_pi0EcalRecHitsES_*',
      'keep *_hltAlCaPi0RecHitsFilterEEonlyRegional_pi0EcalRecHitsES_*',
      'keep *_hltGtStage2Digis_*_*',
      'keep edmTriggerResults_*_*_*' )
)
# Express stream: raw data for the express reconstruction paths.
process.hltOutputExpress = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputExpress.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_Physics_v7',
  'HLT_Random_v3',
  'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
  'HLT_ZeroBias_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# ExpressAlignment stream: raw data for the beamspot/alignment paths.
process.hltOutputExpressAlignment = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputExpressAlignment.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_HIHT80_Beamspot_ppRef5TeV_v3',
  'HLT_ZeroBias_Beamspot_v4' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# NanoDST stream: minimal content (selected FEDs + L1 digis + results).
process.hltOutputNanoDST = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputNanoDST.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'DST_Physics_v7' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltFEDSelector_*_*',
      'keep *_hltGtStage2Digis_*_*',
      'keep edmTriggerResults_*_*_*' )
)
# PhysicsHIZeroBias1-4 streams: raw data, two zero-bias partitions per stream.
process.hltOutputPhysicsHIZeroBias1 = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsHIZeroBias1.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_HIZeroBias_part0_v6',
  'HLT_HIZeroBias_part1_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputPhysicsHIZeroBias2 = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsHIZeroBias2.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_HIZeroBias_part2_v6',
  'HLT_HIZeroBias_part3_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputPhysicsHIZeroBias3 = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsHIZeroBias3.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_HIZeroBias_part4_v6',
  'HLT_HIZeroBias_part5_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputPhysicsHIZeroBias4 = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsHIZeroBias4.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
      filterName = cms.untracked.string( "" ),
      dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_HIZeroBias_part6_v6',
  'HLT_HIZeroBias_part7_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputPhysicsHIZeroBias5 = cms.OutputModule( "PoolOutputModule",
fileName = cms.untracked.string( "outputPhysicsHIZeroBias5.root" ),
fastCloning = cms.untracked.bool( False ),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string( "" ),
dataTier = cms.untracked.string( "RAW" )
),
SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_HIZeroBias_part8_v6',
'HLT_HIZeroBias_part9_v6' ) ),
outputCommands = cms.untracked.vstring( 'drop *',
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*',
'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
'keep edmTriggerResults_*_*_*',
'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputPhysicsHIZeroBias6 = cms.OutputModule( "PoolOutputModule",
fileName = cms.untracked.string( "outputPhysicsHIZeroBias6.root" ),
fastCloning = cms.untracked.bool( False ),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string( "" ),
dataTier = cms.untracked.string( "RAW" )
),
SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_HIZeroBias_part10_v6',
'HLT_HIZeroBias_part11_v6' ) ),
outputCommands = cms.untracked.vstring( 'drop *',
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*',
'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
'keep edmTriggerResults_*_*_*',
'keep triggerTriggerEvent_*_*_*' )
)
# --- Sequence definitions -------------------------------------------------
# Reusable cms.Sequence building blocks referenced by the trigger paths
# below; each chains producer/filter modules declared earlier in this
# configuration.
# L1 unpacking, online beam spot and the common begin-of-path sequence.
process.HLTL1UnpackerSequence = cms.Sequence( process.hltGtStage2Digis + process.hltGtStage2ObjectMap )
process.HLTBeamSpot = cms.Sequence( process.hltScalersRawToDigi + process.hltOnlineMetaDataDigis + process.hltOnlineBeamSpot )
process.HLTBeginSequence = cms.Sequence( process.hltTriggerType + process.HLTL1UnpackerSequence + process.HLTBeamSpot )
# ECAL/HCAL local reconstruction and the AK4 calo jets used ahead of PF.
process.HLTDoFullUnpackingEgammaEcalWithoutPreshowerSequence = cms.Sequence( process.hltEcalDigisLegacy + process.hltEcalDigis + process.hltEcalUncalibRecHit + process.hltEcalDetIdToBeRecovered + process.hltEcalRecHit )
process.HLTDoLocalHcalSequence = cms.Sequence( process.hltHcalDigis + process.hltHbhereco + process.hltHfprereco + process.hltHfreco + process.hltHoreco )
process.HLTDoCaloSequencePF = cms.Sequence( process.HLTDoFullUnpackingEgammaEcalWithoutPreshowerSequence + process.HLTDoLocalHcalSequence + process.hltTowerMakerForAll )
process.HLTAK4CaloJetsPrePFRecoSequence = cms.Sequence( process.HLTDoCaloSequencePF + process.hltAK4CaloJetsPF )
process.HLTPreAK4PFJetsRecoSequence = cms.Sequence( process.HLTAK4CaloJetsPrePFRecoSequence + process.hltAK4CaloJetsPFEt5 )
# Muon local reconstruction (DT/CSC/RPC/GEM) and the L2 muon reconstruction.
process.HLTMuonLocalRecoSequence = cms.Sequence( process.hltMuonDTDigis + process.hltDt1DRecHits + process.hltDt4DSegments + process.hltMuonCSCDigis + process.hltCsc2DRecHits + process.hltCscSegments + process.hltMuonRPCDigis + process.hltRpcRecHits + process.hltMuonGEMDigis + process.hltGemRecHits + process.hltGemSegments )
process.HLTL2muonrecoNocandSequence = cms.Sequence( process.HLTMuonLocalRecoSequence + process.hltL2OfflineMuonSeeds + process.hltL2MuonSeeds + process.hltL2Muons )
process.HLTL2muonrecoSequence = cms.Sequence( process.HLTL2muonrecoNocandSequence + process.hltL2MuonCandidates )
# Tracker local reconstruction (pixel and strip).
process.HLTDoLocalPixelSequence = cms.Sequence( process.hltSiPixelDigisLegacy + process.hltSiPixelDigis + process.hltSiPixelClustersLegacy + process.hltSiPixelClusters + process.hltSiPixelClustersCache + process.hltSiPixelRecHitSoA + process.hltSiPixelRecHits )
process.HLTDoLocalStripSequence = cms.Sequence( process.hltSiStripExcludedFEDListProducer + process.hltSiStripRawToClustersFacility + process.hltSiStripClusters )
# IterL3 muon tracking: outside-in (OI) from L2, inside-out (IO) from L2, and
# inside-out from L1, merged into the final L3 muon candidates.
process.HLTIterL3OImuonTkCandidateSequence = cms.Sequence( process.hltIterL3OISeedsFromL2Muons + process.hltIterL3OITrackCandidates + process.hltIterL3OIMuCtfWithMaterialTracks + process.hltIterL3OIMuonTrackCutClassifier + process.hltIterL3OIMuonTrackSelectionHighPurity + process.hltL3MuonsIterL3OI )
process.HLTIterL3MuonRecoPixelTracksSequence = cms.Sequence( process.hltIterL3MuonPixelTracksFilter + process.hltIterL3MuonPixelTracksFitter + process.hltIterL3MuonPixelTracksTrackingRegions + process.hltIterL3MuonPixelLayerQuadruplets + process.hltIterL3MuonPixelTracksHitDoublets + process.hltIterL3MuonPixelTracksHitQuadruplets + process.hltIterL3MuonPixelTracks )
process.HLTIterL3MuonRecopixelvertexingSequence = cms.Sequence( process.HLTIterL3MuonRecoPixelTracksSequence + process.hltIterL3MuonPixelVertices + process.hltIterL3MuonTrimmedPixelVertices )
process.HLTIterativeTrackingIteration0ForIterL3Muon = cms.Sequence( process.hltIter0IterL3MuonPixelSeedsFromPixelTracks + process.hltIter0IterL3MuonCkfTrackCandidates + process.hltIter0IterL3MuonCtfWithMaterialTracks + process.hltIter0IterL3MuonTrackCutClassifier + process.hltIter0IterL3MuonTrackSelectionHighPurity )
process.HLTIterativeTrackingIteration2ForIterL3Muon = cms.Sequence( process.hltIter2IterL3MuonClustersRefRemoval + process.hltIter2IterL3MuonMaskedMeasurementTrackerEvent + process.hltIter2IterL3MuonPixelLayerTriplets + process.hltIter2IterL3MuonPixelClusterCheck + process.hltIter2IterL3MuonPixelHitDoublets + process.hltIter2IterL3MuonPixelHitTriplets + process.hltIter2IterL3MuonPixelSeeds + process.hltIter2IterL3MuonCkfTrackCandidates + process.hltIter2IterL3MuonCtfWithMaterialTracks + process.hltIter2IterL3MuonTrackCutClassifier + process.hltIter2IterL3MuonTrackSelectionHighPurity )
process.HLTIterativeTrackingIteration3ForIterL3Muon = cms.Sequence( process.hltIter3IterL3MuonClustersRefRemoval + process.hltIter3IterL3MuonMaskedMeasurementTrackerEvent + process.hltIter3IterL3MuonPixelLayerPairs + process.hltIter3IterL3MuonL2Candidates + process.hltIter3IterL3MuonTrackingRegions + process.hltIter3IterL3MuonPixelClusterCheck + process.hltIter3IterL3MuonPixelHitDoublets + process.hltIter3IterL3MuonPixelSeeds + process.hltIter3IterL3MuonCkfTrackCandidates + process.hltIter3IterL3MuonCtfWithMaterialTracks + process.hltIter3IterL3MuonTrackCutClassifier + process.hltIter3IterL3MuonTrackSelectionHighPurity )
process.HLTIterativeTrackingIter023ForIterL3Muon = cms.Sequence( process.HLTIterativeTrackingIteration0ForIterL3Muon + process.HLTIterativeTrackingIteration2ForIterL3Muon + process.hltIter2IterL3MuonMerged + process.HLTIterativeTrackingIteration3ForIterL3Muon + process.hltIter3IterL3MuonMerged )
process.HLTIterL3IOmuonTkCandidateSequence = cms.Sequence( process.HLTIterL3MuonRecopixelvertexingSequence + process.HLTIterativeTrackingIter023ForIterL3Muon + process.hltL3MuonsIterL3IO )
process.HLTIterL3OIAndIOFromL2muonTkCandidateSequence = cms.Sequence( process.HLTIterL3OImuonTkCandidateSequence + process.hltIterL3OIL3MuonsLinksCombination + process.hltIterL3OIL3Muons + process.hltIterL3OIL3MuonCandidates + process.hltL2SelectorForL3IO + process.HLTIterL3IOmuonTkCandidateSequence + process.hltIterL3MuonsFromL2LinksCombination )
process.HLTRecoPixelTracksSequenceForIterL3FromL1Muon = cms.Sequence( process.hltIterL3FromL1MuonPixelTracksTrackingRegions + process.hltIterL3FromL1MuonPixelLayerQuadruplets + process.hltIterL3FromL1MuonPixelTracksHitDoublets + process.hltIterL3FromL1MuonPixelTracksHitQuadruplets + process.hltIterL3FromL1MuonPixelTracks )
process.HLTRecopixelvertexingSequenceForIterL3FromL1Muon = cms.Sequence( process.HLTRecoPixelTracksSequenceForIterL3FromL1Muon + process.hltIterL3FromL1MuonPixelVertices + process.hltIterL3FromL1MuonTrimmedPixelVertices )
process.HLTIterativeTrackingIteration0ForIterL3FromL1Muon = cms.Sequence( process.hltIter0IterL3FromL1MuonPixelSeedsFromPixelTracks + process.hltIter0IterL3FromL1MuonCkfTrackCandidates + process.hltIter0IterL3FromL1MuonCtfWithMaterialTracks + process.hltIter0IterL3FromL1MuonTrackCutClassifier + process.hltIter0IterL3FromL1MuonTrackSelectionHighPurity )
process.HLTIterativeTrackingIteration2ForIterL3FromL1Muon = cms.Sequence( process.hltIter2IterL3FromL1MuonClustersRefRemoval + process.hltIter2IterL3FromL1MuonMaskedMeasurementTrackerEvent + process.hltIter2IterL3FromL1MuonPixelLayerTriplets + process.hltIter2IterL3FromL1MuonPixelClusterCheck + process.hltIter2IterL3FromL1MuonPixelHitDoublets + process.hltIter2IterL3FromL1MuonPixelHitTriplets + process.hltIter2IterL3FromL1MuonPixelSeeds + process.hltIter2IterL3FromL1MuonCkfTrackCandidates + process.hltIter2IterL3FromL1MuonCtfWithMaterialTracks + process.hltIter2IterL3FromL1MuonTrackCutClassifier + process.hltIter2IterL3FromL1MuonTrackSelectionHighPurity )
process.HLTIterativeTrackingIteration3ForIterL3FromL1Muon = cms.Sequence( process.hltIter3IterL3FromL1MuonClustersRefRemoval + process.hltIter3IterL3FromL1MuonMaskedMeasurementTrackerEvent + process.hltIter3IterL3FromL1MuonPixelLayerPairs + process.hltIter3IterL3FromL1MuonTrackingRegions + process.hltIter3IterL3FromL1MuonPixelClusterCheck + process.hltIter3IterL3FromL1MuonPixelHitDoublets + process.hltIter3IterL3FromL1MuonPixelSeeds + process.hltIter3IterL3FromL1MuonCkfTrackCandidates + process.hltIter3IterL3FromL1MuonCtfWithMaterialTracks + process.hltIter3IterL3FromL1MuonTrackCutClassifier + process.hltIter3IterL3FromL1MuonTrackSelectionHighPurity )
process.HLTIterativeTrackingIter023ForIterL3FromL1Muon = cms.Sequence( process.HLTIterativeTrackingIteration0ForIterL3FromL1Muon + process.HLTIterativeTrackingIteration2ForIterL3FromL1Muon + process.hltIter2IterL3FromL1MuonMerged + process.HLTIterativeTrackingIteration3ForIterL3FromL1Muon + process.hltIter3IterL3FromL1MuonMerged )
process.HLTIterL3IOmuonFromL1TkCandidateSequence = cms.Sequence( process.HLTRecopixelvertexingSequenceForIterL3FromL1Muon + process.HLTIterativeTrackingIter023ForIterL3FromL1Muon )
process.HLTIterL3muonTkCandidateSequence = cms.Sequence( process.HLTDoLocalPixelSequence + process.HLTDoLocalStripSequence + process.HLTIterL3OIAndIOFromL2muonTkCandidateSequence + process.hltL1MuonsPt0 + process.HLTIterL3IOmuonFromL1TkCandidateSequence )
process.HLTL3muonrecoNocandSequence = cms.Sequence( process.HLTIterL3muonTkCandidateSequence + process.hltIterL3MuonMerged + process.hltIterL3MuonAndMuonFromL1Merged + process.hltIterL3GlbMuon + process.hltIterL3MuonsNoID + process.hltIterL3Muons + process.hltL3MuonsIterL3Links + process.hltIterL3MuonTracks )
process.HLTL3muonrecoSequence = cms.Sequence( process.HLTL3muonrecoNocandSequence + process.hltIterL3MuonCandidates )
# Pixel tracks/vertices and iteration-0 tracking feeding the beam-spot paths.
process.HLTRecoPixelTracksSequence = cms.Sequence( process.hltPixelTracksTrackingRegions + process.hltPixelTracksSoA + process.hltPixelTracks )
process.HLTRecopixelvertexingSequence = cms.Sequence( process.hltPixelTracksFitter + process.hltPixelTracksFilter + process.HLTRecoPixelTracksSequence + process.hltPixelVerticesSoA + process.hltPixelVertices + process.hltTrimmedPixelVertices )
process.HLTIterativeTrackingIteration0 = cms.Sequence( process.hltIter0PFLowPixelSeedsFromPixelTracks + process.hltIter0PFlowCkfTrackCandidates + process.hltIter0PFlowCtfWithMaterialTracks + process.hltIter0PFlowTrackCutClassifier + process.hltMergedTracks )
process.HLTIterativeTrackingIter02 = cms.Sequence( process.HLTIterativeTrackingIteration0 )
process.HLTTrackingForBeamSpot = cms.Sequence( process.HLTPreAK4PFJetsRecoSequence + process.HLTL2muonrecoSequence + process.HLTL3muonrecoSequence + process.HLTDoLocalPixelSequence + process.HLTRecopixelvertexingSequence + process.HLTDoLocalStripSequence + process.HLTIterativeTrackingIter02 + process.hltPFMuonMerging )
process.HLTEndSequence = cms.Sequence( process.hltBoolEnd )
# Begin-sequence variants for L1-fat events, random and calibration triggers.
process.HLTBeginSequenceL1Fat = cms.Sequence( process.hltTriggerType + process.hltL1EventNumberL1Fat + process.HLTL1UnpackerSequence + process.HLTBeamSpot )
process.HLTBeginSequenceRandom = cms.Sequence( process.hltRandomEventsFilter + process.hltGtStage2Digis )
# Calo jet reconstruction with jet-energy correctors applied.
process.HLTDoCaloSequence = cms.Sequence( process.HLTDoFullUnpackingEgammaEcalWithoutPreshowerSequence + process.HLTDoLocalHcalSequence + process.hltTowerMakerForAll )
process.HLTAK4CaloJetsReconstructionSequence = cms.Sequence( process.HLTDoCaloSequence + process.hltAK4CaloJets + process.hltAK4CaloJetsIDPassed )
process.HLTAK4CaloCorrectorProducersSequence = cms.Sequence( process.hltAK4CaloFastJetCorrector + process.hltAK4CaloRelativeCorrector + process.hltAK4CaloAbsoluteCorrector + process.hltAK4CaloResidualCorrector + process.hltAK4CaloCorrector )
process.HLTAK4CaloJetsCorrectionSequence = cms.Sequence( process.hltFixedGridRhoFastjetAllCalo + process.HLTAK4CaloCorrectorProducersSequence + process.hltAK4CaloJetsCorrected + process.hltAK4CaloJetsCorrectedIDPassed )
process.HLTAK4CaloJetsSequence = cms.Sequence( process.HLTAK4CaloJetsReconstructionSequence + process.HLTAK4CaloJetsCorrectionSequence )
# ECAL with preshower, used by the AlCa pi0/eta and phi-symmetry paths.
process.HLTPreshowerSequence = cms.Sequence( process.hltEcalPreshowerDigis + process.hltEcalPreshowerRecHit )
process.HLTDoFullUnpackingEgammaEcalSequence = cms.Sequence( process.HLTDoFullUnpackingEgammaEcalWithoutPreshowerSequence + process.HLTPreshowerSequence )
process.HLTBeginSequenceCalibration = cms.Sequence( process.hltCalibrationEventsFilter + process.hltGtStage2Digis )
# --- Trigger paths --------------------------------------------------------
# Most paths follow the same shape: a begin-sequence variant, an L1 seed
# filter (hltL1s*), a prescale module (hltPre*), optional reconstruction and
# filtering, and finally process.HLTEndSequence.
process.HLTriggerFirstPath = cms.Path( process.hltGetConditions + process.hltGetRaw + process.hltPSetMap + process.hltBoolFalse )
# Status paths split events by whether the GPU-status filter accepts them.
process.Status_OnCPU = cms.Path( process.statusOnGPU + ~process.statusOnGPUFilter )
process.Status_OnGPU = cms.Path( process.statusOnGPU + process.statusOnGPUFilter )
process.HLT_ZeroBias_Beamspot_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreZeroBiasBeamspot + process.HLTTrackingForBeamSpot + process.hltVerticesPF + process.hltVerticesPFSelector + process.hltVerticesPFFilter + process.HLTEndSequence )
process.HLT_Physics_v7 = cms.Path( process.HLTBeginSequenceL1Fat + process.hltPrePhysics + process.HLTEndSequence )
process.DST_Physics_v7 = cms.Path( process.HLTBeginSequence + process.hltPreDSTPhysics + process.HLTEndSequence )
process.HLT_Random_v3 = cms.Path( process.HLTBeginSequenceRandom + process.hltPreRandom + process.HLTEndSequence )
process.HLT_ZeroBias_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreZeroBias + process.HLTEndSequence )
process.HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2 = cms.Path( process.HLTBeginSequence + process.hltL1sL1UnpairedBunchBptxMinus + process.hltPreHIL1UnpairedBunchBptxMinusForPPRef + process.HLTEndSequence )
process.HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2 = cms.Path( process.HLTBeginSequence + process.hltL1sL1UnpairedBunchBptxPlus + process.hltPreHIL1UnpairedBunchBptxPlusForPPRef + process.HLTEndSequence )
process.HLT_HIL1NotBptxORForPPRef_v2 = cms.Path( process.HLTBeginSequence + process.hltL1sNotBptxOR + process.hltPreHIL1NotBptxORForPPRef + process.HLTEndSequence )
process.HLT_HIHT80_Beamspot_ppRef5TeV_v3 = cms.Path( process.HLTBeginSequence + process.hltL1sHTTForBeamSpotPP5TeV + process.hltPreHIHT80BeamspotppRef5TeV + process.HLTAK4CaloJetsSequence + process.hltHtMht + process.hltHT80 + process.HLTTrackingForBeamSpot + process.hltVerticesPF + process.hltVerticesPFSelector + process.hltVerticesPFFilter + process.HLTEndSequence )
# Twelve HLT_HIZeroBias part paths, identical apart from their prescalers;
# pairs of them feed the PhysicsHIZeroBias1..6 output streams.
process.HLT_HIZeroBias_part0_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart0 + process.HLTEndSequence )
process.HLT_HIZeroBias_part1_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart1 + process.HLTEndSequence )
process.HLT_HIZeroBias_part2_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart2 + process.HLTEndSequence )
process.HLT_HIZeroBias_part3_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart3 + process.HLTEndSequence )
process.HLT_HIZeroBias_part4_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart4 + process.HLTEndSequence )
process.HLT_HIZeroBias_part5_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart5 + process.HLTEndSequence )
process.HLT_HIZeroBias_part6_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart6 + process.HLTEndSequence )
process.HLT_HIZeroBias_part7_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart7 + process.HLTEndSequence )
process.HLT_HIZeroBias_part8_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart8 + process.HLTEndSequence )
process.HLT_HIZeroBias_part9_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart9 + process.HLTEndSequence )
process.HLT_HIZeroBias_part10_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart10 + process.HLTEndSequence )
process.HLT_HIZeroBias_part11_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart11 + process.HLTEndSequence )
# Calibration / AlCa paths.
process.AlCa_HIEcalPi0EBonly_v1 = cms.Path( process.HLTBeginSequence + process.hltL1sAlCaHIEcalPi0Eta + process.hltPreAlCaHIEcalPi0EBonly + process.HLTDoFullUnpackingEgammaEcalSequence + process.hltSimple3x3Clusters + process.hltAlCaPi0RecHitsFilterEBonlyRegional + process.hltAlCaPi0EBUncalibrator + process.hltAlCaPi0EBRechitsToDigis + process.HLTEndSequence )
process.AlCa_HIEcalPi0EEonly_v1 = cms.Path( process.HLTBeginSequence + process.hltL1sAlCaHIEcalPi0Eta + process.hltPreAlCaHIEcalPi0EEonly + process.HLTDoFullUnpackingEgammaEcalSequence + process.hltSimple3x3Clusters + process.hltAlCaPi0RecHitsFilterEEonlyRegional + process.hltAlCaPi0EEUncalibrator + process.hltAlCaPi0EERechitsToDigis + process.HLTEndSequence )
process.AlCa_HIEcalEtaEBonly_v1 = cms.Path( process.HLTBeginSequence + process.hltL1sAlCaHIEcalPi0Eta + process.hltPreAlCaHIEcalEtaEBonly + process.HLTDoFullUnpackingEgammaEcalSequence + process.hltSimple3x3Clusters + process.hltAlCaEtaRecHitsFilterEBonlyRegional + process.hltAlCaEtaEBUncalibrator + process.hltAlCaEtaEBRechitsToDigis + process.HLTEndSequence )
process.AlCa_HIEcalEtaEEonly_v1 = cms.Path( process.HLTBeginSequence + process.hltL1sAlCaHIEcalPi0Eta + process.hltPreAlCaHIEcalEtaEEonly + process.HLTDoFullUnpackingEgammaEcalSequence + process.hltSimple3x3Clusters + process.hltAlCaEtaRecHitsFilterEEonlyRegional + process.hltAlCaEtaEEUncalibrator + process.hltAlCaEtaEERechitsToDigis + process.HLTEndSequence )
process.HLT_EcalCalibration_v4 = cms.Path( process.HLTBeginSequenceCalibration + process.hltPreEcalCalibration + process.hltEcalCalibrationRaw + process.HLTEndSequence )
process.HLT_HcalCalibration_v5 = cms.Path( process.HLTBeginSequenceCalibration + process.hltPreHcalCalibration + process.hltHcalCalibTypeFilter + process.hltHcalCalibrationRaw + process.HLTEndSequence )
process.AlCa_EcalPhiSym_v9 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBiasIorAlwaysTrueIorIsolatedBunch + process.hltPreAlCaEcalPhiSym + process.HLTDoFullUnpackingEgammaEcalSequence + process.hltEcalPhiSymFilter + process.HLTEndSequence )
process.HLT_ZeroBias_FirstCollisionAfterAbortGap_v5 = cms.Path( process.HLTBeginSequence + process.hltL1sL1ZeroBiasFirstCollisionAfterAbortGap + process.hltPreZeroBiasFirstCollisionAfterAbortGap + process.HLTEndSequence )
process.AlCa_HIRPCMuonNormalisation_v1 = cms.Path( process.HLTBeginSequence + process.hltL1sSingleMu7to30 + process.hltPreAlCaHIRPCMuonNormalisation + process.hltHIRPCMuonNormaL1Filtered0 + process.HLTMuonLocalRecoSequence + process.HLTEndSequence )
process.AlCa_LumiPixelsCounts_Random_v2 = cms.Path( process.HLTBeginSequenceRandom + process.hltPreAlCaLumiPixelsCountsRandom + process.HLTBeamSpot + process.hltPixelTrackerHVOn + process.HLTDoLocalPixelSequence + process.hltAlcaPixelClusterCounts + process.HLTEndSequence )
process.AlCa_LumiPixelsCounts_ZeroBias_v2 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreAlCaLumiPixelsCountsZeroBias + process.hltPixelTrackerHVOn + process.HLTDoLocalPixelSequence + process.hltAlcaPixelClusterCounts + process.HLTEndSequence )
# Final path: builds the trigger-summary (AOD and RAW) products.
process.HLTriggerFinalPath = cms.Path( process.hltGtStage2Digis + process.hltScalersRawToDigi + process.hltFEDSelector + process.hltTriggerSummaryAOD + process.hltTriggerSummaryRAW + process.hltBoolFalse )
# --- EndPaths for the analyzer and physics streams ------------------------
# Each endpath re-runs the GT Stage-2 unpacker, applies the stream's prescale
# module and, where one is defined above, writes the stream's output module.
process.HLTAnalyzerEndpath = cms.EndPath( process.hltGtStage2Digis + process.hltPreHLTAnalyzerEndpath + process.hltL1TGlobalSummary + process.hltTrigReport )
process.PhysicsCommissioningOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsCommissioningOutput + process.hltOutputPhysicsCommissioning )
process.PhysicsEGammaOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsEGammaOutput )
process.PhysicsEndOfFillOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsEndOfFillOutput + process.hltOutputPhysicsEndOfFill )
process.PhysicsHadronsTausOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHadronsTausOutput )
process.PhysicsMuonsOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsMuonsOutput )
process.PhysicsTracksOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsTracksOutput )
process.PhysicsForwardOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsForwardOutput )
# load the DQMStore and DQMRootOutputModule
# (the DQMRootOutputModule writes DQM histograms, not event data, to DQMIO.root)
process.load( "DQMServices.Core.DQMStore_cfi" )
process.dqmOutput = cms.OutputModule("DQMRootOutputModule",
    fileName = cms.untracked.string("DQMIO.root")
)
# --- EndPaths for the DQM, calibration, express and HIZeroBias streams ----
# Same pattern as the physics endpaths above; the DQM endpath additionally
# runs process.dqmOutput to write the DQMIO file.
process.DQMOutput = cms.EndPath( process.dqmOutput + process.hltGtStage2Digis + process.hltPreDQMOutput + process.hltPreDQMOutputSmart + process.hltOutputDQM )
process.DQMOnlineBeamspotOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreDQMOnlineBeamspotOutput + process.hltPreDQMOnlineBeamspotOutputSmart + process.hltOutputDQMOnlineBeamspot )
process.DQMCalibrationOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreDQMCalibrationOutput + process.hltOutputDQMCalibration )
process.DQMEventDisplayOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreDQMEventDisplayOutput + process.hltPreDQMEventDisplayOutputSmart )
process.HLTMonitorOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreHLTMonitorOutput + process.hltPreHLTMonitorOutputSmart )
process.RPCMONOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreRPCMONOutput + process.hltOutputRPCMON )
process.CalibrationOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreCalibrationOutput + process.hltOutputCalibration )
process.EcalCalibrationOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreEcalCalibrationOutput + process.hltOutputEcalCalibration )
process.ALCAPHISYMOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreALCAPHISYMOutput + process.hltOutputALCAPHISYM )
process.ALCALumiPixelCountsExpressOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreALCALumiPixelCountsExpressOutput + process.hltOutputALCALumiPixelCountsExpress )
process.ALCALumiPixelCountsPromptOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreALCALumiPixelCountsPromptOutput + process.hltOutputALCALumiPixelCountsPrompt )
process.ALCAP0Output = cms.EndPath( process.hltGtStage2Digis + process.hltPreALCAP0Output + process.hltOutputALCAP0 )
process.ExpressOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreExpressOutput + process.hltPreExpressOutputSmart + process.hltOutputExpress )
process.ExpressAlignmentOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreExpressAlignmentOutput + process.hltPreExpressAlignmentOutputSmart + process.hltOutputExpressAlignment )
process.NanoDSTOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreNanoDSTOutput + process.hltOutputNanoDST )
process.PhysicsHIZeroBias1Output = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHIZeroBias1Output + process.hltOutputPhysicsHIZeroBias1 )
process.PhysicsHIZeroBias2Output = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHIZeroBias2Output + process.hltOutputPhysicsHIZeroBias2 )
process.PhysicsHIZeroBias3Output = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHIZeroBias3Output + process.hltOutputPhysicsHIZeroBias3 )
process.PhysicsHIZeroBias4Output = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHIZeroBias4Output + process.hltOutputPhysicsHIZeroBias4 )
process.PhysicsHIZeroBias5Output = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHIZeroBias5Output + process.hltOutputPhysicsHIZeroBias5 )
process.PhysicsHIZeroBias6Output = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHIZeroBias6Output + process.hltOutputPhysicsHIZeroBias6 )
# Full execution schedule: every path and endpath defined above, in the order
# in which the framework runs them.
process.schedule = cms.Schedule( *(process.HLTriggerFirstPath, process.Status_OnCPU, process.Status_OnGPU, process.HLT_ZeroBias_Beamspot_v4, process.HLT_Physics_v7, process.DST_Physics_v7, process.HLT_Random_v3, process.HLT_ZeroBias_v6, process.HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2, process.HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2, process.HLT_HIL1NotBptxORForPPRef_v2, process.HLT_HIHT80_Beamspot_ppRef5TeV_v3, process.HLT_HIZeroBias_part0_v6, process.HLT_HIZeroBias_part1_v6, process.HLT_HIZeroBias_part2_v6, process.HLT_HIZeroBias_part3_v6, process.HLT_HIZeroBias_part4_v6, process.HLT_HIZeroBias_part5_v6, process.HLT_HIZeroBias_part6_v6, process.HLT_HIZeroBias_part7_v6, process.HLT_HIZeroBias_part8_v6, process.HLT_HIZeroBias_part9_v6, process.HLT_HIZeroBias_part10_v6, process.HLT_HIZeroBias_part11_v6, process.AlCa_HIEcalPi0EBonly_v1, process.AlCa_HIEcalPi0EEonly_v1, process.AlCa_HIEcalEtaEBonly_v1, process.AlCa_HIEcalEtaEEonly_v1, process.HLT_EcalCalibration_v4, process.HLT_HcalCalibration_v5, process.AlCa_EcalPhiSym_v9, process.HLT_ZeroBias_FirstCollisionAfterAbortGap_v5, process.AlCa_HIRPCMuonNormalisation_v1, process.AlCa_LumiPixelsCounts_Random_v2, process.AlCa_LumiPixelsCounts_ZeroBias_v2, process.HLTriggerFinalPath, process.HLTAnalyzerEndpath, process.PhysicsCommissioningOutput, process.PhysicsEGammaOutput, process.PhysicsEndOfFillOutput, process.PhysicsHadronsTausOutput, process.PhysicsMuonsOutput, process.PhysicsTracksOutput, process.PhysicsForwardOutput, process.DQMOutput, process.DQMOnlineBeamspotOutput, process.DQMCalibrationOutput, process.DQMEventDisplayOutput, process.HLTMonitorOutput, process.RPCMONOutput, process.CalibrationOutput, process.EcalCalibrationOutput, process.ALCAPHISYMOutput, process.ALCALumiPixelCountsExpressOutput, process.ALCALumiPixelCountsPromptOutput, process.ALCAP0Output, process.ExpressOutput, process.ExpressAlignmentOutput, process.NanoDSTOutput, process.PhysicsHIZeroBias1Output, process.PhysicsHIZeroBias2Output, 
process.PhysicsHIZeroBias3Output, process.PhysicsHIZeroBias4Output, process.PhysicsHIZeroBias5Output, process.PhysicsHIZeroBias6Output, ))
# source module (EDM inputs)
# PoolSource reading the local RelVal RAW data file; all input products kept.
process.source = cms.Source( "PoolSource",
    fileNames = cms.untracked.vstring(
        'file:RelVal_Raw_PRef_DATA.root',
    ),
    inputCommands = cms.untracked.vstring(
        'keep *'
    )
)
# limit the number of events to be processed
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32( 100 )
)
# enable TrigReport, TimeReport and MultiThreading
# NOTE(review): numberOfStreams = 0 presumably lets the framework derive the
# stream count from numberOfThreads — confirm against the framework docs.
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool( True ),
    numberOfThreads = cms.untracked.uint32( 4 ),
    numberOfStreams = cms.untracked.uint32( 0 ),
)
# override the GlobalTag, connection string and pfnPrefix
# (only when a GlobalTag ESSource has already been attached to the process)
if 'GlobalTag' in process.__dict__:
    from Configuration.AlCa.GlobalTag import GlobalTag as customiseGlobalTag
    process.GlobalTag = customiseGlobalTag(process.GlobalTag, globaltag = 'auto:run3_hlt_PRef')
# show summaries from trigger analysers used at HLT
# (registering these empty PSets enables each category in the MessageLogger)
if 'MessageLogger' in process.__dict__:
    process.MessageLogger.TriggerSummaryProducerAOD = cms.untracked.PSet()
    process.MessageLogger.L1GtTrigReport = cms.untracked.PSet()
    process.MessageLogger.L1TGlobalSummary = cms.untracked.PSet()
    process.MessageLogger.HLTrigReport = cms.untracked.PSet()
    process.MessageLogger.FastReport = cms.untracked.PSet()
    process.MessageLogger.ThroughputService = cms.untracked.PSet()
# add specific customizations
# _customInfo collects the menu type, data/MC global tags and input files
# that customizeHLTforAll uses to adapt the dumped menu to this job
# (here: PRef menu, real data, 100 events from the RelVal RAW file).
_customInfo = {}
_customInfo['menuType' ]= "PRef"
_customInfo['globalTags']= {}
_customInfo['globalTags'][True ] = "auto:run3_hlt_PRef"
_customInfo['globalTags'][False] = "auto:run3_mc_PRef"
_customInfo['inputFiles']={}
_customInfo['inputFiles'][True] = "file:RelVal_Raw_PRef_DATA.root"
_customInfo['inputFiles'][False] = "file:RelVal_Raw_PRef_MC.root"
_customInfo['maxEvents' ]= 100
_customInfo['globalTag' ]= "auto:run3_hlt_PRef"
_customInfo['inputFile' ]= ['file:RelVal_Raw_PRef_DATA.root']
_customInfo['realData' ]= True
from HLTrigger.Configuration.customizeHLTforALL import customizeHLTforAll
process = customizeHLTforAll(process,"PRef",_customInfo)
# CMSSW-release-specific adaptations of the menu.
from HLTrigger.Configuration.customizeHLTforCMSSW import customizeHLTforCMSSW
process = customizeHLTforCMSSW(process,"PRef")
# Eras-based customisations
from HLTrigger.Configuration.Eras import modifyHLTforEras
modifyHLTforEras(process)
| 48.105665 | 2,125 | 0.707365 |
import FWCore.ParameterSet.Config as cms
from HeterogeneousCore.CUDACore.SwitchProducerCUDA import SwitchProducerCUDA
# --- start of a new HLT menu dump: create the cms.Process for the PRef menu ---
process = cms.Process( "HLTPRef" )
# confDB path/version of the HLT menu this configuration was dumped from
process.HLTConfigVersion = cms.PSet(
  tableName = cms.string('/dev/CMSSW_12_3_0/PRef/V23')
)
# Storage-manager transfer rules: for each output stream, the destination
# systems to use in 'default', 'test' and 'emulator' running modes.
process.transferSystem = cms.PSet(
  destinations = cms.vstring( 'Tier0',
    'DQM',
    'ECAL',
    'EventDisplay',
    'Lustre',
    'None' ),
  transferModes = cms.vstring( 'default',
    'test',
    'emulator' ),
  streamA = cms.PSet(
    default = cms.vstring( 'Tier0' ),
    test = cms.vstring( 'Lustre' ),
    emulator = cms.vstring( 'Lustre' )
  ),
  streamCalibration = cms.PSet(
    default = cms.vstring( 'Tier0' ),
    test = cms.vstring( 'Lustre' ),
    emulator = cms.vstring( 'None' )
  ),
  streamDQM = cms.PSet(
    default = cms.vstring( 'DQM' ),
    test = cms.vstring( 'DQM',
      'Lustre' ),
    emulator = cms.vstring( 'None' )
  ),
  streamDQMCalibration = cms.PSet(
    default = cms.vstring( 'DQM' ),
    test = cms.vstring( 'DQM',
      'Lustre' ),
    emulator = cms.vstring( 'None' )
  ),
  streamEcalCalibration = cms.PSet(
    default = cms.vstring( 'ECAL' ),
    test = cms.vstring( 'ECAL' ),
    emulator = cms.vstring( 'None' )
  ),
  streamEventDisplay = cms.PSet(
    default = cms.vstring( 'EventDisplay',
      'Tier0' ),
    test = cms.vstring( 'EventDisplay',
      'Lustre' ),
    emulator = cms.vstring( 'None' )
  ),
  streamExpressCosmics = cms.PSet(
    default = cms.vstring( 'Tier0' ),
    test = cms.vstring( 'Lustre' ),
    emulator = cms.vstring( 'Lustre' )
  ),
  streamNanoDST = cms.PSet(
    default = cms.vstring( 'Tier0' ),
    test = cms.vstring( 'Lustre' ),
    emulator = cms.vstring( 'None' )
  ),
  streamRPCMON = cms.PSet(
    default = cms.vstring( 'Tier0' ),
    test = cms.vstring( 'Lustre' ),
    emulator = cms.vstring( 'None' )
  ),
  streamTrackerCalibration = cms.PSet(
    default = cms.vstring( 'Tier0' ),
    test = cms.vstring( 'Lustre' ),
    emulator = cms.vstring( 'None' )
  ),
  # fallback rule applied to streams without an explicit entry
  default = cms.PSet(
    default = cms.vstring( 'Tier0' ),
    test = cms.vstring( 'Lustre' ),
    emulator = cms.vstring( 'Lustre' ),
    streamLookArea = cms.PSet(  )
  ),
  streamLookArea = cms.PSet(
    default = cms.vstring( 'DQM' ),
    test = cms.vstring( 'DQM',
      'Lustre' ),
    emulator = cms.vstring( 'None' )
  )
)
# ---------------------------------------------------------------------------
# Machine-generated HLT tracking parameter sets (CkfBaseTrajectoryFilter /
# Ckf(Grouped)TrajectoryBuilder configurations). Values are tuned constants
# from the confDB menu dump — do not edit by hand.
# ---------------------------------------------------------------------------
# Trajectory filter for the initial CKF iteration of heavy-ion tracking.
process.HLTPSetInitialCkfTrajectoryFilterForHI = cms.PSet(
  minimumNumberOfHits = cms.int32( 6 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.9 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CKF trajectory builder for iteration 0 of iterative tracking (IT).
process.HLTIter0PSetTrajectoryBuilderIT = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter0PSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# CKF trajectory builder for iteration 4 of iterative tracking.
process.HLTIter4PSetTrajectoryBuilderIT = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter4PSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  maxCand = cms.int32( 1 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# In-out trajectory filter base for the TOB/TEC tracking step.
process.HLTPSetTobTecStepInOutTrajectoryFilterBase = cms.PSet(
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.1 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF trajectory builder variant for iteration 0 (supports rebuild).
process.HLTIter0GroupedCkfTrajectoryBuilderIT = cms.PSet(
  keepOriginalIfRebuildFails = cms.bool( False ),
  lockHits = cms.bool( True ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter0PSetTrajectoryFilterIT" ) ),
  maxCand = cms.int32( 2 ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9" ),
  intermediateCleaning = cms.bool( True ),
  bestHitOnly = cms.bool( True ),
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( False ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter0PSetTrajectoryFilterIT" ) ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# SiStrip cluster charge cut thresholds (value in electrons; -1 disables).
process.HLTSiStripClusterChargeCutTiny = cms.PSet(  value = cms.double( 800.0 ) )
# Generic trajectory filter used in iterative tracking.
process.HLTPSetTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Trajectory filter for iteration 4 (tighter: 6 hits min, no lost hits).
process.HLTIter4PSetTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 6 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CKF trajectory builder for electron track reconstruction.
process.HLTPSetTrajectoryBuilderForElectrons = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 90.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "hltESPBwdElectronPropagator" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetTrajectoryFilterForElectrons" ) ),
  propagatorAlong = cms.string( "hltESPFwdElectronPropagator" ),
  maxCand = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( False ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# Track-quality weights for primary-vertex cluster comparison (inner tracker).
process.HLTPSetPvClusterComparerForIT = cms.PSet(
  track_chi2_max = cms.double( 20.0 ),
  track_pt_max = cms.double( 20.0 ),
  track_prob_min = cms.double( -1.0 ),
  track_pt_min = cms.double( 1.0 )
)
# Trajectory filter for the mixed tracking step.
process.HLTPSetMixedStepTrajectoryFilter = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.1 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.4 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CKF trajectory builder for the initial heavy-ion tracking iteration.
process.HLTPSetInitialCkfTrajectoryBuilderForHI = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOppositeForHI" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetInitialCkfTrajectoryFilterForHI" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForHI" ),
  maxCand = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  intermediateCleaning = cms.bool( False ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# Muon-specific CKF trajectory builder (adds proximity propagator and
# seed-layer / delta-eta/phi options on top of the standard builder).
process.HLTPSetMuonCkfTrajectoryBuilder = cms.PSet(
  rescaleErrorIfFail = cms.double( 1.0 ),
  ComponentType = cms.string( "MuonCkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetMuonCkfTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  maxCand = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( False ),
  propagatorProximity = cms.string( "SteppingHelixPropagatorAny" ),
  updator = cms.string( "hltESPKFUpdator" ),
  deltaEta = cms.double( -1.0 ),
  useSeedLayer = cms.bool( False ),
  deltaPhi = cms.double( -1.0 ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for iteration 0 of high-pT tracker-muon tracking.
process.HLTIter0HighPtTkMuPSetTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Vertex-cluster comparer weights used for b-tagging vertex selection.
process.HLTPSetPvClusterComparerForBTag = cms.PSet(
  track_chi2_max = cms.double( 20.0 ),
  track_pt_max = cms.double( 20.0 ),
  track_prob_min = cms.double( -1.0 ),
  track_pt_min = cms.double( 0.1 )
)
# Seed creator from consecutive hit triplets.
process.HLTSeedFromConsecutiveHitsTripletOnlyCreator = cms.PSet(
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  SeedMomentumForBOFF = cms.double( 5.0 ),
  propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
  forceKinematicWithRegionDirection = cms.bool( False ),
  magneticField = cms.string( "ParabolicMf" ),
  OriginTransverseErrorMultiplier = cms.double( 1.0 ),
  ComponentName = cms.string( "SeedFromConsecutiveHitsTripletOnlyCreator" ),
  MinOneOverPtError = cms.double( 1.0 )
)
# Grouped CKF trajectory builder for iteration 2.
process.HLTIter2GroupedCkfTrajectoryBuilderIT = cms.PSet(
  keepOriginalIfRebuildFails = cms.bool( False ),
  lockHits = cms.bool( True ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter2PSetTrajectoryFilterIT" ) ),
  maxCand = cms.int32( 2 ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
  intermediateCleaning = cms.bool( True ),
  bestHitOnly = cms.bool( True ),
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( False ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter2PSetTrajectoryFilterIT" ) ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# CKF trajectory builder for iteration 3.
process.HLTIter3PSetTrajectoryBuilderIT = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTIter3PSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  maxCand = cms.int32( 1 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# Tight SiStrip cluster charge cut (electrons).
process.HLTSiStripClusterChargeCutTight = cms.PSet(  value = cms.double( 1945.0 ) )
# 3-hit CKF trajectory filter (unbounded hit count: maxNumberOfHits = -1).
process.HLTPSetCkf3HitTrajectoryFilter = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.9 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( -1 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Base trajectory filter for the detached tracking step (uses loose charge cut).
process.HLTPSetDetachedStepTrajectoryFilterBase = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 2 ),
  minPt = cms.double( 0.075 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Muon tracking region builder: defines the eta/phi/z region around
# hltL2Muons:UpdatedAtVtx candidates used to seed L3 tracking.
process.HLTPSetMuonTrackingRegionBuilder8356 = cms.PSet(
  Rescale_Dz = cms.double( 3.0 ),
  Pt_fixed = cms.bool( False ),
  Eta_fixed = cms.bool( False ),
  Eta_min = cms.double( 0.1 ),
  DeltaZ = cms.double( 15.9 ),
  maxRegions = cms.int32( 2 ),
  EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
  UseVertex = cms.bool( False ),
  Z_fixed = cms.bool( True ),
  PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
  PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
  Rescale_phi = cms.double( 3.0 ),
  DeltaEta = cms.double( 0.2 ),
  precise = cms.bool( True ),
  OnDemand = cms.int32( -1 ),
  EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
  MeasurementTrackerName = cms.InputTag( "hltESPMeasurementTracker" ),
  vertexCollection = cms.InputTag( "pixelVertices" ),
  Pt_min = cms.double( 1.5 ),
  beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
  Phi_fixed = cms.bool( False ),
  DeltaR = cms.double( 0.2 ),
  input = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
  DeltaPhi = cms.double( 0.2 ),
  Phi_min = cms.double( 0.1 ),
  Rescale_eta = cms.double( 3.0 )
)
# Detached-step CKF trajectory filter for heavy-ion tracking.
process.HLTPSetDetachedCkfTrajectoryFilterForHI = cms.PSet(
  minimumNumberOfHits = cms.int32( 6 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 0.701 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Trajectory filter for iteration 3 (no lost hits allowed).
process.HLTIter3PSetTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Trajectory filter for the jet-core tracking step.
process.HLTPSetJetCoreStepTrajectoryFilter = cms.PSet(
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.1 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Trajectory filter for iteration 2 (seedExtension 1, strict CCC lost-hit cut).
process.HLTIter2PSetTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 1 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CKF trajectory builder for muon-track J/psi reconstruction.
process.HLTPSetMuTrackJpsiTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetMuTrackJpsiTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  maxCand = cms.int32( 1 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# CKF trajectory builder for GSF electrons (looser chi2 estimator).
process.HLTPSetTrajectoryBuilderForGsfElectrons = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 90.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "hltESPBwdElectronPropagator" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetTrajectoryFilterForElectrons" ) ),
  propagatorAlong = cms.string( "hltESPFwdElectronPropagator" ),
  maxCand = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator2000" ),
  intermediateCleaning = cms.bool( False ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# Disabled SiStrip cluster charge cut (-1 = no cut).
process.HLTSiStripClusterChargeCutNone = cms.PSet(  value = cms.double( -1.0 ) )
# Base trajectory filter for the TOB/TEC tracking step.
process.HLTPSetTobTecStepTrajectoryFilterBase = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.1 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Trajectory filter used by the muon CKF builder (5 hits min, pT > 0.9).
process.HLTPSetMuonCkfTrajectoryFilter = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.9 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( -1 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Trajectory filter for b-jet regional tracking (hard cap of 8 hits).
process.HLTPSetbJetRegionalTrajectoryFilter = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 1.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 8 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Composite wrapper around the detached-step base filter.
process.HLTPSetDetachedStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CompositeTrajectoryFilter" ),
  filters = cms.VPSet(
    cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedStepTrajectoryFilterBase" ) )
  )
)
# Trajectory filter for iteration 1.
process.HLTIter1PSetTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 0.2 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Detached-step HI trajectory filter restricted to pT > 8 GeV.
process.HLTPSetDetachedCkfTrajectoryFilterForHIGlobalPt8 = cms.PSet(
  minimumNumberOfHits = cms.int32( 6 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 8.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 0.701 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF trajectory builder for the mixed tracking step.
process.HLTPSetMixedStepTrajectoryBuilder = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialForMixedStepOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetMixedStepTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForMixedStep" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeTightMeasurementEstimator16" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetMixedStepTrajectoryFilter" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Base trajectory filter for the mixed tracking step.
process.HLTPSetMixedStepTrajectoryFilterBase = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.05 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Generic CKF trajectory filter (5 hits min, pT > 0.9, unbounded hit count).
process.HLTPSetCkfTrajectoryFilter = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.9 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( -1 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Seed creator building seeds from pixel proto-tracks.
process.HLTSeedFromProtoTracks = cms.PSet(
  TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
  SeedMomentumForBOFF = cms.double( 5.0 ),
  propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
  forceKinematicWithRegionDirection = cms.bool( False ),
  magneticField = cms.string( "ParabolicMf" ),
  OriginTransverseErrorMultiplier = cms.double( 1.0 ),
  ComponentName = cms.string( "SeedFromConsecutiveHitsCreator" ),
  MinOneOverPtError = cms.double( 1.0 )
)
# Base trajectory filter for the initial tracking step (uses loose charge cut).
process.HLTPSetInitialStepTrajectoryFilterBase = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 2 ),
  minPt = cms.double( 0.2 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Iteration-2 CKF trajectory builder (iterative tracking, "IT" variant):
# delegates quality cuts to HLTIter2PSetTrajectoryFilterIT via refToPSet_.
process.HLTIter2PSetTrajectoryBuilderIT = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter2PSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for the muon+track J/psi path: high minPt (10.0) and a
# hard cap of 8 hits per trajectory.
process.HLTPSetMuTrackJpsiTrajectoryFilter = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 10.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 8 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# SeedFromConsecutiveHitsCreator settings ("IT" variant): identical to
# HLTSeedFromProtoTracks above except for the track-angle TTRH builder.
process.HLTSeedFromConsecutiveHitsCreatorIT = cms.PSet(
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  SeedMomentumForBOFF = cms.double( 5.0 ),
  propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
  forceKinematicWithRegionDirection = cms.bool( False ),
  magneticField = cms.string( "ParabolicMf" ),
  OriginTransverseErrorMultiplier = cms.double( 1.0 ),
  ComponentName = cms.string( "SeedFromConsecutiveHitsCreator" ),
  MinOneOverPtError = cms.double( 1.0 )
)
# Trajectory filter used for L3 (presumably L3 muon reconstruction -- name
# only; TODO confirm against the referencing builder): minPt 0.5, effectively
# unbounded hit count (1000000000).
process.HLTPSetTrajectoryFilterL3 = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.5 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 1000000000 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF trajectory builder for the "detached" tracking step; both the
# in-out and default filters point at HLTPSetDetachedStepTrajectoryFilter.
process.HLTPSetDetachedStepTrajectoryBuilder = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedStepTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedStepTrajectoryFilter" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Pixel-pair CKF trajectory filter for heavy-ion (HI) tracking, global pT > 8
# variant: minPt 8.0, at least 6 hits.
process.HLTPSetPixelPairCkfTrajectoryFilterForHIGlobalPt8 = cms.PSet(
  minimumNumberOfHits = cms.int32( 6 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 8.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 100 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Iteration-0 trajectory filter ("IT" variant): the strictest CCC setting in
# this group (maxCCCLostHits 0), minPt 0.3.
process.HLTIter0PSetTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Iteration-2 trajectory filter for high-pT tracker muons: allows up to 3
# consecutive lost hits (vs. 1 in most of the neighboring filters).
process.HLTIter2HighPtTkMuPSetTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 3 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Trajectory filter for the muon+track J/psi efficiency path: minPt 1.0 and a
# hard cap of 9 hits (compare the non-"Eff" variant, which uses minPt 10.0
# and 8 hits).
process.HLTPSetMuTrackJpsiEffTrajectoryFilter = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 1.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 9 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF builder for HI pixel-pair tracking, global pT > 8 variant; uses
# the HI-specific propagators/estimator and the matching Pt8 filter above.
process.HLTPSetPixelPairCkfTrajectoryBuilderForHIGlobalPt8 = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOppositeForHI" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairCkfTrajectoryFilterForHIGlobalPt8" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForHI" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9ForHI" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairCkfTrajectoryFilterForHIGlobalPt8" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Base trajectory-filter parameters for the pixel-pair tracking step; applies
# the Loose strip-charge cut with maxCCCLostHits 2, minPt 0.1.
process.HLTPSetPixelPairStepTrajectoryFilterBase = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 2 ),
  minPt = cms.double( 0.1 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Trajectory filter for the low-pT tracking step: very low minPt (0.075) and
# maxCCCLostHits 1, with the Loose strip-charge cut.
process.HLTPSetLowPtStepTrajectoryFilter = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 1 ),
  minPt = cms.double( 0.075 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# SeedFromConsecutiveHitsCreator settings with the full-material propagator;
# note magneticField is an empty string here (default field -- presumably the
# framework default; TODO confirm), unlike the ParabolicMf variants above.
process.HLTSeedFromConsecutiveHitsCreator = cms.PSet(
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  SeedMomentumForBOFF = cms.double( 5.0 ),
  propagator = cms.string( "PropagatorWithMaterial" ),
  forceKinematicWithRegionDirection = cms.bool( False ),
  magneticField = cms.string( "" ),
  OriginTransverseErrorMultiplier = cms.double( 1.0 ),
  ComponentName = cms.string( "SeedFromConsecutiveHitsCreator" ),
  MinOneOverPtError = cms.double( 1.0 )
)
# Grouped CKF builder for HI pixel-pair tracking (standard variant); same
# structure as the GlobalPt8 builder but pointing at the standard HI filter.
process.HLTPSetPixelPairCkfTrajectoryBuilderForHI = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOppositeForHI" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairCkfTrajectoryFilterForHI" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForHI" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9ForHI" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairCkfTrajectoryFilterForHI" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Grouped CKF builder for HI detached tracking; looper reconstruction is
# effectively disabled here (maxDPhi/maxPt both 0.0).
process.HLTPSetDetachedCkfTrajectoryBuilderForHI = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 0.0 ),
  maxPtForLooperReconstruction = cms.double( 0.0 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOppositeForHI" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedCkfTrajectoryFilterForHI" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForHI" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedCkfTrajectoryFilterForHI" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Iteration-1 CKF trajectory builder ("IT" variant); mirrors the iteration-2
# builder above but references HLTIter1PSetTrajectoryFilterIT.
process.HLTIter1PSetTrajectoryBuilderIT = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter1PSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# Grouped CKF builder for HI detached tracking, global pT > 8 variant;
# identical in structure to the standard HI detached builder above except for
# the GlobalPt8 filter references.
process.HLTPSetDetachedCkfTrajectoryBuilderForHIGlobalPt8 = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 0.0 ),
  maxPtForLooperReconstruction = cms.double( 0.0 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOppositeForHI" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedCkfTrajectoryFilterForHIGlobalPt8" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForHI" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedCkfTrajectoryFilterForHIGlobalPt8" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Strip cluster-charge cut threshold used for HI tracking (units per CMSSW
# convention -- TODO confirm).
process.HLTSiStripClusterChargeCutForHI = cms.PSet( value = cms.double( 2069.0 ) )
# Grouped CKF builder for the low-pT tracking step (pp menu); maxCand 4 and
# alwaysUseInvalidHits True, referencing HLTPSetLowPtStepTrajectoryFilter.
process.HLTPSetLowPtStepTrajectoryBuilder = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtStepTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 4 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtStepTrajectoryFilter" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# CKF trajectory builder for the muon+track J/psi efficiency path; single
# candidate (maxCand 1), full-material propagators, references the Eff filter.
process.HLTPSetMuTrackJpsiEffTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMuTrackJpsiEffTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  maxCand = cms.int32( 1 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for electron tracking: minPt 2.0, unbounded hits (-1),
# and minHitsMinPt -1 (pT cut applied regardless of hit count, presumably --
# TODO confirm against CkfBaseTrajectoryFilter docs).
process.HLTPSetTrajectoryFilterForElectrons = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 2.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( -1 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( -1 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF builder for the jet-core tracking step; note the much larger
# candidate budget (maxCand 50) compared with the other builders here.
process.HLTPSetJetCoreStepTrajectoryBuilder = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetJetCoreStepTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 50 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetJetCoreStepTrajectoryFilter" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Track-selection cuts for the primary-vertex cluster comparer (chi2 cut is
# effectively disabled at 9999999.0; pT clamped to [2.5, 10.0]).
process.HLTPSetPvClusterComparer = cms.PSet(
  track_chi2_max = cms.double( 9999999.0 ),
  track_pt_max = cms.double( 10.0 ),
  track_prob_min = cms.double( -1.0 ),
  track_pt_min = cms.double( 2.5 )
)
# Iteration-0 CKF trajectory builder for high-pT tracker muons; wider search
# (maxCand 4, alwaysUseInvalidHits True) than the iter1/iter2 builders above.
process.HLTIter0HighPtTkMuPSetTrajectoryBuilderIT = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter0HighPtTkMuPSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  maxCand = cms.int32( 4 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# Base trajectory-filter parameters for the pixel-less tracking step:
# requires 4 hits minimum and tolerates no lost hits (maxLostHits 0).
process.HLTPSetPixelLessStepTrajectoryFilterBase = cms.PSet(
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.05 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Iteration-1 grouped CKF builder ("IT" variant); uses the same iter1 filter
# for both the default and in-out filter slots.
process.HLTIter1GroupedCkfTrajectoryBuilderIT = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter1PSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 5.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter1PSetTrajectoryFilterIT" ) )
)
# Muon-specific CKF trajectory builder ("SeedHit" variant): carries extra
# muon-only knobs (useSeedLayer, rescaleErrorIfFail, deltaEta/deltaPhi, and a
# SteppingHelix proximity propagator).
process.HLTPSetMuonCkfTrajectoryBuilderSeedHit = cms.PSet(
  rescaleErrorIfFail = cms.double( 1.0 ),
  ComponentType = cms.string( "MuonCkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMuonCkfTrajectoryFilter" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  maxCand = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( False ),
  propagatorProximity = cms.string( "SteppingHelixPropagatorAny" ),
  updator = cms.string( "hltESPKFUpdator" ),
  deltaEta = cms.double( -1.0 ),
  useSeedLayer = cms.bool( True ),
  deltaPhi = cms.double( -1.0 ),
  seedAs5DHit = cms.bool( False )
)
# Pixel-pair CKF trajectory filter for HI tracking (standard variant):
# minPt 1.0, at least 6 hits; compare the GlobalPt8 variant's minPt 8.0.
process.HLTPSetPixelPairCkfTrajectoryFilterForHI = cms.PSet(
  minimumNumberOfHits = cms.int32( 6 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 1.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 100 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF builder for the initial tracking step; note foundHitBonus 10.0,
# keepOriginalIfRebuildFails True and minNrOfHitsForRebuild 1, which differ
# from most other builders in this file.
process.HLTPSetInitialStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 3 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( True ),
  estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 1 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for the initial tracking step: minPt 0.2, strict
# maxCCCLostHits 0, Loose strip-charge cut.
process.HLTPSetInitialStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 3 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.2 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF builder for the low-pT quadruplet tracking step (maxCand 4,
# dedicated low-pT-quad chi2/charge estimator).
process.HLTPSetLowPtQuadStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtQuadStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtQuadStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 4 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPLowPtQuadStepChi2ChargeMeasurementEstimator9" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for the low-pT quadruplet step: very low minPt (0.075),
# strict maxCCCLostHits 0, Loose strip-charge cut.
process.HLTPSetLowPtQuadStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 3 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.075 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF builder for the high-pT triplet tracking step (maxCand 3,
# dedicated high-pT-triplet chi2/charge estimator).
process.HLTPSetHighPtTripletStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetHighPtTripletStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetHighPtTripletStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 3 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPHighPtTripletStepChi2ChargeMeasurementEstimator30" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for the high-pT triplet step: the only filter in this
# chunk with a nonzero seedPairPenalty (5); minPt 0.2, maxCCCLostHits 0.
process.HLTPSetHighPtTripletStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 3 ),
  seedPairPenalty = cms.int32( 5 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.2 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetLowPtTripletStepTrajectoryBuilder = cms.PSet(
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
bestHitOnly = cms.bool( True ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtTripletStepTrajectoryFilter" ) ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtTripletStepTrajectoryFilter" ) ),
useSameTrajFilter = cms.bool( True ),
maxCand = cms.int32( 4 ),
intermediateCleaning = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 ),
foundHitBonus = cms.double( 10.0 ),
lockHits = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
updator = cms.string( "hltESPKFUpdator" ),
alwaysUseInvalidHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
keepOriginalIfRebuildFails = cms.bool( False ),
estimator = cms.string( "hltESPLowPtTripletStepChi2ChargeMeasurementEstimator9" ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetLowPtTripletStepTrajectoryFilter = cms.PSet(
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
minimumNumberOfHits = cms.int32( 3 ),
seedPairPenalty = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
minPt = cms.double( 0.075 ),
nSigmaMinPt = cms.double( 5.0 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHits = cms.int32( 999 ),
maxConsecLostHits = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 100 ),
maxLostHitsFraction = cms.double( 0.1 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedExtension = cms.int32( 0 ),
strictSeedExtension = cms.bool( False ),
pixelSeedExtension = cms.bool( False ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
maxCCCLostHits = cms.int32( 0 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF trajectory builder for the detached-quad tracking step;
# trajectory filters are shared via refToPSet_ (useSameTrajFilter=True).
process.HLTPSetDetachedQuadStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedQuadStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedQuadStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 3 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPDetachedQuadStepChi2ChargeMeasurementEstimator9" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter parameters for the detached-quad step.
process.HLTPSetDetachedQuadStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 3 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.075 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF trajectory builder for the detached-triplet tracking step
# (same shape as the quad-step builder; only the estimator/filter names differ).
process.HLTPSetDetachedTripletStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedTripletStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedTripletStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 3 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPDetachedTripletStepChi2ChargeMeasurementEstimator9" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter parameters for the detached-triplet step.
process.HLTPSetDetachedTripletStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 3 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.075 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF trajectory builder for the mixed-triplet tracking step.
# Note the step-specific propagators (PropagatorWithMaterialForMixedStep[Opposite])
# instead of the parabolic-field ones used by the other steps here.
process.HLTPSetMixedTripletStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForMixedStep" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMixedTripletStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMixedTripletStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 2 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPMixedTripletStepChi2ChargeMeasurementEstimator16" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialForMixedStepOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter parameters for the mixed-triplet step
# (CCC lost-hit cut effectively disabled: maxCCCLostHits=9999, charge cut "None").
process.HLTPSetMixedTripletStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 3 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.1 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.4 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF trajectory builder for the pixel-less tracking step
# (alwaysUseInvalidHits=False and a shorter rebuild requirement than the
# pixel-seeded steps above).
process.HLTPSetPixelLessStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelLessStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelLessStepTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( True ),
  maxCand = cms.int32( 2 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPPixelLessStepChi2ChargeMeasurementEstimator16" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 4 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# CkfBaseTrajectoryFilter parameters for the pixel-less step
# (stricter: 4 hits minimum, no lost hits allowed, seed-pair penalty of 1).
process.HLTPSetPixelLessStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 4 ),
  seedPairPenalty = cms.int32( 1 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.1 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 0 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CkfBaseTrajectoryFilter parameters for the TOB/TEC tracking step
# (out-in direction: 5 hits minimum, no lost hits allowed).
process.HLTPSetTobTecStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 5 ),
  seedPairPenalty = cms.int32( 1 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.1 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 0 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Separate in-out trajectory filter for the TOB/TEC step; identical to the
# filter above except for a looser 4-hit minimum.
process.HLTPSetTobTecStepInOutTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 4 ),
  seedPairPenalty = cms.int32( 1 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.1 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 0 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF trajectory builder for the TOB/TEC step. Unlike the triplet
# steps it uses distinct filters for the two directions
# (useSameTrajFilter=False; the in-out filter is the 4-hit variant above).
process.HLTPSetTobTecStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetTobTecStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetTobTecStepInOutTrajectoryFilter" ) ),
  useSameTrajFilter = cms.bool( False ),
  maxCand = cms.int32( 2 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( False ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPTobTecStepChi2ChargeMeasurementEstimator16" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 4 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# Grouped CKF trajectory builder for the IterL3 outside-in (OI) muon
# reconstruction. Carries OI-specific extras (rescaleErrorIfFail, deltaEta/
# deltaPhi windows, useSeedLayer, propagatorProximity) not present in the
# tracking-step builders above.
process.HLTPSetGroupedCkfTrajectoryBuilderIterL3ForOI = cms.PSet(
  rescaleErrorIfFail = cms.double( 1.0 ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lockHits = cms.bool( True ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfTrajectoryFilterIterL3OI" ) ),
  maxCand = cms.int32( 5 ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  bestHitOnly = cms.bool( True ),
  deltaEta = cms.double( -1.0 ),
  useSeedLayer = cms.bool( False ),
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  requireSeedHitsInRebuild = cms.bool( False ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( True ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfTrajectoryFilterIterL3OI" ) ),
  foundHitBonus = cms.double( 1000.0 ),
  propagatorProximity = cms.string( "SteppingHelixPropagatorAny" ),
  updator = cms.string( "hltESPKFUpdator" ),
  deltaPhi = cms.double( -1.0 ),
  seedAs5DHit = cms.bool( False )
)
# Grouped CKF trajectory builder for iteration 0 of IterL3 muon tracking
# (note the very low lostHitPenalty of 1.0 and rebuild threshold of 2 hits).
process.HLTIter0IterL3MuonPSetGroupedCkfTrajectoryBuilderIT = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( True ),
  lostHitPenalty = cms.double( 1.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter0IterL3MuonGroupedCkfTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  minNrOfHitsForRebuild = cms.int32( 2 ),
  maxCand = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter0IterL3MuonGroupedCkfTrajectoryFilterIT" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 1000.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for iteration 0 of the L1-seeded IterL3 muon path;
# same values as HLTIter0IterL3MuonGroupedCkfTrajectoryFilterIT below.
process.HLTIter0IterL3FromL1MuonGroupedCkfTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.9 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 10.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF trajectory builder for iteration 0 of the L1-seeded IterL3
# muon path; mirrors HLTIter0IterL3MuonPSetGroupedCkfTrajectoryBuilderIT
# but references the FromL1Muon filter.
process.HLTIter0IterL3FromL1MuonPSetGroupedCkfTrajectoryBuilderIT = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( True ),
  lostHitPenalty = cms.double( 1.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter0IterL3FromL1MuonGroupedCkfTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterial" ),
  minNrOfHitsForRebuild = cms.int32( 2 ),
  maxCand = cms.int32( 5 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter0IterL3FromL1MuonGroupedCkfTrajectoryFilterIT" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 1000.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for iteration 0 of IterL3 muon tracking
# (referenced by the Iter0 IterL3Muon builder above).
process.HLTIter0IterL3MuonGroupedCkfTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.9 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 10.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Plain (non-grouped) CKF trajectory builder for iteration 2 of the
# high-pT tracker-muon path.
process.HLTIter2HighPtTkMuPSetTrajectoryBuilderIT = cms.PSet(
  ComponentType = cms.string( "CkfTrajectoryBuilder" ),
  lostHitPenalty = cms.double( 30.0 ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter2HighPtTkMuPSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  updator = cms.string( "hltESPKFUpdator" ),
  seedAs5DHit = cms.bool( False )
)
# Trajectory filter for iteration 2 of the L1-seeded IterL3 muon path
# (5-hit minimum; lost-hit-fraction cut effectively disabled at 999.0,
# at most 1 lost hit overall).
process.HLTIter2IterL3FromL1MuonPSetTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 3 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF trajectory builder for iteration 2 of the L1-seeded IterL3
# muon path (no rebuild-from-seed-hits, rebuild kept only if it succeeds).
process.HLTIter2IterL3FromL1MuonPSetGroupedCkfTrajectoryBuilderIT = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( False ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter2IterL3FromL1MuonPSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 1000.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter2IterL3FromL1MuonPSetTrajectoryFilterIT" ) )
)
# Trajectory filter for iteration 2 of IterL3 muon tracking; same values as
# the FromL1Muon variant above.
process.HLTIter2IterL3MuonPSetTrajectoryFilterIT = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.3 ),
  maxConsecLostHits = cms.int32( 3 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF trajectory builder for iteration 2 of IterL3 muon tracking;
# mirrors the FromL1Muon builder above but references the IterL3Muon filter.
process.HLTIter2IterL3MuonPSetGroupedCkfTrajectoryBuilderIT = cms.PSet(
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( False ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter2IterL3MuonPSetTrajectoryFilterIT" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 1000.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTIter2IterL3MuonPSetTrajectoryFilterIT" ) )
)
# Trajectory filter for the IterL3 outside-in muon builder
# (HLTPSetGroupedCkfTrajectoryBuilderIterL3ForOI above): high 3.0 GeV minPt,
# unbounded hit count (maxNumberOfHits = -1).
process.HLTPSetCkfTrajectoryFilterIterL3OI = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 3.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 10.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( -1 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 999.0 ),
  maxLostHits = cms.int32( 1 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CkfBaseTrajectoryFilter parameters for the pixel-pair tracking step
# (out-in direction).
process.HLTPSetPixelPairStepTrajectoryFilter = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 4 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.1 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 0 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# In-out variant of the pixel-pair step filter; identical to the filter
# above except seedExtension = 1.
process.HLTPSetPixelPairStepTrajectoryFilterInOut = cms.PSet(
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  minimumNumberOfHits = cms.int32( 4 ),
  seedPairPenalty = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  minPt = cms.double( 0.1 ),
  nSigmaMinPt = cms.double( 5.0 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHits = cms.int32( 999 ),
  maxConsecLostHits = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedExtension = cms.int32( 1 ),
  strictSeedExtension = cms.bool( False ),
  pixelSeedExtension = cms.bool( False ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  maxCCCLostHits = cms.int32( 0 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Grouped CKF trajectory builder for the pixel-pair step; uses distinct
# out-in and in-out filters (useSameTrajFilter=False).
process.HLTPSetPixelPairStepTrajectoryBuilder = cms.PSet(
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  bestHitOnly = cms.bool( True ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilter" ) ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilterInOut" ) ),
  useSameTrajFilter = cms.bool( False ),
  maxCand = cms.int32( 3 ),
  intermediateCleaning = cms.bool( True ),
  lostHitPenalty = cms.double( 30.0 ),
  foundHitBonus = cms.double( 10.0 ),
  lockHits = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  updator = cms.string( "hltESPKFUpdator" ),
  alwaysUseInvalidHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  estimator = cms.string( "hltESPPixelPairStepChi2ChargeMeasurementEstimator9" ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  seedAs5DHit = cms.bool( False )
)
# TOB/TEC step trajectory builder, PPOnAA (heavy-ion pp reference) menu
# variant; references PPOnAA-specific filter PSets via refToPSet_
# (presumably defined elsewhere in this dump — not visible in this chunk).
process.HLTPSetTobTecStepTrajectoryBuilderPPOnAA = cms.PSet(
  useSameTrajFilter = cms.bool( False ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetTobTecStepTrajectoryFilterPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 4 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPTobTecStepChi2ChargeMeasurementEstimator16" ),
  inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetTobTecStepInOutTrajectoryFilterPPOnAA" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Low-pT triplet step filter, PPOnAA variant (higher 0.49 GeV minPt than the
# 0.075 GeV pp filter earlier in this file).
process.HLTPSetLowPtTripletStepTrajectoryFilterPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 0.49 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Detached-quad step filter, PPOnAA variant (0.9 GeV minPt vs 0.075 GeV in
# the pp filter earlier in this file).
process.HLTPSetDetachedQuadStepTrajectoryFilterPPOnAA = cms.PSet(
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 0.9 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Generic/default CkfBaseTrajectoryFilter parameter block
# (5-hit minimum, 0.9 GeV minPt, no cluster-charge cut).
process.HLTPSetCkfBaseTrajectoryFilter_block = cms.PSet(
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 0.9 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetInitialStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 1 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetLowPtTripletStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtTripletStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 4 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPLowPtTripletStepChi2ChargeMeasurementEstimator9" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# In-out filter for the TOB/TEC step: >= 4 hits, minPt = 2.0 GeV, seed-pair penalty,
# and maxLostHits = 0 (no lost hits tolerated on the in-out pass).
process.HLTPSetTobTecStepInOutTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 2.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 0 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Base (hit/pt) part of the initial-step pre-splitting composite filter; very loose
# minPt = 0.2 GeV. Combined with the strip-shape filter in
# HLTPSetInitialStepTrajectoryFilterPreSplittingPPOnAA below.
process.HLTPSetInitialStepTrajectoryFilterBasePreSplittingPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.2 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the detached-triplet step (chi2=9 charge-aware estimator, maxCand=3).
process.HLTPSetDetachedTripletStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedTripletStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPDetachedTripletStepChi2ChargeMeasurementEstimator9" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# Builder for the high-pT triplet step (chi2=30 charge-aware estimator, maxCand=3).
process.HLTPSetHighPtTripletStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetHighPtTripletStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPHighPtTripletStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# Filter for the detached-triplet step: >= 3 hits, minPt = 0.9 GeV.
process.HLTPSetDetachedTripletStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.9 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the initial step of the pre-splitting (pixel cluster splitting) pass;
# uses the composite pre-splitting filter defined below.
process.HLTPSetInitialStepTrajectoryBuilderPreSplittingPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterPreSplittingPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# Filter for the low-pT quad step: >= 3 hits, minPt = 0.49 GeV.
process.HLTPSetLowPtQuadStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.49 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the jet-core step. maxCand = 50 is far larger than the other steps,
# allowing many parallel candidates inside dense jet cores.
process.HLTPSetJetCoreStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetJetCoreStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 50 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# Filter for the mixed-triplet step: minPt = 0.4 GeV; note the non-default
# lost-hits-fraction constant (1.4 instead of 2.0 used elsewhere).
process.HLTPSetMixedTripletStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 0.4 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 1.4 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Filter for the jet-core step: >= 4 hits and a hard minPt = 5.0 GeV (high-pT tracks
# inside jets only).
process.HLTPSetJetCoreStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 5.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Composite pre-splitting filter: candidates must pass BOTH the base hit/pt filter
# and the strip sub-cluster shape filter.
process.HLTPSetInitialStepTrajectoryFilterPreSplittingPPOnAA = cms.PSet(
ComponentType = cms.string( "CompositeTrajectoryFilter" ),
filters = cms.VPSet(
cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterBasePreSplittingPPOnAA" ) ),
cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterShapePreSplittingPPOnAA" ) )
)
)
# Builder for the mixed-triplet step; the only builder here using the dedicated
# "ForMixedStep" propagators, with maxCand = 2.
process.HLTPSetMixedTripletStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialForMixedStepOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetMixedTripletStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialForMixedStep" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 2 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPMixedTripletStepChi2ChargeMeasurementEstimator16" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# Filter for the high-pT triplet step: >= 3 hits, minPt = 0.7 GeV.
process.HLTPSetHighPtTripletStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.7 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the low-pT quad step (maxCand = 4, chi2=9 charge-aware estimator).
process.HLTPSetLowPtQuadStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtQuadStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 4 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPLowPtQuadStepChi2ChargeMeasurementEstimator9" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# Builder for the pixel-less step. alwaysUseInvalidHits = False and
# minNrOfHitsForRebuild = 4 differ from the pixel-seeded steps above.
process.HLTPSetPixelLessStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelLessStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 4 ),
maxCand = cms.int32( 2 ),
alwaysUseInvalidHits = cms.bool( False ),
estimator = cms.string( "hltESPPixelLessStepChi2ChargeMeasurementEstimator16" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# "Loose" SiStrip cluster-charge cut threshold, referenced by the filters above
# via refToPSet_ "HLTSiStripClusterChargeCutLoose".
process.HLTSiStripClusterChargeCutLoose = cms.PSet( value = cms.double( 1620.0 ) )
# Builder for the detached-quad step (maxCand = 3, chi2=9 charge-aware estimator).
process.HLTPSetDetachedQuadStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetDetachedQuadStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPDetachedQuadStepChi2ChargeMeasurementEstimator9" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# Strip sub-cluster shape filter used in the pre-splitting composite filters.
# layerMask restricts it to TIB/TID layers 1-2 (TOB and TEC disabled).
process.HLTPSetInitialStepTrajectoryFilterShapePreSplittingPPOnAA = cms.PSet(
ComponentType = cms.string( "StripSubClusterShapeTrajectoryFilter" ),
subclusterCutSN = cms.double( 12.0 ),
trimMaxADC = cms.double( 30.0 ),
seedCutMIPs = cms.double( 0.35 ),
subclusterCutMIPs = cms.double( 0.45 ),
subclusterWindow = cms.double( 0.7 ),
maxNSat = cms.uint32( 3 ),
trimMaxFracNeigh = cms.double( 0.25 ),
maxTrimmedSizeDiffNeg = cms.double( 1.0 ),
seedCutSN = cms.double( 7.0 ),
layerMask = cms.PSet(
TOB = cms.bool( False ),
TIB = cms.vuint32( 1, 2 ),
TID = cms.vuint32( 1, 2 ),
TEC = cms.bool( False )
),
maxTrimmedSizeDiffPos = cms.double( 0.7 ),
trimMaxFracTotal = cms.double( 0.15 )
)
# Filter for the initial step: >= 3 hits, minPt = 0.6 GeV.
process.HLTPSetInitialStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.6 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Filter for the TOB/TEC step: >= 5 hits, minPt = 2.0 GeV, seed-pair penalty, and
# maxLostHits = 0 (strictest lost-hit requirement).
process.HLTPSetTobTecStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 5 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 2.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 0 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Filter for the pixel-less step: >= 4 hits, minPt = 2.0 GeV, seed-pair penalty,
# maxLostHits = 0.
process.HLTPSetPixelLessStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 9999 ),
minPt = cms.double( 2.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 1 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 0 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Default (along-momentum) filter for the pixel-pair step: >= 4 hits, very loose
# minPt = 0.1 GeV.
process.HLTPSetPixelPairStepTrajectoryFilterPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.1 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# In-out variant of the pixel-pair filter; identical except seedExtension = 1.
process.HLTPSetPixelPairStepTrajectoryFilterInOutPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 1 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 0.1 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the pixel-pair step. useSameTrajFilter = False: the in-out pass uses
# the dedicated InOut filter above instead of the default one.
process.HLTPSetPixelPairStepTrajectoryBuilderPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( False ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilterPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPPixelPairStepChi2ChargeMeasurementEstimator9" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilterInOutPPOnAA" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# --- "ForFullTracking" variants: same structure as the PSets above but with a
# uniform minPt = 1.0 GeV, used by the full-tracking PPOnAA paths. ---

# Base part of the pre-splitting composite filter (full-tracking variant).
process.HLTPSetInitialStepTrajectoryFilterBasePreSplittingForFullTrackingPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
maxCCCLostHits = cms.int32( 0 ),
nSigmaMinPt = cms.double( 5.0 ),
minPt = cms.double( 1.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the initial pre-splitting step (full-tracking variant).
process.HLTPSetInitialStepTrajectoryBuilderPreSplittingForFullTrackingPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterPreSplittingForFullTrackingPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# Composite pre-splitting filter (full-tracking variant): base hit/pt filter AND
# the shared strip-shape filter.
process.HLTPSetInitialStepTrajectoryFilterPreSplittingForFullTrackingPPOnAA = cms.PSet(
ComponentType = cms.string( "CompositeTrajectoryFilter" ),
filters = cms.VPSet(
cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterBasePreSplittingForFullTrackingPPOnAA" ) ),
cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterShapePreSplittingPPOnAA" ) )
)
)
# Filter for the initial step (full-tracking variant): >= 4 hits, minPt = 1.0 GeV.
process.HLTPSetInitialStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
maxCCCLostHits = cms.int32( 0 ),
nSigmaMinPt = cms.double( 5.0 ),
minPt = cms.double( 1.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the initial step (full-tracking variant); like the non-full-tracking
# initial builder it sets keepOriginalIfRebuildFails = True and
# minNrOfHitsForRebuild = 1.
process.HLTPSetInitialStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 1 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# Filter for the low-pT quad step (full-tracking variant): minPt raised to 1.0 GeV
# versus 0.49 GeV in the standard variant.
process.HLTPSetLowPtQuadStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 1.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetLowPtQuadStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtQuadStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 4 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPLowPtQuadStepChi2ChargeMeasurementEstimator9" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetHighPtTripletStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 1.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetHighPtTripletStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetHighPtTripletStepTrajectoryFilterForFullTrackingPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPHighPtTripletStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
# CkF trajectory filter for the low-pT triplet step (pp-on-AA full tracking):
# at least 3 hits, minPt 2.8 — a higher pT cut than the quad/high-pT-triplet
# filters of this sequence.
process.HLTPSetLowPtTripletStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet( 
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 2.8 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" )  ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the low-pT triplet step: chi2(9) charge-aware estimator,
# up to 4 candidates.
process.HLTPSetLowPtTripletStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetLowPtTripletStepTrajectoryFilterForFullTrackingPPOnAA" )  ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 4 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPLowPtTripletStepChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" )  ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# CkF trajectory filter for the detached quadruplet step: at least 3 hits,
# minPt 5.0 (tighter pT cut than the prompt-track steps).
process.HLTPSetDetachedQuadStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet( 
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" )  ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CkF trajectory filter for the detached triplet step: same cuts as the
# detached quadruplet filter (>= 3 hits, minPt 5.0), kept as a separate PSet
# for independent tuning.
process.HLTPSetDetachedTripletStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet( 
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" )  ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CkF trajectory filter for the pixel-pair step: at least 4 hits, minPt 5.0.
process.HLTPSetPixelPairStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet( 
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" )  ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the pixel-pair step. Note useSameTrajFilter = False: the
# inside-out pass uses a dedicated in/out filter PSet (declared later in this
# file) instead of the main trajectory filter.
process.HLTPSetPixelPairStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet( 
  useSameTrajFilter = cms.bool( False ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilterForFullTrackingPPOnAA" )  ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPPixelPairStepChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPixelPairStepTrajectoryFilterInOutForFullTrackingPPOnAA" )  ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# CkF trajectory filter for the mixed-triplet step: the cluster-charge cut is
# effectively disabled here (maxCCCLostHits 9999, charge cut "None"), and the
# lost-hits-fraction constant is 1.4 rather than the 2.0 used elsewhere.
process.HLTPSetMixedTripletStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet( 
  minimumNumberOfHits = cms.int32( 3 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 1.4 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" )  ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# CkF trajectory filter for the pixel-less step: at least 4 hits, a seed-pair
# penalty of 1, and no lost hits allowed at all (maxLostHits 0).
process.HLTPSetPixelLessStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet( 
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" )  ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the pixel-less step: narrower search than the pixel-seeded
# steps — only 2 candidates, invalid hits not always kept
# (alwaysUseInvalidHits False), chi2(16) charge-aware estimator.
process.HLTPSetPixelLessStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPixelLessStepTrajectoryFilterForFullTrackingPPOnAA" )  ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 4 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPPixelLessStepChi2ChargeMeasurementEstimator16" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" )  ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# CkF trajectory filter for the TOB/TEC step (outward pass): at least 5 hits,
# seed-pair penalty 1, no lost hits allowed.
process.HLTPSetTobTecStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet( 
  minimumNumberOfHits = cms.int32( 5 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" )  ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# In/out (inside-out) variant of the TOB/TEC step filter: identical to the
# main one except the hit requirement is relaxed to 4.
process.HLTPSetTobTecStepInOutTrajectoryFilterForFullTrackingPPOnAA = cms.PSet( 
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 1 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" )  ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 0 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the TOB/TEC step: useSameTrajFilter = False — the two filter
# PSets declared just above are wired in separately (main vs. in/out pass).
# chi2(16) charge-aware estimator, 2 candidates, invalid hits not always kept.
process.HLTPSetTobTecStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet( 
  useSameTrajFilter = cms.bool( False ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetTobTecStepTrajectoryFilterForFullTrackingPPOnAA" )  ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 4 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( False ),
  estimator = cms.string( "hltESPTobTecStepChi2ChargeMeasurementEstimator16" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetTobTecStepInOutTrajectoryFilterForFullTrackingPPOnAA" )  ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# CkF trajectory filter for the jet-core step: at least 4 hits, cluster-charge
# cut disabled (maxCCCLostHits 9999, charge cut "None").
process.HLTPSetJetCoreStepTrajectoryFilterForFullTrackingPPOnAA = cms.PSet( 
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 0 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 9999 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" )  ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the jet-core step: a much wider combinatorial search than the
# other steps (maxCand 50) with a plain chi2(30) estimator — presumably to
# cope with the dense hit environment inside jet cores (not verifiable from
# this file alone).
process.HLTPSetJetCoreStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetJetCoreStepTrajectoryFilterForFullTrackingPPOnAA" )  ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 50 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" )  ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# In/out (inside-out) filter for the pixel-pair step, referenced by the
# pixel-pair builder's inOutTrajectoryFilter: at least 4 hits, seed extension
# of 1 (the only filter in this group with a non-zero seedExtension).
process.HLTPSetPixelPairStepTrajectoryFilterInOutForFullTrackingPPOnAA = cms.PSet( 
  minimumNumberOfHits = cms.int32( 4 ),
  ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
  seedExtension = cms.int32( 1 ),
  chargeSignificance = cms.double( -1.0 ),
  pixelSeedExtension = cms.bool( False ),
  strictSeedExtension = cms.bool( False ),
  nSigmaMinPt = cms.double( 5.0 ),
  maxCCCLostHits = cms.int32( 0 ),
  minPt = cms.double( 5.0 ),
  maxConsecLostHits = cms.int32( 1 ),
  extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
  constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
  seedPairPenalty = cms.int32( 0 ),
  maxNumberOfHits = cms.int32( 100 ),
  minNumberOfHitsForLoopers = cms.int32( 13 ),
  minGoodStripCharge = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" )  ),
  minNumberOfHitsPerLoop = cms.int32( 4 ),
  minHitsMinPt = cms.int32( 3 ),
  maxLostHitsFraction = cms.double( 0.1 ),
  maxLostHits = cms.int32( 999 ),
  highEtaSwitch = cms.double( 5.0 ),
  minHitsAtHighEta = cms.int32( 5 )
)
# Builder for the mixed-triplet step: uses the dedicated mixed-step
# propagators (not the parabolic-Mf ones used by the other steps), a chi2(16)
# charge-aware estimator, and only 2 candidates.
process.HLTPSetMixedTripletStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialForMixedStepOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetMixedTripletStepTrajectoryFilterForFullTrackingPPOnAA" )  ),
  propagatorAlong = cms.string( "PropagatorWithMaterialForMixedStep" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 2 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPMixedTripletStepChi2ChargeMeasurementEstimator16" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" )  ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Builder for the detached quadruplet step: chi2(9) charge-aware estimator,
# up to 3 candidates.
process.HLTPSetDetachedQuadStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedQuadStepTrajectoryFilterForFullTrackingPPOnAA" )  ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPDetachedQuadStepChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" )  ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Builder for the detached triplet step: same settings as the detached-quad
# builder except for its own filter and estimator names.
process.HLTPSetDetachedTripletStepTrajectoryBuilderForFullTrackingPPOnAA = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetDetachedTripletStepTrajectoryFilterForFullTrackingPPOnAA" )  ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPDetachedTripletStepChi2ChargeMeasurementEstimator9" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" )  ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
process.HLTPSetInitialStepTrajectoryFilterBasePreSplittingForDmesonPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
maxCCCLostHits = cms.int32( 0 ),
nSigmaMinPt = cms.double( 5.0 ),
minPt = cms.double( 3.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetInitialStepTrajectoryFilterForDmesonPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 4 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
maxCCCLostHits = cms.int32( 0 ),
nSigmaMinPt = cms.double( 5.0 ),
minPt = cms.double( 3.0 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetInitialStepTrajectoryBuilderForDmesonPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( True ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterForDmesonPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 1 ),
maxCand = cms.int32( 3 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetLowPtQuadStepTrajectoryFilterForDmesonPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 2.8 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
process.HLTPSetLowPtQuadStepTrajectoryBuilderForDmesonPPOnAA = cms.PSet(
useSameTrajFilter = cms.bool( True ),
ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
keepOriginalIfRebuildFails = cms.bool( False ),
lostHitPenalty = cms.double( 30.0 ),
lockHits = cms.bool( True ),
requireSeedHitsInRebuild = cms.bool( True ),
TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
maxDPhiForLooperReconstruction = cms.double( 2.0 ),
maxPtForLooperReconstruction = cms.double( 0.7 ),
propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
trajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetLowPtQuadStepTrajectoryFilterForDmesonPPOnAA" ) ),
propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
minNrOfHitsForRebuild = cms.int32( 5 ),
maxCand = cms.int32( 4 ),
alwaysUseInvalidHits = cms.bool( True ),
estimator = cms.string( "hltESPLowPtQuadStepChi2ChargeMeasurementEstimator9" ),
inOutTrajectoryFilter = cms.PSet( refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
intermediateCleaning = cms.bool( True ),
foundHitBonus = cms.double( 10.0 ),
updator = cms.string( "hltESPKFUpdator" ),
bestHitOnly = cms.bool( True ),
seedAs5DHit = cms.bool( False )
)
process.HLTPSetHighPtTripletStepTrajectoryFilterForDmesonPPOnAA = cms.PSet(
minimumNumberOfHits = cms.int32( 3 ),
ComponentType = cms.string( "CkfBaseTrajectoryFilter" ),
seedExtension = cms.int32( 0 ),
chargeSignificance = cms.double( -1.0 ),
pixelSeedExtension = cms.bool( False ),
strictSeedExtension = cms.bool( False ),
nSigmaMinPt = cms.double( 5.0 ),
maxCCCLostHits = cms.int32( 0 ),
minPt = cms.double( 3.5 ),
maxConsecLostHits = cms.int32( 1 ),
extraNumberOfHitsBeforeTheFirstLoop = cms.int32( 4 ),
constantValueForLostHitsFractionFilter = cms.double( 2.0 ),
seedPairPenalty = cms.int32( 0 ),
maxNumberOfHits = cms.int32( 100 ),
minNumberOfHitsForLoopers = cms.int32( 13 ),
minGoodStripCharge = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
minNumberOfHitsPerLoop = cms.int32( 4 ),
minHitsMinPt = cms.int32( 3 ),
maxLostHitsFraction = cms.double( 0.1 ),
maxLostHits = cms.int32( 999 ),
highEtaSwitch = cms.double( 5.0 ),
minHitsAtHighEta = cms.int32( 5 )
)
# GroupedCkfTrajectoryBuilder parameters for the high-pT triplet step of the
# D-meson PPOnAA tracking; references the matching trajectory filter above.
process.HLTPSetHighPtTripletStepTrajectoryBuilderForDmesonPPOnAA = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetHighPtTripletStepTrajectoryFilterForDmesonPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPHighPtTripletStepChi2ChargeMeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Composite trajectory filter for the initial (pre-cluster-splitting) tracking
# step: combines the base filter with the cluster-shape filter, both defined
# elsewhere in this menu.
process.HLTPSetInitialStepTrajectoryFilterPreSplittingForDmesonPPOnAA = cms.PSet( 
  ComponentType = cms.string( "CompositeTrajectoryFilter" ),
  filters = cms.VPSet( 
    cms.PSet(  refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterBasePreSplittingForDmesonPPOnAA" )  ),
    cms.PSet(  refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterShapePreSplittingPPOnAA" )  )
  )
)
# GroupedCkfTrajectoryBuilder parameters for the initial pre-splitting step of
# the D-meson PPOnAA tracking; uses the composite filter defined above.
process.HLTPSetInitialStepTrajectoryBuilderPreSplittingForDmesonPPOnAA = cms.PSet( 
  useSameTrajFilter = cms.bool( True ),
  ComponentType = cms.string( "GroupedCkfTrajectoryBuilder" ),
  keepOriginalIfRebuildFails = cms.bool( False ),
  lostHitPenalty = cms.double( 30.0 ),
  lockHits = cms.bool( True ),
  requireSeedHitsInRebuild = cms.bool( True ),
  TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
  maxDPhiForLooperReconstruction = cms.double( 2.0 ),
  maxPtForLooperReconstruction = cms.double( 0.7 ),
  propagatorOpposite = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  trajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetInitialStepTrajectoryFilterPreSplittingForDmesonPPOnAA" ) ),
  propagatorAlong = cms.string( "PropagatorWithMaterialParabolicMf" ),
  minNrOfHitsForRebuild = cms.int32( 5 ),
  maxCand = cms.int32( 3 ),
  alwaysUseInvalidHits = cms.bool( True ),
  estimator = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
  inOutTrajectoryFilter = cms.PSet(  refToPSet_ = cms.string( "HLTPSetCkfBaseTrajectoryFilter_block" ) ),
  intermediateCleaning = cms.bool( True ),
  foundHitBonus = cms.double( 10.0 ),
  updator = cms.string( "hltESPKFUpdator" ),
  bestHitOnly = cms.bool( True ),
  seedAs5DHit = cms.bool( False )
)
# Output-stream definitions: maps each HLT output stream to the list of
# primary datasets it carries (dataset contents are defined in
# process.datasets below).
process.streams = cms.PSet( 
  ALCALumiPixelCountsExpress = cms.vstring( 'AlCaLumiPixelCountsExpress' ),
  ALCALumiPixelCountsPrompt = cms.vstring( 'AlCaLumiPixelCountsPrompt' ),
  ALCAP0 = cms.vstring( 'AlCaP0' ),
  ALCAPHISYM = cms.vstring( 'AlCaPhiSym' ),
  Calibration = cms.vstring( 'TestEnablesEcalHcal' ),
  DQM = cms.vstring( 'OnlineMonitor' ),
  DQMCalibration = cms.vstring( 'TestEnablesEcalHcalDQM' ),
  DQMOnlineBeamspot = cms.vstring( 'DQMOnlineBeamspot' ),
  EcalCalibration = cms.vstring( 'EcalLaser' ),
  Express = cms.vstring( 'ExpressPhysics' ),
  ExpressAlignment = cms.vstring( 'ExpressAlignment' ),
  NanoDST = cms.vstring( 'L1Accept' ),
  PhysicsCommissioning = cms.vstring( 'HLTPhysics',
    'ZeroBias' ),
  PhysicsEndOfFill = cms.vstring( 'EmptyBX' ),
  PhysicsHIZeroBias1 = cms.vstring( 'HIZeroBias1',
    'HIZeroBias2' ),
  PhysicsHIZeroBias2 = cms.vstring( 'HIZeroBias3',
    'HIZeroBias4' ),
  PhysicsHIZeroBias3 = cms.vstring( 'HIZeroBias5',
    'HIZeroBias6' ),
  PhysicsHIZeroBias4 = cms.vstring( 'HIZeroBias7',
    'HIZeroBias8' ),
  PhysicsHIZeroBias5 = cms.vstring( 'HIZeroBias10',
    'HIZeroBias9' ),
  PhysicsHIZeroBias6 = cms.vstring( 'HIZeroBias11',
    'HIZeroBias12' ),
  RPCMON = cms.vstring( 'RPCMonitor' )
)
# Primary-dataset definitions: maps each dataset to the HLT paths (with their
# version suffixes) whose accepted events it contains. Path version strings
# must match the path definitions elsewhere in this menu.
process.datasets = cms.PSet( 
  AlCaLumiPixelCountsExpress = cms.vstring( 'AlCa_LumiPixelsCounts_Random_v2' ),
  AlCaLumiPixelCountsPrompt = cms.vstring( 'AlCa_LumiPixelsCounts_ZeroBias_v2' ),
  AlCaP0 = cms.vstring( 'AlCa_HIEcalEtaEBonly_v1',
    'AlCa_HIEcalEtaEEonly_v1',
    'AlCa_HIEcalPi0EBonly_v1',
    'AlCa_HIEcalPi0EEonly_v1' ),
  AlCaPhiSym = cms.vstring( 'AlCa_EcalPhiSym_v9' ),
  DQMOnlineBeamspot = cms.vstring( 'HLT_HIHT80_Beamspot_ppRef5TeV_v3',
    'HLT_ZeroBias_Beamspot_v4' ),
  EcalLaser = cms.vstring( 'HLT_EcalCalibration_v4' ),
  EmptyBX = cms.vstring( 'HLT_HIL1NotBptxORForPPRef_v2',
    'HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2',
    'HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2' ),
  ExpressAlignment = cms.vstring( 'HLT_HIHT80_Beamspot_ppRef5TeV_v3',
    'HLT_ZeroBias_Beamspot_v4' ),
  ExpressPhysics = cms.vstring( 'HLT_Physics_v7',
    'HLT_Random_v3',
    'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
    'HLT_ZeroBias_v6' ),
  HIZeroBias1 = cms.vstring( 'HLT_HIZeroBias_part0_v6' ),
  HIZeroBias10 = cms.vstring( 'HLT_HIZeroBias_part9_v6' ),
  HIZeroBias11 = cms.vstring( 'HLT_HIZeroBias_part10_v6' ),
  HIZeroBias12 = cms.vstring( 'HLT_HIZeroBias_part11_v6' ),
  HIZeroBias2 = cms.vstring( 'HLT_HIZeroBias_part1_v6' ),
  HIZeroBias3 = cms.vstring( 'HLT_HIZeroBias_part2_v6' ),
  HIZeroBias4 = cms.vstring( 'HLT_HIZeroBias_part3_v6' ),
  HIZeroBias5 = cms.vstring( 'HLT_HIZeroBias_part4_v6' ),
  HIZeroBias6 = cms.vstring( 'HLT_HIZeroBias_part5_v6' ),
  HIZeroBias7 = cms.vstring( 'HLT_HIZeroBias_part6_v6' ),
  HIZeroBias8 = cms.vstring( 'HLT_HIZeroBias_part7_v6' ),
  HIZeroBias9 = cms.vstring( 'HLT_HIZeroBias_part8_v6' ),
  HLTPhysics = cms.vstring( 'HLT_Physics_v7' ),
  L1Accept = cms.vstring( 'DST_Physics_v7' ),
  OnlineMonitor = cms.vstring( 'HLT_HIL1NotBptxORForPPRef_v2',
    'HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2',
    'HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2',
    'HLT_HIZeroBias_part0_v6',
    'HLT_HIZeroBias_part10_v6',
    'HLT_HIZeroBias_part11_v6',
    'HLT_HIZeroBias_part1_v6',
    'HLT_HIZeroBias_part2_v6',
    'HLT_HIZeroBias_part3_v6',
    'HLT_HIZeroBias_part4_v6',
    'HLT_HIZeroBias_part5_v6',
    'HLT_HIZeroBias_part6_v6',
    'HLT_HIZeroBias_part7_v6',
    'HLT_HIZeroBias_part8_v6',
    'HLT_HIZeroBias_part9_v6',
    'HLT_Physics_v7',
    'HLT_Random_v3',
    'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
    'HLT_ZeroBias_v6' ),
  RPCMonitor = cms.vstring( 'AlCa_HIRPCMuonNormalisation_v1' ),
  TestEnablesEcalHcal = cms.vstring( 'HLT_EcalCalibration_v4',
    'HLT_HcalCalibration_v5' ),
  TestEnablesEcalHcalDQM = cms.vstring( 'HLT_EcalCalibration_v4',
    'HLT_HcalCalibration_v5' ),
  ZeroBias = cms.vstring( 'HLT_Random_v3',
    'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
    'HLT_ZeroBias_v6' )
)
# EmptyESSource records: declare an IOV (valid from run 1, run-based) for
# records that are produced by ESProducers rather than read from a database.
process.CSCChannelMapperESSource = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "CSCChannelMapperRecord" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
# NOTE(review): "CSCINdexer" capitalization is as generated; the label is
# arbitrary, only recordName matters.
process.CSCINdexerESSource = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "CSCIndexerRecord" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
process.GlobalParametersRcdSource = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "L1TGlobalParametersRcd" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
# Conditions database access: reads alignment/calibration payloads for the
# "103X_dataRun2_HLT_v1" global tag from the production Frontier service.
process.GlobalTag = cms.ESSource( "PoolDBESSource",
  DBParameters = cms.PSet( 
    connectionRetrialTimeOut = cms.untracked.int32( 60 ),
    idleConnectionCleanupPeriod = cms.untracked.int32( 10 ),
    enableReadOnlySessionOnUpdateConnection = cms.untracked.bool( False ),
    enablePoolAutomaticCleanUp = cms.untracked.bool( False ),
    messageLevel = cms.untracked.int32( 0 ),
    authenticationPath = cms.untracked.string( "." ),
    connectionRetrialPeriod = cms.untracked.int32( 10 ),
    connectionTimeOut = cms.untracked.int32( 0 ),
    enableConnectionSharing = cms.untracked.bool( True )
  ),
  connect = cms.string( "frontier://FrontierProd/CMS_CONDITIONS" ),
  globaltag = cms.string( "103X_dataRun2_HLT_v1" ),
  snapshotTime = cms.string( "" ),
  # toGet left empty: no per-record tag overrides on top of the global tag.
  toGet = cms.VPSet( 
  ),
  DumpStat = cms.untracked.bool( False ),
  ReconnectEachRun = cms.untracked.bool( False ),
  RefreshAlways = cms.untracked.bool( False ),
  RefreshEachRun = cms.untracked.bool( False ),
  RefreshOpenIOVs = cms.untracked.bool( False ),
  pfnPostfix = cms.untracked.string( "None" )
)
# Hardcoded HCAL time-slew parameterizations for the HBHE local
# reconstruction: three parameter sets for the Method-2 model
# (slope/tmax/tzero) and four for the Method-3 model (tspar*).
process.HcalTimeSlewEP = cms.ESSource( "HcalTimeSlewEP",
  appendToDataLabel = cms.string( "HBHE" ),
  timeSlewParametersM2 = cms.VPSet( 
    cms.PSet(  slope = cms.double( -3.178648 ),
      tmax = cms.double( 16.0 ),
      tzero = cms.double( 23.960177 )
    ),
    cms.PSet(  slope = cms.double( -1.5610227 ),
      tmax = cms.double( 10.0 ),
      tzero = cms.double( 11.977461 )
    ),
    cms.PSet(  slope = cms.double( -1.075824 ),
      tmax = cms.double( 6.25 ),
      tzero = cms.double( 9.109694 )
    )
  ),
  timeSlewParametersM3 = cms.VPSet( 
    cms.PSet(  tspar2 = cms.double( 0.0 ),
      tspar0 = cms.double( 12.2999 ),
      tspar1 = cms.double( -2.19142 ),
      tspar1_siPM = cms.double( 0.0 ),
      cap = cms.double( 6.0 ),
      tspar0_siPM = cms.double( 0.0 ),
      tspar2_siPM = cms.double( 0.0 )
    ),
    cms.PSet(  tspar2 = cms.double( 32.0 ),
      tspar0 = cms.double( 15.5 ),
      tspar1 = cms.double( -3.2 ),
      tspar1_siPM = cms.double( 0.0 ),
      cap = cms.double( 6.0 ),
      tspar0_siPM = cms.double( 0.0 ),
      tspar2_siPM = cms.double( 0.0 )
    ),
    cms.PSet(  tspar2 = cms.double( 0.0 ),
      tspar0 = cms.double( 12.2999 ),
      tspar1 = cms.double( -2.19142 ),
      tspar1_siPM = cms.double( 0.0 ),
      cap = cms.double( 6.0 ),
      tspar0_siPM = cms.double( 0.0 ),
      tspar2_siPM = cms.double( 0.0 )
    ),
    cms.PSet(  tspar2 = cms.double( 0.0 ),
      tspar0 = cms.double( 12.2999 ),
      tspar1 = cms.double( -2.19142 ),
      tspar1_siPM = cms.double( 0.0 ),
      cap = cms.double( 6.0 ),
      tspar0_siPM = cms.double( 0.0 ),
      tspar2_siPM = cms.double( 0.0 )
    )
  )
)
# Particle-data table for HepPDT-based tools (masses, charges, PDG ids).
process.HepPDTESSource = cms.ESSource( "HepPDTESSource",
  pdtFileName = cms.FileInPath( "SimGeneral/HepPDTESSource/data/pythiaparticle.tbl" )
)
# IOV declaration for the ECAL electronics-mapping record.
process.eegeom = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "EcalMappingRcd" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
# Hardcoded HCAL calibrations, restricted to gain widths only; everything
# else comes from the GlobalTag.
process.es_hardcode = cms.ESSource( "HcalHardcodeCalibrations",
  fromDDD = cms.untracked.bool( False ),
  toGet = cms.untracked.vstring( 'GainWidths' )
)
# IOV declarations for b-tag computer and ECAL/HCAL severity-level records,
# whose contents are provided by ESProducers defined below.
process.hltESSBTagRecord = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "JetTagComputerRecord" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
process.hltESSEcalSeverityLevel = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "EcalSeverityLevelAlgoRcd" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
process.hltESSHcalSeverityLevel = cms.ESSource( "EmptyESSource",
  recordName = cms.string( "HcalSeverityLevelComputerRcd" ),
  iovIsRunNotTime = cms.bool( True ),
  firstValid = cms.vuint32( 1 )
)
# PPS (Precision Proton Spectrometer) pixel-detector topology for Run 3:
# sensor pitch, thickness, pixel counts and active-edge geometry
# (lengths presumably in mm -- inherited from the detector description).
process.ppsPixelTopologyESSource = cms.ESSource( "PPSPixelTopologyESSource",
  RunType = cms.string( "Run3" ),
  PitchSimY = cms.double( 0.15 ),
  PitchSimX = cms.double( 0.1 ),
  thickness = cms.double( 0.23 ),
  noOfPixelSimX = cms.int32( 160 ),
  noOfPixelSimY = cms.int32( 104 ),
  noOfPixels = cms.int32( 16640 ),
  simXWidth = cms.double( 16.6 ),
  simYWidth = cms.double( 16.2 ),
  deadEdgeWidth = cms.double( 0.2 ),
  activeEdgeSigma = cms.double( 0.02 ),
  physActiveEdgeDist = cms.double( 0.15 ),
  appendToDataLabel = cms.string( "" )
)
# Analytical track propagator usable in either direction along the trajectory.
process.AnyDirectionAnalyticalPropagator = cms.ESProducer( "AnalyticalPropagatorESProducer",
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "AnyDirectionAnalyticalPropagator" ),
  PropagationDirection = cms.string( "anyDirection" )
)
# CSC channel mapping / indexing for the post-LS1 detector layout.
process.CSCChannelMapperESProducer = cms.ESProducer( "CSCChannelMapperESProducer",
  AlgoName = cms.string( "CSCChannelMapperPostls1" )
)
# CSC geometry from the conditions DB (not DDD/DD4hep), with alignment applied.
process.CSCGeometryESModule = cms.ESProducer( "CSCGeometryESModule",
  fromDDD = cms.bool( False ),
  fromDD4hep = cms.bool( False ),
  alignmentsLabel = cms.string( "" ),
  appendToDataLabel = cms.string( "" ),
  useRealWireGeometry = cms.bool( True ),
  useOnlyWiresInME1a = cms.bool( False ),
  useGangedStripsInME1a = cms.bool( False ),
  useCentreTIOffsets = cms.bool( False ),
  applyAlignment = cms.bool( True ),
  debugV = cms.untracked.bool( False )
)
process.CSCIndexerESProducer = cms.ESProducer( "CSCIndexerESProducer",
  AlgoName = cms.string( "CSCIndexerPostls1" )
)
process.CSCObjectMapESProducer = cms.ESProducer( "CSCObjectMapESProducer",
  appendToDataLabel = cms.string( "" )
)
# Build the full calorimeter geometry from the listed subdetector geometries.
process.CaloGeometryBuilder = cms.ESProducer( "CaloGeometryBuilder",
  SelectedCalos = cms.vstring( 'HCAL',
    'ZDC',
    'EcalBarrel',
    'EcalEndcap',
    'EcalPreshower',
    'TOWER' )
)
process.CaloTopologyBuilder = cms.ESProducer( "CaloTopologyBuilder" )
# Mapping of ECAL endcap crystals to calo towers, read from a packaged map file.
process.CaloTowerConstituentsMapBuilder = cms.ESProducer( "CaloTowerConstituentsMapBuilder",
  MapFile = cms.untracked.string( "Geometry/CaloTopology/data/CaloTowerEEGeometric.map.gz" ),
  MapAuto = cms.untracked.bool( False ),
  SkipHE = cms.untracked.bool( False ),
  appendToDataLabel = cms.string( "" )
)
process.CaloTowerGeometryFromDBEP = cms.ESProducer( "CaloTowerGeometryFromDBEP",
  applyAlignment = cms.bool( False )
)
process.CaloTowerTopologyEP = cms.ESProducer( "CaloTowerTopologyEP",
  appendToDataLabel = cms.string( "" )
)
process.CastorDbProducer = cms.ESProducer( "CastorDbProducer",
  appendToDataLabel = cms.string( "" )
)
# Pixel/strip cluster-shape filter; shape templates loaded from packaged
# .par files, with no strip-charge cut applied here.
process.ClusterShapeHitFilterESProducer = cms.ESProducer( "ClusterShapeHitFilterESProducer",
  ComponentName = cms.string( "ClusterShapeHitFilter" ),
  PixelShapeFile = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_noL1.par" ),
  PixelShapeFileL1 = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_loose.par" ),
  clusterChargeCut = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) )
)
# DT geometry from the conditions DB with alignment applied.
process.DTGeometryESModule = cms.ESProducer( "DTGeometryESModule",
  fromDDD = cms.bool( False ),
  fromDD4hep = cms.bool( False ),
  DDDetector = cms.ESInputTag( "","" ),
  alignmentsLabel = cms.string( "" ),
  appendToDataLabel = cms.string( "" ),
  attribute = cms.string( "MuStructure" ),
  value = cms.string( "MuonBarrelDT" ),
  applyAlignment = cms.bool( True )
)
process.DTObjectMapESProducer = cms.ESProducer( "DTObjectMapESProducer",
  appendToDataLabel = cms.string( "" )
)
# ECAL barrel/endcap/preshower geometries from the DB, aligned.
process.EcalBarrelGeometryFromDBEP = cms.ESProducer( "EcalBarrelGeometryFromDBEP",
  applyAlignment = cms.bool( True )
)
process.EcalElectronicsMappingBuilder = cms.ESProducer( "EcalElectronicsMappingBuilder" )
process.EcalEndcapGeometryFromDBEP = cms.ESProducer( "EcalEndcapGeometryFromDBEP",
  applyAlignment = cms.bool( True )
)
# ECAL laser transparency corrections; extrapolation beyond the payload IOV
# is disabled (maxExtrapolationTimeInSec = 0).
process.EcalLaserCorrectionService = cms.ESProducer( "EcalLaserCorrectionService",
  maxExtrapolationTimeInSec = cms.uint32( 0 ),
  appendToDataLabel = cms.string( "" )
)
process.EcalPreshowerGeometryFromDBEP = cms.ESProducer( "EcalPreshowerGeometryFromDBEP",
  applyAlignment = cms.bool( True )
)
# GEM geometry from the DB; no alignment applied for GEM.
process.GEMGeometryESModule = cms.ESProducer( "GEMGeometryESModule",
  fromDDD = cms.bool( False ),
  fromDD4hep = cms.bool( False ),
  applyAlignment = cms.bool( False ),
  alignmentsLabel = cms.string( "" ),
  appendToDataLabel = cms.string( "" )
)
# Stable L1 trigger hardware parameters (trigger/object counts, chip layout)
# for the L1TGlobalParametersRcd record.
process.GlobalParameters = cms.ESProducer( "StableParametersTrivialProducer",
  TotalBxInEvent = cms.int32( 5 ),
  NumberPhysTriggers = cms.uint32( 512 ),
  NumberL1Muon = cms.uint32( 8 ),
  NumberL1EGamma = cms.uint32( 12 ),
  NumberL1Jet = cms.uint32( 12 ),
  NumberL1Tau = cms.uint32( 12 ),
  NumberChips = cms.uint32( 1 ),
  PinsOnChip = cms.uint32( 512 ),
  OrderOfChip = cms.vint32( 1 ),
  NumberL1IsoEG = cms.uint32( 4 ),
  NumberL1JetCounts = cms.uint32( 12 ),
  UnitLength = cms.int32( 8 ),
  NumberL1ForJet = cms.uint32( 4 ),
  IfCaloEtaNumberBits = cms.uint32( 4 ),
  IfMuEtaNumberBits = cms.uint32( 6 ),
  NumberL1TauJet = cms.uint32( 4 ),
  NumberL1Mu = cms.uint32( 4 ),
  NumberConditionChips = cms.uint32( 1 ),
  NumberPsbBoards = cms.int32( 7 ),
  NumberL1CenJet = cms.uint32( 4 ),
  PinsOnConditionChip = cms.uint32( 512 ),
  NumberL1NoIsoEG = cms.uint32( 4 ),
  NumberTechnicalTriggers = cms.uint32( 64 ),
  NumberPhysTriggersExtended = cms.uint32( 64 ),
  WordLength = cms.int32( 64 ),
  OrderConditionChip = cms.vint32( 1 ),
  appendToDataLabel = cms.string( "" )
)
process.HcalGeometryFromDBEP = cms.ESProducer( "HcalGeometryFromDBEP",
  applyAlignment = cms.bool( False )
)
process.HcalTopologyIdealEP = cms.ESProducer( "HcalTopologyIdealEP",
  Exclude = cms.untracked.string( "" ),
  MergePosition = cms.untracked.bool( True ),
  appendToDataLabel = cms.string( "" )
)
# Family of material-aware track propagators: along/opposite to momentum,
# full vs. parabolic ("ParabolicMf") magnetic-field model, and muon-mass
# (0.105) vs. pion-mass (0.139) hypotheses for the heavy-ion variants.
process.MaterialPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterial" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.MaterialPropagatorForHI = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialForHI" ),
  Mass = cms.double( 0.139 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.MaterialPropagatorParabolicMF = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialParabolicMf" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.OppositeMaterialPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialOpposite" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.OppositeMaterialPropagatorForHI = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialOppositeForHI" ),
  Mass = cms.double( 0.139 ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.OppositeMaterialPropagatorParabolicMF = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialParabolicMfOpposite" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
# Mixed-step variant applies a pT floor of 0.1.
process.OppositePropagatorWithMaterialForMixedStep = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialForMixedStepOpposite" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( 0.1 )
)
# Parabolic parameterization of the magnetic field, labeled "ParabolicMf"
# for the propagators above.
process.ParametrizedMagneticFieldProducer = cms.ESProducer( "AutoParametrizedMagneticFieldProducer",
  version = cms.string( "Parabolic" ),
  label = cms.untracked.string( "ParabolicMf" ),
  valueOverride = cms.int32( -1 )
)
# Fast pixel cluster-parameter estimator: position/error parameterizations
# per barrel layer and endcap, with template-based errors from the DB.
process.PixelCPEFastESProducer = cms.ESProducer( "PixelCPEFastESProducer",
  LoadTemplatesFromDB = cms.bool( True ),
  Alpha2Order = cms.bool( True ),
  ClusterProbComputationFlag = cms.int32( 0 ),
  useLAWidthFromDB = cms.bool( True ),
  lAOffset = cms.double( 0.0 ),
  lAWidthBPix = cms.double( 0.0 ),
  lAWidthFPix = cms.double( 0.0 ),
  doLorentzFromAlignment = cms.bool( False ),
  useLAFromDB = cms.bool( True ),
  xerr_barrel_l1 = cms.vdouble( 0.00115, 0.0012, 8.8E-4 ),
  yerr_barrel_l1 = cms.vdouble( 0.00375, 0.0023, 0.0025, 0.0025, 0.0023, 0.0023, 0.0021, 0.0021, 0.0024 ),
  xerr_barrel_ln = cms.vdouble( 0.00115, 0.0012, 8.8E-4 ),
  yerr_barrel_ln = cms.vdouble( 0.00375, 0.0023, 0.0025, 0.0025, 0.0023, 0.0023, 0.0021, 0.0021, 0.0024 ),
  xerr_endcap = cms.vdouble( 0.002, 0.002 ),
  yerr_endcap = cms.vdouble( 0.0021 ),
  xerr_barrel_l1_def = cms.double( 0.0103 ),
  yerr_barrel_l1_def = cms.double( 0.0021 ),
  xerr_barrel_ln_def = cms.double( 0.0103 ),
  yerr_barrel_ln_def = cms.double( 0.0021 ),
  xerr_endcap_def = cms.double( 0.002 ),
  yerr_endcap_def = cms.double( 7.5E-4 ),
  isPhase2 = cms.bool( False ),
  EdgeClusterErrorX = cms.double( 50.0 ),
  EdgeClusterErrorY = cms.double( 85.0 ),
  UseErrorsFromTemplates = cms.bool( True ),
  TruncatePixelCharge = cms.bool( True ),
  ComponentName = cms.string( "PixelCPEFast" ),
  MagneticFieldRecord = cms.ESInputTag( "","" ),
  appendToDataLabel = cms.string( "" )
)
# Looper propagator: wider MaxDPhi (4.0) and pion mass hypothesis.
process.PropagatorWithMaterialForLoopers = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 4.0 ),
  ComponentName = cms.string( "PropagatorWithMaterialForLoopers" ),
  Mass = cms.double( 0.1396 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
process.PropagatorWithMaterialForMixedStep = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "ParabolicMf" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "PropagatorWithMaterialForMixedStep" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( 0.1 )
)
process.RPCGeometryESModule = cms.ESProducer( "RPCGeometryESModule",
  fromDDD = cms.untracked.bool( False ),
  fromDD4hep = cms.untracked.bool( False ),
  appendToDataLabel = cms.string( "" )
)
# Bundles SiStrip noise/gain/quality conditions for the strip clusterizer.
process.SiStripClusterizerConditionsESProducer = cms.ESProducer( "SiStripClusterizerConditionsESProducer",
  QualityLabel = cms.string( "" ),
  Label = cms.string( "" ),
  appendToDataLabel = cms.string( "" )
)
# Strip-tracker gain: combines the two APV gain records without
# renormalization.
process.SiStripGainESProducer = cms.ESProducer( "SiStripGainESProducer",
  appendToDataLabel = cms.string( "" ),
  printDebug = cms.untracked.bool( False ),
  AutomaticNormalization = cms.bool( False ),
  APVGain = cms.VPSet( 
    cms.PSet(  NormalizationFactor = cms.untracked.double( 1.0 ),
      Label = cms.untracked.string( "" ),
      Record = cms.string( "SiStripApvGainRcd" )
    ),
    cms.PSet(  NormalizationFactor = cms.untracked.double( 1.0 ),
      Label = cms.untracked.string( "" ),
      Record = cms.string( "SiStripApvGain2Rcd" )
    )
  )
)
# Strip-tracker quality: merges bad-component information from the listed
# records (HV-off, cabling, bad channels/fibers/modules).
process.SiStripQualityESProducer = cms.ESProducer( "SiStripQualityESProducer",
  appendToDataLabel = cms.string( "" ),
  ListOfRecordToMerge = cms.VPSet( 
    cms.PSet(  record = cms.string( "SiStripDetVOffRcd" ),
      tag = cms.string( "" )
    ),
    cms.PSet(  record = cms.string( "SiStripDetCablingRcd" ),
      tag = cms.string( "" )
    ),
    cms.PSet(  record = cms.string( "SiStripBadChannelRcd" ),
      tag = cms.string( "" )
    ),
    cms.PSet(  record = cms.string( "SiStripBadFiberRcd" ),
      tag = cms.string( "" )
    ),
    cms.PSet(  record = cms.string( "SiStripBadModuleRcd" ),
      tag = cms.string( "" )
    )
  ),
  ReduceGranularity = cms.bool( False ),
  ThresholdForReducedGranularity = cms.double( 0.3 ),
  PrintDebugOutput = cms.bool( False ),
  UseEmptyRunInfo = cms.bool( False )
)
# Matcher for stereo/mono strip hits in double-sided modules.
process.SiStripRecHitMatcherESProducer = cms.ESProducer( "SiStripRecHitMatcherESProducer",
  ComponentName = cms.string( "StandardMatcher" ),
  NSigmaInside = cms.double( 3.0 ),
  PreFilter = cms.bool( False )
)
# Eta-phi regioning of the strip tracker (20 x 20 grid up to |eta| = 2.5).
process.SiStripRegionConnectivity = cms.ESProducer( "SiStripRegionConnectivity",
  EtaDivisions = cms.untracked.uint32( 20 ),
  PhiDivisions = cms.untracked.uint32( 20 ),
  EtaMax = cms.untracked.double( 2.5 )
)
# Simple secondary-vertex b-tag computer: 3D flight-distance significance,
# requiring >= 3 tracks and >= 1 vertex.
process.SimpleSecondaryVertex3TrkComputer = cms.ESProducer( "SimpleSecondaryVertexESProducer",
  use3d = cms.bool( True ),
  unBoost = cms.bool( False ),
  useSignificance = cms.bool( True ),
  minTracks = cms.uint32( 3 ),
  minVertices = cms.uint32( 1 )
)
# Stepping-helix propagator (any direction) used for propagation through the
# muon system / iron yoke, with material-volume corrections enabled.
process.SteppingHelixPropagatorAny = cms.ESProducer( "SteppingHelixPropagatorESProducer",
  ComponentName = cms.string( "SteppingHelixPropagatorAny" ),
  NoErrorPropagation = cms.bool( False ),
  PropagationDirection = cms.string( "anyDirection" ),
  useTuningForL2Speed = cms.bool( False ),
  useIsYokeFlag = cms.bool( True ),
  endcapShiftInZNeg = cms.double( 0.0 ),
  SetVBFPointer = cms.bool( False ),
  AssumeNoMaterial = cms.bool( False ),
  endcapShiftInZPos = cms.double( 0.0 ),
  useInTeslaFromMagField = cms.bool( False ),
  VBFName = cms.string( "VolumeBasedMagneticField" ),
  useEndcapShiftsInZ = cms.bool( False ),
  sendLogWarning = cms.bool( False ),
  useMatVolumes = cms.bool( True ),
  debug = cms.bool( False ),
  ApplyRadX0Correction = cms.bool( True ),
  useMagVolumes = cms.bool( True ),
  returnTangentPlane = cms.bool( True )
)
process.TrackerAdditionalParametersPerDetESModule = cms.ESProducer( "TrackerAdditionalParametersPerDetESModule",
  appendToDataLabel = cms.string( "" )
)
# Tracker geometry from the DB with alignment applied.
process.TrackerDigiGeometryESModule = cms.ESProducer( "TrackerDigiGeometryESModule",
  appendToDataLabel = cms.string( "" ),
  fromDDD = cms.bool( False ),
  applyAlignment = cms.bool( True ),
  alignmentsLabel = cms.string( "" )
)
process.TrackerGeometricDetESModule = cms.ESProducer( "TrackerGeometricDetESModule",
  fromDDD = cms.bool( False ),
  fromDD4hep = cms.bool( False ),
  appendToDataLabel = cms.string( "" )
)
process.TransientTrackBuilderESProducer = cms.ESProducer( "TransientTrackBuilderESProducer",
  ComponentName = cms.string( "TransientTrackBuilder" )
)
# Full volume-based magnetic-field map from the DB (no value override).
process.VolumeBasedMagneticFieldESProducer = cms.ESProducer( "VolumeBasedMagneticFieldESProducerFromDB",
  label = cms.untracked.string( "" ),
  debugBuilder = cms.untracked.bool( False ),
  valueOverride = cms.int32( -1 )
)
process.ZdcGeometryFromDBEP = cms.ESProducer( "ZdcGeometryFromDBEP",
  applyAlignment = cms.bool( False )
)
# DetId associator for matching tracks to calorimeter cells
# (0.087 eta bins, 70 x 72 eta-phi grid).
process.caloDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "CaloDetIdAssociator" ),
  etaBinSize = cms.double( 0.087 ),
  nEta = cms.int32( 70 ),
  nPhi = cms.int32( 72 ),
  hcalRegion = cms.int32( 2 ),
  includeBadChambers = cms.bool( False ),
  includeGEM = cms.bool( False ),
  includeME0 = cms.bool( False )
)
process.cosmicsNavigationSchoolESProducer = cms.ESProducer( "NavigationSchoolESProducer",
  ComponentName = cms.string( "CosmicNavigationSchool" ),
  SimpleMagneticField = cms.string( "" )
)
# CTPPS geometry from the preprocessed conditions DB (Run 3 layout).
process.ctppsGeometryESModule = cms.ESProducer( "CTPPSGeometryESModule",
  verbosity = cms.untracked.uint32( 1 ),
  buildMisalignedGeometry = cms.bool( False ),
  isRun2 = cms.bool( False ),
  dbTag = cms.string( "" ),
  compactViewTag = cms.string( "" ),
  fromPreprocessedDB = cms.untracked.bool( True ),
  fromDD4hep = cms.untracked.bool( False ),
  appendToDataLabel = cms.string( "" )
)
process.ctppsInterpolatedOpticalFunctionsESSource = cms.ESProducer( "CTPPSInterpolatedOpticalFunctionsESSource",
  lhcInfoLabel = cms.string( "" ),
  opticsLabel = cms.string( "" ),
  appendToDataLabel = cms.string( "" )
)
# Finer-grained DetId associator for ECAL crystals (0.02 eta bins, 300 x 360).
process.ecalDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "EcalDetIdAssociator" ),
  etaBinSize = cms.double( 0.02 ),
  nEta = cms.int32( 300 ),
  nPhi = cms.int32( 360 ),
  hcalRegion = cms.int32( 2 ),
  includeBadChambers = cms.bool( False ),
  includeGEM = cms.bool( False ),
  includeME0 = cms.bool( False )
)
# ECAL severity-level classification: maps rechit flags and channel DB status
# codes into good/problematic/recovered/bad/weird/time categories.
process.ecalSeverityLevel = cms.ESProducer( "EcalSeverityLevelESProducer",
  flagMask = cms.PSet( 
    kBad = cms.vstring( 'kFaultyHardware',
      'kDead',
      'kKilled' ),
    kGood = cms.vstring( 'kGood' ),
    kRecovered = cms.vstring( 'kLeadingEdgeRecovered',
      'kTowerRecovered' ),
    kProblematic = cms.vstring( 'kPoorReco',
      'kPoorCalib',
      'kNoisy',
      'kSaturated' ),
    kWeird = cms.vstring( 'kWeird',
      'kDiWeird' ),
    kTime = cms.vstring( 'kOutOfTime' )
  ),
  dbstatusMask = cms.PSet( 
    kBad = cms.vstring( 'kNonRespondingIsolated',
      'kDeadVFE',
      'kDeadFE',
      'kNoDataNoTP' ),
    kGood = cms.vstring( 'kOk' ),
    kRecovered = cms.vstring(  ),
    kProblematic = cms.vstring( 'kDAC',
      'kNoLaser',
      'kNoisy',
      'kNNoisy',
      'kNNNoisy',
      'kNNNNoisy',
      'kNNNNNoisy',
      'kFixedG6',
      'kFixedG1',
      'kFixedG0' ),
    kWeird = cms.vstring(  ),
    kTime = cms.vstring(  )
  ),
  timeThresh = cms.double( 2.0 )
)
process.hcalChannelPropertiesESProd = cms.ESProducer( "HcalChannelPropertiesEP" )
process.hcalDDDRecConstants = cms.ESProducer( "HcalDDDRecConstantsESModule",
  appendToDataLabel = cms.string( "" )
)
process.hcalDDDSimConstants = cms.ESProducer( "HcalDDDSimConstantsESModule",
  appendToDataLabel = cms.string( "" )
)
# DetId associator for HCAL cells (same 70 x 72 grid as the calo associator).
process.hcalDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "HcalDetIdAssociator" ),
  etaBinSize = cms.double( 0.087 ),
  nEta = cms.int32( 70 ),
  nPhi = cms.int32( 72 ),
  hcalRegion = cms.int32( 2 ),
  includeBadChambers = cms.bool( False ),
  includeGEM = cms.bool( False ),
  includeME0 = cms.bool( False )
)
# HCAL severity-level computer (Phase 1): assigns an integer severity to each
# combination of channel-status bits and rechit flags; higher level = worse.
# Channels with the DropChannelStatusBits statuses are excluded entirely.
process.hcalRecAlgos = cms.ESProducer( "HcalRecAlgoESProducer",
  phase = cms.uint32( 1 ),
  RecoveredRecHitBits = cms.vstring(  ),
  SeverityLevels = cms.VPSet( 
    cms.PSet(  ChannelStatus = cms.vstring(  ),
      RecHitFlags = cms.vstring( 'TimingFromTDC' ),
      Level = cms.int32( 0 )
    ),
    cms.PSet(  ChannelStatus = cms.vstring( 'HcalCellCaloTowerProb' ),
      RecHitFlags = cms.vstring(  ),
      Level = cms.int32( 1 )
    ),
    cms.PSet(  ChannelStatus = cms.vstring( 'HcalCellExcludeFromHBHENoiseSummary' ),
      RecHitFlags = cms.vstring(  ),
      Level = cms.int32( 5 )
    ),
    cms.PSet(  ChannelStatus = cms.vstring(  ),
      RecHitFlags = cms.vstring( 'HBHEHpdHitMultiplicity',
        'HBHEIsolatedNoise',
        'HBHEFlatNoise',
        'HBHESpikeNoise',
        'HBHETS4TS5Noise',
        'HBHENegativeNoise',
        'HBHEPulseFitBit',
        'HBHEOOTPU' ),
      Level = cms.int32( 8 )
    ),
    cms.PSet(  ChannelStatus = cms.vstring(  ),
      RecHitFlags = cms.vstring( 'HFLongShort',
        'HFS8S1Ratio',
        'HFPET',
        'HFSignalAsymmetry' ),
      Level = cms.int32( 11 )
    ),
    cms.PSet(  ChannelStatus = cms.vstring( 'HcalCellHot' ),
      RecHitFlags = cms.vstring(  ),
      Level = cms.int32( 15 )
    ),
    cms.PSet(  ChannelStatus = cms.vstring( 'HcalCellOff',
      'HcalCellDead' ),
      RecHitFlags = cms.vstring(  ),
      Level = cms.int32( 20 )
    )
  ),
  DropChannelStatusBits = cms.vstring( 'HcalCellMask',
    'HcalCellOff',
    'HcalCellDead' ),
  appendToDataLabel = cms.string( "" )
)
process.hcal_db_producer = cms.ESProducer( "HcalDbProducer" )
# Boosted double-b tagger for AK8 jets: BDT weights loaded from the packaged
# GBRForest file rather than the conditions DB.
process.hltBoostedDoubleSecondaryVertexAK8Computer = cms.ESProducer( "CandidateBoostedDoubleSecondaryVertexESProducer",
  useCondDB = cms.bool( False ),
  weightFile = cms.FileInPath( "RecoBTag/SecondaryVertex/data/BoostedDoubleSV_AK8_BDT_v4.weights.xml.gz" ),
  useGBRForest = cms.bool( True ),
  useAdaBoost = cms.bool( False )
)
# CSV (v1) b-tag computer: track selections for real and pseudo vertices plus
# the combined-discriminator configuration. trackPseudoSelection differs from
# trackSelection only in its sip2dSigMin cut (2.0 vs -99999.9).
process.hltCombinedSecondaryVertex = cms.ESProducer( "CombinedSecondaryVertexESProducer",
  trackPseudoSelection = cms.PSet( 
    maxDistToAxis = cms.double( 0.07 ),
    totalHitsMin = cms.uint32( 0 ),
    ptMin = cms.double( 0.0 ),
    sip2dSigMax = cms.double( 99999.9 ),
    sip2dValMax = cms.double( 99999.9 ),
    sip3dSigMax = cms.double( 99999.9 ),
    sip3dValMax = cms.double( 99999.9 ),
    maxDecayLen = cms.double( 5.0 ),
    qualityClass = cms.string( "any" ),
    jetDeltaRMax = cms.double( 0.3 ),
    normChi2Max = cms.double( 99999.9 ),
    pixelHitsMin = cms.uint32( 0 ),
    sip2dSigMin = cms.double( 2.0 ),
    sip2dValMin = cms.double( -99999.9 ),
    sip3dSigMin = cms.double( -99999.9 ),
    sip3dValMin = cms.double( -99999.9 )
  ),
  trackSelection = cms.PSet( 
    maxDistToAxis = cms.double( 0.07 ),
    totalHitsMin = cms.uint32( 0 ),
    ptMin = cms.double( 0.0 ),
    sip2dSigMax = cms.double( 99999.9 ),
    sip2dValMax = cms.double( 99999.9 ),
    sip3dSigMax = cms.double( 99999.9 ),
    sip3dValMax = cms.double( 99999.9 ),
    maxDecayLen = cms.double( 5.0 ),
    qualityClass = cms.string( "any" ),
    jetDeltaRMax = cms.double( 0.3 ),
    normChi2Max = cms.double( 99999.9 ),
    pixelHitsMin = cms.uint32( 0 ),
    sip2dSigMin = cms.double( -99999.9 ),
    sip2dValMin = cms.double( -99999.9 ),
    sip3dSigMin = cms.double( -99999.9 ),
    sip3dValMin = cms.double( -99999.9 )
  ),
  trackFlip = cms.bool( False ),
  vertexFlip = cms.bool( False ),
  SoftLeptonFlip = cms.bool( False ),
  useTrackWeights = cms.bool( True ),
  pseudoMultiplicityMin = cms.uint32( 2 ),
  correctVertexMass = cms.bool( True ),
  trackPairV0Filter = cms.PSet(  k0sMassWindow = cms.double( 0.03 ) ),
  charmCut = cms.double( 1.5 ),
  minimumTrackWeight = cms.double( 0.5 ),
  pseudoVertexV0Filter = cms.PSet(  k0sMassWindow = cms.double( 0.05 ) ),
  trackMultiplicityMin = cms.uint32( 3 ),
  trackSort = cms.string( "sip2dSig" ),
  useCategories = cms.bool( True ),
  calibrationRecords = cms.vstring( 'CombinedSVRecoVertex',
    'CombinedSVPseudoVertex',
    'CombinedSVNoVertex' ),
  recordLabel = cms.string( "HLT" ),
  categoryVariableName = cms.string( "vertexCategory" )
)
# NOTE(review): this file appears to be a machine-generated ConfDB/cmsRun HLT
# configuration dump; parameter values should be changed only by regenerating
# the menu, not by hand-editing.

# Combined secondary-vertex (IVF v2) b-tag computer: separate "pseudo" and full
# track selections, vertex-mass correction, and the CombinedSVIVFV2* HLT
# calibration records.
process.hltCombinedSecondaryVertexV2 = cms.ESProducer( "CombinedSecondaryVertexESProducer",
  trackPseudoSelection = cms.PSet(
    max_pT_dRcut = cms.double( 0.1 ),
    b_dR = cms.double( 0.6263 ),
    min_pT = cms.double( 120.0 ),
    b_pT = cms.double( 0.3684 ),
    ptMin = cms.double( 0.0 ),
    max_pT_trackPTcut = cms.double( 3.0 ),
    max_pT = cms.double( 500.0 ),
    useVariableJTA = cms.bool( False ),
    maxDecayLen = cms.double( 5.0 ),
    qualityClass = cms.string( "any" ),
    normChi2Max = cms.double( 99999.9 ),
    sip2dValMin = cms.double( -99999.9 ),
    sip3dValMin = cms.double( -99999.9 ),
    a_dR = cms.double( -0.001053 ),
    maxDistToAxis = cms.double( 0.07 ),
    totalHitsMin = cms.uint32( 0 ),
    a_pT = cms.double( 0.005263 ),
    sip2dSigMax = cms.double( 99999.9 ),
    sip2dValMax = cms.double( 99999.9 ),
    sip3dSigMax = cms.double( 99999.9 ),
    sip3dValMax = cms.double( 99999.9 ),
    min_pT_dRcut = cms.double( 0.5 ),
    jetDeltaRMax = cms.double( 0.3 ),
    pixelHitsMin = cms.uint32( 0 ),
    sip3dSigMin = cms.double( -99999.9 ),
    # pseudo-selection is tighter than trackSelection: requires sip2dSig > 2.0
    sip2dSigMin = cms.double( 2.0 )
  ),
  trackSelection = cms.PSet(
    max_pT_dRcut = cms.double( 0.1 ),
    b_dR = cms.double( 0.6263 ),
    min_pT = cms.double( 120.0 ),
    b_pT = cms.double( 0.3684 ),
    ptMin = cms.double( 0.0 ),
    max_pT_trackPTcut = cms.double( 3.0 ),
    max_pT = cms.double( 500.0 ),
    useVariableJTA = cms.bool( False ),
    maxDecayLen = cms.double( 5.0 ),
    qualityClass = cms.string( "any" ),
    normChi2Max = cms.double( 99999.9 ),
    sip2dValMin = cms.double( -99999.9 ),
    sip3dValMin = cms.double( -99999.9 ),
    a_dR = cms.double( -0.001053 ),
    maxDistToAxis = cms.double( 0.07 ),
    totalHitsMin = cms.uint32( 0 ),
    a_pT = cms.double( 0.005263 ),
    sip2dSigMax = cms.double( 99999.9 ),
    sip2dValMax = cms.double( 99999.9 ),
    sip3dSigMax = cms.double( 99999.9 ),
    sip3dValMax = cms.double( 99999.9 ),
    min_pT_dRcut = cms.double( 0.5 ),
    jetDeltaRMax = cms.double( 0.3 ),
    pixelHitsMin = cms.uint32( 0 ),
    sip3dSigMin = cms.double( -99999.9 ),
    sip2dSigMin = cms.double( -99999.9 )
  ),
  trackFlip = cms.bool( False ),
  vertexFlip = cms.bool( False ),
  SoftLeptonFlip = cms.bool( False ),
  useTrackWeights = cms.bool( True ),
  pseudoMultiplicityMin = cms.uint32( 2 ),
  correctVertexMass = cms.bool( True ),
  trackPairV0Filter = cms.PSet( k0sMassWindow = cms.double( 0.03 ) ),
  charmCut = cms.double( 1.5 ),
  minimumTrackWeight = cms.double( 0.5 ),
  pseudoVertexV0Filter = cms.PSet( k0sMassWindow = cms.double( 0.05 ) ),
  trackMultiplicityMin = cms.uint32( 3 ),
  trackSort = cms.string( "sip2dSig" ),
  useCategories = cms.bool( True ),
  calibrationRecords = cms.vstring( 'CombinedSVIVFV2RecoVertex',
    'CombinedSVIVFV2PseudoVertex',
    'CombinedSVIVFV2NoVertex' ),
  recordLabel = cms.string( "HLT" ),
  categoryVariableName = cms.string( "vertexCategory" )
)
# Prompt-track counter for displaced-dijet tagging: signed 2D IP significance,
# tracks accepted up to maxImpactParameter 0.1, nthTrack = -1 (count all).
process.hltDisplacedDijethltESPPromptTrackCountingESProducer = cms.ESProducer( "PromptTrackCountingESProducer",
  impactParameterType = cms.int32( 1 ),
  minimumImpactParameter = cms.double( -1.0 ),
  useSignedImpactParameterSig = cms.bool( True ),
  maximumDistanceToJetAxis = cms.double( 999999.0 ),
  deltaR = cms.double( -1.0 ),
  deltaRmin = cms.double( 0.0 ),
  maximumDecayLength = cms.double( 999999.0 ),
  maxImpactParameter = cms.double( 0.1 ),
  maxImpactParameterSig = cms.double( 999999.0 ),
  trackQualityClass = cms.string( "any" ),
  nthTrack = cms.int32( -1 )
)
# Track-counting tagger on the 1st track, 2D unsigned IP,
# minimumImpactParameter 0.05 (displaced-dijet path).
process.hltDisplacedDijethltESPTrackCounting2D1st = cms.ESProducer( "TrackCountingESProducer",
  a_dR = cms.double( -0.001053 ),
  b_dR = cms.double( 0.6263 ),
  a_pT = cms.double( 0.005263 ),
  b_pT = cms.double( 0.3684 ),
  min_pT = cms.double( 120.0 ),
  max_pT = cms.double( 500.0 ),
  min_pT_dRcut = cms.double( 0.5 ),
  max_pT_dRcut = cms.double( 0.1 ),
  max_pT_trackPTcut = cms.double( 3.0 ),
  minimumImpactParameter = cms.double( 0.05 ),
  useSignedImpactParameterSig = cms.bool( False ),
  impactParameterType = cms.int32( 1 ),
  maximumDistanceToJetAxis = cms.double( 9999999.0 ),
  deltaR = cms.double( -1.0 ),
  maximumDecayLength = cms.double( 999999.0 ),
  nthTrack = cms.int32( 1 ),
  trackQualityClass = cms.string( "any" ),
  useVariableJTA = cms.bool( False )
)
# Analytical track propagator, forward (along momentum).
process.hltESPAnalyticalPropagator = cms.ESProducer( "AnalyticalPropagatorESProducer",
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "hltESPAnalyticalPropagator" ),
  PropagationDirection = cms.string( "alongMomentum" )
)
# Analytical track propagator, backward (opposite to momentum).
process.hltESPBwdAnalyticalPropagator = cms.ESProducer( "AnalyticalPropagatorESProducer",
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "hltESPBwdAnalyticalPropagator" ),
  PropagationDirection = cms.string( "oppositeToMomentum" )
)
# Material propagator with the electron mass (5.11e-4 — presumably GeV, per
# CMSSW convention; confirm), backward direction, for electron tracking.
process.hltESPBwdElectronPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "hltESPBwdElectronPropagator" ),
  Mass = cms.double( 5.11E-4 ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
# The following Chi2Charge estimators differ only in MaxChi2, the displacement /
# tolerance windows, the pT threshold for the charge cut, and which
# HLTSiStripClusterChargeCut* PSet they reference.

# chi2 < 16 estimator with the Loose strip-cluster charge cut.
process.hltESPChi2ChargeLooseMeasurementEstimator16 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeLooseMeasurementEstimator16" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  appendToDataLabel = cms.string( "" )
)
# chi2 < 16 estimator (identical cuts to the "Loose" variant above).
process.hltESPChi2ChargeMeasurementEstimator16 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeMeasurementEstimator16" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  appendToDataLabel = cms.string( "" )
)
# Very loose chi2 < 2000 estimator, no cluster-charge cut.
process.hltESPChi2ChargeMeasurementEstimator2000 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 2000.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeMeasurementEstimator2000" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  appendToDataLabel = cms.string( "" )
)
# chi2 < 30 estimator, no cluster-charge cut.
process.hltESPChi2ChargeMeasurementEstimator30 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 30.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeMeasurementEstimator30" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
  appendToDataLabel = cms.string( "" )
)
# chi2 < 9 estimator; charge cut applied only below pT 15, Loose charge cut.
process.hltESPChi2ChargeMeasurementEstimator9 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeMeasurementEstimator9" ),
  pTChargeCutThreshold = cms.double( 15.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  appendToDataLabel = cms.string( "" )
)
# Heavy-ion variant of the chi2 < 9 estimator (ForHI charge cut).
process.hltESPChi2ChargeMeasurementEstimator9ForHI = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeMeasurementEstimator9ForHI" ),
  pTChargeCutThreshold = cms.double( 15.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutForHI" ) ),
  appendToDataLabel = cms.string( "" )
)
# chi2 < 16 estimator with the Tight strip-cluster charge cut.
process.hltESPChi2ChargeTightMeasurementEstimator16 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2ChargeTightMeasurementEstimator16" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
# NOTE(review): despite the "100" in the component name, MaxChi2 here is 40.0
# (and nSigma 4.0) — presumably intentional in the generated menu; confirm
# against the ConfDB source before relying on the name.
process.hltESPChi2MeasurementEstimator100 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 40.0 ),
  nSigma = cms.double( 4.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1.0E12 ),
  ComponentName = cms.string( "hltESPChi2MeasurementEstimator100" ),
  appendToDataLabel = cms.string( "" )
)
# Plain chi2 < 16 measurement estimator (no cluster-charge cut variant).
process.hltESPChi2MeasurementEstimator16 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2MeasurementEstimator16" ),
  appendToDataLabel = cms.string( "" )
)
# Plain chi2 < 30 measurement estimator; referenced by the KF fitters/smoothers below.
process.hltESPChi2MeasurementEstimator30 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 30.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2MeasurementEstimator30" ),
  appendToDataLabel = cms.string( "" )
)
# Plain chi2 < 9 measurement estimator.
process.hltESPChi2MeasurementEstimator9 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPChi2MeasurementEstimator9" ),
  appendToDataLabel = cms.string( "" )
)
# GSF component merger (5D), limited to 12 components, using the
# Kullback-Leibler distance defined further below.
process.hltESPCloseComponentsMerger5D = cms.ESProducer( "CloseComponentsMergerESProducer5D",
  ComponentName = cms.string( "hltESPCloseComponentsMerger5D" ),
  MaxComponents = cms.int32( 12 ),
  DistanceMeasure = cms.string( "hltESPKullbackLeiblerDistance5D" )
)
# Detached-quad iteration: chi2 < 9 estimator with Tight charge cut.
process.hltESPDetachedQuadStepChi2ChargeMeasurementEstimator9 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPDetachedQuadStepChi2ChargeMeasurementEstimator9" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
# Trajectory cleaners: drop trajectories sharing > fractionShared of hits.
process.hltESPDetachedQuadStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPDetachedQuadStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.13 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
process.hltESPDetachedStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPDetachedStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.13 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# Detached-triplet iteration: chi2 < 9 estimator with Tight charge cut.
process.hltESPDetachedTripletStepChi2ChargeMeasurementEstimator9 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPDetachedTripletStepChi2ChargeMeasurementEstimator9" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPDetachedTripletStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPDetachedTripletStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.13 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# Prompt-track counter, displaced-dijet variant (maxImpactParameter 0.1);
# identical to hltDisplacedDijethltESPPromptTrackCountingESProducer above.
process.hltESPDisplacedDijethltPromptTrackCountingESProducer = cms.ESProducer( "PromptTrackCountingESProducer",
  impactParameterType = cms.int32( 1 ),
  minimumImpactParameter = cms.double( -1.0 ),
  useSignedImpactParameterSig = cms.bool( True ),
  maximumDistanceToJetAxis = cms.double( 999999.0 ),
  deltaR = cms.double( -1.0 ),
  deltaRmin = cms.double( 0.0 ),
  maximumDecayLength = cms.double( 999999.0 ),
  maxImpactParameter = cms.double( 0.1 ),
  maxImpactParameterSig = cms.double( 999999.0 ),
  trackQualityClass = cms.string( "any" ),
  nthTrack = cms.int32( -1 )
)
# "Long" variant: only differs in maxImpactParameter (0.2 instead of 0.1).
process.hltESPDisplacedDijethltPromptTrackCountingESProducerLong = cms.ESProducer( "PromptTrackCountingESProducer",
  impactParameterType = cms.int32( 1 ),
  minimumImpactParameter = cms.double( -1.0 ),
  useSignedImpactParameterSig = cms.bool( True ),
  maximumDistanceToJetAxis = cms.double( 999999.0 ),
  deltaR = cms.double( -1.0 ),
  deltaRmin = cms.double( 0.0 ),
  maximumDecayLength = cms.double( 999999.0 ),
  maxImpactParameter = cms.double( 0.2 ),
  maxImpactParameterSig = cms.double( 999999.0 ),
  trackQualityClass = cms.string( "any" ),
  nthTrack = cms.int32( -1 )
)
# Track counting on the 1st track, unsigned 2D IP >= 0.05.
process.hltESPDisplacedDijethltTrackCounting2D1st = cms.ESProducer( "TrackCountingESProducer",
  a_dR = cms.double( -0.001053 ),
  b_dR = cms.double( 0.6263 ),
  a_pT = cms.double( 0.005263 ),
  b_pT = cms.double( 0.3684 ),
  min_pT = cms.double( 120.0 ),
  max_pT = cms.double( 500.0 ),
  min_pT_dRcut = cms.double( 0.5 ),
  max_pT_dRcut = cms.double( 0.1 ),
  max_pT_trackPTcut = cms.double( 3.0 ),
  minimumImpactParameter = cms.double( 0.05 ),
  useSignedImpactParameterSig = cms.bool( False ),
  impactParameterType = cms.int32( 1 ),
  maximumDistanceToJetAxis = cms.double( 9999999.0 ),
  deltaR = cms.double( -1.0 ),
  maximumDecayLength = cms.double( 999999.0 ),
  nthTrack = cms.int32( 1 ),
  trackQualityClass = cms.string( "any" ),
  useVariableJTA = cms.bool( False )
)
# Track counting on the 2nd track, signed 2D IP significance >= 0.2 ("Long").
process.hltESPDisplacedDijethltTrackCounting2D2ndLong = cms.ESProducer( "TrackCountingESProducer",
  a_dR = cms.double( -0.001053 ),
  b_dR = cms.double( 0.6263 ),
  a_pT = cms.double( 0.005263 ),
  b_pT = cms.double( 0.3684 ),
  min_pT = cms.double( 120.0 ),
  max_pT = cms.double( 500.0 ),
  min_pT_dRcut = cms.double( 0.5 ),
  max_pT_dRcut = cms.double( 0.1 ),
  max_pT_trackPTcut = cms.double( 3.0 ),
  minimumImpactParameter = cms.double( 0.2 ),
  useSignedImpactParameterSig = cms.bool( True ),
  impactParameterType = cms.int32( 1 ),
  maximumDistanceToJetAxis = cms.double( 9999999.0 ),
  deltaR = cms.double( -1.0 ),
  maximumDecayLength = cms.double( 999999.0 ),
  nthTrack = cms.int32( 2 ),
  trackQualityClass = cms.string( "any" ),
  useVariableJTA = cms.bool( False )
)
# Placeholder detector-layer geometry used by fitters that need no real geometry.
process.hltESPDummyDetLayerGeometry = cms.ESProducer( "DetLayerGeometryESProducer",
  ComponentName = cms.string( "hltESPDummyDetLayerGeometry" )
)
# ECAL trigger-tower constituents map, from the endcap TT map data file.
process.hltESPEcalTrigTowerConstituentsMapBuilder = cms.ESProducer( "EcalTrigTowerConstituentsMapBuilder",
  MapFile = cms.untracked.string( "Geometry/EcalMapping/data/EndCap_TTMap.txt" )
)
# GSF material effects for electrons: Bethe-Heitler energy loss +
# multiple-scattering updators.
process.hltESPElectronMaterialEffects = cms.ESProducer( "GsfMaterialEffectsESProducer",
  BetheHeitlerParametrization = cms.string( "BetheHeitler_cdfmom_nC6_O5.par" ),
  EnergyLossUpdator = cms.string( "GsfBetheHeitlerUpdator" ),
  ComponentName = cms.string( "hltESPElectronMaterialEffects" ),
  MultipleScatteringUpdator = cms.string( "MultipleScatteringUpdator" ),
  Mass = cms.double( 5.11E-4 ),
  BetheHeitlerCorrection = cms.int32( 2 )
)
# Stepping-helix propagator, any direction, with the L2-speed tuning enabled.
process.hltESPFastSteppingHelixPropagatorAny = cms.ESProducer( "SteppingHelixPropagatorESProducer",
  ComponentName = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
  NoErrorPropagation = cms.bool( False ),
  PropagationDirection = cms.string( "anyDirection" ),
  useTuningForL2Speed = cms.bool( True ),
  useIsYokeFlag = cms.bool( True ),
  endcapShiftInZNeg = cms.double( 0.0 ),
  SetVBFPointer = cms.bool( False ),
  AssumeNoMaterial = cms.bool( False ),
  endcapShiftInZPos = cms.double( 0.0 ),
  useInTeslaFromMagField = cms.bool( False ),
  VBFName = cms.string( "VolumeBasedMagneticField" ),
  useEndcapShiftsInZ = cms.bool( False ),
  sendLogWarning = cms.bool( False ),
  useMatVolumes = cms.bool( True ),
  debug = cms.bool( False ),
  ApplyRadX0Correction = cms.bool( True ),
  useMagVolumes = cms.bool( True ),
  returnTangentPlane = cms.bool( True )
)
# Same stepping-helix propagator, restricted to the opposite-to-momentum direction.
process.hltESPFastSteppingHelixPropagatorOpposite = cms.ESProducer( "SteppingHelixPropagatorESProducer",
  ComponentName = cms.string( "hltESPFastSteppingHelixPropagatorOpposite" ),
  NoErrorPropagation = cms.bool( False ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useTuningForL2Speed = cms.bool( True ),
  useIsYokeFlag = cms.bool( True ),
  endcapShiftInZNeg = cms.double( 0.0 ),
  SetVBFPointer = cms.bool( False ),
  AssumeNoMaterial = cms.bool( False ),
  endcapShiftInZPos = cms.double( 0.0 ),
  useInTeslaFromMagField = cms.bool( False ),
  VBFName = cms.string( "VolumeBasedMagneticField" ),
  useEndcapShiftsInZ = cms.bool( False ),
  sendLogWarning = cms.bool( False ),
  useMatVolumes = cms.bool( True ),
  debug = cms.bool( False ),
  ApplyRadX0Correction = cms.bool( True ),
  useMagVolumes = cms.bool( True ),
  returnTangentPlane = cms.bool( True )
)
# KF fitting-smoother for the iterative-tracking ("IT") fits: RK fitter/smoother
# pair, min 3 hits, rejects trajectories with 2 consecutive missing hits.
process.hltESPFittingSmootherIT = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPFittingSmootherIT" ),
  Fitter = cms.string( "hltESPTrajectoryFitterRK" ),
  Smoother = cms.string( "hltESPTrajectorySmootherRK" ),
  EstimateCut = cms.double( -1.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 3 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( True ),
  NoInvalidHitsBeginEnd = cms.bool( True ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
# RK fitting-smoother variant: min 5 hits, looser invalid/missing-hit handling.
process.hltESPFittingSmootherRK = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPFittingSmootherRK" ),
  Fitter = cms.string( "hltESPTrajectoryFitterRK" ),
  Smoother = cms.string( "hltESPTrajectorySmootherRK" ),
  EstimateCut = cms.double( -1.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 5 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
# Dispatcher that picks the standard or the looper fitter per trajectory.
process.hltESPFlexibleKFFittingSmoother = cms.ESProducer( "FlexibleKFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPFlexibleKFFittingSmoother" ),
  standardFitter = cms.string( "hltESPKFFittingSmootherWithOutliersRejectionAndRK" ),
  looperFitter = cms.string( "hltESPKFFittingSmootherForLoopers" ),
  appendToDataLabel = cms.string( "" )
)
# Forward counterpart of hltESPBwdElectronPropagator (along momentum).
process.hltESPFwdElectronPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "hltESPFwdElectronPropagator" ),
  Mass = cms.double( 5.11E-4 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( False ),
  ptMin = cms.double( -1.0 )
)
# Real (global) detector-layer geometry, used by GSF/looper fitters below.
process.hltESPGlobalDetLayerGeometry = cms.ESProducer( "GlobalDetLayerGeometryESProducer",
  ComponentName = cms.string( "hltESPGlobalDetLayerGeometry" )
)
process.hltESPGlobalTrackingGeometryESProducer = cms.ESProducer( "GlobalTrackingGeometryESProducer" )
# GSF electron fitting-smoother, wiring the GSF fitter/smoother pair below.
process.hltESPGsfElectronFittingSmoother = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPGsfElectronFittingSmoother" ),
  Fitter = cms.string( "hltESPGsfTrajectoryFitter" ),
  Smoother = cms.string( "hltESPGsfTrajectorySmoother" ),
  EstimateCut = cms.double( -1.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 5 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( True ),
  NoInvalidHitsBeginEnd = cms.bool( True ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
# GSF trajectory fitter: forward analytical propagator + electron material effects.
process.hltESPGsfTrajectoryFitter = cms.ESProducer( "GsfTrajectoryFitterESProducer",
  Merger = cms.string( "hltESPCloseComponentsMerger5D" ),
  ComponentName = cms.string( "hltESPGsfTrajectoryFitter" ),
  MaterialEffectsUpdator = cms.string( "hltESPElectronMaterialEffects" ),
  GeometricalPropagator = cms.string( "hltESPAnalyticalPropagator" ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" )
)
# GSF trajectory smoother: backward propagator, error rescaling 100.
process.hltESPGsfTrajectorySmoother = cms.ESProducer( "GsfTrajectorySmootherESProducer",
  Merger = cms.string( "hltESPCloseComponentsMerger5D" ),
  ComponentName = cms.string( "hltESPGsfTrajectorySmoother" ),
  MaterialEffectsUpdator = cms.string( "hltESPElectronMaterialEffects" ),
  ErrorRescaling = cms.double( 100.0 ),
  GeometricalPropagator = cms.string( "hltESPBwdAnalyticalPropagator" ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" )
)
# High-pT triplet iteration: chi2 < 30 with Loose charge cut below pT 15.
process.hltESPHighPtTripletStepChi2ChargeMeasurementEstimator30 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 30.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPHighPtTripletStepChi2ChargeMeasurementEstimator30" ),
  pTChargeCutThreshold = cms.double( 15.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  appendToDataLabel = cms.string( "" )
)
# Initial iteration: chi2 < 30 with Loose charge cut below pT 15.
process.hltESPInitialStepChi2ChargeMeasurementEstimator30 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 30.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPInitialStepChi2ChargeMeasurementEstimator30" ),
  pTChargeCutThreshold = cms.double( 15.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  appendToDataLabel = cms.string( "" )
)
# Initial iteration: plain chi2 < 36 estimator (no charge cut).
process.hltESPInitialStepChi2MeasurementEstimator36 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 36.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPInitialStepChi2MeasurementEstimator36" ),
  appendToDataLabel = cms.string( "" )
)
# Generic KF fitting-smoother (KF fitter + smoother defined further below).
process.hltESPKFFittingSmoother = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPKFFittingSmoother" ),
  Fitter = cms.string( "hltESPKFTrajectoryFitter" ),
  Smoother = cms.string( "hltESPKFTrajectorySmoother" ),
  EstimateCut = cms.double( -1.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 5 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
# L2-muon variant: same cuts, but uses the ForL2Muon fitter/smoother pair.
process.hltESPKFFittingSmootherForL2Muon = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPKFFittingSmootherForL2Muon" ),
  Fitter = cms.string( "hltESPKFTrajectoryFitterForL2Muon" ),
  Smoother = cms.string( "hltESPKFTrajectorySmootherForL2Muon" ),
  EstimateCut = cms.double( -1.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 5 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
# Looper variant: outlier rejection enabled (EstimateCut 20), min 3 hits.
process.hltESPKFFittingSmootherForLoopers = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPKFFittingSmootherForLoopers" ),
  Fitter = cms.string( "hltESPKFTrajectoryFitterForLoopers" ),
  Smoother = cms.string( "hltESPKFTrajectorySmootherForLoopers" ),
  EstimateCut = cms.double( 20.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 3 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( True ),
  NoInvalidHitsBeginEnd = cms.bool( True ),
  LogPixelProbabilityCut = cms.double( -14.0 ),
  appendToDataLabel = cms.string( "" )
)
# Standard fitter used by hltESPFlexibleKFFittingSmoother: RK propagation with
# outlier rejection (EstimateCut 20).
process.hltESPKFFittingSmootherWithOutliersRejectionAndRK = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPKFFittingSmootherWithOutliersRejectionAndRK" ),
  Fitter = cms.string( "hltESPRKTrajectoryFitter" ),
  Smoother = cms.string( "hltESPRKTrajectorySmoother" ),
  EstimateCut = cms.double( 20.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 3 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( True ),
  NoInvalidHitsBeginEnd = cms.bool( True ),
  LogPixelProbabilityCut = cms.double( -14.0 ),
  appendToDataLabel = cms.string( "" )
)
# KF trajectory fitters: identical apart from the Propagator / RecoGeometry used.
process.hltESPKFTrajectoryFitter = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPKFTrajectoryFitter" ),
  Propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPKFTrajectoryFitterForL2Muon = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPKFTrajectoryFitterForL2Muon" ),
  Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPKFTrajectoryFitterForLoopers = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPKFTrajectoryFitterForLoopers" ),
  Propagator = cms.string( "PropagatorWithMaterialForLoopers" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
# KF trajectory smoothers: as the fitters, plus an errorRescaling factor.
process.hltESPKFTrajectorySmoother = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPKFTrajectorySmoother" ),
  Propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  errorRescaling = cms.double( 100.0 ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPKFTrajectorySmootherForL2Muon = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPKFTrajectorySmootherForL2Muon" ),
  Propagator = cms.string( "hltESPFastSteppingHelixPropagatorOpposite" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  errorRescaling = cms.double( 100.0 ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPKFTrajectorySmootherForLoopers = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPKFTrajectorySmootherForLoopers" ),
  Propagator = cms.string( "PropagatorWithMaterialForLoopers" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" ),
  errorRescaling = cms.double( 10.0 ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPKFTrajectorySmootherForMuonTrackLoader = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" ),
  Propagator = cms.string( "hltESPSmartPropagatorAnyOpposite" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  errorRescaling = cms.double( 10.0 ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
# Standard Kalman-filter state updator shared by all KF fitters/smoothers above.
process.hltESPKFUpdator = cms.ESProducer( "KFUpdatorESProducer",
  ComponentName = cms.string( "hltESPKFUpdator" )
)
# Kullback-Leibler distance (5D) used by hltESPCloseComponentsMerger5D.
process.hltESPKullbackLeiblerDistance5D = cms.ESProducer( "DistanceBetweenComponentsESProducer5D",
  ComponentName = cms.string( "hltESPKullbackLeiblerDistance5D" ),
  DistanceMeasure = cms.string( "KullbackLeibler" )
)
# KF fitter for L3 muons, using the SmartPropagatorAny.
process.hltESPL3MuKFTrajectoryFitter = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
  Propagator = cms.string( "hltESPSmartPropagatorAny" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
# Low-pT quad iteration: chi2 < 9 estimator with Tight charge cut.
process.hltESPLowPtQuadStepChi2ChargeMeasurementEstimator9 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPLowPtQuadStepChi2ChargeMeasurementEstimator9" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
# Low-pT step trajectory cleaners: fractionShared 0.16 (vs 0.13 for the
# detached steps above), otherwise identical settings.
process.hltESPLowPtQuadStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPLowPtQuadStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.16 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
process.hltESPLowPtStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPLowPtStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.16 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# Low-pT triplet iteration: chi2 < 9 estimator with Tight charge cut.
process.hltESPLowPtTripletStepChi2ChargeMeasurementEstimator9 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPLowPtTripletStepChi2ChargeMeasurementEstimator9" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet( refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPLowPtTripletStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPLowPtTripletStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.16 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# --- Measurement tracker and mixed-step / muon EventSetup producers ---
# MeasurementTracker: pairs the pixel/strip cluster-parameter estimators with
# module-quality DB filtering; badStripCuts limit tolerated bad strips per partition.
process.hltESPMeasurementTracker = cms.ESProducer( "MeasurementTrackerESProducer",
  ComponentName = cms.string( "hltESPMeasurementTracker" ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  StripCPE = cms.string( "hltESPStripCPEfromTrackAngle" ),
  HitMatcher = cms.string( "StandardMatcher" ),
  Phase2StripCPE = cms.string( "" ),
  SiStripQualityLabel = cms.string( "" ),
  UseStripModuleQualityDB = cms.bool( True ),
  DebugStripModuleQualityDB = cms.untracked.bool( False ),
  UseStripAPVFiberQualityDB = cms.bool( True ),
  DebugStripAPVFiberQualityDB = cms.untracked.bool( False ),
  MaskBadAPVFibers = cms.bool( True ),
  UseStripStripQualityDB = cms.bool( True ),
  DebugStripStripQualityDB = cms.untracked.bool( False ),
  badStripCuts = cms.PSet(
    TOB = cms.PSet(
      maxBad = cms.uint32( 4 ),
      maxConsecutiveBad = cms.uint32( 2 )
    ),
    TIB = cms.PSet(
      maxBad = cms.uint32( 4 ),
      maxConsecutiveBad = cms.uint32( 2 )
    ),
    TID = cms.PSet(
      maxBad = cms.uint32( 4 ),
      maxConsecutiveBad = cms.uint32( 2 )
    ),
    TEC = cms.PSet(
      maxBad = cms.uint32( 4 ),
      maxConsecutiveBad = cms.uint32( 2 )
    )
  ),
  UsePixelModuleQualityDB = cms.bool( True ),
  DebugPixelModuleQualityDB = cms.untracked.bool( False ),
  UsePixelROCQualityDB = cms.bool( True ),
  DebugPixelROCQualityDB = cms.untracked.bool( False ),
  appendToDataLabel = cms.string( "" )
)
# Cluster-shape hit filter for the MixedStep iteration; pixel shape tables loaded from .par files.
process.hltESPMixedStepClusterShapeHitFilter = cms.ESProducer( "ClusterShapeHitFilterESProducer",
  ComponentName = cms.string( "hltESPMixedStepClusterShapeHitFilter" ),
  PixelShapeFile = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_noL1.par" ),
  PixelShapeFileL1 = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_loose.par" ),
  clusterChargeCut = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) )
)
process.hltESPMixedStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPMixedStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.11 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# MixedTriplet iteration uses a looser chi2 cut (16) than the low-pT steps above (9).
process.hltESPMixedTripletStepChi2ChargeMeasurementEstimator16 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPMixedTripletStepChi2ChargeMeasurementEstimator16" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPMixedTripletStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPMixedTripletStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.11 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# Muon detector-layer geometry and muon transient rec-hit builder (default parameters).
process.hltESPMuonDetLayerGeometryESProducer = cms.ESProducer( "MuonDetLayerGeometryESProducer" )
process.hltESPMuonTransientTrackingRecHitBuilder = cms.ESProducer( "MuonTransientTrackingRecHitBuilderESProducer",
  ComponentName = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
)
# --- Pixel cluster-parameter estimators and PixelLess/PixelPair step producers ---
# Generic pixel CPE: fast position/error estimation with per-layer error tables
# (xerr_*/yerr_* in cm, presumably — TODO confirm units against the CPE code).
process.hltESPPixelCPEGeneric = cms.ESProducer( "PixelCPEGenericESProducer",
  LoadTemplatesFromDB = cms.bool( True ),
  Alpha2Order = cms.bool( True ),
  ClusterProbComputationFlag = cms.int32( 0 ),
  useLAWidthFromDB = cms.bool( False ),
  lAOffset = cms.double( 0.0 ),
  lAWidthBPix = cms.double( 0.0 ),
  lAWidthFPix = cms.double( 0.0 ),
  doLorentzFromAlignment = cms.bool( False ),
  useLAFromDB = cms.bool( True ),
  xerr_barrel_l1 = cms.vdouble( 0.00115, 0.0012, 8.8E-4 ),
  yerr_barrel_l1 = cms.vdouble( 0.00375, 0.0023, 0.0025, 0.0025, 0.0023, 0.0023, 0.0021, 0.0021, 0.0024 ),
  xerr_barrel_ln = cms.vdouble( 0.00115, 0.0012, 8.8E-4 ),
  yerr_barrel_ln = cms.vdouble( 0.00375, 0.0023, 0.0025, 0.0025, 0.0023, 0.0023, 0.0021, 0.0021, 0.0024 ),
  xerr_endcap = cms.vdouble( 0.002, 0.002 ),
  yerr_endcap = cms.vdouble( 0.0021 ),
  xerr_barrel_l1_def = cms.double( 0.0103 ),
  yerr_barrel_l1_def = cms.double( 0.0021 ),
  xerr_barrel_ln_def = cms.double( 0.0103 ),
  yerr_barrel_ln_def = cms.double( 0.0021 ),
  xerr_endcap_def = cms.double( 0.002 ),
  yerr_endcap_def = cms.double( 7.5E-4 ),
  eff_charge_cut_highX = cms.double( 1.0 ),
  eff_charge_cut_highY = cms.double( 1.0 ),
  eff_charge_cut_lowX = cms.double( 0.0 ),
  eff_charge_cut_lowY = cms.double( 0.0 ),
  size_cutX = cms.double( 3.0 ),
  size_cutY = cms.double( 3.0 ),
  EdgeClusterErrorX = cms.double( 50.0 ),
  EdgeClusterErrorY = cms.double( 85.0 ),
  inflate_errors = cms.bool( False ),
  inflate_all_errors_no_trk_angle = cms.bool( False ),
  NoTemplateErrorsWhenNoTrkAngles = cms.bool( False ),
  UseErrorsFromTemplates = cms.bool( True ),
  TruncatePixelCharge = cms.bool( True ),
  IrradiationBiasCorrection = cms.bool( True ),
  DoCosmics = cms.bool( False ),
  Upgrade = cms.bool( False ),
  SmallPitch = cms.bool( False ),
  ComponentName = cms.string( "hltESPPixelCPEGeneric" ),
  MagneticFieldRecord = cms.ESInputTag( "","" ),
  appendToDataLabel = cms.string( "" )
)
# Template-based pixel CPE: slower but more precise reconstruction used by
# hltESPTTRHBuilderAngleAndTemplate; templates come from the DB (barrel/forward ID 0).
process.hltESPPixelCPETemplateReco = cms.ESProducer( "PixelCPETemplateRecoESProducer",
  LoadTemplatesFromDB = cms.bool( True ),
  Alpha2Order = cms.bool( True ),
  ClusterProbComputationFlag = cms.int32( 0 ),
  useLAWidthFromDB = cms.bool( True ),
  lAOffset = cms.double( 0.0 ),
  lAWidthBPix = cms.double( 0.0 ),
  lAWidthFPix = cms.double( 0.0 ),
  doLorentzFromAlignment = cms.bool( False ),
  useLAFromDB = cms.bool( True ),
  barrelTemplateID = cms.int32( 0 ),
  forwardTemplateID = cms.int32( 0 ),
  directoryWithTemplates = cms.int32( 0 ),
  speed = cms.int32( -2 ),
  UseClusterSplitter = cms.bool( False ),
  ComponentName = cms.string( "hltESPPixelCPETemplateReco" ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPPixelLessStepChi2ChargeMeasurementEstimator16 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPPixelLessStepChi2ChargeMeasurementEstimator16" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPPixelLessStepClusterShapeHitFilter = cms.ESProducer( "ClusterShapeHitFilterESProducer",
  ComponentName = cms.string( "hltESPPixelLessStepClusterShapeHitFilter" ),
  PixelShapeFile = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_noL1.par" ),
  PixelShapeFileL1 = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_loose.par" ),
  clusterChargeCut = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) )
)
process.hltESPPixelLessStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPPixelLessStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.11 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# PixelPair iteration: loose strip charge cut and a 15 GeV pT threshold for the charge cut,
# unlike the tight-cut estimators above.
process.hltESPPixelPairStepChi2ChargeMeasurementEstimator9 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 9.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1.0E12 ),
  ComponentName = cms.string( "hltESPPixelPairStepChi2ChargeMeasurementEstimator9" ),
  pTChargeCutThreshold = cms.double( 15.0 ),
  clusterChargeCut = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutLoose" ) ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPPixelPairStepChi2MeasurementEstimator25 = cms.ESProducer( "Chi2MeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 25.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 100.0 ),
  MaxSagitta = cms.double( -1.0 ),
  MinimalTolerance = cms.double( 10.0 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPPixelPairStepChi2MeasurementEstimator25" ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPPixelPairTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPPixelPairTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.19 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# --- Runge-Kutta fitters, track propagators, and strip CPE ---
# RK-propagated KF fitter/smoother pair sharing the same updator/estimator;
# the smoother rescales seed errors by 100 before running backwards.
process.hltESPRKTrajectoryFitter = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPRKTrajectoryFitter" ),
  Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPRKTrajectorySmoother = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPRKTrajectorySmoother" ),
  Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPGlobalDetLayerGeometry" ),
  errorRescaling = cms.double( 100.0 ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
# Runge-Kutta propagator with material effects; Mass 0.105 is the muon mass in GeV.
process.hltESPRungeKuttaTrackerPropagator = cms.ESProducer( "PropagatorWithMaterialESProducer",
  SimpleMagneticField = cms.string( "" ),
  MaxDPhi = cms.double( 1.6 ),
  ComponentName = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
  Mass = cms.double( 0.105 ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useRungeKutta = cms.bool( True ),
  ptMin = cms.double( -1.0 )
)
# Smart propagators: switch between a tracker propagator and a muon-system
# (stepping-helix) propagator at radius Epsilon; three variants differ in
# direction and in the muon-side propagator used.
process.hltESPSmartPropagator = cms.ESProducer( "SmartPropagatorESProducer",
  ComponentName = cms.string( "hltESPSmartPropagator" ),
  TrackerPropagator = cms.string( "PropagatorWithMaterial" ),
  MuonPropagator = cms.string( "hltESPSteppingHelixPropagatorAlong" ),
  PropagationDirection = cms.string( "alongMomentum" ),
  Epsilon = cms.double( 5.0 )
)
process.hltESPSmartPropagatorAny = cms.ESProducer( "SmartPropagatorESProducer",
  ComponentName = cms.string( "hltESPSmartPropagatorAny" ),
  TrackerPropagator = cms.string( "PropagatorWithMaterial" ),
  MuonPropagator = cms.string( "SteppingHelixPropagatorAny" ),
  PropagationDirection = cms.string( "alongMomentum" ),
  Epsilon = cms.double( 5.0 )
)
process.hltESPSmartPropagatorAnyOpposite = cms.ESProducer( "SmartPropagatorESProducer",
  ComponentName = cms.string( "hltESPSmartPropagatorAnyOpposite" ),
  TrackerPropagator = cms.string( "PropagatorWithMaterialOpposite" ),
  MuonPropagator = cms.string( "SteppingHelixPropagatorAny" ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  Epsilon = cms.double( 5.0 )
)
process.hltESPSoftLeptonByDistance = cms.ESProducer( "LeptonTaggerByDistanceESProducer",
  distance = cms.double( 0.5 )
)
# Stepping-helix propagators through the muon system; the two instances differ
# only in PropagationDirection (along vs opposite to momentum).
process.hltESPSteppingHelixPropagatorAlong = cms.ESProducer( "SteppingHelixPropagatorESProducer",
  ComponentName = cms.string( "hltESPSteppingHelixPropagatorAlong" ),
  NoErrorPropagation = cms.bool( False ),
  PropagationDirection = cms.string( "alongMomentum" ),
  useTuningForL2Speed = cms.bool( False ),
  useIsYokeFlag = cms.bool( True ),
  endcapShiftInZNeg = cms.double( 0.0 ),
  SetVBFPointer = cms.bool( False ),
  AssumeNoMaterial = cms.bool( False ),
  endcapShiftInZPos = cms.double( 0.0 ),
  useInTeslaFromMagField = cms.bool( False ),
  VBFName = cms.string( "VolumeBasedMagneticField" ),
  useEndcapShiftsInZ = cms.bool( False ),
  sendLogWarning = cms.bool( False ),
  useMatVolumes = cms.bool( True ),
  debug = cms.bool( False ),
  ApplyRadX0Correction = cms.bool( True ),
  useMagVolumes = cms.bool( True ),
  returnTangentPlane = cms.bool( True )
)
process.hltESPSteppingHelixPropagatorOpposite = cms.ESProducer( "SteppingHelixPropagatorESProducer",
  ComponentName = cms.string( "hltESPSteppingHelixPropagatorOpposite" ),
  NoErrorPropagation = cms.bool( False ),
  PropagationDirection = cms.string( "oppositeToMomentum" ),
  useTuningForL2Speed = cms.bool( False ),
  useIsYokeFlag = cms.bool( True ),
  endcapShiftInZNeg = cms.double( 0.0 ),
  SetVBFPointer = cms.bool( False ),
  AssumeNoMaterial = cms.bool( False ),
  endcapShiftInZPos = cms.double( 0.0 ),
  useInTeslaFromMagField = cms.bool( False ),
  VBFName = cms.string( "VolumeBasedMagneticField" ),
  useEndcapShiftsInZ = cms.bool( False ),
  sendLogWarning = cms.bool( False ),
  useMatVolumes = cms.bool( True ),
  debug = cms.bool( False ),
  ApplyRadX0Correction = cms.bool( True ),
  useMagVolumes = cms.bool( True ),
  returnTangentPlane = cms.bool( True )
)
# Strip CPE using the local track angle; mXXX_Pn values are parametrization
# constants per tracker partition (TIB/TOB/TID/TEC) — menu-tuned, do not edit.
process.hltESPStripCPEfromTrackAngle = cms.ESProducer( "StripCPEESProducer",
  ComponentName = cms.string( "hltESPStripCPEfromTrackAngle" ),
  ComponentType = cms.string( "StripCPEfromTrackAngle" ),
  parameters = cms.PSet(
    mTIB_P1 = cms.double( 0.202 ),
    maxChgOneMIP = cms.double( 6000.0 ),
    mTEC_P0 = cms.double( -1.885 ),
    mTOB_P1 = cms.double( 0.253 ),
    mTEC_P1 = cms.double( 0.471 ),
    mLC_P2 = cms.double( 0.3 ),
    mLC_P1 = cms.double( 0.618 ),
    mTOB_P0 = cms.double( -1.026 ),
    mLC_P0 = cms.double( -0.326 ),
    useLegacyError = cms.bool( False ),
    mTIB_P0 = cms.double( -0.742 ),
    mTID_P1 = cms.double( 0.433 ),
    mTID_P0 = cms.double( -1.427 )
  )
)
# --- Transient tracking rec-hit builders and TobTec-step producers ---
# TTRH builders differ only in which pixel/strip CPEs they combine; the
# "PixelOnly"/"WithoutAngle" variants use a fake strip CPE (no strip hits).
process.hltESPTTRHBWithTrackAngle = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
  StripCPE = cms.string( "hltESPStripCPEfromTrackAngle" ),
  ComponentName = cms.string( "hltESPTTRHBWithTrackAngle" ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  Matcher = cms.string( "StandardMatcher" ),
  ComputeCoarseLocalPositionFromDisk = cms.bool( False )
)
process.hltESPTTRHBuilderAngleAndTemplate = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
  StripCPE = cms.string( "hltESPStripCPEfromTrackAngle" ),
  ComponentName = cms.string( "hltESPTTRHBuilderAngleAndTemplate" ),
  PixelCPE = cms.string( "hltESPPixelCPETemplateReco" ),
  Matcher = cms.string( "StandardMatcher" ),
  ComputeCoarseLocalPositionFromDisk = cms.bool( False )
)
process.hltESPTTRHBuilderPixelOnly = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
  StripCPE = cms.string( "Fake" ),
  ComponentName = cms.string( "hltESPTTRHBuilderPixelOnly" ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  Matcher = cms.string( "StandardMatcher" ),
  ComputeCoarseLocalPositionFromDisk = cms.bool( False )
)
process.hltESPTTRHBuilderWithoutAngle4PixelTriplets = cms.ESProducer( "TkTransientTrackingRecHitBuilderESProducer",
  StripCPE = cms.string( "Fake" ),
  ComponentName = cms.string( "hltESPTTRHBuilderWithoutAngle4PixelTriplets" ),
  PixelCPE = cms.string( "hltESPPixelCPEGeneric" ),
  Matcher = cms.string( "StandardMatcher" ),
  ComputeCoarseLocalPositionFromDisk = cms.bool( False )
)
# TobTec iteration (strip-only seeds): estimator, shape filter, and a standard +
# looper fitter/smoother family combined by the Flexible fitting smoother.
process.hltESPTobTecStepChi2ChargeMeasurementEstimator16 = cms.ESProducer( "Chi2ChargeMeasurementEstimatorESProducer",
  MaxChi2 = cms.double( 16.0 ),
  nSigma = cms.double( 3.0 ),
  MaxDisplacement = cms.double( 0.5 ),
  MaxSagitta = cms.double( 2.0 ),
  MinimalTolerance = cms.double( 0.5 ),
  MinPtForHitRecoveryInGluedDet = cms.double( 1000000.0 ),
  ComponentName = cms.string( "hltESPTobTecStepChi2ChargeMeasurementEstimator16" ),
  pTChargeCutThreshold = cms.double( -1.0 ),
  clusterChargeCut = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTobTecStepClusterShapeHitFilter = cms.ESProducer( "ClusterShapeHitFilterESProducer",
  ComponentName = cms.string( "hltESPTobTecStepClusterShapeHitFilter" ),
  PixelShapeFile = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_noL1.par" ),
  PixelShapeFileL1 = cms.string( "RecoPixelVertexing/PixelLowPtUtilities/data/pixelShapePhase1_loose.par" ),
  clusterChargeCut = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutTight" ) )
)
# NOTE(review): the module label says "FittingSmoother" but ComponentName is
# "FitterSmoother" — intentional in the generated menu; other modules reference
# the ComponentName string, so neither may be renamed independently.
process.hltESPTobTecStepFittingSmoother = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPTobTecStepFitterSmoother" ),
  Fitter = cms.string( "hltESPTobTecStepRKFitter" ),
  Smoother = cms.string( "hltESPTobTecStepRKSmoother" ),
  EstimateCut = cms.double( 30.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 7 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTobTecStepFittingSmootherForLoopers = cms.ESProducer( "KFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPTobTecStepFitterSmootherForLoopers" ),
  Fitter = cms.string( "hltESPTobTecStepRKFitterForLoopers" ),
  Smoother = cms.string( "hltESPTobTecStepRKSmootherForLoopers" ),
  EstimateCut = cms.double( 30.0 ),
  MaxFractionOutliers = cms.double( 0.3 ),
  MaxNumberOfOutliers = cms.int32( 3 ),
  MinDof = cms.int32( 2 ),
  NoOutliersBeginEnd = cms.bool( False ),
  MinNumberOfHits = cms.int32( 7 ),
  MinNumberOfHitsHighEta = cms.int32( 5 ),
  HighEtaSwitch = cms.double( 5.0 ),
  RejectTracks = cms.bool( True ),
  BreakTrajWith2ConsecutiveMissing = cms.bool( False ),
  NoInvalidHitsBeginEnd = cms.bool( False ),
  LogPixelProbabilityCut = cms.double( -16.0 ),
  appendToDataLabel = cms.string( "" )
)
# Dispatches between the standard and looper fitter/smoother defined above.
process.hltESPTobTecStepFlexibleKFFittingSmoother = cms.ESProducer( "FlexibleKFFittingSmootherESProducer",
  ComponentName = cms.string( "hltESPTobTecStepFlexibleKFFittingSmoother" ),
  standardFitter = cms.string( "hltESPTobTecStepFitterSmoother" ),
  looperFitter = cms.string( "hltESPTobTecStepFitterSmootherForLoopers" ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTobTecStepRKTrajectoryFitter = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPTobTecStepRKFitter" ),
  Propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  minHits = cms.int32( 7 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTobTecStepRKTrajectoryFitterForLoopers = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPTobTecStepRKFitterForLoopers" ),
  Propagator = cms.string( "PropagatorWithMaterialForLoopers" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  minHits = cms.int32( 7 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTobTecStepRKTrajectorySmoother = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPTobTecStepRKSmoother" ),
  Propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  errorRescaling = cms.double( 10.0 ),
  minHits = cms.int32( 7 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTobTecStepRKTrajectorySmootherForLoopers = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPTobTecStepRKSmootherForLoopers" ),
  Propagator = cms.string( "PropagatorWithMaterialForLoopers" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  errorRescaling = cms.double( 10.0 ),
  minHits = cms.int32( 7 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTobTecStepTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPTobTecStepTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.09 ),
  ValidHitBonus = cms.double( 5.0 ),
  MissingHitPenalty = cms.double( 20.0 ),
  allowSharedFirstHit = cms.bool( True )
)
# --- Remaining tracking/geometry/DB EventSetup producers ---
process.hltESPTrackAlgoPriorityOrder = cms.ESProducer( "TrackAlgoPriorityOrderESProducer",
  ComponentName = cms.string( "hltESPTrackAlgoPriorityOrder" ),
  algoOrder = cms.vstring(  ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTrackerRecoGeometryESProducer = cms.ESProducer( "TrackerRecoGeometryESProducer",
  usePhase2Stacks = cms.bool( False ),
  trackerGeometryLabel = cms.untracked.string( "" ),
  appendToDataLabel = cms.string( "" )
)
# Generic cleaner: note the much looser sharing cut (0.5) and different
# bonus/penalty weighting than the per-iteration cleaners above.
process.hltESPTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.5 ),
  ValidHitBonus = cms.double( 100.0 ),
  MissingHitPenalty = cms.double( 0.0 ),
  allowSharedFirstHit = cms.bool( False )
)
process.hltESPTrajectoryFitterRK = cms.ESProducer( "KFTrajectoryFitterESProducer",
  ComponentName = cms.string( "hltESPTrajectoryFitterRK" ),
  Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
process.hltESPTrajectorySmootherRK = cms.ESProducer( "KFTrajectorySmootherESProducer",
  ComponentName = cms.string( "hltESPTrajectorySmootherRK" ),
  Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
  Updator = cms.string( "hltESPKFUpdator" ),
  Estimator = cms.string( "hltESPChi2MeasurementEstimator30" ),
  RecoGeometry = cms.string( "hltESPDummyDetLayerGeometry" ),
  errorRescaling = cms.double( 100.0 ),
  minHits = cms.int32( 3 ),
  appendToDataLabel = cms.string( "" )
)
process.hltPixelTracksCleanerBySharedHits = cms.ESProducer( "PixelTrackCleanerBySharedHitsESProducer",
  ComponentName = cms.string( "hltPixelTracksCleanerBySharedHits" ),
  useQuadrupletAlgo = cms.bool( False ),
  appendToDataLabel = cms.string( "" )
)
process.hltTrackCleaner = cms.ESProducer( "TrackCleanerESProducer",
  ComponentName = cms.string( "hltTrackCleaner" ),
  appendToDataLabel = cms.string( "" )
)
# DetId associators map eta/phi bins to detector elements; HO, muon, and
# preshower instances differ only in binning and component name.
process.hoDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "HODetIdAssociator" ),
  etaBinSize = cms.double( 0.087 ),
  nEta = cms.int32( 30 ),
  nPhi = cms.int32( 72 ),
  hcalRegion = cms.int32( 2 ),
  includeBadChambers = cms.bool( False ),
  includeGEM = cms.bool( False ),
  includeME0 = cms.bool( False )
)
process.multipleScatteringParametrisationMakerESProducer = cms.ESProducer( "MultipleScatteringParametrisationMakerESProducer",
  appendToDataLabel = cms.string( "" )
)
process.muonDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "MuonDetIdAssociator" ),
  etaBinSize = cms.double( 0.125 ),
  nEta = cms.int32( 48 ),
  nPhi = cms.int32( 48 ),
  hcalRegion = cms.int32( 2 ),
  includeBadChambers = cms.bool( False ),
  includeGEM = cms.bool( False ),
  includeME0 = cms.bool( False )
)
process.muonSeededTrajectoryCleanerBySharedHits = cms.ESProducer( "TrajectoryCleanerESProducer",
  ComponentName = cms.string( "muonSeededTrajectoryCleanerBySharedHits" ),
  ComponentType = cms.string( "TrajectoryCleanerBySharedHits" ),
  fractionShared = cms.double( 0.1 ),
  ValidHitBonus = cms.double( 1000.0 ),
  MissingHitPenalty = cms.double( 1.0 ),
  allowSharedFirstHit = cms.bool( True )
)
process.navigationSchoolESProducer = cms.ESProducer( "NavigationSchoolESProducer",
  ComponentName = cms.string( "SimpleNavigationSchool" ),
  SimpleMagneticField = cms.string( "ParabolicMf" )
)
process.preshowerDetIdAssociator = cms.ESProducer( "DetIdAssociatorESProducer",
  ComponentName = cms.string( "PreshowerDetIdAssociator" ),
  etaBinSize = cms.double( 0.1 ),
  nEta = cms.int32( 60 ),
  nPhi = cms.int32( 30 ),
  hcalRegion = cms.int32( 2 ),
  includeBadChambers = cms.bool( False ),
  includeGEM = cms.bool( False ),
  includeME0 = cms.bool( False )
)
process.siPixelQualityESProducer = cms.ESProducer( "SiPixelQualityESProducer",
  siPixelQualityLabel = cms.string( "" )
)
process.siPixelTemplateDBObjectESProducer = cms.ESProducer( "SiPixelTemplateDBObjectESProducer" )
# Strip conditions producers that pick peak- vs deconvolution-mode DB records
# based on the SiStripLatency record.
process.siStripBackPlaneCorrectionDepESProducer = cms.ESProducer( "SiStripBackPlaneCorrectionDepESProducer",
  LatencyRecord = cms.PSet(
    label = cms.untracked.string( "" ),
    record = cms.string( "SiStripLatencyRcd" )
  ),
  BackPlaneCorrectionPeakMode = cms.PSet(
    label = cms.untracked.string( "peak" ),
    record = cms.string( "SiStripBackPlaneCorrectionRcd" )
  ),
  BackPlaneCorrectionDeconvMode = cms.PSet(
    label = cms.untracked.string( "deconvolution" ),
    record = cms.string( "SiStripBackPlaneCorrectionRcd" )
  )
)
process.siStripLorentzAngleDepESProducer = cms.ESProducer( "SiStripLorentzAngleDepESProducer",
  LatencyRecord = cms.PSet(
    label = cms.untracked.string( "" ),
    record = cms.string( "SiStripLatencyRcd" )
  ),
  LorentzAnglePeakMode = cms.PSet(
    label = cms.untracked.string( "peak" ),
    record = cms.string( "SiStripLorentzAngleRcd" )
  ),
  LorentzAngleDeconvMode = cms.PSet(
    label = cms.untracked.string( "deconvolution" ),
    record = cms.string( "SiStripLorentzAngleRcd" )
  )
)
process.sistripconn = cms.ESProducer( "SiStripConnectivity" )
process.trackerTopology = cms.ESProducer( "TrackerTopologyEP",
  appendToDataLabel = cms.string( "" )
)
# --- Framework services: GPU, timing, logging, throughput ---
# CUDAService: enables GPU offloading; -1 limits mean "use the CUDA runtime default".
process.CUDAService = cms.Service( "CUDAService",
  enabled = cms.untracked.bool( True ),
  verbose = cms.untracked.bool( False ),
  limits = cms.untracked.PSet(
    cudaLimitDevRuntimePendingLaunchCount = cms.untracked.int32( -1 ),
    cudaLimitDevRuntimeSyncDepth = cms.untracked.int32( -1 ),
    cudaLimitStackSize = cms.untracked.int32( -1 ),
    cudaLimitPrintfFifoSize = cms.untracked.int32( -1 ),
    cudaLimitMallocHeapSize = cms.untracked.int32( -1 )
  ),
  allocator = cms.untracked.PSet(
    hostPreallocate = cms.untracked.vuint32(  ),
    devicePreallocate = cms.untracked.vuint32(  )
  )
)
# FastTimerService: per-path/per-process timing and memory DQM histograms;
# ranges/resolutions are in ms and kB respectively — TODO confirm units in service docs.
process.FastTimerService = cms.Service( "FastTimerService",
  printEventSummary = cms.untracked.bool( False ),
  printRunSummary = cms.untracked.bool( True ),
  printJobSummary = cms.untracked.bool( True ),
  writeJSONSummary = cms.untracked.bool( False ),
  jsonFileName = cms.untracked.string( "resources.json" ),
  enableDQM = cms.untracked.bool( True ),
  enableDQMbyModule = cms.untracked.bool( False ),
  enableDQMbyPath = cms.untracked.bool( False ),
  enableDQMbyLumiSection = cms.untracked.bool( True ),
  enableDQMbyProcesses = cms.untracked.bool( True ),
  enableDQMTransitions = cms.untracked.bool( False ),
  dqmTimeRange = cms.untracked.double( 2000.0 ),
  dqmTimeResolution = cms.untracked.double( 5.0 ),
  dqmMemoryRange = cms.untracked.double( 1000000.0 ),
  dqmMemoryResolution = cms.untracked.double( 5000.0 ),
  dqmPathTimeRange = cms.untracked.double( 100.0 ),
  dqmPathTimeResolution = cms.untracked.double( 0.5 ),
  dqmPathMemoryRange = cms.untracked.double( 1000000.0 ),
  dqmPathMemoryResolution = cms.untracked.double( 5000.0 ),
  dqmModuleTimeRange = cms.untracked.double( 40.0 ),
  dqmModuleTimeResolution = cms.untracked.double( 0.2 ),
  dqmModuleMemoryRange = cms.untracked.double( 100000.0 ),
  dqmModuleMemoryResolution = cms.untracked.double( 500.0 ),
  dqmLumiSectionsRange = cms.untracked.uint32( 2500 ),
  dqmPath = cms.untracked.string( "HLT/TimerService" ),
)
# MessageLogger: suppresses known-noisy modules' warnings/errors and limits
# cerr categories; module label strings must match producer labels exactly.
process.MessageLogger = cms.Service( "MessageLogger",
  suppressWarning = cms.untracked.vstring( 'hltOnlineBeamSpot',
    'hltCtf3HitL1SeededWithMaterialTracks',
    'hltL3MuonsOIState',
    'hltPixelTracksForHighMult',
    'hltHITPixelTracksHE',
    'hltHITPixelTracksHB',
    'hltCtfL1SeededWithMaterialTracks',
    'hltRegionalTracksForL3MuonIsolation',
    'hltSiPixelClusters',
    'hltActivityStartUpElectronPixelSeeds',
    'hltLightPFTracks',
    'hltPixelVertices3DbbPhi',
    'hltL3MuonsIOHit',
    'hltPixelTracks',
    'hltSiPixelDigis',
    'hltL3MuonsOIHit',
    'hltL1SeededElectronGsfTracks',
    'hltL1SeededStartUpElectronPixelSeeds',
    'hltBLifetimeRegionalCtfWithMaterialTracksbbPhiL1FastJetFastPV',
    'hltCtfActivityWithMaterialTracks' ),
  suppressFwkInfo = cms.untracked.vstring(  ),
  suppressInfo = cms.untracked.vstring(  ),
  suppressDebug = cms.untracked.vstring(  ),
  debugModules = cms.untracked.vstring(  ),
  cerr = cms.untracked.PSet(
    INFO = cms.untracked.PSet(  limit = cms.untracked.int32( 0 ) ),
    noTimeStamps = cms.untracked.bool( False ),
    FwkReport = cms.untracked.PSet(
      reportEvery = cms.untracked.int32( 1 ),
      limit = cms.untracked.int32( 0 )
    ),
    default = cms.untracked.PSet(  limit = cms.untracked.int32( 10000000 ) ),
    Root_NoDictionary = cms.untracked.PSet(  limit = cms.untracked.int32( 0 ) ),
    FwkJob = cms.untracked.PSet(  limit = cms.untracked.int32( 0 ) ),
    FwkSummary = cms.untracked.PSet(
      reportEvery = cms.untracked.int32( 1 ),
      limit = cms.untracked.int32( 10000000 )
    ),
    threshold = cms.untracked.string( "INFO" ),
  ),
  suppressError = cms.untracked.vstring( 'hltOnlineBeamSpot',
    'hltL3MuonCandidates',
    'hltL3TkTracksFromL2OIState',
    'hltPFJetCtfWithMaterialTracks',
    'hltL3TkTracksFromL2IOHit',
    'hltL3TkTracksFromL2OIHit' )
)
# ThroughputService: event-rate monitoring histograms under HLT/Throughput.
process.ThroughputService = cms.Service( "ThroughputService",
  eventRange = cms.untracked.uint32( 10000 ),
  eventResolution = cms.untracked.uint32( 1 ),
  printEventSummary = cms.untracked.bool( False ),
  enableDQM = cms.untracked.bool( True ),
  dqmPathByProcesses = cms.untracked.bool( False ),
  dqmPath = cms.untracked.string( "HLT/Throughput" ),
  timeRange = cms.untracked.double( 60000.0 ),
  timeResolution = cms.untracked.double( 5.828 )
)
process.hltGetConditions = cms.EDAnalyzer( "EventSetupRecordDataGetter",
verbose = cms.untracked.bool( False ),
toGet = cms.VPSet(
)
)
process.hltGetRaw = cms.EDAnalyzer( "HLTGetRaw",
RawDataCollection = cms.InputTag( "rawDataCollector" )
)
process.hltPSetMap = cms.EDProducer( "ParameterSetBlobProducer" )
process.hltBoolFalse = cms.EDFilter( "HLTBool",
result = cms.bool( False )
)
process.statusOnGPUFilter = cms.EDFilter( "BooleanFilter",
src = cms.InputTag( "statusOnGPU" )
)
process.hltTriggerType = cms.EDFilter( "HLTTriggerTypeFilter",
SelectedTriggerType = cms.int32( 1 )
)
# Unpacks Stage-2 L1 Global Trigger raw data (FED 1404) into L1T digi
# collections (Muon, EGamma, Tau, Jet, EtSum, ...), consumed downstream by
# hltGtStage2ObjectMap and the HLT L1 seed filters.
process.hltGtStage2Digis = cms.EDProducer( "L1TRawToDigi",
    FedIds = cms.vint32( 1404 ),
    Setup = cms.string( "stage2::GTSetup" ),
    FWId = cms.uint32( 0 ),
    DmxFWId = cms.uint32( 0 ),
    FWOverride = cms.bool( False ),
    TMTCheck = cms.bool( True ),
    CTP7 = cms.untracked.bool( False ),
    MTF7 = cms.untracked.bool( False ),
    InputLabel = cms.InputTag( "rawDataCollector" ),
    lenSlinkHeader = cms.untracked.int32( 8 ),
    lenSlinkTrailer = cms.untracked.int32( 8 ),
    lenAMCHeader = cms.untracked.int32( 8 ),
    lenAMCTrailer = cms.untracked.int32( 0 ),
    lenAMC13Header = cms.untracked.int32( 8 ),
    lenAMC13Trailer = cms.untracked.int32( 8 ),
    debug = cms.untracked.bool( False ),
    MinFeds = cms.uint32( 0 )
)
# Re-emulates the L1 Global Trigger decision from the unpacked hltGtStage2Digis
# objects and produces the algorithm/object map record used by HLT L1 seeding.
# Algorithms are run unprescaled and unmasked here (prescaling is applied at HLT).
process.hltGtStage2ObjectMap = cms.EDProducer( "L1TGlobalProducer",
    MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    MuonShowerInputTag = cms.InputTag( "" ),
    EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' ),
    ExtInputTag = cms.InputTag( "hltGtStage2Digis" ),
    AlgoBlkInputTag = cms.InputTag( "hltGtStage2Digis" ),
    GetPrescaleColumnFromData = cms.bool( False ),
    AlgorithmTriggersUnprescaled = cms.bool( True ),
    RequireMenuToMatchAlgoBlkInput = cms.bool( True ),
    AlgorithmTriggersUnmasked = cms.bool( True ),
    useMuonShowers = cms.bool( False ),
    ProduceL1GtDaqRecord = cms.bool( True ),
    ProduceL1GtObjectMapRecord = cms.bool( True ),
    EmulateBxInEvent = cms.int32( 1 ),
    L1DataBxInEvent = cms.int32( 5 ),
    AlternativeNrBxBoardDaq = cms.uint32( 0 ),
    BstLengthBytes = cms.int32( -1 ),
    PrescaleSet = cms.uint32( 1 ),
    Verbosity = cms.untracked.int32( 0 ),
    PrintL1Menu = cms.untracked.bool( False ),
    TriggerMenuLuminosity = cms.string( "startup" ),
    PrescaleCSVFile = cms.string( "prescale_L1TGlobal.csv" )
)
# Unpacks the trigger scalers from raw data.
process.hltScalersRawToDigi = cms.EDProducer( "ScalersRawToDigi",
    scalersInputTag = cms.InputTag( "rawDataCollector" )
)
# Unpacks online metadata (soft-FED content) from raw data.
process.hltOnlineMetaDataDigis = cms.EDProducer( "OnlineMetaDataRawToDigi",
    onlineMetaDataInputLabel = cms.InputTag( "rawDataCollector" )
)
# Produces the online beam spot used by HLT reconstruction, built from the
# scalers payload; the result is sanity-limited to |z| < 40 cm and r < 2 cm.
process.hltOnlineBeamSpot = cms.EDProducer( "BeamSpotOnlineProducer",
    changeToCMSCoordinates = cms.bool( False ),
    maxZ = cms.double( 40.0 ),
    setSigmaZ = cms.double( 0.0 ),
    beamMode = cms.untracked.uint32( 11 ),
    src = cms.InputTag( "hltScalersRawToDigi" ),
    gtEvmLabel = cms.InputTag( "" ),
    maxRadius = cms.double( 2.0 ),
    useTransientRecord = cms.bool( False )
)
# L1 seed filter: accepts events passing the L1_ZeroBias algorithm, evaluated
# on the re-emulated object map from hltGtStage2ObjectMap.
process.hltL1sZeroBias = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_ZeroBias" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
# HLT prescaler for the ZeroBias_Beamspot path (offset 0).
process.hltPreZeroBiasBeamspot = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Legacy (CPU) ECAL unpacker: converts raw data from ECAL FEDs 601-654 into
# digis, including trigger-primitive (TCC), selective-readout (SRP) and
# front-end data; runs in silent mode to suppress per-event warnings.
process.hltEcalDigisLegacy = cms.EDProducer( "EcalRawToDigi",
    tccUnpacking = cms.bool( True ),
    FedLabel = cms.InputTag( "listfeds" ),
    srpUnpacking = cms.bool( True ),
    syncCheck = cms.bool( True ),
    feIdCheck = cms.bool( True ),
    silentMode = cms.untracked.bool( True ),
    InputLabel = cms.InputTag( "rawDataCollector" ),
    orderedFedList = cms.vint32( 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654 ),
    eventPut = cms.bool( True ),
    numbTriggerTSamples = cms.int32( 1 ),
    numbXtalTSamples = cms.int32( 10 ),
    orderedDCCIdList = cms.vint32( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54 ),
    FEDs = cms.vint32( 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654 ),
    DoRegional = cms.bool( False ),
    feUnpacking = cms.bool( True ),
    forceToKeepFRData = cms.bool( False ),
    headerUnpacking = cms.bool( True ),
    memUnpacking = cms.bool( True )
)
# Collects ECAL channel/tower ids flagged with integrity errors by hltEcalDigis
# and publishes them as "to be recovered" lists (ebDetId/eeDetId, ebFE/eeFE)
# for the downstream rec-hit recovery step.
process.hltEcalDetIdToBeRecovered = cms.EDProducer( "EcalDetIdToBeRecoveredProducer",
    ebIntegrityChIdErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityChIdErrors' ),
    ebDetIdToBeRecovered = cms.string( "ebDetId" ),
    integrityTTIdErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityTTIdErrors' ),
    eeIntegrityGainErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityGainErrors' ),
    ebFEToBeRecovered = cms.string( "ebFE" ),
    ebIntegrityGainErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityGainErrors' ),
    eeDetIdToBeRecovered = cms.string( "eeDetId" ),
    eeIntegrityGainSwitchErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityGainSwitchErrors' ),
    eeIntegrityChIdErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityChIdErrors' ),
    ebIntegrityGainSwitchErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityGainSwitchErrors' ),
    ebSrFlagCollection = cms.InputTag( "hltEcalDigis" ),
    eeFEToBeRecovered = cms.string( "eeFE" ),
    integrityBlockSizeErrors = cms.InputTag( 'hltEcalDigis','EcalIntegrityBlockSizeErrors' ),
    eeSrFlagCollection = cms.InputTag( "hltEcalDigis" )
)
# HCAL unpacker: converts raw data into HCAL digis (including ZDC, calibration
# and uMNio streams); TTP unpacking is disabled and bad-quality data is filtered.
process.hltHcalDigis = cms.EDProducer( "HcalRawToDigi",
    HcalFirstFED = cms.untracked.int32( 700 ),
    firstSample = cms.int32( 0 ),
    lastSample = cms.int32( 9 ),
    FilterDataQuality = cms.bool( True ),
    FEDs = cms.untracked.vint32( ),
    UnpackZDC = cms.untracked.bool( True ),
    UnpackCalib = cms.untracked.bool( True ),
    UnpackUMNio = cms.untracked.bool( True ),
    UnpackTTP = cms.untracked.bool( False ),
    silent = cms.untracked.bool( True ),
    saveQIE10DataNSamples = cms.untracked.vint32( ),
    saveQIE10DataTags = cms.untracked.vstring( ),
    saveQIE11DataNSamples = cms.untracked.vint32( ),
    saveQIE11DataTags = cms.untracked.vstring( ),
    ComplainEmptyData = cms.untracked.bool( False ),
    UnpackerMode = cms.untracked.int32( 0 ),
    ExpectedOrbitMessageTime = cms.untracked.int32( -1 ),
    InputLabel = cms.InputTag( "rawDataCollector" ),
    ElectronicsMap = cms.string( "" )
)
# First stage of HF reconstruction: pre-reconstructs HF pulses from
# hltHcalDigis; feeds the Phase-1 reconstructor hltHfreco below.
process.hltHfprereco = cms.EDProducer( "HFPreReconstructor",
    digiLabel = cms.InputTag( "hltHcalDigis" ),
    dropZSmarkedPassed = cms.bool( True ),
    tsFromDB = cms.bool( False ),
    sumAllTimeSlices = cms.bool( False ),
    forceSOI = cms.int32( -1 ),
    soiShift = cms.int32( 0 )
)
# Phase-1 HF rec-hit reconstruction from hltHfprereco, using the
# HFFlexibleTimeCheck algorithm. Noise flags are set (S9S1/S8S1/PET blocks
# below hold the per-|ieta| thresholds for those noise algorithms); the
# strip filter is configured but disabled (runHFStripFilter = False).
process.hltHfreco = cms.EDProducer( "HFPhase1Reconstructor",
    inputLabel = cms.InputTag( "hltHfprereco" ),
    useChannelQualityFromDB = cms.bool( False ),
    checkChannelQualityForDepth3and4 = cms.bool( False ),
    algorithm = cms.PSet(
      tfallIfNoTDC = cms.double( -101.0 ),
      triseIfNoTDC = cms.double( -100.0 ),
      rejectAllFailures = cms.bool( True ),
      energyWeights = cms.vdouble( 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 2.0, 0.0, 2.0, 0.0, 2.0, 0.0, 1.0 ),
      soiPhase = cms.uint32( 1 ),
      timeShift = cms.double( 0.0 ),
      tlimits = cms.vdouble( -1000.0, 1000.0, -1000.0, 1000.0 ),
      Class = cms.string( "HFFlexibleTimeCheck" )
    ),
    algoConfigClass = cms.string( "HFPhase1PMTParams" ),
    setNoiseFlags = cms.bool( True ),
    runHFStripFilter = cms.bool( False ),
    S9S1stat = cms.PSet(
      shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),
      shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      long_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),
      isS8S1 = cms.bool( False ),
      longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),
      short_optimumSlope = cms.vdouble( -99999.0, 0.0164905, 0.0238698, 0.0321383, 0.041296, 0.0513428, 0.0622789, 0.0741041, 0.0868186, 0.100422, 0.135313, 0.136289, 0.0589927 ),
      HcalAcceptSeverityLevel = cms.int32( 9 )
    ),
    S8S1stat = cms.PSet(
      shortEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),
      shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      long_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),
      isS8S1 = cms.bool( True ),
      longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      longEnergyParams = cms.vdouble( 40.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0 ),
      short_optimumSlope = cms.vdouble( 0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ),
      HcalAcceptSeverityLevel = cms.int32( 9 )
    ),
    PETstat = cms.PSet(
      shortEnergyParams = cms.vdouble( 35.1773, 35.37, 35.7933, 36.4472, 37.3317, 38.4468, 39.7925, 41.3688, 43.1757, 45.2132, 47.4813, 49.98, 52.7093 ),
      shortETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      long_R_29 = cms.vdouble( 0.8 ),
      longETParams = cms.vdouble( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ),
      longEnergyParams = cms.vdouble( 43.5, 45.7, 48.32, 51.36, 54.82, 58.7, 63.0, 67.72, 72.86, 78.42, 84.4, 90.8, 97.62 ),
      short_R_29 = cms.vdouble( 0.8 ),
      long_R = cms.vdouble( 0.98 ),
      short_R = cms.vdouble( 0.8 ),
      HcalAcceptSeverityLevel = cms.int32( 9 )
    ),
    HFStripFilter = cms.PSet(
      timeMax = cms.double( 6.0 ),
      seedHitIetaMax = cms.int32( 35 ),
      gap = cms.int32( 2 ),
      verboseLevel = cms.untracked.int32( 10 ),
      wedgeCut = cms.double( 0.05 ),
      stripThreshold = cms.double( 40.0 ),
      maxStripTime = cms.double( 10.0 ),
      maxThreshold = cms.double( 100.0 ),
      lstrips = cms.int32( 2 )
    )
)
# HO (outer hadron calorimeter) rec-hit reconstruction from hltHcalDigis; all
# optional flag-setting steps (noise, HSCP, saturation, timing, pulse-shape)
# are disabled for this subdetector.
process.hltHoreco = cms.EDProducer( "HcalHitReconstructor",
    correctForPhaseContainment = cms.bool( True ),
    correctionPhaseNS = cms.double( 13.0 ),
    digiLabel = cms.InputTag( "hltHcalDigis" ),
    Subdetector = cms.string( "HO" ),
    correctForTimeslew = cms.bool( True ),
    dropZSmarkedPassed = cms.bool( True ),
    firstSample = cms.int32( 4 ),
    samplesToAdd = cms.int32( 4 ),
    tsFromDB = cms.bool( True ),
    recoParamsFromDB = cms.bool( True ),
    useLeakCorrection = cms.bool( False ),
    dataOOTCorrectionName = cms.string( "" ),
    dataOOTCorrectionCategory = cms.string( "Data" ),
    mcOOTCorrectionName = cms.string( "" ),
    mcOOTCorrectionCategory = cms.string( "MC" ),
    correctTiming = cms.bool( False ),
    firstAuxTS = cms.int32( 4 ),
    setNoiseFlags = cms.bool( False ),
    digiTimeFromDB = cms.bool( True ),
    setHSCPFlags = cms.bool( False ),
    setSaturationFlags = cms.bool( False ),
    setTimingTrustFlags = cms.bool( False ),
    setPulseShapeFlags = cms.bool( False ),
    setNegativeFlags = cms.bool( False ),
    digistat = cms.PSet( ),
    HFInWindowStat = cms.PSet( ),
    S9S1stat = cms.PSet( ),
    S8S1stat = cms.PSet( ),
    PETstat = cms.PSet( ),
    saturationParameters = cms.PSet( maxADCvalue = cms.int32( 127 ) ),
    hfTimingTrustParameters = cms.PSet( )
)
# Builds calorimeter towers from ECAL rec-hits (hltEcalRecHit EB/EE), HBHE
# (hltHbhereco), HF (hltHfreco) and HO (hltHoreco). HO is effectively excluded
# (UseHO = False, HOWeight ~ 0). The per-subdetector *Threshold/*Weight values
# are the energy cuts and weights applied when summing hits into towers;
# rec-hits with severities kTime/kWeird/kBad are excluded.
process.hltTowerMakerForAll = cms.EDProducer( "CaloTowersCreator",
    EBSumThreshold = cms.double( 0.2 ),
    HF2Weight = cms.double( 1.0 ),
    EBWeight = cms.double( 1.0 ),
    hfInput = cms.InputTag( "hltHfreco" ),
    EESumThreshold = cms.double( 0.45 ),
    HOThreshold0 = cms.double( 3.5 ),
    HOThresholdPlus1 = cms.double( 3.5 ),
    HOThresholdMinus1 = cms.double( 3.5 ),
    HOThresholdPlus2 = cms.double( 3.5 ),
    HOThresholdMinus2 = cms.double( 3.5 ),
    HBGrid = cms.vdouble( ),
    HBThreshold1 = cms.double( 0.1 ),
    HBThreshold2 = cms.double( 0.2 ),
    HBThreshold = cms.double( 0.3 ),
    EEWeights = cms.vdouble( ),
    HF1Threshold = cms.double( 0.5 ),
    HF2Weights = cms.vdouble( ),
    HOWeights = cms.vdouble( ),
    EEGrid = cms.vdouble( ),
    HEDWeight = cms.double( 1.0 ),
    EEWeight = cms.double( 1.0 ),
    UseHO = cms.bool( False ),
    HBWeights = cms.vdouble( ),
    HESWeight = cms.double( 1.0 ),
    HF1Weight = cms.double( 1.0 ),
    HF2Grid = cms.vdouble( ),
    HEDWeights = cms.vdouble( ),
    HF1Grid = cms.vdouble( ),
    EBWeights = cms.vdouble( ),
    HOWeight = cms.double( 1.0E-99 ),
    EBThreshold = cms.double( 0.07 ),
    EEThreshold = cms.double( 0.3 ),
    UseEtEBTreshold = cms.bool( False ),
    UseSymEBTreshold = cms.bool( False ),
    UseEtEETreshold = cms.bool( False ),
    UseSymEETreshold = cms.bool( False ),
    hbheInput = cms.InputTag( "hltHbhereco" ),
    HcalThreshold = cms.double( -1000.0 ),
    HF2Threshold = cms.double( 0.85 ),
    HESThreshold1 = cms.double( 0.1 ),
    HESThreshold = cms.double( 0.2 ),
    HF1Weights = cms.vdouble( ),
    hoInput = cms.InputTag( "hltHoreco" ),
    HESGrid = cms.vdouble( ),
    HESWeights = cms.vdouble( ),
    HEDThreshold1 = cms.double( 0.1 ),
    HEDThreshold = cms.double( 0.2 ),
    EcutTower = cms.double( -1000.0 ),
    HEDGrid = cms.vdouble( ),
    ecalInputs = cms.VInputTag( 'hltEcalRecHit:EcalRecHitsEB','hltEcalRecHit:EcalRecHitsEE' ),
    HBWeight = cms.double( 1.0 ),
    HOGrid = cms.vdouble( ),
    EBGrid = cms.vdouble( ),
    MomConstrMethod = cms.int32( 1 ),
    MomHBDepth = cms.double( 0.2 ),
    MomHEDepth = cms.double( 0.4 ),
    MomEBDepth = cms.double( 0.3 ),
    MomEEDepth = cms.double( 0.0 ),
    HcalAcceptSeverityLevel = cms.uint32( 9 ),
    EcalRecHitSeveritiesToBeExcluded = cms.vstring( 'kTime',
      'kWeird',
      'kBad' ),
    UseHcalRecoveredHits = cms.bool( False ),
    UseEcalRecoveredHits = cms.bool( False ),
    UseRejectedHitsOnly = cms.bool( False ),
    HcalAcceptSeverityLevelForRejectedHit = cms.uint32( 9999 ),
    EcalSeveritiesToBeUsedInBadTowers = cms.vstring( ),
    UseRejectedRecoveredHcalHits = cms.bool( False ),
    UseRejectedRecoveredEcalHits = cms.bool( False ),
    missingHcalRescaleFactorForEcal = cms.double( 0.0 ),
    AllowMissingInputs = cms.bool( False ),
    HcalPhase = cms.int32( 1 )
)
# Anti-kT (R = 0.4) calo-jet clustering over the towers from
# hltTowerMakerForAll, used as seeds for the PF sequence. All grooming /
# substructure options (filtering, pruning, trimming, soft-drop, ...) are
# disabled; their parameters are set to inert -1 sentinels. A deterministic
# fastjet seed is used for reproducibility.
process.hltAK4CaloJetsPF = cms.EDProducer( "FastjetJetProducer",
    useMassDropTagger = cms.bool( False ),
    useFiltering = cms.bool( False ),
    useDynamicFiltering = cms.bool( False ),
    useTrimming = cms.bool( False ),
    usePruning = cms.bool( False ),
    useCMSBoostedTauSeedingAlgorithm = cms.bool( False ),
    useKtPruning = cms.bool( False ),
    useConstituentSubtraction = cms.bool( False ),
    useSoftDrop = cms.bool( False ),
    correctShape = cms.bool( False ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( False ),
    muCut = cms.double( -1.0 ),
    yCut = cms.double( -1.0 ),
    rFilt = cms.double( -1.0 ),
    rFiltFactor = cms.double( -1.0 ),
    trimPtFracMin = cms.double( -1.0 ),
    zcut = cms.double( -1.0 ),
    rcut_factor = cms.double( -1.0 ),
    csRho_EtaMax = cms.double( -1.0 ),
    csRParam = cms.double( -1.0 ),
    beta = cms.double( -1.0 ),
    R0 = cms.double( -1.0 ),
    gridMaxRapidity = cms.double( -1.0 ),
    gridSpacing = cms.double( -1.0 ),
    DzTrVtxMax = cms.double( 0.0 ),
    DxyTrVtxMax = cms.double( 0.0 ),
    MaxVtxZ = cms.double( 15.0 ),
    subjetPtMin = cms.double( -1.0 ),
    muMin = cms.double( -1.0 ),
    muMax = cms.double( -1.0 ),
    yMin = cms.double( -1.0 ),
    yMax = cms.double( -1.0 ),
    dRMin = cms.double( -1.0 ),
    dRMax = cms.double( -1.0 ),
    maxDepth = cms.int32( -1 ),
    nFilt = cms.int32( -1 ),
    MinVtxNdof = cms.int32( 5 ),
    src = cms.InputTag( "hltTowerMakerForAll" ),
    srcPVs = cms.InputTag( "NotUsed" ),
    jetType = cms.string( "CaloJet" ),
    jetAlgorithm = cms.string( "AntiKt" ),
    rParam = cms.double( 0.4 ),
    inputEtMin = cms.double( 0.3 ),
    inputEMin = cms.double( 0.0 ),
    jetPtMin = cms.double( 1.0 ),
    doPVCorrection = cms.bool( False ),
    doAreaFastjet = cms.bool( False ),
    doRhoFastjet = cms.bool( False ),
    doPUOffsetCorr = cms.bool( False ),
    puPtMin = cms.double( 10.0 ),
    nSigmaPU = cms.double( 1.0 ),
    radiusPU = cms.double( 0.4 ),
    subtractorName = cms.string( "" ),
    useExplicitGhosts = cms.bool( False ),
    doAreaDiskApprox = cms.bool( False ),
    voronoiRfact = cms.double( -9.0 ),
    Rho_EtaMax = cms.double( 4.4 ),
    Ghost_EtaMax = cms.double( 6.0 ),
    Active_Area_Repeats = cms.int32( 5 ),
    GhostArea = cms.double( 0.01 ),
    restrictInputs = cms.bool( False ),
    maxInputs = cms.uint32( 1 ),
    writeCompound = cms.bool( False ),
    writeJetsWithConst = cms.bool( False ),
    doFastJetNonUniform = cms.bool( False ),
    useDeterministicSeed = cms.bool( True ),
    minSeed = cms.uint32( 0 ),
    verbosity = cms.int32( 0 ),
    puWidth = cms.double( 0.0 ),
    nExclude = cms.uint32( 0 ),
    maxBadEcalCells = cms.uint32( 9999999 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    puCenters = cms.vdouble( ),
    applyWeight = cms.bool( False ),
    srcWeights = cms.InputTag( "" ),
    minimumTowersFraction = cms.double( 0.0 ),
    jetCollInstanceName = cms.string( "" ),
    sumRecHits = cms.bool( False )
)
# Non-filtering selector: keeps AK4 calo jets with Et >= 5 GeV from
# hltAK4CaloJetsPF (filter = False, so the event always passes).
process.hltAK4CaloJetsPFEt5 = cms.EDFilter( "EtMinCaloJetSelector",
    src = cms.InputTag( "hltAK4CaloJetsPF" ),
    filter = cms.bool( False ),
    etMin = cms.double( 5.0 )
)
# Unpacks DT (drift-tube) raw data into digis via the uROS format.
process.hltMuonDTDigis = cms.EDProducer( "DTuROSRawToDigi",
    inputLabel = cms.InputTag( "rawDataCollector" ),
    debug = cms.untracked.bool( False )
)
# Builds 1D DT rec-hits from hltMuonDTDigis using the DB-driven linear drift
# algorithm; tTrigModeConfig holds the TOF / t0 / wire-propagation timing
# corrections used by the drift-time-to-position conversion.
process.hltDt1DRecHits = cms.EDProducer( "DTRecHitProducer",
    recAlgoConfig = cms.PSet(
      maxTime = cms.double( 420.0 ),
      debug = cms.untracked.bool( False ),
      stepTwoFromDigi = cms.bool( False ),
      tTrigModeConfig = cms.PSet(
        debug = cms.untracked.bool( False ),
        tofCorrType = cms.int32( 0 ),
        tTrigLabel = cms.string( "" ),
        wirePropCorrType = cms.int32( 0 ),
        doTOFCorrection = cms.bool( True ),
        vPropWire = cms.double( 24.4 ),
        doT0Correction = cms.bool( True ),
        doWirePropCorrection = cms.bool( True ),
        t0Label = cms.string( "" )
      ),
      useUncertDB = cms.bool( True ),
      doVdriftCorr = cms.bool( True ),
      minTime = cms.double( -3.0 ),
      tTrigMode = cms.string( "DTTTrigSyncFromDB" ),
      readLegacyTTrigDB = cms.bool( True ),
      readLegacyVDriftDB = cms.bool( True )
    ),
    recAlgo = cms.string( "DTLinearDriftFromDBAlgo" ),
    debug = cms.untracked.bool( False ),
    dtDigiLabel = cms.InputTag( "hltMuonDTDigis" )
)
# Builds 4D DT segments from the 1D rec-hits (hltDt1DRecHits) with the
# combinatorial pattern reco. Note: the same DTLinearDriftFromDBAlgo timing
# configuration (recAlgoConfig/tTrigModeConfig) is repeated verbatim at the
# 4D and nested 2D levels — keep the copies in sync if values are changed.
process.hltDt4DSegments = cms.EDProducer( "DTRecSegment4DProducer",
    Reco4DAlgoName = cms.string( "DTCombinatorialPatternReco4D" ),
    Reco4DAlgoConfig = cms.PSet(
      Reco2DAlgoConfig = cms.PSet(
        AlphaMaxPhi = cms.double( 1.0 ),
        debug = cms.untracked.bool( False ),
        segmCleanerMode = cms.int32( 2 ),
        AlphaMaxTheta = cms.double( 0.9 ),
        hit_afterT0_resolution = cms.double( 0.03 ),
        performT0_vdriftSegCorrection = cms.bool( False ),
        recAlgo = cms.string( "DTLinearDriftFromDBAlgo" ),
        recAlgoConfig = cms.PSet(
          maxTime = cms.double( 420.0 ),
          debug = cms.untracked.bool( False ),
          stepTwoFromDigi = cms.bool( False ),
          tTrigModeConfig = cms.PSet(
            debug = cms.untracked.bool( False ),
            tofCorrType = cms.int32( 0 ),
            tTrigLabel = cms.string( "" ),
            wirePropCorrType = cms.int32( 0 ),
            doTOFCorrection = cms.bool( True ),
            vPropWire = cms.double( 24.4 ),
            doT0Correction = cms.bool( True ),
            doWirePropCorrection = cms.bool( True ),
            t0Label = cms.string( "" )
          ),
          useUncertDB = cms.bool( True ),
          doVdriftCorr = cms.bool( True ),
          minTime = cms.double( -3.0 ),
          tTrigMode = cms.string( "DTTTrigSyncFromDB" ),
          readLegacyTTrigDB = cms.bool( True ),
          readLegacyVDriftDB = cms.bool( True )
        ),
        MaxAllowedHits = cms.uint32( 50 ),
        nUnSharedHitsMin = cms.int32( 2 ),
        nSharedHitsMax = cms.int32( 2 ),
        performT0SegCorrection = cms.bool( False ),
        perform_delta_rejecting = cms.bool( False )
      ),
      Reco2DAlgoName = cms.string( "DTCombinatorialPatternReco" ),
      debug = cms.untracked.bool( False ),
      segmCleanerMode = cms.int32( 2 ),
      AllDTRecHits = cms.bool( True ),
      hit_afterT0_resolution = cms.double( 0.03 ),
      performT0_vdriftSegCorrection = cms.bool( False ),
      recAlgo = cms.string( "DTLinearDriftFromDBAlgo" ),
      recAlgoConfig = cms.PSet(
        maxTime = cms.double( 420.0 ),
        debug = cms.untracked.bool( False ),
        stepTwoFromDigi = cms.bool( False ),
        tTrigModeConfig = cms.PSet(
          debug = cms.untracked.bool( False ),
          tofCorrType = cms.int32( 0 ),
          tTrigLabel = cms.string( "" ),
          wirePropCorrType = cms.int32( 0 ),
          doTOFCorrection = cms.bool( True ),
          vPropWire = cms.double( 24.4 ),
          doT0Correction = cms.bool( True ),
          doWirePropCorrection = cms.bool( True ),
          t0Label = cms.string( "" )
        ),
        useUncertDB = cms.bool( True ),
        doVdriftCorr = cms.bool( True ),
        minTime = cms.double( -3.0 ),
        tTrigMode = cms.string( "DTTTrigSyncFromDB" ),
        readLegacyTTrigDB = cms.bool( True ),
        readLegacyVDriftDB = cms.bool( True )
      ),
      nUnSharedHitsMin = cms.int32( 2 ),
      nSharedHitsMax = cms.int32( 2 ),
      performT0SegCorrection = cms.bool( False ),
      perform_delta_rejecting = cms.bool( False )
    ),
    debug = cms.untracked.bool( False ),
    recHits1DLabel = cms.InputTag( "hltDt1DRecHits" ),
    recHits2DLabel = cms.InputTag( "dt2DSegments" )
)
# Unpacks CSC raw data into wire/strip digis; status-digi unpacking and the
# RPC/GEM/shower auxiliary streams are disabled, and all DQM/debug printouts
# are turned off.
process.hltMuonCSCDigis = cms.EDProducer( "CSCDCCUnpacker",
    InputObjects = cms.InputTag( "rawDataCollector" ),
    UseExaminer = cms.bool( True ),
    ExaminerMask = cms.uint32( 535558134 ),
    UseSelectiveUnpacking = cms.bool( True ),
    ErrorMask = cms.uint32( 0 ),
    UnpackStatusDigis = cms.bool( False ),
    UseFormatStatus = cms.bool( True ),
    useRPCs = cms.bool( False ),
    useGEMs = cms.bool( False ),
    useCSCShowers = cms.bool( False ),
    Debug = cms.untracked.bool( False ),
    PrintEventNumber = cms.untracked.bool( False ),
    runDQM = cms.untracked.bool( False ),
    VisualFEDInspect = cms.untracked.bool( False ),
    VisualFEDShort = cms.untracked.bool( False ),
    FormatedEventDump = cms.untracked.bool( False ),
    SuppressZeroLCT = cms.untracked.bool( True ),
    DisableMappingCheck = cms.untracked.bool( False ),
    B904Setup = cms.untracked.bool( False )
)
# Builds 2D CSC rec-hits from the wire and strip digis of hltMuonCSCDigis.
# The per-chamber-type XTasymmetry/ConstSyst/NoiseLevel parameters tune
# cross-talk, systematic and noise terms for each CSC station/ring.
process.hltCsc2DRecHits = cms.EDProducer( "CSCRecHitDProducer",
    CSCStripPeakThreshold = cms.double( 10.0 ),
    CSCStripClusterChargeCut = cms.double( 25.0 ),
    CSCStripxtalksOffset = cms.double( 0.03 ),
    UseAverageTime = cms.bool( False ),
    UseParabolaFit = cms.bool( False ),
    UseFivePoleFit = cms.bool( True ),
    CSCWireClusterDeltaT = cms.int32( 1 ),
    CSCUseCalibrations = cms.bool( True ),
    CSCUseStaticPedestals = cms.bool( False ),
    CSCNoOfTimeBinsForDynamicPedestal = cms.int32( 2 ),
    wireDigiTag = cms.InputTag( 'hltMuonCSCDigis','MuonCSCWireDigi' ),
    stripDigiTag = cms.InputTag( 'hltMuonCSCDigis','MuonCSCStripDigi' ),
    readBadChannels = cms.bool( False ),
    readBadChambers = cms.bool( True ),
    CSCUseTimingCorrections = cms.bool( True ),
    CSCUseGasGainCorrections = cms.bool( False ),
    CSCDebug = cms.untracked.bool( False ),
    CSCstripWireDeltaTime = cms.int32( 8 ),
    XTasymmetry_ME1a = cms.double( 0.0 ),
    XTasymmetry_ME1b = cms.double( 0.0 ),
    XTasymmetry_ME12 = cms.double( 0.0 ),
    XTasymmetry_ME13 = cms.double( 0.0 ),
    XTasymmetry_ME21 = cms.double( 0.0 ),
    XTasymmetry_ME22 = cms.double( 0.0 ),
    XTasymmetry_ME31 = cms.double( 0.0 ),
    XTasymmetry_ME32 = cms.double( 0.0 ),
    XTasymmetry_ME41 = cms.double( 0.0 ),
    ConstSyst_ME1a = cms.double( 0.022 ),
    ConstSyst_ME1b = cms.double( 0.007 ),
    ConstSyst_ME12 = cms.double( 0.0 ),
    ConstSyst_ME13 = cms.double( 0.0 ),
    ConstSyst_ME21 = cms.double( 0.0 ),
    ConstSyst_ME22 = cms.double( 0.0 ),
    ConstSyst_ME31 = cms.double( 0.0 ),
    ConstSyst_ME32 = cms.double( 0.0 ),
    ConstSyst_ME41 = cms.double( 0.0 ),
    NoiseLevel_ME1a = cms.double( 7.0 ),
    NoiseLevel_ME1b = cms.double( 8.0 ),
    NoiseLevel_ME12 = cms.double( 9.0 ),
    NoiseLevel_ME13 = cms.double( 8.0 ),
    NoiseLevel_ME21 = cms.double( 9.0 ),
    NoiseLevel_ME22 = cms.double( 9.0 ),
    NoiseLevel_ME31 = cms.double( 9.0 ),
    NoiseLevel_ME32 = cms.double( 9.0 ),
    NoiseLevel_ME41 = cms.double( 9.0 ),
    CSCUseReducedWireTimeWindow = cms.bool( False ),
    CSCWireTimeWindowLow = cms.int32( 0 ),
    CSCWireTimeWindowHigh = cms.int32( 15 )
)
# Builds CSC segments from hltCsc2DRecHits with the "RU" algorithm
# (CSCSegAlgoRU). chamber_types lists the ten CSC chamber types and
# parameters_per_chamber_type maps each one to an entry (1-based index) of
# the inner algo_psets list of cut sets (dPhiMax, dRMax, chi2 limits, ...).
process.hltCscSegments = cms.EDProducer( "CSCSegmentProducer",
    inputObjects = cms.InputTag( "hltCsc2DRecHits" ),
    algo_type = cms.int32( 1 ),
    algo_psets = cms.VPSet(
      cms.PSet(  parameters_per_chamber_type = cms.vint32( 1, 2, 3, 4, 5, 6, 5, 6, 5, 6 ),
        algo_psets = cms.VPSet(
          cms.PSet(  wideSeg = cms.double( 3.0 ),
            chi2Norm_2D_ = cms.double( 35.0 ),
            dRIntMax = cms.double( 2.0 ),
            doCollisions = cms.bool( True ),
            dPhiMax = cms.double( 0.006 ),
            dRMax = cms.double( 1.5 ),
            dPhiIntMax = cms.double( 0.005 ),
            minLayersApart = cms.int32( 1 ),
            chi2Max = cms.double( 100.0 ),
            chi2_str = cms.double( 50.0 ),
            enlarge = cms.bool( False )
          ),
          cms.PSet(  wideSeg = cms.double( 3.0 ),
            chi2Norm_2D_ = cms.double( 35.0 ),
            dRIntMax = cms.double( 2.0 ),
            doCollisions = cms.bool( True ),
            dPhiMax = cms.double( 0.005 ),
            dRMax = cms.double( 1.5 ),
            dPhiIntMax = cms.double( 0.004 ),
            minLayersApart = cms.int32( 1 ),
            chi2Max = cms.double( 100.0 ),
            chi2_str = cms.double( 50.0 ),
            enlarge = cms.bool( False )
          ),
          cms.PSet(  wideSeg = cms.double( 3.0 ),
            chi2Norm_2D_ = cms.double( 35.0 ),
            dRIntMax = cms.double( 2.0 ),
            doCollisions = cms.bool( True ),
            dPhiMax = cms.double( 0.004 ),
            dRMax = cms.double( 1.5 ),
            dPhiIntMax = cms.double( 0.003 ),
            minLayersApart = cms.int32( 1 ),
            chi2Max = cms.double( 100.0 ),
            chi2_str = cms.double( 50.0 ),
            enlarge = cms.bool( False )
          ),
          cms.PSet(  wideSeg = cms.double( 3.0 ),
            chi2Norm_2D_ = cms.double( 20.0 ),
            dRIntMax = cms.double( 2.0 ),
            doCollisions = cms.bool( True ),
            dPhiMax = cms.double( 0.003 ),
            dRMax = cms.double( 1.5 ),
            dPhiIntMax = cms.double( 0.002 ),
            minLayersApart = cms.int32( 1 ),
            chi2Max = cms.double( 60.0 ),
            chi2_str = cms.double( 30.0 ),
            enlarge = cms.bool( False )
          ),
          cms.PSet(  wideSeg = cms.double( 3.0 ),
            chi2Norm_2D_ = cms.double( 60.0 ),
            dRIntMax = cms.double( 2.0 ),
            doCollisions = cms.bool( True ),
            dPhiMax = cms.double( 0.007 ),
            dRMax = cms.double( 1.5 ),
            dPhiIntMax = cms.double( 0.005 ),
            minLayersApart = cms.int32( 1 ),
            chi2Max = cms.double( 180.0 ),
            chi2_str = cms.double( 80.0 ),
            enlarge = cms.bool( False )
          ),
          cms.PSet(  wideSeg = cms.double( 3.0 ),
            chi2Norm_2D_ = cms.double( 35.0 ),
            dRIntMax = cms.double( 2.0 ),
            doCollisions = cms.bool( True ),
            dPhiMax = cms.double( 0.006 ),
            dRMax = cms.double( 1.5 ),
            dPhiIntMax = cms.double( 0.004 ),
            minLayersApart = cms.int32( 1 ),
            chi2Max = cms.double( 100.0 ),
            chi2_str = cms.double( 50.0 ),
            enlarge = cms.bool( False )
          )
        ),
        algo_name = cms.string( "CSCSegAlgoRU" ),
        chamber_types = cms.vstring( 'ME1/a',
          'ME1/b',
          'ME1/2',
          'ME1/3',
          'ME2/1',
          'ME2/2',
          'ME3/1',
          'ME3/2',
          'ME4/1',
          'ME4/2' )
      )
    )
)
# --- RPC and GEM local reconstruction ---

# Unpacks RPC raw data into digis.
process.hltMuonRPCDigis = cms.EDProducer( "RPCUnpackingModule",
    InputLabel = cms.InputTag( "rawDataCollector" ),
    doSynchro = cms.bool( False )
)
# Builds RPC rec-hits; masked/dead strips are taken from the data files below
# rather than from the conditions DB (maskSource/deadSource = "File").
process.hltRpcRecHits = cms.EDProducer( "RPCRecHitProducer",
    recAlgoConfig = cms.PSet(  ),
    recAlgo = cms.string( "RPCRecHitStandardAlgo" ),
    rpcDigiLabel = cms.InputTag( "hltMuonRPCDigis" ),
    maskSource = cms.string( "File" ),
    maskvecfile = cms.FileInPath( "RecoLocalMuon/RPCRecHit/data/RPCMaskVec.dat" ),
    deadSource = cms.string( "File" ),
    deadvecfile = cms.FileInPath( "RecoLocalMuon/RPCRecHit/data/RPCDeadVec.dat" )
)
# Unpacks GEM raw data (FED range 1467-1478) into digis.
process.hltMuonGEMDigis = cms.EDProducer( "GEMRawToDigiModule",
    InputLabel = cms.InputTag( "rawDataCollector" ),
    useDBEMap = cms.bool( False ),
    keepDAQStatus = cms.bool( False ),
    readMultiBX = cms.bool( False ),
    fedIdStart = cms.uint32( 1467 ),
    fedIdEnd = cms.uint32( 1478 )
)
# Builds GEM rec-hits from hltMuonGEMDigis; no channel masking applied.
process.hltGemRecHits = cms.EDProducer( "GEMRecHitProducer",
    recAlgoConfig = cms.PSet(  ),
    recAlgo = cms.string( "GEMRecHitStandardAlgo" ),
    gemDigiLabel = cms.InputTag( "hltMuonGEMDigis" ),
    applyMasking = cms.bool( False )
)
# Builds GEM segments from hltGemRecHits; separate parameter sets configure
# the GE0 algorithm (ge0_pset, GE0SegAlgoRU) and the standard GEM segment
# algorithm (algo_pset).
process.hltGemSegments = cms.EDProducer( "GEMSegmentProducer",
    gemRecHitLabel = cms.InputTag( "hltGemRecHits" ),
    ge0_name = cms.string( "GE0SegAlgoRU" ),
    algo_name = cms.string( "GEMSegmentAlgorithm" ),
    ge0_pset = cms.PSet(
      maxChi2GoodSeg = cms.double( 50.0 ),
      maxChi2Prune = cms.double( 50.0 ),
      maxNumberOfHitsPerLayer = cms.uint32( 100 ),
      maxETASeeds = cms.double( 0.1 ),
      maxPhiAdditional = cms.double( 0.001096605744 ),
      minNumberOfHits = cms.uint32( 4 ),
      doCollisions = cms.bool( True ),
      maxPhiSeeds = cms.double( 0.001096605744 ),
      requireCentralBX = cms.bool( True ),
      maxChi2Additional = cms.double( 100.0 ),
      allowWideSegments = cms.bool( True ),
      maxNumberOfHits = cms.uint32( 300 ),
      maxTOFDiff = cms.double( 25.0 )
    ),
    algo_pset = cms.PSet(
      dYclusBoxMax = cms.double( 5.0 ),
      dXclusBoxMax = cms.double( 1.0 ),
      maxRecHitsInCluster = cms.int32( 4 ),
      preClustering = cms.bool( True ),
      preClusteringUseChaining = cms.bool( True ),
      dEtaChainBoxMax = cms.double( 0.05 ),
      clusterOnlySameBXRecHits = cms.bool( True ),
      minHitsPerSegment = cms.uint32( 2 ),
      dPhiChainBoxMax = cms.double( 0.02 )
    )
)
# Generates offline-style L2 muon seeds from DT (hltDt4DSegments) and CSC
# (hltCscSegments) segments; ME0 measurements are disabled. The long lists of
# CSC_*/DT_*/OL_*/SME_*/SMB_* vdouble parameters are per-station-pair
# parameterization coefficients used by the seed pT estimate (values are
# tuning constants — do not edit by hand).
process.hltL2OfflineMuonSeeds = cms.EDProducer( "MuonSeedGenerator",
    beamSpotTag = cms.InputTag( "hltOnlineBeamSpot" ),
    scaleDT = cms.bool( True ),
    CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
    DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
    ME0RecSegmentLabel = cms.InputTag( "me0Segments" ),
    EnableDTMeasurement = cms.bool( True ),
    EnableCSCMeasurement = cms.bool( True ),
    EnableME0Measurement = cms.bool( False ),
    crackEtas = cms.vdouble( 0.2, 1.6, 1.7 ),
    crackWindow = cms.double( 0.04 ),
    deltaPhiSearchWindow = cms.double( 0.25 ),
    deltaEtaSearchWindow = cms.double( 0.2 ),
    deltaEtaCrackSearchWindow = cms.double( 0.25 ),
    CSC_01 = cms.vdouble( 0.166, 0.0, 0.0, 0.031, 0.0, 0.0 ),
    CSC_12 = cms.vdouble( -0.161, 0.254, -0.047, 0.042, -0.007, 0.0 ),
    CSC_02 = cms.vdouble( 0.612, -0.207, 0.0, 0.067, -0.001, 0.0 ),
    CSC_13 = cms.vdouble( 0.901, -1.302, 0.533, 0.045, 0.005, 0.0 ),
    CSC_03 = cms.vdouble( 0.787, -0.338, 0.029, 0.101, -0.008, 0.0 ),
    CSC_14 = cms.vdouble( 0.606, -0.181, -0.002, 0.111, -0.003, 0.0 ),
    CSC_23 = cms.vdouble( -0.081, 0.113, -0.029, 0.015, 0.008, 0.0 ),
    CSC_24 = cms.vdouble( 0.004, 0.021, -0.002, 0.053, 0.0, 0.0 ),
    CSC_34 = cms.vdouble( 0.062, -0.067, 0.019, 0.021, 0.003, 0.0 ),
    DT_12 = cms.vdouble( 0.183, 0.054, -0.087, 0.028, 0.002, 0.0 ),
    DT_13 = cms.vdouble( 0.315, 0.068, -0.127, 0.051, -0.002, 0.0 ),
    DT_14 = cms.vdouble( 0.359, 0.052, -0.107, 0.072, -0.004, 0.0 ),
    DT_23 = cms.vdouble( 0.13, 0.023, -0.057, 0.028, 0.004, 0.0 ),
    DT_24 = cms.vdouble( 0.176, 0.014, -0.051, 0.051, 0.003, 0.0 ),
    DT_34 = cms.vdouble( 0.044, 0.004, -0.013, 0.029, 0.003, 0.0 ),
    OL_1213 = cms.vdouble( 0.96, -0.737, 0.0, 0.052, 0.0, 0.0 ),
    OL_1222 = cms.vdouble( 0.848, -0.591, 0.0, 0.062, 0.0, 0.0 ),
    OL_1232 = cms.vdouble( 0.184, 0.0, 0.0, 0.066, 0.0, 0.0 ),
    OL_2213 = cms.vdouble( 0.117, 0.0, 0.0, 0.044, 0.0, 0.0 ),
    OL_2222 = cms.vdouble( 0.107, 0.0, 0.0, 0.04, 0.0, 0.0 ),
    SME_11 = cms.vdouble( 3.295, -1.527, 0.112, 0.378, 0.02, 0.0 ),
    SME_12 = cms.vdouble( 0.102, 0.599, 0.0, 0.38, 0.0, 0.0 ),
    SME_13 = cms.vdouble( -1.286, 1.711, 0.0, 0.356, 0.0, 0.0 ),
    SME_21 = cms.vdouble( -0.529, 1.194, -0.358, 0.472, 0.086, 0.0 ),
    SME_22 = cms.vdouble( -1.207, 1.491, -0.251, 0.189, 0.243, 0.0 ),
    SME_31 = cms.vdouble( -1.594, 1.482, -0.317, 0.487, 0.097, 0.0 ),
    SME_32 = cms.vdouble( -0.901, 1.333, -0.47, 0.41, 0.073, 0.0 ),
    SME_41 = cms.vdouble( -0.003, 0.005, 0.005, 0.608, 0.076, 0.0 ),
    SME_42 = cms.vdouble( -0.003, 0.005, 0.005, 0.608, 0.076, 0.0 ),
    SMB_10 = cms.vdouble( 1.387, -0.038, 0.0, 0.19, 0.0, 0.0 ),
    SMB_11 = cms.vdouble( 1.247, 0.72, -0.802, 0.229, -0.075, 0.0 ),
    SMB_12 = cms.vdouble( 2.128, -0.956, 0.0, 0.199, 0.0, 0.0 ),
    SMB_20 = cms.vdouble( 1.011, -0.052, 0.0, 0.188, 0.0, 0.0 ),
    SMB_21 = cms.vdouble( 1.043, -0.124, 0.0, 0.183, 0.0, 0.0 ),
    SMB_22 = cms.vdouble( 1.474, -0.758, 0.0, 0.185, 0.0, 0.0 ),
    SMB_30 = cms.vdouble( 0.505, -0.022, 0.0, 0.215, 0.0, 0.0 ),
    SMB_31 = cms.vdouble( 0.549, -0.145, 0.0, 0.207, 0.0, 0.0 ),
    SMB_32 = cms.vdouble( 0.67, -0.327, 0.0, 0.22, 0.0, 0.0 ),
    CSC_01_1_scale = cms.vdouble( -1.915329, 0.0 ),
    CSC_12_1_scale = cms.vdouble( -6.434242, 0.0 ),
    CSC_12_2_scale = cms.vdouble( -1.63622, 0.0 ),
    CSC_12_3_scale = cms.vdouble( -1.63622, 0.0 ),
    CSC_13_2_scale = cms.vdouble( -6.077936, 0.0 ),
    CSC_13_3_scale = cms.vdouble( -1.701268, 0.0 ),
    CSC_14_3_scale = cms.vdouble( -1.969563, 0.0 ),
    CSC_23_1_scale = cms.vdouble( -19.084285, 0.0 ),
    CSC_23_2_scale = cms.vdouble( -6.079917, 0.0 ),
    CSC_24_1_scale = cms.vdouble( -6.055701, 0.0 ),
    CSC_34_1_scale = cms.vdouble( -11.520507, 0.0 ),
    OL_1213_0_scale = cms.vdouble( -4.488158, 0.0 ),
    OL_1222_0_scale = cms.vdouble( -5.810449, 0.0 ),
    OL_1232_0_scale = cms.vdouble( -5.964634, 0.0 ),
    OL_2213_0_scale = cms.vdouble( -7.239789, 0.0 ),
    OL_2222_0_scale = cms.vdouble( -7.667231, 0.0 ),
    DT_12_1_scale = cms.vdouble( -3.692398, 0.0 ),
    DT_12_2_scale = cms.vdouble( -3.518165, 0.0 ),
    DT_13_1_scale = cms.vdouble( -4.520923, 0.0 ),
    DT_13_2_scale = cms.vdouble( -4.257687, 0.0 ),
    DT_14_1_scale = cms.vdouble( -5.644816, 0.0 ),
    DT_14_2_scale = cms.vdouble( -4.808546, 0.0 ),
    DT_23_1_scale = cms.vdouble( -5.320346, 0.0 ),
    DT_23_2_scale = cms.vdouble( -5.117625, 0.0 ),
    DT_24_1_scale = cms.vdouble( -7.490909, 0.0 ),
    DT_24_2_scale = cms.vdouble( -6.63094, 0.0 ),
    DT_34_1_scale = cms.vdouble( -13.783765, 0.0 ),
    DT_34_2_scale = cms.vdouble( -11.901897, 0.0 ),
    SMB_10_0_scale = cms.vdouble( 2.448566, 0.0 ),
    SMB_11_0_scale = cms.vdouble( 2.56363, 0.0 ),
    SMB_12_0_scale = cms.vdouble( 2.283221, 0.0 ),
    SMB_20_0_scale = cms.vdouble( 1.486168, 0.0 ),
    SMB_21_0_scale = cms.vdouble( 1.58384, 0.0 ),
    SMB_22_0_scale = cms.vdouble( 1.346681, 0.0 ),
    SMB_30_0_scale = cms.vdouble( -3.629838, 0.0 ),
    SMB_31_0_scale = cms.vdouble( -3.323768, 0.0 ),
    SMB_32_0_scale = cms.vdouble( -3.054156, 0.0 ),
    SME_11_0_scale = cms.vdouble( 1.325085, 0.0 ),
    SME_12_0_scale = cms.vdouble( 2.279181, 0.0 ),
    SME_13_0_scale = cms.vdouble( 0.104905, 0.0 ),
    SME_21_0_scale = cms.vdouble( -0.040862, 0.0 ),
    SME_22_0_scale = cms.vdouble( -3.457901, 0.0 )
)
# L2 muon seed generator: builds L2 seeds from L1T muons read from
# ('hltGtStage2Digis','Muon').  With UseOfflineSeed=True each L1 muon is
# matched (within MatchDR) to an offline seed from "hltL2OfflineMuonSeeds";
# unmatched L1 muons are dropped since UseUnassociatedL1=False.
process.hltL2MuonSeeds = cms.EDProducer( "L2MuonSeedGeneratorFromL1T",
    GMTReadoutCollection = cms.InputTag( "" ),
    InputObjects = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    Propagator = cms.string( "SteppingHelixPropagatorAny" ),
    L1MinPt = cms.double( 0.0 ),
    L1MaxEta = cms.double( 2.5 ),
    L1MinQuality = cms.uint32( 7 ),
    SetMinPtBarrelTo = cms.double( 3.5 ),
    SetMinPtEndcapTo = cms.double( 1.0 ),
    UseOfflineSeed = cms.untracked.bool( True ),
    UseUnassociatedL1 = cms.bool( False ),
    MatchDR = cms.vdouble( 0.3 ),
    EtaMatchingBins = cms.vdouble( 0.0, 2.5 ),
    CentralBxOnly = cms.bool( True ),
    MatchType = cms.uint32( 0 ),
    SortType = cms.uint32( 0 ),
    OfflineSeedLabel = cms.untracked.InputTag( "hltL2OfflineMuonSeeds" ),
    ServiceParameters = cms.PSet( 
      RPCLayers = cms.bool( True ),
      UseMuonNavigation = cms.untracked.bool( True ),
      Propagators = cms.untracked.vstring( 'SteppingHelixPropagatorAny' )
    )
)
# L2 standalone muon reconstruction: builds L2 muon tracks from the seeds of
# "hltL2MuonSeeds" using DT ("hltDt4DSegments"), CSC ("hltCscSegments"),
# RPC ("hltRpcRecHits") and GEM ("hltGemRecHits") measurements in both the
# inside-out filter and the outside-in backward filter; the loaded tracks are
# refit with a beam-spot vertex constraint (VertexConstraint=True).
process.hltL2Muons = cms.EDProducer( "L2MuonProducer",
    ServiceParameters = cms.PSet( 
      RPCLayers = cms.bool( True ),
      UseMuonNavigation = cms.untracked.bool( True ),
      Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny',
        'hltESPFastSteppingHelixPropagatorOpposite' )
    ),
    InputObjects = cms.InputTag( "hltL2MuonSeeds" ),
    SeedTransformerParameters = cms.PSet( 
      Fitter = cms.string( "hltESPKFFittingSmootherForL2Muon" ),
      NMinRecHits = cms.uint32( 2 ),
      RescaleError = cms.double( 100.0 ),
      Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
      UseSubRecHits = cms.bool( False ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
    ),
    L2TrajBuilderParameters = cms.PSet( 
      # outside-in (backward) filter settings
      BWFilterParameters = cms.PSet( 
        DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
        CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
        BWSeedType = cms.string( "fromGenerator" ),
        GEMRecSegmentLabel = cms.InputTag( "hltGemRecHits" ),
        RPCRecSegmentLabel = cms.InputTag( "hltRpcRecHits" ),
        EnableGEMMeasurement = cms.bool( True ),
        EnableRPCMeasurement = cms.bool( True ),
        MuonTrajectoryUpdatorParameters = cms.PSet( 
          ExcludeRPCFromFit = cms.bool( False ),
          Granularity = cms.int32( 0 ),
          MaxChi2 = cms.double( 25.0 ),
          RescaleError = cms.bool( False ),
          RescaleErrorFactor = cms.double( 100.0 ),
          UseInvalidHits = cms.bool( True )
        ),
        EnableCSCMeasurement = cms.bool( True ),
        MaxChi2 = cms.double( 100.0 ),
        FitDirection = cms.string( "outsideIn" ),
        Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
        NumberOfSigma = cms.double( 3.0 ),
        EnableDTMeasurement = cms.bool( True )
      ),
      DoSeedRefit = cms.bool( False ),
      # inside-out (forward) filter settings
      FilterParameters = cms.PSet( 
        DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
        CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
        GEMRecSegmentLabel = cms.InputTag( "hltGemRecHits" ),
        RPCRecSegmentLabel = cms.InputTag( "hltRpcRecHits" ),
        EnableGEMMeasurement = cms.bool( True ),
        EnableRPCMeasurement = cms.bool( True ),
        MuonTrajectoryUpdatorParameters = cms.PSet( 
          ExcludeRPCFromFit = cms.bool( False ),
          Granularity = cms.int32( 0 ),
          MaxChi2 = cms.double( 25.0 ),
          RescaleError = cms.bool( False ),
          RescaleErrorFactor = cms.double( 100.0 ),
          UseInvalidHits = cms.bool( True )
        ),
        EnableCSCMeasurement = cms.bool( True ),
        MaxChi2 = cms.double( 1000.0 ),
        FitDirection = cms.string( "insideOut" ),
        Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
        NumberOfSigma = cms.double( 3.0 ),
        EnableDTMeasurement = cms.bool( True )
      ),
      SeedPosition = cms.string( "in" ),
      DoBackwardFilter = cms.bool( True ),
      DoRefit = cms.bool( False ),
      NavigationType = cms.string( "Standard" ),
      SeedTransformerParameters = cms.PSet( 
        Fitter = cms.string( "hltESPKFFittingSmootherForL2Muon" ),
        NMinRecHits = cms.uint32( 2 ),
        RescaleError = cms.double( 100.0 ),
        Propagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
        UseSubRecHits = cms.bool( False ),
        MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
      ),
      SeedPropagator = cms.string( "hltESPFastSteppingHelixPropagatorAny" )
    ),
    DoSeedRefit = cms.bool( False ),
    TrackLoaderParameters = cms.PSet( 
      TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      DoSmoothing = cms.bool( False ),
      VertexConstraint = cms.bool( True ),
      MuonUpdatorAtVertexParameters = cms.PSet( 
        MaxChi2 = cms.double( 1000000.0 ),
        BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 ),
        BeamSpotPosition = cms.vdouble( 0.0, 0.0, 0.0 ),
        Propagator = cms.string( "hltESPFastSteppingHelixPropagatorOpposite" )
      ),
      Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
    ),
    MuonTrajectoryBuilder = cms.string( "Exhaustive" )
)
# Wraps the vertex-updated L2 muon tracks ('hltL2Muons','UpdatedAtVtx')
# into candidate objects usable by the HLT filters downstream.
process.hltL2MuonCandidates = cms.EDProducer( "L2MuonCandidateProducer",
    InputObjects = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' )
)
# Legacy (CPU) pixel raw-to-digi unpacker running on "rawDataCollector";
# keeps unpacking-error information (IncludeErrors=True) for Phase-1 pixels.
process.hltSiPixelDigisLegacy = cms.EDProducer( "SiPixelRawToDigi",
    IncludeErrors = cms.bool( True ),
    UseQualityInfo = cms.bool( False ),
    ErrorList = cms.vint32( 29 ),
    UserErrorList = cms.vint32(  ),
    InputLabel = cms.InputTag( "rawDataCollector" ),
    Regions = cms.PSet(  ),
    UsePilotBlade = cms.bool( False ),
    UsePhase1 = cms.bool( True ),
    CablingMapLabel = cms.string( "" ),
    SiPixelQualityLabel = cms.string( "" )
)
# Legacy (CPU) pixel clusterizer on the digis from "hltSiPixelDigisLegacy";
# threshold clusterizer with an event-level cap of 40000 clusters.
process.hltSiPixelClustersLegacy = cms.EDProducer( "SiPixelClusterProducer",
    src = cms.InputTag( "hltSiPixelDigisLegacy" ),
    ClusterMode = cms.string( "PixelThresholdClusterizer" ),
    maxNumberOfClusters = cms.int32( 40000 ),
    payloadType = cms.string( "HLT" ),
    ChannelThreshold = cms.int32( 10 ),
    MissCalibrate = cms.bool( True ),
    SplitClusters = cms.bool( False ),
    VCaltoElectronGain = cms.int32( 1 ),
    VCaltoElectronGain_L1 = cms.int32( 1 ),
    VCaltoElectronOffset = cms.int32( 0 ),
    VCaltoElectronOffset_L1 = cms.int32( 0 ),
    SeedThreshold = cms.int32( 1000 ),
    ClusterThreshold_L1 = cms.int32( 4000 ),
    ClusterThreshold = cms.int32( 4000 ),
    ElectronPerADCGain = cms.double( 135.0 ),
    Phase2Calibration = cms.bool( False ),
    Phase2ReadoutMode = cms.int32( -1 ),
    Phase2DigiBaseline = cms.double( 1200.0 ),
    Phase2KinkADC = cms.int32( 8 )
)
# Precomputes a cluster-shape cache for "hltSiPixelClusters"; consumed by
# seed comparitors downstream (e.g. LowPtClusterShapeSeedComparitor).
process.hltSiPixelClustersCache = cms.EDProducer( "SiPixelClusterShapeCacheProducer",
    src = cms.InputTag( "hltSiPixelClusters" ),
    onDemand = cms.bool( False )
)
# Builds pixel rec hits in SoA format from legacy clusters ("hltSiPixelClusters")
# using the PixelCPEFast estimator; also emits the legacy-format hits
# (convertToLegacy=True) for consumers that need them.
process.hltSiPixelRecHitSoA = cms.EDProducer( "SiPixelRecHitSoAFromLegacy",
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    src = cms.InputTag( "hltSiPixelClusters" ),
    CPE = cms.string( "PixelCPEFast" ),
    convertToLegacy = cms.bool( True ),
    isPhase2 = cms.bool( False )
)
# Produces the list of strip-tracker FEDs absent from the raw data
# ("rawDataCollector"); used downstream to mask inactive strip modules.
process.hltSiStripExcludedFEDListProducer = cms.EDProducer( "SiStripExcludedFEDListProducer",
    ProductLabel = cms.InputTag( "rawDataCollector" )
)
# On-demand strip raw-to-cluster facility: unpacks and clusterizes strip data
# from "rawDataCollector" with the three-threshold algorithm (seed 3, cluster 5,
# channel 2 in noise units) only for the regions actually requested.
process.hltSiStripRawToClustersFacility = cms.EDProducer( "SiStripClusterizerFromRaw",
    onDemand = cms.bool( True ),
    Clusterizer = cms.PSet( 
      ConditionsLabel = cms.string( "" ),
      ClusterThreshold = cms.double( 5.0 ),
      SeedThreshold = cms.double( 3.0 ),
      Algorithm = cms.string( "ThreeThresholdAlgorithm" ),
      ChannelThreshold = cms.double( 2.0 ),
      MaxAdjacentBad = cms.uint32( 0 ),
      setDetId = cms.bool( True ),
      MaxSequentialHoles = cms.uint32( 0 ),
      RemoveApvShots = cms.bool( True ),
      clusterChargeCut = cms.PSet(  refToPSet_ = cms.string( "HLTSiStripClusterChargeCutNone" ) ),
      MaxSequentialBad = cms.uint32( 1 )
    ),
    Algorithms = cms.PSet( 
      Use10bitsTruncation = cms.bool( False ),
      CommonModeNoiseSubtractionMode = cms.string( "Median" ),
      useCMMeanMap = cms.bool( False ),
      TruncateInSuppressor = cms.bool( True ),
      doAPVRestore = cms.bool( False ),
      SiStripFedZeroSuppressionMode = cms.uint32( 4 ),
      PedestalSubtractionFedMode = cms.bool( True )
    ),
    DoAPVEmulatorCheck = cms.bool( False ),
    HybridZeroSuppressed = cms.bool( False ),
    ProductLabel = cms.InputTag( "rawDataCollector" )
)
# Assembles the MeasurementTrackerEvent used by tracking: combines pixel
# clusters ("hltSiPixelClusters") and on-demand strip clusters
# ("hltSiStripRawToClustersFacility"), masking inactive pixel FEDs/channels
# (from "hltSiPixelDigis") and excluded strip FEDs.
process.hltSiStripClusters = cms.EDProducer( "MeasurementTrackerEventProducer",
    measurementTracker = cms.string( "hltESPMeasurementTracker" ),
    skipClusters = cms.InputTag( "" ),
    pixelClusterProducer = cms.string( "hltSiPixelClusters" ),
    stripClusterProducer = cms.string( "hltSiStripRawToClustersFacility" ),
    Phase2TrackerCluster1DProducer = cms.string( "" ),
    vectorHits = cms.InputTag( "" ),
    vectorHitsRej = cms.InputTag( "" ),
    inactivePixelDetectorLabels = cms.VInputTag( 'hltSiPixelDigis' ),
    badPixelFEDChannelCollectionLabels = cms.VInputTag( 'hltSiPixelDigis' ),
    pixelCablingMapLabel = cms.string( "" ),
    inactiveStripDetectorLabels = cms.VInputTag( 'hltSiStripExcludedFEDListProducer' ),
    switchOffPixelsIfEmpty = cms.bool( True )
)
# Outside-in (OI) tracker seeding from L2 muons ('hltL2Muons','UpdatedAtVtx'):
# creates up to maxHitlessSeeds hitless and maxHitSeeds hit-based seeds per L2
# muon on TOB/TEC layers; error rescaling for hitless seeds is adjusted
# dynamically using the pT/eta-binned scale factors SF1..SF6.
process.hltIterL3OISeedsFromL2Muons = cms.EDProducer( "TSGForOIFromL2",
    src = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
    layersToTry = cms.int32( 2 ),
    fixedErrorRescaleFactorForHitless = cms.double( 2.0 ),
    hitsToTry = cms.int32( 1 ),
    adjustErrorsDynamicallyForHits = cms.bool( False ),
    adjustErrorsDynamicallyForHitless = cms.bool( True ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" ),
    UseHitLessSeeds = cms.bool( True ),
    estimator = cms.string( "hltESPChi2MeasurementEstimator100" ),
    maxEtaForTOB = cms.double( 1.8 ),
    minEtaForTEC = cms.double( 0.7 ),
    debug = cms.untracked.bool( False ),
    fixedErrorRescaleFactorForHits = cms.double( 1.0 ),
    maxSeeds = cms.uint32( 20 ),
    maxHitlessSeeds = cms.uint32( 5 ),
    maxHitSeeds = cms.uint32( 1 ),
    numL2ValidHitsCutAllEta = cms.uint32( 20 ),
    numL2ValidHitsCutAllEndcap = cms.uint32( 30 ),
    # pT (GeV) and |eta| bin edges for the dynamic error scale factors below
    pT1 = cms.double( 13.0 ),
    pT2 = cms.double( 30.0 ),
    pT3 = cms.double( 70.0 ),
    eta1 = cms.double( 0.2 ),
    eta2 = cms.double( 0.3 ),
    eta3 = cms.double( 1.0 ),
    eta4 = cms.double( 1.2 ),
    eta5 = cms.double( 1.6 ),
    eta6 = cms.double( 1.4 ),
    eta7 = cms.double( 2.1 ),
    SF1 = cms.double( 3.0 ),
    SF2 = cms.double( 4.0 ),
    SF3 = cms.double( 5.0 ),
    SF4 = cms.double( 7.0 ),
    SF5 = cms.double( 10.0 ),
    SF6 = cms.double( 2.0 ),
    tsosDiff1 = cms.double( 0.2 ),
    tsosDiff2 = cms.double( 0.02 ),
    propagatorName = cms.string( "PropagatorWithMaterialParabolicMf" )
)
# CKF pattern recognition for the OI step: builds track candidates from the
# OI seeds ("hltIterL3OISeedsFromL2Muons"), running trajectories inward
# (reverseTrajectories=True, matching the outside-in seed direction).
process.hltIterL3OITrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( False ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( True ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" ),
    src = cms.InputTag( "hltIterL3OISeedsFromL2Muons" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTPSetMuonCkfTrajectoryBuilder" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet( 
      propagatorAlongTISE = cms.string( "PropagatorWithMaterial" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    TrajectoryCleaner = cms.string( "muonSeededTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 500000 ),
    maxSeedsBeforeCleaning = cms.uint32( 5000 )
)
# Final Kalman fit of the OI track candidates ("hltIterL3OITrackCandidates")
# producing reco tracks tagged with AlgorithmName "iter10".
process.hltIterL3OIMuCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( False ),
    SimpleMagneticField = cms.string( "" ),
    src = cms.InputTag( "hltIterL3OITrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPKFFittingSmootherWithOutliersRejectionAndRK" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "iter10" ),
    Propagator = cms.string( "PropagatorWithMaterial" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    MeasurementTracker = cms.string( "hltESPMeasurementTracker" ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" )
)
# Cut-based track quality classification of the OI tracks.  Vertices are
# ignored (ignoreVertices=True, "Notused" placeholder) so all impact-parameter
# cuts are evaluated with respect to the beam spot only.  Each cut vector
# holds the (loose, medium, tight) working-point thresholds.
process.hltIterL3OIMuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIterL3OIMuCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "Notused" ),
    ignoreVertices = cms.bool( True ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet( 
      minPixelHits = cms.vint32( 0, 0, 1 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
      dr_par = cms.PSet( 
        d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
        dr_par2 = cms.vdouble( 0.3, 0.3, 3.40282346639E38 ),
        dr_par1 = cms.vdouble( 0.4, 0.4, 3.40282346639E38 ),
        dr_exp = cms.vint32( 4, 4, 2147483647 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
      ),
      maxLostLayers = cms.vint32( 4, 3, 2 ),
      min3DLayers = cms.vint32( 1, 2, 1 ),
      dz_par = cms.PSet( 
        dz_par1 = cms.vdouble( 0.4, 0.4, 3.40282346639E38 ),
        dz_par2 = cms.vdouble( 0.35, 0.35, 3.40282346639E38 ),
        dz_exp = cms.vint32( 4, 4, 2147483647 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 3.40282346639E38, 3.40282346639E38, 3.40282346639E38 ),
      maxChi2n = cms.vdouble( 10.0, 1.0, 0.4 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 5, 5 )
    )
)
# Keeps only OI tracks passing the "highPurity" quality mask assigned by
# "hltIterL3OIMuonTrackCutClassifier".
process.hltIterL3OIMuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIterL3OIMuCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIterL3OIMuonTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIterL3OIMuonTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# L3 (global) muon reconstruction for the OI step: matches the vertex-updated
# L2 muons ('hltL2Muons','UpdatedAtVtx') to the high-purity OI tracker tracks
# ("hltIterL3OIMuonTrackSelectionHighPurity") and performs a combined
# tracker+muon refit including DT/CSC/RPC/GEM hits.
process.hltL3MuonsIterL3OI = cms.EDProducer( "L3MuonProducer",
    ServiceParameters = cms.PSet( 
      RPCLayers = cms.bool( True ),
      UseMuonNavigation = cms.untracked.bool( True ),
      Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',
        'SteppingHelixPropagatorAny',
        'hltESPSmartPropagator',
        'hltESPSteppingHelixPropagatorOpposite' )
    ),
    MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
    TrackLoaderParameters = cms.PSet( 
      MuonSeededTracksInstance = cms.untracked.string( "L2Seeded" ),
      TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      DoSmoothing = cms.bool( True ),
      SmoothTkTrack = cms.untracked.bool( False ),
      VertexConstraint = cms.bool( False ),
      MuonUpdatorAtVertexParameters = cms.PSet( 
        MaxChi2 = cms.double( 1000000.0 ),
        BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 ),
        Propagator = cms.string( "hltESPSteppingHelixPropagatorOpposite" )
      ),
      PutTkTrackIntoEvent = cms.untracked.bool( False ),
      Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
    ),
    L3TrajBuilderParameters = cms.PSet( 
      PtCut = cms.double( 1.0 ),
      TrackerPropagator = cms.string( "SteppingHelixPropagatorAny" ),
      # chi2/deltaR/deltaD matching criteria between L2 muon and tracker track
      GlobalMuonTrackMatcher = cms.PSet( 
        Chi2Cut_3 = cms.double( 200.0 ),
        DeltaDCut_2 = cms.double( 10.0 ),
        Eta_threshold = cms.double( 1.2 ),
        Quality_2 = cms.double( 15.0 ),
        DeltaDCut_1 = cms.double( 40.0 ),
        Quality_3 = cms.double( 7.0 ),
        DeltaDCut_3 = cms.double( 15.0 ),
        Quality_1 = cms.double( 20.0 ),
        Pt_threshold1 = cms.double( 0.0 ),
        DeltaRCut_2 = cms.double( 0.2 ),
        DeltaRCut_1 = cms.double( 0.1 ),
        Pt_threshold2 = cms.double( 9.99999999E8 ),
        Chi2Cut_1 = cms.double( 50.0 ),
        Chi2Cut_2 = cms.double( 50.0 ),
        DeltaRCut_3 = cms.double( 1.0 ),
        LocChi2Cut = cms.double( 0.001 ),
        Propagator = cms.string( "hltESPSmartPropagator" ),
        MinPt = cms.double( 1.0 ),
        MinP = cms.double( 2.5 )
      ),
      ScaleTECxFactor = cms.double( -1.0 ),
      tkTrajUseVertex = cms.bool( False ),
      MuonTrackingRegionBuilder = cms.PSet( 
        Rescale_Dz = cms.double( 4.0 ),
        Pt_fixed = cms.bool( False ),
        Eta_fixed = cms.bool( True ),
        Eta_min = cms.double( 0.1 ),
        DeltaZ = cms.double( 24.2 ),
        maxRegions = cms.int32( 2 ),
        EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
        UseVertex = cms.bool( False ),
        Z_fixed = cms.bool( False ),
        PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
        PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
        Rescale_phi = cms.double( 3.0 ),
        DeltaEta = cms.double( 0.2 ),
        precise = cms.bool( True ),
        OnDemand = cms.int32( -1 ),
        EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
        MeasurementTrackerName = cms.InputTag( "hltESPMeasurementTracker" ),
        vertexCollection = cms.InputTag( "pixelVertices" ),
        Pt_min = cms.double( 3.0 ),
        beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
        Phi_fixed = cms.bool( True ),
        DeltaR = cms.double( 0.025 ),
        input = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
        DeltaPhi = cms.double( 0.15 ),
        Phi_min = cms.double( 0.1 ),
        Rescale_eta = cms.double( 3.0 )
      ),
      TrackTransformer = cms.PSet( 
        Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
        RefitDirection = cms.string( "insideOut" ),
        RefitRPCHits = cms.bool( True ),
        Propagator = cms.string( "hltESPSmartPropagatorAny" ),
        DoPredictionsOnly = cms.bool( False ),
        TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
        MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
        Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
      ),
      tkTrajBeamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      RefitRPCHits = cms.bool( True ),
      tkTrajVertex = cms.InputTag( "Notused" ),
      # settings of the global (tracker+muon) refit
      GlbRefitterParameters = cms.PSet( 
        Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
        DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
        RefitFlag = cms.bool( True ),
        SkipStation = cms.int32( -1 ),
        Chi2CutRPC = cms.double( 1.0 ),
        PropDirForCosmics = cms.bool( False ),
        CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
        GEMRecHitLabel = cms.InputTag( "hltGemRecHits" ),
        HitThreshold = cms.int32( 1 ),
        Chi2CutGEM = cms.double( 1.0 ),
        DYTthrs = cms.vint32( 30, 15 ),
        TrackerSkipSystem = cms.int32( -1 ),
        RefitDirection = cms.string( "insideOut" ),
        Chi2CutCSC = cms.double( 150.0 ),
        Chi2CutDT = cms.double( 10.0 ),
        RefitRPCHits = cms.bool( True ),
        TrackerSkipSection = cms.int32( -1 ),
        Propagator = cms.string( "hltESPSmartPropagatorAny" ),
        DoPredictionsOnly = cms.bool( False ),
        TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
        MuonHitsOption = cms.int32( 1 ),
        MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
      ),
      PCut = cms.double( 2.5 ),
      tkTrajMaxDXYBeamSpot = cms.double( 9999.0 ),
      TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      tkTrajMaxChi2 = cms.double( 9999.0 ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
      ScaleTECyFactor = cms.double( -1.0 ),
      tkTrajLabel = cms.InputTag( "hltIterL3OIMuonTrackSelectionHighPurity" )
    )
)
# Merges the track links produced by "hltL3MuonsIterL3OI" (single input here;
# the combiner interface takes a list).
process.hltIterL3OIL3MuonsLinksCombination = cms.EDProducer( "L3TrackLinksCombiner",
    labels = cms.VInputTag( 'hltL3MuonsIterL3OI' )
)
# Merges the L3 muon tracks produced by "hltL3MuonsIterL3OI" (single input).
process.hltIterL3OIL3Muons = cms.EDProducer( "L3TrackCombiner",
    labels = cms.VInputTag( 'hltL3MuonsIterL3OI' )
)
# Builds L3 muon candidates from the combined OI L3 tracks, taking the
# momentum from the tracker fit (MuonPtOption = "Tracker").
process.hltIterL3OIL3MuonCandidates = cms.EDProducer( "L3MuonCandidateProducer",
    InputObjects = cms.InputTag( "hltIterL3OIL3Muons" ),
    InputLinksObjects = cms.InputTag( "hltIterL3OIL3MuonsLinksCombination" ),
    MuonPtOption = cms.string( "Tracker" )
)
# Selects which L2 muons are passed to the inside-out (IO) iteration, based
# on the OI L3 candidates and links; the hit/chi2/pT-difference cuts below are
# not applied since applyL3Filters=False.
process.hltL2SelectorForL3IO = cms.EDProducer( "HLTMuonL2SelectorForL3IO",
    l2Src = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
    l3OISrc = cms.InputTag( "hltIterL3OIL3MuonCandidates" ),
    InputLinks = cms.InputTag( "hltIterL3OIL3MuonsLinksCombination" ),
    applyL3Filters = cms.bool( False ),
    MinNhits = cms.int32( 1 ),
    MaxNormalizedChi2 = cms.double( 20.0 ),
    MinNmuonHits = cms.int32( 1 ),
    MaxPtDifference = cms.double( 0.3 )
)
# Kinematic filter applied to pixel tracks of the IO muon iteration:
# pT > 0.1 GeV, transverse IP < 1.0 cm, chi2 < 1000.
process.hltIterL3MuonPixelTracksFilter = cms.EDProducer( "PixelTrackFilterByKinematicsProducer",
    ptMin = cms.double( 0.1 ),
    nSigmaInvPtTolerance = cms.double( 0.0 ),
    tipMax = cms.double( 1.0 ),
    nSigmaTipMaxTolerance = cms.double( 0.0 ),
    chi2 = cms.double( 1000.0 )
)
# Helix-projection fitter used to fit the IO-muon pixel tracks.
process.hltIterL3MuonPixelTracksFitter = cms.EDProducer( "PixelFitterByHelixProjectionsProducer",
    scaleErrorsForBPix1 = cms.bool( False ),
    scaleFactor = cms.double( 0.65 )
)
# Builds up to 5 muon-centered tracking regions around the L2 muons selected
# by "hltL2SelectorForL3IO"; fixed pT (> 2 GeV), eta and phi windows, z window
# of +-24.2 cm around the beam spot (no vertex used).
process.hltIterL3MuonPixelTracksTrackingRegions = cms.EDProducer( "MuonTrackingRegionEDProducer",
    EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
    DeltaR = cms.double( 0.025 ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    OnDemand = cms.int32( -1 ),
    vertexCollection = cms.InputTag( "notUsed" ),
    Rescale_phi = cms.double( 3.0 ),
    Eta_fixed = cms.bool( True ),
    Rescale_eta = cms.double( 3.0 ),
    PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
    Eta_min = cms.double( 0.0 ),
    Phi_fixed = cms.bool( True ),
    Phi_min = cms.double( 0.0 ),
    PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
    EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
    MeasurementTrackerName = cms.InputTag( "" ),
    UseVertex = cms.bool( False ),
    Rescale_Dz = cms.double( 4.0 ),
    Pt_fixed = cms.bool( True ),
    Z_fixed = cms.bool( True ),
    Pt_min = cms.double( 2.0 ),
    DeltaZ = cms.double( 24.2 ),
    DeltaEta = cms.double( 0.2 ),
    DeltaPhi = cms.double( 0.15 ),
    maxRegions = cms.int32( 5 ),
    precise = cms.bool( True ),
    input = cms.InputTag( "hltL2SelectorForL3IO" )
)
# Seeding-layer sets (pixel quadruplets, barrel+forward combinations) for the
# IO-muon pixel-track seeding; hits come from "hltSiPixelRecHits" with fixed
# parametrized hit errors.
process.hltIterL3MuonPixelLayerQuadruplets = cms.EDProducer( "SeedingLayersEDProducer",
    layerList = cms.vstring( 'BPix1+BPix2+BPix3+BPix4',
      'BPix1+BPix2+BPix3+FPix1_pos',
      'BPix1+BPix2+BPix3+FPix1_neg',
      'BPix1+BPix2+FPix1_pos+FPix2_pos',
      'BPix1+BPix2+FPix1_neg+FPix2_neg',
      'BPix1+FPix1_pos+FPix2_pos+FPix3_pos',
      'BPix1+FPix1_neg+FPix2_neg+FPix3_neg' ),
    BPix = cms.PSet( 
      hitErrorRPhi = cms.double( 0.0027 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.006 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    FPix = cms.PSet( 
      hitErrorRPhi = cms.double( 0.0051 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.0036 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    TIB = cms.PSet(  ),
    TID = cms.PSet(  ),
    TOB = cms.PSet(  ),
    TEC = cms.PSet(  ),
    MTIB = cms.PSet(  ),
    MTID = cms.PSet(  ),
    MTOB = cms.PSet(  ),
    MTEC = cms.PSet(  )
)
# Produces hit doublets from the quadruplet seeding layers inside the muon
# tracking regions; emits intermediate doublets for the CA quadruplet step.
process.hltIterL3MuonPixelTracksHitDoublets = cms.EDProducer( "HitPairEDProducer",
    seedingLayers = cms.InputTag( "hltIterL3MuonPixelLayerQuadruplets" ),
    trackingRegions = cms.InputTag( "hltIterL3MuonPixelTracksTrackingRegions" ),
    trackingRegionsSeedingLayers = cms.InputTag( "" ),
    clusterCheck = cms.InputTag( "" ),
    produceSeedingHitSets = cms.bool( False ),
    produceIntermediateHitDoublets = cms.bool( True ),
    maxElement = cms.uint32( 0 ),
    maxElementTotal = cms.uint32( 50000000 ),
    layerPairs = cms.vuint32( 0, 1, 2 )
)
# Cellular-automaton (CA) quadruplet building from the hit doublets, with
# pT-dependent chi2 cuts and a cluster-shape seed comparitor using
# "hltSiPixelClustersCache".
process.hltIterL3MuonPixelTracksHitQuadruplets = cms.EDProducer( "CAHitQuadrupletEDProducer",
    doublets = cms.InputTag( "hltIterL3MuonPixelTracksHitDoublets" ),
    extraHitRPhitolerance = cms.double( 0.032 ),
    fitFastCircle = cms.bool( True ),
    fitFastCircleChi2Cut = cms.bool( True ),
    useBendingCorrection = cms.bool( True ),
    CAThetaCut = cms.double( 0.005 ),
    CAPhiCut = cms.double( 0.2 ),
    CAThetaCut_byTriplets = cms.VPSet( 
      cms.PSet(  seedingLayers = cms.string( "" ),
        cut = cms.double( -1.0 )
      )
    ),
    CAPhiCut_byTriplets = cms.VPSet( 
      cms.PSet(  seedingLayers = cms.string( "" ),
        cut = cms.double( -1.0 )
      )
    ),
    CAHardPtCut = cms.double( 0.0 ),
    maxChi2 = cms.PSet( 
      value2 = cms.double( 50.0 ),
      value1 = cms.double( 200.0 ),
      pt1 = cms.double( 0.7 ),
      enabled = cms.bool( True ),
      pt2 = cms.double( 2.0 )
    ),
    SeedComparitorPSet = cms.PSet( 
      clusterShapeHitFilter = cms.string( "ClusterShapeHitFilter" ),
      ComponentName = cms.string( "LowPtClusterShapeSeedComparitor" ),
      clusterShapeCacheSrc = cms.InputTag( "hltSiPixelClustersCache" )
    )
)
# Fits the CA quadruplets into pixel tracks (helix-projection fitter,
# kinematic filter, shared-hit cleaning).
process.hltIterL3MuonPixelTracks = cms.EDProducer( "PixelTrackProducer",
    passLabel = cms.string( "" ),
    SeedingHitSets = cms.InputTag( "hltIterL3MuonPixelTracksHitQuadruplets" ),
    Fitter = cms.InputTag( "hltIterL3MuonPixelTracksFitter" ),
    Filter = cms.InputTag( "hltIterL3MuonPixelTracksFilter" ),
    Cleaner = cms.string( "hltPixelTracksCleanerBySharedHits" )
)
# Reconstructs pixel vertices from the IO-muon pixel tracks with the divisive
# vertex finder (tracks with pT > 1 GeV, >= 2 tracks per vertex).
process.hltIterL3MuonPixelVertices = cms.EDProducer( "PixelVertexProducer",
    WtAverage = cms.bool( True ),
    ZOffset = cms.double( 5.0 ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Verbosity = cms.int32( 0 ),
    UseError = cms.bool( True ),
    TrackCollection = cms.InputTag( "hltIterL3MuonPixelTracks" ),
    ZSeparation = cms.double( 0.05 ),
    NTrkMin = cms.int32( 2 ),
    Method2 = cms.bool( True ),
    Finder = cms.string( "DivisiveVertexFinder" ),
    PtMin = cms.double( 1.0 ),
    PVcomparer = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPvClusterComparerForIT" ) )
)
# Trims the pixel-vertex collection to at most 100 vertices, ranked by the
# PVcomparer (sumPt2-based) criterion.
process.hltIterL3MuonTrimmedPixelVertices = cms.EDProducer( "PixelVertexCollectionTrimmer",
    src = cms.InputTag( "hltIterL3MuonPixelVertices" ),
    maxVtx = cms.uint32( 100 ),
    fractionSumPt2 = cms.double( 0.3 ),
    minSumPt2 = cms.double( 0.0 ),
    PVcomparer = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPvClusterComparerForIT" ) )
)
# Iteration-0 seeding: converts the IO-muon pixel tracks into tracker seeds;
# events without a vertex are kept (useEventsWithNoVertex=True).
process.hltIter0IterL3MuonPixelSeedsFromPixelTracks = cms.EDProducer( "SeedGeneratorFromProtoTracksEDProducer",
    InputCollection = cms.InputTag( "hltIterL3MuonPixelTracks" ),
    InputVertexCollection = cms.InputTag( "hltIterL3MuonTrimmedPixelVertices" ),
    originHalfLength = cms.double( 0.3 ),
    originRadius = cms.double( 0.1 ),
    useProtoTrackKinematics = cms.bool( False ),
    useEventsWithNoVertex = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    usePV = cms.bool( False ),
    includeFourthHit = cms.bool( False ),
    SeedCreatorPSet = cms.PSet(  refToPSet_ = cms.string( "HLTSeedFromProtoTracks" ) )
)
# Iteration-0 CKF pattern recognition from the pixel-track seeds, with
# seeding-region rebuilding and hit splitting enabled.
process.hltIter0IterL3MuonCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( True ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( False ),
    useHitsSplitting = cms.bool( True ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" ),
    src = cms.InputTag( "hltIter0IterL3MuonPixelSeedsFromPixelTracks" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter0IterL3MuonPSetGroupedCkfTrajectoryBuilderIT" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet( 
      propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "none" ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 100000 ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final fit of the iteration-0 candidates using the simplified parabolic
# magnetic field; tracks are tagged with AlgorithmName "hltIter0".
process.hltIter0IterL3MuonCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( True ),
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    src = cms.InputTag( "hltIter0IterL3MuonCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "hltIter0" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "" ),
    MeasurementTracker = cms.string( "" ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" )
)
# Cut-based quality classification of the iteration-0 tracks.  Unlike the OI
# classifier, vertices ARE used (ignoreVertices=False, trimmed pixel
# vertices).  Cut vectors hold the (loose, medium, tight) thresholds.
process.hltIter0IterL3MuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIter0IterL3MuonCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "hltIterL3MuonTrimmedPixelVertices" ),
    ignoreVertices = cms.bool( False ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet( 
      minPixelHits = cms.vint32( 0, 3, 4 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
      dr_par = cms.PSet( 
        d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
        dr_par2 = cms.vdouble( 0.3, 0.3, 3.40282346639E38 ),
        dr_par1 = cms.vdouble( 0.4, 0.4, 3.40282346639E38 ),
        dr_exp = cms.vint32( 4, 4, 2147483647 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
      ),
      maxLostLayers = cms.vint32( 1, 1, 1 ),
      min3DLayers = cms.vint32( 0, 3, 4 ),
      dz_par = cms.PSet( 
        dz_par1 = cms.vdouble( 0.4, 0.4, 3.40282346639E38 ),
        dz_par2 = cms.vdouble( 0.35, 0.35, 3.40282346639E38 ),
        dz_exp = cms.vint32( 4, 4, 2147483647 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 3.40282346639E38, 3.40282346639E38, 3.40282346639E38 ),
      maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 3, 4 )
    )
)
# Keeps only iteration-0 tracks passing the "highPurity" quality mask
# assigned by "hltIter0IterL3MuonTrackCutClassifier".
process.hltIter0IterL3MuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIter0IterL3MuonCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIter0IterL3MuonTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIter0IterL3MuonTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# Iteration-2 input masking: marks the pixel/strip clusters already used by
# the high-purity iteration-0 tracks so iteration 2 does not reuse them.
process.hltIter2IterL3MuonClustersRefRemoval = cms.EDProducer( "TrackClusterRemover",
    trajectories = cms.InputTag( "hltIter0IterL3MuonTrackSelectionHighPurity" ),
    trackClassifier = cms.InputTag( '','QualityMasks' ),
    pixelClusters = cms.InputTag( "hltSiPixelClusters" ),
    stripClusters = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    oldClusterRemovalInfo = cms.InputTag( "" ),
    TrackQuality = cms.string( "highPurity" ),
    maxChi2 = cms.double( 16.0 ),
    minNumberOfLayersWithMeasBeforeFiltering = cms.int32( 0 ),
    overrideTrkQuals = cms.InputTag( "" )
)
# MeasurementTrackerEvent for iteration 2, with the clusters flagged by
# "hltIter2IterL3MuonClustersRefRemoval" masked out.
process.hltIter2IterL3MuonMaskedMeasurementTrackerEvent = cms.EDProducer( "MaskedMeasurementTrackerEventProducer",
    src = cms.InputTag( "hltSiStripClusters" ),
    OnDemand = cms.bool( False ),
    clustersToSkip = cms.InputTag( "hltIter2IterL3MuonClustersRefRemoval" )
)
# Seeding-layer sets (pixel triplets) for iteration 2; clusters used in
# iteration 0 are skipped via "hltIter2IterL3MuonClustersRefRemoval".
process.hltIter2IterL3MuonPixelLayerTriplets = cms.EDProducer( "SeedingLayersEDProducer",
    layerList = cms.vstring( 'BPix1+BPix2+BPix3',
      'BPix2+BPix3+BPix4',
      'BPix1+BPix3+BPix4',
      'BPix1+BPix2+BPix4',
      'BPix2+BPix3+FPix1_pos',
      'BPix2+BPix3+FPix1_neg',
      'BPix1+BPix2+FPix1_pos',
      'BPix1+BPix2+FPix1_neg',
      'BPix2+FPix1_pos+FPix2_pos',
      'BPix2+FPix1_neg+FPix2_neg',
      'BPix1+FPix1_pos+FPix2_pos',
      'BPix1+FPix1_neg+FPix2_neg',
      'FPix1_pos+FPix2_pos+FPix3_pos',
      'FPix1_neg+FPix2_neg+FPix3_neg',
      'BPix1+BPix3+FPix1_pos',
      'BPix1+BPix2+FPix2_pos',
      'BPix1+BPix3+FPix1_neg',
      'BPix1+BPix2+FPix2_neg',
      'BPix1+FPix2_neg+FPix3_neg',
      'BPix1+FPix1_neg+FPix3_neg',
      'BPix1+FPix2_pos+FPix3_pos',
      'BPix1+FPix1_pos+FPix3_pos' ),
    BPix = cms.PSet( 
      hitErrorRPhi = cms.double( 0.0027 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      skipClusters = cms.InputTag( "hltIter2IterL3MuonClustersRefRemoval" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.006 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    FPix = cms.PSet( 
      hitErrorRPhi = cms.double( 0.0051 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      skipClusters = cms.InputTag( "hltIter2IterL3MuonClustersRefRemoval" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.0036 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    TIB = cms.PSet(  ),
    TID = cms.PSet(  ),
    TOB = cms.PSet(  ),
    TEC = cms.PSet(  ),
    MTIB = cms.PSet(  ),
    MTID = cms.PSet(  ),
    MTOB = cms.PSet(  ),
    MTEC = cms.PSet(  )
)
process.hltIter2IterL3MuonPixelClusterCheck = cms.EDProducer( "ClusterCheckerEDProducer",
doClusterCheck = cms.bool( False ),
MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
MaxNumberOfPixelClusters = cms.uint32( 10000 ),
PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
cut = cms.string( "" ),
silentClusterCheck = cms.untracked.bool( False )
)
process.hltIter2IterL3MuonPixelHitDoublets = cms.EDProducer( "HitPairEDProducer",
seedingLayers = cms.InputTag( "hltIter2IterL3MuonPixelLayerTriplets" ),
trackingRegions = cms.InputTag( "hltIterL3MuonPixelTracksTrackingRegions" ),
trackingRegionsSeedingLayers = cms.InputTag( "" ),
clusterCheck = cms.InputTag( "hltIter2IterL3MuonPixelClusterCheck" ),
produceSeedingHitSets = cms.bool( False ),
produceIntermediateHitDoublets = cms.bool( True ),
maxElement = cms.uint32( 0 ),
maxElementTotal = cms.uint32( 50000000 ),
layerPairs = cms.vuint32( 0, 1 )
)
# Cellular-automaton triplet building from the iter2 doublets, with
# pt-dependent chi2 selection.
process.hltIter2IterL3MuonPixelHitTriplets = cms.EDProducer( "CAHitTripletEDProducer",
    doublets = cms.InputTag( "hltIter2IterL3MuonPixelHitDoublets" ),
    extraHitRPhitolerance = cms.double( 0.032 ),
    useBendingCorrection = cms.bool( True ),
    CAThetaCut = cms.double( 0.015 ),
    CAPhiCut = cms.double( 0.1 ),
    CAThetaCut_byTriplets = cms.VPSet(
      cms.PSet( seedingLayers = cms.string( "" ),
        cut = cms.double( -1.0 )
      )
    ),
    CAPhiCut_byTriplets = cms.VPSet(
      cms.PSet( seedingLayers = cms.string( "" ),
        cut = cms.double( -1.0 )
      )
    ),
    CAHardPtCut = cms.double( 0.3 ),
    maxChi2 = cms.PSet(
      value2 = cms.double( 6.0 ),
      value1 = cms.double( 100.0 ),
      pt1 = cms.double( 0.8 ),
      enabled = cms.bool( True ),
      pt2 = cms.double( 8.0 )
    ),
    SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
)
# Trajectory seeds built from the CA triplets.
process.hltIter2IterL3MuonPixelSeeds = cms.EDProducer( "SeedCreatorFromRegionConsecutiveHitsTripletOnlyEDProducer",
    seedingHitSets = cms.InputTag( "hltIter2IterL3MuonPixelHitTriplets" ),
    propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
    SeedMomentumForBOFF = cms.double( 5.0 ),
    OriginTransverseErrorMultiplier = cms.double( 1.0 ),
    MinOneOverPtError = cms.double( 1.0 ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    magneticField = cms.string( "ParabolicMf" ),
    forceKinematicWithRegionDirection = cms.bool( False ),
    SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
)
# Combinatorial Kalman-filter pattern recognition on the iter2 seeds,
# using the masked measurement-tracker event (iter0 hits excluded).
process.hltIter2IterL3MuonCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( False ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTrackerEvent = cms.InputTag( "hltIter2IterL3MuonMaskedMeasurementTrackerEvent" ),
    src = cms.InputTag( "hltIter2IterL3MuonPixelSeeds" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter2IterL3MuonPSetGroupedCkfTrajectoryBuilderIT" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 100000 ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final track fit of the iter2 candidates (labelled algo "hltIter2").
process.hltIter2IterL3MuonCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( True ),
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    src = cms.InputTag( "hltIter2IterL3MuonCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "hltIter2" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "" ),
    MeasurementTracker = cms.string( "" ),
    MeasurementTrackerEvent = cms.InputTag( "hltIter2IterL3MuonMaskedMeasurementTrackerEvent" )
)
# Cut-based track quality classification for the iter2 tracks
# (loose/tight/high-purity working points via qualityCuts).
process.hltIter2IterL3MuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIter2IterL3MuonCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "hltIterL3MuonTrimmedPixelVertices" ),
    ignoreVertices = cms.bool( False ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet(
      minPixelHits = cms.vint32( 0, 0, 0 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
      dr_par = cms.PSet(
        d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
        dr_par2 = cms.vdouble( 3.40282346639E38, 0.3, 3.40282346639E38 ),
        dr_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
        dr_exp = cms.vint32( 4, 4, 2147483647 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
      ),
      maxLostLayers = cms.vint32( 1, 1, 1 ),
      min3DLayers = cms.vint32( 0, 0, 0 ),
      dz_par = cms.PSet(
        dz_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
        dz_par2 = cms.vdouble( 3.40282346639E38, 0.35, 3.40282346639E38 ),
        dz_exp = cms.vint32( 4, 4, 2147483647 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 9999.0, 25.0, 3.40282346639E38 ),
      maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 3, 3 )
    )
)
# Keep only the tracks passing the high-purity working point.
process.hltIter2IterL3MuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIter2IterL3MuonCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIter2IterL3MuonTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIter2IterL3MuonTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# Merge iter0 and iter2 high-purity track collections, resolving duplicates
# by shared-hit fraction.
process.hltIter2IterL3MuonMerged = cms.EDProducer( "TrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    FoundHitBonus = cms.double( 5.0 ),
    LostHitPenalty = cms.double( 20.0 ),
    MinPT = cms.double( 0.05 ),
    Epsilon = cms.double( -0.001 ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    MinFound = cms.int32( 3 ),
    TrackProducers = cms.VInputTag( 'hltIter0IterL3MuonTrackSelectionHighPurity','hltIter2IterL3MuonTrackSelectionHighPurity' ),
    hasSelector = cms.vint32( 0, 0 ),
    indivShareFrac = cms.vdouble( 1.0, 1.0 ),
    selectedTrackQuals = cms.VInputTag( 'hltIter0IterL3MuonTrackSelectionHighPurity','hltIter2IterL3MuonTrackSelectionHighPurity' ),
    setsToMerge = cms.VPSet(
      cms.PSet( pQual = cms.bool( False ),
        tLists = cms.vint32( 0, 1 )
      )
    ),
    trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    copyExtras = cms.untracked.bool( True ),
    writeOnlyTrkQuals = cms.bool( False ),
    copyMVA = cms.bool( False )
)
# --- IterL3 muon tracking, iteration 3 (pixel-pair seeding) ---
# Additionally mask the clusters used by the iter2 high-purity tracks,
# chaining onto the iter2 removal info.
process.hltIter3IterL3MuonClustersRefRemoval = cms.EDProducer( "TrackClusterRemover",
    trajectories = cms.InputTag( "hltIter2IterL3MuonTrackSelectionHighPurity" ),
    trackClassifier = cms.InputTag( '','QualityMasks' ),
    pixelClusters = cms.InputTag( "hltSiPixelClusters" ),
    stripClusters = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    oldClusterRemovalInfo = cms.InputTag( "hltIter2IterL3MuonClustersRefRemoval" ),
    TrackQuality = cms.string( "highPurity" ),
    maxChi2 = cms.double( 16.0 ),
    minNumberOfLayersWithMeasBeforeFiltering = cms.int32( 0 ),
    overrideTrkQuals = cms.InputTag( "" )
)
# Measurement-tracker event with the iter3-masked clusters excluded.
process.hltIter3IterL3MuonMaskedMeasurementTrackerEvent = cms.EDProducer( "MaskedMeasurementTrackerEventProducer",
    src = cms.InputTag( "hltSiStripClusters" ),
    OnDemand = cms.bool( False ),
    clustersToSkip = cms.InputTag( "hltIter3IterL3MuonClustersRefRemoval" )
)
# Pixel layer *pairs* (looser than iter2's triplets) for iteration 3.
process.hltIter3IterL3MuonPixelLayerPairs = cms.EDProducer( "SeedingLayersEDProducer",
    layerList = cms.vstring( 'BPix1+BPix2',
      'BPix1+BPix3',
      'BPix1+BPix4',
      'BPix2+BPix3',
      'BPix2+BPix4',
      'BPix3+BPix4',
      'BPix1+FPix1_pos',
      'BPix1+FPix1_neg',
      'BPix1+FPix2_pos',
      'BPix1+FPix2_neg',
      'BPix1+FPix3_pos',
      'BPix1+FPix3_neg',
      'BPix2+FPix1_pos',
      'BPix2+FPix1_neg',
      'BPix2+FPix2_pos',
      'BPix2+FPix2_neg',
      'BPix3+FPix1_pos',
      'BPix3+FPix1_neg',
      'FPix1_pos+FPix2_pos',
      'FPix1_neg+FPix2_neg',
      'FPix1_pos+FPix3_pos',
      'FPix1_neg+FPix3_neg',
      'FPix2_pos+FPix3_pos',
      'FPix2_neg+FPix3_neg' ),
    BPix = cms.PSet(
      hitErrorRPhi = cms.double( 0.0027 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      skipClusters = cms.InputTag( "hltIter3IterL3MuonClustersRefRemoval" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.006 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    FPix = cms.PSet(
      hitErrorRPhi = cms.double( 0.0051 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      skipClusters = cms.InputTag( "hltIter3IterL3MuonClustersRefRemoval" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.0036 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    TIB = cms.PSet(  ),
    TID = cms.PSet(  ),
    TOB = cms.PSet(  ),
    TEC = cms.PSet(  ),
    MTIB = cms.PSet(  ),
    MTID = cms.PSet(  ),
    MTOB = cms.PSet(  ),
    MTEC = cms.PSet(  )
)
# Charged candidates built from the selected L2 muons; these direct the
# iter3 tracking regions.
process.hltIter3IterL3MuonL2Candidates = cms.EDProducer( "ConcreteChargedCandidateProducer",
    src = cms.InputTag( "hltL2SelectorForL3IO" ),
    particleType = cms.string( "mu+" )
)
# Narrow tracking regions around the L2 muon candidates (ptMin 2 GeV).
# NOTE: 'zErrorVetex' is the parameter's actual (historically misspelled)
# name in this producer — do not "correct" it.
process.hltIter3IterL3MuonTrackingRegions = cms.EDProducer( "CandidateSeededTrackingRegionsEDProducer",
    RegionPSet = cms.PSet(
      vertexCollection = cms.InputTag( "notUsed" ),
      zErrorVetex = cms.double( 0.2 ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      zErrorBeamSpot = cms.double( 24.2 ),
      maxNVertices = cms.int32( 1 ),
      maxNRegions = cms.int32( 2 ),
      nSigmaZVertex = cms.double( 3.0 ),
      nSigmaZBeamSpot = cms.double( 4.0 ),
      ptMin = cms.double( 2.0 ),
      mode = cms.string( "BeamSpotSigma" ),
      input = cms.InputTag( "hltIter3IterL3MuonL2Candidates" ),
      searchOpt = cms.bool( False ),
      whereToUseMeasurementTracker = cms.string( "Never" ),
      originRadius = cms.double( 0.015 ),
      measurementTrackerName = cms.InputTag( "" ),
      precise = cms.bool( True ),
      deltaEta = cms.double( 0.1 ),
      deltaPhi = cms.double( 0.1 )
    )
)
# Cluster-multiplicity sanity check; disabled here (doClusterCheck = False).
process.hltIter3IterL3MuonPixelClusterCheck = cms.EDProducer( "ClusterCheckerEDProducer",
    doClusterCheck = cms.bool( False ),
    MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
    ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
    MaxNumberOfPixelClusters = cms.uint32( 10000 ),
    PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
    cut = cms.string( "" ),
    silentClusterCheck = cms.untracked.bool( False )
)
# Hit doublets used directly as seeding hit sets (no CA step in iter3).
process.hltIter3IterL3MuonPixelHitDoublets = cms.EDProducer( "HitPairEDProducer",
    seedingLayers = cms.InputTag( "hltIter3IterL3MuonPixelLayerPairs" ),
    trackingRegions = cms.InputTag( "hltIter3IterL3MuonTrackingRegions" ),
    trackingRegionsSeedingLayers = cms.InputTag( "" ),
    clusterCheck = cms.InputTag( "hltIter3IterL3MuonPixelClusterCheck" ),
    produceSeedingHitSets = cms.bool( True ),
    produceIntermediateHitDoublets = cms.bool( False ),
    maxElement = cms.uint32( 0 ),
    maxElementTotal = cms.uint32( 50000000 ),
    layerPairs = cms.vuint32( 0 )
)
# Trajectory seeds from the pixel-pair hit sets.
process.hltIter3IterL3MuonPixelSeeds = cms.EDProducer( "SeedCreatorFromRegionConsecutiveHitsEDProducer",
    seedingHitSets = cms.InputTag( "hltIter3IterL3MuonPixelHitDoublets" ),
    propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
    SeedMomentumForBOFF = cms.double( 5.0 ),
    OriginTransverseErrorMultiplier = cms.double( 1.0 ),
    MinOneOverPtError = cms.double( 1.0 ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    magneticField = cms.string( "ParabolicMf" ),
    forceKinematicWithRegionDirection = cms.bool( False ),
    SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
)
# CKF pattern recognition on the iter3 seeds; reuses the iter2 trajectory
# builder PSet (by reference) with the iter3-masked measurement event.
process.hltIter3IterL3MuonCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( False ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTrackerEvent = cms.InputTag( "hltIter3IterL3MuonMaskedMeasurementTrackerEvent" ),
    src = cms.InputTag( "hltIter3IterL3MuonPixelSeeds" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter2IterL3MuonPSetGroupedCkfTrajectoryBuilderIT" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 100000 ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final track fit of the iter3 candidates (labelled algo "hltIter3").
process.hltIter3IterL3MuonCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( True ),
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    src = cms.InputTag( "hltIter3IterL3MuonCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "hltIter3" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "" ),
    MeasurementTracker = cms.string( "" ),
    MeasurementTrackerEvent = cms.InputTag( "hltIter3IterL3MuonMaskedMeasurementTrackerEvent" )
)
# Cut-based quality classification of the iter3 tracks (same cut values
# as the iter2 classifier).
process.hltIter3IterL3MuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIter3IterL3MuonCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "hltIterL3MuonTrimmedPixelVertices" ),
    ignoreVertices = cms.bool( False ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet(
      minPixelHits = cms.vint32( 0, 0, 0 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
      dr_par = cms.PSet(
        d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
        dr_par2 = cms.vdouble( 3.40282346639E38, 0.3, 3.40282346639E38 ),
        dr_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
        dr_exp = cms.vint32( 4, 4, 2147483647 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
      ),
      maxLostLayers = cms.vint32( 1, 1, 1 ),
      min3DLayers = cms.vint32( 0, 0, 0 ),
      dz_par = cms.PSet(
        dz_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
        dz_par2 = cms.vdouble( 3.40282346639E38, 0.35, 3.40282346639E38 ),
        dz_exp = cms.vint32( 4, 4, 2147483647 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 9999.0, 25.0, 3.40282346639E38 ),
      maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 3, 3 )
    )
)
# Keep only the iter3 tracks passing the high-purity working point.
process.hltIter3IterL3MuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIter3IterL3MuonCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIter3IterL3MuonTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIter3IterL3MuonTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# Merge the iter0+iter2 merge output with the iter3 high-purity tracks.
process.hltIter3IterL3MuonMerged = cms.EDProducer( "TrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    FoundHitBonus = cms.double( 5.0 ),
    LostHitPenalty = cms.double( 20.0 ),
    MinPT = cms.double( 0.05 ),
    Epsilon = cms.double( -0.001 ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    MinFound = cms.int32( 3 ),
    TrackProducers = cms.VInputTag( 'hltIter2IterL3MuonMerged','hltIter3IterL3MuonTrackSelectionHighPurity' ),
    hasSelector = cms.vint32( 0, 0 ),
    indivShareFrac = cms.vdouble( 1.0, 1.0 ),
    selectedTrackQuals = cms.VInputTag( 'hltIter2IterL3MuonMerged','hltIter3IterL3MuonTrackSelectionHighPurity' ),
    setsToMerge = cms.VPSet(
      cms.PSet( pQual = cms.bool( False ),
        tLists = cms.vint32( 0, 1 )
      )
    ),
    trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    copyExtras = cms.untracked.bool( True ),
    writeOnlyTrkQuals = cms.bool( False ),
    copyMVA = cms.bool( False )
)
# Inside-out L3 muon reconstruction: matches the merged iterative tracker
# tracks (hltIter3IterL3MuonMerged) to the vertex-updated L2 muons and
# performs the combined (global) refit.
process.hltL3MuonsIterL3IO = cms.EDProducer( "L3MuonProducer",
    ServiceParameters = cms.PSet(
      RPCLayers = cms.bool( True ),
      UseMuonNavigation = cms.untracked.bool( True ),
      Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',
        'SteppingHelixPropagatorAny',
        'hltESPSmartPropagator',
        'hltESPSteppingHelixPropagatorOpposite' )
    ),
    MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
    TrackLoaderParameters = cms.PSet(
      MuonSeededTracksInstance = cms.untracked.string( "L2Seeded" ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      DoSmoothing = cms.bool( False ),
      SmoothTkTrack = cms.untracked.bool( False ),
      VertexConstraint = cms.bool( False ),
      MuonUpdatorAtVertexParameters = cms.PSet(
        MaxChi2 = cms.double( 1000000.0 ),
        BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 ),
        Propagator = cms.string( "hltESPSteppingHelixPropagatorOpposite" )
      ),
      PutTkTrackIntoEvent = cms.untracked.bool( False ),
      Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
    ),
    L3TrajBuilderParameters = cms.PSet(
      PtCut = cms.double( 1.0 ),
      TrackerPropagator = cms.string( "SteppingHelixPropagatorAny" ),
      # Chi2/deltaR/deltaD matching windows between tracker tracks and L2
      # muons, in three pt/quality tiers.
      GlobalMuonTrackMatcher = cms.PSet(
        Chi2Cut_3 = cms.double( 200.0 ),
        DeltaDCut_2 = cms.double( 10.0 ),
        Eta_threshold = cms.double( 1.2 ),
        Quality_2 = cms.double( 15.0 ),
        DeltaDCut_1 = cms.double( 40.0 ),
        Quality_3 = cms.double( 7.0 ),
        DeltaDCut_3 = cms.double( 15.0 ),
        Quality_1 = cms.double( 20.0 ),
        Pt_threshold1 = cms.double( 0.0 ),
        DeltaRCut_2 = cms.double( 0.2 ),
        DeltaRCut_1 = cms.double( 0.1 ),
        Pt_threshold2 = cms.double( 9.99999999E8 ),
        Chi2Cut_1 = cms.double( 50.0 ),
        Chi2Cut_2 = cms.double( 50.0 ),
        DeltaRCut_3 = cms.double( 1.0 ),
        LocChi2Cut = cms.double( 0.001 ),
        Propagator = cms.string( "hltESPSmartPropagator" ),
        MinPt = cms.double( 1.0 ),
        MinP = cms.double( 2.5 )
      ),
      ScaleTECxFactor = cms.double( -1.0 ),
      tkTrajUseVertex = cms.bool( False ),
      MuonTrackingRegionBuilder = cms.PSet(
        Rescale_Dz = cms.double( 4.0 ),
        Pt_fixed = cms.bool( True ),
        Eta_fixed = cms.bool( True ),
        Eta_min = cms.double( 0.1 ),
        DeltaZ = cms.double( 24.2 ),
        maxRegions = cms.int32( 2 ),
        EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
        UseVertex = cms.bool( False ),
        Z_fixed = cms.bool( True ),
        PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
        PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
        Rescale_phi = cms.double( 3.0 ),
        DeltaEta = cms.double( 0.04 ),
        precise = cms.bool( True ),
        OnDemand = cms.int32( -1 ),
        EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
        MeasurementTrackerName = cms.InputTag( "hltESPMeasurementTracker" ),
        vertexCollection = cms.InputTag( "pixelVertices" ),
        Pt_min = cms.double( 3.0 ),
        beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
        Phi_fixed = cms.bool( True ),
        DeltaR = cms.double( 0.025 ),
        input = cms.InputTag( "hltL2SelectorForL3IO" ),
        DeltaPhi = cms.double( 0.15 ),
        Phi_min = cms.double( 0.1 ),
        Rescale_eta = cms.double( 3.0 )
      ),
      TrackTransformer = cms.PSet(
        Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
        RefitDirection = cms.string( "insideOut" ),
        RefitRPCHits = cms.bool( True ),
        Propagator = cms.string( "hltESPSmartPropagatorAny" ),
        DoPredictionsOnly = cms.bool( False ),
        TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
        MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
        Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
      ),
      tkTrajBeamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      RefitRPCHits = cms.bool( True ),
      tkTrajVertex = cms.InputTag( "hltIterL3MuonPixelVertices" ),
      # Global refit settings: per-subdetector chi2 cuts for DT/CSC/RPC/GEM
      # hits entering the combined fit.
      GlbRefitterParameters = cms.PSet(
        Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
        DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
        RefitFlag = cms.bool( True ),
        SkipStation = cms.int32( -1 ),
        Chi2CutRPC = cms.double( 1.0 ),
        PropDirForCosmics = cms.bool( False ),
        CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
        GEMRecHitLabel = cms.InputTag( "hltGemRecHits" ),
        HitThreshold = cms.int32( 1 ),
        Chi2CutGEM = cms.double( 1.0 ),
        DYTthrs = cms.vint32( 30, 15 ),
        TrackerSkipSystem = cms.int32( -1 ),
        RefitDirection = cms.string( "insideOut" ),
        Chi2CutCSC = cms.double( 150.0 ),
        Chi2CutDT = cms.double( 10.0 ),
        RefitRPCHits = cms.bool( True ),
        TrackerSkipSection = cms.int32( -1 ),
        Propagator = cms.string( "hltESPSmartPropagatorAny" ),
        DoPredictionsOnly = cms.bool( False ),
        TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
        MuonHitsOption = cms.int32( 1 ),
        MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
      ),
      PCut = cms.double( 2.5 ),
      tkTrajMaxDXYBeamSpot = cms.double( 9999.0 ),
      TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      matchToSeeds = cms.bool( True ),
      tkTrajMaxChi2 = cms.double( 9999.0 ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
      ScaleTECyFactor = cms.double( -1.0 ),
      tkTrajLabel = cms.InputTag( "hltIter3IterL3MuonMerged" )
    )
)
# Combine the track links from the outside-in (OI) and inside-out (IO)
# L3 reconstruction passes.
process.hltIterL3MuonsFromL2LinksCombination = cms.EDProducer( "L3TrackLinksCombiner",
    labels = cms.VInputTag( 'hltL3MuonsIterL3OI','hltL3MuonsIterL3IO' )
)
# --- Tracker-only ("FromL1") muon reconstruction path ---
# Select L1 muons with quality >= 7 and no pt requirement (L1MinPt = -1).
process.hltL1MuonsPt0 = cms.EDProducer( "HLTL1TMuonSelector",
    InputObjects = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1MinPt = cms.double( -1.0 ),
    L1MaxEta = cms.double( 5.0 ),
    L1MinQuality = cms.uint32( 7 ),
    CentralBxOnly = cms.bool( True )
)
# Tracking regions seeded directly by the selected L1 muons (wider windows
# and higher ptMin than the L2-seeded regions). NOTE: 'zErrorVetex' is the
# parameter's actual (historically misspelled) name — do not "correct" it.
process.hltIterL3FromL1MuonPixelTracksTrackingRegions = cms.EDProducer( "CandidateSeededTrackingRegionsEDProducer",
    RegionPSet = cms.PSet(
      vertexCollection = cms.InputTag( "notUsed" ),
      zErrorVetex = cms.double( 0.2 ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      zErrorBeamSpot = cms.double( 24.2 ),
      maxNVertices = cms.int32( 1 ),
      maxNRegions = cms.int32( 2 ),
      nSigmaZVertex = cms.double( 3.0 ),
      nSigmaZBeamSpot = cms.double( 4.0 ),
      ptMin = cms.double( 10.0 ),
      mode = cms.string( "BeamSpotSigma" ),
      input = cms.InputTag( "hltL1MuonsPt0" ),
      searchOpt = cms.bool( False ),
      whereToUseMeasurementTracker = cms.string( "Never" ),
      originRadius = cms.double( 0.2 ),
      measurementTrackerName = cms.InputTag( "" ),
      precise = cms.bool( True ),
      deltaEta = cms.double( 0.35 ),
      deltaPhi = cms.double( 0.2 )
    )
)
# Pixel layer quadruplets for the FromL1 pixel-track seeding.
process.hltIterL3FromL1MuonPixelLayerQuadruplets = cms.EDProducer( "SeedingLayersEDProducer",
    layerList = cms.vstring( 'BPix1+BPix2+BPix3+BPix4',
      'BPix1+BPix2+BPix3+FPix1_pos',
      'BPix1+BPix2+BPix3+FPix1_neg',
      'BPix1+BPix2+FPix1_pos+FPix2_pos',
      'BPix1+BPix2+FPix1_neg+FPix2_neg',
      'BPix1+FPix1_pos+FPix2_pos+FPix3_pos',
      'BPix1+FPix1_neg+FPix2_neg+FPix3_neg' ),
    BPix = cms.PSet(
      hitErrorRPhi = cms.double( 0.0027 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.006 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    FPix = cms.PSet(
      hitErrorRPhi = cms.double( 0.0051 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.0036 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    TIB = cms.PSet(  ),
    TID = cms.PSet(  ),
    TOB = cms.PSet(  ),
    TEC = cms.PSet(  ),
    MTIB = cms.PSet(  ),
    MTID = cms.PSet(  ),
    MTOB = cms.PSet(  ),
    MTEC = cms.PSet(  )
)
# Hit doublets from the quadruplet layers inside the L1-seeded regions;
# intermediate doublets feed the CA quadruplet finder below.
process.hltIterL3FromL1MuonPixelTracksHitDoublets = cms.EDProducer( "HitPairEDProducer",
    seedingLayers = cms.InputTag( "hltIterL3FromL1MuonPixelLayerQuadruplets" ),
    trackingRegions = cms.InputTag( "hltIterL3FromL1MuonPixelTracksTrackingRegions" ),
    trackingRegionsSeedingLayers = cms.InputTag( "" ),
    clusterCheck = cms.InputTag( "" ),
    produceSeedingHitSets = cms.bool( False ),
    produceIntermediateHitDoublets = cms.bool( True ),
    maxElement = cms.uint32( 0 ),
    maxElementTotal = cms.uint32( 50000000 ),
    layerPairs = cms.vuint32( 0, 1, 2 )
)
# Cellular-automaton quadruplet building with cluster-shape filtering.
process.hltIterL3FromL1MuonPixelTracksHitQuadruplets = cms.EDProducer( "CAHitQuadrupletEDProducer",
    doublets = cms.InputTag( "hltIterL3FromL1MuonPixelTracksHitDoublets" ),
    extraHitRPhitolerance = cms.double( 0.032 ),
    fitFastCircle = cms.bool( True ),
    fitFastCircleChi2Cut = cms.bool( True ),
    useBendingCorrection = cms.bool( True ),
    CAThetaCut = cms.double( 0.005 ),
    CAPhiCut = cms.double( 0.2 ),
    CAThetaCut_byTriplets = cms.VPSet(
      cms.PSet( seedingLayers = cms.string( "" ),
        cut = cms.double( -1.0 )
      )
    ),
    CAPhiCut_byTriplets = cms.VPSet(
      cms.PSet( seedingLayers = cms.string( "" ),
        cut = cms.double( -1.0 )
      )
    ),
    CAHardPtCut = cms.double( 0.0 ),
    maxChi2 = cms.PSet(
      value2 = cms.double( 50.0 ),
      value1 = cms.double( 200.0 ),
      pt1 = cms.double( 0.7 ),
      enabled = cms.bool( True ),
      pt2 = cms.double( 2.0 )
    ),
    SeedComparitorPSet = cms.PSet(
      clusterShapeHitFilter = cms.string( "ClusterShapeHitFilter" ),
      ComponentName = cms.string( "LowPtClusterShapeSeedComparitor" ),
      clusterShapeCacheSrc = cms.InputTag( "hltSiPixelClustersCache" )
    )
)
# Pixel tracks from the CA quadruplets; reuses the IterL3Muon fitter/filter.
process.hltIterL3FromL1MuonPixelTracks = cms.EDProducer( "PixelTrackProducer",
    passLabel = cms.string( "" ),
    SeedingHitSets = cms.InputTag( "hltIterL3FromL1MuonPixelTracksHitQuadruplets" ),
    Fitter = cms.InputTag( "hltIterL3MuonPixelTracksFitter" ),
    Filter = cms.InputTag( "hltIterL3MuonPixelTracksFilter" ),
    Cleaner = cms.string( "hltPixelTracksCleanerBySharedHits" )
)
# Pixel vertices for the FromL1 path.
# NOTE(review): TrackCollection points at "hltIterL3MuonPixelTracks" (the
# L2-seeded collection), not "hltIterL3FromL1MuonPixelTracks" — this matches
# deployed HLT menus, but confirm against the intended ConfDB configuration.
process.hltIterL3FromL1MuonPixelVertices = cms.EDProducer( "PixelVertexProducer",
    WtAverage = cms.bool( True ),
    ZOffset = cms.double( 5.0 ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Verbosity = cms.int32( 0 ),
    UseError = cms.bool( True ),
    TrackCollection = cms.InputTag( "hltIterL3MuonPixelTracks" ),
    ZSeparation = cms.double( 0.05 ),
    NTrkMin = cms.int32( 2 ),
    Method2 = cms.bool( True ),
    Finder = cms.string( "DivisiveVertexFinder" ),
    PtMin = cms.double( 1.0 ),
    PVcomparer = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPvClusterComparerForIT" ) )
)
# Keep at most 100 vertices, trimmed by summed-pt2 criteria.
process.hltIterL3FromL1MuonTrimmedPixelVertices = cms.EDProducer( "PixelVertexCollectionTrimmer",
    src = cms.InputTag( "hltIterL3FromL1MuonPixelVertices" ),
    maxVtx = cms.uint32( 100 ),
    fractionSumPt2 = cms.double( 0.3 ),
    minSumPt2 = cms.double( 0.0 ),
    PVcomparer = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPvClusterComparerForIT" ) )
)
# Iter0 (FromL1): trajectory seeds derived from the FromL1 pixel tracks.
process.hltIter0IterL3FromL1MuonPixelSeedsFromPixelTracks = cms.EDProducer( "SeedGeneratorFromProtoTracksEDProducer",
    InputCollection = cms.InputTag( "hltIterL3FromL1MuonPixelTracks" ),
    InputVertexCollection = cms.InputTag( "hltIterL3FromL1MuonTrimmedPixelVertices" ),
    originHalfLength = cms.double( 0.3 ),
    originRadius = cms.double( 0.1 ),
    useProtoTrackKinematics = cms.bool( False ),
    useEventsWithNoVertex = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    usePV = cms.bool( False ),
    includeFourthHit = cms.bool( False ),
    SeedCreatorPSet = cms.PSet(  refToPSet_ = cms.string( "HLTSeedFromProtoTracks" ) )
)
# Iter0 (FromL1): CKF pattern recognition with region rebuilding and hit
# splitting enabled, on the unmasked measurement-tracker event.
process.hltIter0IterL3FromL1MuonCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( True ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( False ),
    useHitsSplitting = cms.bool( True ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" ),
    src = cms.InputTag( "hltIter0IterL3FromL1MuonPixelSeedsFromPixelTracks" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter0IterL3FromL1MuonPSetGroupedCkfTrajectoryBuilderIT" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "none" ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 100000 ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final track fit of the FromL1 iter0 candidates (labelled algo "hltIter0").
process.hltIter0IterL3FromL1MuonCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( True ),
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    src = cms.InputTag( "hltIter0IterL3FromL1MuonCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "hltIter0" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "" ),
    MeasurementTracker = cms.string( "" ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" )
)
# Cut-based quality classification of the FromL1 iter0 tracks; tighter
# pixel-hit/layer requirements than the iter2/iter3 classifiers.
process.hltIter0IterL3FromL1MuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIter0IterL3FromL1MuonCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "hltIterL3FromL1MuonTrimmedPixelVertices" ),
    ignoreVertices = cms.bool( False ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet(
      minPixelHits = cms.vint32( 0, 3, 4 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
      dr_par = cms.PSet(
        d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
        dr_par2 = cms.vdouble( 0.3, 0.3, 3.40282346639E38 ),
        dr_par1 = cms.vdouble( 0.4, 0.4, 3.40282346639E38 ),
        dr_exp = cms.vint32( 4, 4, 2147483647 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
      ),
      maxLostLayers = cms.vint32( 1, 1, 1 ),
      min3DLayers = cms.vint32( 0, 3, 4 ),
      dz_par = cms.PSet(
        dz_par1 = cms.vdouble( 0.4, 0.4, 3.40282346639E38 ),
        dz_par2 = cms.vdouble( 0.35, 0.35, 3.40282346639E38 ),
        dz_exp = cms.vint32( 4, 4, 2147483647 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 3.40282346639E38, 3.40282346639E38, 3.40282346639E38 ),
      maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 3, 4 )
    )
)
# Keep only the FromL1 iter0 tracks passing the high-purity working point.
process.hltIter0IterL3FromL1MuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIter0IterL3FromL1MuonCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIter0IterL3FromL1MuonTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIter0IterL3FromL1MuonTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# Iter2 (FromL1): mask the clusters used by the FromL1 iter0 high-purity
# tracks before the next seeding iteration.
process.hltIter2IterL3FromL1MuonClustersRefRemoval = cms.EDProducer( "TrackClusterRemover",
    trajectories = cms.InputTag( "hltIter0IterL3FromL1MuonTrackSelectionHighPurity" ),
    trackClassifier = cms.InputTag( '','QualityMasks' ),
    pixelClusters = cms.InputTag( "hltSiPixelClusters" ),
    stripClusters = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    oldClusterRemovalInfo = cms.InputTag( "" ),
    TrackQuality = cms.string( "highPurity" ),
    maxChi2 = cms.double( 16.0 ),
    minNumberOfLayersWithMeasBeforeFiltering = cms.int32( 0 ),
    overrideTrkQuals = cms.InputTag( "" )
)
# Measurement-tracker event with the FromL1 iter2-masked clusters excluded.
process.hltIter2IterL3FromL1MuonMaskedMeasurementTrackerEvent = cms.EDProducer( "MaskedMeasurementTrackerEventProducer",
    src = cms.InputTag( "hltSiStripClusters" ),
    OnDemand = cms.bool( False ),
    clustersToSkip = cms.InputTag( "hltIter2IterL3FromL1MuonClustersRefRemoval" )
)
process.hltIter2IterL3FromL1MuonPixelLayerTriplets = cms.EDProducer( "SeedingLayersEDProducer",
layerList = cms.vstring( 'BPix1+BPix2+BPix3',
'BPix2+BPix3+BPix4',
'BPix1+BPix3+BPix4',
'BPix1+BPix2+BPix4',
'BPix2+BPix3+FPix1_pos',
'BPix2+BPix3+FPix1_neg',
'BPix1+BPix2+FPix1_pos',
'BPix1+BPix2+FPix1_neg',
'BPix2+FPix1_pos+FPix2_pos',
'BPix2+FPix1_neg+FPix2_neg',
'BPix1+FPix1_pos+FPix2_pos',
'BPix1+FPix1_neg+FPix2_neg',
'FPix1_pos+FPix2_pos+FPix3_pos',
'FPix1_neg+FPix2_neg+FPix3_neg',
'BPix1+BPix3+FPix1_pos',
'BPix1+BPix2+FPix2_pos',
'BPix1+BPix3+FPix1_neg',
'BPix1+BPix2+FPix2_neg',
'BPix1+FPix2_neg+FPix3_neg',
'BPix1+FPix1_neg+FPix3_neg',
'BPix1+FPix2_pos+FPix3_pos',
'BPix1+FPix1_pos+FPix3_pos' ),
BPix = cms.PSet(
hitErrorRPhi = cms.double( 0.0027 ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
skipClusters = cms.InputTag( "hltIter2IterL3FromL1MuonClustersRefRemoval" ),
useErrorsFromParam = cms.bool( True ),
hitErrorRZ = cms.double( 0.006 ),
HitProducer = cms.string( "hltSiPixelRecHits" )
),
FPix = cms.PSet(
hitErrorRPhi = cms.double( 0.0051 ),
TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
skipClusters = cms.InputTag( "hltIter2IterL3FromL1MuonClustersRefRemoval" ),
useErrorsFromParam = cms.bool( True ),
hitErrorRZ = cms.double( 0.0036 ),
HitProducer = cms.string( "hltSiPixelRecHits" )
),
TIB = cms.PSet( ),
TID = cms.PSet( ),
TOB = cms.PSet( ),
TEC = cms.PSet( ),
MTIB = cms.PSet( ),
MTID = cms.PSet( ),
MTOB = cms.PSet( ),
MTEC = cms.PSet( )
)
# Cluster-multiplicity guard for iter2 seeding. The check itself is disabled
# (doClusterCheck = False); the limits below are inert until it is enabled.
process.hltIter2IterL3FromL1MuonPixelClusterCheck = cms.EDProducer( "ClusterCheckerEDProducer",
    doClusterCheck = cms.bool( False ),
    MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
    ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
    MaxNumberOfPixelClusters = cms.uint32( 10000 ),
    PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
    cut = cms.string( "" ),
    silentClusterCheck = cms.untracked.bool( False )
)
# Hit doublets from the iter2 triplet layer sets, restricted to the tracking
# regions built around L1 muons. Produces intermediate doublets (not final
# seeding hit sets) because the CA triplet finder consumes them next.
process.hltIter2IterL3FromL1MuonPixelHitDoublets = cms.EDProducer( "HitPairEDProducer",
    seedingLayers = cms.InputTag( "hltIter2IterL3FromL1MuonPixelLayerTriplets" ),
    trackingRegions = cms.InputTag( "hltIterL3FromL1MuonPixelTracksTrackingRegions" ),
    trackingRegionsSeedingLayers = cms.InputTag( "" ),
    clusterCheck = cms.InputTag( "hltIter2IterL3FromL1MuonPixelClusterCheck" ),
    produceSeedingHitSets = cms.bool( False ),
    produceIntermediateHitDoublets = cms.bool( True ),
    maxElement = cms.uint32( 0 ),
    maxElementTotal = cms.uint32( 50000000 ),
    layerPairs = cms.vuint32( 0, 1 )
)
# Cellular-automaton triplet finder on the iter2 doublets, with a
# pt-dependent chi2 cut (value1 at pt1 .. value2 at pt2) and a hard
# 0.3 GeV pt floor. The per-triplet cut overrides are disabled (cut = -1).
process.hltIter2IterL3FromL1MuonPixelHitTriplets = cms.EDProducer( "CAHitTripletEDProducer",
    doublets = cms.InputTag( "hltIter2IterL3FromL1MuonPixelHitDoublets" ),
    extraHitRPhitolerance = cms.double( 0.032 ),
    useBendingCorrection = cms.bool( True ),
    CAThetaCut = cms.double( 0.015 ),
    CAPhiCut = cms.double( 0.1 ),
    CAThetaCut_byTriplets = cms.VPSet( 
      cms.PSet(  seedingLayers = cms.string( "" ),
        cut = cms.double( -1.0 )
      )
    ),
    CAPhiCut_byTriplets = cms.VPSet( 
      cms.PSet(  seedingLayers = cms.string( "" ),
        cut = cms.double( -1.0 )
      )
    ),
    CAHardPtCut = cms.double( 0.3 ),
    maxChi2 = cms.PSet( 
      value2 = cms.double( 6.0 ),
      value1 = cms.double( 100.0 ),
      pt1 = cms.double( 0.8 ),
      enabled = cms.bool( True ),
      pt2 = cms.double( 8.0 )
    ),
    SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
)
# Turn the CA triplets into trajectory seeds (parabolic magnetic-field
# approximation, no seed comparitor).
process.hltIter2IterL3FromL1MuonPixelSeeds = cms.EDProducer( "SeedCreatorFromRegionConsecutiveHitsTripletOnlyEDProducer",
    seedingHitSets = cms.InputTag( "hltIter2IterL3FromL1MuonPixelHitTriplets" ),
    propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
    SeedMomentumForBOFF = cms.double( 5.0 ),
    OriginTransverseErrorMultiplier = cms.double( 1.0 ),
    MinOneOverPtError = cms.double( 1.0 ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    magneticField = cms.string( "ParabolicMf" ),
    forceKinematicWithRegionDirection = cms.bool( False ),
    SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
)
# CKF pattern recognition on the iter2 pixel seeds, using the masked
# measurement-tracker event so clusters claimed by iter0 are ignored.
process.hltIter2IterL3FromL1MuonCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( False ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTrackerEvent = cms.InputTag( "hltIter2IterL3FromL1MuonMaskedMeasurementTrackerEvent" ),
    src = cms.InputTag( "hltIter2IterL3FromL1MuonPixelSeeds" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter2IterL3FromL1MuonPSetGroupedCkfTrajectoryBuilderIT" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet( 
      propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 100000 ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final Kalman fit of the iter2 candidates (parabolic field, algo label
# "hltIter2"); trajectories are not stored in the event.
process.hltIter2IterL3FromL1MuonCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( True ),
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    src = cms.InputTag( "hltIter2IterL3FromL1MuonCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "hltIter2" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "" ),
    MeasurementTracker = cms.string( "" ),
    MeasurementTrackerEvent = cms.InputTag( "hltIter2IterL3FromL1MuonMaskedMeasurementTrackerEvent" )
)
# Cut-based quality classification of the iter2 tracks. Each vector holds the
# (loose, tight, highPurity) thresholds; 3.40282346639E38 / 2147483647 entries
# mean "no cut" for that column.
process.hltIter2IterL3FromL1MuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIter2IterL3FromL1MuonCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "hltIterL3FromL1MuonTrimmedPixelVertices" ),
    ignoreVertices = cms.bool( False ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet( 
      minPixelHits = cms.vint32( 0, 0, 0 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
      dr_par = cms.PSet( 
        d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
        dr_par2 = cms.vdouble( 3.40282346639E38, 0.3, 3.40282346639E38 ),
        dr_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
        dr_exp = cms.vint32( 4, 4, 2147483647 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
      ),
      maxLostLayers = cms.vint32( 1, 1, 1 ),
      min3DLayers = cms.vint32( 0, 0, 0 ),
      dz_par = cms.PSet( 
        dz_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
        dz_par2 = cms.vdouble( 3.40282346639E38, 0.35, 3.40282346639E38 ),
        dz_exp = cms.vint32( 4, 4, 2147483647 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 9999.0, 25.0, 3.40282346639E38 ),
      maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 3, 3 )
    )
)
# Keep only the iter2 tracks classified as "highPurity" above.
process.hltIter2IterL3FromL1MuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIter2IterL3FromL1MuonCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIter2IterL3FromL1MuonTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIter2IterL3FromL1MuonTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# Merge the iter0 and iter2 high-purity track collections of the
# L1-muon-seeded chain, resolving duplicates by shared-hit fraction
# (ShareFrac) with hit bonus/penalty scoring; merged tracks are promoted
# to the "confirmed" quality.
process.hltIter2IterL3FromL1MuonMerged = cms.EDProducer( "TrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    FoundHitBonus = cms.double( 5.0 ),
    LostHitPenalty = cms.double( 20.0 ),
    MinPT = cms.double( 0.05 ),
    Epsilon = cms.double( -0.001 ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    MinFound = cms.int32( 3 ),
    TrackProducers = cms.VInputTag( 'hltIter0IterL3FromL1MuonTrackSelectionHighPurity','hltIter2IterL3FromL1MuonTrackSelectionHighPurity' ),
    hasSelector = cms.vint32( 0, 0 ),
    indivShareFrac = cms.vdouble( 1.0, 1.0 ),
    selectedTrackQuals = cms.VInputTag( 'hltIter0IterL3FromL1MuonTrackSelectionHighPurity','hltIter2IterL3FromL1MuonTrackSelectionHighPurity' ),
    setsToMerge = cms.VPSet( 
      cms.PSet(  pQual = cms.bool( False ),
        tLists = cms.vint32( 0, 1 )
      )
    ),
    trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    copyExtras = cms.untracked.bool( True ),
    writeOnlyTrkQuals = cms.bool( False ),
    copyMVA = cms.bool( False )
)
# Iteration 3 (L1-muon-seeded chain), step 1: additionally mask the clusters
# used by the iter2 high-purity tracks, chaining onto the iter2 removal info.
process.hltIter3IterL3FromL1MuonClustersRefRemoval = cms.EDProducer( "TrackClusterRemover",
    trajectories = cms.InputTag( "hltIter2IterL3FromL1MuonTrackSelectionHighPurity" ),
    trackClassifier = cms.InputTag( '','QualityMasks' ),
    pixelClusters = cms.InputTag( "hltSiPixelClusters" ),
    stripClusters = cms.InputTag( "hltSiStripRawToClustersFacility" ),
    oldClusterRemovalInfo = cms.InputTag( "hltIter2IterL3FromL1MuonClustersRefRemoval" ),
    TrackQuality = cms.string( "highPurity" ),
    maxChi2 = cms.double( 16.0 ),
    minNumberOfLayersWithMeasBeforeFiltering = cms.int32( 0 ),
    overrideTrkQuals = cms.InputTag( "" )
)
# Measurement-tracker event with the cumulative iter3 cluster mask applied.
process.hltIter3IterL3FromL1MuonMaskedMeasurementTrackerEvent = cms.EDProducer( "MaskedMeasurementTrackerEventProducer",
    src = cms.InputTag( "hltSiStripClusters" ),
    OnDemand = cms.bool( False ),
    clustersToSkip = cms.InputTag( "hltIter3IterL3FromL1MuonClustersRefRemoval" )
)
# Pixel layer *pairs* (iter3 seeds from doublets, unlike iter2's triplets);
# hits use fixed parametrized errors and skip the iter3-masked clusters.
process.hltIter3IterL3FromL1MuonPixelLayerPairs = cms.EDProducer( "SeedingLayersEDProducer",
    layerList = cms.vstring( 'BPix1+BPix2',
      'BPix1+BPix3',
      'BPix1+BPix4',
      'BPix2+BPix3',
      'BPix2+BPix4',
      'BPix3+BPix4',
      'BPix1+FPix1_pos',
      'BPix1+FPix1_neg',
      'BPix1+FPix2_pos',
      'BPix1+FPix2_neg',
      'BPix1+FPix3_pos',
      'BPix1+FPix3_neg',
      'BPix2+FPix1_pos',
      'BPix2+FPix1_neg',
      'BPix2+FPix2_pos',
      'BPix2+FPix2_neg',
      'BPix3+FPix1_pos',
      'BPix3+FPix1_neg',
      'FPix1_pos+FPix2_pos',
      'FPix1_neg+FPix2_neg',
      'FPix1_pos+FPix3_pos',
      'FPix1_neg+FPix3_neg',
      'FPix2_pos+FPix3_pos',
      'FPix2_neg+FPix3_neg' ),
    BPix = cms.PSet( 
      hitErrorRPhi = cms.double( 0.0027 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      skipClusters = cms.InputTag( "hltIter3IterL3FromL1MuonClustersRefRemoval" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.006 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    FPix = cms.PSet( 
      hitErrorRPhi = cms.double( 0.0051 ),
      TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
      skipClusters = cms.InputTag( "hltIter3IterL3FromL1MuonClustersRefRemoval" ),
      useErrorsFromParam = cms.bool( True ),
      hitErrorRZ = cms.double( 0.0036 ),
      HitProducer = cms.string( "hltSiPixelRecHits" )
    ),
    TIB = cms.PSet(  ),
    TID = cms.PSet(  ),
    TOB = cms.PSet(  ),
    TEC = cms.PSet(  ),
    MTIB = cms.PSet(  ),
    MTID = cms.PSet(  ),
    MTOB = cms.PSet(  ),
    MTEC = cms.PSet(  )
)
# Narrow eta/phi tracking regions (deltaEta 0.2, deltaPhi 0.1) built around
# the L1 muon candidates (hltL1MuonsPt0), with the z extent taken from the
# beam-spot sigma; pt > 10 GeV, tiny origin radius, at most 2 regions.
process.hltIter3IterL3FromL1MuonTrackingRegions = cms.EDProducer( "CandidateSeededTrackingRegionsEDProducer",
    RegionPSet = cms.PSet( 
      vertexCollection = cms.InputTag( "notUsed" ),
      # NOTE(review): "zErrorVetex" (sic) is presumably the exact parameter
      # name the producer declares upstream — confirm before "fixing" the
      # spelling, since renaming it here would break configuration validation.
      zErrorVetex = cms.double( 0.2 ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      zErrorBeamSpot = cms.double( 24.2 ),
      maxNVertices = cms.int32( 1 ),
      maxNRegions = cms.int32( 2 ),
      nSigmaZVertex = cms.double( 3.0 ),
      nSigmaZBeamSpot = cms.double( 4.0 ),
      ptMin = cms.double( 10.0 ),
      mode = cms.string( "BeamSpotSigma" ),
      input = cms.InputTag( "hltL1MuonsPt0" ),
      searchOpt = cms.bool( False ),
      whereToUseMeasurementTracker = cms.string( "Never" ),
      originRadius = cms.double( 0.015 ),
      measurementTrackerName = cms.InputTag( "" ),
      precise = cms.bool( True ),
      deltaEta = cms.double( 0.2 ),
      deltaPhi = cms.double( 0.1 )
    )
)
# Cluster-multiplicity guard for iter3 seeding (disabled, as for iter2).
process.hltIter3IterL3FromL1MuonPixelClusterCheck = cms.EDProducer( "ClusterCheckerEDProducer",
    doClusterCheck = cms.bool( False ),
    MaxNumberOfCosmicClusters = cms.uint32( 50000 ),
    ClusterCollectionLabel = cms.InputTag( "hltSiStripClusters" ),
    MaxNumberOfPixelClusters = cms.uint32( 10000 ),
    PixelClusterCollectionLabel = cms.InputTag( "hltSiPixelClusters" ),
    cut = cms.string( "" ),
    silentClusterCheck = cms.untracked.bool( False )
)
# Hit doublets from the iter3 layer pairs inside the L1-muon regions; here the
# doublets themselves are the final seeding hit sets (no CA step follows).
process.hltIter3IterL3FromL1MuonPixelHitDoublets = cms.EDProducer( "HitPairEDProducer",
    seedingLayers = cms.InputTag( "hltIter3IterL3FromL1MuonPixelLayerPairs" ),
    trackingRegions = cms.InputTag( "hltIter3IterL3FromL1MuonTrackingRegions" ),
    trackingRegionsSeedingLayers = cms.InputTag( "" ),
    clusterCheck = cms.InputTag( "hltIter3IterL3FromL1MuonPixelClusterCheck" ),
    produceSeedingHitSets = cms.bool( True ),
    produceIntermediateHitDoublets = cms.bool( False ),
    maxElement = cms.uint32( 0 ),
    maxElementTotal = cms.uint32( 50000000 ),
    layerPairs = cms.vuint32( 0 )
)
# Turn the doublets into trajectory seeds (pair-based seed creator).
process.hltIter3IterL3FromL1MuonPixelSeeds = cms.EDProducer( "SeedCreatorFromRegionConsecutiveHitsEDProducer",
    seedingHitSets = cms.InputTag( "hltIter3IterL3FromL1MuonPixelHitDoublets" ),
    propagator = cms.string( "PropagatorWithMaterialParabolicMf" ),
    SeedMomentumForBOFF = cms.double( 5.0 ),
    OriginTransverseErrorMultiplier = cms.double( 1.0 ),
    MinOneOverPtError = cms.double( 1.0 ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    magneticField = cms.string( "ParabolicMf" ),
    forceKinematicWithRegionDirection = cms.bool( False ),
    SeedComparitorPSet = cms.PSet(  ComponentName = cms.string( "none" ) )
)
# CKF pattern recognition on the iter3 pixel seeds over the iter3-masked
# measurement event.
process.hltIter3IterL3FromL1MuonCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( False ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTrackerEvent = cms.InputTag( "hltIter3IterL3FromL1MuonMaskedMeasurementTrackerEvent" ),
    src = cms.InputTag( "hltIter3IterL3FromL1MuonPixelSeeds" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    # NOTE(review): this references the Iter2 "IterL3Muon" builder PSet, not an
    # "IterL3FromL1Muon" one as the naming pattern elsewhere would suggest —
    # presumably a deliberate reuse of the same builder settings; confirm.
    TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter2IterL3MuonPSetGroupedCkfTrajectoryBuilderIT" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet( 
      propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 100000 ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final Kalman fit of the iter3 candidates (algo label "hltIter3").
process.hltIter3IterL3FromL1MuonCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( True ),
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    src = cms.InputTag( "hltIter3IterL3FromL1MuonCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "hltIter3" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "" ),
    MeasurementTracker = cms.string( "" ),
    MeasurementTrackerEvent = cms.InputTag( "hltIter3IterL3FromL1MuonMaskedMeasurementTrackerEvent" )
)
# Cut-based quality classification of the iter3 tracks; same cut layout as the
# iter2 classifier above (loose, tight, highPurity columns).
process.hltIter3IterL3FromL1MuonTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIter3IterL3FromL1MuonCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "hltIterL3FromL1MuonTrimmedPixelVertices" ),
    ignoreVertices = cms.bool( False ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet( 
      minPixelHits = cms.vint32( 0, 0, 0 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 100.0 ),
      dr_par = cms.PSet( 
        d0err = cms.vdouble( 0.003, 0.003, 3.40282346639E38 ),
        dr_par2 = cms.vdouble( 3.40282346639E38, 0.3, 3.40282346639E38 ),
        dr_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
        dr_exp = cms.vint32( 4, 4, 2147483647 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 3.40282346639E38 )
      ),
      maxLostLayers = cms.vint32( 1, 1, 1 ),
      min3DLayers = cms.vint32( 0, 0, 0 ),
      dz_par = cms.PSet( 
        dz_par1 = cms.vdouble( 3.40282346639E38, 0.4, 3.40282346639E38 ),
        dz_par2 = cms.vdouble( 3.40282346639E38, 0.35, 3.40282346639E38 ),
        dz_exp = cms.vint32( 4, 4, 2147483647 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 9999.0, 25.0, 3.40282346639E38 ),
      maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 3, 3 )
    )
)
# Keep only the iter3 tracks classified as "highPurity" above.
process.hltIter3IterL3FromL1MuonTrackSelectionHighPurity = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIter3IterL3FromL1MuonCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIter3IterL3FromL1MuonTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIter3IterL3FromL1MuonTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# Merge the iter0+iter2 merged collection with the iter3 high-purity tracks,
# completing the L1-muon-seeded iterative-tracking sequence.
process.hltIter3IterL3FromL1MuonMerged = cms.EDProducer( "TrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    FoundHitBonus = cms.double( 5.0 ),
    LostHitPenalty = cms.double( 20.0 ),
    MinPT = cms.double( 0.05 ),
    Epsilon = cms.double( -0.001 ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    MinFound = cms.int32( 3 ),
    TrackProducers = cms.VInputTag( 'hltIter2IterL3FromL1MuonMerged','hltIter3IterL3FromL1MuonTrackSelectionHighPurity' ),
    hasSelector = cms.vint32( 0, 0 ),
    indivShareFrac = cms.vdouble( 1.0, 1.0 ),
    selectedTrackQuals = cms.VInputTag( 'hltIter2IterL3FromL1MuonMerged','hltIter3IterL3FromL1MuonTrackSelectionHighPurity' ),
    setsToMerge = cms.VPSet( 
      cms.PSet(  pQual = cms.bool( False ),
        tLists = cms.vint32( 0, 1 )
      )
    ),
    trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    copyExtras = cms.untracked.bool( True ),
    writeOnlyTrkQuals = cms.bool( False ),
    copyMVA = cms.bool( False )
)
# Merge the outside-in (OI) muon-seeded tracks with the inside-out iter3
# merged collection of the L2-seeded chain.
process.hltIterL3MuonMerged = cms.EDProducer( "TrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    FoundHitBonus = cms.double( 5.0 ),
    LostHitPenalty = cms.double( 20.0 ),
    MinPT = cms.double( 0.05 ),
    Epsilon = cms.double( -0.001 ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    MinFound = cms.int32( 3 ),
    TrackProducers = cms.VInputTag( 'hltIterL3OIMuonTrackSelectionHighPurity','hltIter3IterL3MuonMerged' ),
    hasSelector = cms.vint32( 0, 0 ),
    indivShareFrac = cms.vdouble( 1.0, 1.0 ),
    selectedTrackQuals = cms.VInputTag( 'hltIterL3OIMuonTrackSelectionHighPurity','hltIter3IterL3MuonMerged' ),
    setsToMerge = cms.VPSet( 
      cms.PSet(  pQual = cms.bool( False ),
        tLists = cms.vint32( 0, 1 )
      )
    ),
    trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    copyExtras = cms.untracked.bool( True ),
    writeOnlyTrkQuals = cms.bool( False ),
    copyMVA = cms.bool( False )
)
# Final track merge: combine the L2-seeded (hltIterL3MuonMerged) and
# L1-seeded (hltIter3IterL3FromL1MuonMerged) collections; this is the tracker
# input to the global L3 muon reconstruction below.
process.hltIterL3MuonAndMuonFromL1Merged = cms.EDProducer( "TrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    FoundHitBonus = cms.double( 5.0 ),
    LostHitPenalty = cms.double( 20.0 ),
    MinPT = cms.double( 0.05 ),
    Epsilon = cms.double( -0.001 ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    MinFound = cms.int32( 3 ),
    TrackProducers = cms.VInputTag( 'hltIterL3MuonMerged','hltIter3IterL3FromL1MuonMerged' ),
    hasSelector = cms.vint32( 0, 0 ),
    indivShareFrac = cms.vdouble( 1.0, 1.0 ),
    selectedTrackQuals = cms.VInputTag( 'hltIterL3MuonMerged','hltIter3IterL3FromL1MuonMerged' ),
    setsToMerge = cms.VPSet( 
      cms.PSet(  pQual = cms.bool( False ),
        tLists = cms.vint32( 0, 1 )
      )
    ),
    trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    copyExtras = cms.untracked.bool( True ),
    writeOnlyTrkQuals = cms.bool( False ),
    copyMVA = cms.bool( False )
)
# Global L3 muon reconstruction: match the L2 standalone muons (the
# "UpdatedAtVtx" collection) to the merged tracker tracks
# (hltIterL3MuonAndMuonFromL1Merged) and perform a combined tracker+muon
# refit, loading the result with smoothing enabled.
process.hltIterL3GlbMuon = cms.EDProducer( "L3MuonProducer",
    ServiceParameters = cms.PSet( 
      RPCLayers = cms.bool( True ),
      UseMuonNavigation = cms.untracked.bool( True ),
      Propagators = cms.untracked.vstring( 'hltESPSmartPropagatorAny',
        'SteppingHelixPropagatorAny',
        'hltESPSmartPropagator',
        'hltESPSteppingHelixPropagatorOpposite' )
    ),
    MuonCollectionLabel = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
    # How the refitted global tracks are written back into the event.
    TrackLoaderParameters = cms.PSet( 
      MuonSeededTracksInstance = cms.untracked.string( "L2Seeded" ),
      TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      DoSmoothing = cms.bool( True ),
      SmoothTkTrack = cms.untracked.bool( False ),
      VertexConstraint = cms.bool( False ),
      MuonUpdatorAtVertexParameters = cms.PSet( 
        MaxChi2 = cms.double( 1000000.0 ),
        BeamSpotPositionErrors = cms.vdouble( 0.1, 0.1, 5.3 ),
        Propagator = cms.string( "hltESPSteppingHelixPropagatorOpposite" )
      ),
      PutTkTrackIntoEvent = cms.untracked.bool( False ),
      Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
    ),
    # Core of the global reconstruction: L2<->tracker track matching cuts,
    # regional track selection, and the global refit configuration.
    L3TrajBuilderParameters = cms.PSet( 
      PtCut = cms.double( 1.0 ),
      TrackerPropagator = cms.string( "SteppingHelixPropagatorAny" ),
      # Matching cuts between the L2 muon and tracker-track candidates;
      # the _1/_2/_3 suffixes are successive matching criteria.
      GlobalMuonTrackMatcher = cms.PSet( 
        Chi2Cut_3 = cms.double( 200.0 ),
        DeltaDCut_2 = cms.double( 10.0 ),
        Eta_threshold = cms.double( 1.2 ),
        Quality_2 = cms.double( 15.0 ),
        DeltaDCut_1 = cms.double( 40.0 ),
        Quality_3 = cms.double( 7.0 ),
        DeltaDCut_3 = cms.double( 15.0 ),
        Quality_1 = cms.double( 20.0 ),
        Pt_threshold1 = cms.double( 0.0 ),
        DeltaRCut_2 = cms.double( 0.2 ),
        DeltaRCut_1 = cms.double( 0.1 ),
        Pt_threshold2 = cms.double( 9.99999999E8 ),
        Chi2Cut_1 = cms.double( 50.0 ),
        Chi2Cut_2 = cms.double( 50.0 ),
        DeltaRCut_3 = cms.double( 1.0 ),
        LocChi2Cut = cms.double( 0.001 ),
        Propagator = cms.string( "hltESPSmartPropagator" ),
        MinPt = cms.double( 1.0 ),
        MinP = cms.double( 2.5 )
      ),
      ScaleTECxFactor = cms.double( -1.0 ),
      tkTrajUseVertex = cms.bool( False ),
      # Region around each L2 muon in which tracker tracks are considered
      # (fixed eta/phi window, beam-spot z constraint).
      MuonTrackingRegionBuilder = cms.PSet( 
        Rescale_Dz = cms.double( 4.0 ),
        Pt_fixed = cms.bool( False ),
        Eta_fixed = cms.bool( True ),
        Eta_min = cms.double( 0.1 ),
        DeltaZ = cms.double( 24.2 ),
        maxRegions = cms.int32( 2 ),
        EtaR_UpperLimit_Par1 = cms.double( 0.25 ),
        UseVertex = cms.bool( False ),
        Z_fixed = cms.bool( False ),
        PhiR_UpperLimit_Par1 = cms.double( 0.6 ),
        PhiR_UpperLimit_Par2 = cms.double( 0.2 ),
        Rescale_phi = cms.double( 3.0 ),
        DeltaEta = cms.double( 0.2 ),
        precise = cms.bool( True ),
        OnDemand = cms.int32( -1 ),
        EtaR_UpperLimit_Par2 = cms.double( 0.15 ),
        MeasurementTrackerName = cms.InputTag( "hltESPMeasurementTracker" ),
        vertexCollection = cms.InputTag( "pixelVertices" ),
        Pt_min = cms.double( 3.0 ),
        beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
        Phi_fixed = cms.bool( True ),
        DeltaR = cms.double( 0.025 ),
        input = cms.InputTag( 'hltL2Muons','UpdatedAtVtx' ),
        DeltaPhi = cms.double( 0.15 ),
        Phi_min = cms.double( 0.1 ),
        Rescale_eta = cms.double( 3.0 )
      ),
      TrackTransformer = cms.PSet( 
        Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
        RefitDirection = cms.string( "insideOut" ),
        RefitRPCHits = cms.bool( True ),
        Propagator = cms.string( "hltESPSmartPropagatorAny" ),
        DoPredictionsOnly = cms.bool( False ),
        TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
        MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
        Smoother = cms.string( "hltESPKFTrajectorySmootherForMuonTrackLoader" )
      ),
      tkTrajBeamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      RefitRPCHits = cms.bool( True ),
      tkTrajVertex = cms.InputTag( "Notused" ),
      # Settings for the combined tracker+muon-system refit, including
      # per-subdetector (DT/CSC/RPC/GEM) hit chi2 cuts.
      GlbRefitterParameters = cms.PSet( 
        Fitter = cms.string( "hltESPL3MuKFTrajectoryFitter" ),
        DTRecSegmentLabel = cms.InputTag( "hltDt4DSegments" ),
        RefitFlag = cms.bool( True ),
        SkipStation = cms.int32( -1 ),
        Chi2CutRPC = cms.double( 1.0 ),
        PropDirForCosmics = cms.bool( False ),
        CSCRecSegmentLabel = cms.InputTag( "hltCscSegments" ),
        GEMRecHitLabel = cms.InputTag( "hltGemRecHits" ),
        HitThreshold = cms.int32( 1 ),
        Chi2CutGEM = cms.double( 1.0 ),
        DYTthrs = cms.vint32( 30, 15 ),
        TrackerSkipSystem = cms.int32( -1 ),
        RefitDirection = cms.string( "insideOut" ),
        Chi2CutCSC = cms.double( 150.0 ),
        Chi2CutDT = cms.double( 10.0 ),
        RefitRPCHits = cms.bool( True ),
        TrackerSkipSection = cms.int32( -1 ),
        Propagator = cms.string( "hltESPSmartPropagatorAny" ),
        DoPredictionsOnly = cms.bool( False ),
        TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
        MuonHitsOption = cms.int32( 1 ),
        MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" )
      ),
      PCut = cms.double( 2.5 ),
      tkTrajMaxDXYBeamSpot = cms.double( 9999.0 ),
      TrackerRecHitBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
      tkTrajMaxChi2 = cms.double( 9999.0 ),
      MuonRecHitBuilder = cms.string( "hltESPMuonTransientTrackingRecHitBuilder" ),
      ScaleTECyFactor = cms.double( -1.0 ),
      tkTrajLabel = cms.InputTag( "hltIterL3MuonAndMuonFromL1Merged" )
    )
)
process.hltIterL3MuonsNoID = cms.EDProducer( "MuonIdProducer",
MuonCaloCompatibility = cms.PSet(
delta_eta = cms.double( 0.02 ),
delta_phi = cms.double( 0.02 ),
allSiPMHO = cms.bool( False ),
MuonTemplateFileName = cms.FileInPath( "RecoMuon/MuonIdentification/data/MuID_templates_muons_lowPt_3_1_norm.root" ),
PionTemplateFileName = cms.FileInPath( "RecoMuon/MuonIdentification/data/MuID_templates_pions_lowPt_3_1_norm.root" )
),
TrackAssociatorParameters = cms.PSet(
useMuon = cms.bool( True ),
truthMatch = cms.bool( False ),
usePreshower = cms.bool( False ),
dRPreshowerPreselection = cms.double( 0.2 ),
muonMaxDistanceSigmaY = cms.double( 0.0 ),
useEcal = cms.bool( False ),
muonMaxDistanceSigmaX = cms.double( 0.0 ),
dRMuon = cms.double( 9999.0 ),
dREcal = cms.double( 9999.0 ),
CSCSegmentCollectionLabel = cms.InputTag( "hltCscSegments" ),
DTRecSegment4DCollectionLabel = cms.InputTag( "hltDt4DSegments" ),
EBRecHitCollectionLabel = cms.InputTag( "Notused" ),
useGEM = cms.bool( True ),
GEMSegmentCollectionLabel = cms.InputTag( "hltGemSegments" ),
CaloTowerCollectionLabel = cms.InputTag( "Notused" ),
propagateAllDirections = cms.bool( True ),
muonMaxDistanceY = cms.double( 5.0 ),
useHO = cms.bool( False ),
muonMaxDistanceX = cms.double( 5.0 ),
trajectoryUncertaintyTolerance = cms.double( -1.0 ),
useHcal = cms.bool( False ),
HBHERecHitCollectionLabel = cms.InputTag( "Notused" ),
accountForTrajectoryChangeCalo = cms.bool( False ),
dREcalPreselection = cms.double( 0.05 ),
useCalo = cms.bool( False ),
dRMuonPreselection = cms.double( 0.2 ),
EERecHitCollectionLabel = cms.InputTag( "Notused" ),
dRHcal = cms.double( 9999.0 ),
dRHcalPreselection = cms.double( 0.2 ),
HORecHitCollectionLabel = cms.InputTag( "Notused" )
),
CaloExtractorPSet = cms.PSet(
DR_Veto_H = cms.double( 0.1 ),
CenterConeOnCalIntersection = cms.bool( False ),
NoiseTow_EE = cms.double( 0.15 ),
Noise_EB = cms.double( 0.025 ),
Noise_HE = cms.double( 0.2 ),
DR_Veto_E = cms.double( 0.07 ),
NoiseTow_EB = cms.double( 0.04 ),
Noise_EE = cms.double( 0.1 ),
UseRecHitsFlag = cms.bool( False ),
DR_Max = cms.double( 1.0 ),
DepositLabel = cms.untracked.string( "Cal" ),
Noise_HO = cms.double( 0.2 ),
DR_Veto_HO = cms.double( 0.1 ),
Threshold_H = cms.double( 0.5 ),
PrintTimeReport = cms.untracked.bool( False ),
Threshold_E = cms.double( 0.2 ),
PropagatorName = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
ComponentName = cms.string( "CaloExtractorByAssociator" ),
Threshold_HO = cms.double( 0.5 ),
DepositInstanceLabels = cms.vstring( 'ecal',
'hcal',
'ho' ),
ServiceParameters = cms.PSet(
RPCLayers = cms.bool( False ),
UseMuonNavigation = cms.untracked.bool( False ),
Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' )
),
TrackAssociatorParameters = cms.PSet(
useMuon = cms.bool( False ),
truthMatch = cms.bool( False ),
usePreshower = cms.bool( False ),
dRPreshowerPreselection = cms.double( 0.2 ),
muonMaxDistanceSigmaY = cms.double( 0.0 ),
useEcal = cms.bool( False ),
muonMaxDistanceSigmaX = cms.double( 0.0 ),
dRMuon = cms.double( 9999.0 ),
dREcal = cms.double( 1.0 ),
CSCSegmentCollectionLabel = cms.InputTag( "hltCscSegments" ),
DTRecSegment4DCollectionLabel = cms.InputTag( "hltDt4DSegments" ),
EBRecHitCollectionLabel = cms.InputTag( "Notused" ),
CaloTowerCollectionLabel = cms.InputTag( "Notused" ),
propagateAllDirections = cms.bool( True ),
muonMaxDistanceY = cms.double( 5.0 ),
useHO = cms.bool( False ),
muonMaxDistanceX = cms.double( 5.0 ),
trajectoryUncertaintyTolerance = cms.double( -1.0 ),
useHcal = cms.bool( False ),
HBHERecHitCollectionLabel = cms.InputTag( "Notused" ),
accountForTrajectoryChangeCalo = cms.bool( False ),
dREcalPreselection = cms.double( 1.0 ),
useCalo = cms.bool( True ),
dRMuonPreselection = cms.double( 0.2 ),
EERecHitCollectionLabel = cms.InputTag( "Notused" ),
dRHcal = cms.double( 1.0 ),
dRHcalPreselection = cms.double( 1.0 ),
HORecHitCollectionLabel = cms.InputTag( "Notused" )
),
Noise_HB = cms.double( 0.2 )
),
TrackExtractorPSet = cms.PSet(
Diff_z = cms.double( 0.2 ),
inputTrackCollection = cms.InputTag( "hltIter3IterL3FromL1MuonMerged" ),
Chi2Ndof_Max = cms.double( 1.0E64 ),
BeamSpotLabel = cms.InputTag( "hltOnlineBeamSpot" ),
DR_Veto = cms.double( 0.01 ),
Pt_Min = cms.double( -1.0 ),
DR_Max = cms.double( 1.0 ),
NHits_Min = cms.uint32( 0 ),
Chi2Prob_Min = cms.double( -1.0 ),
Diff_r = cms.double( 0.1 ),
BeamlineOption = cms.string( "BeamSpotFromEvent" ),
ComponentName = cms.string( "TrackExtractor" )
),
JetExtractorPSet = cms.PSet(
JetCollectionLabel = cms.InputTag( "Notused" ),
DR_Veto = cms.double( 0.1 ),
DR_Max = cms.double( 1.0 ),
ExcludeMuonVeto = cms.bool( True ),
PrintTimeReport = cms.untracked.bool( False ),
PropagatorName = cms.string( "hltESPFastSteppingHelixPropagatorAny" ),
ComponentName = cms.string( "JetExtractor" ),
ServiceParameters = cms.PSet(
RPCLayers = cms.bool( False ),
UseMuonNavigation = cms.untracked.bool( False ),
Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' )
),
TrackAssociatorParameters = cms.PSet(
useMuon = cms.bool( False ),
truthMatch = cms.bool( False ),
usePreshower = cms.bool( False ),
dRPreshowerPreselection = cms.double( 0.2 ),
muonMaxDistanceSigmaY = cms.double( 0.0 ),
useEcal = cms.bool( False ),
muonMaxDistanceSigmaX = cms.double( 0.0 ),
dRMuon = cms.double( 9999.0 ),
dREcal = cms.double( 0.5 ),
CSCSegmentCollectionLabel = cms.InputTag( "hltCscSegments" ),
DTRecSegment4DCollectionLabel = cms.InputTag( "hltDt4DSegments" ),
EBRecHitCollectionLabel = cms.InputTag( "Notused" ),
CaloTowerCollectionLabel = cms.InputTag( "Notused" ),
propagateAllDirections = cms.bool( True ),
muonMaxDistanceY = cms.double( 5.0 ),
useHO = cms.bool( False ),
muonMaxDistanceX = cms.double( 5.0 ),
trajectoryUncertaintyTolerance = cms.double( -1.0 ),
useHcal = cms.bool( False ),
HBHERecHitCollectionLabel = cms.InputTag( "Notused" ),
accountForTrajectoryChangeCalo = cms.bool( False ),
dREcalPreselection = cms.double( 0.5 ),
useCalo = cms.bool( True ),
dRMuonPreselection = cms.double( 0.2 ),
EERecHitCollectionLabel = cms.InputTag( "Notused" ),
dRHcal = cms.double( 0.5 ),
dRHcalPreselection = cms.double( 0.5 ),
HORecHitCollectionLabel = cms.InputTag( "Notused" )
),
Threshold = cms.double( 5.0 )
),
trackDepositName = cms.string( "tracker" ),
ecalDepositName = cms.string( "ecal" ),
hcalDepositName = cms.string( "hcal" ),
hoDepositName = cms.string( "ho" ),
jetDepositName = cms.string( "jets" ),
TimingFillerParameters = cms.PSet(
DTTimingParameters = cms.PSet(
HitError = cms.double( 6.0 ),
MatchParameters = cms.PSet(
TightMatchDT = cms.bool( False ),
DTradius = cms.double( 0.01 ),
TightMatchCSC = cms.bool( True ),
CSCsegments = cms.InputTag( "hltCscSegments" ),
DTsegments = cms.InputTag( "hltDt4DSegments" )
),
debug = cms.bool( False ),
DoWireCorr = cms.bool( False ),
RequireBothProjections = cms.bool( False ),
DTTimeOffset = cms.double( 2.7 ),
PruneCut = cms.double( 10000.0 ),
DTsegments = cms.InputTag( "hltDt4DSegments" ),
UseSegmentT0 = cms.bool( False ),
HitsMin = cms.int32( 5 ),
DropTheta = cms.bool( True ),
ServiceParameters = cms.PSet(
RPCLayers = cms.bool( True ),
Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' )
)
),
UseCSC = cms.bool( True ),
CSCTimingParameters = cms.PSet(
MatchParameters = cms.PSet(
TightMatchDT = cms.bool( False ),
DTradius = cms.double( 0.01 ),
TightMatchCSC = cms.bool( True ),
CSCsegments = cms.InputTag( "hltCscSegments" ),
DTsegments = cms.InputTag( "hltDt4DSegments" )
),
debug = cms.bool( False ),
CSCWireTimeOffset = cms.double( 0.0 ),
CSCStripError = cms.double( 7.0 ),
CSCTimeOffset = cms.double( 0.0 ),
CSCWireError = cms.double( 8.6 ),
PruneCut = cms.double( 100.0 ),
CSCsegments = cms.InputTag( "hltCscSegments" ),
UseStripTime = cms.bool( True ),
CSCStripTimeOffset = cms.double( 0.0 ),
UseWireTime = cms.bool( True ),
ServiceParameters = cms.PSet(
RPCLayers = cms.bool( True ),
Propagators = cms.untracked.vstring( 'hltESPFastSteppingHelixPropagatorAny' )
)
),
ErrorDT = cms.double( 6.0 ),
EcalEnergyCut = cms.double( 0.4 ),
UseECAL = cms.bool( True ),
ErrorEB = cms.double( 2.085 ),
UseDT = cms.bool( True ),
ErrorEE = cms.double( 6.95 ),
ErrorCSC = cms.double( 7.4 )
),
ShowerDigiFillerParameters = cms.PSet(
cscDigiCollectionLabel = cms.InputTag( 'muonCSCDigis','MuonCSCStripDigi' ),
digiMaxDistanceX = cms.double( 25.0 ),
dtDigiCollectionLabel = cms.InputTag( "muonDTDigis" )
),
TrackerKinkFinderParameters = cms.PSet(
usePosition = cms.bool( False ),
diagonalOnly = cms.bool( False )
),
fillEnergy = cms.bool( False ),
storeCrossedHcalRecHits = cms.bool( False ),
maxAbsPullX = cms.double( 4.0 ),
maxAbsEta = cms.double( 3.0 ),
minPt = cms.double( 2.0 ),
inputCollectionTypes = cms.vstring( 'inner tracks',
'links',
'outer tracks' ),
addExtraSoftMuons = cms.bool( False ),
fillGlobalTrackRefits = cms.bool( False ),
debugWithTruthMatching = cms.bool( False ),
inputCollectionLabels = cms.VInputTag( 'hltIterL3MuonAndMuonFromL1Merged','hltIterL3GlbMuon','hltL2Muons:UpdatedAtVtx' ),
fillCaloCompatibility = cms.bool( False ),
maxAbsPullY = cms.double( 9999.0 ),
maxAbsDy = cms.double( 9999.0 ),
minP = cms.double( 0.0 ),
minPCaloMuon = cms.double( 1.0E9 ),
maxAbsDx = cms.double( 3.0 ),
fillIsolation = cms.bool( False ),
writeIsoDeposits = cms.bool( False ),
minNumberOfMatches = cms.int32( 1 ),
fillMatching = cms.bool( True ),
fillShowerDigis = cms.bool( False ),
ptThresholdToFillCandidateP4WithGlobalFit = cms.double( 200.0 ),
sigmaThresholdToFillCandidateP4WithGlobalFit = cms.double( 2.0 ),
fillGlobalTrackQuality = cms.bool( False ),
globalTrackQualityInputTag = cms.InputTag( "glbTrackQual" ),
fillTrackerKink = cms.bool( False ),
minCaloCompatibility = cms.double( 0.6 ),
runArbitrationCleaner = cms.bool( False ),
arbitrationCleanerOptions = cms.PSet(
OverlapDTheta = cms.double( 0.02 ),
Overlap = cms.bool( True ),
Clustering = cms.bool( True ),
ME1a = cms.bool( True ),
ClusterDTheta = cms.double( 0.02 ),
ClusterDPhi = cms.double( 0.6 ),
OverlapDPhi = cms.double( 0.0786 )
),
arbitrateTrackerMuons = cms.bool( True )
)
# --- Final iterative-L3 muon output: ID filter, links, track selection, candidates ---

# Apply the "trigger loose" muon ID to the un-ID'ed L3 muons; every other
# cut here is wide open (zero thresholds / type masks), so applyTriggerIdLoose
# is the only active selection.
process.hltIterL3Muons = cms.EDProducer( "MuonIDFilterProducerForHLT",
    inputMuonCollection = cms.InputTag( "hltIterL3MuonsNoID" ),
    applyTriggerIdLoose = cms.bool( True ),
    typeMuon = cms.uint32( 0 ),
    allowedTypeMask = cms.uint32( 0 ),
    requiredTypeMask = cms.uint32( 0 ),
    minNMuonHits = cms.int32( 0 ),
    minNMuonStations = cms.int32( 0 ),
    minNTrkLayers = cms.int32( 0 ),
    minTrkHits = cms.int32( 0 ),
    minPixLayer = cms.int32( 0 ),
    minPixHits = cms.int32( 0 ),
    minPt = cms.double( 0.0 ),
    maxNormalizedChi2 = cms.double( 9999.0 )
)
# Build track<->muon link objects for the ID-selected L3 muons.
process.hltL3MuonsIterL3Links = cms.EDProducer( "MuonLinksProducer",
    inputCollection = cms.InputTag( "hltIterL3Muons" )
)
# Keep only those merged tracks that are associated with a final L3 muon.
process.hltIterL3MuonTracks = cms.EDProducer( "HLTMuonTrackSelector",
    track = cms.InputTag( "hltIterL3MuonAndMuonFromL1Merged" ),
    muon = cms.InputTag( "hltIterL3Muons" ),
    originalMVAVals = cms.InputTag( "none" ),
    copyMVA = cms.bool( False ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# Wrap the final L3 muons into RecoChargedCandidates for downstream HLT filters.
process.hltIterL3MuonCandidates = cms.EDProducer( "L3MuonCandidateProducerFromMuons",
    InputObjects = cms.InputTag( "hltIterL3Muons" )
)
# --- Pixel tracking: fitter / filter / regions, SoA->legacy conversion, pixel vertices ---

# Helix-projection pixel-track fitter (no special BPix1 error scaling).
process.hltPixelTracksFitter = cms.EDProducer( "PixelFitterByHelixProjectionsProducer",
    scaleErrorsForBPix1 = cms.bool( False ),
    scaleFactor = cms.double( 0.65 )
)
# Kinematic pre-filter for pixel tracks (very loose: pT > 0.1, |tip| < 1 cm).
process.hltPixelTracksFilter = cms.EDProducer( "PixelTrackFilterByKinematicsProducer",
    ptMin = cms.double( 0.1 ),
    nSigmaInvPtTolerance = cms.double( 0.0 ),
    tipMax = cms.double( 1.0 ),
    nSigmaTipMaxTolerance = cms.double( 0.0 ),
    chi2 = cms.double( 1000.0 )
)
# Global tracking region centred on the online beam spot (4 sigma in z).
process.hltPixelTracksTrackingRegions = cms.EDProducer( "GlobalTrackingRegionFromBeamSpotEDProducer",
    RegionPSet = cms.PSet(
      nSigmaZ = cms.double( 4.0 ),
      beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
      ptMin = cms.double( 0.8 ),
      originRadius = cms.double( 0.02 ),
      precise = cms.bool( True )
    )
)
# Convert SoA-format pixel tracks (e.g. from the GPU workflow) into legacy tracks.
process.hltPixelTracks = cms.EDProducer( "PixelTrackProducerFromSoA",
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    trackSrc = cms.InputTag( "hltPixelTracksSoA" ),
    pixelRecHitLegacySrc = cms.InputTag( "hltSiPixelRecHits" ),
    minNumberOfHits = cms.int32( 0 ),
    minQuality = cms.string( "loose" )
)
# Convert SoA-format pixel vertices into the legacy vertex collection.
process.hltPixelVertices = cms.EDProducer( "PixelVertexProducerFromSoA",
    TrackCollection = cms.InputTag( "hltPixelTracks" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    src = cms.InputTag( "hltPixelVerticesSoA" )
)
# Keep at most 100 pixel vertices, trimmed by sum-pT^2 fraction.
process.hltTrimmedPixelVertices = cms.EDProducer( "PixelVertexCollectionTrimmer",
    src = cms.InputTag( "hltPixelVertices" ),
    maxVtx = cms.uint32( 100 ),
    fractionSumPt2 = cms.double( 0.3 ),
    minSumPt2 = cms.double( 0.0 ),
    PVcomparer = cms.PSet(  refToPSet_ = cms.string( "HLTPSetPvClusterComparerForIT" ) )
)
# --- Iteration-0 full tracking: pixel-track seeds -> CKF candidates -> fit -> quality cuts ---

# Seed the iter0 pattern recognition from the (trimmed-vertex-compatible) pixel tracks.
process.hltIter0PFLowPixelSeedsFromPixelTracks = cms.EDProducer( "SeedGeneratorFromProtoTracksEDProducer",
    InputCollection = cms.InputTag( "hltPixelTracks" ),
    InputVertexCollection = cms.InputTag( "hltTrimmedPixelVertices" ),
    originHalfLength = cms.double( 0.3 ),
    originRadius = cms.double( 0.1 ),
    useProtoTrackKinematics = cms.bool( False ),
    useEventsWithNoVertex = cms.bool( True ),
    TTRHBuilder = cms.string( "hltESPTTRHBuilderPixelOnly" ),
    usePV = cms.bool( False ),
    includeFourthHit = cms.bool( True ),
    SeedCreatorPSet = cms.PSet(  refToPSet_ = cms.string( "HLTSeedFromProtoTracks" ) )
)
# Combinatorial Kalman-filter track-candidate making from the iter0 seeds.
process.hltIter0PFlowCkfTrackCandidates = cms.EDProducer( "CkfTrackCandidateMaker",
    cleanTrajectoryAfterInOut = cms.bool( False ),
    doSeedingRegionRebuilding = cms.bool( False ),
    onlyPixelHitsForSeedCleaner = cms.bool( False ),
    reverseTrajectories = cms.bool( False ),
    useHitsSplitting = cms.bool( False ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" ),
    src = cms.InputTag( "hltIter0PFLowPixelSeedsFromPixelTracks" ),
    clustersToSkip = cms.InputTag( "" ),
    phase2clustersToSkip = cms.InputTag( "" ),
    TrajectoryBuilderPSet = cms.PSet(  refToPSet_ = cms.string( "HLTIter0GroupedCkfTrajectoryBuilderIT" ) ),
    TransientInitialStateEstimatorParameters = cms.PSet(
      propagatorAlongTISE = cms.string( "PropagatorWithMaterialParabolicMf" ),
      numberMeasurementsForFit = cms.int32( 4 ),
      propagatorOppositeTISE = cms.string( "PropagatorWithMaterialParabolicMfOpposite" )
    ),
    numHitsForSeedCleaner = cms.int32( 4 ),
    NavigationSchool = cms.string( "SimpleNavigationSchool" ),
    RedundantSeedCleaner = cms.string( "CachingSeedCleanerBySharedInput" ),
    TrajectoryCleaner = cms.string( "hltESPTrajectoryCleanerBySharedHits" ),
    maxNSeeds = cms.uint32( 100000 ),
    maxSeedsBeforeCleaning = cms.uint32( 1000 )
)
# Final Kalman fit of the iter0 candidates (simplified parabolic magnetic field).
process.hltIter0PFlowCtfWithMaterialTracks = cms.EDProducer( "TrackProducer",
    useSimpleMF = cms.bool( True ),
    SimpleMagneticField = cms.string( "ParabolicMf" ),
    src = cms.InputTag( "hltIter0PFlowCkfTrackCandidates" ),
    clusterRemovalInfo = cms.InputTag( "" ),
    beamSpot = cms.InputTag( "hltOnlineBeamSpot" ),
    Fitter = cms.string( "hltESPFittingSmootherIT" ),
    useHitsSplitting = cms.bool( False ),
    alias = cms.untracked.string( "ctfWithMaterialTracks" ),
    TrajectoryInEvent = cms.bool( False ),
    TTRHBuilder = cms.string( "hltESPTTRHBWithTrackAngle" ),
    AlgorithmName = cms.string( "hltIter0" ),
    Propagator = cms.string( "hltESPRungeKuttaTrackerPropagator" ),
    GeometricInnerState = cms.bool( True ),
    NavigationSchool = cms.string( "" ),
    MeasurementTracker = cms.string( "" ),
    MeasurementTrackerEvent = cms.InputTag( "hltSiStripClusters" )
)
# Cut-based track classification; the three entries in each vector are the
# (loose, tight, highPurity) working points.
process.hltIter0PFlowTrackCutClassifier = cms.EDProducer( "TrackCutClassifier",
    src = cms.InputTag( "hltIter0PFlowCtfWithMaterialTracks" ),
    beamspot = cms.InputTag( "hltOnlineBeamSpot" ),
    vertices = cms.InputTag( "hltTrimmedPixelVertices" ),
    ignoreVertices = cms.bool( False ),
    qualityCuts = cms.vdouble( -0.7, 0.1, 0.7 ),
    mva = cms.PSet(
      minPixelHits = cms.vint32( 0, 0, 0 ),
      maxDzWrtBS = cms.vdouble( 3.40282346639E38, 24.0, 15.0 ),
      dr_par = cms.PSet(
        d0err = cms.vdouble( 0.003, 0.003, 0.003 ),
        dr_par2 = cms.vdouble( 3.40282346639E38, 0.6, 0.6 ),
        dr_par1 = cms.vdouble( 3.40282346639E38, 0.8, 0.8 ),
        dr_exp = cms.vint32( 4, 4, 4 ),
        d0err_par = cms.vdouble( 0.001, 0.001, 0.001 )
      ),
      maxLostLayers = cms.vint32( 1, 1, 1 ),
      min3DLayers = cms.vint32( 0, 0, 0 ),
      dz_par = cms.PSet(
        dz_par1 = cms.vdouble( 3.40282346639E38, 0.75, 0.75 ),
        dz_par2 = cms.vdouble( 3.40282346639E38, 0.5, 0.5 ),
        dz_exp = cms.vint32( 4, 4, 4 )
      ),
      minNVtxTrk = cms.int32( 3 ),
      maxDz = cms.vdouble( 0.5, 0.2, 3.40282346639E38 ),
      minNdof = cms.vdouble( 1.0E-5, 1.0E-5, 1.0E-5 ),
      maxChi2 = cms.vdouble( 9999.0, 25.0, 16.0 ),
      maxChi2n = cms.vdouble( 1.2, 1.0, 0.7 ),
      maxDr = cms.vdouble( 0.5, 0.03, 3.40282346639E38 ),
      minLayers = cms.vint32( 3, 3, 3 )
    )
)
# Keep only tracks passing the highPurity classification.
process.hltMergedTracks = cms.EDProducer( "TrackCollectionFilterCloner",
    originalSource = cms.InputTag( "hltIter0PFlowCtfWithMaterialTracks" ),
    originalMVAVals = cms.InputTag( 'hltIter0PFlowTrackCutClassifier','MVAValues' ),
    originalQualVals = cms.InputTag( 'hltIter0PFlowTrackCutClassifier','QualityMasks' ),
    minQuality = cms.string( "highPurity" ),
    copyExtras = cms.untracked.bool( True ),
    copyTrajectories = cms.untracked.bool( False )
)
# Merge the L3-muon tracks with the general iter0 tracks into a single
# duplicate-cleaned list (shared-hit fraction 0.19 defines duplicates).
process.hltPFMuonMerging = cms.EDProducer( "TrackListMerger",
    ShareFrac = cms.double( 0.19 ),
    FoundHitBonus = cms.double( 5.0 ),
    LostHitPenalty = cms.double( 20.0 ),
    MinPT = cms.double( 0.05 ),
    Epsilon = cms.double( -0.001 ),
    MaxNormalizedChisq = cms.double( 1000.0 ),
    MinFound = cms.int32( 3 ),
    TrackProducers = cms.VInputTag( 'hltIterL3MuonTracks','hltMergedTracks' ),
    hasSelector = cms.vint32( 0, 0 ),
    indivShareFrac = cms.vdouble( 1.0, 1.0 ),
    selectedTrackQuals = cms.VInputTag( 'hltIterL3MuonTracks','hltMergedTracks' ),
    setsToMerge = cms.VPSet(
      cms.PSet(  pQual = cms.bool( False ),
        tLists = cms.vint32( 0, 1 )
      )
    ),
    trackAlgoPriorityOrder = cms.string( "hltESPTrackAlgoPriorityOrder" ),
    allowFirstHitShare = cms.bool( True ),
    newQuality = cms.string( "confirmed" ),
    copyExtras = cms.untracked.bool( True ),
    writeOnlyTrkQuals = cms.bool( False ),
    copyMVA = cms.bool( False )
)
# --- Primary vertices from the merged track list, plus quality selection ---

# Adaptive-vertex-fitter primary vertices; two collections are produced,
# one without ("") and one with ("WithBS") a beam-spot constraint.
process.hltVerticesPF = cms.EDProducer( "PrimaryVertexProducer",
    vertexCollections = cms.VPSet(
      cms.PSet(  chi2cutoff = cms.double( 3.0 ),
        label = cms.string( "" ),
        useBeamConstraint = cms.bool( False ),
        minNdof = cms.double( 0.0 ),
        maxDistanceToBeam = cms.double( 1.0 ),
        algorithm = cms.string( "AdaptiveVertexFitter" )
      ),
      cms.PSet(  chi2cutoff = cms.double( 3.0 ),
        label = cms.string( "WithBS" ),
        useBeamConstraint = cms.bool( True ),
        minNdof = cms.double( 0.0 ),
        maxDistanceToBeam = cms.double( 1.0 ),
        algorithm = cms.string( "AdaptiveVertexFitter" )
      )
    ),
    verbose = cms.untracked.bool( False ),
    TkFilterParameters = cms.PSet(
      maxEta = cms.double( 100.0 ),
      minPt = cms.double( 0.0 ),
      minSiliconLayersWithHits = cms.int32( 5 ),
      minPixelLayersWithHits = cms.int32( 2 ),
      maxNormalizedChi2 = cms.double( 20.0 ),
      trackQuality = cms.string( "any" ),
      algorithm = cms.string( "filter" ),
      maxD0Significance = cms.double( 999.0 )
    ),
    beamSpotLabel = cms.InputTag( "hltOnlineBeamSpot" ),
    TrackLabel = cms.InputTag( "hltPFMuonMerging" ),
    TrackTimeResosLabel = cms.InputTag( "dummy_default" ),
    TrackTimesLabel = cms.InputTag( "dummy_default" ),
    TkClusParameters = cms.PSet(
      TkDAClusParameters = cms.PSet(
        zmerge = cms.double( 0.01 ),
        Tstop = cms.double( 0.5 ),
        d0CutOff = cms.double( 999.0 ),
        dzCutOff = cms.double( 4.0 ),
        vertexSize = cms.double( 0.15 ),
        coolingFactor = cms.double( 0.6 ),
        Tpurge = cms.double( 2.0 ),
        Tmin = cms.double( 2.4 ),
        uniquetrkweight = cms.double( 0.9 )
      ),
      algorithm = cms.string( "DA_vect" )
    ),
    isRecoveryIteration = cms.bool( False ),
    recoveryVtxCollection = cms.InputTag( "" )
)
# Select good-quality vertices (|z| < 24 cm, ndof >= 4, rho < 2 cm).
process.hltVerticesPFSelector = cms.EDFilter( "PrimaryVertexObjectFilter",
    filterParams = cms.PSet(
      maxZ = cms.double( 24.0 ),
      minNdof = cms.double( 4.0 ),
      maxRho = cms.double( 2.0 ),
      pvSrc = cms.InputTag( "hltVerticesPF" )
    ),
    src = cms.InputTag( "hltVerticesPF" )
)
# Require at least one non-fake selected vertex (filter = True rejects events otherwise).
process.hltVerticesPFFilter = cms.EDFilter( "VertexSelector",
    src = cms.InputTag( "hltVerticesPFSelector" ),
    cut = cms.string( "!isFake" ),
    filter = cms.bool( True )
)
# --- Bookkeeping filters and prescalers for the basic physics / random / zero-bias paths ---

# Always-true filter used to terminate paths.
process.hltBoolEnd = cms.EDFilter( "HLTBool",
    result = cms.bool( True )
)
# Accept every 107th event based on the TCDS event number (the "L1Fat" selection).
process.hltL1EventNumberL1Fat = cms.EDFilter( "HLTL1NumberFilter",
    rawInput = cms.InputTag( "rawDataCollector" ),
    period = cms.uint32( 107 ),
    invert = cms.bool( False ),
    fedId = cms.int32( 1024 ),
    useTCDSEventNumber = cms.bool( True )
)
process.hltPrePhysics = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreDSTPhysics = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Select events of trigger type 3 (random triggers).
process.hltRandomEventsFilter = cms.EDFilter( "HLTTriggerTypeFilter",
    SelectedTriggerType = cms.int32( 3 )
)
process.hltPreRandom = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreZeroBias = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# --- L1 seeds and prescalers: unpaired-bunch, NotBptxOR, and HT beamspot paths ---
# Each HLTL1TSeed filter evaluates its logical expression on the unpacked
# Stage-2 L1 objects from hltGtStage2Digis; the paired HLTPrescaler follows it.

process.hltL1sL1UnpairedBunchBptxMinus = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_UnpairedBunchBptxMinus" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreHIL1UnpairedBunchBptxMinusForPPRef = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltL1sL1UnpairedBunchBptxPlus = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_UnpairedBunchBptxPlus" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreHIL1UnpairedBunchBptxPlusForPPRef = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltL1sNotBptxOR = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_NotBptxOR" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreHIL1NotBptxORForPPRef = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# OR of single- and double-jet L1 seeds used for the HT80 beam-spot path.
process.hltL1sHTTForBeamSpotPP5TeV = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_DoubleJet40er2p7 OR L1_DoubleJet50er2p7 OR L1_DoubleJet60er2p7 OR L1_DoubleJet80er2p7 OR L1_DoubleJet100er2p7 OR L1_DoubleJet112er2p7 OR L1_DoubleJet120er2p7 OR L1_DoubleJet150er2p7 OR L1_SingleJet80 OR L1_SingleJet90 OR L1_SingleJet120 OR L1_SingleJet140 OR L1_SingleJet150 OR L1_SingleJet160 OR L1_SingleJet170 OR L1_SingleJet180 OR L1_SingleJet200" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreHIHT80BeamspotppRef5TeV = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Anti-kT (R = 0.4) calorimeter jets clustered from calo towers.
# Most grooming/substructure options of FastjetJetProducer are switched off
# (their parameters are set to -1 / False placeholders); only plain AK4
# clustering with a deterministic seed and disk-approximated areas is active.
process.hltAK4CaloJets = cms.EDProducer( "FastjetJetProducer",
    # Grooming and substructure algorithms: all disabled.
    useMassDropTagger = cms.bool( False ),
    useFiltering = cms.bool( False ),
    useDynamicFiltering = cms.bool( False ),
    useTrimming = cms.bool( False ),
    usePruning = cms.bool( False ),
    useCMSBoostedTauSeedingAlgorithm = cms.bool( False ),
    useKtPruning = cms.bool( False ),
    useConstituentSubtraction = cms.bool( False ),
    useSoftDrop = cms.bool( False ),
    correctShape = cms.bool( False ),
    UseOnlyVertexTracks = cms.bool( False ),
    UseOnlyOnePV = cms.bool( False ),
    # Placeholder values for the disabled options above.
    muCut = cms.double( -1.0 ),
    yCut = cms.double( -1.0 ),
    rFilt = cms.double( -1.0 ),
    rFiltFactor = cms.double( -1.0 ),
    trimPtFracMin = cms.double( -1.0 ),
    zcut = cms.double( -1.0 ),
    rcut_factor = cms.double( -1.0 ),
    csRho_EtaMax = cms.double( -1.0 ),
    csRParam = cms.double( -1.0 ),
    beta = cms.double( -1.0 ),
    R0 = cms.double( -1.0 ),
    gridMaxRapidity = cms.double( -1.0 ),
    gridSpacing = cms.double( -1.0 ),
    DzTrVtxMax = cms.double( 0.0 ),
    DxyTrVtxMax = cms.double( 0.0 ),
    MaxVtxZ = cms.double( 15.0 ),
    subjetPtMin = cms.double( -1.0 ),
    muMin = cms.double( -1.0 ),
    muMax = cms.double( -1.0 ),
    yMin = cms.double( -1.0 ),
    yMax = cms.double( -1.0 ),
    dRMin = cms.double( -1.0 ),
    dRMax = cms.double( -1.0 ),
    maxDepth = cms.int32( -1 ),
    nFilt = cms.int32( -1 ),
    MinVtxNdof = cms.int32( 5 ),
    # Active clustering configuration.
    src = cms.InputTag( "hltTowerMakerForAll" ),
    srcPVs = cms.InputTag( "NotUsed" ),
    jetType = cms.string( "CaloJet" ),
    jetAlgorithm = cms.string( "AntiKt" ),
    rParam = cms.double( 0.4 ),
    inputEtMin = cms.double( 0.3 ),
    inputEMin = cms.double( 0.0 ),
    jetPtMin = cms.double( 1.0 ),
    doPVCorrection = cms.bool( False ),
    doAreaFastjet = cms.bool( False ),
    doRhoFastjet = cms.bool( False ),
    doPUOffsetCorr = cms.bool( False ),
    puPtMin = cms.double( 10.0 ),
    nSigmaPU = cms.double( 1.0 ),
    radiusPU = cms.double( 0.4 ),
    subtractorName = cms.string( "" ),
    useExplicitGhosts = cms.bool( False ),
    doAreaDiskApprox = cms.bool( True ),
    voronoiRfact = cms.double( 0.9 ),
    Rho_EtaMax = cms.double( 4.4 ),
    Ghost_EtaMax = cms.double( 6.0 ),
    Active_Area_Repeats = cms.int32( 5 ),
    GhostArea = cms.double( 0.01 ),
    restrictInputs = cms.bool( False ),
    maxInputs = cms.uint32( 1 ),
    writeCompound = cms.bool( False ),
    writeJetsWithConst = cms.bool( False ),
    doFastJetNonUniform = cms.bool( False ),
    # Fixed seed so clustering is reproducible across jobs.
    useDeterministicSeed = cms.bool( True ),
    minSeed = cms.uint32( 14327 ),
    verbosity = cms.int32( 0 ),
    puWidth = cms.double( 0.0 ),
    nExclude = cms.uint32( 0 ),
    # Bad-cell limits are effectively disabled (very large values).
    maxBadEcalCells = cms.uint32( 9999999 ),
    maxBadHcalCells = cms.uint32( 9999999 ),
    maxProblematicEcalCells = cms.uint32( 9999999 ),
    maxProblematicHcalCells = cms.uint32( 9999999 ),
    maxRecoveredEcalCells = cms.uint32( 9999999 ),
    maxRecoveredHcalCells = cms.uint32( 9999999 ),
    puCenters = cms.vdouble(  ),
    applyWeight = cms.bool( False ),
    srcWeights = cms.InputTag( "" ),
    minimumTowersFraction = cms.double( 0.0 ),
    jetCollInstanceName = cms.string( "" ),
    sumRecHits = cms.bool( False )
)
# --- Calo jet ID, rho, the L1FastJet+L2+L3+Residual JEC chain, and the HT80 filter ---

# Loose calo-jet ID selection on the raw AK4 calo jets (rec-hit based).
process.hltAK4CaloJetsIDPassed = cms.EDProducer( "HLTCaloJetIDProducer",
    min_N90 = cms.int32( -2 ),
    min_N90hits = cms.int32( 2 ),
    min_EMF = cms.double( 1.0E-6 ),
    max_EMF = cms.double( 999.0 ),
    jetsInput = cms.InputTag( "hltAK4CaloJets" ),
    JetIDParams = cms.PSet(
      hfRecHitsColl = cms.InputTag( "hltHfreco" ),
      hoRecHitsColl = cms.InputTag( "hltHoreco" ),
      ebRecHitsColl = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
      hbheRecHitsColl = cms.InputTag( "hltHbhereco" ),
      useRecHits = cms.bool( True ),
      eeRecHitsColl = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' )
    )
)
# Event energy density (rho) from calo towers, used by the L1FastJet correction.
process.hltFixedGridRhoFastjetAllCalo = cms.EDProducer( "FixedGridRhoProducerFastjet",
    pfCandidatesTag = cms.InputTag( "hltTowerMakerForAll" ),
    maxRapidity = cms.double( 5.0 ),
    gridSpacing = cms.double( 0.55 )
)
# Individual jet-energy-correction levels for the AK4CaloHLT payload...
process.hltAK4CaloFastJetCorrector = cms.EDProducer( "L1FastjetCorrectorProducer",
    level = cms.string( "L1FastJet" ),
    algorithm = cms.string( "AK4CaloHLT" ),
    srcRho = cms.InputTag( "hltFixedGridRhoFastjetAllCalo" )
)
process.hltAK4CaloRelativeCorrector = cms.EDProducer( "LXXXCorrectorProducer",
    level = cms.string( "L2Relative" ),
    algorithm = cms.string( "AK4CaloHLT" )
)
process.hltAK4CaloAbsoluteCorrector = cms.EDProducer( "LXXXCorrectorProducer",
    level = cms.string( "L3Absolute" ),
    algorithm = cms.string( "AK4CaloHLT" )
)
process.hltAK4CaloResidualCorrector = cms.EDProducer( "LXXXCorrectorProducer",
    level = cms.string( "L2L3Residual" ),
    algorithm = cms.string( "AK4CaloHLT" )
)
# ...chained in the standard L1FastJet -> L2 -> L3 -> Residual order.
process.hltAK4CaloCorrector = cms.EDProducer( "ChainedJetCorrectorProducer",
    correctors = cms.VInputTag( 'hltAK4CaloFastJetCorrector','hltAK4CaloRelativeCorrector','hltAK4CaloAbsoluteCorrector','hltAK4CaloResidualCorrector' )
)
# Corrected jets, with and without the jet-ID requirement.
process.hltAK4CaloJetsCorrected = cms.EDProducer( "CorrectedCaloJetProducer",
    src = cms.InputTag( "hltAK4CaloJets" ),
    correctors = cms.VInputTag( 'hltAK4CaloCorrector' )
)
process.hltAK4CaloJetsCorrectedIDPassed = cms.EDProducer( "CorrectedCaloJetProducer",
    src = cms.InputTag( "hltAK4CaloJetsIDPassed" ),
    correctors = cms.VInputTag( 'hltAK4CaloCorrector' )
)
# HT (jets pT > 40, |eta| < 2.5) and MHT (jets pT > 30, |eta| < 5) from corrected jets.
process.hltHtMht = cms.EDProducer( "HLTHtMhtProducer",
    usePt = cms.bool( False ),
    excludePFMuons = cms.bool( False ),
    minNJetHt = cms.int32( 0 ),
    minNJetMht = cms.int32( 0 ),
    minPtJetHt = cms.double( 40.0 ),
    minPtJetMht = cms.double( 30.0 ),
    maxEtaJetHt = cms.double( 2.5 ),
    maxEtaJetMht = cms.double( 5.0 ),
    jetsLabel = cms.InputTag( "hltAK4CaloJetsCorrected" ),
    pfCandidatesLabel = cms.InputTag( "" )
)
# Require HT > 80 GeV (MHT/Meff thresholds effectively off).
process.hltHT80 = cms.EDFilter( "HLTHtMhtFilter",
    saveTags = cms.bool( True ),
    htLabels = cms.VInputTag( 'hltHtMht' ),
    mhtLabels = cms.VInputTag( 'hltHtMht' ),
    minHt = cms.vdouble( 80.0 ),
    minMht = cms.vdouble( 0.0 ),
    minMeff = cms.vdouble( 0.0 ),
    meffSlope = cms.vdouble( 1.0 )
)
# Twelve prescalers for the HIZeroBias "part" paths. All twelve stanzas were
# identical except for the event-counting offset (part N -> offset N), which
# splits zero-bias events round-robin across the twelve paths. Generating
# them in a loop removes the copy-paste while producing configurations that
# are byte-for-byte identical to the original hltPreHIZeroBiaspart0..11
# modules, so downstream path definitions are unaffected.
for _zbPart in range(12):
    setattr(
        process,
        "hltPreHIZeroBiaspart%d" % _zbPart,
        cms.EDFilter( "HLTPrescaler",
            offset = cms.uint32( _zbPart ),
            L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
        )
    )
del _zbPart  # don't leak the loop variable into the config namespace
# --- AlCa pi0/eta calibration stream: L1 seed, preshower reco, 3x3 clustering ---

# Very wide OR of EG/jet L1 seeds (plus L1_AlwaysTrue) feeding the pi0/eta
# calibration paths.
process.hltL1sAlCaHIEcalPi0Eta = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_AlwaysTrue OR L1_DoubleEG_15_10 OR L1_DoubleEG_18_17 OR L1_DoubleEG_20_18 OR L1_DoubleEG_22_10 OR L1_DoubleEG_22_12 OR L1_DoubleEG_22_15 OR L1_DoubleEG_23_10 OR L1_DoubleEG_24_17 OR L1_DoubleEG_25_12 OR L1_DoubleJet100er2p7 OR L1_DoubleJet112er2p7 OR L1_DoubleJet120er2p7 OR L1_DoubleJet40er2p7 OR L1_DoubleJet50er2p7 OR L1_DoubleJet60er2p7 OR L1_DoubleJet80er2p7 OR L1_IsolatedBunch OR L1_SingleEG10 OR L1_SingleEG15 OR L1_SingleEG18 OR L1_SingleEG24 OR L1_SingleEG26 OR L1_SingleEG28 OR L1_SingleEG30 OR L1_SingleEG32 OR L1_SingleEG34 OR L1_SingleEG36 OR L1_SingleEG38 OR L1_SingleEG40 OR L1_SingleEG42 OR L1_SingleEG45 OR L1_SingleEG5 OR L1_SingleIsoEG18 OR L1_SingleIsoEG20 OR L1_SingleIsoEG22 OR L1_SingleIsoEG24 OR L1_SingleIsoEG26 OR L1_SingleIsoEG28 OR L1_SingleIsoEG30 OR L1_SingleIsoEG32 OR L1_SingleIsoEG34 OR L1_SingleIsoEG36 OR L1_SingleJet120 OR L1_SingleJet140 OR L1_SingleJet150 OR L1_SingleJet16 OR L1_SingleJet160 OR L1_SingleJet170 OR L1_SingleJet180 OR L1_SingleJet20 OR L1_SingleJet200 OR L1_SingleJet35 OR L1_SingleJet60 OR L1_SingleJet90" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreAlCaHIEcalPi0EBonly = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Unpack ECAL preshower digis from raw data.
process.hltEcalPreshowerDigis = cms.EDProducer( "ESRawToDigi",
    sourceTag = cms.InputTag( "rawDataCollector" ),
    debugMode = cms.untracked.bool( False ),
    InstanceES = cms.string( "" ),
    LookupTable = cms.FileInPath( "EventFilter/ESDigiToRaw/data/ES_lookup_table.dat" ),
    ESdigiCollection = cms.string( "" )
)
# Reconstruct preshower rec-hits from the unpacked digis.
process.hltEcalPreshowerRecHit = cms.EDProducer( "ESRecHitProducer",
    ESrechitCollection = cms.string( "EcalRecHitsES" ),
    ESdigiCollection = cms.InputTag( "hltEcalPreshowerDigis" ),
    algo = cms.string( "ESRecHitWorker" ),
    ESRecoAlgo = cms.int32( 0 )
)
# Simple fixed-size 3x3 crystal clusters in barrel and endcaps, the photon
# candidates used by the pi0/eta resonance filters below.
process.hltSimple3x3Clusters = cms.EDProducer( "EgammaHLTNxNClusterProducer",
    doBarrel = cms.bool( True ),
    doEndcaps = cms.bool( True ),
    barrelHitProducer = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
    endcapHitProducer = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),
    clusEtaSize = cms.int32( 3 ),
    clusPhiSize = cms.int32( 3 ),
    barrelClusterCollection = cms.string( "Simple3x3ClustersBarrel" ),
    endcapClusterCollection = cms.string( "Simple3x3ClustersEndcap" ),
    clusSeedThr = cms.double( 0.5 ),
    clusSeedThrEndCap = cms.double( 1.0 ),
    useRecoFlag = cms.bool( False ),
    flagLevelRecHitsToUse = cms.int32( 1 ),
    useDBStatus = cms.bool( True ),
    statusLevelRecHitsToUse = cms.int32( 1 ),
    posCalcParameters = cms.PSet(
      T0_barl = cms.double( 7.4 ),
      T0_endcPresh = cms.double( 1.2 ),
      LogWeighted = cms.bool( True ),
      T0_endc = cms.double( 3.1 ),
      X0 = cms.double( 0.89 ),
      W0 = cms.double( 4.2 )
    ),
    maxNumberofSeeds = cms.int32( 700 ),
    maxNumberofClusters = cms.int32( 300 ),
    debugLevel = cms.int32( 0 )
)
process.hltAlCaPi0RecHitsFilterEBonlyRegional = cms.EDFilter( "HLTRegionalEcalResonanceFilter",
barrelHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
endcapHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),
preshRecHitProducer = cms.InputTag( 'hltEcalPreshowerRecHit','EcalRecHitsES' ),
barrelClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersBarrel' ),
endcapClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersEndcap' ),
useRecoFlag = cms.bool( False ),
flagLevelRecHitsToUse = cms.int32( 1 ),
useDBStatus = cms.bool( True ),
statusLevelRecHitsToUse = cms.int32( 1 ),
doSelBarrel = cms.bool( True ),
barrelSelection = cms.PSet(
massHighPi0Cand = cms.double( 0.163 ),
ptMinForIsolation = cms.double( 1.0 ),
seleMinvMaxBarrel = cms.double( 0.22 ),
massLowPi0Cand = cms.double( 0.104 ),
seleS9S25Gamma = cms.double( 0.0 ),
seleBeltDeta = cms.double( 0.05 ),
seleS4S9GammaBarrel_region2 = cms.double( 0.9 ),
barrelHitCollection = cms.string( "pi0EcalRecHitsEB" ),
removePi0CandidatesForEta = cms.bool( False ),
seleMinvMinBarrel = cms.double( 0.06 ),
seleS4S9GammaBarrel_region1 = cms.double( 0.88 ),
selePtPairBarrel_region1 = cms.double( 2.0 ),
selePtPairBarrel_region2 = cms.double( 1.75 ),
seleBeltDR = cms.double( 0.2 ),
region1_Barrel = cms.double( 1.0 ),
seleIsoBarrel_region1 = cms.double( 0.5 ),
selePtGammaBarrel_region1 = cms.double( 0.65 ),
seleIsoBarrel_region2 = cms.double( 0.5 ),
selePtGammaBarrel_region2 = cms.double( 0.65 ),
store5x5RecHitEB = cms.bool( False ),
seleNxtalBarrel_region2 = cms.uint32( 6 ),
seleNxtalBarrel_region1 = cms.uint32( 6 )
),
doSelEndcap = cms.bool( False ),
endcapSelection = cms.PSet(
seleBeltDetaEndCap = cms.double( 0.05 ),
selePtPairMaxEndCap_region3 = cms.double( 2.5 ),
seleS4S9GammaEndCap_region2 = cms.double( 0.65 ),
seleS4S9GammaEndCap_region1 = cms.double( 0.65 ),
seleNxtalEndCap_region2 = cms.uint32( 6 ),
seleNxtalEndCap_region3 = cms.uint32( 6 ),
ptMinForIsolationEndCap = cms.double( 0.5 ),
selePtPairEndCap_region1 = cms.double( 1.5 ),
endcapHitCollection = cms.string( "pi0EcalRecHitsEE" ),
selePtPairEndCap_region2 = cms.double( 1.5 ),
seleS4S9GammaEndCap_region3 = cms.double( 0.65 ),
selePtGammaEndCap_region3 = cms.double( 0.5 ),
selePtGammaEndCap_region2 = cms.double( 0.5 ),
selePtGammaEndCap_region1 = cms.double( 0.5 ),
region1_EndCap = cms.double( 1.8 ),
region2_EndCap = cms.double( 2.0 ),
store5x5RecHitEE = cms.bool( False ),
seleIsoEndCap_region3 = cms.double( 0.5 ),
seleIsoEndCap_region2 = cms.double( 0.5 ),
seleMinvMinEndCap = cms.double( 0.05 ),
selePtPairEndCap_region3 = cms.double( 99.0 ),
seleIsoEndCap_region1 = cms.double( 0.5 ),
seleBeltDREndCap = cms.double( 0.2 ),
seleMinvMaxEndCap = cms.double( 0.3 ),
seleNxtalEndCap_region1 = cms.uint32( 6 ),
seleS9S25GammaEndCap = cms.double( 0.0 )
),
storeRecHitES = cms.bool( False ),
preshowerSelection = cms.PSet(
preshClusterEnergyCut = cms.double( 0.0 ),
debugLevelES = cms.string( "" ),
ESCollection = cms.string( "pi0EcalRecHitsES" ),
preshNclust = cms.int32( 4 ),
preshStripEnergyCut = cms.double( 0.0 ),
preshCalibPlaneY = cms.double( 0.7 ),
preshSeededNstrip = cms.int32( 15 ),
preshCalibGamma = cms.double( 0.024 ),
preshCalibPlaneX = cms.double( 1.0 ),
preshCalibMIP = cms.double( 9.0E-5 )
),
debugLevel = cms.int32( 0 )
)
# --- AlCa pi0 calibration stream modules (EB-only tail, then the EE-only chain) ---
# NOTE(review): this is a machine-generated HLT configuration dump; changes are
# normally made upstream (ConfDB / hltGetConfiguration), not by hand-editing.
#
# Recalibration producer for the EB-only pi0 path. Every do* correction flag is
# False, so this instance applies no correction and only re-emits the input
# rec-hits under the configured output product names.
process.hltAlCaPi0EBUncalibrator = cms.EDProducer( "EcalRecalibRecHitProducer",
    doEnergyScale = cms.bool( False ),
    doEnergyScaleInverse = cms.bool( False ),
    doIntercalib = cms.bool( False ),
    doIntercalibInverse = cms.bool( False ),
    # NOTE(review): both the EE and EB inputs reference the *EB* product
    # instance of the EB-only regional filter — presumably intentional for an
    # EB-only path, but worth confirming against the producer's documentation.
    EERecHitCollection = cms.InputTag( 'hltAlCaPi0RecHitsFilterEBonlyRegional','pi0EcalRecHitsEB' ),
    EBRecHitCollection = cms.InputTag( 'hltAlCaPi0RecHitsFilterEBonlyRegional','pi0EcalRecHitsEB' ),
    doLaserCorrections = cms.bool( False ),
    doLaserCorrectionsInverse = cms.bool( False ),
    EBRecalibRecHitCollection = cms.string( "pi0EcalRecHitsEB" ),
    EERecalibRecHitCollection = cms.string( "pi0EcalRecHitsEE" )
)
# Converts the selected barrel pi0 rec-hits back into digis (plus SR flags)
# for compact storage in the AlCa output stream.
process.hltAlCaPi0EBRechitsToDigis = cms.EDProducer( "HLTRechitsToDigis",
    region = cms.string( "barrel" ),
    digisIn = cms.InputTag( 'hltEcalDigis','ebDigis' ),
    digisOut = cms.string( "pi0EBDigis" ),
    recHits = cms.InputTag( 'hltAlCaPi0EBUncalibrator','pi0EcalRecHitsEB' ),
    srFlagsIn = cms.InputTag( "hltEcalDigis" ),
    srFlagsOut = cms.string( "pi0EBSrFlags" )
)
# Prescaler for the EE-only pi0 AlCa path (offset 0 = no phase shift).
process.hltPreAlCaHIEcalPi0EEonly = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Regional pi0 resonance selection, endcap-only flavour: doSelBarrel is False
# and doSelEndcap is True, so the barrelSelection PSet below is presumably
# inert in this instance (kept because the plugin requires the parameter).
process.hltAlCaPi0RecHitsFilterEEonlyRegional = cms.EDFilter( "HLTRegionalEcalResonanceFilter",
    barrelHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
    endcapHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),
    preshRecHitProducer = cms.InputTag( 'hltEcalPreshowerRecHit','EcalRecHitsES' ),
    barrelClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersBarrel' ),
    endcapClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersEndcap' ),
    useRecoFlag = cms.bool( False ),
    flagLevelRecHitsToUse = cms.int32( 1 ),
    useDBStatus = cms.bool( True ),
    statusLevelRecHitsToUse = cms.int32( 1 ),
    doSelBarrel = cms.bool( False ),
    barrelSelection = cms.PSet(
      massHighPi0Cand = cms.double( 0.163 ),
      ptMinForIsolation = cms.double( 1.0 ),
      seleMinvMaxBarrel = cms.double( 0.22 ),
      massLowPi0Cand = cms.double( 0.104 ),
      seleS9S25Gamma = cms.double( 0.0 ),
      seleBeltDeta = cms.double( 0.05 ),
      seleS4S9GammaBarrel_region2 = cms.double( 0.65 ),
      barrelHitCollection = cms.string( "pi0EcalRecHitsEB" ),
      removePi0CandidatesForEta = cms.bool( False ),
      seleMinvMinBarrel = cms.double( 0.06 ),
      seleS4S9GammaBarrel_region1 = cms.double( 0.65 ),
      selePtPairBarrel_region1 = cms.double( 1.5 ),
      selePtPairBarrel_region2 = cms.double( 1.5 ),
      seleBeltDR = cms.double( 0.2 ),
      region1_Barrel = cms.double( 1.0 ),
      seleIsoBarrel_region1 = cms.double( 0.5 ),
      selePtGammaBarrel_region1 = cms.double( 0.5 ),
      seleIsoBarrel_region2 = cms.double( 0.5 ),
      selePtGammaBarrel_region2 = cms.double( 0.5 ),
      store5x5RecHitEB = cms.bool( False ),
      seleNxtalBarrel_region2 = cms.uint32( 6 ),
      seleNxtalBarrel_region1 = cms.uint32( 6 )
    ),
    doSelEndcap = cms.bool( True ),
    # Endcap selection thresholds, split in three |eta| regions bounded by
    # region1_EndCap / region2_EndCap below.
    endcapSelection = cms.PSet(
      seleBeltDetaEndCap = cms.double( 0.05 ),
      selePtPairMaxEndCap_region3 = cms.double( 999.0 ),
      seleS4S9GammaEndCap_region2 = cms.double( 0.92 ),
      seleS4S9GammaEndCap_region1 = cms.double( 0.85 ),
      seleNxtalEndCap_region2 = cms.uint32( 6 ),
      seleNxtalEndCap_region3 = cms.uint32( 6 ),
      ptMinForIsolationEndCap = cms.double( 0.5 ),
      selePtPairEndCap_region1 = cms.double( 3.75 ),
      endcapHitCollection = cms.string( "pi0EcalRecHitsEE" ),
      selePtPairEndCap_region2 = cms.double( 2.0 ),
      seleS4S9GammaEndCap_region3 = cms.double( 0.92 ),
      selePtGammaEndCap_region3 = cms.double( 0.95 ),
      selePtGammaEndCap_region2 = cms.double( 0.95 ),
      selePtGammaEndCap_region1 = cms.double( 1.1 ),
      region1_EndCap = cms.double( 1.8 ),
      region2_EndCap = cms.double( 2.0 ),
      store5x5RecHitEE = cms.bool( False ),
      seleIsoEndCap_region3 = cms.double( 0.5 ),
      seleIsoEndCap_region2 = cms.double( 0.5 ),
      seleMinvMinEndCap = cms.double( 0.05 ),
      selePtPairEndCap_region3 = cms.double( 2.0 ),
      seleIsoEndCap_region1 = cms.double( 0.5 ),
      seleBeltDREndCap = cms.double( 0.2 ),
      seleMinvMaxEndCap = cms.double( 0.3 ),
      seleNxtalEndCap_region1 = cms.uint32( 6 ),
      seleS9S25GammaEndCap = cms.double( 0.0 )
    ),
    # Preshower rec-hits are stored for the endcap path (True here vs False in
    # the EB-only instance).
    storeRecHitES = cms.bool( True ),
    preshowerSelection = cms.PSet(
      preshClusterEnergyCut = cms.double( 0.0 ),
      debugLevelES = cms.string( "" ),
      ESCollection = cms.string( "pi0EcalRecHitsES" ),
      preshNclust = cms.int32( 4 ),
      preshStripEnergyCut = cms.double( 0.0 ),
      preshCalibPlaneY = cms.double( 0.7 ),
      preshSeededNstrip = cms.int32( 15 ),
      preshCalibGamma = cms.double( 0.024 ),
      preshCalibPlaneX = cms.double( 1.0 ),
      preshCalibMIP = cms.double( 9.0E-5 )
    ),
    debugLevel = cms.int32( 0 )
)
# Pass-through recalibrator for the EE-only pi0 path (all corrections off);
# both inputs reference the EE product instance of the EE-only filter.
process.hltAlCaPi0EEUncalibrator = cms.EDProducer( "EcalRecalibRecHitProducer",
    doEnergyScale = cms.bool( False ),
    doEnergyScaleInverse = cms.bool( False ),
    doIntercalib = cms.bool( False ),
    doIntercalibInverse = cms.bool( False ),
    EERecHitCollection = cms.InputTag( 'hltAlCaPi0RecHitsFilterEEonlyRegional','pi0EcalRecHitsEE' ),
    EBRecHitCollection = cms.InputTag( 'hltAlCaPi0RecHitsFilterEEonlyRegional','pi0EcalRecHitsEE' ),
    doLaserCorrections = cms.bool( False ),
    doLaserCorrectionsInverse = cms.bool( False ),
    EBRecalibRecHitCollection = cms.string( "pi0EcalRecHitsEB" ),
    EERecalibRecHitCollection = cms.string( "pi0EcalRecHitsEE" )
)
# Endcap counterpart of hltAlCaPi0EBRechitsToDigis: selected EE pi0 rec-hits
# back to digis + SR flags.
process.hltAlCaPi0EERechitsToDigis = cms.EDProducer( "HLTRechitsToDigis",
    region = cms.string( "endcap" ),
    digisIn = cms.InputTag( 'hltEcalDigis','eeDigis' ),
    digisOut = cms.string( "pi0EEDigis" ),
    recHits = cms.InputTag( 'hltAlCaPi0EEUncalibrator','pi0EcalRecHitsEE' ),
    srFlagsIn = cms.InputTag( "hltEcalDigis" ),
    srFlagsOut = cms.string( "pi0EESrFlags" )
)
# --- AlCa eta calibration stream, EB-only chain ---
# Prescaler for the EB-only eta AlCa path.
process.hltPreAlCaHIEcalEtaEBonly = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Regional eta resonance selection, barrel-only flavour: doSelBarrel is True
# and doSelEndcap is False, so the endcapSelection PSet below is presumably
# inert in this instance. Note removePi0CandidatesForEta = True with the
# pi0 mass window [massLowPi0Cand, massHighPi0Cand] vetoing pi0 candidates.
process.hltAlCaEtaRecHitsFilterEBonlyRegional = cms.EDFilter( "HLTRegionalEcalResonanceFilter",
    barrelHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
    endcapHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),
    preshRecHitProducer = cms.InputTag( 'hltEcalPreshowerRecHit','EcalRecHitsES' ),
    barrelClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersBarrel' ),
    endcapClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersEndcap' ),
    useRecoFlag = cms.bool( False ),
    flagLevelRecHitsToUse = cms.int32( 1 ),
    useDBStatus = cms.bool( True ),
    statusLevelRecHitsToUse = cms.int32( 1 ),
    doSelBarrel = cms.bool( True ),
    barrelSelection = cms.PSet(
      massHighPi0Cand = cms.double( 0.156 ),
      ptMinForIsolation = cms.double( 1.0 ),
      # Invariant-mass window for eta candidates: [0.2, 0.8] GeV here, wider
      # than the pi0 instances above.
      seleMinvMaxBarrel = cms.double( 0.8 ),
      massLowPi0Cand = cms.double( 0.084 ),
      seleS9S25Gamma = cms.double( 0.8 ),
      seleBeltDeta = cms.double( 0.1 ),
      seleS4S9GammaBarrel_region2 = cms.double( 0.9 ),
      barrelHitCollection = cms.string( "etaEcalRecHitsEB" ),
      removePi0CandidatesForEta = cms.bool( True ),
      seleMinvMinBarrel = cms.double( 0.2 ),
      seleS4S9GammaBarrel_region1 = cms.double( 0.9 ),
      selePtPairBarrel_region1 = cms.double( 3.0 ),
      selePtPairBarrel_region2 = cms.double( 3.0 ),
      seleBeltDR = cms.double( 0.3 ),
      region1_Barrel = cms.double( 1.0 ),
      seleIsoBarrel_region1 = cms.double( 0.5 ),
      selePtGammaBarrel_region1 = cms.double( 0.65 ),
      seleIsoBarrel_region2 = cms.double( 0.5 ),
      selePtGammaBarrel_region2 = cms.double( 1.4 ),
      # 5x5 crystal matrices are stored for the eta stream (False for pi0).
      store5x5RecHitEB = cms.bool( True ),
      seleNxtalBarrel_region2 = cms.uint32( 6 ),
      seleNxtalBarrel_region1 = cms.uint32( 6 )
    ),
    doSelEndcap = cms.bool( False ),
    endcapSelection = cms.PSet(
      seleBeltDetaEndCap = cms.double( 0.05 ),
      selePtPairMaxEndCap_region3 = cms.double( 2.5 ),
      seleS4S9GammaEndCap_region2 = cms.double( 0.65 ),
      seleS4S9GammaEndCap_region1 = cms.double( 0.65 ),
      seleNxtalEndCap_region2 = cms.uint32( 6 ),
      seleNxtalEndCap_region3 = cms.uint32( 6 ),
      ptMinForIsolationEndCap = cms.double( 0.5 ),
      selePtPairEndCap_region1 = cms.double( 1.5 ),
      endcapHitCollection = cms.string( "etaEcalRecHitsEE" ),
      selePtPairEndCap_region2 = cms.double( 1.5 ),
      seleS4S9GammaEndCap_region3 = cms.double( 0.65 ),
      selePtGammaEndCap_region3 = cms.double( 0.5 ),
      selePtGammaEndCap_region2 = cms.double( 0.5 ),
      selePtGammaEndCap_region1 = cms.double( 0.5 ),
      region1_EndCap = cms.double( 1.8 ),
      region2_EndCap = cms.double( 2.0 ),
      store5x5RecHitEE = cms.bool( False ),
      seleIsoEndCap_region3 = cms.double( 0.5 ),
      seleIsoEndCap_region2 = cms.double( 0.5 ),
      seleMinvMinEndCap = cms.double( 0.05 ),
      selePtPairEndCap_region3 = cms.double( 99.0 ),
      seleIsoEndCap_region1 = cms.double( 0.5 ),
      seleBeltDREndCap = cms.double( 0.2 ),
      seleMinvMaxEndCap = cms.double( 0.3 ),
      seleNxtalEndCap_region1 = cms.uint32( 6 ),
      seleS9S25GammaEndCap = cms.double( 0.0 )
    ),
    storeRecHitES = cms.bool( False ),
    preshowerSelection = cms.PSet(
      preshClusterEnergyCut = cms.double( 0.0 ),
      debugLevelES = cms.string( "" ),
      ESCollection = cms.string( "etaEcalRecHitsES" ),
      preshNclust = cms.int32( 4 ),
      preshStripEnergyCut = cms.double( 0.0 ),
      preshCalibPlaneY = cms.double( 0.7 ),
      preshSeededNstrip = cms.int32( 15 ),
      preshCalibGamma = cms.double( 0.024 ),
      preshCalibPlaneX = cms.double( 1.0 ),
      preshCalibMIP = cms.double( 9.0E-5 )
    ),
    debugLevel = cms.int32( 0 )
)
# Pass-through recalibrator for the eta EB path (all correction flags False).
process.hltAlCaEtaEBUncalibrator = cms.EDProducer( "EcalRecalibRecHitProducer",
    doEnergyScale = cms.bool( False ),
    doEnergyScaleInverse = cms.bool( False ),
    doIntercalib = cms.bool( False ),
    doIntercalibInverse = cms.bool( False ),
    # NOTE(review): both inputs reference the EB product instance (EB-only path).
    EERecHitCollection = cms.InputTag( 'hltAlCaEtaRecHitsFilterEBonlyRegional','etaEcalRecHitsEB' ),
    EBRecHitCollection = cms.InputTag( 'hltAlCaEtaRecHitsFilterEBonlyRegional','etaEcalRecHitsEB' ),
    doLaserCorrections = cms.bool( False ),
    doLaserCorrectionsInverse = cms.bool( False ),
    EBRecalibRecHitCollection = cms.string( "etaEcalRecHitsEB" ),
    EERecalibRecHitCollection = cms.string( "etaEcalRecHitsEE" )
)
# Selected barrel eta rec-hits converted back to digis + SR flags.
process.hltAlCaEtaEBRechitsToDigis = cms.EDProducer( "HLTRechitsToDigis",
    region = cms.string( "barrel" ),
    digisIn = cms.InputTag( 'hltEcalDigis','ebDigis' ),
    digisOut = cms.string( "etaEBDigis" ),
    recHits = cms.InputTag( 'hltAlCaEtaEBUncalibrator','etaEcalRecHitsEB' ),
    srFlagsIn = cms.InputTag( "hltEcalDigis" ),
    srFlagsOut = cms.string( "etaEBSrFlags" )
)
# --- AlCa eta calibration stream, EE-only chain ---
# Prescaler for the EE-only eta AlCa path.
process.hltPreAlCaHIEcalEtaEEonly = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Regional eta resonance selection, endcap-only flavour: doSelBarrel is False
# and doSelEndcap is True, so the barrelSelection PSet below is presumably
# inert in this instance.
process.hltAlCaEtaRecHitsFilterEEonlyRegional = cms.EDFilter( "HLTRegionalEcalResonanceFilter",
    barrelHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
    endcapHits = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),
    preshRecHitProducer = cms.InputTag( 'hltEcalPreshowerRecHit','EcalRecHitsES' ),
    barrelClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersBarrel' ),
    endcapClusters = cms.InputTag( 'hltSimple3x3Clusters','Simple3x3ClustersEndcap' ),
    useRecoFlag = cms.bool( False ),
    flagLevelRecHitsToUse = cms.int32( 1 ),
    useDBStatus = cms.bool( True ),
    statusLevelRecHitsToUse = cms.int32( 1 ),
    doSelBarrel = cms.bool( False ),
    barrelSelection = cms.PSet(
      massHighPi0Cand = cms.double( 0.163 ),
      ptMinForIsolation = cms.double( 1.0 ),
      seleMinvMaxBarrel = cms.double( 0.8 ),
      massLowPi0Cand = cms.double( 0.104 ),
      seleS9S25Gamma = cms.double( 0.0 ),
      seleBeltDeta = cms.double( 0.05 ),
      seleS4S9GammaBarrel_region2 = cms.double( 0.65 ),
      barrelHitCollection = cms.string( "etaEcalRecHitsEB" ),
      removePi0CandidatesForEta = cms.bool( False ),
      seleMinvMinBarrel = cms.double( 0.3 ),
      seleS4S9GammaBarrel_region1 = cms.double( 0.65 ),
      selePtPairBarrel_region1 = cms.double( 1.5 ),
      selePtPairBarrel_region2 = cms.double( 1.5 ),
      seleBeltDR = cms.double( 0.2 ),
      region1_Barrel = cms.double( 1.0 ),
      seleIsoBarrel_region1 = cms.double( 0.5 ),
      selePtGammaBarrel_region1 = cms.double( 1.0 ),
      seleIsoBarrel_region2 = cms.double( 0.5 ),
      selePtGammaBarrel_region2 = cms.double( 0.5 ),
      store5x5RecHitEB = cms.bool( False ),
      seleNxtalBarrel_region2 = cms.uint32( 6 ),
      seleNxtalBarrel_region1 = cms.uint32( 6 )
    ),
    doSelEndcap = cms.bool( True ),
    # Endcap eta selection: mass window [0.2, 0.8] GeV, 5x5 matrices stored.
    endcapSelection = cms.PSet(
      seleBeltDetaEndCap = cms.double( 0.1 ),
      selePtPairMaxEndCap_region3 = cms.double( 999.0 ),
      seleS4S9GammaEndCap_region2 = cms.double( 0.9 ),
      seleS4S9GammaEndCap_region1 = cms.double( 0.9 ),
      seleNxtalEndCap_region2 = cms.uint32( 6 ),
      seleNxtalEndCap_region3 = cms.uint32( 6 ),
      ptMinForIsolationEndCap = cms.double( 0.5 ),
      selePtPairEndCap_region1 = cms.double( 3.0 ),
      endcapHitCollection = cms.string( "etaEcalRecHitsEE" ),
      selePtPairEndCap_region2 = cms.double( 3.0 ),
      seleS4S9GammaEndCap_region3 = cms.double( 0.9 ),
      selePtGammaEndCap_region3 = cms.double( 1.0 ),
      selePtGammaEndCap_region2 = cms.double( 1.0 ),
      selePtGammaEndCap_region1 = cms.double( 1.0 ),
      region1_EndCap = cms.double( 1.8 ),
      region2_EndCap = cms.double( 2.0 ),
      store5x5RecHitEE = cms.bool( True ),
      seleIsoEndCap_region3 = cms.double( 0.5 ),
      seleIsoEndCap_region2 = cms.double( 0.5 ),
      seleMinvMinEndCap = cms.double( 0.2 ),
      selePtPairEndCap_region3 = cms.double( 3.0 ),
      seleIsoEndCap_region1 = cms.double( 0.5 ),
      seleBeltDREndCap = cms.double( 0.3 ),
      seleMinvMaxEndCap = cms.double( 0.8 ),
      seleNxtalEndCap_region1 = cms.uint32( 6 ),
      seleS9S25GammaEndCap = cms.double( 0.85 )
    ),
    # Preshower hits stored for the endcap eta path.
    storeRecHitES = cms.bool( True ),
    preshowerSelection = cms.PSet(
      preshClusterEnergyCut = cms.double( 0.0 ),
      debugLevelES = cms.string( "" ),
      ESCollection = cms.string( "etaEcalRecHitsES" ),
      preshNclust = cms.int32( 4 ),
      preshStripEnergyCut = cms.double( 0.0 ),
      preshCalibPlaneY = cms.double( 0.7 ),
      preshSeededNstrip = cms.int32( 15 ),
      preshCalibGamma = cms.double( 0.024 ),
      preshCalibPlaneX = cms.double( 1.0 ),
      preshCalibMIP = cms.double( 9.0E-5 )
    ),
    debugLevel = cms.int32( 0 )
)
# Pass-through recalibrator for the eta EE path (all correction flags False);
# both inputs reference the EE product instance of the EE-only filter.
process.hltAlCaEtaEEUncalibrator = cms.EDProducer( "EcalRecalibRecHitProducer",
    doEnergyScale = cms.bool( False ),
    doEnergyScaleInverse = cms.bool( False ),
    doIntercalib = cms.bool( False ),
    doIntercalibInverse = cms.bool( False ),
    EERecHitCollection = cms.InputTag( 'hltAlCaEtaRecHitsFilterEEonlyRegional','etaEcalRecHitsEE' ),
    EBRecHitCollection = cms.InputTag( 'hltAlCaEtaRecHitsFilterEEonlyRegional','etaEcalRecHitsEE' ),
    doLaserCorrections = cms.bool( False ),
    doLaserCorrectionsInverse = cms.bool( False ),
    EBRecalibRecHitCollection = cms.string( "etaEcalRecHitsEB" ),
    EERecalibRecHitCollection = cms.string( "etaEcalRecHitsEE" )
)
# Selected endcap eta rec-hits converted back to digis + SR flags.
process.hltAlCaEtaEERechitsToDigis = cms.EDProducer( "HLTRechitsToDigis",
    region = cms.string( "endcap" ),
    digisIn = cms.InputTag( 'hltEcalDigis','eeDigis' ),
    digisOut = cms.string( "etaEEDigis" ),
    recHits = cms.InputTag( 'hltAlCaEtaEEUncalibrator','etaEcalRecHitsEE' ),
    srFlagsIn = cms.InputTag( "hltEcalDigis" ),
    srFlagsOut = cms.string( "etaEESrFlags" )
)
# --- Detector calibration paths (ECAL / HCAL) and ECAL phi-symmetry ---
# Selects events of trigger type 2 (calibration events, per the HLT trigger
# type convention — confirm against HLTTriggerTypeFilter documentation).
process.hltCalibrationEventsFilter = cms.EDFilter( "HLTTriggerTypeFilter",
    SelectedTriggerType = cms.int32( 2 )
)
process.hltPreEcalCalibration = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Keeps only the listed FED ids (ECAL FED range plus 1024) from the raw data.
process.hltEcalCalibrationRaw = cms.EDProducer( "EvFFEDSelector",
    inputTag = cms.InputTag( "rawDataCollector" ),
    fedList = cms.vuint32( 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 1024 )
)
process.hltPreHcalCalibration = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Accepts HCAL calibration events of the listed calibration types (1-6).
process.hltHcalCalibTypeFilter = cms.EDFilter( "HLTHcalCalibTypeFilter",
    InputTag = cms.InputTag( "rawDataCollector" ),
    CalibTypes = cms.vint32( 1, 2, 3, 4, 5, 6 )
)
# Keeps only the listed FED ids (HCAL legacy + uTCA FED ranges plus 1024).
process.hltHcalCalibrationRaw = cms.EDProducer( "EvFFEDSelector",
    inputTag = cms.InputTag( "rawDataCollector" ),
    fedList = cms.vuint32( 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 1024, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199 )
)
# L1 seed for the phi-symmetry path: fires on zero-bias, always-true or
# isolated-bunch algorithms.
process.hltL1sZeroBiasIorAlwaysTrueIorIsolatedBunch = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_ZeroBias OR L1_AlwaysTrue OR L1_IsolatedBunch" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreAlCaEcalPhiSym = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# ECAL phi-symmetry filter. The ampCut_* vectors give per-ring amplitude
# thresholds: one entry per eta ring (85 for each barrel half, 39 per endcap),
# values machine-generated — do not edit by hand.
process.hltEcalPhiSymFilter = cms.EDFilter( "HLTEcalPhiSymFilter",
    barrelDigiCollection = cms.InputTag( 'hltEcalDigis','ebDigis' ),
    endcapDigiCollection = cms.InputTag( 'hltEcalDigis','eeDigis' ),
    barrelUncalibHitCollection = cms.InputTag( 'hltEcalUncalibRecHit','EcalUncalibRecHitsEB' ),
    endcapUncalibHitCollection = cms.InputTag( 'hltEcalUncalibRecHit','EcalUncalibRecHitsEE' ),
    barrelHitCollection = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEB' ),
    endcapHitCollection = cms.InputTag( 'hltEcalRecHit','EcalRecHitsEE' ),
    statusThreshold = cms.uint32( 3 ),
    useRecoFlag = cms.bool( False ),
    ampCut_barrelP = cms.vdouble( 14.31759, 14.33355, 14.34853, 14.36281, 14.37667, 14.39011, 14.40334, 14.41657, 14.42994, 14.44359, 14.45759, 14.47222, 14.48748, 14.50358, 14.52052, 14.53844, 14.55755, 14.57778, 14.59934, 14.62216, 14.64645, 14.67221, 14.69951, 14.72849, 14.75894, 14.79121, 14.82502, 14.86058, 14.89796, 14.93695, 14.97783, 15.02025, 15.06442, 15.11041, 15.15787, 15.20708, 15.25783, 15.31026, 15.36409, 15.41932, 15.47602, 15.53384, 15.5932, 15.65347, 15.715, 15.77744, 15.84086, 15.90505, 15.97001, 16.03539, 16.10147, 16.16783, 16.23454, 16.30146, 16.36824, 16.43502, 16.50159, 16.56781, 16.63354, 16.69857, 16.76297, 16.82625, 16.88862, 16.94973, 17.00951, 17.06761, 17.12403, 17.1787, 17.23127, 17.28167, 17.32955, 17.37491, 17.41754, 17.45723, 17.49363, 17.52688, 17.55642, 17.58218, 17.60416, 17.62166, 17.63468, 17.64315, 17.64665, 17.6449, 17.6379 ),
    # ampCut_barrelM is the mirror (reversed) of ampCut_barrelP.
    ampCut_barrelM = cms.vdouble( 17.6379, 17.6449, 17.64665, 17.64315, 17.63468, 17.62166, 17.60416, 17.58218, 17.55642, 17.52688, 17.49363, 17.45723, 17.41754, 17.37491, 17.32955, 17.28167, 17.23127, 17.1787, 17.12403, 17.06761, 17.00951, 16.94973, 16.88862, 16.82625, 16.76297, 16.69857, 16.63354, 16.56781, 16.50159, 16.43502, 16.36824, 16.30146, 16.23454, 16.16783, 16.10147, 16.03539, 15.97001, 15.90505, 15.84086, 15.77744, 15.715, 15.65347, 15.5932, 15.53384, 15.47602, 15.41932, 15.36409, 15.31026, 15.25783, 15.20708, 15.15787, 15.11041, 15.06442, 15.02025, 14.97783, 14.93695, 14.89796, 14.86058, 14.82502, 14.79121, 14.75894, 14.72849, 14.69951, 14.67221, 14.64645, 14.62216, 14.59934, 14.57778, 14.55755, 14.53844, 14.52052, 14.50358, 14.48748, 14.47222, 14.45759, 14.44359, 14.42994, 14.41657, 14.40334, 14.39011, 14.37667, 14.36281, 14.34853, 14.33355, 14.31759 ),
    ampCut_endcapP = cms.vdouble( 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5 ),
    ampCut_endcapM = cms.vdouble( 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 11.5, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0 ),
    phiSymBarrelDigiCollection = cms.string( "phiSymEcalDigisEB" ),
    phiSymEndcapDigiCollection = cms.string( "phiSymEcalDigisEE" )
)
# --- L1 seeds, RPC muon normalisation, lumi-pixels, event summaries ---
# L1 seed: first collision in orbit (used by the abort-gap zero-bias path).
process.hltL1sL1ZeroBiasFirstCollisionAfterAbortGap = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_FirstCollisionInOrbit" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreZeroBiasFirstCollisionAfterAbortGap = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# L1 seed: OR of single-muon thresholds from 7 to 30 GeV.
process.hltL1sSingleMu7to30 = cms.EDFilter( "HLTL1TSeed",
    saveTags = cms.bool( True ),
    L1SeedsLogicalExpression = cms.string( "L1_SingleMu7 OR L1_SingleMu12 OR L1_SingleMu16 OR L1_SingleMu18 OR L1_SingleMu20 OR L1_SingleMu22 OR L1_SingleMu25 OR L1_SingleMu30" ),
    L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
    L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
    L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
    L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
    L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
    L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreAlCaHIRPCMuonNormalisation = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# L1 muon confirmation for RPC normalisation: |eta| < 1.6 (RPC barrel/overlap
# acceptance), no pt cut (MinPt = 0), at least one candidate.
process.hltHIRPCMuonNormaL1Filtered0 = cms.EDFilter( "HLTMuonL1TFilter",
    saveTags = cms.bool( True ),
    CandTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
    PreviousCandTag = cms.InputTag( "hltL1sSingleMu7to30" ),
    MaxEta = cms.double( 1.6 ),
    MinPt = cms.double( 0.0 ),
    MinN = cms.int32( 1 ),
    CentralBxOnly = cms.bool( True ),
    SelectQualities = cms.vint32( )
)
process.hltPreAlCaLumiPixelsCountsRandom = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Requires the pixel tracker HV to be on (reads DCS status / metadata records).
process.hltPixelTrackerHVOn = cms.EDFilter( "DetectorStateFilter",
    DebugOn = cms.untracked.bool( False ),
    DetectorType = cms.untracked.string( "pixel" ),
    DcsStatusLabel = cms.untracked.InputTag( "hltScalersRawToDigi" ),
    DCSRecordLabel = cms.untracked.InputTag( "hltOnlineMetaDataDigis" )
)
# Per-event pixel cluster counts for the luminosity (PCC) measurement.
process.hltAlcaPixelClusterCounts = cms.EDProducer( "AlcaPCCEventProducer",
    pixelClusterLabel = cms.InputTag( "hltSiPixelClusters" ),
    trigstring = cms.untracked.string( "alcaPCCEvent" )
)
process.hltPreAlCaLumiPixelsCountsZeroBias = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Keeps only FEDs 1023/1024 (DAQ/TCDS bookkeeping FEDs) from the raw data.
process.hltFEDSelector = cms.EDProducer( "EvFFEDSelector",
    inputTag = cms.InputTag( "rawDataCollector" ),
    fedList = cms.vuint32( 1023, 1024 )
)
# Trigger summary (AOD flavour) collecting objects from all hlt* modules of
# the current process ("@" = current process name).
process.hltTriggerSummaryAOD = cms.EDProducer( "TriggerSummaryProducerAOD",
    throw = cms.bool( False ),
    processName = cms.string( "@" ),
    moduleLabelPatternsToMatch = cms.vstring( 'hlt*' ),
    moduleLabelPatternsToSkip = cms.vstring( )
)
# Trigger summary (RAW flavour) for the current process.
process.hltTriggerSummaryRAW = cms.EDProducer( "TriggerSummaryProducerRAW",
    processName = cms.string( "@" )
)
process.hltPreHLTAnalyzerEndpath = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# End-of-job L1 trigger summary printout (central BX only; summary enabled,
# per-event dumps disabled).
process.hltL1TGlobalSummary = cms.EDAnalyzer( "L1TGlobalSummary",
    AlgInputTag = cms.InputTag( "hltGtStage2Digis" ),
    ExtInputTag = cms.InputTag( "hltGtStage2Digis" ),
    MinBx = cms.int32( 0 ),
    MaxBx = cms.int32( 0 ),
    DumpTrigResults = cms.bool( False ),
    DumpRecord = cms.bool( False ),
    DumpTrigSummary = cms.bool( True ),
    ReadPrescalesFromFile = cms.bool( False ),
    psFileName = cms.string( "prescale_L1TGlobal.csv" ),
    psColumn = cms.int32( 0 )
)
# End-of-job HLT path report, normalised to the HLTriggerFinalPath rate.
process.hltTrigReport = cms.EDAnalyzer( "HLTrigReport",
    HLTriggerResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    reportBy = cms.untracked.string( "job" ),
    resetBy = cms.untracked.string( "never" ),
    serviceBy = cms.untracked.string( "never" ),
    ReferencePath = cms.untracked.string( "HLTriggerFinalPath" ),
    ReferenceRate = cms.untracked.double( 100.0 )
)
# --- Output-stream gating ---
# One HLTPrescaler per output stream (all with offset 0), plus
# TriggerResultsFilter "smart" selectors that sub-sample specific HLT paths
# for the DQM/Express-type streams. The 'path / N' syntax in the trigger
# conditions applies an additional prescale of N to that path's accepts.
process.hltPrePhysicsCommissioningOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsEGammaOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsEndOfFillOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHadronsTausOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsMuonsOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsTracksOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsForwardOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreDQMOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Smart selection for the DQM stream: sub-samples the listed paths.
process.hltPreDQMOutputSmart = cms.EDFilter( "TriggerResultsFilter",
    usePathStatus = cms.bool( False ),
    hltResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    l1tResults = cms.InputTag( "" ),
    l1tIgnoreMaskAndPrescale = cms.bool( False ),
    throw = cms.bool( True ),
    triggerConditions = cms.vstring( '( HLT_Random_v3 OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE OR FALSE ) / 10',
    'HLT_Physics_v7',
    'HLT_Random_v3 / 3',
    'HLT_ZeroBias_v6 / 3',
    'HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2 / 3',
    'HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2 / 3',
    'HLT_HIL1NotBptxORForPPRef_v2 / 3',
    'HLT_HIZeroBias_part0_v6 / 3',
    'HLT_HIZeroBias_part1_v6 / 3',
    'HLT_HIZeroBias_part2_v6 / 3',
    'HLT_HIZeroBias_part3_v6 / 3',
    'HLT_HIZeroBias_part4_v6 / 3',
    'HLT_HIZeroBias_part5_v6 / 3',
    'HLT_HIZeroBias_part6_v6 / 3',
    'HLT_HIZeroBias_part7_v6 / 3',
    'HLT_HIZeroBias_part8_v6 / 3',
    'HLT_HIZeroBias_part9_v6 / 3',
    'HLT_HIZeroBias_part10_v6 / 3',
    'HLT_HIZeroBias_part11_v6 / 3',
    'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5 / 3' )
)
process.hltPreDQMOnlineBeamspotOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Smart selection: only the beamspot-related paths feed the online-beamspot
# DQM stream.
process.hltPreDQMOnlineBeamspotOutputSmart = cms.EDFilter( "TriggerResultsFilter",
    usePathStatus = cms.bool( False ),
    hltResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    l1tResults = cms.InputTag( "" ),
    l1tIgnoreMaskAndPrescale = cms.bool( False ),
    throw = cms.bool( True ),
    triggerConditions = cms.vstring( 'HLT_ZeroBias_Beamspot_v4',
    'HLT_HIHT80_Beamspot_ppRef5TeV_v3' )
)
process.hltPreDQMCalibrationOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreDQMEventDisplayOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Empty trigger-condition list: this smart filter currently selects nothing.
process.hltPreDQMEventDisplayOutputSmart = cms.EDFilter( "TriggerResultsFilter",
    usePathStatus = cms.bool( False ),
    hltResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    l1tResults = cms.InputTag( "" ),
    l1tIgnoreMaskAndPrescale = cms.bool( False ),
    throw = cms.bool( True ),
    triggerConditions = cms.vstring( )
)
process.hltPreHLTMonitorOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Empty trigger-condition list: selects nothing in this menu version.
process.hltPreHLTMonitorOutputSmart = cms.EDFilter( "TriggerResultsFilter",
    usePathStatus = cms.bool( False ),
    hltResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    l1tResults = cms.InputTag( "" ),
    l1tIgnoreMaskAndPrescale = cms.bool( False ),
    throw = cms.bool( True ),
    triggerConditions = cms.vstring( )
)
process.hltPreRPCMONOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreCalibrationOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreEcalCalibrationOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreALCAPHISYMOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreALCALumiPixelCountsExpressOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreALCALumiPixelCountsPromptOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreALCAP0Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPreExpressOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Smart selection for the Express stream.
process.hltPreExpressOutputSmart = cms.EDFilter( "TriggerResultsFilter",
    usePathStatus = cms.bool( False ),
    hltResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    l1tResults = cms.InputTag( "" ),
    l1tIgnoreMaskAndPrescale = cms.bool( False ),
    throw = cms.bool( True ),
    triggerConditions = cms.vstring( 'HLT_Physics_v7',
    'HLT_Random_v3',
    'HLT_ZeroBias_v6',
    'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5' )
)
process.hltPreExpressAlignmentOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# Smart selection for the Express-alignment stream (beamspot paths).
process.hltPreExpressAlignmentOutputSmart = cms.EDFilter( "TriggerResultsFilter",
    usePathStatus = cms.bool( False ),
    hltResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
    l1tResults = cms.InputTag( "" ),
    l1tIgnoreMaskAndPrescale = cms.bool( False ),
    throw = cms.bool( True ),
    triggerConditions = cms.vstring( 'HLT_ZeroBias_Beamspot_v4',
    'HLT_HIHT80_Beamspot_ppRef5TeV_v3' )
)
process.hltPreNanoDSTOutput = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHIZeroBias1Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHIZeroBias2Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHIZeroBias3Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHIZeroBias4Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHIZeroBias5Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltPrePhysicsHIZeroBias6Output = cms.EDFilter( "HLTPrescaler",
    offset = cms.uint32( 0 ),
    L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
# --- CPU/GPU switch producers (SwitchProducerCUDA is defined earlier in this
#     file; only the 'cpu' branch is configured here) ---
# CPU branch reports False, i.e. no GPU in use on this branch.
process.statusOnGPU = SwitchProducerCUDA(
  cpu = cms.EDProducer( "BooleanProducer",
    value = cms.bool( False )
  ),
)
# CPU branch of the ECAL digis: an EDAlias re-exporting the products of the
# legacy unpacker (hltEcalDigisLegacy) under this module label, so downstream
# consumers can use 'hltEcalDigis' regardless of the selected branch.
process.hltEcalDigis = SwitchProducerCUDA(
  cpu = cms.EDAlias(
    hltEcalDigisLegacy = cms.VPSet(
      cms.PSet(  type = cms.string( "EBDigiCollection" ) ),
      cms.PSet(  type = cms.string( "EEDigiCollection" ) ),
      cms.PSet(  type = cms.string( "EBDetIdedmEDCollection" ) ),
      cms.PSet(  type = cms.string( "EEDetIdedmEDCollection" ) ),
      cms.PSet(  type = cms.string( "EBSrFlagsSorted" ) ),
      cms.PSet(  type = cms.string( "EESrFlagsSorted" ) ),
      cms.PSet(  type = cms.string( "EcalElectronicsIdedmEDCollection" ),
        fromProductInstance = cms.string( "EcalIntegrityBlockSizeErrors" )
      ),
      cms.PSet(  type = cms.string( "EcalElectronicsIdedmEDCollection" ),
        fromProductInstance = cms.string( "EcalIntegrityTTIdErrors" )
      ),
      cms.PSet(  type = cms.string( "EcalElectronicsIdedmEDCollection" ),
        fromProductInstance = cms.string( "EcalIntegrityZSXtalIdErrors" )
      ),
      cms.PSet(  type = cms.string( "EcalPnDiodeDigisSorted" ) ),
      cms.PSet(  type = cms.string( "EcalPseudoStripInputDigisSorted" ),
        fromProductInstance = cms.string( "EcalPseudoStripInputs" )
      ),
      cms.PSet(  type = cms.string( "EcalTriggerPrimitiveDigisSorted" ),
        fromProductInstance = cms.string( "EcalTriggerPrimitives" )
      )
    )
  ),
)
# CPU branch of the ECAL uncalibrated-rechit producer.  Runs the MultiFit
# amplitude reconstruction (EcalUncalibRecHitWorkerMultiFit) on the EB/EE
# digis from hltEcalDigis; timealgo "None" disables the time reconstruction.
# The algoPSet carries per-detector fit limits, out-of-time thresholds per
# gain, noise-correlation matrices and pulse-shape templates/covariances
# (the EcalPulseShapeParameters sub-PSet).
process.hltEcalUncalibRecHit = SwitchProducerCUDA(
 cpu = cms.EDProducer( "EcalUncalibRecHitProducer",
     EBdigiCollection = cms.InputTag( 'hltEcalDigis','ebDigis' ),
     EEhitCollection = cms.string( "EcalUncalibRecHitsEE" ),
     EEdigiCollection = cms.InputTag( 'hltEcalDigis','eeDigis' ),
     EBhitCollection = cms.string( "EcalUncalibRecHitsEB" ),
     algo = cms.string( "EcalUncalibRecHitWorkerMultiFit" ),
     algoPSet = cms.PSet(
       ebSpikeThreshold = cms.double( 1.042 ),
       EBtimeFitLimits_Upper = cms.double( 1.4 ),
       EEtimeFitLimits_Lower = cms.double( 0.2 ),
       timealgo = cms.string( "None" ),
       EBtimeNconst = cms.double( 28.5 ),
       prefitMaxChiSqEE = cms.double( 10.0 ),
       outOfTimeThresholdGain12mEB = cms.double( 5.0 ),
       outOfTimeThresholdGain12mEE = cms.double( 1000.0 ),
       EEtimeFitParameters = cms.vdouble( -2.390548, 3.553628, -17.62341, 67.67538, -133.213, 140.7432, -75.41106, 16.20277 ),
       prefitMaxChiSqEB = cms.double( 25.0 ),
       simplifiedNoiseModelForGainSwitch = cms.bool( True ),
       EBtimeFitParameters = cms.vdouble( -2.015452, 3.130702, -12.3473, 41.88921, -82.83944, 91.01147, -50.35761, 11.05621 ),
       selectiveBadSampleCriteriaEB = cms.bool( False ),
       dynamicPedestalsEB = cms.bool( False ),
       useLumiInfoRunHeader = cms.bool( False ),
       EBamplitudeFitParameters = cms.vdouble( 1.138, 1.652 ),
       doPrefitEE = cms.bool( False ),
       dynamicPedestalsEE = cms.bool( False ),
       selectiveBadSampleCriteriaEE = cms.bool( False ),
       outOfTimeThresholdGain61pEE = cms.double( 1000.0 ),
       outOfTimeThresholdGain61pEB = cms.double( 5.0 ),
       activeBXs = cms.vint32( -5, -4, -3, -2, -1, 0, 1, 2, 3, 4 ),
       EcalPulseShapeParameters = cms.PSet(
         EEPulseShapeTemplate = cms.vdouble( 0.116442, 0.756246, 1.0, 0.897182, 0.686831, 0.491506, 0.344111, 0.245731, 0.174115, 0.123361, 0.0874288, 0.061957 ),
         EEdigiCollection = cms.string( "" ),
         EcalPreMixStage2 = cms.bool( False ),
         EcalPreMixStage1 = cms.bool( False ),
         EBPulseShapeCovariance = cms.vdouble( 3.001E-6, 1.233E-5, 0.0, -4.416E-6, -4.571E-6, -3.614E-6, -2.636E-6, -1.286E-6, -8.41E-7, -5.296E-7, 0.0, 0.0, 1.233E-5, 6.154E-5, 0.0, -2.2E-5, -2.309E-5, -1.838E-5, -1.373E-5, -7.334E-6, -5.088E-6, -3.745E-6, -2.428E-6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -4.416E-6, -2.2E-5, 0.0, 8.319E-6, 8.545E-6, 6.792E-6, 5.059E-6, 2.678E-6, 1.816E-6, 1.223E-6, 8.245E-7, 5.589E-7, -4.571E-6, -2.309E-5, 0.0, 8.545E-6, 9.182E-6, 7.219E-6, 5.388E-6, 2.853E-6, 1.944E-6, 1.324E-6, 9.083E-7, 6.335E-7, -3.614E-6, -1.838E-5, 0.0, 6.792E-6, 7.219E-6, 6.016E-6, 4.437E-6, 2.385E-6, 1.636E-6, 1.118E-6, 7.754E-7, 5.556E-7, -2.636E-6, -1.373E-5, 0.0, 5.059E-6, 5.388E-6, 4.437E-6, 3.602E-6, 1.917E-6, 1.322E-6, 9.079E-7, 6.529E-7, 4.752E-7, -1.286E-6, -7.334E-6, 0.0, 2.678E-6, 2.853E-6, 2.385E-6, 1.917E-6, 1.375E-6, 9.1E-7, 6.455E-7, 4.693E-7, 3.657E-7, -8.41E-7, -5.088E-6, 0.0, 1.816E-6, 1.944E-6, 1.636E-6, 1.322E-6, 9.1E-7, 9.115E-7, 6.062E-7, 4.436E-7, 3.422E-7, -5.296E-7, -3.745E-6, 0.0, 1.223E-6, 1.324E-6, 1.118E-6, 9.079E-7, 6.455E-7, 6.062E-7, 7.217E-7, 4.862E-7, 3.768E-7, 0.0, -2.428E-6, 0.0, 8.245E-7, 9.083E-7, 7.754E-7, 6.529E-7, 4.693E-7, 4.436E-7, 4.862E-7, 6.509E-7, 4.418E-7, 0.0, 0.0, 0.0, 5.589E-7, 6.335E-7, 5.556E-7, 4.752E-7, 3.657E-7, 3.422E-7, 3.768E-7, 4.418E-7, 6.142E-7 ),
         ESdigiCollection = cms.string( "" ),
         EBdigiCollection = cms.string( "" ),
         EBCorrNoiseMatrixG01 = cms.vdouble( 1.0, 0.73354, 0.64442, 0.58851, 0.55425, 0.53082, 0.51916, 0.51097, 0.50732, 0.50409 ),
         EBCorrNoiseMatrixG12 = cms.vdouble( 1.0, 0.71073, 0.55721, 0.46089, 0.40449, 0.35931, 0.33924, 0.32439, 0.31581, 0.30481 ),
         EBCorrNoiseMatrixG06 = cms.vdouble( 1.0, 0.70946, 0.58021, 0.49846, 0.45006, 0.41366, 0.39699, 0.38478, 0.37847, 0.37055 ),
         EEPulseShapeCovariance = cms.vdouble( 3.941E-5, 3.333E-5, 0.0, -1.449E-5, -1.661E-5, -1.424E-5, -1.183E-5, -6.842E-6, -4.915E-6, -3.411E-6, 0.0, 0.0, 3.333E-5, 2.862E-5, 0.0, -1.244E-5, -1.431E-5, -1.233E-5, -1.032E-5, -5.883E-6, -4.154E-6, -2.902E-6, -2.128E-6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.449E-5, -1.244E-5, 0.0, 5.84E-6, 6.649E-6, 5.72E-6, 4.812E-6, 2.708E-6, 1.869E-6, 1.33E-6, 9.186E-7, 6.446E-7, -1.661E-5, -1.431E-5, 0.0, 6.649E-6, 7.966E-6, 6.898E-6, 5.794E-6, 3.157E-6, 2.184E-6, 1.567E-6, 1.084E-6, 7.575E-7, -1.424E-5, -1.233E-5, 0.0, 5.72E-6, 6.898E-6, 6.341E-6, 5.347E-6, 2.859E-6, 1.991E-6, 1.431E-6, 9.839E-7, 6.886E-7, -1.183E-5, -1.032E-5, 0.0, 4.812E-6, 5.794E-6, 5.347E-6, 4.854E-6, 2.628E-6, 1.809E-6, 1.289E-6, 9.02E-7, 6.146E-7, -6.842E-6, -5.883E-6, 0.0, 2.708E-6, 3.157E-6, 2.859E-6, 2.628E-6, 1.863E-6, 1.296E-6, 8.882E-7, 6.108E-7, 4.283E-7, -4.915E-6, -4.154E-6, 0.0, 1.869E-6, 2.184E-6, 1.991E-6, 1.809E-6, 1.296E-6, 1.217E-6, 8.669E-7, 5.751E-7, 3.882E-7, -3.411E-6, -2.902E-6, 0.0, 1.33E-6, 1.567E-6, 1.431E-6, 1.289E-6, 8.882E-7, 8.669E-7, 9.522E-7, 6.717E-7, 4.293E-7, 0.0, -2.128E-6, 0.0, 9.186E-7, 1.084E-6, 9.839E-7, 9.02E-7, 6.108E-7, 5.751E-7, 6.717E-7, 7.911E-7, 5.493E-7, 0.0, 0.0, 0.0, 6.446E-7, 7.575E-7, 6.886E-7, 6.146E-7, 4.283E-7, 3.882E-7, 4.293E-7, 5.493E-7, 7.027E-7 ),
         EBPulseShapeTemplate = cms.vdouble( 0.0113979, 0.758151, 1.0, 0.887744, 0.673548, 0.474332, 0.319561, 0.215144, 0.147464, 0.101087, 0.0693181, 0.0475044 ),
         EECorrNoiseMatrixG01 = cms.vdouble( 1.0, 0.72698, 0.62048, 0.55691, 0.51848, 0.49147, 0.47813, 0.47007, 0.46621, 0.46265 ),
         EECorrNoiseMatrixG12 = cms.vdouble( 1.0, 0.71373, 0.44825, 0.30152, 0.21609, 0.14786, 0.11772, 0.10165, 0.09465, 0.08098 ),
         UseLCcorrection = cms.untracked.bool( True ),
         EECorrNoiseMatrixG06 = cms.vdouble( 1.0, 0.71217, 0.47464, 0.34056, 0.26282, 0.20287, 0.17734, 0.16256, 0.15618, 0.14443 )
       ),
       doPrefitEB = cms.bool( False ),
       addPedestalUncertaintyEE = cms.double( 0.0 ),
       addPedestalUncertaintyEB = cms.double( 0.0 ),
       gainSwitchUseMaxSampleEB = cms.bool( True ),
       EEtimeNconst = cms.double( 31.8 ),
       EEamplitudeFitParameters = cms.vdouble( 1.89, 1.4 ),
       chi2ThreshEE_ = cms.double( 50.0 ),
       eePulseShape = cms.vdouble( 5.2E-5, -5.26E-5, 6.66E-5, 0.1168, 0.7575, 1.0, 0.8876, 0.6732, 0.4741, 0.3194 ),
       outOfTimeThresholdGain12pEB = cms.double( 5.0 ),
       gainSwitchUseMaxSampleEE = cms.bool( False ),
       mitigateBadSamplesEB = cms.bool( False ),
       outOfTimeThresholdGain12pEE = cms.double( 1000.0 ),
       ebPulseShape = cms.vdouble( 5.2E-5, -5.26E-5, 6.66E-5, 0.1168, 0.7575, 1.0, 0.8876, 0.6732, 0.4741, 0.3194 ),
       ampErrorCalculation = cms.bool( False ),
       mitigateBadSamplesEE = cms.bool( False ),
       amplitudeThresholdEB = cms.double( 10.0 ),
       kPoorRecoFlagEB = cms.bool( True ),
       amplitudeThresholdEE = cms.double( 10.0 ),
       EBtimeFitLimits_Lower = cms.double( 0.2 ),
       kPoorRecoFlagEE = cms.bool( False ),
       EEtimeFitLimits_Upper = cms.double( 1.4 ),
       outOfTimeThresholdGain61mEE = cms.double( 1000.0 ),
       EEtimeConstantTerm = cms.double( 1.0 ),
       EBtimeConstantTerm = cms.double( 0.6 ),
       chi2ThreshEB_ = cms.double( 65.0 ),
       outOfTimeThresholdGain61mEB = cms.double( 5.0 )
     )
 ),
)
# CPU branch of the calibrated ECAL rechit producer.  Converts the uncalibrated
# rechits from hltEcalUncalibRecHit into EcalRecHitsEB/EE, applying laser
# corrections (bounded by EB/EELaserMIN/MAX), excluding channels by DB status,
# and recovering dead towers/FEs flagged by hltEcalDetIdToBeRecovered
# (recoverEBFE/recoverEEFE True; isolated-channel and VFE recovery disabled).
# cleaningConfig holds the topological spike-cleaning thresholds; flagsMapDBReco
# maps DB channel-status codes onto rechit quality flags.
process.hltEcalRecHit = SwitchProducerCUDA(
 cpu = cms.EDProducer( "EcalRecHitProducer",
     recoverEEVFE = cms.bool( False ),
     EErechitCollection = cms.string( "EcalRecHitsEE" ),
     recoverEBIsolatedChannels = cms.bool( False ),
     recoverEBVFE = cms.bool( False ),
     laserCorrection = cms.bool( True ),
     EBLaserMIN = cms.double( 0.5 ),
     killDeadChannels = cms.bool( True ),
     dbStatusToBeExcludedEB = cms.vint32( 14, 78, 142 ),
     EEuncalibRecHitCollection = cms.InputTag( 'hltEcalUncalibRecHit','EcalUncalibRecHitsEE' ),
     dbStatusToBeExcludedEE = cms.vint32( 14, 78, 142 ),
     EELaserMIN = cms.double( 0.5 ),
     ebFEToBeRecovered = cms.InputTag( 'hltEcalDetIdToBeRecovered','ebFE' ),
     cleaningConfig = cms.PSet(
       e6e2thresh = cms.double( 0.04 ),
       tightenCrack_e6e2_double = cms.double( 3.0 ),
       e4e1Threshold_endcap = cms.double( 0.3 ),
       tightenCrack_e4e1_single = cms.double( 3.0 ),
       tightenCrack_e1_double = cms.double( 2.0 ),
       cThreshold_barrel = cms.double( 4.0 ),
       e4e1Threshold_barrel = cms.double( 0.08 ),
       tightenCrack_e1_single = cms.double( 2.0 ),
       e4e1_b_barrel = cms.double( -0.024 ),
       e4e1_a_barrel = cms.double( 0.04 ),
       ignoreOutOfTimeThresh = cms.double( 1.0E9 ),
       cThreshold_endcap = cms.double( 15.0 ),
       e4e1_b_endcap = cms.double( -0.0125 ),
       e4e1_a_endcap = cms.double( 0.02 ),
       cThreshold_double = cms.double( 10.0 )
     ),
     logWarningEtThreshold_EE_FE = cms.double( 50.0 ),
     eeDetIdToBeRecovered = cms.InputTag( 'hltEcalDetIdToBeRecovered','eeDetId' ),
     recoverEBFE = cms.bool( True ),
     eeFEToBeRecovered = cms.InputTag( 'hltEcalDetIdToBeRecovered','eeFE' ),
     ebDetIdToBeRecovered = cms.InputTag( 'hltEcalDetIdToBeRecovered','ebDetId' ),
     singleChannelRecoveryThreshold = cms.double( 8.0 ),
     sum8ChannelRecoveryThreshold = cms.double( 0.0 ),
     bdtWeightFileNoCracks = cms.FileInPath( "RecoLocalCalo/EcalDeadChannelRecoveryAlgos/data/BDTWeights/bdtgAllRH_8GT700MeV_noCracks_ZskimData2017_v1.xml" ),
     bdtWeightFileCracks = cms.FileInPath( "RecoLocalCalo/EcalDeadChannelRecoveryAlgos/data/BDTWeights/bdtgAllRH_8GT700MeV_onlyCracks_ZskimData2017_v1.xml" ),
     ChannelStatusToBeExcluded = cms.vstring(  ),
     EBrechitCollection = cms.string( "EcalRecHitsEB" ),
     triggerPrimitiveDigiCollection = cms.InputTag( 'hltEcalDigisLegacy','EcalTriggerPrimitives' ),
     recoverEEFE = cms.bool( True ),
     singleChannelRecoveryMethod = cms.string( "NeuralNetworks" ),
     EBLaserMAX = cms.double( 3.0 ),
     flagsMapDBReco = cms.PSet(
       kGood = cms.vstring( 'kOk',
         'kDAC',
         'kNoLaser',
         'kNoisy' ),
       kNeighboursRecovered = cms.vstring( 'kFixedG0',
         'kNonRespondingIsolated',
         'kDeadVFE' ),
       kDead = cms.vstring( 'kNoDataNoTP' ),
       kNoisy = cms.vstring( 'kNNoisy',
         'kFixedG6',
         'kFixedG1' ),
       kTowerRecovered = cms.vstring( 'kDeadFE' )
     ),
     EBuncalibRecHitCollection = cms.InputTag( 'hltEcalUncalibRecHit','EcalUncalibRecHitsEB' ),
     algoRecover = cms.string( "EcalRecHitWorkerRecover" ),
     algo = cms.string( "EcalRecHitWorkerSimple" ),
     EELaserMAX = cms.double( 8.0 ),
     logWarningEtThreshold_EB_FE = cms.double( 50.0 ),
     recoverEEIsolatedChannels = cms.bool( False ),
     skipTimeCalib = cms.bool( True )
 ),
)
# CPU branch of the HCAL barrel/endcap (HBHE) Phase-1 local reconstruction.
# Processes only QIE11 digis from hltHcalDigis (processQIE8 is False) and
# runs the Mahi multi-pulse fit (useMahi True; M2/M3 disabled) over the
# bunch crossings listed in activeBXs.  All noise/pulse-shape/legacy flag
# setters are off; the QIE8 flag and pulse-shape PSets below are therefore
# inert here but kept as the module requires them to be present.
process.hltHbhereco = SwitchProducerCUDA(
 cpu = cms.EDProducer( "HBHEPhase1Reconstructor",
     digiLabelQIE8 = cms.InputTag( "hltHcalDigis" ),
     processQIE8 = cms.bool( False ),
     digiLabelQIE11 = cms.InputTag( "hltHcalDigis" ),
     processQIE11 = cms.bool( True ),
     tsFromDB = cms.bool( False ),
     recoParamsFromDB = cms.bool( True ),
     saveEffectivePedestal = cms.bool( True ),
     dropZSmarkedPassed = cms.bool( True ),
     makeRecHits = cms.bool( True ),
     saveInfos = cms.bool( False ),
     saveDroppedInfos = cms.bool( False ),
     use8ts = cms.bool( True ),
     sipmQTSShift = cms.int32( 0 ),
     sipmQNTStoSum = cms.int32( 3 ),
     algorithm = cms.PSet(
       ts4Thresh = cms.double( 0.0 ),
       meanTime = cms.double( 0.0 ),
       nnlsThresh = cms.double( 1.0E-11 ),
       nMaxItersMin = cms.int32( 50 ),
       timeSigmaSiPM = cms.double( 2.5 ),
       applyTimeSlew = cms.bool( True ),
       timeSlewParsType = cms.int32( 3 ),
       ts4Max = cms.vdouble( 100.0, 20000.0, 30000.0 ),
       samplesToAdd = cms.int32( 2 ),
       deltaChiSqThresh = cms.double( 0.001 ),
       applyTimeConstraint = cms.bool( False ),
       timeSigmaHPD = cms.double( 5.0 ),
       useMahi = cms.bool( True ),
       correctForPhaseContainment = cms.bool( True ),
       respCorrM3 = cms.double( 1.0 ),
       pulseJitter = cms.double( 1.0 ),
       applyPedConstraint = cms.bool( False ),
       fitTimes = cms.int32( 1 ),
       nMaxItersNNLS = cms.int32( 500 ),
       applyTimeSlewM3 = cms.bool( True ),
       meanPed = cms.double( 0.0 ),
       ts4Min = cms.double( 0.0 ),
       applyPulseJitter = cms.bool( False ),
       useM2 = cms.bool( False ),
       timeMin = cms.double( -12.5 ),
       useM3 = cms.bool( False ),
       chiSqSwitch = cms.double( -1.0 ),
       dynamicPed = cms.bool( False ),
       tdcTimeShift = cms.double( 0.0 ),
       correctionPhaseNS = cms.double( 6.0 ),
       firstSampleShift = cms.int32( 0 ),
       activeBXs = cms.vint32( -3, -2, -1, 0, 1, 2, 3, 4 ),
       ts4chi2 = cms.vdouble( 15.0, 15.0 ),
       timeMax = cms.double( 12.5 ),
       Class = cms.string( "SimpleHBHEPhase1Algo" ),
       calculateArrivalTime = cms.bool( False ),
       applyLegacyHBMCorrection = cms.bool( False )
     ),
     algoConfigClass = cms.string( "" ),
     setNegativeFlagsQIE8 = cms.bool( False ),
     setNegativeFlagsQIE11 = cms.bool( False ),
     setNoiseFlagsQIE8 = cms.bool( False ),
     setNoiseFlagsQIE11 = cms.bool( False ),
     setPulseShapeFlagsQIE8 = cms.bool( False ),
     setPulseShapeFlagsQIE11 = cms.bool( False ),
     setLegacyFlagsQIE8 = cms.bool( False ),
     setLegacyFlagsQIE11 = cms.bool( False ),
     flagParametersQIE8 = cms.PSet(
       hitEnergyMinimum = cms.double( 1.0 ),
       pulseShapeParameterSets = cms.VPSet(
         cms.PSet(  pulseShapeParameters = cms.vdouble( 0.0, 100.0, -50.0, 0.0, -15.0, 0.15 ) ),
         cms.PSet(  pulseShapeParameters = cms.vdouble( 100.0, 2000.0, -50.0, 0.0, -5.0, 0.05 ) ),
         cms.PSet(  pulseShapeParameters = cms.vdouble( 2000.0, 1000000.0, -50.0, 0.0, 95.0, 0.0 ) ),
         cms.PSet(  pulseShapeParameters = cms.vdouble( -1000000.0, 1000000.0, 45.0, 0.1, 1000000.0, 0.0 ) )
       ),
       nominalPedestal = cms.double( 3.0 ),
       hitMultiplicityThreshold = cms.int32( 17 )
     ),
     flagParametersQIE11 = cms.PSet(  ),
     pulseShapeParametersQIE8 = cms.PSet(
       UseDualFit = cms.bool( True ),
       LinearCut = cms.vdouble( -3.0, -0.054, -0.054 ),
       TriangleIgnoreSlow = cms.bool( False ),
       TS4TS5LowerThreshold = cms.vdouble( 100.0, 120.0, 160.0, 200.0, 300.0, 500.0 ),
       LinearThreshold = cms.vdouble( 20.0, 100.0, 100000.0 ),
       RightSlopeSmallCut = cms.vdouble( 1.08, 1.16, 1.16 ),
       TS4TS5UpperThreshold = cms.vdouble( 70.0, 90.0, 100.0, 400.0 ),
       TS3TS4ChargeThreshold = cms.double( 70.0 ),
       R45PlusOneRange = cms.double( 0.2 ),
       TS4TS5LowerCut = cms.vdouble( -1.0, -0.7, -0.5, -0.4, -0.3, 0.1 ),
       RightSlopeThreshold = cms.vdouble( 250.0, 400.0, 100000.0 ),
       TS3TS4UpperChargeThreshold = cms.double( 20.0 ),
       MinimumChargeThreshold = cms.double( 20.0 ),
       RightSlopeCut = cms.vdouble( 5.0, 4.15, 4.15 ),
       RMS8MaxThreshold = cms.vdouble( 20.0, 100.0, 100000.0 ),
       MinimumTS4TS5Threshold = cms.double( 100.0 ),
       LeftSlopeThreshold = cms.vdouble( 250.0, 500.0, 100000.0 ),
       TS5TS6ChargeThreshold = cms.double( 70.0 ),
       TrianglePeakTS = cms.uint32( 10000 ),
       TS5TS6UpperChargeThreshold = cms.double( 20.0 ),
       RightSlopeSmallThreshold = cms.vdouble( 150.0, 200.0, 100000.0 ),
       RMS8MaxCut = cms.vdouble( -13.5, -11.5, -11.5 ),
       TS4TS5ChargeThreshold = cms.double( 70.0 ),
       R45MinusOneRange = cms.double( 0.2 ),
       LeftSlopeCut = cms.vdouble( 5.0, 2.55, 2.55 ),
       TS4TS5UpperCut = cms.vdouble( 1.0, 0.8, 0.75, 0.72 )
     ),
     pulseShapeParametersQIE11 = cms.PSet(  )
 ),
)
# CPU branches of the pixel local-reconstruction switch producers.  Each is a
# pure EDAlias: hltSiPixelDigis forwards the error/FED-channel products of the
# legacy pixel unpacker, hltSiPixelClusters forwards the legacy clusterizer
# output, and hltSiPixelRecHits forwards the SoA-based rechit producer output.
process.hltSiPixelDigis = SwitchProducerCUDA(
 cpu = cms.EDAlias(
     hltSiPixelDigisLegacy = cms.VPSet(
       cms.PSet(  type = cms.string( "DetIdedmEDCollection" ) ),
       cms.PSet(  type = cms.string( "SiPixelRawDataErroredmDetSetVector" ) ),
       cms.PSet(  type = cms.string( "PixelFEDChanneledmNewDetSetVector" ) )
     )
 ),
)
process.hltSiPixelClusters = SwitchProducerCUDA(
 cpu = cms.EDAlias(
     hltSiPixelClustersLegacy = cms.VPSet(
       cms.PSet(  type = cms.string( "SiPixelClusteredmNewDetSetVector" ) )
     )
 ),
)
process.hltSiPixelRecHits = SwitchProducerCUDA(
 cpu = cms.EDAlias(
     hltSiPixelRecHitSoA = cms.VPSet(
       cms.PSet(  type = cms.string( "SiPixelRecHitedmNewDetSetVector" ) ),
       cms.PSet(  type = cms.string( "uintAsHostProduct" ) )
     )
 ),
)
# CPU branch of the Patatrack pixel-track producer (CAHitNtupletCUDA with
# onGPU=False): builds track ntuplets from hltSiPixelRecHitSoA using the
# cellular-automaton doublet/triplet chain, with the geometric cuts
# (CA theta cuts, DCA cuts, hard curvature cut => ptmin ~0.9 GeV) and the
# per-category track-quality chi2/tip/zip selections in trackQualityCuts.
process.hltPixelTracksSoA = SwitchProducerCUDA(
 cpu = cms.EDProducer( "CAHitNtupletCUDA",
     onGPU = cms.bool( False ),
     pixelRecHitSrc = cms.InputTag( "hltSiPixelRecHitSoA" ),
     ptmin = cms.double( 0.899999976158 ),
     CAThetaCutBarrel = cms.double( 0.00200000009499 ),
     CAThetaCutForward = cms.double( 0.00300000002608 ),
     hardCurvCut = cms.double( 0.0328407224959 ),
     dcaCutInnerTriplet = cms.double( 0.15000000596 ),
     dcaCutOuterTriplet = cms.double( 0.25 ),
     earlyFishbone = cms.bool( True ),
     lateFishbone = cms.bool( False ),
     idealConditions = cms.bool( False ),
     fillStatistics = cms.bool( False ),
     minHitsPerNtuplet = cms.uint32( 3 ),
     maxNumberOfDoublets = cms.uint32( 524288 ),
     minHitsForSharingCut = cms.uint32( 10 ),
     includeJumpingForwardDoublets = cms.bool( True ),
     fitNas4 = cms.bool( False ),
     doClusterCut = cms.bool( True ),
     doZ0Cut = cms.bool( True ),
     doPtCut = cms.bool( True ),
     useRiemannFit = cms.bool( False ),
     doSharedHitCut = cms.bool( True ),
     dupPassThrough = cms.bool( False ),
     useSimpleTripletCleaner = cms.bool( True ),
     trackQualityCuts = cms.PSet(
       chi2MaxPt = cms.double( 10.0 ),
       tripletMaxTip = cms.double( 0.3 ),
       chi2Scale = cms.double( 8.0 ),
       quadrupletMaxTip = cms.double( 0.5 ),
       quadrupletMinPt = cms.double( 0.3 ),
       quadrupletMaxZip = cms.double( 12.0 ),
       tripletMaxZip = cms.double( 12.0 ),
       tripletMinPt = cms.double( 0.5 ),
       chi2Coeff = cms.vdouble( 0.9, 1.8 )
     )
 ),
)
# CPU branch of the Patatrack pixel-vertex producer: clusters the SoA pixel
# tracks (PtMin 0.5 GeV) into vertices using the density-based algorithm
# (useDensity True; DBSCAN and iterative alternatives disabled).
process.hltPixelVerticesSoA = SwitchProducerCUDA(
 cpu = cms.EDProducer( "PixelVertexProducerCUDA",
     onGPU = cms.bool( False ),
     oneKernel = cms.bool( True ),
     useDensity = cms.bool( True ),
     useDBSCAN = cms.bool( False ),
     useIterative = cms.bool( False ),
     minT = cms.int32( 2 ),
     eps = cms.double( 0.07 ),
     errmax = cms.double( 0.01 ),
     chi2max = cms.double( 9.0 ),
     PtMin = cms.double( 0.5 ),
     pixelTrackSrc = cms.InputTag( "hltPixelTracksSoA" )
 ),
)
# PoolOutputModule definitions for the physics/DQM/RPC monitoring streams.
# Each module writes events accepted by the paths in its SelectEvents list to
# its own ROOT file, keeping only the products matched by outputCommands
# ('drop *' first, then explicit 'keep' statements).
process.hltOutputPhysicsCommissioning = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsCommissioning.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_Physics_v7',
      'HLT_Random_v3',
      'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
      'HLT_ZeroBias_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputPhysicsEndOfFill = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsEndOfFill.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_HIL1NotBptxORForPPRef_v2',
      'HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2',
      'HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# DQM stream: keeps a broad set of HLT reconstruction products (tracking,
# calorimetry, b-tagging, e/gamma, muon) in addition to the raw data.
process.hltOutputDQM = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputDQM.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_HIL1NotBptxORForPPRef_v2',
      'HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2',
      'HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2',
      'HLT_HIZeroBias_part0_v6',
      'HLT_HIZeroBias_part10_v6',
      'HLT_HIZeroBias_part11_v6',
      'HLT_HIZeroBias_part1_v6',
      'HLT_HIZeroBias_part2_v6',
      'HLT_HIZeroBias_part3_v6',
      'HLT_HIZeroBias_part4_v6',
      'HLT_HIZeroBias_part5_v6',
      'HLT_HIZeroBias_part6_v6',
      'HLT_HIZeroBias_part7_v6',
      'HLT_HIZeroBias_part8_v6',
      'HLT_HIZeroBias_part9_v6',
      'HLT_Physics_v7',
      'HLT_Random_v3',
      'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
      'HLT_ZeroBias_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltDeepCombinedSecondaryVertexBJetTagsCalo_*_*',
      'keep *_hltDeepCombinedSecondaryVertexBJetTagsPF_*_*',
      'keep *_hltEcalRecHit_*_*',
      'keep *_hltEgammaCandidates_*_*',
      'keep *_hltEgammaGsfElectrons_*_*',
      'keep *_hltFullIterativeTrackingMergedForRefPP_*_*',
      'keep *_hltHbhereco_*_*',
      'keep *_hltHfreco_*_*',
      'keep *_hltHoreco_*_*',
      'keep *_hltIter0HighPtTkMuPixelTracks_*_*',
      'keep *_hltIter0HighPtTkMuTrackSelectionHighPurity_*_*',
      'keep *_hltIter2HighPtTkMuMerged_*_*',
      'keep *_hltIter2HighPtTkMuTrackSelectionHighPurity_*_*',
      'keep *_hltIter2Merged_*_*',
      'keep *_hltL3NoFiltersNoVtxMuonCandidates_*_*',
      'keep *_hltMergedTracks_*_*',
      'keep *_hltOnlineBeamSpot_*_*',
      'keep *_hltPFJetForBtag_*_*',
      'keep *_hltPixelTracks_*_*',
      'keep *_hltPixelVertices_*_*',
      'keep *_hltSelector8CentralJetsL1FastJet_*_*',
      'keep *_hltSiPixelClustersCache_*_*',
      'keep *_hltSiPixelClusters_*_*',
      'keep *_hltSiStripRawToClustersFacility_*_*',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# Online-beamspot DQM stream: only tracks and vertices needed for the
# beamspot fit are kept.
process.hltOutputDQMOnlineBeamspot = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputDQMOnlineBeamspot.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_HIHT80_Beamspot_ppRef5TeV_v3',
      'HLT_ZeroBias_Beamspot_v4' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep edmTriggerResults_*_*_*',
      'keep recoTracks_hltPFMuonMerging_*_*',
      'keep recoVertexs_hltVerticesPFFilter_*_*' )
)
process.hltOutputDQMCalibration = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputDQMCalibration.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_EcalCalibration_v4',
      'HLT_HcalCalibration_v5' ) ),
    outputCommands = cms.untracked.vstring( 'drop *_hlt*_*_*',
      'keep *_hltEcalCalibrationRaw_*_*',
      'keep *_hltHcalCalibrationRaw_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# RPC monitoring stream: muon-system digis and segments for RPC normalisation.
process.hltOutputRPCMON = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputRPCMON.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'AlCa_HIRPCMuonNormalisation_v1' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltCscSegments_*_*',
      'keep *_hltDt4DSegments_*_*',
      'keep *_hltGtStage2Digis_*_*',
      'keep *_hltMuonCSCDigis_MuonCSCStripDigi_*',
      'keep *_hltMuonCSCDigis_MuonCSCWireDigi_*',
      'keep *_hltMuonDTDigis_*_*',
      'keep *_hltMuonRPCDigis_*_*',
      'keep *_hltRpcRecHits_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# Calibration and AlCa (alignment/calibration) output streams: detector
# calibration raw data, ECAL phi-symmetry, lumi pixel-cluster counts, and
# the pi0/eta ECAL calibration skims.
process.hltOutputCalibration = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputCalibration.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_EcalCalibration_v4',
      'HLT_HcalCalibration_v5' ) ),
    outputCommands = cms.untracked.vstring( 'drop *_hlt*_*_*',
      'keep *_hltEcalCalibrationRaw_*_*',
      'keep *_hltHcalCalibrationRaw_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputEcalCalibration = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputEcalCalibration.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_EcalCalibration_v4' ) ),
    outputCommands = cms.untracked.vstring( 'drop *_hlt*_*_*',
      'keep *_hltEcalCalibrationRaw_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputALCAPHISYM = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputALCAPHISYM.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'AlCa_EcalPhiSym_v9' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltEcalPhiSymFilter_*_*',
      'keep *_hltGtStage2Digis_*_*',
      'keep edmTriggerResults_*_*_*' )
)
# Per-lumisection pixel-cluster counts used for luminosity measurement;
# split into express (random triggers) and prompt (zero-bias) streams.
process.hltOutputALCALumiPixelCountsExpress = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputALCALumiPixelCountsExpress.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'AlCa_LumiPixelsCounts_Random_v2' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltAlcaPixelClusterCounts_*_*',
      'keep edmTriggerResults_*_*_*' )
)
process.hltOutputALCALumiPixelCountsPrompt = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputALCALumiPixelCountsPrompt.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'AlCa_LumiPixelsCounts_ZeroBias_v2' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltAlcaPixelClusterCounts_*_*',
      'keep edmTriggerResults_*_*_*' )
)
process.hltOutputALCAP0 = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputALCAP0.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'AlCa_HIEcalEtaEBonly_v1',
      'AlCa_HIEcalEtaEEonly_v1',
      'AlCa_HIEcalPi0EBonly_v1',
      'AlCa_HIEcalPi0EEonly_v1' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltAlCaEtaEBRechitsToDigisLowPU_*_*',
      'keep *_hltAlCaEtaEBRechitsToDigis_*_*',
      'keep *_hltAlCaEtaEERechitsToDigisLowPU_*_*',
      'keep *_hltAlCaEtaEERechitsToDigis_*_*',
      'keep *_hltAlCaEtaRecHitsFilterEEonlyRegionalLowPU_etaEcalRecHitsES_*',
      'keep *_hltAlCaEtaRecHitsFilterEEonlyRegional_etaEcalRecHitsES_*',
      'keep *_hltAlCaPi0EBRechitsToDigisLowPU_*_*',
      'keep *_hltAlCaPi0EBRechitsToDigis_*_*',
      'keep *_hltAlCaPi0EERechitsToDigisLowPU_*_*',
      'keep *_hltAlCaPi0EERechitsToDigis_*_*',
      'keep *_hltAlCaPi0RecHitsFilterEEonlyRegionalLowPU_pi0EcalRecHitsES_*',
      'keep *_hltAlCaPi0RecHitsFilterEEonlyRegional_pi0EcalRecHitsES_*',
      'keep *_hltGtStage2Digis_*_*',
      'keep edmTriggerResults_*_*_*' )
)
# Express, NanoDST and the six HIZeroBias physics output streams.  The
# zero-bias streams pair up the twelve HLT_HIZeroBias_partN paths two per
# file; all keep the same RAW + trigger-summary product set.
process.hltOutputExpress = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputExpress.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_Physics_v7',
      'HLT_Random_v3',
      'HLT_ZeroBias_FirstCollisionAfterAbortGap_v5',
      'HLT_ZeroBias_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputExpressAlignment = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputExpressAlignment.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_HIHT80_Beamspot_ppRef5TeV_v3',
      'HLT_ZeroBias_Beamspot_v4' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
# NanoDST: minimal event content (selected FEDs, L1 digis, trigger results).
process.hltOutputNanoDST = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputNanoDST.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'DST_Physics_v7' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep *_hltFEDSelector_*_*',
      'keep *_hltGtStage2Digis_*_*',
      'keep edmTriggerResults_*_*_*' )
)
process.hltOutputPhysicsHIZeroBias1 = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsHIZeroBias1.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_HIZeroBias_part0_v6',
      'HLT_HIZeroBias_part1_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputPhysicsHIZeroBias2 = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsHIZeroBias2.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_HIZeroBias_part2_v6',
      'HLT_HIZeroBias_part3_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputPhysicsHIZeroBias3 = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsHIZeroBias3.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_HIZeroBias_part4_v6',
      'HLT_HIZeroBias_part5_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputPhysicsHIZeroBias4 = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsHIZeroBias4.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_HIZeroBias_part6_v6',
      'HLT_HIZeroBias_part7_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputPhysicsHIZeroBias5 = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsHIZeroBias5.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_HIZeroBias_part8_v6',
      'HLT_HIZeroBias_part9_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.hltOutputPhysicsHIZeroBias6 = cms.OutputModule( "PoolOutputModule",
    fileName = cms.untracked.string( "outputPhysicsHIZeroBias6.root" ),
    fastCloning = cms.untracked.bool( False ),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string( "" ),
        dataTier = cms.untracked.string( "RAW" )
    ),
    SelectEvents = cms.untracked.PSet(  SelectEvents = cms.vstring( 'HLT_HIZeroBias_part10_v6',
      'HLT_HIZeroBias_part11_v6' ) ),
    outputCommands = cms.untracked.vstring( 'drop *',
      'keep FEDRawDataCollection_rawDataCollector_*_*',
      'keep FEDRawDataCollection_source_*_*',
      'keep GlobalObjectMapRecord_hltGtStage2ObjectMap_*_*',
      'keep edmTriggerResults_*_*_*',
      'keep triggerTriggerEvent_*_*_*' )
)
process.HLTL1UnpackerSequence = cms.Sequence( process.hltGtStage2Digis + process.hltGtStage2ObjectMap )
process.HLTBeamSpot = cms.Sequence( process.hltScalersRawToDigi + process.hltOnlineMetaDataDigis + process.hltOnlineBeamSpot )
process.HLTBeginSequence = cms.Sequence( process.hltTriggerType + process.HLTL1UnpackerSequence + process.HLTBeamSpot )
process.HLTDoFullUnpackingEgammaEcalWithoutPreshowerSequence = cms.Sequence( process.hltEcalDigisLegacy + process.hltEcalDigis + process.hltEcalUncalibRecHit + process.hltEcalDetIdToBeRecovered + process.hltEcalRecHit )
process.HLTDoLocalHcalSequence = cms.Sequence( process.hltHcalDigis + process.hltHbhereco + process.hltHfprereco + process.hltHfreco + process.hltHoreco )
process.HLTDoCaloSequencePF = cms.Sequence( process.HLTDoFullUnpackingEgammaEcalWithoutPreshowerSequence + process.HLTDoLocalHcalSequence + process.hltTowerMakerForAll )
process.HLTAK4CaloJetsPrePFRecoSequence = cms.Sequence( process.HLTDoCaloSequencePF + process.hltAK4CaloJetsPF )
process.HLTPreAK4PFJetsRecoSequence = cms.Sequence( process.HLTAK4CaloJetsPrePFRecoSequence + process.hltAK4CaloJetsPFEt5 )
process.HLTMuonLocalRecoSequence = cms.Sequence( process.hltMuonDTDigis + process.hltDt1DRecHits + process.hltDt4DSegments + process.hltMuonCSCDigis + process.hltCsc2DRecHits + process.hltCscSegments + process.hltMuonRPCDigis + process.hltRpcRecHits + process.hltMuonGEMDigis + process.hltGemRecHits + process.hltGemSegments )
process.HLTL2muonrecoNocandSequence = cms.Sequence( process.HLTMuonLocalRecoSequence + process.hltL2OfflineMuonSeeds + process.hltL2MuonSeeds + process.hltL2Muons )
process.HLTL2muonrecoSequence = cms.Sequence( process.HLTL2muonrecoNocandSequence + process.hltL2MuonCandidates )
process.HLTDoLocalPixelSequence = cms.Sequence( process.hltSiPixelDigisLegacy + process.hltSiPixelDigis + process.hltSiPixelClustersLegacy + process.hltSiPixelClusters + process.hltSiPixelClustersCache + process.hltSiPixelRecHitSoA + process.hltSiPixelRecHits )
process.HLTDoLocalStripSequence = cms.Sequence( process.hltSiStripExcludedFEDListProducer + process.hltSiStripRawToClustersFacility + process.hltSiStripClusters )
process.HLTIterL3OImuonTkCandidateSequence = cms.Sequence( process.hltIterL3OISeedsFromL2Muons + process.hltIterL3OITrackCandidates + process.hltIterL3OIMuCtfWithMaterialTracks + process.hltIterL3OIMuonTrackCutClassifier + process.hltIterL3OIMuonTrackSelectionHighPurity + process.hltL3MuonsIterL3OI )
process.HLTIterL3MuonRecoPixelTracksSequence = cms.Sequence( process.hltIterL3MuonPixelTracksFilter + process.hltIterL3MuonPixelTracksFitter + process.hltIterL3MuonPixelTracksTrackingRegions + process.hltIterL3MuonPixelLayerQuadruplets + process.hltIterL3MuonPixelTracksHitDoublets + process.hltIterL3MuonPixelTracksHitQuadruplets + process.hltIterL3MuonPixelTracks )
process.HLTIterL3MuonRecopixelvertexingSequence = cms.Sequence( process.HLTIterL3MuonRecoPixelTracksSequence + process.hltIterL3MuonPixelVertices + process.hltIterL3MuonTrimmedPixelVertices )
process.HLTIterativeTrackingIteration0ForIterL3Muon = cms.Sequence( process.hltIter0IterL3MuonPixelSeedsFromPixelTracks + process.hltIter0IterL3MuonCkfTrackCandidates + process.hltIter0IterL3MuonCtfWithMaterialTracks + process.hltIter0IterL3MuonTrackCutClassifier + process.hltIter0IterL3MuonTrackSelectionHighPurity )
process.HLTIterativeTrackingIteration2ForIterL3Muon = cms.Sequence( process.hltIter2IterL3MuonClustersRefRemoval + process.hltIter2IterL3MuonMaskedMeasurementTrackerEvent + process.hltIter2IterL3MuonPixelLayerTriplets + process.hltIter2IterL3MuonPixelClusterCheck + process.hltIter2IterL3MuonPixelHitDoublets + process.hltIter2IterL3MuonPixelHitTriplets + process.hltIter2IterL3MuonPixelSeeds + process.hltIter2IterL3MuonCkfTrackCandidates + process.hltIter2IterL3MuonCtfWithMaterialTracks + process.hltIter2IterL3MuonTrackCutClassifier + process.hltIter2IterL3MuonTrackSelectionHighPurity )
process.HLTIterativeTrackingIteration3ForIterL3Muon = cms.Sequence( process.hltIter3IterL3MuonClustersRefRemoval + process.hltIter3IterL3MuonMaskedMeasurementTrackerEvent + process.hltIter3IterL3MuonPixelLayerPairs + process.hltIter3IterL3MuonL2Candidates + process.hltIter3IterL3MuonTrackingRegions + process.hltIter3IterL3MuonPixelClusterCheck + process.hltIter3IterL3MuonPixelHitDoublets + process.hltIter3IterL3MuonPixelSeeds + process.hltIter3IterL3MuonCkfTrackCandidates + process.hltIter3IterL3MuonCtfWithMaterialTracks + process.hltIter3IterL3MuonTrackCutClassifier + process.hltIter3IterL3MuonTrackSelectionHighPurity )
process.HLTIterativeTrackingIter023ForIterL3Muon = cms.Sequence( process.HLTIterativeTrackingIteration0ForIterL3Muon + process.HLTIterativeTrackingIteration2ForIterL3Muon + process.hltIter2IterL3MuonMerged + process.HLTIterativeTrackingIteration3ForIterL3Muon + process.hltIter3IterL3MuonMerged )
process.HLTIterL3IOmuonTkCandidateSequence = cms.Sequence( process.HLTIterL3MuonRecopixelvertexingSequence + process.HLTIterativeTrackingIter023ForIterL3Muon + process.hltL3MuonsIterL3IO )
process.HLTIterL3OIAndIOFromL2muonTkCandidateSequence = cms.Sequence( process.HLTIterL3OImuonTkCandidateSequence + process.hltIterL3OIL3MuonsLinksCombination + process.hltIterL3OIL3Muons + process.hltIterL3OIL3MuonCandidates + process.hltL2SelectorForL3IO + process.HLTIterL3IOmuonTkCandidateSequence + process.hltIterL3MuonsFromL2LinksCombination )
process.HLTRecoPixelTracksSequenceForIterL3FromL1Muon = cms.Sequence( process.hltIterL3FromL1MuonPixelTracksTrackingRegions + process.hltIterL3FromL1MuonPixelLayerQuadruplets + process.hltIterL3FromL1MuonPixelTracksHitDoublets + process.hltIterL3FromL1MuonPixelTracksHitQuadruplets + process.hltIterL3FromL1MuonPixelTracks )
process.HLTRecopixelvertexingSequenceForIterL3FromL1Muon = cms.Sequence( process.HLTRecoPixelTracksSequenceForIterL3FromL1Muon + process.hltIterL3FromL1MuonPixelVertices + process.hltIterL3FromL1MuonTrimmedPixelVertices )
process.HLTIterativeTrackingIteration0ForIterL3FromL1Muon = cms.Sequence( process.hltIter0IterL3FromL1MuonPixelSeedsFromPixelTracks + process.hltIter0IterL3FromL1MuonCkfTrackCandidates + process.hltIter0IterL3FromL1MuonCtfWithMaterialTracks + process.hltIter0IterL3FromL1MuonTrackCutClassifier + process.hltIter0IterL3FromL1MuonTrackSelectionHighPurity )
process.HLTIterativeTrackingIteration2ForIterL3FromL1Muon = cms.Sequence( process.hltIter2IterL3FromL1MuonClustersRefRemoval + process.hltIter2IterL3FromL1MuonMaskedMeasurementTrackerEvent + process.hltIter2IterL3FromL1MuonPixelLayerTriplets + process.hltIter2IterL3FromL1MuonPixelClusterCheck + process.hltIter2IterL3FromL1MuonPixelHitDoublets + process.hltIter2IterL3FromL1MuonPixelHitTriplets + process.hltIter2IterL3FromL1MuonPixelSeeds + process.hltIter2IterL3FromL1MuonCkfTrackCandidates + process.hltIter2IterL3FromL1MuonCtfWithMaterialTracks + process.hltIter2IterL3FromL1MuonTrackCutClassifier + process.hltIter2IterL3FromL1MuonTrackSelectionHighPurity )
process.HLTIterativeTrackingIteration3ForIterL3FromL1Muon = cms.Sequence( process.hltIter3IterL3FromL1MuonClustersRefRemoval + process.hltIter3IterL3FromL1MuonMaskedMeasurementTrackerEvent + process.hltIter3IterL3FromL1MuonPixelLayerPairs + process.hltIter3IterL3FromL1MuonTrackingRegions + process.hltIter3IterL3FromL1MuonPixelClusterCheck + process.hltIter3IterL3FromL1MuonPixelHitDoublets + process.hltIter3IterL3FromL1MuonPixelSeeds + process.hltIter3IterL3FromL1MuonCkfTrackCandidates + process.hltIter3IterL3FromL1MuonCtfWithMaterialTracks + process.hltIter3IterL3FromL1MuonTrackCutClassifier + process.hltIter3IterL3FromL1MuonTrackSelectionHighPurity )
process.HLTIterativeTrackingIter023ForIterL3FromL1Muon = cms.Sequence( process.HLTIterativeTrackingIteration0ForIterL3FromL1Muon + process.HLTIterativeTrackingIteration2ForIterL3FromL1Muon + process.hltIter2IterL3FromL1MuonMerged + process.HLTIterativeTrackingIteration3ForIterL3FromL1Muon + process.hltIter3IterL3FromL1MuonMerged )
process.HLTIterL3IOmuonFromL1TkCandidateSequence = cms.Sequence( process.HLTRecopixelvertexingSequenceForIterL3FromL1Muon + process.HLTIterativeTrackingIter023ForIterL3FromL1Muon )
process.HLTIterL3muonTkCandidateSequence = cms.Sequence( process.HLTDoLocalPixelSequence + process.HLTDoLocalStripSequence + process.HLTIterL3OIAndIOFromL2muonTkCandidateSequence + process.hltL1MuonsPt0 + process.HLTIterL3IOmuonFromL1TkCandidateSequence )
process.HLTL3muonrecoNocandSequence = cms.Sequence( process.HLTIterL3muonTkCandidateSequence + process.hltIterL3MuonMerged + process.hltIterL3MuonAndMuonFromL1Merged + process.hltIterL3GlbMuon + process.hltIterL3MuonsNoID + process.hltIterL3Muons + process.hltL3MuonsIterL3Links + process.hltIterL3MuonTracks )
process.HLTL3muonrecoSequence = cms.Sequence( process.HLTL3muonrecoNocandSequence + process.hltIterL3MuonCandidates )
process.HLTRecoPixelTracksSequence = cms.Sequence( process.hltPixelTracksTrackingRegions + process.hltPixelTracksSoA + process.hltPixelTracks )
process.HLTRecopixelvertexingSequence = cms.Sequence( process.hltPixelTracksFitter + process.hltPixelTracksFilter + process.HLTRecoPixelTracksSequence + process.hltPixelVerticesSoA + process.hltPixelVertices + process.hltTrimmedPixelVertices )
process.HLTIterativeTrackingIteration0 = cms.Sequence( process.hltIter0PFLowPixelSeedsFromPixelTracks + process.hltIter0PFlowCkfTrackCandidates + process.hltIter0PFlowCtfWithMaterialTracks + process.hltIter0PFlowTrackCutClassifier + process.hltMergedTracks )
process.HLTIterativeTrackingIter02 = cms.Sequence( process.HLTIterativeTrackingIteration0 )
process.HLTTrackingForBeamSpot = cms.Sequence( process.HLTPreAK4PFJetsRecoSequence + process.HLTL2muonrecoSequence + process.HLTL3muonrecoSequence + process.HLTDoLocalPixelSequence + process.HLTRecopixelvertexingSequence + process.HLTDoLocalStripSequence + process.HLTIterativeTrackingIter02 + process.hltPFMuonMerging )
process.HLTEndSequence = cms.Sequence( process.hltBoolEnd )
process.HLTBeginSequenceL1Fat = cms.Sequence( process.hltTriggerType + process.hltL1EventNumberL1Fat + process.HLTL1UnpackerSequence + process.HLTBeamSpot )
process.HLTBeginSequenceRandom = cms.Sequence( process.hltRandomEventsFilter + process.hltGtStage2Digis )
process.HLTDoCaloSequence = cms.Sequence( process.HLTDoFullUnpackingEgammaEcalWithoutPreshowerSequence + process.HLTDoLocalHcalSequence + process.hltTowerMakerForAll )
process.HLTAK4CaloJetsReconstructionSequence = cms.Sequence( process.HLTDoCaloSequence + process.hltAK4CaloJets + process.hltAK4CaloJetsIDPassed )
process.HLTAK4CaloCorrectorProducersSequence = cms.Sequence( process.hltAK4CaloFastJetCorrector + process.hltAK4CaloRelativeCorrector + process.hltAK4CaloAbsoluteCorrector + process.hltAK4CaloResidualCorrector + process.hltAK4CaloCorrector )
process.HLTAK4CaloJetsCorrectionSequence = cms.Sequence( process.hltFixedGridRhoFastjetAllCalo + process.HLTAK4CaloCorrectorProducersSequence + process.hltAK4CaloJetsCorrected + process.hltAK4CaloJetsCorrectedIDPassed )
process.HLTAK4CaloJetsSequence = cms.Sequence( process.HLTAK4CaloJetsReconstructionSequence + process.HLTAK4CaloJetsCorrectionSequence )
process.HLTPreshowerSequence = cms.Sequence( process.hltEcalPreshowerDigis + process.hltEcalPreshowerRecHit )
process.HLTDoFullUnpackingEgammaEcalSequence = cms.Sequence( process.HLTDoFullUnpackingEgammaEcalWithoutPreshowerSequence + process.HLTPreshowerSequence )
process.HLTBeginSequenceCalibration = cms.Sequence( process.hltCalibrationEventsFilter + process.hltGtStage2Digis )
process.HLTriggerFirstPath = cms.Path( process.hltGetConditions + process.hltGetRaw + process.hltPSetMap + process.hltBoolFalse )
process.Status_OnCPU = cms.Path( process.statusOnGPU + ~process.statusOnGPUFilter )
process.Status_OnGPU = cms.Path( process.statusOnGPU + process.statusOnGPUFilter )
process.HLT_ZeroBias_Beamspot_v4 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreZeroBiasBeamspot + process.HLTTrackingForBeamSpot + process.hltVerticesPF + process.hltVerticesPFSelector + process.hltVerticesPFFilter + process.HLTEndSequence )
process.HLT_Physics_v7 = cms.Path( process.HLTBeginSequenceL1Fat + process.hltPrePhysics + process.HLTEndSequence )
process.DST_Physics_v7 = cms.Path( process.HLTBeginSequence + process.hltPreDSTPhysics + process.HLTEndSequence )
process.HLT_Random_v3 = cms.Path( process.HLTBeginSequenceRandom + process.hltPreRandom + process.HLTEndSequence )
process.HLT_ZeroBias_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreZeroBias + process.HLTEndSequence )
process.HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2 = cms.Path( process.HLTBeginSequence + process.hltL1sL1UnpairedBunchBptxMinus + process.hltPreHIL1UnpairedBunchBptxMinusForPPRef + process.HLTEndSequence )
process.HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2 = cms.Path( process.HLTBeginSequence + process.hltL1sL1UnpairedBunchBptxPlus + process.hltPreHIL1UnpairedBunchBptxPlusForPPRef + process.HLTEndSequence )
process.HLT_HIL1NotBptxORForPPRef_v2 = cms.Path( process.HLTBeginSequence + process.hltL1sNotBptxOR + process.hltPreHIL1NotBptxORForPPRef + process.HLTEndSequence )
process.HLT_HIHT80_Beamspot_ppRef5TeV_v3 = cms.Path( process.HLTBeginSequence + process.hltL1sHTTForBeamSpotPP5TeV + process.hltPreHIHT80BeamspotppRef5TeV + process.HLTAK4CaloJetsSequence + process.hltHtMht + process.hltHT80 + process.HLTTrackingForBeamSpot + process.hltVerticesPF + process.hltVerticesPFSelector + process.hltVerticesPFFilter + process.HLTEndSequence )
process.HLT_HIZeroBias_part0_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart0 + process.HLTEndSequence )
process.HLT_HIZeroBias_part1_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart1 + process.HLTEndSequence )
process.HLT_HIZeroBias_part2_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart2 + process.HLTEndSequence )
process.HLT_HIZeroBias_part3_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart3 + process.HLTEndSequence )
process.HLT_HIZeroBias_part4_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart4 + process.HLTEndSequence )
process.HLT_HIZeroBias_part5_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart5 + process.HLTEndSequence )
process.HLT_HIZeroBias_part6_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart6 + process.HLTEndSequence )
process.HLT_HIZeroBias_part7_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart7 + process.HLTEndSequence )
process.HLT_HIZeroBias_part8_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart8 + process.HLTEndSequence )
process.HLT_HIZeroBias_part9_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart9 + process.HLTEndSequence )
process.HLT_HIZeroBias_part10_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart10 + process.HLTEndSequence )
process.HLT_HIZeroBias_part11_v6 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreHIZeroBiaspart11 + process.HLTEndSequence )
process.AlCa_HIEcalPi0EBonly_v1 = cms.Path( process.HLTBeginSequence + process.hltL1sAlCaHIEcalPi0Eta + process.hltPreAlCaHIEcalPi0EBonly + process.HLTDoFullUnpackingEgammaEcalSequence + process.hltSimple3x3Clusters + process.hltAlCaPi0RecHitsFilterEBonlyRegional + process.hltAlCaPi0EBUncalibrator + process.hltAlCaPi0EBRechitsToDigis + process.HLTEndSequence )
process.AlCa_HIEcalPi0EEonly_v1 = cms.Path( process.HLTBeginSequence + process.hltL1sAlCaHIEcalPi0Eta + process.hltPreAlCaHIEcalPi0EEonly + process.HLTDoFullUnpackingEgammaEcalSequence + process.hltSimple3x3Clusters + process.hltAlCaPi0RecHitsFilterEEonlyRegional + process.hltAlCaPi0EEUncalibrator + process.hltAlCaPi0EERechitsToDigis + process.HLTEndSequence )
process.AlCa_HIEcalEtaEBonly_v1 = cms.Path( process.HLTBeginSequence + process.hltL1sAlCaHIEcalPi0Eta + process.hltPreAlCaHIEcalEtaEBonly + process.HLTDoFullUnpackingEgammaEcalSequence + process.hltSimple3x3Clusters + process.hltAlCaEtaRecHitsFilterEBonlyRegional + process.hltAlCaEtaEBUncalibrator + process.hltAlCaEtaEBRechitsToDigis + process.HLTEndSequence )
process.AlCa_HIEcalEtaEEonly_v1 = cms.Path( process.HLTBeginSequence + process.hltL1sAlCaHIEcalPi0Eta + process.hltPreAlCaHIEcalEtaEEonly + process.HLTDoFullUnpackingEgammaEcalSequence + process.hltSimple3x3Clusters + process.hltAlCaEtaRecHitsFilterEEonlyRegional + process.hltAlCaEtaEEUncalibrator + process.hltAlCaEtaEERechitsToDigis + process.HLTEndSequence )
process.HLT_EcalCalibration_v4 = cms.Path( process.HLTBeginSequenceCalibration + process.hltPreEcalCalibration + process.hltEcalCalibrationRaw + process.HLTEndSequence )
process.HLT_HcalCalibration_v5 = cms.Path( process.HLTBeginSequenceCalibration + process.hltPreHcalCalibration + process.hltHcalCalibTypeFilter + process.hltHcalCalibrationRaw + process.HLTEndSequence )
process.AlCa_EcalPhiSym_v9 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBiasIorAlwaysTrueIorIsolatedBunch + process.hltPreAlCaEcalPhiSym + process.HLTDoFullUnpackingEgammaEcalSequence + process.hltEcalPhiSymFilter + process.HLTEndSequence )
process.HLT_ZeroBias_FirstCollisionAfterAbortGap_v5 = cms.Path( process.HLTBeginSequence + process.hltL1sL1ZeroBiasFirstCollisionAfterAbortGap + process.hltPreZeroBiasFirstCollisionAfterAbortGap + process.HLTEndSequence )
process.AlCa_HIRPCMuonNormalisation_v1 = cms.Path( process.HLTBeginSequence + process.hltL1sSingleMu7to30 + process.hltPreAlCaHIRPCMuonNormalisation + process.hltHIRPCMuonNormaL1Filtered0 + process.HLTMuonLocalRecoSequence + process.HLTEndSequence )
process.AlCa_LumiPixelsCounts_Random_v2 = cms.Path( process.HLTBeginSequenceRandom + process.hltPreAlCaLumiPixelsCountsRandom + process.HLTBeamSpot + process.hltPixelTrackerHVOn + process.HLTDoLocalPixelSequence + process.hltAlcaPixelClusterCounts + process.HLTEndSequence )
process.AlCa_LumiPixelsCounts_ZeroBias_v2 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreAlCaLumiPixelsCountsZeroBias + process.hltPixelTrackerHVOn + process.HLTDoLocalPixelSequence + process.hltAlcaPixelClusterCounts + process.HLTEndSequence )
process.HLTriggerFinalPath = cms.Path( process.hltGtStage2Digis + process.hltScalersRawToDigi + process.hltFEDSelector + process.hltTriggerSummaryAOD + process.hltTriggerSummaryRAW + process.hltBoolFalse )
process.HLTAnalyzerEndpath = cms.EndPath( process.hltGtStage2Digis + process.hltPreHLTAnalyzerEndpath + process.hltL1TGlobalSummary + process.hltTrigReport )
process.PhysicsCommissioningOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsCommissioningOutput + process.hltOutputPhysicsCommissioning )
process.PhysicsEGammaOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsEGammaOutput )
process.PhysicsEndOfFillOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsEndOfFillOutput + process.hltOutputPhysicsEndOfFill )
process.PhysicsHadronsTausOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHadronsTausOutput )
process.PhysicsMuonsOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsMuonsOutput )
process.PhysicsTracksOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsTracksOutput )
process.PhysicsForwardOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsForwardOutput )
process.load( "DQMServices.Core.DQMStore_cfi" )
process.dqmOutput = cms.OutputModule("DQMRootOutputModule",
fileName = cms.untracked.string("DQMIO.root")
)
process.DQMOutput = cms.EndPath( process.dqmOutput + process.hltGtStage2Digis + process.hltPreDQMOutput + process.hltPreDQMOutputSmart + process.hltOutputDQM )
process.DQMOnlineBeamspotOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreDQMOnlineBeamspotOutput + process.hltPreDQMOnlineBeamspotOutputSmart + process.hltOutputDQMOnlineBeamspot )
process.DQMCalibrationOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreDQMCalibrationOutput + process.hltOutputDQMCalibration )
process.DQMEventDisplayOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreDQMEventDisplayOutput + process.hltPreDQMEventDisplayOutputSmart )
process.HLTMonitorOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreHLTMonitorOutput + process.hltPreHLTMonitorOutputSmart )
process.RPCMONOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreRPCMONOutput + process.hltOutputRPCMON )
process.CalibrationOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreCalibrationOutput + process.hltOutputCalibration )
process.EcalCalibrationOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreEcalCalibrationOutput + process.hltOutputEcalCalibration )
process.ALCAPHISYMOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreALCAPHISYMOutput + process.hltOutputALCAPHISYM )
process.ALCALumiPixelCountsExpressOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreALCALumiPixelCountsExpressOutput + process.hltOutputALCALumiPixelCountsExpress )
process.ALCALumiPixelCountsPromptOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreALCALumiPixelCountsPromptOutput + process.hltOutputALCALumiPixelCountsPrompt )
process.ALCAP0Output = cms.EndPath( process.hltGtStage2Digis + process.hltPreALCAP0Output + process.hltOutputALCAP0 )
process.ExpressOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreExpressOutput + process.hltPreExpressOutputSmart + process.hltOutputExpress )
process.ExpressAlignmentOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreExpressAlignmentOutput + process.hltPreExpressAlignmentOutputSmart + process.hltOutputExpressAlignment )
process.NanoDSTOutput = cms.EndPath( process.hltGtStage2Digis + process.hltPreNanoDSTOutput + process.hltOutputNanoDST )
process.PhysicsHIZeroBias1Output = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHIZeroBias1Output + process.hltOutputPhysicsHIZeroBias1 )
process.PhysicsHIZeroBias2Output = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHIZeroBias2Output + process.hltOutputPhysicsHIZeroBias2 )
process.PhysicsHIZeroBias3Output = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHIZeroBias3Output + process.hltOutputPhysicsHIZeroBias3 )
process.PhysicsHIZeroBias4Output = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHIZeroBias4Output + process.hltOutputPhysicsHIZeroBias4 )
process.PhysicsHIZeroBias5Output = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHIZeroBias5Output + process.hltOutputPhysicsHIZeroBias5 )
process.PhysicsHIZeroBias6Output = cms.EndPath( process.hltGtStage2Digis + process.hltPrePhysicsHIZeroBias6Output + process.hltOutputPhysicsHIZeroBias6 )
process.schedule = cms.Schedule( *(process.HLTriggerFirstPath, process.Status_OnCPU, process.Status_OnGPU, process.HLT_ZeroBias_Beamspot_v4, process.HLT_Physics_v7, process.DST_Physics_v7, process.HLT_Random_v3, process.HLT_ZeroBias_v6, process.HLT_HIL1UnpairedBunchBptxMinusForPPRef_v2, process.HLT_HIL1UnpairedBunchBptxPlusForPPRef_v2, process.HLT_HIL1NotBptxORForPPRef_v2, process.HLT_HIHT80_Beamspot_ppRef5TeV_v3, process.HLT_HIZeroBias_part0_v6, process.HLT_HIZeroBias_part1_v6, process.HLT_HIZeroBias_part2_v6, process.HLT_HIZeroBias_part3_v6, process.HLT_HIZeroBias_part4_v6, process.HLT_HIZeroBias_part5_v6, process.HLT_HIZeroBias_part6_v6, process.HLT_HIZeroBias_part7_v6, process.HLT_HIZeroBias_part8_v6, process.HLT_HIZeroBias_part9_v6, process.HLT_HIZeroBias_part10_v6, process.HLT_HIZeroBias_part11_v6, process.AlCa_HIEcalPi0EBonly_v1, process.AlCa_HIEcalPi0EEonly_v1, process.AlCa_HIEcalEtaEBonly_v1, process.AlCa_HIEcalEtaEEonly_v1, process.HLT_EcalCalibration_v4, process.HLT_HcalCalibration_v5, process.AlCa_EcalPhiSym_v9, process.HLT_ZeroBias_FirstCollisionAfterAbortGap_v5, process.AlCa_HIRPCMuonNormalisation_v1, process.AlCa_LumiPixelsCounts_Random_v2, process.AlCa_LumiPixelsCounts_ZeroBias_v2, process.HLTriggerFinalPath, process.HLTAnalyzerEndpath, process.PhysicsCommissioningOutput, process.PhysicsEGammaOutput, process.PhysicsEndOfFillOutput, process.PhysicsHadronsTausOutput, process.PhysicsMuonsOutput, process.PhysicsTracksOutput, process.PhysicsForwardOutput, process.DQMOutput, process.DQMOnlineBeamspotOutput, process.DQMCalibrationOutput, process.DQMEventDisplayOutput, process.HLTMonitorOutput, process.RPCMONOutput, process.CalibrationOutput, process.EcalCalibrationOutput, process.ALCAPHISYMOutput, process.ALCALumiPixelCountsExpressOutput, process.ALCALumiPixelCountsPromptOutput, process.ALCAP0Output, process.ExpressOutput, process.ExpressAlignmentOutput, process.NanoDSTOutput, process.PhysicsHIZeroBias1Output, process.PhysicsHIZeroBias2Output, 
process.PhysicsHIZeroBias3Output, process.PhysicsHIZeroBias4Output, process.PhysicsHIZeroBias5Output, process.PhysicsHIZeroBias6Output, ))
process.source = cms.Source( "PoolSource",
fileNames = cms.untracked.vstring(
'file:RelVal_Raw_PRef_DATA.root',
),
inputCommands = cms.untracked.vstring(
'keep *'
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32( 100 )
)
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool( True ),
numberOfThreads = cms.untracked.uint32( 4 ),
numberOfStreams = cms.untracked.uint32( 0 ),
)
if 'GlobalTag' in process.__dict__:
from Configuration.AlCa.GlobalTag import GlobalTag as customiseGlobalTag
process.GlobalTag = customiseGlobalTag(process.GlobalTag, globaltag = 'auto:run3_hlt_PRef')
if 'MessageLogger' in process.__dict__:
process.MessageLogger.TriggerSummaryProducerAOD = cms.untracked.PSet()
process.MessageLogger.L1GtTrigReport = cms.untracked.PSet()
process.MessageLogger.L1TGlobalSummary = cms.untracked.PSet()
process.MessageLogger.HLTrigReport = cms.untracked.PSet()
process.MessageLogger.FastReport = cms.untracked.PSet()
process.MessageLogger.ThroughputService = cms.untracked.PSet()
_customInfo = {}
_customInfo['menuType' ]= "PRef"
_customInfo['globalTags']= {}
_customInfo['globalTags'][True ] = "auto:run3_hlt_PRef"
_customInfo['globalTags'][False] = "auto:run3_mc_PRef"
_customInfo['inputFiles']={}
_customInfo['inputFiles'][True] = "file:RelVal_Raw_PRef_DATA.root"
_customInfo['inputFiles'][False] = "file:RelVal_Raw_PRef_MC.root"
_customInfo['maxEvents' ]= 100
_customInfo['globalTag' ]= "auto:run3_hlt_PRef"
_customInfo['inputFile' ]= ['file:RelVal_Raw_PRef_DATA.root']
_customInfo['realData' ]= True
from HLTrigger.Configuration.customizeHLTforALL import customizeHLTforAll
process = customizeHLTforAll(process,"PRef",_customInfo)
from HLTrigger.Configuration.customizeHLTforCMSSW import customizeHLTforCMSSW
process = customizeHLTforCMSSW(process,"PRef")
from HLTrigger.Configuration.Eras import modifyHLTforEras
modifyHLTforEras(process)
| true | true |
1c3bd12c2509fbbc3cd382544adb42425da721b8 | 214 | py | Python | users/urls.py | tomuhenry/wonya-backend | d54360e8d5d5363e4bf5471fb81d732a221e37c9 | [
"MIT"
] | 3 | 2020-06-16T22:28:05.000Z | 2020-06-17T10:07:00.000Z | users/urls.py | tomuhenry/wonya-backend | d54360e8d5d5363e4bf5471fb81d732a221e37c9 | [
"MIT"
] | 3 | 2020-06-17T11:04:31.000Z | 2021-04-08T21:09:07.000Z | users/urls.py | tomuhenry/wonya-backend | d54360e8d5d5363e4bf5471fb81d732a221e37c9 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
app_name = 'users'
urlpatterns = [
path('', include('rest_auth.urls')),
path('register/', include('rest_auth.registration.urls')),
]
| 21.4 | 62 | 0.705607 | from django.contrib import admin
from django.urls import path, include
app_name = 'users'
urlpatterns = [
path('', include('rest_auth.urls')),
path('register/', include('rest_auth.registration.urls')),
]
| true | true |
1c3bd1d2a2eba796bbc6c2d572f4e7f6f5aa296d | 137 | py | Python | jsonfield/__init__.py | alexsilva/django-jsonfield | 37499674f62073baa7306fa6a9ae2721c72d2120 | [
"BSD-3-Clause"
] | null | null | null | jsonfield/__init__.py | alexsilva/django-jsonfield | 37499674f62073baa7306fa6a9ae2721c72d2120 | [
"BSD-3-Clause"
] | null | null | null | jsonfield/__init__.py | alexsilva/django-jsonfield | 37499674f62073baa7306fa6a9ae2721c72d2120 | [
"BSD-3-Clause"
] | null | null | null | import os
from jsonfield.fields import JSONField
__version__ = open(os.path.join(os.path.dirname(__file__), 'VERSION')).read().strip()
| 22.833333 | 85 | 0.759124 | import os
from jsonfield.fields import JSONField
__version__ = open(os.path.join(os.path.dirname(__file__), 'VERSION')).read().strip()
| true | true |
1c3bd36a5104a45808c96e6ecc8528009f469bec | 1,587 | py | Python | utils/views.py | dayraliz99/gmeBox | 82e7a19cf69452a469d09063146b215413db886b | [
"Apache-2.0"
] | null | null | null | utils/views.py | dayraliz99/gmeBox | 82e7a19cf69452a469d09063146b215413db886b | [
"Apache-2.0"
] | null | null | null | utils/views.py | dayraliz99/gmeBox | 82e7a19cf69452a469d09063146b215413db886b | [
"Apache-2.0"
] | null | null | null | from django.core.exceptions import PermissionDenied
class CustomUserOnlyMixin(object):
"""
Permite personalizar los permisos de acceso para los views
"""
permissions_required = None
def has_permissions(self):
if self.request.user.is_active is False:
return False
if self.request.user.is_superuser is True:
return True
groups = self.request.user.groups.all()
for permission_required in self.permissions_required:
for group in groups:
for permission in group.permissions.all():
if permission.codename == permission_required:
return True
return False
def dispatch(self, request, *args, **kwargs):
if not self.has_permissions():
raise PermissionDenied
return super(CustomUserOnlyMixin, self).dispatch(
request, *args, **kwargs)
class CustomGroupOnlyMixin(object):
"""
Permite personalizar los grupos de acceso para los views
"""
groups_required = None
def has_permissions(self):
if self.request.user.is_active is False:
return False
groups = self.request.user.groups.all()
for group_required in groups:
if group_required.name == group_required:
return True
return False
def dispatch(self, request, *args, **kwargs):
if not self.has_permissions():
raise PermissionDenied
return super(CustomGroupOnlyMixin, self).dispatch(
request, *args, **kwargs)
| 31.74 | 66 | 0.627599 | from django.core.exceptions import PermissionDenied
class CustomUserOnlyMixin(object):
permissions_required = None
def has_permissions(self):
if self.request.user.is_active is False:
return False
if self.request.user.is_superuser is True:
return True
groups = self.request.user.groups.all()
for permission_required in self.permissions_required:
for group in groups:
for permission in group.permissions.all():
if permission.codename == permission_required:
return True
return False
def dispatch(self, request, *args, **kwargs):
if not self.has_permissions():
raise PermissionDenied
return super(CustomUserOnlyMixin, self).dispatch(
request, *args, **kwargs)
class CustomGroupOnlyMixin(object):
groups_required = None
def has_permissions(self):
if self.request.user.is_active is False:
return False
groups = self.request.user.groups.all()
for group_required in groups:
if group_required.name == group_required:
return True
return False
def dispatch(self, request, *args, **kwargs):
if not self.has_permissions():
raise PermissionDenied
return super(CustomGroupOnlyMixin, self).dispatch(
request, *args, **kwargs)
| true | true |
1c3bd7d8a3f08cc0b0e10770c6b45638d2469949 | 1,934 | py | Python | AnaphoraFinder.py | Studi60Lax/Rhetorical-Analysis | b4020d9c72e63b3dfebbd88d1234de9a1e31fe82 | [
"MIT"
] | 4 | 2019-10-20T17:44:02.000Z | 2022-03-28T14:30:50.000Z | AnaphoraFinder.py | Studi60Lax/Rhetorical-Analysis | b4020d9c72e63b3dfebbd88d1234de9a1e31fe82 | [
"MIT"
] | null | null | null | AnaphoraFinder.py | Studi60Lax/Rhetorical-Analysis | b4020d9c72e63b3dfebbd88d1234de9a1e31fe82 | [
"MIT"
] | 1 | 2020-02-02T07:07:40.000Z | 2020-02-02T07:07:40.000Z | from nltk import word_tokenize
class AnaphoraFinder:
    """Count anaphora (consecutive sentences opening with the same word)
    in a text file, using NLTK's ``word_tokenize``."""

    def __init__(self):
        self.f = ""            # file handle of the document being scanned
        self.counter = 0       # running anaphora count for the current file
        self.first_word = ""   # first word of the sentence being compared
        self.b = False         # True once a sentence continues across lines

    # Path is inputted from AIPController
    # Returns anaphora counter
    def sendFile(self, path):
        """Scan the file at ``path`` line by line and return the number of
        anaphora instances found; resets the counter afterwards."""
        self.f = open(path)
        try:
            for line in self.f:
                try:
                    if self.b:
                        self.counter += self.get_all_anaphora(line)
                    else:
                        try:
                            # Seed the comparison with the line's first word.
                            self.first_word = word_tokenize(line)[0].lower()
                        except Exception:
                            # Blank/untokenizable line: nothing to seed with.
                            continue
                        self.counter += self.get_all_anaphora(line)
                except Exception:
                    continue
        finally:
            # BUGFIX: the file handle was previously never closed.
            self.f.close()
        c = self.counter
        self.counter = 0
        return c

    # Returns the length of an array which contains all
    # instances of anaphora
    def get_all_anaphora(self, line):
        """Count sentence boundaries in ``line`` where the next sentence
        starts with the same word as the previous one."""
        ana = []
        for w in word_tokenize(line)[1:]:
            try:
                new_word = self.get_next_word(line, w).lower()
            except Exception:
                # ``get_next_word`` returned False (no following token);
                # as in the original code, ``new_word`` keeps whatever
                # value it had from a previous iteration here.
                pass
            if w.find('.') != -1 or w.find('!') != -1 or w.find('?') != -1:
                if new_word == self.first_word:
                    ana.append([self.first_word, new_word])
                    self.first_word = new_word
                elif new_word == False:
                    # NOTE(review): given the except-branch above, this arm
                    # looks unreachable; kept to preserve behavior.
                    self.b = True
                else:
                    self.first_word = new_word
        return len(ana)

    # Gets the next word after the period for
    # anaphora comparison. If end-of-line and
    # would be IndexError, return False
    def get_next_word(self, line, target):
        """Return the lowercased token following ``target`` in ``line``,
        or False when ``target`` is the line's last token."""
        tokens = word_tokenize(line)[1:]
        for w in range(len(tokens)):
            if tokens[w] == target:
                try:
                    return tokens[w + 1].lower()
                except Exception:
                    return False
| 29.753846 | 75 | 0.481386 | from nltk import word_tokenize
class AnaphoraFinder:
def __init__(self):
self.f = ""
self.counter = 0
self.first_word = ""
self.b = False
def sendFile(self, path):
self.f = open(path)
for line in self.f:
try:
if self.b:
self.counter += self.get_all_anaphora(line)
else:
try:
self.first_word = word_tokenize(line)[0].lower()
except:
continue
self.counter += self.get_all_anaphora(line)
except:
continue
c = self.counter
self.counter = 0
return c
def get_all_anaphora(self, line):
ana = []
for w in word_tokenize(line)[1:]:
try:
new_word = self.get_next_word(line, w).lower()
except:
pass
if w.find('.') != -1 or w.find('!') != -1 or w.find('?') != -1:
if new_word == self.first_word:
ana.append([self.first_word, new_word])
self.first_word = new_word
elif new_word == False:
self.b = True
else:
self.first_word = new_word
return len(ana)
def get_next_word(self, line, target):
tokens = word_tokenize(line)[1:]
for w in range(len(tokens)):
if tokens[w] == target:
try:
return tokens[w+1].lower()
except:
return False
| true | true |
1c3bd8efbe0b47e5e3dd582d5599cf423a55c082 | 4,430 | py | Python | napari/_qt/tests/test_qt_dims.py | arokem/napari | e16e1163cf422d3aba6d86d1ae7dcd70a85b87dd | [
"BSD-3-Clause"
] | null | null | null | napari/_qt/tests/test_qt_dims.py | arokem/napari | e16e1163cf422d3aba6d86d1ae7dcd70a85b87dd | [
"BSD-3-Clause"
] | 1 | 2019-09-18T22:59:55.000Z | 2019-09-23T16:41:08.000Z | napari/_qt/tests/test_qt_dims.py | arokem/napari | e16e1163cf422d3aba6d86d1ae7dcd70a85b87dd | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from napari.components import Dims
from napari._qt.qt_dims import QtDims
def test_creating_view(qtbot):
    """Test creating dims view."""
    ndim = 4
    dims_model = Dims(ndim)
    dims_view = QtDims(dims_model)
    qtbot.addWidget(dims_view)
    # The dims model must be attached to the view.
    assert dims_view.dims == dims_model
    # Two dimensions are displayed, so the number of shown sliders is two
    # fewer than the total number of dimensions.
    assert dims_view.nsliders == dims_view.dims.ndim
    assert np.sum(dims_view._displayed_sliders) == dims_view.dims.ndim - 2
def test_changing_ndim(qtbot):
    """Test changing the number of dimensions."""
    dims_view = QtDims(Dims(4))
    qtbot.addWidget(dims_view)
    # Growing then shrinking the model adds/removes sliders accordingly;
    # two dimensions stay displayed (hence the "- 2").
    for new_ndim in (5, 2):
        dims_view.dims.ndim = new_ndim
        assert dims_view.nsliders == dims_view.dims.ndim
        assert np.sum(dims_view._displayed_sliders) == dims_view.dims.ndim - 2
def test_changing_display(qtbot):
    """Test changing the displayed property of an axis."""
    dims_view = QtDims(Dims(4))
    qtbot.addWidget(dims_view)
    assert dims_view.nsliders == dims_view.dims.ndim
    assert np.sum(dims_view._displayed_sliders) == dims_view.dims.ndim - 2
    # Displaying a third dimension hides one more slider.
    dims_view.dims.ndisplay = 3
    assert dims_view.nsliders == dims_view.dims.ndim
    assert np.sum(dims_view._displayed_sliders) == dims_view.dims.ndim - 3
def test_slider_values(qtbot):
    """Test the values of a slider stays matched to the values of the
    dims point."""
    dims_view = QtDims(Dims(4))
    qtbot.addWidget(dims_view)

    def slider_matches_point():
        return (dims_view.sliders[0].getValues()
                == [dims_view.dims.point[0]] * 2)

    # Matched at initialization...
    assert slider_matches_point()
    # ...after moving the point through the dims model...
    dims_view.dims.set_point(0, 2)
    assert slider_matches_point()
    # ...and after moving it through the slider widget itself.
    dims_view.sliders[0].setValue(1)
    assert slider_matches_point()
def test_slider_range(qtbot):
    """Tests range of the slider is matched to the range of the dims."""
    dims_view = QtDims(Dims(4))
    qtbot.addWidget(dims_view)

    def assert_range_matches():
        start, stop, step = dims_view.dims.range[0]
        slider = dims_view.sliders[0]
        assert slider.start == start
        assert slider.end == stop - step
        assert slider.single_step == step

    # Matched at initialization and again after the model range changes.
    assert_range_matches()
    dims_view.dims.set_range(0, (1, 5, 2))
    assert_range_matches()
def test_order_when_changing_ndim(qtbot):
    """Test order of the sliders when changing the number of dimensions."""
    dims_view = QtDims(Dims(4))
    qtbot.addWidget(dims_view)

    def assert_sliders_match_points():
        for axis in range(dims_view.dims.ndim - 2):
            expected = [dims_view.dims.point[axis]] * 2
            assert dims_view.sliders[axis].getValues() == expected

    # Seed two axes with distinct point values.
    dims_view.dims.set_point(0, 2)
    dims_view.dims.set_point(1, 1)
    assert_sliders_match_points()
    # The slider/dimension pairing must survive adding a dimension and
    # then removing dimensions again.
    for new_ndim in (5, 4, 3):
        dims_view.dims.ndim = new_ndim
        assert_sliders_match_points()
| 30.763889 | 79 | 0.669752 | import numpy as np
from napari.components import Dims
from napari._qt.qt_dims import QtDims
def test_creating_view(qtbot):
ndim = 4
dims = Dims(ndim)
view = QtDims(dims)
qtbot.addWidget(view)
assert view.dims == dims
assert view.nsliders == view.dims.ndim
assert np.sum(view._displayed_sliders) == view.dims.ndim - 2
def test_changing_ndim(qtbot):
ndim = 4
view = QtDims(Dims(ndim))
qtbot.addWidget(view)
view.dims.ndim = 5
assert view.nsliders == view.dims.ndim
assert np.sum(view._displayed_sliders) == view.dims.ndim - 2
view.dims.ndim = 2
assert view.nsliders == view.dims.ndim
assert np.sum(view._displayed_sliders) == view.dims.ndim - 2
def test_changing_display(qtbot):
ndim = 4
view = QtDims(Dims(ndim))
qtbot.addWidget(view)
assert view.nsliders == view.dims.ndim
assert np.sum(view._displayed_sliders) == view.dims.ndim - 2
view.dims.ndisplay = 3
assert view.nsliders == view.dims.ndim
assert np.sum(view._displayed_sliders) == view.dims.ndim - 3
def test_slider_values(qtbot):
ndim = 4
view = QtDims(Dims(ndim))
qtbot.addWidget(view)
assert view.sliders[0].getValues() == [view.dims.point[0]] * 2
view.dims.set_point(0, 2)
assert view.sliders[0].getValues() == [view.dims.point[0]] * 2
view.sliders[0].setValue(1)
assert view.sliders[0].getValues() == [view.dims.point[0]] * 2
def test_slider_range(qtbot):
ndim = 4
view = QtDims(Dims(ndim))
qtbot.addWidget(view)
assert view.sliders[0].start == view.dims.range[0][0]
assert view.sliders[0].end == view.dims.range[0][1] - view.dims.range[0][2]
assert view.sliders[0].single_step == view.dims.range[0][2]
view.dims.set_range(0, (1, 5, 2))
assert view.sliders[0].start == view.dims.range[0][0]
assert view.sliders[0].end == view.dims.range[0][1] - view.dims.range[0][2]
assert view.sliders[0].single_step == view.dims.range[0][2]
def test_order_when_changing_ndim(qtbot):
ndim = 4
view = QtDims(Dims(ndim))
qtbot.addWidget(view)
view.dims.set_point(0, 2)
view.dims.set_point(1, 1)
for i in range(view.dims.ndim - 2):
assert view.sliders[i].getValues() == [view.dims.point[i]] * 2
view.dims.ndim = 5
for i in range(view.dims.ndim - 2):
assert view.sliders[i].getValues() == [view.dims.point[i]] * 2
view.dims.ndim = 4
for i in range(view.dims.ndim - 2):
assert view.sliders[i].getValues() == [view.dims.point[i]] * 2
view.dims.ndim = 3
for i in range(view.dims.ndim - 2):
assert view.sliders[i].getValues() == [view.dims.point[i]] * 2
| true | true |
1c3bd95b330ff1dc43faa1814ed8f9bf4188d8ab | 686 | py | Python | i2c_responder/calc_icmpv6_chksum.py | ddgarrett/mbos | 58ca81fa518c4cbad77fb83f58120bfe1495482e | [
"MIT"
] | null | null | null | i2c_responder/calc_icmpv6_chksum.py | ddgarrett/mbos | 58ca81fa518c4cbad77fb83f58120bfe1495482e | [
"MIT"
] | null | null | null | i2c_responder/calc_icmpv6_chksum.py | ddgarrett/mbos | 58ca81fa518c4cbad77fb83f58120bfe1495482e | [
"MIT"
] | null | null | null | from struct import unpack
def calc_icmpv6_chksum(packet):
    """Calculate the ICMPv6 checksum for a packet.

    :param packet: The packet bytes to checksum.
    :returns: The checksum integer.
    """
    # Sum the packet as big-endian 16-bit words.
    total = 0
    for i in range(0, len(packet) - 1, 2):
        total += (packet[i] << 8) | packet[i + 1]
    # A trailing odd byte is padded with a zero low byte.
    if len(packet) % 2:
        total += packet[-1] << 8
    # Fold the carries back into 16 bits (end-around carry).
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    # Return the one's complement, masked to 16 bits.
    return ~total + 0x10000 & 0xffff
def calc_icmpv6_chksum(packet):
total = 0
num_words = len(packet) // 2
for chunk in unpack("!%sH" % num_words, packet[0:num_words * 2]):
total += chunk
if len(packet) % 2:
total += packet[-1] << 8
total = (total >> 16) + (total & 0xffff)
total += total >> 16
return ~total + 0x10000 & 0xffff | true | true |
1c3bdb24cdb1ae51082f0482d8a76ee462ef1546 | 4,527 | py | Python | megadoot/megadoot.py | mayanks0ni/modmail-plugins | 82df5f13d2cc016af0792f2c42314577101553d3 | [
"MIT"
] | 14 | 2019-08-09T09:59:46.000Z | 2020-09-30T10:20:46.000Z | megadoot/megadoot.py | mayanks0ni/modmail-plugins | 82df5f13d2cc016af0792f2c42314577101553d3 | [
"MIT"
] | 1,093 | 2019-11-14T09:57:33.000Z | 2021-11-08T04:56:29.000Z | megadoot/megadoot.py | mayanks0ni/modmail-plugins | 82df5f13d2cc016af0792f2c42314577101553d3 | [
"MIT"
] | 38 | 2019-08-09T16:44:44.000Z | 2022-03-20T16:45:40.000Z | import random
import discord
from discord.ext import commands
class Megadoot(commands.Cog):
    """Fun social commands for a Discord guild.

    Both commands pick a random template string and fill it with member
    names via ``str.format``.
    """
    def __init__(self, bot):
        # Reference to the running bot instance (cog convention).
        self.bot = bot
        # Fight templates: {0} is the command invoker, {1} the target
        # member, and {2} (used by one template) the guild owner.
        self.fights = [
            '{0} tried to throw a snowball at {1} but it hits Dabbit\'s car, and Dabbit is not pleased!',
            '{0} tackled {1} down with a fish.',
            '{0} fought {1}, but it was not effective...',
            '{0} tried to throw a bucket of water at {1}, but accidentally threw it all over {2}!',
            '{0} got tired of ks’ puns and tried to fight but accidentally hit {1}',
            '{0} tried to hit {1}, but {1} had a reverse card up their sleeve so {0} got hit instead',
            '{0} tried to fight {1}, but ended up being given cereal soup by Dabbit.',
            '{0} tried to attack {1}, but they slipped and crashed into Ghoul\'s car, making a huge cat shaped dent in the hood',
            '{0} tried to fight {1} but was attacked by a gang of kittens',
            '{0} challenged {1} to a race in Mario Kart but the CPU won instead!',
            '{1} dodged a mighty fine swing from {0}, and then backhanded {0} in self defense.',
            '{0} begged their pet to attack {1}, but the pet stared back with no indication of understanding.',
            '{0} fought like a dog, but {1} fought back like a bear, winning the fight!',
            'A wild {1} appears!\n{1} uses Bite! It\'s not very effective...\n{0} uses Mega Punch! It\'s very effective!\n{0} has won!',
            'As {0} ran all sweaty and tired reaching out for a last punch, {1} dashed to the side, leaving {0} tumbling onto the ground.',
            '{0} tried to modify the Dupe Bomber 3000 to take down {1} with tons of dupe reports, but Dannysaur got there first and denied them all... Which broke the machine.',
            '{0} Mega Evolved and tried to wipe out {1} with Hyper Beam! But {1} used Mimic and reversed it back onto {0} instead!',
            '{0} threw a snowball at {1} but unfortunately it hits a window at Discord HQ. Oops',
            '{0} tricked {1} into waking up the Sleeping Pizza. The Sleeping Pizza does not like being woken up, so it turned both {0} and {1} into Calzone Pizza. Rest In Pepperoni.',
            '{0} went to tackle {1}, but they did a dank meme and lowkey dabbed out of the way',
            '{0} hit the Smash ball, but fell off the stage before they could use it on {1}',
            '{0} threw a pokeball at {1}, but it was only a Goldeen'
        ]
        # Hug templates: {0} is the command invoker, {1} the target member.
        self.hugs = [
            '{0} gave {1} an awkward hug.',
            '{0} pretended to give {1} a hug, but put a "Kick Me" sign on them.',
            '{0} gave {1} a great bear hug!',
            '{1}, {0} just gave you the best hug of your life!',
            '{0} gave {1} a friendly little hug.',
            '{0} tried to give {1} a hug but was denied.',
            '{0} tackle-hugs {1}.',
            '{0} gave {1} a bog standard hug',
            '{1} accidentally reported the wrong thing so {0} gave them a hug to stop {1} from crying',
            '{0} gives {1} a cereal soupy hug',
            '{0} hugged {1} so hard, they exploded in a cloud of pepperonis',
            '{0} goes to hug {1}, what a good friendship.',
            '{0} successfully hugs {1} with the power of the Wumpus.',
            '{0} sent {1} some love, do I get some too?',
            '{1} ducked when {0} tried to hug them.',
            '{0} hugged {1} but {1} took it as an attack!',
            '{0} fills {1} with sweet love',
            '{0} gave {1} a Legacy Hug, in recognition of the legendary Dabbit Prime.',
            'Is {0} sure they want to hug {1}? Sure thing, as they just did!',
            '{0} attempts to hug {1} but Dannysaur threw a banana peel on the floor and made {0} slip',
            '{1} is confused if cereal is soup or salad, so {0} hugged {1} to calm them down'
        ]
    # Sends a random fight template, formatted with (invoker, target,
    # guild owner) names.
    @commands.command()
    async def fight(self, ctx, user: discord.Member):
        """Fight someone to show them how strong you are!"""
        await ctx.send(random.choice(self.fights).format(ctx.author.name, user.name, ctx.guild.owner.name))
    # Sends a random hug template, formatted with (invoker, target) names.
    @commands.command()
    async def hug(self, ctx, user: discord.Member):
        """Hug someone to show them how much you love them!"""
        await ctx.send(random.choice(self.hugs).format(ctx.author.name, user.name))
def setup(bot):
    """Extension entry point: register the Megadoot cog with ``bot``."""
    cog = Megadoot(bot)
    bot.add_cog(cog)
| 61.175676 | 184 | 0.578529 | import random
import discord
from discord.ext import commands
class Megadoot(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.fights = [
'{0} tried to throw a snowball at {1} but it hits Dabbit\'s car, and Dabbit is not pleased!',
'{0} tackled {1} down with a fish.',
'{0} fought {1}, but it was not effective...',
'{0} tried to throw a bucket of water at {1}, but accidentally threw it all over {2}!',
'{0} got tired of ks’ puns and tried to fight but accidentally hit {1}',
'{0} tried to hit {1}, but {1} had a reverse card up their sleeve so {0} got hit instead',
'{0} tried to fight {1}, but ended up being given cereal soup by Dabbit.',
'{0} tried to attack {1}, but they slipped and crashed into Ghoul\'s car, making a huge cat shaped dent in the hood',
'{0} tried to fight {1} but was attacked by a gang of kittens',
'{0} challenged {1} to a race in Mario Kart but the CPU won instead!',
'{1} dodged a mighty fine swing from {0}, and then backhanded {0} in self defense.',
'{0} begged their pet to attack {1}, but the pet stared back with no indication of understanding.',
'{0} fought like a dog, but {1} fought back like a bear, winning the fight!',
'A wild {1} appears!\n{1} uses Bite! It\'s not very effective...\n{0} uses Mega Punch! It\'s very effective!\n{0} has won!',
'As {0} ran all sweaty and tired reaching out for a last punch, {1} dashed to the side, leaving {0} tumbling onto the ground.',
'{0} tried to modify the Dupe Bomber 3000 to take down {1} with tons of dupe reports, but Dannysaur got there first and denied them all... Which broke the machine.',
'{0} Mega Evolved and tried to wipe out {1} with Hyper Beam! But {1} used Mimic and reversed it back onto {0} instead!',
'{0} threw a snowball at {1} but unfortunately it hits a window at Discord HQ. Oops',
'{0} tricked {1} into waking up the Sleeping Pizza. The Sleeping Pizza does not like being woken up, so it turned both {0} and {1} into Calzone Pizza. Rest In Pepperoni.',
'{0} went to tackle {1}, but they did a dank meme and lowkey dabbed out of the way',
'{0} hit the Smash ball, but fell off the stage before they could use it on {1}',
'{0} threw a pokeball at {1}, but it was only a Goldeen'
]
self.hugs = [
'{0} gave {1} an awkward hug.',
'{0} pretended to give {1} a hug, but put a "Kick Me" sign on them.',
'{0} gave {1} a great bear hug!',
'{1}, {0} just gave you the best hug of your life!',
'{0} gave {1} a friendly little hug.',
'{0} tried to give {1} a hug but was denied.',
'{0} tackle-hugs {1}.',
'{0} gave {1} a bog standard hug',
'{1} accidentally reported the wrong thing so {0} gave them a hug to stop {1} from crying',
'{0} gives {1} a cereal soupy hug',
'{0} hugged {1} so hard, they exploded in a cloud of pepperonis',
'{0} goes to hug {1}, what a good friendship.',
'{0} successfully hugs {1} with the power of the Wumpus.',
'{0} sent {1} some love, do I get some too?',
'{1} ducked when {0} tried to hug them.',
'{0} hugged {1} but {1} took it as an attack!',
'{0} fills {1} with sweet love',
'{0} gave {1} a Legacy Hug, in recognition of the legendary Dabbit Prime.',
'Is {0} sure they want to hug {1}? Sure thing, as they just did!',
'{0} attempts to hug {1} but Dannysaur threw a banana peel on the floor and made {0} slip',
'{1} is confused if cereal is soup or salad, so {0} hugged {1} to calm them down'
]
@commands.command()
async def fight(self, ctx, user: discord.Member):
await ctx.send(random.choice(self.fights).format(ctx.author.name, user.name, ctx.guild.owner.name))
@commands.command()
async def hug(self, ctx, user: discord.Member):
await ctx.send(random.choice(self.hugs).format(ctx.author.name, user.name))
def setup(bot):
bot.add_cog(Megadoot(bot))
| true | true |
1c3bdb32fc865ce079b625627d488a6b11d6b380 | 7,138 | py | Python | veriloggen/stream/scheduler.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | 232 | 2015-09-01T16:07:48.000Z | 2022-03-28T14:53:28.000Z | veriloggen/stream/scheduler.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | 34 | 2015-08-21T09:13:03.000Z | 2022-03-21T23:52:44.000Z | veriloggen/stream/scheduler.py | jesseclin/veriloggen | a645f2c53f04e5b88213eef17779d212192ea2b5 | [
"Apache-2.0"
] | 46 | 2015-09-24T14:39:57.000Z | 2022-02-23T21:59:56.000Z | from __future__ import absolute_import
from __future__ import print_function
from . import stypes
from .visitor import _Visitor
class _Scheduler(_Visitor):
    """Base class for stream pipeline schedulers."""

    def max_stage(self, *vars):
        """Combine stage numbers; delegates to ``stypes._max``."""
        return stypes._max(*vars)

    def next_stage(self, node, stage):
        """Return the stage following ``node`` when it starts at ``stage``;
        a ``stage`` of None yields 0."""
        return 0 if stage is None else stage + node.latency

    def schedule(self, nodes):
        """Assign stages to ``nodes``; must be implemented by subclasses."""
        raise NotImplementedError()
class ASAPScheduler(_Scheduler):
    """ Determine the scheduled cycle and insert delay variables to fill the gap """

    def schedule(self, nodes):
        """Assign a start/end stage to every node (deterministic order)."""
        for node in sorted(nodes, key=lambda x: x.object_id):
            self.visit(node)

    def balance_output(self, nodes, max_stage):
        """Delay each output node up to ``max_stage`` and move its output
        bindings onto the delayed node; returns the new output nodes."""
        ret = []
        for node in sorted(nodes, key=lambda x: x.object_id):
            if not node._has_output():
                continue
            r = self.fill_gap(node, max_stage)
            # Transfer the output role from the original node to the
            # stage-balanced replacement node.
            t_data = node.output_data
            t_sig_data = node.output_sig_data
            node._disable_output()
            node._disable_output_sig()
            node._set_output_node(r)
            r._disable_output()
            r._disable_output_sig()
            r.output_data = t_data
            r.output_sig_data = t_sig_data
            ret.append(r)
        return ret

    def fill_gap(self, node, end_stage):
        """Chain _Delay nodes behind ``node`` until its value is available
        at ``end_stage``; returns the node carrying the value there."""
        if end_stage is None:
            return node
        if node.end_stage is None:
            return node
        if node.end_stage == end_stage:
            return node
        if node.end_stage > end_stage:
            # BUGFIX: the format arguments were previously written as
            # ``% node.end_stage, end_stage`` which raised TypeError (only
            # one value bound to two %d) instead of this ValueError.
            raise ValueError("Illegal stage number: node.end_stage (%d) > end_stage (%d)" %
                             (node.end_stage, end_stage))
        # Delays are always chained off the original (parent) value.
        if isinstance(node, stypes._Delay) and node._get_parent_value() is not None:
            node = node._get_parent_value()
        prev = node
        cur_end_stage = prev.end_stage
        for i in range(cur_end_stage, end_stage):
            r = node._get_delayed_value(i + 1)
            if r is not None:
                # Reuse an already-created delay register for this stage.
                prev = r
                cur_end_stage += 1
                continue
            r = stypes._Delay(prev)
            r._set_start_stage(cur_end_stage)
            r._set_end_stage(cur_end_stage + 1)
            node._add_delayed_value(i + 1, r)
            r._set_parent_value(node)
            prev = r
            cur_end_stage += 1
        return prev

    def _finalize_stage(self, node, mine):
        """Common tail of every operator visit (was duplicated four times):
        record the start stage, resolve variable latency / iteration
        interval, then record and return the end stage."""
        node._set_start_stage(mine)
        if getattr(node, 'variable_latency', None):
            node.latency = getattr(node, node.variable_latency)()
        if getattr(node, 'variable_iteration_interval', None):
            node.iteration_interval = getattr(node, node.variable_iteration_interval)()
        end = self.next_stage(node, mine)
        node._set_end_stage(end)
        return end

    def visit__BinaryOperator(self, node):
        """Schedule both operands, align them, then place the operator."""
        if node._has_start_stage():
            return node._get_end_stage()
        left = self.visit(node.left)
        right = self.visit(node.right)
        mine = self.max_stage(left, right)
        if mine is None:
            mine = 0
        node.left = self.fill_gap(node.left, mine)
        node.right = self.fill_gap(node.right, mine)
        return self._finalize_stage(node, mine)

    def visit__UnaryOperator(self, node):
        """Schedule the operand, align it, then place the operator."""
        if node._has_start_stage():
            return node._get_end_stage()
        right = self.visit(node.right)
        mine = self.max_stage(right)
        if mine is None:
            mine = 0
        node.right = self.fill_gap(node.right, mine)
        return self._finalize_stage(node, mine)

    def visit__SpecialOperator(self, node):
        """Schedule all arguments, align them, then place the operator."""
        if node._has_start_stage():
            return node._get_end_stage()
        ret = []
        for var in node.args:
            var = self.visit(var)
            ret.append(var)
        mine = self.max_stage(*ret)
        if mine is None:
            mine = 0
        node.args = [self.fill_gap(var, mine) for var in node.args]
        return self._finalize_stage(node, mine)

    def visit__Accumulator(self, node):
        """Schedule every control/data input of the accumulator, align
        them to the same stage, then place the accumulator itself."""
        if node._has_start_stage():
            return node._get_end_stage()
        right = self.visit(node.right)
        size = self.visit(node.size) if node.size is not None else None
        interval = (self.visit(node.interval)
                    if node.interval is not None else None)
        initval = self.visit(node.initval)
        offset = (self.visit(node.offset)
                  if node.offset is not None else None)
        dependency = (self.visit(node.dependency)
                      if node.dependency is not None else None)
        enable = self.visit(node.enable) if node.enable is not None else None
        reset = self.visit(node.reset) if node.reset is not None else None
        mine = self.max_stage(right, size, interval, initval,
                              offset, dependency, enable, reset)
        if mine is None:
            mine = 0
        node.right = self.fill_gap(node.right, mine)
        if node.size is not None:
            node.size = self.fill_gap(node.size, mine)
        if node.interval is not None:
            node.interval = self.fill_gap(node.interval, mine)
        node.initval = self.fill_gap(node.initval, mine)
        if node.offset is not None:
            node.offset = self.fill_gap(node.offset, mine)
        if node.enable is not None:
            node.enable = self.fill_gap(node.enable, mine)
        if node.reset is not None:
            node.reset = self.fill_gap(node.reset, mine)
        # NOTE(review): ``dependency`` is visited for scheduling but, as in
        # the original code, intentionally not passed through fill_gap.
        node.reg_initval = self.fill_gap(node.reg_initval, mine)
        return self._finalize_stage(node, mine)

    def visit__ParameterVariable(self, node):
        """Parameters carry no stage information."""
        return None

    def visit__Variable(self, node):
        """A variable takes over the stage of its input data, or starts
        at stage 0 when it has no _Numeric driver."""
        if node._has_start_stage():
            return node._get_end_stage()
        if isinstance(node.input_data, stypes._Numeric):
            data = self.visit(node.input_data)
            node._set_start_stage(data)
            return data
        mine = 0
        node._set_start_stage(mine)
        end = mine
        node._set_end_stage(end)
        return end

    def visit__Constant(self, node):
        """Constants carry no stage information."""
        return None
| 36.793814 | 91 | 0.60283 | from __future__ import absolute_import
from __future__ import print_function
from . import stypes
from .visitor import _Visitor
class _Scheduler(_Visitor):
def max_stage(self, *vars):
return stypes._max(*vars)
def next_stage(self, node, stage):
if stage is None:
return 0
return stage + node.latency
def schedule(self, nodes):
raise NotImplementedError()
class ASAPScheduler(_Scheduler):
def schedule(self, nodes):
for node in sorted(nodes, key=lambda x: x.object_id):
self.visit(node)
def balance_output(self, nodes, max_stage):
ret = []
for node in sorted(nodes, key=lambda x: x.object_id):
if not node._has_output():
continue
r = self.fill_gap(node, max_stage)
t_data = node.output_data
t_sig_data = node.output_sig_data
node._disable_output()
node._disable_output_sig()
node._set_output_node(r)
r._disable_output()
r._disable_output_sig()
r.output_data = t_data
r.output_sig_data = t_sig_data
ret.append(r)
return ret
def fill_gap(self, node, end_stage):
if end_stage is None:
return node
if node.end_stage is None:
return node
if node.end_stage == end_stage:
return node
if node.end_stage > end_stage:
raise ValueError("Illegal stage number: node.end_stage (%d) > end_stage (%d)" %
node.end_stage, end_stage)
if isinstance(node, stypes._Delay) and node._get_parent_value() is not None:
node = node._get_parent_value()
prev = node
cur_end_stage = prev.end_stage
for i in range(cur_end_stage, end_stage):
r = node._get_delayed_value(i + 1)
if r is not None:
prev = r
cur_end_stage += 1
continue
r = stypes._Delay(prev)
r._set_start_stage(cur_end_stage)
r._set_end_stage(cur_end_stage + 1)
node._add_delayed_value(i + 1, r)
r._set_parent_value(node)
prev = r
cur_end_stage += 1
return prev
def visit__BinaryOperator(self, node):
if node._has_start_stage():
return node._get_end_stage()
left = self.visit(node.left)
right = self.visit(node.right)
mine = self.max_stage(left, right)
if mine is None:
mine = 0
node.left = self.fill_gap(node.left, mine)
node.right = self.fill_gap(node.right, mine)
node._set_start_stage(mine)
if getattr(node, 'variable_latency', None):
node.latency = getattr(node, node.variable_latency)()
if getattr(node, 'variable_iteration_interval', None):
node.iteration_interval = getattr(node, node.variable_iteration_interval)()
end = self.next_stage(node, mine)
node._set_end_stage(end)
return end
def visit__UnaryOperator(self, node):
if node._has_start_stage():
return node._get_end_stage()
right = self.visit(node.right)
mine = self.max_stage(right)
if mine is None:
mine = 0
node.right = self.fill_gap(node.right, mine)
node._set_start_stage(mine)
if getattr(node, 'variable_latency', None):
node.latency = getattr(node, node.variable_latency)()
if getattr(node, 'variable_iteration_interval', None):
node.iteration_interval = getattr(node, node.variable_iteration_interval)()
end = self.next_stage(node, mine)
node._set_end_stage(end)
return end
def visit__SpecialOperator(self, node):
if node._has_start_stage():
return node._get_end_stage()
ret = []
for var in node.args:
var = self.visit(var)
ret.append(var)
mine = self.max_stage(*ret)
if mine is None:
mine = 0
node.args = [self.fill_gap(var, mine) for var in node.args]
node._set_start_stage(mine)
if getattr(node, 'variable_latency', None):
node.latency = getattr(node, node.variable_latency)()
if getattr(node, 'variable_iteration_interval', None):
node.iteration_interval = getattr(node, node.variable_iteration_interval)()
end = self.next_stage(node, mine)
node._set_end_stage(end)
return end
def visit__Accumulator(self, node):
if node._has_start_stage():
return node._get_end_stage()
right = self.visit(node.right)
size = self.visit(node.size) if node.size is not None else None
interval = (self.visit(node.interval)
if node.interval is not None else None)
initval = self.visit(node.initval)
offset = (self.visit(node.offset)
if node.offset is not None else None)
dependency = (self.visit(node.dependency)
if node.dependency is not None else None)
enable = self.visit(node.enable) if node.enable is not None else None
reset = self.visit(node.reset) if node.reset is not None else None
mine = self.max_stage(right, size, interval, initval,
offset, dependency, enable, reset)
if mine is None:
mine = 0
node.right = self.fill_gap(node.right, mine)
if node.size is not None:
node.size = self.fill_gap(node.size, mine)
if node.interval is not None:
node.interval = self.fill_gap(node.interval, mine)
node.initval = self.fill_gap(node.initval, mine)
if node.offset is not None:
node.offset = self.fill_gap(node.offset, mine)
if node.enable is not None:
node.enable = self.fill_gap(node.enable, mine)
if node.reset is not None:
node.reset = self.fill_gap(node.reset, mine)
node.reg_initval = self.fill_gap(node.reg_initval, mine)
node._set_start_stage(mine)
if getattr(node, 'variable_latency', None):
node.latency = getattr(node, node.variable_latency)()
if getattr(node, 'variable_iteration_interval', None):
node.iteration_interval = getattr(node, node.variable_iteration_interval)()
end = self.next_stage(node, mine)
node._set_end_stage(end)
return end
def visit__ParameterVariable(self, node):
return None
def visit__Variable(self, node):
if node._has_start_stage():
return node._get_end_stage()
if isinstance(node.input_data, stypes._Numeric):
data = self.visit(node.input_data)
node._set_start_stage(data)
return data
mine = 0
node._set_start_stage(mine)
end = mine
node._set_end_stage(end)
return end
def visit__Constant(self, node):
return None
| true | true |
1c3bdbf32ed8ebc506854076d2d9c825108d5262 | 440 | py | Python | app.py | AcharyaRakesh/Baldness-Prediction | f97c3bf1f068b167405f3bc711a7f6630905da2e | [
"MIT"
] | null | null | null | app.py | AcharyaRakesh/Baldness-Prediction | f97c3bf1f068b167405f3bc711a7f6630905da2e | [
"MIT"
] | null | null | null | app.py | AcharyaRakesh/Baldness-Prediction | f97c3bf1f068b167405f3bc711a7f6630905da2e | [
"MIT"
] | null | null | null |
from main import load_model

# Load the trained Keras classifier from disk.
model = load_model('model.h5')

import numpy as np
from tensorflow.keras.preprocessing import image

# NOTE(review): the image path below is empty in the original source, so
# ``load_img`` cannot succeed until a real file path is supplied.
test_image = image.load_img('', target_size=(64, 64))
# Convert to an array, scale pixels into [0, 1], and add a batch axis.
test_image = image.img_to_array(test_image) / 255
test_image = np.expand_dims(test_image, axis=0)

# Binary classifier: a score at or below 0.5 is reported as "Bald".
result = model.predict(test_image)
print('image classified as Bald' if result[0] <= 0.5 else 'image is not')
from main import load_model
model = load_model('model.h5')
import numpy as np
from tensorflow.keras.preprocessing import image
test_image = image.load_img('',target_size=(64,64) )
test_image = image.img_to_array(test_image)
test_image = test_image/255
test_image=np.expand_dims(test_image,axis=0)
result = model.predict(test_image)
if result[0]<=0.5:
print('image classified as Bald')
else:
print('image is not') | true | true |
1c3bdca0bbd1a7ab4b6286aab45327684c4f6227 | 7,131 | py | Python | ze_trajectory_analysis/py/ze_trajectory_analysis/consistency_single_run.py | rockenbf/ze_oss | ee04158e2d51acb07a267196f618e9afbc3ffd83 | [
"BSD-3-Clause"
] | 30 | 2016-09-27T07:41:28.000Z | 2021-12-03T20:44:28.000Z | ze_trajectory_analysis/py/ze_trajectory_analysis/consistency_single_run.py | rockenbf/ze_oss | ee04158e2d51acb07a267196f618e9afbc3ffd83 | [
"BSD-3-Clause"
] | 1 | 2018-12-18T15:53:06.000Z | 2018-12-21T03:10:06.000Z | ze_trajectory_analysis/py/ze_trajectory_analysis/consistency_single_run.py | rockenbf/ze_oss | ee04158e2d51acb07a267196f618e9afbc3ffd83 | [
"BSD-3-Clause"
] | 12 | 2016-11-05T07:51:29.000Z | 2020-07-13T02:26:08.000Z | #!/usr/bin/python3
"""
Zurich Eye
"""
import os
import yaml
import logging
import argparse
import numpy as np
import ze_trajectory_analysis.analyse as traj_analysis
import ze_py.transformations as tf
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import vikit_py.transformations as tf
from matplotlib import rc
from matplotlib.ticker import FuncFormatter
rc('font',**{'family':'serif','serif':['Cardo']})
rc('text', usetex=True)
_EPS = np.finfo(float).eps * 4.0
FORMAT = '.pdf'
# Init logging.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.info('Trajectory alignment example.')
# Load Data
data_dir = '/home/cfo/vin_ws/src/svo_gtsam/trace/data/20160407_1459_gtsam_vicon_asl_140313_vicon_aslam_2'
ta = traj_analysis.TrajectoryAnalysis(data_dir)
ta.load_data(data_dir, data_format='svo_gtsam')
ta.apply_hand_eye_calibration_to_groundtruth()
ta.align_trajectory('first_frame')
#ta.align_trajectory(align_type='se3', first_idx=0, last_idx=100)
ta.plot_aligned_trajectory()
#ta.compute_rms_errors()
t_es = ta.t_es
p_es = ta.p_es_aligned
q_es = ta.q_es_aligned
p_gt = ta.p_gt
q_gt = ta.q_gt
distances = ta.distances
# Load covariances:
cov_data = np.genfromtxt(os.path.join(data_dir, 'estimate_covariance.csv'), delimiter=',')
t_cov = cov_data[:,0]
# Load covariance
n = len(cov_data)
# Per-sample 3-sigma bounds and errors for yaw/pitch/roll (degrees after
# the conversion at the bottom) and position (world frame), plus NEES
# (normalized estimation error squared) accumulators.
yaw_sigma_3 = np.zeros(n)
yaw_error = np.zeros(n)
roll_sigma_3 = np.zeros(n)
roll_error = np.zeros(n)
pitch_sigma_3 = np.zeros(n)
pitch_error = np.zeros(n)
error_pos_W = np.zeros((3,n))
error_pos_W_sigma_3 = np.zeros((3,n))
nees_rot = np.zeros(n)
nees_pos = np.zeros(n)
nees_se3 = np.zeros(n)
# NOTE(review): iterates a fixed 1000 samples rather than ``n`` --
# confirm the trace always has at least 1000 rows.
for i in range(1000):
    #assert t_cov[i] == t_es[i]
    # 6x6 body-frame pose covariance: rotation block [:3,:3],
    # translation block [3:6,3:6].
    Cov_T_B = np.reshape(cov_data[i,1:],(6,6))
    Cov_R_B = Cov_T_B[:3,:3]
    Cov_t_B = Cov_T_B[3:6,3:6]
    p_W_Bes = p_es[i,:]
    p_W_Bgt = p_gt[i,:]
    R_W_Bes = tf.quaternion_matrix(q_es[i,:])[:3,:3]
    R_W_Bgt = tf.quaternion_matrix(q_gt[i,:])[:3,:3]
    # Rotate the body-frame covariances into the world frame: R C R^T.
    Cov_R_W = np.dot(R_W_Bes, np.dot(Cov_R_B, np.transpose(R_W_Bes)))
    Cov_T_W = np.dot(R_W_Bes, np.dot(Cov_t_B, np.transpose(R_W_Bes)))
    # 3-sigma rotation bounds, converted to degrees.
    yaw_sigma_3[i] = np.sqrt(Cov_R_W[2,2])*3.0*180/np.pi
    pitch_sigma_3[i] = np.sqrt(Cov_R_W[1,1])*3.0*180/np.pi
    roll_sigma_3[i] = np.sqrt(Cov_R_W[0,0])*3.0*180/np.pi
    # Relative rotation ground-truth vs. estimate.
    R_Bgt_Bes = np.dot(R_W_Bgt, np.transpose(R_W_Bes))
    yaw_error[i], pitch_error[i], roll_error[i] = tf.euler_from_matrix(R_Bgt_Bes, 'rzyx')
    # compute normalized estimation error squared (in estimated body frame)
    error_rot_B = tf.logmap_so3(np.transpose(R_Bgt_Bes))
    error_pos_B = np.dot(np.transpose(R_W_Bes), (p_W_Bgt - p_W_Bes))
    error_se3_B = np.concatenate((error_rot_B, error_pos_B))
    # NEES = e^T C^-1 e for rotation, position, and full pose.
    nees_rot[i] = np.dot(error_rot_B, np.dot(np.linalg.inv(Cov_R_B), error_rot_B))
    nees_pos[i] = np.dot(error_pos_B, np.dot(np.linalg.inv(Cov_t_B), error_pos_B))
    nees_se3[i] = np.dot(error_se3_B, np.dot(np.linalg.inv(Cov_T_B), error_se3_B))
    # translation error in world coordiantes
    error_pos_W[:,i] = p_W_Bgt - p_W_Bes
    error_pos_W_sigma_3[0,i] = np.sqrt(Cov_T_W[0,0])*3.0
    error_pos_W_sigma_3[1,i] = np.sqrt(Cov_T_W[1,1])*3.0
    error_pos_W_sigma_3[2,i] = np.sqrt(Cov_T_W[2,2])*3.0
# Convert the rotation errors from radians to degrees in place.
yaw_error *= 180/np.pi
pitch_error *= 180/np.pi
roll_error *= 180/np.pi
# 6-row figure: yaw/pitch/roll errors (top) and x/y/z position errors
# (bottom), each plotted with its +/- 3-sigma envelope over distance
# travelled.
n_max = 1000
# rotation error
D = distances[:n_max]
y_lim = 5 #args.rpy_ylim
fig = plt.figure(figsize=(6,8))
gs1 = gridspec.GridSpec(3, 1)
gs1.update(wspace=0.005) # set the spacing between axes.
ax = fig.add_subplot(611, ylabel='Err. Yaw [deg]')
ax.locator_params(axis = 'y', nbins = 4)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.2f'%y))
ax.plot(D, yaw_sigma_3[:n_max], 'r-', alpha=0.5, lw=0.7)
ax.plot(D, -yaw_sigma_3[:n_max], 'r-', alpha=0.5, lw=0.7)
ax.plot(D, yaw_error[:n_max], 'r-', lw=1)
ax.set_xticks([])
ax.set_ylim([-y_lim,y_lim])
y_lim = 4 #args.rpy_ylim
ax = fig.add_subplot(612, ylabel='Err. Pitch [deg]')
ax.locator_params(axis = 'y', nbins = 4)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.2f'%y))
ax.plot(D, pitch_sigma_3[:n_max], 'g-', alpha=0.5, lw=0.7)
ax.plot(D, -pitch_sigma_3[:n_max], 'g-', alpha=0.5, lw=0.7)
ax.plot(D, pitch_error[:n_max], 'g-', lw=1)
ax.set_xticks([])
ax.set_ylim([-y_lim,y_lim])
ax = fig.add_subplot(613, ylabel='Err. Roll [deg]')
ax.locator_params(axis = 'y', nbins = 4)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.2f'%y))
ax.plot(D, roll_sigma_3[:n_max], 'b-', alpha=0.5, lw=0.7)
ax.plot(D, -roll_sigma_3[:n_max], 'b-', alpha=0.5, lw=0.7)
ax.plot(D, roll_error[:n_max], 'b-', lw=1)
ax.set_ylim([-y_lim,y_lim])
ax.set_xticks([])
# translation error
y_lim = 0.9
ax = fig.add_subplot(614, ylabel='Err. x [m]')
ax.locator_params(axis = 'y', nbins = 4)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.2f'%y))
ax.plot(D, error_pos_W_sigma_3[0,:n_max], 'r-', alpha=0.5, lw=0.7)
ax.plot(D, -error_pos_W_sigma_3[0,:n_max], 'r-', alpha=0.5, lw=0.7)
ax.plot(D, error_pos_W[0,:n_max], 'r-', lw=1)
ax.set_xticks([])
ax.set_ylim([-y_lim,y_lim])
ax = fig.add_subplot(615, ylabel='Err. y [m]')
ax.locator_params(axis = 'y', nbins = 4)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.2f'%y))
ax.plot(D, error_pos_W_sigma_3[1,:n_max], 'g-', alpha=0.5, lw=0.7)
ax.plot(D, -error_pos_W_sigma_3[1,:n_max], 'g-', alpha=0.5, lw=0.7)
ax.plot(D, error_pos_W[1,:n_max], 'g-', lw=1)
ax.set_ylim([-y_lim,y_lim])
ax.set_xticks([])
ax = fig.add_subplot(616, xlabel='Distance Travelled [m]', ylabel='Err. z [m]')
ax.locator_params(axis = 'y', nbins = 4)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.2f'%y))
ax.plot(D, error_pos_W_sigma_3[2,:n_max], 'b-', alpha=0.5, lw=0.7)
ax.plot(D, -error_pos_W_sigma_3[2,:n_max], 'b-', alpha=0.5, lw=0.7)
ax.plot(D, error_pos_W[2,:n_max], 'b-', lw=1)
ax.set_ylim([-y_lim,y_lim])
ax.tick_params('x',top='off')
fig.tight_layout()
fig.savefig(os.path.join(data_dir,'consistency_single_run'+FORMAT), bbox_inches="tight")
# write to file
# Persist the per-sample errors so other tools can re-plot them.
file_out = open(os.path.join(data_dir, 'consistency_errors.csv'), 'w')
file_out.write('# trans_error -x, -y, -z, rot_error -yaw, -pitch, -roll\n')
for i in range(len(yaw_error)):
    file_out.write(
        '%.8f, %.8f, %.8f, %.8f, %.8f, %.8f\n' %
        (error_pos_W[0,i], error_pos_W[1,i], error_pos_W[2,i], yaw_error[i], pitch_error[i], roll_error[i]))
file_out.close()
# NEES Rot and Pos
fig = plt.figure(figsize=(6,3))
ax = fig.add_subplot(211, ylabel='Rot. NEES')
ax.plot(nees_rot)
ax = fig.add_subplot(212, ylabel='Pos. NEES', xlabel='Keyframes')
ax.plot(nees_pos)
fig.savefig(os.path.join(data_dir,'consistency_nees_posrot'+FORMAT), bbox_inches="tight")
# NEES Pose
fig = plt.figure(figsize=(6,1.5))
ax = fig.add_subplot(111, ylabel='Pose NEES', xlabel='Keyframes')
ax.plot(nees_se3)
fig.savefig(os.path.join(data_dir,'consistency_pose'+FORMAT), bbox_inches="tight")
# write to file
# NOTE(review): the header names two columns but three values are
# written per row (rot, pos, se3 NEES) -- the header looks out of date.
file_out = open(os.path.join(data_dir, 'consistency_nees.csv'), 'w')
file_out.write('# NEES orientation, NEES position \n')
for i in range(len(nees_rot)):
    file_out.write('%.8f, %.8f, %.8f\n' % (nees_rot[i], nees_pos[i], nees_se3[i]))
file_out.close()
import os
import yaml
import logging
import argparse
import numpy as np
import ze_trajectory_analysis.analyse as traj_analysis
import ze_py.transformations as tf
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import vikit_py.transformations as tf
from matplotlib import rc
from matplotlib.ticker import FuncFormatter
rc('font',**{'family':'serif','serif':['Cardo']})
rc('text', usetex=True)
_EPS = np.finfo(float).eps * 4.0
FORMAT = '.pdf'
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.info('Trajectory alignment example.')
data_dir = '/home/cfo/vin_ws/src/svo_gtsam/trace/data/20160407_1459_gtsam_vicon_asl_140313_vicon_aslam_2'
ta = traj_analysis.TrajectoryAnalysis(data_dir)
ta.load_data(data_dir, data_format='svo_gtsam')
ta.apply_hand_eye_calibration_to_groundtruth()
ta.align_trajectory('first_frame')
ta.plot_aligned_trajectory()
t_es = ta.t_es
p_es = ta.p_es_aligned
q_es = ta.q_es_aligned
p_gt = ta.p_gt
q_gt = ta.q_gt
distances = ta.distances
cov_data = np.genfromtxt(os.path.join(data_dir, 'estimate_covariance.csv'), delimiter=',')
t_cov = cov_data[:,0]
n = len(cov_data)
yaw_sigma_3 = np.zeros(n)
yaw_error = np.zeros(n)
roll_sigma_3 = np.zeros(n)
roll_error = np.zeros(n)
pitch_sigma_3 = np.zeros(n)
pitch_error = np.zeros(n)
error_pos_W = np.zeros((3,n))
error_pos_W_sigma_3 = np.zeros((3,n))
nees_rot = np.zeros(n)
nees_pos = np.zeros(n)
nees_se3 = np.zeros(n)
for i in range(1000):
Cov_T_B = np.reshape(cov_data[i,1:],(6,6))
Cov_R_B = Cov_T_B[:3,:3]
Cov_t_B = Cov_T_B[3:6,3:6]
p_W_Bes = p_es[i,:]
p_W_Bgt = p_gt[i,:]
R_W_Bes = tf.quaternion_matrix(q_es[i,:])[:3,:3]
R_W_Bgt = tf.quaternion_matrix(q_gt[i,:])[:3,:3]
Cov_R_W = np.dot(R_W_Bes, np.dot(Cov_R_B, np.transpose(R_W_Bes)))
Cov_T_W = np.dot(R_W_Bes, np.dot(Cov_t_B, np.transpose(R_W_Bes)))
yaw_sigma_3[i] = np.sqrt(Cov_R_W[2,2])*3.0*180/np.pi
pitch_sigma_3[i] = np.sqrt(Cov_R_W[1,1])*3.0*180/np.pi
roll_sigma_3[i] = np.sqrt(Cov_R_W[0,0])*3.0*180/np.pi
R_Bgt_Bes = np.dot(R_W_Bgt, np.transpose(R_W_Bes))
yaw_error[i], pitch_error[i], roll_error[i] = tf.euler_from_matrix(R_Bgt_Bes, 'rzyx')
error_rot_B = tf.logmap_so3(np.transpose(R_Bgt_Bes))
error_pos_B = np.dot(np.transpose(R_W_Bes), (p_W_Bgt - p_W_Bes))
error_se3_B = np.concatenate((error_rot_B, error_pos_B))
nees_rot[i] = np.dot(error_rot_B, np.dot(np.linalg.inv(Cov_R_B), error_rot_B))
nees_pos[i] = np.dot(error_pos_B, np.dot(np.linalg.inv(Cov_t_B), error_pos_B))
nees_se3[i] = np.dot(error_se3_B, np.dot(np.linalg.inv(Cov_T_B), error_se3_B))
error_pos_W[:,i] = p_W_Bgt - p_W_Bes
error_pos_W_sigma_3[0,i] = np.sqrt(Cov_T_W[0,0])*3.0
error_pos_W_sigma_3[1,i] = np.sqrt(Cov_T_W[1,1])*3.0
error_pos_W_sigma_3[2,i] = np.sqrt(Cov_T_W[2,2])*3.0
yaw_error *= 180/np.pi
pitch_error *= 180/np.pi
roll_error *= 180/np.pi
n_max = 1000
D = distances[:n_max]
y_lim = 5
fig = plt.figure(figsize=(6,8))
gs1 = gridspec.GridSpec(3, 1)
gs1.update(wspace=0.005)
ax = fig.add_subplot(611, ylabel='Err. Yaw [deg]')
ax.locator_params(axis = 'y', nbins = 4)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.2f'%y))
ax.plot(D, yaw_sigma_3[:n_max], 'r-', alpha=0.5, lw=0.7)
ax.plot(D, -yaw_sigma_3[:n_max], 'r-', alpha=0.5, lw=0.7)
ax.plot(D, yaw_error[:n_max], 'r-', lw=1)
ax.set_xticks([])
ax.set_ylim([-y_lim,y_lim])
y_lim = 4
ax = fig.add_subplot(612, ylabel='Err. Pitch [deg]')
ax.locator_params(axis = 'y', nbins = 4)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.2f'%y))
ax.plot(D, pitch_sigma_3[:n_max], 'g-', alpha=0.5, lw=0.7)
ax.plot(D, -pitch_sigma_3[:n_max], 'g-', alpha=0.5, lw=0.7)
ax.plot(D, pitch_error[:n_max], 'g-', lw=1)
ax.set_xticks([])
ax.set_ylim([-y_lim,y_lim])
ax = fig.add_subplot(613, ylabel='Err. Roll [deg]')
ax.locator_params(axis = 'y', nbins = 4)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.2f'%y))
ax.plot(D, roll_sigma_3[:n_max], 'b-', alpha=0.5, lw=0.7)
ax.plot(D, -roll_sigma_3[:n_max], 'b-', alpha=0.5, lw=0.7)
ax.plot(D, roll_error[:n_max], 'b-', lw=1)
ax.set_ylim([-y_lim,y_lim])
ax.set_xticks([])
y_lim = 0.9
ax = fig.add_subplot(614, ylabel='Err. x [m]')
ax.locator_params(axis = 'y', nbins = 4)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.2f'%y))
ax.plot(D, error_pos_W_sigma_3[0,:n_max], 'r-', alpha=0.5, lw=0.7)
ax.plot(D, -error_pos_W_sigma_3[0,:n_max], 'r-', alpha=0.5, lw=0.7)
ax.plot(D, error_pos_W[0,:n_max], 'r-', lw=1)
ax.set_xticks([])
ax.set_ylim([-y_lim,y_lim])
ax = fig.add_subplot(615, ylabel='Err. y [m]')
ax.locator_params(axis = 'y', nbins = 4)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.2f'%y))
ax.plot(D, error_pos_W_sigma_3[1,:n_max], 'g-', alpha=0.5, lw=0.7)
ax.plot(D, -error_pos_W_sigma_3[1,:n_max], 'g-', alpha=0.5, lw=0.7)
ax.plot(D, error_pos_W[1,:n_max], 'g-', lw=1)
ax.set_ylim([-y_lim,y_lim])
ax.set_xticks([])
ax = fig.add_subplot(616, xlabel='Distance Travelled [m]', ylabel='Err. z [m]')
ax.locator_params(axis = 'y', nbins = 4)
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.2f'%y))
ax.plot(D, error_pos_W_sigma_3[2,:n_max], 'b-', alpha=0.5, lw=0.7)
ax.plot(D, -error_pos_W_sigma_3[2,:n_max], 'b-', alpha=0.5, lw=0.7)
ax.plot(D, error_pos_W[2,:n_max], 'b-', lw=1)
ax.set_ylim([-y_lim,y_lim])
ax.tick_params('x',top='off')
fig.tight_layout()
fig.savefig(os.path.join(data_dir,'consistency_single_run'+FORMAT), bbox_inches="tight")
file_out = open(os.path.join(data_dir, 'consistency_errors.csv'), 'w')
file_out.write('# trans_error -x, -y, -z, rot_error -yaw, -pitch, -roll\n')
for i in range(len(yaw_error)):
file_out.write(
'%.8f, %.8f, %.8f, %.8f, %.8f, %.8f\n' %
(error_pos_W[0,i], error_pos_W[1,i], error_pos_W[2,i], yaw_error[i], pitch_error[i], roll_error[i]))
file_out.close()
fig = plt.figure(figsize=(6,3))
ax = fig.add_subplot(211, ylabel='Rot. NEES')
ax.plot(nees_rot)
ax = fig.add_subplot(212, ylabel='Pos. NEES', xlabel='Keyframes')
ax.plot(nees_pos)
fig.savefig(os.path.join(data_dir,'consistency_nees_posrot'+FORMAT), bbox_inches="tight")
fig = plt.figure(figsize=(6,1.5))
ax = fig.add_subplot(111, ylabel='Pose NEES', xlabel='Keyframes')
ax.plot(nees_se3)
fig.savefig(os.path.join(data_dir,'consistency_pose'+FORMAT), bbox_inches="tight")
file_out = open(os.path.join(data_dir, 'consistency_nees.csv'), 'w')
file_out.write('# NEES orientation, NEES position \n')
for i in range(len(nees_rot)):
file_out.write('%.8f, %.8f, %.8f\n' % (nees_rot[i], nees_pos[i], nees_se3[i]))
file_out.close() | true | true |
1c3bde8ca6b6b305dd3385b8719759404968c735 | 6,169 | py | Python | basicsr/utils/img_util.py | IanYeung/ReCp | 1a7ace0e1ca3c262e24a222f3f0ab0d5674e9410 | [
"Apache-2.0",
"MIT"
] | null | null | null | basicsr/utils/img_util.py | IanYeung/ReCp | 1a7ace0e1ca3c262e24a222f3f0ab0d5674e9410 | [
"Apache-2.0",
"MIT"
] | null | null | null | basicsr/utils/img_util.py | IanYeung/ReCp | 1a7ace0e1ca3c262e24a222f3f0ab0d5674e9410 | [
"Apache-2.0",
"MIT"
] | null | null | null | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
def normalize(imgs):
    """Scale uint8 image(s) to float32 in [0, 1].

    Accepts a single ndarray or a list of ndarrays and returns the same
    structure with every array cast to float32 and divided by 255.
    """
    if isinstance(imgs, list):
        return [img.astype(np.float32) / 255. for img in imgs]
    return imgs.astype(np.float32) / 255.
def img2tensor(imgs, bgr2rgb=True, float32=True):
    """Convert numpy image array(s) to torch tensor(s).

    Args:
        imgs (list[ndarray] | ndarray): Input image(s) of shape (H, W, C)
            or (H, W). A 2D grayscale image is promoted to a single
            channel, so the output is always (C, H, W). (Previously a 2D
            input raised IndexError on ``img.shape[2]``.)
        bgr2rgb (bool): Whether to change bgr to rgb; only applied to
            3-channel images.
        float32 (bool): Whether to cast the result to float32.

    Returns:
        list[tensor] | tensor: Tensor image(s). If the input is a single
        ndarray, a single tensor is returned.
    """
    def _totensor(img, bgr2rgb, float32):
        # Promote 2D grayscale to (H, W, 1) so the transpose below is valid.
        if img.ndim == 2:
            img = img[..., None]
        if img.shape[2] == 3 and bgr2rgb:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # HWC -> CHW, keeping the original dtype unless float32 is requested.
        img = torch.from_numpy(img.transpose(2, 0, 1))
        if float32:
            img = img.float()
        return img

    if isinstance(imgs, list):
        return [_totensor(img, bgr2rgb, float32) for img in imgs]
    else:
        return _totensor(imgs, bgr2rgb, float32)
def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
    """Convert torch Tensors into image numpy arrays.
    After clamping to [min, max], values will be normalized to [0, 1].
    Args:
        tensor (Tensor or list[Tensor]): Accept shapes:
            1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
            2) 3D Tensor of shape (3/1 x H x W);
            3) 2D Tensor of shape (H x W).
            Tensor channel should be in RGB order.
        rgb2bgr (bool): Whether to change rgb to bgr.
        out_type (numpy type): output types. If ``np.uint8``, transform outputs
            to uint8 type with range [0, 255]; otherwise, float type with
            range [0, 1]. Default: ``np.uint8``.
        min_max (tuple[int]): min and max values for clamp.
    Returns:
        (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of
        shape (H x W). The channel order is BGR.
    """
    # Reject anything that is not a tensor or a list of tensors up front.
    if not (torch.is_tensor(tensor) or
            (isinstance(tensor, list)
             and all(torch.is_tensor(t) for t in tensor))):
        raise TypeError(
            f'tensor or list of tensors expected, got {type(tensor)}')
    # Normalize to a list so the loop below handles both call styles.
    if torch.is_tensor(tensor):
        tensor = [tensor]
    result = []
    for _tensor in tensor:
        # Drop a leading batch dim of size 1, move to CPU, and clamp to
        # [min, max] before rescaling into [0, 1].
        _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
        _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
        n_dim = _tensor.dim()
        if n_dim == 4:
            # Mini-batch: tile the batch into a near-square image grid.
            img_np = make_grid(
                _tensor, nrow=int(math.sqrt(_tensor.size(0))),
                normalize=False).numpy()
            img_np = img_np.transpose(1, 2, 0)
            if rgb2bgr:
                img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
        elif n_dim == 3:
            # Single image, CHW -> HWC.
            img_np = _tensor.numpy()
            img_np = img_np.transpose(1, 2, 0)
            if img_np.shape[2] == 1:  # gray image
                img_np = np.squeeze(img_np, axis=2)
            else:
                if rgb2bgr:
                    img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
        elif n_dim == 2:
            # Already a plain (H, W) image.
            img_np = _tensor.numpy()
        else:
            raise TypeError('Only support 4D, 3D or 2D tensor. '
                            f'But received with dimension: {n_dim}')
        if out_type == np.uint8:
            # Unlike MATLAB, numpy.unit8() WILL NOT round by default.
            img_np = (img_np * 255.0).round()
        img_np = img_np.astype(out_type)
        result.append(img_np)
    # Single-tensor input yields a single array, mirroring the input shape.
    if len(result) == 1:
        result = result[0]
    return result
def imfrombytes(content, flag='color', float32=False):
    """Decode an image from raw bytes.

    Args:
        content (bytes): Encoded image bytes read from a file or another
            stream (e.g. an lmdb entry).
        flag (str): Color type of the loaded image; one of ``color``,
            ``grayscale`` or ``unchanged``.
        float32 (bool): If True, cast to float32 and normalize to [0, 1].
            Default: False.

    Returns:
        ndarray: Decoded image array.
    """
    flag_map = {
        'color': cv2.IMREAD_COLOR,
        'grayscale': cv2.IMREAD_GRAYSCALE,
        'unchanged': cv2.IMREAD_UNCHANGED,
    }
    buf = np.frombuffer(content, np.uint8)
    img = cv2.imdecode(buf, flag_map[flag])
    if float32:
        img = img.astype(np.float32) / 255.
    return img
def imread(path, flag='color', float32=False):
    """Read an image from disk with cv2.

    Returns an HWC, BGR ndarray; when ``float32`` is True the result is
    float32 normalized to [0, 1], otherwise the raw cv2 dtype.
    """
    flag_map = {
        'color': cv2.IMREAD_COLOR,
        'grayscale': cv2.IMREAD_GRAYSCALE,
        'unchanged': cv2.IMREAD_UNCHANGED,
    }
    img = cv2.imread(path, flag_map[flag])
    if float32:
        img = img.astype(np.float32) / 255.
    return img
def imwrite(img, file_path, params=None, auto_mkdir=True):
    """Write an image to ``file_path`` via cv2.

    Args:
        img (ndarray): Image array to be written.
        file_path (str): Destination image path.
        params (None or list): Passed straight through to
            :func:`cv2.imwrite`.
        auto_mkdir (bool): Create the parent directory of ``file_path``
            if it does not exist yet.

    Returns:
        bool: Whether the write succeeded.
    """
    if auto_mkdir:
        parent = os.path.abspath(os.path.dirname(file_path))
        os.makedirs(parent, exist_ok=True)
    return cv2.imwrite(file_path, img, params)
def crop_border(imgs, crop_border):
    """Remove ``crop_border`` pixels from every edge of height and width.

    Args:
        imgs (list[ndarray] | ndarray): Image(s) with shape (h, w, c).
        crop_border (int): Number of pixels cropped from each end of the
            height and width dimensions.

    Returns:
        list[ndarray] | ndarray: Cropped image(s); the input is returned
        unchanged when ``crop_border`` is 0.
    """
    if crop_border == 0:
        return imgs
    window = slice(crop_border, -crop_border)
    if isinstance(imgs, list):
        return [img[window, window, ...] for img in imgs]
    return imgs[window, window, ...]
| 31.963731 | 79 | 0.58486 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
def normalize(imgs):
def _norm(img):
return img.astype(np.float32) / 255.
if isinstance(imgs, list):
return [_norm(img) for img in imgs]
else:
return _norm(imgs)
def img2tensor(imgs, bgr2rgb=True, float32=True):
def _totensor(img, bgr2rgb, float32):
if img.shape[2] == 3 and bgr2rgb:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = torch.from_numpy(img.transpose(2, 0, 1))
if float32:
img = img.float()
return img
if isinstance(imgs, list):
return [_totensor(img, bgr2rgb, float32) for img in imgs]
else:
return _totensor(imgs, bgr2rgb, float32)
def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
if not (torch.is_tensor(tensor) or
(isinstance(tensor, list)
and all(torch.is_tensor(t) for t in tensor))):
raise TypeError(
f'tensor or list of tensors expected, got {type(tensor)}')
if torch.is_tensor(tensor):
tensor = [tensor]
result = []
for _tensor in tensor:
_tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
_tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
n_dim = _tensor.dim()
if n_dim == 4:
img_np = make_grid(
_tensor, nrow=int(math.sqrt(_tensor.size(0))),
normalize=False).numpy()
img_np = img_np.transpose(1, 2, 0)
if rgb2bgr:
img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
elif n_dim == 3:
img_np = _tensor.numpy()
img_np = img_np.transpose(1, 2, 0)
if img_np.shape[2] == 1:
img_np = np.squeeze(img_np, axis=2)
else:
if rgb2bgr:
img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
elif n_dim == 2:
img_np = _tensor.numpy()
else:
raise TypeError('Only support 4D, 3D or 2D tensor. '
f'But received with dimension: {n_dim}')
if out_type == np.uint8:
img_np = (img_np * 255.0).round()
img_np = img_np.astype(out_type)
result.append(img_np)
if len(result) == 1:
result = result[0]
return result
def imfrombytes(content, flag='color', float32=False):
img_np = np.frombuffer(content, np.uint8)
imread_flags = {
'color': cv2.IMREAD_COLOR,
'grayscale': cv2.IMREAD_GRAYSCALE,
'unchanged': cv2.IMREAD_UNCHANGED
}
img = cv2.imdecode(img_np, imread_flags[flag])
if float32:
img = img.astype(np.float32) / 255.
return img
def imread(path, flag='color', float32=False):
imread_flags = {
'color': cv2.IMREAD_COLOR,
'grayscale': cv2.IMREAD_GRAYSCALE,
'unchanged': cv2.IMREAD_UNCHANGED
}
img = cv2.imread(path, imread_flags[flag])
if float32:
img = img.astype(np.float32) / 255.
return img
def imwrite(img, file_path, params=None, auto_mkdir=True):
if auto_mkdir:
dir_name = os.path.abspath(os.path.dirname(file_path))
os.makedirs(dir_name, exist_ok=True)
return cv2.imwrite(file_path, img, params)
def crop_border(imgs, crop_border):
if crop_border == 0:
return imgs
else:
if isinstance(imgs, list):
return [
v[crop_border:-crop_border, crop_border:-crop_border, ...]
for v in imgs
]
else:
return imgs[crop_border:-crop_border, crop_border:-crop_border,
...]
| true | true |
1c3bde9c0433366821197ae865f4d4b41ed5c11f | 2,182 | py | Python | AlgorithmFactories/ClassificationAlgorithmFactories/LogisticRegressionAlgorithmFactory.py | CzakoZoltan08/COVID-19-patient-filtering-using-AutomaticAI | 87c4cb1d2848a0258b0b6d652316cb69de382ff0 | [
"MIT"
] | 1 | 2021-07-26T19:42:50.000Z | 2021-07-26T19:42:50.000Z | AlgorithmFactories/ClassificationAlgorithmFactories/LogisticRegressionAlgorithmFactory.py | CzakoZoltan08/COVID-19-patient-filtering-using-AutomaticAI | 87c4cb1d2848a0258b0b6d652316cb69de382ff0 | [
"MIT"
] | null | null | null | AlgorithmFactories/ClassificationAlgorithmFactories/LogisticRegressionAlgorithmFactory.py | CzakoZoltan08/COVID-19-patient-filtering-using-AutomaticAI | 87c4cb1d2848a0258b0b6d652316cb69de382ff0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 23 12:28:07 2019
@author: czzo
"""
from collections import OrderedDict
from sklearn.linear_model import LogisticRegression
from ..Algorithm import Algorithm
# Hyper-parameters held fixed during the search.
const_param_logistic_regression = {
    'verbose': 0,
    'dual': False,
    'class_weight': 'balanced',
    'penalty': 'l1'
}
# Candidate values for the discrete "solver" hyper-parameter.
# (Name keeps the original "dicrete" spelling: it is a public module-level
# name that other modules may import.)
dicrete_hyper_parameter_list_of_solvers = ["liblinear", "newton-cg", "lbfgs", "sag"]
# Order of the continuous parameters; must line up with ``bounds`` below.
continuous_hyper_parameter_mapping_index_key_mapping = ["C", "tol", "intercept_scaling"]
discrete_hyper_parameter_mapping = ["solver"]
discrete_parameter_dict_logistic_regression = OrderedDict()
discrete_parameter_dict_logistic_regression["solver"] = dicrete_hyper_parameter_list_of_solvers
# Each solver only supports one penalty in scikit-learn; these pairs pin
# the penalty whenever the corresponding solver is selected.
# NOTE(review): 'saga' has a constraint entry but is absent from the
# solver candidate list above -- confirm whether that is intentional.
parameter_constraint_dict = OrderedDict()
parameter_constraint_dict['sag'] = [('penalty','l2')]
parameter_constraint_dict['saga'] = [('penalty','l1')]
parameter_constraint_dict['newton-cg'] = [('penalty','l2')]
parameter_constraint_dict['lbfgs'] = [('penalty','l2')]
parameter_constraint_dict['liblinear'] = [('penalty','l1')]
# logistic regression
# Initial/default hyper-parameter values for the search.
param_dict_logistic_regression = OrderedDict()
param_dict_logistic_regression['tol'] = 0.0
param_dict_logistic_regression['C'] = 0.0
param_dict_logistic_regression['solver'] = 'liblinear'
param_dict_logistic_regression['intercept_scaling'] = 0.0
param_dict_logistic_regression['max_iter'] = 1000
param_dict_logistic_regression['penalty'] = 'l1'
# Search bounds: (C, tol, intercept_scaling) ranges followed by the index
# range over the discrete solver list.
bounds=[(0.001,3),(0.001,2),(0.001,1),(0.0, 3.99)]
def get_algorithm():
    """Build the logistic-regression ``Algorithm`` descriptor.

    Bundles the module-level hyper-parameter dictionaries, solver
    constraints and search bounds into a single ``Algorithm`` instance.
    """
    kwargs = dict(
        algorithm_type=LogisticRegression,
        algorithm_name="LOGISTIC_REGRESSION",
        hyper_parameter_dict=param_dict_logistic_regression,
        discrete_hyper_parameter_dict=discrete_parameter_dict_logistic_regression,
        discrete_hyper_parameter_mapping=discrete_hyper_parameter_mapping,
        continuous_hyper_parameter_mapping=continuous_hyper_parameter_mapping_index_key_mapping,
        parameter_constraint_dict=parameter_constraint_dict,
        constant_hyper_parameter_dict=const_param_logistic_regression,
        bounds=bounds,
    )
    return Algorithm(**kwargs)
from collections import OrderedDict
from sklearn.linear_model import LogisticRegression
from ..Algorithm import Algorithm
const_param_logistic_regression = {
'verbose': 0,
'dual': False,
'class_weight': 'balanced',
'penalty': 'l1'
}
dicrete_hyper_parameter_list_of_solvers = ["liblinear", "newton-cg", "lbfgs", "sag"]
continuous_hyper_parameter_mapping_index_key_mapping = ["C", "tol", "intercept_scaling"]
discrete_hyper_parameter_mapping = ["solver"]
discrete_parameter_dict_logistic_regression = OrderedDict()
discrete_parameter_dict_logistic_regression["solver"] = dicrete_hyper_parameter_list_of_solvers
parameter_constraint_dict = OrderedDict()
parameter_constraint_dict['sag'] = [('penalty','l2')]
parameter_constraint_dict['saga'] = [('penalty','l1')]
parameter_constraint_dict['newton-cg'] = [('penalty','l2')]
parameter_constraint_dict['lbfgs'] = [('penalty','l2')]
parameter_constraint_dict['liblinear'] = [('penalty','l1')]
param_dict_logistic_regression = OrderedDict()
param_dict_logistic_regression['tol'] = 0.0
param_dict_logistic_regression['C'] = 0.0
param_dict_logistic_regression['solver'] = 'liblinear'
param_dict_logistic_regression['intercept_scaling'] = 0.0
param_dict_logistic_regression['max_iter'] = 1000
param_dict_logistic_regression['penalty'] = 'l1'
bounds=[(0.001,3),(0.001,2),(0.001,1),(0.0, 3.99)]
def get_algorithm():
return Algorithm(algorithm_type=LogisticRegression,
algorithm_name="LOGISTIC_REGRESSION",
hyper_parameter_dict=param_dict_logistic_regression,
discrete_hyper_parameter_dict=discrete_parameter_dict_logistic_regression,
discrete_hyper_parameter_mapping=discrete_hyper_parameter_mapping,
continuous_hyper_parameter_mapping=continuous_hyper_parameter_mapping_index_key_mapping,
parameter_constraint_dict=parameter_constraint_dict,
constant_hyper_parameter_dict=const_param_logistic_regression,
bounds=bounds) | true | true |
1c3be05818eb09a29d2c3fb648e19af0b8059c8d | 10,447 | py | Python | pip/resolve.py | lyw07/kolibripip | e7039eca92b61827faa754311f1489e89a11519d | [
"MIT"
] | null | null | null | pip/resolve.py | lyw07/kolibripip | e7039eca92b61827faa754311f1489e89a11519d | [
"MIT"
] | null | null | null | pip/resolve.py | lyw07/kolibripip | e7039eca92b61827faa754311f1489e89a11519d | [
"MIT"
] | null | null | null | """Dependency Resolution
The dependency resolution in pip is performed as follows:
for top-level requirements:
a. only one spec allowed per project, regardless of conflicts or not.
otherwise a "double requirement" exception is raised
b. they override sub-dependency requirements.
for sub-dependencies
a. "first found, wins" (where the order is breadth first)
"""
import logging
from itertools import chain
from pip.exceptions import (
BestVersionAlreadyInstalled,
DistributionNotFound, HashError, HashErrors, UnsupportedPythonVersion
)
from pip.req.req_install import InstallRequirement
from pip.utils import dist_in_usersite, ensure_dir
from pip.utils.logging import indent_log
from pip.utils.packaging import check_dist_requires_python
logger = logging.getLogger(__name__)
class Resolver(object):
"""Resolves which packages need to be installed/uninstalled to perform \
the requested operation without breaking the requirements of any package.
"""
_allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
def __init__(self, preparer, session, finder, wheel_cache, use_user_site,
ignore_dependencies, ignore_installed, ignore_requires_python,
force_reinstall, isolated, upgrade_strategy):
super(Resolver, self).__init__()
assert upgrade_strategy in self._allowed_strategies
self.preparer = preparer
self.finder = finder
self.session = session
# NOTE: This would eventually be replaced with a cache that can give
# information about both sdist and wheels transparently.
self.wheel_cache = wheel_cache
self.require_hashes = None # This is set in resolve
self.upgrade_strategy = upgrade_strategy
self.force_reinstall = force_reinstall
self.isolated = isolated
self.ignore_dependencies = ignore_dependencies
self.ignore_installed = ignore_installed
self.ignore_requires_python = ignore_requires_python
self.use_user_site = use_user_site
    def resolve(self, requirement_set):
        """Resolve what operations need to be done

        As a side-effect of this method, the packages (and their dependencies)
        are downloaded, unpacked and prepared for installation. This
        preparation is done by ``pip.operations.prepare``.

        Once PyPI has static dependency metadata available, it would be
        possible to move the preparation to become a step separated from
        dependency resolution.
        """
        # make the wheelhouse
        if self.preparer.wheel_download_dir:
            ensure_dir(self.preparer.wheel_download_dir)

        # If any top-level requirement has a hash specified, enter
        # hash-checking mode, which requires hashes from all.
        # NOTE(review): list + ``.values()`` concatenation assumes
        # ``requirements.values()`` returns a list (pip's custom
        # Requirements container) -- confirm; a plain py3 dict view
        # would not support ``+`` here.
        root_reqs = (
            requirement_set.unnamed_requirements +
            requirement_set.requirements.values()
        )
        self.require_hashes = (
            requirement_set.require_hashes or
            any(req.has_hash_options for req in root_reqs)
        )

        # Display where finder is looking for packages
        locations = self.finder.get_formatted_locations()
        if locations:
            logger.info(locations)

        # Actually prepare the files, and collect any exceptions. Most hash
        # exceptions cannot be checked ahead of time, because
        # req.populate_link() needs to be called before we can make decisions
        # based on link type.
        # The loop iterates chain(root_reqs, discovered_reqs) while
        # extending discovered_reqs inside the body, so newly discovered
        # sub-dependencies are visited too (breadth-first order).
        discovered_reqs = []
        hash_errors = HashErrors()
        for req in chain(root_reqs, discovered_reqs):
            try:
                discovered_reqs.extend(
                    self._resolve_one(requirement_set, req)
                )
            except HashError as exc:
                # Attach the offending requirement so the aggregated
                # error report can name it.
                exc.req = req
                hash_errors.append(exc)

        if hash_errors:
            raise hash_errors
def _is_upgrade_allowed(self, req):
if self.upgrade_strategy == "to-satisfy-only":
return False
elif self.upgrade_strategy == "eager":
return True
else:
assert self.upgrade_strategy == "only-if-needed"
return req.is_direct
    # XXX: Stop passing requirement_set for options
    def _check_skip_installed(self, req_to_install):
        """Check if req_to_install should be skipped.
        This will check if the req is installed, and whether we should upgrade
        or reinstall it, taking into account all the relevant user options.
        After calling this req_to_install will only have satisfied_by set to
        None if the req_to_install is to be upgraded/reinstalled etc. Any
        other value will be a dist recording the current thing installed that
        satisfies the requirement.
        Note that for vcs urls and the like we can't assess skipping in this
        routine - we simply identify that we need to pull the thing down,
        then later on it is pulled down and introspected to assess upgrade/
        reinstalls etc.
        :return: A text reason for why it was skipped, or None.
        """
        # Check whether to upgrade/reinstall this req or not.
        req_to_install.check_if_exists()
        if req_to_install.satisfied_by:
            upgrade_allowed = self._is_upgrade_allowed(req_to_install)
            # Whether the best available version is already installed.
            best_installed = False
            if upgrade_allowed:
                # For link based requirements we have to pull the
                # tree down and inspect to assess the version #, so
                # its handled way down.
                should_check_possibility_for_upgrade = not (
                    self.force_reinstall or req_to_install.link
                )
                if should_check_possibility_for_upgrade:
                    try:
                        self.finder.find_requirement(
                            req_to_install, upgrade_allowed)
                    except BestVersionAlreadyInstalled:
                        best_installed = True
                    except DistributionNotFound:
                        # No distribution found, so we squash the
                        # error - it will be raised later when we
                        # re-try later to do the install.
                        # Why don't we just raise here?
                        pass
            if not best_installed:
                # don't uninstall conflict if user install and
                # conflict is not user install
                if not (self.use_user_site and not
                        dist_in_usersite(req_to_install.satisfied_by)):
                    req_to_install.conflicts_with = \
                        req_to_install.satisfied_by
                req_to_install.satisfied_by = None
            # Figure out a nice message to say why we're skipping this.
            if best_installed:
                skip_reason = 'already up-to-date'
            elif self.upgrade_strategy == "only-if-needed":
                skip_reason = 'not upgraded as not directly required'
            else:
                skip_reason = 'already satisfied'
            return skip_reason
        else:
            # Nothing installed satisfies the requirement: do not skip.
            return None
    def _resolve_one(self, requirement_set, req_to_install):
        """Prepare a single requirement and register its dependencies.
        :return: A list of additional InstallRequirements to also install.
        """
        # Tell user what we are doing for this requirement:
        # obtain (editable), skipping, processing (local url), collecting
        # (remote url or package name)
        if req_to_install.constraint or req_to_install.prepared:
            return []
        req_to_install.prepared = True
        abstract_dist = self.preparer.prepare_requirement(req_to_install, self)
        # register tmp src for cleanup in case something goes wrong
        requirement_set.reqs_to_cleanup.append(req_to_install)
        # Parse and return dependencies
        dist = abstract_dist.dist(self.finder)
        try:
            check_dist_requires_python(dist)
        except UnsupportedPythonVersion as err:
            # Only downgrade the failure to a warning when explicitly asked.
            if self.ignore_requires_python:
                logger.warning(err.args[0])
            else:
                raise
        more_reqs = []
        def add_req(subreq, extras_requested):
            # Register one dependency with the requirement set and queue any
            # newly added requirements for resolution.
            sub_install_req = InstallRequirement.from_req(
                str(subreq),
                req_to_install,
                isolated=self.isolated,
                wheel_cache=self.wheel_cache,
            )
            more_reqs.extend(
                requirement_set.add_requirement(
                    sub_install_req, req_to_install.name,
                    extras_requested=extras_requested
                )
            )
        with indent_log():
            # We add req_to_install before its dependencies, so that we
            # can refer to it when adding dependencies.
            if not requirement_set.has_requirement(req_to_install.name):
                # 'unnamed' requirements will get added here
                requirement_set.add_requirement(req_to_install, None)
            if not self.ignore_dependencies:
                if req_to_install.extras:
                    logger.debug(
                        "Installing extra requirements: %r",
                        ','.join(req_to_install.extras),
                    )
                # Warn about requested extras the dist does not provide.
                missing_requested = sorted(
                    set(req_to_install.extras) - set(dist.extras)
                )
                for missing in missing_requested:
                    logger.warning(
                        '%s does not provide the extra \'%s\'',
                        dist, missing
                    )
                available_requested = sorted(
                    set(dist.extras) & set(req_to_install.extras)
                )
                for subreq in dist.requires(available_requested):
                    add_req(subreq, extras_requested=available_requested)
        if not req_to_install.editable and not req_to_install.satisfied_by:
            # XXX: --no-install leads this to report 'Successfully
            # downloaded' for only non-editable reqs, even though we took
            # action on them.
            requirement_set.successfully_downloaded.append(req_to_install)
        return more_reqs
| 39.722433 | 79 | 0.615679 |
import logging
from itertools import chain
from pip.exceptions import (
BestVersionAlreadyInstalled,
DistributionNotFound, HashError, HashErrors, UnsupportedPythonVersion
)
from pip.req.req_install import InstallRequirement
from pip.utils import dist_in_usersite, ensure_dir
from pip.utils.logging import indent_log
from pip.utils.packaging import check_dist_requires_python
logger = logging.getLogger(__name__)
class Resolver(object):
    """Resolves which packages need to be prepared/installed to satisfy a
    RequirementSet, honoring the user's upgrade/reinstall options.
    """
    _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
    def __init__(self, preparer, session, finder, wheel_cache, use_user_site,
                 ignore_dependencies, ignore_installed, ignore_requires_python,
                 force_reinstall, isolated, upgrade_strategy):
        super(Resolver, self).__init__()
        # Fail fast on an unknown strategy rather than misbehaving later.
        assert upgrade_strategy in self._allowed_strategies
        self.preparer = preparer
        self.finder = finder
        self.session = session
        self.wheel_cache = wheel_cache
        # This is set in resolve()
        self.require_hashes = None
        self.upgrade_strategy = upgrade_strategy
        self.force_reinstall = force_reinstall
        self.isolated = isolated
        self.ignore_dependencies = ignore_dependencies
        self.ignore_installed = ignore_installed
        self.ignore_requires_python = ignore_requires_python
        self.use_user_site = use_user_site
    def resolve(self, requirement_set):
        """Resolve what operations need to be done.
        As a side-effect, the packages (and their dependencies) are
        downloaded, unpacked and prepared for installation.
        """
        # make the wheelhouse
        if self.preparer.wheel_download_dir:
            ensure_dir(self.preparer.wheel_download_dir)
        # If any top-level requirement has a hash specified, enter
        # hash-checking mode, which requires hashes from all.
        root_reqs = (
            requirement_set.unnamed_requirements +
            requirement_set.requirements.values()
        )
        self.require_hashes = (
            requirement_set.require_hashes or
            any(req.has_hash_options for req in root_reqs)
        )
        # Display where finder is looking for packages
        locations = self.finder.get_formatted_locations()
        if locations:
            logger.info(locations)
        # Prepare the files and collect hash errors; most hash errors can
        # only be checked after req.populate_link() has been called.
        # NOTE: discovered_reqs grows while the chain() is consumed, so new
        # dependencies are processed by this same loop.
        discovered_reqs = []
        hash_errors = HashErrors()
        for req in chain(root_reqs, discovered_reqs):
            try:
                discovered_reqs.extend(
                    self._resolve_one(requirement_set, req)
                )
            except HashError as exc:
                exc.req = req
                hash_errors.append(exc)
        if hash_errors:
            raise hash_errors
    def _is_upgrade_allowed(self, req):
        # "to-satisfy-only" never upgrades; "eager" always does;
        # "only-if-needed" upgrades only directly requested requirements.
        if self.upgrade_strategy == "to-satisfy-only":
            return False
        elif self.upgrade_strategy == "eager":
            return True
        else:
            assert self.upgrade_strategy == "only-if-needed"
            return req.is_direct
    def _check_skip_installed(self, req_to_install):
        """Check if req_to_install should be skipped.
        After calling this, req_to_install.satisfied_by is None only when
        the requirement is to be upgraded/reinstalled; otherwise it records
        the currently installed dist that satisfies the requirement.
        :return: A text reason for why it was skipped, or None.
        """
        # Check whether to upgrade/reinstall this req or not.
        req_to_install.check_if_exists()
        if req_to_install.satisfied_by:
            upgrade_allowed = self._is_upgrade_allowed(req_to_install)
            # Whether the best available version is already installed.
            best_installed = False
            if upgrade_allowed:
                # Link-based requirements are pulled down and inspected
                # later, so they are not checked here.
                should_check_possibility_for_upgrade = not (
                    self.force_reinstall or req_to_install.link
                )
                if should_check_possibility_for_upgrade:
                    try:
                        self.finder.find_requirement(
                            req_to_install, upgrade_allowed)
                    except BestVersionAlreadyInstalled:
                        best_installed = True
                    except DistributionNotFound:
                        # Squash the error here; it resurfaces when the
                        # install is retried later.
                        pass
            if not best_installed:
                # don't uninstall conflict if user install and
                # conflict is not user install
                if not (self.use_user_site and not
                        dist_in_usersite(req_to_install.satisfied_by)):
                    req_to_install.conflicts_with = \
                        req_to_install.satisfied_by
                req_to_install.satisfied_by = None
            # Figure out a nice message to say why we're skipping this.
            if best_installed:
                skip_reason = 'already up-to-date'
            elif self.upgrade_strategy == "only-if-needed":
                skip_reason = 'not upgraded as not directly required'
            else:
                skip_reason = 'already satisfied'
            return skip_reason
        else:
            # Nothing installed satisfies the requirement: do not skip.
            return None
    def _resolve_one(self, requirement_set, req_to_install):
        """Prepare a single requirement and register its dependencies.
        :return: A list of additional InstallRequirements to also install.
        """
        # Tell user what we are doing for this requirement:
        # obtain (editable), skipping, processing (local url), collecting
        # (remote url or package name)
        if req_to_install.constraint or req_to_install.prepared:
            return []
        req_to_install.prepared = True
        abstract_dist = self.preparer.prepare_requirement(req_to_install, self)
        # register tmp src for cleanup in case something goes wrong
        requirement_set.reqs_to_cleanup.append(req_to_install)
        # Parse and return dependencies
        dist = abstract_dist.dist(self.finder)
        try:
            check_dist_requires_python(dist)
        except UnsupportedPythonVersion as err:
            # Only downgrade the failure to a warning when explicitly asked.
            if self.ignore_requires_python:
                logger.warning(err.args[0])
            else:
                raise
        more_reqs = []
        def add_req(subreq, extras_requested):
            # Register one dependency and queue any newly added requirements.
            sub_install_req = InstallRequirement.from_req(
                str(subreq),
                req_to_install,
                isolated=self.isolated,
                wheel_cache=self.wheel_cache,
            )
            more_reqs.extend(
                requirement_set.add_requirement(
                    sub_install_req, req_to_install.name,
                    extras_requested=extras_requested
                )
            )
        with indent_log():
            # We add req_to_install before its dependencies, so that we
            # can refer to it when adding dependencies.
            if not requirement_set.has_requirement(req_to_install.name):
                # 'unnamed' requirements will get added here
                requirement_set.add_requirement(req_to_install, None)
            if not self.ignore_dependencies:
                if req_to_install.extras:
                    logger.debug(
                        "Installing extra requirements: %r",
                        ','.join(req_to_install.extras),
                    )
                # Warn about requested extras the dist does not provide.
                missing_requested = sorted(
                    set(req_to_install.extras) - set(dist.extras)
                )
                for missing in missing_requested:
                    logger.warning(
                        '%s does not provide the extra \'%s\'',
                        dist, missing
                    )
                available_requested = sorted(
                    set(dist.extras) & set(req_to_install.extras)
                )
                for subreq in dist.requires(available_requested):
                    add_req(subreq, extras_requested=available_requested)
        if not req_to_install.editable and not req_to_install.satisfied_by:
            # XXX: --no-install leads this to report 'Successfully
            # downloaded' for only non-editable reqs, even though we took
            # action on them.
            requirement_set.successfully_downloaded.append(req_to_install)
        return more_reqs
| true | true |
1c3be1d05a6dd4619a12e37be7a86cf50a72ae07 | 2,307 | py | Python | backend/content_faith_recor_33375/urls.py | crowdbotics-apps/content-faith-recor-33375 | 3f6ff2b7833f282d1136d477c6a533591dd5fb63 | [
"FTL",
"AML",
"RSA-MD"
] | 1 | 2022-02-05T03:02:39.000Z | 2022-02-05T03:02:39.000Z | backend/content_faith_recor_33375/urls.py | crowdbotics-apps/content-faith-recor-33375 | 3f6ff2b7833f282d1136d477c6a533591dd5fb63 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/content_faith_recor_33375/urls.py | crowdbotics-apps/content-faith-recor-33375 | 3f6ff2b7833f282d1136d477c6a533591dd5fb63 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """content_faith_recor_33375 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "Content Faith Records LLC"
admin.site.site_title = "Content Faith Records LLC Admin Portal"
admin.site.index_title = "Content Faith Records LLC Admin"
# swagger: interactive API documentation served at /api-docs/.
api_info = openapi.Info(
    title="Content Faith Records LLC API",
    default_version="v1",
    description="API documentation for Content Faith Records LLC App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
# Catch-all: serve the SPA's index.html for any unmatched route (keep last).
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
| 36.619048 | 87 | 0.717815 |
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of
    # rest_auth's API view.
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "Content Faith Records LLC"
admin.site.site_title = "Content Faith Records LLC Admin Portal"
admin.site.index_title = "Content Faith Records LLC Admin"
# swagger: interactive API documentation served at /api-docs/.
api_info = openapi.Info(
    title="Content Faith Records LLC API",
    default_version="v1",
    description="API documentation for Content Faith Records LLC App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
# Catch-all: serve the SPA's index.html for any unmatched route (keep last).
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
| true | true |
1c3be326bf00b9905fe2485f4e6ee3a8b341b075 | 5,403 | py | Python | xray/backends/common.py | eriknw/xray | 19df8d202b1d8054019e7e42365c67cdde6ff448 | [
"Apache-2.0"
] | null | null | null | xray/backends/common.py | eriknw/xray | 19df8d202b1d8054019e7e42365c67cdde6ff448 | [
"Apache-2.0"
] | null | null | null | xray/backends/common.py | eriknw/xray | 19df8d202b1d8054019e7e42365c67cdde6ff448 | [
"Apache-2.0"
] | 1 | 2020-02-05T00:19:02.000Z | 2020-02-05T00:19:02.000Z | import numpy as np
import itertools
from collections import Mapping
from ..core.utils import FrozenOrderedDict
from ..core.pycompat import iteritems
from ..core.variable import Coordinate
NONE_VAR_NAME = '__values__'
def _encode_variable_name(name):
if name is None:
name = NONE_VAR_NAME
return name
def _decode_variable_name(name):
    """Inverse of ``_encode_variable_name``: placeholder back to ``None``."""
    return None if name == NONE_VAR_NAME else name
def is_trivial_index(var):
    """
    Determine whether an index is 'trivial', i.e. equivalent to
    np.arange(): it carries no attributes or encodings, is a (at most)
    one-dimensional integer array, and — for Coordinate objects — its
    values actually equal 0..size-1.
    """
    # Any attached metadata disqualifies the index from being trivially
    # reconstructed on read.
    if len(var.attrs) > 0 or len(var.encoding) > 0:
        return False
    # Must be integer-typed and not more than one-dimensional.
    if var.ndim > 1 or var.dtype.kind != 'i':
        return False
    # For actual Coordinate objects, also compare values to arange().
    if isinstance(var, Coordinate):
        expected = np.arange(var.size, dtype=var.dtype)
        if np.any(var.values != expected):
            return False
    return True
class AbstractDataStore(Mapping):
    """Read-only mapping view of a storage backend: names -> variables."""

    def __iter__(self):
        return iter(self.variables)

    def __getitem__(self, key):
        return self.variables[key]

    def __len__(self):
        return len(self.variables)

    def get_attrs(self):
        """Return the raw attributes; subclasses must override."""
        raise NotImplementedError

    def get_variables(self):
        """Return the raw variables; subclasses must override."""
        raise NotImplementedError

    def load(self):
        """
        Load the variables and attributes simultaneously.
        A centralized loading function makes it easier to create data
        stores that do automatic encoding/decoding.  It is called any time
        variables or attributes are requested, so it should be fast.
        """
        raw = self.get_variables()
        decoded_vars = FrozenOrderedDict(
            (_decode_variable_name(name), var)
            for name, var in iteritems(raw))
        attributes = FrozenOrderedDict(self.get_attrs())
        return decoded_vars, attributes

    def get_dimensions(self):
        dim_lists = [var.dims for var in self.variables.values()]
        return list(itertools.chain(*dim_lists))

    @property
    def variables(self):
        # Reloaded (not cached) on every access: the store may have been
        # updated, and decoding may need attributes and variables together.
        return self.load()[0]

    @property
    def attrs(self):
        # See ``variables``: always reloaded for the same reasons.
        return self.load()[1]

    @property
    def dimensions(self):
        return self.get_dimensions()

    def close(self):
        pass

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, tracebook):
        self.close()
class AbstractWritableDataStore(AbstractDataStore):
    """Data store that also supports writing dimensions, attrs and vars."""

    def set_dimension(self, d, l):
        """Create one dimension; subclasses must override."""
        raise NotImplementedError

    def set_attribute(self, k, v):
        """Write one attribute; subclasses must override."""
        raise NotImplementedError

    def set_variable(self, k, v):
        """Write one variable; subclasses must override."""
        raise NotImplementedError

    def sync(self):
        """Flush pending writes; no-op by default."""
        pass

    def store_dataset(self, dataset):
        # A store's "variables" include coordinates, unlike xray.Dataset's
        # ``variables`` attribute, so hand over the whole dataset rather
        # than dataset.variables.
        self.store(dataset, dataset.attrs)

    def store(self, variables, attributes):
        """Write *attributes*, then all non-trivial-index *variables*."""
        self.set_attributes(attributes)
        used_dims = set(itertools.chain(*(v.dims for v in variables.values())))
        # Skip indexes that are equivalent to np.arange(): they can be
        # reconstructed on read and need not be stored.
        kept = dict((name, var) for name, var in iteritems(variables)
                    if not (name in used_dims and is_trivial_index(var)))
        self.set_variables(kept)

    def set_dimensions(self, dimensions):
        for name, size in iteritems(dimensions):
            self.set_dimension(name, size)

    def set_attributes(self, attributes):
        for name, value in iteritems(attributes):
            self.set_attribute(name, value)

    def set_variables(self, variables):
        for name, var in iteritems(variables):
            self.set_variable(_encode_variable_name(name), var)
            self.set_necessary_dimensions(var)

    def set_necessary_dimensions(self, variable):
        # ``self.dimensions`` is re-read on every pass so a dimension
        # created for an earlier variable is not created twice.
        for dim, size in zip(variable.dims, variable.shape):
            if dim not in self.dimensions:
                self.set_dimension(dim, size)
| 31.051724 | 82 | 0.635388 | import numpy as np
import itertools
from collections import Mapping
from ..core.utils import FrozenOrderedDict
from ..core.pycompat import iteritems
from ..core.variable import Coordinate
NONE_VAR_NAME = '__values__'
def _encode_variable_name(name):
    """Replace a ``None`` variable name with the on-disk placeholder."""
    if name is None:
        name = NONE_VAR_NAME
    return name
def _decode_variable_name(name):
    """Inverse of ``_encode_variable_name``: placeholder back to ``None``."""
    if name == NONE_VAR_NAME:
        name = None
    return name
def is_trivial_index(var):
    """
    Determine if an index is 'trivial', i.e. equivalent to np.arange():
    no attributes or encodings, 1-d integer dtype, and (for Coordinate
    objects) values equal to 0..size-1.
    """
    # Any attached metadata disqualifies the index.
    if len(var.attrs) or len(var.encoding):
        return False
    # Must be integer-typed and not more than one-dimensional.
    if var.ndim > 1 or not var.dtype.kind == 'i':
        return False
    # For Coordinate objects, also compare the actual values to arange().
    if isinstance(var, Coordinate):
        arange = np.arange(var.size, dtype=var.dtype)
        if np.any(var.values != arange):
            return False
    return True
class AbstractDataStore(Mapping):
    """Read-only mapping interface over a storage backend's variables."""
    def __iter__(self):
        return iter(self.variables)
    def __getitem__(self, key):
        return self.variables[key]
    def __len__(self):
        return len(self.variables)
    def get_attrs(self):
        # Subclasses must return the raw attribute mapping.
        raise NotImplementedError
    def get_variables(self):
        # Subclasses must return the raw variable mapping.
        raise NotImplementedError
    def load(self):
        """Load variables and attributes together; called on every access
        to either, so implementations should keep it fast."""
        variables = FrozenOrderedDict((_decode_variable_name(k), v)
                                      for k, v in iteritems(self.get_variables()))
        attributes = FrozenOrderedDict(self.get_attrs())
        return variables, attributes
    def get_dimensions(self):
        return list(itertools.chain(*[x.dims
                                      for x in self.variables.values()]))
    @property
    def variables(self):
        # Reloaded on every access: the store may have been updated and
        # decoding may need attributes and variables together.
        variables, _ = self.load()
        return variables
    @property
    def attrs(self):
        # Reloaded on every access, like ``variables``.
        _, attributes = self.load()
        return attributes
    @property
    def dimensions(self):
        return self.get_dimensions()
    def close(self):
        pass
    def __enter__(self):
        return self
    def __exit__(self, exception_type, exception_value, tracebook):
        self.close()
class AbstractWritableDataStore(AbstractDataStore):
    """Data store that also supports writing dims, attrs and variables."""
    def set_dimension(self, d, l):
        # Subclasses must create one dimension named ``d`` of length ``l``.
        raise NotImplementedError
    def set_attribute(self, k, v):
        # Subclasses must write one attribute.
        raise NotImplementedError
    def set_variable(self, k, v):
        # Subclasses must write one variable.
        raise NotImplementedError
    def sync(self):
        # Flush pending writes; no-op by default.
        pass
    def store_dataset(self, dataset):
        # A store's variables include coordinates, unlike xray.Dataset's
        # ``variables`` attribute, so pass the whole dataset instead.
        self.store(dataset, dataset.attrs)
    def store(self, variables, attributes):
        self.set_attributes(attributes)
        neccesary_dims = [v.dims for v in variables.values()]
        neccesary_dims = set(itertools.chain(*neccesary_dims))
        # Skip indexes equivalent to np.arange(); they are reconstructible.
        variables = dict((k, v) for k, v in iteritems(variables)
                         if not (k in neccesary_dims and is_trivial_index(v)))
        self.set_variables(variables)
    def set_dimensions(self, dimensions):
        for d, l in iteritems(dimensions):
            self.set_dimension(d, l)
    def set_attributes(self, attributes):
        for k, v in iteritems(attributes):
            self.set_attribute(k, v)
    def set_variables(self, variables):
        for vn, v in iteritems(variables):
            self.set_variable(_encode_variable_name(vn), v)
            self.set_necessary_dimensions(v)
    def set_necessary_dimensions(self, variable):
        # ``self.dimensions`` is re-read each iteration so a dimension is
        # only created once.
        for d, l in zip(variable.dims, variable.shape):
            if d not in self.dimensions:
                self.set_dimension(d, l)
| true | true |
1c3be36c98aa6bb261b2a02b8e62a5a9a521c7b8 | 9,295 | py | Python | main.py | nsobczak/tic-tac-toe | 6b44ed29bdba4d9ddaa680591c8cc6aa9e06f22e | [
"MIT"
] | null | null | null | main.py | nsobczak/tic-tac-toe | 6b44ed29bdba4d9ddaa680591c8cc6aa9e06f22e | [
"MIT"
] | null | null | null | main.py | nsobczak/tic-tac-toe | 6b44ed29bdba4d9ddaa680591c8cc6aa9e06f22e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
###############
# tic-tac-toe #
###############
Created on Tue Apr 14 17:08:22 2015
@author: Nicolas Sobczak
"""
# %%____________________________________________________________________________________________________
# Config
# Import
import random as rdm
import numpy as np
from copy import deepcopy
# Initial empty 3x3 board: 0 = free, 1 = human player, -1 = computer.
grilleVide = np.array([[0, 0, 0],
                       [0, 0, 0],
                       [0, 0, 0]])
# Winning lines, keyed by L=row, C=column, D=diagonal, as 1-based (row, col)
# coordinate triples.
dico = {}
dico['L1'] = ([1, 1], [1, 2], [1, 3])
dico['L2'] = ([2, 1], [2, 2], [2, 3])
dico['L3'] = ([3, 1], [3, 2], [3, 3])
dico['C1'] = ([1, 1], [2, 1], [3, 1])
dico['C2'] = ([1, 2], [2, 2], [3, 2])
dico['C3'] = ([1, 3], [2, 3], [3, 3])
dico['D1'] = ([1, 1], [2, 2], [3, 3])
dico['D2'] = ([3, 1], [2, 2], [1, 3])
lCles = ['L1', 'L2', 'L3', 'C1', 'C2', 'C3', 'D1', 'D2']
# ______________________________________________________________________________
# %% Niveau 0+
# %% Fonction qui choisi la liste des cases composant le coup gagnant
def coup_gagnant(grille):
    """Return the key of the first line the computer can win on.

    A line's balance is (#computer marks) - (#player marks); a balance of
    2 means exactly two -1 cells and one empty cell, i.e. a winning move
    exists.  Returns 'aleatoire' when no such line is found.
    """
    for cle in lCles:
        balance = 0
        for (lig, col) in dico[cle]:
            valeur = grille[lig - 1][col - 1]
            if valeur == -1:
                balance += 1
            elif valeur == 1:
                balance -= 1
        if balance == 2:
            return cle
    return 'aleatoire'
# %% Fonction qui choisi une case aléatoirement
def coup_ordi_aleatoire(grille):
    """Place the computer's mark (-1) on a uniformly random free cell.

    The original implementation re-rolled a random cell recursively until
    it hit a free one, which could (in principle) exhaust the recursion
    limit on an unlucky streak.  Drawing directly from the list of free
    cells yields the same uniform distribution without recursion.

    Mutates *grille* in place and returns it.
    """
    libres = [(i, j) for i in range(3) for j in range(3) if grille[i][j] == 0]
    i, j = rdm.choice(libres)
    grille[i][j] = -1
    return grille
# %% Fonction qui effectue un coup gagnant
def coup_ordi_gagnant(grille, cle):
    """Complete the winning line *cle*: mark its first free cell with -1.

    Assumes the line holds exactly two computer marks and one free cell
    (the condition under which coup_gagnant selects it).
    """
    cases = dico[cle]
    idx = 0
    # Walk along the line until the free cell is found.
    while grille[cases[idx][0] - 1][cases[idx][1] - 1] != 0:
        idx += 1
    lig, col = cases[idx]
    grille[lig - 1][col - 1] = -1
    return grille
# ______________________________________________________________________________
# %% Niveau 1
def valNum_posOrdi(grille, i, j):
    """Score placing the computer's mark (-1) at 0-based cell (i, j).

    The cell is marked in place, then every line through it (its column,
    its row, and each diagonal it belongs to) contributes:
      +10000  three computer marks (winning move)
      +200    two computer marks, no player mark
      +30     one computer mark, no player mark
      -200    two player marks on the line (threat to block)
      -30     one player mark on the line
    Returns the summed score; higher is better for the computer.

    The original repeated the tally/scoring code four times and tested
    diagonal membership via the global ``dico``; the membership tests are
    equivalent to ``i == j`` (main diagonal) and ``i + j == 2``
    (anti-diagonal), so the dictionary dependency is dropped.
    """
    grille[i][j] = -1

    def _score_ligne(cellules):
        # Tally both players on one 3-cell line and convert to points.
        nb_ordi = sum(1 for v in cellules if v == -1)
        nb_joueur = sum(1 for v in cellules if v == 1)
        pts = 0
        if nb_ordi == 3:
            pts += 10000
        elif nb_ordi == 2 and nb_joueur == 0:
            pts += 200
        elif nb_ordi == 1 and nb_joueur == 0:
            pts += 30
        if nb_joueur == 2:
            pts -= 200
        elif nb_joueur == 1:
            pts -= 30
        return pts

    res = _score_ligne([grille[n][j] for n in range(3)])   # column j
    res += _score_ligne([grille[i][m] for m in range(3)])  # row i
    if i == j:  # main diagonal (same cells as dico['D1'])
        res += _score_ligne([grille[n][n] for n in range(3)])
    if i + j == 2:  # anti-diagonal (same cells as dico['D2'])
        res += _score_ligne([grille[2 - n][n] for n in range(3)])
    return res
# %% fonction qui choisit le meilleur coup possible
def coup_ordi_optimal(grille):
    """Play the free cell with the best heuristic score.

    Each candidate is evaluated on a deep copy of the board via
    valNum_posOrdi; ties go to the first candidate in scan order, as
    before.  Mutates *grille* in place and returns it.
    """
    libres = [(lig, col)
              for lig in range(3)
              for col in range(3)
              if grille[lig][col] == 0]

    def _valeur(coord):
        # Score one candidate move on a throwaway copy of the board.
        lig, col = coord
        essai = deepcopy(grille)
        essai[lig][col] = -1
        return valNum_posOrdi(essai, lig, col)

    meilleur_lig, meilleur_col = max(libres, key=_valeur)
    grille[meilleur_lig][meilleur_col] = -1
    return grille
# %%____________________________________________________________________________
# """""""""""""""""""""" L'ordinateur met des -1 """"""""""""""""""""
### Niveau 0 ###
def tour_ordi_n0(grille):
    """Level-0 computer move: win immediately if possible, else random."""
    cle = coup_gagnant(grille)
    if cle == 'aleatoire':
        return coup_ordi_aleatoire(grille)
    return coup_ordi_gagnant(grille, cle)
### Niveau 1 ###
def tour_ordi_n1(grille):
    """Level-1 computer move: play the heuristically best free cell."""
    return coup_ordi_optimal(grille)
# %%""""""""""""""""""""" Le joueur met des 1"""""""""""""""""""""""""""
def tour_joueur(grille):
    """Ask the human player for a cell and place their mark (1) there.

    Re-prompts (row then column, 1-based) until a valid, free cell is
    chosen.  Mutates *grille* in place and returns it.
    """
    while True:
        i = input("\nentrer la ligne (de 1 à 3)\n")
        while i not in ['1', '2', '3']:
            i = input("\nentrer la ligne (de 1 à 3)\n")
        j = input("\nentrer la colonne (de 1 à 3)\n")
        while j not in ['1', '2', '3']:
            j = input("\nentrer la colonne (de 1 à 3)\n")
        lig = int(i) - 1
        col = int(j) - 1
        if grille[lig][col] == 0:
            grille[lig][col] = 1
            return grille
# %%""""""""""""""""" Condition de fin de partie""""""""""""""""""""""""
def partie_finie(grille):
    """Return True when either player owns a full row, column or diagonal.

    Cells hold 1 (player), -1 (computer) or 0 (empty), so a 3-cell line
    sums to +3 or -3 exactly when one side holds all of it.  This replaces
    four near-identical counting loops with a single scan over all eight
    winning lines.
    """
    lignes = [[grille[i][j] for j in range(3)] for i in range(3)]   # rows
    lignes += [[grille[i][j] for i in range(3)] for j in range(3)]  # columns
    lignes.append([grille[i][i] for i in range(3)])                 # main diagonal
    lignes.append([grille[2 - i][i] for i in range(3)])             # anti-diagonal
    return any(abs(sum(cellules)) == 3 for cellules in lignes)
# %% Grille pleine
def grille_pleine(grille):
    """Return True when no cell of the 3x3 grid is free (== 0)."""
    return all(grille[lig][col] != 0
               for lig in range(3)
               for col in range(3))
# ______________________________________________________________________________
# %% Fonction principale
def nouvelle_partie():
    """Run one interactive game: prompt for a level, then alternate
    human and computer turns until someone wins or the board fills up.
    Returns the final grid."""
    # Set up a fresh board and ask for the difficulty level (0 or 1).
    grille = deepcopy(grilleVide)
    print(grille)
    niveau = input("\nchoix du niveau (de 0 à 1)\n")
    niveau = int(niveau)
    if niveau not in [0, 1]:
        niveau = 0
    print('niveau :', niveau)
    # Main game loop: human plays first each round, then the computer.
    while not partie_finie(grille):
        grille = tour_joueur(grille)
        if partie_finie(grille):
            print('\ngagné\n')
        else:
            if not grille_pleine(grille):
                if niveau == 0:
                    grille = tour_ordi_n0(grille)
                elif niveau == 1:
                    grille = tour_ordi_n1(grille)
                if partie_finie(grille):
                    print('\nperdu\n')
            else:
                # Board full with no winner: draw.
                print("\négalité\n")
                break
        print(grille)
    return grille
# %%____________________________________________________________________________________________________
# ____________________________________________________________________________________________________
def monMain():
    """Entry point: launch a single interactive game."""
    nouvelle_partie()
if __name__ == "__main__":
    monMain()
| 25.053908 | 105 | 0.471544 |
import random as rdm
import numpy as np
from copy import deepcopy
# Initial empty 3x3 board: 0 = free, 1 = human player, -1 = computer.
grilleVide = np.array([[0, 0, 0],
                       [0, 0, 0],
                       [0, 0, 0]])
# Winning lines, keyed by L=row, C=column, D=diagonal, as 1-based
# (row, col) coordinate triples.
dico = {}
dico['L1'] = ([1, 1], [1, 2], [1, 3])
dico['L2'] = ([2, 1], [2, 2], [2, 3])
dico['L3'] = ([3, 1], [3, 2], [3, 3])
dico['C1'] = ([1, 1], [2, 1], [3, 1])
dico['C2'] = ([1, 2], [2, 2], [3, 2])
dico['C3'] = ([1, 3], [2, 3], [3, 3])
dico['D1'] = ([1, 1], [2, 2], [3, 3])
dico['D2'] = ([3, 1], [2, 2], [1, 3])
lCles = ['L1', 'L2', 'L3', 'C1', 'C2', 'C3', 'D1', 'D2']
def coup_gagnant(grille):
    """Return the key of the first line the computer can win on.

    A line's counter is (#computer marks) - (#player marks); 2 means two
    -1 cells and one free cell.  Returns 'aleatoire' when no line wins."""
    cle_choisie = 0
    for cle in lCles:
        row = dico[cle]
        compteur = 0
        for n in range(3):
            case = row[n]
            i = case[0]
            j = case[1]
            if grille[i - 1][j - 1] == -1:
                compteur += 1
            elif grille[i - 1][j - 1] == 1:
                compteur -= 1
        # Keep only the first qualifying line.
        if compteur == 2 and cle_choisie == 0:
            cle_choisie = cle
    if cle_choisie == 0:
        res = 'aleatoire'
    else:
        res = cle_choisie
    return res
def coup_ordi_aleatoire(grille):
    """Place -1 on a random cell, retrying recursively until a free one
    is hit.  Mutates *grille* in place and returns it."""
    i = rdm.choice([0, 1, 2])
    j = rdm.choice([0, 1, 2])
    if grille[i][j] != 0:
        # Occupied: roll again.
        grille = coup_ordi_aleatoire(grille)
    else:
        grille[i][j] = -1
    return grille
def coup_ordi_gagnant(grille, cle):
    """Complete the winning line *cle*: mark its first free cell with -1."""
    choix = dico[cle]
    c = 0
    case = choix[c]
    i = case[0]
    j = case[1]
    # Advance along the line until the free cell is found.
    while grille[i - 1][j - 1] != 0:
        c += 1
        case = choix[c]
        i = case[0]
        j = case[1]
    grille[i - 1][j - 1] = -1
    return grille
def valNum_posOrdi(grille, i, j):
    """Score placing the computer's mark (-1) at 0-based cell (i, j).

    Marks the cell in place, then scores its column, its row, and each
    diagonal it belongs to: +10000 for three computer marks, +200 for two
    with no player mark, +30 for one with no player mark, -200 for two
    player marks, -30 for one.  Returns the total; higher is better."""
    res = 0
    grille[i][j] = -1
    # Column j.
    compteur_O = 0
    compteur_J = 0
    for n in range(3):
        if grille[n][j] == -1:
            compteur_O += 1
        elif grille[n][j] == 1:
            compteur_J += 1
    if compteur_O == 3:
        res += 10000
    elif compteur_O == 2 and compteur_J == 0:
        res += 200
    elif compteur_O == 1 and compteur_J == 0:
        res += 30
    if compteur_J == 2:
        res -= 200
    elif compteur_J == 1:
        res -= 30
    # Row i.
    compteur_O = 0
    compteur_J = 0
    for m in range(3):
        if grille[i][m] == -1:
            compteur_O += 1
        elif grille[i][m] == 1:
            compteur_J += 1
    if compteur_O == 3:
        res += 10000
    elif compteur_O == 2 and compteur_J == 0:
        res += 200
    elif compteur_O == 1 and compteur_J == 0:
        res += 30
    if compteur_J == 2:
        res -= 200
    elif compteur_J == 1:
        res -= 30
    # Main diagonal (membership in dico['D1'] means i == j).
    if [i + 1, j + 1] in dico['D1']:
        compteur_O = 0
        compteur_J = 0
        for n in range(3):
            if grille[n][n] == -1:
                compteur_O += 1
            elif grille[n][n] == 1:
                compteur_J += 1
        if compteur_O == 3:
            res += 10000
        elif compteur_O == 2 and compteur_J == 0:
            res += 200
        elif compteur_O == 1 and compteur_J == 0:
            res += 30
        if compteur_J == 2:
            res -= 200
        elif compteur_J == 1:
            res -= 30
    # Anti-diagonal (membership in dico['D2'] means i + j == 2).
    if [i + 1, j + 1] in dico['D2']:
        compteur_O = 0
        compteur_J = 0
        for n in range(3):
            if grille[2 - n][n] == -1:
                compteur_O += 1
            elif grille[2 - n][n] == 1:
                compteur_J += 1
        if compteur_O == 3:
            res += 10000
        elif compteur_O == 2 and compteur_J == 0:
            res += 200
        elif compteur_O == 1 and compteur_J == 0:
            res += 30
        if compteur_J == 2:
            res -= 200
        elif compteur_J == 1:
            res -= 30
    return res
def coup_ordi_optimal(grille):
    """Play the free cell with the best heuristic score (first on ties).
    Mutates *grille* in place and returns it."""
    lisCoord = []
    lisVal = []
    # Collect the free cells.
    for i in range(3):
        for j in range(3):
            if grille[i][j] == 0:
                lisCoord += [[i, j]]
    # Score each candidate on a deep copy of the board.
    for coord in lisCoord:
        i = coord[0]
        j = coord[1]
        grilleTest = deepcopy(grille)
        grilleTest[i][j] = -1
        valPos = valNum_posOrdi(grilleTest, i, j)
        lisVal += [valPos]
    # Pick the first candidate with the maximum score.
    valMax = max(lisVal)
    indice = lisVal.index(valMax)
    coord = lisCoord[indice]
    i = coord[0]
    j = coord[1]
    grille[i][j] = -1
    return grille
### Niveau 0 ###
def tour_ordi_n0(grille):
    """Level-0 computer move: win immediately if possible, else random."""
    cle = coup_gagnant(grille)
    if cle == 'aleatoire':
        grille = coup_ordi_aleatoire(grille)
    else:
        grille = coup_ordi_gagnant(grille, cle)
    return grille
### Niveau 1 ###
def tour_ordi_n1(grille):
    """Level-1 computer turn: play the move ranked best by the
    position-evaluation heuristic. Returns the updated board."""
    return coup_ordi_optimal(grille)
# %%""""""""""""""""""""" Le joueur met des 1"""""""""""""""""""""""""""
def tour_joueur(grille):
    """Ask the human player for a cell until a free one is entered,
    mark it with 1 and return the board.

    Coordinates are typed as 1-based strings; an occupied cell makes
    the whole prompt sequence start over (iterative version of the
    original recursive retry).
    """
    while True:
        ligne = input("\nentrer la ligne (de 1 à 3)\n")
        while ligne not in ['1', '2', '3']:
            ligne = input("\nentrer la ligne (de 1 à 3)\n")
        colonne = input("\nentrer la colonne (de 1 à 3)\n")
        while colonne not in ['1', '2', '3']:
            colonne = input("\nentrer la colonne (de 1 à 3)\n")
        # Convert to 0-based indices.
        lig, col = int(ligne) - 1, int(colonne) - 1
        if grille[lig][col] == 0:
            grille[lig][col] = 1
            return grille
        # Cell already taken: re-ask both coordinates.
# %%""""""""""""""""" Condition de fin de partie""""""""""""""""""""""""
def partie_finie(grille):
    """Return True when one player owns a full line of the 3x3 board.

    Cells hold 1 (human), -1 (computer) or 0 (empty); a row, column or
    diagonal summing to +3 or -3 is three identical non-empty marks.
    """
    alignements = []
    for k in range(3):
        alignements.append([grille[k][c] for c in range(3)])  # row k
        alignements.append([grille[r][k] for r in range(3)])  # column k
    alignements.append([grille[d][d] for d in range(3)])      # main diagonal
    alignements.append([grille[2 - d][d] for d in range(3)])  # anti-diagonal
    # |sum| == 3 iff all three cells carry the same non-zero mark.
    return any(abs(sum(ligne)) == 3 for ligne in alignements)
# %% Grille pleine
def grille_pleine(grille):
    """Return True when no cell of the 3x3 *grille* is still empty (== 0).

    Replaces the manual flag loop with all() over a generator, which is
    the idiomatic form and stops scanning at the first empty cell
    instead of always visiting all nine.
    """
    return all(grille[i][j] != 0 for i in range(3) for j in range(3))
# ______________________________________________________________________________
# %% Fonction principale
def nouvelle_partie():
    """Run one interactive tic-tac-toe game (human = 1, computer = -1).

    The human always plays first. The difficulty is asked once at the
    start (0 = win-or-random, 1 = heuristic search). Returns the final
    board.
    """
    # Fresh board: deepcopy so the module-level template grilleVide is
    # never mutated across games.
    grille = deepcopy(grilleVide)
    print(grille)
    niveau = input("\nchoix du niveau (de 0 à 1)\n")
    # Robustness fix: int() used to crash on non-numeric input; any
    # invalid answer now falls back to level 0 (same fallback as an
    # out-of-range number).
    try:
        niveau = int(niveau)
    except ValueError:
        niveau = 0
    if niveau not in [0, 1]:
        niveau = 0
    print('niveau :', niveau)
    # Main loop: human turn, then computer turn, until a win or a full
    # board (draw breaks out explicitly).
    while not partie_finie(grille):
        grille = tour_joueur(grille)
        if partie_finie(grille):
            print('\ngagné\n')
        else:
            if not grille_pleine(grille):
                if niveau == 0:
                    grille = tour_ordi_n0(grille)
                elif niveau == 1:
                    grille = tour_ordi_n1(grille)
                if partie_finie(grille):
                    print('\nperdu\n')
            else:
                print("\négalité\n")
                break
    print(grille)
    return grille
# %%____________________________________________________________________________________________________
# ____________________________________________________________________________________________________
def monMain():
    """Script entry point: start a single interactive game."""
    nouvelle_partie()
# Launch an interactive game only when executed as a script, not on import.
if __name__ == "__main__":
    monMain()
| true | true |
# NOTE: The three lines below are extraneous dataset-viewer UI text that was
# accidentally captured with this source file; they are not part of the program
# and are commented out so the module still parses.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.