One row per source file; 113 columns. The 41 `qsc_*` quality signals each contribute two columns: a value column suffixed `_quality_signal` (columns 30–70) and an unsuffixed column that holds 0/1/null in the rows below (shown as "Flag" in the per-row tables).

Core columns:

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_{stars,issues,forks}_repo_path | string |
| max_{stars,issues,forks}_repo_name | string |
| max_{stars,issues,forks}_repo_head_hexsha | string |
| max_{stars,issues,forks}_repo_licenses | list |
| max_{stars,issues,forks}_count | int64 |
| max_stars_repo_stars_event_{min,max}_datetime | string |
| max_issues_repo_issues_event_{min,max}_datetime | string |
| max_forks_repo_forks_event_{min,max}_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| effective | string |
| hits | int64 |

Quality-signal columns (value column type, then flag column type):

| Signal (base name) | Value type | Flag type |
|---|---|---|
| qsc_code_num_words | int64 | int64 |
| qsc_code_num_chars | float64 | int64 |
| qsc_code_mean_word_length | float64 | int64 |
| qsc_code_frac_words_unique | float64 | null |
| qsc_code_frac_chars_top_2grams | float64 | int64 |
| qsc_code_frac_chars_top_3grams | float64 | int64 |
| qsc_code_frac_chars_top_4grams | float64 | int64 |
| qsc_code_frac_chars_dupe_5grams | float64 | int64 |
| qsc_code_frac_chars_dupe_6grams | float64 | int64 |
| qsc_code_frac_chars_dupe_7grams | float64 | int64 |
| qsc_code_frac_chars_dupe_8grams | float64 | int64 |
| qsc_code_frac_chars_dupe_9grams | float64 | int64 |
| qsc_code_frac_chars_dupe_10grams | float64 | int64 |
| qsc_code_frac_chars_replacement_symbols | float64 | int64 |
| qsc_code_frac_chars_digital | float64 | int64 |
| qsc_code_frac_chars_whitespace | float64 | int64 |
| qsc_code_size_file_byte | float64 | int64 |
| qsc_code_num_lines | float64 | int64 |
| qsc_code_num_chars_line_max | float64 | int64 |
| qsc_code_num_chars_line_mean | float64 | int64 |
| qsc_code_frac_chars_alphabet | float64 | int64 |
| qsc_code_frac_chars_comments | float64 | int64 |
| qsc_code_cate_xml_start | float64 | int64 |
| qsc_code_frac_lines_dupe_lines | float64 | int64 |
| qsc_code_cate_autogen | float64 | int64 |
| qsc_code_frac_lines_long_string | float64 | int64 |
| qsc_code_frac_chars_string_length | float64 | int64 |
| qsc_code_frac_chars_long_word_length | float64 | int64 |
| qsc_code_frac_lines_string_concat | float64 | null |
| qsc_code_cate_encoded_data | float64 | int64 |
| qsc_code_frac_chars_hex_words | float64 | int64 |
| qsc_code_frac_lines_prompt_comments | float64 | int64 |
| qsc_code_frac_lines_assert | float64 | int64 |
| qsc_codepython_cate_ast | float64 | int64 |
| qsc_codepython_frac_lines_func_ratio | float64 | int64 |
| qsc_codepython_cate_var_zero | bool | int64 |
| qsc_codepython_frac_lines_pass | float64 | int64 |
| qsc_codepython_frac_lines_import | float64 | int64 |
| qsc_codepython_frac_lines_simplefunc | float64 | int64 |
| qsc_codepython_score_lines_no_logic | float64 | int64 |
| qsc_codepython_frac_lines_print | float64 | int64 |
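In every row shown below, `hits` equals the number of set flags (always 5 here) and `effective` is 0, which suggests the unsuffixed columns are binary filter hits; that reading is an inference, not documented in the dump. A minimal sketch of screening rows on these columns, assuming the data is available as Parquet (the file name is hypothetical) and assuming the flag semantics above:

```python
import pandas as pd

# Hypothetical file name; the dump does not say where the data lives.
df = pd.read_parquet("code_quality_sample.parquet")

# Columns without the `_quality_signal` suffix look like binary filter flags.
flag_cols = [c for c in df.columns
             if c.startswith("qsc_") and not c.endswith("_quality_signal")]

# In every row shown in this dump, `hits` equals the number of set flags.
recomputed_hits = df[flag_cols].fillna(0).astype(float).sum(axis=1)

# Keep only rows that trip no quality filters (none in this sample: hits == 5).
clean = df[recomputed_hits == 0]
print(len(df), len(clean))
```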
**Row 1 — `Project3/task/admin.py` (Nakui/PruebaEmpleo)**

| Field | Value |
|---|---|
| hexsha | 0c9fc8e4cb48225517936c3d7574c3c36bcd7773 |
| size | 117 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | Project3/task/admin.py | Project3/task/admin.py | Project3/task/admin.py |
| repo_name | Nakui/PruebaEmpleo | Nakui/PruebaEmpleo | Nakui/PruebaEmpleo |
| repo_head_hexsha | b3cdc8dfeddd1cd24569a291178f2614e42a1eeb | b3cdc8dfeddd1cd24569a291178f2614e42a1eeb | b3cdc8dfeddd1cd24569a291178f2614e42a1eeb |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
from django.contrib import admin
from task.models import Task
# Register your models here.
admin.site.register(Task)
```

| Signal | Value | Flag |
|---|---|---|
| avg_line_length | 23.4 | |
| max_line_length | 32 | |
| alphanum_fraction | 0.811966 | |
| qsc_code_num_words | 18 | 1 |
| qsc_code_num_chars | 117 | 0 |
| qsc_code_mean_word_length | 5.277778 | 0 |
| qsc_code_frac_words_unique | 0.611111 | null |
| qsc_code_frac_chars_top_2grams | 0 | 0 |
| qsc_code_frac_chars_top_3grams | 0 | 0 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.119658 | 0 |
| qsc_code_size_file_byte | 117 | 0 |
| qsc_code_num_lines | 5 | 1 |
| qsc_code_num_chars_line_max | 33 | 0 |
| qsc_code_num_chars_line_mean | 23.4 | 0 |
| qsc_code_frac_chars_alphabet | 0.92233 | 0 |
| qsc_code_frac_chars_comments | 0.222222 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.666667 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.666667 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | |
| hits | 5 | |
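Several of row 1's signal values can be reproduced directly from its `content`. A minimal sketch — the exact tokenizer behind these signals is not documented here, so `\w+` word tokens and case-insensitive uniqueness are assumptions that happen to reproduce the numbers above:

```python
import re

# Row 1's `content` field, verbatim.
content = (
    "from django.contrib import admin\n"
    "from task.models import Task\n"
    "# Register your models here.\n"
    "admin.site.register(Task)\n"
)

words = re.findall(r"\w+", content)                              # 18 tokens
num_words = len(words)
mean_word_length = sum(map(len, words)) / num_words              # 5.277778
frac_words_unique = len({w.lower() for w in words}) / num_words  # 0.611111

lines = content.split("\n")                    # 5 "lines", incl. the trailing one
num_chars = len(content) - content.count("\n")  # 117 (newlines excluded)
avg_line_length = num_chars / len(lines)        # 23.4

print(num_words, round(mean_word_length, 6),
      round(frac_words_unique, 6), avg_line_length)
```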
**Row 2 — `cron.py` (christopherpryer/luigi_demo)**

| Field | Value |
|---|---|
| hexsha | 0ca0273c4f5d96e99b0d13a0dd96a10ea5c42444 |
| size | 133 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | cron.py | cron.py | cron.py |
| repo_name | christopherpryer/luigi_demo | christopherpryer/luigi_demo | christopherpryer/luigi_demo |
| repo_head_hexsha | f8aade5f5a4a8f60529c1623cb82b3856c3f6744 | f8aade5f5a4a8f60529c1623cb82b3856c3f6744 | f8aade5f5a4a8f60529c1623cb82b3856c3f6744 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | 1 |
| event_min_datetime | null | null | 2020-01-28T15:59:05.000Z |
| event_max_datetime | null | null | 2020-01-28T15:59:05.000Z |

content:

```python
import datetime
import luigi
from apscheduler.schedulers.blocking import BlockingScheduler
sched = BlockingScheduler()
# TODO: ...
```

| Signal | Value | Flag |
|---|---|---|
| avg_line_length | 16.625 | |
| max_line_length | 61 | |
| alphanum_fraction | 0.804511 | |
| qsc_code_num_words | 13 | 1 |
| qsc_code_num_chars | 133 | 0 |
| qsc_code_mean_word_length | 8.230769 | 0 |
| qsc_code_frac_words_unique | 0.769231 | null |
| qsc_code_frac_chars_top_2grams | 0 | 0 |
| qsc_code_frac_chars_top_3grams | 0 | 0 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.120301 | 0 |
| qsc_code_size_file_byte | 133 | 0 |
| qsc_code_num_lines | 8 | 1 |
| qsc_code_num_chars_line_max | 62 | 0 |
| qsc_code_num_chars_line_mean | 16.625 | 0 |
| qsc_code_frac_chars_alphabet | 0.91453 | 0 |
| qsc_code_frac_chars_comments | 0.067669 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0.125 | 1 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.75 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.75 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | |
| hits | 5 | |
**Row 3 — `fangfangfang/models/__init__.py` (nryang/fangfangfang)**

| Field | Value |
|---|---|
| hexsha | 0b369425d34c3982a24eeed07724db5355e8fe36 |
| size | 527 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | fangfangfang/models/__init__.py | fangfangfang/models/__init__.py | fangfangfang/models/__init__.py |
| repo_name | nryang/fangfangfang | nryang/fangfangfang | nryang/fangfangfang |
| repo_head_hexsha | 6afe43d8491a5b88bec785025e094bb1e242d052 | 6afe43d8491a5b88bec785025e094bb1e242d052 | 6afe43d8491a5b88bec785025e094bb1e242d052 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | 1 | null |
| event_min_datetime | null | 2019-08-11T18:13:39.000Z | null |
| event_max_datetime | null | 2019-08-11T18:13:39.000Z | null |

content:

```python
"""NOTE: This file is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the file manually.
"""
# coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import models into model package
from fangfangfang.models.defang_request import DefangRequest
from fangfangfang.models.defang_response import DefangResponse
from fangfangfang.models.model import Model
from fangfangfang.models.refang_request import RefangRequest
from fangfangfang.models.refang_response import RefangResponse
```

| Signal | Value | Flag |
|---|---|---|
| avg_line_length | 32.9375 | |
| max_line_length | 91 | |
| alphanum_fraction | 0.833017 | |
| qsc_code_num_words | 68 | 0 |
| qsc_code_num_chars | 527 | 0 |
| qsc_code_mean_word_length | 6.323529 | 0 |
| qsc_code_frac_words_unique | 0.558824 | null |
| qsc_code_frac_chars_top_2grams | 0.186047 | 0 |
| qsc_code_frac_chars_top_3grams | 0.255814 | 1 |
| qsc_code_frac_chars_top_4grams | 0.130233 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.004246 | 0 |
| qsc_code_frac_chars_whitespace | 0.106262 | 0 |
| qsc_code_size_file_byte | 527 | 0 |
| qsc_code_num_lines | 15 | 0 |
| qsc_code_num_chars_line_max | 92 | 0 |
| qsc_code_num_chars_line_mean | 35.133333 | 0 |
| qsc_code_frac_chars_alphabet | 0.908705 | 0 |
| qsc_code_frac_chars_comments | 0.343454 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 1 | 1 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 1 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 1 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | |
| hits | 5 | |
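Row 3 is the only row in this sample with qsc_code_cate_autogen = 1, and its content declares itself machine-generated ("auto generated by OpenAPI Generator"). The real detector behind the signal is unknown; a keyword heuristic like the following sketch would classify this content the same way (the marker list is an assumption):

```python
import re

# Markers commonly left by code generators; the actual signal's rules are unknown.
AUTOGEN_MARKERS = re.compile(
    r"auto[- ]?generated|generated by|do not edit", re.IGNORECASE
)

def cate_autogen(content: str, head_lines: int = 5) -> int:
    """Return 1 if an auto-generation marker appears near the top of the file."""
    head = "\n".join(content.splitlines()[:head_lines])
    return int(bool(AUTOGEN_MARKERS.search(head)))

header = '"""NOTE: This file is auto generated by OpenAPI Generator ..."""'
print(cate_autogen(header))  # 1, matching row 3's value
```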
**Row 4 — `validium/__init__.py` (json2d/validium)**

| Field | Value |
|---|---|
| hexsha | 0b3721cc15b0a4aae3da7e2b70b4118ebe724b40 |
| size | 27 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | validium/__init__.py | validium/__init__.py | validium/__init__.py |
| repo_name | json2d/validium | json2d/validium | json2d/validium |
| repo_head_hexsha | cc67dede318c0f90e5d7f1813d6380f153b596e4 | cc67dede318c0f90e5d7f1813d6380f153b596e4 | cc67dede318c0f90e5d7f1813d6380f153b596e4 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
from .core import Validator
```

| Signal | Value | Flag |
|---|---|---|
| avg_line_length | 27 | |
| max_line_length | 27 | |
| alphanum_fraction | 0.851852 | |
| qsc_code_num_words | 4 | 1 |
| qsc_code_num_chars | 27 | 1 |
| qsc_code_mean_word_length | 5.75 | 0 |
| qsc_code_frac_words_unique | 1 | null |
| qsc_code_frac_chars_top_2grams | 0 | 0 |
| qsc_code_frac_chars_top_3grams | 0 | 0 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.111111 | 0 |
| qsc_code_size_file_byte | 27 | 0 |
| qsc_code_num_lines | 1 | 1 |
| qsc_code_num_chars_line_max | 27 | 0 |
| qsc_code_num_chars_line_mean | 27 | 0 |
| qsc_code_frac_chars_alphabet | 0.958333 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 1 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 1 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | |
| hits | 5 | |
**Row 5 — `experiments/draw_openness_curves.py` (Cogito2012/DEAR)**

| Field | Value |
|---|---|
| hexsha | 0b7dccac9922b48f4165d836adfb47940d4d3a83 |
| size | 16,681 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | experiments/draw_openness_curves.py | experiments/draw_openness_curves.py | experiments/draw_openness_curves.py |
| repo_name | Cogito2012/DEAR | Cogito2012/DEAR | Cogito2012/DEAR |
| repo_head_hexsha | 97d0e8f191da0f20dcc9721280af48171dabef5e | 97d0e8f191da0f20dcc9721280af48171dabef5e | 97d0e8f191da0f20dcc9721280af48171dabef5e |
| repo_licenses | ["Apache-2.0"] | ["Apache-2.0"] | ["Apache-2.0"] |
| count | 47 | 2 | 6 |
| event_min_datetime | 2021-09-02T10:42:29.000Z | 2021-12-05T02:28:42.000Z | 2021-09-19T16:31:32.000Z |
| event_max_datetime | 2022-03-31T01:37:49.000Z | 2022-01-05T06:46:10.000Z | 2022-03-03T06:57:34.000Z |

content:

```python
import os, argparse
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
def softmax_curvepoints(result_file, thresh, ood_ncls, num_rand):
assert os.path.exists(result_file), "File not found! Run baseline_i3d_softmax.py first!"
# load the testing results
results = np.load(result_file, allow_pickle=True)
ind_softmax = results['ind_softmax'] # (N1, C)
ood_softmax = results['ood_softmax'] # (N2, C)
ind_labels = results['ind_label'] # (N1,)
ood_labels = results['ood_label'] # (N2,)
ind_ncls = ind_softmax.shape[1]
ind_results = np.argmax(ind_softmax, axis=1)
ood_results = np.argmax(ood_softmax, axis=1)
ind_conf = np.max(ind_softmax, axis=1)
ood_conf = np.max(ood_softmax, axis=1)
ind_results[ind_conf < thresh] = ind_ncls # incorrect rejection
# open set F1 score (multi-class)
macro_F1 = f1_score(ind_labels, ind_results, average='macro')
macro_F1_list = [macro_F1 * 100]
openness_list = [0]
for n in range(ood_ncls):
ncls_novel = n + 1
openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100
openness_list.append(openness)
        # randomly select the subset of ood samples
macro_F1_multi = np.zeros((num_rand), dtype=np.float32)
for m in range(num_rand):
cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False)
ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls
ood_sub_confs = np.concatenate([ood_conf[ood_labels == clsid] for clsid in cls_select])
ood_sub_results[ood_sub_confs < thresh] = ind_ncls # correct rejection
# construct preds and labels
preds = np.concatenate((ind_results, ood_sub_results), axis=0)
labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
macro_F1_multi[m] = f1_score(labels, preds, average='macro')
macro_F1 = np.mean(macro_F1_multi) * 100
macro_F1_list.append(macro_F1)
return openness_list, macro_F1_list
def openmax_curvepoints(result_file, ood_ncls, num_rand):
assert os.path.exists(result_file), "File not found! Run baseline_i3d_openmax.py first!"
results = np.load(result_file, allow_pickle=True)
ind_openmax = results['ind_openmax'] # (N1, C+1)
ood_openmax = results['ood_openmax'] # (N2, C+1)
ind_labels = results['ind_label'] # (N1,)
ood_labels = results['ood_label'] # (N2,)
ind_results = np.argmax(ind_openmax, axis=1)
ood_results = np.argmax(ood_openmax, axis=1)
ind_ncls = ind_openmax.shape[1] - 1 # (C+1)-1
# open set F1 score (multi-class)
macro_F1 = f1_score(ind_labels, ind_results, average='macro')
macro_F1_list = [macro_F1 * 100]
openness_list = [0]
for n in range(ood_ncls):
ncls_novel = n + 1
openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100
openness_list.append(openness)
        # randomly select the subset of ood samples
macro_F1_multi = np.zeros((num_rand), dtype=np.float32)
for m in range(num_rand):
cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False)
ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls
# construct preds and labels
preds = np.concatenate((ind_results, ood_sub_results), axis=0)
labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
macro_F1_multi[m] = f1_score(labels, preds, average='macro')
macro_F1 = np.mean(macro_F1_multi) * 100
macro_F1_list.append(macro_F1)
return openness_list, macro_F1_list
def uncertainty_curvepoints(result_file, thresh, ind_ncls, ood_ncls, num_rand):
assert os.path.exists(result_file), "File not found! Run ood_detection first!"
# load the testing results
results = np.load(result_file, allow_pickle=True)
ind_uncertainties = results['ind_unctt'] # (N1,)
ood_uncertainties = results['ood_unctt'] # (N2,)
ind_results = results['ind_pred'] # (N1,)
ood_results = results['ood_pred'] # (N2,)
ind_labels = results['ind_label']
ood_labels = results['ood_label']
# open set F1 score (multi-class)
    ind_results[ind_uncertainties > thresh] = ind_ncls # false rejection
macro_F1 = f1_score(ind_labels, ind_results, average='macro')
macro_F1_list = [macro_F1 * 100]
openness_list = [0]
for n in range(ood_ncls):
ncls_novel = n + 1
openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100
openness_list.append(openness)
        # randomly select the subset of ood samples
macro_F1_multi = np.zeros((num_rand), dtype=np.float32)
for m in range(num_rand):
cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False)
ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
ood_sub_uncertainties = np.concatenate([ood_uncertainties[ood_labels == clsid] for clsid in cls_select])
            ood_sub_results[ood_sub_uncertainties > thresh] = ind_ncls # correct rejection
ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls
# construct preds and labels
preds = np.concatenate((ind_results, ood_sub_results), axis=0)
labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
macro_F1_multi[m] = f1_score(labels, preds, average='macro')
macro_F1 = np.mean(macro_F1_multi) * 100
macro_F1_list.append(macro_F1)
return openness_list, macro_F1_list
def plot_all_curves(openness, values, line_styles, result_prefix, ylim=[60, 80], fontsize=18):
fig = plt.figure(figsize=(8,6)) # (w, h)
plt.rcParams["font.family"] = "Arial"
for k, v in values.items():
plt.plot(openness, v, line_styles[k], linewidth=2, label=k)
plt.xlim(0, max(openness))
plt.ylim(ylim)
plt.xlabel('Openness (%)', fontsize=fontsize)
plt.ylabel('Open maF1 (%)', fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(np.arange(ylim[0], ylim[1]+1, 5), fontsize=fontsize)
plt.grid('on')
plt.legend(fontsize=fontsize-10, loc='lower center', ncol=3, handletextpad=0.3, columnspacing=0.5)
plt.tight_layout()
result_path = os.path.dirname(result_prefix)
if not os.path.exists(result_path):
os.makedirs(result_path)
plt.savefig(result_prefix + '_%s.png'%(args.ood_data), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
plt.savefig(result_prefix + '_%s.pdf'%(args.ood_data), bbox_inches='tight', dpi=fig.dpi, pad_inches=0.0)
def main_i3d():
# SoftMax
print('Compute Open maF1 for SoftMax...')
result_file = 'i3d/results_baselines/openmax/I3D_OpenMax_%s_result.npz'%(args.ood_data)
openness_softmax, maF1_softmax = softmax_curvepoints(result_file, 0.996825, args.ood_ncls, args.num_rand)
# OpenMax
print('Compute Open maF1 for OpenMax...')
result_file = 'i3d/results_baselines/openmax/I3D_OpenMax_%s_result.npz'%(args.ood_data)
openness_openmax, maF1_openmax = openmax_curvepoints(result_file, args.ood_ncls, args.num_rand)
# RPL
print('Compute Open maF1 for RPL...')
result_file = 'i3d/results_baselines/rpl/I3D_RPL_%s_result.npz'%(args.ood_data)
openness_rpl, maF1_rpl = softmax_curvepoints(result_file, 0.995178, args.ood_ncls, args.num_rand)
# MCDropout BALD
print('Compute Open maF1 for MC Dropout BALD...')
result_file = 'i3d/results/I3D_DNN_BALD_%s_result.npz'%(args.ood_data)
openness_dnn, maF1_dnn = uncertainty_curvepoints(result_file, 0.000433, args.ind_ncls, args.ood_ncls, args.num_rand)
# BNN SVI BALD
print('Compute Open maF1 for BNN SVI BALD...')
result_file = 'i3d/results/I3D_BNN_BALD_%s_result.npz'%(args.ood_data)
openness_bnn, maF1_bnn = uncertainty_curvepoints(result_file, 0.000004, args.ind_ncls, args.ood_ncls, args.num_rand)
# DEAR (full)
print('Compute Open maF1 for DEAR (full)...')
result_file = 'i3d/results/I3D_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data)
openness_dear, maF1_dear = uncertainty_curvepoints(result_file, 0.004550, args.ind_ncls, args.ood_ncls, args.num_rand)
# draw F1 curve
line_styles = {'DEAR (full)': 'r-', 'SoftMax': 'b-', 'RPL': 'm-', 'BNN SVI': 'c-', 'MC Dropout': 'y-', 'OpenMax': 'k-'}
values = {'DEAR (full)': maF1_dear, 'SoftMax': maF1_softmax, 'RPL': maF1_rpl, 'BNN SVI': maF1_bnn, 'MC Dropout': maF1_dnn, 'OpenMax': maF1_openmax}
result_prefix = args.result_prefix + '_I3D'
plot_all_curves(openness_dear, values, line_styles, result_prefix, ylim=[60,80], fontsize=30)
def main_tsm():
# SoftMax
print('Compute Open maF1 for SoftMax...')
result_file = 'tsm/results_baselines/openmax/TSM_OpenMax_%s_result.npz'%(args.ood_data)
openness_softmax, maF1_softmax = softmax_curvepoints(result_file, 0.999683, args.ood_ncls, args.num_rand)
# OpenMax
print('Compute Open maF1 for OpenMax...')
result_file = 'tsm/results_baselines/openmax/TSM_OpenMax_%s_result.npz'%(args.ood_data)
openness_openmax, maF1_openmax = openmax_curvepoints(result_file, args.ood_ncls, args.num_rand)
# RPL
print('Compute Open maF1 for RPL...')
result_file = 'tsm/results_baselines/rpl/TSM_RPL_%s_result.npz'%(args.ood_data)
openness_rpl, maF1_rpl = softmax_curvepoints(result_file, 0.999167, args.ood_ncls, args.num_rand)
# MCDropout BALD
print('Compute Open maF1 for MC Dropout BALD...')
result_file = 'tsm/results/TSM_DNN_BALD_%s_result.npz'%(args.ood_data)
openness_dnn, maF1_dnn = uncertainty_curvepoints(result_file, 0.000022, args.ind_ncls, args.ood_ncls, args.num_rand)
# BNN SVI BALD
print('Compute Open maF1 for BNN SVI BALD...')
result_file = 'tsm/results/TSM_BNN_BALD_%s_result.npz'%(args.ood_data)
openness_bnn, maF1_bnn = uncertainty_curvepoints(result_file, 0.000003, args.ind_ncls, args.ood_ncls, args.num_rand)
# DEAR (full)
print('Compute Open maF1 for DEAR (full)...')
result_file = 'tsm/results/TSM_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data)
openness_dear, maF1_dear = uncertainty_curvepoints(result_file, 0.004549, args.ind_ncls, args.ood_ncls, args.num_rand)
# draw F1 curve
line_styles = {'DEAR (full)': 'r-', 'SoftMax': 'b-', 'RPL': 'm-', 'BNN SVI': 'c-', 'MC Dropout': 'y-', 'OpenMax': 'k-'}
values = {'DEAR (full)': maF1_dear, 'SoftMax': maF1_softmax, 'RPL': maF1_rpl, 'BNN SVI': maF1_bnn, 'MC Dropout': maF1_dnn, 'OpenMax': maF1_openmax}
result_prefix = args.result_prefix + '_TSM'
ylim = [60, 90] if args.ood_data == 'HMDB' else [55, 90]
plot_all_curves(openness_dear, values, line_styles, result_prefix, ylim=ylim, fontsize=30)
def main_slowfast():
# SoftMax
print('Compute Open maF1 for SoftMax...')
result_file = 'slowfast/results_baselines/openmax/SlowFast_OpenMax_%s_result.npz'%(args.ood_data)
openness_softmax, maF1_softmax = softmax_curvepoints(result_file, 0.997915, args.ood_ncls, args.num_rand)
# OpenMax
print('Compute Open maF1 for OpenMax...')
result_file = 'slowfast/results_baselines/openmax/SlowFast_OpenMax_%s_result.npz'%(args.ood_data)
openness_openmax, maF1_openmax = openmax_curvepoints(result_file, args.ood_ncls, args.num_rand)
# RPL
print('Compute Open maF1 for RPL...')
result_file = 'slowfast/results_baselines/rpl/SlowFast_RPL_%s_result.npz'%(args.ood_data)
openness_rpl, maF1_rpl = softmax_curvepoints(result_file, 0.997780, args.ood_ncls, args.num_rand)
# MCDropout BALD
print('Compute Open maF1 for MC Dropout BALD...')
result_file = 'slowfast/results/SlowFast_DNN_BALD_%s_result.npz'%(args.ood_data)
openness_dnn, maF1_dnn = uncertainty_curvepoints(result_file, 0.000065, args.ind_ncls, args.ood_ncls, args.num_rand)
# BNN SVI BALD
print('Compute Open maF1 for BNN SVI BALD...')
result_file = 'slowfast/results/SlowFast_BNN_BALD_%s_result.npz'%(args.ood_data)
openness_bnn, maF1_bnn = uncertainty_curvepoints(result_file, 0.000004, args.ind_ncls, args.ood_ncls, args.num_rand)
# DEAR (full)
print('Compute Open maF1 for DEAR (full)...')
result_file = 'slowfast/results/SlowFast_EDLNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data)
openness_dear, maF1_dear = uncertainty_curvepoints(result_file, 0.004552, args.ind_ncls, args.ood_ncls, args.num_rand)
# draw F1 curve
line_styles = {'DEAR (full)': 'r-', 'SoftMax': 'b-', 'RPL': 'm-', 'BNN SVI': 'c-', 'MC Dropout': 'y-', 'OpenMax': 'k-'}
values = {'DEAR (full)': maF1_dear, 'SoftMax': maF1_softmax, 'RPL': maF1_rpl, 'BNN SVI': maF1_bnn, 'MC Dropout': maF1_dnn, 'OpenMax': maF1_openmax}
result_prefix = args.result_prefix + '_SlowFast'
plot_all_curves(openness_dear, values, line_styles, result_prefix, ylim=[60,90], fontsize=30)
def main_tpn():
# SoftMax
print('Compute Open maF1 for SoftMax...')
result_file = 'tpn_slowonly/results_baselines/openmax/TPN_OpenMax_%s_result.npz'%(args.ood_data)
openness_softmax, maF1_softmax = softmax_curvepoints(result_file, 0.997623, args.ood_ncls, args.num_rand)
# OpenMax
print('Compute Open maF1 for OpenMax...')
result_file = 'tpn_slowonly/results_baselines/openmax/TPN_OpenMax_%s_result.npz'%(args.ood_data)
openness_openmax, maF1_openmax = openmax_curvepoints(result_file, args.ood_ncls, args.num_rand)
# RPL
print('Compute Open maF1 for RPL...')
result_file = 'tpn_slowonly/results_baselines/rpl/TPN_RPL_%s_result.npz'%(args.ood_data)
openness_rpl, maF1_rpl = softmax_curvepoints(result_file, 0.996931, args.ood_ncls, args.num_rand)
# MCDropout BALD
print('Compute Open maF1 for MC Dropout BALD...')
result_file = 'tpn_slowonly/results/TPN_SlowOnly_Dropout_BALD_%s_result.npz'%(args.ood_data)
openness_dnn, maF1_dnn = uncertainty_curvepoints(result_file, 0.000096, args.ind_ncls, args.ood_ncls, args.num_rand)
# BNN SVI BALD
print('Compute Open maF1 for BNN SVI BALD...')
result_file = 'tpn_slowonly/results/TPN_SlowOnly_BNN_BALD_%s_result.npz'%(args.ood_data)
openness_bnn, maF1_bnn = uncertainty_curvepoints(result_file, 0.000007, args.ind_ncls, args.ood_ncls, args.num_rand)
# DEAR (full)
print('Compute Open maF1 for DEAR (full)...')
result_file = 'tpn_slowonly/results/TPN_SlowOnly_EDLlogNoKLAvUCDebias_EDL_%s_result.npz'%(args.ood_data)
openness_dear, maF1_dear = uncertainty_curvepoints(result_file, 0.004555, args.ind_ncls, args.ood_ncls, args.num_rand)
# draw F1 curve
line_styles = {'DEAR (full)': 'r-', 'SoftMax': 'b-', 'RPL': 'm-', 'BNN SVI': 'c-', 'MC Dropout': 'y-', 'OpenMax': 'k-'}
values = {'DEAR (full)': maF1_dear, 'SoftMax': maF1_softmax, 'RPL': maF1_rpl, 'BNN SVI': maF1_bnn, 'MC Dropout': maF1_dnn, 'OpenMax': maF1_openmax}
result_prefix = args.result_prefix + '_TPN'
ylim = [50, 85] if args.ood_data == 'HMDB' else [50, 85]
plot_all_curves(openness_dear, values, line_styles, result_prefix, ylim=ylim, fontsize=30)
def parse_args():
parser = argparse.ArgumentParser(description='Compare the performance of Open macroF1 against openness')
# model config
parser.add_argument('--ind_ncls', type=int, default=101, help='the number of classes in known dataset')
    parser.add_argument('--ood_ncls', type=int, default=51, choices=[51, 305], help='the number of classes in unknown dataset')
parser.add_argument('--ood_data', default='HMDB', choices=['HMDB', 'MiT'], help='the name of OOD dataset.')
parser.add_argument('--model', default='I3D', choices=['I3D', 'TSM', 'SlowFast', 'TPN'], help='the action recognition model.')
parser.add_argument('--num_rand', type=int, default=10, help='the number of random selection for ood classes')
parser.add_argument('--result_prefix', default='../temp/F1_openness')
args = parser.parse_args()
return args
if __name__ == '__main__':
""" Example script:
python draw_openness_curves.py --model I3D --ood_data MiT --ood_ncls 305
"""
np.random.seed(123)
args = parse_args()
if args.model == 'I3D':
# draw results on I3D
main_i3d()
elif args.model == 'TSM':
# draw results on TSM
main_tsm()
elif args.model == 'SlowFast':
# draw results on SlowFast
main_slowfast()
elif args.model == 'TPN':
# draw results on TPN
main_tpn()
else:
raise NotImplementedError
```

| Signal | Value | Flag |
|---|---|---|
| avg_line_length | 51.484568 | |
| max_line_length | 151 | |
| alphanum_fraction | 0.69714 | |
| qsc_code_num_words | 2,422 | 0 |
| qsc_code_num_chars | 16,681 | 0 |
| qsc_code_mean_word_length | 4.523121 | 0 |
| qsc_code_frac_words_unique | 0.107349 | null |
| qsc_code_frac_chars_top_2grams | 0.052031 | 0 |
| qsc_code_frac_chars_top_3grams | 0.028115 | 0 |
| qsc_code_frac_chars_top_4grams | 0.043816 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.786308 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0.753811 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.731995 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.724601 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.719306 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.697125 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.031574 | 0 |
| qsc_code_frac_chars_whitespace | 0.17409 | 0 |
| qsc_code_size_file_byte | 16,681 | 0 |
| qsc_code_num_lines | 323 | 0 |
| qsc_code_num_chars_line_max | 152 | 0 |
| qsc_code_num_chars_line_mean | 51.643963 | 0 |
| qsc_code_frac_chars_alphabet | 0.763591 | 0 |
| qsc_code_frac_chars_comments | 0.055033 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.480519 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.211134 | 0 |
| qsc_code_frac_chars_long_word_length | 0.0853 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.012987 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.038961 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.017316 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.073593 | 0 |
| qsc_codepython_frac_lines_print | 0.103896 | 0 |
| effective | 0 | |
| hits | 5 | |
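The x-axis of the curves drawn by row 5's script comes from the expression repeated in all three *_curvepoints functions: openness = (1 - sqrt(2*C / (2*C + n))) * 100 for C known classes and n novel classes. A quick check of its range using the script's own defaults from parse_args (ind_ncls=101, ood_ncls=51):

```python
import math

def openness(ind_ncls: int, ncls_novel: int) -> float:
    # Same formula as in softmax/openmax/uncertainty_curvepoints above.
    return (1 - math.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100

# Defaults from parse_args: 101 known classes, up to 51 novel (HMDB) classes.
print(openness(101, 1))   # ~0.25: curve starts almost closed-set
print(openness(101, 51))  # ~10.65: maximum openness plotted for HMDB
```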
**Row 6 — `cmd/daprd/direct_message.py` (ls-2018/dapr_cn)**

| Field | Value |
|---|---|
| hexsha | 0b820575bc341d84887ee689c9676c233d60cb5c |
| size | 2,449 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | cmd/daprd/direct_message.py | cmd/daprd/direct_message.py | cmd/daprd/direct_message.py |
| repo_name | ls-2018/dapr_cn | ls-2018/dapr_cn | ls-2018/dapr_cn |
| repo_head_hexsha | 7dc0f85d07d349580a2413cfde5f842a502ae475 | 7dc0f85d07d349580a2413cfde5f842a502ae475 | 7dc0f85d07d349580a2413cfde5f842a502ae475 |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 1 | null | null |
| event_min_datetime | 2021-11-23T09:44:44.000Z | null | null |
| event_max_datetime | 2021-11-23T09:44:44.000Z | null | null |

content:

```python
url = 'http://127.0.0.1:3001/post'
dapr_url = "http://localhost:3500/v1.0/invoke/dp-61c2cb20562850d49d47d1c7-executorapp/method/health"
# dapr_url = "http://localhost:3500/v1.0/healthz"
# res = requests.post(dapr_url, json.dumps({'a': random.random() * 1000}))
# res = requests.get(dapr_url, )
#
#
#
# print(res.text)
# print(res.status_code)
# INFO[0000] *----/v1.0/invoke/{id}/method/{method:*}
# INFO[0000] GET----/v1.0/state/{storeName}/{key}
# INFO[0000] DELETE----/v1.0/state/{storeName}/{key}
# INFO[0000] PUT----/v1.0/state/{storeName}
# INFO[0000] PUT----/v1.0/state/{storeName}/bulk
# INFO[0000] PUT----/v1.0/state/{storeName}/transaction
# INFO[0000] POST----/v1.0/state/{storeName}
# INFO[0000] POST----/v1.0/state/{storeName}/bulk
# INFO[0000] POST----/v1.0/state/{storeName}/transaction
# INFO[0000] POST----/v1.0-alpha1/state/{storeName}/query
# INFO[0000] PUT----/v1.0-alpha1/state/{storeName}/query
# INFO[0000] GET----/v1.0/secrets/{secretStoreName}/bulk
# INFO[0000] GET----/v1.0/secrets/{secretStoreName}/{key}
# INFO[0000] POST----/v1.0/publish/{pubsubname}/{topic:*}
# INFO[0000] PUT----/v1.0/publish/{pubsubname}/{topic:*}
# INFO[0000] POST----/v1.0/bindings/{name}
# INFO[0000] PUT----/v1.0/bindings/{name}
# INFO[0000] GET----/v1.0/healthz
# INFO[0000] GET----/v1.0/healthz/outbound
# INFO[0000] GET----/v1.0/actors/{actorType}/{actorId}/method/{method}
# INFO[0000] GET----/v1.0/actors/{actorType}/{actorId}/state/{key}
# INFO[0000] GET----/v1.0/actors/{actorType}/{actorId}/reminders/{name}
# INFO[0000] POST----/v1.0/actors/{actorType}/{actorId}/state
# INFO[0000] POST----/v1.0/actors/{actorType}/{actorId}/method/{method}
# INFO[0000] POST----/v1.0/actors/{actorType}/{actorId}/reminders/{name}
# INFO[0000] POST----/v1.0/actors/{actorType}/{actorId}/timers/{name}
# INFO[0000] PUT----/v1.0/actors/{actorType}/{actorId}/state
# INFO[0000] PUT----/v1.0/actors/{actorType}/{actorId}/method/{method}
# INFO[0000] PUT----/v1.0/actors/{actorType}/{actorId}/reminders/{name}
# INFO[0000] PUT----/v1.0/actors/{actorType}/{actorId}/timers/{name}
# INFO[0000] DELETE----/v1.0/actors/{actorType}/{actorId}/method/{method}
# INFO[0000] DELETE----/v1.0/actors/{actorType}/{actorId}/reminders/{name}
# INFO[0000] DELETE----/v1.0/actors/{actorType}/{actorId}/timers/{name}
# INFO[0000] *----/{method:*}
# INFO[0000] GET----/v1.0/metadata
# INFO[0000] PUT----/v1.0/metadata/{key}
# INFO[0000] POST----/v1.0/shutdown
```

| Signal | Value | Flag |
|---|---|---|
| avg_line_length | 37.106061 | |
| max_line_length | 100 | |
| alphanum_fraction | 0.661903 | |
| qsc_code_num_words | 359 | 0 |
| qsc_code_num_chars | 2,449 | 0 |
| qsc_code_mean_word_length | 4.501393 | 0 |
| qsc_code_frac_words_unique | 0.158774 | null |
| qsc_code_frac_chars_top_2grams | 0.070545 | 0 |
| qsc_code_frac_chars_top_3grams | 0.07797 | 0 |
| qsc_code_frac_chars_top_4grams | 0.155941 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.851485 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.832921 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.745668 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.507426 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0.405322 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0.14604 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.115721 | 0 |
| qsc_code_frac_chars_whitespace | 0.064924 | 0 |
| qsc_code_size_file_byte | 2,449 | 0 |
| qsc_code_num_lines | 65 | 0 |
| qsc_code_num_chars_line_max | 101 | 0 |
| qsc_code_num_chars_line_mean | 37.676923 | 0 |
| qsc_code_frac_chars_alphabet | 0.589956 | 0 |
| qsc_code_frac_chars_comments | 0.899959 | 1 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0.5 | 1 |
| qsc_code_frac_chars_string_length | 0.567839 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | |
| hits | 5 | |
**Row 7 — `milksets/tests/test_abalone.py` (luispedro/milksets)**

| Field | Value |
|---|---|
| hexsha | 0b84e005c73d6752a7067c40b12a8f51ae4e8ea0 |
| size | 130 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | milksets/tests/test_abalone.py | milksets/tests/test_abalone.py | milksets/tests/test_abalone.py |
| repo_name | luispedro/milksets | luispedro/milksets | luispedro/milksets |
| repo_head_hexsha | 84fc8cba4d4a87acf573ce562cd065b0ee37fadd | 84fc8cba4d4a87acf573ce562cd065b0ee37fadd | 84fc8cba4d4a87acf573ce562cd065b0ee37fadd |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | 7 | null | 2 |
| event_min_datetime | 2015-05-15T19:49:25.000Z | null | 2021-02-04T10:18:17.000Z |
| event_max_datetime | 2021-02-04T10:18:15.000Z | null | 2021-04-20T02:26:38.000Z |

content:

```python
import milksets.abalone
def test_abalone():
features,labels = milksets.abalone.load()
assert len(features) == len(labels)
```

| Signal | Value | Flag |
|---|---|---|
| avg_line_length | 26 | |
| max_line_length | 45 | |
| alphanum_fraction | 0.730769 | |
| qsc_code_num_words | 16 | 1 |
| qsc_code_num_chars | 130 | 0 |
| qsc_code_mean_word_length | 5.875 | 0 |
| qsc_code_frac_words_unique | 0.625 | null |
| qsc_code_frac_chars_top_2grams | 0.319149 | 1 |
| qsc_code_frac_chars_top_3grams | 0 | 0 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.146154 | 0 |
| qsc_code_size_file_byte | 130 | 0 |
| qsc_code_num_lines | 4 | 1 |
| qsc_code_num_chars_line_max | 46 | 0 |
| qsc_code_num_chars_line_mean | 32.5 | 0 |
| qsc_code_frac_chars_alphabet | 0.846847 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.25 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.25 | 1 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.25 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.5 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | |
| hits | 5 | |
**Row 8 — `plugins/dos2unix/__init__.py` (LukasPersonal/hg-fast-export)**

| Field | Value |
|---|---|
| hexsha | e7e60b4231684356eed281998528a9c4460b65ce |
| size | 302 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | plugins/dos2unix/__init__.py | plugins/dos2unix/__init__.py | plugins/dos2unix/__init__.py |
| repo_name | LukasPersonal/hg-fast-export | LukasPersonal/hg-fast-export | LukasPersonal/hg-fast-export |
| repo_head_hexsha | 77a770c2b856a49f0d58a035cd9e300c8c0203ac | 77a770c2b856a49f0d58a035cd9e300c8c0203ac | 77a770c2b856a49f0d58a035cd9e300c8c0203ac |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | 1 | null |
| event_min_datetime | null | 2021-09-30T17:11:13.000Z | null |
| event_max_datetime | null | 2021-09-30T17:11:13.000Z | null |

content:

```python
def build_filter(args):
return Filter(args)
class Filter:
def __init__(self, args):
pass
def file_data_filter(self, file_data):
file_ctx = file_data["file_ctx"]
if not file_ctx.isbinary():
file_data["data"] = file_data["data"].replace(b"\r\n", b"\n")
```

| Signal | Value | Flag |
|---|---|---|
| avg_line_length | 23.230769 | |
| max_line_length | 73 | |
| alphanum_fraction | 0.612583 | |
| qsc_code_num_words | 44 | 0 |
| qsc_code_num_chars | 302 | 0 |
| qsc_code_mean_word_length | 3.886364 | 0 |
| qsc_code_frac_words_unique | 0.431818 | null |
| qsc_code_frac_chars_top_2grams | 0.233918 | 1 |
| qsc_code_frac_chars_top_3grams | 0.140351 | 0 |
| qsc_code_frac_chars_top_4grams | 0.175439 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.245033 | 0 |
| qsc_code_size_file_byte | 302 | 0 |
| qsc_code_num_lines | 12 | 0 |
| qsc_code_num_chars_line_max | 74 | 0 |
| qsc_code_num_chars_line_mean | 25.166667 | 0 |
| qsc_code_frac_chars_alphabet | 0.75 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.072848 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.333333 | 1 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0.111111 | 1 |
| qsc_codepython_frac_lines_import | 0 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0.111111 | 1 |
| qsc_codepython_score_lines_no_logic | 0.555556 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | |
| hits | 5 | |
**Row 9 — `ool/oppositions/managers.py` (HeLsEroC/bbr)**

| Field | Value |
|---|---|
| hexsha | e7e6ae5e438b7227ac07d104cafe2480c399b5be |
| size | 194 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | ool/oppositions/managers.py | ool/oppositions/managers.py | ool/oppositions/managers.py |
| repo_name | HeLsEroC/bbr | HeLsEroC/bbr | HeLsEroC/bbr |
| repo_head_hexsha | 0dd40bffd05faa777bec3a89dd1712f0f546d60e | 0dd40bffd05faa777bec3a89dd1712f0f546d60e | 0dd40bffd05faa777bec3a89dd1712f0f546d60e |
| repo_licenses | ["MIT"] | ["MIT"] | ["MIT"] |
| count | null | null | null |
| event_min_datetime | null | null | null |
| event_max_datetime | null | null | null |

content:

```python
from django.db.models import Manager
from ool.users.constants import USER_TYPES
from .mixins import OppositionManagerMixin
class OppositionManager(Manager, OppositionManagerMixin):
pass
```

| Signal | Value | Flag |
|---|---|---|
| avg_line_length | 19.4 | |
| max_line_length | 57 | |
| alphanum_fraction | 0.829897 | |
| qsc_code_num_words | 22 | 1 |
| qsc_code_num_chars | 194 | 0 |
| qsc_code_mean_word_length | 7.272727 | 0 |
| qsc_code_frac_words_unique | 0.727273 | null |
| qsc_code_frac_chars_top_2grams | 0 | 0 |
| qsc_code_frac_chars_top_3grams | 0 | 0 |
| qsc_code_frac_chars_top_4grams | 0 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0 | 0 |
| qsc_code_frac_chars_dupe_6grams | 0 | 0 |
| qsc_code_frac_chars_dupe_7grams | 0 | 0 |
| qsc_code_frac_chars_dupe_8grams | 0 | 0 |
| qsc_code_frac_chars_dupe_9grams | 0 | 0 |
| qsc_code_frac_chars_dupe_10grams | 0 | 0 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0 | 0 |
| qsc_code_frac_chars_whitespace | 0.123711 | 0 |
| qsc_code_size_file_byte | 194 | 0 |
| qsc_code_num_lines | 9 | 1 |
| qsc_code_num_chars_line_max | 58 | 0 |
| qsc_code_num_chars_line_mean | 21.555556 | 0 |
| qsc_code_frac_chars_alphabet | 0.941176 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 | 0 |
| qsc_codepython_cate_var_zero | true | 1 |
| qsc_codepython_frac_lines_pass | 0.2 | 1 |
| qsc_codepython_frac_lines_import | 0.6 | 1 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.8 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | |
| hits | 5 | |
**Row 10 — `test/python/T0Component_t/Tier0Feeder_t/Tier0Feeder_t.py` (mapellidario/T0)**

| Field | Value |
|---|---|
| hexsha | f000666e36187ea5185bf372f5e897b7675d27b0 |
| size | 118,488 |
| ext | py |
| lang | Python |

| | max_stars | max_issues | max_forks |
|---|---|---|---|
| repo_path | test/python/T0Component_t/Tier0Feeder_t/Tier0Feeder_t.py | test/python/T0Component_t/Tier0Feeder_t/Tier0Feeder_t.py | test/python/T0Component_t/Tier0Feeder_t/Tier0Feeder_t.py |
| repo_name | mapellidario/T0 | mapellidario/T0 | mapellidario/T0 |
| repo_head_hexsha | 8c1fdadfa12f36629b2e3de60a683d47ea895f75 | 8c1fdadfa12f36629b2e3de60a683d47ea895f75 | 8c1fdadfa12f36629b2e3de60a683d47ea895f75 |
| repo_licenses | ["Apache-2.0"] | ["Apache-2.0"] | ["Apache-2.0"] |
| count | null | 10 | null |
| event_min_datetime | null | 2017-12-05T12:48:49.000Z | null |
| event_max_datetime | null | 2019-08-06T09:35:08.000Z | null |

content:

```python
#!/usr/bin/env python
"""
_Tier0Feeder_t_
Testing the Tier0Feeder code
"""
from __future__ import print_function
import unittest
import threading
import logging
import time
import os
from WMQuality.TestInit import TestInit
from WMCore.DAOFactory import DAOFactory
from WMCore.Database.DBFactory import DBFactory
from WMCore.Configuration import loadConfigurationFile
from WMCore.Services.UUIDLib import makeUUID
from WMCore.Services.RequestDB.RequestDBWriter import RequestDBWriter
from T0.RunConfig import RunConfigAPI
from T0.RunLumiCloseout import RunLumiCloseoutAPI
from T0.ConditionUpload import ConditionUploadAPI
from T0.StorageManager import StorageManagerAPI
class Tier0FeederTest(unittest.TestCase):
"""
_Tier0FeederTest_
Testing the Tier0Feeder code
"""
def setUp(self):
"""
_setUp_
"""
self.testInit = TestInit(__file__)
self.testInit.setLogging()
self.testInit.setDatabaseConnection(destroyAllDatabase=True)
self.p5id=1
self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer", "T0.WMBS"])
self.testDir = self.testInit.generateWorkDir()
self.hltkey = "/cdaq/physics/Run2011/3e33/v2.1/HLT/V2"
self.hltConfig = None
self.dqmUploadProxy = None
self.dbInterfaceStorageManager = None
self.getExpressReadyRunsDAO = None
if 'WMAGENT_CONFIG' in os.environ:
wmAgentConfig = loadConfigurationFile(os.environ["WMAGENT_CONFIG"])
self.dqmUploadProxy = getattr(wmAgentConfig.Tier0Feeder, "dqmUploadProxy", None)
self.localRequestCouchDB = RequestDBWriter(wmAgentConfig.AnalyticsDataCollector.localT0RequestDBURL,
couchapp = wmAgentConfig.AnalyticsDataCollector.RequestCouchApp)
if hasattr(wmAgentConfig, "HLTConfDatabase"):
connectUrl = getattr(wmAgentConfig.HLTConfDatabase, "connectUrl", None)
dbFactory = DBFactory(logging, dburl = connectUrl, options = {})
dbInterface = dbFactory.connect()
#print(dbInterface, "this is the dbinterface")
daoFactory = DAOFactory(package = "T0.WMBS",
logger = logging,
dbinterface = dbInterface)
self.dbInterface = dbInterface
getHLTConfigDAO = daoFactory(classname = "RunConfig.GetHLTConfig")
self.hltConfig = getHLTConfigDAO.execute(self.hltkey, transaction = False)
if self.hltConfig['process'] == None or len(self.hltConfig['mapping']) == 0:
raise RuntimeError("HLTConfDB query returned no process or mapping")
else:
print("Your config is missing the HLTConfDatabase section")
print("Using reference HLT config instead")
if hasattr(wmAgentConfig, "StorageManagerDatabase"):
connectUrl = getattr(wmAgentConfig.StorageManagerDatabase, "connectUrl", None)
dbFactory = DBFactory(logging, dburl = connectUrl, options = {})
self.dbInterfaceStorageManager = dbFactory.connect()
else:
print("Did not connect to Storagemanagerdatabase")
if hasattr(wmAgentConfig, "PopConLogDatabase"):
connectUrl = getattr(wmAgentConfig.PopConLogDatabase, "connectUrl", None)
dbFactory = DBFactory(logging, dburl = connectUrl, options = {})
dbInterface = dbFactory.connect()
daoFactory = DAOFactory(package = "T0.WMBS",
logger = logging,
dbinterface = dbInterface)
self.getExpressReadyRunsDAO = daoFactory(classname = "Tier0Feeder.GetExpressReadyRuns")
else:
print("Did not connect to popconlogdatabase")
else:
print("You do not have WMAGENT_CONFIG in your environment")
print("Using reference HLT config instead")
myThread = threading.currentThread()
daoFactory = DAOFactory(package = "T0.WMBS",
logger = logging,
dbinterface = myThread.dbi)
self.dbInterfaceSMNotify = None
insertCMSSVersionDAO = daoFactory(classname = "RunConfig.InsertCMSSWVersion")
insertCMSSVersionDAO.execute(binds = { 'VERSION' : "CMSSW_4_2_7" },
transaction = False)
insertStreamDAO = daoFactory(classname = "RunConfig.InsertStream")
insertStreamDAO.execute(binds = { 'STREAM' : "A" },
transaction = False)
insertStreamDAO.execute(binds = { 'STREAM' : "Express" },
transaction = False)
insertStreamDAO.execute(binds = { 'STREAM' : "HLTMON" },
transaction = False)
self.tier0Config = loadConfigurationFile("ExampleConfig.py")
self.insertLocation(self.tier0Config.Global.StreamerPNN)
self.referenceMapping = {}
self.referenceMapping['A'] = {}
self.referenceMapping['A']['BTag'] = []
self.referenceMapping['A']['BTag'].append("HLT_BTagMu_DiJet110_Mu5_v10")
self.referenceMapping['A']['BTag'].append("HLT_BTagMu_DiJet20_Mu5_v10")
self.referenceMapping['A']['BTag'].append("HLT_BTagMu_DiJet40_Mu5_v10")
self.referenceMapping['A']['BTag'].append("HLT_BTagMu_DiJet70_Mu5_v10")
self.referenceMapping['A']['Commissioning'] = []
self.referenceMapping['A']['Commissioning'].append("HLT_Activity_Ecal_SC7_v8")
self.referenceMapping['A']['Commissioning'].append("HLT_BeamGas_BSC_v5")
self.referenceMapping['A']['Commissioning'].append("HLT_BeamGas_HF_v6")
self.referenceMapping['A']['Commissioning'].append("HLT_IsoTrackHB_v7")
self.referenceMapping['A']['Commissioning'].append("HLT_IsoTrackHE_v8")
self.referenceMapping['A']['Commissioning'].append("HLT_L1SingleEG12_v3")
self.referenceMapping['A']['Commissioning'].append("HLT_L1SingleEG5_v3")
self.referenceMapping['A']['Commissioning'].append("HLT_L1SingleJet16_v4")
self.referenceMapping['A']['Commissioning'].append("HLT_L1SingleJet36_v4")
self.referenceMapping['A']['Commissioning'].append("HLT_L1SingleMuOpen_DT_v4")
self.referenceMapping['A']['Commissioning'].append("HLT_L1SingleMuOpen_v4")
self.referenceMapping['A']['Commissioning'].append("HLT_L1_Interbunch_BSC_v3")
self.referenceMapping['A']['Commissioning'].append("HLT_L1_PreCollisions_v3")
self.referenceMapping['A']['Commissioning'].append("HLT_Mu5_TkMu0_OST_Jpsi_Tight_B5Q7_v9")
self.referenceMapping['A']['Cosmics'] = []
self.referenceMapping['A']['Cosmics'].append("HLT_BeamHalo_v6")
self.referenceMapping['A']['Cosmics'].append("HLT_L1SingleMuOpen_AntiBPTX_v3")
self.referenceMapping['A']['Cosmics'].append("HLT_L1TrackerCosmics_v4")
self.referenceMapping['A']['Cosmics'].append("HLT_RegionalCosmicTracking_v7")
self.referenceMapping['A']['DoubleElectron'] = []
self.referenceMapping['A']['DoubleElectron'].append("HLT_DoubleEle10_CaloIdL_TrkIdVL_Ele10_CaloIdT_TrkIdVL_v3")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Ele17_CaloIdL_CaloIsoVL_v8")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v8")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Ele17_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_Ele8_Mass30_v7")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Ele17_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_SC8_Mass30_v8")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Ele22_CaloIdL_CaloIsoVL_Ele15_HFT_v1")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Ele32_CaloIdT_CaloIsoT_TrkIdT_TrkIsoT_Ele17_v1")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Ele32_CaloIdT_CaloIsoT_TrkIdT_TrkIsoT_SC17_v6")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Ele8_CaloIdL_CaloIsoVL_Jet40_v8")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Ele8_CaloIdL_CaloIsoVL_v8")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Ele8_CaloIdL_TrkIdVL_v8")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v6")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Ele8_v8")
self.referenceMapping['A']['DoubleElectron'].append("HLT_Photon20_CaloIdVT_IsoT_Ele8_CaloIdL_CaloIsoVL_v9")
self.referenceMapping['A']['DoubleElectron'].append("HLT_TripleEle10_CaloIdL_TrkIdVL_v9")
self.referenceMapping['A']['DoubleMu'] = []
self.referenceMapping['A']['DoubleMu'].append("HLT_DoubleMu3_v10")
self.referenceMapping['A']['DoubleMu'].append("HLT_DoubleMu45_v6")
self.referenceMapping['A']['DoubleMu'].append("HLT_DoubleMu5_Acoplanarity03_v6")
self.referenceMapping['A']['DoubleMu'].append("HLT_DoubleMu5_IsoMu5_v8")
self.referenceMapping['A']['DoubleMu'].append("HLT_DoubleMu5_v1")
self.referenceMapping['A']['DoubleMu'].append("HLT_DoubleMu6_Acoplanarity03_v1")
self.referenceMapping['A']['DoubleMu'].append("HLT_DoubleMu6_v8")
self.referenceMapping['A']['DoubleMu'].append("HLT_DoubleMu7_v8")
self.referenceMapping['A']['DoubleMu'].append("HLT_L1DoubleMu0_v4")
self.referenceMapping['A']['DoubleMu'].append("HLT_L2DoubleMu0_v7")
self.referenceMapping['A']['DoubleMu'].append("HLT_L2DoubleMu23_NoVertex_v7")
self.referenceMapping['A']['DoubleMu'].append("HLT_L2DoubleMu30_NoVertex_v3")
self.referenceMapping['A']['DoubleMu'].append("HLT_Mu13_Mu8_v7")
self.referenceMapping['A']['DoubleMu'].append("HLT_Mu17_Mu8_v7")
self.referenceMapping['A']['DoubleMu'].append("HLT_Mu8_Jet40_v10")
self.referenceMapping['A']['DoubleMu'].append("HLT_TripleMu5_v9")
self.referenceMapping['A']['ElectronHad'] = []
self.referenceMapping['A']['ElectronHad'].append("HLT_DoubleEle8_CaloIdT_TrkIdVL_HT150_v6")
self.referenceMapping['A']['ElectronHad'].append("HLT_DoubleEle8_CaloIdT_TrkIdVL_Mass4_HT150_v3")
self.referenceMapping['A']['ElectronHad'].append("HLT_DoubleEle8_CaloIdT_TrkIdVL_v3")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele12_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_R005_MR200_v1")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele12_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_R025_MR200_v1")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele12_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_R029_MR200_v1")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele15_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_HT250_PFMHT25_v4")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele15_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_HT250_PFMHT40_v1")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele15_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_v2")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele20_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_Jet35_Jet25_Deta3_Jet20_v2")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralJet30_BTagIP_v5")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralJet30_v5")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_DiCentralJet30_v5")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_QuadCentralJet30_v5")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_TriCentralJet30_v5")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele25_CaloIdVT_TrkIdT_CentralJet30_BTagIP_v9")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele25_CaloIdVT_TrkIdT_CentralJet30_v9")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele25_CaloIdVT_TrkIdT_DiCentralJet30_v8")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele25_CaloIdVT_TrkIdT_QuadCentralJet30_v5")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele25_CaloIdVT_TrkIdT_TriCentralJet30_v8")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele27_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralJet30_CentralJet25_PFMHT20_v2")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele27_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralJet30_CentralJet25_v1")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele27_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_Jet35_Jet25_Deta3_Jet20_v1")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele27_CaloIdVT_TrkIdT_CentralJet30_CentralJet25_v1")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele27_CaloIdVT_TrkIdT_Jet35_Jet25_Deta3_Jet20_v1")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele30_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_DiCentralJet30_PFMHT25_v1")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele30_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_Jet35_Jet25_Deta3p5_Jet25_v1")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele8_CaloIdT_TrkIdT_DiJet30_v5")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele8_CaloIdT_TrkIdT_QuadJet30_v5")
self.referenceMapping['A']['ElectronHad'].append("HLT_Ele8_CaloIdT_TrkIdT_TriJet30_v5")
self.referenceMapping['A']['ElectronHad'].append("HLT_HT200_DoubleEle5_CaloIdVL_MassJPsi_v3")
self.referenceMapping['A']['ElectronHad'].append("HLT_HT300_Ele5_CaloIdVL_CaloIsoVL_TrkIdVL_TrkIsoVL_PFMHT40_v6")
self.referenceMapping['A']['ElectronHad'].append("HLT_HT350_Ele30_CaloIdT_TrkIdT_v1")
self.referenceMapping['A']['ElectronHad'].append("HLT_HT350_Ele5_CaloIdVL_CaloIsoVL_TrkIdVL_TrkIsoVL_PFMHT45_v6")
self.referenceMapping['A']['ElectronHad'].append("HLT_HT400_Ele60_CaloIdT_TrkIdT_v1")
self.referenceMapping['A']['FEDMonitor'] = []
self.referenceMapping['A']['FEDMonitor'].append("HLT_DTErrors_v2")
self.referenceMapping['A']['HT'] = []
self.referenceMapping['A']['HT'].append("HLT_DiJet130_PT130_v6")
self.referenceMapping['A']['HT'].append("HLT_DiJet160_PT160_v6")
self.referenceMapping['A']['HT'].append("HLT_FatJetMass750_DR1p1_Deta2p0_v2")
self.referenceMapping['A']['HT'].append("HLT_FatJetMass850_DR1p1_Deta2p0_v2")
self.referenceMapping['A']['HT'].append("HLT_HT150_v8")
self.referenceMapping['A']['HT'].append("HLT_HT2000_v2")
self.referenceMapping['A']['HT'].append("HLT_HT200_AlphaT0p55_v2")
self.referenceMapping['A']['HT'].append("HLT_HT200_v8")
self.referenceMapping['A']['HT'].append("HLT_HT250_AlphaT0p53_v6")
self.referenceMapping['A']['HT'].append("HLT_HT250_AlphaT0p55_v2")
self.referenceMapping['A']['HT'].append("HLT_HT250_DoubleDisplacedJet60_PromptTrack_v6")
self.referenceMapping['A']['HT'].append("HLT_HT250_DoubleDisplacedJet60_v8")
self.referenceMapping['A']['HT'].append("HLT_HT250_MHT100_v2")
self.referenceMapping['A']['HT'].append("HLT_HT250_MHT90_v2")
self.referenceMapping['A']['HT'].append("HLT_HT250_v8")
self.referenceMapping['A']['HT'].append("HLT_HT300_AlphaT0p53_v6")
self.referenceMapping['A']['HT'].append("HLT_HT300_AlphaT0p54_v2")
self.referenceMapping['A']['HT'].append("HLT_HT300_CentralJet30_BTagIP_PFMHT55_v8")
self.referenceMapping['A']['HT'].append("HLT_HT300_CentralJet30_BTagIP_PFMHT65_v1")
self.referenceMapping['A']['HT'].append("HLT_HT300_CentralJet30_BTagIP_v7")
self.referenceMapping['A']['HT'].append("HLT_HT300_MHT80_v2")
self.referenceMapping['A']['HT'].append("HLT_HT300_MHT90_v2")
self.referenceMapping['A']['HT'].append("HLT_HT300_PFMHT55_v8")
self.referenceMapping['A']['HT'].append("HLT_HT300_PFMHT65_v1")
self.referenceMapping['A']['HT'].append("HLT_HT300_v9")
self.referenceMapping['A']['HT'].append("HLT_HT350_AlphaT0p52_v2")
self.referenceMapping['A']['HT'].append("HLT_HT350_AlphaT0p53_v7")
self.referenceMapping['A']['HT'].append("HLT_HT350_MHT70_v2")
self.referenceMapping['A']['HT'].append("HLT_HT350_MHT80_v2")
self.referenceMapping['A']['HT'].append("HLT_HT350_MHT90_v1")
self.referenceMapping['A']['HT'].append("HLT_HT350_v8")
self.referenceMapping['A']['HT'].append("HLT_HT400_AlphaT0p51_v7")
self.referenceMapping['A']['HT'].append("HLT_HT400_AlphaT0p52_v2")
self.referenceMapping['A']['HT'].append("HLT_HT400_MHT80_v1")
self.referenceMapping['A']['HT'].append("HLT_HT400_v8")
self.referenceMapping['A']['HT'].append("HLT_HT450_AlphaT0p51_v2")
self.referenceMapping['A']['HT'].append("HLT_HT450_AlphaT0p52_v2")
self.referenceMapping['A']['HT'].append("HLT_HT450_v8")
self.referenceMapping['A']['HT'].append("HLT_HT500_JetPt60_DPhi2p94_v2")
self.referenceMapping['A']['HT'].append("HLT_HT500_v8")
self.referenceMapping['A']['HT'].append("HLT_HT550_JetPt60_DPhi2p94_v2")
self.referenceMapping['A']['HT'].append("HLT_HT550_v8")
self.referenceMapping['A']['HT'].append("HLT_HT600_JetPt60_DPhi2p94_v1")
self.referenceMapping['A']['HT'].append("HLT_HT600_v1")
self.referenceMapping['A']['HT'].append("HLT_HT650_v1")
self.referenceMapping['A']['HT'].append("HLT_R014_MR150_v7")
self.referenceMapping['A']['HT'].append("HLT_R020_MR150_v7")
self.referenceMapping['A']['HT'].append("HLT_R020_MR550_v7")
self.referenceMapping['A']['HT'].append("HLT_R023_MR550_v3")
self.referenceMapping['A']['HT'].append("HLT_R025_MR150_v7")
self.referenceMapping['A']['HT'].append("HLT_R025_MR450_v7")
self.referenceMapping['A']['HT'].append("HLT_R029_MR450_v3")
self.referenceMapping['A']['HT'].append("HLT_R033_MR350_v7")
self.referenceMapping['A']['HT'].append("HLT_R036_MR350_v3")
self.referenceMapping['A']['HT'].append("HLT_R038_MR250_v7")
self.referenceMapping['A']['HT'].append("HLT_R042_MR250_v3")
self.referenceMapping['A']['HcalHPDNoise'] = []
self.referenceMapping['A']['HcalHPDNoise'].append("HLT_GlobalRunHPDNoise_v5")
self.referenceMapping['A']['HcalHPDNoise'].append("HLT_L1Tech_HBHEHO_totalOR_v3")
self.referenceMapping['A']['HcalHPDNoise'].append("HLT_L1Tech_HCAL_HF_single_channel_v1")
self.referenceMapping['A']['HcalNZS'] = []
self.referenceMapping['A']['HcalNZS'].append("HLT_HcalNZS_v7")
self.referenceMapping['A']['HcalNZS'].append("HLT_HcalPhiSym_v8")
self.referenceMapping['A']['HighPileUp'] = []
self.referenceMapping['A']['HighPileUp'].append("HLT_60Jet10_v1")
self.referenceMapping['A']['HighPileUp'].append("HLT_70Jet10_v1")
self.referenceMapping['A']['HighPileUp'].append("HLT_70Jet13_v1")
self.referenceMapping['A']['Jet'] = []
self.referenceMapping['A']['Jet'].append("HLT_DiJetAve110_v6")
self.referenceMapping['A']['Jet'].append("HLT_DiJetAve190_v6")
self.referenceMapping['A']['Jet'].append("HLT_DiJetAve240_v6")
self.referenceMapping['A']['Jet'].append("HLT_DiJetAve300_v6")
self.referenceMapping['A']['Jet'].append("HLT_DiJetAve30_v6")
self.referenceMapping['A']['Jet'].append("HLT_DiJetAve370_v6")
self.referenceMapping['A']['Jet'].append("HLT_DiJetAve60_v6")
self.referenceMapping['A']['Jet'].append("HLT_Jet110_v6")
self.referenceMapping['A']['Jet'].append("HLT_Jet190_v6")
self.referenceMapping['A']['Jet'].append("HLT_Jet240_CentralJet30_BTagIP_v3")
self.referenceMapping['A']['Jet'].append("HLT_Jet240_v6")
self.referenceMapping['A']['Jet'].append("HLT_Jet270_CentralJet30_BTagIP_v3")
self.referenceMapping['A']['Jet'].append("HLT_Jet300_v5")
self.referenceMapping['A']['Jet'].append("HLT_Jet30_v6")
self.referenceMapping['A']['Jet'].append("HLT_Jet370_NoJetID_v6")
self.referenceMapping['A']['Jet'].append("HLT_Jet370_v6")
self.referenceMapping['A']['Jet'].append("HLT_Jet60_v6")
self.referenceMapping['A']['Jet'].append("HLT_Jet800_v1")
self.referenceMapping['A']['LogMonitor'] = []
self.referenceMapping['A']['LogMonitor'].append("HLT_LogMonitor_v1")
self.referenceMapping['A']['MET'] = []
self.referenceMapping['A']['MET'].append("HLT_CentralJet80_MET100_v7")
self.referenceMapping['A']['MET'].append("HLT_CentralJet80_MET160_v7")
self.referenceMapping['A']['MET'].append("HLT_CentralJet80_MET65_v7")
self.referenceMapping['A']['MET'].append("HLT_CentralJet80_MET80_v6")
self.referenceMapping['A']['MET'].append("HLT_DiCentralJet20_BTagIP_MET65_v7")
self.referenceMapping['A']['MET'].append("HLT_DiCentralJet20_MET100_HBHENoiseFiltered_v1")
self.referenceMapping['A']['MET'].append("HLT_DiCentralJet20_MET80_v5")
self.referenceMapping['A']['MET'].append("HLT_DiJet60_MET45_v7")
self.referenceMapping['A']['MET'].append("HLT_L2Mu60_1Hit_MET40_v5")
self.referenceMapping['A']['MET'].append("HLT_L2Mu60_1Hit_MET60_v5")
self.referenceMapping['A']['MET'].append("HLT_MET100_HBHENoiseFiltered_v6")
self.referenceMapping['A']['MET'].append("HLT_MET100_v7")
self.referenceMapping['A']['MET'].append("HLT_MET120_HBHENoiseFiltered_v6")
self.referenceMapping['A']['MET'].append("HLT_MET120_v7")
self.referenceMapping['A']['MET'].append("HLT_MET200_HBHENoiseFiltered_v6")
self.referenceMapping['A']['MET'].append("HLT_MET200_v7")
self.referenceMapping['A']['MET'].append("HLT_MET400_v2")
self.referenceMapping['A']['MET'].append("HLT_MET65_HBHENoiseFiltered_v5")
self.referenceMapping['A']['MET'].append("HLT_MET65_v4")
self.referenceMapping['A']['MET'].append("HLT_PFMHT150_v12")
self.referenceMapping['A']['MinimumBias'] = []
self.referenceMapping['A']['MinimumBias'].append("HLT_JetE30_NoBPTX3BX_NoHalo_v8")
self.referenceMapping['A']['MinimumBias'].append("HLT_JetE30_NoBPTX_NoHalo_v8")
self.referenceMapping['A']['MinimumBias'].append("HLT_JetE30_NoBPTX_v6")
self.referenceMapping['A']['MinimumBias'].append("HLT_JetE50_NoBPTX3BX_NoHalo_v3")
self.referenceMapping['A']['MinimumBias'].append("HLT_Physics_v2")
self.referenceMapping['A']['MinimumBias'].append("HLT_PixelTracks_Multiplicity100_v7")
self.referenceMapping['A']['MinimumBias'].append("HLT_PixelTracks_Multiplicity80_v7")
self.referenceMapping['A']['MinimumBias'].append("HLT_Random_v1")
self.referenceMapping['A']['MinimumBias'].append("HLT_ZeroBias_v4")
self.referenceMapping['A']['MuEG'] = []
self.referenceMapping['A']['MuEG'].append("HLT_DoubleMu5_Ele8_CaloIdL_TrkIdVL_v10")
self.referenceMapping['A']['MuEG'].append("HLT_DoubleMu5_Ele8_CaloIdT_TrkIdVL_v4")
self.referenceMapping['A']['MuEG'].append("HLT_Mu15_DoublePhoton15_CaloIdL_v10")
self.referenceMapping['A']['MuEG'].append("HLT_Mu15_Photon20_CaloIdL_v10")
self.referenceMapping['A']['MuEG'].append("HLT_Mu17_Ele8_CaloIdL_v9")
self.referenceMapping['A']['MuEG'].append("HLT_Mu17_Ele8_CaloIdT_CaloIsoVL_v4")
self.referenceMapping['A']['MuEG'].append("HLT_Mu5_DoubleEle8_CaloIdT_TrkIdVL_v4")
self.referenceMapping['A']['MuEG'].append("HLT_Mu5_Ele8_CaloIdT_CaloIsoVL_v1")
self.referenceMapping['A']['MuEG'].append("HLT_Mu5_Ele8_CaloIdT_TrkIdVL_Ele8_CaloIdL_TrkIdVL_v4")
self.referenceMapping['A']['MuEG'].append("HLT_Mu8_Ele17_CaloIdL_v9")
self.referenceMapping['A']['MuEG'].append("HLT_Mu8_Ele17_CaloIdT_CaloIsoVL_v4")
self.referenceMapping['A']['MuEG'].append("HLT_Mu8_Photon20_CaloIdVT_IsoT_v9")
self.referenceMapping['A']['MuHad'] = []
self.referenceMapping['A']['MuHad'].append("HLT_DoubleMu5_HT150_v1")
self.referenceMapping['A']['MuHad'].append("HLT_DoubleMu5_Mass4_HT150_v1")
self.referenceMapping['A']['MuHad'].append("HLT_HT250_Mu15_PFMHT40_v4")
self.referenceMapping['A']['MuHad'].append("HLT_HT300_Mu15_PFMHT40_v1")
self.referenceMapping['A']['MuHad'].append("HLT_HT300_Mu5_PFMHT40_v8")
self.referenceMapping['A']['MuHad'].append("HLT_HT350_Mu5_PFMHT45_v8")
self.referenceMapping['A']['MuHad'].append("HLT_IsoMu17_eta2p1_CentralJet30_BTagIP_v1")
self.referenceMapping['A']['MuHad'].append("HLT_IsoMu17_eta2p1_CentralJet30_v1")
self.referenceMapping['A']['MuHad'].append("HLT_IsoMu17_eta2p1_DiCentralJet30_v1")
self.referenceMapping['A']['MuHad'].append("HLT_IsoMu17_eta2p1_QuadCentralJet30_v1")
self.referenceMapping['A']['MuHad'].append("HLT_IsoMu17_eta2p1_TriCentralJet30_v1")
self.referenceMapping['A']['MuHad'].append("HLT_IsoMu20_DiCentralJet34_v3")
self.referenceMapping['A']['MuHad'].append("HLT_Mu10_R005_MR200_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu10_R025_MR200_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu10_R029_MR200_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu12_eta2p1_DiCentralJet20_BTagIP3D1stTrack_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu12_eta2p1_DiCentralJet20_DiBTagIP3D1stTrack_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu12_eta2p1_DiCentralJet30_BTagIP3D_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu17_eta2p1_CentralJet30_BTagIP_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu17_eta2p1_CentralJet30_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu17_eta2p1_DiCentralJet30_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu17_eta2p1_QuadCentralJet30_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu17_eta2p1_TriCentralJet30_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu40_HT200_v4")
self.referenceMapping['A']['MuHad'].append("HLT_Mu5_DiJet30_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu5_Ele8_CaloIdT_TrkIdVL_HT150_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu5_Ele8_CaloIdT_TrkIdVL_Mass4_HT150_v6")
self.referenceMapping['A']['MuHad'].append("HLT_Mu5_QuadJet30_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu5_TriJet30_v1")
self.referenceMapping['A']['MuHad'].append("HLT_Mu60_HT200_v1")
self.referenceMapping['A']['MuOnia'] = []
self.referenceMapping['A']['MuOnia'].append("HLT_Dimuon0_Jpsi_Muon_v7")
self.referenceMapping['A']['MuOnia'].append("HLT_Dimuon0_Jpsi_NoVertexing_v3")
self.referenceMapping['A']['MuOnia'].append("HLT_Dimuon0_Jpsi_v6")
self.referenceMapping['A']['MuOnia'].append("HLT_Dimuon0_Upsilon_Muon_v7")
self.referenceMapping['A']['MuOnia'].append("HLT_Dimuon0_Upsilon_v6")
self.referenceMapping['A']['MuOnia'].append("HLT_Dimuon10_Jpsi_Barrel_v6")
self.referenceMapping['A']['MuOnia'].append("HLT_Dimuon11_PsiPrime_v1")
self.referenceMapping['A']['MuOnia'].append("HLT_Dimuon13_Jpsi_Barrel_v1")
self.referenceMapping['A']['MuOnia'].append("HLT_Dimuon6_LowMass_v1")
self.referenceMapping['A']['MuOnia'].append("HLT_Dimuon7_Upsilon_Barrel_v1")
self.referenceMapping['A']['MuOnia'].append("HLT_Dimuon9_PsiPrime_v1")
self.referenceMapping['A']['MuOnia'].append("HLT_Dimuon9_Upsilon_Barrel_v1")
self.referenceMapping['A']['MuOnia'].append("HLT_DoubleMu4_Dimuon4_Bs_Barrel_v1")
self.referenceMapping['A']['MuOnia'].append("HLT_DoubleMu4_Dimuon6_Bs_v1")
self.referenceMapping['A']['MuOnia'].append("HLT_DoubleMu4_Jpsi_Displaced_v1")
self.referenceMapping['A']['MuOnia'].append("HLT_DoubleMu4p5_LowMass_Displaced_v1")
self.referenceMapping['A']['MuOnia'].append("HLT_DoubleMu5_Jpsi_Displaced_v1")
self.referenceMapping['A']['MuOnia'].append("HLT_DoubleMu5_LowMass_Displaced_v1")
self.referenceMapping['A']['MuOnia'].append("HLT_Mu5_L2Mu2_Jpsi_v9")
self.referenceMapping['A']['MuOnia'].append("HLT_Mu5_Track2_Jpsi_v9")
self.referenceMapping['A']['MuOnia'].append("HLT_Mu7_Track7_Jpsi_v10")
self.referenceMapping['A']['MultiJet'] = []
self.referenceMapping['A']['MultiJet'].append("HLT_CentralJet46_CentralJet38_CentralJet20_DiBTagIP3D_v1")
self.referenceMapping['A']['MultiJet'].append("HLT_CentralJet46_CentralJet38_DiBTagIP3D_v3")
self.referenceMapping['A']['MultiJet'].append("HLT_CentralJet60_CentralJet53_DiBTagIP3D_v2")
self.referenceMapping['A']['MultiJet'].append("HLT_DiCentralJet36_BTagIP3DLoose_v1")
self.referenceMapping['A']['MultiJet'].append("HLT_DoubleJet30_ForwardBackward_v7")
self.referenceMapping['A']['MultiJet'].append("HLT_DoubleJet60_ForwardBackward_v7")
self.referenceMapping['A']['MultiJet'].append("HLT_DoubleJet70_ForwardBackward_v7")
self.referenceMapping['A']['MultiJet'].append("HLT_DoubleJet80_ForwardBackward_v7")
self.referenceMapping['A']['MultiJet'].append("HLT_EightJet120_v1")
self.referenceMapping['A']['MultiJet'].append("HLT_ExclDiJet60_HFAND_v6")
self.referenceMapping['A']['MultiJet'].append("HLT_ExclDiJet60_HFOR_v6")
self.referenceMapping['A']['MultiJet'].append("HLT_L1DoubleJet36Central_v4")
self.referenceMapping['A']['MultiJet'].append("HLT_L1ETM30_v4")
self.referenceMapping['A']['MultiJet'].append("HLT_L1MultiJet_v4")
self.referenceMapping['A']['MultiJet'].append("HLT_QuadJet40_IsoPFTau40_v12")
self.referenceMapping['A']['MultiJet'].append("HLT_QuadJet40_v7")
self.referenceMapping['A']['MultiJet'].append("HLT_QuadJet45_IsoPFTau45_v7")
self.referenceMapping['A']['MultiJet'].append("HLT_QuadJet50_DiJet40_v1")
self.referenceMapping['A']['MultiJet'].append("HLT_QuadJet50_Jet40_Jet30_v3")
self.referenceMapping['A']['MultiJet'].append("HLT_QuadJet70_v6")
self.referenceMapping['A']['MultiJet'].append("HLT_QuadJet80_v1")
self.referenceMapping['A']['Photon'] = []
self.referenceMapping['A']['Photon'].append("HLT_DoubleEle33_CaloIdL_v5")
self.referenceMapping['A']['Photon'].append("HLT_DoubleEle45_CaloIdL_v4")
self.referenceMapping['A']['Photon'].append("HLT_DoublePhoton33_HEVT_v4")
self.referenceMapping['A']['Photon'].append("HLT_DoublePhoton38_HEVT_v3")
self.referenceMapping['A']['Photon'].append("HLT_DoublePhoton40_MR150_v6")
self.referenceMapping['A']['Photon'].append("HLT_DoublePhoton40_R014_MR150_v6")
self.referenceMapping['A']['Photon'].append("HLT_DoublePhoton5_IsoVL_CEP_v7")
self.referenceMapping['A']['Photon'].append("HLT_DoublePhoton60_v4")
self.referenceMapping['A']['Photon'].append("HLT_DoublePhoton80_v2")
self.referenceMapping['A']['Photon'].append("HLT_Photon135_v2")
self.referenceMapping['A']['Photon'].append("HLT_Photon200_NoHE_v4")
self.referenceMapping['A']['Photon'].append("HLT_Photon20_CaloIdVL_IsoL_v7")
self.referenceMapping['A']['Photon'].append("HLT_Photon20_R9Id_Photon18_R9Id_v7")
self.referenceMapping['A']['Photon'].append("HLT_Photon225_NoHE_v2")
self.referenceMapping['A']['Photon'].append("HLT_Photon26_CaloIdXL_IsoXL_Photon18_CaloIdXL_IsoXL_v1")
self.referenceMapping['A']['Photon'].append("HLT_Photon26_CaloIdXL_IsoXL_Photon18_R9Id_v1")
self.referenceMapping['A']['Photon'].append("HLT_Photon26_CaloIdXL_IsoXL_Photon18_v1")
self.referenceMapping['A']['Photon'].append("HLT_Photon26_Photon18_v7")
self.referenceMapping['A']['Photon'].append("HLT_Photon26_R9Id_Photon18_CaloIdXL_IsoXL_v1")
self.referenceMapping['A']['Photon'].append("HLT_Photon26_R9Id_Photon18_R9Id_v4")
self.referenceMapping['A']['Photon'].append("HLT_Photon30_CaloIdVL_IsoL_v9")
self.referenceMapping['A']['Photon'].append("HLT_Photon30_CaloIdVL_v8")
self.referenceMapping['A']['Photon'].append("HLT_Photon36_CaloIdL_IsoVL_Photon22_CaloIdL_IsoVL_v4")
self.referenceMapping['A']['Photon'].append("HLT_Photon36_CaloIdL_IsoVL_Photon22_R9Id_v3")
self.referenceMapping['A']['Photon'].append("HLT_Photon36_CaloIdL_IsoVL_Photon22_v5")
self.referenceMapping['A']['Photon'].append("HLT_Photon36_CaloIdVL_Photon22_CaloIdVL_v2")
self.referenceMapping['A']['Photon'].append("HLT_Photon36_Photon22_v1")
self.referenceMapping['A']['Photon'].append("HLT_Photon36_R9Id_Photon22_CaloIdL_IsoVL_v4")
self.referenceMapping['A']['Photon'].append("HLT_Photon36_R9Id_Photon22_R9Id_v3")
self.referenceMapping['A']['Photon'].append("HLT_Photon400_v2")
self.referenceMapping['A']['Photon'].append("HLT_Photon44_CaloIdL_Photon34_CaloIdL_v2")
self.referenceMapping['A']['Photon'].append("HLT_Photon48_CaloIdL_Photon38_CaloIdL_v2")
self.referenceMapping['A']['Photon'].append("HLT_Photon50_CaloIdVL_IsoL_v7")
self.referenceMapping['A']['Photon'].append("HLT_Photon50_CaloIdVL_v4")
self.referenceMapping['A']['Photon'].append("HLT_Photon75_CaloIdVL_IsoL_v8")
self.referenceMapping['A']['Photon'].append("HLT_Photon75_CaloIdVL_v7")
self.referenceMapping['A']['Photon'].append("HLT_Photon90_CaloIdVL_IsoL_v5")
self.referenceMapping['A']['Photon'].append("HLT_Photon90_CaloIdVL_v4")
self.referenceMapping['A']['PhotonHad'] = []
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon30_CaloIdVT_CentralJet20_BTagIP_v3")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon40_CaloIdL_R005_MR150_v5")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon40_CaloIdL_R017_MR500_v3")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon40_CaloIdL_R023_MR350_v3")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon40_CaloIdL_R029_MR250_v3")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon40_CaloIdL_R042_MR200_v3")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon55_CaloIdL_R017_MR500_v1")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon55_CaloIdL_R023_MR350_v1")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon55_CaloIdL_R029_MR250_v1")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon55_CaloIdL_R042_MR200_v1")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon70_CaloIdL_HT400_v3")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon70_CaloIdL_HT500_v1")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon70_CaloIdL_MHT110_v1")
self.referenceMapping['A']['PhotonHad'].append("HLT_Photon70_CaloIdL_MHT90_v3")
self.referenceMapping['A']['SingleElectron'] = []
self.referenceMapping['A']['SingleElectron'].append("HLT_Ele100_CaloIdVL_CaloIsoVL_TrkIdVL_TrkIsoVL_v3")
self.referenceMapping['A']['SingleElectron'].append("HLT_Ele25_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_v5")
self.referenceMapping['A']['SingleElectron'].append("HLT_Ele27_WP80_PFMT50_v4")
self.referenceMapping['A']['SingleElectron'].append("HLT_Ele32_CaloIdVL_CaloIsoVL_TrkIdVL_TrkIsoVL_v5")
self.referenceMapping['A']['SingleElectron'].append("HLT_Ele32_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_v7")
self.referenceMapping['A']['SingleElectron'].append("HLT_Ele32_WP70_PFMT50_v4")
self.referenceMapping['A']['SingleElectron'].append("HLT_Ele65_CaloIdVT_TrkIdT_v4")
self.referenceMapping['A']['SingleMu'] = []
self.referenceMapping['A']['SingleMu'].append("HLT_IsoMu15_eta2p1_v1")
self.referenceMapping['A']['SingleMu'].append("HLT_IsoMu15_v14")
self.referenceMapping['A']['SingleMu'].append("HLT_IsoMu17_v14")
self.referenceMapping['A']['SingleMu'].append("HLT_IsoMu20_v9")
self.referenceMapping['A']['SingleMu'].append("HLT_IsoMu24_eta2p1_v3")
self.referenceMapping['A']['SingleMu'].append("HLT_IsoMu24_v9")
self.referenceMapping['A']['SingleMu'].append("HLT_IsoMu30_eta2p1_v3")
self.referenceMapping['A']['SingleMu'].append("HLT_IsoMu34_eta2p1_v1")
self.referenceMapping['A']['SingleMu'].append("HLT_L1SingleMu10_v4")
self.referenceMapping['A']['SingleMu'].append("HLT_L1SingleMu20_v4")
self.referenceMapping['A']['SingleMu'].append("HLT_L2Mu10_v6")
self.referenceMapping['A']['SingleMu'].append("HLT_L2Mu20_v6")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu100_eta2p1_v1")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu12_v8")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu15_v9")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu20_v8")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu24_eta2p1_v1")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu24_v8")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu30_eta2p1_v1")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu30_v8")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu40_eta2p1_v1")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu40_v6")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu5_v10")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu60_eta2p1_v1")
self.referenceMapping['A']['SingleMu'].append("HLT_Mu8_v8")
self.referenceMapping['A']['Tau'] = []
self.referenceMapping['A']['Tau'].append("HLT_DoubleIsoPFTau45_Trk5_eta2p1_v3")
self.referenceMapping['A']['Tau'].append("HLT_IsoPFTau40_IsoPFTau30_Trk5_eta2p1_v3")
self.referenceMapping['A']['Tau'].append("HLT_MediumIsoPFTau35_Trk20_MET60_v1")
self.referenceMapping['A']['Tau'].append("HLT_MediumIsoPFTau35_Trk20_MET70_v1")
self.referenceMapping['A']['Tau'].append("HLT_MediumIsoPFTau35_Trk20_v1")
self.referenceMapping['A']['TauPlusX'] = []
self.referenceMapping['A']['TauPlusX'].append("HLT_Ele18_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_MediumIsoPFTau20_v1")
self.referenceMapping['A']['TauPlusX'].append("HLT_Ele18_CaloIdVT_TrkIdT_MediumIsoPFTau20_v1")
self.referenceMapping['A']['TauPlusX'].append("HLT_Ele20_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_MediumIsoPFTau20_v1")
self.referenceMapping['A']['TauPlusX'].append("HLT_HT300_DoubleIsoPFTau10_Trk3_PFMHT40_v8")
self.referenceMapping['A']['TauPlusX'].append("HLT_HT350_DoubleIsoPFTau10_Trk3_PFMHT45_v8")
self.referenceMapping['A']['TauPlusX'].append("HLT_IsoMu15_LooseIsoPFTau15_v9")
self.referenceMapping['A']['TauPlusX'].append("HLT_IsoMu15_eta2p1_LooseIsoPFTau20_v1")
self.referenceMapping['A']['TauPlusX'].append("HLT_IsoMu15_eta2p1_MediumIsoPFTau20_v1")
self.referenceMapping['A']['TauPlusX'].append("HLT_IsoMu15_eta2p1_TightIsoPFTau20_v1")
self.referenceMapping['A']['TauPlusX'].append("HLT_Mu15_LooseIsoPFTau15_v9")
self.referenceMapping['A']['TauPlusX'].append("HLT_QuadJet50_IsoPFTau50_v1")
self.referenceMapping['ALCAP0'] = {}
self.referenceMapping['ALCAP0']['AlCaP0'] = []
self.referenceMapping['ALCAP0']['AlCaP0'].append("AlCa_EcalEta_v9")
self.referenceMapping['ALCAP0']['AlCaP0'].append("AlCa_EcalPi0_v10")
self.referenceMapping['ALCAPHISYM'] = {}
self.referenceMapping['ALCAPHISYM']['AlCaPhiSym'] = []
self.referenceMapping['ALCAPHISYM']['AlCaPhiSym'].append("AlCa_EcalPhiSym_v7")
self.referenceMapping['Calibration'] = {}
self.referenceMapping['Calibration']['TestEnablesEcalHcalDT'] = []
self.referenceMapping['Calibration']['TestEnablesEcalHcalDT'].append("HLT_DTCalibration_v1")
self.referenceMapping['Calibration']['TestEnablesEcalHcalDT'].append("HLT_EcalCalibration_v2")
self.referenceMapping['Calibration']['TestEnablesEcalHcalDT'].append("HLT_HcalCalibration_v2")
self.referenceMapping['EcalCalibration'] = {}
self.referenceMapping['EcalCalibration']['EcalLaser'] = []
self.referenceMapping['EcalCalibration']['EcalLaser'].append("HLT_EcalCalibration_v2")
self.referenceMapping['Express'] = {}
self.referenceMapping['Express']['ExpressPhysics'] = []
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_DoubleEle45_CaloIdL_v4")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_DoubleMu45_v6")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_DoublePhoton80_v2")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_EightJet120_v1")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_Ele100_CaloIdVL_CaloIsoVL_TrkIdVL_TrkIsoVL_v3")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v8")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_Ele65_CaloIdVT_TrkIdT_v4")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_HT2000_v2")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_Jet370_v6")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_Jet800_v1")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_MET200_v7")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_MET400_v2")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_Mu100_eta2p1_v1")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_Mu17_Ele8_CaloIdT_CaloIsoVL_v4")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_Mu17_Mu8_v7")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_Photon36_CaloIdL_IsoVL_Photon22_CaloIdL_IsoVL_v4")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_Photon400_v2")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_Photon75_CaloIdVL_IsoL_v8")
self.referenceMapping['Express']['ExpressPhysics'].append("HLT_ZeroBias_v4")
self.referenceMapping['HLTMON'] = {}
self.referenceMapping['HLTMON']['OfflineMonitor'] = []
self.referenceMapping['HLTMON']['OfflineMonitor'].append("AlCa_EcalEta_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("AlCa_EcalPhiSym_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("AlCa_EcalPi0_v10")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("AlCa_RPCMuonNoHits_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("AlCa_RPCMuonNoTriggers_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("AlCa_RPCMuonNormalisation_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_60Jet10_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_70Jet10_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_70Jet13_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Activity_Ecal_SC7_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_BTagMu_DiJet110_Mu5_v10")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_BTagMu_DiJet20_Mu5_v10")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_BTagMu_DiJet40_Mu5_v10")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_BTagMu_DiJet70_Mu5_v10")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_BeamGas_BSC_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_BeamGas_HF_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_BeamHalo_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_CentralJet46_CentralJet38_CentralJet20_DiBTagIP3D_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_CentralJet46_CentralJet38_DiBTagIP3D_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_CentralJet60_CentralJet53_DiBTagIP3D_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_CentralJet80_MET100_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_CentralJet80_MET160_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_CentralJet80_MET65_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_CentralJet80_MET80_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DTErrors_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiCentralJet20_BTagIP_MET65_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiCentralJet20_MET100_HBHENoiseFiltered_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiCentralJet20_MET80_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiCentralJet36_BTagIP3DLoose_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiJet130_PT130_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiJet160_PT160_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiJet60_MET45_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiJetAve110_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiJetAve190_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiJetAve240_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiJetAve300_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiJetAve30_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiJetAve370_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DiJetAve60_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Dimuon0_Jpsi_Muon_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Dimuon0_Jpsi_NoVertexing_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Dimuon0_Jpsi_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Dimuon0_Upsilon_Muon_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Dimuon0_Upsilon_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Dimuon10_Jpsi_Barrel_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Dimuon11_PsiPrime_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Dimuon13_Jpsi_Barrel_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Dimuon6_LowMass_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Dimuon7_Upsilon_Barrel_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Dimuon9_PsiPrime_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Dimuon9_Upsilon_Barrel_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleEle10_CaloIdL_TrkIdVL_Ele10_CaloIdT_TrkIdVL_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleEle33_CaloIdL_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleEle45_CaloIdL_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleEle8_CaloIdT_TrkIdVL_HT150_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleEle8_CaloIdT_TrkIdVL_Mass4_HT150_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleEle8_CaloIdT_TrkIdVL_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleIsoPFTau45_Trk5_eta2p1_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleJet30_ForwardBackward_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleJet60_ForwardBackward_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleJet70_ForwardBackward_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleJet80_ForwardBackward_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu3_v10")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu45_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu4_Dimuon4_Bs_Barrel_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu4_Dimuon6_Bs_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu4_Jpsi_Displaced_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu4p5_LowMass_Displaced_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu5_Acoplanarity03_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu5_Ele8_CaloIdL_TrkIdVL_v10")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu5_Ele8_CaloIdT_TrkIdVL_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu5_HT150_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu5_IsoMu5_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu5_Jpsi_Displaced_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu5_LowMass_Displaced_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu5_Mass4_HT150_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu5_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu6_Acoplanarity03_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu6_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoubleMu7_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoublePhoton33_HEVT_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoublePhoton38_HEVT_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoublePhoton40_MR150_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoublePhoton40_R014_MR150_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoublePhoton5_IsoVL_CEP_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoublePhoton60_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_DoublePhoton80_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_EightJet120_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele100_CaloIdVL_CaloIsoVL_TrkIdVL_TrkIsoVL_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele12_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_R005_MR200_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele12_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_R025_MR200_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele12_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_R029_MR200_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele15_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_HT250_PFMHT25_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele15_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_HT250_PFMHT40_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele15_CaloIdT_CaloIsoVL_TrkIdT_TrkIsoVL_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele17_CaloIdL_CaloIsoVL_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele17_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele17_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_Ele8_Mass30_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele17_CaloIdVT_CaloIsoVT_TrkIdT_TrkIsoVT_SC8_Mass30_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele18_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_MediumIsoPFTau20_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele18_CaloIdVT_TrkIdT_MediumIsoPFTau20_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele20_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_Jet35_Jet25_Deta3_Jet20_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele20_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_MediumIsoPFTau20_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele22_CaloIdL_CaloIsoVL_Ele15_HFT_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele25_CaloIdL_CaloIsoVL_TrkIdVL_TrkIsoVL_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralJet30_BTagIP_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralJet30_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_DiCentralJet30_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_QuadCentralJet30_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele25_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_TriCentralJet30_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele25_CaloIdVT_TrkIdT_CentralJet30_BTagIP_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele25_CaloIdVT_TrkIdT_CentralJet30_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele25_CaloIdVT_TrkIdT_DiCentralJet30_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele25_CaloIdVT_TrkIdT_QuadCentralJet30_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele25_CaloIdVT_TrkIdT_TriCentralJet30_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele27_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralJet30_CentralJet25_PFMHT20_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele27_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_CentralJet30_CentralJet25_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele27_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_Jet35_Jet25_Deta3_Jet20_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele27_CaloIdVT_TrkIdT_CentralJet30_CentralJet25_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele27_CaloIdVT_TrkIdT_Jet35_Jet25_Deta3_Jet20_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele27_WP80_PFMT50_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele30_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_DiCentralJet30_PFMHT25_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele30_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_Jet35_Jet25_Deta3p5_Jet25_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele32_CaloIdT_CaloIsoT_TrkIdT_TrkIsoT_Ele17_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele32_CaloIdT_CaloIsoT_TrkIdT_TrkIsoT_SC17_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele32_CaloIdVL_CaloIsoVL_TrkIdVL_TrkIsoVL_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele32_CaloIdVT_CaloIsoT_TrkIdT_TrkIsoT_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele32_WP70_PFMT50_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele65_CaloIdVT_TrkIdT_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele8_CaloIdL_CaloIsoVL_Jet40_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele8_CaloIdL_CaloIsoVL_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele8_CaloIdL_TrkIdVL_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele8_CaloIdT_CaloIsoVL_TrkIdVL_TrkIsoVL_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele8_CaloIdT_TrkIdT_DiJet30_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele8_CaloIdT_TrkIdT_QuadJet30_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele8_CaloIdT_TrkIdT_TriJet30_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Ele8_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_ExclDiJet60_HFAND_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_ExclDiJet60_HFOR_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_FatJetMass750_DR1p1_Deta2p0_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_FatJetMass850_DR1p1_Deta2p0_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_GlobalRunHPDNoise_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT150_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT2000_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT200_AlphaT0p55_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT200_DoubleEle5_CaloIdVL_MassJPsi_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT200_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT250_AlphaT0p53_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT250_AlphaT0p55_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT250_DoubleDisplacedJet60_PromptTrack_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT250_DoubleDisplacedJet60_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT250_MHT100_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT250_MHT90_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT250_Mu15_PFMHT40_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT250_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_AlphaT0p53_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_AlphaT0p54_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_CentralJet30_BTagIP_PFMHT55_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_CentralJet30_BTagIP_PFMHT65_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_CentralJet30_BTagIP_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_DoubleIsoPFTau10_Trk3_PFMHT40_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_Ele5_CaloIdVL_CaloIsoVL_TrkIdVL_TrkIsoVL_PFMHT40_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_MHT80_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_MHT90_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_Mu15_PFMHT40_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_Mu5_PFMHT40_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_PFMHT55_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_PFMHT65_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT300_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT350_AlphaT0p52_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT350_AlphaT0p53_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT350_DoubleIsoPFTau10_Trk3_PFMHT45_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT350_Ele30_CaloIdT_TrkIdT_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT350_Ele5_CaloIdVL_CaloIsoVL_TrkIdVL_TrkIsoVL_PFMHT45_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT350_MHT70_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT350_MHT80_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT350_MHT90_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT350_Mu5_PFMHT45_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT350_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT400_AlphaT0p51_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT400_AlphaT0p52_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT400_Ele60_CaloIdT_TrkIdT_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT400_MHT80_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT400_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT450_AlphaT0p51_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT450_AlphaT0p52_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT450_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT500_JetPt60_DPhi2p94_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT500_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT550_JetPt60_DPhi2p94_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT550_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT600_JetPt60_DPhi2p94_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT600_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HT650_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HcalNZS_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_HcalPhiSym_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu15_LooseIsoPFTau15_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu15_eta2p1_LooseIsoPFTau20_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu15_eta2p1_MediumIsoPFTau20_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu15_eta2p1_TightIsoPFTau20_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu15_eta2p1_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu15_v14")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu17_eta2p1_CentralJet30_BTagIP_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu17_eta2p1_CentralJet30_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu17_eta2p1_DiCentralJet30_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu17_eta2p1_QuadCentralJet30_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu17_eta2p1_TriCentralJet30_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu17_v14")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu20_DiCentralJet34_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu20_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu24_eta2p1_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu24_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu30_eta2p1_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoMu34_eta2p1_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoPFTau40_IsoPFTau30_Trk5_eta2p1_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoTrackHB_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_IsoTrackHE_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Jet110_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Jet190_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Jet240_CentralJet30_BTagIP_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Jet240_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Jet270_CentralJet30_BTagIP_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Jet300_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Jet30_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Jet370_NoJetID_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Jet370_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Jet60_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Jet800_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_JetE30_NoBPTX3BX_NoHalo_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_JetE30_NoBPTX_NoHalo_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_JetE30_NoBPTX_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_JetE50_NoBPTX3BX_NoHalo_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1DoubleJet36Central_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1DoubleMu0_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1ETM30_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1MultiJet_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1SingleEG12_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1SingleEG5_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1SingleJet16_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1SingleJet36_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1SingleMu10_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1SingleMu20_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1SingleMuOpen_AntiBPTX_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1SingleMuOpen_DT_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1SingleMuOpen_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1Tech_HBHEHO_totalOR_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1Tech_HCAL_HF_single_channel_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1TrackerCosmics_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1_Interbunch_BSC_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L1_PreCollisions_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L2DoubleMu0_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L2DoubleMu23_NoVertex_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L2DoubleMu30_NoVertex_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L2Mu10_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L2Mu20_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L2Mu60_1Hit_MET40_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_L2Mu60_1Hit_MET60_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_LogMonitor_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_MET100_HBHENoiseFiltered_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_MET100_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_MET120_HBHENoiseFiltered_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_MET120_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_MET200_HBHENoiseFiltered_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_MET200_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_MET400_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_MET65_HBHENoiseFiltered_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_MET65_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_MediumIsoPFTau35_Trk20_MET60_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_MediumIsoPFTau35_Trk20_MET70_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_MediumIsoPFTau35_Trk20_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu100_eta2p1_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu10_R005_MR200_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu10_R025_MR200_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu10_R029_MR200_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu12_eta2p1_DiCentralJet20_BTagIP3D1stTrack_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu12_eta2p1_DiCentralJet20_DiBTagIP3D1stTrack_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu12_eta2p1_DiCentralJet30_BTagIP3D_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu12_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu13_Mu8_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu15_DoublePhoton15_CaloIdL_v10")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu15_LooseIsoPFTau15_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu15_Photon20_CaloIdL_v10")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu15_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu17_Ele8_CaloIdL_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu17_Ele8_CaloIdT_CaloIsoVL_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu17_Mu8_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu17_eta2p1_CentralJet30_BTagIP_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu17_eta2p1_CentralJet30_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu17_eta2p1_DiCentralJet30_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu17_eta2p1_QuadCentralJet30_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu17_eta2p1_TriCentralJet30_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu20_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu24_eta2p1_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu24_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu30_eta2p1_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu30_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu40_HT200_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu40_eta2p1_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu40_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu5_DiJet30_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu5_DoubleEle8_CaloIdT_TrkIdVL_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu5_Ele8_CaloIdT_CaloIsoVL_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu5_Ele8_CaloIdT_TrkIdVL_Ele8_CaloIdL_TrkIdVL_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu5_Ele8_CaloIdT_TrkIdVL_HT150_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu5_Ele8_CaloIdT_TrkIdVL_Mass4_HT150_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu5_L2Mu2_Jpsi_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu5_QuadJet30_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu5_TkMu0_OST_Jpsi_Tight_B5Q7_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu5_Track2_Jpsi_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu5_TriJet30_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu5_v10")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu60_HT200_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu60_eta2p1_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu7_Track7_Jpsi_v10")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu8_Ele17_CaloIdL_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu8_Ele17_CaloIdT_CaloIsoVL_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu8_Jet40_v10")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu8_Photon20_CaloIdVT_IsoT_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Mu8_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_PFMHT150_v12")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon135_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon200_NoHE_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon20_CaloIdVL_IsoL_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon20_CaloIdVT_IsoT_Ele8_CaloIdL_CaloIsoVL_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon20_R9Id_Photon18_R9Id_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon225_NoHE_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon26_CaloIdXL_IsoXL_Photon18_CaloIdXL_IsoXL_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon26_CaloIdXL_IsoXL_Photon18_R9Id_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon26_CaloIdXL_IsoXL_Photon18_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon26_Photon18_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon26_R9Id_Photon18_CaloIdXL_IsoXL_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon26_R9Id_Photon18_R9Id_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon30_CaloIdVL_IsoL_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon30_CaloIdVL_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon30_CaloIdVT_CentralJet20_BTagIP_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon36_CaloIdL_IsoVL_Photon22_CaloIdL_IsoVL_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon36_CaloIdL_IsoVL_Photon22_R9Id_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon36_CaloIdL_IsoVL_Photon22_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon36_CaloIdVL_Photon22_CaloIdVL_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon36_Photon22_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon36_R9Id_Photon22_CaloIdL_IsoVL_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon36_R9Id_Photon22_R9Id_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon400_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon40_CaloIdL_R005_MR150_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon40_CaloIdL_R017_MR500_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon40_CaloIdL_R023_MR350_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon40_CaloIdL_R029_MR250_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon40_CaloIdL_R042_MR200_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon44_CaloIdL_Photon34_CaloIdL_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon48_CaloIdL_Photon38_CaloIdL_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon50_CaloIdVL_IsoL_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon50_CaloIdVL_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon55_CaloIdL_R017_MR500_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon55_CaloIdL_R023_MR350_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon55_CaloIdL_R029_MR250_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon55_CaloIdL_R042_MR200_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon70_CaloIdL_HT400_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon70_CaloIdL_HT500_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon70_CaloIdL_MHT110_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon70_CaloIdL_MHT90_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon75_CaloIdVL_IsoL_v8")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon75_CaloIdVL_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon90_CaloIdVL_IsoL_v5")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Photon90_CaloIdVL_v4")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Physics_v2")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_PixelTracks_Multiplicity100_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_PixelTracks_Multiplicity80_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_QuadJet40_IsoPFTau40_v12")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_QuadJet40_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_QuadJet45_IsoPFTau45_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_QuadJet50_DiJet40_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_QuadJet50_IsoPFTau50_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_QuadJet50_Jet40_Jet30_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_QuadJet70_v6")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_QuadJet80_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_R014_MR150_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_R020_MR150_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_R020_MR550_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_R023_MR550_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_R025_MR150_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_R025_MR450_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_R029_MR450_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_R033_MR350_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_R036_MR350_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_R038_MR250_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_R042_MR250_v3")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_Random_v1")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_RegionalCosmicTracking_v7")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_TripleEle10_CaloIdL_TrkIdVL_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_TripleMu5_v9")
self.referenceMapping['HLTMON']['OfflineMonitor'].append("HLT_ZeroBias_v4")
self.referenceMapping['NanoDST'] = {}
self.referenceMapping['NanoDST']['L1Accept'] = []
self.referenceMapping['NanoDST']['L1Accept'].append("DST_Physics_v2")
self.referenceMapping['RPCMON'] = {}
self.referenceMapping['RPCMON']['RPCMonitor'] = []
self.referenceMapping['RPCMON']['RPCMonitor'].append("AlCa_RPCMuonNoHits_v6")
self.referenceMapping['RPCMON']['RPCMonitor'].append("AlCa_RPCMuonNoTriggers_v6")
self.referenceMapping['RPCMON']['RPCMonitor'].append("AlCa_RPCMuonNormalisation_v6")
self.referenceMapping['TrackerCalibration'] = {}
self.referenceMapping['TrackerCalibration']['TestEnablesTracker'] = []
self.referenceMapping['TrackerCalibration']['TestEnablesTracker'].append("HLT_TrackerCalibration_v2")
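# this reference mapping is the stream/dataset/trigger-path assignment that
# configureRun is expected to derive from the HLT configuration (see test00)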
# keep references to the DAOs used throughout the tests
self.insertRunDAO = daoFactory(classname = "RunConfig.InsertRun")
self.insertLumiDAO = daoFactory(classname = "RunConfig.InsertLumiSection")
self.insertStreamCMSSWVersionDAO = daoFactory(classname = "RunConfig.InsertStreamCMSSWVersion")
self.insertStreamerDAO = daoFactory(classname = "RunConfig.InsertStreamer")
self.findNewRunsDAO = daoFactory(classname = "Tier0Feeder.FindNewRuns")
self.findNewRunStreamsDAO = daoFactory(classname = "Tier0Feeder.FindNewRunStreams")
self.feedStreamersDAO = daoFactory(classname = "Tier0Feeder.FeedStreamers")
self.insertClosedLumiDAO = daoFactory(classname = "RunLumiCloseout.InsertClosedLumi")
self.finalCloseLumiDAO = daoFactory(classname = "RunLumiCloseout.FinalCloseLumi")
self.insertSplitLumisDAO = daoFactory(classname = "JobSplitting.InsertSplitLumis")
self.findNewExpressRunsDAO = daoFactory(classname = "Tier0Feeder.FindNewExpressRuns")
self.releaseExpressDAO = daoFactory(classname = "Tier0Feeder.ReleaseExpress")
self.getStreamerWorkflowsForMonitoringDAO = daoFactory(classname = "Tier0Feeder.GetStreamerWorkflowsForMonitoring")
self.getPromptRecoWorkflowsForMonitoringDAO = daoFactory(classname = "Tier0Feeder.GetPromptRecoWorkflowsForMonitoring")
self.markTrackedWorkflowMonitoringDAO = daoFactory(classname = "Tier0Feeder.MarkTrackedWorkflowMonitoring")
return
def tearDown(self):
"""
_tearDown_
"""
self.testInit.clearDatabase()
return
def changeActiveLumiSplits(self, lumi):
"""
_changeActiveLumiSplits_
Move the given file from wmbs_sub_files_available to
wmbs_sub_files_complete, simulating that its subscription
finished processing it.
"""
myThread = threading.currentThread()
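# copy the row for this file into the completed table, then drop it from the available table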
myThread.dbi.processData("""INSERT INTO wmbs_sub_files_complete (fileid, subscription)
SELECT
fileid,
subscription
FROM
wmbs_sub_files_available
WHERE
fileid = '%s'
""" % lumi, transaction = False)
myThread.dbi.processData("""DELETE FROM wmbs_sub_files_available
WHERE fileid = '%s'
""" % lumi, transaction = False)
return
def insertLocation(self, pnn):
"""
_insertLocation_
Insert the given PNN into the wmbs_pnns location table.
"""
myThread = threading.currentThread()
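# wmbs_pnns_SEQ provides the next id for the new location row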
myThread.dbi.processData("""INSERT INTO wmbs_pnns (id, pnn)
VALUES (wmbs_pnns_SEQ.nextval, '%s')
""" % pnn, transaction = False)
return
def insertRun(self, run):
"""
_insertRun_
insert a run record for the given run, using the test HLT key
"""
self.insertRunDAO.execute(binds = { 'RUN' : run,
'HLTKEY' : self.hltkey },
transaction = False)
return
def insertRunStreamLumi(self, run, stream, lumi):
"""
_insertRunStreamLumi_
insert run/stream/cmssw assoc and single streamer with given lumi
"""
self.insertStreamCMSSWVersionDAO.execute(binds = { 'RUN' : run,
'STREAM' : stream,
'VERSION' : "CMSSW_4_2_7" },
transaction = False)
self.insertLumiDAO.execute(binds = { 'RUN' : run,
'LUMI' : lumi },
transaction = False)
self.insertStreamerDAO.execute(streamerPNN = self.tier0Config.Global.StreamerPNN,
binds = { 'RUN' : run,
'P5_ID': self.p5id,
'LUMI' : lumi,
'STREAM' : stream,
'LFN' : makeUUID(),
'FILESIZE' : 100,
'EVENTS' : 100,
'TIME' : int(time.time()) },
transaction = False)
return
def feedStreamers(self):
"""
_feedStreamers_
helper function to wrap the feedStreamersDAO
call in a transaction
"""
myThread = threading.currentThread()
myThread.transaction.begin()
self.feedStreamersDAO.execute(conn = myThread.transaction.conn, transaction = True)
myThread.transaction.commit()
return
def getNumFeedStreamers(self):
"""
_getNumFeedStreamers_
helper function that counts the number of fed streamers
"""
myThread = threading.currentThread()
results = myThread.dbi.processData("""SELECT COUNT(*)
FROM wmbs_sub_files_available
""", transaction = False)[0].fetchall()
return results[0][0]
def getNumActiveSplitLumis(self):
"""
_getNumActiveSplitLumis_
helper function that counts the number of active split lumis
"""
myThread = threading.currentThread()
results = myThread.dbi.processData("""SELECT COUNT(*)
FROM lumi_section_split_active
""", transaction = False)[0].fetchall()
return results[0][0]
def getClosedLumis(self):
"""
_getClosedLumis_
helper function that retrieves the closed lumis
"""
myThread = threading.currentThread()
results = myThread.dbi.processData("""SELECT lumi_section_closed.run_id,
stream.name,
lumi_section_closed.lumi_id,
lumi_section_closed.filecount
FROM lumi_section_closed
INNER JOIN stream ON
stream.id = lumi_section_closed.stream_id
""", transaction = False)[0].fetchall()
runStreamLumiDict = {}
for result in results:
run = result[0]
stream = result[1]
lumi = result[2]
filecount = result[3]
if run not in runStreamLumiDict:
runStreamLumiDict[run] = {}
if stream not in runStreamLumiDict[run]:
runStreamLumiDict[run][stream] = {}
runStreamLumiDict[run][stream][lumi] = filecount
return runStreamLumiDict
def getEndedRuns(self):
"""
_getEndedRuns_
helper function that retrieves the ended runs
"""
myThread = threading.currentThread()
results = myThread.dbi.processData("""SELECT run_id, lumicount
FROM run
WHERE close_time > 0
""", transaction = False)[0].fetchall()
runLumiDict = {}
for result in results:
runLumiDict[result[0]] = result[1]
return runLumiDict
def getClosedRunStreamFilesets(self):
"""
_getClosedRunStreamFilesets_
helper function that retrieves closed run/stream filesets
"""
myThread = threading.currentThread()
results = myThread.dbi.processData("""SELECT run_stream_fileset_assoc.run_id,
stream.name
FROM run_stream_fileset_assoc
INNER JOIN wmbs_fileset ON
wmbs_fileset.id = run_stream_fileset_assoc.fileset AND
wmbs_fileset.open = 0
INNER JOIN stream ON
stream.id = run_stream_fileset_assoc.stream_id
""", transaction = False)[0].fetchall()
runStreamDict = {}
for result in results:
runStreamDict[result[0]] = result[1]
return runStreamDict
def feedCouchMonitoring(self):
"""
_feedCouchMonitoring_
check for workflows that haven't been uploaded to Couch for monitoring yet
"""
workflows = self.getStreamerWorkflowsForMonitoringDAO.execute()
workflows += self.getPromptRecoWorkflowsForMonitoringDAO.execute()
if not workflows:
logging.debug("No workflows to publish to couch monitoring, doing nothing")
else:
logging.debug(" Going to publish %d workflows" % len(workflows))
for (workflowId, run, workflowName) in workflows:
logging.info(" Publishing workflow %s to monitoring" % workflowName)
doc = {}
doc["RequestName"] = workflowName
doc["Run"] = run
response = self.localRequestCouchDB.insertGenericRequest(doc)
# treat both a fresh insert and an already-existing document as success
if response in ("OK", "EXISTS"):
logging.info(" Successfully uploaded request %s" % workflowName)
self.markTrackedWorkflowMonitoringDAO.execute(workflowId)
return
def test00(self):
"""
_test00_
Test the FindNewRuns, FindNewRunStreams and FeedStreamers DAOs
and their interaction with the RunConfigAPI.configureRun and
RunConfigAPI.configureRunStream methods.
The interaction with the StorageManager DB for closing lumis is
not tested here; instead we close lumis directly and only check
that the system behaves correctly with open and closed lumis.
"""
runs = self.findNewRunsDAO.execute(transaction = False)
self.assertEqual(len(runs), 0,
"ERROR: there should be no new run")
runStreams = self.findNewRunStreamsDAO.execute(transaction = False)
self.assertEqual(len(runStreams.keys()), 0,
"ERROR: there should be no new run/stream")
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 0,
"ERROR: there should be no streamers feed")
self.insertRun(176161)
self.insertRunStreamLumi(176161, "A", 1)
runs = self.findNewRunsDAO.execute(transaction = False)
self.assertEqual(len(runs), 1,
"ERROR: there should be one new run")
runStreams = self.findNewRunStreamsDAO.execute(transaction = False)
self.assertEqual(len(runStreams.keys()), 0,
"ERROR: there should be no new run/stream")
RunConfigAPI.configureRun(self.tier0Config, 176161,
self.hltConfig,
{ 'process' : "HLT",
'mapping' : self.referenceMapping })
runs = self.findNewRunsDAO.execute(transaction = False)
self.assertEqual(len(runs), 0,
"ERROR: there should be no new run")
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 0,
"ERROR: there should be no streamers feed")
runStreams = self.findNewRunStreamsDAO.execute(transaction = False)
self.assertEqual(set(runStreams.keys()), set([176161]),
"ERROR: there should be new run/stream for run 176161")
self.assertEqual(set(runStreams[176161]), set(["A"]),
"ERROR: there should be new run/stream for run 176161 and stream A")
RunConfigAPI.configureRunStream(self.tier0Config, 176161, "A", self.testDir, self.dqmUploadProxy)
runStreams = self.findNewRunStreamsDAO.execute(transaction = False)
self.assertEqual(len(runStreams.keys()), 0,
"ERROR: there should be no new run/stream")
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 0,
"ERROR: there should be no streamers feed")
self.insertClosedLumiDAO.execute(binds = { 'RUN' : 176161,
'LUMI' : 1,
'STREAM' : 'A',
'FILECOUNT' : 1,
'INSERT_TIME' : int(time.time()),
'CLOSE_TIME' : 0 },
conn = None,
transaction = False)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 0,
"ERROR: there should be no streamers feed")
self.finalCloseLumiDAO.execute(int(time.time()), conn=None, transaction = False)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 1,
"ERROR: there should be 1 streamers feed")
self.insertRunStreamLumi(176161, "A", 2)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 1,
"ERROR: there should be 1 streamers feed")
self.insertClosedLumiDAO.execute(binds = { 'RUN' : 176161,
'STREAM' : 'A',
'LUMI' : 2,
'INSERT_TIME' : int(time.time()),
'CLOSE_TIME' : int(time.time()),
'FILECOUNT' : 1 },
transaction = False)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 2,
"ERROR: there should be 2 streamers feed")
self.insertRunStreamLumi(176161, "A", 3)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 2,
"ERROR: there should be 2 streamers feed")
self.insertClosedLumiDAO.execute(binds = { 'RUN' : 176161,
'STREAM' : 'A',
'LUMI' : 3,
'INSERT_TIME' : int(time.time()),
'CLOSE_TIME' : 0,
'FILECOUNT' : 2 },
transaction = False)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 2,
"ERROR: there should be 2 streamers feed")
self.finalCloseLumiDAO.execute(int(time.time()), transaction = False)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 2,
"ERROR: there should be 2 streamers feed")
self.insertRunStreamLumi(176161, "A", 3)
self.finalCloseLumiDAO.execute(int(time.time()), transaction = False)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 4,
"ERROR: there should be 4 streamers feed")
self.insertRun(176162)
self.insertRunStreamLumi(176162, "A", 1)
self.insertRunStreamLumi(176162, "Express", 1)
self.insertRunStreamLumi(176162, "HLTMON", 1)
self.insertRun(176163)
self.insertRunStreamLumi(176163, "A", 1)
self.insertRunStreamLumi(176163, "Express", 1)
runs = self.findNewRunsDAO.execute(transaction = False)
self.assertEqual(len(runs), 2,
"ERROR: there should be two new runs")
runStreams = self.findNewRunStreamsDAO.execute(transaction = False)
self.assertEqual(len(runStreams.keys()), 0,
"ERROR: there should be no new run/stream")
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 4,
"ERROR: there should be 4 streamers feed")
RunConfigAPI.configureRun(self.tier0Config, 176162,
self.hltConfig,
{ 'process' : "HLT",
'mapping' : self.referenceMapping })
runStreams = self.findNewRunStreamsDAO.execute(transaction = False)
self.assertEqual(set(runStreams.keys()), set([176162]),
"ERROR: there should be new run/stream for run 176162")
self.assertEqual(set(runStreams[176162]), set(["A", "Express", "HLTMON"]),
"ERROR: there should be new run/stream for run 176162 and stream A,Express,HLTMON")
runs = self.findNewRunsDAO.execute(transaction = False)
self.assertEqual(len(runs), 1,
"ERROR: there should be one new run")
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 4,
"ERROR: there should be 4 streamers feed")
RunConfigAPI.configureRun(self.tier0Config, 176163,
self.hltConfig,
{ 'process' : "HLT",
'mapping' : self.referenceMapping })
runs = self.findNewRunsDAO.execute(transaction = False)
self.assertEqual(len(runs), 0,
"ERROR: there should be no new run")
runStreams = self.findNewRunStreamsDAO.execute(transaction = False)
self.assertEqual(set(runStreams.keys()), set([176162, 176163]),
"ERROR: there should be new run/stream for run 176162 and 176163")
self.assertEqual(set(runStreams[176162]), set(["A", "Express", "HLTMON"]),
"ERROR: there should be new run/stream for run 176162 and stream A, Express and HLTMON")
self.assertEqual(set(runStreams[176163]), set(["A", "Express"]),
"ERROR: there should be new run/stream for run 176162 and stream A and Express")
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 4,
"ERROR: there should be 4 streamers feed")
RunConfigAPI.configureRunStream(self.tier0Config, 176162, "A", self.testDir, self.dqmUploadProxy)
RunConfigAPI.configureRunStream(self.tier0Config, 176163, "Express", self.testDir, self.dqmUploadProxy)
runStreams = self.findNewRunStreamsDAO.execute(transaction = False)
self.assertEqual(set(runStreams.keys()), set([176162, 176163]),
"ERROR: there should be new run/stream for run 176162 and 176163")
self.assertEqual(set(runStreams[176162]), set(["Express", "HLTMON"]),
"ERROR: there should be new run/stream for run 176162 and stream Express and HLTMON")
self.assertEqual(set(runStreams[176163]), set(["A"]),
"ERROR: there should be new run/stream for run 176162 and stream A")
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 4,
"ERROR: there should be 4 streamers feed")
self.insertClosedLumiDAO.execute(binds = { 'RUN' : 176162,
'STREAM' : 'A',
'LUMI' : 1,
'INSERT_TIME' : int(time.time()),
'CLOSE_TIME' : int(time.time()),
'FILECOUNT' : 1 },
transaction = False)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 5,
"ERROR: there should be 5 streamers feed")
self.insertClosedLumiDAO.execute(binds = { 'RUN' : 176163,
'STREAM' : 'Express',
'LUMI' : 1,
'INSERT_TIME' : int(time.time()),
'CLOSE_TIME' : int(time.time()),
'FILECOUNT' : 1 },
transaction = False)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 6,
"ERROR: there should be 6 streamers feed")
RunConfigAPI.configureRunStream(self.tier0Config, 176162, "Express", self.testDir, self.dqmUploadProxy)
RunConfigAPI.configureRunStream(self.tier0Config, 176162, "HLTMON", self.testDir, self.dqmUploadProxy)
runStreams = self.findNewRunStreamsDAO.execute(transaction = False)
self.assertEqual(set(runStreams.keys()), set([176163]),
"ERROR: there should be new run/stream for run 176163")
self.assertEqual(set(runStreams[176163]), set(["A"]),
"ERROR: there should be new run/stream for run 176163 and stream A")
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 6,
"ERROR: there should be 6 streamers feed")
self.insertClosedLumiDAO.execute(binds = { 'RUN' : 176162,
'STREAM' : 'Express',
'LUMI' : 1,
'INSERT_TIME' : int(time.time()),
'CLOSE_TIME' : int(time.time()),
'FILECOUNT' : 1 },
transaction = False)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 7,
"ERROR: there should be 7 streamers feed")
self.insertClosedLumiDAO.execute(binds = { 'RUN' : 176162,
'STREAM' : 'HLTMON',
'LUMI' : 1,
'INSERT_TIME' : int(time.time()),
'CLOSE_TIME' : int(time.time()),
'FILECOUNT' : 1 },
transaction = False)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 8,
"ERROR: there should be 8 streamers feed")
RunConfigAPI.configureRunStream(self.tier0Config, 176163, "A", self.testDir, self.dqmUploadProxy)
runStreams = self.findNewRunStreamsDAO.execute(transaction = False)
self.assertEqual(len(runStreams.keys()), 0,
"ERROR: there should be no new run/stream")
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 8,
"ERROR: there should be 8 streamers feed")
self.insertClosedLumiDAO.execute(binds = { 'RUN' : 176163,
'STREAM' : 'A',
'LUMI' : 1,
'INSERT_TIME' : int(time.time()),
'CLOSE_TIME' : int(time.time()),
'FILECOUNT' : 1 },
transaction = False)
self.feedStreamers()
self.assertEqual(self.getNumFeedStreamers(), 9,
"ERROR: there should be 9 streamers feed")
return
def test01(self):
"""
_test01_
Test the interaction with StorageManager DB to end runs and close lumis
for real run examples with full run and run/stream configuration
"""
        if self.dbInterfaceStorageManager is None:
print("Your config is missing the StorageManagerDatabase section")
print("Skipping run/lumi closing test")
return
RunLumiCloseoutAPI.closeRuns(self.dbInterfaceStorageManager)
self.assertEqual(len(self.getEndedRuns()), 0,
"ERROR: there should be no ended runs")
RunLumiCloseoutAPI.closeLumiSections(self.dbInterfaceStorageManager)
self.assertEqual(len(self.getClosedLumis()), 0,
"ERROR: there should be no closed lumis")
self.insertRun(176161)
RunLumiCloseoutAPI.closeRuns(self.dbInterfaceStorageManager)
endedRuns = self.getEndedRuns()
        self.assertEqual(list(endedRuns.keys()), [176161],
                         "ERROR: there should be 1 ended run: 176161")
self.assertEqual(endedRuns[176161], 23,
"ERROR: there should be 23 lumis in run 176161")
RunLumiCloseoutAPI.closeLumiSections(self.dbInterfaceStorageManager)
self.assertEqual(len(self.getClosedLumis()), 0,
"ERROR: there should be no closed lumis")
self.insertRunStreamLumi(176161, "A", 1)
RunLumiCloseoutAPI.closeLumiSections(self.dbInterfaceStorageManager)
self.assertEqual(len(self.getClosedLumis()), 0,
"ERROR: there should be no closed lumis")
RunConfigAPI.configureRun(self.tier0Config, 176161,
self.hltConfig,
{ 'process' : "HLT",
'mapping' : self.referenceMapping })
RunLumiCloseoutAPI.closeLumiSections(self.dbInterfaceStorageManager)
self.assertEqual(len(self.getClosedLumis()), 0,
"ERROR: there should be no closed lumis")
RunConfigAPI.configureRunStream(self.tier0Config, 176161, "A", self.testDir, self.dqmUploadProxy)
RunLumiCloseoutAPI.closeLumiSections(self.dbInterfaceStorageManager)
runStreamLumiDict = self.getClosedLumis()
        self.assertEqual(list(runStreamLumiDict.keys()), [176161],
                         "ERROR: there should be closed lumis for run 176161")
        self.assertEqual(list(runStreamLumiDict[176161].keys()), ['A'],
                         "ERROR: there should be closed lumis for run 176161 and stream A")
        self.assertEqual(sorted(runStreamLumiDict[176161]['A'].keys()), list(range(1, 24)),
                         "ERROR: there should be closed lumis for run 176161, stream A and lumis 1 to 23")
        for lumi in range(1, 24):
            self.assertEqual(runStreamLumiDict[176161]['A'][lumi], 14,
                             "ERROR: there should be a filecount of 14 for run 176161, stream A and lumi %d" % lumi)
self.insertRunStreamLumi(176161, "HLTMON", 1)
RunConfigAPI.configureRunStream(self.tier0Config, 176161, "HLTMON", self.testDir, self.dqmUploadProxy)
RunLumiCloseoutAPI.closeLumiSections(self.dbInterfaceStorageManager)
runStreamLumiDict = self.getClosedLumis()
        self.assertEqual(list(runStreamLumiDict.keys()), [176161],
                         "ERROR: there should be closed lumis for run 176161")
        self.assertEqual(sorted(runStreamLumiDict[176161].keys()), ['A', 'HLTMON'],
                         "ERROR: there should be closed lumis for run 176161 and streams A and HLTMON")
        self.assertEqual(sorted(runStreamLumiDict[176161]['A'].keys()), list(range(1, 24)),
                         "ERROR: there should be closed lumis for run 176161, stream A and lumis 1 to 23")
        self.assertEqual(sorted(runStreamLumiDict[176161]['HLTMON'].keys()), list(range(1, 24)),
                         "ERROR: there should be closed lumis for run 176161, stream HLTMON and lumis 1 to 23")
        for lumi in range(1, 24):
            self.assertEqual(runStreamLumiDict[176161]['A'][lumi], 14,
                             "ERROR: there should be a filecount of 14 for run 176161, stream A and lumi %d" % lumi)
        self.assertEqual(runStreamLumiDict[176161]['HLTMON'][1], 9,
                         "ERROR: there should be a filecount of 9 for run 176161, stream HLTMON and lumi 1")
        self.assertEqual(runStreamLumiDict[176161]['HLTMON'][2], 1,
                         "ERROR: there should be a filecount of 1 for run 176161, stream HLTMON and lumi 2")
        for lumi in range(3, 23):
            self.assertEqual(runStreamLumiDict[176161]['HLTMON'][lumi], 14,
                             "ERROR: there should be a filecount of 14 for run 176161, stream HLTMON and lumi %d" % lumi)
        self.assertEqual(runStreamLumiDict[176161]['HLTMON'][23], 6,
                         "ERROR: there should be a filecount of 6 for run 176161, stream HLTMON and lumi 23")
return
def test02(self):
"""
_test02_
Test closeout code for run/stream filesets
"""
        if self.dbInterfaceStorageManager is None:
print("Your config is missing the StorageManagerDatabase section")
print("Skipping run/lumi closing test")
return
RunLumiCloseoutAPI.closeRunStreamFilesets()
self.assertEqual(len(self.getClosedRunStreamFilesets()), 0,
"ERROR: there should be no closed run/stream filesets")
self.insertRun(176161)
for count in range(14):
self.insertRunStreamLumi(176161, "A", 1)
RunConfigAPI.configureRun(self.tier0Config, 176161,
self.hltConfig,
{ 'process' : "HLT",
'mapping' : self.referenceMapping })
RunConfigAPI.configureRunStream(self.tier0Config, 176161, "A", self.testDir, self.dqmUploadProxy)
RunLumiCloseoutAPI.closeRuns(self.dbInterfaceStorageManager)
RunLumiCloseoutAPI.closeLumiSections(self.dbInterfaceStorageManager)
RunLumiCloseoutAPI.closeRunStreamFilesets()
self.assertEqual(len(self.getClosedRunStreamFilesets()), 0,
"ERROR: there should be no closed run/stream filesets")
self.feedStreamers()
RunLumiCloseoutAPI.closeRunStreamFilesets()
self.assertEqual(len(self.getClosedRunStreamFilesets()), 0,
"ERROR: there should be no closed run/stream filesets")
for lumi in range(2, 24):
for count in range(14):
self.insertRunStreamLumi(176161, "A", lumi)
RunLumiCloseoutAPI.closeRuns(self.dbInterfaceStorageManager)
RunLumiCloseoutAPI.stopRuns(self.dbInterfaceStorageManager)
RunLumiCloseoutAPI.closeLumiSections(self.dbInterfaceStorageManager)
RunLumiCloseoutAPI.closeRunStreamFilesets()
self.assertEqual(len(self.getClosedRunStreamFilesets()), 0,
"ERROR: there should be no closed run/stream filesets")
self.feedStreamers()
RunLumiCloseoutAPI.closeRunStreamFilesets()
self.assertEqual(self.getClosedRunStreamFilesets(), { 176161 : 'A' },
"ERROR: there should be 1 closed run/stream filesets for run 176161 and stream A")
self.assertEqual(len(self.getStreamerWorkflowsForMonitoringDAO.execute()), 1,
"ERROR: there should be 1 workflow to be injected to couchDB")
self.feedCouchMonitoring()
self.assertEqual(len(self.getStreamerWorkflowsForMonitoringDAO.execute()), 0,
"ERROR: there should be no workflow to be injected to couchDB")
return
def test03(self):
"""
_test03_
Test active split lumi checks
"""
myThread = threading.currentThread()
self.insertRun(176161)
self.insertRunStreamLumi(176161, "A", 1)
self.insertRunStreamLumi(176161, "A", 1)
self.insertRunStreamLumi(176161, "A", 1)
RunConfigAPI.configureRun(self.tier0Config, 176161,
self.hltConfig,
{ 'process' : "HLT",
'mapping' : self.referenceMapping })
RunConfigAPI.configureRunStream(self.tier0Config, 176161, "A", self.testDir, self.dqmUploadProxy)
self.insertClosedLumiDAO.execute(binds = { 'RUN' : 176161,
'STREAM' : 'A',
'LUMI' : 1,
'INSERT_TIME' : int(time.time()),
'CLOSE_TIME' : int(time.time()),
'FILECOUNT' : 3 },
transaction = False)
self.feedStreamers()
subID = myThread.dbi.processData("""SELECT wmbs_subscription.id
FROM run_stream_fileset_assoc
INNER JOIN stream ON
stream.id = run_stream_fileset_assoc.stream_id
INNER JOIN wmbs_subscription ON
wmbs_subscription.fileset = run_stream_fileset_assoc.fileset
WHERE run_stream_fileset_assoc.run_id = 176161
AND stream.name = 'A'
""", transaction = False)[0].fetchall()[0][0]
self.insertSplitLumisDAO.execute( binds = { 'SUB' : subID,
'LUMI' : 1,
'NFILES' : 3 }, conn = None, transaction = False)
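        # The split lumi stays active until all NFILES files of the lumi have
        # been processed, i.e. removed from wmbs_sub_files_available.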
RunLumiCloseoutAPI.checkActiveSplitLumis()
self.changeActiveLumiSplits(1)
myThread.dbi.processData("""DELETE FROM wmbs_sub_files_available
WHERE fileid = 1
""", transaction = False)
RunLumiCloseoutAPI.checkActiveSplitLumis()
self.assertEqual(self.getNumActiveSplitLumis(), 1,
"ERROR: there should be one split lumi.")
self.changeActiveLumiSplits(2)
RunLumiCloseoutAPI.checkActiveSplitLumis()
self.assertEqual(self.getNumActiveSplitLumis(), 1,
"ERROR: there should be one split lumi.")
self.changeActiveLumiSplits(3)
RunLumiCloseoutAPI.checkActiveSplitLumis()
self.assertEqual(self.getNumActiveSplitLumis(), 0,
"ERROR: there should be no split lumi.")
return
def test04(self):
"""
_test04_
Test releasing express processing without PopConLog DB
"""
self.insertRun(176161)
runs = self.findNewExpressRunsDAO.execute(transaction = False)
self.assertEqual(set(runs), set([176161]),
"ERROR: only run 176161 should not be express released.")
self.releaseExpressDAO.execute(binds = { 'RUN' : 176161 }, transaction = False)
runs = self.findNewExpressRunsDAO.execute(transaction = False)
self.assertEqual(set(runs), set([]),
"ERROR: there should be no run not express released.")
return
def test05(self):
"""
_test05_
Test the interaction with PopConLog DB to release express processing
"""
        if self.getExpressReadyRunsDAO is None:
print("Your config is missing the PopConLogDatabase section")
print("Skipping PopConLog based express release test")
return
self.insertRun(176161)
runs = self.getExpressReadyRunsDAO.execute(binds = { 'RUN' : 176161 }, transaction = False)
self.assertEqual(set(runs), set([176161]),
"ERROR: only run 176161 should be ready for express release.")
return
if __name__ == '__main__':
unittest.main()
| 64.747541
| 156
| 0.671494
| 11,470
| 118,488
| 6.659459
| 0.068091
| 0.224654
| 0.112995
| 0.205279
| 0.879137
| 0.865888
| 0.809044
| 0.622277
| 0.4458
| 0.279718
| 0
| 0.050853
| 0.198737
| 118,488
| 1,829
| 157
| 64.782942
| 0.753697
| 0.014913
| 0
| 0.23755
| 0
| 0
| 0.383943
| 0.174672
| 0
| 0
| 0
| 0
| 0.05249
| 1
| 0.012786
| false
| 0
| 0.010767
| 0
| 0.039031
| 0.008748
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f0342be59fc1ee954d778c9b5f25e9cc15102b33
| 249
|
py
|
Python
|
chainer_mask_rcnn/models/__init__.py
|
m3at/chainer-mask-rcnn
|
fa491663675cdc97974008becc99454d5e6e1d09
|
[
"MIT"
] | 16
|
2018-12-20T14:03:54.000Z
|
2021-01-22T23:37:31.000Z
|
chainer_mask_rcnn/models/__init__.py
|
Swall0w/chainer-mask-rcnn
|
83366fc77e52aa6a29cfac4caa697d8b45dcffc6
|
[
"MIT"
] | 2
|
2018-12-28T04:58:19.000Z
|
2019-01-07T03:39:38.000Z
|
chainer_mask_rcnn/models/__init__.py
|
Swall0w/chainer-mask-rcnn
|
83366fc77e52aa6a29cfac4caa697d8b45dcffc6
|
[
"MIT"
] | 3
|
2019-02-27T05:06:59.000Z
|
2019-07-07T05:56:36.000Z
|
# flake8: noqa
from . import utils
from .mask_rcnn import MaskRCNN
from .mask_rcnn_resnet import MaskRCNNResNet
from .mask_rcnn_train_chain import MaskRCNNTrainChain
from .mask_rcnn_vgg import MaskRCNNVGG16
from .mask_rcnn_vgg import VGG16RoIHead
| 24.9
| 53
| 0.851406
| 35
| 249
| 5.771429
| 0.457143
| 0.19802
| 0.29703
| 0.148515
| 0.207921
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.116466
| 249
| 9
| 54
| 27.666667
| 0.895455
| 0.048193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f0775b42f648ccb7825b270c0f225f1e60552fb3
| 88
|
py
|
Python
|
nenupytv/read/__init__.py
|
AlanLoh/nenupy-tv
|
9c33652521293eaba726f02fdb2331ae32dda6f6
|
[
"MIT"
] | null | null | null |
nenupytv/read/__init__.py
|
AlanLoh/nenupy-tv
|
9c33652521293eaba726f02fdb2331ae32dda6f6
|
[
"MIT"
] | 14
|
2019-11-12T09:48:00.000Z
|
2020-02-28T17:02:54.000Z
|
nenupytv/read/__init__.py
|
AlanLoh/nenupy-tv
|
9c33652521293eaba726f02fdb2331ae32dda6f6
|
[
"MIT"
] | 1
|
2020-09-09T17:40:58.000Z
|
2020-09-09T17:40:58.000Z
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
from .crosslets import *
from .xst import *
| 17.6
| 24
| 0.625
| 12
| 88
| 4.583333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027397
| 0.170455
| 88
| 5
| 25
| 17.6
| 0.726027
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b2d73ac5e7efaf8a0793a7aab2743b04385cb540
| 288
|
py
|
Python
|
wagtail-{{ cookiecutter.project_name_kebab }}/wagtail_{{ cookiecutter.project_name_snake }}/apps.py
|
lb-/cookiecutter-wagtail-package
|
245f5be9ccdf5230f55a747755939ca049f5e607
|
[
"MIT"
] | 3
|
2020-11-16T17:39:49.000Z
|
2021-02-25T23:32:33.000Z
|
wagtail-{{ cookiecutter.project_name_kebab }}/wagtail_{{ cookiecutter.project_name_snake }}/apps.py
|
lb-/cookiecutter-wagtail-package
|
245f5be9ccdf5230f55a747755939ca049f5e607
|
[
"MIT"
] | 8
|
2021-11-02T12:43:58.000Z
|
2022-03-27T21:48:41.000Z
|
wagtail-{{ cookiecutter.project_name_kebab }}/wagtail_{{ cookiecutter.project_name_snake }}/apps.py
|
kaedroho/cookiecutter-wagtail-plugin
|
dabe4cf807c00d7ea683d215c1a9b8e637b8bbd6
|
[
"MIT"
] | 1
|
2022-02-21T22:56:44.000Z
|
2022-02-21T22:56:44.000Z
|
from django.apps import AppConfig
class Wagtail{{ cookiecutter.project_name_camel }}AppConfig(AppConfig):
label = "wagtail_{{ cookiecutter.project_name_snake }}"
name = "wagtail_{{ cookiecutter.project_name_snake }}"
verbose_name = "Wagtail {{ cookiecutter.project_name }}"
| 36
| 71
| 0.753472
| 31
| 288
| 6.677419
| 0.451613
| 0.36715
| 0.502415
| 0.57971
| 0.521739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131944
| 288
| 7
| 72
| 41.142857
| 0.828
| 0
| 0
| 0
| 0
| 0
| 0.447917
| 0.302083
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.2
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b2e197e1a6ddebcc69a39d744174fc3999f89c45
| 55
|
py
|
Python
|
rfvision/models/pose_estimators/articulation/datasets/__init__.py
|
tycoer/rfvision-1
|
db6e28746d8251d1f394544c32b9e0af388d9964
|
[
"Apache-2.0"
] | null | null | null |
rfvision/models/pose_estimators/articulation/datasets/__init__.py
|
tycoer/rfvision-1
|
db6e28746d8251d1f394544c32b9e0af388d9964
|
[
"Apache-2.0"
] | null | null | null |
rfvision/models/pose_estimators/articulation/datasets/__init__.py
|
tycoer/rfvision-1
|
db6e28746d8251d1f394544c32b9e0af388d9964
|
[
"Apache-2.0"
] | null | null | null |
from .articulation_dataset import ArticulationDataset
| 18.333333
| 53
| 0.890909
| 5
| 55
| 9.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 55
| 2
| 54
| 27.5
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b2e3ebd060fed4c6cd4740f9647ee93584f3dc88
| 250
|
py
|
Python
|
pyroll/core/roll_pass/base_plugins/strain_rate.py
|
pyroll-project/pyroll-core
|
f59094d58c2f7493ddc6345b3afc4700ca259681
|
[
"BSD-3-Clause"
] | null | null | null |
pyroll/core/roll_pass/base_plugins/strain_rate.py
|
pyroll-project/pyroll-core
|
f59094d58c2f7493ddc6345b3afc4700ca259681
|
[
"BSD-3-Clause"
] | null | null | null |
pyroll/core/roll_pass/base_plugins/strain_rate.py
|
pyroll-project/pyroll-core
|
f59094d58c2f7493ddc6345b3afc4700ca259681
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
from ..roll_pass import RollPass
@RollPass.hookimpl
def strain_rate(roll_pass: RollPass):
return roll_pass.velocity / roll_pass.roll.contact_length * roll_pass.strain_change
RollPass.plugin_manager.register(sys.modules[__name__])
| 20.833333
| 87
| 0.812
| 35
| 250
| 5.428571
| 0.571429
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104
| 250
| 11
| 88
| 22.727273
| 0.848214
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.833333
| 0.333333
| 0.166667
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
|
0
| 5
|
b2f197adcb4655b8bc8f9cabd0ad4da99e3b49dc
| 203
|
py
|
Python
|
watchmate_v2/app/api/throttling.py
|
rroy11705/Rest_API_With_Django
|
6a75db2e2c3913ec9afc1cbfef67a5c9fd655e60
|
[
"CNRI-Python"
] | 6
|
2021-08-04T06:10:03.000Z
|
2022-03-18T03:00:39.000Z
|
watchmate_v2/app/api/throttling.py
|
rroy11705/Rest_API_With_Django
|
6a75db2e2c3913ec9afc1cbfef67a5c9fd655e60
|
[
"CNRI-Python"
] | 1
|
2022-02-22T03:30:50.000Z
|
2022-03-09T14:33:00.000Z
|
watchmate_v2/app/api/throttling.py
|
rroy11705/Rest_API_With_Django
|
6a75db2e2c3913ec9afc1cbfef67a5c9fd655e60
|
[
"CNRI-Python"
] | 3
|
2021-06-14T15:23:19.000Z
|
2021-12-20T18:50:21.000Z
|
from rest_framework.throttling import UserRateThrottle
class ReviewCreateThrottle(UserRateThrottle):
scope = 'review-create'
class ReviewListThrottle(UserRateThrottle):
scope = 'review-list'
| 20.3
| 54
| 0.79803
| 18
| 203
| 8.944444
| 0.722222
| 0.26087
| 0.335404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128079
| 203
| 9
| 55
| 22.555556
| 0.909605
| 0
| 0
| 0
| 0
| 0
| 0.118227
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
65443f0e7c5843c1d5ba013eda677bc4cc5765ab
| 351
|
py
|
Python
|
novice/03-04/latihan/test2.py
|
anisarizqi/praxis-academy
|
8db4d61b60d05c8e877711b4210bfe743f308f44
|
[
"MIT"
] | null | null | null |
novice/03-04/latihan/test2.py
|
anisarizqi/praxis-academy
|
8db4d61b60d05c8e877711b4210bfe743f308f44
|
[
"MIT"
] | null | null | null |
novice/03-04/latihan/test2.py
|
anisarizqi/praxis-academy
|
8db4d61b60d05c8e877711b4210bfe743f308f44
|
[
"MIT"
] | null | null | null |
import pytest
@pytest.fixture(params=[0, 1], ids=["spam", "ham"])
def a(request):
return request.param
def test_a(a):
pass
def idfn(fixture_value):
if fixture_value == 0:
return "eggs"
else:
return None
@pytest.fixture(params=[0, 1], ids=idfn)
def b(request):
return request.param
def test_b(b):
pass
| 13
| 51
| 0.618234
| 52
| 351
| 4.096154
| 0.442308
| 0.122066
| 0.178404
| 0.187793
| 0.525822
| 0.525822
| 0
| 0
| 0
| 0
| 0
| 0.018727
| 0.239316
| 351
| 26
| 52
| 13.5
| 0.779026
| 0
| 0
| 0.25
| 0
| 0
| 0.031339
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3125
| false
| 0.125
| 0.0625
| 0.125
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
331ca59b81f1f82a16e1451a1935c09faf0ce7e9
| 109
|
py
|
Python
|
microclim/__init__.py
|
trenchproject/microclim-api
|
b184ff562bb0289ceab39d295c55a3a2915da5b2
|
[
"MIT"
] | null | null | null |
microclim/__init__.py
|
trenchproject/microclim-api
|
b184ff562bb0289ceab39d295c55a3a2915da5b2
|
[
"MIT"
] | null | null | null |
microclim/__init__.py
|
trenchproject/microclim-api
|
b184ff562bb0289ceab39d295c55a3a2915da5b2
|
[
"MIT"
] | 1
|
2020-12-11T03:57:59.000Z
|
2020-12-11T03:57:59.000Z
|
#!/usr/bin/env python
# Copyright 2017 Aji John
from .api import *
| 27.25
| 62
| 0.440367
| 11
| 109
| 4.363636
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 0.486239
| 109
| 3
| 63
| 36.333333
| 0.785714
| 0.779817
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
331ed09bc45e5fdeb4516e37c5c87ab8122590c0
| 69
|
py
|
Python
|
Prac/p3.py
|
SaicharanKandukuri/snippets-python-from-scrach
|
b0823fde3cf1a88bf43d97bdc542de7e32c76dac
|
[
"MIT"
] | 1
|
2021-05-29T03:09:24.000Z
|
2021-05-29T03:09:24.000Z
|
Prac/p3.py
|
SaicharanKandukuri/snippets-python-from-scrach
|
b0823fde3cf1a88bf43d97bdc542de7e32c76dac
|
[
"MIT"
] | null | null | null |
Prac/p3.py
|
SaicharanKandukuri/snippets-python-from-scrach
|
b0823fde3cf1a88bf43d97bdc542de7e32c76dac
|
[
"MIT"
] | null | null | null |
aTuple = ("Orange", [10, 20, 30], (5, 15, 25))
print(aTuple[1][1])
| 13.8
| 46
| 0.521739
| 12
| 69
| 3
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22807
| 0.173913
| 69
| 4
| 47
| 17.25
| 0.403509
| 0
| 0
| 0
| 0
| 0
| 0.089552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
3336c60fd6a2a4b52c40719cd9457bce593cd563
| 42,769
|
py
|
Python
|
rigid_body_motion/reference_frames.py
|
phausamann/rigid-body-motion
|
2d4fbb1b949cc0b609a59877d7539af75dad6861
|
[
"MIT"
] | 8
|
2021-05-20T02:24:07.000Z
|
2022-03-05T17:15:11.000Z
|
rigid_body_motion/reference_frames.py
|
phausamann/rigid-body-motion
|
2d4fbb1b949cc0b609a59877d7539af75dad6861
|
[
"MIT"
] | 10
|
2019-06-13T09:36:15.000Z
|
2022-01-17T16:55:05.000Z
|
rigid_body_motion/reference_frames.py
|
phausamann/rigid-body-motion
|
2d4fbb1b949cc0b609a59877d7539af75dad6861
|
[
"MIT"
] | 1
|
2021-08-13T10:24:31.000Z
|
2021-08-13T10:24:31.000Z
|
""""""
import numpy as np
from anytree import NodeMixin, RenderTree, Walker
from quaternion import as_float_array, as_quat_array, from_rotation_matrix
from rigid_body_motion.core import (
TransformMatcher,
_estimate_angular_velocity,
_estimate_linear_velocity,
_resolve_rf,
)
from rigid_body_motion.utils import qinv, rotate_vectors
_registry = {}
def _register(rf, update=False):
""" Register a reference frame. """
if rf.name is None:
raise ValueError("Reference frame name cannot be None.")
if rf.name in _registry:
if update:
# TODO keep children?
_registry[rf.name].parent = None
else:
raise ValueError(
f"Reference frame with name {rf.name} is already registered. "
f"Specify update=True to overwrite."
)
# TODO check if name is a cs transform?
_registry[rf.name] = rf
def _deregister(name):
""" Deregister a reference frame. """
if name not in _registry:
raise ValueError(
"Reference frame with name " + name + " not found in registry"
)
_registry.pop(name)
def render_tree(root):
""" Render a reference frame tree.
Parameters
----------
root: str or ReferenceFrame
The root of the rendered tree.
"""
for pre, _, node in RenderTree(_resolve_rf(root)):
print(f"{pre}{node.name}")
def register_frame(
name,
parent=None,
translation=None,
rotation=None,
timestamps=None,
inverse=False,
discrete=False,
update=False,
):
""" Register a new reference frame in the registry.
Parameters
----------
name: str
The name of the reference frame.
parent: str or ReferenceFrame, optional
The parent reference frame. If str, the frame will be looked up
in the registry under that name. If not specified, this frame
will be a root node of a new reference frame tree.
translation: array_like, optional
The translation of this frame wrt the parent frame. Not
applicable if there is no parent frame.
rotation: array_like, optional
The rotation of this frame wrt the parent frame. Not
applicable if there is no parent frame.
timestamps: array_like, optional
The timestamps for translation and rotation of this frame. Not
applicable if this is a static reference frame.
inverse: bool, default False
If True, invert the transform wrt the parent frame, i.e. the
translation and rotation are specified for the parent frame wrt this
frame.
discrete: bool, default False
If True, transformations with timestamps are assumed to be events.
Instead of interpolating between timestamps, transformations are
fixed between their timestamp and the next one.
update: bool, default False
If True, overwrite if there is a frame with the same name in the
registry.
"""
# TODO make this a class with __call__, from_dataset etc. methods?
rf = ReferenceFrame(
name,
parent=parent,
translation=translation,
rotation=rotation,
timestamps=timestamps,
inverse=inverse,
discrete=discrete,
)
_register(rf, update=update)
def deregister_frame(name):
""" Remove a reference frame from the registry.
Parameters
----------
name: str
The name of the reference frame.
"""
_deregister(name)
def clear_registry():
""" Clear the reference frame registry. """
_registry.clear()
class ReferenceFrame(NodeMixin):
""" A three-dimensional reference frame. """
def __init__(
self,
name=None,
parent=None,
translation=None,
rotation=None,
timestamps=None,
inverse=False,
discrete=False,
):
""" Constructor.
Parameters
----------
name: str, optional
The name of this reference frame.
parent: str or ReferenceFrame, optional
The parent reference frame. If str, the frame will be looked up
in the registry under that name. If not specified, this frame
will be a root node of a new reference frame tree.
translation: array_like, optional
The translation of this frame wrt the parent frame. Not
applicable if there is no parent frame.
rotation: array_like, optional
The rotation of this frame wrt the parent frame. Not
applicable if there is no parent frame.
timestamps: array_like, optional
The timestamps for translation and rotation of this frame. Not
applicable if this is a static reference frame.
inverse: bool, default False
If True, invert the transform wrt the parent frame, i.e. the
translation and rotation are specified for the parent frame wrt
this frame.
discrete: bool, default False
If True, transformations with timestamps are assumed to be events.
Instead of interpolating between timestamps, transformations are
fixed between their timestamp and the next one.
"""
super(ReferenceFrame, self).__init__()
# TODO check name requirement
self.name = name
if parent is not None:
self.parent = _resolve_rf(parent)
(
self.translation,
self.rotation,
self.timestamps,
) = self._init_arrays(translation, rotation, timestamps, inverse)
else:
self.parent = None
self._verify_root(translation, rotation, timestamps)
self.translation, self.rotation, self.timestamps = None, None, None
if discrete and self.timestamps is None:
raise ValueError("timestamps must be provided when discrete=True")
else:
self.discrete = discrete
def __del__(self):
""" Destructor. """
if self.name in _registry and _registry[self.name] is self:
_deregister(self.name)
def __str__(self):
""" String representation. """
return f"<ReferenceFrame '{self.name}'>"
def __repr__(self):
""" String representation. """
return self.__str__()
@staticmethod
def _init_arrays(translation, rotation, timestamps, inverse):
""" Initialize translation, rotation and timestamp arrays. """
if timestamps is not None:
timestamps = np.asarray(timestamps)
if timestamps.ndim != 1:
raise ValueError("timestamps must be one-dimensional.")
t_shape = (len(timestamps), 3)
r_shape = (len(timestamps), 4)
else:
t_shape = (3,)
r_shape = (4,)
if translation is not None:
translation = np.asarray(translation)
if translation.shape != t_shape:
raise ValueError(
f"Expected translation to be of shape {t_shape}, got "
f"{translation.shape}"
)
else:
translation = np.zeros(t_shape)
if rotation is not None:
rotation = np.asarray(rotation)
if rotation.shape != r_shape:
raise ValueError(
f"Expected rotation to be of shape {r_shape}, got "
f"{rotation.shape}"
)
else:
rotation = np.zeros(r_shape)
rotation[..., 0] = 1.0
if inverse:
rotation = qinv(rotation)
translation = -rotate_vectors(rotation, translation)
return translation, rotation, timestamps
@staticmethod
def _verify_root(translation, rotation, timestamps):
""" Verify arguments for root node. """
# TODO test
if translation is not None:
raise ValueError("translation specified without parent frame.")
if rotation is not None:
raise ValueError("rotation specified without parent frame.")
if timestamps is not None:
raise ValueError("timestamps specified without parent frame.")
@classmethod
def _validate_input(cls, arr, axis, n_axis, timestamps, time_axis):
""" Validate shape of array and timestamps. """
# TODO process DataArray (dim=str, timestamps=str)
arr = np.asarray(arr)
if arr.shape[axis] != n_axis:
raise ValueError(
f"Expected array to have length {n_axis} along axis {axis}, "
f"got {arr.shape[axis]}"
)
if timestamps is not None:
timestamps = np.asarray(timestamps)
if timestamps.ndim != 1:
raise ValueError("timestamps must be one-dimensional")
if arr.shape[time_axis] != len(timestamps):
raise ValueError(
f"Axis {time_axis} of the array must have the same length "
f"as the timestamps"
)
# TODO this should be done somewhere else
arr = np.swapaxes(arr, 0, time_axis)
return arr, timestamps
@classmethod
def _expand_singleton_axes(cls, t_or_r, ndim):
""" Expand singleton axes for correct broadcasting with array. """
if t_or_r.ndim > 1:
for _ in range(ndim - 2):
t_or_r = np.expand_dims(t_or_r, 1)
return t_or_r
@classmethod
def _match_arrays(cls, arrays, timestamps=None):
""" Match multiple arrays with timestamps. """
matcher = TransformMatcher()
for array in arrays:
matcher.add_array(*array)
return matcher.get_arrays(timestamps)
def _walk(self, to_rf):
""" Walk from this frame to a target frame along the tree. """
to_rf = _resolve_rf(to_rf)
walker = Walker()
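        # anytree's Walker returns (upwards, common, downwards): the chain of
        # frames from this node up to the common ancestor, and from there down
        # to the target frame.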
up, _, down = walker.walk(self, to_rf)
return up, down
def _get_matcher(self, to_frame, arrays=None):
""" Get a TransformMatcher from this frame to another. """
up, down = self._walk(to_frame)
matcher = TransformMatcher()
for rf in up:
matcher.add_reference_frame(rf)
for rf in down:
matcher.add_reference_frame(rf, inverse=True)
if arrays is not None:
for array in arrays:
matcher.add_array(*array)
return matcher
@classmethod
def from_dataset(
cls,
ds,
translation,
rotation,
timestamps,
parent,
name=None,
inverse=False,
discrete=False,
):
""" Construct a reference frame from a Dataset.
Parameters
----------
ds: xarray Dataset
The dataset from which to construct the reference frame.
translation: str
The name of the variable representing the translation
wrt the parent frame.
rotation: str
The name of the variable representing the rotation
wrt the parent frame.
timestamps: str
The name of the variable or coordinate representing the
timestamps.
parent: str or ReferenceFrame
The parent reference frame. If str, the frame will be looked up
in the registry under that name.
name: str, default None
The name of the reference frame.
inverse: bool, default False
If True, invert the transform wrt the parent frame, i.e. the
translation and rotation are specified for the parent frame wrt
this frame.
discrete: bool, default False
If True, transformations with timestamps are assumed to be events.
Instead of interpolating between timestamps, transformations are
fixed between their timestamp and the next one.
Returns
-------
rf: ReferenceFrame
The constructed reference frame.
"""
# TODO raise errors here if dimensions etc. don't match
return cls(
name,
parent,
ds[translation].data,
ds[rotation].data,
ds[timestamps].data,
inverse=inverse,
discrete=discrete,
)
@classmethod
def from_translation_dataarray(
cls, da, timestamps, parent, name=None, inverse=False, discrete=False,
):
""" Construct a reference frame from a translation DataArray.
Parameters
----------
da: xarray DataArray
The array that describes the translation of this frame
wrt the parent frame.
timestamps: str
The name of the variable or coordinate representing the
timestamps.
parent: str or ReferenceFrame
The parent reference frame. If str, the frame will be looked up
in the registry under that name.
name: str, default None
The name of the reference frame.
inverse: bool, default False
If True, invert the transform wrt the parent frame, i.e. the
translation is specified for the parent frame wrt this frame.
discrete: bool, default False
If True, transformations with timestamps are assumed to be events.
Instead of interpolating between timestamps, transformations are
fixed between their timestamp and the next one.
Returns
-------
rf: ReferenceFrame
The constructed reference frame.
"""
# TODO raise errors here if dimensions etc. don't match
return cls(
name,
parent,
translation=da.data,
timestamps=da[timestamps].data,
inverse=inverse,
discrete=discrete,
)
@classmethod
def from_rotation_dataarray(
cls, da, timestamps, parent, name=None, inverse=False, discrete=False,
):
""" Construct a reference frame from a rotation DataArray.
Parameters
----------
da: xarray DataArray
The array that describes the rotation of this frame
wrt the parent frame.
timestamps: str
The name of the variable or coordinate representing the
timestamps.
parent: str or ReferenceFrame
The parent reference frame. If str, the frame will be looked up
in the registry under that name.
name: str, default None
The name of the reference frame.
inverse: bool, default False
If True, invert the transform wrt the parent frame, i.e. the
rotation is specified for the parent frame wrt this frame.
discrete: bool, default False
If True, transformations with timestamps are assumed to be events.
Instead of interpolating between timestamps, transformations are
fixed between their timestamp and the next one.
Returns
-------
rf: ReferenceFrame
The constructed reference frame.
"""
# TODO raise errors here if dimensions etc. don't match
return cls(
name,
parent,
rotation=da.data,
timestamps=da[timestamps].data,
inverse=inverse,
discrete=discrete,
)
@classmethod
def from_rotation_matrix(cls, mat, parent, name=None, inverse=False):
""" Construct a static reference frame from a rotation matrix.
Parameters
----------
mat: array_like, shape (3, 3)
The rotation matrix that describes the rotation of this frame
wrt the parent frame.
parent: str or ReferenceFrame
The parent reference frame. If str, the frame will be looked up
in the registry under that name.
name: str, default None
The name of the reference frame.
inverse: bool, default False
If True, invert the transform wrt the parent frame, i.e. the
rotation is specified for the parent frame wrt this frame.
Returns
-------
rf: ReferenceFrame
The constructed reference frame.
"""
# TODO support moving reference frame
if mat.shape != (3, 3):
raise ValueError(
f"Expected mat to have shape (3, 3), got {mat.shape}"
)
return cls(
name,
parent,
rotation=as_float_array(from_rotation_matrix(mat)),
inverse=inverse,
)
def get_transformation(self, to_frame):
""" Alias for lookup_transform.
See Also
--------
ReferenceFrame.lookup_transform
"""
import warnings
warnings.warn(
DeprecationWarning(
"get_transformation is deprecated, use lookup_transform "
"instead."
)
)
return self.lookup_transform(to_frame)
def lookup_transform(self, to_frame):
""" Look up the transformation from this frame to another.
Parameters
----------
to_frame: str or ReferenceFrame
The target reference frame. If str, the frame will be looked up
in the registry under that name.
Returns
-------
t: array_like, shape (3,) or (n_timestamps, 3)
The translation from this frame to the target frame.
r: array_like, shape (4,) or (n_timestamps, 4)
The rotation from this frame to the target frame.
ts: array_like, shape (n_timestamps,) or None
The timestamps for which the transformation is defined.
See Also
--------
lookup_transform
"""
matcher = self._get_matcher(to_frame)
return matcher.get_transformation()
def transform_vectors(
self,
arr,
to_frame,
axis=-1,
time_axis=0,
timestamps=None,
return_timestamps=False,
):
""" Transform array of vectors from this frame to another.
Parameters
----------
arr: array_like
The array to transform.
to_frame: str or ReferenceFrame
The target reference frame. If str, the frame will be looked up
in the registry under that name.
axis: int, default -1
The axis of the array representing the spatial coordinates of the
vectors.
time_axis: int, default 0
The axis of the array representing the timestamps of the vectors.
timestamps: array_like, optional
The timestamps of the vectors, corresponding to the `time_axis`
of the array. If not None, the axis defined by `time_axis` will be
re-sampled to the timestamps for which the transformation is
defined.
return_timestamps: bool, default False
If True, also return the timestamps after the transformation.
Returns
-------
arr_transformed: array_like
The transformed array.
ts: array_like, shape (n_timestamps,) or None
The timestamps after the transformation.
"""
arr, arr_ts = self._validate_input(arr, axis, 3, timestamps, time_axis)
matcher = self._get_matcher(to_frame, arrays=[(arr, arr_ts)])
t, r, ts = matcher.get_transformation()
arr, _ = matcher.get_arrays(ts)
r = self._expand_singleton_axes(r, arr.ndim)
arr = rotate_vectors(r, arr, axis=axis)
# undo time axis swap
if time_axis is not None:
arr = np.swapaxes(arr, 0, time_axis)
if not return_timestamps:
return arr
else:
return arr, ts
def transform_points(
self,
arr,
to_frame,
axis=-1,
time_axis=0,
timestamps=None,
return_timestamps=False,
):
""" Transform array of points from this frame to another.
Parameters
----------
arr: array_like
The array to transform.
to_frame: str or ReferenceFrame
The target reference frame. If str, the frame will be looked up
in the registry under that name.
axis: int, default -1
The axis of the array representing the spatial coordinates of the
points.
time_axis: int, default 0
The axis of the array representing the timestamps of the points.
timestamps: array_like, optional
The timestamps of the vectors, corresponding to the `time_axis`
of the array. If not None, the axis defined by `time_axis` will be
re-sampled to the timestamps for which the transformation is
defined.
return_timestamps: bool, default False
If True, also return the timestamps after the transformation.
Returns
-------
arr_transformed: array_like
The transformed array.
ts: array_like, shape (n_timestamps,) or None
The timestamps after the transformation.
"""
arr, arr_ts = self._validate_input(arr, axis, 3, timestamps, time_axis)
matcher = self._get_matcher(to_frame, arrays=[(arr, arr_ts)])
t, r, ts = matcher.get_transformation()
arr, _ = matcher.get_arrays(ts)
t = self._expand_singleton_axes(t, arr.ndim)
r = self._expand_singleton_axes(r, arr.ndim)
arr = rotate_vectors(r, arr, axis=axis)
arr = arr + np.array(t)
# undo time axis swap
if time_axis is not None:
arr = np.swapaxes(arr, 0, time_axis)
if not return_timestamps:
return arr
else:
return arr, ts
def transform_quaternions(
self,
arr,
to_frame,
axis=-1,
time_axis=0,
timestamps=None,
return_timestamps=False,
):
""" Transform array of quaternions from this frame to another.
Parameters
----------
arr: array_like
The array to transform.
to_frame: str or ReferenceFrame
The target reference frame. If str, the frame will be looked up
in the registry under that name.
axis: int, default -1
The axis of the array representing the spatial coordinates of the
quaternions.
time_axis: int, default 0
The axis of the array representing the timestamps of the
quaternions.
timestamps: array_like, optional
The timestamps of the quaternions, corresponding to the `time_axis`
of the array. If not None, the axis defined by `time_axis` will be
re-sampled to the timestamps for which the transformation is
defined.
return_timestamps: bool, default False
If True, also return the timestamps after the transformation.
Returns
-------
arr_transformed: array_like
The transformed array.
ts: array_like, shape (n_timestamps,) or None
The timestamps after the transformation.
"""
arr, arr_ts = self._validate_input(arr, axis, 4, timestamps, time_axis)
matcher = self._get_matcher(to_frame, arrays=[(arr, arr_ts)])
t, r, ts = matcher.get_transformation()
arr, _ = matcher.get_arrays(ts)
r = self._expand_singleton_axes(r, arr.ndim)
arr = np.swapaxes(arr, axis, -1)
arr = as_quat_array(r) * as_quat_array(arr)
arr = np.swapaxes(as_float_array(arr), -1, axis)
# undo time axis swap
if time_axis is not None:
arr = np.swapaxes(arr, 0, time_axis)
if not return_timestamps:
return arr
else:
return arr, ts
def transform_angular_velocity(
self,
arr,
to_frame,
what="reference_frame",
axis=-1,
time_axis=0,
timestamps=None,
return_timestamps=False,
cutoff=None,
):
""" Transform array of angular velocities from this frame to another.
Parameters
----------
arr: array_like
The array to transform.
to_frame: str or ReferenceFrame
The target reference frame. If str, the frame will be looked up
in the registry under that name.
what: str, default "reference_frame"
What frame of the velocity to transform. Can be "reference_frame",
"moving_frame" or "representation_frame".
axis: int, default -1
The axis of the array representing the spatial coordinates of the
velocities.
time_axis: int, default 0
The axis of the array representing the timestamps of the
velocities.
timestamps: array_like, optional
The timestamps of the velocities, corresponding to the `time_axis`
of the array. If not None, the axis defined by `time_axis` will be
re-sampled to the timestamps for which the transformation is
defined.
return_timestamps: bool, default False
If True, also return the timestamps after the transformation.
cutoff: float, optional
Frequency of a low-pass filter applied to linear and angular
velocity after the twist estimation as a fraction of the Nyquist
frequency.
Returns
-------
arr_transformed: array_like
The transformed array.
ts: array_like, shape (n_timestamps,) or None
The timestamps after the transformation.
See Also
--------
transform_angular_velocity
"""
if what == "reference_frame":
angular, angular_ts = self.lookup_angular_velocity(
to_frame,
to_frame,
cutoff=cutoff,
allow_static=True,
return_timestamps=True,
)
elif what == "moving_frame":
angular, angular_ts = _resolve_rf(
to_frame
).lookup_angular_velocity(
self,
to_frame,
cutoff=cutoff,
allow_static=True,
return_timestamps=True,
)
elif what == "representation_frame":
return self.transform_vectors(
arr,
to_frame,
axis=axis,
time_axis=time_axis,
timestamps=timestamps,
return_timestamps=return_timestamps,
)
else:
raise ValueError(
f"Expected 'what' to be 'reference_frame', 'moving_frame' or "
f"'representation_frame', got {what}"
)
arr, ts = self.transform_vectors(
arr,
to_frame,
axis=axis,
time_axis=time_axis,
timestamps=timestamps,
return_timestamps=True,
)
arr, angular, ts_out = self._match_arrays(
[(arr, ts), (angular, angular_ts)]
)
arr += angular
if return_timestamps:
return arr, ts_out
else:
return arr
def transform_linear_velocity(
self,
arr,
to_frame,
what="reference_frame",
moving_frame=None,
reference_frame=None,
axis=-1,
time_axis=0,
timestamps=None,
return_timestamps=False,
outlier_thresh=None,
cutoff=None,
):
""" Transform array of linear velocities from this frame to another.
Parameters
----------
arr: array_like
The array to transform.
to_frame: str or ReferenceFrame
The target reference frame. If str, the frame will be looked up
in the registry under that name.
what: str, default "reference_frame"
What frame of the velocity to transform. Can be "reference_frame",
"moving_frame" or "representation_frame".
moving_frame: str or ReferenceFrame, optional
The moving frame when transforming the reference frame of the
velocity.
reference_frame: str or ReferenceFrame, optional
The reference frame when transforming the moving frame of the
velocity.
axis: int, default -1
The axis of the array representing the spatial coordinates of the
velocities.
time_axis: int, default 0
The axis of the array representing the timestamps of the
velocities.
timestamps: array_like, optional
The timestamps of the velocities, corresponding to the `time_axis`
of the array. If not None, the axis defined by `time_axis` will be
re-sampled to the timestamps for which the transformation is
defined.
return_timestamps: bool, default False
If True, also return the timestamps after the transformation.
cutoff: float, optional
Frequency of a low-pass filter applied to linear and angular
velocity after the twist estimation as a fraction of the Nyquist
frequency.
outlier_thresh: float, optional
Suppress outliers by throwing out samples where the
norm of the second-order differences of the position is above
`outlier_thresh` and interpolating the missing values.
Returns
-------
arr_transformed: array_like
The transformed array.
ts: array_like, shape (n_timestamps,) or None
The timestamps after the transformation.
See Also
--------
transform_linear_velocity
"""
if what == "reference_frame":
linear, angular, linear_ts = self.lookup_twist(
to_frame,
to_frame,
cutoff=cutoff,
outlier_thresh=outlier_thresh,
allow_static=True,
return_timestamps=True,
)
angular_ts = linear_ts
translation, _, translation_ts = _resolve_rf(
moving_frame
).lookup_transform(self)
elif what == "moving_frame":
to_frame = _resolve_rf(to_frame)
linear, linear_ts = to_frame.lookup_linear_velocity(
self,
to_frame,
cutoff=cutoff,
outlier_thresh=outlier_thresh,
allow_static=True,
return_timestamps=True,
)
angular, angular_ts = self.lookup_angular_velocity(
reference_frame,
to_frame,
cutoff=cutoff,
allow_static=True,
return_timestamps=True,
)
translation, _, translation_ts = to_frame.lookup_transform(self)
elif what == "representation_frame":
return self.transform_vectors(
arr,
to_frame,
axis=axis,
time_axis=time_axis,
timestamps=timestamps,
return_timestamps=return_timestamps,
)
else:
raise ValueError(
f"Expected 'what' to be 'reference_frame', 'moving_frame' or "
f"'representation_frame', got {what}"
)
arr, ts = self.transform_vectors(
arr,
to_frame,
axis=axis,
time_axis=time_axis,
timestamps=timestamps,
return_timestamps=True,
)
translation, translation_ts = self.transform_vectors(
translation,
to_frame,
timestamps=translation_ts,
return_timestamps=True,
)
arr, linear, angular, translation, ts_out = self._match_arrays(
[
(arr, ts),
(linear, linear_ts),
(angular, angular_ts),
(translation, translation_ts),
]
)
arr = arr + linear + np.cross(angular, translation)
if return_timestamps:
return arr, ts_out
else:
return arr
def lookup_twist(
self,
reference=None,
represent_in=None,
outlier_thresh=None,
cutoff=None,
mode="quaternion",
allow_static=False,
return_timestamps=False,
):
""" Estimate linear and angular velocity of this frame wrt a reference.
Parameters
----------
reference: str or ReferenceFrame, optional
The reference frame wrt which the twist is estimated. Defaults to
the parent frame.
represent_in: str or ReferenceFrame, optional
The reference frame in which the twist is represented. Defaults
to the parent frame.
outlier_thresh: float, optional
Suppress outliers by throwing out samples where the
norm of the second-order differences of the position is above
`outlier_thresh` and interpolating the missing values.
cutoff: float, optional
Frequency of a low-pass filter applied to linear and angular
velocity after the estimation as a fraction of the Nyquist
frequency.
mode: str, default "quaternion"
If "quaternion", compute the angular velocity from the quaternion
derivative. If "rotation_vector", compute the angular velocity from
the gradient of the axis-angle representation of the rotations.
allow_static: bool, default False
If True, return a zero velocity vector and None for timestamps if
the transform between this frame and the reference frame is static.
Otherwise, a `ValueError` will be raised.
return_timestamps: bool, default False
If True, also return the timestamps of the lookup.
Returns
-------
linear: numpy.ndarray, shape (N, 3)
Linear velocity of moving frame wrt reference frame, represented
in representation frame.
angular: numpy.ndarray, shape (N, 3)
Angular velocity of moving frame wrt reference frame, represented
in representation frame.
        timestamps: numpy.ndarray
Timestamps of the twist.
"""
try:
reference = _resolve_rf(reference or self.parent)
represent_in = _resolve_rf(represent_in or self.parent)
except TypeError:
raise ValueError(f"Frame {self.name} has no parent frame")
translation, rotation, timestamps = self.lookup_transform(reference)
if timestamps is None:
if allow_static:
return np.zeros(3), np.zeros(3), None
else:
raise ValueError(
"Twist cannot be estimated for static transforms"
)
linear = _estimate_linear_velocity(
translation,
timestamps,
outlier_thresh=outlier_thresh,
cutoff=cutoff,
)
angular = _estimate_angular_velocity(
rotation, timestamps, cutoff=cutoff, mode=mode
)
# linear velocity is represented in reference frame after estimation
linear, linear_ts = reference.transform_vectors(
linear, represent_in, timestamps=timestamps, return_timestamps=True
)
# angular velocity is represented in moving frame after estimation
angular, angular_ts = self.transform_vectors(
angular,
represent_in,
timestamps=timestamps,
return_timestamps=True,
)
angular, linear, twist_ts = self._match_arrays(
[(angular, angular_ts), (linear, linear_ts)],
)
if return_timestamps:
return linear, angular, twist_ts
else:
return linear, angular
def lookup_linear_velocity(
self,
reference=None,
represent_in=None,
outlier_thresh=None,
cutoff=None,
allow_static=False,
return_timestamps=False,
):
""" Estimate linear velocity of this frame wrt a reference.
Parameters
----------
reference: str or ReferenceFrame, optional
The reference frame wrt which the twist is estimated. Defaults to
the parent frame.
represent_in: str or ReferenceFrame, optional
The reference frame in which the twist is represented. Defaults
to the parent frame.
outlier_thresh: float, optional
Suppress outliers by throwing out samples where the
norm of the second-order differences of the position is above
`outlier_thresh` and interpolating the missing values.
cutoff: float, optional
Frequency of a low-pass filter applied to linear and angular
velocity after the estimation as a fraction of the Nyquist
frequency.
allow_static: bool, default False
If True, return a zero velocity vector and None for timestamps if
the transform between this frame and the reference frame is static.
Otherwise, a `ValueError` will be raised.
return_timestamps: bool, default False
If True, also return the timestamps of the lookup.
Returns
-------
linear: numpy.ndarray, shape (N, 3)
Linear velocity of moving frame wrt reference frame, represented
in representation frame.
timestamps: numpy.ndarray
Timestamps of the linear velocity.
"""
try:
reference = _resolve_rf(reference or self.parent)
represent_in = _resolve_rf(represent_in or self.parent)
except TypeError:
raise ValueError(f"Frame {self.name} has no parent frame")
translation, _, timestamps = self.lookup_transform(reference)
if timestamps is None:
if allow_static:
return np.zeros(3), None
else:
raise ValueError(
"Velocity cannot be estimated for static transforms"
)
linear = _estimate_linear_velocity(
translation,
timestamps,
outlier_thresh=outlier_thresh,
cutoff=cutoff,
)
# linear velocity is represented in reference frame after estimation
linear, linear_ts = reference.transform_vectors(
linear, represent_in, timestamps=timestamps, return_timestamps=True
)
if return_timestamps:
return linear, linear_ts
else:
return linear
def lookup_angular_velocity(
self,
reference=None,
represent_in=None,
outlier_thresh=None,
cutoff=None,
mode="quaternion",
allow_static=False,
return_timestamps=False,
):
""" Estimate angular velocity of this frame wrt a reference.
Parameters
----------
reference: str or ReferenceFrame, optional
The reference frame wrt which the twist is estimated. Defaults to
the parent frame.
represent_in: str or ReferenceFrame, optional
The reference frame in which the twist is represented. Defaults
to the parent frame.
outlier_thresh: float, optional
Suppress samples where the norm of the second-order differences of
the rotation is above `outlier_thresh` and interpolate the missing
values.
cutoff: float, optional
Frequency of a low-pass filter applied to linear and angular
velocity after the estimation as a fraction of the Nyquist
frequency.
mode: str, default "quaternion"
If "quaternion", compute the angular velocity from the quaternion
derivative. If "rotation_vector", compute the angular velocity from
the gradient of the axis-angle representation of the rotations.
allow_static: bool, default False
If True, return a zero velocity vector and None for timestamps if
the transform between this frame and the reference frame is static.
Otherwise, a `ValueError` will be raised.
return_timestamps: bool, default False
If True, also return the timestamps of the lookup.
Returns
-------
angular: numpy.ndarray, shape (N, 3)
Angular velocity of moving frame wrt reference frame, represented
in representation frame.
timestamps: numpy.ndarray
Timestamps of the angular velocity.
"""
try:
reference = _resolve_rf(reference or self.parent)
represent_in = _resolve_rf(represent_in or self.parent)
except TypeError:
raise ValueError(f"Frame {self.name} has no parent frame")
_, rotation, timestamps = self.lookup_transform(reference)
if timestamps is None:
if allow_static:
return np.zeros(3), None
else:
raise ValueError(
"Velocity cannot be estimated for static transforms"
)
angular = _estimate_angular_velocity(
rotation,
timestamps,
cutoff=cutoff,
mode=mode,
outlier_thresh=outlier_thresh,
)
# angular velocity is represented in moving frame after estimation
angular, angular_ts = self.transform_vectors(
angular,
represent_in,
timestamps=timestamps,
return_timestamps=True,
)
if return_timestamps:
return angular, angular_ts
else:
return angular
def register(self, update=False):
""" Register this frame in the registry.
Parameters
----------
update: bool, default False
If True, overwrite if there is a frame with the same name in the
registry.
"""
_register(self, update=update)
def deregister(self):
""" Remove this frame from the registry. """
_deregister(self.name)
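A brief usage sketch for the velocity lookups above; the frame names and setup are illustrative assumptions, not taken from this module:
# Hypothetical: "body" and "world" are assumed to be registered
# ReferenceFrame instances with a time-varying transform between them.
body = _resolve_rf("body")
linear, angular, ts = body.lookup_twist(
    reference="world",
    represent_in="world",
    cutoff=0.25,  # low-pass the estimate at 25% of the Nyquist frequency
    return_timestamps=True,
)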
| 32.30287
| 79
| 0.587856
| 4,746
| 42,769
| 5.181205
| 0.06279
| 0.0427
| 0.015372
| 0.017568
| 0.780114
| 0.747377
| 0.730663
| 0.719154
| 0.704961
| 0.701993
| 0
| 0.002117
| 0.3485
| 42,769
| 1,323
| 80
| 32.327286
| 0.880383
| 0.446188
| 0
| 0.593537
| 0
| 0
| 0.072613
| 0.002305
| 0
| 0
| 0
| 0.005291
| 0
| 1
| 0.056122
| false
| 0
| 0.010204
| 0
| 0.127551
| 0.001701
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
68443780aa900373a47f7e048d88c6be3ae45cf4
| 69
|
py
|
Python
|
tools/__init__.py
|
Archie2k16/venus-api
|
96d1a660161670fbbbba7ab34137df9f122738b7
|
[
"WTFPL"
] | null | null | null |
tools/__init__.py
|
Archie2k16/venus-api
|
96d1a660161670fbbbba7ab34137df9f122738b7
|
[
"WTFPL"
] | null | null | null |
tools/__init__.py
|
Archie2k16/venus-api
|
96d1a660161670fbbbba7ab34137df9f122738b7
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
# me@archie.cc
import dotdict
| 17.25
| 22
| 0.724638
| 12
| 69
| 4.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.115942
| 69
| 4
| 23
| 17.25
| 0.803279
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6845cf3cf09b35711110c9740ffeeddd9aeca837
| 4,196
|
py
|
Python
|
DailyProgrammer/DP20171110C.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | 2
|
2020-12-23T18:59:22.000Z
|
2021-04-14T13:16:09.000Z
|
DailyProgrammer/DP20171110C.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
DailyProgrammer/DP20171110C.py
|
DayGitH/Python-Challenges
|
bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf
|
[
"MIT"
] | null | null | null |
"""
[2017-11-10] Challenge #339 [Hard] Severing the Power Grid
https://www.reddit.com/r/dailyprogrammer/comments/7c4bju/20171110_challenge_339_hard_severing_the_power/
# Description
In energy production, the power grid is a large directed graph of energy consumers and producers. At times you need
to cut at certain nodes and trim demand because you cannot supply enough of a load.
In DailyProgrammeropolis, all buildings are connected to the grid and all consume power to varying degrees. Some
generate power because they have installed on-site generation and sell the excess to the grid, some do not.
The scenario you're facing is this: due to a fault with the bulk power generation facility not local to
DailyProgrammeropolis, you must trim the power grid. You have connectivity data, and power consumption and production
data. Your goal with this challenge is to **maximize the number of powered nodes with the generated energy you have**.
Note that when you cut off a node, you run the risk that the downstream ones will lose power, too, if they are no longer
connected. This is how you'll shed demand, by selectively cutting the graph. You can make as many cuts as you want
(there is no restriction on this).
# Input Description
You'll be given an extensive set of data for this challenge. The first set of data looks like this: you'll be given a
single integer on one line telling you how many nodes to read. Then you'll be given those nodes, one per line, with the
node ID, the amount of power it consumes in kWh, then how much the node generates in kWh. Not all nodes produce
electricity, but some do (e.g. a wind farm, solar cells, etc), and there is obviously one that generates the most -
that's your main power plant.
The next set of data is the edge data. The first line is how many edges to read, then the next *N* lines have data
showing how the nodes are connected (e.g. power flows from node a to b).
Example:
3
0 40.926 0.0
1 36.812 1.552
2 1.007 0.0
2
0 1
0 2
# Output Description
Your program should emit a list of edges to sever as a list of (i,j) two-tuples. Multiple answers are possible. You may
wind up with a number of small islands as opposed to one powered network.
# Challenge Input
101
0 1.926 0.0
1 36.812 0.0
2 1.007 0.0
3 6.812 0.0
4 1.589 0.0
5 1.002 0.0
6 1.531 0.0
7 2.810 0.0
8 1.246 0.0
9 5.816 0.0
10 1.167 0.0
11 1.357 0.0
12 1.585 0.0
13 1.117 0.0
14 3.110 1.553
15 2.743 0.0
16 1.282 0.0
17 1.154 0.0
18 1.160 0.0
19 1.253 0.0
20 1.086 0.0
21 1.148 0.0
22 1.357 0.0
23 2.161 0.0
24 1.260 0.0
25 2.241 0.0
26 2.970 0.0
27 6.972 0.0
28 2.443 0.0
29 1.255 0.0
30 1.844 0.0
31 2.503 0.0
32 1.054 0.0
33 1.368 0.0
34 1.011 1.601
35 1.432 0.0
36 1.061 1.452
37 1.432 0.0
38 2.011 0.0
39 1.232 0.0
40 1.767 0.0
41 1.590 0.0
42 2.453 0.0
43 1.972 0.0
44 1.445 0.0
45 1.197 0.0
46 2.497 0.0
47 3.510 0.0
48 12.510 0.0
49 3.237 0.0
50 1.287 0.0
51 1.613 0.0
52 1.776 0.0
53 2.013 0.0
54 1.079 0.0
55 1.345 1.230
56 1.613 0.0
57 2.243 0.0
58 1.209 0.0
59 1.429 0.0
60 7.709 0.0
61 1.282 8.371
62 1.036 0.0
63 1.086 0.0
64 1.087 0.0
65 1.000 0.0
66 1.140 0.0
67 1.210 0.0
68 1.080 0.0
69 1.087 0.0
70 1.399 0.0
71 2.681 0.0
72 1.693 0.0
73 1.266 0.0
74 1.234 0.0
75 2.755 0.0
76 2.173 0.0
77 1.093 0.0
78 1.005 0.0
79 1.420 0.0
80 1.135 0.0
81 1.101 0.0
82 1.187 1.668
83 2.334 0.0
84 2.054 3.447
85 1.711 0.0
86 2.083 0.0
87 2.724 0.0
88 1.654 0.0
89 1.608 0.0
90 1.033 17.707
91 1.017 0.0
92 1.528 0.0
93 1.278 0.0
94 1.128 0.0
95 1.508 1.149
96 5.123 0.0
97 2.000 0.0
98 1.426 0.0
99 1.802 0.0
100 2.995 98.606
Edge data is too much to put up here. You can download it
[here](https://github.com/paralax/ColossalOpera/blob/master/hard/microgrid_edges.txt).
"""
def main():
pass
if __name__ == "__main__":
main()
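The input format described in the docstring maps onto a small parser; the challenge stub itself is empty, so the sketch below is an illustrative assumption and its names are hypothetical:
def parse_grid(lines):
    """Parse node count, node rows, edge count, and edge rows."""
    it = iter(lines)
    n_nodes = int(next(it))
    nodes = {}  # node_id -> (consumed_kwh, generated_kwh)
    for _ in range(n_nodes):
        node_id, consumed, generated = next(it).split()
        nodes[int(node_id)] = (float(consumed), float(generated))
    n_edges = int(next(it))
    edges = [tuple(map(int, next(it).split())) for _ in range(n_edges)]
    return nodes, edges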
| 27.973333
| 119
| 0.643708
| 949
| 4,196
| 2.830348
| 0.420443
| 0.069248
| 0.013403
| 0.013403
| 0.03723
| 0.032018
| 0
| 0
| 0
| 0
| 0
| 0.290663
| 0.277645
| 4,196
| 149
| 120
| 28.161074
| 0.595513
| 0.982841
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0.25
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
6874e644be4f30574e6ebffa5035e0598a5d0f56
| 17
|
py
|
Python
|
python/youarehere/api/__init__.py
|
whosonfirst/youarehere-www
|
e4ff8f0971586646c9c0586a28638da8234d8341
|
[
"BSD-2-Clause"
] | 1
|
2021-01-18T04:33:54.000Z
|
2021-01-18T04:33:54.000Z
|
python/youarehere/api/__init__.py
|
thisisaaronland/youarehere-www
|
e4ff8f0971586646c9c0586a28638da8234d8341
|
[
"BSD-2-Clause"
] | null | null | null |
python/youarehere/api/__init__.py
|
thisisaaronland/youarehere-www
|
e4ff8f0971586646c9c0586a28638da8234d8341
|
[
"BSD-2-Clause"
] | 1
|
2015-06-15T20:31:10.000Z
|
2015-06-15T20:31:10.000Z
|
# I blame, Guido
| 8.5
| 16
| 0.647059
| 3
| 17
| 3.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 17
| 1
| 17
| 17
| 0.846154
| 0.823529
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d7c41b4a96d0f00142f1834c33753e5b6a5efa30
| 32
|
py
|
Python
|
ptz_held_zoom_out.py
|
FarmVivi/kodihikvision
|
96fe3dda2acce3363a9aa07ad979032cfec30501
|
[
"MIT"
] | null | null | null |
ptz_held_zoom_out.py
|
FarmVivi/kodihikvision
|
96fe3dda2acce3363a9aa07ad979032cfec30501
|
[
"MIT"
] | null | null | null |
ptz_held_zoom_out.py
|
FarmVivi/kodihikvision
|
96fe3dda2acce3363a9aa07ad979032cfec30501
|
[
"MIT"
] | null | null | null |
import api
api.held_zoom_out()
| 8
| 19
| 0.78125
| 6
| 32
| 3.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 3
| 20
| 10.666667
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d7d4452e1bf2e1ad0d640dabe364de17ae9359cd
| 2,735
|
py
|
Python
|
tests/test_all.py
|
maxblee/force_deps
|
70af2d6943b5b4733ca5e4efee1dcaeab2de4e9c
|
[
"MIT"
] | null | null | null |
tests/test_all.py
|
maxblee/force_deps
|
70af2d6943b5b4733ca5e4efee1dcaeab2de4e9c
|
[
"MIT"
] | null | null | null |
tests/test_all.py
|
maxblee/force_deps
|
70af2d6943b5b4733ca5e4efee1dcaeab2de4e9c
|
[
"MIT"
] | null | null | null |
import subprocess
import pytest
from force_deps import *
@pytest.mark.parametrize("pkg_name", ["re", "pytest"])
def test_available_function_returns(pkg_name):
"""If a package has been installed in the environment, returns the function"""
@requires(pkg_name)
def returns_none():
return None
assert returns_none() is None
def test_unavailable_function_raises_error():
"""Makes sure `requires` raises an error if the package has not been installed"""
@requires("bad_function_name")
def returns_none():
return None
with pytest.raises(ImportError):
returns_none()
def test_newly_installed_program_runs():
"""Makes sure that after installing (but before importing), requires lets a program run"""
subprocess.run(["pip", "install", "frozendict"])
@requires("frozendict")
def returns_none():
return None
try:
assert returns_none() is None
# TODO: Clean up this with a better setup/tear down approach
subprocess.run(["pip", "uninstall", "frozendict", "--yes"])
except ImportError as err:
subprocess.run(["pip", "uninstall", "frozendict", "--yes"])
pytest.fail(err)
def test_one_valid_one_invalid_passes_any():
"""If one module is available, `requires_any` is true"""
@requires_any(["re", "bad_function_name"])
def returns_none():
return None
assert returns_none() is None
def test_one_valid_fails_all():
@requires_all(["re", "bad_function_name"])
def returns_none():
return None
with pytest.raises(ImportError):
returns_none()
def test_all_valid_passes_all():
@requires_all(["re", "itertools"])
def returns_none():
return None
assert returns_none() is None
def test_all_invalid_fails_any():
"""If all modules are unavailable `requires_any` raises error"""
@requires_any(["bad_function_name", "worse_function_name"])
def returns_none():
return None
with pytest.raises(ImportError):
returns_none()
def test_single_valid_passes_any():
"""Make sure that `requires_any(string)` == `requires(str)`"""
@requires_any("re")
def return_val():
return 0
@requires("re")
def return_zero():
return 0
@requires_all("re")
def return_nothing():
return 0
assert return_nothing() == return_val()
assert return_val() == return_zero()
assert return_val() == 0
def test_empty_seq_passes_any_and_all():
"""Make sure that `requires_any(empty_list)` is true"""
@requires_any([])
def returns_none():
return None
@requires_all([])
def returns_null():
return None
assert returns_none() is None
assert returns_null() is None
| 31.436782
| 94
| 0.668373
| 350
| 2,735
| 4.96
| 0.274286
| 0.101382
| 0.064516
| 0.092166
| 0.380184
| 0.312788
| 0.269009
| 0.25
| 0.25
| 0.241359
| 0
| 0.001856
| 0.212066
| 2,735
| 87
| 95
| 31.436782
| 0.803712
| 0.186472
| 0
| 0.471429
| 0
| 0
| 0.094977
| 0
| 0
| 0
| 0
| 0.011494
| 0.128571
| 1
| 0.3
| false
| 0.057143
| 0.1
| 0.171429
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
d7e38f686e961d245a0d9417f2012614d6a47116
| 4,909
|
py
|
Python
|
tracking/dsets/mot_wrapper.py
|
bjuncek/detr
|
a1bd3788ca16fb8dc92f7e69b2d801259ecec8f9
|
[
"Apache-2.0"
] | null | null | null |
tracking/dsets/mot_wrapper.py
|
bjuncek/detr
|
a1bd3788ca16fb8dc92f7e69b2d801259ecec8f9
|
[
"Apache-2.0"
] | null | null | null |
tracking/dsets/mot_wrapper.py
|
bjuncek/detr
|
a1bd3788ca16fb8dc92f7e69b2d801259ecec8f9
|
[
"Apache-2.0"
] | null | null | null |
import os.path as osp
import torch
from torch.utils.data import Dataset
from .mot_sequence import MOTSequence
class MOT17Wrapper(Dataset):
"""A Wrapper for the MOT_Sequence class to return multiple sequences."""
def __init__(self, split, dets, dataloader):
"""Initliazes all subset of the dataset.
Keyword arguments:
split -- the split of the dataset to use
dataloader -- args for the MOT_Sequence dataloader
"""
mot_dir = 'MOT17'
train_sequences = ['MOT17-02', 'MOT17-04', 'MOT17-05', 'MOT17-09', 'MOT17-10', 'MOT17-11', 'MOT17-13']
test_sequences = ['MOT17-01', 'MOT17-03', 'MOT17-06', 'MOT17-07', 'MOT17-08', 'MOT17-12', 'MOT17-14']
if "train" == split:
sequences = train_sequences
elif "test" == split:
sequences = test_sequences
elif "all" == split:
sequences = train_sequences + test_sequences
elif f"MOT17-{split}" in train_sequences + test_sequences:
sequences = [f"MOT17-{split}"]
else:
raise NotImplementedError("MOT split not available.")
self._data = []
for s in sequences:
if dets == 'ALL':
self._data.append(MOTSequence(f"{s}-DPM", mot_dir, **dataloader))
self._data.append(MOTSequence(f"{s}-FRCNN", mot_dir, **dataloader))
self._data.append(MOTSequence(f"{s}-SDP", mot_dir, **dataloader))
elif dets == 'DPM16':
self._data.append(MOTSequence(s.replace('17', '16'), 'MOT16', **dataloader))
else:
self._data.append(MOTSequence(f"{s}-{dets}", mot_dir, **dataloader))
def __len__(self):
return len(self._data)
def __getitem__(self, idx):
return self._data[idx]
class MOT19Wrapper(MOT17Wrapper):
"""A Wrapper for the MOT_Sequence class to return multiple sequences."""
def __init__(self, split, dataloader):
"""Initliazes all subset of the dataset.
Keyword arguments:
split -- the split of the dataset to use
dataloader -- args for the MOT_Sequence dataloader
"""
train_sequences = ['MOT19-01', 'MOT19-02', 'MOT19-03', 'MOT19-05']
test_sequences = ['MOT19-04', 'MOT19-06', 'MOT19-07', 'MOT19-08']
if "train" == split:
sequences = train_sequences
elif "test" == split:
sequences = test_sequences
elif "all" == split:
sequences = train_sequences + test_sequences
elif f"MOT19-{split}" in train_sequences + test_sequences:
sequences = [f"MOT19-{split}"]
else:
raise NotImplementedError("MOT19CVPR split not available.")
self._data = []
for s in sequences:
self._data.append(MOTSequence(s, 'MOT19', **dataloader))
class MOT20Wrapper(MOT17Wrapper):
"""A Wrapper for the MOT_Sequence class to return multiple sequences."""
def __init__(self, split, dataloader):
"""Initliazes all subset of the dataset.
Keyword arguments:
split -- the split of the dataset to use
dataloader -- args for the MOT_Sequence dataloader
"""
train_sequences = ['MOT20-01', 'MOT20-02', 'MOT20-03', 'MOT20-05']
test_sequences = ['MOT20-04', 'MOT20-06', 'MOT20-07', 'MOT20-08']
if "train" == split:
sequences = train_sequences
elif "test" == split:
sequences = test_sequences
elif "all" == split:
sequences = train_sequences + test_sequences
elif f"MOT20-{split}" in train_sequences + test_sequences:
sequences = [f"MOT20-{split}"]
else:
raise NotImplementedError("MOT20 split not available.")
self._data = []
for s in sequences:
self._data.append(MOTSequence(s, 'MOT20', **dataloader))
class MOT17LOWFPSWrapper(MOT17Wrapper):
"""A Wrapper for the MOT_Sequence class to return multiple sequences."""
def __init__(self, split, dataloader):
"""Initliazes all subset of the dataset.
Keyword arguments:
split -- the split of the dataset to use
dataloader -- args for the MOT_Sequence dataloader
"""
sequences = ['MOT17-02', 'MOT17-04', 'MOT17-09', 'MOT17-10', 'MOT17-11']
self._data = []
for s in sequences:
self._data.append(
MOTSequence(f"{s}-FRCNN", osp.join('MOT17_LOW_FPS', f'MOT17_{split}_FPS'), **dataloader))
class MOT17PrivateWrapper(MOT17Wrapper):
"""A Wrapper for the MOT_Sequence class to return multiple sequences."""
def __init__(self, split, dataloader, data_dir):
"""Initliazes all subset of the dataset.
Keyword arguments:
split -- the split of the dataset to use
dataloader -- args for the MOT_Sequence dataloader
"""
train_sequences = ['MOT17-02', 'MOT17-04', 'MOT17-05', 'MOT17-09', 'MOT17-10', 'MOT17-11', 'MOT17-13']
test_sequences = ['MOT17-01', 'MOT17-03', 'MOT17-06', 'MOT17-07', 'MOT17-08', 'MOT17-12', 'MOT17-14']
if "train" == split:
sequences = train_sequences
elif "test" == split:
sequences = test_sequences
elif "all" == split:
sequences = train_sequences + test_sequences
elif f"MOT17-{split}" in train_sequences + test_sequences:
sequences = [f"MOT17-{split}"]
else:
raise NotImplementedError("MOT17 split not available.")
self._data = []
for s in sequences:
self._data.append(MOTSequence(s, data_dir, **dataloader))
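A hypothetical construction example for the wrappers above; whether an empty `dataloader` dict suffices depends on the MOTSequence signature in .mot_sequence, which is not shown here:
dataset = MOT17Wrapper(split="train", dets="FRCNN", dataloader={})
print(len(dataset))  # 7 training sequences, one MOTSequence each
seq = dataset[0]     # e.g. the MOT17-02-FRCNN sequence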
| 31.267516
| 104
| 0.690365
| 650
| 4,909
| 5.063077
| 0.133846
| 0.068064
| 0.080219
| 0.051656
| 0.786083
| 0.778183
| 0.748405
| 0.744758
| 0.718019
| 0.679733
| 0
| 0.061997
| 0.16541
| 4,909
| 156
| 105
| 31.467949
| 0.741274
| 0.220819
| 0
| 0.549451
| 0
| 0
| 0.202253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.043956
| 0.021978
| 0.197802
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d7ef37683d4ee1032c5aebdb10947330c7f6a7e7
| 96
|
py
|
Python
|
keras_svi/__init__.py
|
krzysztofrusek/keras_svi
|
131615e477f9fd2ddcbf52aa8b92736c46464869
|
[
"BSD-3-Clause"
] | null | null | null |
keras_svi/__init__.py
|
krzysztofrusek/keras_svi
|
131615e477f9fd2ddcbf52aa8b92736c46464869
|
[
"BSD-3-Clause"
] | null | null | null |
keras_svi/__init__.py
|
krzysztofrusek/keras_svi
|
131615e477f9fd2ddcbf52aa8b92736c46464869
|
[
"BSD-3-Clause"
] | 1
|
2021-02-23T16:24:21.000Z
|
2021-02-23T16:24:21.000Z
|
'''
Copyright (c) 2020, AGH University of Science and Technology.
'''
from keras_svi import *
| 13.714286
| 61
| 0.708333
| 13
| 96
| 5.153846
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050633
| 0.177083
| 96
| 6
| 62
| 16
| 0.797468
| 0.635417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d7fc1111c04bab2b3e7f2f0e1914966c27b576f3
| 186
|
py
|
Python
|
Exercism/pangram/pangram.py
|
adityaarakeri/Interview-solved
|
e924011d101621c7121f4f86d82bee089f4c1e25
|
[
"MIT"
] | 46
|
2019-10-14T01:21:35.000Z
|
2022-01-08T23:55:15.000Z
|
Exercism/pangram/pangram.py
|
Siddhant-K-code/Interview-solved
|
e924011d101621c7121f4f86d82bee089f4c1e25
|
[
"MIT"
] | 53
|
2019-10-03T17:16:43.000Z
|
2020-12-08T12:48:19.000Z
|
Exercism/pangram/pangram.py
|
Siddhant-K-code/Interview-solved
|
e924011d101621c7121f4f86d82bee089f4c1e25
|
[
"MIT"
] | 96
|
2019-10-03T18:12:10.000Z
|
2021-03-14T19:41:06.000Z
|
from string import ascii_lowercase
def is_pangram(sentence):
chars = set(ch for ch in sentence.lower() if ch in ascii_lowercase)
return len(chars) == len(set(ascii_lowercase))
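A quick sanity check of the predicate above:
assert is_pangram("The quick brown fox jumps over the lazy dog")
assert not is_pangram("Hello, world")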
| 26.571429
| 71
| 0.741935
| 29
| 186
| 4.62069
| 0.62069
| 0.313433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 186
| 6
| 72
| 31
| 0.864516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
cc2b566a879059ac5d0f4228de3c5313014c6b5e
| 5,999
|
py
|
Python
|
test/simple_imputation/test_random_value_imputation.py
|
macarro/imputena
|
3a94ae1419a2af0d9707b20546ee078929ce99e8
|
[
"MIT"
] | 6
|
2020-04-27T21:21:47.000Z
|
2022-03-30T03:02:54.000Z
|
test/simple_imputation/test_random_value_imputation.py
|
macarro/imputena
|
3a94ae1419a2af0d9707b20546ee078929ce99e8
|
[
"MIT"
] | 1
|
2021-07-01T18:49:27.000Z
|
2021-07-01T18:49:27.000Z
|
test/simple_imputation/test_random_value_imputation.py
|
macarro/imputena
|
3a94ae1419a2af0d9707b20546ee078929ce99e8
|
[
"MIT"
] | null | null | null |
import unittest
from imputena import random_value_imputation
from test.example_data import *
class TestRandomValueImputation(unittest.TestCase):
# Positive tests for data as a dataframe ----------------------------------
def test_RVI_df_returning(self):
"""
Positive test
data: Correct dataframe (divcols)
Checks that the original dataframe remains unmodified and that the
returned dataframe contains 0 NA values, 18 less than the original.
"""
# 1. Arrange
df = generate_example_df_divcols()
# 2. Act
df2 = random_value_imputation(df)
# 3. Assert
self.assertEqual(df.isna().sum().sum(), 18)
self.assertEqual(df2.isna().sum().sum(), 0)
def test_RVI_df_inplace(self):
"""
Positive test
data: Correct dataframe (divcols)
Checks that random_value_imputation removes 18 NA values from the
dataframe.
"""
# 1. Arrange
df = generate_example_df_divcols()
# 2. Act
random_value_imputation(df, inplace=True)
# 3. Assert
self.assertEqual(df.isna().sum().sum(), 0)
def test_RVI_df_normal_distribution(self):
"""
Positive test
data: Correct dataframe (divcols)
distribution: 'normal'
Checks that the original dataframe remains unmodified and that the
returned dataframe contains 0 NA values, 18 less than the original.
"""
# 1. Arrange
df = generate_example_df_divcols()
# 2. Act
df2 = random_value_imputation(df, 'normal')
# 3. Assert
self.assertEqual(df.isna().sum().sum(), 18)
self.assertEqual(df2.isna().sum().sum(), 0)
def test_RVI_df_integer_distribution(self):
"""
Positive test
data: Correct dataframe (divcols)
distribution: 'integer'
Checks that the original dataframe remains unmodified and that the
returned dataframe contains 0 NA values, 18 less than the original.
"""
# 1. Arrange
df = generate_example_df_divcols()
# 2. Act
df2 = random_value_imputation(df, 'integer')
# 3. Assert
self.assertEqual(df.isna().sum().sum(), 18)
self.assertEqual(df2.isna().sum().sum(), 0)
# Positive tests for data as a series -------------------------------------
def test_RVI_series_returning(self):
"""
Positive test
data: Correct series (example series)
Checks that the original series remains unmodified and that the
returned dataframe contains 0 NA values, 3 less than the original.
"""
# 1. Arrange
ser = generate_example_series()
# 2. Act
ser2 = random_value_imputation(ser)
# 3. Assert
self.assertEqual(ser.isna().sum().sum(), 3)
self.assertEqual(ser2.isna().sum().sum(), 0)
def test_RVI_series_inplace(self):
"""
Positive test
data: Correct series (example series)
Checks that random_value_imputation removes 3 NA values from the
series.
"""
# 1. Arrange
ser = generate_example_series()
# 2. Act
random_value_imputation(ser, inplace=True)
# 3. Assert
self.assertEqual(ser.isna().sum().sum(), 0)
def test_RVI_series_normal_distribution(self):
"""
Positive test
data: Correct series (example series)
distribution: 'normal'
Checks that the original series remains unmodified and that the
returned dataframe contains 0 NA values, 3 less than the original.
"""
# 1. Arrange
ser = generate_example_series()
# 2. Act
ser2 = random_value_imputation(ser, 'normal')
# 3. Assert
self.assertEqual(ser.isna().sum().sum(), 3)
self.assertEqual(ser2.isna().sum().sum(), 0)
def test_RVI_series_integer_distribution(self):
"""
Positive test
data: Correct series (example series)
distribution: 'integer'
Checks that the original series remains unmodified and that the
returned dataframe contains 0 NA values, 3 less than the original.
"""
# 1. Arrange
ser = generate_example_series()
# 2. Act
ser2 = random_value_imputation(ser, 'integer')
# 3. Assert
self.assertEqual(ser.isna().sum().sum(), 3)
self.assertEqual(ser2.isna().sum().sum(), 0)
# Negative tests ----------------------------------------------------------
def test_RVI_wrong_type(self):
"""
Negative test
data: array (unsupported type)
Checks that the function raises a TypeError if the data is passed as
an array.
"""
# 1. Arrange
data = [2, 4, np.nan, 1]
# 2. Act & 3. Assert
with self.assertRaises(TypeError):
random_value_imputation(data)
def test_RVI_df_wrong_columns(self):
"""
Negative test
data: Correct dataframe (divcols)
columns: ['z'] ('z' doesn't exist as a column in the data)
Checks that random_value_imputation raises a ValueError if one of
the specified columns doesn't exist in the data.
"""
# 1. Arrange
ser = generate_example_series()
# 2. Act & Assert
with self.assertRaises(ValueError):
random_value_imputation(ser, columns=['z'])
def test_RVI_df_invalid_distribution(self):
"""
Negative test
data: Correct dataframe (divcols)
distribution: '' (invalid value)
Checks that random_value_imputation raises a ValueError when an
unrecognized distribution is passed.
"""
# 1. Arrange
ser = generate_example_series()
# 2. Act & Assert
with self.assertRaises(ValueError):
random_value_imputation(ser, '')
| 29.995
| 79
| 0.590598
| 680
| 5,999
| 5.069118
| 0.136765
| 0.051059
| 0.040615
| 0.046417
| 0.815782
| 0.796055
| 0.738033
| 0.705541
| 0.636786
| 0.559037
| 0
| 0.019263
| 0.29905
| 5,999
| 199
| 80
| 30.145729
| 0.800476
| 0.421737
| 0
| 0.444444
| 0
| 0
| 0.009404
| 0
| 0
| 0
| 0
| 0
| 0.314815
| 1
| 0.203704
| false
| 0
| 0.055556
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0bd9bdcc300427d0c7913174f4fced198837aa44
| 125
|
py
|
Python
|
otter/plugins/builtin/__init__.py
|
drjbarker/otter-grader
|
9e89e1675b09cf7889995b5f1bc8e1648bf6c309
|
[
"BSD-3-Clause"
] | null | null | null |
otter/plugins/builtin/__init__.py
|
drjbarker/otter-grader
|
9e89e1675b09cf7889995b5f1bc8e1648bf6c309
|
[
"BSD-3-Clause"
] | null | null | null |
otter/plugins/builtin/__init__.py
|
drjbarker/otter-grader
|
9e89e1675b09cf7889995b5f1bc8e1648bf6c309
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Builtin Otter plugins
"""
from .grade_override import GoogleSheetsGradeOverride
from .rate_limiting import RateLimiting
| 17.857143
| 53
| 0.824
| 13
| 125
| 7.769231
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112
| 125
| 6
| 54
| 20.833333
| 0.90991
| 0.168
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0bec74a8a24eff8df4ddbce56c069f9abf75cd24
| 203
|
py
|
Python
|
lycheepy/configuration/configuration/resources/repository.py
|
gabrielbazan/lycheepy
|
f314d3f591f4a449b37ead9baf26b9f5d58d9f0d
|
[
"MIT"
] | 17
|
2018-08-14T02:42:43.000Z
|
2022-02-25T00:38:47.000Z
|
lycheepy/configuration/configuration/resources/repository.py
|
gabrielbazan/lycheepy
|
f314d3f591f4a449b37ead9baf26b9f5d58d9f0d
|
[
"MIT"
] | 1
|
2018-11-01T02:55:01.000Z
|
2018-11-01T02:55:01.000Z
|
lycheepy/configuration/configuration/resources/repository.py
|
gabrielbazan/lycheepy
|
f314d3f591f4a449b37ead9baf26b9f5d58d9f0d
|
[
"MIT"
] | 4
|
2018-10-30T16:01:49.000Z
|
2021-06-08T20:21:07.000Z
|
from simplyrestful.resources import Resource
from serializers import RepositorySerializer
class RepositoryResource(Resource):
endpoint = 'repositories'
serializer = RepositorySerializer
| 25.375
| 45
| 0.79803
| 16
| 203
| 10.125
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162562
| 203
| 7
| 46
| 29
| 0.952941
| 0
| 0
| 0
| 0
| 0
| 0.061224
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f0b357c79baf3ff477c7515cc300fe0a7f789f55
| 657
|
py
|
Python
|
src/zvt/recorders/sina/money_flow/__init__.py
|
vishalbelsare/zvt
|
d55051147274c0a4157f08ec60908c781a323c8f
|
[
"MIT"
] | 2,032
|
2019-04-16T14:10:32.000Z
|
2022-03-31T12:40:13.000Z
|
src/zvt/recorders/sina/money_flow/__init__.py
|
vishalbelsare/zvt
|
d55051147274c0a4157f08ec60908c781a323c8f
|
[
"MIT"
] | 162
|
2019-05-07T09:57:46.000Z
|
2022-03-25T16:23:08.000Z
|
src/zvt/recorders/sina/money_flow/__init__.py
|
vishalbelsare/zvt
|
d55051147274c0a4157f08ec60908c781a323c8f
|
[
"MIT"
] | 755
|
2019-04-30T10:25:16.000Z
|
2022-03-29T17:50:49.000Z
|
# the __all__ is generated
__all__ = []
# __init__.py structure:
# common code of the package
# export interface in __all__ which contains __all__ of its sub modules
# import all from submodule sina_block_money_flow_recorder
from .sina_block_money_flow_recorder import *
from .sina_block_money_flow_recorder import __all__ as _sina_block_money_flow_recorder_all
__all__ += _sina_block_money_flow_recorder_all
# import all from submodule sina_stock_money_flow_recorder
from .sina_stock_money_flow_recorder import *
from .sina_stock_money_flow_recorder import __all__ as _sina_stock_money_flow_recorder_all
__all__ += _sina_stock_money_flow_recorder_all
| 34.578947
| 90
| 0.858447
| 100
| 657
| 4.8
| 0.29
| 0.1875
| 0.354167
| 0.1875
| 0.775
| 0.575
| 0.3375
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 657
| 18
| 91
| 36.5
| 0.820513
| 0.392694
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.571429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f0c89b372e8ef0dee581bf48f647467565d55421
| 38
|
py
|
Python
|
exercises/crypto-square/crypto_square.py
|
kishankj/python
|
82042de746128127502e109111e6c4e8ab002af6
|
[
"MIT"
] | 1,177
|
2017-06-21T20:24:06.000Z
|
2022-03-29T02:30:55.000Z
|
exercises/crypto-square/crypto_square.py
|
kishankj/python
|
82042de746128127502e109111e6c4e8ab002af6
|
[
"MIT"
] | 1,890
|
2017-06-18T20:06:10.000Z
|
2022-03-31T18:35:51.000Z
|
exercises/crypto-square/crypto_square.py
|
kishankj/python
|
82042de746128127502e109111e6c4e8ab002af6
|
[
"MIT"
] | 1,095
|
2017-06-26T23:06:19.000Z
|
2022-03-29T03:25:38.000Z
|
def cipher_text(plain_text):
pass
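The stub above is left empty; one conventional solution to the Exercism crypto-square exercise looks like the sketch below, given a separate hypothetical name so the stub stays intact:
from math import ceil, sqrt

def cipher_text_sketch(plain_text):
    # Normalize: lowercase, letters and digits only.
    normalized = [ch.lower() for ch in plain_text if ch.isalnum()]
    if not normalized:
        return ""
    cols = ceil(sqrt(len(normalized)))  # columns >= rows, cols - rows <= 1
    rows = ceil(len(normalized) / cols)
    padded = normalized + [" "] * (rows * cols - len(normalized))
    # Read the rows-by-cols rectangle column by column.
    return " ".join(
        "".join(padded[r * cols + c] for r in range(rows)) for c in range(cols)
    )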
| 12.666667
| 28
| 0.736842
| 6
| 38
| 4.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 38
| 2
| 29
| 19
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
0b045924836254c7ae0907838469173e7ad70c07
| 184
|
py
|
Python
|
deeplab_resnet/__init__.py
|
ecustWallace/cataract
|
76b28eb6c12cad04137a0ef90462c743b776db1b
|
[
"MIT"
] | null | null | null |
deeplab_resnet/__init__.py
|
ecustWallace/cataract
|
76b28eb6c12cad04137a0ef90462c743b776db1b
|
[
"MIT"
] | null | null | null |
deeplab_resnet/__init__.py
|
ecustWallace/cataract
|
76b28eb6c12cad04137a0ef90462c743b776db1b
|
[
"MIT"
] | null | null | null |
from .model import DeepLabResNetModel
from .image_reader import ImageReader
from .image_reader_mp import ImageReader_MP
from .utils import decode_labels, inv_preprocess, prepare_label
| 36.8
| 63
| 0.869565
| 25
| 184
| 6.12
| 0.6
| 0.117647
| 0.196078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097826
| 184
| 4
| 64
| 46
| 0.921687
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
9bc26273ac4cc80c0e240ca96c42243961608b9a
| 105
|
py
|
Python
|
roundingError.py
|
funge/udacity-dl
|
65ff4279b9872e156a783e9eb4d24d863ef235c7
|
[
"Apache-2.0"
] | null | null | null |
roundingError.py
|
funge/udacity-dl
|
65ff4279b9872e156a783e9eb4d24d863ef235c7
|
[
"Apache-2.0"
] | null | null | null |
roundingError.py
|
funge/udacity-dl
|
65ff4279b9872e156a783e9eb4d24d863ef235c7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
a = 1000000000
for i in range(1000000):
a += 1e-6
a -= 1000000000
print(a)
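For contrast, math.fsum tracks exact partial sums and sidesteps the accumulation error the loop above demonstrates (a sketch, not part of the original snippet):
import math
exact = math.fsum([1000000000] + [1e-6] * 1000000) - 1000000000
print(exact)  # 1.0, whereas the loop above drifts to roughly 0.95 on IEEE-754 doubles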
| 10.5
| 25
| 0.628571
| 18
| 105
| 3.666667
| 0.777778
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.353659
| 0.219048
| 105
| 9
| 26
| 11.666667
| 0.45122
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
50317e4e9f3c234862aabeed2721c5fa3b10253f
| 243
|
py
|
Python
|
example/drf_integrations_example/api/auth_backends.py
|
yoyowallet/drf-integrations-framework
|
7cf5cd28e5aff80c9b1a34b461294f4bd3108fa9
|
[
"MIT"
] | 1
|
2020-07-09T11:39:19.000Z
|
2020-07-09T11:39:19.000Z
|
example/drf_integrations_example/api/auth_backends.py
|
yoyowallet/drf-integrations-framework
|
7cf5cd28e5aff80c9b1a34b461294f4bd3108fa9
|
[
"MIT"
] | 5
|
2020-07-08T11:00:26.000Z
|
2021-01-13T09:33:09.000Z
|
example/drf_integrations_example/api/auth_backends.py
|
yoyowallet/drf-integrations-framework
|
7cf5cd28e5aff80c9b1a34b461294f4bd3108fa9
|
[
"MIT"
] | 2
|
2021-08-12T12:23:54.000Z
|
2021-09-20T06:45:38.000Z
|
from drf_integrations.auth_backends import IntegrationOAuth2Authentication
from .integrations import APIClientIntegration
class OAuth2Authentication(IntegrationOAuth2Authentication):
ensure_integration_classes = (APIClientIntegration,)
| 30.375
| 74
| 0.880658
| 18
| 243
| 11.666667
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013453
| 0.082305
| 243
| 7
| 75
| 34.714286
| 0.928251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
504eb2866572ffc3d32836fd3bee6cf4b9b96b85
| 9,429
|
py
|
Python
|
cache_dependencies/tests/test_helpers.py
|
Tusky/cache-dependencies
|
6c19d0c2adfce19c3fdc53ad5704eddc6d84e106
|
[
"BSD-3-Clause"
] | 3
|
2017-08-08T20:06:56.000Z
|
2018-09-19T03:16:20.000Z
|
cache_dependencies/tests/test_helpers.py
|
Tusky/cache-dependencies
|
6c19d0c2adfce19c3fdc53ad5704eddc6d84e106
|
[
"BSD-3-Clause"
] | 1
|
2017-10-24T23:11:32.000Z
|
2017-10-24T23:11:32.000Z
|
cache_dependencies/tests/test_helpers.py
|
Tusky/cache-dependencies
|
6c19d0c2adfce19c3fdc53ad5704eddc6d84e106
|
[
"BSD-3-Clause"
] | 8
|
2017-10-24T07:43:56.000Z
|
2021-06-17T07:03:02.000Z
|
import time
from unittest import TestCase
from .helpers import CacheStub
def f():
return 1
class C:
def m(n):
return 2
class CacheStubTest(TestCase):
"""
Because the library historically uses the Django cache API, some tests here are taken from Django.
"""
CACHE_NAME = 'default'
def setUp(self):
self.cache = CacheStub()
def test_set_get(self):
self.cache.set('key1', 'value1')
self.assertEqual(self.cache.get('key1'), 'value1')
def test_non_existent(self):
self.assertIsNone(self.cache.get("non_existent_key"))
self.assertEqual(self.cache.get("non_existent_key", 5), 5)
def test_expiration(self):
self.cache.set('key1', 'value', 1)
self.cache.set('key2', 'value', 1)
self.cache.set('key3', 'value', 1)
time.sleep(2)
self.assertIsNone(self.cache.get("key1"))
self.cache.add("key2", "new_value")
self.assertEqual(self.cache.get("key2"), "new_value")
self.assertFalse(self.cache.has_key("key3"))
def test_has_key(self):
self.cache.set("key1", "val1")
self.assertTrue(self.cache.has_key("key1"))
self.assertFalse(self.cache.has_key("val1"))
def test_in(self):
self.cache.set("key1", "val1")
self.assertIn("key1", self.cache)
self.assertNotIn("val1", self.cache)
def test_add(self):
self.cache.add("addkey1", "value")
result = self.cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(self.cache.get("addkey1"), "value")
def test_delete(self):
self.cache.set("key1", "val1")
self.cache.set("key2", "val2")
self.assertEqual(self.cache.get("key1"), "val1")
self.cache.delete("key1")
self.assertIsNone(self.cache.get("key1"))
self.assertEqual(self.cache.get("key2"), "val2")
def test_incr(self):
self.cache.set('answer', 41)
self.assertEqual(self.cache.incr('answer'), 42)
self.assertEqual(self.cache.get('answer'), 42)
self.assertEqual(self.cache.incr('answer', 10), 52)
self.assertEqual(self.cache.get('answer'), 52)
self.assertEqual(self.cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
self.cache.incr('does_not_exist')
def test_decr(self):
self.cache.set('answer', 43)
self.assertEqual(self.cache.decr('answer'), 42)
self.assertEqual(self.cache.get('answer'), 42)
self.assertEqual(self.cache.decr('answer', 10), 32)
self.assertEqual(self.cache.get('answer'), 32)
self.assertEqual(self.cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
self.cache.decr('does_not_exist')
def test_set_many(self):
self.cache.set_many({"key1": "val1", "key2": "val2"})
self.assertEqual(self.cache.get("key1"), "val1")
self.assertEqual(self.cache.get("key2"), "val2")
def test_set_many_expiration(self):
self.cache.set_many({"key1": "val1", "key2": "val2"}, 1)
time.sleep(2)
self.assertIsNone(self.cache.get("key1"))
self.assertIsNone(self.cache.get("key2"))
def test_get_many(self):
self.cache.set('a', 'a_val')
self.cache.set('b', 'b_val')
self.cache.set('c', 'c_val')
self.cache.set('d', 'd_val')
self.assertDictEqual(self.cache.get_many(['a', 'c', 'd']), {'a': 'a_val', 'c': 'c_val', 'd': 'd_val'})
self.assertDictEqual(self.cache.get_many(['a', 'b', 'e']), {'a': 'a_val', 'b': 'b_val'})
def test_delete_many(self):
self.cache.set("key1", "val1")
self.cache.set("key2", "val2")
self.cache.set("key3", "val3")
self.cache.delete_many(["key1", "key2"])
self.assertIsNone(self.cache.get("key1"))
self.assertIsNone(self.cache.get("key2"))
self.assertEqual(self.cache.get("key3"), "val3")
def test_clear(self):
self.cache.set("key1", "val1")
self.cache.set("key2", "val2")
self.cache.clear()
self.assertIsNone(self.cache.get("key1"))
self.assertIsNone(self.cache.get("key2"))
def test_multiple_data_types(self):
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
self.cache.set("stuff", stuff)
self.assertEqual(self.cache.get("stuff"), stuff)
def test_cache_versioning_get_set(self):
# set, using default version = 1
self.cache.set('answer1', 42)
self.assertEqual(self.cache.get('answer1'), 42)
self.assertEqual(self.cache.get('answer1', version=1), 42)
self.assertIsNone(self.cache.get('answer1', version=2))
def test_cache_versioning_add(self):
self.cache.add('answer1', 42, version=2)
self.assertIsNone(self.cache.get('answer1', version=1))
self.assertEqual(self.cache.get('answer1', version=2), 42)
self.cache.add('answer1', 37, version=2)
self.assertIsNone(self.cache.get('answer1', version=1))
self.assertEqual(self.cache.get('answer1', version=2), 42)
self.cache.add('answer1', 37, version=1)
self.assertEqual(self.cache.get('answer1', version=1), 37)
self.assertEqual(self.cache.get('answer1', version=2), 42)
def test_cache_versioning_has_key(self):
self.cache.set('answer1', 42)
# has_key
self.assertTrue(self.cache.has_key('answer1'))
self.assertTrue(self.cache.has_key('answer1', version=1))
self.assertFalse(self.cache.has_key('answer1', version=2))
def test_cache_versioning_delete(self):
self.cache.set('answer1', 37, version=1)
self.cache.set('answer1', 42, version=2)
self.cache.delete('answer1')
self.assertIsNone(self.cache.get('answer1', version=1))
self.assertEqual(self.cache.get('answer1', version=2), 42)
self.cache.set('answer2', 37, version=1)
self.cache.set('answer2', 42, version=2)
self.cache.delete('answer2', version=2)
self.assertEqual(self.cache.get('answer2', version=1), 37)
self.assertIsNone(self.cache.get('answer2', version=2))
def test_cache_versioning_incr_decr(self):
self.cache.set('answer1', 37, version=1)
self.cache.set('answer1', 42, version=2)
self.cache.incr('answer1')
self.assertEqual(self.cache.get('answer1', version=1), 38)
self.assertEqual(self.cache.get('answer1', version=2), 42)
self.cache.decr('answer1')
self.assertEqual(self.cache.get('answer1', version=1), 37)
self.assertEqual(self.cache.get('answer1', version=2), 42)
self.cache.set('answer2', 37, version=1)
self.cache.set('answer2', 42, version=2)
self.cache.incr('answer2', version=2)
self.assertEqual(self.cache.get('answer2', version=1), 37)
self.assertEqual(self.cache.get('answer2', version=2), 43)
self.cache.decr('answer2', version=2)
self.assertEqual(self.cache.get('answer2', version=1), 37)
self.assertEqual(self.cache.get('answer2', version=2), 42)
def test_cache_versioning_get_set_many(self):
self.cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertDictEqual(self.cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(self.cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertDictEqual(self.cache.get_many(['ford1', 'arthur1'], version=2), {})
self.cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertDictEqual(self.cache.get_many(['ford2', 'arthur2']), {})
self.assertDictEqual(self.cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertDictEqual(self.cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
def test_incr_version(self):
self.cache.set('answer', 42, version=2)
self.assertIsNone(self.cache.get('answer'))
self.assertIsNone(self.cache.get('answer', version=1))
self.assertEqual(self.cache.get('answer', version=2), 42)
self.assertIsNone(self.cache.get('answer', version=3))
self.assertEqual(self.cache.incr_version('answer', version=2), 3)
self.assertIsNone(self.cache.get('answer'))
self.assertIsNone(self.cache.get('answer', version=1))
self.assertIsNone(self.cache.get('answer', version=2))
self.assertEqual(self.cache.get('answer', version=3), 42)
with self.assertRaises(ValueError):
self.cache.incr_version('does_not_exist')
def test_decr_version(self):
self.cache.set('answer', 42, version=2)
self.assertIsNone(self.cache.get('answer'))
self.assertIsNone(self.cache.get('answer', version=1))
self.assertEqual(self.cache.get('answer', version=2), 42)
self.assertEqual(self.cache.decr_version('answer', version=2), 1)
self.assertEqual(self.cache.get('answer'), 42)
self.assertEqual(self.cache.get('answer', version=1), 42)
self.assertIsNone(self.cache.get('answer', version=2))
with self.assertRaises(ValueError):
self.cache.decr_version('does_not_exist', version=2)
| 39.78481
| 112
| 0.617775
| 1,230
| 9,429
| 4.654472
| 0.091057
| 0.216943
| 0.138341
| 0.180262
| 0.823231
| 0.751092
| 0.652926
| 0.56524
| 0.50393
| 0.432838
| 0
| 0.044605
| 0.205854
| 9,429
| 236
| 113
| 39.95339
| 0.719952
| 0.013999
| 0
| 0.345745
| 0
| 0
| 0.12403
| 0
| 0
| 0
| 0
| 0
| 0.462766
| 1
| 0.138298
| false
| 0
| 0.015957
| 0.010638
| 0.180851
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
aca5893105e5d5fa9a3a6bb0cbb5d3f09c8c4c78
| 273
|
py
|
Python
|
3day/Func07.py
|
jsjang93/joony
|
62f7a325094c887212b894932263bf84500e0f03
|
[
"MIT"
] | null | null | null |
3day/Func07.py
|
jsjang93/joony
|
62f7a325094c887212b894932263bf84500e0f03
|
[
"MIT"
] | null | null | null |
3day/Func07.py
|
jsjang93/joony
|
62f7a325094c887212b894932263bf84500e0f03
|
[
"MIT"
] | null | null | null |
# Func07.py
def pSum(mod, *n):
ans = 0
for i in n:
ans += i
return mod + " " + str(ans)
print(pSum('덧셈', 20, 10))        # prints: 덧셈 30  ('덧셈' is Korean for "addition")
print(pSum('덧셈', 20, 10, 5))     # prints: 덧셈 35
print(pSum('덧셈', 20, 10, 5, 2))  # prints: 덧셈 37
a = [10, 20, 30, 40, 50]
print(pSum('덧셈', *a))            # prints: 덧셈 150
| 12.409091
| 35
| 0.501832
| 55
| 273
| 2.509091
| 0.490909
| 0.26087
| 0.318841
| 0.282609
| 0.34058
| 0.231884
| 0
| 0
| 0
| 0
| 0
| 0.18408
| 0.263736
| 273
| 21
| 36
| 13
| 0.497512
| 0.124542
| 0
| 0
| 0
| 0
| 0.03913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.4
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
acaddc1fad1cf5ce8c26f02fa2bc3a3bdc6f6fd7
| 47
|
py
|
Python
|
Comprehensions-Lab/ascii_values.py
|
dechevh/Python-Advanced
|
9daf33771b9096db77bcbf05ae2a4591b876c723
|
[
"MIT"
] | 2
|
2020-09-15T19:12:26.000Z
|
2020-09-15T19:12:30.000Z
|
Comprehensions-Lab/ascii_values.py
|
dechevh/Python-Advanced
|
9daf33771b9096db77bcbf05ae2a4591b876c723
|
[
"MIT"
] | 1
|
2021-07-06T09:20:49.000Z
|
2021-07-06T09:20:49.000Z
|
Comprehensions-Lab/ascii_values.py
|
dechevh/Python-Advanced
|
9daf33771b9096db77bcbf05ae2a4591b876c723
|
[
"MIT"
] | null | null | null |
print({c: ord(c) for c in input().split(", ")})
| 47
| 47
| 0.553191
| 9
| 47
| 2.888889
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12766
| 47
| 1
| 47
| 47
| 0.634146
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
acccbb96f8380454f36e598b963cf2a71fe76c3f
| 180
|
py
|
Python
|
PythonExercicios/ex006.py
|
github-felipe/ExerciciosEmPython-cursoemvideo
|
0045464a287f21b6245554a975588cf06c5b476d
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex006.py
|
github-felipe/ExerciciosEmPython-cursoemvideo
|
0045464a287f21b6245554a975588cf06c5b476d
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex006.py
|
github-felipe/ExerciciosEmPython-cursoemvideo
|
0045464a287f21b6245554a975588cf06c5b476d
|
[
"MIT"
] | null | null | null |
n = float(input('Digite um número: '))  # prompt: "Enter a number: "
print(f'O \033[34mdobro\033[m de {n} é: {n * 2} \n O \033[36mtriplo\033[m é {n * 3} \n A \033[7;30mraíz quadrada\033[m é: {n ** (1/2):.2f}')  # prints the double, triple, and square root (ANSI-colored, in Portuguese)
| 60
| 140
| 0.583333
| 40
| 180
| 2.625
| 0.575
| 0.114286
| 0.095238
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.198676
| 0.161111
| 180
| 2
| 141
| 90
| 0.496689
| 0
| 0
| 0
| 0
| 0.5
| 0.822222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
accff594ee79e60aba387c5a832e53f5050c05e3
| 235
|
py
|
Python
|
fastcord/utils/date.py
|
dskprt/botnolib
|
dd17aff956df0a54838980257249a7dfb725ab23
|
[
"MIT"
] | 3
|
2020-03-17T13:08:42.000Z
|
2021-07-07T10:58:04.000Z
|
fastcord/utils/date.py
|
dskprt/botnolib
|
dd17aff956df0a54838980257249a7dfb725ab23
|
[
"MIT"
] | 1
|
2020-04-07T12:46:09.000Z
|
2020-04-07T12:46:09.000Z
|
fastcord/utils/date.py
|
dskprt/botnolib
|
dd17aff956df0a54838980257249a7dfb725ab23
|
[
"MIT"
] | 1
|
2020-04-12T17:37:32.000Z
|
2020-04-12T17:37:32.000Z
|
from datetime import datetime
def from_iso8601(date):
return datetime.fromisoformat(date)
def to_iso8601(year, month, day, hour, minute, second):
return datetime(year, month, day, hour,
minute, second, 0).isoformat()
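A hypothetical round-trip check for the two helpers above:
iso = to_iso8601(2020, 4, 7, 12, 46, 9)  # '2020-04-07T12:46:09'
assert from_iso8601(iso) == datetime(2020, 4, 7, 12, 46, 9)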
| 26.111111
| 55
| 0.719149
| 31
| 235
| 5.387097
| 0.548387
| 0.167665
| 0.143713
| 0.191617
| 0.335329
| 0.335329
| 0
| 0
| 0
| 0
| 0
| 0.046392
| 0.174468
| 235
| 8
| 56
| 29.375
| 0.814433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.333333
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
ace58028df63e3d22f74c4758c5f96cfe50d918b
| 227
|
py
|
Python
|
pypy/interpreter/pyparser/test/samples/snippet_with_2.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/interpreter/pyparser/test/samples/snippet_with_2.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
pypy/interpreter/pyparser/test/samples/snippet_with_2.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
# EXPECT: Module(None, Stmt([From('__future__', [('with_statement', None)]), With(Name('acontext'), Stmt([Pass()]), AssName('avariable', OP_ASSIGN))]))
from __future__ import with_statement
with acontext as avariable:
pass
| 37.833333
| 151
| 0.713656
| 28
| 227
| 5.392857
| 0.607143
| 0.13245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101322
| 227
| 5
| 152
| 45.4
| 0.740196
| 0.656388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
acfb33730892c27922f22a442ef653a991ddabcd
| 70
|
py
|
Python
|
filters/encoding.py
|
adibalcan/crawlingbot
|
9f2a8b13dccafcc07cf7760e1498cf51cf691277
|
[
"MIT"
] | 1
|
2016-10-07T14:10:58.000Z
|
2016-10-07T14:10:58.000Z
|
filters/encoding.py
|
adibalcan/crawlingbot
|
9f2a8b13dccafcc07cf7760e1498cf51cf691277
|
[
"MIT"
] | null | null | null |
filters/encoding.py
|
adibalcan/crawlingbot
|
9f2a8b13dccafcc07cf7760e1498cf51cf691277
|
[
"MIT"
] | null | null | null |
def filter(source, meta={}):
return meta["response"].textencoding
| 23.333333
| 40
| 0.7
| 8
| 70
| 6.125
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128571
| 70
| 2
| 41
| 35
| 0.803279
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
4a1a1ff4dc3cd7c2340004feb86a36272f831801
| 38
|
py
|
Python
|
wp api/errors.py
|
aouwalitshikkha/wp-gutenberg
|
fc1f94ccaede1fd7520645d0c8922cdeaaa28279
|
[
"MIT"
] | 1
|
2022-03-25T08:16:35.000Z
|
2022-03-25T08:16:35.000Z
|
wp api/errors.py
|
aouwalitshikkha/wp-gutenberg
|
fc1f94ccaede1fd7520645d0c8922cdeaaa28279
|
[
"MIT"
] | null | null | null |
wp api/errors.py
|
aouwalitshikkha/wp-gutenberg
|
fc1f94ccaede1fd7520645d0c8922cdeaaa28279
|
[
"MIT"
] | null | null | null |
class WpApiError(Exception):
pass
| 12.666667
| 28
| 0.736842
| 4
| 38
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 38
| 2
| 29
| 19
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
c5bf5611d679f5c03299d814145fde0f52e5f1f3
| 71
|
py
|
Python
|
semparse/__init__.py
|
lukovnikov/semparse
|
0fd5fcd9c982b6faac8f08b451f20273d2cc0da7
|
[
"MIT"
] | null | null | null |
semparse/__init__.py
|
lukovnikov/semparse
|
0fd5fcd9c982b6faac8f08b451f20273d2cc0da7
|
[
"MIT"
] | null | null | null |
semparse/__init__.py
|
lukovnikov/semparse
|
0fd5fcd9c982b6faac8f08b451f20273d2cc0da7
|
[
"MIT"
] | 1
|
2021-04-06T13:15:01.000Z
|
2021-04-06T13:15:01.000Z
|
import semparse.rnn
import semparse.attention
import semparse.stackcell
| 23.666667
| 25
| 0.887324
| 9
| 71
| 7
| 0.555556
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070423
| 71
| 3
| 26
| 23.666667
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c5da5f3553a69e8d46e18cc0b6a7ea0fdf3fd6df
| 70
|
py
|
Python
|
invenio_madmp/convert/__init__.py
|
FAIR-Data-Austria/invenio-madmp
|
74372ee794f81666f5e9cf08ef448c21b2e428be
|
[
"MIT"
] | 1
|
2022-03-02T10:37:29.000Z
|
2022-03-02T10:37:29.000Z
|
invenio_madmp/convert/__init__.py
|
FAIR-Data-Austria/invenio-madmp
|
74372ee794f81666f5e9cf08ef448c21b2e428be
|
[
"MIT"
] | 9
|
2020-08-25T12:03:08.000Z
|
2020-10-20T11:45:32.000Z
|
invenio_madmp/convert/__init__.py
|
FAIR-Data-Austria/invenio-madmp
|
74372ee794f81666f5e9cf08ef448c21b2e428be
|
[
"MIT"
] | null | null | null |
"""TODO."""
from .util import convert_dmp
__all__ = ["convert_dmp"]
| 11.666667
| 29
| 0.671429
| 9
| 70
| 4.555556
| 0.777778
| 0.487805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 70
| 5
| 30
| 14
| 0.683333
| 0.071429
| 0
| 0
| 0
| 0
| 0.186441
| 0
| 0
| 0
| 0
| 0.2
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c5ff905dd0e4fa7d98375da4780979f2b2fd847a
| 53
|
py
|
Python
|
freeldep/cloud/__init__.py
|
MatthieuBlais/freeldep
|
092de3c603a28b9d12e9ad93d6c0cca773469c9f
|
[
"Apache-2.0"
] | null | null | null |
freeldep/cloud/__init__.py
|
MatthieuBlais/freeldep
|
092de3c603a28b9d12e9ad93d6c0cca773469c9f
|
[
"Apache-2.0"
] | null | null | null |
freeldep/cloud/__init__.py
|
MatthieuBlais/freeldep
|
092de3c603a28b9d12e9ad93d6c0cca773469c9f
|
[
"Apache-2.0"
] | null | null | null |
from freeldep.cloud.compiler import Compiler # noqa
| 26.5
| 52
| 0.811321
| 7
| 53
| 6.142857
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 53
| 1
| 53
| 53
| 0.934783
| 0.075472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a844fbd1b756056a248635a8de6aeef2cfd277c6
| 98
|
py
|
Python
|
backend/accounts/admin.py
|
njokuifeanyigerald/djoser-react
|
51b7c60ea5648263300957bad1c4754c3ea1b6f2
|
[
"MIT"
] | null | null | null |
backend/accounts/admin.py
|
njokuifeanyigerald/djoser-react
|
51b7c60ea5648263300957bad1c4754c3ea1b6f2
|
[
"MIT"
] | null | null | null |
backend/accounts/admin.py
|
njokuifeanyigerald/djoser-react
|
51b7c60ea5648263300957bad1c4754c3ea1b6f2
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import UserAccount
admin.site.register(UserAccount)
| 24.5
| 32
| 0.846939
| 13
| 98
| 6.384615
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091837
| 98
| 4
| 33
| 24.5
| 0.932584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a845af35fd9c6dd666d3463aefb09c6ea04b09be
| 47
|
py
|
Python
|
3d_printing/test1/test_cube.py
|
CoffeeAddict93/braille_translation
|
30d5514fa0a6c010df5ad053d6e69298dba836ab
|
[
"MIT"
] | 1
|
2021-11-24T03:51:06.000Z
|
2021-11-24T03:51:06.000Z
|
3d_printing/test1/test_cube.py
|
CoffeeAddict93/braille_translation
|
30d5514fa0a6c010df5ad053d6e69298dba836ab
|
[
"MIT"
] | null | null | null |
3d_printing/test1/test_cube.py
|
CoffeeAddict93/braille_translation
|
30d5514fa0a6c010df5ad053d6e69298dba836ab
|
[
"MIT"
] | null | null | null |
import bpy
bpy.ops.mesh.primitive_cube_add()
| 15.666667
| 33
| 0.787234
| 8
| 47
| 4.375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 3
| 33
| 15.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a873ae02fba5231f43019031f4889fc120b7b412
| 521
|
py
|
Python
|
_database/models/__init__.py
|
marcoEDU/HackerspaceWebsiteTemplate
|
29621a5f5daef7a8073f368b7d95a1df654c8ba9
|
[
"MIT"
] | 9
|
2019-11-04T04:46:08.000Z
|
2019-12-29T22:24:38.000Z
|
_database/models/__init__.py
|
marcoEDU/HackerspaceWebsiteTemplate
|
29621a5f5daef7a8073f368b7d95a1df654c8ba9
|
[
"MIT"
] | 27
|
2020-02-17T17:57:00.000Z
|
2020-04-23T20:25:44.000Z
|
_database/models/__init__.py
|
marcoEDU/HackerspaceWebsiteTemplate
|
29621a5f5daef7a8073f368b7d95a1df654c8ba9
|
[
"MIT"
] | 4
|
2020-02-17T13:39:18.000Z
|
2020-04-12T07:56:45.000Z
|
# link the models' locations
from _database.models.events import Event
from _database.models.machines import Machine
from _database.models.projects import Project
from _database.models.consensus import Consensus
from _database.models.spaces import Space
from _database.models.persons import Person
from _database.models.guildes import Guilde
from _database.models.meetingnotes import MeetingNote
from _database.models.wishes import Wish
from _database.models.photos import Photo
from _database.models.helper import Helper
| 40.076923
| 53
| 0.861804
| 70
| 521
| 6.257143
| 0.385714
| 0.30137
| 0.452055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09405
| 521
| 12
| 54
| 43.416667
| 0.927966
| 0.047985
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a877594c249b0ff35ff44e66152d6363dbbb2bf2
| 28
|
py
|
Python
|
tracking/eye_tracking/__init__.py
|
Mirevi/face-synthesizer-JVRB
|
3c5774b1c5c981131df21b299389f568502b8ecf
|
[
"BSD-3-Clause"
] | null | null | null |
tracking/eye_tracking/__init__.py
|
Mirevi/face-synthesizer-JVRB
|
3c5774b1c5c981131df21b299389f568502b8ecf
|
[
"BSD-3-Clause"
] | null | null | null |
tracking/eye_tracking/__init__.py
|
Mirevi/face-synthesizer-JVRB
|
3c5774b1c5c981131df21b299389f568502b8ecf
|
[
"BSD-3-Clause"
] | null | null | null |
from .eye_tracking import *
| 14
| 27
| 0.785714
| 4
| 28
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 28
| 1
| 28
| 28
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
763893d2ffffdf1a442319e031b0f9f3790c1b38
| 324
|
py
|
Python
|
Appendix_C_Python/example03_float.py
|
itanaskovic/Data-Science-Algorithms-in-a-Week
|
879cb4c96b35d57e593a85b54dcda41f91d27533
|
[
"MIT"
] | 30
|
2017-09-02T16:00:02.000Z
|
2022-03-28T02:00:07.000Z
|
AppendixC/example03_float.py
|
abhishek-choudharys/Data-Science-Algorithms-in-a-Week-Second-Edition
|
e4fc518803129e6b11e0bfa0587ff450c2577ff9
|
[
"MIT"
] | null | null | null |
AppendixC/example03_float.py
|
abhishek-choudharys/Data-Science-Algorithms-in-a-Week-Second-Edition
|
e4fc518803129e6b11e0bfa0587ff450c2577ff9
|
[
"MIT"
] | 34
|
2017-08-15T11:03:01.000Z
|
2020-12-24T09:35:58.000Z
|
pi = 3.14159
circle_radius = 10.2
circle_perimeter = 2 * pi * circle_radius
circle_area = pi * circle_radius * circle_radius
print "Let there be a circle with the radius", circle_radius, "cm."
print "Then the perimeter of the circle is", circle_perimeter, "cm."
print "The area of the circle is", circle_area, "cm squared."
| 40.5
| 68
| 0.746914
| 54
| 324
| 4.314815
| 0.388889
| 0.257511
| 0.120172
| 0.171674
| 0.16309
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03663
| 0.157407
| 324
| 7
| 69
| 46.285714
| 0.81685
| 0
| 0
| 0
| 0
| 0
| 0.351852
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.428571
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
764e4066acdde0621e1d9a2228dfec35d8821d7e
| 104
|
py
|
Python
|
GNN_PRP/prp_3_21/adgcl/transfer/learning/__init__.py
|
frankling2020/Self-learn-Repo
|
294df18469d6d4ef6d479b1b533f42445cd01ac1
|
[
"MIT"
] | 42
|
2021-06-30T21:05:28.000Z
|
2022-03-28T09:23:57.000Z
|
GNN_PRP/prp_3_21/adgcl/transfer/learning/__init__.py
|
frankling2020/Self-learn-Repo
|
294df18469d6d4ef6d479b1b533f42445cd01ac1
|
[
"MIT"
] | 3
|
2021-11-04T02:49:41.000Z
|
2021-12-29T08:41:15.000Z
|
GNN_PRP/prp_3_21/adgcl/transfer/learning/__init__.py
|
frankling2020/Self-learn-Repo
|
294df18469d6d4ef6d479b1b533f42445cd01ac1
|
[
"MIT"
] | 3
|
2022-01-25T16:24:17.000Z
|
2022-03-24T13:45:57.000Z
|
from .ginfominmax import GInfoMinMax
from .gsimclr import GSimCLR
from .view_learner import ViewLearner
| 26
| 37
| 0.855769
| 13
| 104
| 6.769231
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 104
| 3
| 38
| 34.666667
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7656565a26e623ba2382ccc1c234925c74071472
| 636
|
py
|
Python
|
binary_tree/bt_problems.py
|
iamroy/ds_done_right
|
e7d504a6b593dc3446c433ab3e15a762b84bb86a
|
[
"MIT"
] | null | null | null |
binary_tree/bt_problems.py
|
iamroy/ds_done_right
|
e7d504a6b593dc3446c433ab3e15a762b84bb86a
|
[
"MIT"
] | null | null | null |
binary_tree/bt_problems.py
|
iamroy/ds_done_right
|
e7d504a6b593dc3446c433ab3e15a762b84bb86a
|
[
"MIT"
] | null | null | null |
# Print bottom view of a binary tree
# Print top view of a binary tree
# Find distance between given pairs of nodes in a binary tree
# Find the diagonal sum of a binary tree
# Find maximum sum root to leaf path in a binary tree
#543. Diameter of Binary Tree
#226. Invert Binary Tree
#257. Binary Tree Paths
#783. Minimum Distance Between BST Nodes
#897. Increasing Order Search Tree
#513. Find Bottom Left Tree Value
#1448. Count Good Nodes in Binary Tree
#1161. Maximum Level Sum of a Binary Tree
from binary_tree.bt_node import TreeNode
from binary_tree.binary_tree import Binary_Tree
def increasing_BST(self, root):
    pass
| 24.461538
| 61
| 0.772013
| 110
| 636
| 4.409091
| 0.472727
| 0.28866
| 0.136082
| 0.107216
| 0.152577
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050193
| 0.185535
| 636
| 25
| 62
| 25.44
| 0.8861
| 0.740566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
7663f3a86022b24f5b949f8be6517907516a2ea5
| 22
|
py
|
Python
|
bulkops/tasks/__init__.py
|
princenyeche/BOP
|
ac2a894deb88fe28cf418e5475289fb27b5fd186
|
[
"MIT"
] | 2
|
2022-02-05T09:03:26.000Z
|
2022-03-01T06:57:24.000Z
|
bulkops/tasks/__init__.py
|
princenyeche/BOP
|
ac2a894deb88fe28cf418e5475289fb27b5fd186
|
[
"MIT"
] | 49
|
2020-08-09T06:04:14.000Z
|
2022-03-16T20:01:00.000Z
|
bulkops/tasks/__init__.py
|
princenyeche/BOP
|
ac2a894deb88fe28cf418e5475289fb27b5fd186
|
[
"MIT"
] | null | null | null |
# initial file commit
| 11
| 21
| 0.772727
| 3
| 22
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.944444
| 0.863636
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
769b2f3af1691757d18134724026bcc762ec963e
| 91
|
py
|
Python
|
python-pscheduler/pscheduler/pscheduler/batchprocessor/__init__.py
|
krihal/pscheduler
|
e69e0357797d88d290c78b92b1d99048e73a63e8
|
[
"Apache-2.0"
] | 47
|
2016-09-28T14:19:10.000Z
|
2022-03-21T13:26:47.000Z
|
python-pscheduler/pscheduler/pscheduler/batchprocessor/__init__.py
|
krihal/pscheduler
|
e69e0357797d88d290c78b92b1d99048e73a63e8
|
[
"Apache-2.0"
] | 993
|
2016-07-07T19:30:32.000Z
|
2022-03-21T10:25:52.000Z
|
python-pscheduler/pscheduler/pscheduler/batchprocessor/__init__.py
|
mfeit-internet2/pscheduler-dev
|
d2cd4065a6fce88628b0ca63edc7a69f2672dad2
|
[
"Apache-2.0"
] | 36
|
2016-09-15T09:39:45.000Z
|
2021-06-23T15:05:13.000Z
|
#
# Initialization for pScheduler Batch Processor Package
#
from .batchprocessor import *
| 15.166667
| 55
| 0.791209
| 9
| 91
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 91
| 5
| 56
| 18.2
| 0.935065
| 0.582418
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
76b7bbc499aae796781c2bd017d50c45217c3691
| 33
|
py
|
Python
|
Chapter 02 - Input, Processing, and Output/Book Exercises/quotation.py
|
EllisBarnes00/COP-1000
|
8509e59e8a566c77295c714ddcb0f557c470358b
|
[
"Unlicense"
] | null | null | null |
Chapter 02 - Input, Processing, and Output/Book Exercises/quotation.py
|
EllisBarnes00/COP-1000
|
8509e59e8a566c77295c714ddcb0f557c470358b
|
[
"Unlicense"
] | 1
|
2021-06-07T03:55:29.000Z
|
2021-06-07T03:56:47.000Z
|
Chapter 02 - Input, Processing, and Output/Book Exercises/quotation.py
|
EllisBarnes00/COP-1000
|
8509e59e8a566c77295c714ddcb0f557c470358b
|
[
"Unlicense"
] | null | null | null |
print("""The cat said "meow" """)
| 33
| 33
| 0.575758
| 5
| 33
| 3.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.655172
| 0
| 0
| 0
| 0
| 0
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
4f2d36698b917acd15e0f4b487f87ade7ff344d3
| 142
|
py
|
Python
|
src/auth/apps.py
|
SerhatTeker/django-bank-allauth-rest
|
c0392a139521686b2cc882edd190b8137de5c36d
|
[
"BSD-3-Clause"
] | null | null | null |
src/auth/apps.py
|
SerhatTeker/django-bank-allauth-rest
|
c0392a139521686b2cc882edd190b8137de5c36d
|
[
"BSD-3-Clause"
] | 5
|
2020-03-19T16:39:01.000Z
|
2022-02-10T09:10:52.000Z
|
src/auth/apps.py
|
SerhatTeker/django-bank-allauth-rest
|
c0392a139521686b2cc882edd190b8137de5c36d
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import AppConfig
class SrcAppConfig(AppConfig):
    label = "src_auth"
    name = "src.auth"
    verbose_name = "Src Auth"
| 17.75
| 33
| 0.690141
| 18
| 142
| 5.333333
| 0.666667
| 0.21875
| 0.229167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211268
| 142
| 7
| 34
| 20.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.169014
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
4f304ec7f7e962e130056f4c0b3bc21e4da59c96
| 1,846
|
py
|
Python
|
tests/regressiontests/utils/timesince.py
|
hugs/django
|
3690ab217e3a65d01bd2f9d25f05fb2e54815693
|
[
"BSD-3-Clause"
] | 2
|
2015-12-04T12:05:26.000Z
|
2016-05-08T11:26:55.000Z
|
tests/regressiontests/utils/timesince.py
|
hugs/django
|
3690ab217e3a65d01bd2f9d25f05fb2e54815693
|
[
"BSD-3-Clause"
] | null | null | null |
tests/regressiontests/utils/timesince.py
|
hugs/django
|
3690ab217e3a65d01bd2f9d25f05fb2e54815693
|
[
"BSD-3-Clause"
] | 1
|
2015-11-19T14:45:16.000Z
|
2015-11-19T14:45:16.000Z
|
"""
>>> from datetime import datetime, timedelta
>>> from django.utils.timesince import timesince
>>> t = datetime(2007, 8, 14, 13, 46, 0)
>>> onemicrosecond = timedelta(microseconds=1)
>>> onesecond = timedelta(seconds=1)
>>> oneminute = timedelta(minutes=1)
>>> onehour = timedelta(hours=1)
>>> oneday = timedelta(days=1)
>>> oneweek = timedelta(days=7)
>>> onemonth = timedelta(days=30)
>>> oneyear = timedelta(days=365)
# equal datetimes.
>>> timesince(t, t)
u'0 minutes'
# Microseconds and seconds are ignored.
>>> timesince(t, t+onemicrosecond)
u'0 minutes'
>>> timesince(t, t+onesecond)
u'0 minutes'
# Test other units.
>>> timesince(t, t+oneminute)
u'1 minute'
>>> timesince(t, t+onehour)
u'1 hour'
>>> timesince(t, t+oneday)
u'1 day'
>>> timesince(t, t+oneweek)
u'1 week'
>>> timesince(t, t+onemonth)
u'1 month'
>>> timesince(t, t+oneyear)
u'1 year'
# Test multiple units.
>>> timesince(t, t+2*oneday+6*onehour)
u'2 days, 6 hours'
>>> timesince(t, t+2*oneweek+2*oneday)
u'2 weeks, 2 days'
# If the two differing units aren't adjacent, only the first unit is displayed.
>>> timesince(t, t+2*oneweek+3*onehour+4*oneminute)
u'2 weeks'
>>> timesince(t, t+4*oneday+5*oneminute)
u'4 days'
# When the second date occurs before the first, we should always get 0 minutes.
>>> timesince(t, t-onemicrosecond)
u'0 minutes'
>>> timesince(t, t-onesecond)
u'0 minutes'
>>> timesince(t, t-oneminute)
u'0 minutes'
>>> timesince(t, t-onehour)
u'0 minutes'
>>> timesince(t, t-oneday)
u'0 minutes'
>>> timesince(t, t-oneweek)
u'0 minutes'
>>> timesince(t, t-onemonth)
u'0 minutes'
>>> timesince(t, t-oneyear)
u'0 minutes'
>>> timesince(t, t-2*oneday-6*onehour)
u'0 minutes'
>>> timesince(t, t-2*oneweek-2*oneday)
u'0 minutes'
>>> timesince(t, t-2*oneweek-3*onehour-4*oneminute)
u'0 minutes'
>>> timesince(t, t-4*oneday-5*oneminute)
u'0 minutes'
"""
| 23.666667
| 79
| 0.681473
| 298
| 1,846
| 4.221477
| 0.244966
| 0.206677
| 0.218601
| 0.18601
| 0.540541
| 0.424483
| 0.383943
| 0.305246
| 0.160572
| 0.160572
| 0
| 0.042884
| 0.128386
| 1,846
| 77
| 80
| 23.974026
| 0.738968
| 0.995125
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4f41add4a29cfd368ed1e677552d2c6424d92334
| 182
|
py
|
Python
|
sample_app/xyz/abc1.py
|
harobed/pazel
|
7109fe565aa50d15ec6de1b6f0bae5ac06a28a3a
|
[
"MIT"
] | 41
|
2018-04-30T14:09:29.000Z
|
2022-03-09T10:19:46.000Z
|
sample_app/xyz/abc1.py
|
harobed/pazel
|
7109fe565aa50d15ec6de1b6f0bae5ac06a28a3a
|
[
"MIT"
] | 3
|
2018-08-09T07:47:21.000Z
|
2019-07-25T01:06:56.000Z
|
sample_app/xyz/abc1.py
|
harobed/pazel
|
7109fe565aa50d15ec6de1b6f0bae5ac06a28a3a
|
[
"MIT"
] | 9
|
2018-09-14T21:32:27.000Z
|
2021-07-06T11:17:14.000Z
|
from foo import sample # Import from foo's public interface.
from foo import foo # Import a module with the same name as the package.
def main():
    print(sample())
main()
| 18.2
| 76
| 0.692308
| 29
| 182
| 4.344828
| 0.62069
| 0.166667
| 0.206349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 182
| 9
| 77
| 20.222222
| 0.9
| 0.472527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4f693949895326915a9912ac58d08bff7b7a3847
| 59
|
py
|
Python
|
util.py
|
Spferical/Conway-s-Game-of-Tron
|
12b94f8e7d8afc4c08c1711a61b97b0b1c8e241b
|
[
"MIT"
] | null | null | null |
util.py
|
Spferical/Conway-s-Game-of-Tron
|
12b94f8e7d8afc4c08c1711a61b97b0b1c8e241b
|
[
"MIT"
] | null | null | null |
util.py
|
Spferical/Conway-s-Game-of-Tron
|
12b94f8e7d8afc4c08c1711a61b97b0b1c8e241b
|
[
"MIT"
] | null | null | null |
def most_common(lst):
    return max(lst, key=lst.count)
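A quick illustration of most_common; note that ties resolve to whichever element max() encounters first:
assert most_common([1, 2, 2, 3]) == 2
assert most_common(list("banana")) == "a"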
| 11.8
| 34
| 0.677966
| 10
| 59
| 3.9
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186441
| 59
| 4
| 35
| 14.75
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
96c4c9da446c10c47f89049f4bb683092307e515
| 241
|
py
|
Python
|
pylab/devices/generic/time_device.py
|
LukeSkywalker92/pylab
|
41df6546a167187e6f39bfdfbdf9fc2ec9ac0d88
|
[
"MIT"
] | 1
|
2020-07-15T14:00:24.000Z
|
2020-07-15T14:00:24.000Z
|
pylab/devices/generic/time_device.py
|
LukeSkywalker92/pylab
|
41df6546a167187e6f39bfdfbdf9fc2ec9ac0d88
|
[
"MIT"
] | 1
|
2020-02-06T17:43:46.000Z
|
2020-02-12T15:06:37.000Z
|
pylab/devices/generic/time_device.py
|
LukeSkywalker92/pylab
|
41df6546a167187e6f39bfdfbdf9fc2ec9ac0d88
|
[
"MIT"
] | null | null | null |
import time
class TimeDevice:
    def __init__(self):
        self.start_time = time.time()
    def reset_start_time(self):
        self.start_time = time.time()
    def elapsed_time(self):
        return time.time() - self.start_time
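A short usage sketch (the sleep duration is illustrative):
timer = TimeDevice()
time.sleep(0.1)
print(timer.elapsed_time())  # roughly 0.1 seconds
timer.reset_start_time()
print(timer.elapsed_time())  # close to zero again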
| 18.538462
| 44
| 0.639004
| 32
| 241
| 4.5
| 0.34375
| 0.277778
| 0.270833
| 0.236111
| 0.388889
| 0.388889
| 0.388889
| 0
| 0
| 0
| 0
| 0
| 0.248963
| 241
| 12
| 45
| 20.083333
| 0.79558
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| false
| 0
| 0.125
| 0.125
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
96d157b3ed82e2a573bdcca213623a38bcf98d83
| 20
|
py
|
Python
|
Computational_essay/test.py
|
henrik-uio/FYS2130
|
533089e0f1a00c115c63a6d8485acdb451da5038
|
[
"MIT"
] | null | null | null |
Computational_essay/test.py
|
henrik-uio/FYS2130
|
533089e0f1a00c115c63a6d8485acdb451da5038
|
[
"MIT"
] | null | null | null |
Computational_essay/test.py
|
henrik-uio/FYS2130
|
533089e0f1a00c115c63a6d8485acdb451da5038
|
[
"MIT"
] | null | null | null |
import cock
| 10
| 19
| 0.8
| 4
| 20
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8c160039e941b5d103887bd71931ddd9b11da97a
| 40,300
|
py
|
Python
|
xenonpy/descriptor/fingerprint.py
|
mori0711/XenonPy
|
e36ca0ea112b45ee629cd980c88e80cd6c96c514
|
[
"BSD-3-Clause"
] | 93
|
2018-02-11T23:43:47.000Z
|
2022-03-11T02:40:11.000Z
|
xenonpy/descriptor/fingerprint.py
|
mori0711/XenonPy
|
e36ca0ea112b45ee629cd980c88e80cd6c96c514
|
[
"BSD-3-Clause"
] | 192
|
2018-04-20T04:32:12.000Z
|
2022-03-24T05:59:18.000Z
|
xenonpy/descriptor/fingerprint.py
|
mori0711/XenonPy
|
e36ca0ea112b45ee629cd980c88e80cd6c96c514
|
[
"BSD-3-Clause"
] | 51
|
2018-01-18T08:08:55.000Z
|
2022-03-01T05:52:22.000Z
|
# Copyright (c) 2021. yoshida-lab. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import numpy as np
from rdkit import Chem
from rdkit.Chem import Descriptors as ChemDesc
from rdkit.Chem import MACCSkeys as MAC
from rdkit.Chem import rdMolDescriptors as rdMol
from rdkit.Chem import rdmolops as rdm
from rdkit.Chem.rdMHFPFingerprint import MHFPEncoder
from rdkit.ML.Descriptors import MoleculeDescriptors
from scipy.sparse import coo_matrix
from xenonpy.descriptor.base import BaseDescriptor, BaseFeaturizer
__all__ = ['RDKitFP', 'AtomPairFP', 'TopologicalTorsionFP', 'MACCS', 'FCFP', 'ECFP', 'PatternFP', 'LayeredFP',
'MHFP', 'DescriptorFeature', 'Fingerprints']
def count_fp(fp, dim=2**10):
tmp = fp.GetNonzeroElements()
return coo_matrix((list(tmp.values()), (np.repeat(0, len(tmp)), [i % dim for i in tmp.keys()])),
shape=(1, dim)).toarray().flatten()
class RDKitFP(BaseFeaturizer):
def __init__(self, n_jobs=-1, *, n_bits=2048, bit_per_entry=None, counting=False,
input_type='mol', on_errors='raise', return_type='any', target_col=None):
"""
RDKit fingerprint.
Parameters
----------
n_jobs: int
The number of jobs to run in parallel for both fit and predict.
Can be -1 or # of cpus. Set -1 to use all cpu cores (default).
n_bits: int
Fingerprint size.
bit_per_entry: int
Number of bits used to represent a single entry (only for non-counting case).
Default value follows rdkit default.
counting: boolean
Record counts of the entries instead of bits only.
input_type: string
Set the specific type of transform input.
Set to ``mol`` (default) to ``rdkit.Chem.rdchem.Mol`` objects as input.
When set to ``smiles``, the ``transform`` method can use a SMILES list as input.
Set to ``any`` to use both.
If the input is SMILES, the ``Chem.MolFromSmiles`` function will be used inside;
for ``None`` returns, a ``ValueError`` exception will be raised.
on_errors: string
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise', which re-raises the exception.
target_col
Only relevant when the input is a pd.DataFrame, otherwise ignored.
Specify a single column to be used for transformation.
If ``None``, all columns of the pd.DataFrame are used.
Default is None.
"""
super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type, target_col=target_col)
self.input_type = input_type
self.n_bits = n_bits
if bit_per_entry is None:
self.bit_per_entry = 2
else:
self.bit_per_entry = bit_per_entry
self.counting = counting
self.__authors__ = ['Stephen Wu', 'TsumiNa']
def featurize(self, x):
if self.input_type == 'smiles':
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.input_type == 'any':
if not isinstance(x, Chem.rdchem.Mol):
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.counting:
return count_fp(rdm.UnfoldedRDKFingerprintCountBased(x), dim=self.n_bits)
else:
return list(Chem.RDKFingerprint(x, fpSize=self.n_bits, nBitsPerHash=self.bit_per_entry))
@property
def feature_labels(self):
if self.counting:
return ["rdkit_c:" + str(i) for i in range(self.n_bits)]
else:
return ["rdkit:" + str(i) for i in range(self.n_bits)]
class AtomPairFP(BaseFeaturizer):
def __init__(self, n_jobs=-1, *, n_bits=2048, bit_per_entry=None, counting=False,
input_type='mol', on_errors='raise', return_type='any', target_col=None):
"""
Atom Pair fingerprints.
Returns the atom-pair fingerprint for a molecule. The algorithm used is described here:
R.E. Carhart, D.H. Smith, R. Venkataraghavan;
"Atom Pairs as Molecular Features in Structure-Activity Studies: Definition and Applications"
JCICS 25, 64-73 (1985).
This is currently just in binary bits with fixed length after folding.
Parameters
----------
n_jobs: int
The number of jobs to run in parallel for both fit and predict.
Can be -1 or # of cpus. Set -1 to use all cpu cores (default).
n_bits: int
Fixed bit length based on folding.
bit_per_entry: int
Number of bits used to represent a single entry (only for non-counting case).
Default value follows rdkit default.
counting: boolean
Record counts of the entries instead of bits only.
input_type: string
Set the specific type of transform input.
Set to ``mol`` (default) to ``rdkit.Chem.rdchem.Mol`` objects as input.
When set to ``smiles``, the ``transform`` method can use a SMILES list as input.
Set to ``any`` to use both.
If the input is SMILES, the ``Chem.MolFromSmiles`` function will be used inside;
for ``None`` returns, a ``ValueError`` exception will be raised.
on_errors: string
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise', which re-raises the exception.
target_col
Only relevant when the input is a pd.DataFrame, otherwise ignored.
Specify a single column to be used for transformation.
If ``None``, all columns of the pd.DataFrame are used.
Default is None.
"""
super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type, target_col=target_col)
self.input_type = input_type
self.n_bits = n_bits
if bit_per_entry is None:
self.bit_per_entry = 4
else:
self.bit_per_entry = bit_per_entry
self.counting = counting
self.__authors__ = ['Stephen Wu', 'TsumiNa']
def featurize(self, x):
if self.input_type == 'smiles':
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.input_type == 'any':
if not isinstance(x, Chem.rdchem.Mol):
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.counting:
return count_fp(rdMol.GetHashedAtomPairFingerprint(x, nBits=self.n_bits), dim=self.n_bits)
else:
return list(rdMol.GetHashedAtomPairFingerprintAsBitVect(x, nBits=self.n_bits,
nBitsPerEntry=self.bit_per_entry))
@property
def feature_labels(self):
if self.counting:
return ['apfp_c:' + str(i) for i in range(self.n_bits)]
else:
return ['apfp:' + str(i) for i in range(self.n_bits)]
class TopologicalTorsionFP(BaseFeaturizer):
def __init__(self, n_jobs=-1, *, n_bits=2048, bit_per_entry=None, counting=False,
input_type='mol', on_errors='raise', return_type='any', target_col=None):
"""
Topological Torsion fingerprints.
Returns the topological-torsion fingerprint for a molecule.
This is currently just in binary bits with fixed length after folding.
Parameters
----------
n_jobs: int
The number of jobs to run in parallel for both fit and predict.
Can be -1 or # of cpus. Set -1 to use all cpu cores (default).
n_bits: int
Fixed bit length based on folding.
bit_per_entry: int
Number of bits used to represent a single entry (only for non-counting case).
Default value follows rdkit default.
counting: boolean
Record counts of the entries instead of bits only.
input_type: string
Set the specific type of transform input.
Set to ``mol`` (default) to ``rdkit.Chem.rdchem.Mol`` objects as input.
When set to ``smiles``, the ``transform`` method can use a SMILES list as input.
Set to ``any`` to use both.
If the input is SMILES, the ``Chem.MolFromSmiles`` function will be used inside;
for ``None`` returns, a ``ValueError`` exception will be raised.
on_errors: string
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise', which re-raises the exception.
target_col
Only relevant when the input is a pd.DataFrame, otherwise ignored.
Specify a single column to be used for transformation.
If ``None``, all columns of the pd.DataFrame are used.
Default is None.
"""
super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type, target_col=target_col)
self.input_type = input_type
self.n_bits = n_bits
if bit_per_entry is None:
self.bit_per_entry = 4
else:
self.bit_per_entry = bit_per_entry
self.counting = counting
self.__authors__ = ['Stephen Wu', 'TsumiNa']
def featurize(self, x):
if self.input_type == 'smiles':
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.input_type == 'any':
if not isinstance(x, Chem.rdchem.Mol):
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.counting:
return count_fp(rdMol.GetHashedTopologicalTorsionFingerprint(x, nBits=self.n_bits), dim=self.n_bits)
else:
return list(rdMol.GetHashedTopologicalTorsionFingerprintAsBitVect(x, nBits=self.n_bits,
nBitsPerEntry=self.bit_per_entry))
@property
def feature_labels(self):
if self.counting:
return ['ttfp_c:' + str(i) for i in range(self.n_bits)]
else:
return ['ttfp:' + str(i) for i in range(self.n_bits)]
class MACCS(BaseFeaturizer):
def __init__(self, n_jobs=-1,
*, input_type='mol', on_errors='raise', return_type='any', target_col=None):
"""
The MACCS keys for a molecule. The result is a 167-bit vector. There are 166 public keys,
but to maintain consistency with other software packages they are numbered from 1.
Parameters
----------
n_jobs: int
The number of jobs to run in parallel for both fit and predict.
Can be -1 or # of cpus. Set -1 to use all cpu cores (default).
input_type: string
Set the specific type of transform input.
Set to ``mol`` (default) to ``rdkit.Chem.rdchem.Mol`` objects as input.
When set to ``smiles``, the ``transform`` method can use a SMILES list as input.
Set to ``any`` to use both.
If the input is SMILES, the ``Chem.MolFromSmiles`` function will be used inside;
for ``None`` returns, a ``ValueError`` exception will be raised.
on_errors: string
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise', which re-raises the exception.
target_col
Only relevant when the input is a pd.DataFrame, otherwise ignored.
Specify a single column to be used for transformation.
If ``None``, all columns of the pd.DataFrame are used.
Default is None.
"""
super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type, target_col=target_col)
self.input_type = input_type
self.__authors__ = ['Stephen Wu', 'TsumiNa']
def featurize(self, x):
if self.input_type == 'smiles':
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.input_type == 'any':
if not isinstance(x, Chem.rdchem.Mol):
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
return list(MAC.GenMACCSKeys(x))
@property
def feature_labels(self):
return ['maccs:' + str(i) for i in range(167)]
class FCFP(BaseFeaturizer):
def __init__(self, n_jobs=-1, *, radius=3, n_bits=2048, counting=False,
input_type='mol', on_errors='raise', return_type='any', target_col=None):
"""
Morgan (Circular) fingerprints + feature-based (FCFP)
The algorithm used is described in the paper Rogers, D. & Hahn, M. Extended-Connectivity Fingerprints.
JCIM 50:742-54 (2010)
Parameters
----------
n_jobs: int
The number of jobs to run in parallel for both fit and predict.
Can be -1 or # of cpus. Set -1 to use all cpu cores (default).
radius: int
The radius parameter in the Morgan fingerprints, which is roughly half of the diameter parameter in FCFP,
i.e., radius=2 is roughly equivalent to FCFP4.
n_bits: int
Fixed bit length based on folding.
counting: boolean
Record counts of the entries instead of bits only.
input_type: string
Set the specific type of transform input.
Set to ``mol`` (default) to ``rdkit.Chem.rdchem.Mol`` objects as input.
When set to ``smiles``, the ``transform`` method can use a SMILES list as input.
Set to ``any`` to use both.
If the input is SMILES, the ``Chem.MolFromSmiles`` function will be used inside;
for ``None`` returns, a ``ValueError`` exception will be raised.
on_errors: string
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise', which re-raises the exception.
target_col
Only relevant when the input is a pd.DataFrame, otherwise ignored.
Specify a single column to be used for transformation.
If ``None``, all columns of the pd.DataFrame are used.
Default is None.
"""
super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type, target_col=target_col)
self.input_type = input_type
self.radius = radius
self.n_bits = n_bits
self.counting = counting
self.__authors__ = ['Stephen Wu', 'TsumiNa']
# self.arg = arg # arg[0] = radius, arg[1] = bit length
def featurize(self, x):
if self.input_type == 'smiles':
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.input_type == 'any':
if not isinstance(x, Chem.rdchem.Mol):
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.counting:
return count_fp(rdMol.GetHashedMorganFingerprint(
x, radius=self.radius, nBits=self.n_bits, useFeatures=True), dim=self.n_bits)
else:
return list(rdMol.GetMorganFingerprintAsBitVect(
x, radius=self.radius, nBits=self.n_bits, useFeatures=True))
@property
def feature_labels(self):
if self.counting:
return [f'fcfp{self.radius * 2}_c:' + str(i) for i in range(self.n_bits)]
else:
return [f'fcfp{self.radius * 2}:' + str(i) for i in range(self.n_bits)]
class ECFP(BaseFeaturizer):
def __init__(self, n_jobs=-1, *, radius=3, n_bits=2048, counting=False,
input_type='mol', on_errors='raise', return_type='any', target_col=None):
"""
Morgan (Circular) fingerprints (ECFP)
The algorithm used is described in the paper Rogers, D. & Hahn, M. Extended-Connectivity Fingerprints.
JCIM 50:742-54 (2010)
Parameters
----------
n_jobs: int
The number of jobs to run in parallel for both fit and predict.
Can be -1 or # of cpus. Set -1 to use all cpu cores (default).
radius: int
The radius parameter in the Morgan fingerprints, which is roughly half of the diameter parameter in ECFP,
i.e., radius=2 is roughly equivalent to ECFP4.
n_bits: int
Fixed bit length based on folding.
counting: boolean
Record counts of the entries instead of bits only.
input_type: string
Set the specific type of transform input.
Set to ``mol`` (default) to ``rdkit.Chem.rdchem.Mol`` objects as input.
When set to ``smiles``, the ``transform`` method can use a SMILES list as input.
Set to ``any`` to use both.
If the input is SMILES, the ``Chem.MolFromSmiles`` function will be used inside;
for ``None`` returns, a ``ValueError`` exception will be raised.
on_errors: string
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise', which re-raises the exception.
target_col
Only relevant when the input is a pd.DataFrame, otherwise ignored.
Specify a single column to be used for transformation.
If ``None``, all columns of the pd.DataFrame are used.
Default is None.
"""
super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type, target_col=target_col)
self.input_type = input_type
self.radius = radius
self.n_bits = n_bits
self.counting = counting
self.__authors__ = ['Stephen Wu', 'TsumiNa']
# self.arg = arg # arg[0] = radius, arg[1] = bit length
def featurize(self, x):
if self.input_type == 'smiles':
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.input_type == 'any':
if not isinstance(x, Chem.rdchem.Mol):
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.counting:
return count_fp(rdMol.GetHashedMorganFingerprint(x, radius=self.radius,
nBits=self.n_bits), dim=self.n_bits)
else:
return list(rdMol.GetMorganFingerprintAsBitVect(x, radius=self.radius, nBits=self.n_bits))
@property
def feature_labels(self):
if self.counting:
return [f'ecfp{self.radius * 2}_c:' + str(i) for i in range(self.n_bits)]
else:
return [f'ecfp{self.radius * 2}:' + str(i) for i in range(self.n_bits)]
class PatternFP(BaseFeaturizer):
def __init__(self, n_jobs=-1, *, n_bits=2048,
input_type='mol', on_errors='raise', return_type='any', target_col=None):
"""
A fingerprint designed to be used in substructure screening using SMARTS patterns (unique in RDKit).
Parameters
----------
n_jobs: int
The number of jobs to run in parallel for both fit and predict.
Can be -1 or # of cpus. Set -1 to use all cpu cores (default).
n_bits: int
Fixed bit length based on folding.
input_type: string
Set the specific type of transform input.
Set to ``mol`` (default) to ``rdkit.Chem.rdchem.Mol`` objects as input.
When set to ``smiles``, the ``transform`` method can use a SMILES list as input.
Set to ``any`` to use both.
If the input is SMILES, the ``Chem.MolFromSmiles`` function will be used inside;
for ``None`` returns, a ``ValueError`` exception will be raised.
on_errors: string
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise', which re-raises the exception.
target_col
Only relevant when the input is a pd.DataFrame, otherwise ignored.
Specify a single column to be used for transformation.
If ``None``, all columns of the pd.DataFrame are used.
Default is None.
"""
super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type, target_col=target_col)
self.input_type = input_type
self.n_bits = n_bits
self.__authors__ = ['Stephen Wu', 'TsumiNa']
def featurize(self, x):
if self.input_type == 'smiles':
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.input_type == 'any':
if not isinstance(x, Chem.rdchem.Mol):
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
return list(rdm.PatternFingerprint(x, fpSize=self.n_bits))
@property
def feature_labels(self):
return ['patfp:' + str(i) for i in range(self.n_bits)]
class LayeredFP(BaseFeaturizer):
def __init__(self, n_jobs=-1, *, n_bits=2048,
input_type='mol', on_errors='raise', return_type='any', target_col=None):
"""
A substructure fingerprint that is more complex than PatternFP (unique in RDKit).
Parameters
----------
n_jobs: int
The number of jobs to run in parallel for both fit and predict.
Can be -1 or # of cpus. Set -1 to use all cpu cores (default).
n_bits: int
Fixed bit length based on folding.
input_type: string
Set the specific type of transform input.
Set to ``mol`` (default) to ``rdkit.Chem.rdchem.Mol`` objects as input.
When set to ``smiles``, the ``transform`` method can use a SMILES list as input.
Set to ``any`` to use both.
If the input is SMILES, the ``Chem.MolFromSmiles`` function will be used inside;
for ``None`` returns, a ``ValueError`` exception will be raised.
on_errors: string
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise', which re-raises the exception.
target_col
Only relevant when the input is a pd.DataFrame, otherwise ignored.
Specify a single column to be used for transformation.
If ``None``, all columns of the pd.DataFrame are used.
Default is None.
"""
super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type, target_col=target_col)
self.input_type = input_type
self.n_bits = n_bits
self.__authors__ = ['Stephen Wu', 'TsumiNa']
def featurize(self, x):
if self.input_type == 'smiles':
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.input_type == 'any':
if not isinstance(x, Chem.rdchem.Mol):
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
return list(rdm.LayeredFingerprint(x, fpSize=self.n_bits))
@property
def feature_labels(self):
return ['layfp:' + str(i) for i in range(self.n_bits)]
class MHFP(BaseFeaturizer):
def __init__(self, n_jobs=1, *, radius=3, n_bits=2048,
input_type='mol', on_errors='raise', return_type='any', target_col=None):
"""
A variation of the MinHash fingerprint, which is based on ECFP with
locality sensitive hashing to increase compactness of information during hashing.
The algorithm used is described in the paper
Probst, D. & Reymond, J.-L., A probabilistic molecular fingerprint for big data settings.
Journal of Cheminformatics, 10:66 (2018)
Note that MHFP currently does not support parallel computing, so n_jobs should be fixed to 1.
Parameters
----------
n_jobs: int
The number of jobs to run in parallel for both fit and predict.
Can be -1 or # of cpus. Set -1 to use all cpu cores (default).
radius: int
The radius parameter in the SECFP(RDKit version) fingerprints,
which is roughly half of the diameter parameter in ECFP,
i.e., radius=2 is roughly equivalent to ECFP4.
n_bits: int
Fixed bit length based on folding.
input_type: string
Set the specific type of transform input.
Set to ``mol`` (default) to ``rdkit.Chem.rdchem.Mol`` objects as input.
When set to ``smiles``, the ``transform`` method can use a SMILES list as input.
Set to ``any`` to use both.
If the input is SMILES, the ``Chem.MolFromSmiles`` function will be used inside;
for ``None`` returns, a ``ValueError`` exception will be raised.
on_errors: string
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise', which re-raises the exception.
target_col
Only relevant when the input is a pd.DataFrame, otherwise ignored.
Specify a single column to be used for transformation.
If ``None``, all columns of the pd.DataFrame are used.
Default is None.
"""
super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type, target_col=target_col)
self.input_type = input_type
self.radius = radius
self.n_bits = n_bits
self.mhfp = MHFPEncoder()
self.__authors__ = ['Stephen Wu', 'TsumiNa']
def featurize(self, x):
if self.input_type == 'smiles':
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.input_type == 'any':
if not isinstance(x, Chem.rdchem.Mol):
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
return list(self.mhfp.EncodeSECFPMol(x, radius=self.radius, length=self.n_bits))
@property
def feature_labels(self):
return [f'secfp{self.radius * 2}:' + str(i) for i in range(self.n_bits)]
class DescriptorFeature(BaseFeaturizer):
classic = ['MaxEStateIndex', 'MinEStateIndex', 'MaxAbsEStateIndex', 'MinAbsEStateIndex', 'qed', 'MolWt',
'HeavyAtomMolWt', 'ExactMolWt', 'NumValenceElectrons', 'NumRadicalElectrons', 'MaxPartialCharge',
'MinPartialCharge', 'MaxAbsPartialCharge', 'MinAbsPartialCharge', 'FpDensityMorgan1', 'FpDensityMorgan2',
'FpDensityMorgan3', 'BalabanJ', 'BertzCT', 'Chi0', 'Chi0n', 'Chi0v', 'Chi1', 'Chi1n', 'Chi1v', 'Chi2n',
'Chi2v', 'Chi3n', 'Chi3v', 'Chi4n', 'Chi4v', 'HallKierAlpha', 'Ipc', 'Kappa1', 'Kappa2', 'Kappa3',
'LabuteASA', 'PEOE_VSA1', 'PEOE_VSA10', 'PEOE_VSA11', 'PEOE_VSA12', 'PEOE_VSA13', 'PEOE_VSA14',
'PEOE_VSA2', 'PEOE_VSA3', 'PEOE_VSA4', 'PEOE_VSA5', 'PEOE_VSA6', 'PEOE_VSA7', 'PEOE_VSA8', 'PEOE_VSA9',
'SMR_VSA1', 'SMR_VSA10', 'SMR_VSA2', 'SMR_VSA3', 'SMR_VSA4', 'SMR_VSA5', 'SMR_VSA6', 'SMR_VSA7',
'SMR_VSA8', 'SMR_VSA9', 'SlogP_VSA1', 'SlogP_VSA10', 'SlogP_VSA11', 'SlogP_VSA12', 'SlogP_VSA2',
'SlogP_VSA3', 'SlogP_VSA4', 'SlogP_VSA5', 'SlogP_VSA6', 'SlogP_VSA7', 'SlogP_VSA8', 'SlogP_VSA9', 'TPSA',
'EState_VSA1', 'EState_VSA10', 'EState_VSA11', 'EState_VSA2', 'EState_VSA3', 'EState_VSA4',
'EState_VSA5', 'EState_VSA6', 'EState_VSA7', 'EState_VSA8', 'EState_VSA9', 'VSA_EState1', 'VSA_EState10',
'VSA_EState2', 'VSA_EState3', 'VSA_EState4', 'VSA_EState5', 'VSA_EState6', 'VSA_EState7', 'VSA_EState8',
'VSA_EState9', 'FractionCSP3', 'HeavyAtomCount', 'NHOHCount', 'NOCount', 'NumAliphaticCarbocycles',
'NumAliphaticHeterocycles', 'NumAliphaticRings', 'NumAromaticCarbocycles', 'NumAromaticHeterocycles',
'NumAromaticRings', 'NumHAcceptors', 'NumHDonors', 'NumHeteroatoms', 'NumRotatableBonds',
'NumSaturatedCarbocycles', 'NumSaturatedHeterocycles', 'NumSaturatedRings', 'RingCount', 'MolLogP',
'MolMR', 'fr_Al_COO', 'fr_Al_OH', 'fr_Al_OH_noTert', 'fr_ArN', 'fr_Ar_COO', 'fr_Ar_N', 'fr_Ar_NH',
'fr_Ar_OH', 'fr_COO', 'fr_COO2', 'fr_C_O', 'fr_C_O_noCOO', 'fr_C_S', 'fr_HOCCN', 'fr_Imine', 'fr_NH0',
'fr_NH1', 'fr_NH2', 'fr_N_O', 'fr_Ndealkylation1', 'fr_Ndealkylation2', 'fr_Nhpyrrole', 'fr_SH',
'fr_aldehyde', 'fr_alkyl_carbamate', 'fr_alkyl_halide', 'fr_allylic_oxid', 'fr_amide', 'fr_amidine',
'fr_aniline', 'fr_aryl_methyl', 'fr_azide', 'fr_azo', 'fr_barbitur', 'fr_benzene', 'fr_benzodiazepine',
'fr_bicyclic', 'fr_diazo', 'fr_dihydropyridine', 'fr_epoxide', 'fr_ester', 'fr_ether', 'fr_furan',
'fr_guanido', 'fr_halogen', 'fr_hdrzine', 'fr_hdrzone', 'fr_imidazole', 'fr_imide', 'fr_isocyan',
'fr_isothiocyan', 'fr_ketone', 'fr_ketone_Topliss', 'fr_lactam', 'fr_lactone', 'fr_methoxy',
'fr_morpholine', 'fr_nitrile', 'fr_nitro', 'fr_nitro_arom', 'fr_nitro_arom_nonortho', 'fr_nitroso',
'fr_oxazole', 'fr_oxime', 'fr_para_hydroxylation', 'fr_phenol', 'fr_phenol_noOrthoHbond', 'fr_phos_acid',
'fr_phos_ester', 'fr_piperdine', 'fr_piperzine', 'fr_priamide', 'fr_prisulfonamd', 'fr_pyridine',
'fr_quatN', 'fr_sulfide', 'fr_sulfonamd', 'fr_sulfone', 'fr_term_acetylene', 'fr_tetrazole',
'fr_thiazole', 'fr_thiocyan', 'fr_thiophene', 'fr_unbrch_alkane', 'fr_urea']
def __init__(self, n_jobs=-1,
*, input_type='mol', on_errors='raise', return_type='any', target_col=None, desc_list='all'):
"""
All descriptors in RDKit (length = 200) [may include NaN]
see https://www.rdkit.org/docs/GettingStartedInPython.html#list-of-available-descriptors for the full list
Parameters
----------
n_jobs: int
The number of jobs to run in parallel for both fit and predict.
Can be -1 or # of cpus. Set -1 to use all cpu cores (default).
input_type: string
Set the specific type of transform input.
Set to ``mol`` (default) to ``rdkit.Chem.rdchem.Mol`` objects as input.
When set to ``smiles``, the ``transform`` method can use a SMILES list as input.
Set to ``any`` to use both.
If the input is SMILES, the ``Chem.MolFromSmiles`` function will be used inside;
for ``None`` returns, a ``ValueError`` exception will be raised.
on_errors: string
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise', which re-raises the exception.
target_col
Only relevant when the input is a pd.DataFrame, otherwise ignored.
Specify a single column to be used for transformation.
If ``None``, all columns of the pd.DataFrame are used.
Default is None.
desc_list: string or list
List of descriptor names to be called in rdkit to calculate molecule descriptors.
If ``classic``, the full list of rdkit v.2020.03.xx is used. (length = 200)
Default is to use the latest list available in the rdkit. (length = 208 in rdkit v.2020.09.xx)
"""
# self.arg = arg # arg[0] = radius, arg[1] = bit length
super().__init__(n_jobs=n_jobs, on_errors=on_errors, return_type=return_type, target_col=target_col)
self.input_type = input_type
if desc_list == 'all':
self.nms = [x[0] for x in ChemDesc._descList]
elif desc_list == 'classic':
self.nms = self.classic
else:
self.nms = desc_list
self.calc = MoleculeDescriptors.MolecularDescriptorCalculator(self.nms)
self.__authors__ = ['Stephen Wu', 'TsumiNa']
def featurize(self, x):
if self.input_type == 'smiles':
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
if self.input_type == 'any':
if not isinstance(x, Chem.rdchem.Mol):
x_ = x
x = Chem.MolFromSmiles(x)
if x is None:
raise ValueError('cannot convert Mol from SMILES %s' % x_)
return self.calc.CalcDescriptors(x)
@property
def feature_labels(self):
return self.nms
class Fingerprints(BaseDescriptor):
"""
Calculate fingerprints or descriptors of organic molecules.
Note that MHFP currently does not support parallel computing, so n_jobs is fixed to 1.
"""
def __init__(self,
n_jobs=-1,
*,
radius=3,
n_bits=2048,
bit_per_entry=None,
counting=False,
input_type='mol',
featurizers='all',
on_errors='raise',
target_col=None):
"""
Parameters
----------
n_jobs: int
The number of jobs to run in parallel for both fit and predict.
Can be -1 or # of cpus. Set -1 to use all cpu cores (default).
radius: int
The radius parameter in the Morgan fingerprints,
which is roughly half of the diameter parameter in ECFP/FCFP,
i.e., radius=2 is roughly equivalent to ECFP4/FCFP4.
n_bits: int
Fixed bit length based on folding.
bit_per_entry: int
Number of bits used to represent a single entry (only for non-counting case)
in RDKitFP, AtomPairFP, and TopologicalTorsionFP.
Default value follows rdkit default.
counting: boolean
Record counts of the entries instead of bits only.
featurizers: list[str] or str or 'all'
Featurizer(s) that will be used.
Default is 'all'.
input_type: string
Set the specific type of transform input.
Set to ``mol`` (default) to ``rdkit.Chem.rdchem.Mol`` objects as input.
When set to ``smiles``, the ``transform`` method can use a SMILES list as input.
Set to ``any`` to use both.
If the input is SMILES, the ``Chem.MolFromSmiles`` function will be used inside;
for ``None`` returns, a ``ValueError`` exception will be raised.
on_errors: string
How to handle exceptions in feature calculations. Can be 'nan', 'keep', 'raise'.
When 'nan', return a column with ``np.nan``.
The length of the column corresponds to the number of feature labels.
When 'keep', return a column with exception objects.
The default is 'raise', which re-raises the exception.
target_col
Only relevant when the input is a pd.DataFrame, otherwise ignored.
Specify a single column to be used for transformation.
If ``None``, all columns of the pd.DataFrame are used.
Default is None.
"""
super().__init__(featurizers=featurizers)
self.mol = RDKitFP(n_jobs, n_bits=n_bits, bit_per_entry=bit_per_entry, counting=counting,
input_type=input_type, on_errors=on_errors, target_col=target_col)
self.mol = AtomPairFP(n_jobs, n_bits=n_bits, bit_per_entry=bit_per_entry, counting=counting,
input_type=input_type, on_errors=on_errors, target_col=target_col)
self.mol = TopologicalTorsionFP(n_jobs, n_bits=n_bits, input_type=input_type, bit_per_entry=bit_per_entry,
counting=counting, on_errors=on_errors, target_col=target_col)
self.mol = MACCS(n_jobs, input_type=input_type, on_errors=on_errors, target_col=target_col)
self.mol = ECFP(n_jobs, radius=radius, n_bits=n_bits, input_type=input_type, counting=counting,
on_errors=on_errors, target_col=target_col)
self.mol = FCFP(n_jobs, radius=radius, n_bits=n_bits, input_type=input_type, counting=counting,
on_errors=on_errors, target_col=target_col)
self.mol = PatternFP(n_jobs, n_bits=n_bits, input_type=input_type, on_errors=on_errors, target_col=target_col)
self.mol = LayeredFP(n_jobs, n_bits=n_bits, input_type=input_type, on_errors=on_errors, target_col=target_col)
# self.mol = SECFP(n_jobs, radius=radius, n_bits=n_bits, input_type=input_type, on_errors=on_errors)
self.mol = MHFP(1, radius=radius, n_bits=n_bits,
input_type=input_type, on_errors=on_errors, target_col=target_col)
self.mol = DescriptorFeature(n_jobs, input_type=input_type,
on_errors=on_errors, target_col=target_col)
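A minimal usage sketch for the featurizers above, assuming rdkit and xenonpy are installed; the SMILES strings and parameter values are illustrative:
from xenonpy.descriptor.fingerprint import ECFP, Fingerprints
ecfp = ECFP(n_jobs=1, n_bits=1024, input_type='smiles', on_errors='nan')
bits = ecfp.featurize('CCO')  # ethanol -> list of 1024 bits
print(sum(bits), len(ecfp.feature_labels))
fps = Fingerprints(n_jobs=1, input_type='smiles', on_errors='nan')
df = fps.transform(['CCO', 'c1ccccc1'])  # one row per molecule, columns from all featurizers
print(df.shape)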
| 49.08648
| 120
| 0.603226
| 5,237
| 40,300
| 4.488448
| 0.103685
| 0.032162
| 0.014549
| 0.015911
| 0.788522
| 0.786693
| 0.785374
| 0.780694
| 0.778184
| 0.773249
| 0
| 0.009723
| 0.300695
| 40,300
| 820
| 121
| 49.146341
| 0.824356
| 0.430893
| 0
| 0.662125
| 0
| 0
| 0.174675
| 0.01021
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087193
| false
| 0
| 0.027248
| 0.013624
| 0.231608
| 0.040872
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8c1988b62202bf3ca7b515e6670d1769ceb51c1e
| 75
|
py
|
Python
|
persian_re/models/__init__.py
|
nimaafshar/persian_relation_extraction
|
6f6ce8678a7d1115977f5ad79bce1816c3d31e3e
|
[
"AFL-3.0"
] | null | null | null |
persian_re/models/__init__.py
|
nimaafshar/persian_relation_extraction
|
6f6ce8678a7d1115977f5ad79bce1816c3d31e3e
|
[
"AFL-3.0"
] | null | null | null |
persian_re/models/__init__.py
|
nimaafshar/persian_relation_extraction
|
6f6ce8678a7d1115977f5ad79bce1816c3d31e3e
|
[
"AFL-3.0"
] | null | null | null |
from .cls_model import CLSModel
from .entity_start import EntityStartModel
| 25
| 42
| 0.866667
| 10
| 75
| 6.3
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 75
| 2
| 43
| 37.5
| 0.940299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8c32af06b8cec01bf19d37bd8f0884f9bff68a8d
| 878
|
py
|
Python
|
exp/nb_02.py
|
ftm624/fastai_nbs
|
e567edefbad666c06d929558cdb3a58d6e65f395
|
[
"Apache-2.0"
] | null | null | null |
exp/nb_02.py
|
ftm624/fastai_nbs
|
e567edefbad666c06d929558cdb3a58d6e65f395
|
[
"Apache-2.0"
] | null | null | null |
exp/nb_02.py
|
ftm624/fastai_nbs
|
e567edefbad666c06d929558cdb3a58d6e65f395
|
[
"Apache-2.0"
] | null | null | null |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/02_fully_connected.ipynb
from exp.nb_01 import *
import gzip
import pickle
import torch
import math
from fastai import datasets
from torch import tensor
from torch.nn import init
import torch.nn as nn
import matplotlib.pyplot as plt
def get_data():
    path = datasets.download_data('http://deeplearning.net/data/mnist/mnist.pkl', ext='.gz')
    with gzip.open(path, 'rb') as f:
        ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1')
    return map(tensor, (x_train, y_train, x_valid, y_valid))
def normalize(x, m, s): return (x - m) / s
def get_stats(a): return f"Mean: {a.mean()} STD: {a.std()}"
def test_near_zero(a, tol=1e-02): assert a.abs() < tol, f"Near zero: {a}"
| 27.4375
| 92
| 0.621868
| 137
| 878
| 3.854015
| 0.518248
| 0.041667
| 0.026515
| 0.045455
| 0.090909
| 0.090909
| 0.090909
| 0.090909
| 0
| 0
| 0
| 0.01054
| 0.135535
| 878
| 32
| 93
| 27.4375
| 0.685112
| 0.100228
| 0
| 0
| 1
| 0
| 0.149123
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.222222
| false
| 0
| 0.555556
| 0.111111
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
8c3522204142fdcdc60f437befeb64754fca881b
| 126
|
py
|
Python
|
Machine Learning/Projects/Magic/cards_magic.py
|
HackerLion123/Machine-Learning
|
71224ea97ba4aaded13a700e07b498469299964b
|
[
"MIT"
] | 1
|
2018-07-21T15:41:40.000Z
|
2018-07-21T15:41:40.000Z
|
Machine Learning/Projects/Magic/cards_magic.py
|
HackerLion123/Machine-Learning
|
71224ea97ba4aaded13a700e07b498469299964b
|
[
"MIT"
] | null | null | null |
Machine Learning/Projects/Magic/cards_magic.py
|
HackerLion123/Machine-Learning
|
71224ea97ba4aaded13a700e07b498469299964b
|
[
"MIT"
] | null | null | null |
from card_detector import Model
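# stub module: Model is imported but not yet used, presumably awaiting the detector wiring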
def decode(code):
    # placeholder: echo the code back until real decoding exists (assumed stub)
    return code
def main():
    pass
if __name__ == '__main__':
    main()
| 7.875
| 31
| 0.68254
| 17
| 126
| 4.529412
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 126
| 16
| 32
| 7.875
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.062992
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.142857
| 0.142857
| 0.142857
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
8c5081638b7b13f3ba5fe97660c0209303588da5
| 43
|
py
|
Python
|
Helloworld.py
|
oakkarmin7/Python-Class
|
4d1e3d90c49132579d7f2c3d4ee3e934e1f70dc3
|
[
"MIT"
] | null | null | null |
Helloworld.py
|
oakkarmin7/Python-Class
|
4d1e3d90c49132579d7f2c3d4ee3e934e1f70dc3
|
[
"MIT"
] | null | null | null |
Helloworld.py
|
oakkarmin7/Python-Class
|
4d1e3d90c49132579d7f2c3d4ee3e934e1f70dc3
|
[
"MIT"
] | null | null | null |
print ("Hello world")
print ("6 7 8 9 10")
| 14.333333
| 21
| 0.604651
| 9
| 43
| 2.888889
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 0.209302
| 43
| 2
| 22
| 21.5
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0.488372
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
8c64aa9bcdae1a01d85c3ed8d27076c6c8a525f8
| 102
|
py
|
Python
|
Randomnumbers/seed.py
|
Anilkumar95/python-75-hackathon
|
0cc9304e46ceace826090614b46d8048a068d106
|
[
"MIT"
] | null | null | null |
Randomnumbers/seed.py
|
Anilkumar95/python-75-hackathon
|
0cc9304e46ceace826090614b46d8048a068d106
|
[
"MIT"
] | null | null | null |
Randomnumbers/seed.py
|
Anilkumar95/python-75-hackathon
|
0cc9304e46ceace826090614b46d8048a068d106
|
[
"MIT"
] | 2
|
2019-01-27T16:59:48.000Z
|
2019-01-29T13:07:40.000Z
|
import random
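# seeding first makes the single random() draw below reproducible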
random.seed(700)
print("The mapped random number with 700 is:")
print(random.random())
| 20.4
| 45
| 0.745098
| 16
| 102
| 4.75
| 0.625
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065934
| 0.107843
| 102
| 4
| 46
| 25.5
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
4fcdc25bd6839fbd2a5289e74f87175da854f0f0
| 84
|
py
|
Python
|
pygrn/__init__.py
|
nico1as/pyGRN
|
115d9d42dfbd374fc64393cabefb2a8e245aa6b7
|
[
"Apache-2.0"
] | 7
|
2018-07-18T16:08:51.000Z
|
2020-12-09T07:18:35.000Z
|
pygrn/__init__.py
|
nico1as/pyGRN
|
115d9d42dfbd374fc64393cabefb2a8e245aa6b7
|
[
"Apache-2.0"
] | 3
|
2018-04-13T11:44:59.000Z
|
2018-04-19T13:58:06.000Z
|
pygrn/__init__.py
|
nico1as/pyGRN
|
115d9d42dfbd374fc64393cabefb2a8e245aa6b7
|
[
"Apache-2.0"
] | 6
|
2018-07-22T01:54:14.000Z
|
2021-08-04T16:01:38.000Z
|
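# package init: re-export the GRN layer classes and the recurrent model for pygrn users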
from .layer import GRNInit, GRNLayer, RecurrentGRNLayer
from .recurrent import RGRN
| 28
| 55
| 0.833333
| 10
| 84
| 7
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 84
| 2
| 56
| 42
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8b089c03c7497973d44277a4205f2f7085dcf752
| 200
|
py
|
Python
|
atlas/atlas/templatetags/include_anything.py
|
briansok/Atlas
|
01320e6c1f2d41e41a93890de6ef6c92bfbbb7e6
|
[
"MIT"
] | 5
|
2018-06-04T08:12:50.000Z
|
2020-11-30T20:57:48.000Z
|
atlas/atlas/templatetags/include_anything.py
|
briansok/Atlas
|
01320e6c1f2d41e41a93890de6ef6c92bfbbb7e6
|
[
"MIT"
] | null | null | null |
atlas/atlas/templatetags/include_anything.py
|
briansok/Atlas
|
01320e6c1f2d41e41a93890de6ef6c92bfbbb7e6
|
[
"MIT"
] | null | null | null |
from django import template
from django.utils.html import mark_safe
register = template.Library()
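# template tag that inlines the raw contents of file_name into the page (mark_safe skips escaping, so trusted files only)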
@register.simple_tag()
def include_anything(file_name):
    return mark_safe(open(file_name).read())
| 25
| 44
| 0.795
| 29
| 200
| 5.275862
| 0.689655
| 0.130719
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105
| 200
| 8
| 44
| 25
| 0.854749
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
8b0a656a8df5564b6556b76820e2f05b1466cc68
| 19
|
py
|
Python
|
Prediction/LSTM/src/LSTM_model/train.py
|
CovidDashboardProject/Src
|
405a454f4c3a1eb55e6f1fadecb9700732618e4d
|
[
"MIT"
] | null | null | null |
Prediction/LSTM/src/LSTM_model/train.py
|
CovidDashboardProject/Src
|
405a454f4c3a1eb55e6f1fadecb9700732618e4d
|
[
"MIT"
] | null | null | null |
Prediction/LSTM/src/LSTM_model/train.py
|
CovidDashboardProject/Src
|
405a454f4c3a1eb55e6f1fadecb9700732618e4d
|
[
"MIT"
] | null | null | null |
# adding something
| 9.5
| 18
| 0.789474
| 2
| 19
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 19
| 1
| 19
| 19
| 0.9375
| 0.842105
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8b0bd95273fa6611f20726a4fa3e1294a740550c
| 124
|
py
|
Python
|
parsercode/daedcode/process_log.py
|
DeadlyK1tten/arena_log_parser
|
d672df63fefd55bd92ad31bd472464073ceb6019
|
[
"Apache-2.0"
] | null | null | null |
parsercode/daedcode/process_log.py
|
DeadlyK1tten/arena_log_parser
|
d672df63fefd55bd92ad31bd472464073ceb6019
|
[
"Apache-2.0"
] | null | null | null |
parsercode/daedcode/process_log.py
|
DeadlyK1tten/arena_log_parser
|
d672df63fefd55bd92ad31bd472464073ceb6019
|
[
"Apache-2.0"
] | null | null | null |
"""
Script to process output_log.txt
"""
import utils
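# thin wrapper: all parsing is delegated to utils.ProcessFile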
if __name__ == '__main__':
    utils.ProcessFile('output_log.txt')
| 12.4
| 39
| 0.693548
| 16
| 124
| 4.75
| 0.75
| 0.236842
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 124
| 10
| 39
| 12.4
| 0.730769
| 0.258065
| 0
| 0
| 0
| 0
| 0.261905
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8b39cde7f7a81b2e494ec4f46f9d8ec50a2456b7
| 59
|
py
|
Python
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/o/old_division_floats.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 463
|
2015-01-15T08:17:42.000Z
|
2022-03-28T15:10:20.000Z
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/o/old_division_floats.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 52
|
2015-01-06T02:43:59.000Z
|
2022-03-14T11:15:21.000Z
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/o/old_division_floats.py
|
ciskoinch8/vimrc
|
5bf77a7e7bc70fac5173ab2e9ea05d7dda3e52b8
|
[
"MIT"
] | 249
|
2015-01-07T22:49:49.000Z
|
2022-03-18T02:32:06.000Z
|
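# pylint functional test: the operand is a float, so the old-division check should stay quiet here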
from __future__ import print_function
print(float(1) / 2)
| 14.75
| 37
| 0.779661
| 9
| 59
| 4.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039216
| 0.135593
| 59
| 3
| 38
| 19.666667
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
8b473473b5bd6e4bdccb9508714b5e0a38dc1e00
| 240
|
py
|
Python
|
extract/__init__.py
|
sergiopm97/sokkai
|
151f509b96534c493c6bd304e504171a1c6fbee7
|
[
"MIT"
] | null | null | null |
extract/__init__.py
|
sergiopm97/sokkai
|
151f509b96534c493c6bd304e504171a1c6fbee7
|
[
"MIT"
] | 1
|
2022-03-26T10:44:44.000Z
|
2022-03-26T10:44:44.000Z
|
extract/__init__.py
|
sergiopm97/sokkai
|
151f509b96534c493c6bd304e504171a1c6fbee7
|
[
"MIT"
] | null | null | null |
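# "import x as x" marks each name as an explicit re-export for linters and type checkers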
from .extract_training_data import extract_training_data as extract_training_data
from .extract_played_games import extract_played_games as extract_played_games
from .extract_game_columns import extract_game_columns as extract_game_columns
| 60
| 81
| 0.9125
| 36
| 240
| 5.583333
| 0.277778
| 0.164179
| 0.283582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 240
| 3
| 82
| 80
| 0.905405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8ce1275d1f939e2d193c325ccec05f1741b2f1ab
| 156
|
py
|
Python
|
tradingview_ta/__init__.py
|
fluxguardian/python-tradingview-ta
|
499d9d68e3e8548d6c7caedf16d22946ad005660
|
[
"MIT"
] | 294
|
2021-05-01T07:13:19.000Z
|
2022-03-29T06:28:43.000Z
|
tradingview_ta/__init__.py
|
sina-rostami/python-tradingview-ta
|
6d9e2656adba45149be3ac6ba71b823507ff186d
|
[
"MIT"
] | 48
|
2021-05-16T08:44:41.000Z
|
2022-03-06T08:53:58.000Z
|
tradingview_ta/__init__.py
|
sina-rostami/python-tradingview-ta
|
6d9e2656adba45149be3ac6ba71b823507ff186d
|
[
"MIT"
] | 76
|
2021-05-13T04:38:58.000Z
|
2022-03-24T07:59:54.000Z
|
from .main import TA_Handler, TradingView, Analysis, Interval, Exchange, get_multiple_analysis, __version__
from .technicals import Recommendation, Compute
| 52
| 107
| 0.846154
| 18
| 156
| 6.944444
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 156
| 2
| 108
| 78
| 0.886525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8cf5aee02b3cadff38c15ddbb3154d37496edb21
| 121
|
py
|
Python
|
autokey/undo.py
|
jargv/dotfiles
|
3090609afbe500242cdd0d30ae4b900535f61207
|
[
"MIT"
] | 2
|
2016-09-25T23:18:36.000Z
|
2017-04-25T19:51:26.000Z
|
autokey/undo.py
|
jargv/dotfiles
|
3090609afbe500242cdd0d30ae4b900535f61207
|
[
"MIT"
] | null | null | null |
autokey/undo.py
|
jargv/dotfiles
|
3090609afbe500242cdd0d30ae4b900535f61207
|
[
"MIT"
] | null | null | null |
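# AutoKey snippet: "window" and "keyboard" are globals injected by AutoKey at runtime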
if window.get_active_title() == "Terminal":
    keyboard.send_keys("<super>+z")
else:
    keyboard.send_keys("<ctrl>+z")
| 24.2
| 43
| 0.669421
| 17
| 121
| 4.529412
| 0.764706
| 0.311688
| 0.415584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123967
| 121
| 4
| 44
| 30.25
| 0.726415
| 0
| 0
| 0
| 0
| 0
| 0.206612
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
50682cad009b2a8066ab4aa3af08ff115727bd72
| 22
|
py
|
Python
|
examples/compilex-Demo1/temp/4s07mun.py
|
IPPMCMP07/compiler
|
1041c56dae9f7a13ba0657085532b4d074144e39
|
[
"MIT"
] | null | null | null |
examples/compilex-Demo1/temp/4s07mun.py
|
IPPMCMP07/compiler
|
1041c56dae9f7a13ba0657085532b4d074144e39
|
[
"MIT"
] | null | null | null |
examples/compilex-Demo1/temp/4s07mun.py
|
IPPMCMP07/compiler
|
1041c56dae9f7a13ba0657085532b4d074144e39
|
[
"MIT"
] | null | null | null |
print("dragon ball z")
| 22
| 22
| 0.727273
| 4
| 22
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 22
| 1
| 22
| 22
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.565217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
5090dbd3adf2cb4aea2b1fd1bc7b68318272e38d
| 30
|
py
|
Python
|
hello.py
|
Kavi16-02/PythonK
|
d365d7a24966e782e910347374dfd61d003da30f
|
[
"MIT"
] | null | null | null |
hello.py
|
Kavi16-02/PythonK
|
d365d7a24966e782e910347374dfd61d003da30f
|
[
"MIT"
] | null | null | null |
hello.py
|
Kavi16-02/PythonK
|
d365d7a24966e782e910347374dfd61d003da30f
|
[
"MIT"
] | null | null | null |
print"Hello world"
#added ssh
| 10
| 18
| 0.766667
| 5
| 30
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 2
| 19
| 15
| 0.884615
| 0.3
| 0
| 0
| 0
| 0
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
50a1cedab6c4ccd456ce587492a4805e4f353bfe
| 146
|
py
|
Python
|
Fundamentals/Exer.007.py
|
thiagokanagushiku/Python-Exercises
|
e536ff3c64911d3f25d4b1441c4ef070faab1764
|
[
"MIT"
] | null | null | null |
Fundamentals/Exer.007.py
|
thiagokanagushiku/Python-Exercises
|
e536ff3c64911d3f25d4b1441c4ef070faab1764
|
[
"MIT"
] | null | null | null |
Fundamentals/Exer.007.py
|
thiagokanagushiku/Python-Exercises
|
e536ff3c64911d3f25d4b1441c4ef070faab1764
|
[
"MIT"
] | null | null | null |
n1 = float(input('First grade of the student:'))
n2 = float(input('Second grade of the student:'))
print(f'The average of {n1} and {n2} is {(n1 + n2)/2:.1f}')
| 29.2
| 55
| 0.623288
| 27
| 146
| 3.37037
| 0.666667
| 0.21978
| 0.241758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065041
| 0.157534
| 146
| 4
| 56
| 36.5
| 0.674797
| 0
| 0
| 0
| 0
| 0
| 0.62069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
50b6a1dd3781f9ace69469027807627cc69c9831
| 165
|
py
|
Python
|
server/src/tests/samples/private2.py
|
higoshi/pyright
|
183c0ef56d2c010d28018149949cda1a40aa59b8
|
[
"MIT"
] | 1
|
2019-09-18T03:19:50.000Z
|
2019-09-18T03:19:50.000Z
|
server/src/tests/samples/private2.py
|
higoshi/pyright
|
183c0ef56d2c010d28018149949cda1a40aa59b8
|
[
"MIT"
] | null | null | null |
server/src/tests/samples/private2.py
|
higoshi/pyright
|
183c0ef56d2c010d28018149949cda1a40aa59b8
|
[
"MIT"
] | null | null | null |
# This sample tests the "reportPrivateUsage" feature.
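# _TestClass is module-private by naming convention; the checker should flag uses from outside this module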
class _TestClass(object):
    pass
class TestClass(object):
    def __init__(self):
        self._priv1 = 1
| 16.5
| 53
| 0.690909
| 19
| 165
| 5.684211
| 0.789474
| 0.259259
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015504
| 0.218182
| 165
| 9
| 54
| 18.333333
| 0.821705
| 0.309091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
50c9a3709850efd062575309c8be3233cb44ef36
| 23,857
|
py
|
Python
|
skimage/filter/rank/generic.py
|
tonysyu/scikit-image
|
d5776656a8217e58cb28d5760439a54e96d15316
|
[
"BSD-3-Clause"
] | null | null | null |
skimage/filter/rank/generic.py
|
tonysyu/scikit-image
|
d5776656a8217e58cb28d5760439a54e96d15316
|
[
"BSD-3-Clause"
] | null | null | null |
skimage/filter/rank/generic.py
|
tonysyu/scikit-image
|
d5776656a8217e58cb28d5760439a54e96d15316
|
[
"BSD-3-Clause"
] | null | null | null |
"""The local histogram is computed using a sliding window similar to the method
described in [1]_.
Input image can be 8-bit or 16-bit, for 16-bit input images, the number of
histogram bins is determined from the maximum value present in the image.
Result image is 8-/16-bit or double with respect to the input image and the
rank filter operation.
References
----------
.. [1] Huang, T., Yang, G., Tang, G., "A fast two-dimensional
median filtering algorithm", IEEE Transactions on Acoustics, Speech and
Signal Processing, Feb 1979. Volume: 27 , Issue: 1, Page(s): 13 - 18.
"""
import warnings
import numpy as np
from skimage import img_as_ubyte
from . import generic_cy
__all__ = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'maximum', 'mean',
'subtract_mean', 'median', 'minimum', 'modal', 'enhance_contrast',
'pop', 'threshold', 'tophat', 'noise_filter', 'entropy', 'otsu']
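# shared plumbing: _handle_input validates dtypes/masks and sizes the histogram,
# _apply then dispatches to the Cython kernels in generic_cy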
def _handle_input(image, selem, out, mask, out_dtype=None):
    if image.dtype not in (np.uint8, np.uint16):
        image = img_as_ubyte(image)
    selem = np.ascontiguousarray(img_as_ubyte(selem > 0))
    image = np.ascontiguousarray(image)
    if mask is None:
        mask = np.ones(image.shape, dtype=np.uint8)
    else:
        mask = img_as_ubyte(mask)
        mask = np.ascontiguousarray(mask)
    if out is None:
        if out_dtype is None:
            out_dtype = image.dtype
        out = np.empty_like(image, dtype=out_dtype)
    if image is out:
        raise NotImplementedError("Cannot perform rank operation in place.")
    is_8bit = image.dtype in (np.uint8, np.int8)
    if is_8bit:
        max_bin = 255
    else:
        max_bin = max(4, image.max())
    bitdepth = int(np.log2(max_bin))
    if bitdepth > 10:
        warnings.warn("Bitdepth of %d may result in bad rank filter "
                      "performance due to large number of bins." % bitdepth)
    return image, selem, out, mask, max_bin
def _apply(func, image, selem, out, mask, shift_x, shift_y, out_dtype=None):
    image, selem, out, mask, max_bin = _handle_input(image, selem, out, mask,
                                                     out_dtype)
    func(image, selem, shift_x=shift_x, shift_y=shift_y, mask=mask,
         out=out, max_bin=max_bin)
    return out
def autolevel(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Autolevel image using local histogram.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filter.rank import autolevel
>>> # Load test image
>>> ima = data.camera()
>>> # Stretch image contrast locally
>>> auto = autolevel(ima, disk(20))
"""
    return _apply(generic_cy._autolevel, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
def bottomhat(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Returns greyscale local bottomhat of an image.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
bottomhat : ndarray (same dtype as input image)
The result of the local bottomhat.
"""
    return _apply(generic_cy._bottomhat, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
def equalize(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Equalize image using local histogram.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filter.rank import equalize
>>> # Load test image
>>> ima = data.camera()
>>> # Local equalization
>>> equ = equalize(ima, disk(20))
"""
    return _apply(generic_cy._equalize, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
def gradient(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return greyscale local gradient of an image (i.e. local maximum - local
minimum).
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
"""
    return _apply(generic_cy._gradient, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
def maximum(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return greyscale local maximum of an image.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
See also
--------
skimage.morphology.dilation
Note
----
* the lower algorithm complexity makes the rank.maximum() more efficient
for larger images and structuring elements
"""
    return _apply(generic_cy._maximum, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
def mean(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return greyscale local mean of an image.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filter.rank import mean
>>> # Load test image
>>> ima = data.camera()
>>> # Local mean
>>> avg = mean(ima, disk(20))
"""
    return _apply(generic_cy._mean, image, selem, out=out,
                  mask=mask, shift_x=shift_x, shift_y=shift_y)
def subtract_mean(image, selem, out=None, mask=None, shift_x=False,
shift_y=False):
"""Return image subtracted from its local mean.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
"""
    return _apply(generic_cy._subtract_mean, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
def median(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return greyscale local median of an image.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filter.rank import median
>>> # Load test image
>>> ima = data.camera()
>>> # Local mean
>>> avg = median(ima, disk(20))
"""
    return _apply(generic_cy._median, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
def minimum(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return greyscale local minimum of an image.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
See also
--------
skimage.morphology.erosion
Note
----
* the lower algorithm complexity makes the rank.minimum() more efficient
for larger images and structuring elements
"""
    return _apply(generic_cy._minimum, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
def modal(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return greyscale local mode of an image.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
"""
    return _apply(generic_cy._modal, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
def enhance_contrast(image, selem, out=None, mask=None, shift_x=False,
shift_y=False):
"""Enhance an image replacing each pixel by the local maximum if pixel
greylevel is closest to maximimum than local minimum OR local minimum
otherwise.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
The result of the local enhance_contrast.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filter.rank import enhance_contrast
>>> # Load test image
>>> ima = data.camera()
>>> # Local mean
>>> avg = enhance_contrast(ima, disk(20))
"""
    return _apply(generic_cy._enhance_contrast, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
def pop(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return the number (population) of pixels actually inside the
neighborhood.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
Examples
--------
>>> # Local mean
>>> from skimage.morphology import square
>>> import skimage.filter.rank as rank
>>> ima = 255 * np.array([[0, 0, 0, 0, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 0, 0, 0, 0]], dtype=np.uint8)
>>> rank.pop(ima, square(3))
array([[4, 6, 6, 6, 4],
[6, 9, 9, 9, 6],
[6, 9, 9, 9, 6],
[6, 9, 9, 9, 6],
[4, 6, 6, 6, 4]], dtype=uint8)
"""
    return _apply(generic_cy._pop, image, selem, out=out,
                  mask=mask, shift_x=shift_x, shift_y=shift_y)
def threshold(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return greyscale local threshold of an image.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
Examples
--------
>>> # Local threshold
>>> from skimage.morphology import square
>>> from skimage.filter.rank import threshold
>>> ima = 255 * np.array([[0, 0, 0, 0, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 0, 0, 0, 0]], dtype=np.uint8)
>>> threshold(ima, square(3))
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
    return _apply(generic_cy._threshold, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
def tophat(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return greyscale local tophat of an image.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
"""
    return _apply(generic_cy._tophat, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
def noise_filter(image, selem, out=None, mask=None, shift_x=False,
shift_y=False):
"""Returns the noise feature as described in [Hashimoto12]_
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
References
----------
.. [Hashimoto12] N. Hashimoto et al. Referenceless image quality evaluation
for whole slide imaging. J Pathol Inform 2012;3:9.
Returns
-------
out : ndarray (same dtype as input image)
Output image.
"""
    # ensure that the central pixel in the structuring element is empty
    centre_r = int(selem.shape[0] / 2) + shift_y
    centre_c = int(selem.shape[1] / 2) + shift_x
    # make a local copy
    selem_cpy = selem.copy()
    selem_cpy[centre_r, centre_c] = 0
    return _apply(generic_cy._noise_filter, image, selem_cpy, out=out,
                  mask=mask, shift_x=shift_x, shift_y=shift_y)
def entropy(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Returns the entropy [1]_ computed locally. Entropy is computed
using base 2 logarithm i.e. the filter returns the minimum number of
bits needed to encode local greylevel distribution.
Parameters
----------
image : ndarray (uint8, uint16)
Image array.
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray (same dtype as input)
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (double)
Output image.
References
----------
.. [1] <http://en.wikipedia.org/wiki/Entropy_(information_theory)>
Examples
--------
>>> # Local entropy
>>> from skimage import data
>>> from skimage.filter.rank import entropy
>>> from skimage.morphology import disk
>>> a8 = data.camera()
>>> ent8 = entropy(a8, disk(5))
"""
    return _apply(generic_cy._entropy, image, selem,
                  out=out, mask=mask, shift_x=shift_x, shift_y=shift_y,
                  out_dtype=np.double)
def otsu(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Returns the Otsu's threshold value for each pixel.
Parameters
----------
image : ndarray
Image array (uint8 array).
selem : ndarray
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (same dtype as input image)
Output image.
References
----------
.. [otsu] http://en.wikipedia.org/wiki/Otsu's_method
Examples
--------
>>> # Local entropy
>>> from skimage import data
>>> from skimage.filter.rank import otsu
>>> from skimage.morphology import disk
>>> # defining a 8-bit test images
>>> a8 = data.camera()
>>> loc_otsu = otsu(a8, disk(5))
>>> thresh_image = a8 >= loc_otsu
"""
    return _apply(generic_cy._otsu, image, selem, out=out,
                  mask=mask, shift_x=shift_x, shift_y=shift_y)
| 32.195682
| 79
| 0.621243
| 3,270
| 23,857
| 4.453517
| 0.081346
| 0.029664
| 0.040788
| 0.029664
| 0.797226
| 0.779716
| 0.770995
| 0.758704
| 0.746206
| 0.738378
| 0
| 0.017633
| 0.279708
| 23,857
| 740
| 80
| 32.239189
| 0.829842
| 0.682022
| 0
| 0.214286
| 0
| 0
| 0.045909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.193878
| false
| 0
| 0.040816
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
50dbe9cfce2eee920d7a9fe19cf77bea85cf3ed4
| 3,257
|
py
|
Python
|
tests/migrations/0001_initial.py
|
wechange-eg/django-osm-field
|
81dd297034716b110441da38cb72ba26d3a51896
|
[
"MIT"
] | null | null | null |
tests/migrations/0001_initial.py
|
wechange-eg/django-osm-field
|
81dd297034716b110441da38cb72ba26d3a51896
|
[
"MIT"
] | null | null | null |
tests/migrations/0001_initial.py
|
wechange-eg/django-osm-field
|
81dd297034716b110441da38cb72ba26d3a51896
|
[
"MIT"
] | 1
|
2018-11-19T13:50:37.000Z
|
2018-11-19T13:50:37.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import osm_field.validators
import osm_field.fields
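# initial schema: four test models covering default, custom, mixed, and multiple OSMField naming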
class Migration(migrations.Migration):
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='CustomNamingModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
                ('location', osm_field.fields.OSMField(lat_field='latitude', lon_field='longitude')),
                ('latitude', osm_field.fields.LatitudeField(validators=[osm_field.validators.validate_latitude])),
                ('longitude', osm_field.fields.LongitudeField(validators=[osm_field.validators.validate_longitude])),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='DefaultNamingModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
                ('location', osm_field.fields.OSMField(lat_field='location_lat', lon_field='location_lon')),
                ('location_lat', osm_field.fields.LatitudeField(validators=[osm_field.validators.validate_latitude])),
                ('location_lon', osm_field.fields.LongitudeField(validators=[osm_field.validators.validate_longitude])),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MixedNamingModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
                ('location', osm_field.fields.OSMField(lat_field='location_lat', lon_field='longitude')),
                ('location_lat', osm_field.fields.LatitudeField(validators=[osm_field.validators.validate_latitude])),
                ('longitude', osm_field.fields.LongitudeField(validators=[osm_field.validators.validate_longitude])),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MultipleNamingModel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)),
                ('default_location', osm_field.fields.OSMField(lat_field='default_location_lat', lon_field='default_location_lon')),
                ('default_location_lat', osm_field.fields.LatitudeField(validators=[osm_field.validators.validate_latitude])),
                ('default_location_lon', osm_field.fields.LongitudeField(validators=[osm_field.validators.validate_longitude])),
                ('custom_location', osm_field.fields.OSMField(lat_field='custom_latitude', lon_field='custom_longitude')),
                ('custom_latitude', osm_field.fields.LatitudeField(validators=[osm_field.validators.validate_latitude])),
                ('custom_longitude', osm_field.fields.LongitudeField(validators=[osm_field.validators.validate_longitude])),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| 48.61194
| 132
| 0.628492
| 305
| 3,257
| 6.436066
| 0.163934
| 0.110036
| 0.114111
| 0.142639
| 0.77891
| 0.77891
| 0.77891
| 0.740194
| 0.740194
| 0.740194
| 0
| 0.000406
| 0.243169
| 3,257
| 66
| 133
| 49.348485
| 0.795943
| 0.006448
| 0
| 0.533333
| 0
| 0
| 0.12585
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.116667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
50e37c3555f815169aaf779e6c86b1808a63972f
| 253
|
py
|
Python
|
expose/__init__.py
|
jtmendel/expose
|
19e643ebd4a849cea42aaf38178a99f855f6997c
|
[
"MIT"
] | null | null | null |
expose/__init__.py
|
jtmendel/expose
|
19e643ebd4a849cea42aaf38178a99f855f6997c
|
[
"MIT"
] | null | null | null |
expose/__init__.py
|
jtmendel/expose
|
19e643ebd4a849cea42aaf38178a99f855f6997c
|
[
"MIT"
] | null | null | null |
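# _version is presumably generated at packaging time; tolerate its absence in a raw checkout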
try:
    from ._version import __version__
except ImportError:
    pass
from . import instruments
from . import sources
from . import sky
from . import utils
from . import telescopes
__all__ = ['instruments', 'sources', 'sky', 'utils', 'telescopes']
| 19.461538
| 66
| 0.719368
| 29
| 253
| 5.965517
| 0.448276
| 0.289017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 253
| 12
| 67
| 21.083333
| 0.827751
| 0
| 0
| 0
| 0
| 0
| 0.142292
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.1
| 0.7
| 0
| 0.7
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
50f5ed5c178614bb187ae90240953104aac06d90
| 147
|
py
|
Python
|
readhadoop.py
|
kradanfi/pdmwebdashboard
|
42e0d101bb1d29a0026eaf5c54b93d11d437cc7a
|
[
"MIT"
] | null | null | null |
readhadoop.py
|
kradanfi/pdmwebdashboard
|
42e0d101bb1d29a0026eaf5c54b93d11d437cc7a
|
[
"MIT"
] | null | null | null |
readhadoop.py
|
kradanfi/pdmwebdashboard
|
42e0d101bb1d29a0026eaf5c54b93d11d437cc7a
|
[
"MIT"
] | null | null | null |
import pydoop.hdfs as hdfs
import os
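# dump the local temp file, then clean up the HDFS test file and the local copy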
with open("/tmp/tmp.txt") as f:
print f.read()
hdfs.rmr("test/hello.txt")
os.remove("/tmp/tmp.txt")
| 16.333333
| 31
| 0.653061
| 27
| 147
| 3.555556
| 0.592593
| 0.125
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 147
| 8
| 32
| 18.375
| 0.780488
| 0
| 0
| 0
| 0
| 0
| 0.258503
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0.166667
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0f9b4f95439953944ff5e51fc5bac9005d06686f
| 2,817
|
py
|
Python
|
PythonDAdata/3358OS_02_Code/3358OS_02_Code/code2/stacking.py
|
shijiale0609/Python_Data_Analysis
|
c18b5ed006c171bbb6fcb6be5f51b2686edc8f7e
|
[
"MIT"
] | 1
|
2020-02-22T18:55:54.000Z
|
2020-02-22T18:55:54.000Z
|
PythonDAdata/3358OS_02_Code/3358OS_02_Code/code2/stacking.py
|
shijiale0609/Python_Data_Analysis
|
c18b5ed006c171bbb6fcb6be5f51b2686edc8f7e
|
[
"MIT"
] | null | null | null |
PythonDAdata/3358OS_02_Code/3358OS_02_Code/code2/stacking.py
|
shijiale0609/Python_Data_Analysis
|
c18b5ed006c171bbb6fcb6be5f51b2686edc8f7e
|
[
"MIT"
] | 1
|
2020-02-22T18:55:57.000Z
|
2020-02-22T18:55:57.000Z
|
import numpy as np
# Demonstrates array stacking.
#
# Run from the commandline with
#
# python stacking.py
print "In: a = arange(9).reshape(3,3)"
a = np.arange(9).reshape(3,3)
print "In: a"
print a
#Out:
#array([[0, 1, 2],
# [3, 4, 5],
# [6, 7, 8]])
print "In: b = 2 * a"
b = 2 * a
print "In: b"
print b
#Out:
#array([[ 0, 2, 4],
# [ 6, 8, 10],
# [12, 14, 16]])
print "In: hstack((a, b))"
print np.hstack((a, b))
#Out:
#array([[ 0, 1, 2, 0, 2, 4],
# [ 3, 4, 5, 6, 8, 10],
# [ 6, 7, 8, 12, 14, 16]])
print "In: concatenate((a, b), axis=1)"
print np.concatenate((a, b), axis=1)
#Out:
#array([[ 0, 1, 2, 0, 2, 4],
# [ 3, 4, 5, 6, 8, 10],
# [ 6, 7, 8, 12, 14, 16]])
print "In: vstack((a, b))"
print np.vstack((a, b))
#Out:
#array([[ 0, 1, 2],
# [ 3, 4, 5],
# [ 6, 7, 8],
# [ 0, 2, 4],
# [ 6, 8, 10],
# [12, 14, 16]])
print "In: concatenate((a, b), axis=0)"
print np.concatenate((a, b), axis=0)
#Out:
#array([[ 0, 1, 2],
# [ 3, 4, 5],
# [ 6, 7, 8],
# [ 0, 2, 4],
# [ 6, 8, 10],
# [12, 14, 16]])
print "In: dstack((a, b))"
print np.dstack((a, b))
#Out:
#array([[[ 0, 0],
# [ 1, 2],
# [ 2, 4]],
#
# [[ 3, 6],
# [ 4, 8],
# [ 5, 10]],
#
# [[ 6, 12],
# [ 7, 14],
# [ 8, 16]]])
print "In: oned = arange(2)"
oned = np.arange(2)
print "In: oned"
print oned
#Out: array([0, 1])
print "In: twice_oned = 2 * oned"
twice_oned = 2 * oned
print "In: twice_oned"
print twice_oned
#Out: array([0, 2])
print "In: column_stack((oned, twice_oned))"
print np.column_stack((oned, twice_oned))
#Out:
#array([[0, 0],
# [1, 2]])
print "In: column_stack((a, b))"
print np.column_stack((a, b))
#Out:
#array([[ 0, 1, 2, 0, 2, 4],
# [ 3, 4, 5, 6, 8, 10],
# [ 6, 7, 8, 12, 14, 16]])
print "In: column_stack((a, b)) == hstack((a, b))"
print np.column_stack((a, b)) == np.hstack((a, b))
#Out:
#array([[ True, True, True, True, True, True],
# [ True, True, True, True, True, True],
# [ True, True, True, True, True, True]], dtype=bool)
print "In: row_stack((oned, twice_oned))"
print np.row_stack((oned, twice_oned))
#Out:
#array([[0, 1],
# [0, 2]])
print "In: row_stack((a, b))"
print np.row_stack((a, b))
#Out:
#array([[ 0, 1, 2],
# [ 3, 4, 5],
# [ 6, 7, 8],
# [ 0, 2, 4],
# [ 6, 8, 10],
# [12, 14, 16]])
print "In: row_stack((a,b)) == vstack((a, b))"
print np.row_stack((a,b)) == np.vstack((a, b))
#Out:
#array([[ True, True, True],
# [ True, True, True],
# [ True, True, True],
# [ True, True, True],
# [ True, True, True],
# [ True, True, True]], dtype=bool)
| 20.562044
| 64
| 0.448349
| 453
| 2,817
| 2.743929
| 0.10596
| 0.218825
| 0.30893
| 0.386163
| 0.765084
| 0.656476
| 0.494771
| 0.451327
| 0.388576
| 0.361223
| 0
| 0.104082
| 0.304224
| 2,817
| 136
| 65
| 20.713235
| 0.530102
| 0.536741
| 0
| 0
| 0
| 0
| 0.350734
| 0.017945
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.025641
| null | null | 0.871795
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
0f9ce69370b8b00c0f063e3b39e7984f7d31be13
| 153
|
py
|
Python
|
wopmars/data/example/model/DatedPiece.py
|
aitgon/WopMars
|
9d500954a7501bdf51e74da85f56b5dad86ea9ee
|
[
"MIT"
] | null | null | null |
wopmars/data/example/model/DatedPiece.py
|
aitgon/WopMars
|
9d500954a7501bdf51e74da85f56b5dad86ea9ee
|
[
"MIT"
] | null | null | null |
wopmars/data/example/model/DatedPiece.py
|
aitgon/WopMars
|
9d500954a7501bdf51e74da85f56b5dad86ea9ee
|
[
"MIT"
] | 2
|
2017-09-28T14:36:14.000Z
|
2021-08-19T23:06:49.000Z
|
from sqlalchemy.sql.sqltypes import Date
from sqlalchemy import Column
from model.Piece import Piece
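# example model: a Piece subclass that adds a SQLAlchemy Date column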
class DatedPiece(Piece):
    date = Column(Date)
| 17
| 40
| 0.784314
| 21
| 153
| 5.714286
| 0.52381
| 0.233333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 153
| 8
| 41
| 19.125
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0fe48655a151832457f7ad98006f89f36989295c
| 145
|
py
|
Python
|
files/startup/generic_startup_hook.py
|
MissionCriticalCloud/systemvm-packer
|
c18f188032142f747a98a484235de5719ede4c89
|
[
"Apache-2.0"
] | 8
|
2016-04-03T19:58:50.000Z
|
2020-07-21T10:55:05.000Z
|
files/startup/generic_startup_hook.py
|
remibergsma/systemvm-packer
|
c18f188032142f747a98a484235de5719ede4c89
|
[
"Apache-2.0"
] | 13
|
2016-04-10T19:11:01.000Z
|
2018-03-16T08:37:42.000Z
|
files/startup/generic_startup_hook.py
|
remibergsma/systemvm-packer
|
c18f188032142f747a98a484235de5719ede4c89
|
[
"Apache-2.0"
] | 4
|
2016-10-06T00:12:43.000Z
|
2018-01-31T12:32:41.000Z
|
#!/usr/bin/python3.6
import sys
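# failure hook: reaching this script means the patch scripts apparently did not run, so report and exit non-zero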
print("Patching the systemvm probably went wrong, please check journalctl -u cosmic-patch-scripts")
sys.exit(1)
| 24.166667
| 99
| 0.772414
| 23
| 145
| 4.869565
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0.110345
| 145
| 5
| 100
| 29
| 0.844961
| 0.131034
| 0
| 0
| 0
| 0
| 0.72
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0fe5ed2dbefcef6e1efc2642963f9a7a34250bd3
| 99
|
py
|
Python
|
run.py
|
miguelgrinberg/campy
|
c7275e2620bd54127ce04a7fa0a09d447638b7a6
|
[
"MIT"
] | 3
|
2015-02-06T13:41:40.000Z
|
2019-09-25T12:21:32.000Z
|
run.py
|
miguelgrinberg/campy
|
c7275e2620bd54127ce04a7fa0a09d447638b7a6
|
[
"MIT"
] | 2
|
2019-01-13T19:47:05.000Z
|
2019-02-14T08:59:40.000Z
|
run.py
|
miguelgrinberg/campy
|
c7275e2620bd54127ce04a7fa0a09d447638b7a6
|
[
"MIT"
] | 2
|
2017-03-18T23:39:22.000Z
|
2018-07-08T20:42:31.000Z
|
#!/usr/bin/env python
import sys
from app import run_app
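# minimal launcher: application setup and serving happen inside run_app()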
if __name__ == '__main__':
    run_app()
| 14.142857
| 26
| 0.69697
| 16
| 99
| 3.6875
| 0.75
| 0.20339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 99
| 6
| 27
| 16.5
| 0.728395
| 0.20202
| 0
| 0
| 0
| 0
| 0.102564
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e8451f673d1a8600971cbec16f4854a1aee30108
| 71
|
py
|
Python
|
test/cmd/case_standard_fail.py
|
mkniewallner/flake8-alphabetize
|
11b1f0ae3da7fdd442b87cd0ebb831ef21ffaad8
|
[
"Unlicense"
] | 8
|
2021-04-10T11:53:52.000Z
|
2022-03-13T18:54:57.000Z
|
test/cmd/case_standard_fail.py
|
mkniewallner/flake8-alphabetize
|
11b1f0ae3da7fdd442b87cd0ebb831ef21ffaad8
|
[
"Unlicense"
] | 5
|
2021-04-20T18:49:39.000Z
|
2022-01-06T18:24:01.000Z
|
test/cmd/case_standard_fail.py
|
mkniewallner/flake8-alphabetize
|
11b1f0ae3da7fdd442b87cd0ebb831ef21ffaad8
|
[
"Unlicense"
] | 3
|
2021-06-17T13:02:50.000Z
|
2022-01-01T08:59:45.000Z
|
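# imports deliberately out of alphabetical order: this is the failing case the checker must flag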
from datetime import time, date
print(time(9, 39), date(2021, 4, 11))
| 17.75
| 37
| 0.690141
| 13
| 71
| 3.769231
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0.15493
| 71
| 3
| 38
| 23.666667
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
e85c0b952a4c96eec6e3ff97f8ddd478c75458c6
| 11,157
|
py
|
Python
|
model/UNet.py
|
THU-CVlab/JMedSeg
|
1c9c66a1b2c6e4c5e3f70ca9e1ed54447b944755
|
[
"MIT"
] | 26
|
2021-08-19T05:22:44.000Z
|
2022-03-08T05:44:43.000Z
|
model/UNet.py
|
Jittor/JMedSeg
|
1c9c66a1b2c6e4c5e3f70ca9e1ed54447b944755
|
[
"MIT"
] | null | null | null |
model/UNet.py
|
Jittor/JMedSeg
|
1c9c66a1b2c6e4c5e3f70ca9e1ed54447b944755
|
[
"MIT"
] | 3
|
2021-08-19T06:12:49.000Z
|
2021-08-19T11:41:16.000Z
|
# import jittor as jt
# from jittor import init
# from jittor import nn
# def double_conv(in_channels, out_channels):
# return nn.Sequential(
# nn.Conv(in_channels, out_channels, 3, padding=1),
# nn.ReLU(),
# nn.Conv(out_channels, out_channels, 3, padding=1),
# nn.ReLU()
# )
# class UNet(nn.Module):
# def __init__(self, n_channels, n_classes):
# super().__init__()
# self.dconv_down1 = double_conv(n_channels, 64)
# self.dconv_down2 = double_conv(64, 128)
# self.dconv_down3 = double_conv(128, 256)
# self.dconv_down4 = double_conv(256, 512)
# self.maxpool = nn.Pool(2, op='maximum')
# self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
# self.dconv_up3 = double_conv((256 + 512), 256)
# self.dconv_up2 = double_conv((128 + 256), 128)
# self.dconv_up1 = double_conv((128 + 64), 64)
# self.conv_last = nn.Conv(64, n_classes, 1)
# def execute(self, x):
# conv1 = self.dconv_down1(x)
# x = self.maxpool(conv1)
# conv2 = self.dconv_down2(x)
# x = self.maxpool(conv2)
# conv3 = self.dconv_down3(x)
# x = self.maxpool(conv3)
# x = self.dconv_down4(x)
# x = self.upsample(x)
# x = jt.contrib.concat([x, conv3], dim=1)
# x = self.dconv_up3(x)
# x = self.upsample(x)
# x = jt.contrib.concat([x, conv2], dim=1)
# x = self.dconv_up2(x)
# x = self.upsample(x)
# x = jt.contrib.concat([x, conv1], dim=1)
# x = self.dconv_up1(x)
# out = self.conv_last(x)
# return out
import jittor as jt
from jittor import init
from jittor import nn
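# Jittor UNet: DoubleConv blocks, max-pool downsampling, (optionally bilinear) upsampling, 1x1 output head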
class DoubleConv(nn.Module):
    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        if (not mid_channels):
            mid_channels = out_channels
        self.double_conv = nn.Sequential(
            nn.Conv(in_channels, mid_channels, 3, padding=1),
            nn.BatchNorm(mid_channels),
            nn.ReLU(),
            nn.Conv(mid_channels, out_channels, 3, padding=1),
            nn.BatchNorm(out_channels),
            nn.ReLU()
        )
    def execute(self, x):
        return self.double_conv(x)
class Down(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.maxpool_conv = nn.Sequential(
            nn.Pool(kernel_size=2, stride=2, op='maximum'),
            DoubleConv(in_channels, out_channels)
        )
    def execute(self, x):
        return self.maxpool_conv(x)
class Up(nn.Module):
    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear')
            self.conv = DoubleConv(in_channels, out_channels, (in_channels // 2))
        else:
            self.up = nn.ConvTranspose(in_channels, (in_channels // 2), 2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)
    def execute(self, x1, x2):
        x1 = self.up(x1)
        x = jt.contrib.concat([x2, x1], dim=1)
        return self.conv(x)
class OutConv(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv(in_channels, out_channels, 1)
    def execute(self, x):
        return self.conv(x)
class UNet(nn.Module):
    def __init__(self, n_channels=3, n_classes=2, bilinear=True):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        factor = (2 if bilinear else 1)
        self.down4 = Down(512, (1024 // factor))
        self.up1 = Up(1024, (512 // factor), bilinear)
        self.up2 = Up(512, (256 // factor), bilinear)
        self.up3 = Up(256, (128 // factor), bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)
    def execute(self, x):
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        logits = self.outc(x)
        return logits
    def get_loss(self, target, pred, ignore_index=None):
        loss_pred = nn.cross_entropy_loss(pred, target, ignore_index=ignore_index)
        return loss_pred
    def update_params(self, loss, optimizer):
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
def main():
    model = UNet()
    x = jt.ones([2, 3, 512, 512])
    y = model(x)
    print(y.shape)
    _ = y.data
    # Find total parameters and trainable parameters
    total_params = sum(p.numel() for p in model.parameters())
    print(f'{total_params:,} total parameters.')
    total_trainable_params = sum(
        p.numel() for p in model.parameters() if p.requires_grad)
    print(f'{total_trainable_params:,} training parameters.')
    '''
    UNet
    17,276,290 total parameters.
    17,267,458 training parameters.
    '''
    from jittorsummary import summary
    summary(model, input_size=(3, 512, 512))
if __name__ == '__main__':
    main()
# ========================================= conversion using pytorch ========================================= #
# from jittor.utils.pytorch_converter import convert
# pytorch_code="""
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
# class DoubleConv(nn.Module):
#     # (convolution => [BN] => ReLU) * 2 #
#     def __init__(self, in_channels, out_channels, mid_channels=None):
#         super().__init__()
#         if not mid_channels:
#             mid_channels = out_channels
#         self.double_conv = nn.Sequential(
#             nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
#             nn.BatchNorm2d(mid_channels),
#             nn.ReLU(inplace=True),
#             nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
#             nn.BatchNorm2d(out_channels),
#             nn.ReLU(inplace=True)
#         )
#     def forward(self, x):
#         return self.double_conv(x)
# class Down(nn.Module):
#     # Downscaling with maxpool then double conv #
#     def __init__(self, in_channels, out_channels):
#         super().__init__()
#         self.maxpool_conv = nn.Sequential(
#             nn.MaxPool2d(2),
#             DoubleConv(in_channels, out_channels)
#         )
#     def forward(self, x):
#         return self.maxpool_conv(x)
# class Up(nn.Module):
#     # Upscaling then double conv #
#     def __init__(self, in_channels, out_channels, bilinear=True):
#         super().__init__()
#         # if bilinear, use the normal convolutions to reduce the number of channels
#         if bilinear:
#             self.up = nn.Upsample(scale_factor=2, mode='bilinear')
#             self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
#         else:
#             self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
#             self.conv = DoubleConv(in_channels, out_channels)
#     def forward(self, x1, x2):
#         x1 = self.up(x1)
#         x = torch.cat([x2, x1], dim=1)
#         return self.conv(x)
# class OutConv(nn.Module):
#     def __init__(self, in_channels, out_channels):
#         super(OutConv, self).__init__()
#         self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
#     def forward(self, x):
#         return self.conv(x)
# class UNet(nn.Module):
#     def __init__(self, n_channels, n_classes, bilinear=True):
#         super(UNet, self).__init__()
#         self.n_channels = n_channels
#         self.n_classes = n_classes
#         self.bilinear = bilinear
#         self.inc = DoubleConv(n_channels, 64)
#         self.down1 = Down(64, 128)
#         self.down2 = Down(128, 256)
#         self.down3 = Down(256, 512)
#         factor = 2 if bilinear else 1
#         # Note that the parameters differ between the bilinear upsampling layer and
#         # the non-bilinear (deconvolution) layer, which uses more channels to
#         # restore information
#         self.down4 = Down(512, 1024 // factor)
#         self.up1 = Up(1024, 512 // factor, bilinear)
#         self.up2 = Up(512, 256 // factor, bilinear)
#         self.up3 = Up(256, 128 // factor, bilinear)
#         self.up4 = Up(128, 64, bilinear)
#         self.outc = OutConv(64, n_classes)
#     def forward(self, x):
#         x1 = self.inc(x)
#         x2 = self.down1(x1)
#         x3 = self.down2(x2)
#         x4 = self.down3(x3)
#         x5 = self.down4(x4)
#         x = self.up1(x5, x4)
#         x = self.up2(x, x3)
#         x = self.up3(x, x2)
#         x = self.up4(x, x1)
#         logits = self.outc(x)
#         return logits
# """
# jittor_code = convert(pytorch_code)
# print(jittor_code)
# from jittor.utils.pytorch_converter import convert
# pytorch_code="""
# import torch
# import torch.nn as nn
# def double_conv(in_channels, out_channels):
#     return nn.Sequential(
#         nn.Conv2d(in_channels, out_channels, 3, padding=1),
#         nn.ReLU(inplace=True),
#         nn.Conv2d(out_channels, out_channels, 3, padding=1),
#         nn.ReLU(inplace=True)
#     )
# class UNet(nn.Module):
#     def __init__(self, n_class):
#         super().__init__()
#         self.dconv_down1 = double_conv(3, 64)
#         self.dconv_down2 = double_conv(64, 128)
#         self.dconv_down3 = double_conv(128, 256)
#         self.dconv_down4 = double_conv(256, 512)
#         self.maxpool = nn.MaxPool2d(2)
#         self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
#         self.dconv_up3 = double_conv(256 + 512, 256)
#         self.dconv_up2 = double_conv(128 + 256, 128)
#         self.dconv_up1 = double_conv(128 + 64, 64)
#         self.conv_last = nn.Conv2d(64, n_class, 1)
#     def forward(self, x):
#         conv1 = self.dconv_down1(x)
#         x = self.maxpool(conv1)
#         conv2 = self.dconv_down2(x)
#         x = self.maxpool(conv2)
#         conv3 = self.dconv_down3(x)
#         x = self.maxpool(conv3)
#         x = self.dconv_down4(x)
#         x = self.upsample(x)
#         x = torch.cat([x, conv3], dim=1)
#         x = self.dconv_up3(x)
#         x = self.upsample(x)
#         x = torch.cat([x, conv2], dim=1)
#         x = self.dconv_up2(x)
#         x = self.upsample(x)
#         x = torch.cat([x, conv1], dim=1)
#         x = self.dconv_up1(x)
#         out = self.conv_last(x)
#         return out
# """
# jittor_code = convert(pytorch_code)
# print(jittor_code)
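# A minimal, hedged sketch of driving the converter directly (Tiny is a
# made-up module for illustration; convert() returns the translated source
# as a string, as in the blocks above):
# from jittor.utils.pytorch_converter import convert
# tiny_code = """
# import torch.nn as nn
# class Tiny(nn.Module):
#     def __init__(self):
#         super().__init__()
#         self.conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
#     def forward(self, x):
#         return self.conv(x)
# """
# print(convert(tiny_code))  # prints the equivalent Jittor module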
| 31.968481
| 101
| 0.567805
| 1,437
| 11,157
| 4.206681
| 0.115518
| 0.054591
| 0.08172
| 0.069479
| 0.819851
| 0.779487
| 0.728701
| 0.712821
| 0.658395
| 0.628288
| 0
| 0.052085
| 0.292731
| 11,157
| 349
| 102
| 31.968481
| 0.713978
| 0.616474
| 0
| 0.113402
| 0
| 0
| 0.026046
| 0.006511
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134021
| false
| 0
| 0.041237
| 0.030928
| 0.28866
| 0.030928
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
e86016c339b9b8d038fa6d2cc33bc66a30ae569d
| 72
|
py
|
Python
|
src/__init__.py
|
ps185367/test-pypi
|
0fa2a1f37889f4cc6700836ba0566d084bb1ef9a
|
[
"Apache-2.0"
] | null | null | null |
src/__init__.py
|
ps185367/test-pypi
|
0fa2a1f37889f4cc6700836ba0566d084bb1ef9a
|
[
"Apache-2.0"
] | 11
|
2021-09-14T13:20:04.000Z
|
2021-11-09T14:32:06.000Z
|
src/__init__.py
|
ps185367/test-pypi
|
0fa2a1f37889f4cc6700836ba0566d084bb1ef9a
|
[
"Apache-2.0"
] | 1
|
2021-09-13T22:22:42.000Z
|
2021-09-13T22:22:42.000Z
|
#!/usr/bin/env python3
from .key import HmacKey
from .sign import sign
| 14.4
| 24
| 0.75
| 12
| 72
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.152778
| 72
| 4
| 25
| 18
| 0.868852
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2cd21d398e099cd68ac7ad2398a52dc87ab386c5
| 60
|
py
|
Python
|
Dataset/__init__.py
|
MCC-WH/Token
|
eadc301f2df9e1851633be1b63c273659af0da49
|
[
"MIT"
] | 30
|
2021-12-12T03:34:01.000Z
|
2022-03-05T23:42:00.000Z
|
Dataset/__init__.py
|
MCC-WH/Token
|
eadc301f2df9e1851633be1b63c273659af0da49
|
[
"MIT"
] | 2
|
2021-12-29T14:55:05.000Z
|
2022-01-23T06:31:07.000Z
|
Dataset/__init__.py
|
MCC-WH/Token
|
eadc301f2df9e1851633be1b63c273659af0da49
|
[
"MIT"
] | 2
|
2021-12-15T06:51:59.000Z
|
2022-01-08T06:06:55.000Z
|
from .configdataset import *
from .ImageFromList import *
| 20
| 29
| 0.766667
| 6
| 60
| 7.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 60
| 2
| 30
| 30
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2cfdf272232af604734e5d1b8dff367cfbb6e12a
| 211
|
py
|
Python
|
hyperformer/data/__init__.py
|
acsets/hyperformer_for_mmt
|
883a825f77b76a4bff292660392e8e37755c5ed6
|
[
"Apache-2.0"
] | 65
|
2021-06-09T08:55:29.000Z
|
2022-03-31T10:46:43.000Z
|
hyperformer/data/__init__.py
|
acsets/hyperformer_for_mmt
|
883a825f77b76a4bff292660392e8e37755c5ed6
|
[
"Apache-2.0"
] | 1
|
2021-08-02T11:28:13.000Z
|
2021-08-24T11:54:26.000Z
|
hyperformer/data/__init__.py
|
acsets/hyperformer_for_mmt
|
883a825f77b76a4bff292660392e8e37755c5ed6
|
[
"Apache-2.0"
] | 7
|
2021-08-02T09:40:46.000Z
|
2022-03-31T11:27:03.000Z
|
from .multitask_sampler import MultiTaskBatchSampler
from .postprocessors import string_to_float, get_post_processor
from .tasks import TASK_MAPPING, AutoTask
from .utils import compute_task_max_decoding_length
| 42.2
| 63
| 0.886256
| 28
| 211
| 6.321429
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085308
| 211
| 4
| 64
| 52.75
| 0.917098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fa072dae0c613e7922dee3d1cd7f18d5c27bbd82
| 225
|
py
|
Python
|
docs/source/examples/FB2.0/post_array_connections_connection_key.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
docs/source/examples/FB2.0/post_array_connections_connection_key.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
docs/source/examples/FB2.0/post_array_connections_connection_key.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# post to the array-connections/connection-key endpoint to get a connection key
res = client.post_array_connections_connection_key()
print(res)
if type(res) == pypureclient.responses.ValidResponse:
    print(list(res.items))
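# Hedged setup sketch for the snippet above, which assumes a pre-built
# `client`; one plausible construction (target and api_token are placeholders):
# import pypureclient
# from pypureclient import flashblade
# client = flashblade.Client(target='flashblade.example.com', api_token='T-xxxx')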
| 37.5
| 79
| 0.795556
| 32
| 225
| 5.46875
| 0.625
| 0.222857
| 0.297143
| 0.331429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 225
| 5
| 80
| 45
| 0.870647
| 0.342222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
d71de44ab55b7adb622714120d198bc449b6f72c
| 90
|
py
|
Python
|
park/admin.py
|
Davepar/streetends
|
0e73b98ef6a7d9ae2be30f9c8f84d8829e277677
|
[
"MIT"
] | null | null | null |
park/admin.py
|
Davepar/streetends
|
0e73b98ef6a7d9ae2be30f9c8f84d8829e277677
|
[
"MIT"
] | null | null | null |
park/admin.py
|
Davepar/streetends
|
0e73b98ef6a7d9ae2be30f9c8f84d8829e277677
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from park.models import Park
admin.site.register(Park)
| 15
| 32
| 0.811111
| 14
| 90
| 5.214286
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122222
| 90
| 5
| 33
| 18
| 0.924051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d75dd7394bf56ddb52d4fcad52108e4fd16eb58b
| 58
|
py
|
Python
|
hello.py
|
Anilkumar95/python-75-hackathon
|
0cc9304e46ceace826090614b46d8048a068d106
|
[
"MIT"
] | null | null | null |
hello.py
|
Anilkumar95/python-75-hackathon
|
0cc9304e46ceace826090614b46d8048a068d106
|
[
"MIT"
] | null | null | null |
hello.py
|
Anilkumar95/python-75-hackathon
|
0cc9304e46ceace826090614b46d8048a068d106
|
[
"MIT"
] | 2
|
2019-01-27T16:59:48.000Z
|
2019-01-29T13:07:40.000Z
|
#print("Hello world")
print("This line will be printed.")
| 19.333333
| 35
| 0.706897
| 9
| 58
| 4.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 58
| 2
| 36
| 29
| 0.803922
| 0.344828
| 0
| 0
| 0
| 0
| 0.702703
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
ad2d00503fdf423c5577c029a7427696d58ec247
| 23
|
py
|
Python
|
bfdpie/__init__.py
|
malisal/bfdpie
|
7527a0e8bb8889dbbc85f758c5f2d48c4952dcdf
|
[
"MIT"
] | 2
|
2016-04-18T17:20:15.000Z
|
2018-05-12T18:14:51.000Z
|
bfdpie/__init__.py
|
malisal/bfdpie
|
7527a0e8bb8889dbbc85f758c5f2d48c4952dcdf
|
[
"MIT"
] | 4
|
2018-07-27T18:06:41.000Z
|
2019-06-18T20:02:02.000Z
|
bfdpie/__init__.py
|
malisal/bfdpie
|
7527a0e8bb8889dbbc85f758c5f2d48c4952dcdf
|
[
"MIT"
] | 2
|
2018-03-07T08:48:59.000Z
|
2018-07-27T18:58:36.000Z
|
from .bfdpie import *
| 7.666667
| 21
| 0.695652
| 3
| 23
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.217391
| 23
| 2
| 22
| 11.5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ad4a2f42e4540605a6c57444b30726dff8e239aa
| 15,547
|
py
|
Python
|
gwrappy/compute/compute.py
|
hairizuanbinnoorazman/gwrappy
|
aae569eb87d0aeac6126ccceac8a208b8dfdcf51
|
[
"Apache-2.0"
] | 5
|
2016-09-21T10:27:05.000Z
|
2017-03-13T11:37:16.000Z
|
gwrappy/compute/compute.py
|
hairizuanbinnoorazman/gwrappy
|
aae569eb87d0aeac6126ccceac8a208b8dfdcf51
|
[
"Apache-2.0"
] | 1
|
2021-11-15T17:46:52.000Z
|
2021-11-15T17:46:52.000Z
|
gwrappy/compute/compute.py
|
hairizuanbinnoorazman/gwrappy
|
aae569eb87d0aeac6126ccceac8a208b8dfdcf51
|
[
"Apache-2.0"
] | 2
|
2016-09-21T10:34:59.000Z
|
2017-04-05T10:38:10.000Z
|
from gwrappy.service import get_service
from gwrappy.utils import iterate_list
from itertools import chain
from time import sleep


class ComputeEngineUtility:
    def __init__(self, project_id, **kwargs):
        """
        Initializes object for interacting with Compute Engine API.

        | By default, Application Default Credentials are used.
        | If gcloud SDK isn't installed, credential files have to be specified using the kwargs *json_credentials_path* and *client_id*.

        :param project_id: Project ID linked to Compute Engine.
        :keyword max_retries: Argument specified with each API call to natively handle retryable errors.
        :type max_retries: integer
        :keyword client_secret_path: File path for client secret JSON file. Only required if credentials are invalid or unavailable.
        :keyword json_credentials_path: File path for automatically generated credentials.
        :keyword client_id: Credentials are stored as a key-value pair per client_id to facilitate multiple clients using the same credentials file. For simplicity, using one's email address is sufficient.
        """
        self._service = get_service('compute', **kwargs)
        self.project_id = project_id
        self._max_retries = kwargs.get('max_retries', 3)

    def get_project(self):
        """
        Abstraction of projects().get() method. [https://cloud.google.com/compute/docs/reference/latest/projects/get]

        :return: Project Resource
        """
        return self._service.projects().get(
            project=self.project_id
        ).execute(num_retries=self._max_retries)

    def list_regions(self, max_results=None, filter_str=None):
        """
        Abstraction of regions().list() method with inbuilt iteration functionality. [https://cloud.google.com/compute/docs/reference/latest/regions/list]

        :param max_results: If None, all results are iterated over and returned.
        :type max_results: integer
        :param filter_str: Check documentation link for more details.
        :return: Generator for dictionary objects representing resources.
        """
        return iterate_list(
            self._service.regions(),
            'items',
            max_results,
            self._max_retries,
            project=self.project_id,
            filter=filter_str
        )

    def list_zones(self, max_results=None, filter_str=None):
        """
        Abstraction of zones().list() method with inbuilt iteration functionality. [https://cloud.google.com/compute/docs/reference/latest/zones/list]

        :param max_results: If None, all results are iterated over and returned.
        :type max_results: integer
        :param filter_str: Check documentation link for more details.
        :return: Generator for dictionary objects representing resources.
        """
        return iterate_list(
            self._service.zones(),
            'items',
            max_results,
            self._max_retries,
            project=self.project_id,
            filter=filter_str
        )

    def list_instances(self, zone_id=None, max_results=None, filter_str=None):
        """
        Abstraction of instances().list() method with inbuilt iteration functionality. [https://cloud.google.com/compute/docs/reference/latest/instances/list]

        :param zone_id: Zone name. If None, all Zones are iterated over and returned.
        :param max_results: If None, all results are iterated over and returned.
        :type max_results: integer
        :param filter_str: Check documentation link for more details.
        :return: Generator for dictionary objects representing resources.
        """
        if zone_id is None:
            return_list = [
                iterate_list(
                    self._service.instances(),
                    'items',
                    max_results,
                    self._max_retries,
                    project=self.project_id,
                    zone=zone['name'],
                    filter=filter_str
                )
                for zone in self.list_zones()
            ]
            return chain(*return_list)
        else:
            return iterate_list(
                self._service.instances(),
                'items',
                max_results,
                self._max_retries,
                project=self.project_id,
                zone=zone_id,
                filter=filter_str
            )

    def list_addresses(self, region_id=None, max_results=None, filter_str=None):
        """
        Abstraction of addresses().list() method with inbuilt iteration functionality. [https://cloud.google.com/compute/docs/reference/latest/addresses/list]

        :param region_id: Region name. If None, all Regions are iterated over and returned.
        :param max_results: If None, all results are iterated over and returned.
        :type max_results: integer
        :param filter_str: Check documentation link for more details.
        :return: Generator for dictionary objects representing resources.
        """
        if region_id is None:
            return_list = [
                iterate_list(
                    self._service.addresses(),
                    'items',
                    max_results,
                    self._max_retries,
                    project=self.project_id,
                    region=region['name'],
                    filter=filter_str
                )
                for region in self.list_regions()
            ]
            return chain(*return_list)
        else:
            return iterate_list(
                self._service.addresses(),
                'items',
                max_results,
                self._max_retries,
                project=self.project_id,
                region=region_id,
                filter=filter_str
            )

    def list_operations(self, operation_type, location_id=None, max_results=None, filter_str=None):
        """
        Choose between region or zone operations with operation_type.

        Abstraction of zoneOperations()/regionOperations().list() method with inbuilt iteration functionality.
        https://cloud.google.com/compute/docs/reference/latest/zoneOperations/list
        https://cloud.google.com/compute/docs/reference/latest/regionOperations/list

        :param operation_type: 'zone' or 'region' type operations.
        :param location_id: Zone/Region name. If None, all Zones/Regions are iterated over and returned.
        :param max_results: If None, all results are iterated over and returned.
        :type max_results: integer
        :param filter_str: Check documentation link for more details.
        :return: Generator for dictionary objects representing resources.
        """
        assert operation_type in ('region', 'zone')
        if location_id is None:
            if operation_type == 'region':
                return_list = [
                    iterate_list(
                        self._service.regionOperations(),
                        'items',
                        max_results,
                        self._max_retries,
                        project=self.project_id,
                        region=region['name'],
                        filter=filter_str
                    )
                    for region in self.list_regions()
                ]
            else:
                return_list = [
                    iterate_list(
                        self._service.zoneOperations(),
                        'items',
                        max_results,
                        self._max_retries,
                        project=self.project_id,
                        zone=zone['name'],
                        filter=filter_str
                    )
                    for zone in self.list_zones()
                ]
            return chain(*return_list)
        else:
            if operation_type == 'region':
                return iterate_list(
                    self._service.regionOperations(),
                    'items',
                    max_results,
                    self._max_retries,
                    project=self.project_id,
                    region=location_id,
                    filter=filter_str
                )
            else:
                return iterate_list(
                    self._service.zoneOperations(),
                    'items',
                    max_results,
                    self._max_retries,
                    project=self.project_id,
                    zone=location_id,
                    filter=filter_str
                )

    def get_operation(self, operation_type, location_id, operation_name):
        """
        Choose between region or zone operations with operation_type.

        Abstraction of zoneOperations()/regionOperations().get() method.
        https://cloud.google.com/compute/docs/reference/latest/zoneOperations/get
        https://cloud.google.com/compute/docs/reference/latest/regionOperations/get

        :param operation_type: 'zone' or 'region' type operations.
        :param location_id: Zone/Region name.
        :param operation_name: Operation name.
        :return: ZoneOperations/RegionOperations Resource.
        """
        assert operation_type in ('region', 'zone')
        if operation_type == 'region':
            return self._service.regionOperations().get(
                project=self.project_id,
                region=location_id,
                operation=operation_name
            ).execute(num_retries=self._max_retries)
        else:
            return self._service.zoneOperations().get(
                project=self.project_id,
                zone=location_id,
                operation=operation_name
            ).execute(num_retries=self._max_retries)

    def poll_operation_status(self, operation_type, location_id, operation_name, end_state, sleep_time=0.5):
        """
        Poll an operation until the desired end_state is reached, e.g. 'DONE' when adding addresses.

        :param operation_type: 'zone' or 'region' type operations.
        :param location_id: Zone/Region name.
        :param operation_name: Operation name.
        :param end_state: Final status that signifies operation is finished.
        :param sleep_time: Interval between polls.
        :return: ZoneOperations/RegionOperations Resource.
        """
        status = None
        resp = None
        while status != end_state:
            resp = self.get_operation(
                operation_type=operation_type,
                location_id=location_id,
                operation_name=operation_name
            )
            status = resp['status']
            sleep(sleep_time)
        return resp

    def get_address(self, region_id, address_name):
        """
        Abstraction of addresses().get() method. [https://cloud.google.com/compute/docs/reference/latest/addresses/get]

        :param region_id: Region name.
        :param address_name: Address name.
        :return: Addresses Resource.
        """
        return self._service.addresses().get(
            project=self.project_id,
            region=region_id,
            address=address_name
        ).execute(num_retries=self._max_retries)

    def add_address(self, region_id, address_name):
        """
        Abstraction of address.insert() method with operation polling functionality. [https://cloud.google.com/compute/docs/reference/latest/addresses/insert]

        :param region_id: Region name.
        :param address_name: Address name.
        :return: RegionOperations Resource.
        """
        resp = self._service.addresses().insert(
            project=self.project_id,
            region=region_id,
            body={'name': address_name}
        ).execute(num_retries=self._max_retries)
        return self.poll_operation_status(
            operation_type='region',
            location_id=region_id,
            operation_name=resp['name'],
            end_state='DONE'
        )

    def delete_address(self, region_id, address_name):
        """
        Abstraction of address.delete() method with operation polling functionality. [https://cloud.google.com/compute/docs/reference/latest/addresses/delete]

        :param region_id: Region name.
        :param address_name: Address name.
        :return: RegionOperations Resource.
        """
        resp = self._service.addresses().delete(
            project=self.project_id,
            region=region_id,
            address=address_name
        ).execute(num_retries=self._max_retries)
        return self.poll_operation_status(
            operation_type='region',
            location_id=region_id,
            operation_name=resp['name'],
            end_state='DONE'
        )

    def get_instance(self, zone_id, instance_name):
        """
        Abstraction of instances().get() method. [https://cloud.google.com/compute/docs/reference/latest/instances/get]

        :param zone_id: Zone name.
        :param instance_name: Instance name.
        :return: Instances Resource.
        """
        return self._service.instances().get(
            project=self.project_id,
            zone=zone_id,
            instance=instance_name
        ).execute(num_retries=self._max_retries)

    def start_instance(self, zone_id, instance_name):
        """
        Abstraction of instances().start() method with operation polling functionality. [https://cloud.google.com/compute/docs/reference/latest/instances/start]

        :param zone_id: Zone name.
        :param instance_name: Instance name.
        :return: ZoneOperations Resource.
        """
        resp = self._service.instances().start(
            project=self.project_id,
            zone=zone_id,
            instance=instance_name
        ).execute(num_retries=self._max_retries)
        return self.poll_operation_status(
            operation_type='zone',
            location_id=zone_id,
            operation_name=resp['name'],
            end_state='DONE'
        )

    def stop_instance(self, zone_id, instance_name):
        """
        Abstraction of instances().stop() method with operation polling functionality. [https://cloud.google.com/compute/docs/reference/latest/instances/stop]

        :param zone_id: Zone name.
        :param instance_name: Instance name.
        :return: ZoneOperations Resource.
        """
        resp = self._service.instances().stop(
            project=self.project_id,
            zone=zone_id,
            instance=instance_name
        ).execute(num_retries=self._max_retries)
        return self.poll_operation_status(
            operation_type='zone',
            location_id=zone_id,
            operation_name=resp['name'],
            end_state='DONE'
        )

    def delete_instance(self, zone_id, instance_name):
        """
        Abstraction of instances().delete() method with operation polling functionality. [https://cloud.google.com/compute/docs/reference/latest/instances/delete]

        :param zone_id: Zone name.
        :param instance_name: Instance name.
        :return: ZoneOperations Resource.
        """
        resp = self._service.instances().delete(
            project=self.project_id,
            zone=zone_id,
            instance=instance_name
        ).execute(num_retries=self._max_retries)
        return self.poll_operation_status(
            operation_type='zone',
            location_id=zone_id,
            operation_name=resp['name'],
            end_state='DONE'
        )
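# A minimal usage sketch for the class above (hedged: 'my-project-id',
# 'us-central1-a' and 'my-instance' are placeholder names, and Application
# Default Credentials are assumed, as described in the __init__ docstring):
# compute = ComputeEngineUtility('my-project-id')
# for instance in compute.list_instances(zone_id='us-central1-a'):
#     print(instance['name'], instance['status'])
# op = compute.start_instance('us-central1-a', 'my-instance')  # polls until 'DONE'
# print(op['status'])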
| 36.754137
| 205
| 0.594841
| 1,622
| 15,547
| 5.5
| 0.109125
| 0.025221
| 0.032059
| 0.044838
| 0.786459
| 0.758996
| 0.749355
| 0.717072
| 0.707544
| 0.640287
| 0
| 0.000284
| 0.320962
| 15,547
| 422
| 206
| 36.841232
| 0.844748
| 0.378337
| 0
| 0.691304
| 0
| 0
| 0.022436
| 0
| 0
| 0
| 0
| 0
| 0.008696
| 1
| 0.069565
| false
| 0
| 0.017391
| 0
| 0.178261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ad54e911436dd459c1b3b4ca73675aac1d117e17
| 150
|
py
|
Python
|
Zadaniy/task2/moduls/__init__.py
|
Dmitry-15/15_laba
|
5b27023e5bddf8e8cfd6455912f72e07adfcdf80
|
[
"MIT"
] | null | null | null |
Zadaniy/task2/moduls/__init__.py
|
Dmitry-15/15_laba
|
5b27023e5bddf8e8cfd6455912f72e07adfcdf80
|
[
"MIT"
] | null | null | null |
Zadaniy/task2/moduls/__init__.py
|
Dmitry-15/15_laba
|
5b27023e5bddf8e8cfd6455912f72e07adfcdf80
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .fun import get_human, display_people, whois
__all__ = ['get_human', 'display_people', 'whois']
| 21.428571
| 50
| 0.68
| 21
| 150
| 4.47619
| 0.761905
| 0.170213
| 0.319149
| 0.446809
| 0.553191
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015385
| 0.133333
| 150
| 7
| 50
| 21.428571
| 0.707692
| 0.286667
| 0
| 0
| 0
| 0
| 0.264151
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|