hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
515cab618ed79aaba5d009928d26db2168402cb0 | 6,387 | py | Python | tests/test_271.py | pedromtorres/TigerShark | 2790a7c03905a094b126b48387c7919c09cce238 | [
"BSD-3-Clause"
] | 24 | 2015-03-18T10:15:20.000Z | 2022-03-18T13:38:34.000Z | tests/test_271.py | tspannhw/TigerShark | 5081641f1b189a43e9eab4813256598cc0a79f6f | [
"BSD-3-Clause"
] | 6 | 2015-03-27T12:36:57.000Z | 2021-04-13T15:01:24.000Z | tests/test_271.py | tspannhw/TigerShark | 5081641f1b189a43e9eab4813256598cc0a79f6f | [
"BSD-3-Clause"
] | 21 | 2015-11-21T09:19:47.000Z | 2020-09-17T16:52:50.000Z | import unittest
import logging
import sys
import datetime
from tigershark.facade import f271
from tigershark.parsers import M271_4010_X092_A1
class TestParsed271(unittest.TestCase):
def setUp(self):
m = M271_4010_X092_A1.parsed_271
with open('tests/271-example.txt') as f:
parsed = m.unmarshall(f.read().strip())
self.f = f271.F271_4010(parsed)
def test_number_of_facades(self):
self.assertEqual(len(self.f.facades), 1)
def test_header(self):
h = self.f.facades[0].header.hierarchical_transaction_info
self.assertEqual(h.structure, ("0022", "Information Source, "
"Information Receiver, Subscriber, or Dependent."))
self.assertEqual(h.purpose, ("11", "Response"))
self.assertEqual(h.transaction_id, "11111")
self.assertEqual(h.creation_date, datetime.date(2012, 6, 5))
self.assertEqual(h.creation_time, datetime.time(23, 24, 23))
def test_number_of_receivers(self):
self.assertEqual(len(self.f.facades[0].source.receivers), 1)
def test_number_of_subscribers(self):
self.assertEqual(
len(self.f.facades[0].source.receivers[0].subscribers), 1)
def test_hierarchy(self):
source = self.f.facades[0].source
h = source.hierarchy
self.assertEqual(h.id, "1")
self.assertEqual(h.parent_id, '')
self.assertEqual(h.level, ("20", "Information Source"))
self.assertTrue(h.has_children)
receiver = source.receivers[0]
h = receiver.hierarchy
self.assertEqual(h.id, "2")
self.assertEqual(h.parent_id, '1')
self.assertEqual(h.level, ("21", "Information Receiver"))
self.assertTrue(h.has_children)
subscriber = receiver.subscribers[0]
h = subscriber.hierarchy
self.assertEqual(h.id, "3")
self.assertEqual(h.parent_id, '2')
self.assertEqual(h.level, ("22", "Subscriber"))
self.assertFalse(h.has_children)
def test_source(self):
source = self.f.facades[0].source
name = source.source_information.name
self.assertEqual(name.entity_identifier,
("PR", "Payer"))
self.assertEqual(name.entity_type,
("2", "Non-Person Entity"))
self.assertEqual(name.org_name, "Health Net Inc")
self.assertEqual(name.id_code, "10385")
self.assertEqual(name.id_code_qual,
("PI", "Payor Identification"))
self.assertFalse(name.is_person)
self.assertTrue(name.is_organization)
def test_receiver(self):
receiver = self.f.facades[0].source.receivers[0]
name = receiver.receiver_information.name
self.assertEqual(name.entity_identifier,
("1P", "Provider"))
self.assertEqual(name.entity_type,
("2", "Non-Person Entity"))
self.assertEqual(name.org_name, "DR. ACULA")
self.assertEqual(name.id_code, "1111111111")
self.assertEqual(name.id_code_qual,
("XX", "Health Care Financing Administration National "\
"Provider Identifier"))
self.assertFalse(name.is_person)
self.assertTrue(name.is_organization)
def test_subscriber(self):
def test_trace_numbers():
def _test(i, trace_type, trace_number, entity_id, entity_addl_id):
self.assertEqual(subscriber.trace_numbers[i].trace_type,
trace_type)
self.assertEqual(subscriber.trace_numbers[i].trace_number,
trace_number)
self.assertEqual(subscriber.trace_numbers[i].entity_id,
entity_id)
self.assertEqual(
subscriber.trace_numbers[i].entity_additional_id,
entity_addl_id)
self.assertEqual(len(subscriber.trace_numbers), 3)
_test(0, ("1", "Current Transaction Trace Numbers"),
"222222222", "9ZIRMEDCOM", "ELR ID")
_test(1, ("2",
"Referenced Transaction Trace Numbers (Value from 270)"),
"333333333", "9ZIRMEDCOM", "ELI ID")
_test(2, ("1", "Current Transaction Trace Numbers"),
"4444444444", "9MEDDATACO", "")
def test_validations():
self.assertEqual(
len(subscriber.personal_information.request_validations),
4)
validations = subscriber.personal_information.request_validations
for validation in validations:
self.assertFalse(validation.valid_request)
self.assertEqual(validation.follow_up_action_code,
("C", "Please Correct and Resubmit"))
self.assertEqual(validations[0].reject_reason,
("72", "Invalid/Missing Subscriber/Insured ID"))
self.assertEqual(validations[1].reject_reason,
("73", "Invalid/Missing Subscriber/Insured Name"))
self.assertEqual(validations[2].reject_reason,
("73", "Invalid/Missing Subscriber/Insured Name"))
self.assertEqual(validations[3].reject_reason,
("58", "Invalid/Missing Date-of-Birth"))
def test_dates():
self.assertEqual(len(subscriber.personal_information.dates), 1)
date = subscriber.personal_information.dates[0]
self.assertEqual(date.type, ("291", "Plan"))
self.assertEqual(date.time, datetime.date(2012, 4, 8))
self.assertEqual(date.time_range, None)
subscriber = self.f.facades[0].source.receivers[0].subscribers[0]
name = subscriber.personal_information.name
self.assertEqual(name.entity_identifier,
("IL", "Insured"))
self.assertEqual(name.entity_type,
("1", "Person"))
self.assertEqual(name.id_code, "R11111111")
self.assertEqual(name.id_code_qual,
("MI", "Member Identification Number"))
self.assertTrue(name.is_person)
self.assertFalse(name.is_organization)
test_trace_numbers()
test_validations()
test_dates()
if __name__ == "__main__":
logging.basicConfig(
stream=sys.stderr,
level=logging.INFO,
)
unittest.main()
| 41.206452 | 78 | 0.608736 | 690 | 6,387 | 5.469565 | 0.242029 | 0.18283 | 0.059353 | 0.024112 | 0.455485 | 0.330419 | 0.271595 | 0.152623 | 0.138845 | 0.138845 | 0 | 0.04057 | 0.274464 | 6,387 | 154 | 79 | 41.474026 | 0.773845 | 0 | 0 | 0.176471 | 0 | 0 | 0.123376 | 0.003288 | 0 | 0 | 0 | 0 | 0.411765 | 0 | null | null | 0 | 0.044118 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
515d5f82ce87af195121b8847cb3e553f85bb1c4 | 194 | py | Python | src/Lexer/run.py | PetukhovVictor/compiler2 | 0bf87393ce9ecdd421393165fc14cb7f03f5e3b8 | [
"MIT"
] | 3 | 2017-09-08T21:35:31.000Z | 2019-04-24T11:48:59.000Z | src/Lexer/run.py | PetukhovVictor/compiler2 | 0bf87393ce9ecdd421393165fc14cb7f03f5e3b8 | [
"MIT"
] | 1 | 2018-11-19T15:34:00.000Z | 2018-11-19T15:35:52.000Z | src/Lexer/run.py | PetukhovVictor/compiler2 | 0bf87393ce9ecdd421393165fc14cb7f03f5e3b8 | [
"MIT"
] | 4 | 2017-03-13T06:16:48.000Z | 2019-04-24T11:49:00.000Z | from .rules import token_expressions
from .lex import lex
def run(code):
""" Wrapper to run the Lexer (with the token expressions listed here). """
return lex(code, token_expressions)
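# A minimal usage sketch (the input string below is hypothetical; `lex` is
# assumed to return the token stream produced from `token_expressions`):
#
#     tokens = run("x := 40 + 2")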
| 24.25 | 78 | 0.726804 | 28 | 194 | 4.964286 | 0.607143 | 0.345324 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.185567 | 194 | 7 | 79 | 27.714286 | 0.879747 | 0.340206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.5 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
515e110e923bd04c8ebed32a1acaa5cd9ac55ba1 | 943 | py | Python | 2016b/main.py | xdr940/iKaggle | cc0210e089e5f1af228f02bf67bb9a4459336722 | [
"MIT"
] | null | null | null | 2016b/main.py | xdr940/iKaggle | cc0210e089e5f1af228f02bf67bb9a4459336722 | [
"MIT"
] | null | null | null | 2016b/main.py | xdr940/iKaggle | cc0210e089e5f1af228f02bf67bb9a4459336722 | [
"MIT"
] | null | null | null | import pandas as pd
import pandas_profiling
from path import Path
import numpy as np
from scipy.stats import chi2_contingency
from collections import Counter
root = Path('/home/roit/datasets/kaggle/2016b')
dump_path = root/'dump'
ge_info = root/'gene_info'
exitnpy = False
if exitnpy==False:
genes_dic = []
genes = []
snps_sorted = pd.read_csv(dump_path/'sorted_cols_series.csv')
cnt=0
for file in ge_info.files():
gene = open(file).read()#str: rs1\n rs2\n...
ls = gene.split('\n')
ls.pop()
cnt+=len(ls)
genes_dic+=ls
genes.append(ls)
print(cnt)
vecs = np.zeros([len(genes),len(snps_sorted)])
for i in range(len(genes)):
for j in range(len(genes[i])):
col = genes_dic.index(genes[i][j])
vecs[i][col] = 1
np.save('vecs.npy',vecs)
else:
vecs = np.load('vecs.npy')
sum_vec = vecs.sum(axis=0)  # sum over rows
print(sum_vec.sum()) | 21.431818 | 65 | 0.622481 | 148 | 943 | 3.851351 | 0.466216 | 0.042105 | 0.035088 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013793 | 0.231177 | 943 | 44 | 66 | 21.431818 | 0.772414 | 0.02333 | 0 | 0 | 0 | 0 | 0.092391 | 0.058696 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
515e89e12ec857736349c37b4aecc618592abe73 | 77 | py | Python | const.py | tfyoung/wattsonhass | eece56779561277fa9f2928230ac39bd43329344 | [
"Apache-2.0"
] | 1 | 2021-02-28T12:07:20.000Z | 2021-02-28T12:07:20.000Z | const.py | tfyoung/wattsonhass | eece56779561277fa9f2928230ac39bd43329344 | [
"Apache-2.0"
] | null | null | null | const.py | tfyoung/wattsonhass | eece56779561277fa9f2928230ac39bd43329344 | [
"Apache-2.0"
] | null | null | null | """Constants for the Wattson power meter integration."""
DOMAIN = "wattson"
| 19.25 | 56 | 0.727273 | 9 | 77 | 6.222222 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 77 | 3 | 57 | 25.666667 | 0.848485 | 0.649351 | 0 | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
515f0a6a29381f3507258c4ec4a21b66fa8b9463 | 2,808 | py | Python | RL/test_depths.py | RECON-Labs-Inc/svox2 | 2946c1573fc4c8c8f378bf8154c29ba8d62af927 | [
"BSD-2-Clause"
] | null | null | null | RL/test_depths.py | RECON-Labs-Inc/svox2 | 2946c1573fc4c8c8f378bf8154c29ba8d62af927 | [
"BSD-2-Clause"
] | null | null | null | RL/test_depths.py | RECON-Labs-Inc/svox2 | 2946c1573fc4c8c8f378bf8154c29ba8d62af927 | [
"BSD-2-Clause"
] | null | null | null | import sys
from pathlib import Path
from datetime import datetime
import argparse
import json
import torch
from torchvision.utils import save_image
import torchvision
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
import open3d as o3d
from svox2 import *
from pyvox.models import Vox, Color
from pyvox.writer import VoxWriter
import svox2
from importlib import reload
reload(svox2)
from svox2 import *
# TODO: modify this:
sys.path.append("/workspace/svox2/opt")
from util.dataset import datasets
from util import config_util
# Our nice tools
sys.path.append("/workspace/aseeo-research")
import RLResearch.utils.depth_utils as du
import RLResearch.utils.gen_utils as gu
img_id = 3
NPY_PREFIX = "depth_"
project_folder = "/workspace/datasets/cctv_2"
data_folder = project_folder + "/result/depth_npy/"
ply_folder = project_folder + "/result/debug_ply"
Path(ply_folder).mkdir(exist_ok=True, parents=True)
dataset = datasets["nsvf"](
project_folder,
split="test_train"
)
def export_pointcloud(img_id, data_folder, ply_folder):
depth_filename = data_folder + "/" + NPY_PREFIX + str(img_id) + ".npy"
depth_img_np = np.load(depth_filename)
pointcloud_filename = Path(ply_folder)/Path(depth_filename).name
pointcloud_filename = pointcloud_filename.with_suffix(".ply")
c2w = dataset.c2w[img_id].to(device = "cpu")
print("Rendering pose:", img_id)
print(c2w)
# print("ndc")
# print(dataset.ndc_coeffs)
# Can take from somewhere else that takes less time to load
# width=dataset.get_image_size(0)[1]
# height=dataset.get_image_size(0)[0]
# fx = dataset.intrins.get('fx', 0)
# fy = dataset.intrins.get('fy', 0)
# cx = dataset.intrins.get('cx', 0)
# cy = dataset.intrins.get('cy', 0)
width = 3840
height = 2160
fx = 3243.357296552027
fy = 3243.357296552027
cx = 1920.0
cy = 1080.0
# depth_images = [depth_img_np]
radial_weight = du.make_radial_weight(width, height, fx)
depth_img_np = depth_img_np * radial_weight
depth_img_np = depth_img_np.astype(np.float32)
du.write_pointcloud_from_depth(
depth_img_np,
str(pointcloud_filename.resolve()),
w = width,
h = height,
fx = fx,
fy = fy,
cx = cx,
cy = cy,
stride = 8, transform = c2w)
for i in [0, 3, 5, 8, 10, 90, 130, 180]:
export_pointcloud(i, data_folder, ply_folder)
| 26.490566 | 74 | 0.654915 | 376 | 2,808 | 4.702128 | 0.385638 | 0.031674 | 0.039593 | 0.024887 | 0.06448 | 0.022624 | 0 | 0 | 0 | 0 | 0 | 0.041409 | 0.251781 | 2,808 | 105 | 75 | 26.742857 | 0.800095 | 0.130698 | 0 | 0.030303 | 0 | 0 | 0.063041 | 0.021014 | 0 | 0 | 0 | 0.009524 | 0 | 1 | 0.015152 | false | 0 | 0.363636 | 0 | 0.378788 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
515f21eaa0b5ed2cf1b0e5658806e586107dfcd7 | 1,376 | py | Python | examples/mxnet/dis_kvstore/client.py | tqchen/dgl | d57ff78da11193fbbee7f37a69fcfe1c14da2ae4 | [
"Apache-2.0"
] | 2 | 2020-05-10T14:06:12.000Z | 2021-01-01T02:57:20.000Z | examples/mxnet/dis_kvstore/client.py | tqchen/dgl | d57ff78da11193fbbee7f37a69fcfe1c14da2ae4 | [
"Apache-2.0"
] | null | null | null | examples/mxnet/dis_kvstore/client.py | tqchen/dgl | d57ff78da11193fbbee7f37a69fcfe1c14da2ae4 | [
"Apache-2.0"
] | null | null | null | # This is a simple MXNet client demo that shows how to use the DGL distributed kvstore.
import dgl
import argparse
import mxnet as mx
ID = []
ID.append(mx.nd.array([0,1], dtype='int64'))
ID.append(mx.nd.array([2,3], dtype='int64'))
ID.append(mx.nd.array([4,5], dtype='int64'))
ID.append(mx.nd.array([6,7], dtype='int64'))
edata_partition_book = {'edata':mx.nd.array([0,0,1,1,2,2,3,3], dtype='int64')}
ndata_partition_book = {'ndata':mx.nd.array([0,0,1,1,2,2,3,3], dtype='int64')}
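# A descriptive note (inferred from the arrays above): each partition book maps
# an entity id (by index position) to the partition that owns it, so ids 0-7
# are spread evenly across the four kvstore partitions 0-3.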
def start_client():
client = dgl.contrib.start_client(ip_config='ip_config.txt',
ndata_partition_book=ndata_partition_book,
edata_partition_book=edata_partition_book)
client.push(name='edata', id_tensor=ID[client.get_id()], data_tensor=mx.nd.array([[1.,1.,1.],[1.,1.,1.]]))
client.push(name='ndata', id_tensor=ID[client.get_id()], data_tensor=mx.nd.array([[2.,2.,2.],[2.,2.,2.]]))
client.barrier()
tensor_edata = client.pull(name='edata', id_tensor=mx.nd.array([0,1,2,3,4,5,6,7], dtype='int64'))
tensor_ndata = client.pull(name='ndata', id_tensor=mx.nd.array([0,1,2,3,4,5,6,7], dtype='int64'))
print(tensor_edata)
client.barrier()
print(tensor_ndata)
client.barrier()
if client.get_id() == 0:
client.shut_down()
if __name__ == '__main__':
start_client() | 32.761905 | 110 | 0.631541 | 227 | 1,376 | 3.656388 | 0.242291 | 0.048193 | 0.108434 | 0.060241 | 0.439759 | 0.343373 | 0.343373 | 0.245783 | 0.245783 | 0.245783 | 0 | 0.060262 | 0.167878 | 1,376 | 42 | 111 | 32.761905 | 0.664629 | 0.055233 | 0 | 0.111111 | 0 | 0 | 0.070054 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.111111 | 0 | 0.148148 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
515f518715b6288fc69c17ec11ceead08e8d514f | 9,535 | py | Python | qtgui.py | izaletel/NeuroMorpho-Access-Tool | 3441785edbddaf4b357377e9c7897dbbad42dc87 | [
"MIT"
] | null | null | null | qtgui.py | izaletel/NeuroMorpho-Access-Tool | 3441785edbddaf4b357377e9c7897dbbad42dc87 | [
"MIT"
] | null | null | null | qtgui.py | izaletel/NeuroMorpho-Access-Tool | 3441785edbddaf4b357377e9c7897dbbad42dc87 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'qtgui.ui'
##
## Created by: Qt User Interface Compiler version 6.2.3
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale,
QMetaObject, QObject, QPoint, QRect,
QSize, QTime, QUrl, Qt)
from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor,
QFont, QFontDatabase, QGradient, QIcon,
QImage, QKeySequence, QLinearGradient, QPainter,
QPalette, QPixmap, QRadialGradient, QTransform)
from PySide6.QtWidgets import (QApplication, QComboBox, QLabel, QLineEdit,
QMainWindow, QProgressBar, QPushButton, QSizePolicy,
QTabWidget, QTextBrowser, QWidget)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
if not MainWindow.objectName():
MainWindow.setObjectName(u"MainWindow")
MainWindow.resize(800, 600)
MainWindow.setLocale(QLocale(QLocale.English, QLocale.UnitedStates))
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName(u"centralwidget")
self.tabWidget = QTabWidget(self.centralwidget)
self.tabWidget.setObjectName(u"tabWidget")
self.tabWidget.setGeometry(QRect(0, 0, 801, 551))
self.tab_acquire = QWidget()
self.tab_acquire.setObjectName(u"tab_acquire")
self.brain_region_menu = QComboBox(self.tab_acquire)
self.brain_region_menu.setObjectName(u"brain_region_menu")
self.brain_region_menu.setGeometry(QRect(90, 20, 191, 21))
self.species_choice_menu = QComboBox(self.tab_acquire)
self.species_choice_menu.setObjectName(u"species_choice_menu")
self.species_choice_menu.setGeometry(QRect(90, 50, 191, 21))
self.cell_type_choice_menu = QComboBox(self.tab_acquire)
self.cell_type_choice_menu.setObjectName(u"cell_type_choice_menu")
self.cell_type_choice_menu.setGeometry(QRect(90, 80, 191, 21))
self.brain_region_menu_label = QLabel(self.tab_acquire)
self.brain_region_menu_label.setObjectName(u"brain_region_menu_label")
self.brain_region_menu_label.setGeometry(QRect(10, 20, 81, 21))
self.species_choice_menu_label = QLabel(self.tab_acquire)
self.species_choice_menu_label.setObjectName(u"species_choice_menu_label")
self.species_choice_menu_label.setGeometry(QRect(10, 50, 81, 21))
self.cell_type_choice_menu_label = QLabel(self.tab_acquire)
self.cell_type_choice_menu_label.setObjectName(u"cell_type_choice_menu_label")
self.cell_type_choice_menu_label.setGeometry(QRect(10, 80, 81, 21))
self.acq_progressbar = QProgressBar(self.tab_acquire)
self.acq_progressbar.setObjectName(u"acq_progressbar")
self.acq_progressbar.setGeometry(QRect(450, 10, 281, 23))
self.acq_progressbar.setValue(0)
self.acq_textbox = QTextBrowser(self.tab_acquire)
self.acq_textbox.setObjectName(u"acq_textbox")
self.acq_textbox.setGeometry(QRect(20, 150, 751, 371))
self.acq_textbox.setOpenLinks(False)
self.acq_button = QPushButton(self.tab_acquire)
self.acq_button.setObjectName(u"acq_button")
self.acq_button.setGeometry(QRect(320, 110, 131, 31))
self.acq_entry = QLineEdit(self.tab_acquire)
self.acq_entry.setObjectName(u"acq_entry")
self.acq_entry.setGeometry(QRect(450, 60, 251, 21))
self.acq_entry_label = QLabel(self.tab_acquire)
self.acq_entry_label.setObjectName(u"acq_entry_label")
self.acq_entry_label.setGeometry(QRect(450, 40, 251, 21))
self.open_csv_file_button = QPushButton(self.tab_acquire)
self.open_csv_file_button.setObjectName(u"open_csv_file_button")
self.open_csv_file_button.setGeometry(QRect(510, 110, 151, 31))
self.acq_button_continue = QPushButton(self.tab_acquire)
self.acq_button_continue.setObjectName(u"acq_button_continue")
self.acq_button_continue.setEnabled(False)
self.acq_button_continue.setGeometry(QRect(30, 110, 121, 31))
self.acq_button_cancel = QPushButton(self.tab_acquire)
self.acq_button_cancel.setObjectName(u"acq_button_cancel")
self.acq_button_cancel.setEnabled(False)
self.acq_button_cancel.setGeometry(QRect(160, 110, 121, 31))
self.tabWidget.addTab(self.tab_acquire, "")
self.tab_image = QWidget()
self.tab_image.setObjectName(u"tab_image")
self.img_textbox = QTextBrowser(self.tab_image)
self.img_textbox.setObjectName(u"img_textbox")
self.img_textbox.setGeometry(QRect(20, 150, 751, 371))
self.img_textbox.setOpenLinks(False)
self.img_progressbar = QProgressBar(self.tab_image)
self.img_progressbar.setObjectName(u"img_progressbar")
self.img_progressbar.setGeometry(QRect(450, 10, 281, 23))
self.img_progressbar.setValue(0)
self.img_button = QPushButton(self.tab_image)
self.img_button.setObjectName(u"img_button")
self.img_button.setGeometry(QRect(320, 110, 131, 31))
self.img_csv_choice_list_label = QLabel(self.tab_image)
self.img_csv_choice_list_label.setObjectName(u"img_csv_choice_list_label")
self.img_csv_choice_list_label.setGeometry(QRect(20, 10, 81, 21))
self.img_csv_choice_list = QLineEdit(self.tab_image)
self.img_csv_choice_list.setObjectName(u"img_csv_choice_list")
self.img_csv_choice_list.setGeometry(QRect(90, 10, 291, 21))
self.img_open_csv_file_button = QPushButton(self.tab_image)
self.img_open_csv_file_button.setObjectName(u"img_open_csv_file_button")
self.img_open_csv_file_button.setGeometry(QRect(260, 50, 121, 21))
self.open_images_directory_button = QPushButton(self.tab_image)
self.open_images_directory_button.setObjectName(u"open_images_directory_button")
self.open_images_directory_button.setGeometry(QRect(510, 110, 151, 31))
self.tabWidget.addTab(self.tab_image, "")
self.tab_about = QWidget()
self.tab_about.setObjectName(u"tab_about")
self.about_label = QTextBrowser(self.tab_about)
self.about_label.setObjectName(u"about_label")
self.about_label.setGeometry(QRect(20, 150, 751, 371))
self.about_label.setOpenLinks(False)
self.tabWidget.addTab(self.tab_about, "")
self.exit_button = QPushButton(self.centralwidget)
self.exit_button.setObjectName(u"exit_button")
self.exit_button.setGeometry(QRect(330, 560, 131, 31))
self.open_file_location_button = QPushButton(self.centralwidget)
self.open_file_location_button.setObjectName(u"open_file_location_button")
self.open_file_location_button.setGeometry(QRect(20, 560, 131, 31))
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QMetaObject.connectSlotsByName(MainWindow)
# setupUi
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"NeuroMorpho Access Tool", None))
self.brain_region_menu_label.setText(QCoreApplication.translate("MainWindow", u"Brain region:", None))
self.species_choice_menu_label.setText(QCoreApplication.translate("MainWindow", u"Species:", None))
self.cell_type_choice_menu_label.setText(QCoreApplication.translate("MainWindow", u"Cell type:", None))
self.acq_button.setText(QCoreApplication.translate("MainWindow", u"Generate CSV", None))
self.acq_entry.setText(QCoreApplication.translate("MainWindow", u"NM_All_All_All.csv", None))
#if QT_CONFIG(accessibility)
self.acq_entry_label.setAccessibleDescription("")
#endif // QT_CONFIG(accessibility)
self.acq_entry_label.setText(QCoreApplication.translate("MainWindow", u"Name of file to generate:", None))
self.open_csv_file_button.setText(QCoreApplication.translate("MainWindow", u"Open CSV file", None))
self.acq_button_continue.setText(QCoreApplication.translate("MainWindow", u"Continue", None))
self.acq_button_cancel.setText(QCoreApplication.translate("MainWindow", u"Cancel", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_acquire), QCoreApplication.translate("MainWindow", u"Generate CSV", None))
self.img_button.setText(QCoreApplication.translate("MainWindow", u"Download Images", None))
self.img_csv_choice_list_label.setText(QCoreApplication.translate("MainWindow", u"CSV File:", None))
self.img_csv_choice_list.setText("")
self.img_open_csv_file_button.setText(QCoreApplication.translate("MainWindow", u"Open CSV files", None))
self.open_images_directory_button.setText(QCoreApplication.translate("MainWindow", u"Open Images Directory", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_image), QCoreApplication.translate("MainWindow", u"Get Images", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_about), QCoreApplication.translate("MainWindow", u"About", None))
self.exit_button.setText(QCoreApplication.translate("MainWindow", u"Exit", None))
self.open_file_location_button.setText(QCoreApplication.translate("MainWindow", u"Open file location", None))
# retranslateUi
| 60.732484 | 140 | 0.724698 | 1,179 | 9,535 | 5.60475 | 0.155216 | 0.036017 | 0.100636 | 0.103511 | 0.545854 | 0.38862 | 0.256053 | 0.154964 | 0.044794 | 0.020278 | 0 | 0.032334 | 0.153435 | 9,535 | 156 | 141 | 61.121795 | 0.786298 | 0.02947 | 0 | 0 | 1 | 0 | 0.101575 | 0.021813 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014815 | false | 0 | 0.022222 | 0 | 0.044444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
515fe6ec967d9adf1eabbb5dad94f9dd79c8c9e6 | 90 | py | Python | tests/test_wb2k.py | reillysiemens/wb2k | 54edaa1afe4904a78746356555468d7c04685b28 | [
"ISC"
] | 6 | 2016-06-09T04:06:29.000Z | 2019-12-22T15:29:54.000Z | tests/test_wb2k.py | reillysiemens/wb2k | 54edaa1afe4904a78746356555468d7c04685b28 | [
"ISC"
] | 19 | 2016-06-03T22:00:13.000Z | 2019-09-25T09:03:16.000Z | tests/test_wb2k.py | reillysiemens/wb2k | 54edaa1afe4904a78746356555468d7c04685b28 | [
"ISC"
] | 4 | 2016-10-06T20:45:44.000Z | 2017-10-28T22:01:20.000Z | # TODO: Write actual tests. This just makes pytest-cov pick up on the module.
import wb2k
| 30 | 77 | 0.766667 | 16 | 90 | 4.3125 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013514 | 0.177778 | 90 | 2 | 78 | 45 | 0.918919 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.5 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
516143d6f8476fa2b9d91fe746b6871aa05baca3 | 4,690 | py | Python | plugins/modules/tag_member.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | plugins/modules/tag_member.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | plugins/modules/tag_member.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: tag_member
short_description: Manage TagMember objects of Tag
description:
- Returns tag members specified by id.
- Adds members to the tag specified by id.
- Removes Tag member from the tag specified by id.
- Returns the number of members in a given tag.
- >
Updates tag membership. As part of the request payload through this API, only the specified members are added /
retained to the given input tags. Possible values of memberType attribute in the request payload can be queried by
using the /tag/member/type API.
- Returns list of supported resource types.
version_added: '1.0.0'
author: Rafael Campos (@racampos)
options:
id:
description:
- Tag ID.
type: str
required: True
member_type:
description:
- Entity type of the member. Possible values can be retrieved by using /tag/member/type API.
- MemberType query parameter.
type: str
required: True
level:
description:
- Level query parameter.
type: str
limit:
description:
- The maximum number of members to return in the result.
type: str
member_association_type:
description:
- >
Indicates how the member is associated with the tag. Possible values and descriptions: 1) DYNAMIC - The
member is associated to the tag through rules. 2) STATIC - The member is associated to the tag manually.
3) MIXED - The member is associated manually and also satisfies the rule defined for the tag.
- MemberAssociationType query parameter.
type: str
offset:
description:
- Used for pagination. It indicates the starting row number out of available member records.
type: str
member_id:
description:
- TagMember id to be removed from tag.
- Required for state delete.
type: str
count:
description:
- If true gets the number of objects.
- Required for state query.
type: bool
memberToTags:
description:
- TagMemberDTO's memberToTags.
type: dict
suboptions:
key:
description:
- It is the tag member's key.
type: list
memberType:
description:
- TagMemberDTO's memberType.
type: str
requirements:
- dnacentersdk
seealso:
# Reference by module name
- module: cisco.dnac.plugins.module_utils.definitions.tag_member
# Reference by Internet resource
- name: TagMember reference
description: Complete reference of the TagMember object model.
link: https://developer.cisco.com/docs/dna-center/api/1-3-3-x
# Reference by Internet resource
- name: TagMember reference
description: SDK reference.
link: https://dnacentersdk.readthedocs.io/en/latest/api/api.html#v2-1-1-summary
"""
EXAMPLES = r"""
- name: get_tag_members_by_id
cisco.dnac.tag_member:
state: query # required
id: SomeValue # string, required
member_type: SomeValue # string, required
level: SomeValue # string
limit: SomeValue # string
member_association_type: SomeValue # string
offset: SomeValue # string
register: nm_get_tag_members_by_id
- name: add_members_to_the_tag
cisco.dnac.tag_member:
state: create # required
id: SomeValue # string, required
- name: remove_tag_member
cisco.dnac.tag_member:
state: delete # required
id: SomeValue # string, required
member_id: SomeValue # string, required
- name: get_tag_member_count
cisco.dnac.tag_member:
state: query # required
id: SomeValue # string, required
member_type: SomeValue # string, required
count: True # boolean, required
level: SomeValue # string
member_association_type: SomeValue # string
register: nm_get_tag_member_count
- name: updates_tag_membership
cisco.dnac.tag_member:
state: update # required
memberToTags:
key:
- SomeValue # string
memberType: SomeValue # string
- name: get_tag_resource_types
cisco.dnac.tag_member:
state: query # required
register: nm_get_tag_resource_types
"""
RETURN = r"""
dnac_response:
description: A dictionary with the response returned by the DNA Center Python SDK
returned: always
type: dict
sample: {"response": 29, "version": "1.0"}
sdk_function:
description: The DNA Center SDK function used to execute the task
returned: always
type: str
sample: tag.add_members_to_the_tag
missing_params:
description: Provided arguments do not comply with the schema of the DNA Center Python SDK function
returned: when the function request schema is not satisfied
type: list
sample:
"""
| 29.3125 | 117 | 0.715991 | 631 | 4,690 | 5.229794 | 0.313788 | 0.040909 | 0.048788 | 0.032727 | 0.270303 | 0.178788 | 0.152727 | 0.093333 | 0.05697 | 0.05697 | 0 | 0.00676 | 0.211514 | 4,690 | 159 | 118 | 29.496855 | 0.885073 | 0.034968 | 0 | 0.426573 | 0 | 0.034965 | 0.984962 | 0.102609 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
51617330e9ebead5bbb61a5ae564be20889db321 | 893 | py | Python | unittest_reinvent/scoring_tests/scoring_components/test_tanimoto_similarity.py | MolecularAI/reinvent-scoring | f7e052ceeffd29e17e1672c33607189873c82a45 | [
"MIT"
] | null | null | null | unittest_reinvent/scoring_tests/scoring_components/test_tanimoto_similarity.py | MolecularAI/reinvent-scoring | f7e052ceeffd29e17e1672c33607189873c82a45 | [
"MIT"
] | 2 | 2021-11-01T23:19:42.000Z | 2021-11-22T23:41:39.000Z | unittest_reinvent/scoring_tests/scoring_components/test_tanimoto_similarity.py | MolecularAI/reinvent-scoring | f7e052ceeffd29e17e1672c33607189873c82a45 | [
"MIT"
] | 2 | 2021-11-18T13:14:22.000Z | 2022-03-16T07:52:57.000Z | import unittest
import numpy.testing as npt
from unittest_reinvent.fixtures.test_data import CELECOXIB, CELECOXIB_C, BUTANE
from unittest_reinvent.scoring_tests.scoring_components.fixtures import score_single, score, instantiate_component
class TestTanimotoSimilarity(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.component = instantiate_component()
def test_similarity_1(self):
npt.assert_almost_equal(score_single(self.component, BUTANE), 1.0)
def test_similarity_2(self):
npt.assert_almost_equal(score_single(self.component, CELECOXIB), 1.0)
def test_similarity_3(self):
npt.assert_almost_equal(score_single(self.component, CELECOXIB_C), 0.89, decimal=3)
def test_similarity_4(self):
smiles = [BUTANE, CELECOXIB]
scores = score(self.component, smiles)
npt.assert_almost_equal(scores, 1.0)
| 31.892857 | 114 | 0.753639 | 115 | 893 | 5.6 | 0.365217 | 0.068323 | 0.10559 | 0.124224 | 0.310559 | 0.251553 | 0.251553 | 0.251553 | 0.251553 | 0.177019 | 0 | 0.018667 | 0.160134 | 893 | 27 | 115 | 33.074074 | 0.84 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.222222 | 1 | 0.277778 | false | 0 | 0.222222 | 0 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
516282162e672e92cb14c5b353f7f1dcb8f0e66a | 2,318 | py | Python | python-project/methods/KDE.py | ferjorosa/bayesian-latent-forests | 3d9e19f1d0be1e4cca0b390866589061a670cc20 | [
"Apache-2.0"
] | null | null | null | python-project/methods/KDE.py | ferjorosa/bayesian-latent-forests | 3d9e19f1d0be1e4cca0b390866589061a670cc20 | [
"Apache-2.0"
] | null | null | null | python-project/methods/KDE.py | ferjorosa/bayesian-latent-forests | 3d9e19f1d0be1e4cca0b390866589061a670cc20 | [
"Apache-2.0"
] | null | null | null | import statsmodels.api as sm
import numpy as np
import os
import time
import json
def apply(train_datasets, var_types_string, test_datasets, n_folds, result_path, filename, foldLog):
print("\n========================")
print("KDE")
print("========================")
results = {}
folds = {}
avg_learning_time = 0
avg_test_ll = 0
for i in range(1, n_folds + 1):
index = i-1
init_time = time.time()*1000
model = sm.nonparametric.KDEMultivariate(data=train_datasets[index], var_type=var_types_string, bw='normal_reference')
test_ll = np.log(model.pdf(test_datasets[index]))
test_ll = np.sum(test_ll)
end_time = time.time()*1000
learning_time = end_time - init_time
fold_result = {"test_ll": test_ll, "learning_time": learning_time}
folds["fold_" + str(i)] = fold_result
avg_learning_time = avg_learning_time + learning_time
avg_test_ll = avg_test_ll + test_ll
if foldLog:
print("----------------------------------------")
print("Fold (" + str(i) + "): ")
print("Test LL: " + str(test_ll))
print("Learning time: " + str(learning_time))
# Generate the average results and store them in the dictionary, then store them in a JSON file
avg_test_ll = avg_test_ll / n_folds
avg_learning_time = avg_learning_time / n_folds / 1000 # in seconds
results["average_test_ll"] = avg_test_ll
results["average_learning_time"] = avg_learning_time
results["folds"] = folds
store_json(results, result_path, filename)
print("----------------------------------------")
print("----------------------------------------")
print("Average Test LL: " + str(avg_test_ll))
print("Average learning time: " + str(avg_learning_time))
def store_json(results, path, filename):
if not os.path.exists(path):
os.makedirs(path)
if os.path.isfile(path + filename + "_results_KDE.json"):
os.remove(path + filename + "_results_KDE.json")
with open(path + filename + "_results_KDE.json", 'w') as fp:
json.dump(results, fp, sort_keys=True, indent=4)
else:
with open(path + filename + "_results_KDE.json", 'w') as fp:
json.dump(results, fp, sort_keys=True, indent=4)
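# A hypothetical invocation sketch (the names below are placeholders, not part
# of this module). `var_types_string` follows statsmodels' KDEMultivariate
# convention: one character per variable ('c' continuous, 'u' unordered
# discrete, 'o' ordered discrete).
#
#     apply(train_folds, "ccc", test_folds, n_folds=10,
#           result_path="results/", filename="mydata", foldLog=True)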
| 36.793651 | 126 | 0.595772 | 301 | 2,318 | 4.325581 | 0.269103 | 0.078341 | 0.080645 | 0.067588 | 0.261137 | 0.18894 | 0.115207 | 0.115207 | 0.115207 | 0.115207 | 0 | 0.010538 | 0.222174 | 2,318 | 62 | 127 | 37.387097 | 0.711592 | 0.044866 | 0 | 0.14 | 0 | 0 | 0.180009 | 0.086386 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.1 | 0 | 0.14 | 0.22 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5162cae16f1582e2cd15f8c44ff6385eae028502 | 3,471 | py | Python | pay-api/tests/unit/services/test_auth.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | [
"Apache-2.0"
] | null | null | null | pay-api/tests/unit/services/test_auth.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | [
"Apache-2.0"
] | null | null | null | pay-api/tests/unit/services/test_auth.py | stevenc987/sbc-pay | 04f02f362f88a30c082b0643583b8d0ebff6063f | [
"Apache-2.0"
] | null | null | null | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the auth Service.
Test-Suite to ensure that the auth Service is working as expected.
"""
import pytest
from werkzeug.exceptions import HTTPException
from pay_api.services.auth import check_auth
from pay_api.utils.constants import EDIT_ROLE, VIEW_ROLE
def test_auth_role_for_service_account(session, monkeypatch):
"""Assert the auth works for service account."""
def token_info(): # pylint: disable=unused-argument; mocks of library methods
return {
'username': 'service account',
'realm_access': {
'roles': [
'system',
'edit'
]
}
}
def mock_auth(): # pylint: disable=unused-argument; mocks of library methods
return 'test'
monkeypatch.setattr('pay_api.utils.user_context._get_token', mock_auth)
monkeypatch.setattr('pay_api.utils.user_context._get_token_info', token_info)
# Test one of roles
check_auth('CP0001234', one_of_roles=[EDIT_ROLE])
def test_auth_role_for_service_account_with_no_edit_role(session, monkeypatch):
"""Assert the auth works for service account."""
def token_info(): # pylint: disable=unused-argument; mocks of library methods
return {
'username': 'service account',
'realm_access': {
'roles': [
'system'
]
}
}
def mock_auth(): # pylint: disable=unused-argument; mocks of library methods
return 'test'
monkeypatch.setattr('pay_api.utils.user_context._get_token', mock_auth)
monkeypatch.setattr('pay_api.utils.user_context._get_token_info', token_info)
with pytest.raises(HTTPException) as excinfo:
# Test one of roles
check_auth('CP0001234', one_of_roles=[EDIT_ROLE])
assert excinfo.value.code == 403
def test_auth_for_client_user_roles(session, public_user_mock):
"""Assert that the auth is working as expected."""
# token = jwt.create_jwt(get_claims(roles=[Role.EDITOR.value]), token_header)
# headers = {'Authorization': 'Bearer ' + token}
# def mock_auth(one, two): # pylint: disable=unused-argument; mocks of library methods
# return headers['Authorization']
# with app.test_request_context():
# monkeypatch.setattr('flask.request.headers.get', mock_auth)
# Test one of roles
check_auth('CP0001234', one_of_roles=[EDIT_ROLE])
# Test contains roles
check_auth('CP0001234', contains_role=EDIT_ROLE)
# Test for exception
with pytest.raises(HTTPException) as excinfo:
check_auth('CP0000000', contains_role=VIEW_ROLE)
assert excinfo.value.code == 403
with pytest.raises(HTTPException) as excinfo:
check_auth('CP0000000', one_of_roles=[EDIT_ROLE])
assert excinfo.value.code == 403
| 35.060606 | 91 | 0.682224 | 444 | 3,471 | 5.148649 | 0.317568 | 0.027559 | 0.030621 | 0.059055 | 0.53762 | 0.53762 | 0.506562 | 0.506562 | 0.476815 | 0.404199 | 0 | 0.021892 | 0.223567 | 3,471 | 98 | 92 | 35.418367 | 0.825974 | 0.422645 | 0 | 0.586957 | 0 | 0 | 0.16172 | 0.08086 | 0 | 0 | 0 | 0 | 0.065217 | 1 | 0.152174 | false | 0 | 0.086957 | 0.086957 | 0.326087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5163521d44aed6af36c7ee8ac7cba90723cfbc26 | 49 | py | Python | api/common.py | ewangplay/wallet-sdk-py | 9f55ffa6bb7968192fd88e2b2dddf04c997874ab | [
"Apache-2.0"
] | 13 | 2018-03-06T10:04:26.000Z | 2021-08-12T11:25:29.000Z | api/common.py | ewangplay/wallet-sdk-py | 9f55ffa6bb7968192fd88e2b2dddf04c997874ab | [
"Apache-2.0"
] | 42 | 2018-02-08T04:32:56.000Z | 2018-09-06T07:30:58.000Z | api/common.py | ewangplay/wallet-sdk-py | 9f55ffa6bb7968192fd88e2b2dddf04c997874ab | [
"Apache-2.0"
] | 15 | 2018-02-11T09:29:31.000Z | 2021-07-04T07:33:22.000Z | from cryption.crypto import sign
VERSION = "v1"
| 12.25 | 32 | 0.755102 | 7 | 49 | 5.285714 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02439 | 0.163265 | 49 | 3 | 33 | 16.333333 | 0.878049 | 0 | 0 | 0 | 0 | 0 | 0.040816 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 4 |
5163aaae4b007b773986e024d8629d4d1f39a0b9 | 589 | py | Python | tests/unit/test_subscription.py | eldarion/braintree_python | 8be3f69fb9a4171c5e9be049c8440fcc4f79fb40 | [
"MIT"
] | 3 | 2015-11-05T08:57:12.000Z | 2016-07-17T18:10:55.000Z | tests/unit/test_subscription.py | eldarion/braintree_python | 8be3f69fb9a4171c5e9be049c8440fcc4f79fb40 | [
"MIT"
] | null | null | null | tests/unit/test_subscription.py | eldarion/braintree_python | 8be3f69fb9a4171c5e9be049c8440fcc4f79fb40 | [
"MIT"
] | null | null | null | from tests.test_helper import *
class TestSubscription(unittest.TestCase):
def test_create_raises_exception_with_bad_keys(self):
try:
Subscription.create({"bad_key": "value"})
self.assertTrue(False)
except KeyError as e:
self.assertEquals("'Invalid keys: bad_key'", str(e))
def test_update_raises_exception_with_bad_keys(self):
try:
Subscription.update("id", {"bad_key": "value"})
self.assertTrue(False)
except KeyError as e:
self.assertEquals("'Invalid keys: bad_key'", str(e))
| 34.647059 | 64 | 0.633277 | 68 | 589 | 5.235294 | 0.455882 | 0.067416 | 0.106742 | 0.123596 | 0.713483 | 0.713483 | 0.713483 | 0.713483 | 0.460674 | 0.460674 | 0 | 0 | 0.249576 | 589 | 16 | 65 | 36.8125 | 0.80543 | 0 | 0 | 0.571429 | 0 | 0 | 0.122241 | 0 | 0 | 0 | 0 | 0 | 0.285714 | 0 | null | null | 0 | 0.071429 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
5164ffb41b1068e6e8353350ba4bc5a194d1426f | 2,848 | py | Python | testNim.py | PauloHSNeto/Ciencia-de-Computa-o-CourseEra | 230281ba7227348ed2d27bb20039aed223244d94 | [
"bzip2-1.0.6"
] | null | null | null | testNim.py | PauloHSNeto/Ciencia-de-Computa-o-CourseEra | 230281ba7227348ed2d27bb20039aed223244d94 | [
"bzip2-1.0.6"
] | null | null | null | testNim.py | PauloHSNeto/Ciencia-de-Computa-o-CourseEra | 230281ba7227348ed2d27bb20039aed223244d94 | [
"bzip2-1.0.6"
] | null | null | null | computador = 0
usuario = 0
rodada = 0
def computador_escolhe_jogada(n, m):
global computador
n = n - m
if (n == 1):
print(" ")
print("O computador tirou %s peça." % n)
print("Agora restam %s peças no tabuleiro." % n)
print(" ")
if (n == 0):
print ("Fim do jogo! O computador ganhou!")
partida()
else:
print(" ")
print("O computador tirou %s peça." % m)
print("Agora restam %s peças no tabuleiro." % n)
print(" ")
if (n == 0):
print ("Fim do jogo! O computador ganhou!")
partida()
return n
return m
def usuario_escolhe_jogada(n, m):
global usuario
print(" ")
n_user = int(input("Quantas peças você vai tirar? "))
print("Voce tirou %s peças." % n_user)
if (n_user <= m):
n = n - m
print(" ")
print("Agora restam apenas %s peças no tabuleiro." % n)
else:
while (n_user > m):
print("Oops! Jogada inválida! Tente de novo.")
print(" ")
n_user = int(input("Quantas peças você vai tirar? "))
if (n == 0):
print ("Vitoria do usuario")
return n_user
return n
return m
def partida():
global computador
global usuario
global rodada
while(rodada <= 3):
rodada = rodada + 1
if (rodada <= 3 ):
print(" ")
print("**** Rodada %s ****" % rodada)
print(" ")
n = int(input("Quantas peças? "))
m = int(input("Limite de peças por jogada? "))
if (((n )%(m + 1)) == 0):
while (n > 0):
print(" ")
print("Voce começa!")
usuario_escolhe_jogada(n,m)
if n > 0:
n = n - m
computador_escolhe_jogada(n,m)
n = n - m
computador = computador + 1
else:
print(" ")
print("Computador Começa!!")
while( n > 0):
computador_escolhe_jogada(n,m)
computador = computador + 1
n = n - m
if n > 0:
usuario_escolhe_jogada(n,m)
n = n - m
else:
print("**** Final do campeonato! ****")
print(" ")
print("Fim de Campeonato: Computador %s x 0 Usuario " % computador)
break
print("Bem-vindo ao jogo do NIM! Escolha:")
print(" ")
print("1 - para jogar uma partida isolada ")
tipo_jogo = int(input("2 - para jogar um campeonato "))
print(" ")
if ( tipo_jogo == 1 ):
print("Voce escolheu partida isolada!")
if ( tipo_jogo == 2):
print("Voce escolheu um campeonato!")
partida()
else:
pass
| 29.061224 | 79 | 0.466643 | 323 | 2,848 | 4.049536 | 0.20743 | 0.019878 | 0.06422 | 0.068807 | 0.441896 | 0.262997 | 0.262997 | 0.188073 | 0.188073 | 0.188073 | 0 | 0.013642 | 0.408006 | 2,848 | 97 | 80 | 29.360825 | 0.762159 | 0 | 0 | 0.597826 | 0 | 0 | 0.247542 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032609 | false | 0.01087 | 0 | 0 | 0.086957 | 0.358696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
516516f68aa7cecc66414e1c2f63b93bd749ec5c | 59 | py | Python | example_snippets/multimenus_snippets/NewSnippets/SymPy/Manipulating expressions/Miscellaneous/Simplify Bessel functions.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/NewSnippets/SymPy/Manipulating expressions/Miscellaneous/Simplify Bessel functions.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/NewSnippets/SymPy/Manipulating expressions/Miscellaneous/Simplify Bessel functions.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | 1 | 2021-02-04T04:51:48.000Z | 2021-02-04T04:51:48.000Z | expr = besselj(x, z*polar_lift(-1))
expr = besselsimp(expr) | 29.5 | 35 | 0.711864 | 10 | 59 | 4.1 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018868 | 0.101695 | 59 | 2 | 36 | 29.5 | 0.754717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
516615c0dc774d664f76be40cc3b724ef7f05aa9 | 15,206 | py | Python | mbuild/formats/hoomd_simulation.py | dcardenasv/mbuild | 20c13f6bb66c6b023b07d7a2b2e4ad0a5073d727 | [
"MIT"
] | null | null | null | mbuild/formats/hoomd_simulation.py | dcardenasv/mbuild | 20c13f6bb66c6b023b07d7a2b2e4ad0a5073d727 | [
"MIT"
] | null | null | null | mbuild/formats/hoomd_simulation.py | dcardenasv/mbuild | 20c13f6bb66c6b023b07d7a2b2e4ad0a5073d727 | [
"MIT"
] | null | null | null | import warnings
import itertools
import numpy as np
import operator
from collections import namedtuple
import parmed as pmd
import mbuild as mb
from mbuild.utils.sorting import natural_sort
from mbuild.utils.io import import_
from mbuild.utils.conversion import RB_to_OPLS
from .hoomd_snapshot import to_hoomdsnapshot
hoomd = import_("hoomd")
hoomd.md = import_("hoomd.md")
hoomd.md.pair = import_("hoomd.md.pair")
hoomd.md.special_pair = import_("hoomd.md.special_pair")
hoomd.md.charge = import_("hoomd.md.charge")
hoomd.md.bond = import_("hoomd.md.bond")
hoomd.md.angle = import_("hoomd.md.angle")
hoomd.md.dihedral = import_("hoomd.md.dihedral")
hoomd.group = import_("hoomd.group")
def create_hoomd_simulation(structure, ref_distance=1.0, ref_mass=1.0,
ref_energy=1.0, r_cut=1.2, auto_scale=False,
snapshot_kwargs={},
pppm_kwargs={'Nx':8, 'Ny':8, 'Nz':8, 'order':4}):
""" Convert a parametrized pmd.Structure to hoomd.SimulationContext
Parameters
----------
structure : parmed.Structure
ParmEd Structure object
ref_distance : float, optional, default=1.0
Reference distance for conversion to reduced units
ref_mass : float, optional, default=1.0
Reference mass for conversion to reduced units
ref_energy : float, optional, default=1.0
Reference energy for conversion to reduced units
r_cut : float, optional, default 1.2
Cutoff radius, in reduced units
auto_scale : bool, optional, default=False
Automatically use largest sigma value as ref_distance,
largest mass value as ref_mass
and largest epsilon value as ref_energy
snapshot_kwargs : dict
Kwargs to pass to to_hoomdsnapshot
pppm_kwargs : dict
Kwargs to pass to hoomd's pppm function
Returns
-------
hoomd_objects : list
List of hoomd objects created during conversion
ReferenceValues : namedtuple
Values used in scaling
Notes
-----
While the hoomd objects are returned, the
hoomd.SimulationContext is accessible via `hoomd.context.current`.
If you pass a non-parametrized pmd.Structure, you will not have
angle, dihedral, or force field information. You may be better off
creating a hoomd.Snapshot
Reference units should be expected to convert parmed Structure units :
angstroms, kcal/mol, and daltons
"""
if isinstance(structure, mb.Compound):
raise ValueError("You passed mb.Compound to create_hoomd_simulation, " +
"there will be no angles, dihedrals, or force field parameters. " +
"Please use " +
"hoomd_snapshot.to_hoomdsnapshot to create a hoomd.Snapshot, " +
"then create your own hoomd context " +
"and pass your hoomd.Snapshot " +
"to hoomd.init.read_snapshot()")
elif not isinstance(structure, pmd.Structure):
raise ValueError("Please pass a parmed.Structure to " +
"create_hoomd_simulation")
_check_hoomd_version()
version_numbers = _check_hoomd_version()
if float(version_numbers[0]) >= 3:
warnings.warn("Warning when using Hoomd 3, potential API change " +
"where the hoomd context is not updated upon " +
"creation of forces - utilize " +
"the returned `hoomd_objects`")
hoomd_objects = [] # Potential adaptation for Hoomd v3 API
if auto_scale:
ref_mass = max([atom.mass for atom in structure.atoms])
pair_coeffs = list(set((atom.type,
atom.epsilon,
atom.sigma) for atom in structure.atoms))
ref_energy = max(pair_coeffs, key=operator.itemgetter(1))[1]
ref_distance = max(pair_coeffs, key=operator.itemgetter(2))[2]
ReferenceValues = namedtuple("ref_values", ["distance", "mass", "energy"])
ref_values = ReferenceValues(ref_distance, ref_mass, ref_energy)
if not hoomd.context.current:
hoomd.context.initialize("")
snapshot,_ = to_hoomdsnapshot(structure, ref_distance=ref_distance,
ref_mass=ref_mass, ref_energy=ref_energy, **snapshot_kwargs)
hoomd_objects.append(snapshot)
hoomd.init.read_snapshot(snapshot)
nl = hoomd.md.nlist.cell()
nl.reset_exclusions(exclusions=['1-2', '1-3'])
hoomd_objects.append(nl)
if structure.atoms[0].type != '':
print("Processing LJ and QQ")
lj = _init_hoomd_lj(structure, nl, r_cut=r_cut,
ref_distance=ref_distance, ref_energy=ref_energy)
qq = _init_hoomd_qq(structure, nl, r_cut=r_cut, **pppm_kwargs)
hoomd_objects.append(lj)
hoomd_objects.append(qq)
if structure.adjusts:
print("Processing 1-4 interactions, adjusting neighborlist exclusions")
lj_14, qq_14 = _init_hoomd_14_pairs(structure, nl,
ref_distance=ref_distance, ref_energy=ref_energy)
hoomd_objects.append(lj_14)
hoomd_objects.append(qq_14)
if structure.bond_types:
print("Processing harmonic bonds")
harmonic_bond = _init_hoomd_bonds(structure,
ref_distance=ref_distance, ref_energy=ref_energy)
hoomd_objects.append(harmonic_bond)
if structure.angle_types:
print("Processing harmonic angles")
harmonic_angle = _init_hoomd_angles(structure,
ref_energy=ref_energy)
hoomd_objects.append(harmonic_angle)
if structure.dihedral_types:
print("Processing periodic torsions")
periodic_torsions = _init_hoomd_dihedrals(structure,
ref_energy=ref_energy)
hoomd_objects.append(periodic_torsions)
if structure.rb_torsion_types:
print("Processing RB torsions")
rb_torsions = _init_hoomd_rb_torsions(structure,
ref_energy=ref_energy)
hoomd_objects.append(rb_torsions)
print("HOOMD SimulationContext updated from ParmEd Structure")
return hoomd_objects, ref_values
def _init_hoomd_lj(structure, nl, r_cut=1.2,
ref_distance=1.0, ref_energy=1.0):
""" LJ parameters """
# Identify the unique atom types before setting
atom_type_params = {}
for atom in structure.atoms:
if atom.type not in atom_type_params:
atom_type_params[atom.type] = atom.atom_type
# Set the hoomd parameters for self-interactions
lj = hoomd.md.pair.lj(r_cut, nl)
for name, atom_type in atom_type_params.items():
lj.pair_coeff.set(name, name,
sigma=atom_type.sigma/ref_distance,
epsilon=atom_type.epsilon/ref_energy)
# Cross interactions, mixing rules, NBfixes
all_atomtypes = sorted(atom_type_params.keys())
for a1, a2 in itertools.combinations_with_replacement(all_atomtypes, 2):
nb_fix_info = atom_type_params[a1].nbfix.get(a2, None)
# nb_fix_info = (rmin, eps, rmin14, eps14)
if nb_fix_info is None:
# No nbfix means use mixing rule to find cross-interaction
if structure.combining_rule == 'lorentz':
sigma = ((atom_type_params[a1].sigma + atom_type_params[a2].sigma)
/ (2 * ref_distance))
epsilon = ((atom_type_params[a1].epsilon *
atom_type_params[a2].epsilon) /
ref_energy**2)**0.5
elif structure.combining_rule == 'geometric':
sigma = ((atom_type_params[a1].sigma *
atom_type_params[a2].sigma) /
ref_distance**2)**0.5
epsilon = ((atom_type_params[a1].epsilon *
atom_type_params[a2].epsilon) /
ref_energy**2)**0.5
else:
                raise ValueError(
                    "Mixing rule {} ".format(structure.combining_rule) +
                    "not supported, use lorentz or geometric")
else:
# If we have nbfix info, use it
sigma = nb_fix_info[0] / (ref_distance*(2 ** (1/6)))
epsilon = nb_fix_info[1] / ref_energy
lj.pair_coeff.set(a1, a2, sigma=sigma, epsilon=epsilon)
return lj
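# Worked example of the mixing rules above (illustrative numbers only, not
# taken from any particular force field):
#   lorentz:   sigma_ij   = (sigma_i + sigma_j) / 2 -> (3.5 + 4.0) / 2     = 3.75
#              epsilon_ij = sqrt(eps_i * eps_j)     -> sqrt(0.066 * 0.105) ~= 0.0833
#   geometric: sigma_ij   = sqrt(sigma_i * sigma_j) -> sqrt(3.5 * 4.0)     ~= 3.742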
def _init_hoomd_qq(structure, nl, Nx=1, Ny=1, Nz=1, order=4, r_cut=1.2):
""" Charge interactions """
charged = hoomd.group.charged()
if len(charged) == 0:
print("No charged groups found, ignoring electrostatics")
return None
else:
qq = hoomd.md.charge.pppm(charged, nl)
qq.set_params(Nx, Ny, Nz, order, r_cut)
return qq
def _init_hoomd_14_pairs(structure, nl, r_cut=1.2, ref_distance=1.0, ref_energy=1.0):
"""Special_pairs to handle 14 scalings
See discussion: https://groups.google.com/forum/
#!topic/hoomd-users/iZ9WCpHczg0 """
# Update neighborlist to exclude 1-4 interactions,
# but impose a special_pair force to handle these pairs
nl.exclusions.append('1-4')
if hoomd.context.current.system_definition.getPairData().getN() == 0:
print("No 1,4 pairs found in hoomd snapshot")
return None, None
lj_14 = hoomd.md.special_pair.lj()
qq_14 = hoomd.md.special_pair.coulomb()
params_14 = {}
# Identify unique 14 scalings
for adjust in structure.adjusts:
t1 = adjust.atom1.type
t2 = adjust.atom2.type
ps = '-'.join(sorted([t1, t2]))
if ps not in params_14:
params_14[ps] = adjust.type
for name, adjust_type in params_14.items():
lj_14.pair_coeff.set(name,
sigma=adjust_type.sigma/ref_distance,
                             # The adjust epsilon already carries the scaling
epsilon=adjust_type.epsilon/ref_energy,
# Do NOT use hoomd's alpha to modify any LJ terms
alpha=1,
r_cut=r_cut)
qq_14.pair_coeff.set(name,
alpha=adjust_type.chgscale,
r_cut=r_cut)
return lj_14, qq_14
def _init_hoomd_bonds(structure, ref_distance=1.0, ref_energy=1.0):
""" Harmonic bonds """
# Identify the unique bond types before setting
bond_type_params = {}
for bond in structure.bonds:
t1, t2 = bond.atom1.type, bond.atom2.type
t1, t2 = sorted([t1, t2], key=natural_sort)
if t1 != '' and t2 != '':
bond_type = ('-'.join((t1, t2)))
if bond_type not in bond_type_params:
bond_type_params[bond_type] = bond.type
# Set the hoomd parameters
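    # Note on the 2* factor below (assuming ParmEd/AMBER conventions):
    # HOOMD's harmonic bond is V = (k/2)(r - r0)^2 while ParmEd stores K with
    # V = K(r - r_eq)^2, so k_hoomd = 2*K; the same factor appears for the
    # harmonic angles and periodic torsions further down.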
harmonic_bond = hoomd.md.bond.harmonic()
for name, bond_type in bond_type_params.items():
        # A (parametrized) ParmEd structure can have a missing bond type
        # because of constraints
if bond_type is None:
print("Bond with no bondtype detected, setting coefficients to 0")
harmonic_bond.bond_coeff.set(name,
k=0, r0=0)
else:
harmonic_bond.bond_coeff.set(name,
k=2 * bond_type.k * ref_distance**2 / ref_energy,
r0=bond_type.req / ref_distance)
return harmonic_bond
def _init_hoomd_angles(structure, ref_energy=1.0):
""" Harmonic angles """
# Identify the unique angle types before setting
angle_type_params = {}
for angle in structure.angles:
t1, t2, t3 = angle.atom1.type, angle.atom2.type, angle.atom3.type
t1, t3 = sorted([t1, t3], key=natural_sort)
angle_type = ('-'.join((t1, t2, t3)))
if angle_type not in angle_type_params:
angle_type_params[angle_type] = angle.type
# set the hoomd parameters
harmonic_angle = hoomd.md.angle.harmonic()
for name, angle_type in angle_type_params.items():
harmonic_angle.angle_coeff.set(name,
t0=np.deg2rad(angle_type.theteq),
k=2 * angle_type.k / ref_energy)
return harmonic_angle
def _init_hoomd_dihedrals(structure, ref_energy=1.0):
""" Periodic dihedrals (dubbed harmonic dihedrals in HOOMD) """
# Identify the unique dihedral types before setting
    # Need HOOMD 2.8.0 to use the proper dihedral implementation
    # from this PR https://github.com/glotzerlab/hoomd-blue/pull/492
version_numbers = _check_hoomd_version()
    # Compare (major, minor) as a tuple so e.g. version 3.0 does not fail
    if (int(version_numbers[0]), int(version_numbers[1])) < (2, 8):
from mbuild.exceptions import MBuildError
raise MBuildError("Please upgrade Hoomd to at least 2.8.0")
dihedral_type_params = {}
for dihedral in structure.dihedrals:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = ('-'.join((t1, t2, t3, t4)))
else:
dihedral_type = ('-'.join((t4, t3, t2, t1)))
if dihedral_type not in dihedral_type_params:
if isinstance(dihedral.type, pmd.DihedralType):
dihedral_type_params[dihedral_type] = dihedral.type
elif isinstance(dihedral.type, pmd.DihedralTypeList):
if len(dihedral.type) > 1:
warnings.warn("Multiple dihedral types detected" +
" for single dihedral, will ignore all except " +
" first diheral type")
dihedral_type_params[dihedral_type] = dihedral.type[0]
# Set the hoomd parameters
periodic_torsion = hoomd.md.dihedral.harmonic() # These are periodic torsions
for name, dihedral_type in dihedral_type_params.items():
periodic_torsion.dihedral_coeff.set(name,
k=2*dihedral_type.phi_k / ref_energy,
d=1,
n=dihedral_type.per,
phi_0=np.deg2rad(dihedral_type.phase))
return periodic_torsion
def _init_hoomd_rb_torsions(structure, ref_energy=1.0):
""" RB dihedrals (implemented as OPLS dihedrals in HOOMD) """
# Identify the unique dihedral types before setting
dihedral_type_params = {}
for dihedral in structure.rb_torsions:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = ('-'.join((t1, t2, t3, t4)))
else:
dihedral_type = ('-'.join((t4, t3, t2, t1)))
if dihedral_type not in dihedral_type_params:
dihedral_type_params[dihedral_type] = dihedral.type
# Set the hoomd parameter
rb_torsion = hoomd.md.dihedral.opls()
for name, dihedral_type in dihedral_type_params.items():
F_coeffs = RB_to_OPLS(dihedral_type.c0 / ref_energy,
dihedral_type.c1 / ref_energy,
dihedral_type.c2 / ref_energy,
dihedral_type.c3 / ref_energy,
dihedral_type.c4 / ref_energy,
dihedral_type.c5 / ref_energy)
rb_torsion.dihedral_coeff.set(name, k1=F_coeffs[0],
k2=F_coeffs[1], k3=F_coeffs[2], k4=F_coeffs[3])
return rb_torsion
def _check_hoomd_version():
version = hoomd.__version__
version_numbers = version.split('.')
return version_numbers
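# Usage sketch (hypothetical file names; assumes a parametrized ParmEd
# structure, e.g. from foyer, and the HOOMD 2.x API):
# import parmed as pmd
# structure = pmd.load_file('system.top', xyz='system.gro')
# hoomd_objects, ref_values = create_hoomd_simulation(structure, auto_scale=True)
# hoomd.md.integrate.mode_standard(dt=0.001)
# hoomd.md.integrate.nvt(group=hoomd.group.all(), kT=1.0, tau=1.0)
# hoomd.run(1e4)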
| 40.657754 | 85 | 0.636262 | 1,963 | 15,206 | 4.723892 | 0.173714 | 0.037852 | 0.021137 | 0.008304 | 0.303677 | 0.249865 | 0.182897 | 0.144398 | 0.121212 | 0.111722 | 0 | 0.023191 | 0.268381 | 15,206 | 373 | 86 | 40.766756 | 0.810337 | 0.176378 | 0 | 0.164063 | 0 | 0 | 0.099699 | 0.010165 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035156 | false | 0.011719 | 0.082031 | 0 | 0.160156 | 0.039063 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
516648ad34a16a2a40620bcd9fd2d7782735d4c0 | 274 | py | Python | Utility.py | YiFanChen99/PyQtComponent | fdf83d6e3f8a448acce866db85ce418ce0df121e | [
"MIT"
] | null | null | null | Utility.py | YiFanChen99/PyQtComponent | fdf83d6e3f8a448acce866db85ce418ce0df121e | [
"MIT"
] | null | null | null | Utility.py | YiFanChen99/PyQtComponent | fdf83d6e3f8a448acce866db85ce418ce0df121e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QApplication
def launch_application(creator):
def wrapper():
import sys
app = QApplication(sys.argv)
widget = creator()
widget.show()
app.exec_()
return wrapper
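# Usage sketch (hypothetical widget; the decorator supplies the QApplication
# boilerplate and event loop):
#
# from PyQt5.QtWidgets import QLabel
#
# @launch_application
# def make_window():
#     return QLabel("Hello")
#
# if __name__ == '__main__':
#     make_window()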
| 19.571429 | 36 | 0.591241 | 31 | 274 | 5.16129 | 0.774194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010152 | 0.281022 | 274 | 13 | 37 | 21.076923 | 0.80203 | 0.153285 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0 | 0.222222 | 0 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
51666c4b9d618cca914f038ac201e94c03f87f6e | 34,907 | py | Python | post_optimization_studies/mad_analyses/four_cuts_eff_flow_chart/Output/Histos/MadAnalysis5job_0/selection_4.py | sheride/axion_pheno | 7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5 | [
"MIT"
] | null | null | null | post_optimization_studies/mad_analyses/four_cuts_eff_flow_chart/Output/Histos/MadAnalysis5job_0/selection_4.py | sheride/axion_pheno | 7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5 | [
"MIT"
] | null | null | null | post_optimization_studies/mad_analyses/four_cuts_eff_flow_chart/Output/Histos/MadAnalysis5job_0/selection_4.py | sheride/axion_pheno | 7d3fc08f5ae5b17a3500eba19a2e43f87f076ce5 | [
"MIT"
] | null | null | null | def selection_4():
# Library import
import numpy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Library version
matplotlib_version = matplotlib.__version__
numpy_version = numpy.__version__
# Histo binning
xBinning = numpy.linspace(-8.0,8.0,161,endpoint=True)
# Creating data sequence: middle of each bin
xData = numpy.array([-7.95,-7.85,-7.75,-7.65,-7.55,-7.45,-7.35,-7.25,-7.15,-7.05,-6.95,-6.85,-6.75,-6.65,-6.55,-6.45,-6.35,-6.25,-6.15,-6.05,-5.95,-5.85,-5.75,-5.65,-5.55,-5.45,-5.35,-5.25,-5.15,-5.05,-4.95,-4.85,-4.75,-4.65,-4.55,-4.45,-4.35,-4.25,-4.15,-4.05,-3.95,-3.85,-3.75,-3.65,-3.55,-3.45,-3.35,-3.25,-3.15,-3.05,-2.95,-2.85,-2.75,-2.65,-2.55,-2.45,-2.35,-2.25,-2.15,-2.05,-1.95,-1.85,-1.75,-1.65,-1.55,-1.45,-1.35,-1.25,-1.15,-1.05,-0.95,-0.85,-0.75,-0.65,-0.55,-0.45,-0.35,-0.25,-0.15,-0.05,0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.05,1.15,1.25,1.35,1.45,1.55,1.65,1.75,1.85,1.95,2.05,2.15,2.25,2.35,2.45,2.55,2.65,2.75,2.85,2.95,3.05,3.15,3.25,3.35,3.45,3.55,3.65,3.75,3.85,3.95,4.05,4.15,4.25,4.35,4.45,4.55,4.65,4.75,4.85,4.95,5.05,5.15,5.25,5.35,5.45,5.55,5.65,5.75,5.85,5.95,6.05,6.15,6.25,6.35,6.45,6.55,6.65,6.75,6.85,6.95,7.05,7.15,7.25,7.35,7.45,7.55,7.65,7.75,7.85,7.95])
# Creating weights for histo: y5_ETA_0
y5_ETA_0_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.93650155414,2.16577055928,2.86585876426,3.2752672163,3.82387435003,4.39295226636,5.03572372006,5.70305915288,6.52597045348,7.36934973668,8.43790882849,9.65385179504,10.7346868764,11.6640460865,13.5022885242,14.5462796369,14.8819953516,16.2903621546,16.7284257823,17.4162331977,17.174681403,17.7683248984,17.7069129506,17.3384452638,17.0764254865,15.6557786939,15.2299910558,13.7847802841,12.7407891714,11.2996703962,9.60881583332,8.6794566232,7.11142195589,5.67030718072,4.68772801583,3.77883958831,2.8331059921,2.13711178364,1.64991579771,1.08902627442,0.888416044921,0.65505344326,0.405314455517,0.302962222508,0.159669264295,0.131010688652,0.0614112678056,0.0163763340815,0.0286585876426,0.00409408452037,0.00409408452037,0.0122822535611,0.065505344326,0.0736934973668,0.10644618953,0.180139686896,0.282491799906,0.454443213762,0.749217363229,0.822910900595,1.07674428486,1.54346948818,2.12892379059,2.70618969997,3.63964050661,4.68772801583,5.76856309721,7.32431777495,8.39696486329,9.67022778112,11.5371301944,12.6097772828,13.8789442041,14.9556872889,16.2330422033,16.6301698658,17.5267731037,17.891148794,17.9320887592,17.8543008254,17.6250330202,16.585133904,15.979210419,15.3159669827,14.2105639222,12.8513290775,12.1389576829,10.7060309008,9.67431977765,8.58938869975,7.6559374931,6.73476627602,5.75628310765,4.98659576181,4.66725603323,3.78702758135,3.32849037106,2.87814115382,2.10845340799,2.04294786367,0.00409408452037,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_1
y5_ETA_1_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121240822392,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121753353338,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121313846429,0.012170493784,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_2
y5_ETA_2_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0200482816269,0.010032919325,0.0200940397991,0.0100262832744,0.010040728874,0.0100696696577,0.0100271592661,0.0301165257319,0.0401877986198,0.0,0.0502017229729,0.010040728874,0.0,0.0301145712787,0.0402058597513,0.0,0.0100568562125,0.0401512714171,0.0,0.0301337191358,0.0100702894631,0.0,0.050140155629,0.0300994521571,0.0100355638284,0.0100184158769,0.0401671012489,0.0100262832744,0.0301196784758,0.0,0.0,0.0100367001384,0.0100153623019,0.0,0.0,0.0,0.0100369728528,0.0100609841169,0.0,0.0,0.0100568562125,0.0100602899348,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100340928234,0.0100568562125,0.0,0.0,0.0100532489446,0.0200646444914,0.0301498340782,0.0200868128673,0.0301214387233,0.0301310828965,0.010045943504,0.0200609297906,0.0200572522781,0.0,0.0301088194839,0.010045943504,0.0,0.0301025759767,0.0200638924608,0.0201178403294,0.0300965762597,0.0100299566548,0.0200777058588,0.0100187051194,0.0301077410223,0.0301692711779,0.0100355638284,0.0100324441408,0.0200798255935,0.0,0.0100369728528,0.0200832055994,0.0,0.0,0.0100696696577,0.0,0.0100187051194,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_3
y5_ETA_3_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0275282721771,0.0274997813219,0.00549710596009,0.0164555265735,0.0110053636951,0.0770293110214,0.0440010037659,0.0550185105048,0.0330098306536,0.0549834910877,0.0880414145324,0.121011383177,0.0934765336867,0.0880213454001,0.0990525836506,0.148391326591,0.170522298147,0.115509109618,0.0880932530198,0.142979039121,0.143099738296,0.137527304026,0.142956573069,0.132021455399,0.131943372662,0.0934995278747,0.0934989997397,0.0935291846896,0.104510168858,0.0825596976154,0.071452814174,0.0495039679626,0.0549952725621,0.0274769699499,0.0275034010784,0.0110008136084,0.0220214688448,0.0275283574913,0.0,0.0110360564673,0.021979583672,0.0219691225352,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0110095197118,0.0,0.00550243199904,0.00551802823363,0.00547487959922,0.0220146884032,0.0275094786942,0.0385276029448,0.0384989861497,0.0220041419523,0.0495313903599,0.0165169689939,0.0550053883798,0.0825226875355,0.104575210722,0.126506425347,0.066013876197,0.104442608196,0.126527185118,0.0990133797788,0.170543342297,0.0825431629255,0.181466759744,0.1538988815,0.20895098952,0.132032830616,0.115526172443,0.0824931525979,0.126454668112,0.104502043703,0.08245756442,0.0825350783965,0.0990397052803,0.0770264265915,0.0385428294849,0.0935220345535,0.0329914596786,0.043969193785,0.0494868645118,0.0274664559996,0.0164761360286,0.0165097701068,0.0110073624832,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_4
y5_ETA_4_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00295952755959,0.00197733276196,0.00197153597409,0.00295929588047,0.00592188256639,0.00888223262692,0.00986622754723,0.0207193880908,0.0266393306452,0.0276408337728,0.028623718397,0.0355146850478,0.0365133262569,0.0562514891552,0.0592211905545,0.0720236262363,0.0769833630669,0.0878251039945,0.100658684083,0.116441122468,0.105610244004,0.12136374254,0.104612220072,0.122368861145,0.107579035503,0.124337372063,0.114461228008,0.0977014885466,0.0799371916409,0.0838926916906,0.0818885871641,0.0473736095304,0.0503348935223,0.0305824370646,0.0296045868517,0.0236809646878,0.025663292179,0.01382357141,0.0108591409111,0.00690936069003,0.0049344204759,0.00296236222172,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00197140370055,0.00197179531041,0.000988009622793,0.00394563925743,0.00493570713674,0.00987045228407,0.00888712273977,0.020722578689,0.0157902465318,0.0246790126701,0.033550310684,0.0444057519442,0.0463809967883,0.0631546850964,0.0799404383551,0.08090190669,0.08585110172,0.103606500224,0.0976781202204,0.0977050960069,0.115473882196,0.132224963753,0.1322358663,0.117436981924,0.104620837893,0.107570056935,0.0986854033011,0.0976784008007,0.0799142241437,0.075986220958,0.0641623289231,0.0661257893969,0.0592049169003,0.0414675156152,0.0375125005685,0.0266518204744,0.0197401189434,0.0167707462239,0.0128311630906,0.0118483786737,0.00395132701983,0.00987041220117,0.00296029153951,0.00395140718562,0.000988172359335,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_5
y5_ETA_5_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000504690405707,0.000503681322579,0.00176459470884,0.000504669999982,0.000755187890129,0.00201616849525,0.00327598677425,0.00529427346382,0.0068047412699,0.00630216825829,0.00806693941664,0.010840665667,0.01310830592,0.0178963013326,0.0183999386429,0.0226806437203,0.025963360783,0.0310093045706,0.035792138535,0.0400765646566,0.0519272497141,0.0415845077543,0.0476444880596,0.0441119769099,0.050664615445,0.0526808211507,0.0473877760315,0.0398268505919,0.0317628720061,0.0352897295695,0.0277249790567,0.0264671781437,0.01915871554,0.0146224307521,0.00958156839025,0.0110892754217,0.00579553410721,0.00529300110682,0.0035290593812,0.00201740124113,0.00075590049007,0.000252358446569,0.0,0.000252130822703,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000251770961733,0.000252017871011,0.0,0.0012607777482,0.00100783957898,0.00226974524365,0.0032772871391,0.00453781321669,0.011097473722,0.0110881991197,0.0126025360114,0.0186502288691,0.0219320576825,0.0284887093436,0.0340287397618,0.0400856872162,0.0451133778823,0.0463772524995,0.0509192869005,0.0516829011549,0.0499042020887,0.0499201265568,0.0456294026682,0.0410958106359,0.0451322231699,0.039827538785,0.0239494277149,0.0272195212356,0.0229396403894,0.0196642173734,0.0146193658922,0.012354134315,0.00907918743253,0.00730944287875,0.00453884950745,0.00478865559789,0.00403269148997,0.00352844480876,0.00252244454586,0.00252372690566,0.00100823288933,0.00126131029763,0.000251614677883,0.000756485454199,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_6
y5_ETA_6_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000283957625274,0.000287128707592,0.000572547709956,0.000861351732727,0.000858235034671,0.00142977823532,0.000286450303915,0.00143123581351,0.000859256738999,0.00314945657622,0.00143529663835,0.00429543394091,0.00544119536808,0.00543670366794,0.00687030779608,0.00572291084221,0.00886473662672,0.0131701286858,0.00973534068191,0.0134554561147,0.0177613000431,0.0174690846075,0.0151850655816,0.0186052458129,0.0208887349921,0.0174626364735,0.0197556129072,0.0174692045728,0.0154570468727,0.0123066885575,0.010887799171,0.00944630632579,0.00601141435195,0.00457853701348,0.00315233774244,0.00228783792074,0.0017190425249,0.000574099460894,0.00085594249811,0.000286764712928,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00085828172116,0.00257774402458,0.00229116695735,0.00515367657338,0.00314700028705,0.00745015599335,0.0103183139748,0.0122993706752,0.0143236348717,0.0151823563656,0.0186132135071,0.0191729715183,0.0237481674986,0.0226402381232,0.0217478463732,0.0211806505145,0.0189065386215,0.0180207549592,0.0117413421635,0.0131746173868,0.0146080025783,0.00887664717991,0.00715153841053,0.00629180320999,0.00514740738762,0.00373118123045,0.0042993318129,0.00286459701196,0.00228898858775,0.00114435383452,0.00257892468291,0.00114002408751,0.00200925553992,0.0,0.000568809991618,0.00057289490948,0.000571732745799,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_7
y5_ETA_7_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.1617996646e-05,8.63041639912e-05,0.000129543836942,0.000129532437545,0.0,0.00021618093038,8.64047469058e-05,0.000237509621227,0.000129647940258,0.000322270298485,0.000410490734636,0.000430353219956,0.000626435839991,0.000475241698333,0.000691605605799,0.000819277594701,0.000970180465002,0.00112314528505,0.00105669727804,0.00114471529107,0.00118825302473,0.0014903026789,0.0017929520587,0.00114463650112,0.00161964099142,0.00144665053218,0.00170707143259,0.00164166529694,0.00123133310612,0.00123126898451,0.000928988408734,0.000496921088276,0.000366997383194,0.000345373062397,0.000172778396927,2.15827549073e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.15983662139e-05,0.0,0.0,6.48950068125e-05,0.000107900405391,0.000280954818599,0.0004105766073,0.000496826372698,0.00094881791124,0.00116613190815,0.00172641059339,0.00185748940943,0.00190084693203,0.00185734859335,0.00172633389892,0.002138216743,0.00196600957321,0.00140282697541,0.00108005933632,0.000926656561499,0.00125301165787,0.000777599387679,0.00108001868406,0.00099358569012,0.000907322010752,0.000712771184686,0.000691494964593,0.000496688909381,0.00010813861926,0.000280705372971,0.000302205809145,0.000365607872139,0.000194619348176,0.000172678819842,0.000151298580251,0.000108110372225,0.000108019428507,8.64090216796e-05,4.32315844076e-05,4.32170417946e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_8
y5_ETA_8_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.84292642893e-05,0.0,2.83973993922e-05,2.83973993922e-05,5.66030748554e-05,2.84292642893e-05,0.000141723376559,0.000113644061894,0.000112773137535,0.000198503757442,0.000141120452911,8.50868331672e-05,0.000142062857986,0.000225435534901,0.000111915784712,0.000255442735234,0.000113583286953,5.67183438192e-05,0.000198632048547,0.000170429981271,0.000198798351831,0.000198741185077,0.000142022856106,0.000113357649033,0.000113596531822,0.0,2.83973993922e-05,2.83973993922e-05,2.83498693196e-05,5.64019963668e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,5.67658887402e-05,2.83684893481e-05,0.000142025796111,5.68978177294e-05,8.45111565312e-05,0.000141783943621,0.000113497477428,0.000198740145682,0.000141909131387,0.000198819139742,0.000142041461286,0.000113259010398,0.000113446992503,2.84292642893e-05,5.51480250594e-05,5.65914781711e-05,0.000113545022349,0.000113683677712,8.51264489853e-05,0.000170163302076,2.84080903176e-05,2.83973993922e-05,0.000113642859165,8.52862783201e-05,5.6878173154e-05,2.84489088647e-05,0.0,8.51961775765e-05,0.0,0.0,2.83684893481e-05,2.84489088647e-05,0.0,2.83684893481e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_9
y5_ETA_9_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_10
y5_ETA_10_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0521138287,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0529581672,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.05462838872,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_11
y5_ETA_11_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.230673161153,0.0,0.0,0.0,0.0,0.691026068854,0.230619670779,0.690578010125,0.460783443723,0.229982512821,0.230364746113,0.0,0.229982512821,0.921962685466,0.461195765349,0.691490266921,0.0,0.460080998305,0.461188848491,0.690561870788,0.230752243903,0.461033219172,0.230597152562,0.230428265931,0.229982512821,0.459723627277,0.230360173301,0.230020171273,0.0,0.0,0.0,0.0,0.0,0.0,0.229952462913,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.230020171273,0.0,0.0,0.0,0.690728643934,0.0,0.229982512821,0.230465309552,0.0,0.0,0.0,0.0,0.921987663011,0.691353466828,0.690780136104,0.0,0.691670105244,0.0,0.0,1.15198896663,0.461138893401,0.230020171273,0.0,0.460124420806,0.0,0.230551270733,0.230752243903,0.229932019753,0.230752243903,0.230587737949,0.460570173916,0.0,0.459889631883,0.0,0.690224097526,0.0,0.230619670779,0.230360173301,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_12
y5_ETA_12_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0554467039881,0.0831726910094,0.0276908199867,0.0277244637063,0.0277261678187,0.138459100715,0.110763733864,0.110696415651,0.138481027219,0.0553453804604,0.110839668808,0.0276896813472,0.249341468504,0.304489895107,0.193882570303,0.165968553895,0.415543020352,0.221455956552,0.110817588434,0.41551493904,0.24918529025,0.138688482718,0.415323370639,0.332468383129,0.304440925915,0.30428051623,0.193931154819,0.193743817848,0.138552153719,0.33202835282,0.110822973891,0.110800931985,0.221591477732,0.0553593057136,0.0830496717832,0.027763192836,0.0276953706979,0.0,0.0830522875767,0.0276873271331,0.0,0.0,0.0,0.0,0.0276896813472,0.0276896813472,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.027763192836,0.0,0.0554193535598,0.027603817928,0.0554837482391,0.0277263947773,0.0276586841951,0.0831603813932,0.0553866946095,0.110896946991,0.138389897592,0.0830404011036,0.276814223641,0.221656410957,0.276896121056,0.193704811752,0.193765744352,0.193745241148,0.304459505742,0.304710891184,0.221625021436,0.193771283679,0.166210553255,0.193715351861,0.221506118237,0.332359866169,0.193744356394,0.249216833641,0.19377143755,0.221591593134,0.1384076696,0.110700454743,0.166063030199,0.166173124328,0.0831490719334,0.166071069917,0.0554810939782,0.0830438247156,0.0276586841951,0.0276929395487,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_13
y5_ETA_13_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100450209578,0.0,0.0200719202967,0.0302570227044,0.0201730639343,0.0807196141628,0.040348427967,0.0302623268889,0.0604818076191,0.0504090944485,0.0403768120299,0.0504226704906,0.0504235808197,0.0403129979564,0.0706525268263,0.0706297079089,0.120951914474,0.0705543933437,0.0604464807792,0.0605190340126,0.0907542330928,0.0302540671691,0.13107884078,0.0302726621592,0.0604138485135,0.0604985091246,0.050332353701,0.0604686624662,0.0301965950553,0.0,0.0302875915573,0.0302954446635,0.0605230030477,0.0100953560911,0.040347511569,0.0,0.0100921881456,0.0100996953267,0.0201665945285,0.0100953560911,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0201987048721,0.0201875442367,0.0201245615971,0.0201424829437,0.0101000473206,0.0504036264047,0.0302668906724,0.050308903622,0.0503705389745,0.0706056145306,0.0504038145394,0.110960869966,0.100781812144,0.0806490333092,0.0806486084889,0.0806237261587,0.0706145357563,0.0402893536739,0.0605118484812,0.0806438140887,0.0806781031534,0.100820045968,0.050385189205,0.0403465891021,0.0605020047886,0.0705439549028,0.0201625041162,0.0705455328066,0.0201750545207,0.0403691045764,0.0201483758077,0.050360561767,0.0100592767124,0.0604937875508,0.0403312045394,0.0100853121261,0.0100921881456,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_14
y5_ETA_14_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00566304279343,0.0113243080673,0.00565465151583,0.0113270474481,0.0226121500506,0.0141291878309,0.00565787567476,0.0141412072492,0.00849151124252,0.0198041884835,0.0282808755203,0.0339622436944,0.0254604059094,0.0226399170613,0.0113074601055,0.0396079460531,0.0396194499137,0.0396201424538,0.0339383202812,0.0481071444908,0.00849126115859,0.0254815707045,0.0339557068853,0.0113223958871,0.0169703182216,0.0283032714978,0.0141288069338,0.0226442338946,0.0197992483641,0.0282745349309,0.0141461896905,0.0113219688207,0.0141390603748,0.0056663977655,0.0113208722989,0.00566218481319,0.00282800521671,0.00283012131147,0.00282190547736,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00282347946712,0.00565759865872,0.0,0.00283355053922,0.00849388896355,0.00566924872226,0.0113134197978,0.0141524456361,0.0113166093298,0.0113165477706,0.0254648920303,0.0169795905641,0.0198015222041,0.0282696871502,0.0226120269323,0.0339307869839,0.0395990199806,0.0282947532545,0.0226330955413,0.0254486750495,0.039614178914,0.0226347076208,0.0424486301401,0.0395931718642,0.0339560146809,0.0395998279441,0.0198092017043,0.016970391323,0.028310854812,0.0198078820307,0.0339708581238,0.0198045462959,0.0169803023414,0.0113155205028,0.00282142762469,0.0113133543913,0.00849043780536,0.0,0.00282930950057,0.00283041371729,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_15
y5_ETA_15_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00152260673975,0.00153821483072,0.00151881876977,0.0,0.00304355646424,0.0,0.00456097823541,0.0015356572123,0.00456914512233,0.00152449658811,0.00305783256077,0.00303609280421,0.00304190063088,0.0,0.00304325980887,0.00151265401114,0.00152644434928,0.0,0.00150849610837,0.00154541020084,0.00152162931349,0.00152305585944,0.0030361719911,0.00152449658811,0.00153629543501,0.0,0.0,0.00152495989053,0.00153629543501,0.00153333597266,0.00152162931349,0.00152094972449,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00154541020084,0.00153219780883,0.00151265401114,0.00305061000709,0.00150849610837,0.00152162931349,0.00151727403443,0.0,0.0,0.0,0.00306015380041,0.00306014316337,0.00305496528615,0.00607878412211,0.00301699221674,0.00306438379871,0.00152192833265,0.00151115655156,0.00305428569715,0.00150849610837,0.00304681258196,0.0,0.00303898371671,0.0,0.0,0.0,0.00151265401114,0.0,0.00152094972449,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating weights for histo: y5_ETA_16
y5_ETA_16_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180755028423,0.0,0.0,0.0,0.0,0.000180686039232,0.0,0.000542211148965,0.000541323760103,0.0,0.0,0.0,0.0,0.000361533925874,0.000180154568376,0.000722930104357,0.000360550714414,0.000360973850682,0.000180970234659,0.000180626135672,0.000541377657908,0.00018065712691,0.0,0.000180533816432,0.0,0.0,0.0,0.000180553027149,0.000180626135672,0.000180003616023,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180553027149,0.0,0.0,0.000180626135672,0.00018065712691,0.000361237718936,0.000180626135672,0.0,0.0,0.0,0.000179998688224,0.0,0.000180766962937,0.0,0.000360616354241,0.000902862921764,0.0,0.0,0.0,0.00018065712691,0.000542553400027,0.0,0.000361314061327,0.0,0.000180402036298,0.0,0.0,0.000360206615427,0.0,0.000180755028423,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0])
# Creating a new Canvas
fig = plt.figure(figsize=(12,6),dpi=80)
frame = gridspec.GridSpec(1,1,right=0.7)
pad = fig.add_subplot(frame[0])
# Creating a new Stack
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights+y5_ETA_15_weights+y5_ETA_16_weights,\
label="$bg\_dip\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#e5e5e5", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights+y5_ETA_15_weights,\
label="$bg\_dip\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#f2f2f2", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights,\
label="$bg\_dip\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights,\
label="$bg\_dip\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights,\
label="$bg\_dip\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#c1bfa8", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights,\
label="$bg\_dip\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#bab5a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights,\
label="$bg\_dip\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b2a596", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights,\
label="$bg\_dip\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#b7a39b", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights,\
label="$bg\_vbf\_1600\_inf$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#ad998c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights,\
label="$bg\_vbf\_1200\_1600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#9b8e82", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights,\
label="$bg\_vbf\_800\_1200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#876656", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights,\
label="$bg\_vbf\_600\_800$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#afcec6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights,\
label="$bg\_vbf\_400\_600$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#84c1a3", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights,\
label="$bg\_vbf\_200\_400$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#89a8a0", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights,\
label="$bg\_vbf\_100\_200$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#829e8c", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights,\
label="$bg\_vbf\_0\_100$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#adbcc6", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights,\
label="$signal$", histtype="step", rwidth=1.0,\
color=None, edgecolor="#7a8e99", linewidth=1, linestyle="solid",\
bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical")
# Axis
plt.rc('text',usetex=False)
plt.xlabel(r"\eta [ j_{2} ] ",\
fontsize=16,color="black")
plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\
fontsize=16,color="black")
# Boundary of y-axis
ymax=(y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights+y5_ETA_15_weights+y5_ETA_16_weights).max()*1.1
ymin=0 # linear scale
#ymin=min([x for x in (y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights+y5_ETA_15_weights+y5_ETA_16_weights) if x])/100. # log scale
plt.gca().set_ylim(ymin,ymax)
# Log/Linear scale for X-axis
plt.gca().set_xscale("linear")
#plt.gca().set_xscale("log",nonposx="clip")
# Log/Linear scale for Y-axis
plt.gca().set_yscale("linear")
#plt.gca().set_yscale("log",nonposy="clip")
# Legend
plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.)
# Saving the image
plt.savefig('../../HTML/MadAnalysis5job_0/selection_4.png')
plt.savefig('../../PDF/MadAnalysis5job_0/selection_4.png')
plt.savefig('../../DVI/MadAnalysis5job_0/selection_4.eps')
# Running!
if __name__ == '__main__':
selection_4()
| 179.93299 | 1,742 | 0.742373 | 7,584 | 34,907 | 3.320675 | 0.146097 | 0.273031 | 0.395132 | 0.508259 | 0.393146 | 0.385721 | 0.3831 | 0.378613 | 0.374563 | 0.370314 | 0 | 0.556093 | 0.047268 | 34,907 | 193 | 1,743 | 180.865285 | 0.201161 | 0.037729 | 0 | 0.185841 | 0 | 0.00885 | 0.030637 | 0.005961 | 0 | 0 | 0 | 0 | 0 | 1 | 0.00885 | false | 0 | 0.035398 | 0 | 0.044248 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
51668e773b785fc937e32ac3a240021ec3f1a368 | 595 | py | Python | homework/jenya_s/homework12.py | aodarc/LIST-010 | 4579a047ca1ae0266f368349ea4536c6eb367f97 | [
"MIT"
] | null | null | null | homework/jenya_s/homework12.py | aodarc/LIST-010 | 4579a047ca1ae0266f368349ea4536c6eb367f97 | [
"MIT"
] | 4 | 2018-12-19T13:41:12.000Z | 2019-01-14T15:11:11.000Z | homework/jenya_s/homework12.py | aodarc/LIST-010 | 4579a047ca1ae0266f368349ea4536c6eb367f97 | [
"MIT"
] | null | null | null | import os
class Cpypl:
def __init__(self, directory):
self.directory = directory
self.extension_dict = {"c": (".c", ".h"),
"py": (".py", ".pyc"),
"pl": (".pl", ".pm"),
}
    def file_list(self):
        folder_files = os.listdir(self.directory)
        # Flatten all known extensions into a single tuple so each file is
        # tested with one endswith() call instead of once per category
        extensions = tuple(ext for exts in self.extension_dict.values()
                           for ext in exts)
        return [f for f in folder_files if f.endswith(extensions)]
a = Cpypl(r"C:\Users\admin\Desktop\python test")
print(a.file_list())
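# Example (hypothetical directory contents): for a folder containing
# ['a.py', 'b.txt', 'c.pyc', 'd.pl'], file_list() returns
# ['a.py', 'c.pyc', 'd.pl'].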
| 29.75 | 88 | 0.480672 | 66 | 595 | 4.181818 | 0.545455 | 0.141304 | 0.123188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.381513 | 595 | 19 | 89 | 31.315789 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0.097222 | 0.050347 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.357143 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5166a904988e89265b852c411614fc81169a4bf3 | 68 | py | Python | GCC-paddle/gcc/models/__init__.py | S-HuaBomb/Contrib | 10596814bb98c9e2499e4240e56207dc3b547959 | [
"Apache-2.0"
] | 243 | 2020-06-16T08:06:57.000Z | 2022-03-31T10:09:37.000Z | GCC-paddle/gcc/models/__init__.py | S-HuaBomb/Contrib | 10596814bb98c9e2499e4240e56207dc3b547959 | [
"Apache-2.0"
] | 34 | 2020-02-20T11:04:58.000Z | 2022-03-12T00:54:26.000Z | GCC-paddle/gcc/models/__init__.py | S-HuaBomb/Contrib | 10596814bb98c9e2499e4240e56207dc3b547959 | [
"Apache-2.0"
] | 49 | 2020-06-28T02:37:17.000Z | 2022-03-29T08:07:35.000Z | from .graph_encoder import GraphEncoder
__all__ = ["GraphEncoder"]
| 17 | 39 | 0.794118 | 7 | 68 | 7 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 68 | 3 | 40 | 22.666667 | 0.816667 | 0 | 0 | 0 | 0 | 0 | 0.176471 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
516815174595feb3cdc84a37dc37e334c26007d4 | 941 | py | Python | sme_material_apps/core/migrations/0005_material_categoria.py | luizhpriotto/piloto_apresentacao | c968025db819633ee4cd75df5357ab6a4ab7d9af | [
"MIT"
] | null | null | null | sme_material_apps/core/migrations/0005_material_categoria.py | luizhpriotto/piloto_apresentacao | c968025db819633ee4cd75df5357ab6a4ab7d9af | [
"MIT"
] | null | null | null | sme_material_apps/core/migrations/0005_material_categoria.py | luizhpriotto/piloto_apresentacao | c968025db819633ee4cd75df5357ab6a4ab7d9af | [
"MIT"
] | null | null | null | # Generated by Django 2.2.9 on 2020-07-20 19:50
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20200717_1159'),
]
operations = [
migrations.AddField(
model_name='material',
name='categoria',
            field=multiselectfield.db.fields.MultiSelectField(choices=[('BERCARIO', 'Infant Kit (Nursery I and II)'), ('MINI_GRUPO', 'Infant Kit (Mini group I and II)'), ('EMEI', 'Infant Kit (EMEI - Infant I and II)'), ('CICLO_ALFABETIZACAO', 'Elementary School Kit - Literacy cycle (1st to 3rd grade)'), ('CICLO_INTERDISCIPLINAR', 'Elementary School Kit - Interdisciplinary cycle (4th to 6th grade)'), ('CICLO_ALTORAL', 'Elementary School Kit - Authorial cycle (7th to 9th grade)'), ('MEDIO_EJA_MOVA', 'High School, EJA and MOVA Kit')], default='CICLO_ALFABETIZACAO', max_length=25),
),
]
| 47.05 | 579 | 0.676939 | 117 | 941 | 5.34188 | 0.57265 | 0.0576 | 0.0192 | 0.12 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051047 | 0.188098 | 941 | 19 | 580 | 49.526316 | 0.767016 | 0.047821 | 0 | 0 | 1 | 0 | 0.50783 | 0.050336 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
516a41921de7bc6d406a64cbe02684f2db6fb80f | 833 | py | Python | RT_Group/main/models.py | DiForzza/site_one | 440efe197e1ecaab3416460f1827738d160e48b6 | [
"Apache-2.0"
] | null | null | null | RT_Group/main/models.py | DiForzza/site_one | 440efe197e1ecaab3416460f1827738d160e48b6 | [
"Apache-2.0"
] | null | null | null | RT_Group/main/models.py | DiForzza/site_one | 440efe197e1ecaab3416460f1827738d160e48b6 | [
"Apache-2.0"
] | null | null | null | from django.db import models
import datetime
class Task(models.Model):
title = models.CharField("Название", max_length=50)
task = models.TextField("Описание")
date = models.DateTimeField(u'Дата и время', default=datetime.datetime.now())
def __str__(self):
return self.title
    class Meta:
        verbose_name = 'News item'
        verbose_name_plural = 'News items'
class Test(models.Model):
text = models.TextField("Описание")
cat = models.ForeignKey('Category', on_delete=models.CASCADE, null=True)
def __str__(self):
return self.text
    class Meta:
        verbose_name = 'Server message'
        verbose_name_plural = 'Server messages'
class Category(models.Model):
name = models.CharField(max_length=100, db_index=True)
def __str__(self):
return self.name | 25.242424 | 81 | 0.681873 | 102 | 833 | 5.352941 | 0.490196 | 0.080586 | 0.054945 | 0.087912 | 0.124542 | 0.087912 | 0 | 0 | 0 | 0 | 0 | 0.00761 | 0.211285 | 833 | 33 | 82 | 25.242424 | 0.82344 | 0 | 0 | 0.217391 | 0 | 0 | 0.110312 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.086957 | 0.130435 | 0.826087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
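# Usage sketch (hypothetical, e.g. from `python manage.py shell`):
# task = Task.objects.create(title="Release", task="Publish the news item")
# cat = Category.objects.create(name="general")
# Test.objects.create(text="server booted", cat=cat)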
516aa9e7ed14b4702820fbf786e6257801f2f755 | 364 | py | Python | emapp/auth.py | azrle/flask-restful-emapp-demo | 4be19725538a5e961afb77a1495d6c57ae0be89a | [
"MIT"
] | null | null | null | emapp/auth.py | azrle/flask-restful-emapp-demo | 4be19725538a5e961afb77a1495d6c57ae0be89a | [
"MIT"
] | null | null | null | emapp/auth.py | azrle/flask-restful-emapp-demo | 4be19725538a5e961afb77a1495d6c57ae0be89a | [
"MIT"
] | null | null | null | from functools import wraps
from flask import current_app, request
from flask_restful import abort
def auth_simple_token(func):
@wraps(func)
def wrapper(*args, **kwargs):
token = request.headers.get('x-simple-auth')
if current_app.config['API_KEY'] == token:
return func(*args, **kwargs)
abort(401)
return wrapper
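# Usage sketch (hypothetical resource; assumes API_KEY is set in the Flask app
# config and the client sends it in the 'x-simple-auth' header):
#
# from flask_restful import Resource
#
# class Ping(Resource):
#     method_decorators = [auth_simple_token]
#
#     def get(self):
#         return {'pong': True}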
| 26 | 52 | 0.667582 | 48 | 364 | 4.9375 | 0.5625 | 0.075949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010638 | 0.225275 | 364 | 13 | 53 | 28 | 0.829787 | 0 | 0 | 0 | 0 | 0 | 0.054945 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.272727 | 0 | 0.636364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
5a8c5dccd774b45cfea010980c9e6fb6227679df | 3,307 | py | Python | Python/data/preprocess.py | SCAN-NRAD/BrainRegressorCNN | 7917c6a6c4e3728db17ec762c63f8253392e6c04 | [
"BSD-3-Clause"
] | 1 | 2022-02-11T18:49:34.000Z | 2022-02-11T18:49:34.000Z | Python/data/preprocess.py | SCAN-NRAD/BrainRegressorCNN | 7917c6a6c4e3728db17ec762c63f8253392e6c04 | [
"BSD-3-Clause"
] | null | null | null | Python/data/preprocess.py | SCAN-NRAD/BrainRegressorCNN | 7917c6a6c4e3728db17ec762c63f8253392e6c04 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import scipy.ndimage.measurements as scipy_measurements
import miapy.data.transformation as miapy_tfm
class ClipNegativeTransform(miapy_tfm.Transform):
def __init__(self, entries=('images',)) -> None:
super().__init__()
self.entries = entries
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
continue
img = sample[entry]
m = np.min(img)
if m < 0:
print('Clipping... min: {}'.format(m))
img = np.clip(img, a_min=0, a_max=None)
sample[entry] = img
return sample
class CenterCentroidTransform(miapy_tfm.Transform):
def __init__(self, entries=('images',)) -> None:
super().__init__()
self.entries = entries
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
continue
img = sample[entry]
centroid_transform = []
# move centroid to center
com = scipy_measurements.center_of_mass(img > 0)
for axis in range(0, 3):
diff = com[axis] - int(img.shape[axis] / 2)
centroid_transform.append(-diff)
if abs(diff) > 1:
img = np.roll(img, int(-diff), axis=axis)
sample[entry] = img
# store the centroid transformation (will be written to metadata later)
sample['centroid_transform'] = np.array(centroid_transform)
return sample
class RandomRotateShiftTransform(miapy_tfm.Transform):
def __init__(self, do_rotate=True, shift_amount=0, entries=('images',)) -> None:
super().__init__()
self.entries = entries
self.do_rotate = do_rotate
self.shift_amount = shift_amount
print('Using RandomRotateShiftTransform({}, {})'.format(do_rotate, shift_amount))
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
continue
img = sample[entry]
# shift +/- shift_amount pixels
if self.shift_amount != 0:
# number of pixels to shift
n = np.random.randint(-self.shift_amount, self.shift_amount + 1)
# axis
k = np.random.randint(0, 3)
img = np.roll(img, n, axis=k)
            # apply three rotations, each by a random multiple of 90 degrees
            # in a randomly chosen plane
if self.do_rotate:
planes = [(0, 1), (0, 2), (1, 2)]
for i in range(0, 3):
k = np.random.randint(0, 3)
plane_idx = np.random.randint(0, 3)
img = np.rot90(img, k, planes[plane_idx])
sample[entry] = img
return sample
def get_bounding_box(img):
a = np.argwhere(img)
min0, min1, min2 = a.min(0)
max0, max1, max2 = a.max(0)
return [min0, max0, min1, max1, min2, max2]
# Apply reverse center centroid transform
def revert_centroid_transform(img, centroid_transform):
for axis in range(0, 3):
diff = -centroid_transform[axis]
if abs(diff) > 1:
img = np.roll(img, int(diff), axis=axis)
return img
| 30.063636 | 89 | 0.558512 | 398 | 3,307 | 4.462312 | 0.261307 | 0.04955 | 0.04223 | 0.033784 | 0.395833 | 0.366554 | 0.34009 | 0.292793 | 0.268018 | 0.268018 | 0 | 0.020947 | 0.335954 | 3,307 | 109 | 90 | 30.33945 | 0.787796 | 0.071969 | 0 | 0.486111 | 0 | 0 | 0.031036 | 0.009801 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.041667 | 0 | 0.263889 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a8e3a68bba6328be4a0daef3330c63f8527f035 | 1,828 | py | Python | conversion_service/config/settings/worker.py | das-g/osmaxx-postgis-conversion | c41aba1cb0fd112de12c8c0540584b7caa651150 | [
"MIT"
] | null | null | null | conversion_service/config/settings/worker.py | das-g/osmaxx-postgis-conversion | c41aba1cb0fd112de12c8c0540584b7caa651150 | [
"MIT"
] | null | null | null | conversion_service/config/settings/worker.py | das-g/osmaxx-postgis-conversion | c41aba1cb0fd112de12c8c0540584b7caa651150 | [
"MIT"
] | null | null | null | # pylint: skip-file
import random
import string
from .common import * # noqa
# we don't use user sessions, so it doesn't matter if we recreate the secret key on each startup
SECRET_KEY = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(30))
# disable databases for the worker
DATABASES = {}
INSTALLED_APPS += (
# sentry
'raven.contrib.django.raven_compat',
)
# SENTRY
SENTRY_DSN = env.str('SENTRY_DSN', default=None)
if SENTRY_DSN:
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
},
}
RAVEN_CONFIG = {
'dsn': SENTRY_DSN,
'release': env.str('SENTRY_RELEASE', default=''),
}
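# A hypothetical manual check (not part of this settings module): with
# SENTRY_DSN set, the raven Django client can be exercised from a Django
# shell to confirm the handler wiring above, e.g.
#
#     from raven.contrib.django.raven_compat.models import client
#     client.captureMessage('sentry smoke test')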
| 26.114286 | 96 | 0.461707 | 154 | 1,828 | 5.38961 | 0.558442 | 0.043373 | 0.086747 | 0.104819 | 0.210843 | 0.093976 | 0 | 0 | 0 | 0 | 0 | 0.002695 | 0.391138 | 1,828 | 69 | 97 | 26.492754 | 0.743037 | 0.089716 | 0 | 0.196429 | 0 | 0 | 0.308389 | 0.080869 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.053571 | 0 | 0.053571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a9109ad68997c62118ea1f04a5cdac8b71bc9b1 | 2,289 | py | Python | categorias.py | OrlandoBitencourt/proway_python_exercicio1 | 4fe2aa48aab350cd5e949d95feefe37d1b2af387 | [
"MIT"
] | null | null | null | categorias.py | OrlandoBitencourt/proway_python_exercicio1 | 4fe2aa48aab350cd5e949d95feefe37d1b2af387 | [
"MIT"
] | null | null | null | categorias.py | OrlandoBitencourt/proway_python_exercicio1 | 4fe2aa48aab350cd5e949d95feefe37d1b2af387 | [
"MIT"
] | null | null | null | import main
def cadastrar_categoria(nome: str):
with open("categorias.txt", "a") as file:
dados = f"{nome}\n"
file.write(dados)
print("Categoria cadastrada com sucesso! \n")
def listar_categorias():
main.categorias_list.clear()
print(f"Lista de categorias: \n")
with open("categorias.txt", "r") as file:
for i in file:
print(i.strip())
main.categorias_list.append(i.strip())
print("-----------------")
print("\n")
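# deletar_categoria below removes the named category from categorias.txt and
# then re-labels the matching products in produtos.txt (CSV-style lines of
# "code,name,price,category") by replacing the deleted category with "NULL".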
def deletar_categoria(nome):
categorias = []
contador = 0
categoria = 0
categorias.clear()
main.produtos_categoria.clear()
with open("categorias.txt", "r") as file:
nome_inserido = nome
for i in file:
categorias.append(i.strip())
for index_categoria in range(len(categorias)):
if nome_inserido == categorias[index_categoria]:
contador = 1
categorias.remove(categorias[index_categoria])
break
if contador == 0:
print("Codigo não localizado!\n")
else:
print("Categoria deletado com sucesso!\n")
with open("categorias.txt", "w") as file:
for x in categorias:
file.write(f"{x}\n")
with open("produtos.txt", "r") as file:
for produtos_arquivo in file:
main.produtos_categoria.append(produtos_arquivo.strip())
print("Produtos da categoria alterados:\n")
for prod in range(len(main.produtos_categoria)-1):
try:
cod_prod = main.produtos_categoria[prod].split(",")[0]
prod_nome = main.produtos_categoria[prod].split(",")[1]
prod_preco = main.produtos_categoria[prod].split(",")[2]
cat_produto = main.produtos_categoria[prod].split(",")[3]
if nome == cat_produto:
main.produtos_categoria[prod] = main.produtos_categoria[prod].replace(nome, "NULL")
print(f"{main.produtos_categoria[prod]}")
        except Exception:
pass
print(f"-----------------------------\n")
with open("produtos.txt", "w") as file:
for prod in main.produtos_categoria:
file.write(f"{prod}\n") | 31.791667 | 103 | 0.553954 | 259 | 2,289 | 4.787645 | 0.262548 | 0.106452 | 0.18629 | 0.141129 | 0.279032 | 0.101613 | 0.045161 | 0 | 0 | 0 | 0 | 0.005636 | 0.302315 | 2,289 | 72 | 104 | 31.791667 | 0.77082 | 0 | 0 | 0.071429 | 0 | 0 | 0.151092 | 0.027074 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053571 | false | 0.017857 | 0.017857 | 0 | 0.071429 | 0.178571 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5a9137e47101ff21c41e130f1251b26b67a1b350 | 708 | py | Python | test/espnet2/layers/test_log_mel.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 5,053 | 2017-12-13T06:21:41.000Z | 2022-03-31T13:38:29.000Z | test/espnet2/layers/test_log_mel.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 3,666 | 2017-12-14T05:58:50.000Z | 2022-03-31T22:11:49.000Z | test/espnet2/layers/test_log_mel.py | texpomru13/espnet | 7ef005e832e2fb033f356c16f54e0f08762fb4b0 | [
"Apache-2.0"
] | 1,709 | 2017-12-13T01:02:42.000Z | 2022-03-31T11:57:45.000Z | import torch
from espnet2.layers.log_mel import LogMel
def test_repr():
print(LogMel())
def test_forward():
layer = LogMel(n_fft=16, n_mels=2)
x = torch.randn(2, 4, 9)
y, _ = layer(x)
assert y.shape == (2, 4, 2)
y, ylen = layer(x, torch.tensor([4, 2], dtype=torch.long))
assert (ylen == torch.tensor((4, 2), dtype=torch.long)).all()
def test_backward_leaf_in():
layer = LogMel(n_fft=16, n_mels=2)
x = torch.randn(2, 4, 9, requires_grad=True)
y, _ = layer(x)
y.sum().backward()
def test_backward_not_leaf_in():
layer = LogMel(n_fft=16, n_mels=2)
x = torch.randn(2, 4, 9, requires_grad=True)
x = x + 2
y, _ = layer(x)
y.sum().backward()
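# Note: the two backward tests above differ only in whether the layer input
# is a graph leaf; `x = x + 2` makes it a non-leaf tensor, so the gradient
# must flow through LogMel into an upstream operation instead of stopping at
# a leaf.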
| 22.125 | 65 | 0.610169 | 121 | 708 | 3.396694 | 0.322314 | 0.068127 | 0.087591 | 0.109489 | 0.600973 | 0.600973 | 0.508516 | 0.377129 | 0.377129 | 0.377129 | 0 | 0.048825 | 0.218927 | 708 | 31 | 66 | 22.83871 | 0.694394 | 0 | 0 | 0.454545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.272727 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a91d9da8ada11831ecc164dcca33401ae0379e5 | 6,347 | py | Python | test/database/test_raw_scraping_data.py | AMHesch/aws-allowlister | b15309cc581b3cfba97733806df4ba2afa73fbee | [
"MIT"
] | 180 | 2021-02-02T21:31:56.000Z | 2022-03-19T19:52:08.000Z | test/database/test_raw_scraping_data.py | AMHesch/aws-allowlister | b15309cc581b3cfba97733806df4ba2afa73fbee | [
"MIT"
] | 36 | 2021-02-05T19:19:26.000Z | 2022-01-27T05:01:08.000Z | test/database/test_raw_scraping_data.py | AMHesch/aws-allowlister | b15309cc581b3cfba97733806df4ba2afa73fbee | [
"MIT"
] | 28 | 2021-02-04T19:19:43.000Z | 2022-03-18T21:27:41.000Z | import unittest
import json
from aws_allowlister.database.database import connect_db
from aws_allowlister.database.raw_scraping_data import RawScrapingData
db_session = connect_db()
raw_scraping_data = RawScrapingData()
class RawScrapingDataTestCase(unittest.TestCase):
def test_standards(self):
"""database.scrapers.raw_scraping_data.standards"""
results = raw_scraping_data.standards(db_session=db_session)
print(results)
print(len(results))
# This will grow over time, so let's just make sure it meets minimum size
self.assertTrue(len(results) >= 7)
expected_results = ['SOC', 'PCI', 'IRAP', 'OSPAR', 'FINMA', 'ISO', 'HIPAA']
def test_get_rows(self):
"""database.scrapers.raw_scraping_data.get_rows"""
results = raw_scraping_data.get_rows(db_session=db_session, sdk_name="ecs")
print(len(results))
# print(results)
results = raw_scraping_data.get_rows(db_session=db_session)
# This will change over time, so let's just check that the size of this is massive
print(len(results))
self.assertTrue(len(results) > 850) # 857
def test_get_sdk_names_matching_compliance_standard(self):
"""database.scrapers.raw_scraping_data.get_sdk_names_matching_compliance_standard"""
results = raw_scraping_data.get_sdk_names_matching_compliance_standard(db_session=db_session, standard_name="SOC")
# print(results)
print(len(results)) # 120
def test_get_service_names_matching_compliance_standard(self):
"""database.scrapers.raw_scraping_data.get_service_names_matching_compliance_standard"""
results = raw_scraping_data.get_service_names_matching_compliance_standard(db_session=db_session, standard_name="SOC")
# print(results)
print(len(results)) # 119
expected_results = {'Amazon API Gateway': 'apigateway', 'Amazon AppStream 2.0': 'appstream', 'Amazon Athena': 'athena', 'Amazon Chime': 'chime', 'Amazon Cloud Directory': 'clouddirectory', 'Amazon CloudFront': 'cloudfront', 'Amazon CloudWatch': 'cloudwatch', 'Amazon CloudWatch Events [includes Amazon EventBridge]': 'events', 'Amazon CloudWatch Logs': 'logs', 'Amazon CloudWatch SDK Metrics for Enterprise Support': 'sdkmetrics', 'Amazon Cognito': 'cognito-sync', 'Amazon Comprehend': 'comprehend', 'Amazon Comprehend Medical': 'comprehendmedical', 'Amazon Connect': 'connect', 'Amazon DynamoDB': 'dynamodb', 'Amazon EC2 Auto Scaling': 'autoscaling', 'Amazon Elastic Block Store (EBS)': 'ec2', 'Amazon Elastic Compute Cloud (EC2)': 'ec2', 'Amazon Elastic Container Registry (ECR)': 'ecr', 'Amazon Elastic Container Service': 'ecs', 'Amazon Elastic File System (EFS)': 'elasticfilesystem', 'Amazon Elastic Kubernetes Service (EKS)': 'eks', 'Amazon Elastic MapReduce (EMR)': 'elasticmapreduce', 'Amazon ElastiCache for Redis': 'elasticache', 'Amazon Elasticsearch Service': 'es', 'Amazon Forecast': 'amazonforecast', 'Amazon FreeRTOS': 'freertos', 'Amazon FSx': 'fsx', 'Amazon GuardDuty': 'guardduty', 'Amazon Inspector': 'inspector', 'Amazon Kinesis Data Analytics': 'kinesisanalytics', 'Amazon Kinesis Data Firehose': 'firehose', 'Amazon Kinesis Data Streams': 'kinesis', 'Amazon Kinesis Video Streams': 'kinesisvideo', 'Amazon Lex': 'models.lex', 'Amazon Macie': 'macie', 'Amazon Managed Streaming for Apache Kafka': 'kafka', 'Amazon MQ': 'mq', 'Amazon Neptune': 'neptune-db', 'Amazon Personalize': 'personalize', 'Amazon Pinpoint': 'mobiletargeting', 'Amazon Polly': 'polly', 'Amazon Quantum Ledger Database (QLDB)': 'qldb', 'Amazon QuickSight': 'quicksight', 'Amazon Redshift': 'redshift', 'Amazon Rekognition': 'rekognition', 'Amazon Relational Database Service (RDS)': 'rds', 'Amazon Route 53': 'route53', 'Amazon S3 Glacier': 'glacier', 'Amazon SageMaker': 'sagemaker', 'Amazon SimpleDB': 'sdb', 'Amazon Simple Email Service (SES)': 'ses', 'Amazon Simple Notification Service (SNS)': 'sns', 'Amazon Simple Queue Service (SQS)': 'sqs', 'Amazon Simple Storage Service (S3)': 's3', 'Amazon Simple Workflow Service (SWF)': 'swf', 'Amazon Textract': 'textract', 'Amazon Transcribe': 'transcribe', 'Amazon Translate': 'translate', 'Amazon Virtual Private Cloud (VPC)': 'ec2', 'Amazon WorkDocs': 'workdocs', 'Amazon WorkLink': 'worklink', 'Amazon WorkMail': 'workmail', 'Amazon WorkSpaces': 'workspaces', 'AWS Amplify': 'amplify', 'AWS AppSync': 'appsync', 'AWS Backup': 'backup', 'AWS Batch': 'batch', 'AWS Certificate Manager (ACM)': 'acm', 'AWS CloudFormation': 'cloudformation', 'AWS CloudHSM': 'cloudhsm', 'AWS CloudTrail': 'cloudtrail', 'AWS CodeBuild': 'codebuild', 'AWS CodeCommit': 'codecommit', 'AWS CodeDeploy': 'codedeploy', 'AWS CodePipeline': 'codepipeline', 'AWS Config': 'config', 'AWS Control Tower': 'controltower', 'AWS Data Exchange': 'dataexchange', 'AWS Database Migration Service (DMS)': 'dms', 'AWS DataSync': 'datasync', 'AWS Direct Connect': 'directconnect', 'AWS Directory Service': 'ds', 'AWS Elastic Beanstalk': 'elasticbeanstalk', 'AWS Elemental MediaConnect': 'mediaconnect', 'AWS Elemental MediaConvert': 'mediaconvert', 'AWS Elemental MediaLive': 'medialive', 'AWS Firewall Manager': 'fms', 'AWS Global Accelerator': 'globalaccelerator', 'AWS Glue': 'glue', 'AWS Identity and Access Management (IAM)': 'iam', 'AWS IoT Core': 'iot', 'AWS IoT Device Management': 'iot', 'AWS IoT Events': 'iotevents', 'AWS IoT Greengrass': 
'greengrass', 'AWS Key Management Service (KMS)': 'kms', 'AWS Lambda': 'lambda', 'AWS License Manager': 'license-manager', 'AWS OpsWorks Stacks': 'opsworks', 'AWS OpsWorks Stacksfor Chef Automate': 'opsworks-cm', 'AWS Organizations': 'organizations', 'AWS Outposts': 'outposts', 'AWS Personal Health Dashboard': 'health', 'AWS Resource Groups': 'resource-groups', 'AWS RoboMaker': 'robomaker', 'AWS Secrets Manager': 'secretsmanager', 'AWS Security Hub': 'securityhub', 'AWS Server Migration Service (SMS)': 'sms', 'AWS Serverless Application Repository': 'serverlessrepo', 'AWS Service Catalog': 'servicecatalog', 'AWS Shield': 'DDoSProtection', 'AWS Snowball': 'snowball', 'AWS Step Functions': 'states', 'AWS Storage Gateway': 'storagegateway', 'AWS Systems Manager': 'ssm', 'AWS Transfer Family': 'transfer', 'AWS Web Application Firewall (WAF)': 'waf', 'AWS X-Ray': 'xray', 'Elastic Load Balancing (ELB)': 'elasticloadbalancing'}
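        # The expected_results literals in this module are reference data
        # only; the assertions stay loose because the live result sets grow
        # over time. A stricter (hypothetical) check could assert containment
        # instead, e.g.:
        #
        #     self.assertTrue(set(expected_results).issubset(set(results)))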
| 144.25 | 4,496 | 0.722073 | 733 | 6,347 | 6.143247 | 0.390177 | 0.026871 | 0.036642 | 0.027981 | 0.155896 | 0.149012 | 0.129691 | 0.121253 | 0.121253 | 0.091939 | 0 | 0.004886 | 0.129352 | 6,347 | 43 | 4,497 | 147.604651 | 0.809989 | 0.072948 | 0 | 0.192308 | 0 | 0 | 0.607003 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 1 | 0.153846 | false | 0 | 0.153846 | 0 | 0.346154 | 0.230769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
5a9713280ba850f01b042a95ba2fef86e7e5c38e | 5,533 | py | Python | GenPasswd/genpasswd.py | Gowthaman1401/GenPass | dd6450d80e99648f1d23cb0e034189b5fe56bbd1 | [
"MIT"
] | null | null | null | GenPasswd/genpasswd.py | Gowthaman1401/GenPass | dd6450d80e99648f1d23cb0e034189b5fe56bbd1 | [
"MIT"
] | null | null | null | GenPasswd/genpasswd.py | Gowthaman1401/GenPass | dd6450d80e99648f1d23cb0e034189b5fe56bbd1 | [
"MIT"
] | null | null | null | import random
from . import constants
from . import exceptions
class PasswordGenerator:
def __init__(self, length=False, ignore=False, only=False, include=False, repeat=False, separator=False,
separator_length=False, separation=False):
self.__password = None
self.__available_char = None
self.__possibility = None
self.__error = None
self.length = length
self.ignore = ignore
self.only = only
self.include = include
self.repeat = repeat
self.separator_length = separator_length
self.separator = separator if separator else separation
def __all_possible_chars(self):
self.__possibility = constants.POSSIBLE_UPPERCASE_CHARS + constants.POSSIBLE_LOWERCASE_CHARS + constants.POSSIBLE_NUMBERS + constants.POSSIBLE_SPECIAL_CHARS
self.__available_char = {"alphabets": constants.POSSIBLE_UPPERCASE_CHARS + constants.POSSIBLE_LOWERCASE_CHARS,
"uppercase": constants.POSSIBLE_UPPERCASE_CHARS,
"lowercase": constants.POSSIBLE_LOWERCASE_CHARS,
"numbers": constants.POSSIBLE_NUMBERS,
"symbols": constants.POSSIBLE_SPECIAL_CHARS
}
def __set_length(self):
if not self.length:
if not self.only:
self.length = random.randint(constants.DEFAULT_MIN_PASS_LEN, constants.DEFAULT_MAX_PASS_LEN)
else:
self.__error = ValueError('[-] Password length must be given.')
def __add_only_wanted(self):
possibility = constants.EMPTY_STRING
try:
choices = [character for character in self.only.split(',')]
for choice in choices:
if choice.lower() in self.__available_char:
possibility += self.__available_char[choice.lower()]
if not choice.lower() in self.__available_char:
for char in choice:
possibility += char
except Exception:
raise exceptions.GenpasswdException
self.__possibility = possibility
def __remove_unwanted(self):
try:
choices = [characters for characters in self.ignore.split(',')]
if self.ignore == ',' or ',,,' in self.ignore or ',,' in self.ignore or len(choices) > 0:
possibility = self.__possibility.replace(',', constants.EMPTY_STRING)
for choice in choices:
if choice.lower() in self.__available_char:
for char in self.__available_char[choice.lower()]:
possibility = possibility.replace(char, constants.EMPTY_STRING)
if choice not in self.__available_char:
for char in choice:
possibility = possibility.replace(char, constants.EMPTY_STRING)
self.__possibility = possibility
except Exception:
raise exceptions.GenpasswdException
def __include_characters(self):
possibility = self.__possibility
try:
choices = [character for character in self.include.split(',')]
for choice in choices:
if choice.lower() in self.__available_char:
for char in self.__available_char[choice.lower()]:
if char not in possibility:
possibility += char
if not choice.lower() in self.__available_char:
for char in choice:
if char not in possibility:
possibility += char
except Exception:
raise exceptions.GenpasswdException
self.__possibility = possibility
def __repeat_char(self):
if self.repeat is None or self.repeat is True:
self.__possibility *= self.length
def __check(self):
if self.length and ((self.length > len(
self.__possibility)) or self.length > constants.PASSWORD_LENGTH_LIMIT) and self.__error is None:
self.__error = ValueError('[-] Password length must be less.')
def __separated_pass(self):
if type(self.separator) is bool:
self.separator = constants.DEFAULT_SEPARATOR
if not self.separator_length:
self.separator_length = constants.DEFAULT_SEPARATE_LENGTH
final_password = constants.EMPTY_STRING
for i in range(len(self.__password)):
final_password += self.separator + self.__password[i] if i % self.separator_length == 0 and i != 0 else \
self.__password[i]
return final_password
def __filter(self):
if self.only:
self.__add_only_wanted()
if self.include:
self.__include_characters()
if self.ignore:
self.__remove_unwanted()
self.__repeat_char()
def generate(self):
self.__all_possible_chars()
self.__set_length()
self.__filter()
self.__check()
if self.__error is not None:
return self.__error
self.__password = constants.EMPTY_STRING.join(random.sample(self.__possibility, self.length))
return self.__separated_pass() if self.separator or self.separator_length else self.__password
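# Hypothetical usage sketch (the guard and argument values below are
# illustrative, not part of the original module). `only`/`ignore`/`include`
# accept comma-separated category names ("alphabets", "uppercase",
# "lowercase", "numbers", "symbols") or literal characters, mirroring the
# parsing in the private helpers above.
if __name__ == '__main__':
    generator = PasswordGenerator(length=16, only='alphabets,numbers',
                                  separator='-', separator_length=4)
    print(generator.generate())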
| 44.98374 | 165 | 0.590638 | 557 | 5,533 | 5.543986 | 0.156194 | 0.025259 | 0.060557 | 0.049223 | 0.359132 | 0.343588 | 0.334521 | 0.222474 | 0.182319 | 0.167746 | 0 | 0.000815 | 0.33508 | 5,533 | 122 | 166 | 45.352459 | 0.838543 | 0 | 0 | 0.293578 | 0 | 0 | 0.021807 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.100917 | false | 0.155963 | 0.027523 | 0 | 0.165138 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5a97b4378066662b1ab8308caa3ca06c47283ec7 | 43,014 | py | Python | AESDataV3.py | JHerrmann01/AESDataManipulator | 836ee74326ee0c35435b5fe0bb9875d392b2cc7c | [
"Apache-2.0"
] | null | null | null | AESDataV3.py | JHerrmann01/AESDataManipulator | 836ee74326ee0c35435b5fe0bb9875d392b2cc7c | [
"Apache-2.0"
] | null | null | null | AESDataV3.py | JHerrmann01/AESDataManipulator | 836ee74326ee0c35435b5fe0bb9875d392b2cc7c | [
"Apache-2.0"
] | null | null | null | ###American Environmental Solutions Data Manipulation###
## Created by Jeremy Herrmann ##
##Import Libraries##
from __future__ import print_function
from os.path import join, dirname, abspath
import xlrd
from xlrd.sheet import ctype_text
import xlsxwriter
####################
def loadSpreadsheet():
fname = join(dirname(dirname(abspath(__file__))), 'AES/First Spreadsheet', 'GBZ65745 Excel SE855 GLENWOOD RD-1 (copy).xls')
xl_workbook = xlrd.open_workbook(fname)
xl_sheet = xl_workbook.sheet_by_name("Results")
return xl_workbook, xl_sheet
def grabSimpleInformation(xl_workbook, xl_sheet):
numSpaces = 0
generalAreas = {}
num_cols = xl_sheet.ncols
for row_idx in range(8, xl_sheet.nrows-7):
if(xl_sheet.cell(row_idx,0).value == "Mercury"):
Mercury_Values_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "pH at 25C - Soil"):
Corrosivity_Values_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Flash Point"):
Flashpoint_Values_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Ignitability"):
Ignitability_Values_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Reactivity Cyanide"):
Reactivity_Values_Cyanide_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Reactivity Sulfide"):
Reactivity_Values_Sulfide_Raw = (xl_sheet.row(row_idx))
if(xl_sheet.cell(row_idx,0).value == "Total Cyanide (SW9010C Distill.)"):
Cyanide_Values_Raw = (xl_sheet.row(row_idx))
if(numSpaces%3 == 0):
generalAreas[int(row_idx)] = str(xl_sheet.cell(row_idx,0).value)
numSpaces +=1
if(xl_sheet.cell(row_idx,0).value == ""):
numSpaces += 1
return Mercury_Values_Raw, Corrosivity_Values_Raw, Flashpoint_Values_Raw, Ignitability_Values_Raw, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, Cyanide_Values_Raw, generalAreas
def sortGeneralAreas(generalAreas):
keys = generalAreas.keys()
sortedGenAreas = [[0 for i in range(2)]for i in range(len(keys))]
for x in range(0,len(keys)):
smallestKey = 100000
for key in generalAreas.keys():
if(key < smallestKey):
smallestKey = key
sortedGenAreas[x][0] = int(smallestKey)
sortedGenAreas[x][1] = str(generalAreas.pop(smallestKey))
return sortedGenAreas
def insertRowsIntoAreas(xl_sheet, sortedGenAreas):
rowsInArea = [[""]for i in range(len(sortedGenAreas))]
for x in range(0,len(sortedGenAreas)):
rowsInArea[x][0] = sortedGenAreas[x][1]
numAreas = len(sortedGenAreas)
for x in range(0 , numAreas):
if(x < numAreas-1):
for y in range(sortedGenAreas[x][0]+1, sortedGenAreas[x+1][0]-2):
rowsInArea[x].append(xl_sheet.row(y))
else:
for y in range(sortedGenAreas[x][0]+1, xl_sheet.nrows-7):
rowsInArea[x].append(xl_sheet.row(y))
return rowsInArea
print("Beginning program...")
#Loading the file to be parsed
xl_workbook, xl_sheet = loadSpreadsheet()
#Grabbing basic information
Company_Name = xl_sheet.cell(0, 0).value
Type_Samples_Collected_Raw = xl_sheet.row(4)
firstIndex = 6
#Begin parsing to find simple useful information
Mercury_Values_Raw, Corrosivity_Values_Raw, Flashpoint_Values_Raw, Ignitability_Values_Raw, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, Cyanide_Values_Raw, generalAreas = grabSimpleInformation(xl_workbook, xl_sheet)
#Sort the general areas in increasing order(Row number)
sortedGenAreas = sortGeneralAreas(generalAreas)
#Insert the rows that belong to each respective area
rowsInArea = insertRowsIntoAreas(xl_sheet, sortedGenAreas)
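#rowsInArea[i][0] now holds the area title from sortedGenAreas[i] (e.g.
#"Metals, Total") and rowsInArea[i][1:] hold that area's raw xlrd cell rows.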
print("Done Parsing")
print()
########################################################################################################################
def startWritingFinalFile():
workbook = xlsxwriter.Workbook('/home/jeremy/Desktop/AES/Excel_Reformatting.xlsx')
worksheet = workbook.add_worksheet()
return workbook, worksheet
#Refining a given row
def valueRefinerMetals(inputArrayRaw):
outputArray = []
pos = 0
units = str(inputArrayRaw[2].value)
divisor = 1
if(units[0:2] == "ug"):
divisor = 1000
for value in inputArrayRaw:
if((pos >= firstIndex and pos%2 == firstIndex%2) or (pos == 0) or (pos == 2)):
if(pos == 0):
outputArray.append(str(value.value))
elif(pos == 2):
outputArray.append("ppm")
outputArray.append("")
elif(str(value.value).find("<") == -1):
outputArray.append(str(round((float(value.value)/divisor), 5)))
else:
outputArray.append("N.D.")
pos+=1
return(outputArray)
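#Each refined row comes back as [analyte name, "ppm", "", value, ...]: units
#are normalized to ppm (values reported in ug are divided by 1000), the empty
#string pads the NYSDEC criteria column, and "<" (non-detect) readings become
#"N.D.".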
def isDetected(compound):
hasFloat = False
for x in compound:
try:
val = float(x)
hasFloat = True
break
except Exception as e:
val = ""
return hasFloat
def isNumber(value):
try:
val = float(value)
return True
except Exception as e:
return False
def removeUselessRows(rowsInArea, index):
y = 1
lenRow = (len(rowsInArea[index][1]))
while(y < len(rowsInArea[index])):
if not isDetected(rowsInArea[index][y]):
rowsInArea[index].remove(rowsInArea[index][y])
y -= 1
y += 1
if(len(rowsInArea[index]) == 1):
emptyArray = ["None Detected", "_", "_"]
for x in range(len(emptyArray), lenRow):
emptyArray.append("N.D.")
rowsInArea[index].append(emptyArray)
return rowsInArea[index]
def createBeginning(worksheet, currLine):
line = 1
x = len(Type_Samples_Collected)
offset = 4
finalLetter=""
if 64+x+offset > 90:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(64+((x+offset)%26))
finalLetter = firstLetter+secondLetter
else:
finalLetter = chr(64+x+offset)
for x in range(0, 5):
worksheet.merge_range("B"+str(line)+":"+finalLetter+str(line), "")
line += 1
return worksheet, currLine
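#The two-letter arithmetic above reappears in nearly every writer function
#below to turn a column index into an Excel column name. A sketch of an
#equivalent helper (an illustrative addition, unused by the original code),
#built on a utility that ships with the xlsxwriter package imported above:
from xlsxwriter.utility import xl_col_to_name
def col_name(index):
    """Excel column name for a 0-based index, e.g. 0 -> 'A', 30 -> 'AE'."""
    return xl_col_to_name(index)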
def createHeading(worksheet, currLine, Type_Samples_Collected_Raw, formatOne):
formatOne.set_text_wrap(True)
Type_Samples_Collected = []
pos = 0
for value in Type_Samples_Collected_Raw:
if((pos >= firstIndex and pos%2 == firstIndex%2) or (pos ==0)):
Type_Samples_Collected.append(value.value)
pos+=1
worksheet.write('B'+str(currLine), 'Parameter', formatOne)
worksheet.write('C'+str(currLine), 'Compounds Detected', formatOne)
worksheet.write('D'+str(currLine), 'Units', formatOne)
worksheet.write('E'+str(currLine), 'NYSDEC Part 375 Unrestricted Use Criteria', formatOne)
offset = 4
for x in range(1,len(Type_Samples_Collected)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Type_Samples_Collected[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Type_Samples_Collected[x]), formatOne)
currLine += 1
return worksheet, currLine, Type_Samples_Collected
def addMercuryValues(worksheet, currLine, Mercury_Values_Raw, formatOne, formatTwo):
Mercury_Values = valueRefinerMetals(Mercury_Values_Raw)
offset = 2
worksheet.write('B'+str(currLine), 'Mercury 7471', formatOne)
for x in range(0, len(Mercury_Values)):
if(isNumber(Mercury_Values[x])):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Mercury_Values[x]), formatTwo)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Mercury_Values[x]), formatTwo)
else:
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Mercury_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Mercury_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Mercury_Values
def addPCBValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfPCBS = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "PCBs By SW8082A":
indexOfPCBS = x
for x in range(1, len(rowsInArea[indexOfPCBS])):
rowsInArea[indexOfPCBS][x] = valueRefinerMetals(rowsInArea[indexOfPCBS][x])
rowsInArea[indexOfPCBS] = removeUselessRows(rowsInArea, indexOfPCBS)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfPCBS])):
for y in range(0, len(rowsInArea[indexOfPCBS][x])):
if(isNumber(rowsInArea[indexOfPCBS][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfPCBS][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfPCBS][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfPCBS][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfPCBS][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'PCBS', formatOne)
else:
worksheet.write('B'+str(firstLine), 'PCBS',formatOne)
return worksheet, currLine
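#addPesticideValues, addMetalValues and the remaining area writers below
#repeat the pattern above: locate the area in sortedGenAreas, refine its rows
#with valueRefinerMetals, drop rows with no detections, then write numeric
#hits with formatTwo (highlighted) and everything else with formatOne.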
def addPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfPesticides = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Pesticides - Soil By SW8081B":
indexOfPesticides = x
for x in range(1, len(rowsInArea[indexOfPesticides])):
rowsInArea[indexOfPesticides][x] = valueRefinerMetals(rowsInArea[indexOfPesticides][x])
rowsInArea[indexOfPesticides] = removeUselessRows(rowsInArea, indexOfPesticides)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfPesticides])):
for y in range(0, len(rowsInArea[indexOfPesticides][x])):
if(isNumber(rowsInArea[indexOfPesticides][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfPesticides][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfPesticides][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfPesticides][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfPesticides][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'Pesticides', formatOne)
else:
worksheet.write('B'+str(firstLine), 'Pesticides', formatOne)
return worksheet, currLine
def addMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfMetals = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Metals, Total":
indexOfMetals = x
for x in range(1, len(rowsInArea[indexOfMetals])):
rowsInArea[indexOfMetals][x] = valueRefinerMetals(rowsInArea[indexOfMetals][x])
rowsInArea[indexOfMetals] = removeUselessRows(rowsInArea, indexOfMetals)
firstLine = currLine
offset = 2
worksheet.write('B'+str(currLine), 'Metals, Total')
for x in range(1, len(rowsInArea[indexOfMetals])):
if(rowsInArea[indexOfMetals][x][0] != "Mercury"):
for y in range(0, len(rowsInArea[indexOfMetals][x])):
if(isNumber(rowsInArea[indexOfMetals][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+offset+y))+str(currLine), str(rowsInArea[indexOfMetals][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfMetals][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfMetals][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfMetals][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'Metals', formatOne)
else:
worksheet.write('B'+str(firstLine), 'Metals', formatOne)
return worksheet, currLine
def addCyanideValues(worksheet, currLine, Cyanide_Values_Raw, formatOne, formatTwo):
Cyanide_Values = valueRefinerMetals(Cyanide_Values_Raw)
worksheet.write('B'+str(currLine), 'Cyanide', formatOne)
offset = 2
for x in range(0, len(Cyanide_Values)):
if(isNumber(Cyanide_Values[x])):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Cyanide_Values[x]), formatTwo)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Cyanide_Values[x]), formatTwo)
else:
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Cyanide_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Cyanide_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Cyanide_Values
def addSemiVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfSemiVolatiles = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Semivolatiles By SW8270D":
indexOfSemiVolatiles = x
for x in range(1, len(rowsInArea[indexOfSemiVolatiles])):
rowsInArea[indexOfSemiVolatiles][x] = valueRefinerMetals(rowsInArea[indexOfSemiVolatiles][x])
rowsInArea[indexOfSemiVolatiles] = removeUselessRows(rowsInArea, indexOfSemiVolatiles)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfSemiVolatiles])):
for y in range(0, len(rowsInArea[indexOfSemiVolatiles][x])):
if(isNumber(rowsInArea[indexOfSemiVolatiles][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfSemiVolatiles][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSemiVolatiles][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfSemiVolatiles][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSemiVolatiles][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'SemiVolatiles', formatOne)
else:
worksheet.write('B'+str(firstLine), 'SemiVolatiles', formatOne)
return worksheet, currLine
def addVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfVolatiles = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Volatiles (TCL) By SW8260C":
indexOfVolatiles = x
for x in range(1, len(rowsInArea[indexOfVolatiles])):
rowsInArea[indexOfVolatiles][x] = valueRefinerMetals(rowsInArea[indexOfVolatiles][x])
rowsInArea[indexOfVolatiles] = removeUselessRows(rowsInArea, indexOfVolatiles)
firstLine = currLine
offset = 2
worksheet.write('B'+str(currLine), 'Volatiles (TCL) By SW8260C')
for x in range(1, len(rowsInArea[indexOfVolatiles])):
for y in range(0, len(rowsInArea[indexOfVolatiles][x])):
if(isNumber(rowsInArea[indexOfVolatiles][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfVolatiles][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVolatiles][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfVolatiles][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVolatiles][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'Volatiles', formatOne)
else:
worksheet.write('B'+str(firstLine), 'Volatiles', formatOne)
return worksheet, currLine
def createSecondHeading(worksheet, currLine, Type_Samples_Collected, formatOne):
worksheet.set_row(currLine-1,50)
worksheet.write('B'+str(currLine), 'RCRA Characteristics ', formatOne)
worksheet.merge_range('C'+str(currLine)+':E'+str(currLine), 'Regulatory Criteria', formatOne)
offset = 4
for x in range(1,len(Type_Samples_Collected)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Type_Samples_Collected[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Type_Samples_Collected[x]), formatOne)
currLine += 1
return worksheet, currLine
def addCorrosivityValues(worksheet, currLine, Corrosivity_Values_Raw, formatOne):
Corrosivity_Values = valueRefinerMetals(Corrosivity_Values_Raw)
worksheet.write('B'+str(currLine), 'Corrosivity', formatOne)
offset = 2
for x in range(0,len(Corrosivity_Values)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Corrosivity_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Corrosivity_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Corrosivity_Values
def addFlashpointValues(worksheet, currLine, Flashpoint_Values_Raw, formatOne):
Flashpoint_Values = []
pos = 0
for value in Flashpoint_Values_Raw:
if(pos == 0):
Flashpoint_Values.append(value.value)
Flashpoint_Values.append(" ")
Flashpoint_Values.append("Degree F")
Flashpoint_Values.append(">200 Degree F")
if((pos >= firstIndex and pos%2 == firstIndex%2)):
Flashpoint_Values.append(value.value)
pos+=1
offset = 1
for x in range(0,len(Flashpoint_Values)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Flashpoint_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Flashpoint_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Flashpoint_Values
def addIgnitabilityValues(worksheet, currLine, Ignitability_Values_Raw, formatOne):
Ignitability_Values = []
pos = 0
for value in Ignitability_Values_Raw:
if(pos == 0):
Ignitability_Values.append(value.value)
Ignitability_Values.append(" ")
Ignitability_Values.append("Degree F")
Ignitability_Values.append("<140 Degree F")
if((pos >= firstIndex and pos%2 == firstIndex%2)):
Ignitability_Values.append(value.value)
pos+=1
offset = 1
for x in range(0,len(Ignitability_Values)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Ignitability_Values[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Ignitability_Values[x]), formatOne)
currLine += 1
return worksheet, currLine, Ignitability_Values
def addReactivityValues(worksheet, currLine, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, formatOne):
Reactivity_Values_Cyanide = valueRefinerMetals(Reactivity_Values_Cyanide_Raw)
worksheet.merge_range('B'+str(currLine)+":B"+str(currLine+1), 'Reactivity', formatOne)
worksheet.write('C'+str(currLine), 'Cyanide', formatOne)
offset = 2
for x in range(1,len(Reactivity_Values_Cyanide)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Reactivity_Values_Cyanide[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Reactivity_Values_Cyanide[x]), formatOne)
currLine += 1
Reactivity_Values_Sulfide = valueRefinerMetals(Reactivity_Values_Sulfide_Raw)
worksheet.write('C'+str(currLine), 'Sulfide', formatOne)
for x in range(1,len(Reactivity_Values_Sulfide)):
if(64+x+offset < 90):
worksheet.write(str(chr(65+x+offset))+str(currLine), str(Reactivity_Values_Sulfide[x]), formatOne)
else:
firstLetter = chr(int(65+(((x+offset)-26)/26)))
secondLetter = chr(65+((x+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(Reactivity_Values_Sulfide[x]), formatOne)
currLine += 1
return worksheet, currLine, Reactivity_Values_Cyanide, Reactivity_Values_Sulfide
def createThirdHeading(worksheet, currLine, Type_Samples_Collected, formatOne):
worksheet.set_row(currLine-1,50)
worksheet.write('B'+str(currLine), 'Toxicity ', formatOne)
worksheet.merge_range('C'+str(currLine)+':E'+str(currLine), 'TCLP Regulatory Criteria', formatOne)
x = len(Type_Samples_Collected)
offset = 4
finalLetter=""
if 64+x+offset > 90:
        firstLetter = chr(int(65+(((x+offset)-26)/26)))
        secondLetter = chr(64+((x+offset)%26))
finalLetter = firstLetter+secondLetter
else:
finalLetter = chr(64+x+offset)
worksheet.merge_range("F"+str(currLine)+":"+finalLetter+str(currLine), "", formatOne)
currLine += 1
return worksheet, currLine
def addTCLPMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfTCLPMetals = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Metals, TCLP":
indexOfTCLPMetals = x
for x in range(1, len(rowsInArea[indexOfTCLPMetals])):
rowsInArea[indexOfTCLPMetals][x] = valueRefinerMetals(rowsInArea[indexOfTCLPMetals][x])
rowsInArea[indexOfTCLPMetals] = removeUselessRows(rowsInArea, indexOfTCLPMetals)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfTCLPMetals])):
for y in range(0, len(rowsInArea[indexOfTCLPMetals][x])):
if(isNumber(rowsInArea[indexOfTCLPMetals][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfTCLPMetals][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPMetals][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfTCLPMetals][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPMetals][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Metals', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP Metals', formatOne)
return worksheet, currLine
def addVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfVOCS = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TCLP Volatiles By SW8260C":
indexOfVOCS = x
for x in range(1, len(rowsInArea[indexOfVOCS])):
rowsInArea[indexOfVOCS][x] = valueRefinerMetals(rowsInArea[indexOfVOCS][x])
rowsInArea[indexOfVOCS] = removeUselessRows(rowsInArea, indexOfVOCS)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfVOCS])):
for y in range(0, len(rowsInArea[indexOfVOCS][x])):
if(isNumber(rowsInArea[indexOfVOCS][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfVOCS][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVOCS][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfVOCS][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVOCS][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Vocs', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP Vocs', formatOne)
return worksheet, currLine
def addSVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfSVOCS = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TCLP Acid/Base-Neutral By SW8270D":
indexOfSVOCS = x
for x in range(1, len(rowsInArea[indexOfSVOCS])):
rowsInArea[indexOfSVOCS][x] = valueRefinerMetals(rowsInArea[indexOfSVOCS][x])
rowsInArea[indexOfSVOCS] = removeUselessRows(rowsInArea, indexOfSVOCS)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfSVOCS])):
for y in range(0, len(rowsInArea[indexOfSVOCS][x])):
if(isNumber(rowsInArea[indexOfSVOCS][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfSVOCS][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSVOCS][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfSVOCS][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSVOCS][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP SVocs', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP SVocs', formatOne)
return worksheet, currLine
def addTCLPPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfTCLPPesticides = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TCLP Pesticides By SW8081B":
indexOfTCLPPesticides = x
for x in range(1, len(rowsInArea[indexOfTCLPPesticides])):
rowsInArea[indexOfTCLPPesticides][x] = valueRefinerMetals(rowsInArea[indexOfTCLPPesticides][x])
rowsInArea[indexOfTCLPPesticides] = removeUselessRows(rowsInArea, indexOfTCLPPesticides)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfTCLPPesticides])):
for y in range(0, len(rowsInArea[indexOfTCLPPesticides][x])):
if(isNumber(rowsInArea[indexOfTCLPPesticides][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfTCLPPesticides][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPPesticides][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfTCLPPesticides][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPPesticides][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Pesticides', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP Pesticides', formatOne)
return worksheet, currLine
def addTCLPHerbicideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
    indexOfHerbicides = 0
    for x in range(0, len(sortedGenAreas)):
        if sortedGenAreas[x][1] == "TCLP Herbicides By SW8151A":
            indexOfHerbicides = x
    for x in range(1, len(rowsInArea[indexOfHerbicides])):
        rowsInArea[indexOfHerbicides][x] = valueRefinerMetals(rowsInArea[indexOfHerbicides][x])
    rowsInArea[indexOfHerbicides] = removeUselessRows(rowsInArea, indexOfHerbicides)
firstLine = currLine
offset = 2
for x in range(1, len(rowsInArea[indexOfHerbicides])):
for y in range(0, len(rowsInArea[indexOfHerbicides][x])):
if(isNumber(rowsInArea[indexOfHerbicides][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfHerbicides][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfHerbicides][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfHerbicides][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfHerbicides][x][y]), formatOne)
currLine += 1
lastLine = currLine - 1
if(lastLine - firstLine != 0):
worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Pesticides / Herbicides', formatOne)
else:
worksheet.write('B'+str(firstLine), 'TCLP Pesticides / Herbicides', formatOne)
return worksheet, currLine
def addTPHValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo):
indexOfGasolineHydrocarbons = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "Gasoline Range Hydrocarbons (C6-C10) By SW8015D":
indexOfGasolineHydrocarbons = x
for x in range(1, len(rowsInArea[indexOfGasolineHydrocarbons])):
rowsInArea[indexOfGasolineHydrocarbons][x] = valueRefinerMetals(rowsInArea[indexOfGasolineHydrocarbons][x])
indexOfDieselHydrocarbons = 0
for x in range(0, len(sortedGenAreas)):
if sortedGenAreas[x][1] == "TPH By SW8015D DRO":
indexOfDieselHydrocarbons = x
for x in range(1, len(rowsInArea[indexOfDieselHydrocarbons])):
rowsInArea[indexOfDieselHydrocarbons][x] = valueRefinerMetals(rowsInArea[indexOfDieselHydrocarbons][x])
offset = 2
worksheet.merge_range('B'+str(currLine)+":B"+str(currLine+1), 'Total Petroleum Hydrocarbons', formatOne)
for x in range(1, len(rowsInArea[indexOfGasolineHydrocarbons])):
for y in range(0, len(rowsInArea[indexOfGasolineHydrocarbons][x])):
if(isNumber(rowsInArea[indexOfGasolineHydrocarbons][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfGasolineHydrocarbons][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfGasolineHydrocarbons][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfGasolineHydrocarbons][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfGasolineHydrocarbons][x][y]), formatOne)
currLine += 1
for x in range(1, len(rowsInArea[indexOfDieselHydrocarbons])):
for y in range(0, len(rowsInArea[indexOfDieselHydrocarbons][x])):
if(isNumber(rowsInArea[indexOfDieselHydrocarbons][x][y])):
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfDieselHydrocarbons][x][y]), formatTwo)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfDieselHydrocarbons][x][y]), formatTwo)
else:
if(64+y+offset < 90):
worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfDieselHydrocarbons][x][y], formatOne)
else:
firstLetter = chr(int(65+(((y+offset)-26)/26)))
secondLetter = chr(65+((y+offset)%26))
col = firstLetter + secondLetter
worksheet.write(col+str(currLine), str(rowsInArea[indexOfDieselHydrocarbons][x][y]), formatOne)
currLine += 1
return worksheet, currLine
print("Writing to Excel File...")
workbook, worksheet = startWritingFinalFile()
worksheet.set_column('B:B', 25)
worksheet.set_column('C:C', 30)
worksheet.set_column('E:E', 15)
worksheet.set_row(5,50)
#Important Information - Titles, etc.
formatOne = workbook.add_format()
formatOne.set_align('center')
formatOne.set_align('vcenter')
formatOne.set_font_name('Arial')
formatOne.set_font_size('12')
formatOne.set_border(6)
#Numbers within the text
formatTwo = workbook.add_format()
formatTwo.set_align('center')
formatTwo.set_align('vcenter')
formatTwo.set_font_name('Arial')
formatTwo.set_font_size('12')
formatTwo.set_border(6)
formatTwo.set_bg_color('#87CEFF')
formatTwo.set_bold()
#Current Line to overwrite each process
currLine = 6
#Heading for each column
worksheet, currLine, Type_Samples_Collected = createHeading(worksheet, currLine, Type_Samples_Collected_Raw, formatOne)
#Adding Mercury Values
worksheet, currLine, Mercury_Values = addMercuryValues(worksheet, currLine, Mercury_Values_Raw, formatOne, formatTwo)
#Adding PCB Values
worksheet, currLine = addPCBValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Pesticide Values
worksheet, currLine = addPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Metal Values
worksheet, currLine = addMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Cyanide Values
worksheet, currLine, Cyanide_Values = addCyanideValues(worksheet, currLine, Cyanide_Values_Raw, formatOne, formatTwo)
#Adding Semi Volatile Organic Compounds
worksheet, currLine = addSemiVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Volatile Organic Compounds
worksheet, currLine = addVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#RCRA Second Heading
worksheet, currLine = createSecondHeading(worksheet, currLine, Type_Samples_Collected, formatOne)
#Adding Corrosivity(pH) Values
worksheet, currLine, Corrosivity_Values = addCorrosivityValues(worksheet, currLine, Corrosivity_Values_Raw, formatOne)
#Adding Flashpoint Values
worksheet, currLine, Flashpoint_Values = addFlashpointValues(worksheet, currLine, Flashpoint_Values_Raw, formatOne)
#Adding Ignitability Values
worksheet, currLine, Ignitability_Values = addIgnitabilityValues(worksheet, currLine, Ignitability_Values_Raw, formatOne)
#Adding Reactivity Values
worksheet, currLine, Reactivity_Values_Cyanide, Reactivity_Values_Sulfide = addReactivityValues(worksheet, currLine, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, formatOne)
#Toxicity Third Heading
worksheet, currLine = createThirdHeading(worksheet, currLine, Type_Samples_Collected, formatOne)
#Adding TCLP Metals(Barium / Lead)
worksheet, currLine = addTCLPMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding TCLP VOCS
worksheet, currLine = addVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding TCLP SVOCS
worksheet, currLine = addSVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding TCLP Pesticides
worksheet, currLine = addTCLPPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding TCLP Herbicides
worksheet, currLine = addTCLPHerbicideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Adding Total Petroleum Hydrocarbons
worksheet, currLine = addTPHValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo)
#Beginning information(Company Name, Address, Dates Samples were collected)
worksheet, currLine = createBeginning(worksheet, currLine)
workbook.close()
print("Done Writing")
| 46.907306 | 241 | 0.632561 | 4,692 | 43,014 | 5.72954 | 0.070546 | 0.024737 | 0.02377 | 0.020459 | 0.698732 | 0.662947 | 0.628538 | 0.542499 | 0.462486 | 0.43786 | 0 | 0.028124 | 0.236179 | 43,014 | 916 | 242 | 46.958515 | 0.790108 | 0.023341 | 0 | 0.48072 | 0 | 0 | 0.03106 | 0.001148 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03856 | false | 0 | 0.006427 | 0 | 0.083548 | 0.010283 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a9823ed404374fd6e8abf22e09c47cf13d68464 | 6,282 | py | Python | lhrhost/robot/robot.py | ethanjli/liquid-handling-robotics | 999ab03c225b4c5382ab9fcac6a4988d0c232c67 | [
"BSD-3-Clause"
] | null | null | null | lhrhost/robot/robot.py | ethanjli/liquid-handling-robotics | 999ab03c225b4c5382ab9fcac6a4988d0c232c67 | [
"BSD-3-Clause"
] | null | null | null | lhrhost/robot/robot.py | ethanjli/liquid-handling-robotics | 999ab03c225b4c5382ab9fcac6a4988d0c232c67 | [
"BSD-3-Clause"
] | 1 | 2018-08-03T17:17:31.000Z | 2018-08-03T17:17:31.000Z | """Abstractions for a liquid-handling robot."""
# Standard imports
import asyncio
import logging
# Local package imports
from lhrhost.robot.p_axis import Axis as PAxis
from lhrhost.robot.x_axis import Axis as XAxis
from lhrhost.robot.y_axis import Axis as YAxis
from lhrhost.robot.z_axis import Axis as ZAxis
from lhrhost.util.cli import Prompt
# Logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class Robot(object):
"""High-level controller for 4-axis liquid-handling robot.
Currently the x-axis is moved manually by the user.
"""
def __init__(self):
"""Initialize member variables."""
self.p = PAxis()
self.z = ZAxis()
self.y = YAxis()
self.x = XAxis()
self.prompt = Prompt(end='', flush=True)
def register_messaging_stack(self, messaging_stack):
"""Associate a messaging stack with the robot.
The messaging stack is used for host-peripheral communication.
"""
messaging_stack.register_response_receivers(
self.p.protocol, self.z.protocol, self.y.protocol, self.x.protocol
)
messaging_stack.register_command_senders(
self.p.protocol, self.z.protocol, self.y.protocol, self.x.protocol
)
async def wait_until_initialized(self):
"""Wait until all axes are initialized."""
await asyncio.gather(
self.p.wait_until_initialized(),
self.z.wait_until_initialized(),
self.y.wait_until_initialized(),
self.x.wait_until_initialized()
)
async def synchronize_values(self):
"""Request the values of all protocol channels."""
await self.p.synchronize_values()
await self.z.synchronize_values()
await self.y.synchronize_values()
await self.x.synchronize_values()
async def load_calibrations(self):
"""Load calibration parameters from json files."""
self.p.load_calibration_json()
self.p.load_preset_json()
self.p.load_tunings_json()
self.z.load_calibration_json()
self.z.load_preset_json()
self.z.load_tunings_json()
self.y.load_calibration_json()
self.y.load_preset_json()
self.y.load_tunings_json()
self.x.load_calibration_json()
self.x.load_preset_json()
self.x.load_tunings_json()
async def ensure_sample_platform_configuration(self, configuration):
"""Ensure that the sample platform is configured as speified."""
await self.prompt(
'Please ensure that the sample platform modules are configured '
'following the "{}" configurationn:'.format(configuration)
)
self.x.configuration = configuration
async def go_to_alignment_hole(self):
"""Move the pipettor head to the alignment hole."""
await self.z.go_to_high_end_position()
await asyncio.gather(
self.y.go_to_alignment_hole(), self.x.go_to_alignment_hole()
)
await self.z.go_to_alignment_hole()
async def align_manually(self):
"""Do a manual alignment of x/y positioning."""
await self.go_to_alignment_hole()
await self.prompt(
'Please move the x-axis and the y-axis so that the pipette tip is '
'directly over the round alignment hole: '
)
await asyncio.gather(self.x.set_alignment(), self.y.set_alignment())
logger.info('Aligned to the zero position at the alignment hole.')
async def go_to_module_position(
self, module_name, x_position, y_position, z_position=None
):
"""Move the pipettor head to the specified x/y position of the module."""
module_type = self.x.get_module_type(module_name)
if (
self.x.current_preset_position is not None and
self.x.at_module(module_name)
):
await self.z.go_to_module_position(module_type, 'far above')
else:
await self.z.go_to_high_end_position()
await asyncio.gather(
self.x.go_to_module_position(module_name, x_position),
self.y.go_to_module_position(module_type, y_position)
)
if z_position is not None:
await self.z.go_to_module_position(module_type, 'far above')
async def intake(self, module_name, volume, height=None):
"""Intake fluid at the specified height.
Height should be a preset z-axis position or a physical z-axis position.
"""
module_type = self.x.get_module_type(module_name)
if height is not None:
try:
await self.z.go_to_module_position(module_type, height)
except KeyError:
await self.z.go_to_physical_position(height)
await self.p.intake(volume)
async def intake_precise(self, module_name, volume, height=None):
"""Intake fluid at the specified height.
Height should be a preset z-axis position or a physical z-axis position.
Volume should be either 20, 30, 40, 50, or 100.
"""
module_type = self.x.get_module_type(module_name)
if height is None:
if self.z.current_preset_position is not None:
height = self.z.current_preset_position[1]
else:
height = await self.z.physical_position
await self.z.go_to_module_position(module_type, 'above')
await self.p.go_to_pre_intake(volume)
try:
await self.z.go_to_module_position(module_type, height)
except KeyError:
await self.z.go_to_physical_position(height)
await self.p.intake(volume)
async def dispense(self, module_name, volume=None, height=None):
"""Dispense fluid at the specified height.
If volume is none, dispenses all syringe contents.
Height should be a preset z-axis position or a physical z-axis position.
"""
module_type = self.x.get_module_type(module_name)
if height is not None:
try:
await self.z.go_to_module_position(module_type, height)
except KeyError:
await self.z.go_to_physical_position(height)
await self.p.dispense(volume)
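# A minimal usage sketch (not part of the original module). It only calls
# methods defined above; `messaging_stack` stands for whatever MessagingStack
# object the host application provides, and 'module_1' is a placeholder
# module name, with 'above' being one of the preset heights used above.
async def example_session(messaging_stack):
    robot = Robot()
    robot.register_messaging_stack(messaging_stack)
    await robot.wait_until_initialized()
    await robot.load_calibrations()
    await robot.align_manually()
    await robot.intake('module_1', 50, height='above')
    await robot.dispense('module_1')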
| 37.616766 | 81 | 0.650748 | 836 | 6,282 | 4.683014 | 0.19378 | 0.055172 | 0.03576 | 0.036782 | 0.407918 | 0.356833 | 0.314432 | 0.304215 | 0.304215 | 0.294508 | 0 | 0.002804 | 0.262018 | 6,282 | 166 | 82 | 37.843373 | 0.841674 | 0.053327 | 0 | 0.304348 | 0 | 0 | 0.054978 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017391 | false | 0 | 0.06087 | 0 | 0.086957 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a98fddec1d3cbb33837e1f1d6a6a5b060873682 | 12,100 | py | Python | python/tinkoff/cloud/longrunning/v1/longrunning_pb2_grpc.py | qwertBR/voicekit-examples | 273a63e4cf11841339108cdcdf8b485b7c96298a | [
"Apache-2.0"
] | 3 | 2022-02-11T04:34:18.000Z | 2022-03-29T19:35:57.000Z | python/tinkoff/cloud/longrunning/v1/longrunning_pb2_grpc.py | qwertBR/voicekit-examples | 273a63e4cf11841339108cdcdf8b485b7c96298a | [
"Apache-2.0"
] | 3 | 2022-01-27T15:40:38.000Z | 2022-03-31T10:03:35.000Z | python/tinkoff/cloud/longrunning/v1/longrunning_pb2_grpc.py | qwertBR/voicekit-examples | 273a63e4cf11841339108cdcdf8b485b7c96298a | [
"Apache-2.0"
] | 5 | 2022-01-27T15:15:06.000Z | 2022-03-24T22:06:18.000Z | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from tinkoff.cloud.longrunning.v1 import longrunning_pb2 as tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2
class OperationsStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetOperation = channel.unary_unary(
'/tinkoff.cloud.longrunning.v1.Operations/GetOperation',
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.GetOperationRequest.SerializeToString,
response_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.FromString,
)
self.WaitOperation = channel.unary_unary(
'/tinkoff.cloud.longrunning.v1.Operations/WaitOperation',
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WaitOperationRequest.SerializeToString,
response_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.FromString,
)
self.ListOperations = channel.unary_unary(
'/tinkoff.cloud.longrunning.v1.Operations/ListOperations',
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsRequest.SerializeToString,
response_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsResponse.FromString,
)
self.WatchOperations = channel.unary_stream(
'/tinkoff.cloud.longrunning.v1.Operations/WatchOperations',
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsRequest.SerializeToString,
response_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsResponse.FromString,
)
self.DeleteOperation = channel.unary_unary(
'/tinkoff.cloud.longrunning.v1.Operations/DeleteOperation',
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.DeleteOperationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.CancelOperation = channel.unary_unary(
'/tinkoff.cloud.longrunning.v1.Operations/CancelOperation',
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.CancelOperationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class OperationsServicer(object):
"""Missing associated documentation comment in .proto file."""
def GetOperation(self, request, context):
"""Starts polling for operation statuses
Returns operation status
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WaitOperation(self, request, context):
"""Wait for operation update
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListOperations(self, request, context):
"""List operations
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WatchOperations(self, request, context):
"""Watch operations
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteOperation(self, request, context):
"""Deletes specified operations
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CancelOperation(self, request, context):
"""Cancels specified operations
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OperationsServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetOperation': grpc.unary_unary_rpc_method_handler(
servicer.GetOperation,
request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.GetOperationRequest.FromString,
response_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.SerializeToString,
),
'WaitOperation': grpc.unary_unary_rpc_method_handler(
servicer.WaitOperation,
request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WaitOperationRequest.FromString,
response_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.SerializeToString,
),
'ListOperations': grpc.unary_unary_rpc_method_handler(
servicer.ListOperations,
request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsRequest.FromString,
response_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsResponse.SerializeToString,
),
'WatchOperations': grpc.unary_stream_rpc_method_handler(
servicer.WatchOperations,
request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsRequest.FromString,
response_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsResponse.SerializeToString,
),
'DeleteOperation': grpc.unary_unary_rpc_method_handler(
servicer.DeleteOperation,
request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.DeleteOperationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'CancelOperation': grpc.unary_unary_rpc_method_handler(
servicer.CancelOperation,
request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.CancelOperationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'tinkoff.cloud.longrunning.v1.Operations', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Operations(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def GetOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/GetOperation',
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.GetOperationRequest.SerializeToString,
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def WaitOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/WaitOperation',
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WaitOperationRequest.SerializeToString,
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListOperations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/ListOperations',
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsRequest.SerializeToString,
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def WatchOperations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/tinkoff.cloud.longrunning.v1.Operations/WatchOperations',
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsRequest.SerializeToString,
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/DeleteOperation',
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.DeleteOperationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CancelOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/CancelOperation',
tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.CancelOperationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
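# Usage sketch (not part of the generated file): wiring a servicer subclass into
# a server and calling it through the stub. The address/port are placeholders,
# and the request field names come from longrunning.proto, so the bare
# ListOperationsRequest() here is only illustrative.
from concurrent import futures

def example_serve(servicer, address='[::]:50051'):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_OperationsServicer_to_server(servicer, server)
    server.add_insecure_port(address)
    server.start()
    server.wait_for_termination()

def example_list_operations(address='localhost:50051'):
    with grpc.insecure_channel(address) as channel:
        stub = OperationsStub(channel)
        request = tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsRequest()
        return stub.ListOperations(request)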
| 50.416667 | 144 | 0.705702 | 1,158 | 12,100 | 6.948187 | 0.105354 | 0.10788 | 0.057793 | 0.069351 | 0.842158 | 0.837808 | 0.809098 | 0.779518 | 0.740492 | 0.726324 | 0 | 0.00911 | 0.228926 | 12,100 | 239 | 145 | 50.627615 | 0.853269 | 0.051736 | 0 | 0.559585 | 1 | 0 | 0.093099 | 0.061451 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072539 | false | 0 | 0.015544 | 0.031088 | 0.134715 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
5a9ae867d70612f2ffe19fc29174270aa28f529d | 602 | py | Python | src/astro/utils/schema_util.py | malthe/astro | 61036ab08b2826136f64fdbfc6ca21b61643a8a0 | [
"Apache-2.0"
] | null | null | null | src/astro/utils/schema_util.py | malthe/astro | 61036ab08b2826136f64fdbfc6ca21b61643a8a0 | [
"Apache-2.0"
] | null | null | null | src/astro/utils/schema_util.py | malthe/astro | 61036ab08b2826136f64fdbfc6ca21b61643a8a0 | [
"Apache-2.0"
] | null | null | null | import os
from airflow.providers.postgres.hooks.postgres import PostgresHook
from psycopg2 import sql
def set_schema_query(conn_type, hook, schema_id, user):
    """Build a CREATE SCHEMA IF NOT EXISTS statement for the given connection type."""
if conn_type == "postgres":
return (
sql.SQL("CREATE SCHEMA IF NOT EXISTS {schema} AUTHORIZATION {user}")
.format(schema=sql.Identifier(schema_id), user=sql.Identifier(user))
.as_string(hook.get_conn())
)
elif conn_type == "snowflake":
return f"CREATE SCHEMA IF NOT EXISTS {schema_id}"
def get_schema():
    """Return the schema name from AIRFLOW__ASTRO__SQL_SCHEMA, defaulting to "tmp_astro"."""
return os.getenv("AIRFLOW__ASTRO__SQL_SCHEMA") or "tmp_astro"
| 28.666667 | 80 | 0.682724 | 80 | 602 | 4.925 | 0.4625 | 0.060914 | 0.060914 | 0.086294 | 0.147208 | 0.147208 | 0 | 0 | 0 | 0 | 0 | 0.00211 | 0.212625 | 602 | 20 | 81 | 30.1 | 0.829114 | 0 | 0 | 0 | 0 | 0 | 0.245847 | 0.043189 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.214286 | 0.071429 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
5a9d0c2e4e731186b891bcd4f534edfa2b33e353 | 1,599 | py | Python | references/stm32_parsing_sim/stm32parser.py | koson/OTA_update_STM32_using_ESP32 | 7fe7ae64d5290c0a453c29d787b5fe9456910e96 | [
"MIT"
] | 155 | 2020-02-15T06:54:15.000Z | 2021-09-16T07:19:19.000Z | references/stm32_parsing_sim/stm32parser.py | ksmola/OTA_update_STM32_using_ESP32 | dd616a1212da8f874e4826d63cfbd4c3b9ad2df2 | [
"MIT"
] | 8 | 2020-10-09T08:56:52.000Z | 2021-09-01T03:42:49.000Z | references/stm32_parsing_sim/stm32parser.py | ksmola/OTA_update_STM32_using_ESP32 | dd616a1212da8f874e4826d63cfbd4c3b9ad2df2 | [
"MIT"
] | 25 | 2020-03-16T04:41:12.000Z | 2021-08-19T11:49:40.000Z |
import time
import math as m
start = time.time()
def checksum(block):
    """XOR-fold every byte of each block into a single checksum byte per block."""
data_chk = []
xor = '0'
for line in block:
for x in range(0, len(line) - 1, 2):
xor = hex(int(xor, 16) ^ int(line[x], 16) ^ int(line[x + 1], 16))
data_chk.append(xor)
xor = '0'
return data_chk
def stm_parser():
    """Split main.bin into 256-byte blocks and simulate flashing them over the STM32 bootloader protocol."""
data, block, d_chk = [], [], []
addr = '0x08000000'
start, i, x = 0, 0, 0
with open("main.bin", "rb") as f:
byte = f.read(1)
while byte:
for b in byte:
data.append(hex(b))
byte = f.read(1)
while(start <= len(data)):
block.append(data[start : start + 256])
start = start + 256
count = int(len(block))
end = len(block) - 1
i_addr = int(addr, 16)
while(len(block[end]) % 256 != 0):
block[end].append(hex(255))
d_chk = checksum(block)
print("Start Flashing")
while(i < count):
l_addr = hex(int(i_addr % 65536))
h_addr = hex(int(i_addr / 65536))
i_addr += 256
a_chk = hex(int(l_addr, 16) ^ int(h_addr, 16))
print("Sending WRITE MEMORY Command...")
print("0x31", "0xCE")
time.sleep(0.1)
print("Sending Address...")
print(h_addr, l_addr, a_chk)
time.sleep(0.1)
print("Sending Data...")
# print(i + 1, block[i], d_chk[i])
print(i + 1, "BLOCK", d_chk[i])
time.sleep(0.1)
i+=1
print()
print("Done Flashing")
stm_parser()
print('Time (ms):', 1000*(time.time() - start))
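# Worked example of the XOR checksum above (illustration only): for the
# two-byte block ['0xff', '0x0f'] the running value is
# 0x00 ^ 0xff ^ 0x0f = 0xf0, so checksum([['0xff', '0x0f']]) == ['0xf0'].
# The same send-a-byte-with-its-XOR-complement convention shows up in the
# WRITE MEMORY command pair above (0xCE is the bitwise complement of 0x31),
# and a_chk is the XOR of the two address bytes.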
| 20.766234 | 77 | 0.499062 | 230 | 1,599 | 3.378261 | 0.273913 | 0.030888 | 0.03861 | 0.042471 | 0.149292 | 0.110682 | 0 | 0 | 0 | 0 | 0 | 0.071698 | 0.337086 | 1,599 | 76 | 78 | 21.039474 | 0.661321 | 0.020013 | 0 | 0.137255 | 0 | 0 | 0.086957 | 0 | 0 | 0 | 0.011509 | 0 | 0 | 1 | 0.039216 | false | 0 | 0.039216 | 0 | 0.098039 | 0.196078 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a9d5971fc9f0f68b3b0383f6727b85c4065e172 | 3,034 | py | Python | sw/device/silicon_creator/lib/crypto/tests/testvectors/wycheproof/rsa_3072_verify_parse_testvectors.py | matutem/opentitan | a41c0a57568f1dc8263a4ecc3913f190750959f5 | [
"Apache-2.0"
] | null | null | null | sw/device/silicon_creator/lib/crypto/tests/testvectors/wycheproof/rsa_3072_verify_parse_testvectors.py | matutem/opentitan | a41c0a57568f1dc8263a4ecc3913f190750959f5 | [
"Apache-2.0"
] | null | null | null | sw/device/silicon_creator/lib/crypto/tests/testvectors/wycheproof/rsa_3072_verify_parse_testvectors.py | matutem/opentitan | a41c0a57568f1dc8263a4ecc3913f190750959f5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import argparse
import json
import math
import sys
import hjson
def parse_hex_int(hex_str):
# int() throws an error message for empty string
if hex_str == '':
return 0
return int(hex_str, 16)
def parse_test(raw_data, n, e, t):
test = {'n': n, 'e': e}
test['msg'] = parse_hex_int(t['msg'])
# Message is expressed in hex notation, so the length in bytes is
# the number of characters / 2
test['msg_len'] = math.ceil(len(t['msg']) / 2)
test['signature'] = parse_hex_int(t['sig'])
notes = []
if t['comment']:
notes.append(t['comment'])
# Add notes from flags, if any
notes.extend([raw_data['notes'][flag] for flag in t['flags']])
# cases for expected result
if t['result'] == 'valid':
test['valid'] = True
elif t['result'] == 'invalid':
test['valid'] = False
elif t['result'] == 'acceptable':
if t['comment'] == 'short signature':
# We consider short signatures valid
test['valid'] = True
else:
# err on the side of caution and reject "acceptable" signatures otherwise
test['valid'] = False
notes.append('signature marked as acceptable by wycheproof')
else:
raise RuntimeError('Unexpected result type {}'.format(t['result']))
test['comment'] = 'wycheproof test with tcId={:d}, notes={}'.format(
t["tcId"], ', '.join(notes))
return test
def parse_test_group(raw_data, group):
tests = []
n = parse_hex_int(group['n'])
e = parse_hex_int(group['e'])
for t in group['tests']:
tests.append(parse_test(raw_data, n, e, t))
return tests
def parse_test_vectors(raw_data):
if raw_data['algorithm'] != 'RSASSA-PKCS1-v1_5':
raise RuntimeError('Unexpected algorithm: {}, expected {}'.format(
raw_data['algorithm'], 'RSASSA-PKCS1-v1_5'))
tests = []
for group in raw_data['testGroups']:
if group['sha'] != 'SHA-256':
raise RuntimeError(
'Unexpected hash function: {}, expected {}'.format(
group['sha'], 'SHA-256'))
tests.extend(parse_test_group(raw_data, group))
return tests
def main():
parser = argparse.ArgumentParser()
parser.add_argument('src',
metavar='FILE',
type=argparse.FileType('r'),
help='Read test vectors from this JSON file.')
parser.add_argument('dst',
metavar='FILE',
type=argparse.FileType('w'),
help='Write output to this file.')
args = parser.parse_args()
testvecs = parse_test_vectors(json.load(args.src))
args.src.close()
hjson.dump(testvecs, args.dst)
args.dst.close()
return 0
if __name__ == '__main__':
sys.exit(main())
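# Minimal sketch of the Wycheproof JSON shape consumed above. The field names
# are taken from the parsing code; the values are illustrative, not a real
# test vector:
#
# {
#   "algorithm": "RSASSA-PKCS1-v1_5",
#   "notes": {"SmallModulus": "modulus is unusually small"},
#   "testGroups": [{
#     "n": "00c0ffee...", "e": "010001", "sha": "SHA-256",
#     "tests": [{
#       "tcId": 1, "comment": "", "flags": [],
#       "msg": "313233", "sig": "0a1b2c...", "result": "valid"
#     }]
#   }]
# }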
| 28.622642 | 85 | 0.586684 | 385 | 3,034 | 4.503896 | 0.366234 | 0.036332 | 0.031719 | 0.018454 | 0.122261 | 0.086505 | 0.056517 | 0 | 0 | 0 | 0 | 0.010412 | 0.271918 | 3,034 | 105 | 86 | 28.895238 | 0.774559 | 0.152933 | 0 | 0.2 | 0 | 0 | 0.202814 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5a9e608dad9679e93ef744bc1105dbf95d043cce | 2,479 | py | Python | Tools/boot_now.py | wms124/PX4_1.4.1_Back-up | 9d6d903a8f46346281ae11104c47f1904da05e37 | [
"BSD-3-Clause"
] | 4,224 | 2015-01-02T11:51:02.000Z | 2020-10-27T23:42:28.000Z | Tools/boot_now.py | wms124/PX4_1.4.1_Back-up | 9d6d903a8f46346281ae11104c47f1904da05e37 | [
"BSD-3-Clause"
] | 11,736 | 2015-01-01T11:59:16.000Z | 2020-10-28T17:13:38.000Z | Tools/boot_now.py | wms124/PX4_1.4.1_Back-up | 9d6d903a8f46346281ae11104c47f1904da05e37 | [
"BSD-3-Clause"
] | 11,850 | 2015-01-02T14:54:47.000Z | 2020-10-28T16:42:47.000Z | #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2012-2015 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
# send BOOT command to a device
import argparse
import serial, sys
from sys import platform as _platform
# Parse commandline arguments
parser = argparse.ArgumentParser(description="Send boot command to a device")
parser.add_argument('--baud', action="store", type=int, default=115200, help="Baud rate of the serial port")
parser.add_argument('port', action="store", help="Serial port(s) to which the FMU may be attached")
args = parser.parse_args()
REBOOT = b'\x30'
EOC = b'\x20'
print("Sending reboot to %s" % args.port)
try:
port = serial.Serial(args.port, args.baud, timeout=0.5)
except Exception:
print("Unable to open %s" % args.port)
sys.exit(1)
port.write(REBOOT + EOC)
port.close()
sys.exit(0)
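# Example invocation (the serial device path is a placeholder for wherever the
# FMU enumerates on your machine):
#
#   python boot_now.py --baud 115200 /dev/ttyACM0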
| 41.316667 | 108 | 0.702299 | 341 | 2,479 | 5.093842 | 0.533724 | 0.020725 | 0.019574 | 0.026482 | 0.133564 | 0.10593 | 0.078296 | 0.078296 | 0.078296 | 0.078296 | 0 | 0.013069 | 0.166599 | 2,479 | 59 | 109 | 42.016949 | 0.827686 | 0.632513 | 0 | 0 | 0 | 0 | 0.232782 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5aa27bded824f5d2ed1668d9730e5fcc75bdeb84 | 550 | py | Python | cc_server/commons/database.py | curious-containers/cc-server | 3d4f0e39d2a974ee239a455b238ac8a3bfcddb73 | [
"Apache-2.0"
] | 4 | 2016-09-05T15:01:01.000Z | 2016-11-17T12:36:41.000Z | cc_server/commons/database.py | curious-containers/cc-server | 3d4f0e39d2a974ee239a455b238ac8a3bfcddb73 | [
"Apache-2.0"
] | null | null | null | cc_server/commons/database.py | curious-containers/cc-server | 3d4f0e39d2a974ee239a455b238ac8a3bfcddb73 | [
"Apache-2.0"
] | null | null | null | import pymongo
class Mongo:
def __init__(self, config):
self._config = config
self.client = pymongo.MongoClient('mongodb://{}:{}@{}:{}/{}'.format(
self._config.mongo['username'],
self._config.mongo['password'],
self._config.mongo['host'],
self._config.mongo['port'],
self._config.mongo['db']
))
self.db = self.client[self._config.mongo['db']]
def drop_db_collections(self, collections):
    """Drop each of the given collections from the configured database."""
for c in collections:
self.db[c].drop()
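# Usage sketch (not part of the module): any object exposing a `mongo` mapping
# with these keys works as the config; the values below are placeholders.
class ExampleConfig:
    mongo = {
        'username': 'cc_user',
        'password': 'secret',
        'host': 'localhost',
        'port': 27017,
        'db': 'cc_server',
    }

def example_usage():
    mongo = Mongo(ExampleConfig())
    mongo.drop_db_collections(['tasks'])
    return mongo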
| 28.947368 | 76 | 0.558182 | 59 | 550 | 4.983051 | 0.372881 | 0.272109 | 0.306122 | 0.115646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.278182 | 550 | 18 | 77 | 30.555556 | 0.740554 | 0 | 0 | 0 | 0 | 0 | 0.094545 | 0.043636 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0.066667 | 0.066667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 3 |
5aa3314ea9abea60e379f77782cb7a74e3c4aa9a | 543 | py | Python | coord2vec/evaluation/visualizations/tests/test_bokeh_pr_curve.py | jonzarecki/coord2vec | 4f267fdd87af7b3d3558ca834b88e9ab7c309c18 | [
"Apache-2.0"
] | null | null | null | coord2vec/evaluation/visualizations/tests/test_bokeh_pr_curve.py | jonzarecki/coord2vec | 4f267fdd87af7b3d3558ca834b88e9ab7c309c18 | [
"Apache-2.0"
] | null | null | null | coord2vec/evaluation/visualizations/tests/test_bokeh_pr_curve.py | jonzarecki/coord2vec | 4f267fdd87af7b3d3558ca834b88e9ab7c309c18 | [
"Apache-2.0"
] | 1 | 2021-01-25T09:21:17.000Z | 2021-01-25T09:21:17.000Z | from unittest import TestCase
import numpy as np
from bokeh.models import LayoutDOM
from coord2vec.evaluation.visualizations.bokeh_plots import bokeh_pr_curve_from_y_proba
class TestBokeh_pr_curve(TestCase):
@classmethod
def setUpClass(cls):
cls.y_pred = np.random.choice((0, 1), size=10)
cls.y_true = np.random.choice((0, 1), size=10)
def test_bokeh_pr_curve(self):
fig = bokeh_pr_curve_from_y_proba(self.y_pred, self.y_true, legend='Zarecki is special')
self.assertIsInstance(fig, LayoutDOM)
| 30.166667 | 96 | 0.740331 | 82 | 543 | 4.658537 | 0.487805 | 0.073298 | 0.094241 | 0.08377 | 0.230366 | 0.230366 | 0.115183 | 0 | 0 | 0 | 0 | 0.019912 | 0.167587 | 543 | 17 | 97 | 31.941176 | 0.825221 | 0 | 0 | 0 | 0 | 0 | 0.033149 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.166667 | false | 0 | 0.333333 | 0 | 0.583333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
5aa44f0e70c5feb1742576c75322458cf3b62dfc | 5,267 | py | Python | scripts/pyBusPirateLite/shtxx.py | bopopescu/Bus-Pirate-1 | 62a449f4aa6edb2160e00537f06024b2e1ce6c48 | [
"CC0-1.0"
] | null | null | null | scripts/pyBusPirateLite/shtxx.py | bopopescu/Bus-Pirate-1 | 62a449f4aa6edb2160e00537f06024b2e1ce6c48 | [
"CC0-1.0"
] | null | null | null | scripts/pyBusPirateLite/shtxx.py | bopopescu/Bus-Pirate-1 | 62a449f4aa6edb2160e00537f06024b2e1ce6c48 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# Based on Microwire.py and the Hackaday Bus Pirate/SHT tutorial
import sys,time
from optparse import OptionParser
from pyBusPirateLite.RAW_WIRE import *
def sht_command(rw, data):
# clear the SHT interface (connection reset: DATA high while toggling SCK nine times)
rw.data_high()
for i in range(9):
rw.clk_tick()
#start condition
rw.data_high()
rw.clk_high()
rw.data_low()
rw.clk_low()
rw.clk_high()
rw.data_high()
rw.clk_low()
#command
rw.bulk_trans(1, [data])
#read and return bit
return rw.read_bit()
def sht_acknowledge(rw):
#acknowledge
rw.data_low()
rw.clk_tick()
def sht_wait_conversion_finished(rw, options):
while 1:
mosi_status = (ord(rw.read_pins()) & BBIOPins.MOSI) >> (BBIOPins.MOSI-1)
if mosi_status:
if options.verbose:
print 'waiting...'
else:
if options.verbose:
print 'conversion done'
time.sleep(0.1)
break
def sht_temperature(rw, options):
#soft reset
status = sht_command(rw, 0b00011110)
if options.verbose:
print 'acknowledgment status:', ord(status)
if not ord(status): # read_bit() returns a one-byte string, so test its value, not string truthiness
print "Error resetting SHT"
#start temperature conversion
status = sht_command(rw, 0b00000011)
if options.verbose:
print 'acknowledgment status:', ord(status)
if not ord(status):
print "Error starting temperature conversion SHT"
sht_wait_conversion_finished(rw, options)
data = list()
for i in range(3):
data.append(ord(rw.read_byte()))
sht_acknowledge(rw)
temp_hb, temp_lb ,temp_crc = data
#temp_hb = 0x17 #for formula testing
#temp_lb = 0xcc #for formula testing
#print (temp_hb<<8)+temp_lb
temp = -39.7 + 0.01 * ((temp_hb<<8)+temp_lb)
if options.verbose:
print 'temp_hb:', temp_hb
print 'temp_lb:', temp_lb
print 'temp_crc:', temp_crc
print 'temp:', temp
return temp
def sht_humidity(rw, options):
#soft reset
status = sht_command(rw, 0b00011110)
if options.verbose:
print 'acknowledgment status:', ord(status)
if not ord(status):
print "Error resetting SHT"
#start humidity conversion
status = sht_command(rw, 0b00000101)
if options.verbose:
print 'acknowledgment status:', ord(status)
if not ord(status):
print "Error starting humidity conversion SHT"
#time.sleep(1)
sht_wait_conversion_finished(rw, options)
data = list()
for i in range(3):
data.append(ord(rw.read_byte()))
sht_acknowledge(rw)
hum_hb, hum_lb ,hum_crc = data
#hum_hb = 0x05 #for formula testing
#hum_lb = 0x80 #for formula testing
#print (hum_hb<<8)+hum_lb
hum = -2.0468 + 0.0367*((hum_hb<<8)+hum_lb) + (-0.0000015955*(((hum_hb<<8)+hum_lb)**2))
if options.verbose:
print 'hum_hb:', hum_hb
print 'hum_lb:', hum_lb
print 'hum_crc:', hum_crc
print 'hum:', hum
return hum
def main():
# First of all parse the command line
parser = OptionParser()
parser.add_option("-d", "--device", dest="device", help="serial interface where bus pirate is in.[/dev/bus_pirate]", default="/dev/bus_pirate")
parser.add_option("-t", "--temperature", action="store_true", dest="temperature", help="get temperature from sht.", default=False)
parser.add_option("-H", "--humidity", dest="humidity", action="store_true", help="get humidity from sht.", default=False)
parser.add_option("-v", "--verbose", dest="verbose", help="don't be quiet.", action="store_true")
(options,args) = parser.parse_args()
if not (options.temperature or options.humidity):
parser.print_help()
exit()
# Create an instance of the RAW_WIRE class as we are using the BitBang/RAW_WIRE mode
rw = RAW_WIRE( options.device, 115200 )
if not rw.BBmode():
print "Can't enter into BitBang mode."
exit()
# We have succesfully activated the BitBang Mode, so we continue with
# the raw-wire mode.
if not rw.enter_rawwire():
print "Can't enable the raw-wire mode."
exit()
# Now we have raw-wire mode enabled, so first configure peripherals
# (Power, PullUps, AUX, CS)
if not rw.raw_cfg_pins( PinCfg.POWER | PinCfg.PULLUPS):
print "Error enabling the internal voltage regulators."
# Configure the raw-wire mode
if not rw.cfg_raw_wire( (RAW_WIRECfg.BIT_ORDER & RAW_WIRE_BIT_ORDER_TYPE.MSB) | (RAW_WIRECfg.WIRES & RAW_WIRE_WIRES_TYPE.TWO) | (RAW_WIRECfg.OUT_TYPE & RAW_WIRE_OUT_TYPE.HIZ) ):
print "Error configuring the raw-wire mode."
# Set raw-wire speed
if not rw.set_speed( RAW_WIRESpeed._5KHZ ):
print "Error setting raw-wire speed."
if options.temperature:
print "Measuring temperature..."
temperature = sht_temperature(rw, options)
print "Temperature: %f°C" % temperature
if options.humidity:
print "Measuring humidity..."
humidity = sht_humidity(rw, options)
print "Humidity: %f%%" % humidity
# Reset the bus pirate
rw.resetBP();
if __name__ == '__main__':
main()
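# Worked check of the conversion formulas above, using the "for formula
# testing" values left in the comments:
#   temperature: (0x17 << 8) + 0xcc = 6092 -> -39.7 + 0.01 * 6092 = 21.22 C
#   humidity:    (0x05 << 8) + 0x80 = 1408 ->
#                -2.0468 + 0.0367 * 1408 - 0.0000015955 * 1408**2 ~= 46.46 %RH
# Example invocation (the serial device path is a placeholder):
#   python shtxx.py -d /dev/bus_pirate -t -H -v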
| 30.445087 | 181 | 0.63034 | 709 | 5,267 | 4.519041 | 0.275035 | 0.032772 | 0.03995 | 0.052434 | 0.294007 | 0.235955 | 0.225343 | 0.191011 | 0.191011 | 0.191011 | 0 | 0.024484 | 0.255553 | 5,267 | 172 | 182 | 30.622093 | 0.7924 | 0.152269 | 0 | 0.392857 | 0 | 0 | 0.177572 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.026786 | null | null | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5aa4c977dc126a0fbb76c33235561df665c5a977 | 10,248 | py | Python | pyalp/stimulus/film.py | BaptisteLefebvre/pyalp | 05cb8ff9e66f95ed9c70a8ab8a91c78794f7350a | [
"MIT"
] | 1 | 2020-11-09T09:23:11.000Z | 2020-11-09T09:23:11.000Z | pyalp/stimulus/film.py | BaptisteLefebvre/pyalp | 05cb8ff9e66f95ed9c70a8ab8a91c78794f7350a | [
"MIT"
] | null | null | null | pyalp/stimulus/film.py | BaptisteLefebvre/pyalp | 05cb8ff9e66f95ed9c70a8ab8a91c78794f7350a | [
"MIT"
] | 1 | 2020-11-09T09:23:19.000Z | 2020-11-09T09:23:19.000Z | import gc
import os
import pyalp.io
import pyalp.sequence
import pyalp.utils
from .base import Stimulus
class Film(Stimulus):
"""Film stimulus
Parameters
----------
bin_pathname: none | string, optional
Path name to the .bin file.
vec_pathname: none | string, optional
Path name to the .vec file.
rate: float, optional
Frame rate [Hz]. The default value is 30.0.
sequence_size: integer, optional
Number of frames each sequence. The default value is 200.
interactive: boolean, optional
Specify if it should prompt the input parameters. The default value is False.
verbose: boolean, optional
Verbose mode. The default value is False.
"""
dirname = os.path.join("E:", "BINVECS")
# dirname = os.path.expanduser(os.path.join("~", ".pyalp", "films")) # TODO remove.
def __init__(self, bin_pathname=None, vec_pathname=None, rate=30.0, sequence_size=200,
interactive=False, verbose=False):
Stimulus.__init__(self)
self.bin_pathname = bin_pathname
self.vec_pathname = vec_pathname
self.rate = rate
self.sequence_size = sequence_size
if interactive:
self.prompt_input_arguments()
# Read .vec file.
self.frame_ids = pyalp.io.load_vec(self.vec_pathname)
self.nb_frames = len(self.frame_ids)
self.nb_sequences = int(self.nb_frames / self.sequence_size)
self.nb_cycles = int(self.nb_sequences / 2)
# Read header of .bin file.
self.bin_header = pyalp.io.load_bin_header(self.bin_pathname)
if verbose:
self.print_settings()
def prompt_input_arguments(self, sep=""):
"""Prompt the input arguments.
Parameter
---------
sep: string, optional
Prompt separator. The default value is \"\"
"""
print(sep)
# Print all the user directories.
user_dirnames = os.listdir(self.dirname)
for user_dirname_id, user_dirname in enumerate(user_dirnames):
print(" {}. {}".format(user_dirname_id, user_dirname))
# Prompt user identifier.
prompt = "Enter the user number (e.g. 0): "
user_id = pyalp.utils.input(prompt, int)
user_dirname = user_dirnames[user_id]
user_pathname = os.path.join(self.dirname, user_dirname)
print(sep)
# Print all the .bin files.
bin_pathname = os.path.join(user_pathname, "Bin")
bin_filenames = [name for name in os.listdir(bin_pathname) if os.path.isfile(os.path.join(bin_pathname, name))]
for bin_filename_id, bin_filename in enumerate(bin_filenames):
print(" {}. {}".format(bin_filename_id, bin_filename))
# Prompt .bin filename identifier.
prompt = "Enter the .bin file number (e.g. 0): "
bin_id = pyalp.utils.input(prompt, int)
bin_filename = bin_filenames[bin_id]
self.bin_pathname = os.path.join(bin_pathname, bin_filename)
print(sep)
# Print all the .vec files.
vec_pathname = os.path.join(user_pathname, "Vec")
vec_filenames = [name for name in os.listdir(vec_pathname) if os.path.isfile(os.path.join(vec_pathname, name))]
for vec_filename_id, vec_filename in enumerate(vec_filenames):
print(" {}. {}".format(vec_filename_id, vec_filename))
# Prompt .vec filename identifier.
prompt = "Enter the .vec file number (e.g. 0): "
vec_id = pyalp.utils.input(prompt, int)
vec_filename = vec_filenames[vec_id]
self.vec_pathname = os.path.join(vec_pathname, vec_filename)
print(sep)
# Prompt the frame rate.
prompt = "Enter the frame rate [Hz] (e.g. {}): ".format(self.rate)
self.rate = pyalp.utils.input(prompt, float)
print(sep)
# Prompt the advanced features.
prompt = "Advanced features (y/n): "
advanced = pyalp.utils.input(prompt, lambda arg: arg == "y")
if advanced:
# Prompt the number of frames in each sequence.
prompt = "Number of frames in each sequence (e.g. {}): ".format(self.sequence_size)
self.sequence_size = pyalp.utils.input(prompt, int)
print(sep)
return
def print_settings(self):
"""Print settings."""
print("----------------- Film stimulus ------------------")
print(".bin pathname: {}".format(self.bin_pathname))
print(".vec pathname: {}".format(self.vec_pathname))
print("frame rate: {} Hz".format(self.rate))
print("sequence size: {}".format(self.sequence_size))
print("number of frames: {}".format(self.nb_frames))
print("number of sequences: {}".format(self.nb_sequences))
print("number of cycles: {}".format(self.nb_cycles))
print(".bin header: {}".format(self.bin_header))
print("--------------------------------------------------")
print("")
return
def display(self, device):
"""Display stimulus.
Parameter
---------
device: Device
ALP device.
"""
sequence_1 = None
sequence_2 = None
if self.nb_frames > 0 * self.sequence_size: # i.e. enough frames
# 1. Allocate 1st sequence of frames.
# Define 1st sequence of frames.
sequence_id_1 = 0
nb_frames = min(self.sequence_size, self.nb_frames - 0 * self.sequence_size)
sequence_1 = pyalp.sequence.Film(sequence_id_1, self.bin_pathname, self.frame_ids, nb_frames,
self.sequence_size, self.rate)
# Allocate memory for 1st sequence of frames.
device.allocate(sequence_1)
# Control the timing properties of 1st sequence display.
sequence_1.control_timing()
if self.nb_frames > 1 * self.sequence_size: # i.e. enough frames
# 2. Allocate 2nd sequence of frames.
# Define 2nd sequence of frames.
sequence_id_2 = 1
nb_frames = min(self.sequence_size, self.nb_frames - 1 * self.sequence_size)
sequence_2 = pyalp.sequence.Film(sequence_id_2, self.bin_pathname, self.frame_ids, nb_frames,
self.sequence_size, self.rate)
# Allocate memory for 2nd sequence of frames.
device.allocate(sequence_2)
# Control the timing properties of 2nd sequence display.
sequence_2.control_timing()
# 3. Play on DMD.
# Set up queue mode.
device.control_projection(queue_mode=True)
# Transmit and start 1st sequence of frames into memory.
if self.nb_frames > 0 * self.sequence_size: # i.e. enough frames
sequence_1.load()
sequence_1.start()
# Transmit and start 2nd sequence of frames into memory.
if self.nb_frames > 1 * self.sequence_size: # i.e. enough frames
sequence_2.load()
sequence_2.start()
# Force garbage collection.
gc.collect()
# 4. Repeat.
for cycle_id in range(1, self.nb_cycles):
# a. Wait completion of 1st sequence.
device.synchronize()
# b. Free 1st sequence.
sequence_1.free()
# c. Reallocate 1st sequence.
sequence_id_1 = 2 * cycle_id + 0
nb_frames = self.sequence_size
sequence_1 = pyalp.sequence.Film(sequence_id_1, self.bin_pathname, self.frame_ids, nb_frames,
self.sequence_size, self.rate)
device.allocate(sequence_1)
sequence_1.control_timing()
sequence_1.load()
sequence_1.start()
gc.collect()
# d. Wait completion of 2nd sequence.
device.synchronize()
# e. Free 2nd sequence.
sequence_2.free()
# f. Reallocate 2nd sequence.
sequence_id_2 = 2 * cycle_id + 1
nb_frames = self.sequence_size
sequence_2 = pyalp.sequence.Film(sequence_id_2, self.bin_pathname, self.frame_ids, nb_frames,
self.sequence_size, self.rate)
device.allocate(sequence_2)
sequence_2.control_timing()
sequence_2.load()
sequence_2.start()
gc.collect()
if self.nb_cycles > 0 and self.nb_frames > (self.nb_cycles * 2 + 0) * self.sequence_size:
# i.e. remaining frames
# a. Wait completion of 1st sequence.
device.synchronize()
# b. Free 1st sequence.
sequence_1.free()
# c. Reallocate 1st sequence.
sequence_id_1 = 2 * self.nb_cycles + 0
nb_frames = min(self.sequence_size, self.nb_frames - sequence_id_1 * self.sequence_size)
sequence_1 = pyalp.sequence.Film(sequence_id_1, self.bin_pathname, self.frame_ids, nb_frames,
self.sequence_size, self.rate)
device.allocate(sequence_1)
sequence_1.control_timing()
sequence_1.load()
sequence_1.start()
gc.collect()
if self.nb_cycles > 0 and self.nb_frames > (self.nb_cycles * 2 + 1) * self.sequence_size:
# i.e. remaining frames
# a. Wait completion of 2nd sequence.
device.synchronize()
# b. Free 2nd sequence.
sequence_id_2 = 2 * self.nb_cycles + 1
nb_frames = min(self.sequence_size, self.nb_frames - sequence_id_2 * self.sequence_size)
sequence_2 = pyalp.sequence.Film(sequence_id_2, self.bin_pathname, self.frame_ids, nb_frames,
self.sequence_size, self.rate)
device.allocate(sequence_2)
sequence_2.control_timing()
sequence_2.load()
sequence_2.start()
gc.collect()
# 5. Clean up.
try:
device.wait()
sequence_1.free()
sequence_2.free()
except AttributeError:
pass
return
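# Usage sketch (not part of the module). `device` stands for the pyalp device
# object used by display() above, i.e. anything providing allocate(),
# control_projection(), synchronize() and wait(); the pathnames below are
# placeholders.
def example_display(device):
    film = Film(bin_pathname='E:/BINVECS/user/Bin/movie.bin',
                vec_pathname='E:/BINVECS/user/Vec/movie.vec',
                rate=30.0, verbose=True)
    film.display(device)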
| 37.265455 | 119 | 0.58509 | 1,241 | 10,248 | 4.638195 | 0.124899 | 0.064628 | 0.075052 | 0.041696 | 0.536484 | 0.437978 | 0.368485 | 0.344858 | 0.317408 | 0.308374 | 0 | 0.016391 | 0.309426 | 10,248 | 274 | 120 | 37.40146 | 0.796948 | 0.20404 | 0 | 0.424837 | 0 | 0 | 0.062815 | 0.006294 | 0 | 0 | 0 | 0.00365 | 0 | 1 | 0.026144 | false | 0.006536 | 0.039216 | 0 | 0.098039 | 0.143791 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5aa5b513bf2a9038fbb780c73e2c734ede5749d9 | 1,328 | py | Python | python/example_code/s3/s3-python-example-download-file.py | AkhmadRiswanda/aws-doc-sdk-examples | 46dbd6e1002f4d5c056df3eb478c318501782a17 | [
"Apache-2.0"
] | null | null | null | python/example_code/s3/s3-python-example-download-file.py | AkhmadRiswanda/aws-doc-sdk-examples | 46dbd6e1002f4d5c056df3eb478c318501782a17 | [
"Apache-2.0"
] | null | null | null | python/example_code/s3/s3-python-example-download-file.py | AkhmadRiswanda/aws-doc-sdk-examples | 46dbd6e1002f4d5c056df3eb478c318501782a17 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import botocore
BUCKET_NAME = 'my-bucket' # replace with your bucket name
KEY = 'my_image_in_s3.jpg' # replace with your object key
s3 = boto3.resource('s3')
try:
s3.Bucket(BUCKET_NAME).download_file(KEY, 'my_local_image.jpg')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
else:
raise
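# Note: a 404 here means the object key does not exist. If the caller lacks
# s3:ListBucket permission on the bucket, S3 reports missing keys as 403
# ("Forbidden") instead, so code that must distinguish the two cases should
# check for that error code as well.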
#snippet-sourcedescription:[s3-python-example-download-file.py demonstrates how to ...]
#snippet-keyword:[Python]
#snippet-keyword:[AWS SDK for Python (Boto3)]
#snippet-keyword:[Code Sample]
#snippet-keyword:[Amazon S3]
#snippet-service:[s3]
#snippet-sourcetype:[full-example]
#snippet-sourcedate:[2018-06-25]
#snippet-sourceauthor:[jschwarzwalder]
| 32.390244 | 88 | 0.718373 | 189 | 1,328 | 5.005291 | 0.57672 | 0.052854 | 0.021142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029973 | 0.170934 | 1,328 | 40 | 89 | 33.2 | 0.829246 | 0.673946 | 0 | 0 | 0 | 0 | 0.230352 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5aa5bff8e0546b4cefb038da047c7522a33ff849 | 788 | py | Python | screenpy/questions/text_of_the_alert.py | perrygoy/screenpy | 862c0d7e5ff9f1265e520ab383c04ddbd4d060eb | [
"MIT"
] | 39 | 2019-03-22T15:18:23.000Z | 2022-02-23T17:32:03.000Z | screenpy/questions/text_of_the_alert.py | perrygoy/screenpy | 862c0d7e5ff9f1265e520ab383c04ddbd4d060eb | [
"MIT"
] | 63 | 2019-07-17T06:25:19.000Z | 2022-01-13T07:03:53.000Z | screenpy/questions/text_of_the_alert.py | bandophahita/screenpy | db0f3ef91a891b9d095016d83fa4b589620808ce | [
"MIT"
] | 15 | 2019-07-09T11:02:56.000Z | 2021-12-24T07:43:56.000Z | """
Investigate the text of an alert.
"""
from screenpy.abilities import BrowseTheWeb
from screenpy.actor import Actor
from screenpy.pacing import beat
class TextOfTheAlert:
"""Ask what text appears in the alert.
Abilities Required:
|BrowseTheWeb|
Examples::
the_actor.should(
See.the(TextOfTheAlert(), ReadsExactly("Danger, Will Robinson!"))
)
"""
def describe(self) -> str:
"""Describe the Question.."""
return "The text of the alert."
@beat("{} reads the text from the alert.")
def answered_by(self, the_actor: Actor) -> str:
"""Direct the Actor to read off the alert's text."""
browser = the_actor.uses_ability_to(BrowseTheWeb).browser
return browser.switch_to.alert.text
| 24.625 | 77 | 0.648477 | 96 | 788 | 5.25 | 0.479167 | 0.063492 | 0.035714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.243655 | 788 | 31 | 78 | 25.419355 | 0.845638 | 0.374365 | 0 | 0 | 0 | 0 | 0.124717 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.3 | 0 | 0.8 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
5aa673e34f00e357af0fafa959cade5423c31460 | 418 | py | Python | 4_parallel_execution/purethreads.py | TihonV/pygoexamples | ca9604862d08145057aefc60cd0f9b77c9f5346a | [
"MIT"
] | 18 | 2020-01-23T21:20:47.000Z | 2022-02-20T19:10:02.000Z | 4_parallel_execution/purethreads.py | TihonV/pygoexamples | ca9604862d08145057aefc60cd0f9b77c9f5346a | [
"MIT"
] | null | null | null | 4_parallel_execution/purethreads.py | TihonV/pygoexamples | ca9604862d08145057aefc60cd0f9b77c9f5346a | [
"MIT"
] | 4 | 2019-08-03T12:59:53.000Z | 2022-02-07T23:43:35.000Z | import time
from threading import Thread
COUNT = 50000000
def countdown(n):
while n > 0:
n -= 1
print('Done! My final value is {0}'.format(n))
half_count = int(COUNT/2)
t1 = Thread(target=countdown, args=(half_count,))
t2 = Thread(target=countdown, args=(half_count,))
start = time.time()
t1.start()
t2.start()
t1.join()
t2.join()
end = time.time()
print('Time taken in seconds -', end - start)
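# Because of CPython's GIL, the two threads above take turns on the CPU-bound
# countdown, so the elapsed time is roughly the same as (or worse than) running
# countdown(COUNT) in a single thread. A process-based variant that does run
# the halves in parallel (sketch; on Windows it must live under an
# if __name__ == '__main__' guard):
#
# from multiprocessing import Process
#
# p1 = Process(target=countdown, args=(half_count,))
# p2 = Process(target=countdown, args=(half_count,))
# start = time.time()
# p1.start(); p2.start()
# p1.join(); p2.join()
# print('Time taken in seconds -', time.time() - start)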
| 17.416667 | 51 | 0.657895 | 65 | 418 | 4.184615 | 0.507692 | 0.099265 | 0.154412 | 0.183824 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 0.181818 | 418 | 23 | 52 | 18.173913 | 0.74269 | 0 | 0 | 0 | 0 | 0 | 0.119904 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.176471 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5aab652c059c797506557e8a980477db680cb80f | 6,829 | py | Python | DPythonS89/test.py | Synchronicity89/Lean | 564af47ea980cf0524874643c7190da82236bcfb | [
"Apache-2.0"
] | null | null | null | DPythonS89/test.py | Synchronicity89/Lean | 564af47ea980cf0524874643c7190da82236bcfb | [
"Apache-2.0"
] | 1 | 2020-08-25T03:02:47.000Z | 2020-08-25T03:02:47.000Z | DPythonS89/test.py | Synchronicity89/Lean | 564af47ea980cf0524874643c7190da82236bcfb | [
"Apache-2.0"
] | null | null | null | from clr import AddReference
import pandas
AddReference("System")
AddReference("QuantConnect.Research")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Logging")
#AddReference("QuantConnect.Data")
from System import *
from QuantConnect import *
from QuantConnect.Logging import *
#from Data import *
#from QuantConnect.Data import *
from QuantConnect.Research import *
from datetime import datetime, timedelta
from custom_data import QuandlFuture, Nifty
import pandas as pd
#from System import *
#from QuantConnect import *
from QuantConnect.Data import SubscriptionDataSource # needed by Nifty.GetSource below
from QuantConnect.Python import PythonData, PythonQuandl
from datetime import datetime
import decimal
class QuandlFuture(PythonQuandl):
'''Custom quandl data type for setting customized value column name. Value column is used for the primary trading calculations and charting.'''
def __init__(self):
# Define ValueColumnName: cannot be None, Empty or a non-existent column name
# If ValueColumnName is "Close", do not use PythonQuandl, use Quandl:
# self.AddData[QuandlFuture](self.crude, Resolution.Daily)
self.ValueColumnName = "Settle"
class Nifty(PythonData):
'''NIFTY Custom Data Class'''
def GetSource(self, config, date, isLiveMode):
return SubscriptionDataSource("https://www.dropbox.com/s/rsmg44jr6wexn2h/CNXNIFTY.csv?dl=1", SubscriptionTransportMedium.RemoteFile);
def Reader(self, config, line, date, isLiveMode):
if not (line.strip() and line[0].isdigit()): return None
# New Nifty object
index = Nifty()
index.Symbol = config.Symbol
try:
# Example File Format:
# Date, Open High Low Close Volume Turnover
# 2011-09-13 7792.9 7799.9 7722.65 7748.7 116534670 6107.78
data = line.split(',')
index.Time = datetime.strptime(data[0], "%Y-%m-%d")
index.Value = decimal.Decimal(data[4])
index["Open"] = float(data[1])
index["High"] = float(data[2])
index["Low"] = float(data[3])
index["Close"] = float(data[4])
except ValueError:
# Do nothing
return None
return index
class SecurityHistoryTest():
def __init__(self, start_date, security_type, symbol):
self.qb = QuantBook()
self.qb.SetStartDate(start_date)
self.symbol = self.qb.AddSecurity(security_type, symbol).Symbol
self.column = 'close'
def __str__(self):
return "{} on {}".format(self.symbol.ID, self.qb.StartDate)
def test_period_overload(self, period):
history = self.qb.History([self.symbol], period)
return history[self.column].unstack(level=0)
def test_daterange_overload(self, end):
start = end - timedelta(1)
history = self.qb.History([self.symbol], start, end)
return history[self.column].unstack(level=0)
class OptionHistoryTest(SecurityHistoryTest):
def test_daterange_overload(self, end, start = None):
if start is None:
start = end - timedelta(1)
history = self.qb.GetOptionHistory(self.symbol, start, end)
return history.GetAllData()
class FutureHistoryTest(SecurityHistoryTest):
def test_daterange_overload(self, end, start = None, maxFilter = 182):
if start is None:
start = end - timedelta(1)
self.qb.Securities[self.symbol].SetFilter(0, maxFilter) # default is 35 days
history = self.qb.GetFutureHistory(self.symbol, start, end)
return history.GetAllData()
class FutureContractHistoryTest():
def __init__(self, start_date, security_type, symbol):
self.qb = QuantBook()
self.qb.SetStartDate(start_date)
self.symbol = symbol
self.column = 'close'
def test_daterange_overload(self, end):
start = end - timedelta(1)
history = self.qb.GetFutureHistory(self.symbol, start, end)
return history.GetAllData()
class OptionContractHistoryTest(FutureContractHistoryTest):
def test_daterange_overload(self, end):
start = end - timedelta(1)
history = self.qb.GetOptionHistory(self.symbol, start, end)
return history.GetAllData()
class CustomDataHistoryTest(SecurityHistoryTest):
def __init__(self, start_date, security_type, symbol):
self.qb = QuantBook()
self.qb.SetStartDate(start_date)
if security_type == 'Nifty':
type = Nifty
self.column = 'close'
elif security_type == 'QuandlFuture':
type = QuandlFuture
self.column = 'settle'
else:
raise
self.symbol = self.qb.AddData(type, symbol, Resolution.Daily).Symbol
class MultipleSecuritiesHistoryTest(SecurityHistoryTest):
def __init__(self, start_date, security_type, symbol):
self.qb = QuantBook()
self.qb.SetStartDate(start_date)
self.qb.AddEquity('SPY', Resolution.Daily)
self.qb.AddForex('EURUSD', Resolution.Daily)
self.qb.AddCrypto('BTCUSD', Resolution.Daily)
def test_period_overload(self, period):
history = self.qb.History(self.qb.Securities.Keys, period)
return history['close'].unstack(level=0)
class FundamentalHistoryTest():
def __init__(self):
self.qb = QuantBook()
def getFundamentals(self, ticker, selector, start, end):
return self.qb.GetFundamental(ticker, selector, start, end)
startDate = datetime(2014, 5, 9)
a = CompositeLogHandler()
securityTestHistory = MultipleSecuritiesHistoryTest(startDate, None, None)
#// Get the last 5 candles
periodHistory = securityTestHistory.test_period_overload(5)
#// Note there is no data for BTCUSD at 2014
#//symbol EURUSD SPY
#//time
#//2014-05-03 00:00:00 NaN 173.580655
#//2014-05-04 20:00:00 1.387185 NaN
#//2014-05-05 20:00:00 1.387480 NaN
#//2014-05-06 00:00:00 NaN 173.903690
#//2014-05-06 20:00:00 1.392925 NaN
#//2014-05-07 00:00:00 NaN 172.426958
#//2014-05-07 20:00:00 1.391070 NaN
#//2014-05-08 00:00:00 NaN 173.423752
#//2014-05-08 20:00:00 1.384265 NaN
#//2014-05-09 00:00:00 NaN 173.229931
Console.WriteLine(periodHistory)
count = periodHistory.shape[0]
Assert.AreEqual(10, count)
#// Get the one day of data
timedeltaHistory = securityTestHistory.test_period_overload(TimeSpan.FromDays(8));
firstIndex = timedeltaHistory.index.values[0]
#// EURUSD exchange time zone is NY but data is UTC so we have a 4 hour difference with algo TZ which is NY
Assert.AreEqual(datetime(startDate.year, startDate.month, startDate.day - 8, 20), firstIndex) # 2014-05-01 20:00 New York time, 8 days before startDate
5aad234b18b05002548a9589e1100a906a36ebac | 2,765 | py | Python | edm_web1/app/task/forms.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | null | null | null | edm_web1/app/task/forms.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | 18 | 2020-06-05T18:17:40.000Z | 2022-03-11T23:25:21.000Z | edm_web1/app/task/forms.py | zhouli121018/nodejsgm | 0ccbc8acf61badc812f684dd39253d55c99f08eb | [
"MIT"
] | null | null | null | # coding=utf-8
from django import forms
from app.task.models import SendTask
from app.template.models import SendTemplate
from app.address.models import MailList
from django.utils.translation import ugettext_lazy as _
from django.core.cache import cache
class SendTaskForm(forms.ModelForm):
user = forms.CharField(label=u'Customer', required=False, widget=forms.HiddenInput())
# send_name = forms.CharField(label=u'Send batch', widget=forms.TextInput(attrs={'readonly': 'readonly'}))
# template = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, label=u"Select email template")
# maillist = forms.ChoiceField(label=u'Select contact list')
# sender = forms.ChoiceField(label=u'Sender')
def __init__(self, user, *args, **kwargs):
super(SendTaskForm, self).__init__(*args, **kwargs)
self.user = user
# self.fields['template'].choices = [(x.id, x) for x in SendTemplate.objects.filter(user=user, name__isnull=False)]
# self.fields['maillist'].choices = [(x.id, x) for x in MailList.objects.filter(customer=user)]
def clean_user(self):
return self.user
class Meta:
model = SendTask
exclude = []
# fields = ['user']
# exclude = [
# 'id', 'send_acct_type', 'send_acct_domain', 'send_replyto', 'send_fullname',
# 'send_maillist', 'send_maillist_id',
# 'send_qty', 'send_qty_remark', 'send_time',
# 'send_status', 'verify_status', 'time_start', 'time_end', 'updated', 'status'
# ]
class SendTaskSearchForm(forms.Form):
name = forms.CharField(label=u'Task name', required=False)
class TaskExportForm(forms.Form):
export_open_or_click = forms.ChoiceField(label=_(u"Export type"), choices=(('open', _(u"Open")), ('click', _(u"Click"))), initial="open")
export_is_new_maillist = forms.BooleanField(label=_(u'Import into a new category'), widget=forms.CheckboxInput(attrs={
"autocomplete": "off",
"onchange": "onchangeIsNewMaillist();",
}))
export_maillist_name = forms.CharField(label=_(u'Category name'), initial=_(u"Open/click addresses"), max_length=50,
help_text=_(u"Default name: Open/click addresses; opened/clicked addresses will be imported into this category"))
export_maillist_id = forms.ModelChoiceField(
label=_(u'Select address pool'),
queryset=None,
widget=forms.Select(attrs={
#"data-placeholder": _(u"Please select an address pool"),
"autocomplete": "off",
"class": "select2 ",
}), help_text=_(u"Select an address category; opened/clicked addresses will be imported into it"))
def __init__(self, user, *args, **kwargs):
super(TaskExportForm, self).__init__(*args, **kwargs)
lists = MailList.objects.filter(
customer=user).filter(
isvalid=True, is_smtp=False).order_by('-id')[:500]
self.fields['export_maillist_id'].queryset = lists
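# Illustrative usage sketch (hypothetical view code, not part of this module):
# the form takes the current user as its first positional argument so the
# address-pool choices are restricted to that user's valid, non-SMTP lists.
#
# form = TaskExportForm(request.user, request.POST or None)
# if form.is_valid():
#     maillist = form.cleaned_data['export_maillist_id']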
| 42.538462 | 131 | 0.644485 | 321 | 2,765 | 5.345794 | 0.398754 | 0.034965 | 0.044289 | 0.04662 | 0.135198 | 0.054779 | 0.054779 | 0 | 0 | 0 | 0 | 0.003179 | 0.203617 | 2,765 | 64 | 132 | 43.203125 | 0.776113 | 0.294756 | 0 | 0.102564 | 0 | 0 | 0.102431 | 0.0388 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0.025641 | 0.512821 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
5aae6a12dc22ce122aa713fc2aeac3ad090fe5d0 | 2,827 | py | Python | map_swcf.py | torimcd/Goldblatt_etal_2020 | 0793b16ef2535db3482c31d84587d80b3578dd3b | [
"BSD-3-Clause"
] | 1 | 2021-12-03T15:11:31.000Z | 2021-12-03T15:11:31.000Z | map_swcf.py | torimcd/Goldblatt_etal_2021 | 0793b16ef2535db3482c31d84587d80b3578dd3b | [
"BSD-3-Clause"
] | null | null | null | map_swcf.py | torimcd/Goldblatt_etal_2021 | 0793b16ef2535db3482c31d84587d80b3578dd3b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""
Author: Victoria McDonald
email: vmcd@atmos.washington.edu
website: http://torimcd.github.com
license: BSD
"""
import matplotlib as mpl
mpl.use("Agg")
import os
import sys
import numpy as np
import netCDF4
import operator
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
download_path = '/home/vmcd/' # enter the path to the directory where you downloaded the archived data, eg '/home/user/Downloads'
filebase = download_path + 'FYSP_clouds_archive/CAM4/'
outfileloc = download_path + 'temp_data/' # this is the location to save the processed netcdf files to
current = '0.775'
cc = '0775'
# SWCF (shortwave cloud forcing) variable
field = 'SWCF'
outfilebase = 'c4_swcf_'
casenames = {'07','0725','075','0775', '08','0825','085','0875','09','0925','095','0975','10', '1025', '105', '1075','11'}
# 1.0 case
outfile_10 = outfileloc + outfilebase + '10.nc'
if not os.path.isfile(outfile_10):
if os.path.isdir(outfileloc):
infile = filebase +'cam4_10.nc'
# compute the time-mean SWCF field over timesteps 21-40
syscall = r"//usr//bin//cdo timmean -seltimestep,21/40 -select,name="+field+" "+infile+ " " +outfile_10
os.system(syscall)
for c in casenames:
# calc swcf
outfile_case = outfileloc+outfilebase+c+'.nc'
# check directly if the file exists
if not os.path.isfile(outfile_case):
if os.path.isdir(outfileloc):
infile = filebase +'cam4_' + c +'.nc'
# compute the time-mean SWCF field over timesteps 21-40
syscall = r"//usr//bin//cdo timmean -seltimestep,21/40 -select,name="+field+" "+infile+ " " +outfile_case
os.system(syscall)
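# Note on the CDO pipeline above: `-select,name=SWCF` extracts the SWCF field,
# `-seltimestep,21/40` keeps model timesteps 21 through 40, and `timmean`
# averages them in time, so each output file holds one time-mean SWCF map.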
control = outfile_10
if os.path.isfile(control):
dsyear = netCDF4.Dataset(control)
control_swcf = dsyear.variables[field][:]
dsyear.close()
#plot the data
dsloc = outfileloc + outfilebase + cc +'.nc'
if os.path.isfile(dsloc):
# open the merged file and get out some variables
dsyear = netCDF4.Dataset(dsloc)
lons = dsyear.variables['lon'][:]
lats = dsyear.variables['lat'][:]
swcf = dsyear.variables[field][:]
swcf_units = dsyear.variables[field].units
dsyear.close() #close the file
swcf_diff = list(map(operator.sub, swcf, control_swcf))
#create plot
fig = plt.figure()
# setup the map
m = Basemap(lat_0=0,lon_0=0)
m.drawcoastlines()
m.drawcountries()
# Create 2D lat/lon arrays for Basemap
lon2d, lat2d = np.meshgrid(lons, lats)
# Plot
cs = m.pcolormesh(lon2d,lat2d,np.squeeze(swcf_diff), cmap='RdBu_r', latlon='True', vmin=-60, vmax=60, rasterized=True)
# This is the fix for the white lines between contour levels
cs.set_edgecolor("face")
# Add Colorbar
cbar = m.colorbar(cs, location='bottom', pad="10%")
cbar.set_label(swcf_units)
plt.title('Shortwave Cloud Forcing: ' + r'$\mathsf{S/S_0}$'+' = '+ current)
plt.show()
fig.savefig('swcf_map_diff_'+cc+'.pdf', bbox_inches='tight')
| 28.27 | 129 | 0.700389 | 416 | 2,827 | 4.677885 | 0.473558 | 0.018499 | 0.024666 | 0.011305 | 0.179342 | 0.169579 | 0.144913 | 0.144913 | 0.102775 | 0.102775 | 0 | 0.044981 | 0.15069 | 2,827 | 99 | 130 | 28.555556 | 0.765514 | 0.229926 | 0 | 0.107143 | 0 | 0 | 0.169846 | 0.011633 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ab017c82dd41b9fd3710bcfd371dbf19774599d | 6,706 | py | Python | src/mbic/mbic_full_model.py | davidanastasiu/antibiofilm | f50945d52bcfd97538a31d7627af6b3089fdd2cf | [
"MIT"
] | null | null | null | src/mbic/mbic_full_model.py | davidanastasiu/antibiofilm | f50945d52bcfd97538a31d7627af6b3089fdd2cf | [
"MIT"
] | null | null | null | src/mbic/mbic_full_model.py | davidanastasiu/antibiofilm | f50945d52bcfd97538a31d7627af6b3089fdd2cf | [
"MIT"
] | null | null | null | # AntiBiofilm Peptide Research
# Department of Computer Science and Engineering, Santa Clara University
# Author: Taylor Downey
# A python script that uses the optimized hyperparameters found for both
# the SVM and the SVR to create a prediction model
# Script prints the average RMSE of the full model when run with cross validation
#
# NOTE: Given the small number of training samples available, the average RMSE
# outputted will vary by about +- 5
# ------------------------------------------------------------------------------
# Libraries
# ------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import json
import warnings
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.utils.validation import column_or_1d
from sklearn.svm import SVC
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import RepeatedStratifiedKFold
warnings.filterwarnings("ignore")
# ------------------------------------------------------------------------------
# Functions
# ------------------------------------------------------------------------------
def separatePeptides(peptides, threshold):
columns = ['MBIC']
filterMBIC = (peptides[columns] <= threshold).all(axis=1)
lower_peptides = peptides[filterMBIC]
filterMBIC = (peptides[columns] > threshold).all(axis=1)
upper_peptides = peptides[filterMBIC]
return lower_peptides, upper_peptides
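# Example (illustrative): with threshold=64, a peptide whose MBIC is 32 lands
# in lower_peptides and one whose MBIC is 128 in upper_peptides, e.g.
# low, high = separatePeptides(pd.read_csv(training_filename), 64)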
# ------------------------------------------------------------------------------
# Variables
# ------------------------------------------------------------------------------
training_filename = '../../data/mbic_training_data.csv'
svm_features_filename = 'mbic_svm_forward_selection_features.json'
svr_features_filename = 'mbic_svr_forward_selection_features.json'
svr_svm_results = 'full_model_results.txt'
# Optimized Hyperparameters
svm_c = 10
svm_g = 1000
svm_pca_comp = 6
svm_num_feat = 9
svr_c = 45
svr_g = 40
svr_pca_comp = 8
svr_num_feat = 9
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
def main():
# Prepare peptides for SVM
with open(svm_features_filename) as f:
svm_feat_dict = json.load(f)
svm_feat_dict = svm_feat_dict[0:svm_num_feat]
peptides_svm = pd.read_csv(training_filename)
peptides_svm.loc[(peptides_svm['MBIC'] > 64), 'MBIC'] = 0
peptides_svm.loc[(peptides_svm['MBIC'] != 0), 'MBIC'] = 1
# Filter out columns based on feat list
labels = peptides_svm.columns.values.tolist()
for l in labels:
if l == 'MBIC':
continue
if l not in svm_feat_dict:
peptides_svm = peptides_svm.drop(columns=[l])
y_svm = peptides_svm['MBIC'].to_numpy()
peptides_svm = peptides_svm.drop(columns=['MBIC'])
min_max_scaler = preprocessing.MinMaxScaler()
X_norm_svm = min_max_scaler.fit_transform(peptides_svm)
pca_svm = PCA(n_components=svm_pca_comp)
X_trans_svm = pca_svm.fit_transform(X_norm_svm)
SVC_rbf = SVC(kernel='rbf', C=svm_c, gamma=svm_g)
# Prepare peptides for SVR
with open(svr_features_filename) as f:
svr_feat_dict = json.load(f)
svr_feat_dict = svr_feat_dict[0:svr_num_feat]
peptides_svr = pd.read_csv(training_filename)
peptides_svr, _ = separatePeptides(peptides_svr, 64)
# Filter out columns based on feat list
labels = peptides_svr.columns.values.tolist()
for l in labels:
if l == 'MBIC':
continue
if l not in svr_feat_dict:
peptides_svr = peptides_svr.drop(columns=[l])
y_svr = peptides_svr['MBIC'].to_numpy()
peptides_svr = peptides_svr.drop(columns=['MBIC'])
min_max_scaler_svr = preprocessing.MinMaxScaler()
X_norm_svr = min_max_scaler_svr.fit_transform(peptides_svr)
pca_svr = PCA(n_components=svr_pca_comp)
X_trans_svr = pca_svr.fit_transform(X_norm_svr)
SVR_rbf = SVR(kernel='rbf', C=svr_c, gamma=svr_g)
# Prepare test set of peptides used by svr after training
peptides_test_svr = pd.read_csv(training_filename)
# Filter out columns based on feat list
labels = peptides_test_svr.columns.values.tolist()
for l in labels:
if l == 'MBIC':
continue
if l not in svr_feat_dict:
peptides_test_svr = peptides_test_svr.drop(columns=[l])
y_svr2 = peptides_test_svr['MBIC'].to_numpy()
peptides_test_svr = peptides_test_svr.drop(columns=['MBIC'])
# Apply svr transformations on test set of peptides for svr
X_norm_test_svr = min_max_scaler_svr.transform(peptides_test_svr)
X_trans_test_svr = pca_svr.transform(X_norm_test_svr)
# Cross validation applied to full model
rskf = RepeatedStratifiedKFold(n_splits=5, n_repeats = 20)
RMSE = []
cnt = 1
for train_index, test_index in rskf.split(X_trans_svm, y_svm):
X_train, X_test = X_trans_svm[train_index], X_trans_svm[test_index]
y_train, y_test = y_svm[train_index], y_svm[test_index]
y_train = y_train.reshape(-1,1)
y_train = column_or_1d(y_train, warn=False)
svm_fit = SVC_rbf.fit(X_train, y_train)
y_pred = svm_fit.predict(X_test)
train_index_svr = []
test_index_svr = []
y_train_svr = []
y_test_svr = []
for i in range(0, len(y_train)):
if(y_train[i] == 0):
continue
else:
train_index_svr.append(train_index[i])
X_train_svr = X_trans_svr[train_index_svr]
y_train_svr = y_svr[train_index_svr]
svr_fit = SVR_rbf.fit(X_train_svr, y_train_svr)
y_train_svr = []
for i in range(0, len(y_pred)):
if(y_pred[i] == 0):
continue
else:
test_index_svr.append(test_index[i])
X_test_svr = X_trans_test_svr[test_index_svr]
y_test_svr = y_svr2[test_index_svr]
y_pred_svr = SVR_rbf.predict(X_test_svr)
rmse = np.sqrt(mean_squared_error(y_test_svr, y_pred_svr))
cnt = cnt + 1
with open (svr_svm_results, 'a', encoding="utf-8") as sfile:
sfile.write(str(rmse) + '\n')
RMSE.append(rmse)
rmse_avg = np.average(RMSE)
print('RMSE average: ' + str(rmse_avg))
if __name__ == "__main__":
main()
| 35.294737 | 81 | 0.596779 | 861 | 6,706 | 4.325203 | 0.220674 | 0.031955 | 0.032223 | 0.012889 | 0.311762 | 0.256176 | 0.1442 | 0.121643 | 0.091568 | 0.055317 | 0 | 0.008141 | 0.230689 | 6,706 | 189 | 82 | 35.481481 | 0.713704 | 0.233224 | 0 | 0.141667 | 0 | 0 | 0.045588 | 0.026414 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016667 | false | 0 | 0.091667 | 0 | 0.116667 | 0.008333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ab37c9f2cc9d5f92cea84f7411a66b98892ba55 | 334 | py | Python | pygoogletranslation/urls.py | sha-cmd/Translator | 8f01b04c90782feb474204c738cd1b9dbe8fe853 | [
"MIT",
"Unlicense"
] | null | null | null | pygoogletranslation/urls.py | sha-cmd/Translator | 8f01b04c90782feb474204c738cd1b9dbe8fe853 | [
"MIT",
"Unlicense"
] | null | null | null | pygoogletranslation/urls.py | sha-cmd/Translator | 8f01b04c90782feb474204c738cd1b9dbe8fe853 | [
"MIT",
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Predefined URLs used to make google translate requests.
"""
BASE = 'https://translate.google.com'
TOKEN = 'https://translate.google.com/translate_a/element.js'
TRANSLATE = 'https://translate.googleapis.com/translate_a/'
TRANSLATEURL = 'https://translate.google.com/_/TranslateWebserverUi/data/batchexecute' | 41.75 | 86 | 0.751497 | 40 | 334 | 6.2 | 0.575 | 0.225806 | 0.241935 | 0.278226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003247 | 0.077844 | 334 | 8 | 86 | 41.75 | 0.801948 | 0.233533 | 0 | 0 | 0 | 0 | 0.7751 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 |
5ab39146e42528e662c0c49db178a51f75c5037e | 1,901 | py | Python | app/stac_api/migrations/0006_auto_20210419_1409.py | Niclnx/service-stac | ad9129a7130d09b2bed387d8e82575eb86fdfa7b | [
"BSD-3-Clause"
] | 9 | 2020-08-17T11:01:48.000Z | 2022-01-17T22:24:13.000Z | app/stac_api/migrations/0006_auto_20210419_1409.py | Niclnx/service-stac | ad9129a7130d09b2bed387d8e82575eb86fdfa7b | [
"BSD-3-Clause"
] | 100 | 2020-08-14T05:56:40.000Z | 2022-03-01T22:39:58.000Z | app/stac_api/migrations/0006_auto_20210419_1409.py | Niclnx/service-stac | ad9129a7130d09b2bed387d8e82575eb86fdfa7b | [
"BSD-3-Clause"
] | 3 | 2020-09-02T14:01:07.000Z | 2021-07-27T06:30:26.000Z | # Generated by Django 3.1.7 on 2021-04-19 14:09
import django.db.models.deletion
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('stac_api', '0005_auto_20210408_0821'),
]
operations = [
migrations.AlterField(
model_name='item',
name='collection',
field=models.ForeignKey(
help_text=
'\n <div class=SearchUsage>\n Search Usage:\n <ul>\n <li>\n <i>arg</i> will make a non exact search checking if <i>arg</i> is part of\n the collection ID\n </li>\n <li>\n Multiple <i>arg</i> can be used, separated by spaces. This will search for all\n collections ID containing all arguments.\n </li>\n <li>\n <i>"collectionID"</i> will make an exact search for the specified collection.\n </li>\n </ul>\n Examples :\n <ul>\n <li>\n Searching for <i>pixelkarte</i> will return all collections which have\n pixelkarte as a part of their collection ID\n </li>\n <li>\n Searching for <i>pixelkarte 2016 4</i> will return all collection\n which have pixelkarte, 2016 AND 4 as part of their collection ID\n </li>\n <li>\n Searching for <i>ch.swisstopo.pixelkarte.example</i> will yield only this\n collection, if this collection exists. Please note that it would not return\n a collection named ch.swisstopo.pixelkarte.example.2.\n </li>\n </ul>\n </div>',
on_delete=django.db.models.deletion.PROTECT,
to='stac_api.collection'
),
),
]
| 73.115385 | 1,327 | 0.538664 | 243 | 1,901 | 4.18107 | 0.415638 | 0.035433 | 0.047244 | 0.023622 | 0.170276 | 0.139764 | 0.139764 | 0.084646 | 0.084646 | 0.084646 | 0 | 0.034539 | 0.360337 | 1,901 | 25 | 1,328 | 76.04 | 0.800987 | 0.023672 | 0 | 0.105263 | 1 | 0.052632 | 0.740022 | 0.063646 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.157895 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
5ab4e1cb0b8290fb37e3f50f7412275885208296 | 4,463 | py | Python | geocoder/opencage.py | guess3233qa/geocoder1 | 31de0344e698fb209b709c5bb11181e845279a59 | [
"MIT"
] | 1 | 2021-02-08T13:30:00.000Z | 2021-02-08T13:30:00.000Z | geocoder/opencage.py | star-is-here/geocoder | 31b5ccdf54f199d7640995a787c84909687ee03c | [
"MIT"
] | null | null | null | geocoder/opencage.py | star-is-here/geocoder | 31b5ccdf54f199d7640995a787c84909687ee03c | [
"MIT"
] | null | null | null | #!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.base import Base
from geocoder.keys import opencage_key
class OpenCage(Base):
"""
OpenCage Geocoding Services
===========================
OpenCage Geocoder: simple, easy, and open geocoding for the entire world
Our API combines multiple geocoding systems in the background.
Each is optimized for different parts of the world and types of requests.
We aggregate the best results from open data sources and algorithms so you don't have to.
API Reference
-------------
http://geocoder.opencagedata.com/api.html
"""
provider = 'opencage'
method = 'geocode'
def __init__(self, location, **kwargs):
self.url = 'http://api.opencagedata.com/geocode/v1/json'
self.location = location
self.params = {
'query': location,
'key': self._get_api_key(opencage_key, **kwargs),
}
self._initialize(**kwargs)
def _catch_errors(self):
if self.content:
status = self.content.get('status')
if status:
self.status_code = status.get('code')
message = status.get('message')
if self.status_code:
self.error = message
def _exceptions(self):
# Build intial Tree with results
if self.parse['results']:
self._build_tree(self.parse['results'][0])
licenses = self.parse['licenses']
if licenses:
self.parse['licenses'] = licenses[0]
@property
def lat(self):
return self.parse['geometry'].get('lat')
@property
def lng(self):
return self.parse['geometry'].get('lng')
@property
def address(self):
return self.parse.get('formatted')
@property
def housenumber(self):
return self.parse['components'].get('house_number')
@property
def street(self):
return self.parse['components'].get('road')
@property
def neighborhood(self):
neighbourhood = self.parse['components'].get('neighbourhood')
if neighbourhood:
return neighbourhood
elif self.suburb:
return self.suburb
elif self.city_district:
return self.city_district
@property
def suburb(self):
return self.parse['components'].get('suburb')
@property
def city_district(self):
return self.parse['components'].get('city_district')
@property
def city(self):
city = self.parse['components'].get('city')
if city:
return city
elif self.town:
return self.town
elif self.county:
return self.county
@property
def town(self):
return self.parse['components'].get('town')
@property
def county(self):
return self.parse['components'].get('county')
@property
def state(self):
return self.parse['components'].get('state')
@property
def country(self):
return self.parse['components'].get('country_code')
@property
def postal(self):
return self.parse['components'].get('postcode')
@property
def confidence(self):
return self.parse.get('confidence')
@property
def w3w(self):
return self.parse['what3words'].get('words')
@property
def mgrs(self):
return self.parse['annotations'].get('MGRS')
@property
def geohash(self):
return self.parse['annotations'].get('geohash')
@property
def callingcode(self):
return self.parse['annotations'].get('callingcode')
@property
def Maidenhead(self):
return self.parse['annotations'].get('Maidenhead')
@property
def DMS(self):
return self.parse.get('DMS')
@property
def Mercator(self):
return self.parse.get('Mercator')
@property
def license(self):
return self.parse.get('licenses')
@property
def bbox(self):
south = self.parse['southwest'].get('lat')
north = self.parse['northeast'].get('lat')
west = self.parse['southwest'].get('lng')
east = self.parse['northeast'].get('lng')
return self._get_bbox(south, west, north, east)
if __name__ == '__main__':
g = OpenCage('1552 Payette dr., Ottawa')
print(g.json['mgrs'])
| 26.724551 | 93 | 0.60363 | 507 | 4,463 | 5.238659 | 0.278107 | 0.105045 | 0.110693 | 0.150226 | 0.278614 | 0.225904 | 0.045181 | 0.045181 | 0.045181 | 0.045181 | 0 | 0.003056 | 0.266861 | 4,463 | 166 | 94 | 26.885542 | 0.80868 | 0.125476 | 0 | 0.201681 | 0 | 0 | 0.141595 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.226891 | false | 0 | 0.02521 | 0.176471 | 0.512605 | 0.008403 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 2 |
5ab5bd66bce6d4c9eceac8e62755a4fcbe075a48 | 108 | py | Python | Python/type_casting.py | thebiebs/python-practise-exercises | 28db498f35ec6f9bdd437f54c91990027f4b9436 | [
"MIT"
] | null | null | null | Python/type_casting.py | thebiebs/python-practise-exercises | 28db498f35ec6f9bdd437f54c91990027f4b9436 | [
"MIT"
] | null | null | null | Python/type_casting.py | thebiebs/python-practise-exercises | 28db498f35ec6f9bdd437f54c91990027f4b9436 | [
"MIT"
] | null | null | null | a = "3434"
a = int(a)
print(type(a))
b = 32
b = str(b)
print(type(b))
| 4.695652 | 15 | 0.351852 | 16 | 108 | 2.375 | 0.5 | 0.473684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 0.481481 | 108 | 22 | 16 | 4.909091 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 |
5ab8816bb76e60e81fc3c314b0b1f78ca16670ca | 64 | py | Python | models/__init__.py | vrdelc/deepmask-pytorch | 4432aa06ef43fe845230fd539dcbad27177c37d4 | [
"MIT"
] | 233 | 2019-02-20T16:40:02.000Z | 2022-01-24T07:08:28.000Z | models/__init__.py | vrdelc/deepmask-pytorch | 4432aa06ef43fe845230fd539dcbad27177c37d4 | [
"MIT"
] | 10 | 2019-03-19T06:33:00.000Z | 2021-02-11T02:49:07.000Z | models/__init__.py | vrdelc/deepmask-pytorch | 4432aa06ef43fe845230fd539dcbad27177c37d4 | [
"MIT"
] | 62 | 2019-02-21T02:27:56.000Z | 2021-11-16T02:37:41.000Z | from .DeepMask import DeepMask
from .SharpMask import SharpMask
| 21.333333 | 32 | 0.84375 | 8 | 64 | 6.75 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 64 | 2 | 33 | 32 | 0.964286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
5ab8ac9f9305d1dcc1a98fb23d25074ad1e3b140 | 37 | py | Python | homeassistant/components/systemmonitor/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/systemmonitor/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/systemmonitor/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """The systemmonitor integration."""
| 18.5 | 36 | 0.72973 | 3 | 37 | 9 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081081 | 37 | 1 | 37 | 37 | 0.794118 | 0.810811 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 5 |
5abb45490eb0d080f875383d0f61e11894705270 | 396 | py | Python | setup.py | ajagnic/stromCLI | f36b3a97a28cb4f6bf644fdfd6660307b19a6fab | [
"MIT"
] | null | null | null | setup.py | ajagnic/stromCLI | f36b3a97a28cb4f6bf644fdfd6660307b19a6fab | [
"MIT"
] | null | null | null | setup.py | ajagnic/stromCLI | f36b3a97a28cb4f6bf644fdfd6660307b19a6fab | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name='strom-cli',
author='Adrian Agnic',
author_email='adrian@tura.io',
version='0.0.1',
description='CLI tool for use with Strom',
packages=find_packages(),
include_package_data=True,
install_requires=['click', 'requests'],
entry_points='''
[console_scripts]
strom=interface.tool:dstream
''',
)
| 23.294118 | 46 | 0.669192 | 48 | 396 | 5.354167 | 0.770833 | 0.093385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009346 | 0.189394 | 396 | 16 | 47 | 24.75 | 0.791277 | 0 | 0 | 0 | 0 | 0 | 0.353535 | 0.070707 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5abba2ed2df006c8126b41c355cba49e34d89853 | 776 | py | Python | backend/post/migrations/0005_auto_20190424_1912.py | aurma97/to52_ok | c5f940416938e1bb0d3110dfafa43cb7197df984 | [
"MIT"
] | null | null | null | backend/post/migrations/0005_auto_20190424_1912.py | aurma97/to52_ok | c5f940416938e1bb0d3110dfafa43cb7197df984 | [
"MIT"
] | 4 | 2021-04-08T22:09:42.000Z | 2021-06-10T20:38:06.000Z | backend/post/migrations/0005_auto_20190424_1912.py | aurma97/to52_ok | c5f940416938e1bb0d3110dfafa43cb7197df984 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.3 on 2019-04-24 19:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('post', '0004_post_an_type'),
]
operations = [
migrations.CreateModel(
name='PostType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='Offres', max_length=100)),
],
),
migrations.AlterField(
model_name='post',
name='an_type',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='post.PostType'),
),
]
| 28.740741 | 115 | 0.595361 | 84 | 776 | 5.380952 | 0.654762 | 0.053097 | 0.061947 | 0.097345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039007 | 0.273196 | 776 | 26 | 116 | 29.846154 | 0.762411 | 0.05799 | 0 | 0.1 | 1 | 0 | 0.093278 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5abbfe4f3603e53b2ca44eaf8727990f742d0172 | 1,006 | py | Python | aspen/__main__.py | Acidburn0zzz/aspen-python | 3b25a3ca041bd0bf4d313f7fc3a15cf092903de7 | [
"MIT"
] | null | null | null | aspen/__main__.py | Acidburn0zzz/aspen-python | 3b25a3ca041bd0bf4d313f7fc3a15cf092903de7 | [
"MIT"
] | null | null | null | aspen/__main__.py | Acidburn0zzz/aspen-python | 3b25a3ca041bd0bf4d313f7fc3a15cf092903de7 | [
"MIT"
] | null | null | null | """
python -m aspen
===============
Aspen ships with a server (wsgiref.simple_server) that is
suitable for development and testing. It can be invoked via:
python -m aspen
though even for development you'll likely want to specify a
project root, so a more likely incantation is:
ASPEN_PROJECT_ROOT=/path/to/wherever python -m aspen
For production deployment, you should probably deploy using
a higher performance WSGI server like Gunicorn, uwsgi, Spawning,
or the like.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from aspen import log_dammit
from aspen.website import Website
from wsgiref.simple_server import make_server
if __name__ == '__main__':
website = Website()
port = int(os.environ.get('PORT', '8080'))
server = make_server('0.0.0.0', port, website)
log_dammit("Greetings, program! Welcome to port {0}.".format(port))
server.serve_forever()
| 27.944444 | 71 | 0.754473 | 146 | 1,006 | 4.952055 | 0.547945 | 0.055325 | 0.08852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010626 | 0.158052 | 1,006 | 35 | 72 | 28.742857 | 0.842975 | 0.475149 | 0 | 0 | 0 | 0 | 0.121154 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.571429 | 0 | 0.571429 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |
5abddcac686664ae3a44fc39af000fbbf1daafbd | 874 | py | Python | ax/benchmark2/__init__.py | lyhyl/Ax | 44384a0cb1a622c9e395c95f683cfee25c7b61f6 | [
"MIT"
] | null | null | null | ax/benchmark2/__init__.py | lyhyl/Ax | 44384a0cb1a622c9e395c95f683cfee25c7b61f6 | [
"MIT"
] | null | null | null | ax/benchmark2/__init__.py | lyhyl/Ax | 44384a0cb1a622c9e395c95f683cfee25c7b61f6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from ax.benchmark2.benchmark import (
benchmark_full_run,
benchmark_replication,
benchmark_test,
)
from ax.benchmark2.benchmark_method import BenchmarkMethod
from ax.benchmark2.benchmark_problem import (
BenchmarkProblem,
SingleObjectiveBenchmarkProblem,
MultiObjectiveBenchmarkProblem,
)
from ax.benchmark2.benchmark_result import BenchmarkResult, AggregatedBenchmarkResult
__all__ = [
"BenchmarkMethod",
"BenchmarkProblem",
"SingleObjectiveBenchmarkProblem",
"MultiObjectiveBenchmarkProblem",
"BenchmarkResult",
"AggregatedBenchmarkResult",
"benchmark_replication",
"benchmark_test",
"benchmark_full_run",
]
| 28.193548 | 85 | 0.772311 | 83 | 874 | 7.951807 | 0.554217 | 0.036364 | 0.09697 | 0.151515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006766 | 0.154462 | 874 | 30 | 86 | 29.133333 | 0.886333 | 0.21968 | 0 | 0 | 0 | 0 | 0.273264 | 0.15805 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.173913 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5abe5b6784894f2e606b06d1d7978dc1a255825c | 724 | py | Python | setup.py | nvaytet/metatoenv | 6d0b5f1093f4042d63f8acad435f0953633f6821 | [
"BSD-3-Clause"
] | null | null | null | setup.py | nvaytet/metatoenv | 6d0b5f1093f4042d63f8acad435f0953633f6821 | [
"BSD-3-Clause"
] | null | null | null | setup.py | nvaytet/metatoenv | 6d0b5f1093f4042d63f8acad435f0953633f6821 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, Command
from distutils.command.build_py import build_py
with open('README.md') as infile:
long_description = infile.read()
from psrecord import __version__
setup(
name='metatoenv',
version=__version__,
description=
'Generate a conda environment file from a conda meta.yaml recipe',
long_description=long_description,
url='https://github.com/nvaytet/metatoenv',
license='BSD-3-Clause',
author='Neil Vaytet',
packages=['metatoenv'],
provides=['metatoenv'],
scripts=['scripts/metatoenv'],
cmdclass={'build_py': build_py},
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
],
)
| 25.857143 | 70 | 0.685083 | 82 | 724 | 5.865854 | 0.621951 | 0.058212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001706 | 0.190608 | 724 | 27 | 71 | 26.814815 | 0.819113 | 0 | 0 | 0 | 0 | 0 | 0.346685 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5abf1554e4e83fbb167495e7bf4f154fc338e021 | 28,931 | py | Python | git_timestamp/timestamp.py | zeitgitter/git-timestamp | 44d68c13036ba706d1b2d1d25773427b474fa39e | [
"MIT"
] | 16 | 2020-02-16T03:21:22.000Z | 2021-12-17T19:22:56.000Z | git_timestamp/timestamp.py | zeitgitter/git-timestamp | 44d68c13036ba706d1b2d1d25773427b474fa39e | [
"MIT"
] | 1 | 2021-11-02T10:07:18.000Z | 2021-11-02T10:07:18.000Z | git_timestamp/timestamp.py | zeitgitter/git-timestamp | 44d68c13036ba706d1b2d1d25773427b474fa39e | [
"MIT"
] | 2 | 2020-02-16T03:21:26.000Z | 2021-04-05T17:19:05.000Z | #!/usr/bin/python3 -tt
# -*- coding: utf-8 -*-
# (keep hashbang line for `make install`)
#
# git timestamp — Zeitgitter GIT Timestamping client
#
# Copyright (C) 2019-2021 Marcel Waldvogel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# This has not been modularized for ease of installation
import configargparse
import distutils.util
import os
import re
import sys
import tempfile
import time
import traceback
# Provided e.g. by `pip install python-gnupg` (try with `pip3` if `pip` does not work)
import gnupg
import pygit2 as git
import requests
import deltat
VERSION = '1.1.0'
class GitArgumentParser(configargparse.ArgumentParser):
"""Insert git config options between command line and default.
WARNING: There is no way to handle custom actions correctly by default, so
your custom actions need to include a `convert_default(value)` method."""
def __init__(self, *args, **kwargs):
super(GitArgumentParser, self).__init__(*args, **kwargs)
def repo_config(self, key):
"""`repo_config(key)` is similar to `repo.config[key]`, but `key` can
be a comma-separated list of keys. It returns the value of the first
one that exists, or raises `KeyError` if none is set.
"""
for k in key.split(','):
if k in repo.config:
return repo.config[k]
raise KeyError("Key%s `%s` not in git config" % ('s' if ',' in key else "", key))
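# Example (illustrative): repo_config('timestamp.branch,timestamp.server')
# returns the value of `git config timestamp.branch` if that key is set,
# otherwise falls back to `timestamp.server`, and raises KeyError if
# neither key exists in the repository's git configuration.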
def add_argument(self, *args, **kwargs):
global repo
if repo is None and 'gitopt' in kwargs:
# Called outside a repo (maybe for --help or --version):
# Ignore repo options
del kwargs['gitopt']
elif 'gitopt' in kwargs:
if 'help' in kwargs:
kwargs['help'] += '. '
else:
kwargs['help'] = ''
gitopt = kwargs['gitopt']
try:
if 'action' in kwargs and issubclass(kwargs['action'],
configargparse.Action):
try:
val = kwargs['action'].convert_default(
self.repo_config(gitopt))
except AttributeError:
raise NotImplementedError("Custom action `%r' passed "
"to GitArgumentParser does not support "
"`convert_default()' method." % kwargs['action'])
else:
val = self.repo_config(gitopt)
kwargs['help'] += "Defaults to '%s' from `git config %s`" % (
val, gitopt.replace(',', ' or '))
if 'default' in kwargs:
kwargs['help'] += "; fallback default: '%s'" % kwargs['default']
kwargs['default'] = val
if 'required' in kwargs:
del kwargs['required']
except KeyError:
kwargs['help'] += "Can be set by `git config %s`" % gitopt
if 'default' in kwargs:
kwargs['help'] += "; fallback default: '%s'" % kwargs['default']
del kwargs['gitopt']
return super(GitArgumentParser, self).add_argument(*args, **kwargs)
add = add_argument
def asciibytes(data):
"""For Python 2/3 compatibility:
If it is 'bytes' already, do nothing, otherwise convert to ASCII Bytes"""
if isinstance(data, bytes):
return data
else:
return data.encode('ASCII')
def timestamp_branch_name(fields):
"""Return the first field except 'www', 'igitt', '*stamp*', 'zeitgitter'
'localhost:8080' is returned as 'localhost-8080'"""
for f in fields:
i = f.replace(':', '-')
if (i != '' and i != 'www' and i != 'igitt' and i != 'zeitgitter'
and 'stamp' not in i and valid_name(i)):
return i + '-timestamps'
return 'zeitgitter-timestamps'
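# Examples (illustrative): timestamp_branch_name(['gitta', 'zeitgitter', 'net'])
# returns 'gitta-timestamps', while timestamp_branch_name(['www', 'zeitgitter',
# 'net']) skips 'www' and 'zeitgitter' and returns 'net-timestamps'.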
class DefaultTrueIfPresent(configargparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values is None:
values = True
else:
try:
values = self.convert_default(values)
except ValueError:
raise configargparse.ArgumentError(
self, "Requires boolean value")
setattr(namespace, self.dest, values)
@classmethod
def convert_default(cls, value):
return bool(distutils.util.strtobool(value))
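# Example (illustrative): convert_default('yes') and convert_default('1')
# return True; convert_default('no') and convert_default('0') return False;
# anything else makes distutils.util.strtobool() raise ValueError.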
server_aliases = {
"gitta": "gitta.zeitgitter.net",
"diversity": "diversity.zeitgitter.net"
}
def expanded_aliases():
return ', '.join(map(lambda t: "%s → %s" % t, server_aliases.items()))
def get_args():
"""Parse command line and git config parameters"""
parser = GitArgumentParser(
auto_env_var_prefix='timestamp_',
add_help=False,
description="""Interface to Zeitgitter, the network of
independent GIT timestampers.""",
epilog="""`--tag` takes precedence over `--branch`.
When in doubt, use `--tag` for single/rare timestamping,
and `--branch` for frequent timestamping.
`bool` values can be specified as true/false/yes/no/0/1.
Arguments with optional `bool` options default to true if
the argument is present, false if absent.
Environment variable `ZEITGITTER_FAKE_TIME` can be used for
repeatable tests against a local Zeitgitter server under test,
see there.""")
parser.add('--help', '-h',
action='help',
help="""Show this help message and exit. When called as
'git timestamp' (space, not dash), use `-h`, as `--help` is
captured by `git` itself.""")
parser.add('--version',
action='version',
version="git timestamp v%s" % VERSION,
help="Show program's version number and exit")
parser.add('--tag',
help="Create a new timestamped tag named TAG")
parser.add('--branch',
gitopt='timestamp.branch',
help="""Create a timestamped commit in branch BRANCH,
with identical contents as the specified commit.
Default name derived from servername, appending
`-timestamps`, and, possibly, by the effects of
`--append-branch-name`.""")
parser.add('--server',
default='https://gitta.zeitgitter.net',
gitopt='timestamp.server',
help="""Comma-separated list of Zeitgitter servers to obtain timestamps from. 'https://'
is optional. The following aliases are supported: """
+ expanded_aliases())
parser.add('--interval',
default='0s',
gitopt='timestamp.interval',
help="""Delay between timestamping against the different
timestampers. For consistent ordering of timestamps,
set this to at least <maximum clock skew>+1s.""")
parser.add('--append-branch-name',
default=True,
action=DefaultTrueIfPresent,
metavar='bool',
gitopt='timestamp.append-branch-name',
help="""Whether to append the branch name of the current branch
to the timestamp branch name, i.e., create per-branch
timestamp branches. (Default branch name will never be
appended.)""")
parser.add('--default-branch',
gitopt='timestamp.defaultBranch',
default="main,master",
help="""Comma-separated list of default branch names, i.e.
those to which the branch name will not automatically be
appended. `git config init.defaultBranch`, if it exists,
is always appended to this list.""")
parser.add('--gnupg-home',
gitopt='timestamp.gnupg-home',
help="Where to store timestamper public keys")
parser.add('--enable',
nargs='?',
action=DefaultTrueIfPresent,
metavar='bool',
gitopt='timestamp.enable',
help="""Forcibly enable/disable timestamping operations; mainly
for use in `git config`""")
parser.add('--require-enable',
action='store_true',
help="""Disable operation unless `git config timestamp.enable`
has explicitely been set to true""")
parser.add('--quiet', '-q',
nargs='?',
action=DefaultTrueIfPresent,
metavar='bool',
gitopt='timestamp.quiet',
help="Suppress diagnostic messages, only print fatal errors")
parser.add('commit',
nargs='?',
default='HEAD',
metavar='COMMIT',
gitopt='timestamp.commit-branch',
help="""Which commit-ish to timestamp. Must be a branch name
for branch timestamps with `--append-branch-name`""")
arg = parser.parse_args()
arg.interval = deltat.parse_time(arg.interval)
arg.default_branch = arg.default_branch.split(',')
try:
arg.default_branch.append(repo.config['init.defaultBranch'])
except KeyError:
pass
if arg.enable == False:
sys.exit("Timestamping explicitely disabled")
if arg.require_enable and arg.enable != True:
sys.exit("Timestamping not explicitely enabled")
return arg
def ensure_gnupg_ready_for_scan_keys():
"""`scan_keys()` on older GnuPG installs returns an empty list when
`~/.gnupg/pubring.kbx` has not yet been created. `list_keys()` or most
other commands will create it. The query below is chosen to match no key (for speed).
Probing for the existence of `pubring.kbx` would be faster, but would
require guessing the path of GnuPG-Home."""
gpg.list_keys(keys='arbitrary.query@creates.keybox')
def validate_key_and_import(text, args):
"""Is this a single key? Then import it"""
ensure_gnupg_ready_for_scan_keys()
f = tempfile.NamedTemporaryFile(mode='w', delete=False)
f.write(text)
f.close()
info = gpg.scan_keys(f.name)
os.unlink(f.name)
if len(info) != 1 or info[0]['type'] != 'pub' or len(info[0]['uids']) == 0:
sys.exit("Invalid key returned\n"
"Maybe not a Zeitgitter server or ~/.gnupg permission problem")
res = gpg.import_keys(text)
count = res.count # pylint: disable=maybe-no-member
if count == 1 and not args.quiet:
print("Imported new key %s: %s" %
(info[0]['keyid'], info[0]['uids'][0]))
return (info[0]['keyid'], info[0]['uids'][0])
def get_global_config_if_possible():
"""Try to return global git configuration, which normally lies in
`~/.gitconfig`.
However (https://github.com/libgit2/pygit2/issues/915),
`get_global_config()` fails, if the underlying file does not
exist yet. (The [paths may be
determined](https://github.com/libgit2/pygit2/issues/915#issuecomment-503300141)
by
`pygit2.option(pygit2.GIT_OPT_GET_SEARCH_PATH, pygit2.GIT_CONFIG_LEVEL_GLOBAL)`
and similar.)
Therefore, we do not simply `touch ~/.gitconfig` first, but
1. try `get_global_config()` (raises `IOError` in Python2, `OSError`
in Python3),
2. try `get_xdg_config()` (relying on the alternative global location
`$XDG_CONFIG_HOME/git/config`, typically aka `~/.config/git/config`
(this might fail due to the file not being there either (`OSError`,
`IOError`), or because the installed `libgit2`/`pygit2` is too old
(`AttributeError`; function added in 2014 only),
3. `touch ~/.gitconfig` and retry `get_global_config()`, and, as fallback
4. use the repo's `.git/config`, which should always be there."""
try:
return git.Config.get_global_config() # 1
except (IOError, OSError):
try:
return git.Config.get_xdg_config() # 2
except (IOError, OSError, AttributeError):
try:
sys.stderr.write("INFO: Creating global .gitconfig\n")
with open(os.path.join(
git.option( # pylint: disable=maybe-no-member
git.GIT_OPT_GET_SEARCH_PATH, # pylint: disable=maybe-no-member
git.GIT_CONFIG_LEVEL_GLOBAL), # pylint: disable=maybe-no-member
'.gitconfig'), 'a'):
pass
return git.Config.get_global_config() # 3
except (IOError, OSError):
sys.stderr.write("INFO: Cannot record key ID in global config,"
" falling back to repo config\n")
return repo.config # 4
# Not reached
def get_keyid(args):
"""Return keyid/fullname from git config, if known.
Otherwise, request it from server and remember TOFU-style"""
keyname = args.server
if keyname.startswith('http://'):
keyname = keyname[7:]
elif keyname.startswith('https://'):
keyname = keyname[8:]
while keyname.endswith('/'):
keyname = keyname[0:-1]
# Replace everything outside 0-9a-z with '-':
keyname = ''.join(map(lambda x:
x if (x >= '0' and x <= '9') or (x >= 'a' and x <= 'z') else '-', keyname))
try:
keyid = repo.config['timestamper.%s.keyid' % keyname]
keys = gpg.list_keys(keys=keyid)
if len(keys) == 0:
sys.stderr.write("WARNING: Key %s missing in keyring;"
" refetching timestamper key\n" % keyid)
raise KeyError("GPG Key not found") # Evil hack
return (keyid, repo.config['timestamper.%s.name' % keyname])
except KeyError:
# Obtain key in TOFU fashion and remember keyid
r = requests.get(args.server, params={'request': 'get-public-key-v1'},
timeout=30)
quit_if_http_error(args.server, r)
(keyid, name) = validate_key_and_import(r.text, args)
if not os.getenv('FORCE_GIT_REPO_CONFIG'):
gcfg = get_global_config_if_possible()
else:
gcfg = repo.config
gcfg['timestamper.%s.keyid' % keyname] = keyid
gcfg['timestamper.%s.name' % keyname] = name
return (keyid, name)
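# For example, for the default server https://gitta.zeitgitter.net the key ID
# ends up under `timestamper.gitta-zeitgitter-net.keyid` and the signer name
# under `timestamper.gitta-zeitgitter-net.name` (dots are mapped to '-').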
def sig_time():
"""Current time, unless in test mode"""
return int(os.getenv('ZEITGITTER_FAKE_TIME', time.time()))
def validate_timestamp(stamp):
"""Is this timestamp within ±30 s of now?"""
now = sig_time()
# Allow a ±30 s window
return stamp > now - 30 and stamp < now + 30
def time_str(seconds):
"""Format Unix timestamp in ISO format"""
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(seconds))
def validate_timestamp_zone_eol(header, text, offset):
"""Does this line end with a current timestamp and GMT?
Returns start of next line."""
stamp = text[offset:offset + 10]
try:
istamp = int(stamp)
sigtime = sig_time()
if not validate_timestamp(istamp):
sys.exit("Ignoring returned %s timestamp (%s) as possible falseticker\n"
"(off by %d seconds compared to this computer's time; check clock)"
% (header, time_str(istamp), istamp - sigtime))
except ValueError:
sys.exit("Returned %s timestamp '%s' is not a number" %
(header, stamp))
tz = text[offset + 10:offset + 17]
if tz != ' +0000\n':
sys.exit("Returned %s timezone is not GMT or not at end of line,\n"
"but '%s' instead of '%s'"
% (header, repr(tz), repr(' +0000\n')))
return offset + 17
def verify_signature_and_timestamp(keyid, signed, signature, args):
"""Is the signature valid
and the signature timestamp within range as well?"""
f = tempfile.NamedTemporaryFile(mode='w', delete=False)
f.write(signature)
f.close()
verified = gpg.verify_data(f.name, signed)
if not verified.valid:
sys.exit("Not a valid OpenPGP signature")
os.remove(f.name)
if not validate_timestamp(int(verified.sig_timestamp)):
sigtime = sig_time()
sys.exit("Signature timestamp (%d, %s) too far off now (%d, %s)" %
(verified.sig_timestamp, time_str(verified.sig_timestamp),
sigtime, time_str(sigtime)))
if keyid != verified.key_id and keyid != verified.pubkey_fingerprint:
sys.exit("Received signature with key ID %s; but expected %s -- refusing" %
(verified.key_id, keyid))
def validate_tag(text, commit, keyid, name, args):
"""Check this tag head to toe"""
if len(text) > 8000:
sys.exit("Returned tag too long (%d > 8000)" % len(text))
if not re.match('^[ -~\n]*$', text, re.MULTILINE):
sys.exit("Returned tag does not only contain ASCII chars")
lead = '''object %s
type commit
tag %s
tagger %s ''' % (commit.id, args.tag, name)
if not text.startswith(lead):
sys.exit("Expected signed tag to start with:\n"
"> %s\n\nInstead, it started with:\n> %s\n"
% (lead.replace('\n', '\n> '), text.replace('\n', '\n> ')))
pos = validate_timestamp_zone_eol('tagger', text, len(lead))
if text[pos] != '\n':
sys.exit("Signed tag has unexpected data after 'tagger' header")
pgpstart = text.find('\n-----BEGIN PGP SIGNATURE-----\n\n', len(lead))
if pgpstart >= 0:
signed = asciibytes(text[:pgpstart + 1])
signature = text[pgpstart + 1:]
verify_signature_and_timestamp(keyid, signed, signature, args)
else:
sys.exit("No OpenPGP signature found")
def quit_if_http_error(server, r):
if r.status_code == 301:
sys.exit("Timestamping server URL changed from %s to %s\n"
"Please change this on the command line(s) or run\n"
" git config [--global] timestamp.server %s"
% (server, r.headers['Location'], r.headers['Location']))
if r.status_code != 200:
sys.exit("Timestamping request failed; server responded with %d %s"
% (r.status_code, r.reason))
def timestamp_tag(repo, keyid, name, args):
"""Obtain and add a signed tag"""
try:
commit = repo.revparse_single(args.commit)
except KeyError as e:
sys.exit("No such revision: '%s'" % (e,))
if not valid_name(args.tag):
sys.exit("Tag name '%s' is not valid for timestamping" % args.tag)
try:
r = repo.lookup_reference('refs/tags/' + args.tag)
sys.exit("Tag '%s' already in use" % args.tag)
except KeyError:
pass
try:
r = requests.post(args.server,
data={
'request': 'stamp-tag-v1',
'commit': commit.id,
'tagname': args.tag
}, allow_redirects=False)
quit_if_http_error(args.server, r)
validate_tag(r.text, commit, keyid, name, args)
tagid = repo.write(
git.GIT_OBJ_TAG, # pylint: disable=maybe-no-member
r.text)
repo.create_reference('refs/tags/%s' % args.tag, tagid)
except requests.exceptions.ConnectionError as e:
sys.exit("Cannot connect to server: %s" % e)
def validate_branch(text, keyid, name, data, args):
"""Check this branch commit head to toe"""
if len(text) > 8000:
sys.exit("Returned branch commit too long (%d > 8000)" % len(text))
if not re.match('^[ -~\n]*$', text, re.MULTILINE):
sys.exit("Returned branch commit does not only contain ASCII chars")
lead = 'tree %s\n' % data['tree']
if 'parent' in data:
lead += 'parent %s\n' % data['parent']
lead += '''parent %s
author %s ''' % (data['commit'], name)
if not text.startswith(lead):
sys.exit("Expected signed branch commit to start with:\n"
"> %s\n\nInstead, it started with:\n> %s\n"
% (lead.replace('\n', '\n> '), text.replace('\n', '\n> ')))
pos = validate_timestamp_zone_eol('tagger', text, len(lead))
follow = 'committer %s ' % name
if not text[pos:].startswith(follow):
sys.exit("Committer in signed branch commit does not match")
pos = validate_timestamp_zone_eol('committer', text, pos + len(follow))
if not text[pos:].startswith('gpgsig '):
sys.exit("Signed branch commit missing 'gpgsig' after 'committer'")
sig = re.match('^-----BEGIN PGP SIGNATURE-----\n \n'
'[ -~\n]+\n -----END PGP SIGNATURE-----\n\n',
text[pos + 7:], re.MULTILINE)
if not sig:
sys.exit("Incorrect OpenPGP signature in signed branch commit")
signature = sig.group()
# Everything except the signature
signed = asciibytes(text[:pos] + text[pos + 7 + sig.end() - 1:])
signature = signature.replace('\n ', '\n')
verify_signature_and_timestamp(keyid, signed, signature, args)
def valid_name(name):
"""Can be sanely, universally stored as file name.
pygit2.reference_is_valid_name() would be better, but is too new
[(2018-10-17)](https://github.com/libgit2/pygit2/commit/1a389cc0ba360f1fd53f1352da41c6a2fae92a66)
to rely on being available."""
return (re.match('^[_a-z][-._a-z0-9]{,99}$', name, re.IGNORECASE)
and '..' not in name and not '\n' in name)
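# Examples (illustrative): valid_name('gitta-timestamps') is truthy, while
# valid_name('8080-timestamps') and valid_name('a..b') are falsy (a name must
# start with a letter or '_', contain no '..', and be at most 100 characters).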
def append_branch_name(repo, commit_name, branch_name, default_branches):
"""Appends current branch name if not the default branch"""
explanation = "for (implicit) options `--branch` and `--append-branch-name`"
if commit_name == 'HEAD':
try:
comref = repo.lookup_reference(commit_name)
comname = comref.target
except git.InvalidSpecError: # pylint: disable=maybe-no-member
# 1. If HEAD or it's target is invalid, we end up here
sys.exit("Invalid HEAD " + explanation)
# Two more options remain:
# 2. If HEAD points to a branch, then we now have its name (a `str`
# starting with 'refs/heads/') and can proceed;
# 3. if it is detached, it points to a commit (a `Oid`) and we fail;
# 4. there might be some other cases, which should fail as well.
# To be able to test for case 2, we convert `comname` to `str`.
if str(comname).startswith('refs/heads/'):
comname = comname[len('refs/heads/'):]
else:
sys.exit(("HEAD must point to branch, not %s\n" + explanation)
% comname)
else:
# 5. Explicit and non-HEAD commit given; check for branch name only: proceed;
try:
comref = repo.lookup_reference('refs/heads/' + commit_name)
comname = commit_name # Branch name itself
except (KeyError, git.InvalidSpecError): # pylint: disable=maybe-no-member
# 6. Explicit commit given, but it's neither HEAD nor tail^H^H^H^H
# a branch: fail
sys.exit(("%s must be a branch name " + explanation)
% commit_name)
# Now that we know which branch to timestamp (to), construct it.
if comname in default_branches:
return branch_name
else:
extended_name = "%s-%s" % (branch_name, comname)
if valid_name(extended_name):
return extended_name
else:
sys.exit(("Branch name %s is not valid for timestamping\n"
"(constructed from base timestamp branch %s and "
"source branch %s)\n" + explanation)
% (extended_name, branch_name, comname))
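# Example (illustrative): timestamping branch 'feature-x' with timestamp branch
# 'gitta-timestamps' yields 'gitta-timestamps-feature-x'; timestamping 'main'
# or 'master' (default branches) keeps the plain name 'gitta-timestamps'.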
def timestamp_branch(repo, keyid, name, args, first):
"""Obtain and add branch commit; create/update branch head"""
# If the base name is already invalid, it cannot become valid by appending
if not valid_name(args.branch):
sys.exit("Branch name %s is not valid for timestamping" %
args.branch)
if args.append_branch_name:
args.branch = append_branch_name(repo, args.commit, args.branch, args.default_branch)
try:
commit = repo.revparse_single(args.commit)
except KeyError as e:
sys.exit("No such revision: '%s'" % (e,))
branch_head = None
data = {
'request': 'stamp-branch-v1',
'commit': commit.id,
'tree': commit.tree.id
}
try:
branch_head = repo.lookup_reference('refs/heads/' + args.branch)
if branch_head.target == commit.id:
# Would create a merge commit with the same parent twice
sys.exit("Cannot timestamp head of timestamp branch to itself")
data['parent'] = branch_head.target
try:
if (repo[branch_head.target].parent_ids[0] == commit.id or
repo[branch_head.target].parent_ids[1] == commit.id):
sys.exit("Already timestamped commit %s to branch %s" %
(commit.id.hex, args.branch))
except IndexError:
pass
except KeyError:
pass
if not first:
time.sleep(args.interval.total_seconds())
try:
r = requests.post(args.server, data=data, allow_redirects=False)
quit_if_http_error(args.server, r)
validate_branch(r.text, keyid, name, data, args)
commitid = repo.write(
git.GIT_OBJ_COMMIT, # pylint: disable=maybe-no-member
r.text)
repo.create_reference('refs/heads/' + args.branch,
commitid, force=True)
except requests.exceptions.ConnectionError as e:
sys.exit("Cannot connect to server: %s" % e)
def main():
global repo, gpg
requests.__title__ = 'git-timestamp/%s %s' % (VERSION, requests.__title__)
try:
# Depending on the version of pygit2, `git.discover_repository()`
# returns `None` or raises `KeyError`
path = git.discover_repository( # pylint: disable=maybe-no-member
os.getcwd())
except KeyError:
path = None
if path is not None:
repo = git.Repository(path)
else:
repo = None
args = get_args()
# Only check after parsing the arguments, so --version and --help work
if repo is None:
sys.exit("Not a git repository")
try:
gpg = gnupg.GPG(gnupghome=args.gnupg_home)
except TypeError:
traceback.print_exc()
sys.exit("*** `git timestamp` needs `python-gnupg`"
" module from PyPI, not `gnupg`\n"
" Possible remedy: `pip uninstall gnupg;"
" pip install python-gnupg`\n"
" (try `pip2`/`pip3` if it does not work with `pip`)")
if args.tag is not None or args.branch is not None:
# Single tag or branch against one timestamping server
if ',' in args.server:
(server, _) = args.server.split(',', 1)
args.server = server
print(f"WARNING: Cannot timestamp single tag/branch against"
f" multiple servers;\nonly timestamping against {server}")
(keyid, name) = get_keyid(args)
if args.tag:
timestamp_tag(repo, keyid, name, args)
else:
timestamp_branch(repo, keyid, name, args, True)
else:
# Automatic branch, with support for multiple timestamping servers
success = True
first = True
for server in args.server.split(','):
if server in server_aliases:
server = server_aliases[server]
if ':' not in server:
server = 'https://' + server
fields = server.replace('/', '.').split('.')
args.branch = timestamp_branch_name(fields[1:])
args.server = server
try:
(keyid, name) = get_keyid(args)
timestamp_branch(repo, keyid, name, args, first)
first = False # Only on successful timestamp
except SystemExit as e:
sys.stderr.write(e.code + '\n')
success = False
if not success:
sys.exit(1)
if __name__ == "__main__":
main()
| 41.868307 | 103 | 0.587432 | 3,555 | 28,931 | 4.708861 | 0.198312 | 0.01589 | 0.009677 | 0.010753 | 0.201075 | 0.151314 | 0.130645 | 0.090502 | 0.079092 | 0.073238 | 0 | 0.009598 | 0.294148 | 28,931 | 690 | 104 | 41.928986 | 0.809951 | 0.186305 | 0 | 0.209213 | 0 | 0 | 0.283112 | 0.013133 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049904 | false | 0.011516 | 0.03071 | 0.003839 | 0.130518 | 0.009597 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac1bdcd4d5d7d445d4e50d14f8bb0137e1a3a22 | 7,335 | py | Python | src/environments/finite_diff_wave.py | jaberkow/Insight_Project | 5c24e39fa5ab949e5a99231758ac77d21f566905 | [
"MIT"
] | 6 | 2019-07-10T09:33:44.000Z | 2019-08-28T11:28:15.000Z | src/environments/finite_diff_wave.py | jaberkow/WaveRL | 5c24e39fa5ab949e5a99231758ac77d21f566905 | [
"MIT"
] | 4 | 2019-06-18T00:13:25.000Z | 2019-08-05T11:48:03.000Z | src/environments/finite_diff_wave.py | jaberkow/Insight_Project | 5c24e39fa5ab949e5a99231758ac77d21f566905 | [
"MIT"
] | 3 | 2019-08-15T06:43:31.000Z | 2020-09-03T05:05:17.000Z | """
Some elements of the finite difference routines were adapted from HP Langtangen's wonderful book on the FD method for python:
https://hplgit.github.io/fdm-book/doc/pub/book/html/._fdm-book-solarized001.html
"""
import numpy as np
from scipy.integrate import simps
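# The class below implements the standard explicit three-level scheme for
# u_tt = c^2 * u_xx + f(x) with fixed (u = 0) boundaries:
#   u[i, n+1] = 2*u[i, n] - u[i, n-1]
#               + C^2 * (u[i+1, n] - 2*u[i, n] + u[i-1, n])
#               + dt^2 * f(x[i]),  where C = c*dt/dx is the Courant number;
# `reset` applies the usual modified first step that folds in the initial velocity.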
class Wave1D:
"""
A utility class for simulating the wave equation in 1 dimension using a finite difference scheme
"""
def __init__(self,config):
"""
Constructor for the 1-dimensional wave system
Inputs:
config: A dict containing parameters for the system, which must have the following keys:
time_interval: (float > 0) the temporal interval between time steps
wave_speed: (float > 0) the speed of standing waves on the bridge, related to material tension
system_length: (float > 0) the length of the system
num_lattice_points: (int > 0) how many discrete points along the length of the system to use for
the finite difference scheme
num_force_points: (int > 0) how many pistons the system has
force_width: (float > 0) the width of the Gaussian spread of each piston, as a fraction of the system length
"""
self.dt = config['time_interval']
self.c_speed = config['wave_speed']
self.L = config['system_length']
self.Nx = config['num_lattice_points']
# How many points along the domain can impulse force be applied
self.num_force_points = config['num_force_points']
# Set the locations of the force application
self.force_locations = np.linspace(0.0,self.L,self.num_force_points+2)[1:self.num_force_points+1]
# How wide is the profile of each impulse force, must be > 0
self.force_width = config['force_width']
# Scale the force width by system length
self.force_width *= self.L
# The lattice spacing
self.dx = float(self.L)/float(self.Nx)
# Mesh points in space
self.x_mesh = np.linspace(0.0,self.L,self.Nx+1)
# The courant number
self.C = self.c_speed *self.dt/self.dx
self.C2 = self.C**2 #helper number
# Recalibrate the resolutions to account for rounding
self.dx = self.x_mesh[1] - self.x_mesh[0]
# We set up the conditions of the system before warmup period
# The system is always initially at rest
self.Velocity_0 = lambda x: 0
# We assume the system starts completely flat
self.Initial_Height = lambda x: 0
# Allocate memory for the recursive solution arrays
self.height = np.zeros(self.Nx + 1) # Solution array at new time level
self.height_n = np.zeros(self.Nx + 1) # Solution at 1 time level back
self.height_nm1 = np.zeros(self.Nx + 1) # Solution at 2 time levels back
self.height_traj=[]
self.action_traj=[]
self.reset()
def reset(self):
"""
Resets the state of the wave system
"""
# We reset the time and step index
self.t = 0
self.n = 0
# We set the force vals to zero
self.force_vals = np.zeros(self.num_force_points)
# We set the initial condition of the solution 1 time level back
for i in range(0,self.Nx+1):
self.height_n[i]=self.Initial_Height(self.x_mesh[i])
# We do a special first step for the finite difference scheme
for i in range(1,self.Nx):
self.height[i] =self.height_n[i] + self.dt*self.Velocity_0(self.x_mesh[i])
self.height[i]+=0.5*self.C2*(self.height_n[i-1] - 2*self.height_n[i] + self.height_n[i+1])
self.height[i]+=0.5*(self.dt**2)*self.impulse_term(self.x_mesh[i])
# Force boundary conditions
self.height[0]=0
self.height[self.Nx]=0
# Switch solution steps
self.height_nm1[:] = self.height_n
self.height_n[:] = self.height
def single_step(self):
"""
Run a single step of the wave equation finite difference dynamics
"""
self.t += self.dt
self.n += 1
for i in range(1,self.Nx):
self.height[i] = -self.height_nm1[i] + 2*self.height_n[i]
self.height[i] += self.C2*(self.height_n[i-1] - 2*self.height_n[i] + self.height_n[i+1])
self.height[i] += (self.dt**2)*self.impulse_term(self.x_mesh[i])
# Force boundary conditions
self.height[0] = 0
self.height[self.Nx] = 0
# Switch solution steps
self.height_nm1[:] = self.height_n
self.height_n[:] = self.height
def take_in_action(self,action):
"""
This method acts as the interface where the agent applies an action to environment.
For this simulator, it's simply a setter method for the force_vals attribute that
determine the profile of the impulse term.
"""
self.force_vals = np.copy(action)
def impulse_term(self,x):
"""
The function definition for the active damping terms
Inputs:
x - a scalar, position in the domain
force_vals - A vector of shape (self.num_force_points),
the (signed) values of the force at each piston point
"""
return np.sum(self.force_vals*np.exp(-0.5* ((x-self.force_locations)**2 )/self.force_width))
def get_impulse_profile(self):
"""
A utility function for returning an array representing the shape of the resulting impulse
force, this is used for rendering the history of actions taken by the agent.
Inputs:
force_vals - A vector of shape (self.num_force_points),
the (signed) values of the force at each piston point
"""
profile = []
for i in range(self.Nx+1):
profile.append(self.impulse_term(self.x_mesh[i]))
return np.array(profile)
def get_observation(self):
"""
This is an interface that returns the observation of the system, which is modeled
as the state of the wave system for the current timestep, previous timestep, and
twice previous timestep.
Outputs:
observation - An array of shape (1,self.Nx+1,3). observation[0,:,0]= self.height,
observation[0,:,1]=self.height_n, and observation[0,:,2]=self.height_nm1
"""
observation = np.zeros((1,self.Nx+1,3))
observation[0,:,0]= self.height
observation[0,:,1]=self.height_n
observation[0,:,2]=self.height_nm1
return observation
def energy(self):
"""
Computes the internal energy of the system based upon the integral functional for
the 1-D wave equation. Additionally we add an L2 norm regularizer
See http://web.math.ucsb.edu/~grigoryan/124A/lecs/lec7.pdf for details
"""
dudt = (self.height-self.height_nm1)/self.dt # Time derivative
dudx = np.gradient(self.height,self.x_mesh) # Space derivative
space_term = -self.height*np.gradient(dudx,self.x_mesh) # Alternative tension energy
energy_density = dudt**2 + (self.C**2)*(dudx**2)
energy_density += self.height**2 # Regularize with L2 norm
# Energy_density = dudt**2 + (self.c_speed**2)*space_term
return 0.5*simps(energy_density,self.x_mesh)
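# A minimal usage sketch (the config values below are illustrative only; they
# are chosen so the Courant number C = c*dt/dx stays below 1 for stability):
if __name__ == "__main__":
    demo_config = {
        'time_interval': 0.001,
        'wave_speed': 1.0,
        'system_length': 1.0,
        'num_lattice_points': 100,
        'num_force_points': 3,
        'force_width': 0.05,
    }
    wave = Wave1D(demo_config)
    # Push with the three hypothetical pistons and integrate for 100 steps
    wave.take_in_action(np.array([0.1, -0.2, 0.1]))
    for _ in range(100):
        wave.single_step()
    print("Energy after 100 steps:", wave.energy())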
| 38.809524 | 125 | 0.625767 | 1,071 | 7,335 | 4.189542 | 0.251167 | 0.095832 | 0.039224 | 0.02407 | 0.267662 | 0.242701 | 0.202362 | 0.171607 | 0.171607 | 0.171607 | 0 | 0.020189 | 0.277437 | 7,335 | 188 | 126 | 39.015957 | 0.826415 | 0.46394 | 0 | 0.138889 | 0 | 0 | 0.023222 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.027778 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac23e6adffbb28a348a5b76f4d9393d8fb8087e | 1,977 | py | Python | ejercicios/ahorcado/ahorcado_01.py | carlosviveros/Soluciones | 115f4fa929c7854ca497e4c994352adc64565456 | [
"MIT"
] | 1 | 2022-02-02T04:44:56.000Z | 2022-02-02T04:44:56.000Z | ejercicios/ahorcado/ahorcado_01.py | leugimkm/Soluciones | d71601c8d9b5e86e926f48d9e49462af8a956b6d | [
"MIT"
] | null | null | null | ejercicios/ahorcado/ahorcado_01.py | leugimkm/Soluciones | d71601c8d9b5e86e926f48d9e49462af8a956b6d | [
"MIT"
] | null | null | null | """AyudaEnPython: https://www.facebook.com/groups/ayudapython
Contributor: Carolina Morán
Source:
https://github.com/CarolinaMoran03/Juego-de-ahorcado-con-frase/blob/main/Juego%20de%20ahorcado%20con%20frase
"""
participante=input("Ingrese nombre del participante: ")
print(participante.upper())
def run():
frases = ["Vive tu momento",
"Nunca subestimes el poder de la musica",
"Nunca olvides lo mucho que te ame tu familia", "Amo mi locura", "Que nadie te diga que no","Carlos Rivera", "No me ponga cero inge", "La fuerza estara contigo"]
cantidad = len(frases)
numero = 0
while numero < 1 or numero > cantidad:
numero = int(input("Ingrese el numero de frase que desea revelar (1 al {c}): ".format(c=cantidad)))
frase = frases[numero-1]
patron = ""
for i in frase:
if i == " ":
patron += " "
else:
patron += "_"
patron = list(patron)
presentar(patron)
vidas = 5
cont = 0
a = 10
while vidas > 0:
letra = input("Ingrese letra: ")
x = 0
for i in frase:
if letra.lower() == i.lower():
patron[x] = letra
x += 1
if letra in patron:
print("Felicitaciones ganaste", a, " puntos")
cont += a
presentar(patron)
if "_" not in patron:
print("FELICIDADES",participante.upper(), "Acabas De Adivinar La Frase")
print("Obtuvistes:", cont, " Puntos")
break
if letra not in patron:
vidas -= 1
print("Te Equivocaste, Te Quedan", +vidas, "Intentos")
presentar(patron)
else:
print("Chale",participante.upper(), "Acabas de Perder, Como Cuando La Perdistes A Ella")
print("Tienes:", cont, " Puntos, Gracias Por Participar")
def presentar(patron):
p = ""
for i in patron:
p = p + i
print(p)
if __name__ == "__main__":
run() | 29.507463 | 175 | 0.571067 | 239 | 1,977 | 4.682008 | 0.493724 | 0.053619 | 0.016086 | 0.01966 | 0.023235 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016047 | 0.306525 | 1,977 | 67 | 176 | 29.507463 | 0.800146 | 0.103187 | 0 | 0.137255 | 0 | 0 | 0.293718 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0 | 0 | 0 | 0.039216 | 0.156863 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac34357cfbf72b629548e23a5587b2da7dd9eb3 | 946 | py | Python | core/model/encoder/encoder_base.py | baophuc27/answer-generation | 36ab9f84f8d4df90abd2bd0255a5229afbd65892 | [
"MIT"
] | 3 | 2021-03-25T12:29:49.000Z | 2021-06-14T13:15:49.000Z | core/model/encoder/encoder_base.py | baophuc27/answer-generation | 36ab9f84f8d4df90abd2bd0255a5229afbd65892 | [
"MIT"
] | null | null | null | core/model/encoder/encoder_base.py | baophuc27/answer-generation | 36ab9f84f8d4df90abd2bd0255a5229afbd65892 | [
"MIT"
] | null | null | null | import torch.nn as nn
from abc import ABC,abstractmethod
class EncoderBase(nn.Module):
@abstractmethod
def __init__(self,pretrained_emb,__C):
"""Constructor of encoder module should take pretrained embedding as
an argument because of later comparison of different types of embeddings.
Args:
pretrained_emb ([Tensor]): Extracted pretrained embedding.
__C (object): Config object
"""
super(EncoderBase,self).__init__()
self.pretrained_emb = pretrained_emb
self.__C = __C
@abstractmethod
def forward(self,question,answer):
"""Base encoder method in full answer generation
Args:
question ([Tensor]): Index of questions after tokenized and padded
answer ([Tensor]): Index of answers after tokenized and padded
Raises:
NotImplementedError
"""
raise NotImplementedError
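# A minimal concrete subclass sketch (illustrative only: the LSTM layer, its
# hidden size, and the returned tuple are assumptions, not part of the
# original project):
class LSTMEncoderExample(EncoderBase):
    def __init__(self, pretrained_emb, __C, hidden_size=512):
        super(LSTMEncoderExample, self).__init__(pretrained_emb, __C)
        # Shared embedding layer built from the extracted pretrained embedding
        self.embedding = nn.Embedding.from_pretrained(pretrained_emb)
        self.lstm = nn.LSTM(pretrained_emb.size(1), hidden_size, batch_first=True)

    def forward(self, question, answer):
        # Encode both token-index sequences with the same LSTM
        q_out, _ = self.lstm(self.embedding(question))
        a_out, _ = self.lstm(self.embedding(answer))
        return q_out, a_out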
| 30.516129 | 81 | 0.650106 | 100 | 946 | 5.95 | 0.54 | 0.087395 | 0.060504 | 0.070588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.283298 | 946 | 30 | 82 | 31.533333 | 0.877581 | 0.4926 | 0 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.181818 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac3496c6293aef55801f31bfe043059c6610afa | 467 | py | Python | pmaf/biome/survey/_metakit.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
] | 1 | 2021-07-02T06:24:17.000Z | 2021-07-02T06:24:17.000Z | pmaf/biome/survey/_metakit.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
] | 1 | 2021-06-28T12:02:46.000Z | 2021-06-28T12:02:46.000Z | pmaf/biome/survey/_metakit.py | mmtechslv/PhyloMAF | bab43dd4a4d2812951b1fdf4f1abb83edb79ea88 | [
"BSD-3-Clause"
] | null | null | null | from abc import abstractmethod
from pmaf.biome._metakit import BiomeFeatureMetabase, BiomeSampleMetabase
class BiomeSurveyBackboneMetabase(BiomeFeatureMetabase, BiomeSampleMetabase):
@abstractmethod
def to_assembly(self):
pass
@property
@abstractmethod
def essentials(self):
pass
@property
@abstractmethod
def assemblies(self):
pass
@property
@abstractmethod
def controller(self):
pass
| 19.458333 | 77 | 0.704497 | 39 | 467 | 8.384615 | 0.512821 | 0.207951 | 0.146789 | 0.275229 | 0.302752 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.239829 | 467 | 23 | 78 | 20.304348 | 0.921127 | 0 | 0 | 0.611111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0.222222 | 0.111111 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 4 |
5ac34cb506d949289482e24d712032fea0a5bf81 | 5,912 | py | Python | python/pynamics/frame.py | zmpatel19/Foldable-Robotics | 97590ec7d173cc1936cc8ff0379b16ad63bcda23 | [
"MIT"
] | 2 | 2018-08-20T22:01:18.000Z | 2021-04-19T00:50:56.000Z | python/pynamics/frame.py | zmpatel19/Foldable-Robotics | 97590ec7d173cc1936cc8ff0379b16ad63bcda23 | [
"MIT"
] | 3 | 2017-10-24T03:10:17.000Z | 2017-10-24T03:15:27.000Z | python/pynamics/frame.py | zmpatel19/Foldable-Robotics | 97590ec7d173cc1936cc8ff0379b16ad63bcda23 | [
"MIT"
] | 2 | 2017-03-03T23:04:17.000Z | 2021-03-20T20:33:53.000Z | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import pynamics
from pynamics.tree_node import TreeNode
from pynamics.vector import Vector
from pynamics.rotation import Rotation, RotationalVelocity
from pynamics.name_generator import NameGenerator
from pynamics.quaternion import Quaternion
import sympy
class Frame(NameGenerator):
def __init__(self,name,system):
super(Frame,self).__init__()
self.connections={}
self.connections['R'] = {}
self.connections['w'] = {}
self.precomputed={}
self.precomputed['R'] = {}
self.precomputed['w'] = {}
self.tree={}
self.tree['R'] = TreeNode(self)
self.tree['w'] = TreeNode(self)
self.reps = {}
self.name = name
self.x = Vector()
self.y = Vector()
self.z = Vector()
self.x_sym = sympy.Symbol(name+'.x')
self.y_sym = sympy.Symbol(name+'.y')
self.z_sym = sympy.Symbol(name+'.z')
self.syms = sympy.Matrix([self.x_sym,self.y_sym,self.z_sym])
self.x.add_component(self,[1,0,0])
self.y.add_component(self,[0,1,0])
self.z.add_component(self,[0,0,1])
r = Rotation(self,self,sympy.Matrix.eye(3),Quaternion(0,0,0,0))
w = RotationalVelocity(self,self,sympy.Number(0)*self.x,Quaternion(0,0,0,0))
self.add_generic(r,'R')
self.add_generic(w,'w')
self.system = system
self.system.add_frame(self)
def add_generic(self,rotation,my_type):
self.connections[my_type][rotation.other(self)] = rotation
def add_precomputed_generic(self,rotation,my_type):
self.precomputed[my_type][rotation.other(self)] = rotation
@property
def principal_axes(self):
return [self.x,self.y,self.z]
def __str__(self):
return self.name
def __repr__(self):
return str(self)
def get_generic(self,other,my_type):
if other in self.connections[my_type]:
return self.connections[my_type][other]
elif other in self.precomputed[my_type]:
return self.precomputed[my_type][other]
else:
path = self.tree['R'].path_to(other.tree['R'])
path = [item.myclass for item in path]
from_frames = path[:-1]
to_frames = path[1:]
if my_type=='R':
items = [from_frame.connections[my_type][to_frame].get_r_to(to_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
q_items = [from_frame.connections[my_type][to_frame].get_rq_to(to_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
elif my_type=='w':
items = [from_frame.connections[my_type][to_frame].get_w_to(to_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
item_final= items.pop(0)
if my_type=='R':
q_item_final= q_items.pop(0)
for item,to_frame in zip(items,to_frames[1:]):
item_final = item*item_final
for q_item,to_frame in zip(q_items,to_frames[1:]):
q_item_final = q_item*q_item_final
result = Rotation(self,to_frame,item_final,q_item_final)
elif my_type=='w':
for item,to_frame in zip(items,to_frames[1:]):
item_final += item
result = RotationalVelocity(self,to_frame,item_final,Quaternion(0,0,0,0))
self.add_precomputed_generic(result,my_type)
to_frame.add_precomputed_generic(result,my_type)
return result
def get_r_to(self,other):
return self.get_generic(other,'R').get_r_to(other)
def get_r_from(self,other):
return self.get_generic(other,'R').get_r_from(other)
def get_rq_to(self,other):
return self.get_generic(other,'R').get_rq_to(other)
def get_rq_from(self,other):
return self.get_generic(other,'R').get_rq_from(other)
def get_w_from(self,other):
return self.get_generic(other,'w').get_w_from(other)
def get_w_to(self,other):
return self.get_generic(other,'w').get_w_to(other)
def set_generic(self,other,item,my_type):
if my_type=='R':
result = Rotation(self, other, item,Quaternion(0,0,0,0))
elif my_type=='w':
result = RotationalVelocity(self, other, item,Quaternion(0,0,0,0))
self.add_generic(result,my_type)
other.add_generic(result,my_type)
def set_parent_generic(self,parent,item,my_type):
self.set_generic(parent,item,my_type)
parent.tree[my_type].add_branch(self.tree[my_type])
def set_child_generic(self,child,item,my_type):
self.set_generic(child,item,my_type)
self.tree[my_type].add_branch(child.tree[my_type])
def set_w(self,other,w):
self.set_child_generic(other,w,'w')
def rotate_fixed_axis(self,fromframe,axis,q,system):
import pynamics.misc_tools
if not all([pynamics.misc_tools.is_literal(item) for item in axis]):
raise(Exception('not all axis variables are constant'))
rotation = Rotation.build_fixed_axis(fromframe,self,axis,q,system)
rotational_velocity = RotationalVelocity.build_fixed_axis(fromframe,self,axis,q,system)
self.set_parent_generic(fromframe,rotation,'R')
self.set_parent_generic(fromframe,rotational_velocity,'w')
self.add_generic(rotation,'R')
self.add_generic(rotational_velocity,'w')
fromframe.add_generic(rotation,'R')
fromframe.add_generic(rotational_velocity,'w')
fromframe.tree['R'].add_branch(self.tree['R'])
fromframe.tree['w'].add_branch(self.tree['w'])
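# A minimal usage sketch (the `system` object and the generalized coordinate
# `qA` are assumptions about the surrounding pynamics setup, not taken from
# this file):
#
#   N = Frame('N', system)                          # reference frame
#   A = Frame('A', system)
#   A.rotate_fixed_axis(N, [0, 0, 1], qA, system)   # rotate A about N's z axis
#   R = A.get_r_to(N)                               # rotation matrix A -> N
#   w = A.get_w_from(N)                             # angular velocity of A in N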
| 36.493827 | 156 | 0.625169 | 826 | 5,912 | 4.238499 | 0.12954 | 0.053128 | 0.008569 | 0.020566 | 0.402742 | 0.32048 | 0.232791 | 0.226792 | 0.17595 | 0.140817 | 0 | 0.008792 | 0.249662 | 5,912 | 161 | 157 | 36.720497 | 0.780433 | 0.019452 | 0 | 0.067227 | 0 | 0 | 0.012785 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151261 | false | 0 | 0.067227 | 0.07563 | 0.327731 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac3d5779a8d78c93d249f8739858eed7b56674a | 6,828 | py | Python | couchbase_core/mapper.py | couchbase/couchbase-python-client | 99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | [
"Apache-2.0"
] | 189 | 2015-01-07T18:34:31.000Z | 2022-03-21T17:41:56.000Z | couchbase_core/mapper.py | couchbase/couchbase-python-client | 99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | [
"Apache-2.0"
] | 24 | 2015-05-19T14:00:16.000Z | 2022-03-16T22:01:30.000Z | couchbase_core/mapper.py | couchbase/couchbase-python-client | 99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | [
"Apache-2.0"
] | 60 | 2015-03-10T22:12:50.000Z | 2022-03-07T21:57:40.000Z | from typing import *
import enum
import datetime
import warnings
from couchbase.exceptions import InvalidArgumentException
Src = TypeVar('Src')
Dest = TypeVar('Dest')
Functor = TypeVar('Functor', bound=Callable[[Src], Dest])
SrcToDest = TypeVar('SrcToDest', bound=Callable[[Src], Dest])
DestToSrc = TypeVar('DestToSrc', bound=Callable[[Dest], Src])
class Bijection(Generic[Src, Dest, SrcToDest, DestToSrc]):
def __init__(
self,
src_to_dest, # type: SrcToDest
dest_to_src=None, # type: DestToSrc
parent=None # type: Bijection[Dest,Src]
):
# type: (...) -> None
"""
Bijective mapping for JSON serialisation/deserialisation
:param src_to_dest: callable to convert Src type to Dest
:param dest_to_src: callable to convert Dest type to Src
:param parent: internal use only - used to construct the inverse
"""
self._src_to_dest = src_to_dest
if parent:
self._inverse = parent
else:
self._inverse = Bijection(dest_to_src, parent=self)
def __neg__(self):
# type: (...) -> Bijection[Dest,Src]
"""
Generate the inverse of this bijection (Dest to Src)
:return: the inverse of this bijection
"""
return self._inverse
def __call__(self,
src # type: Src
):
# type: (...) -> Dest
"""
Apply the Src-to-Dest transform to src
:param src: source to be transformed
:return: transformed data as type Dest
"""
return self._src_to_dest(src)
def identity(input: Src) -> Src:
return input
class Identity(Bijection[Src, Src, identity, identity]):
def __init__(self, type: Type[Src]):
self._type = type
super(Identity, self).__init__(self, self)
def __call__(self, x: Src) -> Src:
if not isinstance(x, self._type):
raise InvalidArgumentException(
"Argument must be of type {} but got {}".format(
self._type, x))
return x
Enum_Type = TypeVar('Enum_Type', bound=enum.Enum)
class EnumToStr(Generic[Enum_Type]):
def __init__(self, type: Type[Enum_Type], enforce=True):
self._type = type
self._enforce = enforce
def __call__(self, src: Enum_Type) -> str:
if not self._enforce and isinstance(
src, str) and src in map(lambda x: x.value, self._type):
warnings.warn("Using deprecated string parameter {}".format(src))
return src
if not isinstance(src, self._type):
raise InvalidArgumentException(
"Argument must be of type {} but got {}".format(
self._type, src))
return src.value
class StrToEnum(Generic[Enum_Type]):
def __init__(self, type: Enum_Type):
self._type = type
def __call__(self, dest: str
) -> Enum_Type:
return self._type(dest)
class StringEnum(
Bijection[Enum_Type, str, EnumToStr[Enum_Type], StrToEnum[Enum_Type]]):
def __init__(self, type: Type[Enum_Type]):
super(StringEnum, self).__init__(EnumToStr(type), StrToEnum(type))
class StringEnumLoose(
Bijection[Enum_Type, str, EnumToStr[Enum_Type], StrToEnum[Enum_Type]]):
def __init__(self, type: Type[Enum_Type]):
"""
Like StringEnum bijection, but allows use of string constants as src (falling back to identity transform)
:param type: type of enum
"""
super(
StringEnumLoose,
self).__init__(
EnumToStr(
type,
False),
StrToEnum(type))
NumberType = TypeVar('NumberType', bound=Union[float, int])
class TimedeltaToSeconds(object):
def __init__(self, dest_type: Type[NumberType]):
self._numtype = dest_type
def __call__(self, td: datetime.timedelta) -> float:
if isinstance(td, (float, int)):
return self._numtype(td)
return self._numtype(td.total_seconds())
def _seconds_to_timedelta(seconds: NumberType) -> datetime.timedelta:
try:
return datetime.timedelta(seconds=seconds)
except (OverflowError, ValueError) as e:
raise InvalidArgumentException(
"Invalid duration arg: {} ".format(seconds)) from e
class Timedelta(Bijection[datetime.timedelta, NumberType,
TimedeltaToSeconds, _seconds_to_timedelta]):
def __init__(self, dest_type: Type[NumberType]):
super(
Timedelta,
self).__init__(
TimedeltaToSeconds(dest_type),
_seconds_to_timedelta)
class Division(Bijection[float, float, float.__mul__, float.__mul__]):
def __init__(self, divisor):
super(Division, self).__init__((1 / divisor).__mul__, divisor.__mul__)
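# e.g. Division(1000.0) maps src -> dest by multiplying with 1/1000, and its
# inverse, -Division(1000.0), maps dest -> src by multiplying with 1000.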
Orig_Mapping = TypeVar(
'OrigMapping', bound=Mapping[str, Mapping[str, Bijection]])
class BijectiveMapping(object):
def __init__(self,
fwd_mapping: Orig_Mapping
):
"""
Bijective mapping for JSON serialisation/deserialisation.
Will calculate the reverse mapping of the given forward mapping.
:param fwd_mapping: the forward mapping from Src to Dest
"""
self.mapping = dict()
self.reverse_mapping = dict()
for src_key, transform_dict in fwd_mapping.items():
self.mapping[src_key] = {}
for dest_key, transform in transform_dict.items():
self.mapping[src_key][dest_key] = transform
self.reverse_mapping[dest_key] = {src_key: -transform}
@staticmethod
def convert(mapping: Orig_Mapping,
raw_info: Mapping[str, Any]) -> Mapping[str, Any]:
converted = {}
for k, v in raw_info.items():
entry = mapping.get(k, {k: Identity(object)})
for dest, transform in entry.items():
try:
converted[dest] = transform(v)
except InvalidArgumentException as e:
raise InvalidArgumentException(
"Problem processing argument {}: {}".format(
k, e.message))
return converted
def sanitize_src(self, src_data):
return src_data
def to_dest(self, src_data):
"""
Convert src data to destination format
:param src_data: source data
:return: the converted data
"""
return self.convert(self.mapping, src_data)
def to_src(self, dest_data):
"""
Convert dest_data to source format
:param dest_data: destination data
:return: the converted data
"""
return self.convert(self.reverse_mapping, dest_data)
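# A minimal usage sketch (the 'timeout'/'timeout_secs' keys are purely
# illustrative, not part of any real couchbase option mapping):
#
#   mapping = BijectiveMapping({'timeout': {'timeout_secs': Timedelta(float)}})
#   mapping.to_dest({'timeout': datetime.timedelta(seconds=5)})
#   # -> {'timeout_secs': 5.0}
#   mapping.to_src({'timeout_secs': 5.0})
#   # -> {'timeout': datetime.timedelta(seconds=5)}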
| 30.756757 | 113 | 0.605448 | 760 | 6,828 | 5.180263 | 0.185526 | 0.032512 | 0.02794 | 0.01905 | 0.20193 | 0.1651 | 0.139192 | 0.11303 | 0.11303 | 0.080264 | 0 | 0.000208 | 0.295255 | 6,828 | 221 | 114 | 30.895928 | 0.817955 | 0.161248 | 0 | 0.180451 | 0 | 0 | 0.042713 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.165414 | false | 0 | 0.037594 | 0.022556 | 0.383459 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac41b3cce04df264e1419de46ced2afc4ce1d2c | 2,836 | py | Python | thedoorman/run.py | FocusedSupport/thedoorman | 4f53a921e1bd97d9ff193482e790fa5757f54e7d | [
"MIT"
] | null | null | null | thedoorman/run.py | FocusedSupport/thedoorman | 4f53a921e1bd97d9ff193482e790fa5757f54e7d | [
"MIT"
] | 29 | 2017-03-03T16:21:59.000Z | 2019-03-11T19:20:24.000Z | thedoorman/run.py | FocusedSupport/thedoorman | 4f53a921e1bd97d9ff193482e790fa5757f54e7d | [
"MIT"
] | null | null | null | import threading
import sys
import os
import signal
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "components/slack")))
from slackbot.bot import Bot
from pydispatch import dispatcher
from components.dispatcher.signals import Signals, Senders
import components.devices.doorbell_monitor as dm
import components.devices.camera as cam
import components.devices.lock as lock
import components.devices.gpio_cleanup as gpio
import components.devices.speakers as spkr
import components.devices.speech as speech
import components.slack.slack_sender as ss
import components.slack.slack_uploader as slackUpload
import components.slack.imagebin_uploader as imagebinUpload
import components.slack.imgur_uploader as imgurUpload
import components.slack.user_manager as um
def main():
start_device_processing()
start_slack_processing()
def start_device_processing():
monitor = threading.Thread(target=dm.DoorbellMonitor)
monitor.daemon = True
print("Starting doorbell monitor")
monitor.start()
audio = threading.Thread(target=spkr.Speakers)
audio.daemon = True
print("Starting audio")
audio.start()
tts = threading.Thread(target=speech.Speech)
tts.daemon = True
print("Starting Text to Speech")
tts.start()
camera = threading.Thread(target=cam.Camera)
camera.daemon = True
print("Starting camera")
camera.start()
lock_control = threading.Thread(target=lock.Lock)
lock_control.daemon = True
print("Starting lock control")
lock_control.start()
gpio_cleanup = threading.Thread(target=gpio.GPIOCleanup)
gpio_cleanup.daemon = True
print("Starting GPIO cleanup module")
gpio_cleanup.start()
def start_slack_processing():
sender = threading.Thread(target=ss.SlackSender)
sender.daemon = True
print("Starting Slack Sender")
sender.start()
#slack_uploader = threading.Thread(target=slackUpload.SlackUploader)
#slack_uploader.daemon = True
#print("Starting Slack file uploader")
#slack_uploader.start()
#imagebinUploader = threading.Thread(target=imagebinUpload.ImagebinUploader)
#imagebinUploader.daemon = True
#print("Starting Imagebin Uploader")
#imagebinUploader.start()
imgurUploader = threading.Thread(target=imgurUpload.ImgurUploader)
imgurUploader.daemon = True
print("Starting imgur Uploader")
imgurUploader.start()
bot = Bot()
print("Starting Slack bot")
user_manager = um.UserManager()
user_manager.set_users(bot._client.users)
bot.run()
def cleanup():
print("Caught interrupt...")
dispatcher.send(Signals.CLEANUP, sender=Senders.SLACKBOT)
dispatcher.send(Signals.EXIT, sender=Senders.SLACKBOT)
exit(0)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
cleanup() | 28.646465 | 93 | 0.744358 | 336 | 2,836 | 6.16369 | 0.244048 | 0.084983 | 0.1014 | 0.111057 | 0.02704 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000419 | 0.159379 | 2,836 | 99 | 94 | 28.646465 | 0.868289 | 0.11213 | 0 | 0 | 0 | 0 | 0.091995 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056338 | false | 0 | 0.253521 | 0 | 0.309859 | 0.140845 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac4b51a79d3af0cebbea2eb96498b7f916e244a | 605 | py | Python | python/utils/random-sample-with-probabilities.py | leakycup/misc | 5cce8cbd7057bf2598c8076ffc257606edb7141e | [
"Apache-2.0"
] | null | null | null | python/utils/random-sample-with-probabilities.py | leakycup/misc | 5cce8cbd7057bf2598c8076ffc257606edb7141e | [
"Apache-2.0"
] | null | null | null | python/utils/random-sample-with-probabilities.py | leakycup/misc | 5cce8cbd7057bf2598c8076ffc257606edb7141e | [
"Apache-2.0"
] | null | null | null | import sys
import codecs
import numpy as np
#UTF8Writer = codecs.getwriter('utf8')
#sys.stdout = UTF8Writer(sys.stdout)
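# Usage (assumed from the argv parsing below):
#   python random-sample-with-probabilities.py <input_file> <probabilities_file> <sample_size>
# <probabilities_file> holds one probability per line, aligned with the lines
# of <input_file>; numpy.random.choice requires them to sum to 1.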
input_file = sys.argv[1]
probabilities_file = sys.argv[2]
sample_size = int(sys.argv[3])
input_list = []
probabilities_list = []
with codecs.open(input_file, 'r', 'utf-8') as f:
for line in f:
input_list.append(line.strip())
with codecs.open(probabilities_file, 'r', 'utf-8') as f:
for line in f:
probabilities_list.append(float(line.strip()))
for line in np.random.choice(input_list, p=probabilities_list, size=sample_size, replace=False):
print(line)
| 24.2 | 96 | 0.710744 | 94 | 605 | 4.446809 | 0.414894 | 0.050239 | 0.064593 | 0.043062 | 0.105263 | 0.105263 | 0.105263 | 0.105263 | 0.105263 | 0.105263 | 0 | 0.015564 | 0.150413 | 605 | 24 | 97 | 25.208333 | 0.797665 | 0.119008 | 0 | 0.125 | 0 | 0 | 0.022599 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 0.1875 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac695ccecbc9a0acac17b74afec8f55e9ba28d1 | 1,286 | py | Python | ivy/functional/backends/mxnet/old/linear_algebra.py | Neel-Renavikar/ivy | 644ab189a3a3fc52b1f3f86563226106e549eea3 | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/mxnet/old/linear_algebra.py | Neel-Renavikar/ivy | 644ab189a3a3fc52b1f3f86563226106e549eea3 | [
"Apache-2.0"
] | null | null | null | ivy/functional/backends/mxnet/old/linear_algebra.py | Neel-Renavikar/ivy | 644ab189a3a3fc52b1f3f86563226106e549eea3 | [
"Apache-2.0"
] | null | null | null | """
Collection of MXNet linear algebra functions, wrapped to fit Ivy syntax and signature.
"""
# global
import mxnet as _mx
import numpy as _np
# local
import ivy as _ivy
from typing import Union, Tuple
def matrix_norm(x, p=2, axes=None, keepdims=False):
axes = (-2, -1) if axes is None else axes
if isinstance(axes, int):
raise Exception('if specified, axes must be a length-2 sequence of ints, '
'but found {} of type {}'.format(axes, type(axes)))
return _mx.nd.norm(x, p, axes, keepdims=keepdims)
cholesky = lambda x: _mx.np.linalg.cholesky(x.as_np_ndarray()).as_nd_ndarray()
def vector_to_skew_symmetric_matrix(vector):
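    # Builds the usual cross-product ("hat") matrix for each vector a = (a1, a2, a3):
    #   [[  0, -a3,  a2],
    #    [ a3,   0, -a1],
    #    [-a2,  a1,   0]]
    # so that skew(a) @ b equals cross(a, b) along the trailing axes.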
batch_shape = list(vector.shape[:-1])
# BS x 3 x 1
vector_expanded = _mx.nd.expand_dims(vector, -1)
# BS x 1 x 1
a1s = vector_expanded[..., 0:1, :]
a2s = vector_expanded[..., 1:2, :]
a3s = vector_expanded[..., 2:3, :]
# BS x 1 x 1
zs = _mx.nd.zeros(batch_shape + [1, 1])
# BS x 1 x 3
row1 = _mx.nd.concat(*(zs, -a3s, a2s), dim=-1)
row2 = _mx.nd.concat(*(a3s, zs, -a1s), dim=-1)
row3 = _mx.nd.concat(*(-a2s, a1s, zs), dim=-1)
# BS x 3 x 3
return _mx.nd.concat(*(row1, row2, row3), dim=-2)
def qr(x, mode):
return _mx.np.linalg.qr(x, mode=mode)
| 27.361702 | 86 | 0.620529 | 216 | 1,286 | 3.564815 | 0.384259 | 0.036364 | 0.020779 | 0.019481 | 0.04026 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043216 | 0.226283 | 1,286 | 46 | 87 | 27.956522 | 0.730653 | 0.120529 | 0 | 0 | 0 | 0 | 0.06983 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.166667 | 0.041667 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ac936989b55dd5518ef35edf3a26894eace0277 | 157 | py | Python | NRPG-DataManager.py | oliverfaustino/NRPG-DataManager | 71064cb79be304f712aabcceebd6647121d2cb6c | [
"MIT"
] | null | null | null | NRPG-DataManager.py | oliverfaustino/NRPG-DataManager | 71064cb79be304f712aabcceebd6647121d2cb6c | [
"MIT"
] | null | null | null | NRPG-DataManager.py | oliverfaustino/NRPG-DataManager | 71064cb79be304f712aabcceebd6647121d2cb6c | [
"MIT"
] | null | null | null | from modulos.query import *
from modulos.splash_screen import *
if __name__ == '__main__':
splash_screen(segundos = 2)
while True:
query()
| 17.444444 | 35 | 0.675159 | 19 | 157 | 5.052632 | 0.684211 | 0.229167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008264 | 0.229299 | 157 | 8 | 36 | 19.625 | 0.785124 | 0 | 0 | 0 | 0 | 0 | 0.050955 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 5 |
5ac93a900f8dd76c156f7ea7f46e47f6ba5ffc11 | 759 | py | Python | 01-introduction to python for data science/04-numpy/baseball-players-bmi.py | thelc127/Data-Scientist-Career-Track-Datacamp | 56d0ec0ece7fa9127e72b0da598c89f15f31b6b3 | [
"MIT"
] | 2 | 2021-05-21T04:59:19.000Z | 2021-05-21T08:32:41.000Z | 01-introduction to python for data science/04-numpy/baseball-players-bmi.py | thelc127/Data-Scientist-Career-Track-Datacamp | 56d0ec0ece7fa9127e72b0da598c89f15f31b6b3 | [
"MIT"
] | null | null | null | 01-introduction to python for data science/04-numpy/baseball-players-bmi.py | thelc127/Data-Scientist-Career-Track-Datacamp | 56d0ec0ece7fa9127e72b0da598c89f15f31b6b3 | [
"MIT"
] | null | null | null | # Create a numpy array from the weight_lb list with the correct units. Multiply by 0.453592 to go from pounds to kilograms.
# Store the resulting numpy array as np_weight_kg.
# Use np_height_m and np_weight_kg to calculate the BMI of each player.
# Use the following equation:
# BMI = weight(kg) / height(m)^2
# save the resulting numpy array as bmi
# Print out bmi.
# height and weight are available as regular lists
# Import numpy
import numpy as np
# Create array from height_in with metric units: np_height_m
np_height_m = np.array(height_in) * 0.0254
# Create array from weight_lb with metric units: np_weight_kg
np_weight_kg = np.array(weight_lb) * 0.453592
# Calculate the BMI: bmi
bmi = np_weight_kg / np_height_m ** 2
# Print out bmi
print(bmi)
| 31.625 | 123 | 0.764163 | 137 | 759 | 4.065693 | 0.357664 | 0.086176 | 0.089767 | 0.064632 | 0.086176 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033493 | 0.173913 | 759 | 23 | 124 | 33 | 0.854864 | 0.750988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5acaacc2a5d0c1b378c239243152d8b863342a4f | 2,072 | py | Python | python/waf-alb-ec2-glue-athena-s3/app.py | gsy0911/aws-cdk-small-examples | e4b4be076d22f5d35f640e59a6ca346988baa1af | [
"Apache-2.0"
] | 2 | 2021-01-19T18:15:22.000Z | 2021-02-09T22:18:09.000Z | python/waf-alb-ec2-glue-athena-s3/app.py | gsy0911/aws-cdk-small-examples | e4b4be076d22f5d35f640e59a6ca346988baa1af | [
"Apache-2.0"
] | 14 | 2020-11-20T01:54:27.000Z | 2021-01-12T08:15:39.000Z | python/waf-alb-ec2-glue-athena-s3/app.py | gsy0911/aws-cdk-small-examples | e4b4be076d22f5d35f640e59a6ca346988baa1af | [
"Apache-2.0"
] | null | null | null | from aws_cdk import (
aws_autoscaling as autoscaling,
aws_ec2 as ec2,
aws_elasticloadbalancingv2 as elbv2,
aws_wafv2 as wafv2,
core,
)
class LoadBalancerStack(core.Stack):
def __init__(self, app: core.App, id: str) -> None:
super().__init__(app, id)
vpc = ec2.Vpc(self, "VPC")
data = open("./httpd.sh", "rb").read()
httpd = ec2.UserData.for_linux()
httpd.add_commands(str(data, 'utf-8'))
asg = autoscaling.AutoScalingGroup(
self,
"ASG",
vpc=vpc,
instance_type=ec2.InstanceType.of(
ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO
),
machine_image=ec2.AmazonLinuxImage(generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
user_data=httpd,
)
lb = elbv2.ApplicationLoadBalancer(
self, "LB",
vpc=vpc,
internet_facing=True)
listener = lb.add_listener("Listener", port=80)
listener.add_targets("Target", port=80, targets=[asg])
listener.connections.allow_default_port_from_any_ipv4("Open to the world")
asg.scale_on_request_count("AModestLoad", target_requests_per_second=1)
core.CfnOutput(self, "LoadBalancer", export_name="LoadBalancer", value=lb.load_balancer_dns_name)
# === #
# WAF #
# === #
# TODO #10 apply the web_acl to a resource
# no method to apply the web_acl to a resource in version 1.75.0
web_acl = wafv2.CfnWebACL(
scope_=self,
id="waf",
default_action=wafv2.CfnWebACL.DefaultActionProperty(),
scope="REGIONAL",
visibility_config=wafv2.CfnWebACL.VisibilityConfigProperty(
cloud_watch_metrics_enabled=True,
metric_name="waf-web-acl",
sampled_requests_enabled=True
)
)
def main():
app = core.App()
LoadBalancerStack(app, "LoadBalancerStack")
app.synth()
if __name__ == "__main__":
main()
| 29.6 | 105 | 0.599903 | 226 | 2,072 | 5.243363 | 0.530973 | 0.020253 | 0.016878 | 0.023629 | 0.042194 | 0.042194 | 0.042194 | 0 | 0 | 0 | 0 | 0.021843 | 0.292954 | 2,072 | 69 | 106 | 30.028986 | 0.787031 | 0.056467 | 0 | 0.04 | 0 | 0 | 0.070951 | 0 | 0 | 0 | 0 | 0.014493 | 0 | 1 | 0.04 | false | 0 | 0.02 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5acb201e61977d9de73500bb4e5a0c3ac26a488a | 575 | py | Python | src/twitter_utils/browser_session.py | namuan/twitter-tools | b8fad3c668f7bd65c74a0449b450bde957326ce4 | [
"MIT"
] | 1 | 2020-07-10T23:35:23.000Z | 2020-07-10T23:35:23.000Z | src/twitter_utils/browser_session.py | namuan/twitter-tools | b8fad3c668f7bd65c74a0449b450bde957326ce4 | [
"MIT"
] | 1 | 2020-10-02T01:51:51.000Z | 2020-10-02T01:51:51.000Z | src/twitter_utils/browser_session.py | namuan/twitter-tools | b8fad3c668f7bd65c74a0449b450bde957326ce4 | [
"MIT"
] | 2 | 2020-05-24T07:03:43.000Z | 2020-06-24T01:36:43.000Z | from typing import Any
from selenium import webdriver # type: ignore
class BrowserSession: # pragma: no cover
def __init__(self, given_browser: Any) -> None:
self.browser = given_browser
self.session = webdriver.Firefox("fireprofile")
def start(self) -> None:
if self.browser == "safari":
self.session = webdriver.Safari()
elif self.browser == "chrome":
self.session = webdriver.Chrome()
def stop(self) -> None:
self.session.close()
def current(self) -> Any:
return self.session
| 26.136364 | 55 | 0.622609 | 65 | 575 | 5.415385 | 0.476923 | 0.15625 | 0.170455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.267826 | 575 | 21 | 56 | 27.380952 | 0.836105 | 0.050435 | 0 | 0 | 0 | 0 | 0.042357 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.266667 | false | 0 | 0.133333 | 0.066667 | 0.533333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5acb9e1e62ddcd1f2667eff52694832dd21f1914 | 411 | py | Python | editor/templatetags/stamp.py | andersshenholm/editor | 052844de68101c5cdc6d9343e3e095ba816cd34c | [
"Apache-2.0"
] | 51 | 2015-04-19T23:27:04.000Z | 2022-03-25T01:43:43.000Z | editor/templatetags/stamp.py | andersshenholm/editor | 052844de68101c5cdc6d9343e3e095ba816cd34c | [
"Apache-2.0"
] | 428 | 2015-01-05T10:56:32.000Z | 2022-03-29T14:33:23.000Z | editor/templatetags/stamp.py | andersshenholm/editor | 052844de68101c5cdc6d9343e3e095ba816cd34c | [
"Apache-2.0"
] | 71 | 2015-01-28T20:06:15.000Z | 2022-03-25T02:35:40.000Z | from django.template import Library
from editor.models import STAMP_STATUS_CHOICES
register = Library()
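# Template usage sketch (`item.status` is a hypothetical context variable;
# stamp.html receives the returned {'status': ..., 'label': ...} context):
#   {% load stamp %}
#   {% stamp item.status %}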
@register.inclusion_tag('stamp.html')
def stamp(status):
label = ''
if status=='draft':
return {'status': 'draft', 'label': 'Draft'}
for s_status, s_label in STAMP_STATUS_CHOICES:
if status == s_status:
label = s_label
return {'status': status, 'label': label}
| 27.4 | 52 | 0.664234 | 52 | 411 | 5.076923 | 0.423077 | 0.125 | 0.136364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.211679 | 411 | 14 | 53 | 29.357143 | 0.814815 | 0 | 0 | 0 | 0 | 0 | 0.114355 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5acc8629a6ec5a5ef8fb0a2628a406996eb759d6 | 864 | py | Python | ubxlib/ubx_cfg_nmea.py | monocilindro/ubxlib | 378e86b7766f670b9a8966ee038275a2155bac54 | [
"MIT"
] | 3 | 2020-05-03T17:12:21.000Z | 2021-01-16T13:45:07.000Z | ubxlib/ubx_cfg_nmea.py | monocilindro/ubxlib | 378e86b7766f670b9a8966ee038275a2155bac54 | [
"MIT"
] | 35 | 2020-08-29T09:35:15.000Z | 2022-03-18T19:42:34.000Z | ubxlib/ubx_cfg_nmea.py | monocilindro/ubxlib | 378e86b7766f670b9a8966ee038275a2155bac54 | [
"MIT"
] | 4 | 2020-04-24T03:29:07.000Z | 2021-01-13T15:52:53.000Z | from .cid import UbxCID
from .frame import UbxFrame
from .types import CH, U1, X1, X4, Padding
class UbxCfgNmea_(UbxFrame):
CID = UbxCID(UbxCID.CLASS_CFG, 0x17)
NAME = 'UBX-CFG-NMEA'
class UbxCfgNmeaPoll(UbxCfgNmea_):
NAME = UbxCfgNmea_.NAME + '-POLL'
def __init__(self):
super().__init__()
def _cls_response(self):
return UbxCfgNmea
class UbxCfgNmea(UbxCfgNmea_):
def __init__(self):
super().__init__()
self.f.add(X1('filter'))
self.f.add(U1('nmeaVersion'))
self.f.add(U1('numSV'))
self.f.add(X1('flags'))
self.f.add(X4('gnssToFilter'))
self.f.add(U1('svNumbering'))
self.f.add(U1('mainTalkerId'))
self.f.add(U1('gsvTalkerId'))
self.f.add(U1('version'))
self.f.add(CH(2, 'bdsTalkerId'))
self.f.add(Padding(6, 'res1'))
| 24 | 42 | 0.603009 | 112 | 864 | 4.446429 | 0.383929 | 0.110442 | 0.176707 | 0.120482 | 0.080321 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02719 | 0.233796 | 864 | 35 | 43 | 24.685714 | 0.725076 | 0 | 0 | 0.153846 | 0 | 0 | 0.12963 | 0 | 0 | 0 | 0.00463 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.115385 | 0.038462 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ace70d75fc497eeb5ae372bf715cbea09eaaf13 | 390 | py | Python | Lesson_2/up.py | idel28102001/lessons | f88f5034d8c275175dacf66ba5d0342622c1aa50 | [
"Apache-2.0"
] | null | null | null | Lesson_2/up.py | idel28102001/lessons | f88f5034d8c275175dacf66ba5d0342622c1aa50 | [
"Apache-2.0"
] | null | null | null | Lesson_2/up.py | idel28102001/lessons | f88f5034d8c275175dacf66ba5d0342622c1aa50 | [
"Apache-2.0"
] | null | null | null | print('Think of a number')
num = 'yes'
l = 4
while num == 'yes':
l -= 1
num = input(f'Is the number of digits of your number less than {l}? : ') ## yes or no
num_2 = 'yes'
dig = ''
while l > 0:
number = 10
while num_2 == 'yes':
number -= 1
num_2 = input(f'Is your digit #{l} less than {number}? :') # yes or no
l -= 1
dig = str(number) + dig
num_2 = 'yes'
print(dig)
| 21.666667 | 78 | 0.525641 | 63 | 390 | 3.190476 | 0.428571 | 0.079602 | 0.089552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04059 | 0.305128 | 390 | 17 | 79 | 22.941176 | 0.701107 | 0.053846 | 0 | 0.235294 | 0 | 0 | 0.282192 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5acf6a6ed2a18dd015b90e514e7a54da35d4ea24 | 34,367 | py | Python | agoora-profiler-service/quality_inspection/tests/quality_inspector_test.py | spoud/agoora-agents | 918602d428b6cd9918d1ade682fa54e85a9a2df3 | [
"MIT"
] | 8 | 2021-03-25T13:49:31.000Z | 2021-08-19T04:09:14.000Z | agoora-profiler-service/quality_inspection/tests/quality_inspector_test.py | spoud/agoora-agents | 918602d428b6cd9918d1ade682fa54e85a9a2df3 | [
"MIT"
] | 18 | 2021-03-31T11:38:45.000Z | 2022-02-16T05:00:09.000Z | agoora-profiler-service/quality_inspection/tests/quality_inspector_test.py | spoud/agoora-agents | 918602d428b6cd9918d1ade682fa54e85a9a2df3 | [
"MIT"
] | null | null | null | import unittest
from quality_inspection.quality_inspector import QualityInspector
from quality_inspection.schema_definition import SchemaDefinition
from quality_inspection.tests.data_loader import DataLoader
class QualityInspectorTest(unittest.TestCase):
def setUp(self) -> None:
self.inspector = QualityInspector()
def test_inspect_inferred(self) -> None:
# arrange
samples = DataLoader.load_samples()
# act
schema_definition = SchemaDefinition.create(DataLoader.load_schema())
result = self.inspector.inspect(samples, schema_definition)
# assert
self.assertEqual(1.0, result.attribute_integrity)
self.assertEqual(.0, result.attribute_specification)
self.assertEqual(.5, result.attribute_quality_index)
def test_inspect_avro(self) -> None:
# arrange
samples = DataLoader.load_samples()
# act
schema_definition = SchemaDefinition.create(DataLoader.load_schema(), False)
result = self.inspector.inspect(samples, schema_definition)
# assert
self.assertEqual(1.0, result.attribute_integrity)
self.assertEqual(.625, result.attribute_specification)
self.assertEqual(.8125, result.attribute_quality_index)
def test_inspect_json(self) -> None:
# arrange
samples = DataLoader.load_samples()
# act
schema_definition = SchemaDefinition.create(DataLoader.load_schema_json(), False)
result = self.inspector.inspect(samples, schema_definition)
# assert
self.assertEqual(1.0, result.attribute_integrity)
self.assertEqual(.625, result.attribute_specification)
self.assertEqual(.8125, result.attribute_quality_index)
def test_inspect_with_specified_field(self):
# arrange
samples = [
{"random_int": 1},
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer")],
["random_int"]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(.5, result.attribute_specification)
self.assertEqual(1, result.attribute_integrity)
self.assertEqual(.75, result.attribute_quality_index)
def test_inspect_with_unspecified_field(self):
# arrange
samples = [
{"random_int": 1},
]
schema_definition = DataLoader.expand_schema(
[],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(0, result.attribute_specification)
self.assertEqual(1, result.attribute_integrity)
self.assertEqual(.5, result.attribute_quality_index)
def test_inspect_with_missing_field(self):
# arrange
samples = [
{"random_other": "other"},
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer")],
["random_int"]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
expected_specification = (0 + 1) / 2
expected_integrity = (1 + 0) / 2
self.assertEqual(expected_specification, result.attribute_specification,
"Attribute specification is not correct")
self.assertEqual(expected_integrity, result.attribute_integrity,
"Attribute integrity is not correct")
self.assertEqual((expected_specification + expected_integrity) / 2, result.attribute_quality_index,
"Attribute quality is not correct")
def test_specification_with_only_type_specification(self) -> None:
# arrange
samples = [
{"random_int": 1, "random_string": "foo"},
{"random_int": 2, "random_string": "bar"}
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer"), ("random_string", "string")],
["random_string", "random_int"],
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(.5, result.attribute_specification)
def test_specification_with_complete_specification(self) -> None:
# arrange
samples = [
{"random_int": 1, "random_string": "foo"},
{"random_int": 2, "random_string": "bar"}
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer"), ("random_string", "string")],
["random_string", "random_int"],
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(.5, result.attribute_specification)
def test_specification_with_inferred_schema(self) -> None:
# arrange
samples = [
{"random_int": 1, "random_string": "foo"},
{"random_int": 2, "random_string": "bar"}
]
schema = '''
{
"$schema": "http://json-schema.org/schema#",
"type": "object",
"properties": {
"random_string": {
"type": "string"
},
"random_integer": {
"type": "integer"
}
},
"required": [
"random_integer",
"random_string"
]
}
'''
schema_definition = SchemaDefinition.create(schema, True)
# act
result = self.inspector.inspect(samples, schema_definition)
# assert
self.assertEqual(.0, result.attribute_specification,
"Attribute specification is considered 0% when schema is inferred")
def test_specification_with_empty_schema(self) -> None:
# arrange
samples = [
{"random_int": 1, "random_string": "foo"},
{"random_int": 2, "random_string": "bar"}
]
schema_definition = DataLoader.expand_schema(
[],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(0, result.attribute_specification)
def test_specification_with_partial_specification(self) -> None:
# arrange
samples = [
{"random_int": 1, "random_string": "foo"},
{"random_int": 2, "random_string": "bar"}
]
schema_definition = DataLoader.expand_schema(
[("random_string", "string")],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert (only one of the two attributes is specified, and a type-only spec counts .5, so .25 overall)
self.assertEqual(.25, result.attribute_specification,
"Specification must be 25% because only half of the data is specified in schema")
def test_specification_with_irrelevant_specification(self) -> None:
# arrange
samples = [
{"random_int": 1, "random_string": "foo"},
{"random_int": 2, "random_string": "bar"}
]
schema_definition = DataLoader.expand_schema(
[("random_other", "string")],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(0, result.attribute_specification,
"Specification must be 0% because none of the attributes are specified")
def test_quality_with_complete_specification(self) -> None:
# arrange
samples = [
{"random_int": 1, "random_string": "foo"}, # random_string does not match
{"random_int": 2, "random_string": "bar"}
]
schema_definition = DataLoader.expand_schema(
[("random_string", "string"), ("random_int", "number")],
[],
{"random_string": {"pattern": "bar"}, "random_int": {"minimum": 0, "maximum": 100}}
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(.75, result.attribute_integrity)
self.assertEqual(1.0, result.attribute_specification)
self.assertEqual(.875, result.attribute_quality_index)
def test_quality_with_partial_specification(self) -> None:
# arrange
samples = [
{"random_int": 1, "random_string": "foo"},
{"random_int": 2, "random_string": "bar"}
]
schema_definition = DataLoader.expand_schema(
[("random_string", "string"), ("random_int", "int")],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(1.0, result.attribute_integrity)
self.assertEqual(.5, result.attribute_specification)
self.assertEqual(.75, result.attribute_quality_index)
def test_quality_without_specification(self):
# arrange
samples = [
{"random_int": 1, "random_string": "foo"},
{"random_int": 2, "random_string": "bar"}
]
schema_definition = DataLoader.expand_schema(
[],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(.5, result.attribute_quality_index)
def test_specification_with_partial_schema_and_inferred(self) -> None:
# arrange
samples = [
{"random_int": 1, "random_string": "foo"},
{"random_int": 2, "random_string": "bar"}
]
schema = '''
{
"$schema": "http://json-schema.org/schema#",
"type": "object",
"properties": {
"random_string": {
"type": "string"
},
"random_integer": {
"type": "integer"
}
},
"required": [
"random_integer",
"random_string"
]
}
'''
schema_definition = SchemaDefinition.create(schema, True)
# act
result = self.inspector.inspect(samples, schema_definition)
# assert
self.assertEqual(.0, result.attribute_specification,
"Attribute specification is considered 0% when the schema is inferred")
def test_integrity_with_missing_required(self) -> None:
# arrange
samples = [
{"random_int": 1},
{"random_int": None},
{"random_int": 2}
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer")],
["random_int"]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertAlmostEqual(2 / 3, result.attribute_integrity, 3,
"Attribute integrity must be 66%")
def test_integrity_for_complex_type(self):
# arrange
schema = DataLoader.load_schema_with_name("schema_registry_avro.json")
samples = [
{"timestamp": 1595601702, "iss_position": {"longitude": "-42.2948", "latitude": "-40.3670"},
"message": "success"},
{"timestamp": 1595601702, "iss_position": {"latitude": "-40.3670"},
"message": "success"},
{"timestamp": "wrong", "iss_position": {"longitude": 666, "latitude": "-40.0283"},
"message": "success"},
]
# act
result = self.inspector.inspect_attributes(samples,
SchemaDefinition.create(schema, False))
# assert - only message is not mandatory so 3 out of 12 (3*4) are missing or wrong
invalid_elements = 3
all_elements = 12
expected_integrity = (all_elements - invalid_elements) / all_elements
self.assertAlmostEqual(expected_integrity,
result.attribute_integrity, 3,
f"Integrity must be {expected_integrity * 100}%")
def test_integrity_with_missing_not_required(self) -> None:
# arrange
samples = [
{"random_int": 1},
{"random_int": None},
{"random_int": 2}
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer")],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(1.0, result.attribute_integrity)
def test_integrity_without_specified_optional_field(self) -> None:
# arrange
samples = [
{"random_int": 1},
{"random_int": 2},
{"random_int": 3}
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer"), ("random_string", "string")],
["random_int"]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(1.0, result.attribute_integrity)
def test_integrity_without_specified_required_field(self) -> None:
# arrange
samples = [
{"random_int": 1},
{"random_int": 2},
{"random_int": 3}
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer"), ("random_string", "string")],
["random_string"]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(.5, result.attribute_integrity)
def test_integrity_with_additional_field(self) -> None:
# arrange
samples = [
{"random_int": 1, "random_string": "abc"},
{"random_int": 2, "random_string": "efg"},
{"random_int": 3, "random_string": "hij"}
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer")],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(1.0, result.attribute_integrity)
def test_integrity_with_numeric_as_string(self) -> None:
# arrange
samples = [
{"random_int": "10000001.023"},
{"random_int": "1"}
]
schema_definition = DataLoader.expand_schema(
[("random_int", "number")],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(.0, result.attribute_integrity)
def test_integrity_with_float_as_int(self) -> None:
# arrange
samples = [{"random_int": "10000001.023"}]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer")],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(0.0, result.attribute_integrity)
def test_integrity_on_attribute_level_with_not_specified_partial_field(self) -> None:
# arrange
samples = [
{"random_int": 1002, "random_string": 1},
{"random_int": 1003, "random_string": 2},
{"random_int": 1004},
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer")],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
attribute_details = result.attribute_details
self.assertTrue('random_string' in attribute_details.keys(),
"Missing integrity for attribute random_string")
self.assertAlmostEqual(1, attribute_details['random_string'].attribute_integrity, 3,
"Integrity of random_string is not correct")
def test_integrity_on_attribute_level_with_missing_value(self) -> None:
# arrange
samples = [
{"random_int": 1002, "random_string": 1},
{"random_int": 1003, "random_string": 2},
{"random_int": "foo", "random_string": 3},
{"random_int": 1005, "random_string": "fourth"},
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer"), ("random_string", "string")],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
attribute_details = result.attribute_details
self.assertTrue('random_int' in attribute_details.keys(),
"Missing integrity for attribute random_int")
self.assertTrue('random_string' in attribute_details.keys(),
"Missing integrity for attribute random_string")
self.assertAlmostEqual((3 / 4), attribute_details['random_int'].attribute_integrity, 3,
"Integrity of random_int is not correct")
self.assertAlmostEqual((1 / 4), attribute_details['random_string'].attribute_integrity, 3,
"Integrity of random_string is not correct")
def test_integrity_on_attribute_level_with_not_specified_fields(self) -> None:
# arrange
samples = [
{"random_int": 1002, "random_string": 1},
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer")],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
attribute_details = result.attribute_details
self.assertTrue('random_string' in attribute_details.keys(),
"Even a not specified fields needs to be present in the details.")
self.assertEqual(1.0, attribute_details['random_string'].attribute_integrity)
def test_specification_on_attribute_level_with_complete_expectations(self) -> None:
# arrange
samples = [
{"random_int": 1002, "random_string": "1"},
{"random_int": 1003, "random_string": "2"},
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer"), ("random_string", "string")],
[],
{"random_int": {"minimum": 0, "maximum": 1004}, "random_string": {"pattern": ""}}
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
attribute_details = result.attribute_details
self.assertTrue('random_int' in attribute_details.keys())
self.assertTrue('random_string' in attribute_details.keys())
self.assertEqual(1.0, attribute_details['random_int'].attribute_specification)
self.assertEqual(1.0, attribute_details['random_string'].attribute_specification)
def test_specification_on_attribute_level_with_partial_expectations(self) -> None:
# arrange
samples = [
{"random_int": 1002, "random_string": 1},
{"random_int": 1003, "random_string": 2},
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer"), ("random_string", "string")],
[],
{"random_int": {"minimum": 0}}
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
attribute_details = result.attribute_details
self.assertTrue('random_int' in attribute_details.keys())
self.assertEqual(.75, attribute_details['random_int'].attribute_specification)
self.assertEqual(.5, attribute_details['random_string'].attribute_specification)
def test_specification_on_attribute_level_without_expectations(self) -> None:
# arrange
samples = [
{"random_int": 1002, "random_string": 1},
{"random_int": 1003, "random_string": 2},
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer"), ("random_string", "string")],
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
attribute_details = result.attribute_details
self.assertTrue('random_int' in attribute_details.keys())
self.assertEqual(.5, attribute_details['random_int'].attribute_specification)
self.assertEqual(.5, attribute_details['random_string'].attribute_specification)
def test_specification_on_attribute_level_with_missing_specification(self) -> None:
# arrange
samples = [
{"random_int": 1002, "random_string": 1},
{"random_int": 1003, "random_string": 2},
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer")],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
attribute_details = result.attribute_details
self.assertTrue('random_string' in attribute_details.keys())
self.assertEqual(0.0, attribute_details['random_string'].attribute_specification)
def test_quality_on_attribute_level(self) -> None:
# arrange
samples = [
{"random_int": 2, "random_string": "one"},
{"random_int": 55, "random_string": "two"},
{"random_int": 101, "random_string": "three"},
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer")],
[],
{"random_int": {"minimum": 50, "maximum": 100}}
)
# act
result = self.inspector.inspect(samples, schema_definition)
# assert
attribute_details = result.attribute_details
self.assertTrue('random_int' in attribute_details.keys())
self.assertTrue('random_string' in attribute_details.keys())
        self.assertAlmostEqual(((1 / 3) + 1) / 2, attribute_details['random_int'].attribute_quality_index, 3)
        self.assertAlmostEqual((1 + 0) / 2, attribute_details['random_string'].attribute_quality_index, 3)
def test_inspect_with_non_unique_types_does_not_throw_exception(self) -> None:
# arrange
samples = [
{"random_int": 1002},
{"random_int": "1003"},
{"random_int": "1004"},
]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer")],
[],
{"random_int": {"minimum": 0, "maximum": 100}}
)
# act
result = self.inspector.inspect(samples, schema_definition)
# assert
attribute_details = result.attribute_details
        self.assertAlmostEqual((1 / 3),
                               attribute_details['random_int'].attribute_integrity, 3)
def test_integrity_on_attribute_level_with_expectations(self):
# arrange
schema = '''
{
"type": "record",
"name": "RandomData",
"namespace": "data.producer.random",
"fields": [
{
"name": "random_integer",
"type": "int",
"expectations": [
{
"kwargs": {
"min_value": 0,
"max_value": 10
},
"expectation_type": "expect_column_values_to_be_between"
}
]
},
{
"name": "random_string",
"type": "string",
"expectations": [
{
"kwargs": {
"regex": "id_"
},
"meta": {},
"expectation_type": "expect_column_values_to_match_regex"
}
]
}
]
}
'''
samples = [
{'random_integer': 1, 'random_string': 'missing_id'},
{'random_integer': 11, 'random_string': 'id_1'},
{'random_integer': 3, 'random_string': 'missing_id'},
]
# act
result = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
# assert
attribute_details = result.attribute_details
self.assertAlmostEqual((3 / 6), result.attribute_integrity, 3,
"Attribute integrity is not correct")
self.assertTrue('random_integer' in attribute_details.keys(),
"Missing integrity for attribute random_integer")
self.assertTrue('random_string' in attribute_details.keys(),
"Missing integrity for attribute random_string")
self.assertAlmostEqual((2 / 3), attribute_details['random_integer'].attribute_integrity, 3,
"Integrity of random_int is not correct")
self.assertAlmostEqual((1 / 3), attribute_details['random_string'].attribute_integrity, 3,
"Integrity of random_string is not correct")
def test_integrity_with_negative_as_string(self) -> None:
# arrange
samples = [{"random_int": "-10000"}]
schema_definition = DataLoader.expand_schema(
[("random_int", "integer")],
[]
)
# act
result = self.inspector.inspect_attributes(samples, schema_definition)
# assert
self.assertEqual(.0, result.attribute_integrity,
"Attribute integrity must be 0% (even if not required, a "
"specified value needs to be correct).")
def test_integrity_with_wrong_type(self) -> None:
# arrange
samples, schema = DataLoader.create_dummy_samples()
# noinspection PyTypeChecker
samples[0]['random_string'] = 123
# act
result = self.inspector.inspect_attributes(samples, schema)
# assert
self.assertEqual(0.5, result.attribute_integrity)
def test_integrity_without_provided_schema(self) -> None:
# arrange
samples, _ = DataLoader.create_dummy_samples()
# act
empty_schema = SchemaDefinition.empty()
result = self.inspector.inspect(samples, empty_schema)
# assert
self.assertEqual(1.0, result.attribute_integrity)
self.assertEqual(.0, result.attribute_specification)
self.assertEqual(.5, result.attribute_quality_index)
def test_inspect_with_inferred_schemas(self):
# arrange
schema = DataLoader.load_schema_with_name("schema_registry_json.json")
schema_definition = SchemaDefinition.create(schema, True)
samples = DataLoader.load_samples()
# act
result = self.inspector.inspect(samples, schema_definition)
# assert
self.assertEqual(1.0, result.attribute_integrity)
self.assertEqual(.0, result.attribute_specification)
self.assertEqual(.5, result.attribute_quality_index)
def test_various_types_do_not_throw_exceptions(self):
# arrange
schema = '''
{
"type": "record",
"name": "RandomData",
"namespace": "data.producer.random",
"fields": [
{
"name": "random_string",
"type": "string"
},
{
"name": "random_integer",
"type": "int"
},
{
"name": "random_float",
"type": "float"
},
{
"name": "random_boolean",
"type": "boolean"
}
]
}
'''
samples = [
{'random_string': 'wheyuugkwi', 'random_integer': 876, 'random_float': 0.2295482, 'random_boolean': False}
]
# act
metrics = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
# assert
self.assertIsNotNone(metrics)
def test_inspect_with_min_max_range_expectation(self):
# arrange
schema = DataLoader.load_schema_with_name("schema_with_min_max.json")
samples = [
{'random_integer': 3}, {'random_integer': 11}, {'random_integer': 3}, {'random_integer': 8},
{'random_integer': 3}, {'random_integer': -5}, {'random_integer': 3}, {'random_integer': 10},
]
# act
metrics = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
# assert
self.assertEqual((6 / 8), metrics.attribute_integrity,
f"Attribute integrity must be {(6 / 8) * 100}%")
def test_inspect_with_min_expectation(self):
# arrange
schema = DataLoader.load_schema_with_name("schema_with_min.json")
samples = [
{'random_integer': 3}, {'random_integer': 11}, {'random_integer': 3}, {'random_integer': 8},
{'random_integer': 3}, {'random_integer': -5}, {'random_integer': 3}, {'random_integer': 10},
]
# act
metrics = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
# assert
self.assertEqual((7 / 8), metrics.attribute_integrity,
f"Attribute integrity must be {(7 / 8) * 100}%")
def test_inspect_with_multiple_expectations_asyncapi_style(self):
# arrange
schema = DataLoader.load_schema_with_name("schema_expectation_asyncapi_style.json")
samples = [
{'random_integer': 1, 'random_string': 'id_1'},
{'random_integer': 2, 'random_string': 'foo'}, # no match (string)
{'random_integer': 3, 'random_string': 'id_3'},
{'random_integer': 4, 'random_string': 'id_4'}, # no match (integer)
{'random_integer': 5, 'random_string': 'foo'}, # no match (integer, string)
]
# act
metrics = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
# assert
self.assertAlmostEqual(6 / 10, metrics.attribute_integrity, 3)
def test_inspect_with_multiple_expectations_asyncapi_style_json(self):
# arrange
schema = DataLoader.load_schema_with_name("schema_expectation_asyncapi_style_json.json")
samples = [
{'random_integer': 1, 'random_string': 'id_1'},
{'random_integer': 2, 'random_string': 'foo'}, # no match (string)
{'random_integer': 3, 'random_string': 'id_3'},
{'random_integer': 4, 'random_string': 'id_4'}, # no match (integer)
{'random_integer': 5, 'random_string': 'foo'}, # no match (integer, string)
]
# act
metrics = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
# assert
self.assertAlmostEqual(6 / 10, metrics.attribute_integrity, 3)
def test_inspect_with_both_schema_formats(self):
# arrange
schema_json = DataLoader.load_schema_with_name("schema_diff_json.json")
schema_avro = DataLoader.load_schema_with_name("schema_diff_avro.json")
samples = DataLoader.load_samples()
# act
result_json = self.inspector.inspect(samples, SchemaDefinition.create(schema_json, False))
result_avro = self.inspector.inspect(samples, SchemaDefinition.create(schema_avro, False))
# assert
self.assertEqual(result_json, result_avro)
def test_specification_from_toeggelomat_json(self):
# arrange
samples = DataLoader.load_samples_from_file("samples_toeggelomat.json")
# act
schema = DataLoader.load_schema_with_name("schema_toeggelomat_json.json")
result = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
# assert
self.assertEqual(53, len(result.attribute_details.keys()),
"There should be 53 keys in the dictionary")
for attribute_metric in result.attribute_details.keys():
self.assertEqual(1.0, result.attribute_details[attribute_metric].attribute_specification,
f"Attribute specification must be 100% ({attribute_metric})")
self.assertEqual(1.0, result.attribute_details[attribute_metric].attribute_integrity,
f"Attribute integrity must be 100% ({attribute_metric})")
def test_specification_from_toeggelomat(self):
# arrange
samples = DataLoader.load_samples_from_file("samples_toeggelomat.json")
# act
schema = DataLoader.load_schema_with_name("schema_toeggelomat.json")
result = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
# assert
self.assertEqual(53, len(result.attribute_details.keys()),
"There should be 53 keys in the dictionary")
for attribute_metric in result.attribute_details.keys():
self.assertEqual(1.0, result.attribute_details[attribute_metric].attribute_specification,
f"Attribute specification must be 100% ({attribute_metric})")
self.assertEqual(1.0, result.attribute_details[attribute_metric].attribute_integrity,
f"Attribute integrity must be 100% ({attribute_metric})")
| 35.724532 | 118 | 0.575407 | 3,185 | 34,367 | 5.929042 | 0.070016 | 0.052902 | 0.049778 | 0.055073 | 0.869572 | 0.830544 | 0.785798 | 0.753548 | 0.731148 | 0.702764 | 0 | 0.020126 | 0.314691 | 34,367 | 961 | 119 | 35.761707 | 0.781675 | 0.034015 | 0 | 0.553292 | 0 | 0 | 0.249864 | 0.012557 | 0 | 0 | 0 | 0 | 0.141066 | 1 | 0.073668 | false | 0 | 0.00627 | 0 | 0.081505 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5acfa73951bf5dd915adc32a7981cab8b5aacd86 | 4,247 | py | Python | twrap/metrics.py | itsnarsi/twrap | cc3128428e37fe0a363e5b18fd7fa0039a963365 | [
"MIT"
] | null | null | null | twrap/metrics.py | itsnarsi/twrap | cc3128428e37fe0a363e5b18fd7fa0039a963365 | [
"MIT"
] | null | null | null | twrap/metrics.py | itsnarsi/twrap | cc3128428e37fe0a363e5b18fd7fa0039a963365 | [
"MIT"
] | null | null | null | # @Author: Narsi Reddy <cibitaw1>
# @Date: 2018-09-22T17:38:05-05:00
# @Email: sainarsireddy@outlook.com
# @Last modified by: narsi
# @Last modified time: 2019-02-13T22:46:56-06:00
import torch
torch.manual_seed(29)
from torch import nn
import numpy as np
np.random.seed(29)
import torch.nn.functional as F
from torch.autograd.function import Function
from torch.nn.parameter import Parameter
from math import exp
"""
CLASSIFICATION METRICS
"""
def accuracy_topk(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def accuracy(output, target):
batch_size = target.size(0)
_, pred = output.topk(1, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
correct_k = correct[:1].view(-1).float().sum(0, keepdim=True)
res = correct_k.mul_(100.0 / batch_size)
return res
def binary_accuracy(output, target):
res = torch.mean(target.eq(torch.round(output)).float()) * 100
return res
"""
SUPER RESOLUTION
"""
# https://github.com/Po-Hsun-Su/pytorch-ssim
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel, sigma = 1.5):
_1D_window = gaussian(window_size, sigma).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
return window
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
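    # SSIM (Wang et al., 2004), computed over Gaussian-weighted local windows:
    #   SSIM(x, y) = ((2*mu_x*mu_y + C1) * (2*sigma_xy + C2))
    #              / ((mu_x^2 + mu_y^2 + C1) * (sigma_x^2 + sigma_y^2 + C2))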
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(nn.Module):
def __init__(self, window_size = 5, channel = 24, size_average = True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = channel
self.window = create_window(window_size, self.channel, sigma = 5)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel, sigma = 5)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
class SSIM_LOSS(nn.Module):
def __init__(self, window_size = 5, channel = 1,size_average = True):
super(SSIM_LOSS, self).__init__()
self.SSIM = SSIM(window_size, channel, size_average)
def forward(self, img1, img2):
return 1-self.SSIM(img1, img2)
def psnr(output, target):
mse = F.mse_loss(output, target)
return -10. * logX(mse)
def logX(x, d = 10.0):
""" Log10: log base 10 for tensorflow
"""
numerator = torch.log(x)
denominator = np.log(d)
return numerator / denominator
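# --- Usage sketch (added, illustrative): quick smoke test of the metrics
# above with random tensors. Shapes and the window/channel settings are
# assumptions for demonstration, not part of the original module.
if __name__ == '__main__':
    logits = torch.randn(8, 5)
    labels = torch.randint(0, 5, (8,))
    print('top-1 accuracy:', accuracy(logits, labels).item())
    pred = torch.rand(4, 1, 32, 32)
    target = torch.rand(4, 1, 32, 32)
    print('SSIM loss:', SSIM_LOSS(window_size=5, channel=1)(pred, target).item())
    print('PSNR:', psnr(pred, target).item())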
| 31.932331 | 104 | 0.645632 | 620 | 4,247 | 4.256452 | 0.254839 | 0.079576 | 0.025009 | 0.043577 | 0.305419 | 0.228875 | 0.228875 | 0.228875 | 0.147025 | 0.068966 | 0 | 0.053835 | 0.217094 | 4,247 | 132 | 105 | 32.174242 | 0.73985 | 0.07935 | 0 | 0.170455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.079545 | 0.011364 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5acfefc74de42e7336d5b91fd88fe5402716e7ad | 4,428 | py | Python | configs/resnet/contrast_r18_config.py | alecpeltekian/ImgClassification | cf4eca33027ca423623ff965fac354dcfce396d3 | [
"Apache-2.0"
] | null | null | null | configs/resnet/contrast_r18_config.py | alecpeltekian/ImgClassification | cf4eca33027ca423623ff965fac354dcfce396d3 | [
"Apache-2.0"
] | null | null | null | configs/resnet/contrast_r18_config.py | alecpeltekian/ImgClassification | cf4eca33027ca423623ff965fac354dcfce396d3 | [
"Apache-2.0"
] | null | null | null | # ### ===============================================================
# ### ===============================================================
# ### Modify the dataset loading settings
# dataset settings
dataset_type = 'ContrastDataset'
data_root = '/mnt/cadlabnas/datasets/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_label']),
dict(type='Collect', keys=['img', 'gt_label']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
data = dict(
samples_per_gpu=8, # BATCH_SIZE
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=1,
dataset=dict(
type=dataset_type,
ann_file='train.txt',
data_prefix= data_root + 'RenalDonors/',
pipeline=train_pipeline),
pipeline=train_pipeline
),
val=dict(
type=dataset_type,
ann_file='val.txt',
data_prefix= data_root + 'RenalDonors/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file='test.txt',
data_prefix= data_root + 'RenalDonors/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='accuracy', metric_options=dict(topk=(1,)))
# Set up working dir to save files and logs.
work_dir = '/home/alec/Desktop/ImgClassification/working_dir'
### ===============================================================
### ===============================================================
### Modify the model settings
# model settings
model = dict(
type='ImageClassifier',
pretrained='torchvision://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3,),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=2,
in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1,),
))
### ===============================================================
### ===============================================================
### Modify the schedule settings
# The original learning rate (LR) is set for 8-GPU training; for a single GPU
# it was first scaled to 0.01 / 4 = 0.0025 and then lowered further to 1e-4,
# which is the value actually used here.
optimizer_lr = 0.0001  # was 0.01 / 4
# optimizer
optimizer = dict(type='SGD', lr=optimizer_lr, momentum=0.9, weight_decay=0.0001, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
# warmup='linear',
# warmup_iters=500,
# warmup_ratio=0.001,
step=[5, 10])
runner = dict(type='EpochBasedRunner', max_epochs=25)
### ===============================================================
### ===============================================================
### Modify the default runtime settings
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50, #50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
# Run one training epoch, then one validation epoch, per workflow cycle.
# One epoch is num_images / batch_size iterations, scaled by the dataset
# repeat factor. With workflow = [('train', 1)] alone, the validation loss
# would never be evaluated (validation never backpropagates in either case).
workflow = [('train', 1), ('val', 1)]
### ===============================================================
### ===============================================================
### Miscellaneous settings
# Set seed thus the results are more reproducible
seed = 0
#set_random_seed(0, deterministic=False)
gpu_ids = range(1)
### ===============================================================
### ===============================================================
### testing/prediction/evaluation phase - Model settings
# get the root path to the model checkpoints
ckp_root = work_dir #'/home/tsm/Code/mmdetection/demo/tutorial_exps/'
| 30.965035 | 137 | 0.539747 | 470 | 4,428 | 4.929787 | 0.468085 | 0.079413 | 0.012948 | 0.018127 | 0.197669 | 0.182132 | 0.11653 | 0.099266 | 0.099266 | 0.054381 | 0 | 0.02608 | 0.168699 | 4,428 | 142 | 138 | 31.183099 | 0.603369 | 0.393631 | 0 | 0.148148 | 0 | 0 | 0.179862 | 0.036126 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ad437188ad82881b1a7663301a30cca6f73df88 | 2,745 | py | Python | modules/2.79/bpy/ops/uv.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/ops/uv.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/ops/uv.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | def align(axis='ALIGN_AUTO'):
pass
def average_islands_scale():
pass
def circle_select(x=0, y=0, radius=1, gesture_mode=0):
pass
def cube_project(cube_size=1.0, correct_aspect=True, clip_to_bounds=False, scale_to_bounds=False):
pass
def cursor_set(location=(0.0, 0.0)):
pass
def cylinder_project(direction='VIEW_ON_EQUATOR', align='POLAR_ZX', radius=1.0, correct_aspect=True, clip_to_bounds=False, scale_to_bounds=False):
pass
def export_layout(filepath="", check_existing=True, export_all=False, modified=False, mode='PNG', size=(1024, 1024), opacity=0.25, tessellated=False):
pass
def follow_active_quads(mode='LENGTH_AVERAGE'):
pass
def hide(unselected=False):
pass
def lightmap_pack(PREF_CONTEXT='SEL_FACES', PREF_PACK_IN_ONE=True, PREF_NEW_UVLAYER=False, PREF_APPLY_IMAGE=False, PREF_IMG_PX_SIZE=512, PREF_BOX_DIV=12, PREF_MARGIN_DIV=0.1):
pass
def mark_seam(clear=False):
pass
def minimize_stretch(fill_holes=True, blend=0.0, iterations=0):
pass
def pack_islands(rotate=True, margin=0.001):
pass
def pin(clear=False):
pass
def project_from_view(orthographic=False, camera_bounds=True, correct_aspect=True, clip_to_bounds=False, scale_to_bounds=False):
pass
def remove_doubles(threshold=0.02, use_unselected=False):
pass
def reset():
pass
def reveal():
pass
def seams_from_islands(mark_seams=True, mark_sharp=False):
pass
def select(extend=False, location=(0.0, 0.0)):
pass
def select_all(action='TOGGLE'):
pass
def select_border(pinned=False, gesture_mode=0, xmin=0, xmax=0, ymin=0, ymax=0, extend=True):
pass
def select_lasso(path=None, deselect=False, extend=True):
pass
def select_less():
pass
def select_linked(extend=False):
pass
def select_linked_pick(extend=False, location=(0.0, 0.0)):
pass
def select_loop(extend=False, location=(0.0, 0.0)):
pass
def select_more():
pass
def select_pinned():
pass
def select_split():
pass
def smart_project(angle_limit=66.0, island_margin=0.0, user_area_weight=0.0, use_aspect=True, stretch_to_bounds=True):
pass
def snap_cursor(target='PIXELS'):
pass
def snap_selected(target='PIXELS'):
pass
def sphere_project(direction='VIEW_ON_EQUATOR', align='POLAR_ZX', correct_aspect=True, clip_to_bounds=False, scale_to_bounds=False):
pass
def stitch(use_limit=False, snap_islands=True, limit=0.01, static_island=0, midpoint_snap=False, clear_seams=True, mode='VERTEX', stored_mode='VERTEX', selection=None):
pass
def tile_set(tile=(0, 0)):
pass
def unwrap(method='ANGLE_BASED', fill_holes=True, correct_aspect=True, use_subsurf_data=False, margin=0.001):
pass
def weld():
pass
| 17.941176 | 175 | 0.726047 | 433 | 2,745 | 4.357968 | 0.332564 | 0.137255 | 0.069952 | 0.023847 | 0.280339 | 0.237944 | 0.237944 | 0.227875 | 0.18442 | 0.18442 | 0 | 0.031719 | 0.150091 | 2,745 | 152 | 176 | 18.059211 | 0.777111 | 0 | 0 | 0.5 | 0 | 0 | 0.044841 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0.5 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 |
5ad4e2861aebece133d34e92b30760d0b61fc3a9 | 662 | py | Python | roots/FinalModifiedBisection.py | Seek/LaTechNumeric | dabef2040e84bf25cabab07fe20a6434ce52197b | [
"MIT"
] | null | null | null | roots/FinalModifiedBisection.py | Seek/LaTechNumeric | dabef2040e84bf25cabab07fe20a6434ce52197b | [
"MIT"
] | null | null | null | roots/FinalModifiedBisection.py | Seek/LaTechNumeric | dabef2040e84bf25cabab07fe20a6434ce52197b | [
"MIT"
] | null | null | null | import sys
EPS = sys.float_info.epsilon
#Define the function
def f(x):
return (x+1)**2 - 1
def bisect(f, x1, x2, eps, maxn):
assert f(x1)*f(x2) < 0, \
"We cannot find a root if the function does not change signs"
xl = x1
xu = x2
xr = 0
fl = f(xl)
err = 1000
for i in range(maxn):
r = (xl + xu)/2
print(r)
fr = f(r)
        if abs(r) >= EPS:  # guard against division by zero in the relative error
err = abs((r-xr)/r) * 100
if err < eps:
print("Error =" + str(err))
break
        v = fl * fr
        if v < 0:
            xu = r
        elif v > 0:
            xl = r
            fl = fr
        else:
            # f(r) is exactly zero, so r is a root: stop early
            xr = r
            break
        xr = r
return r
print("Computing the roots of x**2 - 2")
r = bisect(f, -1.5, 10, 0.00001, 100)
print("Root = " + str(r))
| 16.146341 | 62 | 0.539275 | 131 | 662 | 2.717557 | 0.435115 | 0.061798 | 0.022472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.08137 | 0.294562 | 662 | 40 | 63 | 16.55 | 0.680942 | 0.028701 | 0 | 0 | 0 | 0 | 0.162246 | 0 | 0 | 0 | 0 | 0 | 0.029412 | 1 | 0.058824 | false | 0 | 0.029412 | 0.029412 | 0.147059 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ad51d488ab3d8dfcae8a309205c863eaacc63bd | 717 | py | Python | main.py | jostimian/Python-To-Facebook | b52f2fb4db01263dc279241c24ad6e9515ba5f45 | [
"MIT"
] | null | null | null | main.py | jostimian/Python-To-Facebook | b52f2fb4db01263dc279241c24ad6e9515ba5f45 | [
"MIT"
] | null | null | null | main.py | jostimian/Python-To-Facebook | b52f2fb4db01263dc279241c24ad6e9515ba5f45 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
email = input("Enter Your Email: ")
password = input("Enter Your Password: ")
def login():
browser = webdriver.Chrome()
browser.get("https://www.facebook.com")
emailid = browser.find_element_by_id("email")
passid = browser.find_element_by_id("pass")
loginid = browser.find_element_by_id("loginbutton")
emailid.send_keys(email)
passid.send_keys(password)
loginid.click()
while email or password == "":
print("\n")
email = input("Enter Your Email: ")
password = input("Enter Your Password")
if email and password != "":
login()
if email and password != "":
login()
| 24.724138 | 55 | 0.677824 | 89 | 717 | 5.337079 | 0.438202 | 0.084211 | 0.117895 | 0.126316 | 0.463158 | 0.227368 | 0.227368 | 0.227368 | 0.227368 | 0.227368 | 0 | 0 | 0.191074 | 717 | 28 | 56 | 25.607143 | 0.818966 | 0 | 0 | 0.285714 | 0 | 0 | 0.170153 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0.333333 | 0.095238 | 0 | 0.142857 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5ad61d1f671afea82e39013fc63a8845b6a3671b | 3,606 | py | Python | monique_worker_py/worker.py | biocad/monique-worker-py | 56b0ab2e218b80e3a83d7987cd8dd8993a3d66a7 | [
"BSD-3-Clause"
] | null | null | null | monique_worker_py/worker.py | biocad/monique-worker-py | 56b0ab2e218b80e3a83d7987cd8dd8993a3d66a7 | [
"BSD-3-Clause"
] | null | null | null | monique_worker_py/worker.py | biocad/monique-worker-py | 56b0ab2e218b80e3a83d7987cd8dd8993a3d66a7 | [
"BSD-3-Clause"
] | null | null | null | import zmq
import logging
import argparse
from monique_worker_py.config import read_worker_config
from monique_worker_py.qmessage import qmessage_from_json, create_qmessage
class Worker:
def __init__(self, worker_name, algo):
self.worker_name = worker_name
self.algo = algo
parser = argparse.ArgumentParser()
parser.add_argument('--config', required=True, help='Path to config file')
args = parser.parse_args()
self.worker_config = read_worker_config(args.config)
def run(self):
"""Runs application"""
logging.basicConfig(level=self.worker_config.log_level,
format='%(asctime)s %(name)-12s %(levelname)-8s {}: %(message)s'.format(self.worker_name),
datefmt='%Y-%m-%d %H:%M',
filename=self.worker_config.log_path,
filemode='a')
logging.info("connecting to queue...")
# setup connection
context = zmq.Context()
# Socket to receive messages from controller
from_controller = context.socket(zmq.PULL)
from_controller.connect(self.worker_config.controller_pull_address())
# Socket to send messages to controller
to_controller = context.socket(zmq.PUSH)
to_controller.connect(self.worker_config.controller_push_address())
logging.info("connected to queue.")
# waiting for the message...
while True:
in_message = from_controller.recv()
logging.info('message received.')
# parsing message to QMessage
qmessage = qmessage_from_json(in_message)
logging.debug('message tags: {}; message cnt: {}'.format(qmessage.tags, qmessage.cnt))
# get config from Task
task = qmessage.cnt.contents
config = task.get_config()
logging.info('config parsed')
logging.debug('config content: {}'.format(config))
try:
logging.info('start working...')
# that is the MAIN PLACE. We run given algorithm with config received.
wr = self.algo(config)
logging.debug('worker result: {}, worker version: {}'.format(wr.result, wr.version))
logging.info('finished working!')
                # wrap the algorithm output into a completed task...
completed_task = task.task_completed(wr)
# prepare result QMessage...
completed_message = create_qmessage(completed_task)
# and sending it back to the queue.
logging.info('sending message with completed task...')
logging.debug('message: {}'.format(completed_message.to_json()))
to_controller.send(completed_message.to_json())
logging.info("message sent :)")
except Exception as e:
logging.error('failed with error: {}'.format(e))
# if exception happened then format result QMessage with another method...
failed_message = qmessage.qmessage_failed(self.worker_name, e)
# and sending it back.
logging.info("sending message with failed task...")
logging.debug('message: {}'.format(failed_message.to_json()))
to_controller.send(failed_message.to_json())
logging.info("message sent :(")
class WorkerResult:
"""Class to format worker result."""
def __init__(self, result, version):
self.result = result
self.version = version
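

# --- Usage sketch (added, illustrative): wiring a concrete algorithm into the
# worker. `echo_algo` and its return values are placeholders, not part of the
# original module; a real algorithm receives the task config and must return
# an object exposing `result` and `version`, which WorkerResult provides.
#
#     def echo_algo(config):
#         return WorkerResult(result={"echo": config}, version="0.1.0")
#
#     if __name__ == '__main__':
#         Worker("demo-worker", echo_algo).run()  # expects --config <path>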
| 37.5625 | 118 | 0.598724 | 388 | 3,606 | 5.399485 | 0.304124 | 0.052506 | 0.038186 | 0.018138 | 0.157518 | 0.102148 | 0.033413 | 0 | 0 | 0 | 0 | 0.001185 | 0.297837 | 3,606 | 95 | 119 | 37.957895 | 0.826224 | 0.13117 | 0 | 0 | 0 | 0 | 0.139826 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.087719 | 0 | 0.175439 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ad6cbcddff8d2b547541b05fb14cdfa5518b9b3 | 1,081 | py | Python | helpers/sendSMS.py | cheikhmbackeseck37/insuris | 3362ca445d489e23d57a76bbd6d263f3a5f0b519 | [
"MIT"
] | 12 | 2019-08-02T07:58:16.000Z | 2022-01-31T23:45:08.000Z | helpers/sendSMS.py | domambia/csdigital-gs1kenya-internal-erp | 6736d0e9a3a51653689f8ae921cf811f378d9d8e | [
"MIT"
] | 8 | 2019-08-02T08:06:18.000Z | 2022-03-11T23:45:17.000Z | helpers/sendSMS.py | cheikhmbackeseck37/insuris | 3362ca445d489e23d57a76bbd6d263f3a5f0b519 | [
"MIT"
] | 11 | 2019-07-31T16:23:36.000Z | 2022-01-29T08:30:07.000Z | # works with both python 2 and 3
from __future__ import print_function
from datetime import datetime
import africastalking
class SMS:
def __init__(self):
self.username = "gs1kenya"
self.api_key = "0902d36a02514da9fa33a11586683f8d76e5207ea544363e7d41149e6c9a6718"
africastalking.initialize(self.username, self.api_key)
self.sms = africastalking.SMS
def send(self, phone, message):
try:
response = self.sms.send(str(message), ["+254"+str(phone)])
except Exception as e:
message = """
Dear, Omambia Mogaka.
Ref: Message Notification
------------------------
There was an error in sending message to your other employee.
The Error is: {}
Thank You,
Humble Developer, Most adored,
GS1 Kenya
Date: {} .
"""
            print(message.format(str(e), datetime.now()))
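

# --- Usage sketch (added, illustrative): the phone number below is a
# placeholder. send() prefixes "+254" internally, so pass the local part only.
#
#   sms = SMS()
#   sms.send("712345678", "Hello from GS1 Kenya")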
| 38.607143 | 89 | 0.518964 | 97 | 1,081 | 5.670103 | 0.659794 | 0.050909 | 0.036364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081818 | 0.389454 | 1,081 | 27 | 90 | 40.037037 | 0.751515 | 0.027752 | 0 | 0 | 0 | 0 | 0.509056 | 0.083889 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.12 | 0 | 0.24 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ad80dcae0e4a2b3ab268f2939266c44cfa02c66 | 52 | py | Python | tests/test_placeholder.py | yhay81/socialname | 1907947014d3ba8e518be1374f24c44b89854e29 | [
"MIT"
] | null | null | null | tests/test_placeholder.py | yhay81/socialname | 1907947014d3ba8e518be1374f24c44b89854e29 | [
"MIT"
] | 7 | 2021-01-23T11:18:00.000Z | 2022-03-12T21:43:13.000Z | tests/test_placeholder.py | yhay81/socialname | 1907947014d3ba8e518be1374f24c44b89854e29 | [
"MIT"
] | null | null | null | def test_sample() -> None:
assert True # nosec
| 17.333333 | 26 | 0.634615 | 7 | 52 | 4.571429 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 52 | 2 | 27 | 26 | 0.820513 | 0.096154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0.5 | true | 0 | 0 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 5 |
5ad86ff306f582b52295bb807123e900f16d76f5 | 818 | py | Python | rss_skill/models.py | frybin/Alexa-Python-Skill | b729d303524a7e7b72c9becb7b7044afea6597fc | [
"MIT"
] | null | null | null | rss_skill/models.py | frybin/Alexa-Python-Skill | b729d303524a7e7b72c9becb7b7044afea6597fc | [
"MIT"
] | null | null | null | rss_skill/models.py | frybin/Alexa-Python-Skill | b729d303524a7e7b72c9becb7b7044afea6597fc | [
"MIT"
] | null | null | null | ####################################
# File name: models.py #
# Author: Fred Rybin #
####################################
from rss_skill import db
class Feed(db.Model):
__tablename__ = 'feed'
rss_i = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.Text, nullable=False)
link = db.Column(db.Text, nullable=False)
article_1 = db.Column(db.Text, nullable=False)
article_2 = db.Column(db.Text, nullable=False)
post = db.Column(db.String(32), nullable=False)
def __init__(self, name, link, article_1, article_2):
self.name = name
self.link = link
self.article_1 = article_1
self.article_2 = article_2
self.post = ""
def __repr__(self):
return f'Feed {self.rss_i}: {self.name}'
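

# --- Usage sketch (added, illustrative): creating and persisting a Feed.
# Assumes a configured Flask app context; all field values are placeholders.
#
#   feed = Feed(name="Example feed", link="https://example.com/rss",
#               article_1="First headline", article_2="Second headline")
#   db.session.add(feed)
#   db.session.commit()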
| 31.461538 | 71 | 0.572127 | 105 | 818 | 4.228571 | 0.371429 | 0.108108 | 0.135135 | 0.126126 | 0.274775 | 0.274775 | 0.153153 | 0 | 0 | 0 | 0 | 0.015848 | 0.228606 | 818 | 25 | 72 | 32.72 | 0.687797 | 0.06357 | 0 | 0 | 0 | 0 | 0.050445 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0.058824 | 0.705882 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
5adaa94654c12d575666ad0a6b6cf47ac7a0cb0e | 1,120 | py | Python | examples/cartpole_example/test/cartpole_plot_model_NN_cloop.py | marcosfelt/sysid-neural-structures-fitting | 80eda427251e8cce1d2a565b5cbca533252315e4 | [
"MIT"
] | 17 | 2019-11-15T06:27:05.000Z | 2021-10-02T14:24:25.000Z | examples/cartpole_example/test/cartpole_plot_model_NN_cloop.py | marcosfelt/sysid-neural-structures-fitting | 80eda427251e8cce1d2a565b5cbca533252315e4 | [
"MIT"
] | null | null | null | examples/cartpole_example/test/cartpole_plot_model_NN_cloop.py | marcosfelt/sysid-neural-structures-fitting | 80eda427251e8cce1d2a565b5cbca533252315e4 | [
"MIT"
] | 4 | 2020-09-03T17:01:34.000Z | 2021-11-05T04:09:24.000Z | import os
import pandas as pd
import matplotlib.pyplot as plt
from examples.cartpole_example.cartpole_dynamics import RAD_TO_DEG, DEG_TO_RAD
if __name__ == '__main__':
#df_model = pd.read_csv(os.path.join("data", "pendulum_data_PID.csv"))
#df_nn = pd.read_csv(os.path.join("data", "pendulum_data_PID_NN_model.csv"))
df_meas = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref_val.csv"))
df_nn = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref_val_NN_model.csv"))
fig,axes = plt.subplots(3,1, figsize=(10,10), sharex=True)
axes[0].plot(df_meas['time'], df_meas['p'], "k", label='p system')
axes[0].plot(df_nn['time'], df_nn['p'], "r", label='p NN')
axes[0].set_title("Position (m)")
axes[0].set_ylim(-10, 10.0)
axes[1].plot(df_meas['time'], df_meas['theta'] * RAD_TO_DEG, "k", label='theta system')
axes[1].plot(df_nn['time'], df_nn['theta']*RAD_TO_DEG, "r", label='theta NN')
axes[2].plot(df_meas['time'], df_meas['u'], label="u")
axes[2].plot(df_nn['time'], df_nn['u'], label="u")
for ax in axes:
ax.grid(True)
ax.legend()
| 35 | 91 | 0.655357 | 200 | 1,120 | 3.39 | 0.315 | 0.047198 | 0.053097 | 0.064897 | 0.421829 | 0.421829 | 0.262537 | 0.262537 | 0.262537 | 0.262537 | 0 | 0.019792 | 0.142857 | 1,120 | 31 | 92 | 36.129032 | 0.686458 | 0.128571 | 0 | 0 | 0 | 0 | 0.175565 | 0.068789 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.210526 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ae07991afc23cb7d9a2fd6ee5c2c60cb7e50a1b | 2,560 | py | Python | unit_tests/test_extract_initial_rules.py | fensta/bracid2019 | ad9bf0b4e44c19f66c1597e857ef6cf70f56a646 | [
"MIT"
] | null | null | null | unit_tests/test_extract_initial_rules.py | fensta/bracid2019 | ad9bf0b4e44c19f66c1597e857ef6cf70f56a646 | [
"MIT"
] | null | null | null | unit_tests/test_extract_initial_rules.py | fensta/bracid2019 | ad9bf0b4e44c19f66c1597e857ef6cf70f56a646 | [
"MIT"
] | null | null | null | from unittest import TestCase
import pandas as pd
from scripts.utils import extract_initial_rules
class TestExtractInitialRules(TestCase):
"""Tests test_extract_initial_rules() from utils"""
def test_extract_initial_rules_numeric(self):
"""Test that rules are extracted correctly with a single numeric features"""
df = pd.DataFrame({"A": [1.0, 2, 3], "Class": ["A", "B", "C"]})
class_col_name = "Class"
rules = extract_initial_rules(df, class_col_name)
correct = pd.DataFrame({"A": [(1.0, 1.0), (2, 2), (3, 3)], "Class": ["A", "B", "C"]})
self.assertTrue(df.shape == (3, 2) and rules.shape == (3, 2))
self.assertTrue(rules.equals(correct))
def test_extract_initial_rules_nominal(self):
"""Test that rules are extracted correctly with a single nominal features"""
df = pd.DataFrame({"A": ["a", "b", "c"], "Class": ["A", "B", "C"]})
class_col_name = "Class"
rules = extract_initial_rules(df, class_col_name)
correct = pd.DataFrame({"A": ["a", "b", "c"], "Class": ["A", "B", "C"]})
self.assertTrue(df.shape == (3, 2) and rules.shape == (3, 2))
self.assertTrue(rules.equals(correct))
def test_extract_initial_rules_single_feature_mixed(self):
"""
Test that rules are extracted correctly with a single numeric and nominal feature
"""
df = pd.DataFrame({"A": [1.0, 2, 3], "B": ["a", "b", "c"], "Class": ["A", "B", "C"]})
class_col_name = "Class"
rules = extract_initial_rules(df, class_col_name)
correct = pd.DataFrame({"A": [(1.0, 1.0), (2, 2), (3, 3)], "B": ["a", "b", "c"], "Class": ["A", "B", "C"]})
self.assertTrue(df.shape == (3, 3) and rules.shape == (3, 3))
self.assertTrue(rules.equals(correct))
def test_extract_initial_rules_multiple_features_mixed(self):
"""
Test that rules are extracted correctly with different numeric and nominal features
"""
df = pd.DataFrame({"A": [1.0, 2, 3], "B": ["a", "b", "c"], "C": [5, -1, 3], "D": ["t", "t", "e"],
"Class": ["A", "B", "C"]})
class_col_name = "Class"
rules = extract_initial_rules(df, class_col_name)
correct = pd.DataFrame({"A": [(1.0, 1.0), (2, 2), (3, 3)], "B": ["a", "b", "c"],
"C": [(5, 5), (-1, -1), (3, 3)], "D": ["t", "t", "e"], "Class": ["A", "B", "C"]})
self.assertTrue(df.shape == (3, 5) and rules.shape == (3, 5))
self.assertTrue(rules.equals(correct))
| 49.230769 | 115 | 0.551563 | 357 | 2,560 | 3.817927 | 0.142857 | 0.020543 | 0.030814 | 0.046955 | 0.80044 | 0.757887 | 0.740279 | 0.737344 | 0.737344 | 0.644901 | 0 | 0.031314 | 0.239063 | 2,560 | 51 | 116 | 50.196078 | 0.668378 | 0.137891 | 0 | 0.411765 | 0 | 0 | 0.057809 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 1 | 0.117647 | false | 0 | 0.088235 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5ae084aea517a9f9539f0a2bcb3c98aac39ffea6 | 2,801 | py | Python | utils/ICMPMaker.py | lanfis/Spider | c66f1d134733318a8714c544050ca42e08b9fe0e | [
"MIT"
] | null | null | null | utils/ICMPMaker.py | lanfis/Spider | c66f1d134733318a8714c544050ca42e08b9fe0e | [
"MIT"
] | null | null | null | utils/ICMPMaker.py | lanfis/Spider | c66f1d134733318a8714c544050ca42e08b9fe0e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# license removed for brevity
from scapy.all import *
import sys
class ICMPMaker:
TYPE = 8
CODE = 0
CHKSUM = None
ID = 0
SEQ = 0
TIMESTAMP_ORI = 60025165
TIMESTAMP_RX = 60025165
TIMESTAMP_TX = 60025165
GATEWAY = '0.0.0.0'
PTR = 0
RESERVED = 0
LEN = 0
MASK = '0.0.0.0'
NEXTHOPMTU = 0
def __init__(self,
TYPE=8,
CODE=0,
CHKSUM=None,
ID=0,
SEQ=0,
TIMESTAMP_ORI=60025165,
TIMESTAMP_RX=60025165,
TIMESTAMP_TX=60025165,
GATEWAY='0.0.0.0',
PTR=0,
RESERVED=0,
LEN=0,
MASK='0.0.0.0',
NEXTHOPMTU=0
):
self.TYPE = TYPE
self.CODE = CODE
self.CHKSUM = CHKSUM
self.ID = ID
self.SEQ = SEQ
self.TIMESTAMP_ORI = TIMESTAMP_ORI
self.TIMESTAMP_RX = TIMESTAMP_RX
self.TIMESTAMP_TX = TIMESTAMP_TX
self.GATEWAY = GATEWAY
self.PTR = PTR
self.RESERVED = RESERVED
self.LEN = LEN
self.MASK = MASK
self.NEXTHOPMTU = NEXTHOPMTU
def make_packet(self):
return ICMP(
type=self.TYPE,
code=self.CODE,
chksum=self.CHKSUM,
id=self.ID,
seq=self.SEQ,
ts_ori=self.TIMESTAMP_ORI,
ts_rx=self.TIMESTAMP_RX,
ts_tx=self.TIMESTAMP_TX,
gw=self.GATEWAY,
ptr=self.PTR,
reserved=self.RESERVED,
length=self.LEN,
addr_mask=self.MASK,
nexthopmtu=self.NEXTHOPMTU
)
def parse_type(self, icmp):
return icmp.type
def parse_code(self, icmp):
return icmp.code
def parse_chksum(self, icmp):
return icmp.chksum
def parse_id(self, icmp):
return icmp.id
def parse_seq(self, icmp):
return icmp.seq
def parse_ts_ori(self, icmp):
return icmp.ts_ori
def parse_ts_rx(self, icmp):
return icmp.ts_rx
def parse_ts_tx(self, icmp):
return icmp.ts_tx
def parse_gw(self, icmp):
return icmp.gw
def parse_ptr(self, icmp):
return icmp.ptr
def parse_reserved(self, icmp):
return icmp.reserved
def parse_len(self, icmp):
return icmp.length
def parse_mask(self, icmp):
return icmp.addr_mask
def parse_nexthopmtu(self, icmp):
return icmp.nexthopmtu
def show(self, icmp):
return icmp.show2()
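# --- Usage sketch (added, illustrative): build an ICMP echo-request layer and
# inspect its fields. Constructing and printing the packet works offline;
# actually sending it (e.g. with scapy's sr1) requires root privileges.
if __name__ == "__main__":
    maker = ICMPMaker(TYPE=8, CODE=0, ID=1, SEQ=1)
    pkt = maker.make_packet()
    print("type:", maker.parse_type(pkt), "seq:", maker.parse_seq(pkt))
    maker.show(pkt)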
| 27.732673 | 43 | 0.49875 | 322 | 2,801 | 4.198758 | 0.149068 | 0.118343 | 0.155325 | 0.199704 | 0.239645 | 0.195266 | 0.195266 | 0.195266 | 0.195266 | 0.195266 | 0 | 0.049938 | 0.420921 | 2,801 | 100 | 44 | 28.01 | 0.7836 | 0.017137 | 0 | 0.021277 | 0 | 0 | 0.010562 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.180851 | false | 0 | 0.021277 | 0.170213 | 0.531915 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 3 |
5ae239fa618e4a3a7c9f061bc545037094e16454 | 4,462 | py | Python | src/5 - rmn-master-original-adapt/viz.py | barbarasilveiraf/rmn-id-post | 894cd50a65902275bea3553cbe4f1acd8c2b709b | [
"MIT"
] | 99 | 2016-04-12T06:19:35.000Z | 2021-07-23T16:19:10.000Z | src/5 - rmn-master-original-adapt/viz.py | barbarasilveiraf/rmn-id-post | 894cd50a65902275bea3553cbe4f1acd8c2b709b | [
"MIT"
] | 3 | 2018-02-22T21:35:00.000Z | 2019-03-17T09:10:59.000Z | src/5 - rmn-master-original-adapt/viz.py | barbarasilveiraf/rmn-id-post | 894cd50a65902275bea3553cbe4f1acd8c2b709b | [
"MIT"
] | 20 | 2016-04-21T22:23:42.000Z | 2021-04-27T11:49:50.000Z | import csv, cPickle
from numpy import *
import matplotlib.pyplot as plt
import matplotlib
from cycler import cycler
# parse learned descriptors into a dict
def read_descriptors(desc_file):
desc_map = {}
f = open(desc_file, 'r')
for i, line in enumerate(f):
line = line.split()
desc_map[i] = line[0]
return desc_map
# read learned trajectories file
def read_csv(csv_file):
reader = csv.reader(open(csv_file, 'rb'))
all_traj = {}
prev_book = None
prev_c1 = None
prev_c2 = None
total_traj = 0
for index, row in enumerate(reader):
if index == 0:
continue
book, c1, c2 = row[:3]
if prev_book != book or prev_c1 != c1 or prev_c2 != c2:
prev_book = book
prev_c1 = c1
prev_c2 = c2
if book not in all_traj:
all_traj[book] = {}
all_traj[book][c1+' AND '+c2] = []
total_traj += 1
else:
all_traj[book][c1+' AND '+c2].append(array(row[4:], dtype='float32'))
print len(all_traj), total_traj
return all_traj
# compute locations to write labels
# only write labels when the run of a single topic is long enough
# (relative to smallest_shift) to fit the label text
def compute_centers(max_traj, smallest_shift):
center_inds = []
prev_topic = max_traj[0]
tstart = 0
for index, topic in enumerate(max_traj):
if topic != prev_topic:
center = int((index-tstart) / 2)
if center > smallest_shift / 2:
center_inds.append(tstart + center)
tstart = index
prev_topic = topic
center = int((index-tstart) / 2)
if index - tstart > smallest_shift:
center_inds.append(tstart + center)
return center_inds
def viz_csv(rmn_traj, rmn_descs,
min_length=10,
smallest_shift=1, max_viz=False,
fig_dir=None):
for book in rmn_traj:
for rel in rmn_traj[book]:
rtraj = rmn_traj[book][rel]
if len(rtraj) > min_length and len(rtraj)<150:
print book, rel
plt.close()
rtraj_mat = array(rtraj)
if max_viz:
plt.title(book + ': ' + rel)
plt.axis('off')
max_rtraj = argmax(rtraj_mat, axis=1)
rcenter_inds = compute_centers(max_rtraj, smallest_shift)
for ind in range(0, len(max_rtraj)):
topic = max_rtraj[ind]
plt.axhspan(ind, ind+1, 0.2, 0.4, color=color_list[topic])
if ind in rcenter_inds:
loc = (0.43, ind + 0.5)
plt.annotate(rmn_descs[topic], loc, size=15,
verticalalignment='center',
color=color_list[topic])
plt.xlim(0, 1.0)
plt.arrow(0.1,0,0.0,len(rtraj),
head_width=0.1, head_length=len(rtraj)/12, lw=3,
length_includes_head=True, fc='k', ec='k')
props = {'ha': 'left', 'va': 'bottom',}
plt.text(0.0, len(rtraj) / 2, 'TIME', props, rotation=90, size=15)
props = {'ha': 'left', 'va': 'top',}
if fig_dir is None:
plt.show()
else:
chars = rel.split(' AND ')
fig_name = fig_dir + book + \
'__' + chars[0] + '__' + chars[1] + '.png'
print 'figname = ', fig_name
plt.savefig(fig_name)
if __name__ == '__main__':
wmap, cmap, bmap = cPickle.load(open('data/metadata.pkl', 'rb'))
rmn_traj = read_csv('models/trajectories.log')
rmn_descs = read_descriptors('models/descriptors.log')
plt.style.use('ggplot')
color_list = ["peru","dodgerblue","brown","hotpink",
"aquamarine","springgreen","chartreuse","fuchsia",
"mediumspringgreen","burlywood","midnightblue","orangered",
"olive","darkolivegreen","darkmagenta","mediumvioletred",
"darkslateblue","saddlebrown","darkturquoise","cyan",
"chocolate","cornflowerblue","blue","red",
"navy","steelblue","cadetblue","forestgreen",
"black","darkcyan"]
color_list += color_list
plt.rc('axes', prop_cycle=(cycler('color', color_list)))
viz_csv(rmn_traj, rmn_descs,
min_length=50, max_viz=True,
fig_dir='figs/', smallest_shift=1)
| 33.298507 | 86 | 0.536755 | 541 | 4,462 | 4.23475 | 0.343808 | 0.021388 | 0.014404 | 0.011349 | 0.09079 | 0.066347 | 0.050633 | 0.026189 | 0 | 0 | 0 | 0.025136 | 0.340206 | 4,462 | 133 | 87 | 33.548872 | 0.753057 | 0.028911 | 0 | 0.056075 | 0 | 0 | 0.104022 | 0.010402 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.046729 | null | null | 0.028037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5ae33aa8d62f63600348c0e8b3870cf2f541e67e | 224 | py | Python | translate.py | vipul-khatana/Hinglish-Sentiment-Analysis | 27b0d0aca736194b8f06b8cccb3e68537d91709f | [
"MIT"
] | 19 | 2017-12-03T14:17:13.000Z | 2022-02-23T19:06:07.000Z | translate.py | vipul-khatana/Hinglish-Sentiment-Analysis | 27b0d0aca736194b8f06b8cccb3e68537d91709f | [
"MIT"
] | 2 | 2018-12-14T12:43:47.000Z | 2020-03-31T15:27:31.000Z | translate.py | vipul-khatana/Hinglish-Sentiment-Analysis | 27b0d0aca736194b8f06b8cccb3e68537d91709f | [
"MIT"
] | 23 | 2018-12-14T04:37:47.000Z | 2022-03-25T09:58:26.000Z | # -*- coding: utf-8 -*-
import numpy as np
from googletrans import Translator
translator = Translator(service_urls=['translate.google.co.in'])
def translate(word):
    return translator.translate(word, src='hi', dest='en')
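
# --- Usage sketch (added, illustrative): requires network access to the
# Google Translate endpoint. The returned object exposes a `.text` attribute:
#
#   result = translate("नमस्ते")
#   print(result.text)  # e.g. "Hello"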
| 22.4 | 64 | 0.727679 | 30 | 224 | 5.4 | 0.766667 | 0.246914 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005076 | 0.120536 | 224 | 9 | 65 | 24.888889 | 0.817259 | 0.09375 | 0 | 0 | 0 | 0 | 0.129353 | 0.109453 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.4 | 0.2 | 0.8 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 4 |
5ae33bc829b96d32b0f8a98265306f77e0baf4b1 | 3,327 | py | Python | tests/test_views.py | localmed/django-assetfiles | 34089780126989f49e6b890b85a90047704fde37 | [
"MIT"
] | null | null | null | tests/test_views.py | localmed/django-assetfiles | 34089780126989f49e6b890b85a90047704fde37 | [
"MIT"
] | 2 | 2017-02-11T20:10:46.000Z | 2017-02-11T20:10:56.000Z | tests/test_views.py | localmed/django-assetfiles | 34089780126989f49e6b890b85a90047704fde37 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django_nose.tools import *
from tests.base import AssetfilesTestCase
class TestServe(AssetfilesTestCase):
def test_returns_not_found_without_an_asset(self):
response = self.client.get('/static/non/existent/file.css')
assert_equal(response.status_code, 404)
def test_returns_static_files(self):
self.mkfile('static/css/static.css', 'body { color: red; }')
response = self.client.get('/static/css/static.css')
assert_contains(response, 'body { color: red; }')
def test_returns_static_files_with_correct_content_type(self):
self.mkfile('static/css/static.css')
response = self.client.get('/static/css/static.css')
assert_equal(response.get('content-type'), 'text/css')
def test_returns_static_files_with_extra_extensions(self):
self.mkfile('app-1/static/js/jquery.plugin.js', '$.fn.plugin = {};')
response = self.client.get('/static/js/jquery.plugin.js')
assert_contains(response, '$.fn.plugin = {};')
def test_returns_app_static_files(self):
self.mkfile('app-1/static/css/app_static.css', 'body { color: blue; }')
response = self.client.get('/static/css/app_static.css')
assert_contains(response, 'body { color: blue; }')
def test_processes_scss_files(self):
self.mkfile('static/css/simple.scss',
'$c: red; body { color: $c; }')
response = self.client.get('/static/css/simple.css')
assert_contains(response, 'body {\n color: red; }')
def test_returns_processed_scss_files_with_correct_content_type(self):
self.mkfile('static/css/simple.scss',
'$c: red; body { color: $c; }')
response = self.client.get('/static/css/simple.css')
assert_equal(response.get('content-type'), 'text/css')
def test_processes_app_scss_files(self):
self.mkfile('app-1/static/css/app.scss',
'$c: yellow; body { color: $c; }')
response = self.client.get('/static/css/app.css')
assert_contains(response, 'body {\n color: yellow; }')
def test_processes_scss_files_with_deps(self):
self.mkfile('static/css/folder/_dep.scss', '$c: black;')
self.mkfile('static/css/with_deps.scss',
'@import "folder/dep"; body { color: $c; }')
response = self.client.get('/static/css/with_deps.css')
assert_contains(response, 'body {\n color: black; }')
def test_processes_scss_files_with_app_deps(self):
self.mkfile('app-1/static/css/folder/_dep.scss', '$c: white;')
self.mkfile('static/css/with_app_deps.scss',
'@import "folder/dep"; body { color: $c; }')
response = self.client.get('/static/css/with_app_deps.css')
assert_contains(response, 'body {\n color: white; }')
def test_processes_asset_files_with_unicode_chars(self):
self.mkfile('static/css/simple.scss',
'$c: "é"; a::before { content: $c; }')
self.mkfile('static/js/simple.coffee', 'a = foo: "é#{2}3"')
response = self.client.get('/static/css/simple.css')
assert_contains(response, 'a::before {\n content: "é"; }')
response = self.client.get('/static/js/simple.js')
assert_contains(response, 'foo: "é" + 2 + "3"')
| 44.36 | 79 | 0.643823 | 439 | 3,327 | 4.681093 | 0.177677 | 0.113869 | 0.105109 | 0.122628 | 0.73528 | 0.636983 | 0.525547 | 0.421411 | 0.36691 | 0.273966 | 0 | 0.004473 | 0.193568 | 3,327 | 74 | 80 | 44.959459 | 0.761461 | 0.006312 | 0 | 0.241379 | 0 | 0 | 0.351695 | 0.175242 | 0 | 0 | 0 | 0 | 0.206897 | 1 | 0.189655 | false | 0 | 0.086207 | 0 | 0.293103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ae4b5cc0aeea03900ca797b02f8cd9bb0c7e4f9 | 8,083 | py | Python | CHT/cht_data.py | aryam7/WASP | 39f3ac2e8ad3b97124b52cc17e97902e3ec1fbc9 | [
"Apache-2.0"
] | 72 | 2015-03-01T20:59:06.000Z | 2022-03-28T08:48:39.000Z | CHT/cht_data.py | bmvdgeijn/WASP | d3b8447fd7719fffa00b856fd1f27c845554693e | [
"Apache-2.0"
] | 93 | 2015-01-14T23:49:12.000Z | 2022-03-26T16:31:52.000Z | CHT/cht_data.py | aryam7/WASP | 39f3ac2e8ad3b97124b52cc17e97902e3ec1fbc9 | [
"Apache-2.0"
] | 51 | 2015-02-19T23:49:17.000Z | 2021-12-16T01:40:37.000Z | import sys
import gzip
import os
import random  # needed by parse_test_snp(shuffle=True); missing from the original imports

import numpy as np

import util


class TestSNP:
    def __init__(self, name, geno_hap1, geno_hap2, AS_target_ref, AS_target_alt,
                 hetps, totals, counts):
        self.name = name
        self.geno_hap1 = geno_hap1
        self.geno_hap2 = geno_hap2
        self.AS_target_ref = AS_target_ref
        self.AS_target_alt = AS_target_alt
        self.hetps = hetps
        self.totals = totals
        self.counts = counts

    def is_het(self):
        """Returns True if the test SNP is heterozygous"""
        return self.geno_hap1 != self.geno_hap2

    def is_homo_ref(self):
        """Returns True if test SNP is homozygous for reference allele"""
        return self.geno_hap1 == 0 and self.geno_hap2 == 0

    def is_homo_alt(self):
        """Returns True if test SNP is homozygous for non-reference allele"""
        return self.geno_hap1 == 1 and self.geno_hap2 == 1
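
# A minimal usage sketch (not part of the original module; all values are
# illustrative): a heterozygous test SNP with two linked target SNPs.
#
#     snp = TestSNP("rs123", 0, 1,
#                   AS_target_ref=np.array([5, 3]),
#                   AS_target_alt=np.array([2, 4]),
#                   hetps=np.array([0.99, 0.95]),
#                   totals=120.0, counts=14)
#     assert snp.is_het() and not snp.is_homo_ref()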
dup_snp_warn = True


def parse_test_snp(snpinfo, shuffle=False):
    global dup_snp_warn

    snp_id = snpinfo[2]
    tot = 0 if snpinfo[16] == "NA" else float(snpinfo[16])

    if snpinfo[6] == "NA":
        geno_hap1 = 0
        geno_hap2 = 0
    else:
        geno_hap1 = int(snpinfo[6].strip().split("|")[0])
        geno_hap2 = int(snpinfo[6].strip().split("|")[1])

    count = 0 if snpinfo[15] == "NA" else int(snpinfo[15])

    if snpinfo[9].strip() == "NA" or geno_hap1 == geno_hap2:
        # SNP is homozygous, so there is no AS info
        return TestSNP(snp_id, geno_hap1, geno_hap2, [], [], [], tot, count)
    else:
        # positions of target SNPs
        snp_locs = np.array([int(y.strip()) for y in snpinfo[9].split(';')])
        # counts of reads that match reference overlapping linked 'target' SNPs
        snp_as_ref = np.array([int(y) for y in snpinfo[12].split(';')])
        # counts of reads that match alternate allele
        snp_as_alt = np.array([int(y) for y in snpinfo[13].split(';')])
        # heterozygote probabilities
        snp_hetps = np.array([np.float64(y.strip())
                              for y in snpinfo[10].split(';')])
        # linkage probabilities, not currently used
        snp_linkageps = np.array([np.float64(y.strip())
                                  for y in snpinfo[11].split(';')])

        # the same SNP should not be provided multiple times, since this
        # can create problems with the combined test. Warn about and filter
        # duplicate SNPs
        uniq_loc, uniq_idx = np.unique(snp_locs, return_index=True)
        if dup_snp_warn and uniq_loc.shape[0] != snp_locs.shape[0]:
            sys.stderr.write("WARNING: discarding SNPs that are repeated "
                             "multiple times in same line\n")
            # only warn once
            dup_snp_warn = False
        snp_as_ref = snp_as_ref[uniq_idx]
        snp_as_alt = snp_as_alt[uniq_idx]
        snp_hetps = snp_hetps[uniq_idx]
        # linkage probabilities currently not used
        snp_linkageps = snp_linkageps[uniq_idx]

        if shuffle:
            # permute allele-specific read counts by flipping them randomly at
            # each SNP
            for y in range(len(snp_as_ref)):
                if random.randint(0, 1) == 1:
                    snp_as_ref[y], snp_as_alt[y] = snp_as_alt[y], snp_as_ref[y]

        return TestSNP(snp_id, geno_hap1, geno_hap2, snp_as_ref,
                       snp_as_alt, snp_hetps, tot, count)
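
# Hypothetical smoke test for parse_test_snp (not in the original file). The
# column layout is inferred from the indices used above; all other fields are
# placeholders.
#
#     row = ["NA"] * 17
#     row[2] = "rs123"        # test SNP id
#     row[6] = "0|1"          # phased genotype -> heterozygous
#     row[9] = "100;200"      # target SNP positions
#     row[10] = "0.99;0.95"   # heterozygote probabilities
#     row[11] = "1.0;1.0"     # linkage probabilities (unused)
#     row[12] = "5;3"         # reference allele read counts
#     row[13] = "2;4"         # alternate allele read counts
#     row[15] = "14"          # region read count
#     row[16] = "120"         # expected total
#     snp = parse_test_snp(row)
#     assert snp.is_het()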
def open_input_files(in_filename):
    if not os.path.exists(in_filename) or not os.path.isfile(in_filename):
        raise IOError("input file %s does not exist or is not a "
                      "regular file\n" % in_filename)

    # read file that contains list of input files
    in_file = open(in_filename, "rt")
    infiles = []

    for line in in_file:
        # open each input file and read first line
        filename = line.rstrip()
        sys.stderr.write(" " + filename + "\n")
        if (not filename) or (not os.path.exists(filename)) or \
           (not os.path.isfile(filename)):
            # report the offending path (the original interpolated the
            # list-file object here instead of the filename)
            sys.stderr.write("input file '%s' does not exist or is not a "
                             "regular file\n" % filename)
            exit(2)
        if util.is_gzipped(filename):
            f = gzip.open(filename, "rt")
        else:
            f = open(filename, "rt")
        # skip header
        f.readline()

        infiles.append(f)
    in_file.close()

    if len(infiles) == 0:
        sys.stderr.write("no input files specified in file '%s'\n" % in_filename)
        exit(2)

    return infiles
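
# For illustration (hypothetical paths): `in_filename` is expected to contain
# one CHT input file path per line, one file per individual, e.g.
#
#     /data/cht/cht_input_sample1.txt.gz
#     /data/cht/cht_input_sample2.txt.gz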
def read_count_matrices(input_filename, shuffle=False, skip=0,
                        min_counts=0, min_as_counts=0, sample=0):
    """Given an input file that contains paths to input files for all
    individuals, returns a matrix of observed read counts and a matrix
    of expected read counts
    """
    infiles = open_input_files(input_filename)

    is_finished = False

    count_matrix = []
    expected_matrix = []

    line_num = 0
    skip_num = 0
    while not is_finished:
        is_comment = False
        line_num += 1
        count_line = []
        expected_line = []
        num_as = 0
        for i in range(len(infiles)):
            # read next row from this input file
            line = infiles[i].readline().strip()
            if line.startswith("#") or line.startswith("CHROM"):
                # skip comment lines and header line
                is_comment = True
            elif line:
                if is_finished:
                    raise IOError("All input files should have same number of lines. "
                                  "LINE %d is present in file %s, but not in all input files\n"
                                  % (line_num, infiles[i].name))
                if is_comment:
                    raise IOError("Comment and header lines should be consistent across "
                                  "all input files. LINE %d is comment or header line in some input files "
                                  "but not in file %s" % (line_num, infiles[i].name))

                # parse test SNP and associated info from input file row
                new_snp = parse_test_snp(line.split(), shuffle=shuffle)

                if new_snp.is_het():
                    num_as += np.sum(new_snp.AS_target_ref) + \
                        np.sum(new_snp.AS_target_alt)

                count_line.append(new_snp.counts)
                expected_line.append(new_snp.totals)
            else:
                # out of lines from at least one file, assume we are finished
                is_finished = True

        if not is_finished and not is_comment:
            if skip_num < skip:
                # skip this row
                skip_num += 1
            else:
                if sum(count_line) >= min_counts and num_as >= min_as_counts:
                    # this line exceeded minimum number of read counts and AS counts
                    count_matrix.append(count_line)
                    expected_matrix.append(expected_line)
                skip_num = 0

    count_matrix = np.array(count_matrix, dtype=int)
    expected_matrix = np.array(expected_matrix, dtype=np.float64)
    sys.stderr.write("count_matrix dimension: %s\n" % str(count_matrix.shape))
    sys.stderr.write("expect_matrix dimension: %s\n" % str(expected_matrix.shape))

    nrow = count_matrix.shape[0]
    if (sample > 0) and (sample < count_matrix.shape[0]):
        # randomly sample subset of rows without replacement
        sys.stderr.write("randomly sampling %d target regions\n" % sample)
        samp_index = np.arange(nrow)
        np.random.shuffle(samp_index)
        samp_index = samp_index[:sample]
        count_matrix = count_matrix[samp_index, ]
        expected_matrix = expected_matrix[samp_index, ]
        sys.stderr.write("new count_matrix dimension: %s\n" % str(count_matrix.shape))
        sys.stderr.write("new expect_matrix dimension: %s\n" % str(expected_matrix.shape))

    return count_matrix, expected_matrix
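
# Hedged usage sketch (added for illustration; the file name and thresholds
# are hypothetical):
#
#     counts, expected = read_count_matrices("cht_input_files.txt",
#                                            min_counts=50,
#                                            min_as_counts=10)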
| 35.143478 | 107 | 0.584189 | 1,085 | 8,083 | 4.17235 | 0.204608 | 0.016567 | 0.027833 | 0.014358 | 0.22907 | 0.177159 | 0.121935 | 0.121935 | 0.096311 | 0.0592 | 0 | 0.014765 | 0.321292 | 8,083 | 229 | 108 | 35.296943 | 0.810427 | 0.153285 | 0 | 0.0625 | 0 | 0 | 0.097762 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048611 | false | 0 | 0.034722 | 0 | 0.138889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ae519116b2d3198ee0c6685afe6a91a67c62aa2 | 1,023 | py | Python | restaurant/admin.backup.py | syahnur197/restaurant-backend | a0f320b69f3fed293555634f6ac094eaa0574c45 | [
"MIT"
] | null | null | null | restaurant/admin.backup.py | syahnur197/restaurant-backend | a0f320b69f3fed293555634f6ac094eaa0574c45 | [
"MIT"
] | null | null | null | restaurant/admin.backup.py | syahnur197/restaurant-backend | a0f320b69f3fed293555634f6ac094eaa0574c45 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.contenttypes.admin import GenericStackedInline

from .models import Image, Product

"""
To register generics
"""


class ImageInline(GenericStackedInline):
    model = Image


@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
    list_display = (
        'id',
        'created',
        'modified',
        'content_type',
        'object_id',
        'image',
    )
    list_filter = ('created', 'modified', 'content_type')


@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    list_display = (
        'id',
        'created',
        'modified',
        'status',
        'activate_date',
        'deactivate_date',
        'name',
        'description',
        'restaurant',
        'unit_price',
        'discount_price',
    )
    list_filter = (
        'created',
        'modified',
        'activate_date',
        'deactivate_date',
        'restaurant',
    )
    search_fields = ('name',)
    inlines = [
        ImageInline,
    ]
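
# For context (a hypothetical reconstruction, not part of this file): given
# the GenericStackedInline and the list_display columns above, the Image
# model presumably looks roughly like this:
#
#     class Image(models.Model):
#         created = models.DateTimeField(auto_now_add=True)
#         modified = models.DateTimeField(auto_now=True)
#         content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
#         object_id = models.PositiveIntegerField()
#         content_object = GenericForeignKey('content_type', 'object_id')
#         image = models.ImageField(upload_to='images/')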
| 19.673077 | 66 | 0.57087 | 86 | 1,023 | 6.627907 | 0.476744 | 0.105263 | 0.059649 | 0.091228 | 0.150877 | 0.150877 | 0.150877 | 0 | 0 | 0 | 0 | 0 | 0.304008 | 1,023 | 51 | 67 | 20.058824 | 0.800562 | 0 | 0 | 0.380952 | 0 | 0 | 0.228141 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.309524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ae5219951f2f425f340756b442acd6b639dbefb | 1,025 | py | Python | test/twistedutils/test_deferred_deque.py | Wizmann/STUP-Protocol | e06a3442082e5061d2be32be3ffd681675e7ffb5 | [
"MIT"
] | 14 | 2017-05-06T10:14:32.000Z | 2018-07-17T02:58:00.000Z | test/twistedutils/test_deferred_deque.py | Wizmann/STUP-Protocol | e06a3442082e5061d2be32be3ffd681675e7ffb5 | [
"MIT"
] | 2 | 2017-06-13T05:40:18.000Z | 2017-06-13T16:23:01.000Z | test/twistedutils/test_deferred_deque.py | Wizmann/STUP-Protocol | e06a3442082e5061d2be32be3ffd681675e7ffb5 | [
"MIT"
] | 4 | 2017-06-09T20:20:54.000Z | 2018-07-17T02:58:10.000Z | # coding=utf-8
from __future__ import absolute_import

import pytest
import twisted
from twisted.trial import unittest
from twisted.internet.defer import Deferred
from twisted.python import log

from stup.twistedutils.deferred_deque import *


class DeferredDequeueTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        self.buffer = []
        super(DeferredDequeueTest, self).__init__(*args, **kwargs)

    def test_all(self):
        dd = DeferredDeque()
        dd.append_left('a')
        dd.append_right('b')
        self.assertEqual(list(dd.pending), ['a', 'b'])

        dd.pop_right().addCallback(lambda x: self.buffer.append(x))
        self.assertEqual(self.buffer, ['b'])

        dd.pop_left().addCallback(lambda x: self.buffer.append(x))
        self.assertEqual(self.buffer, ['b', 'a'])

        # the deque is now empty, so this pop's Deferred stays pending...
        dd.pop_right().addCallback(lambda x: self.buffer.append(x))
        self.assertEqual(self.buffer, ['b', 'a'])

        # ...until the next append fires it
        dd.append_left('c')
        self.assertEqual(self.buffer, ['b', 'a', 'c'])
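
# The pattern under test, sketched (a minimal illustration inferred from the
# assertions above, not the real stup implementation): popping an empty deque
# returns a Deferred that a later append fires.
#
#     def pop_left(self):
#         d = Deferred()
#         if self.pending:
#             d.callback(self.pending.popleft())
#         else:
#             self.waiting.append(d)
#         return d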
| 27.702703 | 67 | 0.656585 | 130 | 1,025 | 5.015385 | 0.353846 | 0.122699 | 0.116564 | 0.153374 | 0.358896 | 0.358896 | 0.317485 | 0.317485 | 0.317485 | 0.317485 | 0 | 0.001212 | 0.195122 | 1,025 | 36 | 68 | 28.472222 | 0.789091 | 0.011707 | 0 | 0.166667 | 0 | 0 | 0.012871 | 0 | 0 | 0 | 0 | 0 | 0.208333 | 1 | 0.083333 | false | 0 | 0.291667 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ae7e92c23080d64d3b2328bffafe05bd7e29760 | 1,845 | py | Python | quickspy/net/netengine.py | kirte2849/Quickspy | 767d0fb8ded283aa0d8122d77e15dc411f553994 | [
"MIT"
] | 1 | 2020-07-11T13:41:40.000Z | 2020-07-11T13:41:40.000Z | quickspy/net/netengine.py | kirte2849/Quickspy | 767d0fb8ded283aa0d8122d77e15dc411f553994 | [
"MIT"
] | null | null | null | quickspy/net/netengine.py | kirte2849/Quickspy | 767d0fb8ded283aa0d8122d77e15dc411f553994 | [
"MIT"
] | null | null | null | from lxml import etree
import socket
import re

import aiohttp

from quickspy.color import *


class Response:
    def __init__(self, byte, encoding='utf-8'):
        # keep the encoding on the instance (the original stored it in a
        # module-level global, which breaks with mixed encodings)
        self.encoding = encoding
        self.url = None
        self.html = None
        self.byte = byte
        self.HTML = None
        self.status = None

    def get_html(self):
        if self.html is None:
            self.html = self.get_byte().decode(self.encoding)
        return self.html

    def get_byte(self):
        return self.byte

    def xpath(self, exp):
        temp = self.get_HTML()
        return temp.xpath(exp)

    def findall(self, exp):
        # re.findall takes (pattern, string); the original had the
        # arguments reversed
        return re.findall(exp, self.get_html())

    def get_HTML(self):
        if self.HTML is None:
            self.HTML = etree.HTML(self.get_html())
        return self.HTML

    def get_url(self):
        return self.url

    def gettitle(self):
        temp = self.get_HTML()
        return temp.xpath('//title/text()')[0]
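
# Quick illustrative usage of Response (hypothetical bytes, not part of the
# original module):
#
#     r = Response(b'<html><title>hi</title></html>')
#     r.gettitle()            # -> 'hi'
#     r.xpath('//title')      # -> list of lxml elements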
class NetEngine:
    def __init__(self):
        # open the aiohttp HTTP client session
        self.session = aiohttp.ClientSession()
        try:
            self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.s.connect(('localhost', 2546))
        except socket.error as msg:
            print(RED(f'at Quickspy.init :{msg}'))

    async def close(self):
        await self.session.close()
        self.s.close()

    async def get(self, url, timeout=10):
        async with self.session.get(url, timeout=timeout) as response:
            print(f'netengine:timeout = {timeout}')
            temp = await response.read()
            _response = Response(temp)
            _response.url = response.url
            _response.status = response.status
            self.s.send("eval self.nemanager.reg('default').add()".encode())
            return _response
 | 24.932432 | 77 | 0.58374 | 229 | 1,845 | 4.60262 | 0.318777 | 0.060721 | 0.041746 | 0.048387 | 0.166983 | 0.129032 | 0.129032 | 0.072106 | 0.072106 | 0.072106 | 0 | 0.00625 | 0.306233 | 1,845 | 74 | 78 | 24.932432 | 0.817188 | 0.009214 | 0 | 0.037037 | 0 | 0 | 0.065098 | 0.019147 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.092593 | 0.055556 | 0.444444 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ae91679bd447b62dfc5e7a20c1d3f70d03392e4 | 1,577 | py | Python | yamlapi/demo/tool/read_write_json.py | Ironkubi/yamlapi | efd80cf15a182b0dde03e923f6b3d86c43e5a355 | [
"MIT"
] | 19 | 2020-05-29T09:28:42.000Z | 2022-02-21T06:09:42.000Z | yamlapi/demo/tool/read_write_json.py | Ironkubi/yamlapi | efd80cf15a182b0dde03e923f6b3d86c43e5a355 | [
"MIT"
] | 1 | 2020-03-05T05:45:19.000Z | 2020-07-12T03:08:40.000Z | yamlapi/demo/tool/read_write_json.py | Ironkubi/yamlapi | efd80cf15a182b0dde03e923f6b3d86c43e5a355 | [
"MIT"
] | 7 | 2020-10-21T02:24:44.000Z | 2022-02-21T06:09:22.000Z | import demjson

from setting.project_config import *


def read_json(json_absolute_path):
    """
    Read a JSON file
    :param json_absolute_path: the absolute path of the JSON file to read
    :return:
    """
    with open(json_absolute_path, "r", encoding="utf-8") as f:
        data_list = demjson.decode(f.read(), encoding="utf-8")
    return data_list
    # returns a list of data


def write_json(json_relative, data_list):
    """
    Write a JSON file
    :param json_relative: the relative path of the JSON file to write
    :param data_list: the data to serialize
    :return:
    """
    with open(yaml_path + json_relative, "wb") as f:
        f.write(demjson.encode(data_list, encoding="utf-8"))
    return json_relative
    # returns the relative path of the JSON file


def merge_json():
    """
    Merge all JSON files
    :return:
    """
    json_list = []
    for root, dirs, files in os.walk(yaml_path):
        # root is the path of the current directory
        # dirs is a list of all subdirectories under the current path
        # files is a list of all non-directory files under the current path
        for i in files:
            if os.path.splitext(i)[1] == '.json':
                # os.path.splitext() splits a path into filename + extension
                if i != first_test_case_file:
                    json_list.append(os.path.join(root, i))
                else:
                    the_first_json = os.path.join(root, first_test_case_file)
                    json_list.append(the_first_json)
                    # append the first JSON file
    json_list.reverse()
    # reverse the order
    temporary_list = []
    for i in json_list:
        if i:
            j = read_json(i)
            # call the JSON file reading helper
            if j:
                temporary_list.extend(j)
                # add the elements to the list step by step
    return temporary_list
    # returns a temporary list
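
# Illustrative usage (hypothetical paths; yaml_path and first_test_case_file
# come from setting.project_config):
#
#     write_json("cases/new_case.json", [{"case": "demo"}])
#     all_cases = merge_json()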
| 23.537313 | 77 | 0.590996 | 182 | 1,577 | 4.901099 | 0.395604 | 0.044843 | 0.053812 | 0.040359 | 0.069507 | 0.069507 | 0.069507 | 0 | 0 | 0 | 0 | 0.003666 | 0.30818 | 1,577 | 66 | 78 | 23.893939 | 0.813932 | 0.223843 | 0 | 0 | 0 | 0 | 0.020105 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.071429 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5aea87607e71c1fa54637d37748174be3086f680 | 174 | py | Python | core.py | guillaumevincent/keepass-less | 03e041b2b49595b421a7b6ca8a0a4e7ae51d7fdc | [
"MIT"
] | 1 | 2015-12-01T21:47:34.000Z | 2015-12-01T21:47:34.000Z | core.py | guillaumevincent/keepass-less | 03e041b2b49595b421a7b6ca8a0a4e7ae51d7fdc | [
"MIT"
] | null | null | null | core.py | guillaumevincent/keepass-less | 03e041b2b49595b421a7b6ca8a0a4e7ae51d7fdc | [
"MIT"
] | null | null | null | def split_entry(entry):
    entries = entry.split(':')
    if len(entries) == 3:
        return entries[0], entries[1], int(entries[2])
    return entries[0], entries[1], 10
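
# Example (illustrative values): the third field is an optional integer that
# defaults to 10 when omitted.
#
#     split_entry('github:s3cret:25')  # -> ('github', 's3cret', 25)
#     split_entry('github:s3cret')     # -> ('github', 's3cret', 10)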
| 29 | 54 | 0.609195 | 25 | 174 | 4.2 | 0.52 | 0.247619 | 0.266667 | 0.4 | 0.419048 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058394 | 0.212644 | 174 | 5 | 55 | 34.8 | 0.708029 | 0 | 0 | 0 | 0 | 0 | 0.005747 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 6 |
5aeb5490b47cc2c338ecc2ddb29c186d556812da | 834 | py | Python | instascrape/commands/whoami.py | tnychn/instascrape | 7aaf3c1a1786bbe80059ed6e0d93442a19a6f475 | [
"MIT"
] | 80 | 2020-05-28T17:22:14.000Z | 2022-03-25T07:15:51.000Z | instascrape/commands/whoami.py | AlphaXenon/InstaScrape | 7aaf3c1a1786bbe80059ed6e0d93442a19a6f475 | [
"MIT"
] | 23 | 2020-05-25T12:45:40.000Z | 2022-03-06T05:44:41.000Z | instascrape/commands/whoami.py | AlphaXenon/InstaScrape | 7aaf3c1a1786bbe80059ed6e0d93442a19a6f475 | [
"MIT"
] | 14 | 2020-06-28T05:52:28.000Z | 2022-03-28T04:27:50.000Z | from instascrape.commands import pretty_print, load_obj
from colorama import Fore, Style


def whoami_handler(**_):
    insta = load_obj()
    if insta is None:
        name = "NOBODY"
        print(Fore.BLUE + "Authenticated:", Fore.RED + "False")
    else:
        name = insta.my_username
        data = insta.me().as_dict()
        print(Style.BRIGHT + "\033[4m" + "Your Profile")
        pretty_print(data)
        print()
        print(Fore.BLUE + "Authenticated:", Fore.GREEN + "True")
        print(Fore.LIGHTCYAN_EX + "Your ID is", Style.BRIGHT + str(insta.my_user_id))
    print(Fore.LIGHTCYAN_EX + "You are", Style.BRIGHT + name)
    print(Fore.LIGHTBLACK_EX + "“I was basically born knowing how to casually stalk people on social media.”")
    print(Fore.LIGHTBLACK_EX + "    -- Becky Albertalli, The Upside of Unrequited")
 | 37.909091 | 110 | 0.651079 | 110 | 834 | 4.809091 | 0.590909 | 0.102079 | 0.049149 | 0.098299 | 0.113422 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006192 | 0.22542 | 834 | 21 | 111 | 39.714286 | 0.812694 | 0 | 0 | 0 | 0 | 0 | 0.241007 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.166667 | 0.555556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
5aeccbb2cf767b475bd8dc50612ec4c125139aee | 20,131 | py | Python | tests/test_attack_log.py | Thorsten-Sick/PurpleDome | 297d746ef2e17a4207f8274b7fccbe2ce43c4a5f | [
"MIT"
] | 7 | 2021-11-30T19:54:29.000Z | 2022-03-05T23:15:23.000Z | tests/test_attack_log.py | Thorsten-Sick/PurpleDome | 297d746ef2e17a4207f8274b7fccbe2ce43c4a5f | [
"MIT"
] | null | null | null | tests/test_attack_log.py | Thorsten-Sick/PurpleDome | 297d746ef2e17a4207f8274b7fccbe2ce43c4a5f | [
"MIT"
] | 2 | 2021-11-30T11:16:27.000Z | 2022-02-02T13:36:01.000Z | #!/usr/bin/env python3

# Testing the attack log class

import unittest

from app.attack_log import AttackLog
import app.attack_log

# from unittest.mock import patch, call
# from app.exceptions import ConfigurationError

# https://docs.python.org/3/library/unittest.html


class TestMachineConfig(unittest.TestCase):
    """ Tests for the attack log """

    def test_init(self):
        """ The init is empty """
        al = AttackLog()
        self.assertIsNotNone(al)
        default = {"boilerplate": {'log_format_major_version': 1, 'log_format_minor_version': 1},
                   "system_overview": [],
                   "attack_log": []}
        self.assertEqual(al.get_dict(), default)
    def test_caldera_attack_start(self):
        """ Starting a caldera attack """
        al = AttackLog()
        source = "asource"
        paw = "apaw"
        group = "agroup"
        ability_id = "aability_id"
        ttp = "1234"
        name = "aname"
        description = "adescription"
        al.start_caldera_attack(source=source,
                                paw=paw,
                                group=group,
                                ability_id=ability_id,
                                ttp=ttp,
                                name=name,
                                description=description
                                )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "start")
        self.assertEqual(data["attack_log"][0]["type"], "attack")
        self.assertEqual(data["attack_log"][0]["sub_type"], "caldera")
        self.assertEqual(data["attack_log"][0]["source"], source)
        self.assertEqual(data["attack_log"][0]["target_paw"], paw)
        self.assertEqual(data["attack_log"][0]["target_group"], group)
        self.assertEqual(data["attack_log"][0]["ability_id"], ability_id)
        self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)
        self.assertEqual(data["attack_log"][0]["name"], name)
        self.assertEqual(data["attack_log"][0]["description"], description)

    def test_caldera_attack_stop(self):
        """ Stopping a caldera attack """
        al = AttackLog()
        source = "asource"
        paw = "apaw"
        group = "agroup"
        ability_id = "aability_id"
        ttp = "1234"
        name = "aname"
        description = "adescription"
        al.stop_caldera_attack(source=source,
                               paw=paw,
                               group=group,
                               ability_id=ability_id,
                               ttp=ttp,
                               name=name,
                               description=description
                               )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "stop")
        self.assertEqual(data["attack_log"][0]["type"], "attack")
        self.assertEqual(data["attack_log"][0]["sub_type"], "caldera")
        self.assertEqual(data["attack_log"][0]["source"], source)
        self.assertEqual(data["attack_log"][0]["target_paw"], paw)
        self.assertEqual(data["attack_log"][0]["target_group"], group)
        self.assertEqual(data["attack_log"][0]["ability_id"], ability_id)
        self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)
        self.assertEqual(data["attack_log"][0]["name"], name)
        self.assertEqual(data["attack_log"][0]["description"], description)
    def test_kali_attack_start(self):
        """ Starting a kali attack """
        al = AttackLog()
        source = "asource"
        target = "a target"
        ttp = "1234"
        attack_name = "a name"
        al.start_kali_attack(source=source,
                             target=target,
                             attack_name=attack_name,
                             ttp=ttp,
                             )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "start")
        self.assertEqual(data["attack_log"][0]["type"], "attack")
        self.assertEqual(data["attack_log"][0]["sub_type"], "kali")
        self.assertEqual(data["attack_log"][0]["source"], source)
        self.assertEqual(data["attack_log"][0]["target"], target)
        self.assertEqual(data["attack_log"][0]["kali_name"], attack_name)
        self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)

    def test_kali_attack_stop(self):
        """ Stopping a kali attack """
        al = AttackLog()
        source = "asource"
        target = "a target"
        ttp = "1234"
        attack_name = "a name"
        al.stop_kali_attack(source=source,
                            target=target,
                            attack_name=attack_name,
                            ttp=ttp,
                            )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "stop")
        self.assertEqual(data["attack_log"][0]["type"], "attack")
        self.assertEqual(data["attack_log"][0]["sub_type"], "kali")
        self.assertEqual(data["attack_log"][0]["source"], source)
        self.assertEqual(data["attack_log"][0]["target"], target)
        self.assertEqual(data["attack_log"][0]["kali_name"], attack_name)
        self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)

    def test_narration_start(self):
        """ Starting a narration """
        al = AttackLog()
        text = "texttextext"
        al.start_narration(text)
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "start")
        self.assertEqual(data["attack_log"][0]["type"], "narration")
        self.assertEqual(data["attack_log"][0]["sub_type"], "user defined narration")
        self.assertEqual(data["attack_log"][0]["text"], text)
    def test_build_start(self):
        """ Starting a build """
        al = AttackLog()
        dl_uri = "asource"
        dl_uris = "a target"
        payload = "1234"
        platform = "a name"
        architecture = "arch"
        lhost = "lhost"
        lport = 8080
        filename = "afilename"
        encoding = "encoded"
        encoded_filename = "ef"
        sRDI_conversion = True
        for_step = 4
        comment = "this is a comment"
        al.start_build(dl_uri=dl_uri,
                       dl_uris=dl_uris,
                       payload=payload,
                       platform=platform,
                       architecture=architecture,
                       lhost=lhost,
                       lport=lport,
                       filename=filename,
                       encoding=encoding,
                       encoded_filename=encoded_filename,
                       sRDI_conversion=sRDI_conversion,
                       for_step=for_step,
                       comment=comment
                       )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "start")
        self.assertEqual(data["attack_log"][0]["type"], "build")
        self.assertEqual(data["attack_log"][0]["dl_uri"], dl_uri)
        self.assertEqual(data["attack_log"][0]["dl_uris"], dl_uris)
        self.assertEqual(data["attack_log"][0]["payload"], payload)
        self.assertEqual(data["attack_log"][0]["platform"], platform)
        self.assertEqual(data["attack_log"][0]["architecture"], architecture)
        self.assertEqual(data["attack_log"][0]["lhost"], lhost)
        self.assertEqual(data["attack_log"][0]["lport"], lport)
        self.assertEqual(data["attack_log"][0]["filename"], filename)
        self.assertEqual(data["attack_log"][0]["encoding"], encoding)
        self.assertEqual(data["attack_log"][0]["encoded_filename"], encoded_filename)
        self.assertEqual(data["attack_log"][0]["sRDI_conversion"], sRDI_conversion)
        self.assertEqual(data["attack_log"][0]["for_step"], for_step)
        self.assertEqual(data["attack_log"][0]["comment"], comment)

    def test_build_start_default(self):
        """ Starting a build with default values """
        al = AttackLog()
        al.start_build()
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "start")
        self.assertEqual(data["attack_log"][0]["type"], "build")
        self.assertEqual(data["attack_log"][0]["dl_uri"], None)
        self.assertEqual(data["attack_log"][0]["dl_uris"], None)
        self.assertEqual(data["attack_log"][0]["payload"], None)
        self.assertEqual(data["attack_log"][0]["platform"], None)
        self.assertEqual(data["attack_log"][0]["architecture"], None)
        self.assertEqual(data["attack_log"][0]["lhost"], None)
        self.assertEqual(data["attack_log"][0]["lport"], None)
        self.assertEqual(data["attack_log"][0]["filename"], None)
        self.assertEqual(data["attack_log"][0]["encoding"], None)
        self.assertEqual(data["attack_log"][0]["encoded_filename"], None)
        self.assertEqual(data["attack_log"][0]["sRDI_conversion"], False)
        self.assertEqual(data["attack_log"][0]["for_step"], None)
        self.assertEqual(data["attack_log"][0]["comment"], None)

    def test_build_stop(self):
        """ Stopping a build """
        al = AttackLog()
        logid = "lid"
        al.stop_build(logid=logid)
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "stop")
        self.assertEqual(data["attack_log"][0]["type"], "build")
        self.assertEqual(data["attack_log"][0]["logid"], logid)
    def test_metasploit_attack_start(self):
        """ Starting a metasploit attack """
        al = AttackLog()
        source = "asource"
        target = "a target"
        ttp = "1234"
        attack_name = "a name"
        al.start_metasploit_attack(source=source,
                                   target=target,
                                   metasploit_command=attack_name,
                                   ttp=ttp,
                                   )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "start")
        self.assertEqual(data["attack_log"][0]["type"], "attack")
        self.assertEqual(data["attack_log"][0]["sub_type"], "metasploit")
        self.assertEqual(data["attack_log"][0]["source"], source)
        self.assertEqual(data["attack_log"][0]["target"], target)
        self.assertEqual(data["attack_log"][0]["metasploit_command"], attack_name)
        self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)

    def test_metasploit_attack_stop(self):
        """ Stopping a metasploit attack """
        al = AttackLog()
        source = "asource"
        target = "a target"
        ttp = "1234"
        attack_name = "a name"
        al.stop_metasploit_attack(source=source,
                                  target=target,
                                  metasploit_command=attack_name,
                                  ttp=ttp,
                                  )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "stop")
        self.assertEqual(data["attack_log"][0]["type"], "attack")
        self.assertEqual(data["attack_log"][0]["sub_type"], "metasploit")
        self.assertEqual(data["attack_log"][0]["source"], source)
        self.assertEqual(data["attack_log"][0]["target"], target)
        self.assertEqual(data["attack_log"][0]["metasploit_command"], attack_name)
        self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)

    def test_attack_plugin_start(self):
        """ Starting an attack plugin """
        al = AttackLog()
        source = "asource"
        target = "a target"
        ttp = "1234"
        attack_name = "a name"
        al.start_attack_plugin(source=source,
                               target=target,
                               plugin_name=attack_name,
                               ttp=ttp,
                               )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "start")
        self.assertEqual(data["attack_log"][0]["type"], "attack")
        self.assertEqual(data["attack_log"][0]["sub_type"], "attack_plugin")
        self.assertEqual(data["attack_log"][0]["source"], source)
        self.assertEqual(data["attack_log"][0]["target"], target)
        self.assertEqual(data["attack_log"][0]["plugin_name"], attack_name)
        self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)

    def test_attack_plugin_stop(self):
        """ Stopping an attack plugin """
        al = AttackLog()
        source = "asource"
        target = "a target"
        ttp = "1234"
        attack_name = "a name"
        al.stop_attack_plugin(source=source,
                              target=target,
                              plugin_name=attack_name,
                              ttp=ttp,
                              )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "stop")
        self.assertEqual(data["attack_log"][0]["type"], "attack")
        self.assertEqual(data["attack_log"][0]["sub_type"], "attack_plugin")
        self.assertEqual(data["attack_log"][0]["source"], source)
        self.assertEqual(data["attack_log"][0]["target"], target)
        self.assertEqual(data["attack_log"][0]["plugin_name"], attack_name)
        self.assertEqual(data["attack_log"][0]["hunting_tag"], "MITRE_" + ttp)
    def test_file_write_start(self):
        """ Starting a file write """
        al = AttackLog()
        source = "asource"
        target = "a target"
        file_name = "a generic filename"
        al.start_file_write(source=source,
                            target=target,
                            file_name=file_name,
                            )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "start")
        self.assertEqual(data["attack_log"][0]["type"], "dropping_file")
        self.assertEqual(data["attack_log"][0]["sub_type"], "by PurpleDome")
        self.assertEqual(data["attack_log"][0]["source"], source)
        self.assertEqual(data["attack_log"][0]["target"], target)
        self.assertEqual(data["attack_log"][0]["file_name"], file_name)

    def test_file_write_stop(self):
        """ Stopping a file write """
        al = AttackLog()
        source = "asource"
        target = "a target"
        file_name = "a generic filename"
        al.stop_file_write(source=source,
                           target=target,
                           file_name=file_name,
                           )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "stop")
        self.assertEqual(data["attack_log"][0]["type"], "dropping_file")
        self.assertEqual(data["attack_log"][0]["sub_type"], "by PurpleDome")
        self.assertEqual(data["attack_log"][0]["source"], source)
        self.assertEqual(data["attack_log"][0]["target"], target)
        self.assertEqual(data["attack_log"][0]["file_name"], file_name)

    def test_execute_payload_start(self):
        """ Starting an execute payload """
        al = AttackLog()
        source = "asource"
        target = "a target"
        command = "a generic command"
        al.start_execute_payload(source=source,
                                 target=target,
                                 command=command,
                                 )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "start")
        self.assertEqual(data["attack_log"][0]["type"], "execute_payload")
        self.assertEqual(data["attack_log"][0]["sub_type"], "by PurpleDome")
        self.assertEqual(data["attack_log"][0]["source"], source)
        self.assertEqual(data["attack_log"][0]["target"], target)
        self.assertEqual(data["attack_log"][0]["command"], command)

    def test_execute_payload_stop(self):
        """ Stopping an execute payload """
        al = AttackLog()
        source = "asource"
        target = "a target"
        command = "a generic command"
        al.stop_execute_payload(source=source,
                                target=target,
                                command=command,
                                )
        data = al.get_dict()
        self.assertEqual(data["attack_log"][0]["event"], "stop")
        self.assertEqual(data["attack_log"][0]["type"], "execute_payload")
        self.assertEqual(data["attack_log"][0]["sub_type"], "by PurpleDome")
        self.assertEqual(data["attack_log"][0]["source"], source)
        self.assertEqual(data["attack_log"][0]["target"], target)
        self.assertEqual(data["attack_log"][0]["command"], command)
    def test_mitre_fix_ttp_is_none(self):
        """ Testing the mitre ttp fix for ttp being None """
        self.assertEqual(app.attack_log.__mitre_fix_ttp__(None), "")

    def test_mitre_fix_ttp_is_MITRE_SOMETHING(self):
        """ Testing the mitre ttp fix for ttp being MITRE_ """
        self.assertEqual(app.attack_log.__mitre_fix_ttp__("MITRE_FOO"), "MITRE_FOO")
    # tests for a bunch of default data covering caldera attacks. That way we will have some fallback if no data is submitted:

    def test_get_caldera_default_name_missing(self):
        """ Testing getting the caldera default name """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_name("missing"), None)

    def test_get_caldera_default_name(self):
        """ Testing getting the caldera default name """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_name("bd527b63-9f9e-46e0-9816-b8434d2b8989"), "whoami")

    def test_get_caldera_default_description_missing(self):
        """ Testing getting the caldera default description """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_description("missing"), None)

    def test_get_caldera_default_description(self):
        """ Testing getting the caldera default description """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_description("bd527b63-9f9e-46e0-9816-b8434d2b8989"), "Obtain user from current session")

    def test_get_caldera_default_tactics_missing(self):
        """ Testing getting the caldera default tactics """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_tactics("missing", None), None)

    def test_get_caldera_default_tactics(self):
        """ Testing getting the caldera default tactics """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_tactics("bd527b63-9f9e-46e0-9816-b8434d2b8989", None), "System Owner/User Discovery")

    def test_get_caldera_default_tactics_id_missing(self):
        """ Testing getting the caldera default tactics_id """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_tactics_id("missing", None), None)

    def test_get_caldera_default_tactics_id(self):
        """ Testing getting the caldera default tactics_id """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_tactics_id("bd527b63-9f9e-46e0-9816-b8434d2b8989", None), "T1033")

    def test_get_caldera_default_situation_description_missing(self):
        """ Testing getting the caldera default situation_description """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_situation_description("missing"), None)

    def test_get_caldera_default_situation_description(self):
        """ Testing getting the caldera default situation_description """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_situation_description("bd527b63-9f9e-46e0-9816-b8434d2b8989"), None)

    def test_get_caldera_default_countermeasure_missing(self):
        """ Testing getting the caldera default countermeasure """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_countermeasure("missing"), None)

    def test_get_caldera_default_countermeasure(self):
        """ Testing getting the caldera default countermeasure """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_countermeasure("bd527b63-9f9e-46e0-9816-b8434d2b8989"), None)
 | 45.035794 | 136 | 0.581442 | 2,212 | 20,131 | 5.076854 | 0.073237 | 0.184328 | 0.208103 | 0.27382 | 0.848976 | 0.834105 | 0.808103 | 0.728673 | 0.675868 | 0.661264 | 0 | 0.021274 | 0.276141 | 20,131 | 446 | 137 | 45.136771 | 0.749382 | 0.071581 | 0 | 0.615804 | 0 | 0 | 0.184674 | 0.014276 | 0 | 0 | 0 | 0 | 0.378747 | 1 | 0.084469 | false | 0 | 0.008174 | 0 | 0.095368 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
| 45.035794 | 136 | 0.581442 | 2,212 | 20,131 | 5.076854 | 0.073237 | 0.184328 | 0.208103 | 0.27382 | 0.848976 | 0.834105 | 0.808103 | 0.728673 | 0.675868 | 0.661264 | 0 | 0.021274 | 0.276141 | 20,131 | 446 | 137 | 45.136771 | 0.749382 | 0.071581 | 0 | 0.615804 | 0 | 0 | 0.184674 | 0.014276 | 0 | 0 | 0 | 0 | 0.378747 | 1 | 0.084469 | false | 0 | 0.008174 | 0 | 0.095368 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |