text
string
size
int64
token_count
int64
import unittest
import logging
import sys
import datetime

from tigershark.facade import f271
from tigershark.parsers import M271_4010_X092_A1


class TestParsed271(unittest.TestCase):
    """Exercise the F271_4010 facade over the bundled 271 example message.

    The fixture file is unmarshalled once per test in setUp; each test then
    asserts one slice of the resulting facade tree (header, hierarchy,
    source, receiver, subscriber).
    """

    def setUp(self):
        m = M271_4010_X092_A1.parsed_271
        with open('tests/271-example.txt') as f:
            parsed = m.unmarshall(f.read().strip())
        self.f = f271.F271_4010(parsed)

    def test_number_of_facades(self):
        self.assertEqual(len(self.f.facades), 1)

    def test_header(self):
        h = self.f.facades[0].header.hierarchical_transaction_info
        self.assertEqual(h.structure, ("0022", "Information Source, "
            "Information Receiver, Subscriber, or Dependent."))
        self.assertEqual(h.purpose, ("11", "Response"))
        self.assertEqual(h.transaction_id, "11111")
        # BUG FIX: was datetime.date(2012, 06, 05) — integer literals with
        # leading zeros are a SyntaxError on Python 3.
        self.assertEqual(h.creation_date, datetime.date(2012, 6, 5))
        self.assertEqual(h.creation_time, datetime.time(23, 24, 23))

    def test_number_of_receivers(self):
        self.assertEqual(len(self.f.facades[0].source.receivers), 1)

    def test_number_of_subscribers(self):
        self.assertEqual(
            len(self.f.facades[0].source.receivers[0].subscribers), 1)

    def test_hierarchy(self):
        # The HL chain in the fixture is source -> receiver -> subscriber.
        source = self.f.facades[0].source
        h = source.hierarchy
        self.assertEqual(h.id, "1")
        self.assertEqual(h.parent_id, '')
        self.assertEqual(h.level, ("20", "Information Source"))
        self.assertTrue(h.has_children)
        receiver = source.receivers[0]
        h = receiver.hierarchy
        self.assertEqual(h.id, "2")
        self.assertEqual(h.parent_id, '1')
        self.assertEqual(h.level, ("21", "Information Receiver"))
        self.assertTrue(h.has_children)
        subscriber = receiver.subscribers[0]
        h = subscriber.hierarchy
        self.assertEqual(h.id, "3")
        self.assertEqual(h.parent_id, '2')
        self.assertEqual(h.level, ("22", "Subscriber"))
        self.assertFalse(h.has_children)

    def test_source(self):
        source = self.f.facades[0].source
        name = source.source_information.name
        self.assertEqual(name.entity_identifier, ("PR", "Payer"))
        self.assertEqual(name.entity_type, ("2", "Non-Person Entity"))
        self.assertEqual(name.org_name, "Health Net Inc")
        self.assertEqual(name.id_code, "10385")
        self.assertEqual(name.id_code_qual, ("PI", "Payor Identification"))
        self.assertFalse(name.is_person)
        self.assertTrue(name.is_organization)

    def test_receiver(self):
        receiver = self.f.facades[0].source.receivers[0]
        name = receiver.receiver_information.name
        self.assertEqual(name.entity_identifier, ("1P", "Provider"))
        self.assertEqual(name.entity_type, ("2", "Non-Person Entity"))
        self.assertEqual(name.org_name, "DR. ACULA")
        self.assertEqual(name.id_code, "1111111111")
        self.assertEqual(name.id_code_qual, ("XX",
            "Health Care Financing Administration National "
            "Provider Identifier"))
        self.assertFalse(name.is_person)
        self.assertTrue(name.is_organization)

    def test_subscriber(self):
        # Local closures keep the three sub-checks next to the shared
        # `subscriber` binding created below.
        def test_trace_numbers():
            def _test(i, trace_type, trace_number, entity_id,
                      entity_addl_id):
                self.assertEqual(subscriber.trace_numbers[i].trace_type,
                    trace_type)
                self.assertEqual(subscriber.trace_numbers[i].trace_number,
                    trace_number)
                self.assertEqual(subscriber.trace_numbers[i].entity_id,
                    entity_id)
                self.assertEqual(
                    subscriber.trace_numbers[i].entity_additional_id,
                    entity_addl_id)
            self.assertEqual(len(subscriber.trace_numbers), 3)
            _test(0, ("1", "Current Transaction Trace Numbers"),
                "222222222", "9ZIRMEDCOM", "ELR ID")
            _test(1, ("2",
                "Referenced Transaction Trace Numbers (Value from 270)"),
                "333333333", "9ZIRMEDCOM", "ELI ID")
            _test(2, ("1", "Current Transaction Trace Numbers"),
                "4444444444", "9MEDDATACO", "")

        def test_validations():
            self.assertEqual(
                len(subscriber.personal_information.request_validations), 4)
            validations = subscriber.personal_information.request_validations
            # All four AAA segments reject the request with "correct and
            # resubmit"; only the reject reasons differ.
            for validation in validations:
                self.assertFalse(validation.valid_request)
                self.assertEqual(validation.follow_up_action_code,
                    ("C", "Please Correct and Resubmit"))
            self.assertEqual(validations[0].reject_reason,
                ("72", "Invalid/Missing Subscriber/Insured ID"))
            self.assertEqual(validations[1].reject_reason,
                ("73", "Invalid/Missing Subscriber/Insured Name"))
            self.assertEqual(validations[2].reject_reason,
                ("73", "Invalid/Missing Subscriber/Insured Name"))
            self.assertEqual(validations[3].reject_reason,
                ("58", "Invalid/Missing Date-of-Birth"))

        def test_dates():
            self.assertEqual(len(subscriber.personal_information.dates), 1)
            date = subscriber.personal_information.dates[0]
            self.assertEqual(date.type, ("291", "Plan"))
            self.assertEqual(date.time, datetime.date(2012, 4, 8))
            self.assertEqual(date.time_range, None)

        subscriber = self.f.facades[0].source.receivers[0].subscribers[0]
        name = subscriber.personal_information.name
        self.assertEqual(name.entity_identifier, ("IL", "Insured"))
        self.assertEqual(name.entity_type, ("1", "Person"))
        self.assertEqual(name.id_code, "R11111111")
        self.assertEqual(name.id_code_qual,
            ("MI", "Member Identification Number"))
        self.assertTrue(name.is_person)
        self.assertFalse(name.is_organization)
        test_trace_numbers()
        test_validations()
        test_dates()


if __name__ == "__main__":
    logging.basicConfig(
        stream=sys.stderr,
        level=logging.INFO,
    )
    unittest.main()
6,387
2,031
from .rules import token_expressions from .lex import lex def run(code): """ Wrapper to run the Lexer (with the token expressions listed here). """ return lex(code, token_expressions)
194
55
import pandas as pd import pandas_profiling from path import Path import numpy as np from scipy.stats import chi2_contingency from collections import Counter root = Path('/home/roit/datasets/kaggle/2016b') dump_path = root/'dump' ge_info = root/'gene_info' exitnpy = False if exitnpy==False: genes_dic = [] genes = [] snps_sorted = pd.read_csv(dump_path/'sorted_cols_series.csv') cnt=0 for file in ge_info.files(): gene = open(file).read()#str: rs1\n rs2\n... ls = gene.split('\n') ls.pop() cnt+=len(ls) genes_dic+=ls genes.append(ls) print(cnt) vecs = np.zeros([len(genes),len(snps_sorted)]) for i in range(len(genes)): for j in range(len(genes[i])): col = genes_dic.index(genes[i][j]) vecs[i][col] = 1 np.save('vecs.npy',vecs) else: vecs = np.load('vecs.npy') sum_vec =vecs.sum(axis=0)#行相加 print(sum_vec.sum())
943
375
"""Constants for the Wattson power meter integration.""" DOMAIN = "wattson"
77
24
import sys
from pathlib import Path
from datetime import datetime
import argparse
import json

import torch
from torchvision.utils import save_image
import torchvision
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
import open3d as o3d

# BUG FIX: `from svox2 import *` does not bind the name `svox2`, so the
# reload() below raised NameError. Import the module object explicitly.
import svox2
from svox2 import *
from pyvox.models import Vox, Color
from pyvox.writer import VoxWriter
from importlib import reload
reload(svox2)
from svox2 import *

#TODO> modify this:
sys.path.append("/workspace/svox2/opt")
from util.dataset import datasets
from util import config_util

# Our nice tools
sys.path.append("/workspace/aseeo-research")
import RLResearch.utils.depth_utils as du
import RLResearch.utils.gen_utils as gu

img_id = 3
NPY_PREFIX = "depth_"
project_folder = "/workspace/datasets/cctv_2"
data_folder = project_folder + "/result/depth_npy/"
ply_folder = project_folder + "/result/debug_ply"
Path(ply_folder).mkdir(exist_ok=True, parents=True)

dataset = datasets["nsvf"](
    project_folder,
    split="test_train"
)


def export_pointcloud(img_id, data_folder, ply_folder):
    """Convert one saved depth map (``depth_<id>.npy``) into a .ply cloud.

    Reads the depth image, applies a radial weighting so radial distances
    become z-depths, and back-projects it with the camera pose/intrinsics.
    """
    depth_filename = data_folder + "/" + NPY_PREFIX + str(img_id) + ".npy"
    depth_img_np = np.load(depth_filename)
    pointcloud_filename = Path(ply_folder)/Path(depth_filename).name
    pointcloud_filename = pointcloud_filename.with_suffix(".ply")
    # Camera-to-world pose for this view.
    c2w = dataset.c2w[img_id].to(device="cpu")
    print("Rendering pose:", img_id)
    print(c2w)
    # Intrinsics hard-coded for the 4K capture; the dataset lookup
    # (see original commented block) is equivalent but slower to load.
    # NOTE(review): confirm these match dataset.intrins for this project.
    width = 3840
    height = 2160
    fx = 3243.357296552027
    fy = 3243.357296552027
    cx = 1920.0
    cy = 1080.0
    radial_weight = du.make_radial_weight(width, height, fx)
    depth_img_np = depth_img_np * radial_weight
    depth_img_np = depth_img_np.astype(np.float32)
    du.write_pointcloud_from_depth(
        depth_img_np,
        str(pointcloud_filename.resolve()),
        w=width, h=height,
        fx=fx, fy=fy, cx=cx, cy=cy,
        stride=8,
        transform=c2w)


for i in [0, 3, 5, 8, 10, 90, 130, 180]:
    export_pointcloud(i, data_folder, ply_folder)
2,808
1,004
# This is a simple MXNet server demo shows how to use DGL distributed kvstore. import dgl import argparse import mxnet as mx ID = [] ID.append(mx.nd.array([0,1], dtype='int64')) ID.append(mx.nd.array([2,3], dtype='int64')) ID.append(mx.nd.array([4,5], dtype='int64')) ID.append(mx.nd.array([6,7], dtype='int64')) edata_partition_book = {'edata':mx.nd.array([0,0,1,1,2,2,3,3], dtype='int64')} ndata_partition_book = {'ndata':mx.nd.array([0,0,1,1,2,2,3,3], dtype='int64')} def start_client(): client = dgl.contrib.start_client(ip_config='ip_config.txt', ndata_partition_book=ndata_partition_book, edata_partition_book=edata_partition_book) client.push(name='edata', id_tensor=ID[client.get_id()], data_tensor=mx.nd.array([[1.,1.,1.],[1.,1.,1.]])) client.push(name='ndata', id_tensor=ID[client.get_id()], data_tensor=mx.nd.array([[2.,2.,2.],[2.,2.,2.]])) client.barrier() tensor_edata = client.pull(name='edata', id_tensor=mx.nd.array([0,1,2,3,4,5,6,7], dtype='int64')) tensor_ndata = client.pull(name='ndata', id_tensor=mx.nd.array([0,1,2,3,4,5,6,7], dtype='int64')) print(tensor_edata) client.barrier() print(tensor_ndata) client.barrier() if client.get_id() == 0: client.shut_down() if __name__ == '__main__': start_client()
1,376
563
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'qtgui.ui' ## ## Created by: Qt User Interface Compiler version 6.2.3 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from PySide6.QtCore import (QCoreApplication, QDate, QDateTime, QLocale, QMetaObject, QObject, QPoint, QRect, QSize, QTime, QUrl, Qt) from PySide6.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QGradient, QIcon, QImage, QKeySequence, QLinearGradient, QPainter, QPalette, QPixmap, QRadialGradient, QTransform) from PySide6.QtWidgets import (QApplication, QComboBox, QLabel, QLineEdit, QMainWindow, QProgressBar, QPushButton, QSizePolicy, QTabWidget, QTextBrowser, QWidget) class Ui_MainWindow(object): def setupUi(self, MainWindow): if not MainWindow.objectName(): MainWindow.setObjectName(u"MainWindow") MainWindow.resize(800, 600) MainWindow.setLocale(QLocale(QLocale.English, QLocale.UnitedStates)) self.centralwidget = QWidget(MainWindow) self.centralwidget.setObjectName(u"centralwidget") self.tabWidget = QTabWidget(self.centralwidget) self.tabWidget.setObjectName(u"tabWidget") self.tabWidget.setGeometry(QRect(0, 0, 801, 551)) self.tab_acquire = QWidget() self.tab_acquire.setObjectName(u"tab_acquire") self.brain_region_menu = QComboBox(self.tab_acquire) self.brain_region_menu.setObjectName(u"brain_region_menu") self.brain_region_menu.setGeometry(QRect(90, 20, 191, 21)) self.species_choice_menu = QComboBox(self.tab_acquire) self.species_choice_menu.setObjectName(u"species_choice_menu") self.species_choice_menu.setGeometry(QRect(90, 50, 191, 21)) self.cell_type_choice_menu = QComboBox(self.tab_acquire) self.cell_type_choice_menu.setObjectName(u"cell_type_choice_menu") self.cell_type_choice_menu.setGeometry(QRect(90, 80, 191, 21)) self.brain_region_menu_label = 
QLabel(self.tab_acquire) self.brain_region_menu_label.setObjectName(u"brain_region_menu_label") self.brain_region_menu_label.setGeometry(QRect(10, 20, 81, 21)) self.species_choice_menu_label = QLabel(self.tab_acquire) self.species_choice_menu_label.setObjectName(u"species_choice_menu_label") self.species_choice_menu_label.setGeometry(QRect(10, 50, 81, 21)) self.cell_type_choice_menu_label = QLabel(self.tab_acquire) self.cell_type_choice_menu_label.setObjectName(u"cell_type_choice_menu_label") self.cell_type_choice_menu_label.setGeometry(QRect(10, 80, 81, 21)) self.acq_progressbar = QProgressBar(self.tab_acquire) self.acq_progressbar.setObjectName(u"acq_progressbar") self.acq_progressbar.setGeometry(QRect(450, 10, 281, 23)) self.acq_progressbar.setValue(0) self.acq_textbox = QTextBrowser(self.tab_acquire) self.acq_textbox.setObjectName(u"acq_textbox") self.acq_textbox.setGeometry(QRect(20, 150, 751, 371)) self.acq_textbox.setOpenLinks(False) self.acq_button = QPushButton(self.tab_acquire) self.acq_button.setObjectName(u"acq_button") self.acq_button.setGeometry(QRect(320, 110, 131, 31)) self.acq_entry = QLineEdit(self.tab_acquire) self.acq_entry.setObjectName(u"acq_entry") self.acq_entry.setGeometry(QRect(450, 60, 251, 21)) self.acq_entry_label = QLabel(self.tab_acquire) self.acq_entry_label.setObjectName(u"acq_entry_label") self.acq_entry_label.setGeometry(QRect(450, 40, 251, 21)) self.open_csv_file_button = QPushButton(self.tab_acquire) self.open_csv_file_button.setObjectName(u"open_csv_file_button") self.open_csv_file_button.setGeometry(QRect(510, 110, 151, 31)) self.acq_button_continue = QPushButton(self.tab_acquire) self.acq_button_continue.setObjectName(u"acq_button_continue") self.acq_button_continue.setEnabled(False) self.acq_button_continue.setGeometry(QRect(30, 110, 121, 31)) self.acq_button_cancel = QPushButton(self.tab_acquire) self.acq_button_cancel.setObjectName(u"acq_button_cancel") self.acq_button_cancel.setEnabled(False) 
self.acq_button_cancel.setGeometry(QRect(160, 110, 121, 31)) self.tabWidget.addTab(self.tab_acquire, "") self.tab_image = QWidget() self.tab_image.setObjectName(u"tab_image") self.img_textbox = QTextBrowser(self.tab_image) self.img_textbox.setObjectName(u"img_textbox") self.img_textbox.setGeometry(QRect(20, 150, 751, 371)) self.img_textbox.setOpenLinks(False) self.img_progressbar = QProgressBar(self.tab_image) self.img_progressbar.setObjectName(u"img_progressbar") self.img_progressbar.setGeometry(QRect(450, 10, 281, 23)) self.img_progressbar.setValue(0) self.img_button = QPushButton(self.tab_image) self.img_button.setObjectName(u"img_button") self.img_button.setGeometry(QRect(320, 110, 131, 31)) self.img_csv_choice_list_label = QLabel(self.tab_image) self.img_csv_choice_list_label.setObjectName(u"img_csv_choice_list_label") self.img_csv_choice_list_label.setGeometry(QRect(20, 10, 81, 21)) self.img_csv_choice_list = QLineEdit(self.tab_image) self.img_csv_choice_list.setObjectName(u"img_csv_choice_list") self.img_csv_choice_list.setGeometry(QRect(90, 10, 291, 21)) self.img_open_csv_file_button = QPushButton(self.tab_image) self.img_open_csv_file_button.setObjectName(u"img_open_csv_file_button") self.img_open_csv_file_button.setGeometry(QRect(260, 50, 121, 21)) self.open_images_directory_button = QPushButton(self.tab_image) self.open_images_directory_button.setObjectName(u"open_images_directory_button") self.open_images_directory_button.setGeometry(QRect(510, 110, 151, 31)) self.tabWidget.addTab(self.tab_image, "") self.tab_about = QWidget() self.tab_about.setObjectName(u"tab_about") self.about_label = QTextBrowser(self.tab_about) self.about_label.setObjectName(u"about_label") self.about_label.setGeometry(QRect(20, 150, 751, 371)) self.about_label.setOpenLinks(False) self.tabWidget.addTab(self.tab_about, "") self.exit_button = QPushButton(self.centralwidget) self.exit_button.setObjectName(u"exit_button") self.exit_button.setGeometry(QRect(330, 560, 131, 31)) 
self.open_file_location_button = QPushButton(self.centralwidget) self.open_file_location_button.setObjectName(u"open_file_location_button") self.open_file_location_button.setGeometry(QRect(20, 560, 131, 31)) MainWindow.setCentralWidget(self.centralwidget) self.retranslateUi(MainWindow) self.tabWidget.setCurrentIndex(0) QMetaObject.connectSlotsByName(MainWindow) # setupUi def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"NeuroMorpho Access Tool", None)) self.brain_region_menu_label.setText(QCoreApplication.translate("MainWindow", u"Brain region:", None)) self.species_choice_menu_label.setText(QCoreApplication.translate("MainWindow", u"Species:", None)) self.cell_type_choice_menu_label.setText(QCoreApplication.translate("MainWindow", u"Cell type:", None)) self.acq_button.setText(QCoreApplication.translate("MainWindow", u"Generate CSV", None)) self.acq_entry.setText(QCoreApplication.translate("MainWindow", u"NM_All_All_All.csv", None)) #if QT_CONFIG(accessibility) self.acq_entry_label.setAccessibleDescription("") #endif // QT_CONFIG(accessibility) self.acq_entry_label.setText(QCoreApplication.translate("MainWindow", u"Name of file to generate:", None)) self.open_csv_file_button.setText(QCoreApplication.translate("MainWindow", u"Open CSV file", None)) self.acq_button_continue.setText(QCoreApplication.translate("MainWindow", u"Continue", None)) self.acq_button_cancel.setText(QCoreApplication.translate("MainWindow", u"Cancel", None)) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_acquire), QCoreApplication.translate("MainWindow", u"Generate CSV", None)) self.img_button.setText(QCoreApplication.translate("MainWindow", u"Download Images", None)) self.img_csv_choice_list_label.setText(QCoreApplication.translate("MainWindow", u"CSV File:", None)) self.img_csv_choice_list.setText("") self.img_open_csv_file_button.setText(QCoreApplication.translate("MainWindow", u"Open CSV files", None)) 
self.open_images_directory_button.setText(QCoreApplication.translate("MainWindow", u"Open Images Directory", None)) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_image), QCoreApplication.translate("MainWindow", u"Get Images", None)) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_about), QCoreApplication.translate("MainWindow", u"About", None)) self.exit_button.setText(QCoreApplication.translate("MainWindow", u"Exit", None)) self.open_file_location_button.setText(QCoreApplication.translate("MainWindow", u"Open file location", None)) # retranslateUi
9,535
3,369
# TODO: Write actual tests. This just makes pytest-cov pick up on the module. import wb2k
90
28
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2021, Cisco Systems # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) DOCUMENTATION = r""" --- module: tag_member short_description: Manage TagMember objects of Tag description: - Returns tag members specified by id. - Adds members to the tag specified by id. - Removes Tag member from the tag specified by id. - Returns the number of members in a given tag. - > Updates tag membership. As part of the request payload through this API, only the specified members are added / retained to the given input tags. Possible values of memberType attribute in the request payload can be queried by using the /tag/member/type API. - Returns list of supported resource types. version_added: '1.0.0' author: Rafael Campos (@racampos) options: id: description: - Tag ID. type: str required: True member_type: description: - Entity type of the member. Possible values can be retrieved by using /tag/member/type API. - MemberType query parameter. type: str required: True level: description: - Level query parameter. type: str limit: description: - Used to Number of maximum members to return in the result. type: str member_association_type: description: - > Indicates how the member is associated with the tag. Possible values and description. 1) DYNAMIC The member is associated to the tag through rules. 2) STATIC – The member is associated to the tag manually. 3) MIXED – The member is associated manually and also satisfies the rule defined for the tag. - MemberAssociationType query parameter. type: str offset: description: - Used for pagination. It indicates the starting row number out of available member records. type: str member_id: description: - TagMember id to be removed from tag. - Required for state delete. type: str count: description: - If true gets the number of objects. - Required for state query. type: bool memberToTags: description: - TagMemberDTO's memberToTags. 
type: dict suboptions: key: description: - It is the tag member's key. type: list memberType: description: - TagMemberDTO's memberType. type: str requirements: - dnacentersdk seealso: # Reference by module name - module: cisco.dnac.plugins.module_utils.definitions.tag_member # Reference by Internet resource - name: TagMember reference description: Complete reference of the TagMember object model. link: https://developer.cisco.com/docs/dna-center/api/1-3-3-x # Reference by Internet resource - name: TagMember reference description: SDK reference. link: https://dnacentersdk.readthedocs.io/en/latest/api/api.html#v2-1-1-summary """ EXAMPLES = r""" - name: get_tag_members_by_id cisco.dnac.tag_member: state: query # required id: SomeValue # string, required member_type: SomeValue # string, required level: SomeValue # string limit: SomeValue # string member_association_type: SomeValue # string offset: SomeValue # string register: nm_get_tag_members_by_id - name: add_members_to_the_tag cisco.dnac.tag_member: state: create # required id: SomeValue # string, required - name: remove_tag_member cisco.dnac.tag_member: state: delete # required id: SomeValue # string, required member_id: SomeValue # string, required - name: get_tag_member_count cisco.dnac.tag_member: state: query # required id: SomeValue # string, required member_type: SomeValue # string, required count: True # boolean, required level: SomeValue # string member_association_type: SomeValue # string register: nm_get_tag_member_count - name: updates_tag_membership cisco.dnac.tag_member: state: update # required memberToTags: key: - SomeValue # string memberType: SomeValue # string - name: get_tag_resource_types cisco.dnac.tag_member: state: query # required register: nm_get_tag_resource_types """ RETURN = r""" dnac_response: description: A dictionary with the response returned by the DNA Center Python SDK returned: always type: dict sample: {"response": 29, "version": "1.0"} sdk_function: description: The DNA Center 
SDK function used to execute the task returned: always type: str sample: tag.add_members_to_the_tag missing_params: description: Provided arguments do not comply with the schema of the DNA Center Python SDK function returned: when the function request schema is not satisfied type: list sample: """
4,690
1,433
import unittest import numpy.testing as npt from unittest_reinvent.fixtures.test_data import CELECOXIB, CELECOXIB_C, BUTANE from unittest_reinvent.scoring_tests.scoring_components.fixtures import score_single, score, instantiate_component class TestTanimotoSimilarity(unittest.TestCase): @classmethod def setUpClass(cls): cls.component = instantiate_component() def test_similarity_1(self): npt.assert_almost_equal(score_single(self.component, BUTANE), 1.0) def test_similarity_2(self): npt.assert_almost_equal(score_single(self.component, CELECOXIB), 1.0) def test_similarity_3(self): npt.assert_almost_equal(score_single(self.component, CELECOXIB_C), 0.89, decimal=3) def test_similarity_4(self): smiles = [BUTANE, CELECOXIB] scores = score(self.component, smiles) npt.assert_almost_equal(scores, 1.0)
893
327
import numpy as np
import os
import time
import json

def apply(train_datasets, var_types_string, test_datasets, n_folds, result_path, filename, foldLog):
    """Fit a multivariate KDE per fold and record test log-likelihoods.

    Parameters
    ----------
    train_datasets, test_datasets : per-fold array-likes (index 0 = fold 1).
    var_types_string : str describing each variable's type for
        ``KDEMultivariate`` (e.g. ``'ccuo'``).
    n_folds : number of folds to evaluate.
    result_path, filename : location/prefix for the JSON results file.
    foldLog : when truthy, print per-fold progress.

    Side effects: prints progress and writes
    ``<result_path><filename>_results_KDE.json`` via :func:`store_json`.
    """
    # Deferred import: statsmodels is only needed when actually fitting,
    # so importing this module stays cheap and dependency-light.
    import statsmodels.api as sm

    print("\n========================")
    print("KDE")
    print("========================")
    results = {}
    folds = {}
    avg_learning_time = 0
    avg_test_ll = 0
    for i in range(1, n_folds + 1):
        index = i - 1
        init_time = time.time() * 1000  # milliseconds
        # Bandwidth chosen by the normal-reference rule of thumb.
        model = sm.nonparametric.KDEMultivariate(data=train_datasets[index],
                                                 var_type=var_types_string,
                                                 bw='normal_reference')
        # Sum of log-densities of the held-out fold.
        test_ll = np.sum(np.log(model.pdf(test_datasets[index])))
        learning_time = time.time() * 1000 - init_time
        folds["fold_" + str(i)] = {"test_ll": test_ll,
                                   "learning_time": learning_time}
        avg_learning_time += learning_time
        avg_test_ll += test_ll
        if foldLog:
            print("----------------------------------------")
            print("Fold (" + str(i) + "): ")
            print("Test LL: " + str(test_ll))
            print("Learning time: " + str(learning_time))
    # Generate the average results and store them in the dictionary,
    # then store them in a JSON file.
    avg_test_ll = avg_test_ll / n_folds
    avg_learning_time = avg_learning_time / n_folds / 1000  # in seconds
    results["average_test_ll"] = avg_test_ll
    results["average_learning_time"] = avg_learning_time
    results["folds"] = folds
    store_json(results, result_path, filename)
    print("----------------------------------------")
    print("----------------------------------------")
    print("Average Test LL: " + str(avg_test_ll))
    print("Average learning time: " + str(avg_learning_time))

def store_json(results, path, filename):
    """Write *results* to ``<path><filename>_results_KDE.json``.

    Creates *path* if missing. The original remove-then-write and plain
    write branches were identical in effect (mode ``'w'`` truncates), so
    they are collapsed into one write.
    """
    os.makedirs(path, exist_ok=True)
    target = path + filename + "_results_KDE.json"
    with open(target, 'w') as fp:
        json.dump(results, fp, sort_keys=True, indent=4)
2,318
773
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the auth Service.

Test-Suite to ensure that the auth Service is working as expected.
"""

import pytest
from werkzeug.exceptions import HTTPException

from pay_api.services.auth import check_auth
from pay_api.utils.constants import EDIT_ROLE, VIEW_ROLE


def test_auth_role_for_service_account(session, monkeypatch):
    """Assert the auth works for service account."""
    # A token whose realm roles include 'system' marks a service account;
    # 'edit' satisfies the one_of_roles check below, so no exception.
    def token_info():  # pylint: disable=unused-argument; mocks of library methods
        return {
            'username': 'service account',
            'realm_access': {
                'roles': [
                    'system',
                    'edit'
                ]
            }
        }

    def mock_auth():  # pylint: disable=unused-argument; mocks of library methods
        return 'test'

    monkeypatch.setattr('pay_api.utils.user_context._get_token', mock_auth)
    monkeypatch.setattr('pay_api.utils.user_context._get_token_info', token_info)

    # Test one of roles
    check_auth('CP0001234', one_of_roles=[EDIT_ROLE])


def test_auth_role_for_service_account_with_no_edit_role(session, monkeypatch):
    """Assert the auth works for service account."""
    # Same service-account token but without 'edit': check_auth must abort 403.
    def token_info():  # pylint: disable=unused-argument; mocks of library methods
        return {
            'username': 'service account',
            'realm_access': {
                'roles': [
                    'system'
                ]
            }
        }

    def mock_auth():  # pylint: disable=unused-argument; mocks of library methods
        return 'test'

    monkeypatch.setattr('pay_api.utils.user_context._get_token', mock_auth)
    monkeypatch.setattr('pay_api.utils.user_context._get_token_info', token_info)

    with pytest.raises(HTTPException) as excinfo:
        # Test one of roles
        check_auth('CP0001234', one_of_roles=[EDIT_ROLE])
    # NOTE(review): pytest's ExceptionInfo exposes `.value`, not `.exception`
    # — confirm this assertion actually runs and checks what it intends.
    assert excinfo.exception.code == 403


def test_auth_for_client_user_roles(session, public_user_mock):
    """Assert that the auth is working as expected."""
    # token = jwt.create_jwt(get_claims(roles=[Role.EDITOR.value]), token_header)
    # headers = {'Authorization': 'Bearer ' + token}
    # def mock_auth(one, two):  # pylint: disable=unused-argument; mocks of library methods
    #     return headers['Authorization']
    # with app.test_request_context():
    #     monkeypatch.setattr('flask.request.headers.get', mock_auth)

    # Test one of roles
    check_auth('CP0001234', one_of_roles=[EDIT_ROLE])
    # Test contains roles
    check_auth('CP0001234', contains_role=EDIT_ROLE)

    # Test for exception
    with pytest.raises(HTTPException) as excinfo:
        check_auth('CP0000000', contains_role=VIEW_ROLE)
    # NOTE(review): see .value vs .exception note above — confirm.
    assert excinfo.exception.code == 403

    with pytest.raises(HTTPException) as excinfo:
        check_auth('CP0000000', one_of_roles=[EDIT_ROLE])
    assert excinfo.exception.code == 403
3,471
1,110
from cryption.crypto import sign VERSION = "v1"
49
18
from tests.test_helper import * class TestSubscription(unittest.TestCase): def test_create_raises_exception_with_bad_keys(self): try: Subscription.create({"bad_key": "value"}) self.assertTrue(False) except KeyError, e: self.assertEquals("'Invalid keys: bad_key'", str(e)) def test_update_raises_exception_with_bad_keys(self): try: Subscription.update("id", {"bad_key": "value"}) self.assertTrue(False) except KeyError, e: self.assertEquals("'Invalid keys: bad_key'", str(e))
589
180
# NIM game script. Indentation below is reconstructed from a
# whitespace-mangled source; structure is a best-effort reading.
computador = 0  # championship score counter for the computer
usuario = 0     # declared but never incremented — NOTE(review): dead?
rodada = 0      # current round number

def computador_escolhe_jogada(n, m):
    """Computer's move: removes pieces and reports the new board state.

    NOTE(review): when n == 1 the messages print `n` (the remaining count)
    as the number of pieces taken, and calling partida() from inside a move
    re-enters the game loop recursively — both look like defects to confirm.
    """
    global computador
    n = n - m
    if (n == 1):
        print(" ")
        print("O computador tirou %s peça." % n)
        print("Agora restam %s peças no tabuleiro." % n)
        print(" ")
        if (n == 0):
            print ("Fim do jogo! O computador ganhou!")
            partida()
    else:
        print(" ")
        print("O computador tirou %s peça." % m)
        print("Agora restam %s peças no tabuleiro." % n)
        print(" ")
        if (n == 0):
            print ("Fim do jogo! O computador ganhou!")
            partida()
        return n
    return m

def usuario_escolhe_jogada(n, m):
    """User's move: prompt for a piece count and apply it.

    NOTE(review): on a valid move this subtracts `m` (the per-move limit)
    rather than `n_user` (what the user chose) — looks like a bug; confirm.
    """
    global usuario
    print(" ")
    n_user = int(input("Quantas peças você vai tirar? "))
    print("Voce tirou %s peças." % n_user)
    if (n_user <= m):
        n = n - m
        print(" ")
        print("Agora restam apenas %s peças no tabuleiro." % n)
    else:
        # Keep re-prompting until the user picks at most m pieces.
        while (n_user > m):
            print("Oops! Jogada inválida! Tente de novo.")
            print(" ")
            n_user = int(input("Quantas peças você vai tirar? "))
        if (n == 0):
            print ("Vitoria do usuario")
            return n_user
        return n
    return m

def partida():
    """Play up to three rounds, choosing who starts by n % (m + 1).

    NOTE(review): return values of the move functions are discarded and `n`
    is re-decremented by `m` here as well — double subtraction; confirm.
    """
    global computador
    global usuario
    global rodada
    while(rodada <= 3):
        rodada = rodada + 1
        if (rodada <= 3 ):
            print(" ")
            print("**** Rodada %s ****" % rodada)
            print(" ")
            n = int(input("Quantas peças? "))
            m = int(input("Limite de peças por jogada? "))
            # If n is a multiple of m+1 the first player loses with perfect
            # play, so the user starts; otherwise the computer starts.
            if (((n )%(m + 1)) == 0):
                while (n > 0):
                    print(" ")
                    print("Voce começa!")
                    usuario_escolhe_jogada(n,m)
                    if n > 0:
                        n = n - m
                        computador_escolhe_jogada(n,m)
                        n = n - m
                        computador = computador + 1
            else:
                print(" ")
                print("Computador Começa!!")
                while( n > 0):
                    computador_escolhe_jogada(n,m)
                    computador = computador + 1
                    n = n - m
                    if n > 0:
                        usuario_escolhe_jogada(n,m)
                        n = n - m
        else:
            print("**** Final do campeonato! ****")
            print(" ")
            print("Fim de Campeonato: Computador %s x 0 Usuario " % computador)
            break

# Entry point: menu, then either a single match or a championship.
# NOTE(review): choice 1 prints a message but never starts a game.
print("Bem-vindo ao jogo do NIM! Escolha:")
print(" ")
print("1 - para jogar uma partida isolada ")
tipo_jogo = int(input("2 - para jogar um campeonato "))
print(" ")
if ( tipo_jogo == 1 ):
    print("Voce escolheu partida isolada!")
if ( tipo_jogo == 2):
    print("Voce escolheu um campeonato!")
    partida()
else:
    pass
2,848
941
expr = besselj(x, z*polar_lift(-1)) expr = besselsimp(expr)
59
29
import warnings
import itertools
import operator
from collections import namedtuple

import numpy as np
import parmed as pmd

import mbuild as mb
from mbuild.utils.sorting import natural_sort
from mbuild.utils.io import import_
from mbuild.utils.conversion import RB_to_OPLS
from .hoomd_snapshot import to_hoomdsnapshot

hoomd = import_("hoomd")
hoomd.md = import_("hoomd.md")
hoomd.md.pair = import_("hoomd.md.pair")
hoomd.md.special_pair = import_("hoomd.md.special_pair")
hoomd.md.charge = import_("hoomd.md.charge")
hoomd.md.bond = import_("hoomd.md.bond")
hoomd.md.angle = import_("hoomd.md.angle")
hoomd.md.dihedral = import_("hoomd.md.dihedral")
hoomd.group = import_("hoomd.group")


def create_hoomd_simulation(structure, ref_distance=1.0, ref_mass=1.0,
                            ref_energy=1.0, r_cut=1.2, auto_scale=False,
                            snapshot_kwargs=None, pppm_kwargs=None):
    """Convert a parametrized pmd.Structure to hoomd.SimulationContext.

    Parameters
    ----------
    structure : parmed.Structure
        ParmEd Structure object
    ref_distance : float, optional, default=1.0
        Reference distance for conversion to reduced units
    ref_mass : float, optional, default=1.0
        Reference mass for conversion to reduced units
    ref_energy : float, optional, default=1.0
        Reference energy for conversion to reduced units
    r_cut : float, optional, default 1.2
        Cutoff radius, in reduced units
    auto_scale : bool, optional, default=False
        Automatically use largest sigma value as ref_distance,
        largest mass value as ref_mass
        and largest epsilon value as ref_energy
    snapshot_kwargs : dict, optional
        Kwargs to pass to to_hoomdsnapshot (defaults to {})
    pppm_kwargs : dict, optional
        Kwargs to pass to hoomd's pppm function
        (defaults to {'Nx': 8, 'Ny': 8, 'Nz': 8, 'order': 4})

    Returns
    ------
    hoomd_objects : list
        List of hoomd objects created during conversion
    ReferenceValues : namedtuple
        Values used in scaling

    Notes
    -----
    While the hoomd objects are returned, the hoomd.SimulationContext is
    accessible via `hoomd.context.current`.
    If you pass a non-parametrized pmd.Structure, you will not have angle,
    dihedral, or force field information. You may be better off creating a
    hoomd.Snapshot.
    Reference units should be expected to convert parmed Structure units:
    angstroms, kcal/mol, and daltons.
    """
    # Mutable-default fix: the original used `snapshot_kwargs={}` and a dict
    # literal for pppm_kwargs, which are shared across calls.
    if snapshot_kwargs is None:
        snapshot_kwargs = {}
    if pppm_kwargs is None:
        pppm_kwargs = {'Nx': 8, 'Ny': 8, 'Nz': 8, 'order': 4}

    if isinstance(structure, mb.Compound):
        raise ValueError(
            "You passed mb.Compound to create_hoomd_simulation, "
            "there will be no angles, dihedrals, or force field parameters. "
            "Please use "
            "hoomd_snapshot.to_hoomdsnapshot to create a hoomd.Snapshot, "
            "then create your own hoomd context "
            "and pass your hoomd.Snapshot "
            "to hoomd.init.read_snapshot()")
    elif not isinstance(structure, pmd.Structure):
        raise ValueError("Please pass a parmed.Structure to "
                         "create_hoomd_simulation")

    # The original called _check_hoomd_version() twice; one call suffices.
    version_numbers = _check_hoomd_version()
    if float(version_numbers[0]) >= 3:
        warnings.warn("Warning when using Hoomd 3, potential API change "
                      "where the hoomd context is not updated upon "
                      "creation of forces - utilize "
                      "the returned `hoomd_objects`")

    hoomd_objects = []  # Potential adaptation for Hoomd v3 API

    if auto_scale:
        ref_mass = max([atom.mass for atom in structure.atoms])
        pair_coeffs = list(set((atom.type, atom.epsilon, atom.sigma)
                               for atom in structure.atoms))
        ref_energy = max(pair_coeffs, key=operator.itemgetter(1))[1]
        ref_distance = max(pair_coeffs, key=operator.itemgetter(2))[2]

    ReferenceValues = namedtuple("ref_values", ["distance", "mass", "energy"])
    ref_values = ReferenceValues(ref_distance, ref_mass, ref_energy)

    if not hoomd.context.current:
        hoomd.context.initialize("")

    snapshot, _ = to_hoomdsnapshot(structure, ref_distance=ref_distance,
                                   ref_mass=ref_mass, ref_energy=ref_energy,
                                   **snapshot_kwargs)
    hoomd_objects.append(snapshot)
    hoomd.init.read_snapshot(snapshot)

    nl = hoomd.md.nlist.cell()
    nl.reset_exclusions(exclusions=['1-2', '1-3'])
    hoomd_objects.append(nl)

    # An empty type string means the structure is not parametrized; skip
    # the nonbonded terms in that case.
    if structure.atoms[0].type != '':
        print("Processing LJ and QQ")
        lj = _init_hoomd_lj(structure, nl, r_cut=r_cut,
                            ref_distance=ref_distance, ref_energy=ref_energy)
        qq = _init_hoomd_qq(structure, nl, r_cut=r_cut, **pppm_kwargs)
        hoomd_objects.append(lj)
        hoomd_objects.append(qq)
    if structure.adjusts:
        print("Processing 1-4 interactions, adjusting neighborlist exclusions")
        lj_14, qq_14 = _init_hoomd_14_pairs(structure, nl,
                                            ref_distance=ref_distance,
                                            ref_energy=ref_energy)
        hoomd_objects.append(lj_14)
        hoomd_objects.append(qq_14)
    if structure.bond_types:
        print("Processing harmonic bonds")
        harmonic_bond = _init_hoomd_bonds(structure,
                                          ref_distance=ref_distance,
                                          ref_energy=ref_energy)
        hoomd_objects.append(harmonic_bond)
    if structure.angle_types:
        print("Processing harmonic angles")
        harmonic_angle = _init_hoomd_angles(structure, ref_energy=ref_energy)
        hoomd_objects.append(harmonic_angle)
    if structure.dihedral_types:
        print("Processing periodic torsions")
        periodic_torsions = _init_hoomd_dihedrals(structure,
                                                  ref_energy=ref_energy)
        hoomd_objects.append(periodic_torsions)
    if structure.rb_torsion_types:
        print("Processing RB torsions")
        rb_torsions = _init_hoomd_rb_torsions(structure, ref_energy=ref_energy)
        hoomd_objects.append(rb_torsions)
    print("HOOMD SimulationContext updated from ParmEd Structure")

    return hoomd_objects, ref_values


def _init_hoomd_lj(structure, nl, r_cut=1.2, ref_distance=1.0, ref_energy=1.0):
    """Set up LJ pair coefficients (self, mixed, and NBFIX overrides)."""
    # Identify the unique atom types before setting
    atom_type_params = {}
    for atom in structure.atoms:
        if atom.type not in atom_type_params:
            atom_type_params[atom.type] = atom.atom_type

    # Set the hoomd parameters for self-interactions
    lj = hoomd.md.pair.lj(r_cut, nl)
    for name, atom_type in atom_type_params.items():
        lj.pair_coeff.set(name, name,
                          sigma=atom_type.sigma / ref_distance,
                          epsilon=atom_type.epsilon / ref_energy)

    # Cross interactions, mixing rules, NBfixes
    all_atomtypes = sorted(atom_type_params.keys())
    for a1, a2 in itertools.combinations_with_replacement(all_atomtypes, 2):
        nb_fix_info = atom_type_params[a1].nbfix.get(a2, None)
        # nb_fix_info = (rmin, eps, rmin14, eps14)
        if nb_fix_info is None:
            # No nbfix means use mixing rule to find cross-interaction
            if structure.combining_rule == 'lorentz':
                sigma = ((atom_type_params[a1].sigma
                          + atom_type_params[a2].sigma)
                         / (2 * ref_distance))
                epsilon = ((atom_type_params[a1].epsilon
                            * atom_type_params[a2].epsilon)
                           / ref_energy**2)**0.5
            elif structure.combining_rule == 'geometric':
                sigma = ((atom_type_params[a1].sigma
                          * atom_type_params[a2].sigma)
                         / ref_distance**2)**0.5
                epsilon = ((atom_type_params[a1].epsilon
                            * atom_type_params[a2].epsilon)
                           / ref_energy**2)**0.5
            else:
                raise ValueError(
                    "Mixing rule {} ".format(structure.combining_rule) +
                    "not supported, use lorentz")
        else:
            # If we have nbfix info, use it; rmin -> sigma via 2^(1/6)
            sigma = nb_fix_info[0] / (ref_distance * (2 ** (1 / 6)))
            epsilon = nb_fix_info[1] / ref_energy
        lj.pair_coeff.set(a1, a2, sigma=sigma, epsilon=epsilon)

    return lj


def _init_hoomd_qq(structure, nl, Nx=1, Ny=1, Nz=1, order=4, r_cut=1.2):
    """Set up PPPM electrostatics; returns None if nothing is charged."""
    charged = hoomd.group.charged()
    if len(charged) == 0:
        print("No charged groups found, ignoring electrostatics")
        return None
    else:
        qq = hoomd.md.charge.pppm(charged, nl)
        qq.set_params(Nx, Ny, Nz, order, r_cut)
    return qq


def _init_hoomd_14_pairs(structure, nl, r_cut=1.2,
                         ref_distance=1.0, ref_energy=1.0):
    """Special_pairs to handle 14 scalings.

    See discussion:
    https://groups.google.com/forum/#!topic/hoomd-users/iZ9WCpHczg0
    """
    # Update neighborlist to exclude 1-4 interactions,
    # but impose a special_pair force to handle these pairs
    nl.exclusions.append('1-4')

    if hoomd.context.current.system_definition.getPairData().getN() == 0:
        print("No 1,4 pairs found in hoomd snapshot")
        return None, None

    lj_14 = hoomd.md.special_pair.lj()
    qq_14 = hoomd.md.special_pair.coulomb()
    params_14 = {}
    # Identify unique 14 scalings
    for adjust in structure.adjusts:
        t1 = adjust.atom1.type
        t2 = adjust.atom2.type
        ps = '-'.join(sorted([t1, t2]))
        if ps not in params_14:
            params_14[ps] = adjust.type
    for name, adjust_type in params_14.items():
        lj_14.pair_coeff.set(name,
                             sigma=adjust_type.sigma / ref_distance,
                             # The adjust epsilon already carries the scaling
                             epsilon=adjust_type.epsilon / ref_energy,
                             # Do NOT use hoomd's alpha to modify any LJ terms
                             alpha=1,
                             r_cut=r_cut)
        qq_14.pair_coeff.set(name,
                             alpha=adjust_type.chgscale,
                             r_cut=r_cut)

    return lj_14, qq_14


def _init_hoomd_bonds(structure, ref_distance=1.0, ref_energy=1.0):
    """Set up harmonic bonds from the unique bond types."""
    # Identify the unique bond types before setting
    bond_type_params = {}
    for bond in structure.bonds:
        t1, t2 = bond.atom1.type, bond.atom2.type
        t1, t2 = sorted([t1, t2], key=natural_sort)
        if t1 != '' and t2 != '':
            bond_type = ('-'.join((t1, t2)))
            if bond_type not in bond_type_params:
                bond_type_params[bond_type] = bond.type

    # Set the hoomd parameters
    harmonic_bond = hoomd.md.bond.harmonic()
    for name, bond_type in bond_type_params.items():
        # A (parametrized) parmed structure with no bondtype
        # is because of constraints
        if bond_type is None:
            print("Bond with no bondtype detected, setting coefficients to 0")
            harmonic_bond.bond_coeff.set(name, k=0, r0=0)
        else:
            harmonic_bond.bond_coeff.set(
                name,
                k=2 * bond_type.k * ref_distance**2 / ref_energy,
                r0=bond_type.req / ref_distance)

    return harmonic_bond


def _init_hoomd_angles(structure, ref_energy=1.0):
    """Set up harmonic angles from the unique angle types."""
    # Identify the unique angle types before setting
    angle_type_params = {}
    for angle in structure.angles:
        t1, t2, t3 = angle.atom1.type, angle.atom2.type, angle.atom3.type
        t1, t3 = sorted([t1, t3], key=natural_sort)
        angle_type = ('-'.join((t1, t2, t3)))
        if angle_type not in angle_type_params:
            angle_type_params[angle_type] = angle.type

    # Set the hoomd parameters
    harmonic_angle = hoomd.md.angle.harmonic()
    for name, angle_type in angle_type_params.items():
        harmonic_angle.angle_coeff.set(
            name,
            t0=np.deg2rad(angle_type.theteq),
            k=2 * angle_type.k / ref_energy)

    return harmonic_angle


def _init_hoomd_dihedrals(structure, ref_energy=1.0):
    """Set up periodic dihedrals (dubbed harmonic dihedrals in HOOMD)."""
    # Need Hoomd 2.8.0 to use the proper dihedral implementation
    # from this PR https://github.com/glotzerlab/hoomd-blue/pull/492
    version_numbers = _check_hoomd_version()
    # Tuple comparison (major, minor) < (2, 8). The original test
    # `major < 2 or minor < 8` wrongly raised for versions like 3.0.
    if tuple(int(v) for v in version_numbers[:2]) < (2, 8):
        from mbuild.exceptions import MBuildError
        raise MBuildError("Please upgrade Hoomd to at least 2.8.0")

    # Identify the unique dihedral types before setting
    dihedral_type_params = {}
    for dihedral in structure.dihedrals:
        t1, t2 = dihedral.atom1.type, dihedral.atom2.type
        t3, t4 = dihedral.atom3.type, dihedral.atom4.type
        # Use a canonical ordering of the key so A-B-C-D and D-C-B-A collapse
        if [t2, t3] == sorted([t2, t3], key=natural_sort):
            dihedral_type = ('-'.join((t1, t2, t3, t4)))
        else:
            dihedral_type = ('-'.join((t4, t3, t2, t1)))
        if dihedral_type not in dihedral_type_params:
            if isinstance(dihedral.type, pmd.DihedralType):
                dihedral_type_params[dihedral_type] = dihedral.type
            elif isinstance(dihedral.type, pmd.DihedralTypeList):
                if len(dihedral.type) > 1:
                    warnings.warn("Multiple dihedral types detected" +
                                  " for single dihedral, will ignore all except " +
                                  " first diheral type")
                dihedral_type_params[dihedral_type] = dihedral.type[0]

    # Set the hoomd parameters
    periodic_torsion = hoomd.md.dihedral.harmonic()  # These are periodic torsions
    for name, dihedral_type in dihedral_type_params.items():
        periodic_torsion.dihedral_coeff.set(
            name,
            k=2 * dihedral_type.phi_k / ref_energy,
            d=1,
            n=dihedral_type.per,
            phi_0=np.deg2rad(dihedral_type.phase))

    return periodic_torsion


def _init_hoomd_rb_torsions(structure, ref_energy=1.0):
    """Set up RB dihedrals (implemented as OPLS dihedrals in HOOMD)."""
    # Identify the unique dihedral types before setting
    dihedral_type_params = {}
    for dihedral in structure.rb_torsions:
        t1, t2 = dihedral.atom1.type, dihedral.atom2.type
        t3, t4 = dihedral.atom3.type, dihedral.atom4.type
        if [t2, t3] == sorted([t2, t3], key=natural_sort):
            dihedral_type = ('-'.join((t1, t2, t3, t4)))
        else:
            dihedral_type = ('-'.join((t4, t3, t2, t1)))
        if dihedral_type not in dihedral_type_params:
            dihedral_type_params[dihedral_type] = dihedral.type

    # Set the hoomd parameters
    rb_torsion = hoomd.md.dihedral.opls()
    for name, dihedral_type in dihedral_type_params.items():
        # Convert Ryckaert-Bellemans coefficients to OPLS form
        F_coeffs = RB_to_OPLS(dihedral_type.c0 / ref_energy,
                              dihedral_type.c1 / ref_energy,
                              dihedral_type.c2 / ref_energy,
                              dihedral_type.c3 / ref_energy,
                              dihedral_type.c4 / ref_energy,
                              dihedral_type.c5 / ref_energy)
        rb_torsion.dihedral_coeff.set(name, k1=F_coeffs[0], k2=F_coeffs[1],
                                      k3=F_coeffs[2], k4=F_coeffs[3])

    return rb_torsion


def _check_hoomd_version():
    """Return hoomd's version as a list of string components, e.g. ['2','9','7']."""
    version = hoomd.__version__
    version_numbers = version.split('.')
    return version_numbers
15,206
5,147
#!/usr/bin/env python # -*- coding: utf-8 -*- from PyQt5.QtWidgets import * def launch_application(creator): def wrapper(): import sys app = QApplication(sys.argv) widget = creator() widget.show() app.exec_() return wrapper
274
88
def selection_4(): # Library import import numpy import matplotlib import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec # Library version matplotlib_version = matplotlib.__version__ numpy_version = numpy.__version__ # Histo binning xBinning = numpy.linspace(-8.0,8.0,161,endpoint=True) # Creating data sequence: middle of each bin xData = numpy.array([-7.95,-7.85,-7.75,-7.65,-7.55,-7.45,-7.35,-7.25,-7.15,-7.05,-6.95,-6.85,-6.75,-6.65,-6.55,-6.45,-6.35,-6.25,-6.15,-6.05,-5.95,-5.85,-5.75,-5.65,-5.55,-5.45,-5.35,-5.25,-5.15,-5.05,-4.95,-4.85,-4.75,-4.65,-4.55,-4.45,-4.35,-4.25,-4.15,-4.05,-3.95,-3.85,-3.75,-3.65,-3.55,-3.45,-3.35,-3.25,-3.15,-3.05,-2.95,-2.85,-2.75,-2.65,-2.55,-2.45,-2.35,-2.25,-2.15,-2.05,-1.95,-1.85,-1.75,-1.65,-1.55,-1.45,-1.35,-1.25,-1.15,-1.05,-0.95,-0.85,-0.75,-0.65,-0.55,-0.45,-0.35,-0.25,-0.15,-0.05,0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.05,1.15,1.25,1.35,1.45,1.55,1.65,1.75,1.85,1.95,2.05,2.15,2.25,2.35,2.45,2.55,2.65,2.75,2.85,2.95,3.05,3.15,3.25,3.35,3.45,3.55,3.65,3.75,3.85,3.95,4.05,4.15,4.25,4.35,4.45,4.55,4.65,4.75,4.85,4.95,5.05,5.15,5.25,5.35,5.45,5.55,5.65,5.75,5.85,5.95,6.05,6.15,6.25,6.35,6.45,6.55,6.65,6.75,6.85,6.95,7.05,7.15,7.25,7.35,7.45,7.55,7.65,7.75,7.85,7.95]) # Creating weights for histo: y5_ETA_0 y5_ETA_0_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.93650155414,2.16577055928,2.86585876426,3.2752672163,3.82387435003,4.39295226636,5.03572372006,5.70305915288,6.52597045348,7.36934973668,8.43790882849,9.65385179504,10.7346868764,11.6640460865,13.5022885242,14.5462796369,14.8819953516,16.2903621546,16.7284257823,17.4162331977,17.174681403,17.7683248984,17.7069129506,17.3384452638,17.0764254865,15.6557786939,15.2299910558,13.7847802841,12.7407891714,11.2996703962,9.60881583332,8.6794566232,7.11142195589,5.67030718072,4.68772801583,3.77883958831,2.8331059921,2.13711178364,1.64991579771,1.08902627442,0.888416044921,0.65505344326,0.405314455517,0.302962222508,0.159669264295,0.131010688652,0.0614112678056,0.0163763340815,0.0286585876426,0.00409408452037,0.00409408452037,0.0122822535611,0.065505344326,0.0736934973668,0.10644618953,0.180139686896,0.282491799906,0.454443213762,0.749217363229,0.822910900595,1.07674428486,1.54346948818,2.12892379059,2.70618969997,3.63964050661,4.68772801583,5.76856309721,7.32431777495,8.39696486329,9.67022778112,11.5371301944,12.6097772828,13.8789442041,14.9556872889,16.2330422033,16.6301698658,17.5267731037,17.891148794,17.9320887592,17.8543008254,17.6250330202,16.585133904,15.979210419,15.3159669827,14.2105639222,12.8513290775,12.1389576829,10.7060309008,9.67431977765,8.58938869975,7.6559374931,6.73476627602,5.75628310765,4.98659576181,4.66725603323,3.78702758135,3.32849037106,2.87814115382,2.10845340799,2.04294786367,0.00409408452037,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_1 y5_ETA_1_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121240822392,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121753353338,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0121313846429,0.012170493784,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_2 y5_ETA_2_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0200482816269,0.010032919325,0.0200940397991,0.0100262832744,0.010040728874,0.0100696696577,0.0100271592661,0.0301165257319,0.0401877986198,0.0,0.0502017229729,0.010040728874,0.0,0.0301145712787,0.0402058597513,0.0,0.0100568562125,0.0401512714171,0.0,0.0301337191358,0.0100702894631,0.0,0.050140155629,0.0300994521571,0.0100355638284,0.0100184158769,0.0401671012489,0.0100262832744,0.0301196784758,0.0,0.0,0.0100367001384,0.0100153623019,0.0,0.0,0.0,0.0100369728528,0.0100609841169,0.0,0.0,0.0100568562125,0.0100602899348,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100340928234,0.0100568562125,0.0,0.0,0.0100532489446,0.0200646444914,0.0301498340782,0.0200868128673,0.0301214387233,0.0301310828965,0.010045943504,0.0200609297906,0.0200572522781,0.0,0.0301088194839,0.010045943504,0.0,0.0301025759767,0.0200638924608,0.0201178403294,0.0300965762597,0.0100299566548,0.0200777058588,0.0100187051194,0.0301077410223,0.0301692711779,0.0100355638284,0.0100324441408,0.0200798255935,0.0,0.0100369728528,0.0200832055994,0.0,0.0,0.0100696696577,0.0,0.0100187051194,0.0,0.0,0
.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_3 y5_ETA_3_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0275282721771,0.0274997813219,0.00549710596009,0.0164555265735,0.0110053636951,0.0770293110214,0.0440010037659,0.0550185105048,0.0330098306536,0.0549834910877,0.0880414145324,0.121011383177,0.0934765336867,0.0880213454001,0.0990525836506,0.148391326591,0.170522298147,0.115509109618,0.0880932530198,0.142979039121,0.143099738296,0.137527304026,0.142956573069,0.132021455399,0.131943372662,0.0934995278747,0.0934989997397,0.0935291846896,0.104510168858,0.0825596976154,0.071452814174,0.0495039679626,0.0549952725621,0.0274769699499,0.0275034010784,0.0110008136084,0.0220214688448,0.0275283574913,0.0,0.0110360564673,0.021979583672,0.0219691225352,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0110095197118,0.0,0.00550243199904,0.00551802823363,0.00547487959922,0.0220146884032,0.0275094786942,0.0385276029448,0.0384989861497,0.0220041419523,0.0495313903599,0.0165169689939,0.0550053883798,0.0825226875355,0.104575210722,0.126506425347,0.066013876197,0.104442608196,0.126527185118,0.0990133797788,0.170543342297,0.0825431629255,0.181466759744,0.1538988815,0.20895098952,0.132032830616,0.115526172443,0.0824931525979,0.126454668112,0.104502043703,0.08245756442,0.0825350783965,0.0990397052803,0.0770264265915,0.0385428294849,0.0935220345535,0.0329914596786,0.043969193785,0.0494868645118,0.0274664559996,0.0164761360286,0.0165097701068,0.0110073624832,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_4 y5_ETA_4_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00295952755959,0.00197733276196,0.00197153597409,0.00295929588047,0.00592188256639,0.00888223262692,0.00986622754723,0.0207193880908,0.0266393306452,0.0276408337728,0.028623718397,0.0355146850478,0.0365133262569,0.0562514891552,0.0592211905545,0.0720236262363,0.0769833630669,0.0878251039945,0.100658684083,0.116441122468,0.105610244004,0.12136374254,0.104612220072,0.122368861145,0.107579035503,0.124337372063,0.114461228008,0.0977014885466,0.0799371916409,0.0838926916906,0.0818885871641,0.0473736095304,0.0503348935223,0.0305824370646,0.0296045868517,0.0236809646878,0.025663292179,0.01382357141,0.0108591409111,0.00690936069003,0.0049344204759,0.00296236222172,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00197140370055,0.00197179531041,0.000988009622793,0.00394563925743,0.00493570713674,0.00987045228407,0.00888712273977,0.020722578689,0.0157902465318,0.0246790126701,0.033550310684,0.0444057519442,0.0463809967883,0.0631546850964,0.0799404383551,0.08090190669,0.08585110172,0.103606500224,0.0976781202204,0.0977050960069,0.115473882196,0.132224963753,0.1322358663,0.117436981924,0.104620837893,0.107570056935,0.0986854033011,0.0976784008007,0.0799142241437,0.075986220958,0.0641623289231,0.0661257893969,0.0592049169003,0.0414675156152,0.0375125005685,0.0266518204744,0.0197401189434,0.0167707462239,0.0128311630906,0.0118483786737,0.00395132701983,0.00987041220117,0.00296029153951,0.00395140718562,0.000988172359335,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_5 y5_ETA_5_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000504690405707,0.000503681322579,0.00176459470884,0.000504669999982,0.000755187890129,0.00201616849525,0.00327598677425,0.00529427346382,0.0068047412699,0.00630216825829,0.00806693941664,0.010840665667,0.01310830592,0.0178963013326,0.0183999386429,0.0226806437203,0.025963360783,0.0310093045706,0.035792138535,0.0400765646566,0.0519272497141,0.0415845077543,0.0476444880596,0.0441119769099,0.050664615445,0.0526808211507,0.0473877760315,0.0398268505919,0.0317628720061,0.0352897295695,0.0277249790567,0.0264671781437,0.01915871554,0.0146224307521,0.00958156839025,0.0110892754217,0.00579553410721,0.00529300110682,0.0035290593812,0.00201740124113,0.00075590049007,0.000252358446569,0.0,0.000252130822703,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000251770961733,0.000252017871011,0.0,0.0012607777482,0.00100783957898,0.00226974524365,0.0032772871391,0.00453781321669,0.011097473722,0.0110881991197,0.0126025360114,0.0186502288691,0.0219320576825,0.0284887093436,0.0340287397618,0.0400856872162,0.0451133778823,0.0463772524995,0.0509192869005,0.0516829011549,0.0499042020887,0.0499201265568,0.0456294026682,0.0410958106359,0.0451322231699,0.039827538785,0.0239494277149,0.0272195212356,0.0229396403894,0.0196642173734,0.0146193658922,0.012354134315,0.00907918743253,0.00730944287875,0.00453884950745,0.00478865559789,0.00403269148997,0.00352844480876,0.00252244454586,0.00252372690566,0.00100823288933,0.00126131029763,0.000251614677883,0.000756485454199,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_6 y5_ETA_6_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000283957625274,0.000287128707592,0.000572547709956,0.000861351732727,0.000858235034671,0.00142977823532,0.000286450303915,0.00143123581351,0.000859256738999,0.00314945657622,0.00143529663835,0.00429543394091,0.00544119536808,0.00543670366794,0.00687030779608,0.00572291084221,0.00886473662672,0.0131701286858,0.00973534068191,0.0134554561147,0.0177613000431,0.0174690846075,0.0151850655816,0.0186052458129,0.0208887349921,0.0174626364735,0.0197556129072,0.0174692045728,0.0154570468727,0.0123066885575,0.010887799171,0.00944630632579,0.00601141435195,0.00457853701348,0.00315233774244,0.00228783792074,0.0017190425249,0.000574099460894,0.00085594249811,0.000286764712928,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00085828172116,0.00257774402458,0.00229116695735,0.00515367657338,0.00314700028705,0.00745015599335,0.0103183139748,0.0122993706752,0.0143236348717,0.0151823563656,0.0186132135071,0.0191729715183,0.0237481674986,0.0226402381232,0.0217478463732,0.0211806505145,0.0189065386215,0.0180207549592,0.0117413421635,0.0131746173868,0.0146080025783,0.00887664717991,0.00715153841053,0.00629180320999,0.00514740738762,0.00373118123045,0.0042993318129,0.00286459701196,0.00228898858775,0.00114435383452,0.00257892468291,0.00114002408751,0.00200925553992,0.0,0.000568809991618,0.00057289490948,0.000571732745799,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_7 y5_ETA_7_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.1617996646e-05,8.63041639912e-05,0.000129543836942,0.000129532437545,0.0,0.00021618093038,8.64047469058e-05,0.000237509621227,0.000129647940258,0.000322270298485,0.000410490734636,0.000430353219956,0.000626435839991,0.000475241698333,0.000691605605799,0.000819277594701,0.000970180465002,0.00112314528505,0.00105669727804,0.00114471529107,0.00118825302473,0.0014903026789,0.0017929520587,0.00114463650112,0.00161964099142,0.00144665053218,0.00170707143259,0.00164166529694,0.00123133310612,0.00123126898451,0.000928988408734,0.000496921088276,0.000366997383194,0.000345373062397,0.000172778396927,2.15827549073e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.15983662139e-05,0.0,0.0,6.48950068125e-05,0.000107900405391,0.000280954818599,0.0004105766073,0.000496826372698,0.00094881791124,0.00116613190815,0.00172641059339,0.00185748940943,0.00190084693203,0.00185734859335,0.00172633389892,0.002138216743,0.00196600957321,0.00140282697541,0.00108005933632,0.000926656561499,0.00125301165787,0.000777599387679,0.00108001868406,0.00099358569012,0.000907322010752,0.000712771184686,0.000691494964593,0.000496688909381,0.00010813861926,0.000280705372971,0.000302205809145,0.000365607872139,0.000194619348176,0.000172678819842,0.000151298580251,0.000108110372225,0.000108019428507,8.64090216796e-05,4.32315844076e-05,4.32170417946e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_8 y5_ETA_8_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2.84292642893e-05,0.0,2.83973993922e-05,2.83973993922e-05,5.66030748554e-05,2.84292642893e-05,0.000141723376559,0.000113644061894,0.000112773137535,0.000198503757442,0.000141120452911,8.50868331672e-05,0.000142062857986,0.000225435534901,0.000111915784712,0.000255442735234,0.000113583286953,5.67183438192e-05,0.000198632048547,0.000170429981271,0.000198798351831,0.000198741185077,0.000142022856106,0.000113357649033,0.000113596531822,0.0,2.83973993922e-05,2.83973993922e-05,2.83498693196e-05,5.64019963668e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,5.67658887402e-05,2.83684893481e-05,0.000142025796111,5.68978177294e-05,8.45111565312e-05,0.000141783943621,0.000113497477428,0.000198740145682,0.000141909131387,0.000198819139742,0.000142041461286,0.000113259010398,0.000113446992503,2.84292642893e-05,5.51480250594e-05,5.65914781711e-05,0.000113545022349,0.000113683677712,8.51264489853e-05,0.000170163302076,2.84080903176e-05,2.83973993922e-05,0.000113642859165,8.52862783201e-05,5.6878173154e-05,2.84489088647e-05,0.0,8.51961775765e-05,0.0,0.0,2.83684893481e-05,2.84489088647e-05,0.0,2.83684893481e-05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_9 y5_ETA_9_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_10 y5_ETA_10_weights = numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0521138287,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0529581672,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.05462838872,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_11 y5_ETA_11_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.230673161153,0.0,0.0,0.0,0.0,0.691026068854,0.230619670779,0.690578010125,0.460783443723,0.229982512821,0.230364746113,0.0,0.229982512821,0.921962685466,0.461195765349,0.691490266921,0.0,0.460080998305,0.461188848491,0.690561870788,0.230752243903,0.461033219172,0.230597152562,0.230428265931,0.229982512821,0.459723627277,0.230360173301,0.230020171273,0.0,0.0,0.0,0.0,0.0,0.0,0.229952462913,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.230020171273,0.0,0.0,0.0,0.690728643934,0.0,0.229982512821,0.230465309552,0.0,0.0,0.0,0.0,0.921987663011,0.691353466828,0.690780136104,0.0,0.691670105244,0.0,0.0,1.15198896663,0.461138893401,0.230020171273,0.0,0.460124420806,0.0,0.230551270733,0.230752243903,0.229932019753,0.230752243903,0.230587737949,0.460570173916,0.0,0.459889631883,0.0,0.690224097526,0.0,0.230619670779,0.230360173301,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_12 y5_ETA_12_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0554467039881,0.0831726910094,0.0276908199867,0.0277244637063,0.0277261678187,0.138459100715,0.110763733864,0.110696415651,0.138481027219,0.0553453804604,0.110839668808,0.0276896813472,0.249341468504,0.304489895107,0.193882570303,0.165968553895,0.415543020352,0.221455956552,0.110817588434,0.41551493904,0.24918529025,0.138688482718,0.415323370639,0.332468383129,0.304440925915,0.30428051623,0.193931154819,0.193743817848,0.138552153719,0.33202835282,0.110822973891,0.110800931985,0.221591477732,0.0553593057136,0.0830496717832,0.027763192836,0.0276953706979,0.0,0.0830522875767,0.0276873271331,0.0,0.0,0.0,0.0,0.0276896813472,0.0276896813472,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.027763192836,0.0,0.0554193535598,0.027603817928,0.0554837482391,0.0277263947773,0.0276586841951,0.0831603813932,0.0553866946095,0.110896946991,0.138389897592,0.0830404011036,0.276814223641,0.221656410957,0.276896121056,0.193704811752,0.193765744352,0.193745241148,0.304459505742,0.304710891184,0.221625021436,0.193771283679,0.166210553255,0.193715351861,0.221506118237,0.332359866169,0.193744356394,0.249216833641,0.19377143755,0.221591593134,0.1384076696,0.110700454743,0.166063030199,0.166173124328,0.0831490719334,0.166071069917,0.0554810939782,0.0830438247156,0.0276586841951,0.0276929395487,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_13 y5_ETA_13_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0100450209578,0.0,0.0200719202967,0.0302570227044,0.0201730639343,0.0807196141628,0.040348427967,0.0302623268889,0.0604818076191,0.0504090944485,0.0403768120299,0.0504226704906,0.0504235808197,0.0403129979564,0.0706525268263,0.0706297079089,0.120951914474,0.0705543933437,0.0604464807792,0.0605190340126,0.0907542330928,0.0302540671691,0.13107884078,0.0302726621592,0.0604138485135,0.0604985091246,0.050332353701,0.0604686624662,0.0301965950553,0.0,0.0302875915573,0.0302954446635,0.0605230030477,0.0100953560911,0.040347511569,0.0,0.0100921881456,0.0100996953267,0.0201665945285,0.0100953560911,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0201987048721,0.0201875442367,0.0201245615971,0.0201424829437,0.0101000473206,0.0504036264047,0.0302668906724,0.050308903622,0.0503705389745,0.0706056145306,0.0504038145394,0.110960869966,0.100781812144,0.0806490333092,0.0806486084889,0.0806237261587,0.0706145357563,0.0402893536739,0.0605118484812,0.0806438140887,0.0806781031534,0.100820045968,0.050385189205,0.0403465891021,0.0605020047886,0.0705439549028,0.0201625041162,0.0705455328066,0.0201750545207,0.0403691045764,0.0201483758077,0.050360561767,0.0100592767124,0.0604937875508,0.0403312045394,0.0100853121261,0.0100921881456,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_14 y5_ETA_14_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00566304279343,0.0113243080673,0.00565465151583,0.0113270474481,0.0226121500506,0.0141291878309,0.00565787567476,0.0141412072492,0.00849151124252,0.0198041884835,0.0282808755203,0.0339622436944,0.0254604059094,0.0226399170613,0.0113074601055,0.0396079460531,0.0396194499137,0.0396201424538,0.0339383202812,0.0481071444908,0.00849126115859,0.0254815707045,0.0339557068853,0.0113223958871,0.0169703182216,0.0283032714978,0.0141288069338,0.0226442338946,0.0197992483641,0.0282745349309,0.0141461896905,0.0113219688207,0.0141390603748,0.0056663977655,0.0113208722989,0.00566218481319,0.00282800521671,0.00283012131147,0.00282190547736,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00282347946712,0.00565759865872,0.0,0.00283355053922,0.00849388896355,0.00566924872226,0.0113134197978,0.0141524456361,0.0113166093298,0.0113165477706,0.0254648920303,0.0169795905641,0.0198015222041,0.0282696871502,0.0226120269323,0.0339307869839,0.0395990199806,0.0282947532545,0.0226330955413,0.0254486750495,0.039614178914,0.0226347076208,0.0424486301401,0.0395931718642,0.0339560146809,0.0395998279441,0.0198092017043,0.016970391323,0.028310854812,0.0198078820307,0.0339708581238,0.0198045462959,0.0169803023414,0.0113155205028,0.00282142762469,0.0113133543913,0.00849043780536,0.0,0.00282930950057,0.00283041371729,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_15 y5_ETA_15_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00152260673975,0.00153821483072,0.00151881876977,0.0,0.00304355646424,0.0,0.00456097823541,0.0015356572123,0.00456914512233,0.00152449658811,0.00305783256077,0.00303609280421,0.00304190063088,0.0,0.00304325980887,0.00151265401114,0.00152644434928,0.0,0.00150849610837,0.00154541020084,0.00152162931349,0.00152305585944,0.0030361719911,0.00152449658811,0.00153629543501,0.0,0.0,0.00152495989053,0.00153629543501,0.00153333597266,0.00152162931349,0.00152094972449,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.00154541020084,0.00153219780883,0.00151265401114,0.00305061000709,0.00150849610837,0.00152162931349,0.00151727403443,0.0,0.0,0.0,0.00306015380041,0.00306014316337,0.00305496528615,0.00607878412211,0.00301699221674,0.00306438379871,0.00152192833265,0.00151115655156,0.00305428569715,0.00150849610837,0.00304681258196,0.0,0.00303898371671,0.0,0.0,0.0,0.00151265401114,0.0,0.00152094972449,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating weights for histo: y5_ETA_16 y5_ETA_16_weights = 
numpy.array([0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180755028423,0.0,0.0,0.0,0.0,0.000180686039232,0.0,0.000542211148965,0.000541323760103,0.0,0.0,0.0,0.0,0.000361533925874,0.000180154568376,0.000722930104357,0.000360550714414,0.000360973850682,0.000180970234659,0.000180626135672,0.000541377657908,0.00018065712691,0.0,0.000180533816432,0.0,0.0,0.0,0.000180553027149,0.000180626135672,0.000180003616023,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.000180553027149,0.0,0.0,0.000180626135672,0.00018065712691,0.000361237718936,0.000180626135672,0.0,0.0,0.0,0.000179998688224,0.0,0.000180766962937,0.0,0.000360616354241,0.000902862921764,0.0,0.0,0.0,0.00018065712691,0.000542553400027,0.0,0.000361314061327,0.0,0.000180402036298,0.0,0.0,0.000360206615427,0.0,0.000180755028423,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]) # Creating a new Canvas fig = plt.figure(figsize=(12,6),dpi=80) frame = gridspec.GridSpec(1,1,right=0.7) pad = fig.add_subplot(frame[0]) # Creating a new Stack pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights+y5_ETA_15_weights+y5_ETA_16_weights,\ label="$bg\_dip\_1600\_inf$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#e5e5e5", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, 
weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights+y5_ETA_15_weights,\ label="$bg\_dip\_1200\_1600$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#f2f2f2", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights,\ label="$bg\_dip\_800\_1200$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights,\ label="$bg\_dip\_600\_800$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#ccc6aa", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights,\ label="$bg\_dip\_400\_600$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#c1bfa8", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, 
bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights,\ label="$bg\_dip\_200\_400$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#bab5a3", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights,\ label="$bg\_dip\_100\_200$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#b2a596", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights,\ label="$bg\_dip\_0\_100$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#b7a39b", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights,\ label="$bg\_vbf\_1600\_inf$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#ad998c", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights,\ label="$bg\_vbf\_1200\_1600$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#9b8e82", linewidth=1, 
linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights,\ label="$bg\_vbf\_800\_1200$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#876656", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights,\ label="$bg\_vbf\_600\_800$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#afcec6", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights,\ label="$bg\_vbf\_400\_600$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#84c1a3", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights,\ label="$bg\_vbf\_200\_400$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#89a8a0", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights,\ label="$bg\_vbf\_100\_200$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#829e8c", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights+y5_ETA_1_weights,\ label="$bg\_vbf\_0\_100$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#adbcc6", linewidth=1, linestyle="solid",\ bottom=None, 
cumulative=False, normed=False, align="mid", orientation="vertical") pad.hist(x=xData, bins=xBinning, weights=y5_ETA_0_weights,\ label="$signal$", histtype="step", rwidth=1.0,\ color=None, edgecolor="#7a8e99", linewidth=1, linestyle="solid",\ bottom=None, cumulative=False, normed=False, align="mid", orientation="vertical") # Axis plt.rc('text',usetex=False) plt.xlabel(r"\eta [ j_{2} ] ",\ fontsize=16,color="black") plt.ylabel(r"$\mathrm{Events}$ $(\mathcal{L}_{\mathrm{int}} = 40.0\ \mathrm{fb}^{-1})$ ",\ fontsize=16,color="black") # Boundary of y-axis ymax=(y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights+y5_ETA_15_weights+y5_ETA_16_weights).max()*1.1 ymin=0 # linear scale #ymin=min([x for x in (y5_ETA_0_weights+y5_ETA_1_weights+y5_ETA_2_weights+y5_ETA_3_weights+y5_ETA_4_weights+y5_ETA_5_weights+y5_ETA_6_weights+y5_ETA_7_weights+y5_ETA_8_weights+y5_ETA_9_weights+y5_ETA_10_weights+y5_ETA_11_weights+y5_ETA_12_weights+y5_ETA_13_weights+y5_ETA_14_weights+y5_ETA_15_weights+y5_ETA_16_weights) if x])/100. # log scale plt.gca().set_ylim(ymin,ymax) # Log/Linear scale for X-axis plt.gca().set_xscale("linear") #plt.gca().set_xscale("log",nonposx="clip") # Log/Linear scale for Y-axis plt.gca().set_yscale("linear") #plt.gca().set_yscale("log",nonposy="clip") # Legend plt.legend(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.) # Saving the image plt.savefig('../../HTML/MadAnalysis5job_0/selection_4.png') plt.savefig('../../PDF/MadAnalysis5job_0/selection_4.png') plt.savefig('../../DVI/MadAnalysis5job_0/selection_4.eps') # Running! if __name__ == '__main__': selection_4()
34,907
28,671
import os


class Cpypl:
    """List the files of a directory that carry C, Python, or Perl extensions."""

    def __init__(self, directory):
        # Directory whose entries will be scanned by file_list().
        self.directory = directory
        # Extension groups keyed by language; str.endswith accepts a tuple,
        # so each group is tested with a single call.
        self.extension_dict = {
            "c": (".c", ".h"),
            "py": (".py", ".pyc"),
            "pl": (".pl", ".pm"),
        }

    def file_list(self):
        """Return the directory entries matching any known extension group.

        Raises whatever os.listdir raises (e.g. FileNotFoundError) if the
        directory does not exist.
        """
        folder_files = os.listdir(self.directory)
        extension = self.extension_dict
        return [i for i in folder_files
                for j in extension
                if i.endswith(extension[j])]


if __name__ == "__main__":
    # BUG FIX: previously this demo ran at import time against a hard-coded
    # Windows path, crashing any `import` of the module on other machines.
    a = Cpypl(r"C:\Users\admin\Desktop\python test")
    print(a.file_list())
595
188
from .graph_encoder import GraphEncoder __all__ = ["GraphEncoder"]
68
23
# Generated by Django 2.2.9 on 2020-07-20 19:50 from django.db import migrations import multiselectfield.db.fields class Migration(migrations.Migration): dependencies = [ ('core', '0004_auto_20200717_1159'), ] operations = [ migrations.AddField( model_name='material', name='categoria', field=multiselectfield.db.fields.MultiSelectField(choices=[('BERCARIO', 'Kit Infantil (Berçário I e II)'), ('MINI_GRUPO', 'Kit Infantil (Mini grupo I e II)'), ('EMEI', 'Kit Infantil (EMEI - Infantil I e II)'), ('CICLO_ALFABETIZACAO', 'Kit Ensino Fundamental - Ciclo alfabetização (1º ao 3º ano)'), ('CICLO_INTERDISCIPLINAR', 'Kit Ensino Fundamental - Ciclo interdisciplinar (4º ao 6º ano)'), ('CICLO_ALTORAL', 'Kit Ensino Fundamental - Ciclo Autoral (7º ao 9º ano)'), ('MEDIO_EJA_MOVA', 'Kit Ensino Médio, EJA e MOVA')], default='CICLO_ALFABETIZACAO', max_length=25), ), ]
941
372
from django.db import models
import datetime


class Task(models.Model):
    """A news item ("Новость") with a title, body text, and creation timestamp."""
    title = models.CharField("Название", max_length=50)
    task = models.TextField("Описание")
    # BUG FIX: pass the callable, not its result. `datetime.datetime.now()`
    # was evaluated once at import time, so every Task created afterwards
    # shared that single stale timestamp as its default.
    date = models.DateTimeField(u'Дата и время', default=datetime.datetime.now)

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = 'Новость'
        verbose_name_plural = 'Новости'


class Test(models.Model):
    """A server message, optionally linked to a Category."""
    text = models.TextField("Описание")
    # Optional link; deleting the Category cascades and removes its messages.
    cat = models.ForeignKey('Category', on_delete=models.CASCADE, null=True)

    def __str__(self):
        return self.text

    class Meta:
        verbose_name = 'Сообщение сервера'
        verbose_name_plural = 'Сообщения сервера'


class Category(models.Model):
    """A named category; the name column is indexed for fast lookups."""
    name = models.CharField(max_length=100, db_index=True)

    def __str__(self):
        return self.name
833
270
from functools import wraps

from flask import current_app, request
from flask_restful import abort


def auth_simple_token(func):
    """Protect a view with a static API key.

    The wrapped view runs only when the `x-simple-auth` request header
    equals the application's configured API_KEY; otherwise the request is
    rejected with HTTP 401.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        supplied = request.headers.get('x-simple-auth')
        # Guard clause: abort() raises, so the happy path stays unindented.
        if current_app.config['API_KEY'] != supplied:
            abort(401)
        return func(*args, **kwargs)
    return wrapper
364
111
import numpy as np
import scipy.ndimage.measurements as scipy_measurements
import miapy.data.transformation as miapy_tfm


class ClipNegativeTransform(miapy_tfm.Transform):
    """Set all negative values of the selected sample entries to zero."""

    def __init__(self, entries=('images',)) -> None:
        super().__init__()
        # Keys of the sample dict this transform operates on.
        self.entries = entries

    def __call__(self, sample: dict) -> dict:
        for entry in self.entries:
            if entry not in sample:
                continue
            img = sample[entry]
            m = np.min(img)
            if m < 0:
                print('Clipping... min: {}'.format(m))
                # a_max=None leaves the upper range untouched; only negatives change.
                img = np.clip(img, a_min=0, a_max=None)
            sample[entry] = img
        return sample


class CenterCentroidTransform(miapy_tfm.Transform):
    """Shift each selected entry so the centroid of its foreground (> 0)
    voxels lands at the volume center, recording the applied shift per axis.
    """

    def __init__(self, entries=('images',)) -> None:
        super().__init__()
        self.entries = entries

    def __call__(self, sample: dict) -> dict:
        # NOTE(review): the range(0, 3) loops assume 3-D volumes -- confirm.
        for entry in self.entries:
            if entry not in sample:
                continue
            img = sample[entry]
            centroid_transform = []
            # move centroid to center
            com = scipy_measurements.center_of_mass(img > 0)
            for axis in range(0, 3):
                diff = com[axis] - int(img.shape[axis] / 2)
                centroid_transform.append(-diff)
                # Sub-voxel offsets (|diff| <= 1) are skipped; np.roll wraps
                # voxels pushed past a border around to the opposite side.
                if abs(diff) > 1:
                    img = np.roll(img, int(-diff), axis=axis)
            sample[entry] = img
            # store the centroid transformation (will be written to metadata later)
            sample['centroid_transform'] = np.array(centroid_transform)
        return sample


class RandomRotateShiftTransform(miapy_tfm.Transform):
    """Augmentation: an optional random wrap-around shift along one random
    axis plus three random 90-degree rotations in random planes."""

    def __init__(self, do_rotate=True, shift_amount=0, entries=('images',)) -> None:
        super().__init__()
        self.entries = entries
        self.do_rotate = do_rotate
        # Maximum shift in voxels; 0 disables shifting entirely.
        self.shift_amount = shift_amount
        print('Using RandomRotateShiftTransform({}, {})'.format(do_rotate, shift_amount))

    def __call__(self, sample: dict) -> dict:
        for entry in self.entries:
            if entry not in sample:
                continue
            img = sample[entry]
            # shift +/- shift_amount pixels
            if self.shift_amount != 0:
                # number of pixels to shift
                n = np.random.randint(-self.shift_amount, self.shift_amount + 1)
                # axis
                k = np.random.randint(0, 3)
                img = np.roll(img, n, axis=k)
            # 3x rotate by 90 degree around a random axis
            if self.do_rotate:
                planes = [(0, 1), (0, 2), (1, 2)]
                for i in range(0, 3):
                    # k = number of quarter turns, plane chosen at random.
                    k = np.random.randint(0, 3)
                    plane_idx = np.random.randint(0, 3)
                    img = np.rot90(img, k, planes[plane_idx])
            sample[entry] = img
        return sample


def get_bounding_box(img):
    """Return the inclusive bounding box of nonzero voxels as
    [min0, max0, min1, max1, min2, max2]."""
    a = np.argwhere(img)
    min0, min1, min2 = a.min(0)
    max0, max1, max2 = a.max(0)
    return [min0, max0, min1, max1, min2, max2]


# Apply reverse center centroid transform
def revert_centroid_transform(img, centroid_transform):
    """Undo CenterCentroidTransform using its stored per-axis shifts."""
    for axis in range(0, 3):
        diff = -centroid_transform[axis]
        # Mirrors the forward pass: shifts with |diff| <= 1 were never applied.
        if abs(diff) > 1:
            img = np.roll(img, int(diff), axis=axis)
    return img
3,307
1,012
# pylint: skip-file
import random  # noqa: F401 -- kept; other settings files may rely on it being imported here
import secrets
import string

from .common import *  # noqa

# We don't use user sessions, so it doesn't matter if we recreate the secret
# key on each startup. SECURITY FIX: generate it with `secrets` (CSPRNG)
# instead of `random` -- SECRET_KEY still feeds Django's signing machinery.
SECRET_KEY = ''.join(secrets.choice(string.ascii_uppercase + string.digits) for _ in range(30))

# Disable databases for the worker process.
DATABASES = {}

INSTALLED_APPS += (
    # sentry
    'raven.contrib.django.raven_compat',
)

# SENTRY: only wire up the Sentry logging pipeline when a DSN is configured.
SENTRY_DSN = env.str('SENTRY_DSN', default=None)

if SENTRY_DSN:
    # Root logger sends WARNING+ to Sentry; raven/sentry internals and DB
    # backend errors go to the console instead (propagate=False) so they
    # cannot recurse into the Sentry handler.
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': True,
        'root': {
            'level': 'WARNING',
            'handlers': ['sentry'],
        },
        'formatters': {
            'verbose': {
                'format': '%(levelname)s %(asctime)s %(module)s '
                          '%(process)d %(thread)d %(message)s'
            },
        },
        'handlers': {
            'sentry': {
                'level': 'ERROR',
                'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler'
            },
            'console': {
                'level': 'DEBUG',
                'class': 'logging.StreamHandler',
                'formatter': 'verbose'
            }
        },
        'loggers': {
            'django.db.backends': {
                'level': 'ERROR',
                'handlers': ['console'],
                'propagate': False,
            },
            'raven': {
                'level': 'DEBUG',
                'handlers': ['console'],
                'propagate': False,
            },
            'sentry.errors': {
                'level': 'DEBUG',
                'handlers': ['console'],
                'propagate': False,
            },
        },
    }

# A None dsn leaves raven effectively disabled, so defining this
# unconditionally is harmless when SENTRY_DSN is unset.
RAVEN_CONFIG = {
    'dsn': SENTRY_DSN,
    'release': env.str('SENTRY_RELEASE', default=''),
}
1,828
557
import main


def cadastrar_categoria(nome: str):
    """Append a new category name to categorias.txt."""
    with open("categorias.txt", "a") as file:
        dados = f"{nome}\n"
        file.write(dados)
    print("Categoria cadastrada com sucesso! \n")


def listar_categorias():
    """Print every category from categorias.txt and refresh the shared
    main.categorias_list cache."""
    main.categorias_list.clear()
    print(f"Lista de categorias: \n")
    with open("categorias.txt", "r") as file:
        for i in file:
            print(i.strip())
            main.categorias_list.append(i.strip())
    print("-----------------")
    print("\n")


def deletar_categoria(nome):
    """Delete a category and detach it from every product.

    Rewrites categorias.txt without the category, then rewrites produtos.txt
    replacing the category field of affected products with "NULL".
    """
    categorias = []
    contador = 0
    main.produtos_categoria.clear()
    with open("categorias.txt", "r") as file:
        for linha in file:
            categorias.append(linha.strip())
    # list.remove drops only the first occurrence, matching the old
    # search-and-break behavior.
    if nome in categorias:
        contador = 1
        categorias.remove(nome)
    if contador == 0:
        print("Codigo não localizado!\n")
    else:
        print("Categoria deletado com sucesso!\n")
    with open("categorias.txt", "w") as file:
        for x in categorias:
            file.write(f"{x}\n")
    with open("produtos.txt", "r") as file:
        for produtos_arquivo in file:
            main.produtos_categoria.append(produtos_arquivo.strip())
    print("Produtos da categoria alterados:\n")
    # BUG FIX: range(len(...) - 1) skipped the last product entirely.
    for prod in range(len(main.produtos_categoria)):
        # Product lines are "codigo,nome,preco,categoria"; malformed lines
        # are skipped instead of being silently swallowed by a bare except.
        campos = main.produtos_categoria[prod].split(",")
        if len(campos) < 4:
            continue
        if campos[3] == nome:
            # BUG FIX: str.replace(nome, "NULL") also corrupted any other
            # field that happened to contain the category name; only the
            # category field itself is rewritten now.
            campos[3] = "NULL"
            main.produtos_categoria[prod] = ",".join(campos)
            print(f"{main.produtos_categoria[prod]}")
    print(f"-----------------------------\n")
    with open("produtos.txt", "w") as file:
        for prod in main.produtos_categoria:
            file.write(f"{prod}\n")
2,289
730
import torch
from espnet2.layers.log_mel import LogMel


def test_repr():
    """Smoke test: __repr__ of a default LogMel must not raise."""
    print(LogMel())


def test_forward():
    """The mel projection keeps (batch, frames) and maps bins to n_mels."""
    logmel = LogMel(n_fft=16, n_mels=2)
    feats = torch.randn(2, 4, 9)

    out, _ = logmel(feats)
    assert out.shape == (2, 4, 2)

    lengths = torch.tensor([4, 2], dtype=torch.long)
    out, out_lengths = logmel(feats, lengths)
    assert (out_lengths == torch.tensor((4, 2), dtype=torch.long)).all()


def test_backward_leaf_in():
    """Gradients flow back to a leaf input tensor."""
    logmel = LogMel(n_fft=16, n_mels=2)
    feats = torch.randn(2, 4, 9, requires_grad=True)
    out, _ = logmel(feats)
    out.sum().backward()


def test_backward_not_leaf_in():
    """Gradients flow back through a derived (non-leaf) input tensor."""
    logmel = LogMel(n_fft=16, n_mels=2)
    leaf = torch.randn(2, 4, 9, requires_grad=True)
    feats = leaf + 2
    out, _ = logmel(feats)
    out.sum().backward()
708
335
import unittest
import json

from aws_allowlister.database.database import connect_db
from aws_allowlister.database.raw_scraping_data import RawScrapingData

# Shared live-database fixtures: these tests run against real scraped data,
# so they assert lower bounds rather than exact contents.
db_session = connect_db()
raw_scraping_data = RawScrapingData()


class RawScrapingDataTestCase(unittest.TestCase):
    def test_standards(self):
        """database.scrapers.raw_scraping_data.standards"""
        results = raw_scraping_data.standards(db_session=db_session)
        print(results)
        print(len(results))
        # This will grow over time, so let's just make sure it meets minimum size
        self.assertTrue(len(results) >= 7)
        # NOTE(review): never asserted against -- dead data kept for reference.
        expected_results = ['SOC', 'PCI', 'IRAP', 'OSPAR', 'FINMA', 'ISO', 'HIPAA']

    def test_get_rows(self):
        """database.scrapers.raw_scraping_data.get_rows"""
        # With sdk_name filter: rows for a single SDK service ("ecs").
        results = raw_scraping_data.get_rows(db_session=db_session, sdk_name="ecs")
        print(len(results))
        # print(results)
        # Without filter: all rows.
        results = raw_scraping_data.get_rows(db_session=db_session)
        # This will change over time, so let's just check that the size of this is massive
        print(len(results))
        self.assertTrue(len(results) > 850)  # 857

    def test_get_sdk_names_matching_compliance_standard(self):
        """database.scrapers.raw_scraping_data.get_sdk_names_matching_compliance_standard"""
        # NOTE(review): smoke test only -- nothing is asserted here.
        results = raw_scraping_data.get_sdk_names_matching_compliance_standard(db_session=db_session,
                                                                               standard_name="SOC")
        # print(results)
        print(len(results))  # 120

    def test_get_service_names_matching_compliance_standard(self):
        """database.scrapers.raw_scraping_data.get_service_names_matching_compliance_standard"""
        # NOTE(review): smoke test only -- nothing is asserted here either.
        results = raw_scraping_data.get_service_names_matching_compliance_standard(db_session=db_session,
                                                                                   standard_name="SOC")
        # print(results)
        print(len(results))  # 119
        # NOTE(review): never asserted against -- reference snapshot of the
        # service-name -> SDK-name mapping for the SOC standard.
        expected_results = {'Amazon API Gateway': 'apigateway', 'Amazon AppStream 2.0': 'appstream',
                            'Amazon Athena': 'athena', 'Amazon Chime': 'chime',
                            'Amazon Cloud Directory': 'clouddirectory', 'Amazon CloudFront': 'cloudfront',
                            'Amazon CloudWatch': 'cloudwatch',
                            'Amazon CloudWatch Events [includes Amazon EventBridge]': 'events',
                            'Amazon CloudWatch Logs': 'logs',
                            'Amazon CloudWatch SDK Metrics for Enterprise Support': 'sdkmetrics',
                            'Amazon Cognito': 'cognito-sync', 'Amazon Comprehend': 'comprehend',
                            'Amazon Comprehend Medical': 'comprehendmedical', 'Amazon Connect': 'connect',
                            'Amazon DynamoDB': 'dynamodb', 'Amazon EC2 Auto Scaling': 'autoscaling',
                            'Amazon Elastic Block Store (EBS)': 'ec2',
                            'Amazon Elastic Compute Cloud (EC2)': 'ec2',
                            'Amazon Elastic Container Registry (ECR)': 'ecr',
                            'Amazon Elastic Container Service': 'ecs',
                            'Amazon Elastic File System (EFS)': 'elasticfilesystem',
                            'Amazon Elastic Kubernetes Service (EKS)': 'eks',
                            'Amazon Elastic MapReduce (EMR)': 'elasticmapreduce',
                            'Amazon ElastiCache for Redis': 'elasticache',
                            'Amazon Elasticsearch Service': 'es', 'Amazon Forecast': 'amazonforecast',
                            'Amazon FreeRTOS': 'freertos', 'Amazon FSx': 'fsx',
                            'Amazon GuardDuty': 'guardduty', 'Amazon Inspector': 'inspector',
                            'Amazon Kinesis Data Analytics': 'kinesisanalytics',
                            'Amazon Kinesis Data Firehose': 'firehose',
                            'Amazon Kinesis Data Streams': 'kinesis',
                            'Amazon Kinesis Video Streams': 'kinesisvideo', 'Amazon Lex': 'models.lex',
                            'Amazon Macie': 'macie',
                            'Amazon Managed Streaming for Apache Kafka': 'kafka', 'Amazon MQ': 'mq',
                            'Amazon Neptune': 'neptune-db', 'Amazon Personalize': 'personalize',
                            'Amazon Pinpoint': 'mobiletargeting', 'Amazon Polly': 'polly',
                            'Amazon Quantum Ledger Database (QLDB)': 'qldb',
                            'Amazon QuickSight': 'quicksight', 'Amazon Redshift': 'redshift',
                            'Amazon Rekognition': 'rekognition',
                            'Amazon Relational Database Service (RDS)': 'rds',
                            'Amazon Route 53': 'route53', 'Amazon S3 Glacier': 'glacier',
                            'Amazon SageMaker': 'sagemaker', 'Amazon SimpleDB': 'sdb',
                            'Amazon Simple Email Service (SES)': 'ses',
                            'Amazon Simple Notification Service (SNS)': 'sns',
                            'Amazon Simple Queue Service (SQS)': 'sqs',
                            'Amazon Simple Storage Service (S3)': 's3',
                            'Amazon Simple Workflow Service (SWF)': 'swf', 'Amazon Textract': 'textract',
                            'Amazon Transcribe': 'transcribe', 'Amazon Translate': 'translate',
                            'Amazon Virtual Private Cloud (VPC)': 'ec2', 'Amazon WorkDocs': 'workdocs',
                            'Amazon WorkLink': 'worklink', 'Amazon WorkMail': 'workmail',
                            'Amazon WorkSpaces': 'workspaces', 'AWS Amplify': 'amplify',
                            'AWS AppSync': 'appsync', 'AWS Backup': 'backup', 'AWS Batch': 'batch',
                            'AWS Certificate Manager (ACM)': 'acm', 'AWS CloudFormation': 'cloudformation',
                            'AWS CloudHSM': 'cloudhsm', 'AWS CloudTrail': 'cloudtrail',
                            'AWS CodeBuild': 'codebuild', 'AWS CodeCommit': 'codecommit',
                            'AWS CodeDeploy': 'codedeploy', 'AWS CodePipeline': 'codepipeline',
                            'AWS Config': 'config', 'AWS Control Tower': 'controltower',
                            'AWS Data Exchange': 'dataexchange',
                            'AWS Database Migration Service (DMS)': 'dms', 'AWS DataSync': 'datasync',
                            'AWS Direct Connect': 'directconnect', 'AWS Directory Service': 'ds',
                            'AWS Elastic Beanstalk': 'elasticbeanstalk',
                            'AWS Elemental MediaConnect': 'mediaconnect',
                            'AWS Elemental MediaConvert': 'mediaconvert',
                            'AWS Elemental MediaLive': 'medialive', 'AWS Firewall Manager': 'fms',
                            'AWS Global Accelerator': 'globalaccelerator', 'AWS Glue': 'glue',
                            'AWS Identity and Access Management (IAM)': 'iam', 'AWS IoT Core': 'iot',
                            'AWS IoT Device Management': 'iot', 'AWS IoT Events': 'iotevents',
                            'AWS IoT Greengrass': 'greengrass',
                            'AWS Key Management Service (KMS)': 'kms', 'AWS Lambda': 'lambda',
                            'AWS License Manager': 'license-manager', 'AWS OpsWorks Stacks': 'opsworks',
                            'AWS OpsWorks Stacksfor Chef Automate': 'opsworks-cm',
                            'AWS Organizations': 'organizations', 'AWS Outposts': 'outposts',
                            'AWS Personal Health Dashboard': 'health',
                            'AWS Resource Groups': 'resource-groups', 'AWS RoboMaker': 'robomaker',
                            'AWS Secrets Manager': 'secretsmanager', 'AWS Security Hub': 'securityhub',
                            'AWS Server Migration Service (SMS)': 'sms',
                            'AWS Serverless Application Repository': 'serverlessrepo',
                            'AWS Service Catalog': 'servicecatalog', 'AWS Shield': 'DDoSProtection',
                            'AWS Snowball': 'snowball', 'AWS Step Functions': 'states',
                            'AWS Storage Gateway': 'storagegateway', 'AWS Systems Manager': 'ssm',
                            'AWS Transfer Family': 'transfer',
                            'AWS Web Application Firewall (WAF)': 'waf', 'AWS X-Ray': 'xray',
                            'Elastic Load Balancing (ELB)': 'elasticloadbalancing'}
6,347
2,203
import random

from . import constants
from . import exceptions


class PasswordGenerator:
    """Configurable random password builder.

    The constructor flags mirror the CLI options: restrict the pool to
    certain groups (``only``), strip groups or literal characters
    (``ignore``), force characters into the pool (``include``), allow
    repeated characters (``repeat``) and optionally insert a separator
    every few characters (``separator`` / ``separator_length``).
    Group names understood everywhere: ``alphabets``, ``uppercase``,
    ``lowercase``, ``numbers``, ``symbols``.
    """

    def __init__(self, length=False, ignore=False, only=False, include=False,
                 repeat=False, separator=False, separator_length=False,
                 separation=False):
        # Internal state filled in by generate().
        self.__password = None
        self.__available_char = None
        self.__possibility = None
        self.__error = None
        self.length = length
        self.ignore = ignore
        self.only = only
        self.include = include
        self.repeat = repeat
        self.separator_length = separator_length
        # ``separation`` is kept as a legacy alias for ``separator``.
        self.separator = separator if separator else separation

    def __all_possible_chars(self):
        """Build the full character pool and the group-name lookup table."""
        upper = constants.POSSIBLE_UPPERCASE_CHARS
        lower = constants.POSSIBLE_LOWERCASE_CHARS
        digits = constants.POSSIBLE_NUMBERS
        symbols = constants.POSSIBLE_SPECIAL_CHARS
        self.__possibility = upper + lower + digits + symbols
        self.__available_char = {
            "alphabets": upper + lower,
            "uppercase": upper,
            "lowercase": lower,
            "numbers": digits,
            "symbols": symbols,
        }

    def __set_length(self):
        """Pick a random default length, or record an error when ``only``
        is used without an explicit length."""
        if self.length:
            return
        if self.only:
            self.__error = ValueError('[-] Password length must be given.')
        else:
            self.length = random.randint(constants.DEFAULT_MIN_PASS_LEN,
                                         constants.DEFAULT_MAX_PASS_LEN)

    def __add_only_wanted(self):
        """Rebuild the pool from only the requested groups/characters."""
        pool = constants.EMPTY_STRING
        try:
            for token in self.only.split(','):
                if token.lower() in self.__available_char:
                    pool += self.__available_char[token.lower()]
                if token.lower() not in self.__available_char:
                    # Not a group name: treat each character literally.
                    for ch in token:
                        pool += ch
        except Exception:
            raise exceptions.GenpasswdException
        self.__possibility = pool

    def __remove_unwanted(self):
        """Strip ignored groups/characters out of the current pool."""
        try:
            tokens = self.ignore.split(',')
            # split() always yields at least one token, so the guard below
            # is effectively always taken for a non-empty ``ignore``.
            if (self.ignore == ',' or ',,,' in self.ignore
                    or ',,' in self.ignore or len(tokens) > 0):
                pool = self.__possibility.replace(',', constants.EMPTY_STRING)
                for token in tokens:
                    if token.lower() in self.__available_char:
                        for ch in self.__available_char[token.lower()]:
                            pool = pool.replace(ch, constants.EMPTY_STRING)
                    # NOTE: deliberately case-sensitive here, matching the
                    # original behavior — a capitalized group name also has
                    # its literal letters removed.
                    if token not in self.__available_char:
                        for ch in token:
                            pool = pool.replace(ch, constants.EMPTY_STRING)
                self.__possibility = pool
        except Exception:
            raise exceptions.GenpasswdException

    def __include_characters(self):
        """Append requested groups/characters to the pool (no duplicates)."""
        pool = self.__possibility
        try:
            for token in self.include.split(','):
                if token.lower() in self.__available_char:
                    for ch in self.__available_char[token.lower()]:
                        if ch not in pool:
                            pool += ch
                if token.lower() not in self.__available_char:
                    for ch in token:
                        if ch not in pool:
                            pool += ch
        except Exception:
            raise exceptions.GenpasswdException
        self.__possibility = pool

    def __repeat_char(self):
        """Inflate the pool so random.sample() can reuse characters."""
        if self.repeat is None or self.repeat is True:
            self.__possibility *= self.length

    def __check(self):
        """Record an error when the requested length cannot be satisfied."""
        too_long = (self.length > len(self.__possibility)
                    or self.length > constants.PASSWORD_LENGTH_LIMIT)
        if self.length and too_long and self.__error is None:
            self.__error = ValueError('[-] Password length must be less.')

    def __separated_pass(self):
        """Return the password with a separator inserted every
        ``separator_length`` characters."""
        if type(self.separator) is bool:
            self.separator = constants.DEFAULT_SEPARATOR
        if not self.separator_length:
            self.separator_length = constants.DEFAULT_SEPARATE_LENGTH
        pieces = constants.EMPTY_STRING
        for index, ch in enumerate(self.__password):
            if index != 0 and index % self.separator_length == 0:
                pieces += self.separator + ch
            else:
                pieces += ch
        return pieces

    def __filter(self):
        """Apply only/include/ignore filters, then the repeat expansion."""
        if self.only:
            self.__add_only_wanted()
        if self.include:
            self.__include_characters()
        if self.ignore:
            self.__remove_unwanted()
        self.__repeat_char()

    def generate(self):
        """Build and return the password string.

        On invalid configuration a ``ValueError`` instance is *returned*
        (not raised), matching the original API.
        """
        self.__all_possible_chars()
        self.__set_length()
        self.__filter()
        self.__check()
        if self.__error is not None:
            return self.__error
        # sample() draws without replacement; __repeat_char() inflated the
        # pool beforehand when repetition is allowed.
        self.__password = constants.EMPTY_STRING.join(
            random.sample(self.__possibility, self.length))
        if self.separator or self.separator_length:
            return self.__separated_pass()
        return self.__password
5,533
1,467
###American Environmental Solutions Data Manipulation### ## Created by Jeremy Herrmann ## ##Import Libraries## from __future__ import print_function from os.path import join, dirname, abspath import xlrd from xlrd.sheet import ctype_text import xlsxwriter #################### def loadSpreadsheet(): fname = join(dirname(dirname(abspath(__file__))), 'AES/First Spreadsheet', 'GBZ65745 Excel SE855 GLENWOOD RD-1 (copy).xls') xl_workbook = xlrd.open_workbook(fname) xl_sheet = xl_workbook.sheet_by_name("Results") return xl_workbook, xl_sheet def grabSimpleInformation(xl_workbook, xl_sheet): numSpaces = 0 generalAreas = {} num_cols = xl_sheet.ncols for row_idx in range(8, xl_sheet.nrows-7): if(xl_sheet.cell(row_idx,0).value == "Mercury"): Mercury_Values_Raw = (xl_sheet.row(row_idx)) if(xl_sheet.cell(row_idx,0).value == "pH at 25C - Soil"): Corrosivity_Values_Raw = (xl_sheet.row(row_idx)) if(xl_sheet.cell(row_idx,0).value == "Flash Point"): Flashpoint_Values_Raw = (xl_sheet.row(row_idx)) if(xl_sheet.cell(row_idx,0).value == "Ignitability"): Ignitability_Values_Raw = (xl_sheet.row(row_idx)) if(xl_sheet.cell(row_idx,0).value == "Reactivity Cyanide"): Reactivity_Values_Cyanide_Raw = (xl_sheet.row(row_idx)) if(xl_sheet.cell(row_idx,0).value == "Reactivity Sulfide"): Reactivity_Values_Sulfide_Raw = (xl_sheet.row(row_idx)) if(xl_sheet.cell(row_idx,0).value == "Total Cyanide (SW9010C Distill.)"): Cyanide_Values_Raw = (xl_sheet.row(row_idx)) if(numSpaces%3 == 0): generalAreas[int(row_idx)] = str(xl_sheet.cell(row_idx,0).value) numSpaces +=1 if(xl_sheet.cell(row_idx,0).value == ""): numSpaces += 1 return Mercury_Values_Raw, Corrosivity_Values_Raw, Flashpoint_Values_Raw, Ignitability_Values_Raw, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, Cyanide_Values_Raw, generalAreas def sortGeneralAreas(generalAreas): keys = generalAreas.keys() sortedGenAreas = [[0 for i in range(2)]for i in range(len(keys))] for x in range(0,len(keys)): smallestKey = 100000 for key in 
generalAreas.keys(): if(key < smallestKey): smallestKey = key sortedGenAreas[x][0] = int(smallestKey) sortedGenAreas[x][1] = str(generalAreas.pop(smallestKey)) return sortedGenAreas def insertRowsIntoAreas(xl_sheet, sortedGenAreas): rowsInArea = [[""]for i in range(len(sortedGenAreas))] for x in range(0,len(sortedGenAreas)): rowsInArea[x][0] = sortedGenAreas[x][1] numAreas = len(sortedGenAreas) for x in range(0 , numAreas): if(x < numAreas-1): for y in range(sortedGenAreas[x][0]+1, sortedGenAreas[x+1][0]-2): rowsInArea[x].append(xl_sheet.row(y)) else: for y in range(sortedGenAreas[x][0]+1, xl_sheet.nrows-7): rowsInArea[x].append(xl_sheet.row(y)) return rowsInArea print("Beginning program...") #Loading the file to be parsed xl_workbook, xl_sheet = loadSpreadsheet() #Grabbing basic information Company_Name = xl_sheet.cell(0, 0).value Type_Samples_Collected_Raw = xl_sheet.row(4) global firstIndex firstIndex = 6 #Begin parsing to find simple useful information Mercury_Values_Raw, Corrosivity_Values_Raw, Flashpoint_Values_Raw, Ignitability_Values_Raw, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, Cyanide_Values_Raw, generalAreas = grabSimpleInformation(xl_workbook, xl_sheet) #Sort the general areas in increasing order(Row number) sortedGenAreas = sortGeneralAreas(generalAreas) #Insert the rows that belong to each respective area rowsInArea = insertRowsIntoAreas(xl_sheet, sortedGenAreas) print("Done Parsing") print() ######################################################################################################################## def startWritingFinalFile(): workbook = xlsxwriter.Workbook('/home/jeremy/Desktop/AES/Excel_Reformatting.xlsx') worksheet = workbook.add_worksheet() return workbook, worksheet #Refining a given row def valueRefinerMetals(inputArrayRaw): outputArray = [] pos = 0 units = str(inputArrayRaw[2].value) divisor = 1 if(units[0:2] == "ug"): divisor = 1000 for value in inputArrayRaw: if((pos >= firstIndex and pos%2 == firstIndex%2) 
or (pos == 0) or (pos == 2)): if(pos == 0): outputArray.append(str(value.value)) elif(pos == 2): outputArray.append("ppm") outputArray.append("") elif(str(value.value).find("<") == -1): outputArray.append(str(round((float(value.value)/divisor), 5))) else: outputArray.append("N.D.") pos+=1 return(outputArray) def isDetected(compound): hasFloat = False for x in compound: try: val = float(x) hasFloat = True break except Exception as e: val = "" return hasFloat def isNumber(value): try: val = float(value) return True except Exception as e: return False def removeUselessRows(rowsInArea, index): y = 1 lenRow = (len(rowsInArea[index][1])) while(y < len(rowsInArea[index])): if not isDetected(rowsInArea[index][y]): rowsInArea[index].remove(rowsInArea[index][y]) y -= 1 y += 1 if(len(rowsInArea[index]) == 1): emptyArray = ["None Detected", "_", "_"] for x in range(len(emptyArray), lenRow): emptyArray.append("N.D.") rowsInArea[index].append(emptyArray) return rowsInArea[index] def createBeginning(worksheet, currLine): line = 1 x = len(Type_Samples_Collected) offset = 4 finalLetter="" if 64+x+offset > 90: firstLetter = chr(int(65+(((x+offset)-26)/26))) secondLetter = chr(64+((x+offset)%26)) finalLetter = firstLetter+secondLetter else: finalLetter = chr(64+x+offset) for x in range(0, 5): worksheet.merge_range("B"+str(line)+":"+finalLetter+str(line), "") line += 1 return worksheet, currLine def createHeading(worksheet, currLine, Type_Samples_Collected_Raw, formatOne): formatOne.set_text_wrap(True) Type_Samples_Collected = [] pos = 0 for value in Type_Samples_Collected_Raw: if((pos >= firstIndex and pos%2 == firstIndex%2) or (pos ==0)): Type_Samples_Collected.append(value.value) pos+=1 worksheet.write('B'+str(currLine), 'Parameter', formatOne) worksheet.write('C'+str(currLine), 'Compounds Detected', formatOne) worksheet.write('D'+str(currLine), 'Units', formatOne) worksheet.write('E'+str(currLine), 'NYSDEC Part 375 Unrestricted Use Criteria', formatOne) offset = 4 for x in 
range(1,len(Type_Samples_Collected)): if(64+x+offset < 90): worksheet.write(str(chr(65+x+offset))+str(currLine), str(Type_Samples_Collected[x]), formatOne) else: firstLetter = chr(int(65+(((x+offset)-26)/26))) secondLetter = chr(65+((x+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(Type_Samples_Collected[x]), formatOne) currLine += 1 return worksheet, currLine, Type_Samples_Collected def addMercuryValues(worksheet, currLine, Mercury_Values_Raw, formatOne, formatTwo): Mercury_Values = valueRefinerMetals(Mercury_Values_Raw) offset = 2 worksheet.write('B'+str(currLine), 'Mercury 7471', formatOne) for x in range(0, len(Mercury_Values)): if(isNumber(Mercury_Values[x])): if(64+x+offset < 90): worksheet.write(str(chr(65+x+offset))+str(currLine), str(Mercury_Values[x]), formatTwo) else: firstLetter = chr(int(65+(((x+offset)-26)/26))) secondLetter = chr(65+((x+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(Mercury_Values[x]), formatTwo) else: if(64+x+offset < 90): worksheet.write(str(chr(65+x+offset))+str(currLine), str(Mercury_Values[x]), formatOne) else: firstLetter = chr(int(65+(((x+offset)-26)/26))) secondLetter = chr(65+((x+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(Mercury_Values[x]), formatOne) currLine += 1 return worksheet, currLine, Mercury_Values def addPCBValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo): indexOfPCBS = 0 for x in range(0, len(sortedGenAreas)): if sortedGenAreas[x][1] == "PCBs By SW8082A": indexOfPCBS = x for x in range(1, len(rowsInArea[indexOfPCBS])): rowsInArea[indexOfPCBS][x] = valueRefinerMetals(rowsInArea[indexOfPCBS][x]) rowsInArea[indexOfPCBS] = removeUselessRows(rowsInArea, indexOfPCBS) firstLine = currLine offset = 2 for x in range(1, len(rowsInArea[indexOfPCBS])): for y in range(0, len(rowsInArea[indexOfPCBS][x])): if(isNumber(rowsInArea[indexOfPCBS][x][y])): if(64+y+offset < 90): 
worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfPCBS][x][y]), formatTwo) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfPCBS][x][y]), formatTwo) else: if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfPCBS][x][y], formatOne) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfPCBS][x][y]), formatOne) currLine += 1 lastLine = currLine - 1 if(lastLine - firstLine != 0): worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'PCBS', formatOne) else: worksheet.write('B'+str(firstLine), 'PCBS',formatOne) return worksheet, currLine def addPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo): indexOfPesticides = 0 for x in range(0, len(sortedGenAreas)): if sortedGenAreas[x][1] == "Pesticides - Soil By SW8081B": indexOfPesticides = x for x in range(1, len(rowsInArea[indexOfPesticides])): rowsInArea[indexOfPesticides][x] = valueRefinerMetals(rowsInArea[indexOfPesticides][x]) rowsInArea[indexOfPesticides] = removeUselessRows(rowsInArea, indexOfPesticides) firstLine = currLine offset = 2 for x in range(1, len(rowsInArea[indexOfPesticides])): for y in range(0, len(rowsInArea[indexOfPesticides][x])): if(isNumber(rowsInArea[indexOfPesticides][x][y])): if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfPesticides][x][y]), formatTwo) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfPesticides][x][y]), formatTwo) else: if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfPesticides][x][y], 
formatOne) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfPesticides][x][y]), formatOne) currLine += 1 lastLine = currLine - 1 if(lastLine - firstLine != 0): worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'Pesticides', formatOne) else: worksheet.write('B'+str(firstLine), 'Pesticides', formatOne) return worksheet, currLine def addMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo): indexOfMetals = 0 for x in range(0, len(sortedGenAreas)): if sortedGenAreas[x][1] == "Metals, Total": indexOfMetals = x for x in range(1, len(rowsInArea[indexOfMetals])): rowsInArea[indexOfMetals][x] = valueRefinerMetals(rowsInArea[indexOfMetals][x]) rowsInArea[indexOfMetals] = removeUselessRows(rowsInArea, indexOfMetals) firstLine = currLine offset = 2 worksheet.write('B'+str(currLine), 'Metals, Total') for x in range(1, len(rowsInArea[indexOfMetals])): if(rowsInArea[indexOfMetals][x][0] != "Mercury"): for y in range(0, len(rowsInArea[indexOfMetals][x])): if(isNumber(rowsInArea[indexOfMetals][x][y])): if(64+y+offset < 90): worksheet.write(str(chr(65+offset+y))+str(currLine), str(rowsInArea[indexOfMetals][x][y]), formatTwo) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfMetals][x][y]), formatTwo) else: if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfMetals][x][y], formatOne) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfMetals][x][y]), formatOne) currLine += 1 lastLine = currLine - 1 if(lastLine - firstLine != 0): worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'Metals', formatOne) 
else: worksheet.write('B'+str(firstLine), 'Metals', formatOne) return worksheet, currLine def addCyanideValues(worksheet, currLine, Cyanide_Values_Raw, formatOne, formatTwo): Cyanide_Values = valueRefinerMetals(Cyanide_Values_Raw) worksheet.write('B'+str(currLine), 'Cyanide', formatOne) offset = 2 for x in range(0, len(Cyanide_Values)): if(isNumber(Cyanide_Values[x])): if(64+x+offset < 90): worksheet.write(str(chr(65+x+offset))+str(currLine), str(Cyanide_Values[x]), formatTwo) else: firstLetter = chr(int(65+(((x+offset)-26)/26))) secondLetter = chr(65+((x+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(Cyanide_Values[x]), formatTwo) else: if(64+x+offset < 90): worksheet.write(str(chr(65+x+offset))+str(currLine), str(Cyanide_Values[x]), formatOne) else: firstLetter = chr(int(65+(((x+offset)-26)/26))) secondLetter = chr(65+((x+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(Cyanide_Values[x]), formatOne) currLine += 1 return worksheet, currLine, Cyanide_Values def addSemiVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo): indexOfSemiVolatiles = 0 for x in range(0, len(sortedGenAreas)): if sortedGenAreas[x][1] == "Semivolatiles By SW8270D": indexOfSemiVolatiles = x for x in range(1, len(rowsInArea[indexOfSemiVolatiles])): rowsInArea[indexOfSemiVolatiles][x] = valueRefinerMetals(rowsInArea[indexOfSemiVolatiles][x]) rowsInArea[indexOfSemiVolatiles] = removeUselessRows(rowsInArea, indexOfSemiVolatiles) firstLine = currLine offset = 2 for x in range(1, len(rowsInArea[indexOfSemiVolatiles])): for y in range(0, len(rowsInArea[indexOfSemiVolatiles][x])): if(isNumber(rowsInArea[indexOfSemiVolatiles][x][y])): if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfSemiVolatiles][x][y]), formatTwo) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter 
worksheet.write(col+str(currLine), str(rowsInArea[indexOfSemiVolatiles][x][y]), formatTwo) else: if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfSemiVolatiles][x][y], formatOne) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfSemiVolatiles][x][y]), formatOne) currLine += 1 lastLine = currLine - 1 if(lastLine - firstLine != 0): worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'SemiVolatiles', formatOne) else: worksheet.write('B'+str(firstLine), 'SemiVolatiles', formatOne) return worksheet, currLine def addVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo): indexOfVolatiles = 0 for x in range(0, len(sortedGenAreas)): if sortedGenAreas[x][1] == "Volatiles (TCL) By SW8260C": indexOfVolatiles = x for x in range(1, len(rowsInArea[indexOfVolatiles])): rowsInArea[indexOfVolatiles][x] = valueRefinerMetals(rowsInArea[indexOfVolatiles][x]) rowsInArea[indexOfVolatiles] = removeUselessRows(rowsInArea, indexOfVolatiles) firstLine = currLine offset = 2 worksheet.write('B'+str(currLine), 'Volatiles (TCL) By SW8260C') for x in range(1, len(rowsInArea[indexOfVolatiles])): for y in range(0, len(rowsInArea[indexOfVolatiles][x])): if(isNumber(rowsInArea[indexOfVolatiles][x][y])): if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfVolatiles][x][y]), formatTwo) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfVolatiles][x][y]), formatTwo) else: if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfVolatiles][x][y], formatOne) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter 
worksheet.write(col+str(currLine), str(rowsInArea[indexOfVolatiles][x][y]), formatOne) currLine += 1 lastLine = currLine - 1 if(lastLine - firstLine != 0): worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'Volatiles', formatOne) else: worksheet.write('B'+str(firstLine), 'Volatiles', formatOne) return worksheet, currLine def createSecondHeading(worksheet, currLine, Type_Samples_Collected, formatOne): worksheet.set_row(currLine-1,50) worksheet.write('B'+str(currLine), 'RCRA Characteristics ', formatOne) worksheet.merge_range('C'+str(currLine)+':E'+str(currLine), 'Regulatory Criteria', formatOne) offset = 4 for x in range(1,len(Type_Samples_Collected)): if(64+x+offset < 90): worksheet.write(str(chr(65+x+offset))+str(currLine), str(Type_Samples_Collected[x]), formatOne) else: firstLetter = chr(int(65+(((x+offset)-26)/26))) secondLetter = chr(65+((x+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(Type_Samples_Collected[x]), formatOne) currLine += 1 return worksheet, currLine def addCorrosivityValues(worksheet, currLine, Corrosivity_Values_Raw, formatOne): Corrosivity_Values = valueRefinerMetals(Corrosivity_Values_Raw) worksheet.write('B'+str(currLine), 'Corrosivity', formatOne) offset = 2 for x in range(0,len(Corrosivity_Values)): if(64+x+offset < 90): worksheet.write(str(chr(65+x+offset))+str(currLine), str(Corrosivity_Values[x]), formatOne) else: firstLetter = chr(int(65+(((x+offset)-26)/26))) secondLetter = chr(65+((x+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(Corrosivity_Values[x]), formatOne) currLine += 1 return worksheet, currLine, Corrosivity_Values def addFlashpointValues(worksheet, currLine, Flashpoint_Values_Raw, formastOne): Flashpoint_Values = [] pos = 0 for value in Flashpoint_Values_Raw: if(pos == 0): Flashpoint_Values.append(value.value) Flashpoint_Values.append(" ") Flashpoint_Values.append("Degree F") Flashpoint_Values.append(">200 Degree F") if((pos >= 
firstIndex and pos%2 == firstIndex%2)): Flashpoint_Values.append(value.value) pos+=1 offset = 1 for x in range(0,len(Flashpoint_Values)): if(64+x+offset < 90): worksheet.write(str(chr(65+x+offset))+str(currLine), str(Flashpoint_Values[x]), formatOne) else: firstLetter = chr(int(65+(((x+offset)-26)/26))) secondLetter = chr(65+((x+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(Flashpoint_Values[x]), formatOne) currLine += 1 return worksheet, currLine, Flashpoint_Values def addIgnitabilityValues(worksheet, currLine, Ignitability_Values_Raw, formatOne): Ignitability_Values = [] pos = 0 for value in Ignitability_Values_Raw: if(pos == 0): Ignitability_Values.append(value.value) Ignitability_Values.append(" ") Ignitability_Values.append("Degree F") Ignitability_Values.append("<140 Degree F") if((pos >= firstIndex and pos%2 == firstIndex%2)): Ignitability_Values.append(value.value) pos+=1 offset = 1 for x in range(0,len(Ignitability_Values)): if(64+x+offset < 90): worksheet.write(str(chr(65+x+offset))+str(currLine), str(Ignitability_Values[x]), formatOne) else: firstLetter = chr(int(65+(((x+offset)-26)/26))) secondLetter = chr(65+((x+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(Ignitability_Values[x]), formatOne) currLine += 1 return worksheet, currLine, Ignitability_Values def addReactivityValues(worksheet, currLine, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, formatOne): Reactivity_Values_Cyanide = valueRefinerMetals(Reactivity_Values_Cyanide_Raw) worksheet.merge_range('B'+str(currLine)+":B"+str(currLine+1), 'Reactivity', formatOne) worksheet.write('C'+str(currLine), 'Cyanide', formatOne) offset = 2 for x in range(1,len(Reactivity_Values_Cyanide)): if(64+x+offset < 90): worksheet.write(str(chr(65+x+offset))+str(currLine), str(Reactivity_Values_Cyanide[x]), formatOne) else: firstLetter = chr(int(65+(((x+offset)-26)/26))) secondLetter = chr(65+((x+offset)%26)) col = firstLetter 
+ secondLetter worksheet.write(col+str(currLine), str(Reactivity_Values_Cyanide[x]), formatOne) currLine += 1 Reactivity_Values_Sulfide = valueRefinerMetals(Reactivity_Values_Sulfide_Raw) worksheet.write('C'+str(currLine), 'Sulfide', formatOne) for x in range(1,len(Reactivity_Values_Sulfide)): if(64+x+offset < 90): worksheet.write(str(chr(65+x+offset))+str(currLine), str(Reactivity_Values_Sulfide[x]), formatOne) else: firstLetter = chr(int(65+(((x+offset)-26)/26))) secondLetter = chr(65+((x+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(Reactivity_Values_Sulfide[x]), formatOne) currLine += 1 return worksheet, currLine, Reactivity_Values_Cyanide, Reactivity_Values_Sulfide def createThirdHeading(worksheet, currLine, Type_Samples_Collected, formatOne): worksheet.set_row(currLine-1,50) worksheet.write('B'+str(currLine), 'Toxicity ', formatOne) worksheet.merge_range('C'+str(currLine)+':E'+str(currLine), 'TCLP Regulatory Criteria', formatOne) x = len(Type_Samples_Collected) offset = 4 finalLetter="" if 64+x+offset > 90: firstLetter = chr(int(65+(((x+offset)-26)/26))) print(firstLetter) secondLetter = chr(64+((x+offset)%26)) print(secondLetter) finalLetter = firstLetter+secondLetter else: finalLetter = chr(64+x+offset) worksheet.merge_range("F"+str(currLine)+":"+finalLetter+str(currLine), "", formatOne) currLine += 1 return worksheet, currLine def addTCLPMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo): indexOfTCLPMetals = 0 for x in range(0, len(sortedGenAreas)): if sortedGenAreas[x][1] == "Metals, TCLP": indexOfTCLPMetals = x for x in range(1, len(rowsInArea[indexOfTCLPMetals])): rowsInArea[indexOfTCLPMetals][x] = valueRefinerMetals(rowsInArea[indexOfTCLPMetals][x]) rowsInArea[indexOfTCLPMetals] = removeUselessRows(rowsInArea, indexOfTCLPMetals) firstLine = currLine offset = 2 for x in range(1, len(rowsInArea[indexOfTCLPMetals])): for y in range(0, len(rowsInArea[indexOfTCLPMetals][x])): 
if(isNumber(rowsInArea[indexOfTCLPMetals][x][y])): if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfTCLPMetals][x][y]), formatTwo) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPMetals][x][y]), formatTwo) else: if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfTCLPMetals][x][y], formatOne) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPMetals][x][y]), formatOne) currLine += 1 lastLine = currLine - 1 if(lastLine - firstLine != 0): worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Metals', formatOne) else: worksheet.write('B'+str(firstLine), 'TCLP Metals', formatOne) return worksheet, currLine def addVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo): indexOfVOCS = 0 for x in range(0, len(sortedGenAreas)): if sortedGenAreas[x][1] == "TCLP Volatiles By SW8260C": indexOfVOCS = x for x in range(1, len(rowsInArea[indexOfVOCS])): rowsInArea[indexOfVOCS][x] = valueRefinerMetals(rowsInArea[indexOfVOCS][x]) rowsInArea[indexOfVOCS] = removeUselessRows(rowsInArea, indexOfVOCS) firstLine = currLine offset = 2 for x in range(1, len(rowsInArea[indexOfVOCS])): for y in range(0, len(rowsInArea[indexOfVOCS][x])): if(isNumber(rowsInArea[indexOfVOCS][x][y])): if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfVOCS][x][y]), formatTwo) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfVOCS][x][y]), formatTwo) else: if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), 
rowsInArea[indexOfVOCS][x][y], formatOne) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfVOCS][x][y]), formatOne) currLine += 1 lastLine = currLine - 1 if(lastLine - firstLine != 0): worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Vocs', formatOne) else: worksheet.write('B'+str(firstLine), 'TCLP Vocs', formatOne) return worksheet, currLine def addSVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne): indexOfSVOCS = 0 for x in range(0, len(sortedGenAreas)): if sortedGenAreas[x][1] == "TCLP Acid/Base-Neutral By SW8270D": indexOfSVOCS = x for x in range(1, len(rowsInArea[indexOfSVOCS])): rowsInArea[indexOfSVOCS][x] = valueRefinerMetals(rowsInArea[indexOfSVOCS][x]) rowsInArea[indexOfSVOCS] = removeUselessRows(rowsInArea, indexOfSVOCS) firstLine = currLine offset = 2 for x in range(1, len(rowsInArea[indexOfSVOCS])): for y in range(0, len(rowsInArea[indexOfSVOCS][x])): if(isNumber(rowsInArea[indexOfSVOCS][x][y])): if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfSVOCS][x][y]), formatTwo) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfSVOCS][x][y]), formatTwo) else: if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfSVOCS][x][y], formatOne) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfSVOCS][x][y]), formatOne) currLine += 1 lastLine = currLine - 1 if(lastLine - firstLine != 0): worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP SVocs', formatOne) else: worksheet.write('B'+str(firstLine), 'TCLP SVocs', formatOne) return worksheet, 
currLine def addTCLPPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne): indexOfTCLPPesticides = 0 for x in range(0, len(sortedGenAreas)): if sortedGenAreas[x][1] == "TCLP Pesticides By SW8081B": indexOfTCLPPesticides = x for x in range(1, len(rowsInArea[indexOfTCLPPesticides])): rowsInArea[indexOfTCLPPesticides][x] = valueRefinerMetals(rowsInArea[indexOfTCLPPesticides][x]) rowsInArea[indexOfTCLPPesticides] = removeUselessRows(rowsInArea, indexOfTCLPPesticides) firstLine = currLine offset = 2 for x in range(1, len(rowsInArea[indexOfTCLPPesticides])): for y in range(0, len(rowsInArea[indexOfTCLPPesticides][x])): if(isNumber(rowsInArea[indexOfTCLPPesticides][x][y])): if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfTCLPPesticides][x][y]), formatTwo) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPPesticides][x][y]), formatTwo) else: if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfTCLPPesticides][x][y], formatOne) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfTCLPPesticides][x][y]), formatOne) currLine += 1 lastLine = currLine - 1 if(lastLine - firstLine != 0): worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Pesticides', formatOne) else: worksheet.write('B'+str(firstLine), 'TCLP Pesticides', formatOne) return worksheet, currLine def addTCLPHerbicideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne): indexOfTCLPHerbicides = 0 for x in range(0, len(sortedGenAreas)): if sortedGenAreas[x][1] == "TCLP Herbicides By SW8151A": indexOfHerbicides = x for x in range(1, len(rowsInArea[indexOfHerbicides])): rowsInArea[indexOfHerbicides][x] = 
valueRefinerMetals(rowsInArea[indexOfHerbicides][x]) rowsInArea[indexOfTCLPHerbicides] = removeUselessRows(rowsInArea, indexOfTCLPHerbicides) firstLine = currLine offset = 2 for x in range(1, len(rowsInArea[indexOfHerbicides])): for y in range(0, len(rowsInArea[indexOfHerbicides][x])): if(isNumber(rowsInArea[indexOfHerbicides][x][y])): if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfHerbicides][x][y]), formatTwo) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfHerbicides][x][y]), formatTwo) else: if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfHerbicides][x][y], formatOne) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfHerbicides][x][y]), formatOne) currLine += 1 lastLine = currLine - 1 if(lastLine - firstLine != 0): worksheet.merge_range('B'+str(firstLine)+":B"+str(lastLine), 'TCLP Pesticides / Herbicides', formatOne) else: worksheet.write('B'+str(firstLine), 'TCLP Pesticides / Herbicides', formatOne) return worksheet, currLine def addTPHValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne): indexOfGasolineHydrocarbons = 0 for x in range(0, len(sortedGenAreas)): if sortedGenAreas[x][1] == "Gasoline Range Hydrocarbons (C6-C10) By SW8015D": indexOfGasolineHydrocarbons = x for x in range(1, len(rowsInArea[indexOfGasolineHydrocarbons])): rowsInArea[indexOfGasolineHydrocarbons][x] = valueRefinerMetals(rowsInArea[indexOfGasolineHydrocarbons][x]) indexOfDieselHydrocarbons = 0 for x in range(0, len(sortedGenAreas)): if sortedGenAreas[x][1] == "TPH By SW8015D DRO": indexOfDieselHydrocarbons = x for x in range(1, len(rowsInArea[indexOfDieselHydrocarbons])): rowsInArea[indexOfDieselHydrocarbons][x] = 
valueRefinerMetals(rowsInArea[indexOfDieselHydrocarbons][x]) offset = 2 worksheet.merge_range('B'+str(currLine)+":B"+str(currLine+1), 'Total Petroleum Hydrocarbons', formatOne) for x in range(1, len(rowsInArea[indexOfGasolineHydrocarbons])): for y in range(0, len(rowsInArea[indexOfGasolineHydrocarbons][x])): if(isNumber(rowsInArea[indexOfGasolineHydrocarbons][x][y])): if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfGasolineHydrocarbons][x][y]), formatTwo) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfGasolineHydrocarbons][x][y]), formatTwo) else: if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfGasolineHydrocarbons][x][y], formatOne) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfGasolineHydrocarbons][x][y]), formatOne) currLine += 1 for x in range(1, len(rowsInArea[indexOfDieselHydrocarbons])): for y in range(0, len(rowsInArea[indexOfDieselHydrocarbons][x])): if(isNumber(rowsInArea[indexOfDieselHydrocarbons][x][y])): if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), str(rowsInArea[indexOfDieselHydrocarbons][x][y]), formatTwo) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), str(rowsInArea[indexOfDieselHydrocarbons][x][y]), formatTwo) else: if(64+y+offset < 90): worksheet.write(str(chr(65+y+offset))+str(currLine), rowsInArea[indexOfDieselHydrocarbons][x][y], formatOne) else: firstLetter = chr(int(65+(((y+offset)-26)/26))) secondLetter = chr(65+((y+offset)%26)) col = firstLetter + secondLetter worksheet.write(col+str(currLine), 
str(rowsInArea[indexOfDieselHydrocarbons][x][y]), formatOne) currLine += 1 return worksheet, currLine print("Writing to Excel File...") workbook, worksheet = startWritingFinalFile() worksheet.set_column('B:B', 25) worksheet.set_column('C:C', 30) worksheet.set_column('E:E', 15) worksheet.set_row(5,50) #Important Information - Titles, etc.. formatOne = workbook.add_format() formatOne.set_align('center') formatOne.set_align('vcenter') formatOne.set_font_name('Arial') formatOne.set_font_size('12') formatOne.set_border(6) #Numbers within the text formatTwo = workbook.add_format() formatTwo.set_align('center') formatTwo.set_align('vcenter') formatTwo.set_font_name('Arial') formatTwo.set_font_size('12') formatTwo.set_border(6) formatTwo.set_bg_color('#87CEFF') formatTwo.set_bold() #Current Line to overwrite each process currLine = 6 #Heading for each column worksheet, currLine, Type_Samples_Collected = createHeading(worksheet, currLine, Type_Samples_Collected_Raw, formatOne) #Adding Mercury Values worksheet, currLine, Mercury_Values = addMercuryValues(worksheet, currLine, Mercury_Values_Raw, formatOne, formatTwo) #Adding PCB Values worksheet, currLine = addPCBValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo) #Adding Pesticide Values worksheet, currLine = addPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo) #Adding Metal Values worksheet, currLine = addMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo) #Adding Cyanide Values worksheet, currLine, Cyanide_Values = addCyanideValues(worksheet, currLine, Cyanide_Values_Raw, formatOne, formatTwo) #Adding Semi Volatile Organic Compounds worksheet, currLine = addSemiVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo) #Adding Volatile Organic Compounds worksheet, currLine = addVolatileValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo) #RCRA Second Heading worksheet, currLine = 
createSecondHeading(worksheet, currLine, Type_Samples_Collected, formatOne) #Adding Corrosivity(pH) Values worksheet, currLine, Corrosivity_Values = addCorrosivityValues(worksheet, currLine, Corrosivity_Values_Raw, formatOne) #Adding Flashpoint Values worksheet, currLine, Flashpoint_Values = addFlashpointValues(worksheet, currLine, Flashpoint_Values_Raw, formatOne) #Adding Ignitability Values worksheet, currLine, Ignitability_Values = addIgnitabilityValues(worksheet, currLine, Ignitability_Values_Raw, formatOne) #Adding Reactivity Values worksheet, currLine, Reactivity_Values_Cyanide, Reactivity_Values_Sulfide = addReactivityValues(worksheet, currLine, Reactivity_Values_Cyanide_Raw, Reactivity_Values_Sulfide_Raw, formatOne) #Toxicity Third Heading worksheet, currLine = createThirdHeading(worksheet, currLine, Type_Samples_Collected, formatOne) #Adding TCLP Metals(Barium / Lead) worksheet, currLine = addTCLPMetalValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo) #Adding TCLP VOCS worksheet, currLine = addVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne, formatTwo) #Adding TCLP SVOCS worksheet, currLine = addSVOCSValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne) #Adding TCLP Pesticides worksheet, currLine = addTCLPPesticideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne) #Adding TCLP Herbicides worksheet, currLine = addTCLPHerbicideValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne) #Adding Total Petroleum Hydrocarbons worksheet, currLine = addTPHValues(worksheet, currLine, sortedGenAreas, rowsInArea, formatOne) #Beginning information(Company Name, Address, Dates Samples were collected) worksheet, currLine = createBeginning(worksheet, currLine) workbook.close() print("Done Writing")
43,014
14,686
"""Abstractions for a liquid-handling robot.""" # Standard imports import asyncio import logging # Local package imports from lhrhost.robot.p_axis import Axis as PAxis from lhrhost.robot.x_axis import Axis as XAxis from lhrhost.robot.y_axis import Axis as YAxis from lhrhost.robot.z_axis import Axis as ZAxis from lhrhost.util.cli import Prompt # Logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class Robot(object): """High-level controller for 4-axis liquid-handling robot. Currently the x-axis is moved manually by the user. """ def __init__(self): """Initialize member variables.""" self.p = PAxis() self.z = ZAxis() self.y = YAxis() self.x = XAxis() self.prompt = Prompt(end='', flush=True) def register_messaging_stack(self, messaging_stack): """Associate a messaging stack with the robot. The messaging stack is used for host-peripheral communication. """ messaging_stack.register_response_receivers( self.p.protocol, self.z.protocol, self.y.protocol, self.x.protocol ) messaging_stack.register_command_senders( self.p.protocol, self.z.protocol, self.y.protocol, self.x.protocol ) async def wait_until_initialized(self): """Wait until all axes are initialized.""" await asyncio.gather( self.p.wait_until_initialized(), self.z.wait_until_initialized(), self.y.wait_until_initialized(), self.x.wait_until_initialized() ) async def synchronize_values(self): """Request the values of all protocol channels.""" await self.p.synchronize_values() await self.z.synchronize_values() await self.y.synchronize_values() await self.x.synchronize_values() async def load_calibrations(self): """Load calibration parameters from json files.""" self.p.load_calibration_json() self.p.load_preset_json() self.p.load_tunings_json() self.z.load_calibration_json() self.z.load_preset_json() self.z.load_tunings_json() self.y.load_calibration_json() self.y.load_preset_json() self.y.load_tunings_json() self.x.load_calibration_json() self.x.load_preset_json() 
self.x.load_tunings_json() async def ensure_sample_platform_configuration(self, configuration): """Ensure that the sample platform is configured as speified.""" await self.prompt( 'Please ensure that the sample platform modules are configured ' 'following the "{}" configurationn:'.format(configuration) ) self.x.configuration = configuration async def go_to_alignment_hole(self): """Move the pipettor head to the alignment hole.""" await self.z.go_to_high_end_position() await asyncio.gather( self.y.go_to_alignment_hole(), self.x.go_to_alignment_hole() ) await self.z.go_to_alignment_hole() async def align_manually(self): """Do a manual alignment of x/y positioning.""" await self.go_to_alignment_hole() await self.prompt( 'Please move the x-axis and the y-axis so that the pipette tip is ' 'directly over the round alignment hole: ' ) await asyncio.gather(self.x.set_alignment(), self.y.set_alignment()) logger.info('Aligned to the zero position at the alignment hole.') async def go_to_module_position( self, module_name, x_position, y_position, z_position=None ): """Move the pipettor head to the specified x/y position of the module.""" module_type = self.x.get_module_type(module_name) if ( self.x.current_preset_position is not None and self.x.at_module(module_name) ): await self.z.go_to_module_position(module_type, 'far above') else: await self.z.go_to_high_end_position() await asyncio.gather( self.x.go_to_module_position(module_name, x_position), self.y.go_to_module_position(module_type, y_position) ) if z_position is not None: await self.z.go_to_module_position(module_type, 'far above') async def intake(self, module_name, volume, height=None): """Intake fluid at the specified height. Height should be a preset z-axis position or a physical z-axis position. 
""" module_type = self.x.get_module_type(module_name) if height is not None: try: await self.z.go_to_module_position(module_type, height) except KeyError: await self.z.go_to_physical_position(height) await self.p.intake(volume) async def intake_precise(self, module_name, volume, height=None): """Intake fluid at the specified height. Height should be a preset z-axis position or a physical z-axis position. Volume should be either 20, 30, 40, 50, or 100. """ module_type = self.x.get_module_type(module_name) if height is None: if self.z.current_preset_position is not None: height = self.z.current_preset_position[1] else: height = await self.z.physical_position await self.z.go_to_module_position(module_type, 'above') await self.p.go_to_pre_intake(volume) try: await self.z.go_to_module_position(module_type, height) except KeyError: await self.z.go_to_physical_position(height) await self.p.intake(volume) async def dispense(self, module_name, volume=None, height=None): """Dispense fluid at the specified height. If volume is none, dispenses all syringe contents. Height should be a preset z-axis position or a physical z-axis position. """ module_type = self.x.get_module_type(module_name) if height is not None: try: await self.z.go_to_module_position(module_type, height) except KeyError: await self.z.go_to_physical_position(height) await self.p.dispense(volume)
6,282
1,889
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from tinkoff.cloud.longrunning.v1 import longrunning_pb2 as tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2 class OperationsStub(object): """Missing associated documentation comment in .proto file.""" def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.GetOperation = channel.unary_unary( '/tinkoff.cloud.longrunning.v1.Operations/GetOperation', request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.GetOperationRequest.SerializeToString, response_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.FromString, ) self.WaitOperation = channel.unary_unary( '/tinkoff.cloud.longrunning.v1.Operations/WaitOperation', request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WaitOperationRequest.SerializeToString, response_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.FromString, ) self.ListOperations = channel.unary_unary( '/tinkoff.cloud.longrunning.v1.Operations/ListOperations', request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsRequest.SerializeToString, response_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsResponse.FromString, ) self.WatchOperations = channel.unary_stream( '/tinkoff.cloud.longrunning.v1.Operations/WatchOperations', request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsRequest.SerializeToString, response_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsResponse.FromString, ) self.DeleteOperation = channel.unary_unary( '/tinkoff.cloud.longrunning.v1.Operations/DeleteOperation', 
request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.DeleteOperationRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.CancelOperation = channel.unary_unary( '/tinkoff.cloud.longrunning.v1.Operations/CancelOperation', request_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.CancelOperationRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class OperationsServicer(object): """Missing associated documentation comment in .proto file.""" def GetOperation(self, request, context): """Starts polling for operation statuses Returns operation status """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def WaitOperation(self, request, context): """Wait for operation update """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListOperations(self, request, context): """List operations """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def WatchOperations(self, request, context): """Watch operations """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteOperation(self, request, context): """Deletes specified operations """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def CancelOperation(self, request, context): """Cancels specified operations """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def 
add_OperationsServicer_to_server(servicer, server): rpc_method_handlers = { 'GetOperation': grpc.unary_unary_rpc_method_handler( servicer.GetOperation, request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.GetOperationRequest.FromString, response_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.SerializeToString, ), 'WaitOperation': grpc.unary_unary_rpc_method_handler( servicer.WaitOperation, request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WaitOperationRequest.FromString, response_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.SerializeToString, ), 'ListOperations': grpc.unary_unary_rpc_method_handler( servicer.ListOperations, request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsRequest.FromString, response_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsResponse.SerializeToString, ), 'WatchOperations': grpc.unary_stream_rpc_method_handler( servicer.WatchOperations, request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsRequest.FromString, response_serializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsResponse.SerializeToString, ), 'DeleteOperation': grpc.unary_unary_rpc_method_handler( servicer.DeleteOperation, request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.DeleteOperationRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'CancelOperation': grpc.unary_unary_rpc_method_handler( servicer.CancelOperation, request_deserializer=tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.CancelOperationRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 
'tinkoff.cloud.longrunning.v1.Operations', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class Operations(object): """Missing associated documentation comment in .proto file.""" @staticmethod def GetOperation(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/GetOperation', tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.GetOperationRequest.SerializeToString, tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def WaitOperation(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/WaitOperation', tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WaitOperationRequest.SerializeToString, tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.Operation.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def ListOperations(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/ListOperations', tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsRequest.SerializeToString, tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.ListOperationsResponse.FromString, options, channel_credentials, insecure, 
call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def WatchOperations(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/tinkoff.cloud.longrunning.v1.Operations/WatchOperations', tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsRequest.SerializeToString, tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.WatchOperationsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteOperation(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/DeleteOperation', tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.DeleteOperationRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CancelOperation(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/tinkoff.cloud.longrunning.v1.Operations/CancelOperation', tinkoff_dot_cloud_dot_longrunning_dot_v1_dot_longrunning__pb2.CancelOperationRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
12,100
3,443
import os

from airflow.providers.postgres.hooks.postgres import PostgresHook
from psycopg2 import sql


def set_schema_query(conn_type, hook, schema_id, user):
    """Build a CREATE SCHEMA statement for the given connection type.

    For postgres, identifiers are safely quoted via psycopg2's sql module
    using the hook's live connection. For snowflake, a plain statement is
    returned.

    Raises ValueError for an unsupported conn_type; the original silently
    returned None, which callers would then try to execute as SQL.
    """
    if conn_type == "postgres":
        return (
            sql.SQL("CREATE SCHEMA IF NOT EXISTS {schema} AUTHORIZATION {user}")
            .format(schema=sql.Identifier(schema_id), user=sql.Identifier(user))
            .as_string(hook.get_conn())
        )
    if conn_type == "snowflake":
        # NOTE(review): schema_id is interpolated without identifier quoting;
        # it must come from trusted configuration, never from user input.
        return f"CREATE SCHEMA IF NOT EXISTS {schema_id}"
    raise ValueError(f"Unsupported connection type: {conn_type!r}")


def get_schema():
    """Return the configured Astro SQL schema, defaulting to 'tmp_astro'."""
    return os.getenv("AIRFLOW__ASTRO__SQL_SCHEMA") or "tmp_astro"
602
203
import time
import math as m  # NOTE(review): unused; kept to preserve the file's import surface

start = time.time()


def checksum(block):
    """Compute the STM32 bootloader XOR checksum for each data block.

    Each element of ``block`` is a list of hex strings (e.g. '0xff').
    Bytes are folded pairwise into the accumulator; an odd trailing byte
    would be skipped, but blocks are padded to 256 bytes before this runs.
    Returns a list with one hex-string checksum per block.
    """
    checksums = []
    xor = '0'
    for line in block:
        for x in range(0, len(line) - 1, 2):
            xor = hex(int(xor, 16) ^ int(line[x], 16) ^ int(line[x + 1], 16))
        checksums.append(xor)
        xor = '0'  # reset the accumulator for the next block
    return checksums


def stm_parser():
    """Read main.bin, split it into 256-byte blocks and simulate flashing.

    Prints the WRITE MEMORY command sequence (address, data block id and
    checksum) for each block, as the STM32 USART bootloader expects.
    """
    addr = '0x08000000'  # STM32 flash base address
    with open("main.bin", "rb") as f:
        data = [hex(b) for b in f.read()]
    # Split into 256-byte blocks. Using '<' (not '<=') so an input whose
    # size is an exact multiple of 256 does not produce a trailing empty
    # block (off-by-one in the original).
    block = []
    pos = 0
    while pos < len(data):
        block.append(data[pos:pos + 256])
        pos += 256
    count = len(block)
    end = len(block) - 1
    i_addr = int(addr, 16)
    # Pad the final block with 0xFF up to 256 bytes.
    while len(block[end]) % 256 != 0:
        block[end].append(hex(255))
    d_chk = checksum(block)
    print("Start Flashing")
    i = 0
    while i < count:
        l_addr = hex(int(i_addr % 65536))
        h_addr = hex(int(i_addr / 65536))
        i_addr += 256
        # Address checksum is the XOR of the address bytes sent.
        a_chk = hex(int(l_addr, 16) ^ int(h_addr, 16))
        print("Sending WRITE MEMORY Command...")
        print("0x31", "0xCE")
        time.sleep(0.1)
        print("Sending Address...")
        print(h_addr, l_addr, a_chk)
        time.sleep(0.1)
        print("Sending Data...")
        print(i + 1, "BLOCK", d_chk[i])
        time.sleep(0.1)
        i += 1
    print()
    print("Done Flashing")


# Guard so importing this module does not try to open main.bin.
if __name__ == '__main__':
    stm_parser()
    print('Time (ms):', 1000 * (time.time() - start))
1,599
659
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0

import argparse
import json
import math
import sys

import hjson


def parse_hex_int(hex_str):
    """Parse a hex string, treating the empty string as 0."""
    # int() throws an error message for empty string
    if hex_str == '':
        return 0
    return int(hex_str, 16)


def parse_test(raw_data, n, e, t):
    """Convert one wycheproof test case into our test-vector dict.

    Args:
        raw_data: full wycheproof JSON structure (used for the notes table).
        n, e: RSA modulus and public exponent (ints) shared by the group.
        t: the individual test-case dict.
    Raises:
        RuntimeError: if t['result'] is not valid/invalid/acceptable.
    """
    test = {'n': n, 'e': e}
    test['msg'] = parse_hex_int(t['msg'])
    # Message is expressed in hex notation, so the length in bytes is
    # the number of characters / 2
    test['msg_len'] = math.ceil(len(t['msg']) / 2)
    test['signature'] = parse_hex_int(t['sig'])
    notes = []
    if t['comment']:
        notes.append(t['comment'])
    # Add notes from flags, if any
    notes.extend([raw_data['notes'][flag] for flag in t['flags']])
    # cases for expected result
    if t['result'] == 'valid':
        test['valid'] = True
    elif t['result'] == 'invalid':
        test['valid'] = False
    elif t['result'] == 'acceptable':
        if t['comment'] == 'short signature':
            # We consider short signatures valid
            test['valid'] = True
        else:
            # err on the side of caution and reject "acceptable" signatures
            test['valid'] = False
            notes.append('signature marked as acceptable by wycheproof')
    else:
        # Fixed: the original formatted test['result'], a key that is never
        # set on `test`, so an unknown result raised KeyError and masked
        # this intended error message.
        raise RuntimeError('Unexpected result type {}'.format(t['result']))
    test['comment'] = 'wycheproof test with tcId={:d}, notes={}'.format(
        t["tcId"], ', '.join(notes))
    return test


def parse_test_group(raw_data, group):
    """Parse all test cases in one wycheproof group (shared RSA key)."""
    tests = []
    n = parse_hex_int(group['n'])
    e = parse_hex_int(group['e'])
    for t in group['tests']:
        tests.append(parse_test(raw_data, n, e, t))
    return tests


def parse_test_vectors(raw_data):
    """Parse the full wycheproof structure into a flat list of test dicts.

    Raises RuntimeError unless the file is RSASSA-PKCS1-v1_5 with SHA-256,
    the only combination this converter supports.
    """
    if raw_data['algorithm'] != 'RSASSA-PKCS1-v1_5':
        raise RuntimeError('Unexpected algorithm: {}, expected {}'.format(
            raw_data['algorithm'], 'RSASSA-PKCS1-v1_5'))
    tests = []
    for group in raw_data['testGroups']:
        if group['sha'] != 'SHA-256':
            raise RuntimeError(
                'Unexpected hash function: {}, expected {}'.format(
                    group['sha'], 'SHA-256'))
        tests.extend(parse_test_group(raw_data, group))
    return tests


def main():
    """Convert a wycheproof JSON file into an hjson test-vector file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('src',
                        metavar='FILE',
                        type=argparse.FileType('r'),
                        help='Read test vectors from this JSON file.')
    parser.add_argument('dst',
                        metavar='FILE',
                        type=argparse.FileType('w'),
                        help='Write output to this file.')
    args = parser.parse_args()
    testvecs = parse_test_vectors(json.load(args.src))
    args.src.close()
    hjson.dump(testvecs, args.dst)
    args.dst.close()
    return 0


if __name__ == '__main__':
    sys.exit(main())
3,034
961
#!/usr/bin/env python ############################################################################ # # Copyright (C) 2012-2015 PX4 Development Team. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # 3. Neither the name PX4 nor the names of its contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# ############################################################################

# send BOOT command to a device

import argparse
import sys
from sys import platform as _platform  # NOTE(review): unused; kept to preserve the import surface

import serial

# Parse commandline arguments
parser = argparse.ArgumentParser(description="Send boot command to a device")
parser.add_argument('--baud', action="store", type=int,
                    default=115200,
                    help="Baud rate of the serial port")
parser.add_argument('port', action="store",
                    help="Serial port(s) to which the FMU may be attached")
args = parser.parse_args()

# Bootloader protocol bytes: REBOOT command followed by end-of-command.
REBOOT = b'\x30'
EOC = b'\x20'

print("Sending reboot to %s" % args.port)

try:
    port = serial.Serial(args.port, args.baud, timeout=0.5)
except Exception as e:
    # Report the underlying reason (permissions, missing device, ...);
    # the original swallowed it entirely.
    print("Unable to open %s: %s" % (args.port, e))
    sys.exit(1)

port.write(REBOOT + EOC)
port.close()
sys.exit(0)
2,479
818
from urllib.parse import quote_plus

import pymongo


class Mongo:
    """Thin wrapper around a MongoDB connection configured from config.mongo."""

    def __init__(self, config):
        """Open a client using credentials/host/port/db from config.mongo.

        Username and password are percent-escaped as required by the MongoDB
        URI format: an unescaped '@', ':' or '/' in a password would
        otherwise corrupt the URI (pymongo requires quote_plus here).
        """
        self._config = config
        mongo = self._config.mongo
        self.client = pymongo.MongoClient('mongodb://{}:{}@{}:{}/{}'.format(
            quote_plus(mongo['username']),
            quote_plus(mongo['password']),
            mongo['host'],
            mongo['port'],
            mongo['db']
        ))
        self.db = self.client[mongo['db']]

    def drop_db_collections(self, collections):
        """Drop each named collection from the configured database."""
        for name in collections:
            self.db[name].drop()
550
165
from unittest import TestCase

import numpy as np
from bokeh.models import LayoutDOM

from coord2vec.evaluation.visualizations.bokeh_plots import bokeh_pr_curve_from_y_proba


class TestBokeh_pr_curve(TestCase):
    """Smoke test for the bokeh precision-recall curve plot."""

    @classmethod
    def setUpClass(cls):
        # Random binary scores/labels; 10 samples keep the test fast.
        # NOTE(review): no RNG seed is set, so the exact curve varies per
        # run — acceptable here because only the return type is asserted.
        cls.y_pred = np.random.choice((0, 1), size=10)
        cls.y_true = np.random.choice((0, 1), size=10)

    def test_bokeh_pr_curve(self):
        # Only asserts that a bokeh layout object comes back, not its contents.
        fig = bokeh_pr_curve_from_y_proba(self.y_pred, self.y_true, legend='Zarecki is special')
        self.assertIsInstance(fig, LayoutDOM)
543
209
#!/usr/bin/env python
# encoding: utf-8

# Python 2 script: reads temperature/humidity from a Sensirion SHT1x sensor
# through a Bus Pirate in raw-wire (bit-bang) mode.
#based on Microwire.py and hackaday buspirate/sht tutorial

import sys,time
from optparse import OptionParser
from pyBusPirateLite.RAW_WIRE import *

def sht_command(rw, data):
    """Send one command byte to the SHT and return its acknowledge bit.

    The exact clock/data sequence below is the SHT "transmission start"
    condition; do not reorder these calls.
    """
    ##clear shtxx interface
    rw.data_high()
    for i in range(9):
        rw.clk_tick()
    #start condition
    rw.data_high()
    rw.clk_high()
    rw.data_low()
    rw.clk_low()
    rw.clk_high()
    rw.data_high()
    rw.clk_low()
    #command
    rw.bulk_trans(1, [data])
    #read and return bit
    return rw.read_bit()

def sht_acknowledge(rw):
    """Acknowledge a received byte (pull data low for one clock)."""
    #acknowledge
    rw.data_low()
    rw.clk_tick()

def sht_wait_conversion_finished(rw, options):
    """Poll MOSI until the sensor signals the measurement is ready."""
    while 1:
        # The SHT pulls the data line low when conversion is complete.
        mosi_status = (ord(rw.read_pins()) & BBIOPins.MOSI) >> (BBIOPins.MOSI-1)
        if mosi_status:
            if options.verbose: print 'waiting...'
        else:
            if options.verbose: print 'conversion done'
            time.sleep(0.1)
            break

def sht_temperature(rw, options):
    """Run a temperature measurement and return the value in Celsius."""
    #soft reset
    status = sht_command(rw, 0b00011110)
    if options.verbose: print 'acknowledgment status:', ord(status)
    if not status:
        print "Error resetting SHT"
    #start temperature conversion
    status = sht_command(rw, 0b00000011)
    if options.verbose: print 'acknowledgment status:', ord(status)
    if not status:
        print "Error starting temperature conversion SHT"
    sht_wait_conversion_finished(rw, options)
    # Read high byte, low byte and CRC, acknowledging each byte.
    data = list()
    for i in range(3):
        data.append(ord(rw.read_byte()))
        sht_acknowledge(rw)
    temp_hb, temp_lb ,temp_crc = data
    #temp_hb = 0x17 #for formula testing
    #temp_lb = 0xcc #for formula testing
    #print (temp_hb<<8)+temp_lb
    # Datasheet conversion: d1 + d2 * raw (14-bit reading).
    temp = -39.7 + 0.01 * ((temp_hb<<8)+temp_lb)
    if options.verbose:
        print 'temp_hb:', temp_hb
        print 'temp_lb:', temp_lb
        print 'temp_crc:', temp_crc
        print 'temp:', temp
    return temp

def sht_humidity(rw, options):
    """Run a humidity measurement and return relative humidity in percent."""
    #soft reset
    status = sht_command(rw, 0b00011110)
    if options.verbose: print 'acknowledgment status:', ord(status)
    if not status:
        print "Error resetting SHT"
    #start humidity conversion
    status = sht_command(rw, 0b00000101)
    if options.verbose: print 'acknowledgment status:', ord(status)
    if not status:
        print "Error starting humidity conversion SHT"
    #time.sleep(1)
    sht_wait_conversion_finished(rw, options)
    # Read high byte, low byte and CRC, acknowledging each byte.
    data = list()
    for i in range(3):
        data.append(ord(rw.read_byte()))
        sht_acknowledge(rw)
    hum_hb, hum_lb ,hum_crc = data
    #hum_hb = 0x05 #for formula testing
    #hum_lb = 0x80 #for formula testing
    #print (hum_hb<<8)+hum_lb
    # Datasheet second-order linearization of the 12-bit raw reading.
    hum = -2.0468 + 0.0367*((hum_hb<<8)+hum_lb) + (-0.0000015955*(((hum_hb<<8)+hum_lb)**2))
    if options.verbose:
        print 'hum_hb:', hum_hb
        print 'hum_lb:', hum_lb
        print 'hum_crc:', hum_crc
        print 'hum:', hum
    return hum

def main():
    """Parse options, configure the Bus Pirate, and run the measurements."""
    # First of all parse the command line
    parser = OptionParser()
    parser.add_option("-d", "--device", dest="device",
        help="serial interface where bus pirate is in.[/dev/bus_pirate]", default="/dev/bus_pirate")
    parser.add_option("-t", "--temperature", action="store_true", dest="temperature",
        help="get temperature from sht.", default=False)
    parser.add_option("-H", "--humidity", dest="humidity", action="store_true",
        help="get humidity from sht.", default=False)
    parser.add_option("-v", "--verbose", dest="verbose",
        help="don't be quiet.", action="store_true")
    (options,args) = parser.parse_args()

    if not (options.temperature or options.humidity):
        parser.print_help()
        exit()

    # Create an instance of the RAW_WIRE class as we are using the BitBang/RAW_WIRE mode
    rw = RAW_WIRE( options.device, 115200 )

    if not rw.BBmode():
        print "Can't enter into BitBang mode."
        exit()

    # We have succesfully activated the BitBang Mode, so we continue with
    # the raw-wire mode.
    if not rw.enter_rawwire():
        print "Can't enable the raw-wire mode."
        exit()

    # Now we have raw-wire mode enabled, so first configure peripherals
    # (Power, PullUps, AUX, CS)
    if not rw.raw_cfg_pins( PinCfg.POWER | PinCfg.PULLUPS):
        print "Error enabling the internal voltage regulators."

    # Configure the raw-wire mode
    if not rw.cfg_raw_wire( (RAW_WIRECfg.BIT_ORDER & RAW_WIRE_BIT_ORDER_TYPE.MSB) | (RAW_WIRECfg.WIRES & RAW_WIRE_WIRES_TYPE.TWO) | (RAW_WIRECfg.OUT_TYPE & RAW_WIRE_OUT_TYPE.HIZ) ):
        print "Error configuring the raw-wire mode."

    # Set raw-wire speed
    if not rw.set_speed( RAW_WIRESpeed._5KHZ ):
        print "Error setting raw-wire speed."

    if options.temperature:
        print "Measuring temperature..."
        temperature = sht_temperature(rw, options)
        print "Temperature: %f°C" % temperature

    if options.humidity:
        print "Measuring humidity..."
        humidity = sht_humidity(rw, options)
        print "Humidity: %f%%" % humidity

    # Reset the bus pirate
    rw.resetBP();

if __name__ == '__main__':
    main()
5,267
1,931
import gc import os import pyalp.io import pyalp.sequence import pyalp.utils from .base import Stimulus class Film(Stimulus): """Film stimulus Parameters ---------- bin_pathname: none | string, optional Path name to the .bin file. vec_pathname: none | string, optional Path name to the .vec file. rate: float, optional Frame rate [Hz]. The default value is 30.0. sequence_size: integer, optional Number of frames each sequence. The default value is 200. interactive: boolean, optional Specify if it should prompt the input parameters. The default value is False. verbose: boolean, optional Verbose mode. The default value is False. """ dirname = os.path.join("E:", "BINVECS") # dirname = os.path.expanduser(os.path.join("~", ".pyalp", "films")) # TODO remove. def __init__(self, bin_pathname=None, vec_pathname=None, rate=30.0, sequence_size=200, interactive=False, verbose=False): Stimulus.__init__(self) self.bin_pathname = bin_pathname self.vec_pathname = vec_pathname self.rate = rate self.sequence_size = sequence_size if interactive: self.prompt_input_arguments() # Read .vec file. self.frame_ids = pyalp.io.load_vec(self.vec_pathname) self.nb_frames = len(self.frame_ids) self.nb_sequences = int(self.nb_frames / self.sequence_size) self.nb_cycles = int(self.nb_sequences / 2) # Read header of .bin file. self.bin_header = pyalp.io.load_bin_header(self.bin_pathname) if verbose: self.print_settings() def prompt_input_arguments(self, sep=""): """Prompt the input arguments. Parameter --------- sep: string, optional Prompt separator. The default value is \"\" """ print(sep) # Print all the user directories. user_dirnames = os.listdir(self.dirname) for user_dirname_id, user_dirname in enumerate(user_dirnames): print(" {}. {}".format(user_dirname_id, user_dirname)) # Prompt user identifier. prompt = "Enter the user number (e.g. 
0): " user_id = pyalp.utils.input(prompt, int) user_dirname = user_dirnames[user_id] user_pathname = os.path.join(self.dirname, user_dirname) print(sep) # Print all the .bin files. bin_pathname = os.path.join(user_pathname, "Bin") bin_filenames = [name for name in os.listdir(bin_pathname) if os.path.isfile(os.path.join(bin_pathname, name))] for bin_filename_id, bin_filename in enumerate(bin_filenames): print(" {}. {}".format(bin_filename_id, bin_filename)) # Prompt .bin filename identifier. prompt = "Enter the .bin file number (e.g. 0): " bin_id = pyalp.utils.input(prompt, int) bin_filename = bin_filenames[bin_id] self.bin_pathname = os.path.join(bin_pathname, bin_filename) print(sep) # Print all the .vec files. vec_pathname = os.path.join(user_pathname, "Vec") vec_filenames = [name for name in os.listdir(vec_pathname) if os.path.isfile(os.path.join(vec_pathname, name))] for vec_filename_id, vec_filename in enumerate(vec_filenames): print(" {}. {}".format(vec_filename_id, vec_filename)) # Prompt .vec filename identifier. prompt = "Enter the .vec file number (e.g. 0): " vec_id = pyalp.utils.input(prompt, int) vec_filename = vec_filenames[vec_id] self.vec_pathname = os.path.join(vec_pathname, vec_filename) print(sep) # Prompt the frame rate. prompt = "Enter the frame rate [Hz] (e.g. {}): ".format(self.rate) self.rate = pyalp.utils.input(prompt, float) print(sep) # Prompt the advanced features. prompt = "Advanced features (y/n): " advanced = pyalp.utils.input(prompt, lambda arg: arg == "y") if advanced: # Prompt the number of frames in each sequence. prompt = "Number of frames in each sequence (e.g. 
{}): ".format(self.sequence_size) self.sequence_size = pyalp.utils.input(prompt, int) print(sep) return def print_settings(self): """Print settings.""" print("----------------- Film stimulus ------------------") print(".bin pathname: {}".format(self.bin_pathname)) print(".vec pathname: {}".format(self.vec_pathname)) print("frame rate: {} Hz".format(self.rate)) print("sequence size: {}".format(self.sequence_size)) print("number of frames: {}".format(self.nb_frames)) print("number of sequences: {}".format(self.nb_sequences)) print("number of cycles: {}".format(self.nb_cycles)) print(".bin header: {}".format(self.bin_header)) print("--------------------------------------------------") print("") return def display(self, device): """Display stimulus. Parameter --------- device: Device ALP device. """ sequence_1 = None sequence_2 = None if self.nb_frames > 0 * self.sequence_size: # i.e. enough frames # 1. Allocate 1st sequence of frames. # Define 1st sequence of frames. sequence_id_1 = 0 nb_frames = min(self.sequence_size, self.nb_frames - 0 * self.sequence_size) sequence_1 = pyalp.sequence.Film(sequence_id_1, self.bin_pathname, self.frame_ids, nb_frames, self.sequence_size, self.rate) # Allocate memory for 1st sequence of frames. device.allocate(sequence_1) # Control the timing properties of 1st sequence display. sequence_1.control_timing() if self.nb_frames > 1 * self.sequence_size: # i.e. enough frames # 2. Allocate 2nd sequence of frames. # Define 2nd sequence of frames. sequence_id_2 = 1 nb_frames = min(self.sequence_size, self.nb_frames - 1 * self.sequence_size) sequence_2 = pyalp.sequence.Film(sequence_id_2, self.bin_pathname, self.frame_ids, nb_frames, self.sequence_size, self.rate) # Allocate memory for 2nd sequence of frames. device.allocate(sequence_2) # Control the timing properties of 2nd sequence display. sequence_2.control_timing() # 3. Play on DMD. # Set up queue mode. 
device.control_projection(queue_mode=True) # Transmit and start 1st sequence of frames into memory. if self.nb_frames > 0 * self.sequence_size: # i.e. enough frames sequence_1.load() sequence_1.start() # Transmit and start 2nd sequence of frames into memory. if self.nb_frames > 1 * self.sequence_size: # i.e. enough frames sequence_2.load() sequence_2.start() # Force garbage collection. gc.collect() # 4. Repeat. for cycle_id in range(1, self.nb_cycles): # a. Wait completion of 1st sequence. device.synchronize() # b. Free 1st sequence. sequence_1.free() # c. Reallocate 1st sequence. sequence_id_1 = 2 * cycle_id + 0 nb_frames = self.sequence_size sequence_1 = pyalp.sequence.Film(sequence_id_1, self.bin_pathname, self.frame_ids, nb_frames, self.sequence_size, self.rate) device.allocate(sequence_1) sequence_1.control_timing() sequence_1.load() sequence_1.start() gc.collect() # d. Wait completion of 2nd sequence. device.synchronize() # e. Free 2nd sequence. sequence_2.free() # f. Reallocate 2nd sequence. sequence_id_2 = 2 * cycle_id + 1 nb_frames = self.sequence_size sequence_2 = pyalp.sequence.Film(sequence_id_2, self.bin_pathname, self.frame_ids, nb_frames, self.sequence_size, self.rate) device.allocate(sequence_2) sequence_2.control_timing() sequence_2.load() sequence_2.start() gc.collect() if self.nb_cycles > 0 and self.nb_frames > (self.nb_cycles * 2 + 0) * self.sequence_size: # i.e. remaining frames # a. Wait completion of 1st sequence. device.synchronize() # b. Free 1st sequence. sequence_1.free() # c. Reallocate 1st sequence. 
sequence_id_1 = 2 * self.nb_cycles + 0 nb_frames = min(self.sequence_size, self.nb_frames - sequence_id_1 * self.sequence_size) sequence_1 = pyalp.sequence.Film(sequence_id_1, self.bin_pathname, self.frame_ids, nb_frames, self.sequence_size, self.rate) device.allocate(sequence_1) sequence_1.control_timing() sequence_1.load() sequence_1.start() gc.collect() if self.nb_cycles > 0 and self.nb_frames > (self.nb_cycles * 2 + 1) * self.sequence_size: # i.e. remaining frames # a. Wait completion of 2nd sequence. device.synchronize() # b. Free 2nd sequence. sequence_id_2 = 2 * self.nb_cycles + 1 nb_frames = min(self.sequence_size, self.nb_frames - sequence_id_2 * self.sequence_size) sequence_2 = pyalp.sequence.Film(sequence_id_2, self.bin_pathname, self.frame_ids, nb_frames, self.sequence_size, self.rate) device.allocate(sequence_2) sequence_2.control_timing() sequence_2.load() sequence_2.start() gc.collect() # 5. Clean up. try: device.wait() sequence_1.free() sequence_2.free() except AttributeError: pass return
10,248
3,104
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # This file is licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. A copy of the # License is located at # # http://aws.amazon.com/apache2.0/ # # This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS # OF ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import boto3 import botocore BUCKET_NAME = 'my-bucket' # replace with your bucket name KEY = 'my_image_in_s3.jpg' # replace with your object key s3 = boto3.resource('s3') try: s3.Bucket(BUCKET_NAME).download_file(KEY, 'my_local_image.jpg') except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": print("The object does not exist.") else: raise #snippet-sourcedescription:[s3-python-example-download-file.py demonstrates how to ...] #snippet-keyword:[Python] #snippet-keyword:[AWS SDK for Python (Boto3)] #snippet-keyword:[Code Sample] #snippet-keyword:[Amazon S3] #snippet-service:[s3] #snippet-sourcetype:[full-example] #snippet-sourcedate:[2018-06-25] #snippet-sourceauthor:[jschwarzwalder]
1,328
500
""" Investigate the text of an alert. """ from screenpy.abilities import BrowseTheWeb from screenpy.actor import Actor from screenpy.pacing import beat class TextOfTheAlert: """Ask what text appears in the alert. Abilities Required: |BrowseTheWeb| Examples:: the_actor.should( See.the(TextOfTheAlert(), ReadsExactly("Danger, Will Robinson!")) ) """ def describe(self) -> str: """Describe the Question..""" return "The text of the alert." @beat("{} reads the text from the alert.") def answered_by(self, the_actor: Actor) -> str: """Direct the Actor to read off the alert's text.""" browser = the_actor.uses_ability_to(BrowseTheWeb).browser return browser.switch_to.alert.text
788
243
import time from threading import Thread COUNT = 50000000 def countdown(n): while n > 0: n -= 1 print ('Done! My final value is {0}'.format(n)) half_count = int(COUNT/2) t1 = Thread(target=countdown, args=(half_count,)) t2 = Thread(target=countdown, args=(half_count,)) start = time.time() t1.start() t2.start() t1.join() t2.join() end = time.time() print('Time taken in seconds -', end - start)
418
165
from clr import AddReference import pandas AddReference("System") AddReference("QuantConnect.Research") AddReference("QuantConnect.Common") AddReference("QuantConnect.Logging") #AddReference("QuantConnect.Data") from System import * from QuantConnect import * from QuantConnect.Logging import * #from Data import * #from QuantConnect.Data import * from QuantConnect.Research import * from datetime import datetime, timedelta from custom_data import QuandlFuture, Nifty import pandas as pd #from System import * #from QuantConnect import * #from QuantConnect.Data import SubscriptionDataSource from QuantConnect.Python import PythonData, PythonQuandl from datetime import datetime import decimal class QuandlFuture(PythonQuandl): '''Custom quandl data type for setting customized value column name. Value column is used for the primary trading calculations and charting.''' def __init__(self): # Define ValueColumnName: cannot be None, Empty or non-existant column name # If ValueColumnName is "Close", do not use PythonQuandl, use Quandl: # self.AddData[QuandlFuture](self.crude, Resolution.Daily) self.ValueColumnName = "Settle" class Nifty(PythonData): '''NIFTY Custom Data Class''' def GetSource(self, config, date, isLiveMode): return SubscriptionDataSource("https://www.dropbox.com/s/rsmg44jr6wexn2h/CNXNIFTY.csv?dl=1", SubscriptionTransportMedium.RemoteFile); def Reader(self, config, line, date, isLiveMode): if not (line.strip() and line[0].isdigit()): return None # New Nifty object index = Nifty(); index.Symbol = config.Symbol try: # Example File Format: # Date, Open High Low Close Volume Turnover # 2011-09-13 7792.9 7799.9 7722.65 7748.7 116534670 6107.78 data = line.split(',') index.Time = datetime.strptime(data[0], "%Y-%m-%d") index.Value = decimal.Decimal(data[4]) index["Open"] = float(data[1]) index["High"] = float(data[2]) index["Low"] = float(data[3]) index["Close"] = float(data[4]) except ValueError: # Do nothing return None return index class SecurityHistoryTest(): def 
__init__(self, start_date, security_type, symbol): self.qb = QuantBook() self.qb.SetStartDate(start_date) self.symbol = self.qb.AddSecurity(security_type, symbol).Symbol self.column = 'close' def __str__(self): return "{} on {}".format(self.symbol.ID, self.qb.StartDate) def test_period_overload(self, period): history = self.qb.History([self.symbol], period) return history[self.column].unstack(level=0) def test_daterange_overload(self, end): start = end - timedelta(1) history = self.qb.History([self.symbol], start, end) return history[self.column].unstack(level=0) class OptionHistoryTest(SecurityHistoryTest): def test_daterange_overload(self, end, start = None): if start is None: start = end - timedelta(1) history = self.qb.GetOptionHistory(self.symbol, start, end) return history.GetAllData() class FutureHistoryTest(SecurityHistoryTest): def test_daterange_overload(self, end, start = None, maxFilter = 182): if start is None: start = end - timedelta(1) self.qb.Securities[self.symbol].SetFilter(0, maxFilter) # default is 35 days history = self.qb.GetFutureHistory(self.symbol, start, end) return history.GetAllData() class FutureContractHistoryTest(): def __init__(self, start_date, security_type, symbol): self.qb = QuantBook() self.qb.SetStartDate(start_date) self.symbol = symbol self.column = 'close' def test_daterange_overload(self, end): start = end - timedelta(1) history = self.qb.GetFutureHistory(self.symbol, start, end) return history.GetAllData() class OptionContractHistoryTest(FutureContractHistoryTest): def test_daterange_overload(self, end): start = end - timedelta(1) history = self.qb.GetOptionHistory(self.symbol, start, end) return history.GetAllData() class CustomDataHistoryTest(SecurityHistoryTest): def __init__(self, start_date, security_type, symbol): self.qb = QuantBook() self.qb.SetStartDate(start_date) if security_type == 'Nifty': type = Nifty self.column = 'close' elif security_type == 'QuandlFuture': type = QuandlFuture self.column = 'settle' else: 
raise self.symbol = self.qb.AddData(type, symbol, Resolution.Daily).Symbol class MultipleSecuritiesHistoryTest(SecurityHistoryTest): def __init__(self, start_date, security_type, symbol): self.qb = QuantBook() self.qb.SetStartDate(start_date) self.qb.AddEquity('SPY', Resolution.Daily) self.qb.AddForex('EURUSD', Resolution.Daily) self.qb.AddCrypto('BTCUSD', Resolution.Daily) def test_period_overload(self, period): history = self.qb.History(self.qb.Securities.Keys, period) return history['close'].unstack(level=0) class FundamentalHistoryTest(): def __init__(self): self.qb = QuantBook() def getFundamentals(self, ticker, selector, start, end): return self.qb.GetFundamental(ticker, selector, start, end) startDate = datetime(2014, 5, 9) a = CompositeLogHandler() securityTestHistory = MultipleSecuritiesHistoryTest(startDate, None, None) #// Get the last 5 candles periodHistory = securityTestHistory.test_period_overload(5) #// Note there is no data for BTCUSD at 2014 #//symbol EURUSD SPY #//time #//2014-05-03 00:00:00 NaN 173.580655 #//2014-05-04 20:00:00 1.387185 NaN #//2014-05-05 20:00:00 1.387480 NaN #//2014-05-06 00:00:00 NaN 173.903690 #//2014-05-06 20:00:00 1.392925 NaN #//2014-05-07 00:00:00 NaN 172.426958 #//2014-05-07 20:00:00 1.391070 NaN #//2014-05-08 00:00:00 NaN 173.423752 #//2014-05-08 20:00:00 1.384265 NaN #//2014-05-09 00:00:00 NaN 173.229931 Console.WriteLine(periodHistory) count = periodHistory.shape[0] Assert.AreEqual(10, count) #// Get the one day of data timedeltaHistory = securityTestHistory.test_period_overload(TimeSpan.FromDays(8)); firstIndex = timedeltaHistory.index.values[0] #// EURUSD exchange time zone is NY but data is UTC so we have a 4 hour difference with algo TZ which is NY Assert.AreEqual(datetime(startDate.years, startDate.days - 8, startDate.hours + 20), firstIndex);
6,829
2,289
# coding=utf-8 from django import forms from app.task.models import SendTask from app.template.models import SendTemplate from app.address.models import MailList from django.utils.translation import ugettext_lazy as _ from django.core.cache import cache class SendTaskForm(forms.ModelForm): user = forms.CharField(label=u'客户', required=False, widget=forms.HiddenInput()) # send_name = forms.CharField(label=u'发送批次', widget=forms.TextInput(attrs={'readonly': 'readonly'})) # template = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple, label=u"选择邮件模板") # maillist = forms.ChoiceField(label=u'选择联系人列表') # sender = forms.ChoiceField(label=u'发件人') def __init__(self, user, *args, **kwargs): super(SendTaskForm, self).__init__(*args, **kwargs) self.user = user # self.fields['template'].choices = [(x.id, x) for x in SendTemplate.objects.filter(user=user, name__isnull=False)] # self.fields['maillist'].choices = [(x.id, x) for x in MailList.objects.filter(customer=user)] def clean_user(self): return self.user class Meta: model = SendTask exclude = [] # fields = ['user'] # exclude = [ # 'id', 'send_acct_type', 'send_acct_domain', 'send_replyto', 'send_fullname', # 'send_maillist', 'send_maillist_id', # 'send_qty', 'send_qty_remark', 'send_time', # 'send_status', 'verify_status', 'time_start', 'time_end', 'updated', 'status' # ] class SendTaskSearchForm(forms.Form): name = forms.CharField(label=u'任务名称', required=False) class TaskExportForm(forms.Form): export_open_or_click = forms.ChoiceField(label=_(u"导出类型"), choices=( ('open', _(u"打开")), ('click', _(u"点击")) ), initial="open") export_is_new_maillist = forms.BooleanField(label=_(u'是否导入到新分类'), widget=forms.CheckboxInput(attrs={ "autocomplete": "off", "onchange": "onchangeIsNewMaillist();", })) export_maillist_name = forms.CharField(label=_(u'分类名称'), initial=_(u"打开/点击地址"), max_length=50, help_text=_(u"默认名称为:打开/点击地址,打开/点击地址将导入此分类中")) export_maillist_id = forms.ModelChoiceField( label=_(u'选择地址池'), queryset=None, 
widget=forms.Select(attrs={ #"data-placeholder": _(u"请选择地址池"), "autocomplete": "off", "class": "select2 ", }), help_text=_(u"选择一个地址分类,打开/点击地址将导入此分类中")) def __init__(self, user, *args, **kwargs): super(TaskExportForm, self).__init__(*args, **kwargs) lists = MailList.objects.filter( customer=user).filter( isvalid=True, is_smtp=False).order_by('-id')[:500] self.fields['export_maillist_id'].queryset= lists
2,765
988
#!/usr/bin/env python3 """ Author: Victoria McDonald email: vmcd@atmos.washington.edu website: http://torimcd.github.com license: BSD """ import matplotlib as mpl mpl.use("Agg") import os import sys import numpy as np import netCDF4 import operator import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap download_path = '/home/vmcd/' # enter the path to the directory where you downloaded the archived data, eg '/home/user/Downloads' filebase = download_path + 'FYSP_clouds_archive/CAM4/' outfileloc = download_path + 'temp_data/' # this is the location to save the processed netcdf files to current = '0.775' cc = '0775' # SWCF fraction variable field = 'SWCF' outfilebase = 'c4_swcf_' casenames = {'07','0725','075','0775', '08','0825','085','0875','09','0925','095','0975','10', '1025', '105', '1075','11'} # 1.0 case outfile_10 = outfileloc + outfilebase + '10.nc' if not os.path.isfile(outfile_10): if os.path.isdir(outfileloc): infile = filebase +'cam4_10.nc' # calc cldlow global average per month syscall = r"//usr//bin//cdo timmean -seltimestep,21/40 -select,name="+field+" "+infile+ " " +outfile_10 os.system(syscall) for c in casenames: # calc swcf outfile_case = outfileloc+outfilebase+c+'.nc' # check directly if the file exists if not os.path.isfile(outfile_case): if os.path.isdir(outfileloc): infile = filebase +'cam4_' + c +'.nc' # calc cldlow global average per month syscall = r"//usr//bin//cdo timmean -seltimestep,21/40 -select,name="+field+" "+infile+ " " +outfile_case os.system(syscall) control = outfile_10 if os.path.isfile(control): dsyear = netCDF4.Dataset(control) control_swcf = dsyear.variables[field][:] dsyear.close() #plot the data dsloc = outfileloc + outfilebase + cc +'.nc' if os.path.isfile(dsloc): # open the merged file and get out some variables dsyear = netCDF4.Dataset(dsloc) lons = dsyear.variables['lon'][:] lats = dsyear.variables['lat'][:] swcf = dsyear.variables[field][:] swcf_units = dsyear.variables[field].units dsyear.close() 
#close the file swcf_diff = list(map(operator.sub, swcf, control_swcf)) #create plot fig = plt.figure() # setup the map m = Basemap(lat_0=0,lon_0=0) m.drawcoastlines() m.drawcountries() # Create 2D lat/lon arrays for Basemap lon2d, lat2d = np.meshgrid(lons, lats) # Plot cs = m.pcolormesh(lon2d,lat2d,np.squeeze(swcf_diff), cmap='RdBu_r', latlon='True', vmin=-60, vmax=60, rasterized=True) # This is the fix for the white lines between contour levels cs.set_edgecolor("face") # Add Colorbar cbar = m.colorbar(cs, location='bottom', pad="10%") cbar.set_label(swcf_units) plt.title('Shortwave Cloud Forcing: ' + r'$\mathsf{S/S_0}$'+' = '+ current) plt.show() fig.savefig('swcf_map_diff_'+cc+'.pdf', bbox_inches='tight')
2,827
1,202
# AntiBiofilm Peptide Research # Department of Computer Science and Engineering, Santa Clara University # Author: Taylor Downey # A python script that uses the optimized hyperparameters found for both # the SVM and the SVR to create a prediction model # Script prints the average RMSE of the full model when run with cross validation # # NOTE: Given the small number of training samples available, the average RMSE # outputted will vary by about +- 5 # ------------------------------------------------------------------------------ # Libraries # ------------------------------------------------------------------------------ import numpy as np import pandas as pd import json import warnings from sklearn import preprocessing from sklearn.decomposition import PCA from sklearn.utils.validation import column_or_1d from sklearn.svm import SVC from sklearn.svm import SVR from sklearn.metrics import mean_squared_error from sklearn.model_selection import RepeatedStratifiedKFold warnings.filterwarnings("ignore") # ------------------------------------------------------------------------------ # Functions # ------------------------------------------------------------------------------ def seperatePeptides(peptides, threshold): columns = ['MBIC'] filterMBIC = (peptides[columns] <= threshold).all(axis=1) lower_peptides = peptides[filterMBIC] filterMBIC = (peptides[columns] > threshold).all(axis=1) upper_peptides = peptides[filterMBIC] return lower_peptides, upper_peptides # ------------------------------------------------------------------------------ # Variables # ------------------------------------------------------------------------------ training_filename = '../../data/mbic_training_data.csv' svm_features_filename = 'mbic_svm_forward_selection_features.json' svr_features_filename = 'mbic_svr_forward_selection_features.json' svr_svm_results = 'full_model_results.txt' # Optimized Hyperparameters svm_c = 10 svm_g = 1000 svm_pca_comp = 6 svm_num_feat = 9 svr_c = 45 svr_g = 40 
svr_pca_comp = 8 svr_num_feat = 9 # ------------------------------------------------------------------------------ # Main # ------------------------------------------------------------------------------ def main(): # Prepare peptides for SVM with open(svm_features_filename) as f: svm_feat_dict = json.load(f) svm_feat_dict = svm_feat_dict[0:svm_num_feat] peptides_svm = pd.read_csv(training_filename) peptides_svm.loc[(peptides_svm['MBIC'] > 64), 'MBIC'] = 0 peptides_svm.loc[(peptides_svm['MBIC'] != 0), 'MBIC'] = 1 # Filter out columns based on feat list labels = peptides_svm.columns.values.tolist() for l in labels: if l == 'MBIC': continue if l not in svm_feat_dict: peptides_svm = peptides_svm.drop(columns=[l]) y_svm = peptides_svm['MBIC'].to_numpy() peptides_svm = peptides_svm.drop(columns=['MBIC']) min_max_scaler = preprocessing.MinMaxScaler() X_norm_svm = min_max_scaler.fit_transform(peptides_svm) pca_svm = PCA(n_components=svm_pca_comp) X_trans_svm = pca_svm.fit_transform(X_norm_svm) SVC_rbf = SVC(kernel='rbf', C=svm_c, gamma=svm_g) # Prepare peptides for SVR with open(svr_features_filename) as f: svr_feat_dict = json.load(f) svr_feat_dict = svr_feat_dict[0:svr_num_feat] peptides_svr = pd.read_csv(training_filename) peptides_svr, _ = seperatePeptides(peptides_svr, 64) # Filter out columns based on feat list labels = peptides_svr.columns.values.tolist() for l in labels: if l == 'MBIC': continue if l not in svr_feat_dict: peptides_svr = peptides_svr.drop(columns=[l]) y_svr = peptides_svr['MBIC'].to_numpy() peptides_svr = peptides_svr.drop(columns=['MBIC']) min_max_scaler_svr = preprocessing.MinMaxScaler() X_norm_svr = min_max_scaler_svr.fit_transform(peptides_svr) pca_svr = PCA(n_components=svr_pca_comp) X_trans_svr = pca_svr.fit_transform(X_norm_svr) SVR_rbf = SVR(kernel='rbf', C=svr_c, gamma=svr_g) # Prepare test set of petides used by svr after training peptides_test_svr = pd.read_csv(training_filename) # Filter out columns based on feat list labels = 
peptides_test_svr.columns.values.tolist() for l in labels: if l == 'MBIC': continue if l not in svr_feat_dict: peptides_test_svr = peptides_test_svr.drop(columns=[l]) y_svr2 = peptides_test_svr['MBIC'].to_numpy() peptides_test_svr = peptides_test_svr.drop(columns=['MBIC']) # Apply svr transformations on test set of peptides for svr X_norm_test_svr = min_max_scaler_svr.transform(peptides_test_svr) X_trans_test_svr = pca_svr.transform(X_norm_test_svr) # Cross validation applied to full model rskf = RepeatedStratifiedKFold(n_splits=5, n_repeats = 20) RMSE = [] cnt = 1 for train_index, test_index in rskf.split(X_trans_svm, y_svm): X_train, X_test = X_trans_svm[train_index], X_trans_svm[test_index] y_train, y_test = y_svm[train_index], y_svm[test_index] y_train = y_train.reshape(-1,1) y_train = column_or_1d(y_train, warn=False) svm_fit = SVC_rbf.fit(X_train, y_train) y_pred = svm_fit.predict(X_test) train_index_svr = [] test_index_svr = [] y_train_svr = [] y_test_svr = [] for i in range(0, len(y_train)): if(y_train[i] == 0): continue else: train_index_svr.append(train_index[i]) X_train_svr = X_trans_svr[train_index_svr] y_train_svr = y_svr[train_index_svr] svr_fit = SVR_rbf.fit(X_train_svr, y_train_svr) y_train_svr = [] for i in range(0, len(y_pred)): if(y_pred[i] == 0): continue else: test_index_svr.append(test_index[i]) X_test_svr = X_trans_test_svr[test_index_svr] y_test_svr = y_svr2[test_index_svr] y_pred_svr = SVR_rbf.predict(X_test_svr) rmse = np.sqrt(mean_squared_error(y_test_svr, y_pred_svr)) cnt = cnt + 1 with open (svr_svm_results, 'a', encoding="utf-8") as sfile: sfile.write(str(rmse) + '\n') RMSE.append(rmse) rmse_avg = np.average(RMSE) print('RMSE average: ' + str(rmse_avg)) if __name__ == "__main__": main()
6,706
2,378
# -*- coding: utf-8 -*- """ Predefined URLs used to make google translate requests. """ BASE = 'https://translate.google.com' TOKEN = 'https://translate.google.com/translate_a/element.js' TRANSLATE = 'https://translate.googleapis.com/translate_a/' TRANSLATEURL = 'https://translate.google.com/_/TranslateWebserverUi/data/batchexecute'
334
110
# Generated by Django 3.1.7 on 2021-04-19 14:09 import django.db.models.deletion from django.db import migrations from django.db import models class Migration(migrations.Migration): dependencies = [ ('stac_api', '0005_auto_20210408_0821'), ] operations = [ migrations.AlterField( model_name='item', name='collection', field=models.ForeignKey( help_text= '\n <div class=SearchUsage>\n Search Usage:\n <ul>\n <li>\n <i>arg</i> will make a non exact search checking if <i>arg</i> is part of\n the collection ID\n </li>\n <li>\n Multiple <i>arg</i> can be used, separated by spaces. This will search for all\n collections ID containing all arguments.\n </li>\n <li>\n <i>"collectionID"</i> will make an exact search for the specified collection.\n </li>\n </ul>\n Examples :\n <ul>\n <li>\n Searching for <i>pixelkarte</i> will return all collections which have\n pixelkarte as a part of their collection ID\n </li>\n <li>\n Searching for <i>pixelkarte 2016 4</i> will return all collection\n which have pixelkarte, 2016 AND 4 as part of their collection ID\n </li>\n <li>\n Searching for <i>ch.swisstopo.pixelkarte.example</i> will yield only this\n collection, if this collection exists. Please note that it would not return\n a collection named ch.swisstopo.pixelkarte.example.2.\n </li>\n </ul>\n </div>', on_delete=django.db.models.deletion.PROTECT, to='stac_api.collection' ), ), ]
1,901
543
#!/usr/bin/python # coding: utf8 from __future__ import absolute_import from geocoder.base import Base from geocoder.keys import opencage_key class OpenCage(Base): """ OpenCage Geocoding Services =========================== OpenCage Geocoder simple, easy, and open geocoding for the entire world Our API combines multiple geocoding systems in the background. Each is optimized for different parts of the world and types of requests. We aggregate the best results from open data sources and algorithms so you don't have to. Each is optimized for different parts of the world and types of requests. API Reference ------------- http://geocoder.opencagedata.com/api.html """ provider = 'opencage' method = 'geocode' def __init__(self, location, **kwargs): self.url = 'http://api.opencagedata.com/geocode/v1/json' self.location = location self.params = { 'query': location, 'key': self._get_api_key(opencage_key, **kwargs), } self._initialize(**kwargs) def _catch_errors(self): if self.content: status = self.content.get('status') if status: self.status_code = status.get('code') message = status.get('message') if self.status_code: self.error = message def _exceptions(self): # Build intial Tree with results if self.parse['results']: self._build_tree(self.parse['results'][0]) licenses = self.parse['licenses'] if licenses: self.parse['licenses'] = licenses[0] @property def lat(self): return self.parse['geometry'].get('lat') @property def lng(self): return self.parse['geometry'].get('lng') @property def address(self): return self.parse.get('formatted') @property def housenumber(self): return self.parse['components'].get('house_number') @property def street(self): return self.parse['components'].get('road') @property def neighborhood(self): neighbourhood = self.parse['components'].get('neighbourhood') if neighbourhood: return neighbourhood elif self.suburb: return self.suburb elif self.city_district: return self.city_district @property def suburb(self): return 
self.parse['components'].get('suburb') @property def city_district(self): return self.parse['components'].get('city_district') @property def city(self): city = self.parse['components'].get('city') if city: return city elif self.town: return self.town elif self.county: return self.county @property def town(self): return self.parse['components'].get('town') @property def county(self): return self.parse['components'].get('county') @property def state(self): return self.parse['components'].get('state') @property def country(self): return self.parse['components'].get('country_code') @property def postal(self): return self.parse['components'].get('postcode') @property def confidence(self): return self.parse.get('confidence') @property def w3w(self): return self.parse['what3words'].get('words') @property def mgrs(self): return self.parse['annotations'].get('MGRS') @property def geohash(self): return self.parse['annotations'].get('geohash') @property def callingcode(self): return self.parse['annotations'].get('callingcode') @property def Maidenhead(self): return self.parse['annotations'].get('Maidenhead') @property def DMS(self): return self.parse.get('DMS') @property def Mercator(self): return self.parse.get('Mercator') @property def license(self): return self.parse.get('licenses') @property def bbox(self): south = self.parse['southwest'].get('lat') north = self.parse['northeast'].get('lat') west = self.parse['southwest'].get('lng') east = self.parse['northeast'].get('lng') return self._get_bbox(south, west, north, east) if __name__ == '__main__': g = OpenCage('1552 Payette dr., Ottawa') print(g.json['mgrs'])
4,463
1,300
a = "3434" a = int(a) print(type(a)) b = 32 b = str(b) print(type(b))
108
79
from .DeepMask import DeepMask from .SharpMask import SharpMask
64
20
"""The systemmonitor integration."""
37
10
from setuptools import setup, find_packages setup( name='strom-cli', author='Adrian Agnic', author_email='adrian@tura.io', version='0.0.1', description='CLI tool for use with Strom', packages=find_packages(), include_package_data=True, install_requires=['click', 'requests'], entry_points=''' [console_scripts] strom=interface.tool:dstream ''', )
396
133
# Generated by Django 2.1.3 on 2019-04-24 19:12 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('post', '0004_post_an_type'), ] operations = [ migrations.CreateModel( name='PostType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(default='Offres', max_length=100)), ], ), migrations.AlterField( model_name='post', name='an_type', field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='post.PostType'), ), ]
776
247
""" python -m aspen =============== Aspen ships with a server (wsgiref.simple_server) that is suitable for development and testing. It can be invoked via: python -m aspen though even for development you'll likely want to specify a project root, so a more likely incantation is: ASPEN_PROJECT_ROOT=/path/to/wherever python -m aspen For production deployment, you should probably deploy using a higher performance WSGI server like Gunicorn, uwsgi, Spawning, or the like. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os from aspen import log_dammit from aspen.website import Website from wsgiref.simple_server import make_server if __name__ == '__main__': website = Website() port = int(os.environ.get('PORT', '8080')) server = make_server('0.0.0.0', port, website) log_dammit("Greetings, program! Welcome to port {0}.".format(port)) server.serve_forever()
1,006
313
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from ax.benchmark2.benchmark import ( benchmark_full_run, benchmark_replication, benchmark_test, ) from ax.benchmark2.benchmark_method import BenchmarkMethod from ax.benchmark2.benchmark_problem import ( BenchmarkProblem, SingleObjectiveBenchmarkProblem, MultiObjectiveBenchmarkProblem, ) from ax.benchmark2.benchmark_result import BenchmarkResult, AggregatedBenchmarkResult __all__ = [ "BenchmarkMethod", "BenchmarkProblem", "SingleObjectiveBenchmarkProblem", "MultiObjectiveBenchmarkProblem", "BenchmarkResult", "AggregatedBenchmarkResult", "benchmark_replication", "benchmark_test", "benchmark_full_run", ]
874
263
from setuptools import setup, Command from distutils.command.build_py import build_py with open('README.md') as infile: long_description = infile.read() from psrecord import __version__ setup( name='metatoenv', version=__version__, description= 'Generate a conda environment file from a conda meta.yaml recipe', long_description=long_description, url='https://github.com/nvaytet/metatoenv', license='BSD-3-Clause', author='Neil Vaytet', packages=['metatoenv'], provides=['metatoenv'], scripts=['scripts/metatoenv'], cmdclass={'build_py': build_py}, classifiers=[ "Programming Language :: Python", "License :: OSI Approved :: BSD License", ], )
724
234
#!/usr/bin/python3 -tt
# -*- coding: utf-8 -*-
# (keep hashbang line for `make install`)
#
# git timestamp — Zeitgitter GIT Timestamping client
#
# Copyright (C) 2019-2021 Marcel Waldvogel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
#
# This has not been modularized for ease of installation

import configargparse
import distutils.util
import os
import re
import sys
import tempfile
import time
import traceback

# Provided e.g. by `pip install python-gnupg` (try with `pip3` if `pip` does not work)
import gnupg
import pygit2 as git
import requests

import deltat

VERSION = '1.1.0'


class GitArgumentParser(configargparse.ArgumentParser):
    """Insert git config options between command line and default.

    WARNING: There is no way to handle custom actions correctly by
    default, so your custom actions need to include a
    `convert_default(value)` method."""

    def __init__(self, *args, **kwargs):
        super(GitArgumentParser, self).__init__(*args, **kwargs)

    def repo_config(self, key):
        """`repo_config(key)` is similar to `repo.config[key]`,
        but `key` can be a comma-separated list of keys.
        It returns the value of the first which exists
        or raises `KeyError` if none is set.
        """
        for k in key.split(','):
            if k in repo.config:
                return repo.config[k]
        raise KeyError("Key%s `%s` not in git config"
                       % ('s' if ',' in key else "", key))

    def add_argument(self, *args, **kwargs):
        """Like ArgumentParser.add_argument(), plus a `gitopt` kwarg:
        the named `git config` key (or comma-separated keys) is consulted
        and, when set, overrides the hard-coded default.  The help text is
        amended to document where the default came from."""
        global repo
        if repo is None and 'gitopt' in kwargs:
            # Called outside a repo (maybe for --help or --version):
            # Ignore repo options
            del kwargs['gitopt']
        elif 'gitopt' in kwargs:
            if 'help' in kwargs:
                kwargs['help'] += '. '
            else:
                kwargs['help'] = ''
            gitopt = kwargs['gitopt']
            try:
                # Custom actions must convert the raw git-config string
                # themselves via their classmethod `convert_default()`.
                if 'action' in kwargs and issubclass(kwargs['action'],
                                                     configargparse.Action):
                    try:
                        val = kwargs['action'].convert_default(
                            self.repo_config(gitopt))
                    except AttributeError:
                        raise NotImplementedError(
                            "Custom action `%r' passed "
                            "to GitArgumentParser does not support "
                            "`convert_default()' method." % kwargs['action'])
                else:
                    val = self.repo_config(gitopt)
                kwargs['help'] += "Defaults to '%s' from `git config %s`" % (
                    val, gitopt.replace(',', ' or '))
                if 'default' in kwargs:
                    kwargs['help'] += "; fallback default: '%s'" % kwargs['default']
                kwargs['default'] = val
                # A value from git config satisfies `required`.
                if 'required' in kwargs:
                    del kwargs['required']
            except KeyError:
                # Key not set in git config: keep hard-coded default.
                kwargs['help'] += "Can be set by `git config %s`" % gitopt
                if 'default' in kwargs:
                    kwargs['help'] += "; fallback default: '%s'" % kwargs['default']
            del kwargs['gitopt']
        return super(GitArgumentParser, self).add_argument(*args, **kwargs)
    add = add_argument


def asciibytes(data):
    """For Python 2/3 compatibility:
    If it is 'bytes' already, do nothing,
    otherwise convert to ASCII Bytes"""
    if isinstance(data, bytes):
        return data
    else:
        return data.encode('ASCII')


def timestamp_branch_name(fields):
    """Return the first field except 'www', 'igitt', '*stamp*', 'zeitgitter'
    'localhost:8080' is returned as 'localhost-8080'"""
    for f in fields:
        i = f.replace(':', '-')
        if (i != '' and i != 'www' and i != 'igitt' and i != 'zeitgitter'
                and 'stamp' not in i and valid_name(i)):
            return i + '-timestamps'
    return 'zeitgitter-timestamps'


class DefaultTrueIfPresent(configargparse.Action):
    """Option which is True when given without a value, False when absent,
    and otherwise parses its value as a boolean string."""

    def __call__(self, parser, namespace, values, option_string=None):
        if values is None:
            # Bare `--option` means True.
            values = True
        else:
            try:
                values = self.convert_default(values)
            except ValueError:
                raise configargparse.ArgumentError(
                    self, "Requires boolean value")
        setattr(namespace, self.dest, values)

    @classmethod
    def convert_default(cls, value):
        # Accepts true/false/yes/no/0/1 etc.
        return bool(distutils.util.strtobool(value))


# Short names accepted by --server, mapped to their canonical hosts.
server_aliases = {
    "gitta": "gitta.zeitgitter.net",
    "diversity": "diversity.zeitgitter.net"
}


def expanded_aliases():
    """Human-readable 'alias → host' list for the --server help text."""
    return ', '.join(map(lambda t: "%s → %s" % t, server_aliases.items()))


def get_args():
    """Parse command line and git config parameters"""
    parser = GitArgumentParser(
        auto_env_var_prefix='timestamp_',
        add_help=False,
        description="""Interface to Zeitgitter, the network of
            independent GIT timestampers.""",
        epilog="""`--tag` takes precedence over `--branch`. When in doubt,
            use `--tag` for single/rare timestamping, and `--branch` for
            frequent timestamping. `bool` values can be specified as
            true/false/yes/no/0/1. Arguments with optional `bool` options
            default to true if the argument is present, false if absent.
            Environment variable `ZEITGITTER_FAKE_TIME` can be used for
            repeatable tests against a local Zeitgitter server under test,
            see there.""")
    parser.add('--help', '-h',
               action='help',
               help="""Show this help message and exit. When called as
               'git timestamp' (space, not dash), use `-h`, as `--help`
               is captured by `git` itself.""")
    parser.add('--version',
               action='version', version="git timestamp v%s" % VERSION,
               help="Show program's version number and exit")
    parser.add('--tag',
               help="Create a new timestamped tag named TAG")
    parser.add('--branch',
               gitopt='timestamp.branch',
               help="""Create a timestamped commit in branch BRANCH,
               with identical contents as the specified commit.
               Default name derived from servername, appending
               `-timestamps`, and, possibly, by the effects of
               `--append-branch-name`.""")
    parser.add('--server',
               default='https://gitta.zeitgitter.net',
               gitopt='timestamp.server',
               help="""Comma-separated list of Zeitgitter servers to
               obtain timestamps from. 'https://' is optional.
               The following aliases are supported: """
               + expanded_aliases())
    parser.add('--interval',
               default='0s',
               gitopt='timestamp.interval',
               help="""Delay between timestamping against the different
               timestampers. For consistent ordering of timestamps, set
               this to at least <maximum clock skew>+1s.""")
    parser.add('--append-branch-name',
               default=True,
               action=DefaultTrueIfPresent,
               metavar='bool',
               gitopt='timestamp.append-branch-name',
               help="""Whether to append the branch name of the current
               branch to the timestamp branch name, i.e., create
               per-branch timestamp branches. (Default branch name will
               never be appended.)""")
    parser.add('--default-branch',
               gitopt='timestamp.defaultBranch',
               default="main,master",
               help="""Comma-separated list of default branch names, i.e.
               those, where the branch name will not automatically be
               appended to. `git config init.defaultBranch`, if it
               exists, is always appended to this list.""")
    parser.add('--gnupg-home',
               gitopt='timestamp.gnupg-home',
               help="Where to store timestamper public keys")
    parser.add('--enable', nargs='?',
               action=DefaultTrueIfPresent,
               metavar='bool',
               gitopt='timestamp.enable',
               help="""Forcibly enable/disable timestamping operations;
               mainly for use in `git config`""")
    parser.add('--require-enable', action='store_true',
               help="""Disable operation unless
               `git config timestamp.enable` has explicitely been set
               to true""")
    parser.add('--quiet', '-q', nargs='?',
               action=DefaultTrueIfPresent,
               metavar='bool',
               gitopt='timestamp.quiet',
               help="Suppress diagnostic messages, only print fatal errors")
    parser.add('commit', nargs='?',
               default='HEAD', metavar='COMMIT',
               gitopt='timestamp.commit-branch',
               help="""Which commit-ish to timestamp. Must be a branch
               name for branch timestamps with `--append-branch-name`""")
    arg = parser.parse_args()
    # Post-process: parse the interval and assemble the default-branch list.
    arg.interval = deltat.parse_time(arg.interval)
    arg.default_branch = arg.default_branch.split(',')
    try:
        arg.default_branch.append(repo.config['init.defaultBranch'])
    except KeyError:
        pass
    # --enable may be None (unset), True, or False.
    if arg.enable == False:
        sys.exit("Timestamping explicitely disabled")
    if arg.require_enable and arg.enable != True:
        sys.exit("Timestamping not explicitely enabled")
    return arg


def ensure_gnupg_ready_for_scan_keys():
    """`scan_keys()` on older GnuPG installs returns an empty list when
    `~/.gnupg/pubring.kbx` has not yet been created. `list_keys()` or most
    other commands will create it. Trying to have no match (for speed).
    Probing for the existance of `pubring.kbx` would be faster, but would
    require guessing the path of GnuPG-Home."""
    gpg.list_keys(keys='arbitrary.query@creates.keybox')


def validate_key_and_import(text, args):
    """Is this a single key? Then import it"""
    ensure_gnupg_ready_for_scan_keys()
    # scan_keys() only accepts a file, so spool the key to a temp file.
    f = tempfile.NamedTemporaryFile(mode='w', delete=False)
    f.write(text)
    f.close()
    info = gpg.scan_keys(f.name)
    os.unlink(f.name)
    if len(info) != 1 or info[0]['type'] != 'pub' or len(info[0]['uids']) == 0:
        sys.exit("Invalid key returned\n"
                 "Maybe not a Zeitgitter server or ~/.gnupg permission problem")
    res = gpg.import_keys(text)
    count = res.count  # pylint: disable=maybe-no-member
    if count == 1 and not args.quiet:
        print("Imported new key %s: %s"
              % (info[0]['keyid'], info[0]['uids'][0]))
    return (info[0]['keyid'], info[0]['uids'][0])


def get_global_config_if_possible():
    """Try to return global git configuration, which normally lies in
    `~/.gitconfig`. However (https://github.com/libgit2/pygit2/issues/915),
    `get_global_config()` fails, if the underlying file does not exist yet.
    (The [paths may be determined](https://github.com/libgit2/pygit2/issues/915#issuecomment-503300141)
    by `pygit2.option(pygit2.GIT_OPT_GET_SEARCH_PATH,
    pygit2.GIT_CONFIG_LEVEL_GLOBAL)` and similar.)

    Therefore, we do not simply `touch ~/.gitconfig` first, but
    1. try `get_global_config()` (raises `IOError` in Python2,
       `OSError` in Python3),
    2. try `get_xdg_config()` (relying on the alternative global location
       `$XDG_CONFIG_HOME/git/config`, typically aka `~/.config/git/config`
       (this might fail due to the file not being there either (`OSError`,
       `IOError`), or because the installed `libgit2`/`pygit2` is too old
       (`AttributeError`; function added in 2014 only),
    3. `touch ~/.gitconfig` and retry `get_global_config()`, and,
       as fallback
    4. use the repo's `.git/config`, which should always be there."""
    try:
        return git.Config.get_global_config()  # 1
    except (IOError, OSError):
        try:
            return git.Config.get_xdg_config()  # 2
        except (IOError, OSError, AttributeError):
            try:
                sys.stderr.write("INFO: Creating global .gitconfig\n")
                with open(os.path.join(
                        git.option(  # pylint: disable=maybe-no-member
                            git.GIT_OPT_GET_SEARCH_PATH,  # pylint: disable=maybe-no-member
                            git.GIT_CONFIG_LEVEL_GLOBAL),  # pylint: disable=maybe-no-member
                        '.gitconfig'), 'a'):
                    pass
                return git.Config.get_global_config()  # 3
            except (IOError, OSError):
                sys.stderr.write("INFO: Cannot record key ID in global config,"
                                 " falling back to repo config\n")
                return repo.config  # 4
    # Not reached


def get_keyid(args):
    """Return keyid/fullname from git config, if known.

    Otherwise, request it from server and remember TOFU-style"""
    # Derive the config key name from the server URL: strip scheme and
    # trailing slashes, then map everything outside [0-9a-z] to '-'.
    keyname = args.server
    if keyname.startswith('http://'):
        keyname = keyname[7:]
    elif keyname.startswith('https://'):
        keyname = keyname[8:]
    while keyname.endswith('/'):
        keyname = keyname[0:-1]
    # Replace everything outside 0-9a-z with '-':
    keyname = ''.join(map(lambda x:
                          x if (x >= '0' and x <= '9')
                          or (x >= 'a' and x <= 'z') else '-',
                          keyname))
    try:
        keyid = repo.config['timestamper.%s.keyid' % keyname]
        keys = gpg.list_keys(keys=keyid)
        if len(keys) == 0:
            sys.stderr.write("WARNING: Key %s missing in keyring;"
                             " refetching timestamper key\n" % keyid)
            raise KeyError("GPG Key not found")  # Evil hack
        return (keyid, repo.config['timestamper.%s.name' % keyname])
    except KeyError:
        # Obtain key in TOFU fashion and remember keyid
        r = requests.get(args.server,
                         params={'request': 'get-public-key-v1'}, timeout=30)
        quit_if_http_error(args.server, r)
        (keyid, name) = validate_key_and_import(r.text, args)
        if not os.getenv('FORCE_GIT_REPO_CONFIG'):
            gcfg = get_global_config_if_possible()
        else:
            gcfg = repo.config
        gcfg['timestamper.%s.keyid' % keyname] = keyid
        gcfg['timestamper.%s.name' % keyname] = name
        return (keyid, name)


def sig_time():
    """Current time, unless in test mode"""
    return int(os.getenv('ZEITGITTER_FAKE_TIME', time.time()))


def validate_timestamp(stamp):
    """Is this timestamp within ± of now?"""
    now = sig_time()
    # Allow a ±30 s window
    return stamp > now - 30 and stamp < now + 30


def time_str(seconds):
    """Format Unix timestamp in ISO format"""
    return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(seconds))


def validate_timestamp_zone_eol(header, text, offset):
    """Does this line end with a current timestamp and GMT?

    Returns start of next line."""
    # Layout checked: 10-digit epoch seconds, then ' +0000\n' (7 chars).
    stamp = text[offset:offset + 10]
    try:
        istamp = int(stamp)
        sigtime = sig_time()
        if not validate_timestamp(istamp):
            sys.exit("Ignoring returned %s timestamp (%s) as possible falseticker\n"
                     "(off by %d seconds compared to this computer's time; check clock)"
                     % (header, time_str(istamp), istamp - sigtime))
    except ValueError:
        sys.exit("Returned %s timestamp '%s' is not a number"
                 % (header, stamp))
    tz = text[offset + 10:offset + 17]
    if tz != ' +0000\n':
        sys.exit("Returned %s timezone is not GMT or not at end of line,\n"
                 "but '%s' instead of '%s'"
                 % (header, repr(tz), repr(' +0000\n')))
    return offset + 17


def verify_signature_and_timestamp(keyid, signed, signature, args):
    """Is the signature valid and the signature timestamp within range
    as well?"""
    # gnupg's verify_data() needs the detached signature in a file.
    f = tempfile.NamedTemporaryFile(mode='w', delete=False)
    f.write(signature)
    f.close()
    verified = gpg.verify_data(f.name, signed)
    if not verified.valid:
        sys.exit("Not a valid OpenPGP signature")
    os.remove(f.name)
    if not validate_timestamp(int(verified.sig_timestamp)):
        sigtime = sig_time()
        sys.exit("Signature timestamp (%d, %s) too far off now (%d, %s)"
                 % (verified.sig_timestamp, time_str(verified.sig_timestamp),
                    sigtime, time_str(sigtime)))
    if keyid != verified.key_id and keyid != verified.pubkey_fingerprint:
        sys.exit("Received signature with key ID %s; but expected %s -- refusing"
                 % (verified.key_id, keyid))


def validate_tag(text, commit, keyid, name, args):
    """Check this tag head to toe"""
    if len(text) > 8000:
        sys.exit("Returned tag too long (%d > 8000)" % len(text))
    if not re.match('^[ -~\n]*$', text, re.MULTILINE):
        sys.exit("Returned tag does not only contain ASCII chars")
    # Expected raw git tag object header (trailing space before the
    # timestamp validated next is significant).
    lead = '''object %s
type commit
tag %s
tagger %s ''' % (commit.id, args.tag, name)
    if not text.startswith(lead):
        sys.exit("Expected signed tag to start with:\n"
                 "> %s\n\nInstead, it started with:\n> %s\n"
                 % (lead.replace('\n', '\n> '), text.replace('\n', '\n> ')))
    pos = validate_timestamp_zone_eol('tagger', text, len(lead))
    if text[pos] != '\n':
        sys.exit("Signed tag has unexpected data after 'tagger' header")
    pgpstart = text.find('\n-----BEGIN PGP SIGNATURE-----\n\n', len(lead))
    if pgpstart >= 0:
        # Signed part is everything up to (and including) the newline
        # preceding the armor; the armor itself is the detached signature.
        signed = asciibytes(text[:pgpstart + 1])
        signature = text[pgpstart + 1:]
        verify_signature_and_timestamp(keyid, signed, signature, args)
    else:
        sys.exit("No OpenPGP signature found")


def quit_if_http_error(server, r):
    """Abort with a helpful message on a redirect (server moved) or any
    non-200 response."""
    if r.status_code == 301:
        sys.exit("Timestamping server URL changed from %s to %s\n"
                 "Please change this on the command line(s) or run\n"
                 "    git config [--global] timestamp.server %s"
                 % (server, r.headers['Location'], r.headers['Location']))
    if r.status_code != 200:
        sys.exit("Timestamping request failed; server responded with %d %s"
                 % (r.status_code, r.reason))


def timestamp_tag(repo, keyid, name, args):
    """Obtain and add a signed tag"""
    try:
        commit = repo.revparse_single(args.commit)
    except KeyError as e:
        sys.exit("No such revision: '%s'" % (e,))
    if not valid_name(args.tag):
        sys.exit("Tag name '%s' is not valid for timestamping" % args.tag)
    try:
        r = repo.lookup_reference('refs/tags/' + args.tag)
        sys.exit("Tag '%s' already in use" % args.tag)
    except KeyError:
        # Tag does not exist yet: good.
        pass
    try:
        r = requests.post(args.server,
                          data={
                              'request': 'stamp-tag-v1',
                              'commit': commit.id,
                              'tagname': args.tag
                          },
                          allow_redirects=False)
        quit_if_http_error(args.server, r)
        validate_tag(r.text, commit, keyid, name, args)
        # Store the raw signed tag object and point the tag ref at it.
        tagid = repo.write(
            git.GIT_OBJ_TAG,  # pylint: disable=maybe-no-member
            r.text)
        repo.create_reference('refs/tags/%s' % args.tag, tagid)
    except requests.exceptions.ConnectionError as e:
        sys.exit("Cannot connect to server: %s" % e)


def validate_branch(text, keyid, name, data, args):
    """Check this branch commit head to toe"""
    if len(text) > 8000:
        sys.exit("Returned branch commit too long (%d > 8000)" % len(text))
    if not re.match('^[ -~\n]*$', text, re.MULTILINE):
        sys.exit("Returned branch commit does not only contain ASCII chars")
    # Expected raw commit header: tree, optional previous timestamp-branch
    # head as first parent, the timestamped commit as (second) parent, then
    # author (trailing space before the timestamp is significant).
    lead = 'tree %s\n' % data['tree']
    if 'parent' in data:
        lead += 'parent %s\n' % data['parent']
    lead += '''parent %s
author %s ''' % (data['commit'], name)
    if not text.startswith(lead):
        sys.exit("Expected signed branch commit to start with:\n"
                 "> %s\n\nInstead, it started with:\n> %s\n"
                 % (lead.replace('\n', '\n> '), text.replace('\n', '\n> ')))
    pos = validate_timestamp_zone_eol('tagger', text, len(lead))
    follow = 'committer %s ' % name
    if not text[pos:].startswith(follow):
        sys.exit("Committer in signed branch commit does not match")
    pos = validate_timestamp_zone_eol('committer', text, pos + len(follow))
    if not text[pos:].startswith('gpgsig '):
        sys.exit("Signed branch commit missing 'gpgsig' after 'committer'")
    sig = re.match('^-----BEGIN PGP SIGNATURE-----\n \n'
                   '[ -~\n]+\n -----END PGP SIGNATURE-----\n\n',
                   text[pos + 7:], re.MULTILINE)
    if not sig:
        sys.exit("Incorrect OpenPGP signature in signed branch commit")
    signature = sig.group()
    # Everything except the signature
    signed = asciibytes(text[:pos] + text[pos + 7 + sig.end() - 1:])
    # Undo the in-commit continuation-line indentation of the armor.
    signature = signature.replace('\n ', '\n')
    verify_signature_and_timestamp(keyid, signed, signature, args)


def valid_name(name):
    """Can be sanely, universally stored as file name.

    pygit2.reference_is_valid_name() would be better, but is too new
    [(2018-10-17)](https://github.com/libgit2/pygit2/commit/1a389cc0ba360f1fd53f1352da41c6a2fae92a66)
    to rely on being available."""
    return (re.match('^[_a-z][-._a-z0-9]{,99}$', name, re.IGNORECASE)
            and '..' not in name and not '\n' in name)


def append_branch_name(repo, commit_name, branch_name, default_branches):
    """Appends current branch name if not the default branch"""
    explanation = "for (implicit) options `--branch` and `--append-branch-name`"
    if commit_name == 'HEAD':
        try:
            comref = repo.lookup_reference(commit_name)
            comname = comref.target
        except git.InvalidSpecError:  # pylint: disable=maybe-no-member
            # 1. If HEAD or it's target is invalid, we end up here
            sys.exit("Invalid HEAD " + explanation)
        # Two more options remain:
        # 2. If HEAD points to a branch, then we now have its name (a `str`
        #    starting with 'refs/heads/') and can proceed;
        # 3. if it is detached, it points to a commit (a `Oid`) and we fail;
        # 4. there might be some other cases, which should fail as well.
        # To be able to test for case 2, we convert `comname` to `str`.
        if str(comname).startswith('refs/heads/'):
            comname = comname[len('refs/heads/'):]
        else:
            sys.exit(("HEAD must point to branch, not %s\n" + explanation)
                     % comname)
    else:
        # 5. Explicit and non-HEAD commit given; check for branch name only:
        #    proceed;
        try:
            comref = repo.lookup_reference('refs/heads/' + commit_name)
            comname = commit_name  # Branch name itself
        except (KeyError, git.InvalidSpecError):  # pylint: disable=maybe-no-member
            # 6. Explicit commit given, but it's neither HEAD nor tail^H^H^H^H
            #    a branch: fail
            sys.exit(("%s must be a branch name " + explanation) % commit_name)
    # Now that we know which branch to timestamp (to), construct it.
    if comname in default_branches:
        return branch_name
    else:
        extended_name = "%s-%s" % (branch_name, comname)
        if valid_name(extended_name):
            return extended_name
        else:
            sys.exit(("Branch name %s is not valid for timestamping\n"
                      "(constructed from base timestamp branch %s and "
                      "source branch %s)\n" + explanation)
                     % (extended_name, branch_name, comname))


def timestamp_branch(repo, keyid, name, args, first):
    """Obtain and add branch commit; create/update branch head"""
    # If the base name is already invalid, it cannot become valid by appending
    if not valid_name(args.branch):
        sys.exit("Branch name %s is not valid for timestamping" % args.branch)
    if args.append_branch_name:
        args.branch = append_branch_name(repo, args.commit, args.branch,
                                         args.default_branch)
    try:
        commit = repo.revparse_single(args.commit)
    except KeyError as e:
        sys.exit("No such revision: '%s'" % (e,))
    branch_head = None
    data = {
        'request': 'stamp-branch-v1',
        'commit': commit.id,
        'tree': commit.tree.id
    }
    try:
        branch_head = repo.lookup_reference('refs/heads/' + args.branch)
        if branch_head.target == commit.id:
            # Would create a merge commit with the same parent twice
            sys.exit("Cannot timestamp head of timestamp branch to itself")
        data['parent'] = branch_head.target
        try:
            if (repo[branch_head.target].parent_ids[0] == commit.id
                    or repo[branch_head.target].parent_ids[1] == commit.id):
                sys.exit("Already timestamped commit %s to branch %s"
                         % (commit.id.hex, args.branch))
        except IndexError:
            # Head of timestamp branch has fewer than 2 parents: fine.
            pass
    except KeyError:
        # Timestamp branch does not exist yet: first timestamp on it.
        pass
    # Space out requests to multiple timestampers (see --interval).
    if not first:
        time.sleep(args.interval.total_seconds())
    try:
        r = requests.post(args.server, data=data, allow_redirects=False)
        quit_if_http_error(args.server, r)
        validate_branch(r.text, keyid, name, data, args)
        # Store the raw signed commit and (force-)move the branch head.
        commitid = repo.write(
            git.GIT_OBJ_COMMIT,  # pylint: disable=maybe-no-member
            r.text)
        repo.create_reference('refs/heads/' + args.branch, commitid,
                              force=True)
    except requests.exceptions.ConnectionError as e:
        sys.exit("Cannot connect to server: %s" % e)


def main():
    """Entry point: discover the repo, parse args, then timestamp either a
    single tag/branch or (default) one branch per configured server."""
    global repo, gpg
    requests.__title__ = 'git-timestamp/%s %s' % (VERSION, requests.__title__)
    try:
        # Depending on the version of pygit2, `git.discover_repository()`
        # returns `None` or raises `KeyError`
        path = git.discover_repository(  # pylint: disable=maybe-no-member
            os.getcwd())
    except KeyError:
        path = None
    if path is not None:
        repo = git.Repository(path)
    else:
        repo = None
    args = get_args()
    # Only check after parsing the arguments, so --version and --help work
    if repo is None:
        sys.exit("Not a git repository")
    try:
        gpg = gnupg.GPG(gnupghome=args.gnupg_home)
    except TypeError:
        traceback.print_exc()
        sys.exit("*** `git timestamp` needs `python-gnupg`"
                 " module from PyPI, not `gnupg`\n"
                 " Possible remedy: `pip uninstall gnupg;"
                 " pip install python-gnupg`\n"
                 " (try `pip2`/`pip3` if it does not work with `pip`)")
    if args.tag is not None or args.branch is not None:
        # Single tag or branch against one timestamping server
        if ',' in args.server:
            (server, _) = args.server.split(',', 1)
            args.server = server
            # BUG FIX: the second half of this message used to be a plain
            # (non-f) string literal, so "{server}" was printed verbatim
            # instead of the server name; both parts are f-strings now.
            print(f"WARNING: Cannot timestamp single tag/branch against"
                  f" multiple servers;\nonly timestamping against {server}")
        (keyid, name) = get_keyid(args)
        if args.tag:
            timestamp_tag(repo, keyid, name, args)
        else:
            timestamp_branch(repo, keyid, name, args, True)
    else:
        # Automatic branch, with support for multiple timestamping servers
        success = True
        first = True
        for server in args.server.split(','):
            if server in server_aliases:
                server = server_aliases[server]
            if ':' not in server:
                server = 'https://' + server
            fields = server.replace('/', '.').split('.')
            args.branch = timestamp_branch_name(fields[1:])
            args.server = server
            try:
                (keyid, name) = get_keyid(args)
                timestamp_branch(repo, keyid, name, args, first)
                first = False  # Only on successful timestamp
            except SystemExit as e:
                # BUG FIX: `e.code` can be a non-string (e.g. an int exit
                # status); format via %s instead of concatenating so this
                # error path cannot itself raise TypeError.
                sys.stderr.write("%s\n" % e.code)
                success = False
        if not success:
            sys.exit(1)


if __name__ == "__main__":
    main()
28,931
8,618
""" Some elements of the finite difference routines were adapted from HP Langtangen's wonderful book on the FD method for python: https://hplgit.github.io/fdm-book/doc/pub/book/html/._fdm-book-solarized001.html """ import numpy as np from scipy.integrate import simps class Wave1D: """ A utility class for simulating the wave equation in 1 dimension using a finite difference """ def __init__(self,config): """ Constructor 1 dimensional wave system Inputs: config: A dict containing parameters for the system, which must have the following keys: time_interval: (float > 0) the temporal interval between time steps wave_speed: (float > 0) the speed of standing waves on the bridge, related to material tension system_length: (float > 0) the lengthe of the system num_lattice_points: (int > 0) how many discrete points along the length of the system to use for the finite difference scheme num_force_points: (int > 0) how many pistons the system has force_width: (int > 0) how wide the gaussian spread of each piston is """ self.dt = config['time_interval'] self.c_speed = config['wave_speed'] self.L = config['system_length'] self.Nx = config['num_lattice_points'] # How many points along the domain can impulse force be applied self.num_force_points = config['num_force_points'] # Set the locations of the force application self.force_locations = np.linspace(0.0,self.L,self.num_force_points+2)[1:self.num_force_points+1] # How wide is the profile of each impulse force, must be > 0 self.force_width = config['force_width'] # Scale the force width by system length self.force_width *= self.L # The lattice spacing self.dx = float(self.L)/float(self.Nx) # Mesh points in space self.x_mesh = np.linspace(0.0,self.L,self.Nx+1) # The courant number self.C = self.c_speed *self.dt/self.dx self.C2 = self.C**2 #helper number # Recalibrate the resolutions to account for rounding self.dx = self.x_mesh[1] - self.x_mesh[0] # We set up the conditions of the system before warmup period # The system is always 
initially at rest self.Velocity_0 = lambda x: 0 # We assume the system starts completely flat self.Initial_Height = lambda x: 0 # Allocate memory for the recursive solution arrays self.height = np.zeros(self.Nx + 1) # Solution array at new time level self.height_n = np.zeros(self.Nx + 1) # Solution at 1 time level back self.height_nm1 = np.zeros(self.Nx + 1) # Solution at 2 time levels back self.height_traj=[] self.action_traj=[] self.reset() def reset(self): """ Resets the state of the wave system """ # We reset the time and step index self.t = 0 self.n = 0 # We set the force vals to zero self.force_vals = np.zeros(self.num_force_points) # We set the initial condition of the solution 1 time level back for i in range(0,self.Nx+1): self.height_n[i]=self.Initial_Height(self.x_mesh[i]) # We do a special first step for the finite difference scheme for i in range(1,self.Nx): self.height[i] =self.height_n[i] + self.dt*self.Velocity_0(self.x_mesh[i]) self.height[i]+=0.5*self.C2*(self.height_n[i-1] - 2*self.height_n[i] + self.height_n[i+1]) self.height[i]+=0.5*(self.dt**2)*self.impulse_term(self.x_mesh[i]) # Force boundary conditions self.height[0]=0 self.height[self.Nx]=0 # Switch solution steps self.height_nm1[:] = self.height_n self.height_n[:] = self.height def single_step(self): """ Run a single step of the wave equation finite difference dynamics """ self.t += self.dt self.n += 1 for i in range(1,self.Nx): self.height[i] = -self.height_nm1[i] + 2*self.height_n[i] self.height[i] += self.C2*(self.height_n[i-1] - 2*self.height_n[i] + self.height_n[i+1]) self.height[i] += (self.dt**2)*self.impulse_term(self.x_mesh[i]) # Force boundary conditions self.height[0] = 0 self.height[self.Nx] = 0 # Switch solution steps self.height_nm1[:] = self.height_n self.height_n[:] = self.height def take_in_action(self,action): """ This method acts as the interface where the agent applies an action to environment. 
For this simulator, it's simply a setter method for the force_vals attribute that determine the profile of the impulse term. """ self.force_vals = np.copy(action) def impulse_term(self,x): """ The function definition for the active damping terms Inputs: x - a scalar, position in the domain force_vals - A vector of shape (self.num_force_points), the (signed) values of the force at each piston point """ return np.sum(self.force_vals*np.exp(-0.5* ((x-self.force_locations)**2 )/self.force_width)) def get_impulse_profile(self): """ A utility function for returning an array representing the shape of the resulting impulse force, this is used for rendering the history of actions taken by the agent. Inputs: force_vals - A vector of shape (self.num_force_points), the (signed) values of the force at each piston point """ profile = [] for i in range(self.Nx+1): profile.append(self.impulse_term(self.x_mesh[i])) return np.array(profile) def get_observation(self): """ This is an interface that returns the observation of the system, which is modeled as the state of the wave system for the current timestep, previous timestep, and twice previous timestep. Outputs: observation - An array of shape (1,self.Nx+1,3). observation[0,:,0]= self.height, observation[0,:,1]=self.height_n, and observation[0,:,2]=self.height_nm1 """ observation = np.zeros((1,self.Nx+1,3)) observation[0,:,0]= self.height observation[0,:,1]=self.height_n observation[0,:,2]=self.height_nm1 return observation def energy(self): """ Computes the internal energy of the system based upon the integral functional for the 1-D wave equation. 
Additionally we add an L2 norm regularizer See http://web.math.ucsb.edu/~grigoryan/124A/lecs/lec7.pdf for details """ dudt = (self.height-self.height_nm1)/self.dt # Time derivative dudx = np.gradient(self.height,self.x_mesh) # Space derivative space_term = -self.height*np.gradient(dudx,self.x_mesh) # Alternative tension energy energy_density = dudt**2 + (self.C**2)*(dudx**2) energy_density += self.height**2 # Regularize with L2 norm # Energy_density = dudt**2 + (self.c_speed**2)*space_term return 0.5*simps(energy_density,self.x_mesh)
7,335
2,284
"""AyudaEnPython: https://www.facebook.com/groups/ayudapython Contributor: Carolina Morán Source: https://github.com/CarolinaMoran03/Juego-de-ahorcado-con-frase/blob/main/Juego%20de%20ahorcado%20con%20frase """ participante=input("Ingrese nombre del participante: ") print(participante.upper()) def run(): frases = ["Vive tu momento", "Nunca subestimes el poder de la musica", "Nunca olvides lo mucho que te ame tu familia", "Amo mi locura", "Que nadie te diga que no","Carlos Rivera", "No me ponga cero inge", "La fuerza estara contigo"] cantidad = len(frases) numero = 0 while numero < 1 or numero > cantidad: numero = int(input("Ingrese el numero de frase que desea revelar (1 al {c}): ".format(c=cantidad))) frase = frases[numero-1] patron = "" for i in frase: if i == " ": patron += " " else: patron += "_" patron = list(patron) presentar(patron) vidas = 5 cont = 0 a = 10 while vidas > 0: letra = input("Ingrese letra: ") x = 0 for i in frase: if letra.lower() == i.lower(): patron[x] = letra x += 1 if letra in patron: print("Felicitaciones ganaste", a, " puntos") cont += a presentar(patron) if "_" not in patron: print("FELICIDADES",participante.upper(), "Acabas De Adivinar La Frase") print("Obtuvistes:", cont, " Puntos") break if letra not in patron: vidas -= 1 print("Te Equivocaste, Te Quedan", +vidas, "Intentos") presentar(patron) else: print("Chale",participante.upper(), "Acabas de Perder, Como Cuando La Perdistes A Ella") print("Tienes:", cont, " Puntos, Gracias Por Participar") def presentar(patron): p = "" for i in patron: p = p + i print(p) if __name__ == "__main__": run()
1,977
687
import torch.nn as nn
from abc import ABC, abstractmethod


class EncoderBase(nn.Module):
    """Abstract base for encoders used in full answer generation.

    Keeps the pretrained embedding and the config object on the module so
    that concrete subclasses can compare different embedding types.
    """

    @abstractmethod
    def __init__(self, pretrained_emb, __C):
        """Store the pretrained embedding and configuration.

        Args:
            pretrained_emb ([Tensor]): Extracted pretrained embedding;
                taken as an argument so different embedding types can be
                swapped in and compared later.
            __C (object): Config object.
        """
        super(EncoderBase, self).__init__()
        self.pretrained_emb = pretrained_emb
        self.__C = __C

    @abstractmethod
    def forward(self, question, answer):
        """Encode a (question, answer) pair; concrete encoders must override.

        Args:
            question ([Tensor]): Indices of questions after tokenizing/padding.
            answer ([Tensor]): Indices of answers after tokenizing/padding.

        Raises:
            NotImplementedError: always, on the base class.
        """
        raise NotImplementedError
946
253
from abc import abstractmethod
from pmaf.biome._metakit import BiomeFeatureMetabase, BiomeSampleMetabase


class BiomeSurveyBackboneMetabase(BiomeFeatureMetabase, BiomeSampleMetabase):
    """Abstract interface for a biome survey backbone.

    Combines the feature and sample metabase interfaces and adds
    survey-specific accessors; all members must be provided by subclasses.
    """

    @abstractmethod
    def to_assembly(self):
        """Convert this survey into an assembly representation."""
        pass

    @property
    @abstractmethod
    def essentials(self):
        """Essential components of the survey."""
        pass

    @property
    @abstractmethod
    def assemblies(self):
        """Assemblies that make up the survey."""
        pass

    @property
    @abstractmethod
    def controller(self):
        """Controller object managing this survey."""
        pass
467
135
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""

import pynamics
from pynamics.tree_node import TreeNode
from pynamics.vector import Vector
from pynamics.rotation import Rotation, RotationalVelocity
from pynamics.name_generator import NameGenerator
from pynamics.quaternion import Quaternion
import sympy

class Frame(NameGenerator):
    """A named reference frame with unit basis vectors x, y, z.

    Direct relationships to other frames are stored per type ('R' for
    rotations, 'w' for rotational velocities) in ``self.connections``;
    composed, on-demand results are cached in ``self.precomputed``.
    ``self.tree`` holds connectivity used to find a path between frames.
    """
    def __init__(self,name,system):
        super(Frame,self).__init__()
        # Direct user-declared relationships, keyed by the other frame.
        self.connections={}
        self.connections['R'] = {}
        self.connections['w'] = {}
        # Cache of composed relationships built lazily in get_generic.
        self.precomputed={}
        self.precomputed['R'] = {}
        self.precomputed['w'] = {}
        # Connectivity trees ('R' and 'w') used for path finding.
        self.tree={}
        self.tree['R'] = TreeNode(self)
        self.tree['w'] = TreeNode(self)
        self.reps = {}
        self.name = name
        # Basis vectors: each carries a single unit component in this frame.
        self.x = Vector()
        self.y = Vector()
        self.z = Vector()
        # Symbols representing this frame's axes inside sympy expressions.
        self.x_sym = sympy.Symbol(name+'.x')
        self.y_sym = sympy.Symbol(name+'.y')
        self.z_sym = sympy.Symbol(name+'.z')
        self.syms = sympy.Matrix([self.x_sym,self.y_sym,self.z_sym])
        self.x.add_component(self,[1,0,0])
        self.y.add_component(self,[0,1,0])
        self.z.add_component(self,[0,0,1])
        # Identity relationships from this frame to itself.
        r = Rotation(self,self,sympy.Matrix.eye(3),Quaternion(0,0,0,0))
        w = RotationalVelocity(self,self,sympy.Number(0)*self.x,Quaternion(0,0,0,0))
        self.add_generic(r,'R')
        self.add_generic(w,'w')
        self.system = system
        self.system.add_frame(self)

    def add_generic(self,rotation,my_type):
        # Register a direct relationship, keyed by the frame at the far end.
        self.connections[my_type][rotation.other(self)] = rotation

    def add_precomputed_generic(self,rotation,my_type):
        # Cache a composed relationship so the path walk isn't repeated.
        self.precomputed[my_type][rotation.other(self)] = rotation

    @property
    def principal_axes(self):
        """The three basis vectors of this frame."""
        return [self.x,self.y,self.z]

    def __str__(self):
        return self.name

    def __repr__(self):
        return str(self)

    def get_generic(self,other,my_type):
        """Return the 'R' or 'w' relationship from this frame to *other*.

        Direct and previously cached relationships are returned as-is;
        otherwise the relationship is composed along the tree path between
        the frames, cached on both endpoints, and returned.
        """
        if other in self.connections[my_type]:
            return self.connections[my_type][other]
        elif other in self.precomputed[my_type]:
            return self.precomputed[my_type][other]
        else:
            # Walk the rotation tree to get the chain of intermediate frames.
            path = self.tree['R'].path_to(other.tree['R'])
            path = [item.myclass for item in path]
            from_frames = path[:-1]
            to_frames = path[1:]
            if my_type=='R':
                items = [from_frame.connections[my_type][to_frame].get_r_to(to_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
                q_items = [from_frame.connections[my_type][to_frame].get_rq_to(to_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
            elif my_type=='w':
                items = [from_frame.connections[my_type][to_frame].get_w_to(to_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
            item_final= items.pop(0)
            if my_type=='R':
                # Rotations compose multiplicatively (matrices and quaternions).
                # The loops also leave `to_frame` bound to the path's final
                # frame; direct single-hop links were handled above, so the
                # loops run at least once here.
                q_item_final= q_items.pop(0)
                for item,to_frame in zip(items,to_frames[1:]):
                    item_final = item*item_final
                for q_item,to_frame in zip(q_items,to_frames[1:]):
                    q_item_final = q_item*q_item_final
                result = Rotation(self,to_frame,item_final,q_item_final)
            elif my_type=='w':
                # Rotational velocities compose additively.
                for item,to_frame in zip(items,to_frames[1:]):
                    item_final += item
                result = RotationalVelocity(self,to_frame,item_final,Quaternion(0,0,0,0))
            # Cache the composite on both endpoints before returning it.
            self.add_precomputed_generic(result,my_type)
            to_frame.add_precomputed_generic(result,my_type)
            return result

    def get_r_to(self,other):
        """Rotation matrix relating this frame to *other* (delegates to the stored Rotation)."""
        return self.get_generic(other,'R').get_r_to(other)

    def get_r_from(self,other):
        """Inverse-direction rotation matrix between *other* and this frame."""
        return self.get_generic(other,'R').get_r_from(other)

    def get_rq_to(self,other):
        """Quaternion form of the rotation relating this frame to *other*."""
        return self.get_generic(other,'R').get_rq_to(other)

    def get_rq_from(self,other):
        """Inverse-direction quaternion between *other* and this frame."""
        return self.get_generic(other,'R').get_rq_from(other)

    def get_w_from(self,other):
        """Rotational velocity of this frame as seen from *other*."""
        return self.get_generic(other,'w').get_w_from(other)

    def get_w_to(self,other):
        """Rotational velocity of *other* as seen from this frame."""
        return self.get_generic(other,'w').get_w_to(other)

    def set_generic(self,other,item,my_type):
        """Wrap *item* in a Rotation/RotationalVelocity edge between self and
        *other* and register it on both frames.  The quaternion slot is filled
        with a zero quaternion."""
        if my_type=='R':
            result = Rotation(self, other, item,Quaternion(0,0,0,0))
        elif my_type=='w':
            result = RotationalVelocity(self, other, item,Quaternion(0,0,0,0))
        self.add_generic(result,my_type)
        other.add_generic(result,my_type)

    def set_parent_generic(self,parent,item,my_type):
        # Register the relationship and hang this frame under *parent* in the tree.
        self.set_generic(parent,item,my_type)
        parent.tree[my_type].add_branch(self.tree[my_type])

    def set_child_generic(self,child,item,my_type):
        # Register the relationship and hang *child* under this frame in the tree.
        self.set_generic(child,item,my_type)
        self.tree[my_type].add_branch(child.tree[my_type])

    def set_w(self,other,w):
        """Declare *other*'s rotational velocity *w* relative to this frame."""
        self.set_child_generic(other,w,'w')

    def rotate_fixed_axis(self,fromframe,axis,q,system):
        """Orient this frame by rotating *fromframe* about the constant *axis*
        by coordinate *q*.

        Raises Exception if any axis entry is not a literal constant.
        NOTE(review): set_parent_generic already registers the items and adds
        the tree branches, so the explicit add_generic/add_branch calls below
        look redundant -- confirm before simplifying.
        """
        import pynamics.misc_tools
        if not all([pynamics.misc_tools.is_literal(item) for item in axis]):
            raise(Exception('not all axis variables are constant'))
        rotation = Rotation.build_fixed_axis(fromframe,self,axis,q,system)
        rotational_velocity = RotationalVelocity.build_fixed_axis(fromframe,self,axis,q,system)
        self.set_parent_generic(fromframe,rotation,'R')
        self.set_parent_generic(fromframe,rotational_velocity,'w')
        self.add_generic(rotation,'R')
        self.add_generic(rotational_velocity,'w')
        fromframe.add_generic(rotation,'R')
        fromframe.add_generic(rotational_velocity,'w')
        fromframe.tree['R'].add_branch(self.tree['R'])
        fromframe.tree['w'].add_branch(self.tree['w'])
5,912
2,017
from typing import *
import enum
import datetime
import warnings
from couchbase.exceptions import InvalidArgumentException

Src = TypeVar('Src')
Dest = TypeVar('Dest')
Functor = TypeVar('Functor', bound=Callable[[Src], Dest])
SrcToDest = TypeVar('SrcToDest', bound=Callable[[Src], Dest])
DestToSrc = TypeVar('DestToSrc', bound=Callable[[Dest], Src])


class Bijection(Generic[Src, Dest, SrcToDest, DestToSrc]):
    def __init__(
            self,
            src_to_dest,  # type: SrcToDest
            dest_to_src=None,  # type: DestToSrc
            parent=None  # type: Bijection[Dest,Src]
    ):
        # type: (...) -> None
        """
        Bijective mapping for JSON serialisation/deserialisation

        :param src_to_dest: callable to convert Src type to Dest
        :param dest_to_src: callable to convert Dest type to Src
        :param parent: internal use only - used to construct the inverse
        """
        self._src_to_dest = src_to_dest
        if parent:
            # Constructed internally as the inverse of `parent`.
            self._inverse = parent
        else:
            # Eagerly build the inverse so -(-b) is b itself, not a copy.
            self._inverse = Bijection(dest_to_src, parent=self)

    def __neg__(self):
        # type: (...) -> Bijection[Dest,Src]
        """
        Generate the inverse of this bijection (Dest to Src)
        :return: the inverse of this bijection
        """
        return self._inverse

    def __call__(self,
                 src  # type: Src
                 ):
        # type: (...) -> Dest
        """
        Return the Src to Dest transform on src
        :param src: source to be transformed
        :return: transformed data as type Dest
        """
        return self._src_to_dest(src)


def identity(input: Src) -> Src:
    """Identity transform: return the argument unchanged."""
    return input


class Identity(Bijection[Src, Src, identity, identity]):
    """Type-checking identity bijection: passes values through unchanged
    but rejects values that are not instances of the given type."""

    def __init__(self, type: Type[Src]):
        self._type = type
        # Passes itself as both transforms; __call__ below does the work.
        super(Identity, self).__init__(self, self)

    def __call__(self, x: Src) -> Src:
        if not isinstance(x, self._type):
            raise InvalidArgumentException(
                "Argument must be of type {} but got {}".format(
                    self._type, x))
        return x


Enum_Type = TypeVar('Enum_Type', bound=enum.Enum)


class EnumToStr(Generic[Enum_Type]):
    """Forward transform turning an enum member into its string value.

    With enforce=False, a raw string that matches one of the enum's values
    is accepted (deprecated usage) and passed through with a warning.
    """

    def __init__(self, type: Type[Enum_Type], enforce=True):
        self._type = type
        self._enforce = enforce

    def __call__(self, src: Enum_Type) -> str:
        if not self._enforce and isinstance(
                src, str) and src in map(lambda x: x.value, self._type):
            warnings.warn("Using deprecated string parameter {}".format(src))
            return src
        if not isinstance(src, self._type):
            raise InvalidArgumentException(
                "Argument must be of type {} but got {}".format(
                    self._type, src))
        return src.value


class StrToEnum(Generic[Enum_Type]):
    """Reverse transform turning a string value back into an enum member."""

    def __init__(self, type: Enum_Type):
        self._type = type

    def __call__(self, dest: str
                 ) -> Enum_Type:
        return self._type(dest)


class StringEnum(
        Bijection[Enum_Type, str, EnumToStr[Enum_Type], StrToEnum[Enum_Type]]):
    """Strict enum <-> string bijection."""

    def __init__(self, type: Type[Enum_Type]):
        super(StringEnum, self).__init__(EnumToStr(type), StrToEnum(type))


class StringEnumLoose(
        Bijection[Enum_Type, str, EnumToStr[Enum_Type], StrToEnum[Enum_Type]]):
    def __init__(self, type: Type[Enum_Type]):
        """
        Like StringEnum bijection, but allows use of string constants as src
        (falling back to identity transform)
        :param type: type of enum
        """
        super(
            StringEnumLoose, self).__init__(
            EnumToStr(
                type, False), StrToEnum(type))


NumberType = TypeVar('NumberType', bound=Union[float, int])


class TimedeltaToSeconds(object):
    """Forward transform: timedelta -> total seconds as the given number type.
    Bare numbers are accepted and converted directly."""

    def __init__(self, dest_type: Type[NumberType]):
        self._numtype = dest_type

    def __call__(self,
                 td: datetime.timedelta) -> float:
        if isinstance(td, (float, int)):
            return self._numtype(td)
        return self._numtype(td.total_seconds())


def _seconds_to_timedelta(seconds: NumberType) -> datetime.timedelta:
    """Reverse transform: seconds -> timedelta, mapping range errors to
    InvalidArgumentException (with the original exception chained)."""
    try:
        return datetime.timedelta(seconds=seconds)
    except (OverflowError, ValueError) as e:
        raise InvalidArgumentException(
            "Invalid duration arg: {} ".format(seconds)) from e


class Timedelta(Bijection[datetime.timedelta,
                          NumberType,
                          TimedeltaToSeconds,
                          _seconds_to_timedelta]):
    """timedelta <-> seconds bijection with a configurable numeric type."""

    def __init__(self, dest_type: Type[NumberType]):
        super(
            Timedelta, self).__init__(
            TimedeltaToSeconds(dest_type),
            _seconds_to_timedelta)


class Division(Bijection[float, float, float.__mul__, float.__mul__]):
    """Divide on the forward path, multiply back on the reverse path."""

    def __init__(self, divisor):
        super(Division, self).__init__((1 / divisor).__mul__, divisor.__mul__)


# NOTE(review): the variable name and the TypeVar's declared name differ
# ('Orig_Mapping' vs 'OrigMapping').
Orig_Mapping = TypeVar(
    'OrigMapping', bound=Mapping[str, Mapping[str, Bijection]])


class BijectiveMapping(object):
    def __init__(self,
                 fwd_mapping: Orig_Mapping
                 ):
        """
        Bijective mapping for JSON serialisation/deserialisation.
        Will calculate the reverse mapping of the given forward mapping.

        :param fwd_mapping: the forward mapping from Src to Dest
        """
        self.mapping = dict()
        self.reverse_mapping = dict()
        for src_key, transform_dict in fwd_mapping.items():
            self.mapping[src_key] = {}
            for dest_key, transform in transform_dict.items():
                self.mapping[src_key][dest_key] = transform
                # Reverse direction uses the negated (inverse) bijection.
                self.reverse_mapping[dest_key] = {src_key: -transform}

    @staticmethod
    def convert(mapping: Orig_Mapping,
                raw_info: Mapping[str, Any]) -> Mapping[str, Any]:
        """Apply `mapping` to every entry of `raw_info`; keys with no entry
        fall back to a type-agnostic identity transform."""
        converted = {}
        for k, v in raw_info.items():
            entry = mapping.get(k, {k: Identity(object)})
            for dest, transform in entry.items():
                try:
                    converted[dest] = transform(v)
                except InvalidArgumentException as e:
                    # NOTE(review): assumes the exception exposes `.message`
                    # -- confirm against the couchbase exceptions API.
                    raise InvalidArgumentException(
                        "Problem processing argument {}: {}".format(
                            k, e.message))
        return converted

    def sanitize_src(self, src_data):
        """Hook for subclasses; default is a no-op pass-through."""
        return src_data

    def to_dest(self, src_data):
        """
        Convert src data to destination format
        :param src_data: source data
        :return: the converted data
        """
        return self.convert(self.mapping, src_data)

    def to_src(self, dest_data):
        """
        Convert dest_data to source format
        :param dest_data: destination data
        :return: the converted data
        """
        return self.convert(self.reverse_mapping, dest_data)
6,828
1,982
import threading
import sys
import os
import signal

# Make the bundled slackbot package importable before the imports below.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "components/slack")))

from slackbot.bot import Bot
from pydispatch import dispatcher
from components.dispatcher.signals import Signals, Senders
import components.devices.doorbell_monitor as dm
import components.devices.camera as cam
import components.devices.lock as lock
import components.devices.gpio_cleanup as gpio
import components.devices.speakers as spkr
import components.devices.speech as speech
import components.slack.slack_sender as ss
import components.slack.slack_uploader as slackUpload
import components.slack.imagebin_uploader as imagebinUpload
import components.slack.imgur_uploader as imgurUpload
import components.slack.user_manager as um


def main():
    """Start the device threads, then run the (blocking) Slack side."""
    start_device_processing()
    start_slack_processing()


def start_device_processing():
    """Spawn one daemon thread per hardware component.

    Each thread target is a component class; daemon threads die with the
    main process rather than keeping it alive.
    """
    monitor = threading.Thread(target=dm.DoorbellMonitor)
    monitor.daemon = True
    print("Starting doorbell monitor")
    monitor.start()
    audio = threading.Thread(target=spkr.Speakers)
    audio.daemon = True
    print("Starting audio")
    audio.start()
    tts = threading.Thread(target=speech.Speech)
    tts.daemon = True
    print("Starting Text to Speech")
    tts.start()
    camera = threading.Thread(target=cam.Camera)
    camera.daemon = True
    print("Starting camera")
    camera.start()
    lock_control = threading.Thread(target=lock.Lock)
    lock_control.daemon = True
    print("Starting lock control")
    lock_control.start()
    gpio_cleanup = threading.Thread(target=gpio.GPIOCleanup)
    gpio_cleanup.daemon = True
    print("Starting GPIO cleanup module")
    gpio_cleanup.start()


def start_slack_processing():
    """Spawn the Slack helper threads and run the bot in this thread."""
    sender = threading.Thread(target=ss.SlackSender)
    sender.daemon = True
    print("Starting Slack Sender")
    sender.start()
    # Alternative upload backends, currently disabled.
    #slack_uploader = threading.Thread(target=slackUpload.SlackUploader)
    #slack_uploader.daemon = True
    #print("Starting Slack file uploader")
    #slack_uploader.start()
    #imagebinUploader = threading.Thread(target=imagebinUpload.ImagebinUploader)
    #imagebinUploader.daemon = True
    #print("Starting Imagebin Uploader")
    #imagebinUploader.start()
    imgurUploader = threading.Thread(target=imgurUpload.ImgurUploader)
    imgurUploader.daemon = True
    print("Starting imgur Uploader")
    imgurUploader.start()
    bot = Bot()
    print("Starting Slack bot")
    user_manager = um.UserManager()
    # NOTE(review): reaches into Bot's private `_client` attribute.
    user_manager.set_users(bot._client.users)
    bot.run()  # blocks until the process is interrupted


def cleanup():
    """Broadcast cleanup/exit signals to all components, then exit."""
    print("Caught interrupt...")
    dispatcher.send(Signals.CLEANUP, sender=Senders.SLACKBOT)
    dispatcher.send(Signals.EXIT, sender=Senders.SLACKBOT)
    exit(0)


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        cleanup()
2,836
916
"""Draw a weighted random sample of lines, without replacement.

Usage: script.py <input_file> <probabilities_file> <sample_size>
where probabilities_file holds one float per input line.
"""
import sys
import codecs

import numpy as np

input_file = sys.argv[1]
probabilities_file = sys.argv[2]
sample_size = int(sys.argv[3])

# Read the candidate lines and their per-line selection probabilities.
with codecs.open(input_file, 'r', 'utf-8') as handle:
    input_list = [row.strip() for row in handle]

with codecs.open(probabilities_file, 'r', 'utf-8') as handle:
    probabilities_list = [float(row.strip()) for row in handle]

# One weighted draw of `sample_size` distinct lines, printed in drawn order.
sampled = np.random.choice(input_list, p=probabilities_list,
                           size=sample_size, replace=False)
for line in sampled:
    print(line)
605
221
""" Collection of MXNet linear algebra functions, wrapped to fit Ivy syntax and signature. """ # global import mxnet as _mx import numpy as _np # local import ivy as _ivy from typing import Union, Tuple def matrix_norm(x, p=2, axes=None, keepdims=False): axes = (-2, -1) if axes is None else axes if isinstance(axes, int): raise Exception('if specified, axes must be a length-2 sequence of ints,' 'but found {} of type {}'.format(axes, type(axes))) return _mx.nd.norm(x, p, axes, keepdims=keepdims) cholesky = lambda x: _mx.np.linalg.cholesky(x.as_np_ndarray()).as_nd_ndarray() def vector_to_skew_symmetric_matrix(vector): batch_shape = list(vector.shape[:-1]) # BS x 3 x 1 vector_expanded = _mx.nd.expand_dims(vector, -1) # BS x 1 x 1 a1s = vector_expanded[..., 0:1, :] a2s = vector_expanded[..., 1:2, :] a3s = vector_expanded[..., 2:3, :] # BS x 1 x 1 zs = _mx.nd.zeros(batch_shape + [1, 1]) # BS x 1 x 3 row1 = _mx.nd.concat(*(zs, -a3s, a2s), dim=-1) row2 = _mx.nd.concat(*(a3s, zs, -a1s), dim=-1) row3 = _mx.nd.concat(*(-a2s, a1s, zs), dim=-1) # BS x 3 x 3 return _mx.nd.concat(*(row1, row2, row3), dim=-2) def qr(x, mode): return _mx.np.linalg.qr(x, mode=mode)
1,286
551
from modulos.query import *
from modulos.splash_screen import *

if __name__ == '__main__':
    # Show the splash screen for 2 seconds, then run the interactive
    # query loop forever (no exit condition; stopped by interrupting
    # the process).
    splash_screen(segundos = 2)
    while True:
        query()
157
56
# Create a numpy array from the weight_lb list with the correct units. Multiply by 0.453592 to go from pounds to kilograms. # Store the resulting numpy array as np_weight_kg. # Use np_height_m and np_weight_kg to calculate the BMI of each player. # Use the following equation: # BMI = weight(kg) / height (m3) # save the resulting numpy array as bmi # Print out bmi. # height and weight are available as regular lists # Import numpy import numpy as np # Create array from height_in with metric units: np_height_m np_height_m = np.array(height_in) * 0.0254 # Create array from weight_lb with metric units: np_weight_kg np_weight_kg = np.array(weight_lb) * 0.453592 # Calculate the BMI: bmi bmi = np_weight_kg / np_height_m ** 2 # Print out bmi print(bmi)
759
262
from aws_cdk import (
    aws_autoscaling as autoscaling,
    aws_ec2 as ec2,
    aws_elasticloadbalancingv2 as elbv2,
    aws_wafv2 as wafv2,
    core,
)


class LoadBalancerStack(core.Stack):
    """CDK stack: an autoscaled fleet of httpd instances behind an
    internet-facing ALB, plus a WAF web ACL (not yet attached)."""

    def __init__(self, app: core.App, id: str) -> None:
        super().__init__(app, id)

        vpc = ec2.Vpc(self, "VPC")

        # Fix: read the user-data script with a context manager so the
        # file handle is closed (it was previously leaked).
        with open("./httpd.sh", "rb") as script:
            data = script.read()
        httpd = ec2.UserData.for_linux()
        httpd.add_commands(str(data, 'utf-8'))

        # Auto Scaling Group of t2.micro Amazon Linux 2 instances that run
        # the httpd user-data script on boot.
        asg = autoscaling.AutoScalingGroup(
            self,
            "ASG",
            vpc=vpc,
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO
            ),
            machine_image=ec2.AmazonLinuxImage(generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            user_data=httpd,
        )

        # Internet-facing load balancer forwarding port 80 to the ASG.
        lb = elbv2.ApplicationLoadBalancer(
            self, "LB",
            vpc=vpc,
            internet_facing=True)
        listener = lb.add_listener("Listener", port=80)
        listener.add_targets("Target", port=80, targets=[asg])
        listener.connections.allow_default_port_from_any_ipv4("Open to the world")

        # Scale the fleet on request rate.
        asg.scale_on_request_count("AModestLoad", target_requests_per_second=1)
        core.CfnOutput(self, "LoadBalancer", export_name="LoadBalancer", value=lb.load_balancer_dns_name)

        # === #
        # WAF #
        # === #
        # TODO #10 apply the web_acl to a resource
        # no method to apply the web_acl to a resource in version 1.75.0
        web_acl = wafv2.CfnWebACL(
            scope_=self,
            id="waf",
            default_action=wafv2.CfnWebACL.DefaultActionProperty(),
            scope="REGIONAL",
            visibility_config=wafv2.CfnWebACL.VisibilityConfigProperty(
                cloud_watch_metrics_enabled=True,
                metric_name="waf-web-acl",
                sampled_requests_enabled=True
            )
        )


def main():
    """Synthesize the CDK app containing the load balancer stack."""
    app = core.App()
    LoadBalancerStack(app, "LoadBalancerStack")
    app.synth()


if __name__ == "__main__":
    main()
2,072
716
from typing import Any

from selenium import webdriver  # type: ignore


class BrowserSession:  # pragma: no cover
    """Thin wrapper around a selenium WebDriver chosen by browser name."""

    def __init__(self, given_browser: Any) -> None:
        """Remember the requested browser and open the default Firefox session.

        NOTE(review): "fireprofile" is passed positionally, so it becomes
        the first Firefox constructor argument (a profile) -- confirm that
        is intended.
        """
        self.browser = given_browser
        self.session = webdriver.Firefox("fireprofile")

    def start(self) -> None:
        """Switch to the requested browser, if it is not the default.

        Fix: the default Firefox session opened in __init__ was previously
        leaked (left running) when replaced here; quit it first.
        """
        if self.browser == "safari":
            self.session.quit()
            self.session = webdriver.Safari()
        elif self.browser == "chrome":
            self.session.quit()
            self.session = webdriver.Chrome()

    def stop(self) -> None:
        """Close the current session's window."""
        self.session.close()

    def current(self) -> Any:
        """Return the underlying WebDriver instance."""
        return self.session
575
167
from django.template import Library

from editor.models import STAMP_STATUS_CHOICES

register = Library()


@register.inclusion_tag('stamp.html')
def stamp(status):
    """Render the stamp.html fragment for the given status.

    'draft' is special-cased (it is not in STAMP_STATUS_CHOICES); any
    status not found in the choices keeps an empty label, as before.
    """
    if status == 'draft':
        return {'status': 'draft', 'label': 'Draft'}
    # dict().get replaces the original manual linear scan over the
    # choices; '' preserves the old fallback for unknown statuses.
    label = dict(STAMP_STATUS_CHOICES).get(status, '')
    return {'status': status, 'label': label}
411
133
from .cid import UbxCID
from .frame import UbxFrame
from .types import CH, U1, X1, X4, Padding


class UbxCfgNmea_(UbxFrame):
    """Shared base for the UBX-CFG-NMEA message family (message ID 0x17
    in the CFG class)."""
    CID = UbxCID(UbxCID.CLASS_CFG, 0x17)
    NAME = 'UBX-CFG-NMEA'


class UbxCfgNmeaPoll(UbxCfgNmea_):
    """Poll request: an empty UBX-CFG-NMEA frame asking the receiver to
    report its current NMEA configuration."""
    NAME = UbxCfgNmea_.NAME + '-POLL'

    def __init__(self):
        super().__init__()

    def _cls_response(self):
        # Frame class expected as the receiver's answer to this poll.
        return UbxCfgNmea


class UbxCfgNmea(UbxCfgNmea_):
    """UBX-CFG-NMEA configuration frame with its payload field layout."""

    def __init__(self):
        super().__init__()
        # Fields are added in on-the-wire payload order.
        self.f.add(X1('filter'))
        self.f.add(U1('nmeaVersion'))
        self.f.add(U1('numSV'))
        self.f.add(X1('flags'))
        self.f.add(X4('gnssToFilter'))
        self.f.add(U1('svNumbering'))
        self.f.add(U1('mainTalkerId'))
        self.f.add(U1('gsvTalkerId'))
        self.f.add(U1('version'))
        self.f.add(CH(2, 'bdsTalkerId'))
        self.f.add(Padding(6, 'res1'))
864
395
# Number-guessing game (Russian UI): the player thinks of a number and
# answers yes/no ("да"/"нет") questions; the script narrows down each
# digit and finally prints its guess.
print('Загадайте число')
num = 'да'
l = 4
# Determine the digit count: keep decreasing l while the player answers
# "да" (yes) to "is your digit count less than l?".
while num == 'да':
    l -= 1
    num = input(f'Количество цифр вашего числа меньше {l}? : ')  # expects "да" or "нет"
num_2 = 'да'
dig = ''
# For each digit position (l down to 1), count down from 9 while the
# player says the digit is smaller; the first "нет" fixes that digit.
while l > 0:
    number = 10
    while num_2 == 'да':
        number -= 1
        num_2 = input(f'Ваша {l}-e цифра меньше {number}? :')  # expects "да" or "нет"
    l -= 1
    # Prepend the newly determined digit to the result string.
    dig = str(number) + dig
    num_2 = 'да'
print(dig)
390
175
import unittest

from quality_inspection.quality_inspector import QualityInspector
from quality_inspection.schema_definition import SchemaDefinition
from quality_inspection.tests.data_loader import DataLoader


class QualityInspectorTest(unittest.TestCase):
    """Tests for QualityInspector's integrity/specification/quality metrics.

    Fix: the deprecated ``assertAlmostEquals`` alias (removed in Python 3.12)
    is replaced by ``assertAlmostEqual`` everywhere, matching the rest of the
    file.
    """

    def setUp(self) -> None:
        self.inspector = QualityInspector()

    def test_inspect_inferred(self) -> None:
        # arrange
        samples = DataLoader.load_samples()
        # act
        schema_definition = SchemaDefinition.create(DataLoader.load_schema())
        result = self.inspector.inspect(samples, schema_definition)
        # assert
        self.assertEqual(1.0, result.attribute_integrity)
        self.assertEqual(.0, result.attribute_specification)
        self.assertEqual(.5, result.attribute_quality_index)

    def test_inspect_avro(self) -> None:
        # arrange
        samples = DataLoader.load_samples()
        # act
        schema_definition = SchemaDefinition.create(DataLoader.load_schema(), False)
        result = self.inspector.inspect(samples, schema_definition)
        # assert
        self.assertEqual(1.0, result.attribute_integrity)
        self.assertEqual(.625, result.attribute_specification)
        self.assertEqual(.8125, result.attribute_quality_index)

    def test_inspect_json(self) -> None:
        # arrange
        samples = DataLoader.load_samples()
        # act
        schema_definition = SchemaDefinition.create(DataLoader.load_schema_json(), False)
        result = self.inspector.inspect(samples, schema_definition)
        # assert
        self.assertEqual(1.0, result.attribute_integrity)
        self.assertEqual(.625, result.attribute_specification)
        self.assertEqual(.8125, result.attribute_quality_index)

    def test_inspect_with_specified_field(self):
        # arrange
        samples = [
            {"random_int": 1},
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer")],
            ["random_int"]
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(.5, result.attribute_specification)
        self.assertEqual(1, result.attribute_integrity)
        self.assertEqual(.75, result.attribute_quality_index)

    def test_inspect_with_unspecified_field(self):
        # arrange
        samples = [
            {"random_int": 1},
        ]
        schema_definition = DataLoader.expand_schema(
            [],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(0, result.attribute_specification)
        self.assertEqual(1, result.attribute_integrity)
        self.assertEqual(.5, result.attribute_quality_index)

    def test_inspect_with_missing_field(self):
        # arrange
        samples = [
            {"random_other": "other"},
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer")],
            ["random_int"]
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        expected_specification = (0 + 1) / 2
        expected_integrity = (1 + 0) / 2
        self.assertEqual(expected_specification, result.attribute_specification,
                         "Attribute specification is not correct")
        self.assertEqual(expected_integrity, result.attribute_integrity,
                         "Attribute integrity is not correct")
        self.assertEqual((expected_specification + expected_integrity) / 2,
                         result.attribute_quality_index,
                         "Attribute quality is not correct")

    def test_specification_with_only_type_specification(self) -> None:
        # arrange
        samples = [
            {"random_int": 1, "random_string": "foo"},
            {"random_int": 2, "random_string": "bar"}
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer"), ("random_string", "string")],
            ["random_string", "random_int"],
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(.5, result.attribute_specification)

    def test_specification_with_complete_specification(self) -> None:
        # arrange
        samples = [
            {"random_int": 1, "random_string": "foo"},
            {"random_int": 2, "random_string": "bar"}
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer"), ("random_string", "string")],
            ["random_string", "random_int"],
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(.5, result.attribute_specification)

    def test_specification_with_inferred_schema(self) -> None:
        # arrange
        samples = [
            {"random_int": 1, "random_string": "foo"},
            {"random_int": 2, "random_string": "bar"}
        ]
        schema = '''
        {
            "$schema": "http://json-schema.org/schema#",
            "type": "object",
            "properties": {
                "random_string": {
                    "type": "string"
                },
                "random_integer": {
                    "type": "integer"
                }
            },
            "required": [
                "random_integer",
                "random_string"
            ]
        }
        '''
        schema_definition = SchemaDefinition.create(schema, True)
        # act
        result = self.inspector.inspect(samples, schema_definition)
        # assert
        self.assertEqual(.0, result.attribute_specification,
                         "Attribute specification is considered 0% when schema is inferred")

    def test_specification_with_empty_schema(self) -> None:
        # arrange
        samples = [
            {"random_int": 1, "random_string": "foo"},
            {"random_int": 2, "random_string": "bar"}
        ]
        schema_definition = DataLoader.expand_schema(
            [],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(0, result.attribute_specification)

    def test_specification_with_partial_specification(self) -> None:
        # arrange
        samples = [
            {"random_int": 1, "random_string": "foo"},
            {"random_int": 2, "random_string": "bar"}
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_string", "string")],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert (half of the data is specified to .5)
        self.assertEqual(.25, result.attribute_specification,
                         "Specification must be 25% because only half of the data is specified in schema")

    def test_specification_with_irrelevant_specification(self) -> None:
        # arrange
        samples = [
            {"random_int": 1, "random_string": "foo"},
            {"random_int": 2, "random_string": "bar"}
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_other", "string")],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(0, result.attribute_specification,
                         "Specification must be 0% because none of the attributes are specified")

    def test_quality_with_complete_specification(self) -> None:
        # arrange
        samples = [
            {"random_int": 1, "random_string": "foo"},  # random_string does not match
            {"random_int": 2, "random_string": "bar"}
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_string", "string"), ("random_int", "number")],
            [],
            {"random_string": {"pattern": "bar"},
             "random_int": {"minimum": 0, "maximum": 100}}
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(.75, result.attribute_integrity)
        self.assertEqual(1.0, result.attribute_specification)
        self.assertEqual(.875, result.attribute_quality_index)

    def test_quality_with_partial_specification(self) -> None:
        # arrange
        samples = [
            {"random_int": 1, "random_string": "foo"},
            {"random_int": 2, "random_string": "bar"}
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_string", "string"), ("random_int", "int")],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(1.0, result.attribute_integrity)
        self.assertEqual(.5, result.attribute_specification)
        self.assertEqual(.75, result.attribute_quality_index)

    def test_quality_without_specification(self):
        # arrange
        samples = [
            {"random_int": 1, "random_string": "foo"},
            {"random_int": 2, "random_string": "bar"}
        ]
        schema_definition = DataLoader.expand_schema(
            [],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(.5, result.attribute_quality_index)

    def test_specification_with_partial_schema_and_inferred(self) -> None:
        # arrange
        samples = [
            {"random_int": 1, "random_string": "foo"},
            {"random_int": 2, "random_string": "bar"}
        ]
        schema = '''
        {
            "$schema": "http://json-schema.org/schema#",
            "type": "object",
            "properties": {
                "random_string": {
                    "type": "string"
                },
                "random_integer": {
                    "type": "integer"
                }
            },
            "required": [
                "random_integer",
                "random_string"
            ]
        }
        '''
        schema_definition = SchemaDefinition.create(schema, True)
        # act
        result = self.inspector.inspect(samples, schema_definition)
        # assert
        self.assertEqual(.0,
                         result.attribute_specification,
                         "Attribute specification is considered 0% when the schema is inferred")

    def test_integrity_with_missing_required(self) -> None:
        # arrange
        samples = [
            {"random_int": 1},
            {"random_int": None},
            {"random_int": 2}
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer")],
            ["random_int"]
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertAlmostEqual(2 / 3, result.attribute_integrity, 3,
                               "Attribute integrity must be 66%")

    def test_integrity_for_complex_type(self):
        # arrange
        schema = DataLoader.load_schema_with_name("schema_registry_avro.json")
        samples = [
            {"timestamp": 1595601702,
             "iss_position": {"longitude": "-42.2948", "latitude": "-40.3670"},
             "message": "success"},
            {"timestamp": 1595601702,
             "iss_position": {"latitude": "-40.3670"},
             "message": "success"},
            {"timestamp": "wrong",
             "iss_position": {"longitude": 666, "latitude": "-40.0283"},
             "message": "success"},
        ]
        # act
        result = self.inspector.inspect_attributes(samples, SchemaDefinition.create(schema, False))
        # assert - only message is not mandatory so 3 out of 12 (3*4) are missing or wrong
        invalid_elements = 3
        all_elements = 12
        expected_integrity = (all_elements - invalid_elements) / all_elements
        self.assertAlmostEqual(expected_integrity, result.attribute_integrity, 3,
                               f"Integrity must be {expected_integrity * 100}%")

    def test_integrity_with_missing_not_required(self) -> None:
        # arrange
        samples = [
            {"random_int": 1},
            {"random_int": None},
            {"random_int": 2}
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer")],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(1.0, result.attribute_integrity)

    def test_integrity_without_specified_optional_field(self) -> None:
        # arrange
        samples = [
            {"random_int": 1},
            {"random_int": 2},
            {"random_int": 3}
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer"), ("random_string", "string")],
            ["random_int"]
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(1.0, result.attribute_integrity)

    def test_integrity_without_specified_required_field(self) -> None:
        # arrange
        samples = [
            {"random_int": 1},
            {"random_int": 2},
            {"random_int": 3}
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer"), ("random_string", "string")],
            ["random_string"]
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(.5, result.attribute_integrity)

    def test_integrity_with_additional_field(self) -> None:
        # arrange
        samples = [
            {"random_int": 1, "random_string": "abc"},
            {"random_int": 2, "random_string": "efg"},
            {"random_int": 3, "random_string": "hij"}
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer")],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(1.0, result.attribute_integrity)

    def test_integrity_with_numeric_as_string(self) -> None:
        # arrange
        samples = [
            {"random_int": "10000001.023"},
            {"random_int": "1"}
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "number")],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(.0, result.attribute_integrity)

    def test_integrity_with_float_as_int(self) -> None:
        # arrange
        samples = [{"random_int": "10000001.023"}]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer")],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(0.0, result.attribute_integrity)

    def test_integrity_on_attribute_level_with_not_specified_partial_field(self) -> None:
        # arrange
        samples = [
            {"random_int": 1002, "random_string": 1},
            {"random_int": 1003, "random_string": 2},
            {"random_int": 1004},
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer")],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        attribute_details = result.attribute_details
        self.assertTrue('random_string' in attribute_details.keys(),
                        "Missing integrity for attribute random_string")
        self.assertAlmostEqual(1, attribute_details['random_string'].attribute_integrity, 3,
                               "Integrity of random_string is not correct")

    def test_integrity_on_attribute_level_with_missing_value(self) -> None:
        # arrange
        samples = [
            {"random_int": 1002, "random_string": 1},
            {"random_int": 1003, "random_string": 2},
            {"random_int": "foo", "random_string": 3},
            {"random_int": 1005, "random_string": "fourth"},
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer"), ("random_string", "string")],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        attribute_details = result.attribute_details
        self.assertTrue('random_int' in attribute_details.keys(),
                        "Missing integrity for attribute random_int")
        self.assertTrue('random_string' in attribute_details.keys(),
                        "Missing integrity for attribute random_string")
        self.assertAlmostEqual((3 / 4), attribute_details['random_int'].attribute_integrity, 3,
                               "Integrity of random_int is not correct")
        self.assertAlmostEqual((1 / 4), attribute_details['random_string'].attribute_integrity, 3,
                               "Integrity of random_string is not correct")

    def test_integrity_on_attribute_level_with_not_specified_fields(self) -> None:
        # arrange
        samples = [
            {"random_int": 1002, "random_string": 1},
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer")],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        attribute_details = result.attribute_details
        self.assertTrue('random_string' in attribute_details.keys(),
                        "Even a not specified fields needs to be present in the details.")
        self.assertEqual(1.0, attribute_details['random_string'].attribute_integrity)

    def test_specification_on_attribute_level_with_complete_expectations(self) -> None:
        # arrange
        samples = [
            {"random_int": 1002, "random_string": "1"},
            {"random_int": 1003, "random_string": "2"},
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer"), ("random_string", "string")],
            [],
            {"random_int": {"minimum": 0, "maximum": 1004},
             "random_string": {"pattern": ""}}
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        attribute_details = result.attribute_details
        self.assertTrue('random_int' in attribute_details.keys())
        self.assertTrue('random_string' in attribute_details.keys())
        self.assertEqual(1.0, attribute_details['random_int'].attribute_specification)
        self.assertEqual(1.0, attribute_details['random_string'].attribute_specification)

    def test_specification_on_attribute_level_with_partial_expectations(self) -> None:
        # arrange
        samples = [
            {"random_int": 1002, "random_string": 1},
            {"random_int": 1003, "random_string": 2},
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer"), ("random_string", "string")],
            [],
            {"random_int": {"minimum": 0}}
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        attribute_details = result.attribute_details
        self.assertTrue('random_int' in attribute_details.keys())
        self.assertEqual(.75, attribute_details['random_int'].attribute_specification)
        self.assertEqual(.5, attribute_details['random_string'].attribute_specification)

    def test_specification_on_attribute_level_without_expectations(self) -> None:
        # arrange
        samples = [
            {"random_int": 1002, "random_string": 1},
            {"random_int": 1003, "random_string": 2},
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer"), ("random_string", "string")],
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        attribute_details = result.attribute_details
        self.assertTrue('random_int' in attribute_details.keys())
        self.assertEqual(.5, attribute_details['random_int'].attribute_specification)
        self.assertEqual(.5,
                         attribute_details['random_string'].attribute_specification)

    def test_specification_on_attribute_level_with_missing_specification(self) -> None:
        # arrange
        samples = [
            {"random_int": 1002, "random_string": 1},
            {"random_int": 1003, "random_string": 2},
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer")],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        attribute_details = result.attribute_details
        self.assertTrue('random_string' in attribute_details.keys())
        self.assertEqual(0.0, attribute_details['random_string'].attribute_specification)

    def test_quality_on_attribute_level(self) -> None:
        # arrange
        samples = [
            {"random_int": 2, "random_string": "one"},
            {"random_int": 55, "random_string": "two"},
            {"random_int": 101, "random_string": "three"},
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer")],
            [],
            {"random_int": {"minimum": 50, "maximum": 100}}
        )
        # act
        result = self.inspector.inspect(samples, schema_definition)
        # assert
        attribute_details = result.attribute_details
        self.assertTrue('random_int' in attribute_details.keys())
        self.assertTrue('random_string' in attribute_details.keys())
        # assertAlmostEqual (not the removed assertAlmostEquals alias)
        self.assertAlmostEqual(((1 / 3) + 1) / 2,
                               attribute_details['random_int'].attribute_quality_index, 3)
        self.assertAlmostEqual((1 + 0) / 2,
                               attribute_details['random_string'].attribute_quality_index, 3)

    def test_inspect_with_non_unique_types_does_not_throw_exception(self) -> None:
        # arrange
        samples = [
            {"random_int": 1002},
            {"random_int": "1003"},
            {"random_int": "1004"},
        ]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer")],
            [],
            {"random_int": {"minimum": 0, "maximum": 100}}
        )
        # act
        result = self.inspector.inspect(samples, schema_definition)
        # assert
        attribute_details = result.attribute_details
        self.assertAlmostEqual((1 / 3), attribute_details['random_int'].attribute_integrity, 3)

    def test_integrity_on_attribute_level_with_expectations(self):
        # arrange
        schema = '''
        {
            "type": "record",
            "name": "RandomData",
            "namespace": "data.producer.random",
            "fields": [
                {
                    "name": "random_integer",
                    "type": "int",
                    "expectations": [
                        {
                            "kwargs": {
                                "min_value": 0,
                                "max_value": 10
                            },
                            "expectation_type": "expect_column_values_to_be_between"
                        }
                    ]
                },
                {
                    "name": "random_string",
                    "type": "string",
                    "expectations": [
                        {
                            "kwargs": {
                                "regex": "id_"
                            },
                            "meta": {},
                            "expectation_type": "expect_column_values_to_match_regex"
                        }
                    ]
                }
            ]
        }
        '''
        samples = [
            {'random_integer': 1, 'random_string': 'missing_id'},
            {'random_integer': 11, 'random_string': 'id_1'},
            {'random_integer': 3, 'random_string': 'missing_id'},
        ]
        # act
        result = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
        # assert
        attribute_details = result.attribute_details
        self.assertAlmostEqual((3 / 6), result.attribute_integrity, 3,
                               "Attribute integrity is not correct")
        self.assertTrue('random_integer' in attribute_details.keys(),
                        "Missing integrity for attribute random_integer")
        self.assertTrue('random_string' in attribute_details.keys(),
                        "Missing integrity for attribute random_string")
        self.assertAlmostEqual((2 / 3), attribute_details['random_integer'].attribute_integrity, 3,
                               "Integrity of random_int is not correct")
        self.assertAlmostEqual((1 / 3), attribute_details['random_string'].attribute_integrity, 3,
                               "Integrity of random_string is not correct")

    def test_integrity_with_negative_as_string(self) -> None:
        # arrange
        samples = [{"random_int": "-10000"}]
        schema_definition = DataLoader.expand_schema(
            [("random_int", "integer")],
            []
        )
        # act
        result = self.inspector.inspect_attributes(samples, schema_definition)
        # assert
        self.assertEqual(.0, result.attribute_integrity,
                         "Attribute integrity must be 0% (even if not required, a "
                         "specified value needs to be correct).")

    def test_integrity_with_wrong_type(self) -> None:
        # arrange
        samples, schema = DataLoader.create_dummy_samples()
        # noinspection PyTypeChecker
        samples[0]['random_string'] = 123
        # act
        result = self.inspector.inspect_attributes(samples, schema)
        # assert
        self.assertEqual(0.5, result.attribute_integrity)

    def test_integrity_without_provided_schema(self) -> None:
        # arrange
        samples, _ = DataLoader.create_dummy_samples()
        # act
        empty_schema = SchemaDefinition.empty()
        result = self.inspector.inspect(samples, empty_schema)
        # assert
        self.assertEqual(1.0, result.attribute_integrity)
        self.assertEqual(.0, result.attribute_specification)
        self.assertEqual(.5, result.attribute_quality_index)

    def test_inspect_with_inferred_schemas(self):
        # arrange
        schema = DataLoader.load_schema_with_name("schema_registry_json.json")
        schema_definition = SchemaDefinition.create(schema, True)
        samples = DataLoader.load_samples()
        # act
        result = self.inspector.inspect(samples, schema_definition)
        # assert
        self.assertEqual(1.0, result.attribute_integrity)
        self.assertEqual(.0, result.attribute_specification)
        self.assertEqual(.5, result.attribute_quality_index)

    def test_various_types_do_not_throw_exceptions(self):
        # arrange
        schema = '''
        {
            "type": "record",
            "name": "RandomData",
            "namespace": "data.producer.random",
            "fields": [
                {
                    "name": "random_string",
                    "type": "string"
                },
                {
                    "name": "random_integer",
                    "type": "int"
                },
                {
                    "name": "random_float",
                    "type": "float"
                },
                {
                    "name": "random_boolean",
                    "type": "boolean"
                }
            ]
        }
        '''
        samples = [
            {'random_string': 'wheyuugkwi', 'random_integer': 876,
             'random_float': 0.2295482, 'random_boolean': False}
        ]
        # act
        metrics = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
        # assert
        self.assertIsNotNone(metrics)

    def test_inspect_with_min_max_range_expectation(self):
        # arrange
        schema = DataLoader.load_schema_with_name("schema_with_min_max.json")
        samples = [
            {'random_integer': 3},
            {'random_integer': 11},
            {'random_integer': 3},
            {'random_integer': 8},
            {'random_integer': 3},
            {'random_integer': -5},
            {'random_integer': 3},
            {'random_integer': 10},
        ]
        # act
        metrics = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
        # assert
        self.assertEqual((6 / 8), metrics.attribute_integrity,
                         f"Attribute integrity must be {(6 / 8) * 100}%")

    def test_inspect_with_min_expectation(self):
        # arrange
        schema = DataLoader.load_schema_with_name("schema_with_min.json")
        samples = [
            {'random_integer': 3},
            {'random_integer': 11},
            {'random_integer': 3},
            {'random_integer': 8},
            {'random_integer': 3},
            {'random_integer': -5},
            {'random_integer': 3},
            {'random_integer': 10},
        ]
        # act
        metrics = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
        # assert
        self.assertEqual((7 / 8), metrics.attribute_integrity,
                         f"Attribute integrity must be {(7 / 8) * 100}%")

    def test_inspect_with_multiple_expectations_asyncapi_style(self):
        # arrange
        schema = DataLoader.load_schema_with_name("schema_expectation_asyncapi_style.json")
        samples = [
            {'random_integer': 1, 'random_string': 'id_1'},
            {'random_integer': 2, 'random_string': 'foo'},  # no match (string)
            {'random_integer': 3, 'random_string': 'id_3'},
            {'random_integer': 4, 'random_string': 'id_4'},  # no match (integer)
            {'random_integer': 5, 'random_string': 'foo'},  # no match (integer, string)
        ]
        # act
        metrics = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
        # assert
        self.assertAlmostEqual(6 / 10, metrics.attribute_integrity, 3)

    def test_inspect_with_multiple_expectations_asyncapi_style_json(self):
        # arrange
        schema = DataLoader.load_schema_with_name("schema_expectation_asyncapi_style_json.json")
        samples = [
            {'random_integer': 1, 'random_string': 'id_1'},
            {'random_integer': 2, 'random_string': 'foo'},  # no match (string)
            {'random_integer': 3, 'random_string': 'id_3'},
            {'random_integer': 4, 'random_string': 'id_4'},  # no match (integer)
            {'random_integer': 5, 'random_string': 'foo'},  # no match (integer, string)
        ]
        # act
        metrics = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
        # assert
        self.assertAlmostEqual(6 / 10, metrics.attribute_integrity, 3)

    def test_inspect_with_both_schema_formats(self):
        # arrange
        schema_json = DataLoader.load_schema_with_name("schema_diff_json.json")
        schema_avro = DataLoader.load_schema_with_name("schema_diff_avro.json")
        samples = DataLoader.load_samples()
        # act
        result_json = self.inspector.inspect(samples, SchemaDefinition.create(schema_json, False))
        result_avro = self.inspector.inspect(samples, SchemaDefinition.create(schema_avro, False))
        # assert
        self.assertEqual(result_json, result_avro)

    def test_specification_from_toeggelomat_json(self):
        # arrange
        samples = DataLoader.load_samples_from_file("samples_toeggelomat.json")
        # act
        schema = DataLoader.load_schema_with_name("schema_toeggelomat_json.json")
        result = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
        # assert
        self.assertEqual(53, len(result.attribute_details.keys()),
                         "There should be 53 keys in the dictionary")
        for attribute_metric in result.attribute_details.keys():
            self.assertEqual(1.0, result.attribute_details[attribute_metric].attribute_specification,
                             f"Attribute specification must be 100% ({attribute_metric})")
            self.assertEqual(1.0, result.attribute_details[attribute_metric].attribute_integrity,
                             f"Attribute integrity must be 100% ({attribute_metric})")

    def test_specification_from_toeggelomat(self):
        # arrange
        samples = DataLoader.load_samples_from_file("samples_toeggelomat.json")
        # act
        schema = DataLoader.load_schema_with_name("schema_toeggelomat.json")
        result = self.inspector.inspect(samples, SchemaDefinition.create(schema, False))
        # assert
        self.assertEqual(53, len(result.attribute_details.keys()),
                         "There should be 53 keys in the dictionary")
        for attribute_metric in result.attribute_details.keys():
            self.assertEqual(1.0, result.attribute_details[attribute_metric].attribute_specification,
                             f"Attribute specification must be 100% ({attribute_metric})")
            self.assertEqual(1.0, result.attribute_details[attribute_metric].attribute_integrity,
                             f"Attribute integrity must be 100% ({attribute_metric})")
34,367
9,671
# @Author: Narsi Reddy <cibitaw1>
# @Date: 2018-09-22T17:38:05-05:00
# @Email: sainarsireddy@outlook.com
# @Last modified by: narsi
# @Last modified time: 2019-02-13T22:46:56-06:00
import torch
torch.manual_seed(29)
from torch import nn
import numpy as np
np.random.seed(29)
import torch.nn.functional as F
from torch.autograd.function import Function
from torch.nn.parameter import Parameter

from math import exp

"""
CLASSIFICATION METRICS
"""
def accuracy_topk(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # Fix: reshape(-1), not view(-1) — `correct` can be
            # non-contiguous (it derives from a transposed/broadcast
            # comparison), and .view() raises a RuntimeError on
            # non-contiguous tensors in current PyTorch.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res

def accuracy(output, target):
    """Top-1 accuracy in percent, as a 1-element tensor."""
    batch_size = target.size(0)
    _, pred = output.topk(1, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    # reshape(-1) for the same contiguity reason as in accuracy_topk.
    correct_k = correct[:1].reshape(-1).float().sum(0, keepdim=True)
    res = correct_k.mul_(100.0 / batch_size)
    return res

def binary_accuracy(output, target):
    """Accuracy in percent for sigmoid outputs, thresholded by rounding."""
    res = torch.mean(target.eq(torch.round(output)).float()) * 100
    return res

"""
SUPER RESOLUTION
"""
# https://github.com/Po-Hsun-Su/pytorch-ssim
def gaussian(window_size, sigma):
    """1-D Gaussian kernel of length window_size, normalized to sum to 1."""
    gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
    return gauss/gauss.sum()

def create_window(window_size, channel, sigma = 1.5):
    """Depthwise (one per channel) 2-D Gaussian window for SSIM convolutions."""
    _1D_window = gaussian(window_size, sigma).unsqueeze(1)
    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
    window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
    return window

def _ssim(img1, img2, window, window_size, channel, size_average = True):
    """Core SSIM computation between two batches using a precomputed window.

    Returns a scalar mean when size_average is True, else a per-sample mean.
    """
    mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
    mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)

    mu1_sq = mu1.pow(2)
    mu2_sq = mu2.pow(2)
    mu1_mu2 = mu1*mu2

    sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
    sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
    sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2

    # Stabilization constants from the SSIM paper (K1=0.01, K2=0.03, L=1).
    C1 = 0.01**2
    C2 = 0.03**2

    ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))

    if size_average:
        return ssim_map.mean()
    else:
        return ssim_map.mean(1).mean(1).mean(1)

class SSIM(nn.Module):
    """SSIM as a module; the Gaussian window is cached and rebuilt when the
    input channel count or dtype/device changes."""
    def __init__(self, window_size = 5, channel = 24, size_average = True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = channel
        self.window = create_window(window_size, self.channel, sigma = 5)

    def forward(self, img1, img2):
        (_, channel, _, _) = img1.size()

        if channel == self.channel and self.window.data.type() == img1.data.type():
            window = self.window
        else:
            # Rebuild (and cache) the window for the actual input channels.
            window = create_window(self.window_size, channel, sigma = 5)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            self.window = window
            self.channel = channel

        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)

class SSIM_LOSS(nn.Module):
    """Loss form of SSIM: 1 - SSIM(img1, img2)."""
    def __init__(self, window_size = 5, channel = 1,size_average = True):
        super(SSIM_LOSS, self).__init__()
        self.SSIM = SSIM(window_size, channel, size_average)
    def forward(self, img1, img2):
        return 1-self.SSIM(img1, img2)

def psnr(output, target):
    """PSNR in dB; assumes inputs scaled so the signal peak is 1.0."""
    mse = F.mse_loss(output, target)
    return -10. * logX(mse)

def logX(x, d = 10.0):
    """
    Log base d (default 10) of a tensor, via log(x)/log(d).
    """
    numerator = torch.log(x)
    denominator = np.log(d)
    return numerator / denominator
4,247
1,644
# ### =============================================================== # ### =============================================================== # ### Modify the dataset loading settings # dataset settings dataset_type = 'ContrastDataset' data_root = '/mnt/cadlabnas/datasets/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='ToTensor', keys=['gt_label']), dict(type='Collect', keys=['img', 'gt_label']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ] data = dict( samples_per_gpu=8, # BATCH_SIZE workers_per_gpu=2, train=dict( type='RepeatDataset', times=1, dataset=dict( type=dataset_type, ann_file='train.txt', data_prefix= data_root + 'RenalDonors/', pipeline=train_pipeline), pipeline=train_pipeline ), val=dict( type=dataset_type, ann_file='val.txt', data_prefix= data_root + 'RenalDonors/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file='test.txt', data_prefix= data_root + 'RenalDonors/', pipeline=test_pipeline)) evaluation = dict(interval=1, metric='accuracy', metric_options=dict(topk=(1,))) # Set up working dir to save files and logs. 
work_dir = '/home/alec/Desktop/ImgClassification/working_dir' ### =============================================================== ### =============================================================== ### Modify the model settings # model settings model = dict( type='ImageClassifier', pretrained='torchvision://resnet18', backbone=dict( type='ResNet', depth=18, num_stages=4, out_indices=(3,), style='pytorch'), neck=dict(type='GlobalAveragePooling'), head=dict( type='LinearClsHead', num_classes=2, in_channels=512, loss=dict(type='CrossEntropyLoss', loss_weight=1.0), topk=(1,), )) ### =============================================================== ### =============================================================== ### Modify the schedule settings # The original learning rate (LR) is set for 8-GPU training. # We divide it by 4 since we only use one GPU. # optimizer optimizer_lr = 0.0001 #0.01 / 4 # optimizer optimizer = dict(type='SGD', lr=optimizer_lr, momentum=0.9, weight_decay=0.0001, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( policy='step', # warmup='linear', # warmup_iters=500, # warmup_ratio=0.001, step=[5, 10]) runner = dict(type='EpochBasedRunner', max_epochs=25) ### =============================================================== ### =============================================================== ### Modify the default runtime settings checkpoint_config = dict(interval=1) # yapf:disable log_config = dict( interval=50, #50, hooks=[ dict(type='TextLoggerHook'), # dict(type='TensorboardLoggerHook') ]) dist_params = dict(backend='nccl') log_level = 'INFO' load_from = None resume_from = None # run train iter 1 time (overall 1 time which includes: div num_images by batch_size, and mult by dataset_repeat_times) # run validation iter 1 time # only setting workflow = [('train', 1)] will not backpropagate validation error/loss through the network workflow = [('train', 1), ('val', 1)] 
### =============================================================== ### =============================================================== ### Miscellaneous settings # Set seed thus the results are more reproducible seed = 0 #set_random_seed(0, deterministic=False) gpu_ids = range(1) ### =============================================================== ### =============================================================== ### testing/prediction/evaluation phase - Model settings # get the root path to the model checkpoints ckp_root = work_dir #'/home/tsm/Code/mmdetection/demo/tutorial_exps/'
4,428
1,423
# Stub signatures mirroring Blender's `bpy.ops.uv` operator API.  Every
# function records only the operator name and its keyword defaults; all
# bodies are `pass`.  Presumably generated for IDE auto-completion of the
# real C-implemented operators — TODO confirm the bpy version they track.
def align(axis='ALIGN_AUTO'): pass
def average_islands_scale(): pass
def circle_select(x=0, y=0, radius=1, gesture_mode=0): pass
def cube_project(cube_size=1.0, correct_aspect=True, clip_to_bounds=False, scale_to_bounds=False): pass
def cursor_set(location=(0.0, 0.0)): pass
def cylinder_project(direction='VIEW_ON_EQUATOR', align='POLAR_ZX', radius=1.0, correct_aspect=True, clip_to_bounds=False, scale_to_bounds=False): pass
def export_layout(filepath="", check_existing=True, export_all=False, modified=False, mode='PNG', size=(1024, 1024), opacity=0.25, tessellated=False): pass
def follow_active_quads(mode='LENGTH_AVERAGE'): pass
def hide(unselected=False): pass
def lightmap_pack(PREF_CONTEXT='SEL_FACES', PREF_PACK_IN_ONE=True, PREF_NEW_UVLAYER=False, PREF_APPLY_IMAGE=False, PREF_IMG_PX_SIZE=512, PREF_BOX_DIV=12, PREF_MARGIN_DIV=0.1): pass
def mark_seam(clear=False): pass
def minimize_stretch(fill_holes=True, blend=0.0, iterations=0): pass
def pack_islands(rotate=True, margin=0.001): pass
def pin(clear=False): pass
def project_from_view(orthographic=False, camera_bounds=True, correct_aspect=True, clip_to_bounds=False, scale_to_bounds=False): pass
def remove_doubles(threshold=0.02, use_unselected=False): pass
def reset(): pass
def reveal(): pass
def seams_from_islands(mark_seams=True, mark_sharp=False): pass
def select(extend=False, location=(0.0, 0.0)): pass
def select_all(action='TOGGLE'): pass
def select_border(pinned=False, gesture_mode=0, xmin=0, xmax=0, ymin=0, ymax=0, extend=True): pass
def select_lasso(path=None, deselect=False, extend=True): pass
def select_less(): pass
def select_linked(extend=False): pass
def select_linked_pick(extend=False, location=(0.0, 0.0)): pass
def select_loop(extend=False, location=(0.0, 0.0)): pass
def select_more(): pass
def select_pinned(): pass
def select_split(): pass
def smart_project(angle_limit=66.0, island_margin=0.0, user_area_weight=0.0, use_aspect=True, stretch_to_bounds=True): pass
def snap_cursor(target='PIXELS'): pass
# Continuation of the `bpy.ops.uv` operator stub signatures (see above
# defs): names and keyword defaults only, all bodies `pass`.
def snap_selected(target='PIXELS'): pass
def sphere_project(direction='VIEW_ON_EQUATOR', align='POLAR_ZX', correct_aspect=True, clip_to_bounds=False, scale_to_bounds=False): pass
def stitch(use_limit=False, snap_islands=True, limit=0.01, static_island=0, midpoint_snap=False, clear_seams=True, mode='VERTEX', stored_mode='VERTEX', selection=None): pass
def tile_set(tile=(0, 0)): pass
def unwrap(method='ANGLE_BASED', fill_holes=True, correct_aspect=True, use_subsurf_data=False, margin=0.001): pass
def weld(): pass
2,745
1,119
import sys

EPS = sys.float_info.epsilon


# Define the function
def f(x):
    """Test function (x+1)^2 - 1, with roots at x = 0 and x = -2."""
    return (x + 1)**2 - 1


def bisect(f, x1, x2, eps, maxn):
    """Find a root of ``f`` in the bracket [x1, x2] by bisection.

    Parameters:
        f    -- callable; must change sign between x1 and x2
        x1, x2 -- bracket endpoints
        eps  -- convergence tolerance on the relative error, in PERCENT
        maxn -- maximum number of iterations

    Returns the last midpoint (the root approximation).

    BUG FIX vs. the original: the previous midpoint ``xr`` was only updated
    in the exact-root branch, so the relative error was always computed
    against a stale value and the eps-based convergence test never worked.
    """
    assert f(x1)*f(x2) < 0, \
        "We cannot find a root if the function does not change signs"
    xl = x1
    xu = x2
    fl = f(xl)
    r = xl
    xr = None  # midpoint from the previous iteration (None on first pass)
    for i in range(maxn):
        r = (xl + xu) / 2
        print(r)
        fr = f(r)
        if fr == 0:
            # landed exactly on a root
            break
        # relative-error test; skipped near zero to avoid dividing by ~0
        if xr is not None and abs(r) > EPS:
            err = abs((r - xr) / r) * 100
            if err < eps:
                print("Error =" + str(err))
                break
        xr = r
        if fl * fr < 0:
            # sign change in [xl, r]: root is in the lower half
            xu = r
        else:
            # root is in the upper half; carry fr forward as the new fl
            xl = r
            fl = fr
    return r


print("Computing the roots of (x+1)**2 - 1")
r = bisect(f, -1.5, 10, 0.00001, 100)
print("Root = " + str(r))
662
368
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait


def login():
    """Open Chrome and log into facebook.com using the module-level
    ``email`` and ``password`` values.

    NOTE(review): ``find_element_by_id`` is the legacy selenium API; kept
    for compatibility with the selenium version this script targets.
    """
    browser = webdriver.Chrome()
    browser.get("https://www.facebook.com")
    emailid = browser.find_element_by_id("email")
    passid = browser.find_element_by_id("pass")
    loginid = browser.find_element_by_id("loginbutton")
    emailid.send_keys(email)
    passid.send_keys(password)
    loginid.click()


email = input("Enter Your Email: ")
password = input("Enter Your Password: ")

# Re-prompt until BOTH fields are non-empty.
# BUG FIX: the original `while email or password == ""` parsed as
# `while (email) or (password == "")`, which loops forever once a
# non-empty email is entered; similarly the duplicated
# `if email and password != "": login()` guards called login() twice.
while email == "" or password == "":
    print("\n")
    email = input("Enter Your Email: ")
    password = input("Enter Your Password: ")

login()
717
216
import zmq
import logging
import argparse

from monique_worker_py.config import read_worker_config
from monique_worker_py.qmessage import qmessage_from_json, create_qmessage


class Worker:
    """Long-running queue worker: pulls QMessages from a controller over
    ZeroMQ, runs ``algo`` on each task's config, and pushes the completed
    (or failed) result back.
    """

    def __init__(self, worker_name, algo):
        """
        :param worker_name: label embedded in every log line
        :param algo: callable taking a task config and returning an object
                     with ``result`` and ``version`` attributes
                     (see WorkerResult below)
        NOTE(review): parses ``--config`` from sys.argv as a constructor
        side effect — instantiating Worker outside a CLI entry point will
        exit if --config is missing.
        """
        self.worker_name = worker_name
        self.algo = algo
        parser = argparse.ArgumentParser()
        parser.add_argument('--config', required=True, help='Path to config file')
        args = parser.parse_args()
        self.worker_config = read_worker_config(args.config)

    def run(self):
        """Runs application.  Blocks forever in the receive loop."""
        logging.basicConfig(level=self.worker_config.log_level,
                            format='%(asctime)s %(name)-12s %(levelname)-8s {}: %(message)s'.format(self.worker_name),
                            datefmt='%Y-%m-%d %H:%M',
                            filename=self.worker_config.log_path,
                            filemode='a')
        logging.info("connecting to queue...")
        # setup connection
        context = zmq.Context()
        # Socket to receive messages from controller
        from_controller = context.socket(zmq.PULL)
        from_controller.connect(self.worker_config.controller_pull_address())
        # Socket to send messages to controller
        to_controller = context.socket(zmq.PUSH)
        to_controller.connect(self.worker_config.controller_push_address())
        logging.info("connected to queue.")
        # waiting for the message... (loop never returns)
        while True:
            in_message = from_controller.recv()
            logging.info('message received.')
            # parsing message to QMessage
            qmessage = qmessage_from_json(in_message)
            logging.debug('message tags: {}; message cnt: {}'.format(qmessage.tags, qmessage.cnt))
            # get config from Task
            task = qmessage.cnt.contents
            config = task.get_config()
            logging.info('config parsed')
            logging.debug('config content: {}'.format(config))
            try:
                logging.info('start working...')
                # that is the MAIN PLACE. We run given algorithm with config received.
                wr = self.algo(config)
                logging.debug('worker result: {}, worker version: {}'.format(wr.result, wr.version))
                logging.info('finished working!')
                # mark the task completed with the worker result...
                completed_task = task.task_completed(wr)
                # ...and wrap it into a result QMessage
                completed_message = create_qmessage(completed_task)
                # and sending it back to the queue.
                logging.info('sending message with completed task...')
                logging.debug('message: {}'.format(completed_message.to_json()))
                to_controller.send(completed_message.to_json())
                logging.info("message sent :)")
            except Exception as e:
                # any failure in the algorithm is reported back, not fatal
                logging.error('failed with error: {}'.format(e))
                # if exception happened then format result QMessage with another method...
                failed_message = qmessage.qmessage_failed(self.worker_name, e)
                # and sending it back.
                logging.info("sending message with failed task...")
                logging.debug('message: {}'.format(failed_message.to_json()))
                to_controller.send(failed_message.to_json())
                logging.info("message sent :(")


class WorkerResult:
    """Class to format worker result."""

    def __init__(self, result, version):
        # result: the algorithm's output payload
        # version: version tag of the algorithm that produced it
        self.result = result
        self.version = version
3,606
926
# works with both python 2 and 3
from __future__ import print_function
from datetime import datetime

import africastalking


class SMS:
    """Thin wrapper around the africastalking SMS API for sending texts to
    Kenyan (+254) phone numbers."""

    def __init__(self):
        # SECURITY(review): credentials are hard-coded in source — they
        # should be loaded from the environment or a config file, and this
        # key should be rotated since it has been committed.
        self.username = "gs1kenya"
        self.api_key = "0902d36a02514da9fa33a11586683f8d76e5207ea544363e7d41149e6c9a6718"
        africastalking.initialize(self.username, self.api_key)
        self.sms = africastalking.SMS

    def send(self, phone, message):
        """Send ``message`` to +254<phone>; on failure print a notification.

        :param phone: subscriber number without the +254 country prefix
        :param message: text body to send
        """
        try:
            response = self.sms.send(str(message), ["+254" + str(phone)])
        except Exception as e:
            # Renamed from `message` so the error template no longer
            # shadows the parameter being sent.
            notice = """
            Dear, Omambia Mogaka.
            Ref: Message Notification
            ------------------------
            There was an error in sending message to your other employee.
            The Error is: {}
            Thank You,
            Humble Developer,
            Most adored,
            GS1 Kenya
            Date: {} .
            """
            # BUG FIX: datetime.now was passed uncalled, printing the bound
            # method's repr instead of the current timestamp.
            print(notice.format(str(e), datetime.now()))
1,081
324
def test_sample() -> None:
    """Smoke test: verifies the test runner itself is wired up."""
    assert True  # nosec
52
18
#################################### # File name: models.py # # Author: Fred Rybin # #################################### from rss_skill import db class Feed(db.Model): __tablename__ = 'feed' rss_i = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.Text, nullable=False) link = db.Column(db.Text, nullable=False) article_1 = db.Column(db.Text, nullable=False) article_2 = db.Column(db.Text, nullable=False) post = db.Column(db.String(32), nullable=False) def __init__(self, name, link, article_1, article_2): self.name = name self.link = link self.article_1 = article_1 self.article_2 = article_2 self.post = "" def __repr__(self): return f'Feed {self.rss_i}: {self.name}'
818
271
# Plot script: compares measured cart-pole MPC trajectories against the
# neural-network model's trajectories from two CSV logs in ./data.
import os
import pandas as pd
import matplotlib.pyplot as plt
from examples.cartpole_example.cartpole_dynamics import RAD_TO_DEG, DEG_TO_RAD

if __name__ == '__main__':

    # Alternate PID datasets, kept for reference:
    #df_model = pd.read_csv(os.path.join("data", "pendulum_data_PID.csv"))
    #df_nn = pd.read_csv(os.path.join("data", "pendulum_data_PID_NN_model.csv"))

    df_meas = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref_val.csv"))
    df_nn = pd.read_csv(os.path.join("data", "pendulum_data_MPC_ref_val_NN_model.csv"))

    # Three stacked panels sharing the time axis:
    # position p, pole angle theta (degrees), control input u.
    fig,axes = plt.subplots(3,1, figsize=(10,10), sharex=True)
    axes[0].plot(df_meas['time'], df_meas['p'], "k", label='p system')
    axes[0].plot(df_nn['time'], df_nn['p'], "r", label='p NN')
    axes[0].set_title("Position (m)")
    axes[0].set_ylim(-10, 10.0)
    axes[1].plot(df_meas['time'], df_meas['theta'] * RAD_TO_DEG, "k", label='theta system')
    axes[1].plot(df_nn['time'], df_nn['theta']*RAD_TO_DEG, "r", label='theta NN')
    axes[2].plot(df_meas['time'], df_meas['u'], label="u")
    axes[2].plot(df_nn['time'], df_nn['u'], label="u")
    for ax in axes:
        ax.grid(True)
        ax.legend()
1,120
487
from unittest import TestCase

import pandas as pd

from scripts.utils import extract_initial_rules


class TestExtractInitialRules(TestCase):
    """Tests test_extract_initial_rules() from utils.

    Expected representation (from the `correct` frames below): numeric
    feature values become (value, value) interval tuples, nominal values
    are kept as-is, and the class column is untouched.
    """

    def test_extract_initial_rules_numeric(self):
        """Test that rules are extracted correctly with a single numeric features"""
        df = pd.DataFrame({"A": [1.0, 2, 3], "Class": ["A", "B", "C"]})
        class_col_name = "Class"
        rules = extract_initial_rules(df, class_col_name)
        correct = pd.DataFrame({"A": [(1.0, 1.0), (2, 2), (3, 3)], "Class": ["A", "B", "C"]})
        # input frame must be left unmodified
        self.assertTrue(df.shape == (3, 2) and rules.shape == (3, 2))
        self.assertTrue(rules.equals(correct))

    def test_extract_initial_rules_nominal(self):
        """Test that rules are extracted correctly with a single nominal features"""
        df = pd.DataFrame({"A": ["a", "b", "c"], "Class": ["A", "B", "C"]})
        class_col_name = "Class"
        rules = extract_initial_rules(df, class_col_name)
        correct = pd.DataFrame({"A": ["a", "b", "c"], "Class": ["A", "B", "C"]})
        self.assertTrue(df.shape == (3, 2) and rules.shape == (3, 2))
        self.assertTrue(rules.equals(correct))

    def test_extract_initial_rules_single_feature_mixed(self):
        """
        Test that rules are extracted correctly with a single numeric and
        nominal feature
        """
        df = pd.DataFrame({"A": [1.0, 2, 3], "B": ["a", "b", "c"], "Class": ["A", "B", "C"]})
        class_col_name = "Class"
        rules = extract_initial_rules(df, class_col_name)
        correct = pd.DataFrame({"A": [(1.0, 1.0), (2, 2), (3, 3)], "B": ["a", "b", "c"],
                                "Class": ["A", "B", "C"]})
        self.assertTrue(df.shape == (3, 3) and rules.shape == (3, 3))
        self.assertTrue(rules.equals(correct))

    def test_extract_initial_rules_multiple_features_mixed(self):
        """
        Test that rules are extracted correctly with different numeric and
        nominal features
        """
        df = pd.DataFrame({"A": [1.0, 2, 3], "B": ["a", "b", "c"], "C": [5, -1, 3],
                           "D": ["t", "t", "e"], "Class": ["A", "B", "C"]})
        class_col_name = "Class"
        rules = extract_initial_rules(df, class_col_name)
        correct = pd.DataFrame({"A": [(1.0, 1.0), (2, 2), (3, 3)], "B": ["a", "b", "c"],
                                "C": [(5, 5), (-1, -1), (3, 3)], "D": ["t", "t", "e"],
                                "Class": ["A", "B", "C"]})
        self.assertTrue(df.shape == (3, 5) and rules.shape == (3, 5))
        self.assertTrue(rules.equals(correct))
2,560
912
#!/usr/bin/env python # license removed for brevity from scapy.all import * import sys class ICMPMaker: TYPE = 8 CODE = 0 CHKSUM = None ID = 0 SEQ = 0 TIMESTAMP_ORI = 60025165 TIMESTAMP_RX = 60025165 TIMESTAMP_TX = 60025165 GATEWAY = '0.0.0.0' PTR = 0 RESERVED = 0 LEN = 0 MASK = '0.0.0.0' NEXTHOPMTU = 0 def __init__(self, TYPE=8, CODE=0, CHKSUM=None, ID=0, SEQ=0, TIMESTAMP_ORI=60025165, TIMESTAMP_RX=60025165, TIMESTAMP_TX=60025165, GATEWAY='0.0.0.0', PTR=0, RESERVED=0, LEN=0, MASK='0.0.0.0', NEXTHOPMTU=0 ): self.TYPE = TYPE self.CODE = CODE self.CHKSUM = CHKSUM self.ID = ID self.SEQ = SEQ self.TIMESTAMP_ORI = TIMESTAMP_ORI self.TIMESTAMP_RX = TIMESTAMP_RX self.TIMESTAMP_TX = TIMESTAMP_TX self.GATEWAY = GATEWAY self.PTR = PTR self.RESERVED = RESERVED self.LEN = LEN self.MASK = MASK self.NEXTHOPMTU = NEXTHOPMTU def make_packet(self): return ICMP( type=self.TYPE, code=self.CODE, chksum=self.CHKSUM, id=self.ID, seq=self.SEQ, ts_ori=self.TIMESTAMP_ORI, ts_rx=self.TIMESTAMP_RX, ts_tx=self.TIMESTAMP_TX, gw=self.GATEWAY, ptr=self.PTR, reserved=self.RESERVED, length=self.LEN, addr_mask=self.MASK, nexthopmtu=self.NEXTHOPMTU ) def parse_type(self, icmp): return icmp.type def parse_code(self, icmp): return icmp.code def parse_chksum(self, icmp): return icmp.chksum def parse_id(self, icmp): return icmp.id def parse_seq(self, icmp): return icmp.seq def parse_ts_ori(self, icmp): return icmp.ts_ori def parse_ts_rx(self, icmp): return icmp.ts_rx def parse_ts_tx(self, icmp): return icmp.ts_tx def parse_gw(self, icmp): return icmp.gw def parse_ptr(self, icmp): return icmp.ptr def parse_reserved(self, icmp): return icmp.reserved def parse_len(self, icmp): return icmp.length def parse_mask(self, icmp): return icmp.addr_mask def parse_nexthopmtu(self, icmp): return icmp.nexthopmtu def show(self, icmp): return icmp.show2()
2,801
1,105
# NOTE: Python 2 source (print statements, cPickle, csv opened in 'rb').
# Visualizes learned character-relationship topic trajectories per book as
# colored time bands with topic-descriptor labels.
import csv, cPickle
from numpy import *
import matplotlib.pyplot as plt
import matplotlib
from cycler import cycler


# parse learned descriptors into a dict: topic index -> first word of line
def read_descriptors(desc_file):
    desc_map = {}
    f = open(desc_file, 'r')
    for i, line in enumerate(f):
        line = line.split()
        desc_map[i] = line[0]
    return desc_map


# read learned trajectories file
# returns {book: {"c1 AND c2": [vector per timestep, ...]}}
def read_csv(csv_file):
    reader = csv.reader(open(csv_file, 'rb'))
    all_traj = {}
    prev_book = None
    prev_c1 = None
    prev_c2 = None
    total_traj = 0
    for index, row in enumerate(reader):
        if index == 0:
            # skip header row
            continue
        book, c1, c2 = row[:3]
        if prev_book != book or prev_c1 != c1 or prev_c2 != c2:
            # start of a new (book, character-pair) trajectory
            prev_book = book
            prev_c1 = c1
            prev_c2 = c2
            if book not in all_traj:
                all_traj[book] = {}
            all_traj[book][c1+' AND '+c2] = []
            total_traj += 1
        else:
            all_traj[book][c1+' AND '+c2].append(array(row[4:], dtype='float32'))
    print len(all_traj), total_traj
    return all_traj


# compute locations to write labels:
# only write a label when the run of a single topic is long enough
# (its half-length exceeds smallest_shift / 2)
def compute_centers(max_traj, smallest_shift):
    center_inds = []
    prev_topic = max_traj[0]
    tstart = 0
    for index, topic in enumerate(max_traj):
        if topic != prev_topic:
            center = int((index-tstart) / 2)
            if center > smallest_shift / 2:
                center_inds.append(tstart + center)
            tstart = index
            prev_topic = topic
    # handle the final run, which never triggers a topic change
    center = int((index-tstart) / 2)
    if index - tstart > smallest_shift:
        center_inds.append(tstart + center)
    return center_inds


def viz_csv(rmn_traj, rmn_descs, min_length=10, smallest_shift=1,
            max_viz=False, fig_dir=None):
    """Render one figure per (book, character-pair) trajectory; show
    interactively when fig_dir is None, otherwise save PNGs there.
    NOTE(review): relies on the module-global `color_list` defined in the
    __main__ block below — this function only works when run as a script.
    """
    for book in rmn_traj:
        for rel in rmn_traj[book]:
            rtraj = rmn_traj[book][rel]
            # only plot trajectories in a displayable length range
            if len(rtraj) > min_length and len(rtraj)<150:
                print book, rel
                plt.close()
                rtraj_mat = array(rtraj)
                if max_viz:
                    plt.title(book + ': ' + rel)
                    plt.axis('off')
                    # dominant topic at each timestep
                    max_rtraj = argmax(rtraj_mat, axis=1)
                    rcenter_inds = compute_centers(max_rtraj, smallest_shift)
                    for ind in range(0, len(max_rtraj)):
                        topic = max_rtraj[ind]
                        # one colored horizontal band per timestep
                        plt.axhspan(ind, ind+1, 0.2, 0.4, color=color_list[topic])
                        if ind in rcenter_inds:
                            loc = (0.43, ind + 0.5)
                            plt.annotate(rmn_descs[topic], loc, size=15,
                                         verticalalignment='center',
                                         color=color_list[topic])
                    plt.xlim(0, 1.0)
                    # vertical time arrow along the left side
                    plt.arrow(0.1,0,0.0,len(rtraj), head_width=0.1,
                              head_length=len(rtraj)/12, lw=3,
                              length_includes_head=True, fc='k', ec='k')
                    props = {'ha': 'left', 'va': 'bottom',}
                    plt.text(0.0, len(rtraj) / 2, 'TIME', props, rotation=90, size=15)
                    props = {'ha': 'left', 'va': 'top',}
                if fig_dir is None:
                    plt.show()
                else:
                    chars = rel.split(' AND ')
                    fig_name = fig_dir + book + \
                        '__' + chars[0] + '__' + chars[1] + '.png'
                    print 'figname = ', fig_name
                    plt.savefig(fig_name)


if __name__ == '__main__':
    # wmap/cmap/bmap presumably map ids to words/characters/books — only
    # loaded, not used below; TODO confirm and drop if truly unused.
    wmap, cmap, bmap = cPickle.load(open('data/metadata.pkl', 'rb'))
    rmn_traj = read_csv('models/trajectories.log')
    rmn_descs = read_descriptors('models/descriptors.log')
    plt.style.use('ggplot')
    # fixed palette, doubled so topic indices can exceed 30
    color_list = ["peru","dodgerblue","brown","hotpink",
                  "aquamarine","springgreen","chartreuse","fuchsia",
                  "mediumspringgreen","burlywood","midnightblue","orangered",
                  "olive","darkolivegreen","darkmagenta","mediumvioletred",
                  "darkslateblue","saddlebrown","darkturquoise","cyan",
                  "chocolate","cornflowerblue","blue","red",
                  "navy","steelblue","cadetblue","forestgreen",
                  "black","darkcyan"]
    color_list += color_list
    plt.rc('axes', prop_cycle=(cycler('color', color_list)))
    viz_csv(rmn_traj, rmn_descs, min_length=50, max_viz=True,
            fig_dir='figs/', smallest_shift=1)
4,462
1,502
# -*- coding: utf-8 -*- import numpy as np from googletrans import Translator translator = Translator(service_urls=['translate.google.co.in']) def translate(word): return translator.translate(word,src='hi' , dest='en')
224
78
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django_nose.tools import *

from tests.base import AssetfilesTestCase


class TestServe(AssetfilesTestCase):
    """Integration tests for the assetfiles static-serving view: plain
    static files, per-app static files, and on-the-fly SCSS compilation
    (requests for .css are answered from the matching .scss source)."""

    def test_returns_not_found_without_an_asset(self):
        response = self.client.get('/static/non/existent/file.css')
        assert_equal(response.status_code, 404)

    def test_returns_static_files(self):
        self.mkfile('static/css/static.css', 'body { color: red; }')
        response = self.client.get('/static/css/static.css')
        assert_contains(response, 'body { color: red; }')

    def test_returns_static_files_with_correct_content_type(self):
        self.mkfile('static/css/static.css')
        response = self.client.get('/static/css/static.css')
        assert_equal(response.get('content-type'), 'text/css')

    def test_returns_static_files_with_extra_extensions(self):
        # dotted filenames must not be mistaken for processor extensions
        self.mkfile('app-1/static/js/jquery.plugin.js', '$.fn.plugin = {};')
        response = self.client.get('/static/js/jquery.plugin.js')
        assert_contains(response, '$.fn.plugin = {};')

    def test_returns_app_static_files(self):
        self.mkfile('app-1/static/css/app_static.css', 'body { color: blue; }')
        response = self.client.get('/static/css/app_static.css')
        assert_contains(response, 'body { color: blue; }')

    def test_processes_scss_files(self):
        self.mkfile('static/css/simple.scss', '$c: red; body { color: $c; }')
        response = self.client.get('/static/css/simple.css')
        assert_contains(response, 'body {\n  color: red; }')

    def test_returns_processed_scss_files_with_correct_content_type(self):
        self.mkfile('static/css/simple.scss', '$c: red; body { color: $c; }')
        response = self.client.get('/static/css/simple.css')
        assert_equal(response.get('content-type'), 'text/css')

    def test_processes_app_scss_files(self):
        self.mkfile('app-1/static/css/app.scss',
                    '$c: yellow; body { color: $c; }')
        response = self.client.get('/static/css/app.css')
        assert_contains(response, 'body {\n  color: yellow; }')

    def test_processes_scss_files_with_deps(self):
        # @import of a partial (_dep.scss) from the same static tree
        self.mkfile('static/css/folder/_dep.scss', '$c: black;')
        self.mkfile('static/css/with_deps.scss',
                    '@import "folder/dep"; body { color: $c; }')
        response = self.client.get('/static/css/with_deps.css')
        assert_contains(response, 'body {\n  color: black; }')

    def test_processes_scss_files_with_app_deps(self):
        # @import resolving across the app's static dir
        self.mkfile('app-1/static/css/folder/_dep.scss', '$c: white;')
        self.mkfile('static/css/with_app_deps.scss',
                    '@import "folder/dep"; body { color: $c; }')
        response = self.client.get('/static/css/with_app_deps.css')
        assert_contains(response, 'body {\n  color: white; }')

    def test_processes_asset_files_with_unicode_chars(self):
        self.mkfile('static/css/simple.scss',
                    '$c: "é"; a::before { content: $c; }')
        self.mkfile('static/js/simple.coffee', 'a = foo: "é#{2}3"')
        response = self.client.get('/static/css/simple.css')
        assert_contains(response, 'a::before {\n  content: "é"; }')
        response = self.client.get('/static/js/simple.js')
        assert_contains(response, 'foo: "é" + 2 + "3"')
3,327
1,127
import sys
import gzip
import os
import random

import numpy as np


class TestSNP:
    """One 'test SNP' row: its genotype plus allele-specific (AS) read
    counts at linked heterozygous target SNPs."""

    def __init__(self, name, geno_hap1, geno_hap2, AS_target_ref,
                 AS_target_alt, hetps, totals, counts):
        self.name = name                    # SNP identifier
        self.geno_hap1 = geno_hap1          # allele on haplotype 1 (0=ref, 1=alt)
        self.geno_hap2 = geno_hap2          # allele on haplotype 2
        self.AS_target_ref = AS_target_ref  # per-target-SNP reference-allele read counts
        self.AS_target_alt = AS_target_alt  # per-target-SNP alternate-allele read counts
        self.hetps = hetps                  # per-target-SNP heterozygote probabilities
        self.totals = totals                # expected total read count
        self.counts = counts                # observed read count

    def is_het(self):
        """returns True if the test SNP is heterozygous"""
        return self.geno_hap1 != self.geno_hap2

    def is_homo_ref(self):
        """Returns True if test SNP is homozygous for reference allele"""
        return self.geno_hap1 == 0 and self.geno_hap2 == 0

    def is_homo_alt(self):
        """Returns True if test SNP is homozygous for non-reference allele"""
        return self.geno_hap1 == 1 and self.geno_hap2 == 1


# warn only once about duplicated target SNPs (see parse_test_snp)
dup_snp_warn = True


def parse_test_snp(snpinfo, shuffle=False):
    """Build a TestSNP from one whitespace-split input row.

    Fixed column layout (0-based): [2]=snp id, [6]=phased genotype 'a|b',
    [9]=target positions, [10]=het probs, [11]=linkage probs,
    [12]/[13]=ref/alt AS counts (';'-separated), [15]=observed count,
    [16]=expected total.  "NA" means missing.

    When ``shuffle`` is True, ref/alt counts are flipped at random per
    target SNP (used to build null permutations).
    """
    global dup_snp_warn

    snp_id = snpinfo[2]
    tot = 0 if snpinfo[16] == "NA" else float(snpinfo[16])

    if snpinfo[6] == "NA":
        # missing genotype: treat as homozygous reference
        geno_hap1 = 0
        geno_hap2 = 0
    else:
        geno_hap1 = int(snpinfo[6].strip().split("|")[0])
        geno_hap2 = int(snpinfo[6].strip().split("|")[1])

    count = 0 if snpinfo[15] == "NA" else int(snpinfo[15])

    if snpinfo[9].strip() == "NA" or geno_hap1 == geno_hap2:
        # SNP is homozygous, so there is no AS info
        return TestSNP(snp_id, geno_hap1, geno_hap2, [], [], [], tot, count)

    # positions of target SNPs
    snp_locs = np.array([int(y.strip()) for y in snpinfo[9].split(';')])
    # counts of reads that match reference overlapping linked 'target' SNPs
    snp_as_ref = np.array([int(y) for y in snpinfo[12].split(';')])
    # counts of reads that match alternate allele
    snp_as_alt = np.array([int(y) for y in snpinfo[13].split(';')])
    # heterozygote probabilities
    snp_hetps = np.array([np.float64(y.strip()) for y in snpinfo[10].split(';')])
    # linkage probabilities, not currently used
    snp_linkageps = np.array([np.float64(y.strip()) for y in snpinfo[11].split(';')])

    # same SNP should not be provided multiple times, this can create
    # problems with combined test.  Warn (once) and keep unique positions.
    uniq_loc, uniq_idx = np.unique(snp_locs, return_index=True)
    if dup_snp_warn and uniq_loc.shape[0] != snp_locs.shape[0]:
        sys.stderr.write("WARNING: discarding SNPs that are repeated "
                         "multiple times in same line\n")
        dup_snp_warn = False
    snp_as_ref = snp_as_ref[uniq_idx]
    snp_as_alt = snp_as_alt[uniq_idx]
    snp_hetps = snp_hetps[uniq_idx]
    # linkage probabilities currently not used
    snp_linkageps = snp_linkageps[uniq_idx]

    if shuffle:
        # permute allele-specific read counts by flipping them randomly at
        # each SNP.  BUG FIX: `random` was used here without being imported.
        for y in range(len(snp_as_ref)):
            if random.randint(0, 1) == 1:
                snp_as_ref[y], snp_as_alt[y] = snp_as_alt[y], snp_as_ref[y]

    return TestSNP(snp_id, geno_hap1, geno_hap2, snp_as_ref,
                   snp_as_alt, snp_hetps, tot, count)


def open_input_files(in_filename):
    """Open every per-individual input file listed (one path per line) in
    ``in_filename``, skip each file's header line, and return the open
    handles.  Exits with status 2 on a missing file or an empty listing.
    """
    # Imported lazily: `util` is only needed here, and deferring it keeps
    # the rest of this module importable (e.g. for unit tests) without it.
    import util

    if not os.path.exists(in_filename) or not os.path.isfile(in_filename):
        raise IOError("input file %s does not exist or is not a "
                      "regular file\n" % in_filename)

    # read file that contains list of input files
    in_file = open(in_filename, "rt")
    infiles = []
    for line in in_file:
        filename = line.rstrip()
        sys.stderr.write("  " + filename + "\n")
        if (not filename) or (not os.path.exists(filename)) or \
           (not os.path.isfile(filename)):
            # BUG FIX: the message previously interpolated the listing file
            # object (`in_file`) instead of the offending path.
            sys.stderr.write("input file '%s' does not exist or is not a "
                             "regular file\n" % filename)
            sys.exit(2)
        if util.is_gzipped(filename):
            f = gzip.open(filename, "rt")
        else:
            f = open(filename, "rt")
        # skip header
        f.readline()
        infiles.append(f)
    in_file.close()

    if len(infiles) == 0:
        sys.stderr.write("no input files specified in file '%s'\n"
                         % in_filename)
        sys.exit(2)

    return infiles


def read_count_matrices(input_filename, shuffle=False, skip=0, min_counts=0,
                        min_as_counts=0, sample=0):
    """Given an input file that contains paths to input files for all
    individuals, returns a matrix of observed read counts and a matrix of
    expected read counts (rows = target regions, cols = individuals).

    Rows are dropped unless they reach ``min_counts`` total observed reads
    and ``min_as_counts`` allele-specific reads; ``skip`` keeps only every
    (skip+1)-th qualifying row; ``sample`` > 0 subsamples that many rows.
    """
    infiles = open_input_files(input_filename)

    is_finished = False
    count_matrix = []
    expected_matrix = []
    line_num = 0
    skip_num = 0

    while not is_finished:
        is_comment = False
        line_num += 1
        count_line = []
        expected_line = []
        num_as = 0

        for i in range(len(infiles)):
            # read next row from this input file
            line = infiles[i].readline().strip()
            if line.startswith("#") or line.startswith("CHROM"):
                # skip comment lines and header line
                is_comment = True
            elif line:
                if is_finished:
                    raise IOError("All input files should have same number of lines. "
                                  "LINE %d is present in file %s, but not in all input files\n"
                                  % (line_num, infiles[i].name))
                if is_comment:
                    raise IOError("Comment and header lines should be consistent accross "
                                  "all input files. LINE %d is comment or header line in some input files "
                                  "but not in file %s" % (line_num, infiles[i].name))
                # parse test SNP and associated info from input file row
                new_snp = parse_test_snp(line.split(), shuffle=shuffle)
                if new_snp.is_het():
                    num_as += np.sum(new_snp.AS_target_ref) + \
                              np.sum(new_snp.AS_target_alt)
                count_line.append(new_snp.counts)
                expected_line.append(new_snp.totals)
            else:
                # out of lines from at least one file, assume we are finished
                is_finished = True

        if not is_finished and not is_comment:
            if skip_num < skip:
                # skip this row
                skip_num += 1
            else:
                if (sum(count_line) >= min_counts and num_as >= min_as_counts):
                    # this line exceeded minimum number of read counts and AS counts
                    count_matrix.append(count_line)
                    expected_matrix.append(expected_line)
                skip_num = 0

    count_matrix = np.array(count_matrix, dtype=int)
    expected_matrix = np.array(expected_matrix, dtype=np.float64)
    sys.stderr.write("count_matrix dimension: %s\n" % str(count_matrix.shape))
    sys.stderr.write("expect_matrix dimension: %s\n" % str(expected_matrix.shape))

    nrow = count_matrix.shape[0]
    if (sample > 0) and (sample < count_matrix.shape[0]):
        # randomly sample subset of rows without replacement
        sys.stderr.write("randomly sampling %d target regions\n" % sample)
        samp_index = np.arange(nrow)
        np.random.shuffle(samp_index)
        samp_index = samp_index[:sample]
        count_matrix = count_matrix[samp_index,]
        expected_matrix = expected_matrix[samp_index,]
        sys.stderr.write("new count_matrix dimension: %s\n" % str(count_matrix.shape))
        sys.stderr.write("new expect_matrix dimension: %s\n" % str(expected_matrix.shape))

    return count_matrix, expected_matrix
8,083
2,631
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericStackedInline

from .models import Image, Product

"""
To register generics
"""


class ImageInline(GenericStackedInline):
    # Inline editor for the generic (content_type, object_id) Image
    # relation, so images can be attached from the owning model's page.
    model = Image


@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
    """Standalone admin for Image rows (also editable inline above)."""
    list_display = (
        'id',
        'created',
        'modified',
        'content_type',
        'object_id',
        'image',
    )
    list_filter = ('created', 'modified', 'content_type')


@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    """Admin for Product with attached images edited inline."""
    list_display = (
        'id',
        'created',
        'modified',
        'status',
        'activate_date',
        'deactivate_date',
        'name',
        'description',
        'restaurant',
        'unit_price',
        'discount_price',
    )
    list_filter = (
        'created',
        'modified',
        'activate_date',
        'deactivate_date',
        'restaurant',
    )
    search_fields = ('name',)
    inlines = [
        ImageInline,
    ]
1,023
317
#coding=utf-8 from __future__ import absolute_import import pytest import twisted from twisted.trial import unittest from twisted.internet.defer import Deferred from twisted.python import log from stup.twistedutils.deferred_deque import * class DeferredDequeueTest(unittest.TestCase): def __init__(self, *args, **kwargs): self.buffer = [] super(DeferredDequeueTest, self).__init__(*args, **kwargs) def test_all(self): dd = DeferredDeque() dd.append_left('a') dd.append_right('b') self.assertEqual(list(dd.pending), ['a', 'b']) dd.pop_right().addCallback(lambda x: self.buffer.append(x)) self.assertEqual(self.buffer, ['b']) dd.pop_left().addCallback(lambda x: self.buffer.append(x)) self.assertEqual(self.buffer, ['b', 'a']) dd.pop_right().addCallback(lambda x: self.buffer.append(x)) self.assertEqual(self.buffer, ['b', 'a']) dd.append_left('c') self.assertEqual(self.buffer, ['b', 'a', 'c'])
1,025
342
from lxml import etree
import socket
import re
import aiohttp

from quickspy.color import *


class Response:
    """Wrapper around a raw HTTP response body with lazy decoding, lxml
    parsing and regex helpers.

    NOTE(review): the text encoding is stored in a module-level global, so
    the most recently constructed Response decides the decoding for ALL
    instances.  Kept as-is for backward compatibility, but this should
    become an instance attribute.
    """

    def __init__(self, byte, encoding='utf-8'):
        global ENCODING
        ENCODING = encoding
        self.url = None      # filled in by NetEngine.get
        self.html = None     # cached decoded body
        self.byte = byte     # raw response bytes
        self.HTML = None     # cached lxml tree
        self.status = None   # filled in by NetEngine.get

    def get_html(self):
        """Decode the raw body once (with the module ENCODING) and cache it."""
        if self.html is None:
            self.html = self.get_byte().decode(ENCODING)
        return self.html

    def get_byte(self):
        return self.byte

    def xpath(self, exp):
        """Run an XPath expression against the parsed document."""
        return self.get_HTML().xpath(exp)

    def findall(self, exp):
        """Return all regex matches of pattern ``exp`` in the decoded body.

        BUG FIX: re.findall(pattern, string) — the original passed the
        arguments swapped (re.findall(self.get_html(), exp)), treating the
        whole page as the pattern.
        """
        return re.findall(exp, self.get_html())

    def get_HTML(self):
        """Parse the decoded body with lxml once and cache the tree."""
        if self.HTML is None:
            self.HTML = etree.HTML(self.get_html())
        return self.HTML

    def get_url(self):
        return self.url

    def gettitle(self):
        """Return the document's first <title> text (IndexError if absent)."""
        return self.get_HTML().xpath('//title/text()')[0]


class NetEngine:
    """Async HTTP fetcher that also reports activity to a local control
    socket on port 2546."""

    def __init__(self):
        # open the aiohttp HTTP session
        self.session = aiohttp.ClientSession()
        try:
            self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.s.connect(('localhost', 2546))
        except socket.error as msg:
            print(RED(f'at Quickspy.init :{msg}'))

    async def close(self):
        await self.session.close()
        self.s.close()

    async def get(self, url, timeout=10):
        """GET ``url`` and wrap the body/status/url in a Response."""
        async with self.session.get(url, timeout=timeout) as response:
            print(f'netengine:timeot = {timeout}')
            temp = await response.read()
            _response = Response(temp)
            _response.url = response.url
            _response.status = response.status
            # notify the local controller that a request completed
            self.s.send("eval self.nemanager.reg('default').add()".encode())
            return _response
1,845
574
import demjson
from setting.project_config import *


def read_json(json_absolute_path):
    """
    Read a json file.
    :param json_absolute_path: absolute path of the json file to read
    :return:
    """
    with open(json_absolute_path, "r", encoding="utf-8") as f:
        data_list = demjson.decode(f.read(), encoding="utf-8")
    return data_list
    # returns a list of data


def write_json(json_relative, data_list):
    """
    Write a json file.
    :param json_relative: first argument: path of the json file to write,
        relative to yaml_path
    :param data_list: second argument: the data to serialize
    :return:
    """
    with open(yaml_path + json_relative, "wb") as f:
        f.write(demjson.encode(data_list, encoding="utf-8"))
    return json_relative
    # returns the relative path of the json file


def merge_json():
    """
    Merge the contents of all json files under yaml_path.
    The file named ``first_test_case_file`` is appended last and the list
    is then reversed, so its content ends up first in the merged result.
    :return:
    """
    json_list = []
    for root, dirs, files in os.walk(yaml_path):
        # root is the current directory path
        # dirs is the list of subdirectories in the current path
        # files is the list of non-directory files in the current path
        for i in files:
            if os.path.splitext(i)[1] == '.json':
                # os.path.splitext() splits a path into (name, extension)
                if i != first_test_case_file:
                    json_list.append(os.path.join(root, i))
                else:
                    the_first_json = os.path.join(root, first_test_case_file)
                    json_list.append(the_first_json)
                    # add the first json file
    json_list.reverse()
    # reverse the ordering
    temporary_list = []
    for i in json_list:
        if i:
            j = read_json(i)
            # call the json-reading helper above
            if j:
                temporary_list.extend(j)
                # append the elements to the list one file at a time
    return temporary_list
    # returns a temporary merged list
1,577
680
def split_entry(entry):
    """Split a colon-separated entry into a (first, second, number) triple.

    A three-part entry "a:b:n" yields (a, b, int(n)); any other entry with
    at least two parts yields its first two fields with the number
    defaulting to 10.
    """
    parts = entry.split(':')
    if len(parts) == 3:
        first, second, number = parts
        return first, second, int(number)
    return parts[0], parts[1], 10
174
61
from instascrape.commands import pretty_print, load_obj
from colorama import Fore, Style


def whoami_handler(**_):
    """Print the current session's profile, authentication state and identity.

    Fix: the user-id line previously ran unconditionally, so when no session
    was loaded (`insta is None`) the handler crashed with AttributeError on
    `insta.my_user_id` right after reporting "Authenticated: False". The id
    is now printed only for an authenticated session.
    """
    insta = load_obj()
    if insta is None:
        name = "NOBODY"
        print(Fore.BLUE + "Authenticated:", Fore.RED + "False")
    else:
        name = insta.my_username
        data = insta.me().as_dict()
        print(Style.BRIGHT + "\033[4m" + "Your Profile")
        pretty_print(data)
        print()
        print(Fore.BLUE + "Authenticated:", Fore.GREEN + "True")
        # Only an authenticated session has a user id.
        print(Fore.LIGHTCYAN_EX + "Your ID is", Style.BRIGHT + str(insta.my_user_id))
    print(Fore.LIGHTCYAN_EX + "You are", Style.BRIGHT + name)
    print(Fore.LIGHTBLACK_EX + "“I was basically born knowing how to casually stalk people on social media.”")
    print(Fore.LIGHTBLACK_EX + " -- Becky Albertalli, The Upside of Unrequited")
834
308
#!/usr/bin/env python3

# Testing the attack log class

import unittest

from app.attack_log import AttackLog
import app.attack_log

# from unittest.mock import patch, call
# from app.exceptions import ConfigurationError

# https://docs.python.org/3/library/unittest.html


class TestMachineConfig(unittest.TestCase):
    """ Test the AttackLog event recording class """

    def _assert_first_entry(self, attack_log, expected):
        """ Assert the first attack_log entry carries the expected key/value pairs

        Collapses the long runs of per-key assertEqual calls that were
        copy-pasted through every test method below.

        @param attack_log: the AttackLog instance to inspect
        @param expected: dict of key/value pairs the first entry must contain
        """
        entry = attack_log.get_dict()["attack_log"][0]
        for key, value in expected.items():
            self.assertEqual(entry[key], value)

    def test_init(self):
        """ The init is empty """
        al = AttackLog()
        self.assertIsNotNone(al)
        default = {"boilerplate": {'log_format_major_version': 1,
                                   'log_format_minor_version': 1},
                   "system_overview": [],
                   "attack_log": []}
        self.assertEqual(al.get_dict(), default)

    def test_caldera_attack_start(self):
        """ Starting a caldera attack """
        al = AttackLog()
        al.start_caldera_attack(source="asource",
                                paw="apaw",
                                group="agroup",
                                ability_id="aability_id",
                                ttp="1234",
                                name="aname",
                                description="adescription")
        self._assert_first_entry(al, {"event": "start",
                                      "type": "attack",
                                      "sub_type": "caldera",
                                      "source": "asource",
                                      "target_paw": "apaw",
                                      "target_group": "agroup",
                                      "ability_id": "aability_id",
                                      "hunting_tag": "MITRE_1234",
                                      "name": "aname",
                                      "description": "adescription"})

    def test_caldera_attack_stop(self):
        """ Stopping a caldera attack """
        al = AttackLog()
        al.stop_caldera_attack(source="asource",
                               paw="apaw",
                               group="agroup",
                               ability_id="aability_id",
                               ttp="1234",
                               name="aname",
                               description="adescription")
        self._assert_first_entry(al, {"event": "stop",
                                      "type": "attack",
                                      "sub_type": "caldera",
                                      "source": "asource",
                                      "target_paw": "apaw",
                                      "target_group": "agroup",
                                      "ability_id": "aability_id",
                                      "hunting_tag": "MITRE_1234",
                                      "name": "aname",
                                      "description": "adescription"})

    def test_kali_attack_start(self):
        """ Starting a kali attack """
        al = AttackLog()
        al.start_kali_attack(source="asource",
                             target="a target",
                             attack_name="a name",
                             ttp="1234")
        self._assert_first_entry(al, {"event": "start",
                                      "type": "attack",
                                      "sub_type": "kali",
                                      "source": "asource",
                                      "target": "a target",
                                      "kali_name": "a name",
                                      "hunting_tag": "MITRE_1234"})

    def test_kali_attack_stop(self):
        """ Stopping a kali attack """
        al = AttackLog()
        al.stop_kali_attack(source="asource",
                            target="a target",
                            attack_name="a name",
                            ttp="1234")
        self._assert_first_entry(al, {"event": "stop",
                                      "type": "attack",
                                      "sub_type": "kali",
                                      "source": "asource",
                                      "target": "a target",
                                      "kali_name": "a name",
                                      "hunting_tag": "MITRE_1234"})

    def test_narration_start(self):
        """ Starting a narration """
        al = AttackLog()
        text = "texttextext"
        al.start_narration(text)
        self._assert_first_entry(al, {"event": "start",
                                      "type": "narration",
                                      "sub_type": "user defined narration",
                                      "text": text})

    def test_build_start(self):
        """ Starting a build """
        al = AttackLog()
        al.start_build(dl_uri="asource",
                       dl_uris="a target",
                       payload="1234",
                       platform="a name",
                       architecture="arch",
                       lhost="lhost",
                       lport=8080,
                       filename="afilename",
                       encoding="encoded",
                       encoded_filename="ef",
                       sRDI_conversion=True,
                       for_step=4,
                       comment="this is a comment")
        self._assert_first_entry(al, {"event": "start",
                                      "type": "build",
                                      "dl_uri": "asource",
                                      "dl_uris": "a target",
                                      "payload": "1234",
                                      "platform": "a name",
                                      "architecture": "arch",
                                      "lhost": "lhost",
                                      "lport": 8080,
                                      "filename": "afilename",
                                      "encoding": "encoded",
                                      "encoded_filename": "ef",
                                      "sRDI_conversion": True,
                                      "for_step": 4,
                                      "comment": "this is a comment"})

    def test_build_start_default(self):
        """ Starting a build default values"""
        al = AttackLog()
        al.start_build()
        self._assert_first_entry(al, {"event": "start",
                                      "type": "build",
                                      "dl_uri": None,
                                      "dl_uris": None,
                                      "payload": None,
                                      "platform": None,
                                      "architecture": None,
                                      "lhost": None,
                                      "lport": None,
                                      "filename": None,
                                      "encoding": None,
                                      "encoded_filename": None,
                                      "sRDI_conversion": False,
                                      "for_step": None,
                                      "comment": None})

    def test_build_stop(self):
        """ Stopping a build """
        al = AttackLog()
        al.stop_build(logid="lid")
        self._assert_first_entry(al, {"event": "stop",
                                      "type": "build",
                                      "logid": "lid"})

    def test_metasploit_attack_start(self):
        """ Starting a metasploit attack """
        al = AttackLog()
        al.start_metasploit_attack(source="asource",
                                   target="a target",
                                   metasploit_command="a name",
                                   ttp="1234")
        self._assert_first_entry(al, {"event": "start",
                                      "type": "attack",
                                      "sub_type": "metasploit",
                                      "source": "asource",
                                      "target": "a target",
                                      "metasploit_command": "a name",
                                      "hunting_tag": "MITRE_1234"})

    def test_metasploit_attack_stop(self):
        """ Stopping a metasploit attack """
        al = AttackLog()
        al.stop_metasploit_attack(source="asource",
                                  target="a target",
                                  metasploit_command="a name",
                                  ttp="1234")
        self._assert_first_entry(al, {"event": "stop",
                                      "type": "attack",
                                      "sub_type": "metasploit",
                                      "source": "asource",
                                      "target": "a target",
                                      "metasploit_command": "a name",
                                      "hunting_tag": "MITRE_1234"})

    def test_attack_plugin_start(self):
        """ Starting a attack plugin """
        al = AttackLog()
        al.start_attack_plugin(source="asource",
                               target="a target",
                               plugin_name="a name",
                               ttp="1234")
        self._assert_first_entry(al, {"event": "start",
                                      "type": "attack",
                                      "sub_type": "attack_plugin",
                                      "source": "asource",
                                      "target": "a target",
                                      "plugin_name": "a name",
                                      "hunting_tag": "MITRE_1234"})

    def test_attack_plugin_stop(self):
        """ Stopping a attack plugin"""
        al = AttackLog()
        al.stop_attack_plugin(source="asource",
                              target="a target",
                              plugin_name="a name",
                              ttp="1234")
        self._assert_first_entry(al, {"event": "stop",
                                      "type": "attack",
                                      "sub_type": "attack_plugin",
                                      "source": "asource",
                                      "target": "a target",
                                      "plugin_name": "a name",
                                      "hunting_tag": "MITRE_1234"})

    def test_file_write_start(self):
        """ Starting a file write """
        al = AttackLog()
        al.start_file_write(source="asource",
                            target="a target",
                            file_name="a generic filename")
        self._assert_first_entry(al, {"event": "start",
                                      "type": "dropping_file",
                                      "sub_type": "by PurpleDome",
                                      "source": "asource",
                                      "target": "a target",
                                      "file_name": "a generic filename"})

    def test_file_write_stop(self):
        """ Stopping a file write """
        al = AttackLog()
        al.stop_file_write(source="asource",
                           target="a target",
                           file_name="a generic filename")
        self._assert_first_entry(al, {"event": "stop",
                                      "type": "dropping_file",
                                      "sub_type": "by PurpleDome",
                                      "source": "asource",
                                      "target": "a target",
                                      "file_name": "a generic filename"})

    def test_execute_payload_start(self):
        """ Starting a execute payload """
        al = AttackLog()
        al.start_execute_payload(source="asource",
                                 target="a target",
                                 command="a generic command")
        self._assert_first_entry(al, {"event": "start",
                                      "type": "execute_payload",
                                      "sub_type": "by PurpleDome",
                                      "source": "asource",
                                      "target": "a target",
                                      "command": "a generic command"})

    def test_execute_payload_stop(self):
        """ Stopping a execute payload """
        al = AttackLog()
        al.stop_execute_payload(source="asource",
                                target="a target",
                                command="a generic command")
        self._assert_first_entry(al, {"event": "stop",
                                      "type": "execute_payload",
                                      "sub_type": "by PurpleDome",
                                      "source": "asource",
                                      "target": "a target",
                                      "command": "a generic command"})

    def test_mitre_fix_ttp_is_none(self):
        """ Testing the mitre ttp fix for ttp being none """
        self.assertEqual(app.attack_log.__mitre_fix_ttp__(None), "")

    def test_mitre_fix_ttp_is_MITRE_SOMETHING(self):
        """ Testing the mitre ttp fix for ttp being MITRE_ """
        self.assertEqual(app.attack_log.__mitre_fix_ttp__("MITRE_FOO"), "MITRE_FOO")

    # tests for a bunch of default data covering caldera attacks. That way
    # we will have some fallback if no data is submitted:

    def test_get_caldera_default_name_missing(self):
        """ Testing getting the caldera default name """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_name("missing"), None)

    def test_get_caldera_default_name(self):
        """ Testing getting the caldera default name """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_name("bd527b63-9f9e-46e0-9816-b8434d2b8989"),
                         "whoami")

    def test_get_caldera_default_description_missing(self):
        """ Testing getting the caldera default description """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_description("missing"), None)

    def test_get_caldera_default_description(self):
        """ Testing getting the caldera default description """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_description("bd527b63-9f9e-46e0-9816-b8434d2b8989"),
                         "Obtain user from current session")

    def test_get_caldera_default_tactics_missing(self):
        """ Testing getting the caldera default tactics """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_tactics("missing", None), None)

    def test_get_caldera_default_tactics(self):
        """ Testing getting the caldera default tactics """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_tactics("bd527b63-9f9e-46e0-9816-b8434d2b8989", None),
                         "System Owner/User Discovery")

    def test_get_caldera_default_tactics_id_missing(self):
        """ Testing getting the caldera default tactics_id """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_tactics_id("missing", None), None)

    def test_get_caldera_default_tactics_id(self):
        """ Testing getting the caldera default tactics_id """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_tactics_id("bd527b63-9f9e-46e0-9816-b8434d2b8989", None),
                         "T1033")

    def test_get_caldera_default_situation_description_missing(self):
        """ Testing getting the caldera default situation_description """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_situation_description("missing"), None)

    def test_get_caldera_default_situation_description(self):
        """ Testing getting the caldera default situation_description """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_situation_description("bd527b63-9f9e-46e0-9816-b8434d2b8989"),
                         None)

    def test_get_caldera_default_countermeasure_missing(self):
        """ Testing getting the caldera default countermeasure """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_countermeasure("missing"), None)

    def test_get_caldera_default_countermeasure(self):
        """ Testing getting the caldera default countermeasure """
        al = AttackLog()
        self.assertEqual(al.get_caldera_default_countermeasure("bd527b63-9f9e-46e0-9816-b8434d2b8989"),
                         None)
20,131
6,468