text
stringlengths
957
885k
<gh_stars>10-100 import unittest import time import winsound from selenium import webdriver from selenium.webdriver.common.keys import Keys from configparser import ConfigParser interval = 60 bad_interval = 60 number_fail = 0 iteration = 0 count = None count2 = None username = "" driver_path = "" password = "" window_x = 0 window_y = 0 def read_config(): global driver_path, username, password, interval, bad_interval, window_x, window_y config = ConfigParser() config.read('config.ini') driver_path = config.get('main', 'chrome webdriver location') username = config.get('main', 'username') password = config.get('main', 'password') interval = config.getint('settings', 'time interval') bad_interval = config.getint('settings', 'fail time interval') window_x = config.getint('settings', 'window x-position') window_y = config.getint('settings', 'window y-position') #Emit sound if event occurs def notify(mode = 1): #New notification if mode == 1: winsound.Beep(300, 250) winsound.Beep(600, 125) winsound.Beep(600, 125) #Bot-detected notification elif mode == 2: winsound.Beep(300, 200) class PythonOrgSearch(unittest.TestCase): def setUp(self): read_config() #Log in def login_fiverr(self): driver = self.driver driver.implicitly_wait(10) driver.get("http://www.fiverr.com/login") username_el = driver.find_element_by_xpath("//input[@placeholder='Email / Username']") username_el.send_keys(username) password_el = driver.find_element_by_xpath("//input[@placeholder='Password']") password_el.send_keys(password) submit = driver.find_element_by_xpath("//input[@id='login-btn']").click() #[Helper - check_active] Gets the # of buyer requests def get_count(self): global count, number_fail try: driver = self.driver count_el = driver.find_element_by_xpath("//a[@data-gtm-label='all requests']") count_local = count_el.get_attribute("data-count") return count_local except: notify(2) time.sleep(bad_interval) driver.quit() number_fail = number_fail + 1 return count #Goes to requests page to grab # 
of buyer requests def check_active(self): self.driver = webdriver.Chrome(driver_path) driver = self.driver driver.set_window_position(window_x, window_y) self.login_fiverr() time.sleep(1) driver.get("https://www.fiverr.com/users/"+username+"/requests") count = self.get_count() time.sleep(interval) driver.quit() return count #Loop until the number of requests increases def test_fiverr(self): global count, count2, iteration #Initialize starting # count = self.check_active() count2 = count iteration = 0 while count >= count2: count = count2 iteration = iteration + 1 time.sleep(5) try: count2 = self.check_active() except: notify(2) self.driver.quit() time.sleep(5) continue #Prints the current number of active requests print(str(iteration) + ": " + str(count2)) def tearDown(self): notify() # Displays what # of requests changed from and to #print(str(count) + " -> " + str(count2)) # Displays # of times bot page has been shown. #print("Test fail rate: " + str(number_fail) + "/" + str(iteration) ) try: # Shut down the driver if it's still up for some reason self.driver.close() except: return if __name__ == "__main__": unittest.main()
<reponame>jensenbox/python-jamf # coding: utf-8 """ Jamf Pro API ## Overview This is a sample Jamf Pro server which allows for usage without any authentication. The Jamf Pro environment which supports the Try it Out functionality does not run the current beta version of Jamf Pro, thus any newly added endpoints will result in an error and should be used soley for documentation purposes. # noqa: E501 The version of the OpenAPI document: 10.25.0 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from jamf.configuration import Configuration class ComputerGeneral(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'name': 'str', 'last_ip_address': 'str', 'last_reported_ip': 'str', 'jamf_binary_version': 'str', 'platform': 'str', 'barcode1': 'str', 'barcode2': 'str', 'asset_tag': 'str', 'remote_management': 'ComputerRemoteManagement', 'supervised': 'bool', 'mdm_capable': 'ComputerMdmCapability', 'report_date': 'datetime', 'last_contact_time': 'datetime', 'last_cloud_backup_date': 'datetime', 'last_enrolled_date': 'datetime', 'mdm_profile_expiration': 'datetime', 'initial_entry_date': 'date', 'distribution_point': 'str', 'enrollment_method': 'EnrollmentMethod', 'site': 'V1Site', 'itunes_store_account_active': 'bool', 'enrolled_via_automated_device_enrollment': 'bool', 'user_approved_mdm': 'bool', 'extension_attributes': 'list[ComputerExtensionAttribute]' } attribute_map = { 'name': 'name', 'last_ip_address': 'lastIpAddress', 'last_reported_ip': 'lastReportedIp', 'jamf_binary_version': 'jamfBinaryVersion', 'platform': 'platform', 'barcode1': 'barcode1', 'barcode2': 'barcode2', 'asset_tag': 'assetTag', 'remote_management': 
'remoteManagement', 'supervised': 'supervised', 'mdm_capable': 'mdmCapable', 'report_date': 'reportDate', 'last_contact_time': 'lastContactTime', 'last_cloud_backup_date': 'lastCloudBackupDate', 'last_enrolled_date': 'lastEnrolledDate', 'mdm_profile_expiration': 'mdmProfileExpiration', 'initial_entry_date': 'initialEntryDate', 'distribution_point': 'distributionPoint', 'enrollment_method': 'enrollmentMethod', 'site': 'site', 'itunes_store_account_active': 'itunesStoreAccountActive', 'enrolled_via_automated_device_enrollment': 'enrolledViaAutomatedDeviceEnrollment', 'user_approved_mdm': 'userApprovedMdm', 'extension_attributes': 'extensionAttributes' } def __init__(self, name=None, last_ip_address=None, last_reported_ip=None, jamf_binary_version=None, platform=None, barcode1=None, barcode2=None, asset_tag=None, remote_management=None, supervised=None, mdm_capable=None, report_date=None, last_contact_time=None, last_cloud_backup_date=None, last_enrolled_date=None, mdm_profile_expiration=None, initial_entry_date=None, distribution_point=None, enrollment_method=None, site=None, itunes_store_account_active=None, enrolled_via_automated_device_enrollment=None, user_approved_mdm=None, extension_attributes=None, local_vars_configuration=None): # noqa: E501 """ComputerGeneral - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._name = None self._last_ip_address = None self._last_reported_ip = None self._jamf_binary_version = None self._platform = None self._barcode1 = None self._barcode2 = None self._asset_tag = None self._remote_management = None self._supervised = None self._mdm_capable = None self._report_date = None self._last_contact_time = None self._last_cloud_backup_date = None self._last_enrolled_date = None self._mdm_profile_expiration = None self._initial_entry_date = None self._distribution_point = None self._enrollment_method = 
None self._site = None self._itunes_store_account_active = None self._enrolled_via_automated_device_enrollment = None self._user_approved_mdm = None self._extension_attributes = None self.discriminator = None if name is not None: self.name = name if last_ip_address is not None: self.last_ip_address = last_ip_address if last_reported_ip is not None: self.last_reported_ip = last_reported_ip if jamf_binary_version is not None: self.jamf_binary_version = jamf_binary_version if platform is not None: self.platform = platform if barcode1 is not None: self.barcode1 = barcode1 if barcode2 is not None: self.barcode2 = barcode2 if asset_tag is not None: self.asset_tag = asset_tag if remote_management is not None: self.remote_management = remote_management if supervised is not None: self.supervised = supervised if mdm_capable is not None: self.mdm_capable = mdm_capable if report_date is not None: self.report_date = report_date if last_contact_time is not None: self.last_contact_time = last_contact_time if last_cloud_backup_date is not None: self.last_cloud_backup_date = last_cloud_backup_date if last_enrolled_date is not None: self.last_enrolled_date = last_enrolled_date if mdm_profile_expiration is not None: self.mdm_profile_expiration = mdm_profile_expiration if initial_entry_date is not None: self.initial_entry_date = initial_entry_date if distribution_point is not None: self.distribution_point = distribution_point if enrollment_method is not None: self.enrollment_method = enrollment_method if site is not None: self.site = site if itunes_store_account_active is not None: self.itunes_store_account_active = itunes_store_account_active if enrolled_via_automated_device_enrollment is not None: self.enrolled_via_automated_device_enrollment = enrolled_via_automated_device_enrollment if user_approved_mdm is not None: self.user_approved_mdm = user_approved_mdm if extension_attributes is not None: self.extension_attributes = extension_attributes @property def name(self): """Gets the 
name of this ComputerGeneral. # noqa: E501 :return: The name of this ComputerGeneral. # noqa: E501 :rtype: str """ return self._name @name.setter def name(self, name): """Sets the name of this ComputerGeneral. :param name: The name of this ComputerGeneral. # noqa: E501 :type name: str """ self._name = name @property def last_ip_address(self): """Gets the last_ip_address of this ComputerGeneral. # noqa: E501 :return: The last_ip_address of this ComputerGeneral. # noqa: E501 :rtype: str """ return self._last_ip_address @last_ip_address.setter def last_ip_address(self, last_ip_address): """Sets the last_ip_address of this ComputerGeneral. :param last_ip_address: The last_ip_address of this ComputerGeneral. # noqa: E501 :type last_ip_address: str """ self._last_ip_address = last_ip_address @property def last_reported_ip(self): """Gets the last_reported_ip of this ComputerGeneral. # noqa: E501 :return: The last_reported_ip of this ComputerGeneral. # noqa: E501 :rtype: str """ return self._last_reported_ip @last_reported_ip.setter def last_reported_ip(self, last_reported_ip): """Sets the last_reported_ip of this ComputerGeneral. :param last_reported_ip: The last_reported_ip of this ComputerGeneral. # noqa: E501 :type last_reported_ip: str """ self._last_reported_ip = last_reported_ip @property def jamf_binary_version(self): """Gets the jamf_binary_version of this ComputerGeneral. # noqa: E501 :return: The jamf_binary_version of this ComputerGeneral. # noqa: E501 :rtype: str """ return self._jamf_binary_version @jamf_binary_version.setter def jamf_binary_version(self, jamf_binary_version): """Sets the jamf_binary_version of this ComputerGeneral. :param jamf_binary_version: The jamf_binary_version of this ComputerGeneral. # noqa: E501 :type jamf_binary_version: str """ self._jamf_binary_version = jamf_binary_version @property def platform(self): """Gets the platform of this ComputerGeneral. # noqa: E501 :return: The platform of this ComputerGeneral. 
# noqa: E501 :rtype: str """ return self._platform @platform.setter def platform(self, platform): """Sets the platform of this ComputerGeneral. :param platform: The platform of this ComputerGeneral. # noqa: E501 :type platform: str """ self._platform = platform @property def barcode1(self): """Gets the barcode1 of this ComputerGeneral. # noqa: E501 :return: The barcode1 of this ComputerGeneral. # noqa: E501 :rtype: str """ return self._barcode1 @barcode1.setter def barcode1(self, barcode1): """Sets the barcode1 of this ComputerGeneral. :param barcode1: The barcode1 of this ComputerGeneral. # noqa: E501 :type barcode1: str """ self._barcode1 = barcode1 @property def barcode2(self): """Gets the barcode2 of this ComputerGeneral. # noqa: E501 :return: The barcode2 of this ComputerGeneral. # noqa: E501 :rtype: str """ return self._barcode2 @barcode2.setter def barcode2(self, barcode2): """Sets the barcode2 of this ComputerGeneral. :param barcode2: The barcode2 of this ComputerGeneral. # noqa: E501 :type barcode2: str """ self._barcode2 = barcode2 @property def asset_tag(self): """Gets the asset_tag of this ComputerGeneral. # noqa: E501 :return: The asset_tag of this ComputerGeneral. # noqa: E501 :rtype: str """ return self._asset_tag @asset_tag.setter def asset_tag(self, asset_tag): """Sets the asset_tag of this ComputerGeneral. :param asset_tag: The asset_tag of this ComputerGeneral. # noqa: E501 :type asset_tag: str """ self._asset_tag = asset_tag @property def remote_management(self): """Gets the remote_management of this ComputerGeneral. # noqa: E501 :return: The remote_management of this ComputerGeneral. # noqa: E501 :rtype: ComputerRemoteManagement """ return self._remote_management @remote_management.setter def remote_management(self, remote_management): """Sets the remote_management of this ComputerGeneral. :param remote_management: The remote_management of this ComputerGeneral. 
# noqa: E501 :type remote_management: ComputerRemoteManagement """ self._remote_management = remote_management @property def supervised(self): """Gets the supervised of this ComputerGeneral. # noqa: E501 :return: The supervised of this ComputerGeneral. # noqa: E501 :rtype: bool """ return self._supervised @supervised.setter def supervised(self, supervised): """Sets the supervised of this ComputerGeneral. :param supervised: The supervised of this ComputerGeneral. # noqa: E501 :type supervised: bool """ self._supervised = supervised @property def mdm_capable(self): """Gets the mdm_capable of this ComputerGeneral. # noqa: E501 :return: The mdm_capable of this ComputerGeneral. # noqa: E501 :rtype: ComputerMdmCapability """ return self._mdm_capable @mdm_capable.setter def mdm_capable(self, mdm_capable): """Sets the mdm_capable of this ComputerGeneral. :param mdm_capable: The mdm_capable of this ComputerGeneral. # noqa: E501 :type mdm_capable: ComputerMdmCapability """ self._mdm_capable = mdm_capable @property def report_date(self): """Gets the report_date of this ComputerGeneral. # noqa: E501 :return: The report_date of this ComputerGeneral. # noqa: E501 :rtype: datetime """ return self._report_date @report_date.setter def report_date(self, report_date): """Sets the report_date of this ComputerGeneral. :param report_date: The report_date of this ComputerGeneral. # noqa: E501 :type report_date: datetime """ self._report_date = report_date @property def last_contact_time(self): """Gets the last_contact_time of this ComputerGeneral. # noqa: E501 :return: The last_contact_time of this ComputerGeneral. # noqa: E501 :rtype: datetime """ return self._last_contact_time @last_contact_time.setter def last_contact_time(self, last_contact_time): """Sets the last_contact_time of this ComputerGeneral. :param last_contact_time: The last_contact_time of this ComputerGeneral. 
# noqa: E501 :type last_contact_time: datetime """ self._last_contact_time = last_contact_time @property def last_cloud_backup_date(self): """Gets the last_cloud_backup_date of this ComputerGeneral. # noqa: E501 :return: The last_cloud_backup_date of this ComputerGeneral. # noqa: E501 :rtype: datetime """ return self._last_cloud_backup_date @last_cloud_backup_date.setter def last_cloud_backup_date(self, last_cloud_backup_date): """Sets the last_cloud_backup_date of this ComputerGeneral. :param last_cloud_backup_date: The last_cloud_backup_date of this ComputerGeneral. # noqa: E501 :type last_cloud_backup_date: datetime """ self._last_cloud_backup_date = last_cloud_backup_date @property def last_enrolled_date(self): """Gets the last_enrolled_date of this ComputerGeneral. # noqa: E501 :return: The last_enrolled_date of this ComputerGeneral. # noqa: E501 :rtype: datetime """ return self._last_enrolled_date @last_enrolled_date.setter def last_enrolled_date(self, last_enrolled_date): """Sets the last_enrolled_date of this ComputerGeneral. :param last_enrolled_date: The last_enrolled_date of this ComputerGeneral. # noqa: E501 :type last_enrolled_date: datetime """ self._last_enrolled_date = last_enrolled_date @property def mdm_profile_expiration(self): """Gets the mdm_profile_expiration of this ComputerGeneral. # noqa: E501 :return: The mdm_profile_expiration of this ComputerGeneral. # noqa: E501 :rtype: datetime """ return self._mdm_profile_expiration @mdm_profile_expiration.setter def mdm_profile_expiration(self, mdm_profile_expiration): """Sets the mdm_profile_expiration of this ComputerGeneral. :param mdm_profile_expiration: The mdm_profile_expiration of this ComputerGeneral. # noqa: E501 :type mdm_profile_expiration: datetime """ self._mdm_profile_expiration = mdm_profile_expiration @property def initial_entry_date(self): """Gets the initial_entry_date of this ComputerGeneral. # noqa: E501 :return: The initial_entry_date of this ComputerGeneral. 
# noqa: E501 :rtype: date """ return self._initial_entry_date @initial_entry_date.setter def initial_entry_date(self, initial_entry_date): """Sets the initial_entry_date of this ComputerGeneral. :param initial_entry_date: The initial_entry_date of this ComputerGeneral. # noqa: E501 :type initial_entry_date: date """ self._initial_entry_date = initial_entry_date @property def distribution_point(self): """Gets the distribution_point of this ComputerGeneral. # noqa: E501 :return: The distribution_point of this ComputerGeneral. # noqa: E501 :rtype: str """ return self._distribution_point @distribution_point.setter def distribution_point(self, distribution_point): """Sets the distribution_point of this ComputerGeneral. :param distribution_point: The distribution_point of this ComputerGeneral. # noqa: E501 :type distribution_point: str """ self._distribution_point = distribution_point @property def enrollment_method(self): """Gets the enrollment_method of this ComputerGeneral. # noqa: E501 :return: The enrollment_method of this ComputerGeneral. # noqa: E501 :rtype: EnrollmentMethod """ return self._enrollment_method @enrollment_method.setter def enrollment_method(self, enrollment_method): """Sets the enrollment_method of this ComputerGeneral. :param enrollment_method: The enrollment_method of this ComputerGeneral. # noqa: E501 :type enrollment_method: EnrollmentMethod """ self._enrollment_method = enrollment_method @property def site(self): """Gets the site of this ComputerGeneral. # noqa: E501 :return: The site of this ComputerGeneral. # noqa: E501 :rtype: V1Site """ return self._site @site.setter def site(self, site): """Sets the site of this ComputerGeneral. :param site: The site of this ComputerGeneral. # noqa: E501 :type site: V1Site """ self._site = site @property def itunes_store_account_active(self): """Gets the itunes_store_account_active of this ComputerGeneral. # noqa: E501 :return: The itunes_store_account_active of this ComputerGeneral. 
# noqa: E501 :rtype: bool """ return self._itunes_store_account_active @itunes_store_account_active.setter def itunes_store_account_active(self, itunes_store_account_active): """Sets the itunes_store_account_active of this ComputerGeneral. :param itunes_store_account_active: The itunes_store_account_active of this ComputerGeneral. # noqa: E501 :type itunes_store_account_active: bool """ self._itunes_store_account_active = itunes_store_account_active @property def enrolled_via_automated_device_enrollment(self): """Gets the enrolled_via_automated_device_enrollment of this ComputerGeneral. # noqa: E501 :return: The enrolled_via_automated_device_enrollment of this ComputerGeneral. # noqa: E501 :rtype: bool """ return self._enrolled_via_automated_device_enrollment @enrolled_via_automated_device_enrollment.setter def enrolled_via_automated_device_enrollment(self, enrolled_via_automated_device_enrollment): """Sets the enrolled_via_automated_device_enrollment of this ComputerGeneral. :param enrolled_via_automated_device_enrollment: The enrolled_via_automated_device_enrollment of this ComputerGeneral. # noqa: E501 :type enrolled_via_automated_device_enrollment: bool """ self._enrolled_via_automated_device_enrollment = enrolled_via_automated_device_enrollment @property def user_approved_mdm(self): """Gets the user_approved_mdm of this ComputerGeneral. # noqa: E501 :return: The user_approved_mdm of this ComputerGeneral. # noqa: E501 :rtype: bool """ return self._user_approved_mdm @user_approved_mdm.setter def user_approved_mdm(self, user_approved_mdm): """Sets the user_approved_mdm of this ComputerGeneral. :param user_approved_mdm: The user_approved_mdm of this ComputerGeneral. # noqa: E501 :type user_approved_mdm: bool """ self._user_approved_mdm = user_approved_mdm @property def extension_attributes(self): """Gets the extension_attributes of this ComputerGeneral. # noqa: E501 :return: The extension_attributes of this ComputerGeneral. 
# noqa: E501 :rtype: list[ComputerExtensionAttribute] """ return self._extension_attributes @extension_attributes.setter def extension_attributes(self, extension_attributes): """Sets the extension_attributes of this ComputerGeneral. :param extension_attributes: The extension_attributes of this ComputerGeneral. # noqa: E501 :type extension_attributes: list[ComputerExtensionAttribute] """ self._extension_attributes = extension_attributes def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ComputerGeneral): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ComputerGeneral): return True return self.to_dict() != other.to_dict()
<reponame>zbmain/PGL # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.append("../") import json import glob import copy import time import tqdm import argparse import numpy as np import pickle as pkl from collections import OrderedDict, namedtuple from ogb.lsc import PCQM4MDataset, PCQM4MEvaluator # from ogb.utils import smiles2graph from rdkit.Chem import AllChem import paddle import paddle.nn as nn import paddle.nn.functional as F import pgl from pgl.utils.data.dataset import Dataset, StreamDataset, HadoopDataset from pgl.utils.data import Dataloader from pgl.utils.logger import log from utils.config import prepare_config, make_dir def load_vocab(vocab_file, freq=0): vocab = {"": 0, "CUT": 1} with open(vocab_file, 'r') as f: for line in f: fields = line.rstrip("\r\n").split('\t') if int(fields[1]) > freq: vocab[fields[0]] = len(vocab) else: vocab[fields[0]] = vocab["CUT"] return vocab def getmorganfingerprint(mol): return list(AllChem.GetMorganFingerprintAsBitVect(mol, 2)) def getmaccsfingerprint(mol): fp = AllChem.GetMACCSKeysFingerprint(mol) return [int(b) for b in fp.ToBitString()] class Subset(Dataset): r""" Subset of a dataset at specified indices. 
Arguments: dataset (Dataset): The whole Dataset indices (sequence): Indices in the whole set selected for subset """ def __init__(self, dataset, indices, mode='train'): self.dataset = dataset if paddle.distributed.get_world_size() == 1 or mode != "train": self.indices = indices else: self.indices = indices[int(paddle.distributed.get_rank())::int( paddle.distributed.get_world_size())] self.mode = mode def __getitem__(self, idx): return self.dataset[self.indices[idx]] def __len__(self): return len(self.indices) class MolDataset(Dataset): def __init__(self, config, mode="train"): log.info("dataset_type is %s" % self.__class__.__name__) self.config = config self.mode = mode self.transform = config.transform self.raw_dataset = PCQM4MDataset(config.base_data_path, only_smiles=True) self.graph_list = None if not config.debug and self.config.preprocess_file is not None: log.info("preprocess graph data in %s" % self.__class__.__name__) processed_path = os.path.join(self.config.base_data_path, "pgl_processed") if not os.path.exists(processed_path): os.makedirs(processed_path) data_file = os.path.join(processed_path, self.config.preprocess_file) if os.path.exists(data_file): log.info("loading graph data from pkl file") self.graph_list = pkl.load(open(data_file, "rb")) else: log.info("loading graph data from smiles data using %s transform" \ % self.transform) self.graph_list = [] for i in tqdm.tqdm(range(len(self.raw_dataset))): # num_nodes, edge_index, node_feat, edge_feat, label smiles, label = self.raw_dataset[i] g = getattr(self, self.transform)(smiles, label) self.graph_list.append(g) pkl.dump(self.graph_list, open(data_file, 'wb')) else: processed_path = os.path.join(self.config.base_data_path, "pgl_processed") vocab_file = os.path.join(processed_path, "junc_vocab.txt") self.vocab = load_vocab(vocab_file) def get_idx_split(self): if self.config.debug: split_idx = {'train': [i for i in range(800)], 'valid': [i + 800 for i in range(100)], 'test': [i + 800 for i in 
range(100)]} return split_idx else: return self.raw_dataset.get_idx_split() def __getitem__(self, idx): return self.graph_list[idx] def __len__(self): return len(self.raw_dataset) class ExMolDataset(Dataset): def __init__(self, config, mode='train', transform=None): self.config = config self.mode = mode self.transform = transform self.raw_dataset = PCQM4MDataset( config.base_data_path, only_smiles=True) log.info("preprocess graph data in %s" % self.__class__.__name__) graph_path = os.path.join(self.config.preprocess_file, "mmap_graph") label_file = os.path.join(self.config.preprocess_file, "label.npy") self.graph = pgl.Graph.load(graph_path) self.label = np.load(label_file) def get_idx_split(self): if self.config.debug: split_idx = {'train': [i for i in range(800)], 'valid': [i + 800 for i in range(100)], 'test': [i + 900 for i in range(100)]} return split_idx else: return self.raw_dataset.get_idx_split() def get_cross_idx_split(self): if self.config.debug: split_idx = {'cross_train_1': [i for i in range(800)], 'cross_train_2': [i for i in range(800)], 'cross_valid_1': [i + 800 for i in range(100)], 'cross_valid_2': [i + 800 for i in range(100)], 'valid_left_1percent': [i + 800 for i in range(100)], 'test': [i + 900 for i in range(100)]} return split_idx else: cross_split_idx_file = os.path.join(self.config.base_data_path, "cross_split.pkl") split_idx = pkl.load(open(cross_split_idx_file, 'rb')) return split_idx def __getitem__(self, idx): num_nodes = self.graph._graph_node_index[idx + 1] - self.graph._graph_node_index[idx] node_shift = self.graph._graph_node_index[idx] edges = self.graph.edges[self.graph._graph_edge_index[idx]:self.graph._graph_edge_index[idx + 1]] edges = edges - node_shift edge_feat = {} for key, value in self.graph.edge_feat.items(): edge_feat[key] = value[self.graph._graph_edge_index[idx]:self.graph._graph_edge_index[idx + 1]] node_feat = {} for key, value in self.graph.node_feat.items(): node_feat[key] = 
value[self.graph._graph_node_index[idx]:self.graph._graph_node_index[idx + 1]] smiles, label = self.raw_dataset[idx] return (pgl.Graph(num_nodes=num_nodes, edges=edges, node_feat=node_feat, edge_feat=edge_feat), self.label[idx], smiles) def __len__(self): return self.graph.num_graph class AuxDataset(Dataset): def __init__(self, config, mode='train', transform=None): self.config = config self.mode = mode self.transform = transform self.raw_dataset = PCQM4MDataset( config.base_data_path, only_smiles=True) log.info("preprocess graph data in %s" % self.__class__.__name__) graph_path = os.path.join(self.config.preprocess_file, "mmap_graph") label_file = os.path.join(self.config.preprocess_file, "label.npy") self.graph = pgl.Graph.load(graph_path) self.pretrain_info_list = pkl.load(open(config.pretrian_path,"rb")) print(f"len of pretrain data: {len(self.pretrain_info_list)}") self.label = np.load(label_file) def get_idx_split(self): return self.raw_dataset.get_idx_split() def get_cross_idx_split(self): cross_split_idx_file = os.path.join(self.config.base_data_path, "cross_split.pkl") split_idx = pkl.load(open(cross_split_idx_file, 'rb')) return split_idx def __getitem__(self, idx): num_nodes = self.graph._graph_node_index[idx + 1] - self.graph._graph_node_index[idx] node_shift = self.graph._graph_node_index[idx] edges = self.graph.edges[self.graph._graph_edge_index[idx]:self.graph._graph_edge_index[idx + 1]] edges = edges - node_shift edge_feat = {} for key, value in self.graph.edge_feat.items(): edge_feat[key] = value[self.graph._graph_edge_index[idx]:self.graph._graph_edge_index[idx + 1]] node_feat = {} for key, value in self.graph.node_feat.items(): node_feat[key] = value[self.graph._graph_node_index[idx]:self.graph._graph_node_index[idx + 1]] #pretrain information pretrain_info = {} cid = self.pretrain_info_list[idx]["context_id"] edge_index = self.pretrain_info_list[idx]["edge_index"] tid = self.pretrain_info_list[idx]["twohop_context"] if num_nodes!=len(tid): 
print(f"idx {idx} num_nodes is : {num_nodes} and len of tid is : {len(tid)}, they are not equal") exit(0) bond_angle_index = self.pretrain_info_list[idx]["bond_angle_index"] bond_angle = self.pretrain_info_list[idx]["bond_angle"] dft_success = self.pretrain_info_list[idx]["dft_success"] bond_angle_mask = np.array(self.pretrain_info_list[idx]["bond_angle"] * 0 + dft_success, dtype=bool) edge_attr_float = np.array(self.pretrain_info_list[idx]["edge_feat_float"]) edge_attr_float_mask = np.array(self.pretrain_info_list[idx]["edge_feat_float"].reshape(-1) * 0 + dft_success, dtype=bool) pretrain_info["edge_index"] = np.array(edge_index) pretrain_info["tid"] = np.array(tid, dtype=int) pretrain_info["bond_angle_index"] = bond_angle_index pretrain_info["bond_angle"] = bond_angle pretrain_info["bond_angle_mask"] = bond_angle_mask pretrain_info["edge_attr_float" ] = edge_attr_float pretrain_info["edge_attr_float_mask"] = edge_attr_float_mask smiles, label = self.raw_dataset[idx] return (pgl.Graph(num_nodes=num_nodes, edges=edges, node_feat=node_feat, edge_feat=edge_feat), self.label[idx], smiles, pretrain_info) def __len__(self): return self.graph.num_graph class CollateFn(object): def __init__(self, config): self.config = config def __call__(self, batch_data): fn = getattr(self, self.config.collate_type) return fn(batch_data) def new_graph_collatefn(self, batch_data): # for graph_data_additional_features_0424.pkl # with graph_transform in mol_features_extract.py graph_list = [] labels = [] for gdata in batch_data: efeat = np.delete(gdata['edge_feat'], -1, axis=1) # remove 3d dist g = pgl.Graph(edges=gdata['edge_index'].T, num_nodes=gdata['num_nodes'], node_feat={'feat': gdata['node_feat']}, edge_feat={'feat': efeat}) graph_list.append(g) labels.append(gdata['label']) labels = np.array(labels, dtype="float32") g = pgl.Graph.batch(graph_list) return {'graph': g}, labels def graph_collatefn(self, batch_data): graph_list = [] labels = [] for gdata in batch_data: g = 
pgl.Graph(edges=gdata['edge_index'].T, num_nodes=gdata['num_nodes'], node_feat={'feat': gdata['node_feat']}, edge_feat={'feat': gdata['edge_feat']}) graph_list.append(g) labels.append(gdata['label']) labels = np.array(labels, dtype="float32") g = pgl.Graph.batch(graph_list) return {'graph': g}, labels def quality_graph_collatefn(self, batch_data): graph_list = [] labels = [] for gdata in batch_data: g = pgl.Graph(edges=gdata['mol_graph']['edge_index'].T, num_nodes=gdata['mol_graph']['num_nodes'], node_feat={'feat': gdata['mol_graph']['node_feat']}, edge_feat={'feat': gdata['mol_graph']['edge_feat']}) graph_list.append(g) labels.append(gdata['label']) labels = np.array(labels, dtype="float32") g = pgl.Graph.batch(graph_list) return {'graph': g}, labels def junc_collatefn(self, batch_data): graph_list = [] labels = [] junc_graph_list = [] mol2junc_list = [] g_offset = 0 junc_g_offset = 0 for gdata in batch_data: g = pgl.Graph(edges=gdata['mol_graph']['edge_index'].T, num_nodes=gdata['mol_graph']['num_nodes'], node_feat={'feat': gdata['mol_graph']['node_feat']}, edge_feat={'feat': gdata['mol_graph']['edge_feat']}) num_nodes = gdata['junction_tree']['num_nodes'] if num_nodes > 0: nfeat = np.array(gdata['junction_tree']['junc_dict'], dtype="int64").reshape(-1, 1) junc_g = pgl.Graph(edges=gdata['junction_tree']['edge_index'].T, num_nodes=num_nodes, node_feat={'feat': nfeat}) offset = np.array([g_offset, junc_g_offset], dtype="int64") mol2junc = gdata['mol2juct'] + offset junc_g_offset += junc_g.num_nodes junc_graph_list.append(junc_g) mol2junc_list.append(mol2junc) graph_list.append(g) labels.append(gdata['label']) g_offset += g.num_nodes mol2junc = np.concatenate(mol2junc_list, axis=0) labels = np.array(labels, dtype="float32") g = pgl.Graph.batch(graph_list) junc_g = pgl.Graph.batch(junc_graph_list) return {'graph': g, 'junc_graph': junc_g, 'mol2junc': mol2junc}, labels def coord3_junc_collatefn(self, batch_data): graph_list = [] labels = [] junc_graph_list = [] 
mol2junc_list = [] g_offset = 0 junc_g_offset = 0 for gdata in batch_data: g = pgl.Graph(edges=gdata['mol_graph']['edge_index'].T, num_nodes=gdata['mol_graph']['num_nodes'], node_feat={'feat': gdata['mol_graph']['node_feat'], '3d': gdata['mol_coord']}, edge_feat={'feat': gdata['mol_graph']['edge_feat']}) num_nodes = gdata['junction_tree']['num_nodes'] if num_nodes > 0: nfeat = np.array(gdata['junction_tree']['junc_dict'], dtype="int64").reshape(-1, 1) junc_g = pgl.Graph(edges=gdata['junction_tree']['edge_index'].T, num_nodes=num_nodes, node_feat={'feat': nfeat}) offset = np.array([g_offset, junc_g_offset], dtype="int64") mol2junc = gdata['mol2juct'] + offset junc_g_offset += junc_g.num_nodes junc_graph_list.append(junc_g) mol2junc_list.append(mol2junc) graph_list.append(g) labels.append(gdata['label']) g_offset += g.num_nodes mol2junc = np.concatenate(mol2junc_list, axis=0) labels = np.array(labels, dtype="float32") g = pgl.Graph.batch(graph_list) junc_g = pgl.Graph.batch(junc_graph_list) return {'graph': g, 'junc_graph': junc_g, 'mol2junc': mol2junc}, labels def fp_collatefn(self, batch_data): graph_list = [] labels = [] mgf_list = [] maccs_list = [] for gdata in batch_data: g = pgl.Graph(edges=gdata['edge_index'].T, num_nodes=gdata['num_nodes'], node_feat={'feat': gdata['node_feat']}, edge_feat={'feat': gdata['edge_feat']}) graph_list.append(g) labels.append(gdata['label']) mgf_list.append(gdata['mgf']) maccs_list.append(gdata['maccs']) labels = np.array(labels, dtype="float32") g = pgl.Graph.batch(graph_list) mgf_feat = np.array(mgf_list, dtype="float32") maccs_feat = np.array(maccs_list, dtype="float32") others = {} others['mgf'] = mgf_feat others['maccs'] = maccs_feat return {'graph': g, 'mgf': mgf_feat, 'maccs': maccs_feat}, labels def ex_collatefn(self, batch_data): graph_list = [] # full_graph_list = [] labels = [] smiles_list = [] #for gdata in batch_data: for g, l, s in batch_data: graph_list.append(g) # full_g = pgl.Graph(num_nodes=g.num_nodes, 
edges=make_full(g.num_nodes)) # full_graph_list.append(full_g) labels.append(l) smiles_list.append(s) labels = np.array(labels, dtype="float32") g = pgl.Graph.batch(graph_list) # full_g = pgl.Graph.batch(full_graph_list) #full_g = None others = {'smiles': smiles_list} return {'graph': g}, labels, others def aux_collatefn(self, batch_data): graph_list = [] # full_graph_list = [] labels = [] smiles_list = [] pretrain_info_list = [] tid_list = [] edge_index_list = [] bond_angle_list = [] bond_angle_index_list = [] bond_angle_mask_list = [] edge_attr_float_list = [] edge_attr_float_mask_list = [] #for gdata in batch_data: total_node_num = 0 for g, l, s , pretrain_info in batch_data: graph_list.append(g) # full_g = pgl.Graph(num_nodes=g.num_nodes, edges=make_full(g.num_nodes)) # full_graph_list.append(full_g) labels.append(l) smiles_list.append(s) tid_list.append(pretrain_info["tid"].reshape(-1,1)) edge_index_list.append(pretrain_info["edge_index"]) bond_angle_list.append(pretrain_info["bond_angle"].reshape(-1,1)) bond_angle_index_list.append(pretrain_info["bond_angle_index"] + total_node_num) bond_angle_mask_list.append(pretrain_info["bond_angle_mask"].reshape(-1,1)) edge_attr_float_list.append(pretrain_info["edge_attr_float"].reshape(-1,1)) edge_attr_float_mask_list.append(pretrain_info["edge_attr_float_mask"].reshape(-1,1)) total_node_num += g.num_nodes tid_list = np.concatenate(tid_list) edge_index_list = np.concatenate(edge_index_list, axis=1) bond_angle_list = np.concatenate(bond_angle_list).astype('float32') bond_angle_index_list = np.concatenate(bond_angle_index_list, axis=1) bond_angle_mask_list = np.concatenate(bond_angle_mask_list) edge_attr_float_list = np.concatenate(edge_attr_float_list).astype('float32') edge_attr_float_mask_list = np.concatenate(edge_attr_float_mask_list) labels = np.array(labels, dtype="float32") g = pgl.Graph.batch(graph_list) others = {'smiles': smiles_list} return {'graph': g, "tid": tid_list, "edge_index":edge_index_list, 
"bond_angle":bond_angle_list, "bond_angle_mask": bond_angle_mask_list, "bond_angle_index": bond_angle_index_list, "edge_attr_float":edge_attr_float_list, "edge_attr_float_mask":edge_attr_float_mask_list}, labels, others def test_dataset(config): print("loading dataset") ds = MolDataset(config) split_idx = ds.get_idx_split() train_ds = Subset(ds, split_idx['train'], mode='train') valid_ds = Subset(ds, split_idx['valid'], mode='valid') test_ds = Subset(ds, split_idx['test'], mode='test') print("Train exapmles: %s" % len(train_ds)) print("Valid exapmles: %s" % len(valid_ds)) print("Test exapmles: %s" % len(test_ds)) for i in range(len(train_ds)): gdata = train_ds[i] print("nfeat: ", np.sum(gdata['node_feat'])) print("edges: ", np.sum(gdata['edge_index'])) print("label: ", gdata['label']) if i == 10: break print("valid data") for i in range(len(valid_ds)): gdata = valid_ds[i] print("nfeat: ", np.sum(gdata['node_feat'])) print("edges: ", np.sum(gdata['edge_index'])) print("label: ", gdata['label']) if i == 10: break if __name__=="__main__": config = prepare_config("./config.yaml", isCreate=False, isSave=False) test_dataset(config)
from rtruffle.source_section import SourceSection
from som.compiler.method_generation_context import MethodGenerationContextBase
from som.interpreter.ast.nodes.field_node import create_write_node, create_read_node
from som.interpreter.ast.nodes.global_read_node import create_global_node
from som.interpreter.ast.nodes.return_non_local_node import CatchNonLocalReturnNode
from som.vmobjects.primitive import empty_primitive
from som.vmobjects.method_ast import AstMethod


class MethodGenerationContext(MethodGenerationContextBase):
    """Compilation context for a single method in the AST interpreter.

    Tracks the block methods embedded in the method body and knows how to
    assemble the parsed body into an executable ``AstMethod``.
    """

    def __init__(self, universe, holder, outer):
        MethodGenerationContextBase.__init__(self, universe, holder, outer)
        # Block methods lexically nested inside this method's body.
        self._embedded_block_methods = []

    def add_embedded_block_method(self, block_method):
        self._embedded_block_methods.append(block_method)

    def assemble(self, method_body):
        """Turn the parsed body into an executable method object.

        Returns an empty primitive for primitive methods, a trivial method
        when the body qualifies, and a full ``AstMethod`` otherwise.
        """
        if self._primitive:
            return empty_primitive(self.signature.get_embedded_string())

        if self.needs_to_catch_non_local_returns:
            method_body = CatchNonLocalReturnNode(
                method_body, method_body.source_section
            )

        trivial = method_body.create_trivial_method(self.signature)
        if trivial is not None:
            return trivial

        arg_inner_access, size_frame, size_inner = self.prepare_frame()

        # copy list to make it immutable for RPython
        embedded = self._embedded_block_methods[:]
        return AstMethod(
            self.signature,
            method_body,
            arg_inner_access,
            size_frame,
            size_inner,
            embedded,
            self._get_source_section_for_method(method_body),
            self.lexical_scope,
        )

    def _get_source_section_for_method(self, expr):
        """Derive a source section labeled ``Holder>>#selector`` from the body."""
        body_section = expr.source_section
        assert isinstance(body_section, SourceSection)
        method_name = "%s>>#%s" % (
            self.holder.name.get_embedded_string(),
            self.signature.get_embedded_string(),
        )
        return SourceSection(identifier=method_name, source_section=body_section)

    def get_outer_self_context_level(self):
        """Count how many lexical contexts enclose this one."""
        level = 0
        context = self.outer_genc
        while context is not None:
            level += 1
            context = context.outer_genc
        return level

    def get_context_level(self, var_name):
        """Return the lexical distance to the context defining ``var_name``."""
        if var_name in self._locals or var_name in self._arguments:
            return 0
        # Not defined here; it must exist in an enclosing lexical context.
        assert self.outer_genc is not None
        return self.outer_genc.get_context_level(var_name) + 1

    def get_variable(self, var_name):
        """Look up a local or argument, searching enclosing contexts too."""
        for scope in (self._locals, self._arguments):
            if var_name in scope:
                return scope[var_name]

        if self.outer_genc:
            outer_var = self.outer_genc.get_variable(var_name)
            if outer_var:
                # Record that the compiled method closes over outer state.
                self._accesses_variables_of_outer_context = True
                return outer_var
        return None

    def get_local(self, var_name):
        """Look up a local only (no arguments), searching enclosing contexts."""
        if var_name in self._locals:
            return self._locals[var_name]

        if self.outer_genc:
            outer_local = self.outer_genc.get_local(var_name)
            if outer_local:
                self._accesses_variables_of_outer_context = True
                return outer_local
        return None

    def get_self_read(self):
        """Build a read node for ``self`` at the correct context level."""
        return self.get_variable("self").get_read_node(self.get_context_level("self"))

    def get_object_field_read(self, field_name):
        """Build a field-read node, or ``None`` if the holder has no such field."""
        if not self.has_field(field_name):
            return None
        return create_read_node(self.get_self_read(), self.get_field_index(field_name))

    def get_global_read(self, var_name):
        return create_global_node(var_name, self.universe, self, None)

    def get_object_field_write(self, field_name, exp):
        """Build a field-write node, or ``None`` if the holder has no such field."""
        if not self.has_field(field_name):
            return None
        return create_write_node(
            self.get_self_read(), exp, self.get_field_index(field_name)
        )
#
#   HyperParameters container class
#   Copyright EAVISE
#
import logging
import importlib.util
# Fix: the ABC aliases were removed from `collections` in Python 3.10;
# `collections.abc` is the correct location (valid since Python 3.3).
from collections.abc import Iterable
import torch

__all__ = ['HyperParameters']
log = logging.getLogger(__name__)


class HyperParameters:
    """ This class is a container for training hyperparameters.
    It allows to save the state of a training and reload it at a later stage.

    Args:
        **kwargs (dict, optional): Keywords arguments that will be set as attributes of the instance and serialized as well

    Attributes:
        self.batch: Number of batches processed; Gets initialized to **0**
        self.epoch: Number of epochs processed; Gets initialized to **0**
        self.*: All arguments passed to the initialization function can be accessed as attributes of this object

    Note:
        If you pass a ``kwarg`` that starts with an **_**, the parameter class will store it
        as a regular property without the leading **_**, but it will not serialize this variable.
        This also works when assigning new values after the object creation:

        >>> param = ln.engine.HyperParameters()
        >>> param._dummy = 666
        >>> print(param.dummy)
        666
    """
    # Guards __setattr__ so attribute assignment inside __init__ stays plain.
    __init_done = False

    def __init__(self, **kwargs):
        self.batch = 0
        self.epoch = 0
        # Names (without leading underscore) excluded from save()/load().
        self.__no_serialize = []

        for key in kwargs:
            if key.startswith('_'):
                # Leading underscore marks "do not serialize"; stored without it.
                serialize = False
                val = kwargs[key]
                key = key[1:]
            else:
                serialize = True
                val = kwargs[key]

            if not hasattr(self, key):
                setattr(self, key, val)
                if not serialize:
                    self.__no_serialize.append(key)
            else:
                log.error(f'{key} attribute already exists as a HyperParameter and will not be overwritten.')

        self.__init_done = True

    def __setattr__(self, item, value):
        """ Store extra variables in this container class.
        Prefixing a name with an underscore stores it under the stripped name
        and marks it as non-serializable.
        """
        if item in self.__dict__ or not self.__init_done:
            super().__setattr__(item, value)
        elif item[0] == '_':
            if item[1:] in self.__dict__:
                raise AttributeError(f'{item} already stored in this object! Use {item[1:]} to access and modify it.')
            self.__no_serialize.append(item[1:])
            super().__setattr__(item[1:], value)
        else:
            super().__setattr__(item, value)

    def __repr__(self):
        """ Print all values stored in the object.
        Objects that will not be serialized are marked with an asterisk.
        """
        s = f'{self.__class__.__name__}('
        for k in sorted(self.__dict__.keys()):
            # Skip private bookkeeping (name-mangled) attributes.
            if k.startswith('_HyperParameters__'):
                continue
            val = self.__dict__[k]
            valrepr = str(val)
            if '\n' in valrepr:
                # Multi-line reprs (e.g. networks) are collapsed to the class name.
                valrepr = val.__class__.__name__
            if k in self.__no_serialize:
                k += '*'
            s += f'\n  {k} = {valrepr}'
        return s + '\n)'

    @classmethod
    def from_file(cls, path, variable='params', **kwargs):
        """ Create a HyperParameter object from a dictionary in an external configuration file.
        This function will import a file by its path and extract a variable to use as HyperParameters.

        Args:
            path (str or path-like object): Path to the configuration python file
            variable (str, optional): Variable to extract from the configuration file; Default **'params'**
            **kwargs (dict, optional): Extra parameters that are passed to the extracted variable if it is a callable object

        Note:
            The extracted variable can be one of the following:

            - :class:`lightnet.engine.HyperParameters`: This object will simply be returned
            - ``dictionary``: The dictionary will be expanded as the parameters for initializing a new :class:`~lightnet.engine.HyperParameters` object
            - ``callable``: The object will be called with the optional kwargs and should return either a :class:`~lightnet.engine.HyperParameters` object or a ``dictionary``
        """
        try:
            spec = importlib.util.spec_from_file_location('lightnet.cfg', path)
            cfg = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(cfg)
        except AttributeError as err:
            # Fix: the message was previously split into an invalid f-string.
            raise ImportError(f'Failed to import the file [{path}]. Are you sure it is a valid python file?') from err

        try:
            params = getattr(cfg, variable)
        except AttributeError as err:
            raise AttributeError(f'Configuration variable [{variable}] not found in file [{path}]') from err

        if callable(params):
            params = params(**kwargs)

        if isinstance(params, cls):
            return params
        elif isinstance(params, dict):
            return cls(**params)
        else:
            # Fix: "Unkown" typo in the raised message.
            raise TypeError(f'Unknown type for configuration variable {variable} [{type(params).__name__}]. This variable should be a dictionary or lightnet.engine.HyperParameters object.')

    def save(self, filename):
        """ Serialize all the hyperparameters to a pickle file. |br|
        The network, optimizers and schedulers objects are serialized using their ``state_dict()`` functions.

        Args:
            filename (str or path): File to store the hyperparameters

        Note:
            Attributes that have a `state_dict()` function are saved through that
            function; everything else is stored as-is.
        """
        state = {}
        for k, v in vars(self).items():
            if k not in self.__no_serialize:
                # NOTE(review): name-mangled internals (_HyperParameters__*) are
                # not filtered here and end up in the saved state — confirm
                # whether that is intended before changing it.
                if hasattr(v, 'state_dict'):
                    state[k] = v.state_dict()
                else:
                    state[k] = v

        torch.save(state, filename)

    def load(self, filename, strict=True):
        """ Load the hyperparameters from a serialized pickle file.

        Note:
            Attributes with a `load_state_dict()` function are restored through it.
            The function is first called with the `strict` keyword argument; if that
            raises a TypeError it is retried with only the serialized value, so
            implementations without a `strict` parameter still load.
        """
        # Fix: the log message previously printed a literal placeholder
        # instead of the actual filename.
        log.info(f'Loading state from file [{filename}]')
        state = torch.load(filename, 'cpu')

        for k, v in state.items():
            if hasattr(self, k):
                current = getattr(self, k)
                if hasattr(current, 'load_state_dict'):
                    try:
                        current.load_state_dict(v, strict=strict)
                    except TypeError:
                        current.load_state_dict(v)
                else:
                    setattr(self, k, v)
            else:
                setattr(self, k, v)

    def to(self, device):
        """ Cast the parameters from the network, optimizers and schedulers to a given device. |br|
        This function will go through all the class attributes and check if they have a `to()` function, which it will call with the device.

        Args:
            device (torch.device or string): Device to cast parameters

        Note:
            PyTorch optimizers and the ReduceLROnPlateau classes do not have a `to()` function implemented. |br|
            For these objects, this function will go through all their necessary attributes and cast the tensors to the right device.
        """
        for key, value in self.__dict__.items():
            if hasattr(value, 'to') and callable(value.to):
                value.to(device)
            elif isinstance(value, torch.optim.Optimizer):
                # Optimizers: walk the state dict and move every tensor (and grad).
                for param in value.state.values():
                    if isinstance(param, torch.Tensor):
                        param.data = param.data.to(device)
                        if param._grad is not None:
                            param._grad.data = param._grad.data.to(device)
                    elif isinstance(param, dict):
                        for subparam in param.values():
                            if isinstance(subparam, torch.Tensor):
                                subparam.data = subparam.data.to(device)
                                if subparam._grad is not None:
                                    subparam._grad.data = subparam._grad.data.to(device)
            elif isinstance(value, (torch.optim.lr_scheduler._LRScheduler, torch.optim.lr_scheduler.ReduceLROnPlateau)):
                # Schedulers: move any tensors stored directly on the object.
                for param in value.__dict__.values():
                    if isinstance(param, torch.Tensor):
                        param.data = param.data.to(device)
                        if param._grad is not None:
                            param._grad.data = param._grad.data.to(device)
<gh_stars>1-10 # -------- BEGIN LICENSE BLOCK -------- # Copyright 2022 FZI Forschungszentrum Informatik # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of the {copyright_holder} nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# -------- END LICENSE BLOCK --------
from ros_bt_py_msgs.msg import Node as NodeMsg
from ros_bt_py_msgs.msg import UtilityBounds

from ros_bt_py.exceptions import NodeConfigError
from ros_bt_py.node import Leaf, define_bt_node
from ros_bt_py.node_config import NodeConfig, OptionRef


@define_bt_node(NodeConfig(
    version='0.9.0',
    options={'output_type': type,
             'state_values': list,
             'output_values': list},
    inputs={},
    outputs={'out': OptionRef('output_type'),
             'current_index': int,
             'tick_count': int,
             'untick_count': int,
             'reset_count': int,
             'shutdown_count': int},
    max_children=0))
class MockLeaf(Leaf):
    """Scripted leaf node for tests.

    On each tick it emits the next entry of ``output_values`` on the ``out``
    output and returns the matching entry of ``state_values``, cycling back
    to the start when the lists are exhausted.  Lifecycle-call counters are
    mirrored on the node outputs so tests can inspect them.
    """

    def __init__(self, options=None, debug_manager=None, name=None):
        # NOTE(review): `super(Leaf, self)` deliberately skips Leaf.__init__
        # and calls its parent's initializer directly — presumably intentional
        # for this mock; confirm against ros_bt_py.node.Leaf before changing.
        super(Leaf, self).__init__(options, debug_manager, name)
        self.setup_called = False
        self.tick_count = 0
        self.untick_count = 0
        self.reset_count = 0
        self.shutdown_count = 0

    def _do_setup(self):
        """Reset outputs, validate options, and type-check every output value."""
        self.setup_called = True
        self.outputs['current_index'] = 0
        self.outputs['tick_count'] = self.tick_count
        self.outputs['untick_count'] = self.untick_count
        self.outputs['reset_count'] = self.reset_count
        self.outputs['shutdown_count'] = self.shutdown_count
        if len(self.options['state_values']) != len(self.options['output_values']):
            raise NodeConfigError('state_values and output_values must have the same length!')
        # Assign each configured value once so incompatible types fail at
        # setup time rather than mid-tick.
        for value in self.options['output_values']:
            try:
                self.outputs['out'] = value
            except TypeError:
                raise NodeConfigError('Provided output value "%s (%s)" is not compatible with '
                                      'output type %s' % (str(value),
                                                          type(value).__name__,
                                                          self.options['output_type'].__name__))
        self.outputs['out'] = None

    def _do_tick(self):
        """Emit the scripted output/state for the current index and advance."""
        self.outputs['out'] = self.options['output_values'][self.outputs['current_index']]
        new_state = self.options['state_values'][self.outputs['current_index']]
        # Increment index (and roll over if necessary)
        self.outputs['current_index'] = ((self.outputs['current_index'] + 1) %
                                         len(self.options['state_values']))
        self.tick_count += 1
        self.outputs['tick_count'] = self.tick_count
        return new_state

    def _do_untick(self):
        # We leave current_index untouched, so paused is the most semantically
        # correct state
        self.untick_count += 1
        self.outputs['untick_count'] = self.untick_count
        return NodeMsg.PAUSED

    def _do_reset(self):
        """Rewind the script to the beginning."""
        self.outputs['current_index'] = 0
        self.reset_count += 1
        self.outputs['reset_count'] = self.reset_count
        return NodeMsg.IDLE

    def _do_shutdown(self):
        self.outputs['current_index'] = 0
        self.shutdown_count += 1
        self.outputs['shutdown_count'] = self.shutdown_count

    def _do_calculate_utility(self):
        return UtilityBounds(can_execute=True)


@define_bt_node(NodeConfig(
    version='0.9.0',
    options={'can_execute': bool,
             'utility_lower_bound_success': float,
             'utility_upper_bound_success': float,
             'utility_lower_bound_failure': float,
             'utility_upper_bound_failure': float},
    inputs={},
    outputs={'calculate_utility_count': int},
    max_children=0))
class MockUtilityLeaf(Leaf):
    """Leaf mock whose utility bounds come straight from its options.

    Always succeeds on tick; counts how often its utility is calculated.
    """

    def _do_calculate_utility(self):
        # First call initializes the counter; later calls increment it.
        if self.outputs['calculate_utility_count']:
            self.outputs['calculate_utility_count'] += 1
        else:
            self.outputs['calculate_utility_count'] = 1
        return UtilityBounds(
            can_execute=self.options['can_execute'],
            has_lower_bound_success=True,
            lower_bound_success=self.options['utility_lower_bound_success'],
            has_upper_bound_success=True,
            upper_bound_success=self.options['utility_upper_bound_success'],
            has_lower_bound_failure=True,
            lower_bound_failure=self.options['utility_lower_bound_failure'],
            has_upper_bound_failure=True,
            upper_bound_failure=self.options['utility_upper_bound_failure'])

    def _do_shutdown(self):
        # NOTE(review): shutdown_count is only created in _do_setup — this
        # assumes setup always runs before shutdown; confirm with the node
        # lifecycle contract.
        self.shutdown_count += 1

    def _do_setup(self):
        self.setup_called = True
        self.tick_count = 0
        self.untick_count = 0
        self.reset_count = 0
        self.shutdown_count = 0

    def _do_tick(self):
        self.tick_count += 1
        return NodeMsg.SUCCEEDED

    def _do_untick(self):
        self.untick_count += 1
        return NodeMsg.IDLE

    def _do_reset(self):
        self.reset_count += 1
        return NodeMsg.IDLE
import time
from typing import List

from narwhallet.core.kcl.transaction.input import MTransactionInput
from narwhallet.core.kcl.transaction.output import MTransactionOutput


class MTransaction():
    """In-memory model of a blockchain transaction.

    Scalar fields are exposed through read-only properties with explicit
    ``set_*`` mutators; inputs/outputs are lists of
    :class:`MTransactionInput` / :class:`MTransactionOutput`.
    """

    def __init__(self):
        self._txid: str = None
        self._hash: str = None
        self._version: int = None
        self._size: int = None
        self._vsize: int = None
        self._locktime: int = None
        self._blockhash: str = None
        self._vin: List[MTransactionInput] = []
        self._vout: List[MTransactionOutput] = []
        self._hex: str = None
        self._confirmations: int = None
        self._time: int = None
        self._blocktime: int = None

    # ------------------------------------------------------------------
    # Read-only accessors
    # ------------------------------------------------------------------
    @property
    def txid(self) -> str:
        return self._txid

    @property
    def hash(self) -> str:
        return self._hash

    @property
    def version(self) -> int:
        return self._version

    @property
    def size(self) -> int:
        return self._size

    @property
    def vsize(self) -> int:
        return self._vsize

    @property
    def locktime(self) -> int:
        return self._locktime

    @property
    def blockhash(self) -> str:
        return self._blockhash

    @property
    def confirmations(self) -> int:
        return self._confirmations

    @property
    def time(self) -> int:
        return self._time

    @property
    def blocktime(self) -> int:
        return self._blocktime

    @property
    def vin(self) -> List[MTransactionInput]:
        return self._vin

    @property
    def vout(self) -> List[MTransactionOutput]:
        return self._vout

    @property
    def hex(self) -> str:
        return self._hex

    # ------------------------------------------------------------------
    # Mutators
    # ------------------------------------------------------------------
    def set_txid(self, txid: str) -> None:
        self._txid = txid

    def set_hash(self, thash: str) -> None:
        self._hash = thash

    def set_version(self, version) -> None:
        self._version = version

    def set_size(self, size) -> None:
        self._size = size

    def set_vsize(self, vsize) -> None:
        self._vsize = vsize

    def set_locktime(self, locktime) -> None:
        self._locktime = locktime

    def set_blockhash(self, blockhash: str) -> None:
        self._blockhash = blockhash

    def set_confirmations(self, confirmations) -> None:
        self._confirmations = confirmations

    def set_time(self, time) -> None:
        # Parameter name shadows the `time` module inside this method only.
        self._time = time

    def set_blocktime(self, blocktime) -> None:
        self._blocktime = blocktime

    def set_vin(self, vin: List[MTransactionInput]) -> None:
        self._vin = vin

    def set_vout(self, vout: List[MTransactionOutput]) -> None:
        self._vout = vout

    def add_vin(self, vin: MTransactionInput) -> None:
        self._vin.append(vin)

    def add_vout(self, vout: MTransactionOutput) -> None:
        self._vout.append(vout)

    def set_hex(self, thex: str) -> None:
        self._hex = thex

    @staticmethod
    def to_dict_list(v: list) -> List[dict]:
        """Serialize a list of inputs/outputs; anything else is dropped."""
        return [item.to_dict() for item in v
                if isinstance(item, (MTransactionInput, MTransactionOutput))]

    def from_sql(self, tx, vin, vout):
        """Populate this transaction from SQL rows (tx record + vin/vout rows)."""
        if len(tx) > 0:
            record = tx[0]
            # Column index -> setter; columns 6 and 7 are not consumed here.
            for column, setter in ((0, self.set_txid),
                                   (1, self.set_hash),
                                   (2, self.set_version),
                                   (3, self.set_size),
                                   (4, self.set_vsize),
                                   (5, self.set_locktime),
                                   (8, self.set_blockhash),
                                   (9, self.set_confirmations),
                                   (10, self.set_time),
                                   (11, self.set_blocktime),
                                   (12, self.set_hex)):
                setter(record[column])

        for row in vin:
            tx_input = MTransactionInput()
            tx_input.from_sql(row)
            self.add_vin(tx_input)
        for row in vout:
            tx_output = MTransactionOutput()
            tx_output.from_sql(row)
            self.add_vout(tx_output)

    def from_json(self, json: dict):
        """Populate this transaction from a node-RPC style JSON dict."""
        self.set_txid(json['txid'])

        for key, setter in (('hash', self.set_hash),
                            ('version', self.set_version),
                            ('size', self.set_size),
                            ('vsize', self.set_vsize),
                            ('locktime', self.set_locktime)):
            if key in json:
                setter(json[key])

        for entry in json['vin']:
            tx_input = MTransactionInput()
            tx_input.from_json(entry)
            self.add_vin(tx_input)
        for entry in json['vout']:
            tx_output = MTransactionOutput()
            tx_output.from_json(entry)
            self.add_vout(tx_output)

        for key, setter in (('hex', self.set_hex),
                            ('blockhash', self.set_blockhash),
                            ('confirmations', self.set_confirmations)):
            if key in json:
                setter(json[key])

        # Fall back to "now" when the source JSON carries no timestamp.
        if 'time' in json:
            self.set_time(json['time'])
        else:
            self.set_time(time.time())

        if 'blocktime' in json:
            self.set_blocktime(json['blocktime'])

    def to_dict(self) -> dict:
        return {'hash': self.hash,
                'blockhash': self.blockhash,
                'vin': self.to_dict_list(self.vin),
                'vout': self.to_dict_list(self.vout),
                'txid': self.txid,
                'hex': self.hex,
                'version': self.version,
                'size': self.size,
                'vsize': self.vsize,
                'locktime': self.locktime,
                'confirmations': self.confirmations,
                'time': self.time,
                'blocktime': self.blocktime}
<reponame>mphoward/relentless<gh_stars>0 """ Math functions ============== This module implements some convenience objects for mathematical operations. .. autosummary:: :nosignatures: Interpolator KeyedArray .. autoclass:: Interpolator :members: .. autoclass:: KeyedArray :members: """ import numpy import scipy.interpolate from .collections import FixedKeyDict class Interpolator: r"""Interpolating function. Interpolates through a function :math:`y(x)` on the domain :math:`a \le x \le b` using Akima splines. Outside this domain, `y` is extrapolated as a constant, i.e., :math:`y(x < a) = y(a)` and :math:`y(x > b) = y(b)`\. Parameters ---------- x : array_like 1D array of x coordinates that must be continually increasing. y : array_like 1D array of y coordinates. Raises ------ ValueError If ``x`` is a scalar. ValueError If ``x`` is not 1-dimensional. ValueError If ``y`` is not the same shape as ``x``. ValueError If ``x`` is not strictly increasing. Examples -------- Interpolating the line :math:`y=2x`:: f = Interpolator(x=(-1,0,1), y=(-2,0,2)) Evaluating the function:: >>> f(0.5) 1.0 >>> f([-0.5,0.5]) (-1.0, 1.0) Evaluate the :math:`n`\th derivative of the function:: >>> f.derivative(x=0.5, n=1) 2.0 >>> f.derivative(x=[-2.5,-0.5,0.5,2.5], n=1) (0.0, 2.0, 2.0, 0.0) Extrapolation:: >>> f(100) 2.0 """ def __init__(self, x, y): x = numpy.atleast_1d(x) y = numpy.atleast_1d(y) if x.shape[0] == 1: raise ValueError('x cannot be a scalar') if x.ndim > 1: raise ValueError('x must be 1-dimensional') if x.shape != y.shape: raise ValueError('x and y must be the same shape') if not numpy.all(x[1:] > x[:-1]): raise ValueError('x must be strictly increasing') self._domain = (x[0],x[-1]) if x.shape[0] > 2: self._spline = scipy.interpolate.Akima1DInterpolator(x=x, y=y) else: self._spline = scipy.interpolate.InterpolatedUnivariateSpline(x=x, y=y, k=1) def __call__(self, x): r"""Evaluate the interpolating function. 
Parameters ---------- x : float or array_like 1-d array of :math:`x` coordinates to evaluate. Returns ------- result : float or numpy.ndarray Interpolated values having the same form as ``x``. """ scalar_x = numpy.isscalar(x) x = numpy.atleast_1d(x) result = numpy.zeros(len(x)) # clamp lo lo = x < self.domain[0] result[lo] = self._spline(self.domain[0]) # clamp hi hi = x > self.domain[1] result[hi] = self._spline(self.domain[1]) # evaluate in between flags = numpy.logical_and(~lo,~hi) result[flags] = self._spline(x[flags]) if scalar_x: result = result.item() return result def derivative(self, x, n): r"""Evaluate the :math:`n`\th derivative of the interpolating function. Parameters ---------- x : float or array_like 1-d array of :math:`x` coordinates to evaluate. n : int The order of the derivative to take. Returns ------- result : float or numpy.ndarray Interpolated derivative values having the same form as ``x``. Raises ------ ValueError If ``n`` is not a positive integer. """ if not isinstance(n, int) and n <= 0: raise ValueError('n must be a positive integer') scalar_x = numpy.isscalar(x) x = numpy.atleast_1d(x) result = numpy.zeros(len(x)) # clamp lo lo = x < self.domain[0] result[lo] = 0 # clamp hi hi = x > self.domain[1] result[hi] = 0 # evaluate in between flags = numpy.logical_and(~lo,~hi) result[flags] = self._spline.derivative(n)(x[flags]) if scalar_x: result = result.item() return result @property def domain(self): """tuple: The valid domain for interpolation.""" return self._domain class KeyedArray(FixedKeyDict): """Numerical array with fixed keys. Can be used to perform arithmetic operations between two arrays (element-wise) or between an array and a scalar, as well as vector algebraic operations (norm, dot product). Parameters ---------- keys : array_like List of keys to be fixed. default : scalar Initial value to fill in the dictionary, defaults to ``None``. 
Examples
    --------
    Create a keyed array::

        k1 = KeyedArray(keys=('A','B'))
        k2 = KeyedArray(keys=('A','B'))

    Set values through update::

        k1.update({'A':2.0, 'B':3.0})
        k2.update({'A':3.0, 'B':4.0})

    Perform array-array arithmetic operations::

        >>> print(k1 + k2)
        {'A':5.0, 'B':7.0}
        >>> print(k1 - k2)
        {'A':-1.0, 'B':-1.0}
        >>> print(k1*k2)
        {'A':6.0, 'B':12.0}
        >>> print(k1/k2)
        {'A':0.6666666666666666, 'B':0.75}
        >>> print(k1**k2)
        {'A':8.0, 'B':81.0}

    Perform array-scalar arithmetic operations::

        >>> print(k1 + 3)
        {'A':5.0, 'B':6.0}
        >>> print(3 - k1)
        {'A':1.0, 'B':0.0}
        >>> print(3*k1)
        {'A':6.0, 'B':9.0}
        >>> print(k1/10)
        {'A':0.2, 'B':0.3}
        >>> print(k1**2)
        {'A':4.0, 'B':9.0}
        >>> print(-k1)
        {'A':-2.0, 'B':-3.0}

    Compute vector dot product::

        >>> print(k1.dot(k2))
        18.0

    Compute vector norm::

        >>> print(k2.norm())
        5.0

    """

    def __init__(self, keys, default=None):
        # Storage is delegated to the base class; `keys` fixes the key set and
        # `default` the initial value -- base-class semantics not visible here,
        # TODO confirm.
        super().__init__(keys, default)

    def _assert_same_keys(self, val):
        # Array-array arithmetic is only defined over identical key sets.
        if self.keys() != val.keys():
            raise KeyError('Both KeyedArrays must have identical keys to perform mathematical operations.')

    def __add__(self, val):
        """Element-wise addition of two arrays, or of an array and a scalar."""
        k = KeyedArray(keys=self.keys())
        if isinstance(val, KeyedArray):
            self._assert_same_keys(val)
            k.update({x: self[x] + val[x] for x in self})
        elif numpy.isscalar(val):
            k.update({x: self[x] + val for x in self})
        else:
            raise TypeError('A KeyedArray can only add a scalar or a KeyedArray.')
        return k

    def __radd__(self, val):
        """Element-wise addition of a scalar and an array."""
        k = KeyedArray(keys=self.keys())
        if numpy.isscalar(val):
            k.update({x: val + self[x] for x in self})
        else:
            raise TypeError('A KeyedArray can only add a scalar or a KeyedArray.')
        return k

    def __iadd__(self, val):
        """In-place element-wise addition of two arrays, or of an array or scalar."""
        if isinstance(val, KeyedArray):
            self._assert_same_keys(val)
            for x in self:
                self[x] += val[x]
        elif numpy.isscalar(val):
            for x in self:
                self[x] += val
        else:
            raise TypeError('A KeyedArray can only add a scalar or a KeyedArray.')
        return self

    def __sub__(self, val):
        """Element-wise subtraction of two arrays, or of an array and a scalar."""
        k = KeyedArray(keys=self.keys())
        if isinstance(val, KeyedArray):
            self._assert_same_keys(val)
            k.update({x: self[x] - val[x] for x in self})
        elif numpy.isscalar(val):
            k.update({x: self[x] - val for x in self})
        else:
            raise TypeError('A KeyedArray can only subtract a scalar or a KeyedArray.')
        return k

    def __rsub__(self, val):
        """Element-wise subtraction of a scalar and an array."""
        k = KeyedArray(keys=self.keys())
        if numpy.isscalar(val):
            k.update({x: val - self[x] for x in self})
        else:
            raise TypeError('A KeyedArray can only subtract a scalar or a KeyedArray.')
        return k

    def __isub__(self, val):
        """In-place element-wise subtraction of two arrays, or of an array and a scalar."""
        if isinstance(val, KeyedArray):
            self._assert_same_keys(val)
            for x in self:
                self[x] -= val[x]
        elif numpy.isscalar(val):
            for x in self:
                self[x] -= val
        else:
            raise TypeError('A KeyedArray can only subtract a scalar or a KeyedArray.')
        return self

    def __mul__(self, val):
        """Element-wise multiplication of two arrays, or of an array and a scalar."""
        k = KeyedArray(keys=self.keys())
        if isinstance(val, KeyedArray):
            self._assert_same_keys(val)
            k.update({x: self[x]*val[x] for x in self})
        elif numpy.isscalar(val):
            k.update({x: self[x]*val for x in self})
        else:
            raise TypeError('A KeyedArray can only multiply a scalar or a KeyedArray.')
        return k

    def __rmul__(self, val):
        """Element-wise multiplication of a scalar by an array."""
        k = KeyedArray(keys=self.keys())
        if numpy.isscalar(val):
            k.update({x: val*self[x] for x in self})
        else:
            raise TypeError('A KeyedArray can only multiply a scalar or a KeyedArray.')
        return k

    def __imul__(self, val):
        """In-place element-wise multiplication of two arrays, or of an array by a scalar."""
        if isinstance(val, KeyedArray):
            self._assert_same_keys(val)
            for x in self:
                self[x] *= val[x]
        elif numpy.isscalar(val):
            for x in self:
                self[x] *= val
        else:
            raise TypeError('A KeyedArray can only multiply a scalar or a KeyedArray.')
        return self

    def __truediv__(self, val):
        """Element-wise division of two arrays, or of an array by a scalar."""
        k = KeyedArray(keys=self.keys())
        if isinstance(val, KeyedArray):
            self._assert_same_keys(val)
            k.update({x: self[x]/val[x] for x in self})
        elif numpy.isscalar(val):
            k.update({x: self[x]/val for x in self})
        else:
            raise TypeError('A KeyedArray can only divide a scalar or a KeyedArray.')
        return k

    def __rtruediv__(self, val):
        """Element-wise division of a scalar by an array."""
        k = KeyedArray(keys=self.keys())
        if numpy.isscalar(val):
            k.update({x: val/self[x] for x in self})
        else:
            raise TypeError('A KeyedArray can only divide a scalar or a KeyedArray.')
        return k

    def __itruediv__(self, val):
        """In-place element-wise division of two arrays, or of an array by a scalar."""
        if isinstance(val, KeyedArray):
            self._assert_same_keys(val)
            for x in self:
                self[x] /= val[x]
        elif numpy.isscalar(val):
            for x in self:
                self[x] /= val
        else:
            raise TypeError('A KeyedArray can only divide a scalar or a KeyedArray.')
        return self

    def __pow__(self, val):
        """Element-wise exponentiation of an array by a scalar or by an array."""
        k = KeyedArray(keys=self.keys())
        if isinstance(val, KeyedArray):
            self._assert_same_keys(val)
            k.update({x: self[x]**val[x] for x in self})
        elif numpy.isscalar(val):
            k.update({x: self[x]**val for x in self})
        else:
            raise TypeError('A KeyedArray can only be exponentiated by a scalar or by a KeyedArray.')
        return k

    def __neg__(self):
        """Element-wise negation of an array."""
        k = KeyedArray(keys=self.keys())
        k.update({x: -self[x] for x in self})
        return k

    def norm(self):
        r"""Vector :math:`\ell^2`-norm.

        For a vector :math:`\mathbf{x}=\left[x_1,\ldots,x_n\right]`, the
        Euclidean 2-norm :math:`\lVert\mathbf{x}\rVert` is computed as:

        .. math::

            \lVert\mathbf{x}\rVert = \sqrt{\sum_{k=1}^{n} {x_k}^2}

        Returns
        -------
        float
            The vector norm.

        """
        return numpy.linalg.norm(list(self.values()))

    def dot(self, val):
        r"""Vector dot product.

        For two vectors :math:`\mathbf{x}=\left[x_1,\ldots,x_n\right]` and
        :math:`\mathbf{y}=\left[y_1,\ldots,y_n\right]`, the vector dot product
        :math:`\mathbf{x}\cdot\mathbf{y}` is computed as:

        .. math::

            \mathbf{x}\cdot\mathbf{y} = \sum_{k=1}^{n} {x_k y_k}

        Parameters
        ----------
        val : :class:`KeyedArray`
            One of the arrays used to compute the dot product.

        Returns
        -------
        float
            The vector dot product.

        """
        self._assert_same_keys(val)
        return numpy.sum([self[x]*val[x] for x in self])
import os, sys, re, types, copy, warnings, inspect, logging, glob, gzip from collections import OrderedDict as odict import collections # Python 2/3 Compatibility try: import ConfigParser as configparser except: import configparser import numpy import pandas import sqlalchemy import sqlalchemy.exc as exc import sqlalchemy.engine # for rpy2 for data frames try: import rpy2 from rpy2.robjects import r as R except ImportError: R = None from SphinxReport import Utils class SQLError( Exception ): pass ########################################################################### ########################################################################### ########################################################################### def prettyFloat( val, format = "%5.2f" ): """output a float or "na" if not defined""" try: x = format % val except (ValueError, TypeError): x = "na" return x ########################################################################### ########################################################################### ########################################################################### def prettyPercent( numerator, denominator, format = "%5.2f" ): """output a percent value or "na" if not defined""" try: x = format % (100.0 * numerator / denominator ) except (ValueError, ZeroDivisionError): x = "na" return x ########################################################################### ########################################################################### ########################################################################### def getCallerLocals( level = 3, decorators = 0): '''returns locals of caller using frame. 
optional pass number of decorators from http://pylab.blogspot.com/2009/02/python-accessing-caller-locals-from.html ''' f = sys._getframe(level+decorators) args = inspect.getargvalues(f) return args[3] def quoteField( s ): '''returns a quoted version of s for inclusion in SQL statements.''' # replace internal "'" with "\'" return re.sub( "'", "''", s) ########################################################################### ########################################################################### ########################################################################### @Utils.memoized def getTableNames( db ): '''return a set of table names.''' inspector = sqlalchemy.engine.reflection.Inspector.from_engine(db) return set(inspector.get_table_names()) ########################################################################### ########################################################################### ########################################################################### def getTableColumns( db, tablename ): '''return a list of columns for table *tablename*.''' inspector = sqlalchemy.engine.reflection.Inspector.from_engine(db) with warnings.catch_warnings(): warnings.simplefilter("ignore") vals = inspector.get_columns(tablename) return vals ########################################################################### ########################################################################### ########################################################################### class Tracker(object): """ Base class for trackers. User trackers should be derived from this class. A tracker provides the data for a given :term:`track` through its __call__ method. Optionally, the data can be sliced, with a :term:`slice` containing a subset of the data. For example, tracks could be entities like cars, motorcycles, and bikes, and slices could be colours like blue, red, green:: class LengthTracker( Tracker ): mData = ... 
def __call__( self, track, slice ): return [x.length for x in mData if x.type == track and x.color == slice] The call:: tracker = LengthTracker() tracker("car", "blue") would return the lengths of blue cars. """ mMinData = 1 # set to False, if results of tracker should be cached cache = True # default: empty tracks/slices # tracks = [] # slices = [] # paths = [] def __init__(self, *args, **kwargs): pass # def getTracks( self ): # """return a list of all tracks that this tracker provides.""" # return self.tracks # def getSlices( self ): # """return a list of all slices that this tracker provides. # """ # return self.slices # def getPaths( self ): # """return all paths this tracker provides. # """ # return self.paths def getShortCaption( self ): """return one line caption. The default is to return the first non-blank line of the __doc__ string. """ try: for line in self.__doc__.split("\n"): if line.strip(): return line.strip() except AttributeError: return "" return "" def __call__(self, track, slice = None): """return a data structure for track :param: track and slice :slice:""" raise NotImplementedError("Tracker not fully implemented -> __call__ missing") def members( self, locals = None ): '''function similar to locals() but returning member variables of this tracker. Convenience function for string substitution. If *locals* is given (and a dictionary), the dictionary is added to the returned dictionary. Entries in *local* take precedence before member variables. Typical usage is:: print "my string with %(vars)s" % (self.members(locals())). returns a dictionary ''' # skip tracks and slices to avoid recursion # todo: do this for the general case # 1. subtract property attributes, or # 2. 
subtract members of Tracker() l = dict( [(attr,getattr(self,attr)) for attr in dir(self) \ if not isinstance(attr, collections.Callable) and not attr.startswith("__") and attr != "tracks" and attr != "slices"] ) if locals: return dict( l, **locals) else: return l ########################################################################### ########################################################################### ########################################################################### class TrackerSingleFile( Tracker ): '''base class for tracker obtaining data from a single file. Tracks and slices are defined by the file contents. ''' def __init__(self, *args, **kwargs ): Tracker.__init__(self, *args, **kwargs ) if "filename" not in kwargs: raise ValueError( "TrackerSingleFile requires a :filename: parameter" ) self.filename = kwargs['filename'].strip() ####################################################### ####################################################### ####################################################### class TrackerMultipleFiles( Tracker ): '''base class for trackers obtaining data from a multilpe files. Tracks are names derived from filenames via a regular expression. This tracker accepts the following parameters: :glob: glob expression describing the files to include. :regex: regular expression for extracting a track label from a filename. If not given, the complete filename path is used. 
''' def getTracks(self, subset = None ): files = glob.glob( self.glob ) self.mapTrack2File = {} for f in files: try: track = self.regex.search( f ).groups()[0] except AttributeError: raise ValueError( "filename %s does not match regular expression" % f ) self.mapTrack2File[track] = f return self.mapTrack2File.keys() def __init__(self, *args, **kwargs ): Tracker.__init__(self, *args, **kwargs ) if "glob" not in kwargs: raise ValueError( "TrackerMultipleFiles requires a :glob: parameter" ) self.glob = kwargs['glob'].strip() self.regex = kwargs.get( 'regex', '(.*)' ) if '(' not in self.regex: raise ValueError( "regular expression requires exactly one group enclosed in ()") self.regex = re.compile( self.regex) def openFile( self, track ): '''open a file.''' filename = self.mapTrack2File[track] if filename.endswith( ".gz" ): infile = gzip.open( filename, "r" ) else: infile = open( filename, "r" ) return infile ####################################################### ####################################################### ####################################################### class TrackerTSV( TrackerSingleFile ): """Base class for trackers that fetch data from an CSV file. Each track is a column in the file. 
""" def __init__(self, *args, **kwargs ): TrackerSingleFile.__init__(self, *args, **kwargs ) self._data = None self._tracks = None def getTracks(self, subset = None ): if self.filename.endswith( ".gz" ): inf = gzip.open( self.filename, "r" ) else: inf = open( self.filename, "r" ) for line in inf: if line.startswith("#"): continue tracks = line[:-1].split( "\t" ) break inf.close() self._tracks = tracks return tracks def readData( self ): if self._data == None: if self.filename.endswith( ".gz" ): inf = gzip.open( self.filename, "r" ) else: inf = open( self.filename, "r" ) data = [ x.split() for x in inf.readlines() if not x.startswith("#")] inf.close() self.data = dict( list(zip( data[0], list(zip( *data[1:] )) )) ) def __call__(self, track, **kwargs ): """return a data structure for track :param: track""" self.readData() return self.data[track] class TrackerMatrices( TrackerMultipleFiles ): """Return matrix data from multiple files. """ def __call__(self, track, **kwargs ): """return a data structure for track :param: track""" infile = self.openFile( track ) dtype = numpy.float lines = [ l for l in infile.readlines() if not l.startswith("#") ] infile.close() nrows = len(lines) - 1 col_headers = lines[0][:-1].split("\t")[1:] ncols = len(col_headers) matrix = numpy.zeros( (nrows, ncols), dtype = dtype ) row_headers = [] for row, l in enumerate(lines[1:]): data = l.split("\t") row_headers.append( data[0] ) matrix[row] = numpy.array(data[1:], dtype = dtype) return odict( ( ('matrix', matrix), ('rows', row_headers), ('columns', col_headers) ) ) ####################################################### ####################################################### ####################################################### class TrackerDataframes( TrackerMultipleFiles ): '''return dataframe from files. By default, the dataframe has no row names. If self.index_column is set, the specified column will be used as row names. 
    '''

    def __init__(self, *args, **kwargs ):
        TrackerMultipleFiles.__init__(self, *args, **kwargs )
        # optional column to use as the dataframe index
        self.index_column = kwargs.get('index_column', None )

    def __call__( self, track, **kwargs ):
        # one tab-separated file per track; first line is the header
        df = pandas.read_csv( self.openFile(track),
                              sep='\t',
                              header=0,
                              index_col=self.index_column )
        return df

###########################################################################
class TrackerImages( Tracker ):
    '''Collect image files and arrange them in a gallery.
    '''

    def __init__(self, *args, **kwargs ):
        Tracker.__init__(self, *args, **kwargs )
        if "glob" not in kwargs:
            raise ValueError( "TrackerImages requires a :glob: parameter" )
        self.glob = kwargs["glob"]

    def getTracks( self, subset = None ):
        # each matching filename is its own track
        return glob.glob( self.glob )

    def __call__(self, track, **kwargs ):
        """return a data structure for track :param: track and slice :slice:"""
        return odict( ( ('name', track), ( 'filename', track) ) )

###########################################################################
class TrackerSQL( Tracker ):
    """Base class for trackers that fetch data from an SQL database.

    The basic tracker identifies tracks as tables that match a
    certain pattern (:attr:`pattern`)

    The pattern should contain at least one group. If there are multiple
    groups, these will be associated as tracks/slices.

    This tracker connects to the database. Each tracker will establish
    its own connection for efficient multi-processing.

    If :attr:`as_tables` is set, the full table names will be returned.
    The default is to apply :attr:`pattern` and return the result.
""" pattern = None as_tables = False def __init__(self, backend = None, attach = [], *args, **kwargs ): Tracker.__init__(self, *args, **kwargs ) # connection within python self.db = None # connection within R self.rdb = None # attach to additional tables self.attach = attach if backend != None: # backend given - use it self.backend = backend else: # not defined previously (by mix-in class) get default if not hasattr( self, "backend" ): self.backend = Utils.PARAMS["report_sql_backend"] # patch for mPattern and mAsTables for backwards-compatibility if hasattr( self, "mPattern"): warnings.warn( "mPattern is deprecated, use pattern instead", DeprecationWarning ) self.pattern = "(.*)%s" % self.mPattern if hasattr( self, "mAsTables" ): warnings.warn( "mAsTables is deprecated, use as_tables instead", DeprecationWarning ) self.as_tables = self.mAsTables def connect( self, creator = None ): """lazy connection function.""" if not self.db: logging.debug( "connecting to %s" % self.backend ) # attach to additional databases (sqlite) if self.attach: if creator is not None: raise NotImplementedError( 'attach not implemented if creator is set') if not self.backend.startswith( 'sqlite' ): raise NotImplementedError( 'attach only implemented for sqlite backend') def _my_creator(): # issuing the ATTACH DATABASE into the sqlalchemy ORM (self.db.execute( ... )) # does not work. The database is attached, but tables are not accessible in later # SELECT statements. import sqlite3 conn = sqlite3.connect(re.sub( "sqlite:///", "", self.backend) ) for filename, name in self.attach: conn.execute( "ATTACH DATABASE '%s' AS %s" % \ (os.path.abspath(filename), name)) return conn creator = _my_creator # creator can not be None. 
if creator: db = sqlalchemy.create_engine( self.backend, echo = False, creator = creator ) else: db = sqlalchemy.create_engine( self.backend, echo = False ) if not db: raise ValueError( "could not connect to database %s" % self.backend ) db.echo = False # ignore unknown type BigInt warnings # Note that this step can take a while on large databases # with many tables and many columns if db and False: try: with warnings.catch_warnings(): warnings.simplefilter("ignore") self.metadata = sqlalchemy.MetaData(db, reflect = True) except AttributeError: self.metadata = sqlalchemy.MetaData(db, reflect = True) self.db = db logging.debug( "connected to %s" % self.backend ) def rconnect( self, creator = None ): '''open connection within R to database.''' if not self.rdb: R.library( 'RSQLite' ) if creator: self.rdb = creator() else: if self.backend.startswith( 'sqlite' ): self.rdb = R.dbConnect(R.SQLite(), dbname=re.sub( "sqlite:///./", "", self.backend ) ) else: raise NotImplementedError("can not connect to %s in R" % self.backend ) def getTables(self, pattern = None ): """return a list of tables matching a *pattern*. This function does not return table views. returns a list of table objects. """ self.connect() sorted_tables = sorted(getTableNames( self.db )) if pattern: rx = re.compile(pattern) return [ x for x in sorted_tables if rx.search( x ) ] else: return sorted_tables def getTableNames( self, pattern = None ): '''return a list of tablenames matching a *pattern*. 
        '''
        return self.getTables( pattern )

    def hasTable( self, tablename ):
        """return table with name *tablename*."""
        # returns True/False, not the table itself
        self.connect()
        return tablename in getTableNames( self.db )

    def getColumns( self, tablename ):
        '''return a list of columns in table *tablename*.'''
        self.connect()
        columns = getTableColumns( self.db, tablename )
        # strip an optional "tablename." prefix from each column name
        return [ re.sub( "%s[.]" % tablename, "", x['name']) for x in columns ]

    def execute(self, stmt ):
        # run *stmt* verbatim, wrapping any backend error in SQLError
        self.connect()
        try:
            r = self.db.execute(stmt)
        except exc.SQLAlchemyError as msg:
            raise SQLError(msg)
        return r

    def buildStatement( self, stmt ):
        '''fill in placeholders in stmt.'''
        # interpolate %(name)s placeholders from the caller's locals plus
        # this tracker's member variables
        kwargs = self.members( getCallerLocals() )
        statement = stmt % dict( list(kwargs.items()) )
        return statement

    def getValue( self, stmt ):
        """returns a single value from SQL statement *stmt*.

        The SQL statement is subjected to variable interpolation.

        This function will return the first value in the first row
        from a SELECT statement.
        """
        statement = self.buildStatement(stmt)
        result = self.execute(statement).fetchone()
        if result == None:
            raise exc.SQLAlchemyError( "no result from %s" % statement )
        return result[0]

    def getFirstRow( self, stmt ):
        """return a row of values from SQL statement *stmt* as a list.

        The SQL statement is subjected to variable interpolation.

        This function will return the first row from a SELECT statement
        as a list.

        Returns None if result is empty.
        """
        e = self.execute(self.buildStatement(stmt)).fetchone()
        if e: return list(e)
        else: return None

    def getRow( self, stmt ):
        """return a row of values from an SQL statement as dictionary.

        This function will return the first row from a SELECT
        statement as a dictionary of column-value mappings.

        Returns None if result is empty.
        """
        e = self.execute( self.buildStatement( stmt )).fetchone()
        # assumes that values are sorted in ResultProxy.keys()
        if e: return odict( [x,e[x]] for x in list(e.keys()) )
        else: return None

    def getValues( self, stmt ):
        """return values from SQL statement *stmt* as a list.

        This function will return the first value in each row
        from an SELECT statement.

        Returns an empty list if there is no result.
        """
        e = self.execute(self.buildStatement(stmt)).fetchall()
        if e: return [x[0] for x in e]
        return []

    def getAll( self, stmt ):
        """return all rows from SQL statement *stmt* as a dictionary.

        The dictionary contains key/values pairs where keys are
        the selected columns and the values are the results.

        Example: SELECT column1, column2 FROM table
        Example: { 'column1': [1,2,3], 'column2' : [2,4,2] }

        Returns an empty dictionary if there is no result.
        """
        # convert to tuples
        e = self.execute(self.buildStatement(stmt))
        columns = list(e.keys())
        d = e.fetchall()
        return odict( list(zip( columns, list(zip( *d )) )) )

    def get( self, stmt ):
        """return all results from an SQL statement as list of tuples.

        Example: SELECT column1, column2 FROM table
        Result: [(1,2),(2,4),(3,2)]

        Returns an empty list if there is no result.
        """
        return self.execute(self.buildStatement(stmt)).fetchall()

    def getDict( self, stmt ):
        """return results from SQL statement *stmt* as a dictionary.

        Example: SELECT column1, column2 FROM table
        Result: { 1: 2, 2: 4, 3: 2}

        The first column is taken as the dictionary key.

        This method is useful to return a data structure
        that can be used for matrix visualization.
        """
        # convert to tuples
        e = self.execute(self.buildStatement(stmt))
        columns = list(e.keys())
        result = odict()
        for row in e:
            # remaining columns keyed by name, per first-column value
            result[row[0]] = odict( list(zip( columns[1:], row[1:] )) )
        return result

    def getIter( self, stmt ):
        '''returns an iterator over results of SQL statement *stmt*.
        '''
        return self.execute(stmt)

    def getTracks(self, *args, **kwargs):
        """return a list of all tracks that this tracker provides.

        Tracks are defined as tables matching the attribute
        :attr:`pattern`.
        """
        if self.pattern:
            rx = re.compile(self.pattern)
            tables = self.getTables( pattern = self.pattern )
            if self.as_tables:
                return sorted([ x for x in tables ] )
            else:
                # use the pattern's first group as the track label
                return sorted([rx.search(x).groups()[0] for x in tables] )
        else:
            return [ "all" ]

    # def getDataFrame( self, stmt ):
    #     '''return an R data frame as rpy2 object.
    #     '''
    #     self.rconnect()
    #     return R.dbGetQuery(self.rdb, self.buildStatement(stmt) )

    def getDataFrame( self, stmt ):
        '''return results of SQL statement as an pandas dataframe.
        '''
        data = self.getAll( stmt )
        df = pandas.DataFrame.from_dict( data )
        return df

    def getPaths( self ):
        """return all paths this tracker provides.

        Tracks are defined as tables matching the attribute
        :attr:`pattern`.
        """
        if self.pattern:
            rx = re.compile(self.pattern)
            # let getTracks handle a single group
            if rx.groups < 2: return None
            tables = self.getTables( pattern = self.pattern )
            parts = [rx.search(x).groups() for x in tables]
            result = []
            # one sorted, de-duplicated level per regex group
            for x in range(rx.groups):
                result.append( sorted( set( [ part[x] for part in parts ] ) ) )
            return result
        return None

###########################################################################
class TrackerSQLCheckTables(TrackerSQL):
    """Tracker that examines the presence/absence of a certain field
    in a list of tables.
Define the following attributes: :attr:`mFields` fields to join :attr:`mExcludePattern`: tables to exclude :attr:`pattern`: pattern to define tracks (see :class:`TrackerSql`) """ mFields = ["id",] mExcludePattern = None pattern = "_annotations$" mIncludePattern = "^%s_" def __init__(self, *args, **kwargs ): TrackerSQL.__init__(self, *args, **kwargs ) def getSlices(self, subset = None): return self.mFields def getTablesOfInterest( self, track ): if self.mExcludePattern: keep = lambda x: not re.search( self.mExcludePattern, x ) and re.search( self.mIncludePattern % track, x ) else: keep = lambda x: re.search( self.mIncludePattern % track, x ) return [ x for x in self.getTables() if keep(x.name) ] def __call__(self, track, slice = None): """count number of unique occurances of field *slice* in tables matching *track*.""" tables = self.getTablesOfInterest( track ) data = [] for table in tables: if slice not in [x.name for x in table.columns]: continue # remove the table name and strip offensive characters field = re.sub( self.mIncludePattern % track, "", table.name ).strip( "._:@$!?#") data.append( (field, self.getValue( "SELECT COUNT(DISTINCT %s) FROM %s" % (slice, table.name) ) ) ) return odict( data ) ########################################################################### ########################################################################### ########################################################################### class TrackerSQLCheckTable(TrackerSQL): """Tracker that counts existing entries in a table. 
Define the following attributes: :attr:`mExcludePattern`: columns to exclude :attr:`pattern`: pattern to define tracks (see :class:`TrackerSql`) """ mExcludePattern = None pattern = "(.*)_evol$" def __init__(self, *args, **kwargs ): TrackerSQL.__init__(self, *args, **kwargs ) def __call__(self, track, *args ): """count number of entries in a table.""" statement = "SELECT COUNT( %s) FROM %s WHERE %s IS NOT NULL" if self.mExcludePattern: fskip = lambda x: re.search( self.mExcludePattern, x ) else: fskip = lambda x: False tablename = track + "_evol" columns = self.getColumns( tablename ) data = [] for column in columns: if fskip( column ): continue data.append( (column, self.getValue( statement % (column, tablename, column) ) ) ) return odict( data ) ########################################################################### ########################################################################### ########################################################################### class Config( Tracker ): '''Tracker providing config values of ini files. The .ini files need to be located in the directory from which sphinxreport is called. returns a dictionary of key,value pairs. 
''' tracks = glob.glob( "*.ini" ) def __init__(self, *args, **kwargs ): Tracker.__init__(self, *args, **kwargs ) def __call__(self, track, *args ): """count number of entries in a table.""" config = configparser.ConfigParser() config.readfp(open(track),"r") result = odict() def convert( value ): '''convert a value to int, float or str.''' rx_int = re.compile("^\s*[+-]*[0-9]+\s*$") rx_float = re.compile("^\s*[+-]*[0-9.]+[.+\-eE][+-]*[0-9.]*\s*$") if value == None: return value if rx_int.match( value ): return int(value), "int" elif rx_float.match( value ): return float(value), "float" return value, "string" for section in config.sections(): x = odict() for key,value in config.items( section ): x[key] = odict( list(zip( ("value", "type" ), convert( value ))) ) result[section] = x return result ########################################################################### ########################################################################### ########################################################################### class Empty( Tracker ): '''Empty tracker This tracker servers as placeholder for plots that require no input from a tracker. ''' def getTracks( self, subset = None ): """return a list of all tracks that this tracker provides.""" return ["empty"] def __call__(self, *args ): return None ########################################################################### ########################################################################### ########################################################################### class Status( TrackerSQL ): '''Tracker returning status information. Define tracks and slices. Slices will be translated into calls to member functions starting with 'test'. Each test function should return a tuple with the test status and some information. If this tracker is paired with a :class:`Renderer.Status` renderer, the following values of a test status will be translated into icons: ``PASS``, ``FAIL``, ``WARNING``, ``NOT AVAILABLE``. 
The docstring of the test function is used as description. ''' def getSlices( self, subset = None ): return [ x[4:] for x in dir(self) if x.startswith("test")] def __call__(self, track, slice ): if not hasattr( self, "test%s" % slice ): raise NotImplementedError( "test%s not implement" % slice ) status, value = getattr( self, "test%s" % slice )(track) description = getattr( self, "test%s" % slice ).__doc__ return odict( ( ( 'name' , slice ), ( 'status', status), ( 'info', str(value)), ( 'description', description ) ) ) ########################################################################### ########################################################################### ########################################################################### class SingleTableTrackerRows( TrackerSQL ): '''Tracker representing a table with multiple tracks. Returns a dictionary of values. The tracks are given by rows in table :py:attr:`table`. The tracks are specified by the :py:attr:`fields`. :py:attr:`fields` is a tuple of column names (default = ``(track,)``). If multiple columns are specified, they will all be used to define the tracks in the table. Rows in the table need to be unique for any combination :py:attr:`fields`. attribute:`extra_columns` can be used to add additional columns to the table. This attribute is a dictionary. ''' exclude_columns = () table = None fields = ("track",) extra_columns = {} sort = None loaded = False # not called by default as Mixin class def __init__(self, *args, **kwargs ): TrackerSQL.__init__(self, *args, **kwargs ) def _load(self): '''load data. The data is pre-loaded in order to avoid multiple random access operations on the same table. 
''' if not self.loaded: nfields = len(self.fields) if self.sort: sort = "ORDER BY %s" % self.sort else: sort = "" self._tracks = self.get( "SELECT DISTINCT %s FROM %s %s" % \ (",".join(self.fields), self.table, sort )) columns = self.getColumns( self.table ) self._slices = [ x for x in columns if x not in self.exclude_columns and x not in self.fields ] + list(self.extra_columns.keys()) # remove columns with special characters (:, +, -, ) self._slices = [ x for x in self._slices if not re.search( "[:+-]", x)] data = self.get( "SELECT %s, %s FROM %s" % (",".join(self.fields), ",".join(self._slices), self.table)) self.data = odict() for d in data: tr = tuple(d[:nfields]) self.data[tr] = odict( list(zip( self._slices, tuple(d[nfields:]))) ) self.loaded = True @property def tracks( self ): if not self.hasTable( self.table ): return [] if not self.loaded: self._load() if len(self.fields) == 1: return tuple( [x[0] for x in self._tracks ] ) else: return tuple( [tuple(x) for x in self._tracks ] ) @property def slices( self ): if not self.hasTable( self.table ): return [] if not self.loaded: self._load() return self._slices def __call__(self, track, slice = None ): if not self.loaded: self._load() if len(self.fields) == 1: track = (track,) return self.data[track][slice] ########################################################################### ########################################################################### ########################################################################### class SingleTableTrackerColumns( TrackerSQL ): '''Tracker representing a table with multiple tracks. Returns a dictionary of two sets of data, one given by :py:attr:`column` and one for a track. The tracks are derived from all columns in table :py:attr:`table`. By default, all columns are taken as tracks apart from :py:attr:`column` and those listed in :py:attr:`exclude_columns`. 
An example for a table using this tracker would be:: bin mouse_counts human_counts 100 10 10 200 20 15 300 10 4 In the example above, the tracks will be ``mouse_counts`` and ``human_counts``. The slices will be ``100``, ``200``, ``300`` Tracker could be defined as:: class MyTracker( SingleTableTrackerColumns ): table = 'mytable' column = 'bin' ''' exclude_columns = ("track,") table = None column = None def __init__(self, *args, **kwargs ): TrackerSQL.__init__(self, *args, **kwargs ) @property def tracks(self): if not self.hasTable( self.table ): return [] columns = self.getColumns( self.table ) return [ x for x in columns if x not in self.exclude_columns and x != self.column ] @property def slices(self): if self.column: return self.getValues( "SELECT DISTINCT %(column)s FROM %(table)s" ) else: return [] def __call__(self, track, slice = None ): if slice != None: data = self.getValue( "SELECT %(track)s FROM %(table)s WHERE %(column)s = '%(slice)s'" ) else: data = self.getValues( "SELECT %(track)s FROM %(table)s" ) return data ########################################################################### ########################################################################### ########################################################################### class SingleTableTrackerEdgeList( TrackerSQL ): '''Tracker representing a table with matrix type data. Returns a dictionary of values. The tracks are given by entries in the :py:attr:`row` column in a table :py:attr:`table`. The slices are given by entries in the :py:attr:`column` column in a table. The :py:attr:`value` is a third column specifying the value returned. If :py:attr:`where` is set, it is added to the SQL statement to permit some filtering. If :py:attr:`transform` is set, it is applied to the value. if :py:attr:`value2` is set, the matrix is assumed to be stored in the format ``(row, column, value, value1)``, where ``value`` is the value for ``row,col`` and value2 is the value for ``col,row``. 
This method is inefficient, particularly so if there are no indices on :py:attr:`row` and :py:attr:`column`. ''' table = None row = None column = None value = None value2 = None transform = None where = "1" def __init__(self, *args, **kwargs ): TrackerSQL.__init__(self, *args, **kwargs ) @property def tracks( self ): if self.table == None: raise NotImplementedError("table not defined" ) if not self.hasTable( self.table ): raise ValueError("unknown table %s" % self.table) if self.value2 != None: return sorted( set( self.getValues( "SELECT DISTINCT %(row)s FROM %(table)s") +\ self.getValues( "SELECT DISTINCT %(column)s FROM %(table)s") ) ) else: return self.getValues( "SELECT DISTINCT %(row)s FROM %(table)s" ) @property def slices( self ): if self.value2 != None: return self.tracks else: return self.getValues( "SELECT DISTINCT %(column)s FROM %(table)s" ) def __call__(self, track, slice = None ): try: val = self.getValue( """SELECT %(value)s FROM %(table)s WHERE %(row)s = '%(track)s' AND %(column)s = '%(slice)s' AND %(where)s""" ) except exc.SQLAlchemyError: val = None if val == None and self.value2: try: val = self.getValue( """SELECT %(value2)s FROM %(table)s WHERE %(row)s = '%(slice)s' AND %(column)s = '%(track)s' AND %(where)s""" ) except exc.SQLAlchemyError: val = None if val == None: return val if self.transform: return self.transform(val) return val ########################################################################### ########################################################################### ########################################################################### class MultipleTableTrackerEdgeList( TrackerSQL ): '''Tracker representing multiple tables with matrix type data. Returns a dictionary of values. The tracks are given by table names mathing :py:attr:`pattern`. 
''' row = None column = None value = None as_tables = True def __init__(self, *args, **kwargs ): TrackerSQL.__init__(self, *args, **kwargs ) def __call__(self, track, slice = None ): if self.column == None: raise ValueError('MultipleTrackerEdgeList requires a column field') if self.row == None: raise ValueError('MultipleTrackerEdgeList requires a row field') if self.value == None: raise ValueError('MultipleTrackerEdgeList requires a value field') data = self.get( """SELECT %(row)s, %(column)s, %(value)s FROM %(track)s ORDER BY fdr,power""" ) result = odict() for row, col, value in data: try: result[row][col] = value except KeyError: result[row] = odict() result[row][col] = value return result ########################################################################### ########################################################################### ########################################################################### class SingleTableTrackerHistogram( TrackerSQL ): '''Tracker representing a table with multiple tracks. Returns a dictionary of two sets of data, one given by :py:attr:`column` and one for a track. The tracks are derived from all columns in table :py:attr:`table`. By default, all columns are taken as tracks apart from :py:attr:`column` and those listed in :py:attr:`exclude_columns`. An example for a table using this tracker would be:: bin mouse_counts human_counts 100 10 10 200 20 15 300 10 4 In the example above, the tracks will be ``mouse_counts`` and ``human_counts``. 
The Tracker could be defined as:: class MyTracker( SingleTableTrackerHistogram ): table = 'mytable' column = 'bin' ''' exclude_columns = ("track,") table = None column = None value = 'data' def __init__(self, *args, **kwargs ): TrackerSQL.__init__(self, *args, **kwargs ) @property def tracks(self): if self.column == None: raise NotImplementedError( "column not set - Tracker not fully implemented" ) if not self.hasTable( self.table ): return [] columns = self.getColumns( self.table ) return [ x for x in columns if x not in self.exclude_columns and x != self.column ] def __call__(self, track, slice = None ): if self.column == None: raise NotImplementedError( "column not set - Tracker not fully implemented" ) # labels need to be consistent in order # so rename track to value. data = self.getAll( "SELECT %(column)s, %(track)s AS %(value)s FROM %(table)s" ) return data class MultipleTableTrackerHistogram( TrackerSQL ): '''Tracker representing multiple table with multiple slices. Returns a dictionary of two sets of data, one given by :py:attr:`column` and one for a track. The tracks are derived from all columns in table :py:attr:`table`. By default, all columns are taken as tracks apart from :py:attr:`column` and those listed in :py:attr:`exclude_columns`. An example for a table using this tracker would be:: bin mouse_counts human_counts 100 10 10 200 20 15 300 10 4 In the example above, the tracks will be ``mouse_counts`` and ``human_counts``. 
The Tracker could be defined as:: class MyTracker( ManyTableTrackerHistogram ): pattern = '(.*)_table' column = 'bin' ''' exclude_columns = ("track,") as_tables = True column = None def __init__(self, *args, **kwargs ): TrackerSQL.__init__(self, *args, **kwargs ) @property def slices(self): if self.column == None: raise NotImplementedError( "column not set - Tracker not fully implemented" ) columns = set() for table in self.getTableNames( self.pattern ): columns.update( self.getColumns( table ) ) return [ x for x in columns if x not in self.exclude_columns and x != self.column ] def __call__(self, track, slice ): # check if column exists in particular table - if not, return no data if slice not in self.getColumns( track ): return None return self.getAll( """SELECT %(column)s, %(slice)s FROM %(track)s""" ) # def __call__(self, track ): # if self.column == None: raise NotImplementedError( "column not set - Tracker not fully implemented" ) # # get columns in the alphabetical order # columns = sorted( self.getColumns( track ) ) # if self.column not in columns: raise ValueError("column '%s' missing from '%s'" % (self.column, track )) # columns = ",".join( [ x for x in columns if x not in self.exclude_columns and x != self.column ] ) # return self.getAll( """SELECT %(column)s, %(columns)s FROM %(track)s""" ) class TrackerSQLMulti( TrackerSQL ): '''An SQL tracker spanning multiple databases. 
''' databases = () tracks = () def __init__(self, *args, **kwargs): TrackerSQL.__init__(self, *args, **kwargs ) if len(self.tracks) == 0: raise ValueError("no tracks specified in TrackerSQLMulti") if (len(self.tracks) != len(self.databases)): raise ValueError("TrackerSQLMulti requires an equal number of tracks (%i) and databases (%i)" \ % (len(self.tracks), len(self.databases))) if not self.backend.startswith("sqlite"): raise ValueError( "TrackerSQLMulti only works for sqlite database" ) if not self.db: def _my_creator(): # issuing the ATTACH DATABASE into the sqlalchemy ORM (self.db.execute( ... )) # does not work. The database is attached, but tables are not accessible in later # SELECT statements. import sqlite3 conn = sqlite3.connect(re.sub( "sqlite:///", "", self.backend) ) for track, name in zip( self.databases, self.tracks ): conn.execute( "ATTACH DATABASE '%s/csvdb' AS %s" % \ (os.path.abspath(track), name)) return conn self.connect( creator = _my_creator ) class TrackerMultipleLists( TrackerSQL ): ''' A class to retrieve multiple columns across one or more tables. Returns a dictionary of lists. TrackerMultipleLists can be used in conjunction with venn and hypergeometric transformers and the venn render. The items in each list are specified by an SQL statement. The statements can be specified in 3 different ways: :attr:`statements` dictionary If the tracker contains a statements attribute then the statments are taken from here as well as the list names e.g.:: class TrackerOverlapTest1( TrackerOverlappingSets ): statements = {"listA": "SELECT gene_id FROM table_a", "listB": "SELECT gene_id FROM table_b"} :attr:`listA`, :attr:`listB`,:attr:`listC` and :attr:`background` attributes If the tracker does not contain a statements dictionary then the statements can be specifed using these attributes. An optional list of labels can be specified for the names of these lists. 
For example:: class TrackerOverlapTest2( TrackerOverlappingSets ): listA = "SELECT gene_id FROM table_a" listB = "SELECT gene_id FROM table_b" labels = ["FirstList","SecondList"] :meth:`getStatements` method The :meth:`getStatements` method can be overridden to allow full control over where the statements come from. It should return a dictionary of SQL statements. Because TrackerMultipleLists is derived from :class:`TrackerSQL`, tracks and slices can be specified in the usual way. ''' statements = None ListA = None ListB = None ListC = None background = None labels = None def getStatements(self): statements = odict() if self.statements: return self.statements if self.ListA: if self.labels: label = self.labels[0] else: label = "ListA" statements[label] = self.ListA if self.ListB: if self.labels: label = self.labels[1] else: label = "ListB" statements[label] = self.ListB if self.ListC: if self.labels: label = self.labels[2] else: label = "ListC" statements[label] = self.ListC if self.background: statements["background"] = self.background return statements def __call__(self, track, slice = None): statements = self.getStatements() # track and slice will be substituted in the statements return odict( [(x,self.getValues(statements[x])) for x in statements] ) class MeltedTableTracker( TrackerSQL ): '''Tracker representing multiple tables with the same columns. The tables are melted - a column called ``track`` is added that contains the table name. 
''' tracks = "all" pattern = None def __init__(self, *args, **kwargs ): TrackerSQL.__init__(self, *args, **kwargs ) def __call__(self, track ): assert( self.pattern != None ) tables = self.getTables( self.pattern ) ref_columns = self.getColumns( tables[0] ) fields = ",".join( ref_columns ) results = [] for table in tables: columns = self.getColumns( table ) if columns != ref_columns: E.warn( "incompatible column names in table %s - skipped" % table ) continue track = re.search( self.pattern, table ).groups()[0] results.extend( self.get( "SELECT '%(track)s' as track, %(fields)s FROM %(table)s" ) ) ref_columns.insert( 0, "track") return odict( zip( ref_columns, zip(*results) )) class MeltedTableTrackerDataframe( MeltedTableTracker ): '''Tracker representing multiple tables with the same columns. The tables are melted - a column called ``track`` is added that contains the table name. This tracker returns a dataframe diretly. ''' tracks = "all" pattern = None def __init__(self, *args, **kwargs ): TrackerSQL.__init__(self, *args, **kwargs ) def __call__(self, track ): data = MeltedTableTracker.__call__(self, track ) return pandas.DataFrame.from_dict( data )
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

# NOTE(review): this module uses Python 2 syntax (`except E, e`, dict.iteritems);
# it will not run under Python 3 without porting.
import worker
import lock
import logging
import logging.config
import rpc
import optparse
import scheduler
import warnings
import configuration
import task
import parameter
import re
import argparse
import sys
import os

from task import Register


def setup_interface_logging(conf_file=None):
    '''Configure logging for the luigi-interface logger.

    If ``conf_file`` is None, a DEBUG-level stream handler with a simple
    "LEVEL: message" format is attached; otherwise the file is handed to
    ``logging.config.fileConfig``. Idempotent: subsequent calls are no-ops.
    '''
    # use a variable in the function object to determine if it has run before
    if getattr(setup_interface_logging, "has_run", False):
        return

    if conf_file is None:
        logger = logging.getLogger('luigi-interface')
        logger.setLevel(logging.DEBUG)

        streamHandler = logging.StreamHandler()
        streamHandler.setLevel(logging.DEBUG)

        formatter = logging.Formatter('%(levelname)s: %(message)s')
        streamHandler.setFormatter(formatter)

        logger.addHandler(streamHandler)
    else:
        logging.config.fileConfig(conf_file, disable_existing_loggers=False)

    # mark as done so a second call does not attach a duplicate handler
    setup_interface_logging.has_run = True


def get_config():
    '''Deprecated accessor; use luigi.configuration.get_config() instead.'''
    warnings.warn('Use luigi.configuration.get_config() instead')
    return configuration.get_config()


class EnvironmentParamsContainer(task.Task):
    ''' Keeps track of a bunch of environment params.

    Uses the internal luigi parameter mechanism.
    The nice thing is that we can instantiate this class
    and get an object with all the environment variables set.
    This is arguably a bit of a hack.'''

    # TODO(erikbern): would be cleaner if we don't have to read config in global scope
    local_scheduler = parameter.BooleanParameter(is_global=True, default=False, description='Use local scheduling')
    scheduler_host = parameter.Parameter(is_global=True, default=configuration.get_config().get('core', 'default-scheduler-host', default='localhost'), description='Hostname of machine running remote scheduler')
    scheduler_port = parameter.IntParameter(is_global=True, default=8082, description='Port of remote scheduler api process')
    lock = parameter.BooleanParameter(is_global=True, default=False, description='Do not run if the task is already running')
    lock_pid_dir = parameter.Parameter(is_global=True, default='/var/tmp/luigi', description='Directory to store the pid file')
    workers = parameter.IntParameter(is_global=True, default=1, description='Maximum number of parallel tasks to run')
    logging_conf_file = parameter.Parameter(is_global=True, default=None, description='Configuration file for logging')

    @classmethod
    def env_params(cls, override_defaults):
        '''Return an instance whose global params reflect ``override_defaults``.

        Mutates the defaults of the matching global parameter objects, so
        this is not side-effect free.
        '''
        # Override any global parameter with whatever is in override_defaults
        for param_name, param_obj in cls.get_global_params():
            if param_name in override_defaults:
                param_obj.set_default(override_defaults[param_name])
        return cls()  # instantiate an object with the global params set on it


def expose(cls):
    '''Deprecated no-op decorator kept for backwards compatibility.'''
    warnings.warn('expose is no longer used, everything is autoexposed', DeprecationWarning)
    return cls


def expose_main(cls):
    '''Deprecated no-op decorator kept for backwards compatibility.'''
    warnings.warn('expose_main is no longer supported, use luigi.run(..., main_task_cls=cls) instead', DeprecationWarning)
    return cls


def reset():
    '''Deprecated no-op kept for backwards compatibility.'''
    warnings.warn('reset is no longer supported')


class WorkerSchedulerFactory(object):
    '''Factory for the scheduler/worker objects used by Interface.run.

    Subclass and override to inject alternative implementations.
    '''

    def create_local_scheduler(self):
        # in-process scheduler, used with --local-scheduler
        return scheduler.CentralPlannerScheduler()

    def create_remote_scheduler(self, host, port):
        # RPC proxy to a central luigid instance
        return rpc.RemoteScheduler(host=host, port=port)

    def create_worker(self, scheduler, worker_processes):
        return worker.Worker(scheduler=scheduler, worker_processes=worker_processes)


class Interface(object):
    '''Base class for command-line front ends; subclasses implement parse().'''

    def parse(self):
        raise NotImplementedError

    @staticmethod
    def run(tasks, worker_scheduler_factory=None, override_defaults={}):
        '''Schedule and run ``tasks``: set up logging, optionally acquire a
        pid lock, pick a local or remote scheduler, then add every task to a
        worker and run it.

        NOTE(review): ``override_defaults={}`` is a mutable default argument;
        safe only as long as callers never mutate it.
        '''
        if worker_scheduler_factory is None:
            worker_scheduler_factory = WorkerSchedulerFactory()
        env_params = EnvironmentParamsContainer.env_params(override_defaults)
        # search for logging configuration path first on the command line, then
        # in the application config file
        logging_conf = env_params.logging_conf_file or \
            configuration.get_config().get('core', 'logging_conf_file', None)
        if logging_conf is not None and not os.path.exists(logging_conf):
            raise Exception("Error: Unable to locate specified logging configuration file!")

        if not configuration.get_config().getboolean('core', 'no_configure_logging', False):
            setup_interface_logging(logging_conf)

        # --lock: refuse to start if another instance already holds the pid lock
        if env_params.lock and not(lock.acquire_for(env_params.lock_pid_dir)):
            sys.exit(1)

        if env_params.local_scheduler:
            sch = worker_scheduler_factory.create_local_scheduler()
        else:
            sch = worker_scheduler_factory.create_remote_scheduler(host=env_params.scheduler_host, port=env_params.scheduler_port)

        w = worker_scheduler_factory.create_worker(scheduler=sch, worker_processes=env_params.workers)

        for task in tasks:
            w.add(task)

        logger = logging.getLogger('luigi-interface')
        logger.info('Done scheduling tasks')

        w.run()
        w.stop()


class ErrorWrappedArgumentParser(argparse.ArgumentParser):
    ''' Wraps ArgumentParser's error message to suggested similar tasks '''

    # Simple unweighted Levenshtein distance
    def _editdistance(self, a, b):
        # NOTE(review): `a[i] is b[j]` compares identity, not equality; it
        # relies on CPython interning of short strings/characters — confirm,
        # `==` would be the robust comparison.
        r0 = range(0, len(b) + 1)
        r1 = [0] * (len(b) + 1)
        for i in range(0, len(a)):
            r1[0] = i + 1
            for j in range(0, len(b)):
                c = 0 if a[i] is b[j] else 1
                r1[j + 1] = min(r1[j] + 1, r0[j + 1] + 1, r0[j] + c)
            r0 = r1[:]
        return r1[len(b)]

    def error(self, message):
        '''Intercept "invalid choice" errors and suggest near-miss task names
        (edit distance <= 5 and smaller than the candidate's length);
        delegate everything else to the stock argparse error handler.'''
        result = re.match("argument .+: invalid choice: '(\w+)'.+", message)
        if result:
            arg = result.group(1)
            weightedTasks = [(self._editdistance(arg, task), task) for task in Register.get_reg().keys()]
            orderedTasks = sorted(weightedTasks, key=lambda pair: pair[0])
            candidates = [task for (dist, task) in orderedTasks if dist <= 5 and dist < len(task)]
            displaystring = ""
            if candidates:
                displaystring = "No task %s. Did you mean:\n%s" % (arg, '\n'.join(candidates))
            else:
                displaystring = "No task %s." % arg
            super(ErrorWrappedArgumentParser, self).error(displaystring)
        else:
            super(ErrorWrappedArgumentParser, self).error(message)


class ArgParseInterface(Interface):
    '''
    Takes the task as the command, with parameters specific to it
    '''
    def parse(self, cmdline_args=None, main_task_cls=None):
        '''Build an argparse parser (one subcommand per registered task,
        unless ``main_task_cls`` pins the task), parse ``cmdline_args`` and
        return a single-element list with the instantiated task.'''
        parser = ErrorWrappedArgumentParser()

        def _add_parameter(parser, param_name, param, prefix=None):
            # one --flag per luigi parameter; list params append, booleans
            # become store_true flags, everything else stores a string
            description = []
            if prefix:
                description.append('%s.%s' % (prefix, param_name))
            else:
                description.append(param_name)
            if param.description:
                description.append(param.description)
            if param.has_default:
                description.append(" [default: %s]" % (param.default,))

            if param.is_list:
                action = "append"
            elif param.is_boolean:
                action = "store_true"
            else:
                action = "store"
            parser.add_argument('--' + param_name.replace('_', '-'), help=' '.join(description), default=None, action=action)

        def _add_task_parameters(parser, cls):
            for param_name, param in cls.get_nonglobal_params():
                _add_parameter(parser, param_name, param, cls.task_family)

        def _add_global_parameters(parser):
            for param_name, param in Register.get_global_params():
                _add_parameter(parser, param_name, param)

        _add_global_parameters(parser)

        if main_task_cls:
            _add_task_parameters(parser, main_task_cls)
        else:
            # one subparser per registered task; metavar lists them sorted
            orderedtasks = '{%s}' % ','.join(sorted(Register.get_reg().keys()))
            subparsers = parser.add_subparsers(dest='command', metavar=orderedtasks)

            for name, cls in Register.get_reg().iteritems():
                subparser = subparsers.add_parser(name)
                if cls == Register.AMBIGUOUS_CLASS:
                    continue
                _add_task_parameters(subparser, cls)
                # Add global params here as well so that we can support both:
                # test.py --global-param xyz Test --n 42
                # test.py Test --n 42 --global-param xyz
                _add_global_parameters(subparser)

        args = parser.parse_args(args=cmdline_args)
        params = vars(args)  # convert to a str -> str hash

        if main_task_cls:
            task_cls = main_task_cls
        else:
            task_cls = Register.get_reg()[args.command]
            if task_cls == Register.AMBIGUOUS_CLASS:
                raise Exception('%s is ambigiuous' % args.command)

        # Notice that this is not side effect free because it might set global params
        task = task_cls.from_input(params, Register.get_global_params())

        return [task]


class PassThroughOptionParser(optparse.OptionParser):
    '''
    An unknown option pass-through implementation of OptionParser.

    When unknown arguments are encountered, bundle with largs and try again,
    until rargs is depleted.

    sys.exit(status) will still be called if a known
    argument is passed incorrectly (e.g. missing arguments or bad argument types, etc.)
    '''
    def _process_args(self, largs, rargs, values):
        # keep retrying: each unknown option is moved to largs and parsing
        # resumes with the remaining rargs
        while rargs:
            try:
                optparse.OptionParser._process_args(self, largs, rargs, values)
            except (optparse.BadOptionError, optparse.AmbiguousOptionError), e:
                largs.append(e.opt_str)


class OptParseInterface(Interface):
    '''
    Supported for legacy reasons where it's necessary to interact with an
    existing parser.

    Takes the task using --task. All parameters to all possible tasks will be
    defined globally in a big unordered soup.
    '''
    def __init__(self, existing_optparse):
        self.__existing_optparse = existing_optparse

    def parse(self, cmdline_args=None, main_task_cls=None):
        '''Two-pass optparse front end: a pass-through parse extracts --task,
        then the real parser (possibly the caller-supplied one) is populated
        with that task's parameters and re-parsed. Returns [task].'''
        global_params = list(Register.get_global_params())

        # first pass: a tolerant parser that ignores unknown options, used
        # only to discover which task was requested
        parser = PassThroughOptionParser()

        tasks_str = '/'.join(sorted([name for name in Register.get_reg()]))

        def add_task_option(p):
            if main_task_cls:
                p.add_option('--task', help='Task to run (' + tasks_str + ') [default: %default]', default=main_task_cls.task_family)
            else:
                p.add_option('--task', help='Task to run (%s)' % tasks_str)

        def _add_parameter(parser, param_name, param):
            description = [param_name]
            if param.description:
                description.append(param.description)
            if param.has_default:
                description.append(" [default: %s]" % (param.default,))

            if param.is_list:
                action = "append"
            elif param.is_boolean:
                action = "store_true"
            else:
                action = "store"
            parser.add_option('--' + param_name.replace('_', '-'), help=' '.join(description), default=None, action=action)

        for param_name, param in global_params:
            _add_parameter(parser, param_name, param)

        add_task_option(parser)
        options, args = parser.parse_args(args=cmdline_args)

        task_cls_name = options.task
        # second pass: the strict parser (reuse the caller's if provided)
        if self.__existing_optparse:
            parser = self.__existing_optparse
        else:
            parser = optparse.OptionParser()
        add_task_option(parser)

        if task_cls_name not in Register.get_reg():
            raise Exception('Error: %s is not a valid tasks (must be %s)' % (task_cls_name, tasks_str))

        # Register all parameters as a big mess
        task_cls = Register.get_reg()[task_cls_name]
        if task_cls == Register.AMBIGUOUS_CLASS:
            raise Exception('%s is ambiguous' % task_cls_name)

        params = task_cls.get_nonglobal_params()

        for param_name, param in global_params:
            _add_parameter(parser, param_name, param)

        for param_name, param in params:
            _add_parameter(parser, param_name, param)

        # Parse and run
        options, args = parser.parse_args(args=cmdline_args)

        params = {}
        for k, v in vars(options).iteritems():
            if k != 'task':
                params[k] = v

        task = task_cls.from_input(params, global_params)

        return [task]


class LuigiConfigParser(configuration.LuigiConfigParser):
    ''' Deprecated class, use configuration.LuigiConfigParser instead. Left
    for backwards compatibility '''
    pass


def run(cmdline_args=None, existing_optparse=None, use_optparse=False, main_task_cls=None, worker_scheduler_factory=None):
    ''' Run from cmdline.

    The default parser uses argparse.
    However for legacy reasons we support optparse that optionally allows for
    overriding an existing option parser with new args.
    '''
    if use_optparse:
        interface = OptParseInterface(existing_optparse)
    else:
        interface = ArgParseInterface()
    tasks = interface.parse(cmdline_args, main_task_cls=main_task_cls)
    interface.run(tasks, worker_scheduler_factory)


def build(tasks, worker_scheduler_factory=None, **env_params):
    ''' Run internally, bypassing the cmdline parsing.

    Useful if you have some luigi code that you want to run internally.
    Example
    luigi.build([MyTask1(), MyTask2()], local_scheduler=True)
    '''
    Interface.run(tasks, worker_scheduler_factory, env_params)
#!/usr/bin/env python3
__author__ = 'tomarovsky'
import numpy as np
import matplotlib.pyplot as plt
plt.ioff()
from argparse import ArgumentParser

# Column index (in the window-stats file) holding each supported metric.
# Column 1 always holds the x values (window position).
METRIC_COLUMNS = {"median": 2, "average": 3, "max": 4, "min": 5}


def str2bool(value):
    """Convert a boolean-ish CLI string to a real bool.

    argparse's ``type=bool`` treats ANY non-empty string — including
    ``"False"`` — as True.  This converter restores the intended semantics
    while staying backward compatible with ``--grid True`` invocations.
    Already-boolean values pass through unchanged.
    """
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ("1", "true", "t", "yes", "y")


def draw_plot_by_window_stats(input_file, output_prefix, metric, separator="\t",
                              min_x=None, max_x=None, min_y=None, max_y=None,
                              extensions=("png", "svg"), xlabel=None, ylabel=None,
                              title=None, width=6, height=6, markersize=2,
                              ylogbase=10, type="plot", grid=False, close_plot=True):
    """Plot one coverage metric from a per-window stats file.

    Saves a linear-scale figure as ``{output_prefix}.{type}.{ext}`` and a
    log-y version as ``{output_prefix}.{type}.{ylogbase}.{ext}`` for every
    extension in ``extensions``.

    :param input_file: stats file; column 1 is the x axis, columns 2-5 hold
        median/average/max/min respectively (lines starting with '#' skipped).
    :param metric: one of 'median', 'average', 'max', 'min' (case-insensitive).
    :param type: 'plot' for a line plot, 'scatter' for a scatter plot.
    :raises ValueError: if ``metric`` is not a supported name (the original
        code fell through and crashed with an UnboundLocalError instead).
    """
    metric_key = metric.lower()
    if metric_key not in METRIC_COLUMNS:
        raise ValueError("Unknown metric %r; expected one of: %s"
                         % (metric, ", ".join(sorted(METRIC_COLUMNS))))
    data = np.loadtxt(input_file, comments="#",
                      usecols=(1, METRIC_COLUMNS[metric_key]), delimiter=separator)

    plt.figure(1, figsize=(width, height), dpi=300)
    plt.subplot(1, 1, 1)
    if type == "plot":
        plt.plot(data[:, 0], data[:, 1], markersize=markersize)
    elif type == "scatter":
        plt.scatter(data[:, 0], data[:, 1], s=markersize)
    # left/right and bottom/top are the modern names for xmin/xmax, ymin/ymax
    plt.xlim(left=min_x, right=max_x)
    plt.ylim(bottom=min_y, top=max_y)
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)
    if title:
        plt.title(title)
    # str2bool also fixes the old truthiness bug where grid="False" enabled the grid
    if str2bool(grid):
        plt.grid()
    for ext in extensions:
        plt.savefig(f"{output_prefix}.{type}.{ext}")
    # pass the requested log base — the original always used base 10 and only
    # put ylogbase in the filename (requires matplotlib >= 3.3)
    plt.yscale("log", base=ylogbase)
    for ext in extensions:
        plt.savefig(f"{output_prefix}.{type}.{ylogbase}.{ext}")
    if str2bool(close_plot):
        plt.close()


def main():
    """CLI entry point: forward parsed arguments to the plotting routine."""
    draw_plot_by_window_stats(args.input_file, args.output_prefix, args.metric,
                              separator=args.separator, extensions=args.extensions,
                              min_x=args.min_x, max_x=args.max_x,
                              min_y=args.min_y, max_y=args.max_y,
                              xlabel=args.xlabel, ylabel=args.ylabel,
                              title=args.title, width=args.width, height=args.height,
                              markersize=args.markersize, ylogbase=args.ylogbase,
                              type=args.type, grid=args.grid, close_plot=args.close_plot)


if __name__ == '__main__':
    parser = ArgumentParser(description="coverage visualization from stats.csv file (scripts/Coverage/coverage_statistics.py)")
    group_required = parser.add_argument_group('Required options')
    group_required.add_argument('-i', '--input-file', type=str, help="mosdepth.bed.gz file")
    group_required.add_argument('-o', '--output-prefix', type=str, help="output prefix")
    group_additional = parser.add_argument_group('Additional options')
    group_additional.add_argument('-m', '--metric', type=str, help="metric: 'median', 'average', 'max' or 'min'", default="median")
    group_additional.add_argument('-s', '--separator', type=str, help="separator", default="\t")
    group_additional.add_argument('-e', '--extensions', type=lambda s: [str(ext) for ext in s.split(',')],
                                  default=["png", "svg"], help="output files extensions")
    group_additional.add_argument('--min_x', help="min_x value", type=int, default=None)
    group_additional.add_argument('--max_x', help="max_x value", type=int, default=None)
    group_additional.add_argument('--min_y', help="min_y value", type=int, default=None)
    group_additional.add_argument('--max_y', help="max_y value", type=int, default=None)
    group_additional.add_argument('--xlabel', help="xlabel", default=None)
    group_additional.add_argument('--ylabel', help="ylabel", default=None)
    group_additional.add_argument('--title', type=str, help="title", default=None)
    group_additional.add_argument('--width', type=int, help="xlabel", default=6)
    group_additional.add_argument('--height', type=int, help="ylabel", default=6)
    group_additional.add_argument('--markersize', type=int, help="markersize", default=2)
    group_additional.add_argument('--ylogbase', type=int, help="ylogbase", default=10)
    group_additional.add_argument('--type', type=str, help="type", default="plot")
    # type=bool was broken: argparse passed the raw string, so '--grid False'
    # evaluated truthy; str2bool parses it correctly and stays CLI-compatible
    group_additional.add_argument('--grid', type=str2bool, help="grid", default=False)
    group_additional.add_argument('--close_plot', type=str2bool, help="close plot", default=True)
    args = parser.parse_args()
    main()
# Copyright (c) 2015, Narrative Science
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""A module for pretty-printing tables."""

import os


def render_columns(columns, write_borders=True, column_colors=None):
    """
    Renders a list of columns.

    :param columns: A list of columns, where each column is a list of strings.
    :type columns: [[``str``]]
    :param write_borders: Whether to write the top and bottom borders.
    :type write_borders: ``bool``
    :param column_colors: A list of coloring functions, one for each column.
        Optional.
    :type column_colors: [``str`` -> ``str``] or ``NoneType``
    :raises ValueError: If ``column_colors`` is given but its length differs
        from the number of columns.
    :return: The rendered columns (empty string for no columns).
    :rtype: ``str``
    """
    if column_colors is not None and len(column_colors) != len(columns):
        raise ValueError('Wrong number of column colors')
    # Guard: max() on an empty sequence raises; an empty table renders as ''.
    if not columns:
        return ''
    widths = [max(len(cell) for cell in column) for column in columns]
    max_column_length = max(len(column) for column in columns)
    result = '\n'.join(render_row(i, columns, widths, column_colors)
                       for i in range(max_column_length))
    if write_borders:
        border = '+%s+' % '|'.join('-' * (w + 2) for w in widths)
        return '%s\n%s\n%s' % (border, result, border)
    else:
        return result


def render_row(num, columns, widths, column_colors=None):
    """
    Render the `num`th row of each column in `columns`.

    Columns shorter than ``num + 1`` contribute an empty (space-filled) cell,
    so jagged columns are handled gracefully.

    :param num: Which row to render.
    :type num: ``int``
    :param columns: The list of columns.
    :type columns: [[``str``]]
    :param widths: The widths of each column.
    :type widths: [``int``]
    :param column_colors: An optional list of coloring functions.
    :type column_colors: [``str`` -> ``str``] or ``NoneType``
    :return: The rendered row.
    :rtype: ``str``
    """
    cell_strs = []
    for i, column in enumerate(columns):
        try:
            cell = column[num]
            # We choose the number of spaces before we color the string, so
            # that the coloring codes don't affect the length.
            spaces = ' ' * (widths[i] - len(cell))
            if column_colors is not None and column_colors[i] is not None:
                cell = column_colors[i](cell)
            cell_strs.append(' %s%s ' % (cell, spaces))
        except IndexError:
            # If the index is out of range, just print an empty cell.
            cell_strs.append(' ' * (widths[i] + 2))
    return '|%s|' % '|'.join(cell_strs)


def render_table(table, write_borders=True, column_colors=None):
    """
    Renders a table.

    A table is a list of rows, each of which is a list of arbitrary objects.
    The `.str` method will be called on each element of the row. Jagged
    tables are ok; in this case, each row will be expanded to the maximum
    row length.

    :param table: A list of rows, as described above.
    :type table: [[``object``]]
    :param write_borders: Whether there should be a border on the top and
        bottom. Defaults to ``True``.
    :type write_borders: ``bool``
    :param column_colors: An optional list of coloring *functions* to be
        applied to each cell in each column. If provided, the list's length
        must be equal to the maximum number of columns. ``None`` can be mixed
        in to this list so that a selection of columns can be colored.
    :type column_colors: [``str`` -> ``str``] or ``NoneType``
    :return: The rendered table.
    :rtype: ``str``
    """
    prepare_rows(table)
    columns = transpose_table(table)
    return render_columns(columns, write_borders, column_colors)


def transpose_table(table):
    """
    Transposes a table, turning rows into columns.

    :param table: A 2D string grid.
    :type table: [[``str``]]
    :return: The same table, with rows and columns flipped.
    :rtype: [[``str``]]
    """
    if len(table) == 0:
        return table
    else:
        num_columns = len(table[0])
        return [[row[i] for row in table] for i in range(num_columns)]


def prepare_rows(table):
    """
    Prepare the rows so they're all strings, and all the same length.

    Mutates ``table`` in place (rows are padded and stringified) and also
    returns it. ``None`` cells become empty strings.

    :param table: A 2D grid of anything.
    :type table: [[``object``]]
    :return: A table of strings, where every row is the same length.
    :rtype: [[``str``]]
    """
    # Guard: max() on an empty table raises; nothing to normalize.
    if not table:
        return table
    num_columns = max(len(row) for row in table)
    for row in table:
        while len(row) < num_columns:
            row.append('')
        for i in range(num_columns):
            row[i] = str(row[i]) if row[i] is not None else ''
    return table


def get_table_width(table):
    """
    Gets the width of the table that would be printed.

    :param table: A 2D grid of anything (see :func:`render_table`).
    :type table: [[``object``]]
    :rtype: ``int``
    """
    columns = transpose_table(prepare_rows(table))
    widths = [max(len(cell) for cell in column) for column in columns]
    return len('+' + '|'.join('-' * (w + 2) for w in widths) + '+')
<gh_stars>1-10
# Generated by Django 2.2.3 on 2019-12-27 16:23
#
# NOTE(review): auto-generated initial migration for the `dotacni_matice`
# app (Czech grant/subsidy matrix). Generated migrations should not be
# hand-edited; schema changes belong in a follow-up migration.

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    # First migration for this app: creates the full initial schema.
    initial = True

    dependencies = [
    ]

    operations = [
        # Small lookup tables referenced by the main models below.
        migrations.CreateModel(
            name='CallType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=32)),
            ],
        ),
        migrations.CreateModel(
            name='Competence',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('competence', models.CharField(max_length=10)),
                ('description', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='DataSource',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('source', models.CharField(max_length=16)),
            ],
        ),
        migrations.CreateModel(
            name='Field',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('field', models.CharField(max_length=16)),
            ],
        ),
        migrations.CreateModel(
            name='KodSed',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('kod', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='OP',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('op', models.CharField(max_length=16)),
            ],
        ),
        migrations.CreateModel(
            name='Program',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('program', models.CharField(max_length=16)),
                ('description', models.TextField(blank=True)),
            ],
        ),
        # Monitoring indicator: the main reference entity, with FK/M2M links
        # to the lookup tables above.
        migrations.CreateModel(
            name='Indicator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('kod_nci_07_13', models.CharField(blank=True, max_length=16, null=True, verbose_name='kód NČI 07-13')),
                ('npr_envi', models.BooleanField(blank=True, null=True, verbose_name='NPR/ENVI')),
                ('c_s', models.CharField(blank=True, choices=[('c', 'C'), ('s', 'S')], max_length=1, null=True, verbose_name='C/S')),
                ('fond', models.CharField(blank=True, choices=[('efrr_fs', 'EFRR / FS'), ('enrf', 'ENRF'), ('esf', 'ESF'), ('esf_yei', 'ESF / YEI'), ('eus', 'EUS'), ('ezfrv', 'EZFRV')], max_length=16, null=True, verbose_name='Fond')),
                ('kod_ek', models.CharField(blank=True, max_length=16, null=True)),
                ('wf', models.IntegerField(blank=True, null=True, verbose_name='WF')),
                ('kod_nci_2014', models.CharField(blank=True, max_length=16, null=True, verbose_name='KódNČI2014+')),
                ('kod_sfc', models.CharField(blank=True, max_length=16, null=True, verbose_name='Kód v SFC')),
                ('indicator_name_cs', models.TextField(verbose_name='Název indikátoru (CS)')),
                ('indicator_name_en', models.TextField(verbose_name='Název indikátoru (EN)')),
                ('unit', models.CharField(max_length=8, verbose_name='Měrná jednotka')),
                ('type', models.CharField(choices=[('context', 'Kontext'), ('output', 'Výstup'), ('result', 'Výsledek')], max_length=8, verbose_name='Typ')),
                ('definition', models.TextField(verbose_name='Definice')),
                ('frequency', models.CharField(max_length=16, verbose_name='Frekvence')),
                ('resource', models.URLField(blank=True, null=True, verbose_name='Odkaz na zdroj dat')),
                ('resource_comments', models.CharField(blank=True, max_length=256, null=True, verbose_name='Zdroj metodiky / komentáře')),
                ('es_esf2014', models.BooleanField(blank=True, null=True, verbose_name='Přenos do IS ESF2014+')),
                ('projects_number', models.IntegerField(blank=True, null=True, verbose_name='Počet Projektů')),
                ('ec_comments', models.TextField(blank=True, null=True, verbose_name='Comments EK')),
                # NOTE(review): verbose_name below is missing its closing ')'
                # in the text — cosmetic only; fixing it would need a new
                # migration against the model definition.
                ('data_source', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='dotacni_matice.DataSource', verbose_name='Zdroj dat (Ž/P, ŘO, statistika')),
                ('field', models.ManyToManyField(blank=True, to='dotacni_matice.Field', verbose_name='Oblast')),
                ('kod_sed', models.ManyToManyField(blank=True, to='dotacni_matice.KodSed', verbose_name='Kód SED')),
                # NOTE(review): the three M2M fields below all target OP;
                # presumably intentional — confirm against the model.
                ('main_indicator', models.ManyToManyField(blank=True, related_name='indicator_main_indicator', to='dotacni_matice.OP', verbose_name='Hlavní indikátor')),
                ('op', models.ManyToManyField(blank=True, related_name='indicator_op', to='dotacni_matice.OP', verbose_name='OP')),
                ('sfc', models.ManyToManyField(blank=True, to='dotacni_matice.OP', verbose_name='SFC')),
            ],
        ),
        # Grant title: percentages per applicant category share one choices
        # list. NOTE(review): '(30, 35)' in every list looks like a typo for
        # '(35, 35)' — confirm in the model; a fix requires a new migration.
        migrations.CreateModel(
            name='DotacniTitul',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('changed', models.DateField(auto_now=True, help_text='Changed')),
                ('name', models.CharField(max_length=256)),
                ('area', models.CharField(blank=True, max_length=64, null=True)),
                ('mip', models.IntegerField(blank=True, choices=[(10, 10), (15, 15), (20, 20), (30, 30), (30, 35), (40, 40), (45, 45), (50, 50), (55, 55), (60, 60), (65, 65), (70, 70), (75, 75), (80, 80), (85, 85), (90, 90), (95, 95), (100, 100), (1, 'Ano'), (-1, 'Není stanoveno'), (-2, 'Dle aktivity'), (-3, 'Nelze určit')], null=True)),
                ('mp', models.IntegerField(blank=True, choices=[(10, 10), (15, 15), (20, 20), (30, 30), (30, 35), (40, 40), (45, 45), (50, 50), (55, 55), (60, 60), (65, 65), (70, 70), (75, 75), (80, 80), (85, 85), (90, 90), (95, 95), (100, 100), (1, 'Ano'), (-1, 'Není stanoveno'), (-2, 'Dle aktivity'), (-3, 'Nelze určit')], null=True)),
                ('sp', models.IntegerField(blank=True, choices=[(10, 10), (15, 15), (20, 20), (30, 30), (30, 35), (40, 40), (45, 45), (50, 50), (55, 55), (60, 60), (65, 65), (70, 70), (75, 75), (80, 80), (85, 85), (90, 90), (95, 95), (100, 100), (1, 'Ano'), (-1, 'Není stanoveno'), (-2, 'Dle aktivity'), (-3, 'Nelze určit')], null=True)),
                ('vp', models.IntegerField(blank=True, choices=[(10, 10), (15, 15), (20, 20), (30, 30), (30, 35), (40, 40), (45, 45), (50, 50), (55, 55), (60, 60), (65, 65), (70, 70), (75, 75), (80, 80), (85, 85), (90, 90), (95, 95), (100, 100), (1, 'Ano'), (-1, 'Není stanoveno'), (-2, 'Dle aktivity'), (-3, 'Nelze určit')], null=True)),
                ('nno', models.IntegerField(blank=True, choices=[(10, 10), (15, 15), (20, 20), (30, 30), (30, 35), (40, 40), (45, 45), (50, 50), (55, 55), (60, 60), (65, 65), (70, 70), (75, 75), (80, 80), (85, 85), (90, 90), (95, 95), (100, 100), (1, 'Ano'), (-1, 'Není stanoveno'), (-2, 'Dle aktivity'), (-3, 'Nelze určit')], null=True)),
                ('public', models.IntegerField(blank=True, choices=[(10, 10), (15, 15), (20, 20), (30, 30), (30, 35), (40, 40), (45, 45), (50, 50), (55, 55), (60, 60), (65, 65), (70, 70), (75, 75), (80, 80), (85, 85), (90, 90), (95, 95), (100, 100), (1, 'Ano'), (-1, 'Není stanoveno'), (-2, 'Dle aktivity'), (-3, 'Nelze určit')], null=True)),
                ('date_call', models.DateField(blank=True, null=True)),
                ('date_pref_from', models.DateField(blank=True, null=True)),
                ('date_pref_to', models.DateField(blank=True, null=True)),
                ('date_full_from', models.DateField(blank=True, null=True)),
                ('date_full_to', models.DateField(blank=True, null=True)),
                ('allocated', models.IntegerField(blank=True, help_text='[*10^6 Kč]', null=True)),
                ('min', models.IntegerField(blank=True, help_text='[*10^6 Kč]', null=True)),
                ('max', models.IntegerField(blank=True, help_text='[*10^6 Kč]', null=True)),
                ('form', models.CharField(blank=True, max_length=32, null=True)),
                ('history', models.CharField(blank=True, max_length=32, null=True)),
                ('regime', models.CharField(blank=True, max_length=32, null=True)),
                ('supported_activities', models.TextField(blank=True, null=True)),
                ('eligible_costs', models.TextField(blank=True, null=True)),
                ('ineligible_costs', models.TextField(blank=True, null=True)),
                ('pkn', models.IntegerField(blank=True, null=True)),
                ('pkv', models.IntegerField(blank=True, null=True)),
                ('url', models.URLField(blank=True, null=True)),
                ('comment', models.TextField(blank=True, null=True)),
                ('note', models.TextField(blank=True, null=True)),
                ('afc', models.BooleanField(blank=True, null=True)),
                ('ipo', models.BooleanField(blank=True, null=True)),
                ('investment', models.BooleanField(blank=True, null=True)),
                ('noninvestment', models.BooleanField(blank=True, null=True)),
                ('remuneration', models.BooleanField(blank=True, null=True)),
                ('personal_costs', models.BooleanField(blank=True, null=True)),
                ('education', models.BooleanField(blank=True, null=True)),
                ('consultation', models.BooleanField(blank=True, null=True)),
                ('research', models.BooleanField(blank=True, null=True)),
                ('property', models.BooleanField(blank=True, null=True)),
                ('machines', models.BooleanField(blank=True, null=True)),
                ('construction', models.BooleanField(blank=True, null=True)),
                ('administration', models.BooleanField(blank=True, null=True)),
                ('hw', models.BooleanField(blank=True, null=True)),
                ('sw', models.BooleanField(blank=True, null=True)),
                ('lump', models.BooleanField(blank=True, null=True)),
                ('marketing', models.BooleanField(blank=True, null=True)),
                ('competence', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='dotacni_matice.Competence')),
                ('program', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='dotacni_matice.Program')),
                ('type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='dotacni_matice.CallType')),
            ],
        ),
    ]
# -*- coding: utf-8 -*-
"""
  equip.analysis.constraint.container
  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

  Constraint container.

  :copyright: (c) 2014 by <NAME> (@rgaucher)
  :license: Apache 2, see LICENSE for more details.
"""
import opcode

from ...utils.log import logger
from ..graph import Tree, TreeNode
from .expr import Expr, Const, Ref, Comparator, Operator, Undef, \
                  CMP_IMPLICIT_NOT_EMPTY, CMP_TYPE_CHECK, OP_MAP, CMP_MAP
from ..python.opcodes import *


class Constraint(object):
    """
    Represents a constraint in the bytecode. This is used to represent
    conditional expressions. We store both the bytecode AST constraint
    (``ast``) and a final internal representation (``tree``), which can
    be used to compare constraints or generate SMT clauses.
    """

    def __init__(self):
        self._ast = Tree()
        self._cstr = None   # lazily built Expr tree; see `tree`
        self._live = None   # lazily built set of referenced names; see `live`
        self._root = None

    @property
    def root(self):
        """Root ``TreeNode`` of the bytecode AST for this constraint."""
        return self._root

    @root.setter
    def root(self, value):
        self._root = value
        self._ast.root = self._root

    @property
    def ast(self):
        """The bytecode AST (``Tree``) backing this constraint."""
        return self._ast

    @property
    def live(self):
        """
        Set of names referenced (via ``Ref`` nodes) anywhere in the
        constraint expression. Computed once and cached.
        """
        if self._live is None:
            self._live = set()
            worklist = [self.tree]
            while worklist:
                cur = worklist.pop(0)
                if isinstance(cur, Ref):
                    self._live.add(cur.data)
                else:
                    if not cur.terminal:
                        worklist.append(cur.rhs)
                        if cur.binary:
                            worklist.append(cur.lhs)
        return self._live

    @property
    def tree(self):
        """The internal ``Expr`` representation, built on first access."""
        if self._cstr is None:
            self.__finalize()
        return self._cstr

    def has_comparator(self, cmp_kind):
        """
        Return True if any ``Comparator`` node in the expression tree has
        comparator id ``cmp_kind``.
        """
        worklist = [self.tree]
        while worklist:
            cur = worklist.pop(0)
            logger.debug("Cur := %s", cur)
            if isinstance(cur, Comparator):
                if cur.cmp_id == cmp_kind:
                    return True
                worklist.append(cur.lhs)
                if cur.rhs is not None:
                    worklist.append(cur.rhs)
            elif isinstance(cur, Operator):
                worklist.append(cur.lhs)
                if cur.rhs is not None:
                    worklist.append(cur.rhs)
        return False

    def __ne__(self, obj):
        return not self == obj

    def __eq__(self, obj):
        return isinstance(obj, Constraint) and self.tree == obj.tree

    def __finalize(self):
        """
        Build the internal ``Expr`` tree (``self._cstr``) from the
        bytecode AST rooted at ``self.root``.
        """
        root = self.root
        self._cstr = None
        if root.data[0] != COMPARE_OP:
            # Not an explicit comparison: conditionals like `if x:` are an
            # implicit not-empty test.
            self._cstr = Comparator.fromKind(CMP_IMPLICIT_NOT_EMPTY)
        else:
            self._cstr = Comparator.fromOpcode(*root.data)
            self._cstr.data = root.data

        def process_children(cstr_node, ast_node):
            # Recursively mirror the AST structure into Expr nodes.
            if ast_node.has_children() and cstr_node.terminal:
                return
            if not cstr_node.terminal:
                if cstr_node.kind == Expr.COMPARATOR \
                   and cstr_node.cmp_id in (CMP_IMPLICIT_NOT_EMPTY, CMP_TYPE_CHECK):
                    if cstr_node.cmp_id == CMP_IMPLICIT_NOT_EMPTY:
                        # Unary: the whole AST node becomes the rhs operand.
                        cstr_node.rhs = convert_ast_constraint(ast_node)
                        process_children(cstr_node.rhs, ast_node)
                    else:
                        lhs_child = ast_node.first_child
                        cstr_node.lhs = convert_ast_constraint(lhs_child)
                        process_children(cstr_node.lhs, lhs_child)
                        if ast_node.num_children() > 1:
                            rhs_child = ast_node.child(1)
                            cstr_node.rhs = convert_ast_constraint(rhs_child)
                            process_children(cstr_node.rhs, rhs_child)
                else:
                    expected_children_num = 2 if cstr_node.binary else 1
                    children = ast_node.children
                    num_children = len([c for c in children if c is not None])
                    if num_children != expected_children_num:
                        # Malformed AST for this operator: degrade to Undef
                        # operands rather than crashing.
                        logger.debug("Consistency error. expected %d, got %d children for %s",
                                     expected_children_num, num_children, ast_node)
                        cstr_node.rhs = Undef(data=None)
                        if expected_children_num == 2:
                            cstr_node.lhs = Undef(data=None)
                    elif expected_children_num == 1:
                        child = ast_node.first_child
                        cstr_node.rhs = convert_ast_constraint(child)
                        process_children(cstr_node.rhs, child)
                    else:
                        lhs_child = ast_node.first_child
                        rhs_child = ast_node.last_child
                        cstr_node.lhs = convert_ast_constraint(lhs_child)
                        process_children(cstr_node.lhs, lhs_child)
                        cstr_node.rhs = convert_ast_constraint(rhs_child)
                        process_children(cstr_node.rhs, rhs_child)

        logger.debug("Current tree := %s", self._ast.to_dot())
        process_children(self._cstr, root)

    def __repr__(self):
        return repr(self._cstr)


def convert_ast_constraint(ast_node):
    """
    Returns a new ``Expr`` node for the given bytecode AST node.

    :param ast_node: The current AST node in the conditional.
    """
    op, arg = ast_node.data
    if op in OP_MAP:
        # We got an operator
        return Operator.fromOpcode(op, arg)
    elif op == COMPARE_OP and arg in CMP_MAP:
        return Comparator.fromOpcode(op, arg)
    elif op in LOAD_OPCODES:
        if op == LOAD_CONST:
            return Const.fromValue(arg)
        else:
            if arg in ('True', 'False', 'None', 'str', 'int', 'bool',
                       'chr', 'float', 'tuple'):
                return Const.fromValue(arg, is_symbol=True)
            return Ref.fromName(arg)
    elif op in CALL_OPCODES:
        if is_type_check(ast_node):
            return Comparator.fromKind(CMP_TYPE_CHECK)
        elif is_type_cast(ast_node):
            # BUG FIX: this previously referenced the undefined name
            # `ast_call_node` (the parameter is `ast_node`), raising a
            # NameError whenever a type-cast call was converted.
            return Operator.fromTypeMethod(ast_node.first_child.data[1])
        else:
            return Undef(data=ast_node.data)
    else:
        logger.debug("Not converted node: op=%s, arg=%s",
                     opcode.opname[op], repr(arg))
        return Undef(data=ast_node.data)


def is_type_check(ast_call_node):
    """True if the call node is an `isinstance`/`type` type check."""
    method_name = ast_call_node.first_child.data[1]
    return method_name in ('isinstance', 'type')


def is_type_cast(ast_call_node):
    """True if the call node is a cast to a builtin type."""
    method_name = ast_call_node.first_child.data[1]
    return method_name in ('str', 'int', 'bool', 'chr', 'float', 'tuple')
import time

# Single-byte error codes returned by the adapter, mapped to readable text.
USBI2C_error_messages = {
    b'a': "NACK received",
    b'A': "Invalid address",
    b'L': "Invalid length",
    b'C': "Unknown command",
    b'U': "Unknown escape sequence",
    b'T': "Timer expired"
}


class AdapterResponseException(Exception):
    """Raised when the adapter reports an error status byte."""

    def __init__(self, char):
        self.char = char

    def __str__(self):
        if self.char in USBI2C_error_messages:
            return USBI2C_error_messages[self.char]
        return "Unknown response: %s" % self.char


class USBI2C:
    """
    Driver for a UART-attached USB-to-I2C bridge.

    The wire protocol escapes ESC (0x1b) and backslash (0x5c) bytes in
    both directions; `_send`/`_recv` implement that framing.
    """

    def __init__(self, uart):
        self.waitInterval = 0.002  # polling period in seconds
        self.wait = 1.5            # overall transaction timeout in seconds
        self.uart = uart           # file-like serial port (pyserial-style)

    def _send(self, data):
        """Escape and write `data` (bytes or iterable of ints) to the UART."""
        buf = b''
        for byte in data:
            if byte == 27:        # ESC -> 0x5c 0xb1
                buf += b'\x5c\xb1'
            elif byte == 92:      # backslash -> 0x5c 0xc5
                buf += b'\x5c\xc5'
            else:
                buf += bytes([byte])
        self.uart.write(buf)

    def _recv(self, length):
        """
        Read `length` payload bytes from the UART, undoing the escape
        framing. Raises on read timeout.
        """
        # Parameter renamed from `len` to avoid shadowing the builtin.
        buf = b''
        escaped = 0
        while length:
            byte = self.uart.read(1)
            if byte == b'':
                raise Exception("Read timeout")
            if byte == b'\x5c':
                # Escape prefix: does not count toward `length`.
                escaped = 1
            else:
                length = length - 1
                if escaped:
                    if byte == b'\xb1':
                        byte = b'\x1b'
                    elif byte == b'\xc5':
                        byte = b'\x5c'
                    escaped = 0
                buf += byte
        return buf

    def Reset(self):
        '''
        Reset the adapter into defined (reset) state.
        '''
        self.uart.flushInput()
        self.uart.flushOutput()
        self.uart.write(b'\x1b')  # raw ESC resets the adapter
        if not self.StatusOK():
            raise Exception("Adapter not ready")

    def Timing(self, speed):
        '''
        Set the I2C bus frequency (kHz) and reset into defined state.
        Supported speeds: 10, 100, 400, 1000.
        '''
        if speed == 10:
            timing = b'\xc7\xc3\x42\xb0'
        elif speed == 100:
            timing = b'\x13\x0f\x42\xb0'
        elif speed == 400:
            timing = b'\x09\x03\x33\x50'
        elif speed == 1000:
            timing = b'\x03\x01\x11\x50'
        else:
            # Fixed typo in the original message ("Unsuppored").
            raise Exception("Unsupported timing")
        self.uart.write(b't')
        self.uart.write(timing)
        self.uart.write(b'i')  # re-initialize with the new timing

    def Serial(self):
        '''
        Retrieve 96-bit serial number from the adapter.
        '''
        self.uart.write(b'n')
        return self._recv(24).decode()

    def Address(self, address):
        '''
        Set address of target I2C device. Accepts an int, a one-character
        string, or bytes.
        '''
        if isinstance(address, int):
            # NOTE(review): upper bound allows 128, although 7-bit I2C
            # addresses top out at 127 — confirm against the adapter spec.
            if address < 0 or address > 128:
                raise Exception('Address out of range')
            address = bytes([address])
        elif isinstance(address, str) and len(address) > 0:
            address = address[0].encode('ascii')
        elif not isinstance(address, bytes):
            raise Exception('Address is not an integer')
        self.uart.write(b'A')
        self._send(address)

    def Write(self, data):
        '''
        Write data (int or sequence of ints, 1..64 bytes) into the adapter
        transmit buffer.
        '''
        if isinstance(data, int):
            data = [data]
        length = len(data)
        if length < 1 or length > 64:
            raise Exception('Data length out of range')
        self.uart.write(b'W')
        self._send(bytes([length]))
        self.uart.write(b'w')
        self._send(data)

    def ReadLength(self, l):
        '''
        Set the number of expected bytes (1..64) from the target I2C device.
        '''
        if l < 1 or l > 64:
            raise Exception('Data length out of range')
        self.uart.write(b'R')
        self._send(bytes([l]))

    def Read(self, l):
        '''
        Read `l` bytes from the adapter receive buffer.
        '''
        self.uart.write(b'r')
        r = self._recv(l)
        if (len(r) != l):
            raise Exception('Insufficient data received')
        return r

    def Start(self):
        '''
        Start the transmission with target I2C device.
        '''
        self.uart.write(b'S')

    def Busy(self):
        '''
        Check if the adapter is busy (transmission takes place).
        '''
        busy = True
        self.uart.write(b's')
        while True:
            r = self.uart.read(1)
            if r == b'\x1b' or r == b'\x00':
                busy = False
            # Drain any queued status bytes before deciding.
            if self.uart.in_waiting == 0:
                break
        return busy

    def StatusOK(self):
        '''
        Query the adapter error status. Returns True when OK; raises
        AdapterResponseException with the adapter's error code otherwise.
        '''
        self.uart.write(b'E')
        r = self.uart.read(1)
        if len(r) == 0:
            raise Exception("Communication error")
        if r == b'N':
            return True
        # Removed unreachable `return False` that followed this raise.
        raise AdapterResponseException(r)

    def WaitForCompletion(self):
        '''
        Busy-wait loop while transmission takes place, then check status.
        '''
        elapsedTime = 0
        while self.Busy():
            elapsedTime += self.waitInterval
            time.sleep(self.waitInterval)
            if elapsedTime > self.wait:
                raise Exception("Communication timeout")
        self.StatusOK()
"""AsepriteTool: tkinter GUI to install/update Aseprite from source on Windows.

Drives a set of .bat helper scripts and unpacks the Skia/Ninja dependencies
the build needs. Windows-only (hard-coded C:/ paths, Visual Studio lookup).
"""
from pathlib import Path
from tkinter import *
from tkinter import messagebox
from configparser import ConfigParser
import subprocess
import os
import zipfile
import sys


class MyDialog:
    """Minimal modal prompt with a single text entry.

    The entered value is published through the module-level global `output`
    (set in `send`); callers read it after `window.wait_window(...)` returns.
    """

    def __init__(self, parent, ttt):
        top = self.top = Toplevel(parent)
        top.geometry("200x100")
        top.iconbitmap("Icon.ico")
        self.myLabel = Label(top, text=ttt)
        self.myLabel.pack()
        self.myEntryBox = Entry(top)
        self.myEntryBox.pack()
        self.mySubmitButton = Button(top, text='Submit', command=self.send)
        self.mySubmitButton.pack()

    def send(self):
        # NOTE(review): result is handed back via a global; fragile if two
        # dialogs are ever open at once, but works for this sequential flow.
        global output
        output = self.myEntryBox.get()
        self.top.destroy()


OUTPUT_PATH = Path(__file__).parent
ASSETS_PATH = OUTPUT_PATH / Path("./assets")


def Install():
    """Run the full install: fetch sources, unpack deps, compile, shortcut."""
    subprocess.call(["Install.bat"])
    # Ask the user where the downloaded Skia/Ninja archives are.
    skia_path_ins = MyDialog(window, ttt="Path to Skia File")
    window.wait_window(skia_path_ins.top)
    skia_path = output
    ninja_path_ins = MyDialog(window, ttt="Path to Ninja File")
    window.wait_window(ninja_path_ins.top)
    ninja_path = output
    try:
        with zipfile.ZipFile(skia_path, "r") as zf:
            zf.extractall("C:/deps/skia")
        with zipfile.ZipFile(ninja_path, "r") as zf:
            zf.extractall("C:/Program Files/CMake/bin")
    except Exception as e:
        messagebox.showerror("Unzip Error!", str(e))
    # Pick the compile script matching the installed Visual Studio version.
    if os.path.isdir("C:/Program Files/Microsoft Visual Studio/2022/Community/Common7/Tools"):
        subprocess.call(["Compile22.bat"])
    elif os.path.isdir("C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/Common7/Tools"):
        subprocess.call(["Compile19.bat"])
    else:
        messagebox.showerror("No Visual Studio installation found", "No Visual Studio installation found. Please refer to https://github.com/TheLiteCrafter/AsepriteTool")
    subprocess.call(["Shortcut.bat"])


def Update():
    """Update an existing install: pull sources, recompile, refresh shortcut."""
    subprocess.call(["Update.bat"])
    if os.path.isdir("C:/Program Files/Microsoft Visual Studio/2022/Community/Common7/Tools"):
        subprocess.call(["Compile22.bat"])
    elif os.path.isdir("C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/Common7/Tools"):
        subprocess.call(["Compile19.bat"])
    # NOTE(review): unlike Install(), no error dialog when neither VS version
    # is found — confirm whether that is intentional.
    subprocess.call(["Shortcut.bat"])


def relative_to_assets(path: str) -> Path:
    """Resolve a GUI asset path relative to the bundled assets directory."""
    return ASSETS_PATH / Path(path)


def Start():
    """Dispatch on the selected mode; 'Auto' picks based on existing install."""
    if Mode.get() == "Auto":
        # BUG FIX: "C:\deps" contained the invalid escape sequence \d
        # (DeprecationWarning, future SyntaxError); use forward slashes
        # like every other path in this script.
        if os.path.isdir("C:/aseprite") and os.path.isdir("C:/deps"):
            Update()
        else:
            Install()
    elif Mode.get() == "Install":
        Install()
    elif Mode.get() == "Update":
        Update()


def Help():
    """Point the user at the project documentation."""
    messagebox.showinfo("Help", "Please Refer to https://github.com/TheLiteCrafter/AsepriteTool")


# ---- GUI setup (runs at import time; this file is the app entry point) ----
window = Tk()
window.geometry("700x800")
window.configure(bg="#FFFFFF")
window.title("AsepriteTool")
window.iconbitmap("Icon.ico")

Mode = StringVar(window)
Mode.set("Auto")

canvas = Canvas(
    window,
    bg="#FFFFFF",
    height=800,
    width=700,
    bd=0,
    highlightthickness=0,
    relief="ridge"
)
canvas.place(x=0, y=0)

# Background image.
image_image_1 = PhotoImage(
    file=relative_to_assets("image_1.png"))
image_1 = canvas.create_image(
    350.0000000000001,
    250.0,
    image=image_image_1
)

# Mode selector (Install / Update / Auto).
button_image_1 = PhotoImage(
    file=relative_to_assets("button_1.png"))
button_1 = OptionMenu(window, Mode, "Install", "Update", "Auto")
button_1.configure(indicatoron=0, image=button_image_1)
button_1.place(
    x=35.000000000000114,
    y=723.0,
    width=170.0,
    height=60.0
)

# Help button.
button_image_2 = PhotoImage(
    file=relative_to_assets("button_2.png"))
button_2 = Button(
    image=button_image_2,
    borderwidth=0,
    highlightthickness=0,
    command=Help,
    relief="flat"
)
button_2.place(
    x=495.0000000000001,
    y=723.0,
    width=170.0,
    height=60.0
)

# Start button.
button_image_3 = PhotoImage(
    file=relative_to_assets("button_3.png"))
button_3 = Button(
    image=button_image_3,
    borderwidth=0,
    highlightthickness=0,
    command=Start,
    relief="flat"
)
button_3.place(
    x=265.0000000000001,
    y=723.0,
    width=170.0,
    height=60.0
)

window.resizable(False, False)
window.mainloop()
# tools/py/serial/simpleobj.py
# versa.serial.simpleobj
"""
Serialize and deserialize between a Versa model and simple Python objects
(e.g. loaded from JSON) or CSV rows.

Import as:

from versa.serial.simpleobj import parse

"""

# BUG FIX: `csv` is used by parse_iter/parse_batched below but was never
# imported, raising NameError at first use.
import csv
import re
import json
import logging
import operator
from operator import truth
from itertools import chain, islice, repeat, starmap, takewhile

from versa import I, VERSA_BASEIRI, ORIGIN, RELATIONSHIP, TARGET
from versa.driver.memory import newmodel

from .literate import parse as markdown_parse

__all__ = ['parse', 'parse_iter', 'write',
    # Non-standard
    ]


def parse(objlist, vl_template, model, encoding='utf-8', nosy=None):
    """
    Parse a list of simple Python objects into `model`.

    Each object is rendered through `vl_template` (a Jinja-style template
    with a `render` method) into Versa literate text, which is then parsed
    into the model.

    objlist - iterable of dicts
    vl_template - template object; rendered with the object both as `_`
        and as expanded keyword arguments
    model - Versa model that receives the parsed statements
    encoding - unused here; kept for interface compatibility
    nosy - optional callable invoked with each rendered literate text,
        useful for debugging
    """
    for obj in objlist:
        vl_text = vl_template.render(_=obj, **obj)
        if nosy:
            nosy(vl_text)
        markdown_parse(vl_text, model)


# FIXME: Seems to be massive cut & paste error
def parse_iter(csvfp, template_obj, model_fact=newmodel, csv_fact=None,
               header_loc=None, nosy=None):
    '''
    Parse CSV rows into Versa models based on a template for interpreting
    the data. Yield a new model representing each row.

    csvfp - file-like object with CSV content
    template_obj - string format template that serves as Versa literal
        template for each row, or callable that takes the row dict and
        returns a Versa literate string (e.g. a function using Jinja or
        Mako for more sophisticated templating)
    model_fact - callable that provides a Versa model to receive the model
        interpreted from the Versa literate of each row
    csv_fact - callable that converts data from csvfp into Python csv
        module-compatible objects
    header_loc - how many rows down in the CSV file header data can be
        found (currently unused)
    nosy - optional function called with each row's Versa literate output,
        useful for debugging
    '''
    if csv_fact is None:
        rows = csv.DictReader(csvfp, delimiter=',', quotechar='"',
                              quoting=csv.QUOTE_MINIMAL)
    else:
        rows = csv_fact(csvfp)

    first_proper_row = True
    for (row_ix, row) in enumerate(rows):
        if first_proper_row:
            # Map each header to a URI-safe identifier, computed once.
            adapted_keys = {}
            for k in row.keys():
                # URI escape, but treat spaces as special case, for convenience
                # NOTE(review): `iri` is not imported in this module (probably
                # `from amara3 import iri`) — confirm the intended dependency.
                adapted = iri.percent_encode(k.replace(' ', '_'))
                #adapted = OMIT_FROM_SLUG_PAT.sub('_', k)
                # Ensure there are no clashes after escaping.
                # BUG FIX: previously `adapted_keys += '_'` (a TypeError on a
                # dict) and the membership test checked keys; uniqueness must
                # hold among the escaped *values*.
                while adapted in adapted_keys.values():
                    adapted += '_'
                adapted_keys[k] = adapted
            first_proper_row = False

        for k, ad_k in adapted_keys.items():
            row[ad_k] = row[k]
        if isinstance(template_obj, str):
            vliterate_text = template_obj.format(**row)
        else:
            vliterate_text = template_obj(row)
        model = model_fact()
        markdown_parse(vliterate_text, model)
        yield model


# Optimized version courtesy https://stackoverflow.com/a/34935239
def chunker(n, iterable):
    """Yield successive tuples of up to `n` items from `iterable`."""
    # n is size of each chunk; last chunk may be smaller
    # operator.truth is *significantly* faster than bool for the case of
    # exactly one positional argument
    return takewhile(truth, map(tuple, starmap(islice, repeat((iterable, n)))))


# New, consistent API
def do_parse(csvobj, adapted_keys, vliterate_template, model):
    """
    Parse every row in `csvobj` into `model` using `vliterate_template`.

    Returns True if at least one row was processed.
    """
    at_least_one_row = False
    for row in csvobj:
        at_least_one_row = True
        for k, ad_k in adapted_keys.items():
            row[ad_k] = row[k]
        vliterate_text = vliterate_template.format(**row)
        markdown_parse(vliterate_text, model)
    return at_least_one_row


# Batch option
def parse_batched(csvfp, vliterate_template, model, batch_size,
                  csv_cls=None, encoding='utf-8', header_loc=None):
    """
    Generator that parses CSV rows in batches of `batch_size`.

    Yields the result of `do_parse` per batch; the caller may send in a
    replacement model for the next batch (generator `send` protocol).
    """
    if csv_cls is None:
        rows = csv.DictReader(csvfp, delimiter=',', quotechar='"',
                              quoting=csv.QUOTE_MINIMAL)
    else:
        rows = csv_cls(csvfp)

    row = next(rows, None)
    # Handle column headers with non-ID characters
    if row:
        adapted_keys = {}
        for k in row.keys():
            # FIXME: Needs uniqueness post-check
            # NOTE(review): OMIT_FROM_SLUG_PAT is not defined in this module —
            # it presumably belongs with the missing `iri` import; confirm.
            adapted = OMIT_FROM_SLUG_PAT.sub('_', k)
            adapted_keys[k] = adapted

        curr_model = model
        chunks = chunker(batch_size, chain(iter([row]), rows))
        while True:
            chunk = next(chunks, None)
            # BUG FIX: check for exhaustion *before* calling do_parse;
            # previously a None chunk was passed in and raised TypeError.
            # Also removed leftover debug print() calls.
            if chunk is None:
                break
            curr_model = yield do_parse(chunk, adapted_keys,
                                        vliterate_template, curr_model)


def write():
    raise NotImplementedError
import csv
import os
import time
import unittest

from selenium.webdriver.support.select import Select

from Data.parameters import Data
from get_dir import pwd
from reuse_func import GetData


class Test_logs(unittest.TestCase):
    """UI tests for the cQube admin-console "all-logs" page.

    Every download test follows the same flow: open the logs page, pick a
    log type and a log name from the two dropdowns, download the file and
    assert that it exists and is non-empty.  The shared flow lives in
    ``_check_log_download``; the individual tests only supply parameters.
    """

    @classmethod
    def setUpClass(cls):
        # FIX: the original declared `setUpClass(self)`; the first argument
        # of a classmethod is the class itself.
        cls.data = GetData()
        cls.p = pwd()
        cls.driver = cls.data.get_driver()
        cls.driver.implicitly_wait(100)
        cls.data.open_cqube_appln(cls.driver)
        cls.data.page_loading(cls.driver)
        cls.data.login_to_adminconsole(cls.driver)
        time.sleep(2)

    # ------------------------------------------------------------------ #
    # Shared helpers
    # ------------------------------------------------------------------ #

    def _open_logs_page(self, by_name=False):
        """Open the logs page and return the (log_type, log_name) dropdowns.

        by_name -- locate the dropdowns by element *name* instead of id
                   (the PostgreSql page uses different locators).
        """
        self.data.logs_page()
        self.data.page_loading(self.driver)
        time.sleep(5)
        if by_name:
            log_type = Select(self.driver.find_element_by_name("logTypeName"))
            log_name = Select(self.driver.find_element_by_name("logName"))
        else:
            log_type = Select(self.driver.find_element_by_id("choose_dist"))
            log_name = Select(self.driver.find_element_by_id("choose_block"))
        return log_type, log_name

    def _check_log_download(self, log_type, log_name, type_text, type_index,
                            name_index, filename, wait_secs,
                            load_after_download=False):
        """Select a log type/name, download the log file and verify it.

        type_text  -- visible text of the log-type option (padded, as in UI)
        type_index -- option index used only for the progress print
        name_index -- option index to select in the log-name dropdown
        filename   -- expected file name (with leading '/') in the
                      download directory
        wait_secs  -- seconds to wait for the download to finish
        load_after_download -- wait for a page load after clicking download

        The downloaded file is asserted to exist and be non-empty, then
        removed so reruns start clean.
        """
        log_type.select_by_visible_text(type_text)
        print(log_type.options[type_index].text, "is selected")
        self.data.page_loading(self.driver)
        log_name.select_by_index(name_index)
        print(log_name.options[name_index].text, "is selected")
        self.data.page_loading(self.driver)
        if "No such file or directory" in self.driver.page_source:
            print(log_name.options[name_index].text, "has no files to download ")
        else:
            p = pwd()
            self.data.page_loading(self.driver)
            self.driver.find_element_by_id("downld").click()
            time.sleep(wait_secs)
            if load_after_download:
                self.data.page_loading(self.driver)
            self.filename = p.get_download_dir() + filename
            # FIX: typos in the original assertion messages
            # ("dowloaded", "does not contains informations").
            self.assertTrue(os.path.isfile(self.filename),
                            msg="log is not downloaded")
            with open(self.filename, "r") as f:
                rows = list(csv.reader(f, delimiter=","))
            self.assertNotEqual(len(rows), 0,
                                msg='Log file does not contain information')
            os.remove(self.filename)
        self.data.page_loading(self.driver)

    # ------------------------------------------------------------------ #
    # Navigation tests
    # ------------------------------------------------------------------ #

    def test_click_on_logs(self):
        self.data.logs_page()
        self.data.page_loading(self.driver)
        if "all-logs" in self.driver.current_url:
            print("Logs page is present ")
        else:
            print("logs page is not present")
        self.driver.find_element_by_id("homeBtn").click()
        self.data.page_loading(self.driver)

    def test_click_on_logs_icon(self):
        count = 0
        self.data.page_loading(self.driver)
        self.driver.find_element_by_xpath(Data.log_icon).click()
        self.data.page_loading(self.driver)
        if "all-logs" in self.driver.current_url:
            print("Logs page is present ")
        else:
            print("logs page is not present")
            count = count + 1
        self.driver.find_element_by_id("homeBtn").click()
        self.assertEqual(0, count, msg='Logs page does not exist ')
        self.data.page_loading(self.driver)

    # ------------------------------------------------------------------ #
    # Download tests (one per log type / log name combination)
    # ------------------------------------------------------------------ #

    def test_application_node_info(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' Application ', 1,
                                 1, "/server_side-out.log", 25)

    def test_application_node_error(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' Application ', 1,
                                 2, "/server_side-error.log", 15)

    def test_application_angular_info(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' Application ', 1,
                                 3, "/client_side-out.log", 15)

    def test_application_angular_error(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' Application ', 1,
                                 4, "/client_side-error.log", 15)

    def test_admin_node_info(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' Admin ', 2,
                                 1, "/admin_server_side-out.log", 20,
                                 load_after_download=True)

    def test_admin_node_error(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' Admin ', 2,
                                 2, "/admin_server_side-error.log", 15)

    def test_admin_angular_info(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' Admin ', 2,
                                 3, "/admin_client_side-out.log", 15)

    def test_admin_angular_error(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' Admin ', 2,
                                 4, "/admin_client_side-error.log", 15)

    def test_nifi_applogs(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' Nifi ', 3,
                                 1, "/nifi-app.log", 5)

    def test_nifi_bootstrap(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' Nifi ', 3,
                                 2, "/nifi-bootstrap.log", 15)

    def test_emission_access_logs(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' Emission ', 4,
                                 1, "/emission_app-access.log", 15)

    def test_emission_error_logs(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' Emission ', 4,
                                 2, "/emission_app-error.log", 15)

    def test_System_syslogs(self):
        log_type, log_name = self._open_logs_page()
        self._check_log_download(log_type, log_name, ' System ', 5,
                                 1, "/syslog.txt", 15)

    def test_postgress_postgreslog(self):
        # This page exposes the dropdowns by element name, not id.
        log_type, log_name = self._open_logs_page(by_name=True)
        self._check_log_download(log_type, log_name, ' PostgreSql ', 6,
                                 1, "/postgresql-10-main.log", 5)

    @classmethod
    def tearDownClass(cls):
        cls.driver.close()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import sys

from utils.format_input import write_files
from utils.overpass import node_coordinates_bb, node_coordinates_city


def name_if_present(n):
    """Return the overpass node's "name" tag, or None when absent."""
    if "name" in n["tags"]:
        return n["tags"]["name"]
    else:
        return None


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate problem from an overpass query"
    )
    parser.add_argument(
        "-c",
        "--city",
        metavar="CITY",
        help="city to restrict overpass query to",
        default=None,
    )
    parser.add_argument(
        "-o", "--output", metavar="OUTPUT", help="output file name", default=None
    )
    parser.add_argument(
        "-k",
        "--key",
        metavar="KEY",
        help="key value to use in overpass query",
        default="amenity",
    )
    parser.add_argument(
        "-v",
        "--value",
        metavar="VALUE",
        help="comma-separated list of value(s) to use in overpass query",
        default="cafe",
    )
    # NOTE: the original had stray trailing commas after the next four calls
    # (harmless one-element tuples); removed.
    parser.add_argument(
        "--top",
        metavar="TOP",
        help="bounding box max latitude",
        type=float,
        default=48.86,
    )
    parser.add_argument(
        "--bottom",
        metavar="BOTTOM",
        help="bounding box min latitude",
        type=float,
        default=48.85,
    )
    parser.add_argument(
        "--left",
        metavar="LEFT",
        help="bounding box min longitude",
        type=float,
        default=2.37,
    )
    parser.add_argument(
        "--right",
        metavar="RIGHT",
        help="bounding box max longitude",
        type=float,
        default=2.39,
    )
    parser.add_argument(
        "--geojson",
        action="store_true",
        help="also write a geojson file with all generated points",
        default=False,
    )
    parser.add_argument(
        "--csv",
        action="store_true",
        help="also write a csv file with coordinates for all generated points",
        default=False,
    )

    args = parser.parse_args()

    file_name = args.output
    values = args.value.split(",")
    values_name = "_".join(values)

    if args.city is not None:
        if not file_name:
            file_name = args.key + "_" + values_name + "_" + args.city
        nodes = node_coordinates_city(args.key, values, args.city)["elements"]
    else:
        if not file_name:
            file_name = (
                args.key
                + "_"
                + values_name
                + "_"
                + str(args.bottom)
                + "_"
                + str(args.left)
                + "_"
                + str(args.top)
                + "_"
                + str(args.right)
            )
        nodes = node_coordinates_bb(
            args.key, values, [[args.left, args.bottom], [args.right, args.top]]
        )["elements"]

    if len(nodes) < 2:
        print("Too few nodes to format a problem!")
        sys.exit(0)

    # FIX: `map` objects are not subscriptable or sliceable in Python 3, so
    # the original crashed at `lons[0]`; materialize lists instead.
    lons = [n["lon"] for n in nodes]
    lats = [n["lat"] for n in nodes]
    names = [name_if_present(n) for n in nodes]

    # First node becomes the vehicle start; the rest become jobs.
    locations = {
        "vehicles": {"coordinates": [[lons[0], lats[0]]], "names": [names[0]]},
        "jobs": {
            "coordinates": [list(c) for c in zip(lons[1:], lats[1:])],
            "names": names[1:],
        },
    }

    write_files(file_name, locations, args.geojson, args.csv)
<gh_stars>1-10 # Utilities import os import logging import attr import hashlib from enum import Enum # Telegram bot API import telegram from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler # Support modules from sheetmanager import SheetManager, DataSheetEnum from inlineselector import InlineSelector from inlinedateselector import InlineDateSelector # Instantiate and configure logger logging.basicConfig( level = logging.INFO, format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s," ) logger = logging.getLogger() class HandlerGroup(Enum): CALLBACK_QUERY_HANDLER = 0 MESSAGE_HANDLER = 1 COMMAND_HANDLER = 2 # State enum for data entry steps class InputState(Enum): INPUT_STATE_IDLE = 0 INPUT_STUDENT_NAME = 1 INPUT_COURSE_NAME = 2 INPUT_ASSIST_NAME = 3 INPUT_AUX_NAME = 4 INPUT_RECEIVED_DATE = 5 INPUT_START_DATE = 6 INPUT_END_DATE = 7 INPUT_STATE_END = 8 # State enum for authentication steps class AuthState(Enum): AUTH_STATE_IDLE = 0 AUTH_IS_AUTHENTICATING = 1 AUTH_IS_AUTHENTICATED = 2 # Utility method for computing SHA-256 hash def hash_string(string): # Return a SHA-256 hash of the given string return hashlib.sha256(string.encode('utf-8')).hexdigest() # Data class for consulta objects @attr.s class Consulta: # Date values received_date:str = attr.ib(converter=str, default="") start_date:str = attr.ib(converter=str, default="") end_date:str = attr.ib(converter=str, default="") # Names and course information assistant_name:str = attr.ib(converter=str, default="") auxiliary_name:str = attr.ib(converter=str, default="") student_name:str = attr.ib(converter=str, default="") course_name:str = attr.ib(converter=str, default="N/A") class GeconsultaInstanceBot: def __init__(self, user_id, auth_hash, sheet_manager): # Set initial values self.user_id = user_id self.consulta = Consulta() self.input_state = InputState.INPUT_STATE_IDLE self.auth_state = AuthState.AUTH_STATE_IDLE # Sheet manager reference for pushing requests 
self.sheet_manager = sheet_manager # Receive password hash for authentication self.auth_hash = auth_hash # Instantiate inline selectors course_name_field = DataSheetEnum.COURSE_NAME assist_name_field = DataSheetEnum.ASSIST_NAME self.course_name_selector = InlineSelector(sheet_manager.get_data_from_field_functor(course_name_field)) self.assist_name_selector = InlineSelector(sheet_manager.get_data_from_field_functor(assist_name_field)) # Instantiate date selector self.date_selector = InlineDateSelector() # Instantiate selector map self.selector_map = {} # Halt runtime if hash isn't present if not self.auth_hash: raise RuntimeError self.run() def run(self): # Build state-function map for input steps self.input_state_map = { InputState.INPUT_STATE_IDLE: self.handle_input_idle, InputState.INPUT_STUDENT_NAME: self.handle_student_name, InputState.INPUT_COURSE_NAME: self.handle_course_name, InputState.INPUT_ASSIST_NAME: self.handle_assist_name, InputState.INPUT_AUX_NAME: self.handle_aux_name, InputState.INPUT_RECEIVED_DATE: self.handle_received_date, InputState.INPUT_START_DATE: self.handle_start_date, InputState.INPUT_END_DATE: self.handle_end_date, InputState.INPUT_STATE_END: self.handle_input_end } # Build state-function map for auth steps self.auth_state_map = { AuthState.AUTH_STATE_IDLE: self.handle_auth_idle, AuthState.AUTH_IS_AUTHENTICATING: self.handle_authenticating, AuthState.AUTH_IS_AUTHENTICATED: self.handle_is_authenticated } # Command handlers def start(self, update, context): # Start from the top; reset everything and show help self.restart(update, context, True) self.logout(update, context, True) self.show_help(update, context) logger.info("Bot instance started by user {id}".format(id=update.message.from_user.id)) def show_help(self, update, context): help_text = "¡Hola! 
Soy el ayudante virtual de Geconsultas 🙂\n" + \ "Conmigo puedes ingresar los datos de tu Geconsulta de forma automatizada, sin rollos 😎\n" + \ "➡️ Usa el comando /auth para autenticar tu chat 🔐\n" + \ "➡️ Usa el comando /registrar para ingresar datos 📝\n" + \ "➡️ Usa el comando /restart para borrar los datos y comenzar desde cero 🔄" update.message.reply_text(help_text) def register(self, update, context): # Halt if user is not authenticated if self.auth_state != AuthState.AUTH_IS_AUTHENTICATED: update.message.reply_text("Por favor, usa el comando /auth para autenticar tu chat primero ✅") return # If handling a previous entry flow, clobber old data if self.input_state.value > InputState.INPUT_STATE_IDLE.value: self.consulta = Consulta() # Log data entry attempt logger.info("Data entry requested by user {id}".format(id=update.message.from_user.id)) # Set input state and update user self.input_state = InputState.INPUT_STUDENT_NAME state_function = self.input_state_map[self.input_state] update.message.reply_text("Por favor, sigue los pasos para registrar tu consulta 🙂") update.message.reply_text("Introduce el nombre del estudiante 📖") def restart_inline_selectors(self): self.course_name_selector.reset() self.assist_name_selector.reset() self.date_selector.reset() self.selector_map = {} def restart(self, update, context, noupdate=False): if self.auth_state == AuthState.AUTH_IS_AUTHENTICATED: # Log data entry abort if self.input_state.value < InputState.INPUT_STATE_END.value: logger.info("Data entry aborted by user {id}".format(id=update.message.from_user.id)) # Reset input state and input object self.consulta = Consulta() self.input_state = InputState.INPUT_STATE_IDLE # Reset inline objects to their original state self.restart_inline_selectors() # User update if not noupdate: update.message.reply_text("¡Datos reseteados!") def auth(self, update, context): if self.auth_state == AuthState.AUTH_IS_AUTHENTICATED: reply_text = "Parece que ya estás autenticado 🙂\n" + \ "Para 
registrar tu consulta, usa el comando /register 📝" update.message.reply_text(reply_text) return # Log authentication attempt logger.info("Auth requested by user {id}".format(id=update.message.from_user.id)) # Set Auth state and update user self.auth_state = AuthState.AUTH_IS_AUTHENTICATING update.message.reply_text("Por favor, introduce la contraseña 🙂") def logout(self, update, context, noupdate=False): if self.auth_state == AuthState.AUTH_IS_AUTHENTICATED: # Reset state and update user self.restart(update, context, True) self.auth_state = AuthState.AUTH_STATE_IDLE logger.info("User {id} has logged out succesfully".format(id=update.message.from_user.id)) if not noupdate: update.message.reply_text("Sesión cerrada con éxito 🙂 ¡Nos vemos!") # If logout is succesful, return True return True # If logout is not performed, return False return False # Inline handler def handle_inline_query(self, update, context): if update.callback_query.message.message_id not in self.selector_map: self.selector_map[update.callback_query.message.message_id] = self.current_selector() # Only accept callback queries as input if update.callback_query.data is not None: state_function = self.input_state_map[self.input_state] selector = self.selector_map[update.callback_query.message.message_id] state_function(update, context, selector) # Message handler def handle_message(self, update, context): state_function = None # Take inputs only when permitted if self.input_state.value > InputState.INPUT_STATE_IDLE.value and self.auth_state == AuthState.AUTH_IS_AUTHENTICATED: state_function = self.input_state_map[self.input_state] elif self.auth_state == AuthState.AUTH_IS_AUTHENTICATING: state_function = self.auth_state_map[self.auth_state] if self.auth_state.value > AuthState.AUTH_IS_AUTHENTICATING.value: logger.info("Handling message {msg} by user {id}".format(msg = update.message.text, id=update.message.from_user.id)) # If a state function has been specified, call it and pass down parameters if 
state_function is not None: state_function(update, context) # Auth handlers def handle_auth_idle(self, update, context): pass def handle_authenticating(self, update, context): if update.message.text is not None: # Compute the password hash and evaluate against the pre-set value password_hash = hash_string(update.message.text) if password_hash == self.auth_hash: self.auth_state = AuthState.AUTH_IS_AUTHENTICATED # Log succesful authentication logger.info("Auth completed by user {id}".format(id=update.message.from_user.id)) update.message.reply_text("¡Autenticación completa! 😎 Ahora puedes usar /registrar para comenzar la entrada de datos 📝") else: # Log failed authentication attempt logger.info("Auth attempt failed by user {id}".format(id=update.message.from_user.id)) update.message.reply_text("Contraseña inválida, por favor intenta nuevamente.") def handle_is_authenticated(self, update, context): pass # Input handlers def handle_input_idle(self, update, context): pass def handle_student_name(self, update, context): if update.message.text is not None: student_name = update.message.text self.consulta.student_name = student_name # Update state self.input_state = InputState.INPUT_COURSE_NAME # Create inline keyboard and reply self.course_name_selector.fetch_data() keyboard = self.course_name_selector.get_inline_keyboard() update.message.reply_text("¡Genial! 
Selecciona el nombre de la materia ✒️", reply_markup = keyboard) else: reply_text = "Parece que no enviaste un nombre válido 🤔\n" + \ "Por favor, inténtalo de nuevo 👇" update.message.reply_text(reply_text) def handle_course_name(self, update, context, selector = None): # Only accept callback queries as input if update.callback_query is not None and selector is not None: data = selector.handle_callback_query(update, context) # If data is extracted, push it and change state if data is not None: self.consulta.course_name = data self.input_state = InputState.INPUT_ASSIST_NAME self.assist_name_selector.fetch_data() keyboard = self.assist_name_selector.get_inline_keyboard() update.callback_query.message.reply_text("¡Muy bien! Selecciona el nombre del miembro encargado 🤓", reply_markup = keyboard) def handle_assist_name(self, update, context, selector = None): # Only accept callback queries as input if update.callback_query is not None and selector is not None: data = selector.handle_callback_query(update, context) # If data is extracted, push it and change state if data is not None: self.consulta.assistant_name = data self.input_state = InputState.INPUT_AUX_NAME # Reutilize the same selector self.assist_name_selector.reset() self.assist_name_selector.fetch_data() keyboard = self.assist_name_selector.get_inline_keyboard() update.callback_query.message.reply_text("¡Excelente! Selecciona el nombre del miembro auxiliar 🤝", reply_markup = keyboard) def handle_aux_name(self, update, context, selector = None): # Only accept callback queries as input if update.callback_query is not None and selector is not None: data = selector.handle_callback_query(update, context) # If data is extracted, push it and change state if data is not None: self.consulta.auxiliary_name = data self.input_state = InputState.INPUT_RECEIVED_DATE # Use the date selector keyboard = self.date_selector.get_inline_keyboard() update.callback_query.message.reply_text("¡Recibido! 
Ahora selecciona la fecha en que se recibió la consulta 📆", reply_markup = keyboard) def handle_received_date(self, update, context, selector = None): # Only accept callback queries as input if update.callback_query is not None and selector is not None: data = selector.handle_callback_query(update, context) # If data is extracted, push it and change state if data is not None: self.consulta.received_date = data self.input_state = InputState.INPUT_START_DATE # Reutilize the date selector self.date_selector.reset(persist=True) keyboard = self.date_selector.get_inline_keyboard() update.callback_query.message.reply_text("¡Buenísimo! Selecciona la fecha en que se atendió la consulta 📆", reply_markup = keyboard) def handle_start_date(self, update, context, selector = None): # Only accept callback queries as input if update.callback_query is not None and selector is not None: data = selector.handle_callback_query(update, context) # If data is extracted, push it and change state if data is not None: self.consulta.start_date = data self.input_state = InputState.INPUT_END_DATE # Reutilize the date selector self.date_selector.reset(persist=False) keyboard = self.date_selector.get_inline_keyboard() update.callback_query.message.reply_text("¡Perfecto! Por último, selecciona la fecha en que terminó la consulta 📆", reply_markup = keyboard) def handle_end_date(self, update, context, selector = None): # Only accept callback queries as input if update.callback_query is not None and selector is not None: data = selector.handle_callback_query(update, context) # If data is extracted, push it and change state if data is not None: self.consulta.end_date = data self.input_state = InputState.INPUT_STATE_END logger.info("Data entry completed by user {id}".format(id=update.callback_query.from_user.id)) # self.consulta should be ready at this point! Send a Sheet Manager request! 
self.sheet_manager.request_input(self.consulta) update.callback_query.message.reply_text("¡Tu consulta ha sido enviada! 🎉🎉") update.callback_query.message.reply_text("Puedes confirmar luego en la spreadsheet si tu consulta ha sido procesada correctamente 🔎") update.callback_query.message.reply_text("¡Que tengas un buen día! ✨ Recuerda que puedes usar /registrar para subir una nueva consulta 🙂") # Restart bot instance self.restart(update, context, True) def handle_input_end(self, update, context): pass # Auxiliary Methods def current_selector(self): # No selectors are exposed if not authenticated if self.auth_state == AuthState.AUTH_IS_AUTHENTICATED: # Determine the corresponding selector if self.input_state == InputState.INPUT_COURSE_NAME: return self.course_name_selector elif self.input_state == InputState.INPUT_ASSIST_NAME: return self.assist_name_selector elif self.input_state == InputState.INPUT_AUX_NAME: return self.assist_name_selector elif self.input_state == InputState.INPUT_RECEIVED_DATE: return self.date_selector elif self.input_state == InputState.INPUT_START_DATE: return self.date_selector elif self.input_state == InputState.INPUT_END_DATE: return self.date_selector return None class GeconsultasBot(): def __init__(self): # Fetch deploy mode self.deploy_mode = os.getenv("T_DEPLOY_MODE") # Fetch API token and hashed password from environment variables self.token = os.getenv("T_API_TOKEN") if self.deploy_mode == "prod" else os.getenv("T_DEV_API_TOKEN") self.auth_hash = os.getenv("T_AUTH_HASH") # Halt runtime if token or hash aren't set if not self.token or not self.auth_hash: raise RuntimeError # Create active user map self.user_map = {} # Create sheets object and retrieve data self.sheet_manager = SheetManager() # Instantiate Updater and Dispatcher self.updater = Updater(self.token, use_context=True, workers=8) self.dispatcher = self.updater.dispatcher def run(self): # Add handlers and start bot self.add_handlers() if self.deploy_mode == "prod": # if 
deployed to production environment, set up webhook PORT = int(os.environ.get('PORT', '8443')) # Start and set webhook T_APP_NAME = os.getenv("T_APP_NAME") # Log init logger.info("Initializing main bot at port {port}".format(port=PORT)) self.updater.start_webhook(listen = "0.0.0.0", port = PORT, url_path = self.token, webhook_url = f"https://{T_APP_NAME}.herokuapp.com/{self.token}") else: self.updater.start_polling() self.updater.idle() def add_handlers(self): # Create and add command handlers start_command = CommandHandler("start", self.start) help_command = CommandHandler("help", self.show_help) register_command = CommandHandler("registrar", self.register) restart_command = CommandHandler("restart", self.restart) auth_command = CommandHandler("auth", self.auth) logout_command = CommandHandler("logout", self.logout) # Create message handler message_handler = MessageHandler(Filters.text, self.handle_message) # Data and flow self.dispatcher.add_handler(start_command, HandlerGroup.COMMAND_HANDLER.value) self.dispatcher.add_handler(help_command, HandlerGroup.COMMAND_HANDLER.value) self.dispatcher.add_handler(register_command, HandlerGroup.COMMAND_HANDLER.value) self.dispatcher.add_handler(restart_command, HandlerGroup.COMMAND_HANDLER.value) # Authentication self.dispatcher.add_handler(auth_command, HandlerGroup.COMMAND_HANDLER.value) self.dispatcher.add_handler(logout_command, HandlerGroup.COMMAND_HANDLER.value) # Message handler self.dispatcher.add_handler(message_handler, HandlerGroup.MESSAGE_HANDLER.value) # Inline handler self.dispatcher.add_handler(CallbackQueryHandler(self.handle_inline_query), HandlerGroup.CALLBACK_QUERY_HANDLER.value) # Inline query handler def handle_inline_query(self, update, context): self.call_instance_method(update, context, "handle_inline_query", True) # Command Handlers def start(self, update, context): if update.message.from_user.id is not None: # Capture chat ID and assign a new instance bot if it's not already present user_id = 
update.message.from_user.id if user_id not in self.user_map: self.user_map[user_id] = GeconsultaInstanceBot(user_id, self.auth_hash, self.sheet_manager) self.call_instance_method(update, context, "start") def show_help(self, update, context): self.call_instance_method(update, context, "show_help") def register(self, update, context): self.call_instance_method(update, context, "register") def restart(self, update, context): self.call_instance_method(update, context, "restart") def auth(self, update, context): self.call_instance_method(update, context, "auth") def logout(self, update, context): logged_out = self.call_instance_method(update, context, "logout") if update.message.from_user.id in self.user_map and logged_out: self.user_map.pop(update.message.from_user.id) # Message handlers def handle_message(self, update, context): self.call_instance_method(update, context, "handle_message") # Auxiliary methods def call_instance_method(self, update, context, method, is_inline = False): # No bot instance by default user_bot_instance = None # Try to resolve user_id from CallbackQuery object first if is_inline and update.callback_query.from_user.id in self.user_map: user_bot_instance = self.user_map[update.callback_query.from_user.id] # Else, try to resolve user_id from Message object elif update.message.from_user.id in self.user_map: user_bot_instance = self.user_map[update.message.from_user.id] # If bot instance is found and has method, call it if user_bot_instance is not None and hasattr(user_bot_instance, method): return getattr(user_bot_instance, method)(update, context) if __name__ == "__main__": bot = GeconsultasBot() bot.run()
<reponame>lit26/bokeh_fin import pandas as pd import yfinance as yf from bokeh.layouts import column from bokeh.models import ( BooleanFilter, CustomJS, ColumnDataSource, CDSView, HoverTool, CrosshairTool, NumeralTickFormatter, ) from bokeh.plotting import figure, show import os INDEX_COL = 'index1' w = 0.5 with open(os.path.join(os.path.dirname(__file__), 'autoscale_cb.js'), encoding='utf-8') as _f: _AUTOSCALE_JS_CALLBACK = _f.read() class plot: def __init__(self, stock, data, date='Date', open='Open', high="High", low="Low", close="Close", volume="Volume", kind="candlestick", show_volume=True, addplot=None, main_plot_height=400, volume_plot_height=100): self._stock = stock self._date = date self._open = open self._high = high self._low = low self._close = close self._volume = volume self._kind = kind self._show_volume = show_volume self._addplot = addplot self._main_plot_height = main_plot_height self._volume_plot_height = volume_plot_height self._tools = "pan,xwheel_zoom,box_zoom,zoom_in,zoom_out,reset,save" self._linked_crosshair = CrosshairTool(dimensions="both") self._grid_line_alpha = 0.3 self._p = [] self._process_data(data) self._plot() def add_subplot(self, subplot): p = figure(x_range=self._p[0].x_range, plot_height=200, **self._options) p.xaxis.major_label_overrides = self._major_label_overrides p.grid.grid_line_alpha = self._grid_line_alpha ind_line = [] ind_tooltip = [] for ind in subplot: if ind['kind'] == 'line': l = p.line(x=INDEX_COL, y=ind['column'],source=self._source, **self._format_style('line', **ind)) ind_line.append(l) ind_tooltip.append((ind['column'], f"@{ind['column']}")) elif ind['kind'] == 'scatter': s = p.scatter(x=INDEX_COL, y=ind['column'], source=self._source, **self._format_style('scatter', **ind)) ind_line.append(s) ind_tooltip.append((ind['column'], f"@{ind['column']}")) else: raise ValueError("Other kinds are not supported.") p.add_tools( HoverTool( renderers=ind_line, point_policy="follow_mouse", tooltips=ind_tooltip, 
mode="vline") ) p.add_tools(self._linked_crosshair) self._p.append(p) def _format_tooltips(self, custom): NBSP = "\N{NBSP}" * 4 tool_tips = dict( point_policy="follow_mouse", tooltips=[ ("Date", "@Date{%F}"), ( "OHLC", NBSP.join( ( "@Open{0,0.00}", "@High{0,0.00}", "@Low{0,0.00}", "@Close{0,0.00}", ) ), ), ("Volume", "@Volume{0,0.0[0000]}"), ] + custom, formatters={"@Date": "datetime"}, mode="vline", ) return tool_tips def _process_data(self, data): data[INDEX_COL] = data.index self._source = ColumnDataSource(data) inc = self._source.data[self._close] > self._source.data[self._open] dec = self._source.data[self._open] > self._source.data[self._close] self._view_inc = CDSView(source=self._source, filters=[BooleanFilter(inc)]) self._view_dec = CDSView(source=self._source, filters=[BooleanFilter(dec)]) self._view = CDSView(source=self._source) self._options = dict(x_axis_type="datetime", plot_width=1000) self._major_label_overrides = { i: date.strftime("%b %d") for i, date in enumerate(pd.to_datetime(self._source.data[self._date])) } self._segment = dict( x0=INDEX_COL, x1=INDEX_COL, y0=self._low, y1=self._high, color="black" ) def _volume_plot(self): if self._show_volume: p = figure(x_range=self._p[0].x_range, plot_height=self._volume_plot_height, **self._options) p.xaxis.major_label_overrides = self._major_label_overrides p.grid.grid_line_alpha = self._grid_line_alpha vbar_options = dict( x=INDEX_COL, width=w, top=self._volume, bottom=0, line_color="black", source=self._source, ) t1 = p.vbar(fill_color="green", view=self._view_inc, **vbar_options) t2 = p.vbar(fill_color="red", view=self._view_dec, **vbar_options) p.add_tools( HoverTool( renderers=[t1, t2], **self._format_tooltips([]), ) ) p.add_tools(self._linked_crosshair) p.yaxis.formatter = NumeralTickFormatter(format="0.0a") self._p.append(p) def _format_style(self, plot, **kwargs): styles = {} if plot == 'line': styles['color'] = kwargs['color'] if 'color' in kwargs else 'black' styles['line_width'] = 
kwargs['line_width'] if 'line_width' in kwargs else 1 styles['alpha'] = kwargs['alpha'] if 'alpha' in kwargs else 1 elif plot == 'scatter': styles['color'] = kwargs['color'] if 'color' in kwargs else 'black' styles['size'] = kwargs['size'] if 'size' in kwargs else 3 styles['alpha'] = kwargs['alpha'] if 'alpha' in kwargs else 1 styles['marker'] = kwargs['marker'] if 'marker' in kwargs else 'dot' return styles def _add_mainplot(self, p): if not self._addplot: return [] ind_tooltip = [] for ind in self._addplot: if ind['kind'] == 'line': p.line(x=INDEX_COL, y=ind['column'],source=self._source, **self._format_style('line', **ind)) elif ind['kind'] == 'scatter': p.scatter(x=INDEX_COL, y=ind['column'], source=self._source, **self._format_style('scatter', **ind)) else: raise ValueError("Other kinds are not supported.") ind_tooltip.append((ind['column'], f"@{ind['column']}")) return ind_tooltip def _auto_scale(self, p): custom_js_args = dict(ohlc_range=p.y_range, source=self._source) p.x_range.js_on_change('end', CustomJS(args=custom_js_args, code=_AUTOSCALE_JS_CALLBACK)) return p def _candlestick_plot(self): p = figure( plot_height=self._main_plot_height, title=self._stock, tools=self._tools, **self._options ) p.xaxis.major_label_overrides = self._major_label_overrides p.grid.grid_line_alpha = self._grid_line_alpha p.segment(**self._segment, source=self._source) vbar_options = dict( x=INDEX_COL, width=w, top=self._open, bottom=self._close, line_color="black", source=self._source, ) t1 = p.vbar(fill_color="green", view=self._view_inc, **vbar_options) t2 = p.vbar(fill_color="red", view=self._view_dec, **vbar_options) ind_tooltip = self._add_mainplot(p) p.add_tools( HoverTool( renderers=[t1,t2], **self._format_tooltips(ind_tooltip), ), self._linked_crosshair, ) self._auto_scale(p) self._p.append(p) def _line_plot(self): p = figure( plot_height=self._main_plot_height, title=self._stock, tools=self._tools, **self._options ) p.xaxis.major_label_overrides = 
self._major_label_overrides p.grid.grid_line_alpha = self._grid_line_alpha l = p.line(x=INDEX_COL, y=self._close, source=self._source) ind_tooltip = self._add_mainplot(p) p.add_tools( HoverTool( renderers=[l], **self._format_tooltips(ind_tooltip), ), self._linked_crosshair, ) self._auto_scale(p) self._p.append(p) def _plot(self): if self._kind == "candlestick": self._candlestick_plot() elif self._kind == "line": self._line_plot() else: raise ValueError("Please choose from the following: candlestock, line") self._volume_plot() def show(self): show(column(self._p))
<gh_stars>1-10 import argparse import http.server import inspect import random import string import threading def main(handler): app = HyperToyApp(handler) pretty_ports = ','.join(map(str, app.ports)) print("Listening on {}:{}".format(app.host, pretty_ports)) app.run() class HyperToyApp(object): def __init__(self, handler, description=None): """ handler - The class of the HyperToyHandler implementation. description - Description for --help. Defaults to class docstring. """ self.handler = handler if description is None: description = inspect.getdoc(handler) arg_parser = argparse.ArgumentParser(description=description) arg_parser.add_argument( "--ports", default = [8080], nargs = "+", type = int, help = "The port to listen on." ) arg_parser.add_argument( "--host", default = "127.0.0.1", help = "The host to bind to." ) self._arg_parser = arg_parser def parse_args(self): """ Returns a Namespace of argument values. """ return self._arg_parser.parse_args() @property def args(self): return self.parse_args() def add_argument(self, *args, **kwargs): """ See documentation for ArgumentParser.add_argument. """ self._arg_parser.add_argument(*args, **kwargs) @property def host(self): return self.args.host @property def ports(self): return self.args.ports def run(self, handler_options={}): # Copy command line arguments to handler options. for key, value in vars(self.args).items(): if key not in handler_options: handler_options[key] = value server = HyperToyServer(self.handler, self.host, self.ports, handler_options) server.run() class HyperToyServer(object): def __init__(self, handler, host, ports, handler_options={}): """ handler - Class of HyperToyHandler implementation. host - Host address to bind to. ports - Array of ports to listen on. handler_options - Dict to provide handlers via attribute "options". 
""" self.reference_handler = handler self.handler_options = handler_options self.ports = ports self.host = host def make_handlers(self): handlers = [] for port in self.ports: inner_options = self.handler_options.copy() inner_options["port"] = port class handler(self.reference_handler): options = inner_options handlers.append(handler) return handlers def make_servers(self): servers = [] for handler in self.make_handlers(): host = self.host port = handler.options["port"] servers.append(http.server.HTTPServer((host, port), handler)) return servers def run(self): threads = [] for server in self.make_servers(): thread = threading.Thread(target=server.serve_forever) thread.start() threads.append(thread) while True in [thread.is_alive() for thread in threads]: pass class HyperToyHandler(http.server.BaseHTTPRequestHandler): def server_headers(self, content): """ HTTP response headers as an array of tuples. Default contains Content-type, Last-Modified, and Content-Length. """ return [ ("Content-type", "text/html"), ("Last-Modified", self.date_time_string()), ("Content-Length", str(len(content))) ] def content(self): """ The body of the HTTP response. Default is an empty string. """ return "" def status_code(self): """ The HTTP status code sent in the response. Default is 200. """ return 200 def server_string(self): """ The server string header. Override to provide one other than the default of "Apache". """ return "Apache" def version_string(self): return self.server_string() def on_request(self): """ Called right before response is generated. """ def on_response(self, content, headers, code): """ Called right after response is generated, but before it is sent. """ def send_all(self, content=None, headers=None, code=None): # Note, on_request() was put here and not handle/handle_one_request because they handle # parsing and other logic we want to happen first (e.g. parsing the request). 
# log_request is also a no-go as it is called by send_response, which comes # after we generate our content. self.on_request() if content is None: content = self.content() if headers is None: headers = self.server_headers(content) if code is None: code = self.status_code() self.on_response(content, headers, code) # Send status code self.send_response(code) # Send headers for header in headers: self.send_header(header[0], header[1]) self.end_headers() # Send content self.wfile.write(content.encode()) def send_error(self, code, message=None): """ Instead of sending an error response as this method should, we hijack the error reporting attempt and wrap send_all. In the case a loop is detected, we call the parent's implementation of send_error. """ # If there was not a corresponding do_ handler (e.g. do_GET), trigger our logic. if code == 501: self.send_all() else: super().send_error(code, message) def random_alphanum(min_length, max_length): """ Generate a string of ascii letters and digits, between min_length and max_length, inclusive. """ return random_string(string.ascii_letters + string.digits, min_length, max_length) def random_string(charset, min_length, max_length): """ Generate a string from the given character set (charset), between min_length and max_length, inclusive. """ length = random.randint(min_length, max_length) return ''.join([random.choice(charset) for i in range(length)])
# -*- coding: utf-8 -*-

"""
The MIT License (MIT)

Copyright (c) 2015-present Rapptz

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""

import inspect

from ._types import _BaseCommand

__all__ = (
    'CogMeta',
    'Cog',
)

class CogMeta(type):
    """A metaclass for defining a cog.

    Note that you should probably not use this directly. It is exposed
    purely for documentation purposes along with making custom metaclasses
    to intermix with other metaclasses such as the :class:`abc.ABCMeta`
    metaclass.

    For example, to create an abstract cog mixin class, the following would be done.

    .. code-block:: python3

        import abc

        class CogABCMeta(commands.CogMeta, abc.ABCMeta):
            pass

        class SomeMixin(metaclass=abc.ABCMeta):
            pass

        class SomeCogMixin(SomeMixin, commands.Cog, metaclass=CogABCMeta):
            pass

    .. note::

        When passing an attribute of a metaclass that is documented below, note
        that you must pass it as a keyword-only argument to the class creation
        like the following example:

        .. code-block:: python3

            class MyCog(commands.Cog, name='My Cog'):
                pass

    Attributes
    -----------
    name: :class:`str`
        The cog name. By default, it is the name of the class with no modification.
    description: :class:`str`
        The cog description. By default, it is the cleaned docstring of the class.

        .. versionadded:: 1.6

    command_attrs: :class:`dict`
        A list of attributes to apply to every command inside this cog. The dictionary
        is passed into the :class:`Command` options at ``__init__``.
        If you specify attributes inside the command attribute in the class, it will
        override the one specified inside this attribute. For example:

        .. code-block:: python3

            class MyCog(commands.Cog, command_attrs=dict(hidden=True)):
                @commands.command()
                async def foo(self, ctx):
                    pass # hidden -> True

                @commands.command(hidden=False)
                async def bar(self, ctx):
                    pass # hidden -> False
    """

    def __new__(cls, *args, **kwargs):
        name, bases, attrs = args
        # Pop the metaclass-only kwargs before type.__new__ sees them.
        attrs['__cog_name__'] = kwargs.pop('name', name)
        attrs['__cog_settings__'] = kwargs.pop('command_attrs', {})

        description = kwargs.pop('description', None)
        if description is None:
            description = inspect.cleandoc(attrs.get('__doc__', ''))
        attrs['__cog_description__'] = description

        commands = {}
        listeners = {}
        no_bot_cog = 'Commands or listeners must not start with cog_ or bot_ (in method {0.__name__}.{1})'

        new_cls = super().__new__(cls, name, bases, attrs, **kwargs)
        # Walk the MRO base-first so a subclass definition of the same name
        # replaces (via the del below) the one inherited from a base class.
        for base in reversed(new_cls.__mro__):
            for elem, value in base.__dict__.items():
                if elem in commands:
                    del commands[elem]
                if elem in listeners:
                    del listeners[elem]

                is_static_method = isinstance(value, staticmethod)
                if is_static_method:
                    # Unwrap so isinstance/attribute checks see the raw function.
                    value = value.__func__
                if isinstance(value, _BaseCommand):
                    if is_static_method:
                        raise TypeError('Command in method {0}.{1!r} must not be staticmethod.'.format(base, elem))
                    if elem.startswith(('cog_', 'bot_')):
                        raise TypeError(no_bot_cog.format(base, elem))
                    commands[elem] = value
                elif inspect.iscoroutinefunction(value):
                    try:
                        getattr(value, '__cog_listener__')
                    except AttributeError:
                        continue
                    else:
                        if elem.startswith(('cog_', 'bot_')):
                            raise TypeError(no_bot_cog.format(base, elem))
                        listeners[elem] = value

        new_cls.__cog_commands__ = list(commands.values()) # this will be copied in Cog.__new__

        listeners_as_list = []
        for listener in listeners.values():
            for listener_name in listener.__cog_listener_names__:
                # I use __name__ instead of just storing the value so I can inject
                # the self attribute when the time comes to add them to the bot
                listeners_as_list.append((listener_name, listener.__name__))

        new_cls.__cog_listeners__ = listeners_as_list
        return new_cls

    def __init__(self, *args, **kwargs):
        # Drop the metaclass kwargs (name/description/command_attrs) already
        # consumed in __new__ before delegating to type.__init__.
        super().__init__(*args)

    @classmethod
    def qualified_name(cls):
        # NOTE: on Cog *instances* this is shadowed by the Cog.qualified_name
        # property; this classmethod serves lookups on the class itself.
        return cls.__cog_name__

def _cog_special_method(func):
    # Marks the default cog hooks so _get_overridden_method / has_error_handler
    # can detect whether a subclass actually overrode them.
    func.__cog_special_method__ = None
    return func

class Cog(metaclass=CogMeta):
    """The base class that all cogs must inherit from.

    A cog is a collection of commands, listeners, and optional state to
    help group commands together. More information on them can be found on
    the :ref:`ext_commands_cogs` page.

    When inheriting from this class, the options shown in :class:`CogMeta`
    are equally valid here.
    """

    def __new__(cls, *args, **kwargs):
        # For issue 426, we need to store a copy of the command objects
        # since we modify them to inject `self` to them.
        # To do this, we need to interfere with the Cog creation process.
        self = super().__new__(cls)
        cmd_attrs = cls.__cog_settings__

        # Either update the command with the cog provided defaults or copy it.
        self.__cog_commands__ = tuple(c._update_copy(cmd_attrs) for c in cls.__cog_commands__)

        lookup = {
            cmd.qualified_name: cmd
            for cmd in self.__cog_commands__
        }

        # Update the Command instances dynamically as well
        for command in self.__cog_commands__:
            setattr(self, command.callback.__name__, command)
            parent = command.parent
            if parent is not None:
                # Get the latest parent reference
                parent = lookup[parent.qualified_name]

                # Update our parent's reference to our self
                parent.remove_command(command.name)
                parent.add_command(command)

        return self

    def get_commands(self):
        r"""
        Returns
        --------
        List[:class:`.Command`]
            A :class:`list` of :class:`.Command`\s that are
            defined inside this cog.

            .. note::

                This does not include subcommands.
        """
        return [c for c in self.__cog_commands__ if c.parent is None]

    @property
    def qualified_name(self):
        """:class:`str`: Returns the cog's specified name, not the class name."""
        return self.__cog_name__

    @property
    def description(self):
        """:class:`str`: Returns the cog's description, typically the cleaned docstring."""
        return self.__cog_description__

    @description.setter
    def description(self, description):
        self.__cog_description__ = description

    def walk_commands(self):
        """An iterator that recursively walks through this cog's commands and subcommands.

        Yields
        ------
        Union[:class:`.Command`, :class:`.Group`]
            A command or group from the cog.
        """
        from .core import GroupMixin
        for command in self.__cog_commands__:
            if command.parent is None:
                yield command
                if isinstance(command, GroupMixin):
                    yield from command.walk_commands()

    def get_listeners(self):
        """Returns a :class:`list` of (name, function) listener pairs that are defined in this cog.

        Returns
        --------
        List[Tuple[:class:`str`, :ref:`coroutine <coroutine>`]]
            The listeners defined in this cog.
        """
        return [(name, getattr(self, method_name)) for name, method_name in self.__cog_listeners__]

    @classmethod
    def _get_overridden_method(cls, method):
        """Return None if the method is not overridden. Otherwise returns the overridden method."""
        # Default hooks carry __cog_special_method__ = None (see _cog_special_method),
        # so getattr returns None for them and the method itself for overrides.
        return getattr(method.__func__, '__cog_special_method__', method)

    @classmethod
    def listener(cls, name=None):
        """A decorator that marks a function as a listener.

        This is the cog equivalent of :meth:`.Bot.listen`.

        Parameters
        ------------
        name: :class:`str`
            The name of the event being listened to. If not provided, it
            defaults to the function's name.

        Raises
        --------
        TypeError
            The function is not a coroutine function or a string was not passed as
            the name.
        """

        if name is not None and not isinstance(name, str):
            raise TypeError('Cog.listener expected str but received {0.__class__.__name__!r} instead.'.format(name))

        def decorator(func):
            actual = func
            if isinstance(actual, staticmethod):
                actual = actual.__func__
            if not inspect.iscoroutinefunction(actual):
                raise TypeError('Listener function must be a coroutine function.')
            actual.__cog_listener__ = True
            to_assign = name or actual.__name__
            try:
                actual.__cog_listener_names__.append(to_assign)
            except AttributeError:
                actual.__cog_listener_names__ = [to_assign]
            # we have to return `func` instead of `actual` because
            # we need the type to be `staticmethod` for the metaclass
            # to pick it up but the metaclass unfurls the function and
            # thus the assignments need to be on the actual function
            return func
        return decorator

    def has_error_handler(self):
        """:class:`bool`: Checks whether the cog has an error handler.

        .. versionadded:: 1.7
        """
        return not hasattr(self.cog_command_error.__func__, '__cog_special_method__')

    @_cog_special_method
    def cog_unload(self):
        """A special method that is called when the cog gets removed.

        This function **cannot** be a coroutine. It must be a regular
        function.

        Subclasses must replace this if they want special unloading behaviour.
        """
        pass

    @_cog_special_method
    def bot_check_once(self, ctx):
        """A special method that registers as a :meth:`.Bot.check_once`
        check.

        This function **can** be a coroutine and must take a sole parameter,
        ``ctx``, to represent the :class:`.Context`.
        """
        return True

    @_cog_special_method
    def bot_check(self, ctx):
        """A special method that registers as a :meth:`.Bot.check`
        check.

        This function **can** be a coroutine and must take a sole parameter,
        ``ctx``, to represent the :class:`.Context`.
        """
        return True

    @_cog_special_method
    def cog_check(self, ctx):
        """A special method that registers as a :func:`commands.check`
        for every command and subcommand in this cog.

        This function **can** be a coroutine and must take a sole parameter,
        ``ctx``, to represent the :class:`.Context`.
        """
        return True

    @_cog_special_method
    async def cog_command_error(self, ctx, error):
        """A special method that is called whenever an error
        is dispatched inside this cog.

        This is similar to :func:`.on_command_error` except only applying
        to the commands inside this cog.

        This **must** be a coroutine.

        Parameters
        -----------
        ctx: :class:`.Context`
            The invocation context where the error happened.
        error: :class:`CommandError`
            The error that happened.
        """
        pass

    @_cog_special_method
    async def cog_before_invoke(self, ctx):
        """A special method that acts as a cog local pre-invoke hook.

        This is similar to :meth:`.Command.before_invoke`.

        This **must** be a coroutine.

        Parameters
        -----------
        ctx: :class:`.Context`
            The invocation context.
        """
        pass

    @_cog_special_method
    async def cog_after_invoke(self, ctx):
        """A special method that acts as a cog local post-invoke hook.

        This is similar to :meth:`.Command.after_invoke`.

        This **must** be a coroutine.

        Parameters
        -----------
        ctx: :class:`.Context`
            The invocation context.
        """
        pass

    def _inject(self, bot):
        cls = self.__class__

        # realistically, the only thing that can cause loading errors
        # is essentially just the command loading, which raises if there are
        # duplicates. When this condition is met, we want to undo all what
        # we've added so far for some form of atomic loading.
        for index, command in enumerate(self.__cog_commands__):
            command.cog = self
            if command.parent is None:
                try:
                    bot.add_command(command)
                except Exception as e:
                    # undo our additions
                    for to_undo in self.__cog_commands__[:index]:
                        if to_undo.parent is None:
                            bot.remove_command(to_undo.name)
                    raise e

        # check if we're overriding the default
        if cls.bot_check is not Cog.bot_check:
            bot.add_check(self.bot_check)

        if cls.bot_check_once is not Cog.bot_check_once:
            bot.add_check(self.bot_check_once, call_once=True)

        # while Bot.add_listener can raise if it's not a coroutine,
        # this precondition is already met by the listener decorator
        # already, thus this should never raise.
        # Outside of, memory errors and the like...
        for name, method_name in self.__cog_listeners__:
            bot.add_listener(getattr(self, method_name), name)

        return self

    def _eject(self, bot):
        cls = self.__class__

        try:
            for command in self.__cog_commands__:
                if command.parent is None:
                    bot.remove_command(command.name)

            for _, method_name in self.__cog_listeners__:
                bot.remove_listener(getattr(self, method_name))

            if cls.bot_check is not Cog.bot_check:
                bot.remove_check(self.bot_check)

            if cls.bot_check_once is not Cog.bot_check_once:
                bot.remove_check(self.bot_check_once, call_once=True)
        finally:
            try:
                self.cog_unload()
            except Exception:
                # unload must never prevent ejection from completing
                pass
""" zeromq Interface

Runner-side (ROUTER) and worker-side (REQ) interfaces communicating over ZeroMQ.
Ideas & Help from the 0MQ Guide (zguide.zeromq.org, examples are licensed with MIT)
"""

from .runner import RunnerInterface
from .worker import Interface

import zmq
import numpy as np
import json
from time import sleep
from logging import Logger
import os


@RunnerInterface.register('zeromq')
class ZeroMQRunnerInterface(RunnerInterface):
    """ Runner-Worker Interface using the lightweight message queue `ZeroMQ <https://zeromq.org/>`_

    - can use different transport systems, most commonly tcp
    - can be used efficiently on a cluster (tested)
    - expected to be inefficient for a large number of small, locally run simulations where communication overhead is
      a concern (unverified, could be mitigated by using a different transport system)
    - known issue: some workers were unable to establish a connection with three tries, reason unknown
    """
    def __init__(self, config, size, input_config, output_config, *, logger_parent: Logger = None):
        # per-run status bits, set in handle_msg:
        # 0x02 = input sent, 0x04 = worker died, 0x08 = output received
        if 'FLAGS' not in [var[0] for var in self.internal_vars]:
            self.internal_vars += [('FLAGS', np.byte)]
        super().__init__(config, size, input_config, output_config, logger_parent=logger_parent)

        self.socket = zmq.Context.instance().socket(zmq.ROUTER)
        if self.config['address'] is None:
            bind = f'{self.config["transport"]}://*:{self.config["port"]}'
        else:
            # BUGFIX: the bind-address override lives in the 'address' key
            # (see handle_config defaults) — there is no 'bind' key, so the
            # original `self.config['bind']` raised KeyError whenever an
            # explicit address was configured.
            bind = self.config['address']
        self.socket.bind(bind)
        self.logger.info(f'connected to {bind}')

    def poll(self):
        """Drain all pending worker messages (non-blocking beyond a 10 ms poll)."""
        self.logger.debug('polling: checking for messages')
        while self.socket.poll(timeout=10, flags=zmq.POLLIN):
            msg = self.socket.recv_multipart()
            # ToDo: Heartbeats
            # ROUTER frames: [identity, empty delimiter, payload...]
            self.handle_msg(msg[0], msg[2:])

    def handle_msg(self, address: bytes, msg: list):
        """Dispatch one worker message.

        :param address: ZeroMQ identity frame; workers use ``req_<run_id>``
        :param msg: payload frames (first frame is the command)
        """
        if address[:4] == b'req_':  # req_123
            run_id = int(address[4:])
            self.logger.debug(f'received {msg[0]} from run {run_id}')
            if msg[0] == b'READY':
                # worker asks for its input: reply with dtype descriptions + raw input bytes
                input_descr = json.dumps(self.input_vars).encode()
                output_descr = json.dumps(self.output_vars).encode()
                self.logger.debug(f'send input {input_descr} + {self.input[run_id]} + output {output_descr}')
                self.socket.send_multipart([address, b'', input_descr, self.input[run_id], output_descr])
                self.internal['FLAGS'][run_id] |= 0x02
            elif msg[0] == b'DATA':
                self.output[run_id] = np.frombuffer(msg[1], dtype=self.output_vars)
                self.logger.debug(f'received output {np.frombuffer(msg[1], dtype=self.output_vars)}')
                self.internal['DONE'][run_id] = True
                self.internal['FLAGS'][run_id] |= 0x08
                self.logger.debug('acknowledge DATA')
                self.socket.send_multipart([address, b'', b'ACK'])  # acknowledge
            elif msg[0] == b'TIME':
                self.internal['TIME'][run_id] = np.frombuffer(msg[1], dtype=np.uint)
                self.logger.debug('acknowledge TIME')
                self.socket.send_multipart([address, b'', b'ACK'])  # acknowledge
            elif msg[0] == b'DIE':
                self.internal['FLAGS'][run_id] |= 0x04
                self.logger.debug('acknowledge DIE')
                self.socket.send_multipart([address, b'', b'ACK'])  # acknowledge
            else:
                self.logger.warning(f'received unknown message {address}: {msg}')
        else:
            self.logger.warning(f'received message from unknown client {address}: {msg}')

    def clean(self):
        """Close the ROUTER socket immediately (linger=0)."""
        self.logger.debug('cleaning: closing socket')
        self.socket.close(0)

    @classmethod
    def handle_config(cls, config, base_config):
        """Fill in default values for the interface section of the config.

        Example:
        .. code-block:: yaml

            class: zeromq
            transport: tcp      # transport system used by zeromq
            port: 9000          # port for the interface
            address: null       # override bind address used by zeromq
            connect: null       # override connect address used by zeromq
            timeout: 2500       # zeromq polling timeout, in ms
            retries: 3          # number of zeromq connection retries
            retry-sleep: 1      # sleep between retries, in s
        """
        defaults = dict(transport='tcp', port=9000, address=None, connect=None, timeout=2500, retries=3)
        defaults['retry-sleep'] = 1  # separate: not a valid keyword identifier
        for key, value in defaults.items():
            if key not in config:
                config[key] = value


@Interface.register('zeromq')
class ZeroMQInterface(Interface):
    """ Runner-Worker Interface using the lightweight message queue `ZeroMQ <https://zeromq.org/>`_

    counterpart to :py:class:`ZeroMQRunnerInterface`
    """
    def __init__(self, config, run_id: int, *, logger_parent: Logger = None):
        super().__init__(config, run_id, logger_parent=logger_parent)
        self.connect()  # self.socket
        self._done = False
        self._time = 0
        self.request('READY')  # self.input, self.output

    @property
    def time(self):
        return self._time

    @time.setter
    def time(self, value: int):
        # setting the time immediately reports it to the runner
        self._time = value
        self.request('TIME')

    def done(self):
        """Send the computed output to the runner and shut the socket down."""
        self.request('DATA')
        self.socket.close(0)

    def connect(self):
        """(Re)create the REQ socket and connect it to the runner."""
        self.socket = zmq.Context.instance().socket(zmq.REQ)
        # identity encodes the run id so the ROUTER can address us
        self.socket.setsockopt(zmq.IDENTITY, f'req_{self.run_id}'.encode())
        if self.config['connect'] is None:
            address = os.environ.get('PROFIT_RUNNER_ADDRESS') or 'localhost'
            connect = f'{self.config["transport"]}://{address}:{self.config["port"]}'
        else:
            connect = self.config['connect']
        self.socket.connect(connect)
        self.logger.info(f'connected to {connect}')

    def request(self, request):
        """Send a request and await the reply, retrying with a fresh socket on failure.

        0MQ - Lazy Pirate Pattern

        :param request: one of ``'READY'``, ``'DATA'``, ``'TIME'``
        :raises ValueError: for an unknown request keyword
        :raises ConnectionError: after ``retries + 1`` unanswered attempts
        """
        if request not in ['READY', 'DATA', 'TIME']:
            raise ValueError('unknown request')
        tries = 0
        while True:
            msg = [request.encode()]
            if request == 'DATA':
                msg.append(self.output)
            elif request == 'TIME':
                msg.append(np.uint(self.time))
            self.socket.send_multipart(msg)

            if self.socket.poll(timeout=self.config['timeout'], flags=zmq.POLLIN):
                response = None
                try:
                    response = self.socket.recv_multipart()
                    if request == 'READY':
                        # reply: [input dtype descr (json), raw input bytes, output dtype descr (json)]
                        input_descr, input_data, output_descr = response
                        input_descr = [tuple(column) for column in json.loads(input_descr.decode())]
                        output_descr = [tuple(column[:2] + [tuple(column[2])])
                                        for column in json.loads(output_descr.decode())]
                        self.input = np.frombuffer(input_data, dtype=input_descr)[0]
                        self.output = np.zeros(1, dtype=output_descr)[0]
                        self.logger.info('READY: received input data')
                        self.logger.debug(f'received: {np.frombuffer(input_data, dtype=input_descr)}')
                        return
                    else:
                        assert response[0] == b'ACK'
                        self.logger.debug(f'{request}: message acknowledged')
                        return
                except (ValueError, AssertionError):
                    self.logger.debug(f'{request}: received {response}')
                    self.logger.error(f'{request}: malformed reply')
            else:
                self.logger.warning(f'{request}: no response')

            tries += 1
            sleep(self.config['retry-sleep'])
            if tries >= self.config['retries'] + 1:
                self.logger.error(f'{request}: {tries} requests unsuccessful, abandoning')
                self.socket.close(0)
                raise ConnectionError('could not connect to RunnerInterface')
            # close and reopen the socket (REQ sockets are stuck after an unanswered send)
            self.socket.close(linger=0)
            self.connect()
from rest_framework.test import APITestCase
from rest_framework import status


class TestFlexUserRetrieveUpdate(APITestCase):
    """
    This class is designed to test django_flex_user.views.FlexUser

    Unauthenticated access: every HTTP method must be rejected with 403.
    """
    # REST endpoint under test
    _REST_ENDPOINT_PATH = '/api/accounts/users/user/'

    def test_method_get(self):
        response = self.client.get(self._REST_ENDPOINT_PATH)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_method_post(self):
        response = self.client.post(self._REST_ENDPOINT_PATH)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_method_put(self):
        response = self.client.put(self._REST_ENDPOINT_PATH)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_method_patch(self):
        response = self.client.patch(self._REST_ENDPOINT_PATH)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_method_delete(self):
        response = self.client.delete(self._REST_ENDPOINT_PATH)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_method_options(self):
        response = self.client.options(self._REST_ENDPOINT_PATH)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)


class TestFlexUserRetrieveUpdateAuthenticated(APITestCase):
    """
    This class is designed to test django_flex_user.views.FlexUser

    Authenticated access: exhaustively exercises PATCH with every combination of
    username/email/phone/password values, plus uniqueness, ambiguity and
    normalization rules.
    """
    _REST_ENDPOINT_PATH = '/api/accounts/users/user/'

    class _ContentType:
        # Value matrices for the exhaustive PATCH tests below. JSON payloads can
        # carry explicit null (None); multipart form data cannot, hence the
        # slightly smaller matrices there.
        class ApplicationJSON:
            username_values = [{}, {'username': None}, {'username': ''}, {'username': 'validUsername'},
                               {'username': 'invalidUsername+'}]
            email_values = [{}, {'email': None}, {'email': ''}, {'email': '<EMAIL>'}, {'email': 'invalidEmail'}]
            phone_values = [{}, {'phone': None}, {'phone': ''}, {'phone': '+12025551234'},
                            {'phone': 'invalidPhoneNumber'}]
            password_values = [{}, {'password': None}, {'password': ''}, {'password': '<PASSWORD>'},
                               {'password': '<PASSWORD>'}]

        class MultipartFormData:
            username_values = [{}, {'username': ''}, {'username': 'validUsername'}, {'username': 'invalidUsername+'}]
            email_values = [{}, {'email': ''}, {'email': '<EMAIL>'}, {'email': 'invalidEmail'}]
            phone_values = [{}, {'phone': ''}, {'phone': '+12025551234'}, {'phone': 'invalidPhoneNumber'}]
            password_values = [{}, {'password': ''}, {'password': '<PASSWORD>'}, {'password': '<PASSWORD>'}]

    def setUp(self):
        # One known user that each test authenticates as.
        from django_flex_user.models.user import FlexUser
        self.user = FlexUser.objects.create_user(username='validUsername', password='<PASSWORD>')

    def test_method_get(self):
        is_authenticated = self.client.login(username='validUsername', password='<PASSWORD>')
        self.assertIs(is_authenticated, True)

        response = self.client.get(self._REST_ENDPOINT_PATH)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(
            response.data,
            {
                'username': 'validUsername',
                'email': None,
                'email_verified': None,
                'phone': None,
                'phone_verified': None
            }
        )

        self.client.logout()

    def test_method_post(self):
        is_authenticated = self.client.login(username='validUsername', password='<PASSWORD>')
        self.assertIs(is_authenticated, True)

        response = self.client.post(self._REST_ENDPOINT_PATH)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

        self.client.logout()

    def test_method_patch_format_application_json(self):
        from django.db import transaction

        # Exhaustive cross-product of username/email/phone/password JSON payloads.
        for i in self._ContentType.ApplicationJSON.username_values:
            for j in self._ContentType.ApplicationJSON.email_values:
                for k in self._ContentType.ApplicationJSON.phone_values:
                    for l in self._ContentType.ApplicationJSON.password_values:

                        data = {}
                        data.update(i)
                        data.update(j)
                        data.update(k)
                        data.update(l)

                        with self.subTest(**data), transaction.atomic():
                            """
                            Special considerations for password changes:

                            A password change would normally invalidate the client's session;
                            django_flex_user.serializers.FlexUserSerializer.update calls
                            django.contrib.auth.update_session_auth_hash to rotate the session
                            key and keep the current session alive. Because this iteration's DB
                            changes are rolled back by transaction.atomic, that rotated session
                            is never persisted — so we must force_login before every iteration
                            (Client.login would work too but is much slower) and logout after,
                            for symmetry.
                            """
                            self.client.force_login(self.user,
                                                    'django_flex_user.backends.FlexUserModelBackend')

                            response = self.client.patch(self._REST_ENDPOINT_PATH, data=data, format='json')

                            if 'password' in data and not data['password']:
                                """
                                Password supplied but None or empty: the view must
                                return HTTP_400_BAD_REQUEST.
                                """
                                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
                                self.client.logout()
                            elif ('username' in data and data['username'] is None) and \
                                    ('email' not in data or data['email'] is None) and \
                                    ('phone' not in data or data['phone'] is None):
                                """
                                Username explicitly None while email and phone are each
                                undefined or None (no identifier would remain): the view
                                must return HTTP_400_BAD_REQUEST.
                                """
                                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
                                self.client.logout()
                            elif data.get('username') == '' or \
                                    data.get('email') == '' or \
                                    data.get('phone') == '':
                                """
                                Any of username, email or phone given as the empty string:
                                the view must return HTTP_400_BAD_REQUEST.
                                """
                                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
                                self.client.logout()
                            elif (data.get('username') and 'invalid' in data['username']) or \
                                    (data.get('email') and 'invalid' in data['email']) or \
                                    (data.get('phone') and 'invalid' in data['phone']) or \
                                    (data.get('password') and 'invalid' in data['password']):
                                """
                                Any field defined and invalid: the view must return
                                HTTP_400_BAD_REQUEST.
                                """
                                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
                                self.client.logout()
                            else:
                                """
                                All remaining permutations are valid: the view must return
                                HTTP_200_OK and echo the updated user representation.
                                """
                                self.assertEqual(response.status_code, status.HTTP_200_OK)
                                self.assertEqual(
                                    response.data,
                                    {
                                        'username': data.get('username', 'validUsername'),
                                        'email': data.get('email'),
                                        'email_verified': False if data.get('email') else None,
                                        'phone': data.get('phone'),
                                        'phone_verified': False if data.get('phone') else None
                                    }
                                )
                                self.client.logout()

                            # Undo this permutation's DB changes before the next one.
                            transaction.set_rollback(True)

    def test_method_patch_format_multipart_form_data(self):
        from django.db import transaction

        # Same exhaustive cross-product as above, but as multipart form data
        # (which has no way to express None).
        for i in self._ContentType.MultipartFormData.username_values:
            for j in self._ContentType.MultipartFormData.email_values:
                for k in self._ContentType.MultipartFormData.phone_values:
                    for l in self._ContentType.MultipartFormData.password_values:

                        data = {}
                        data.update(i)
                        data.update(j)
                        data.update(k)
                        data.update(l)

                        with self.subTest(**data), transaction.atomic():
                            """
                            Special considerations for password changes:

                            See test_method_patch_format_application_json — each
                            iteration rolls back its DB changes, so the session rotated by
                            update_session_auth_hash on a password change is never
                            persisted; force_login before every iteration keeps the
                            client's session valid, and we logout after for symmetry.
                            """
                            self.client.force_login(self.user,
                                                    'django_flex_user.backends.FlexUserModelBackend')

                            response = self.client.patch(self._REST_ENDPOINT_PATH, data=data, format='multipart')

                            if 'password' in data and data['password'] == '':
                                """
                                Password supplied but blank: the view must return
                                HTTP_400_BAD_REQUEST.
                                """
                                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
                                self.client.logout()
                            elif ('username' in data and data['username'] == '') and \
                                    ('email' not in data or data['email'] == '') and \
                                    ('phone' not in data or data['phone'] == ''):
                                """
                                Username blank while email and phone are each undefined or
                                blank (no identifier would remain): the view must return
                                HTTP_400_BAD_REQUEST.
                                """
                                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
                                self.client.logout()
                            elif (data.get('username') and 'invalid' in data['username']) or \
                                    (data.get('email') and 'invalid' in data['email']) or \
                                    (data.get('phone') and 'invalid' in data['phone']) or \
                                    (data.get('password') and 'invalid' in data['password']):
                                """
                                Any field defined and invalid: the view must return
                                HTTP_400_BAD_REQUEST.
                                """
                                self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
                                self.client.logout()
                            else:
                                """
                                All remaining permutations are valid: the view must return
                                HTTP_200_OK; blank form values are stored as None.
                                """
                                self.assertEqual(response.status_code, status.HTTP_200_OK)
                                self.assertEqual(
                                    response.data,
                                    {
                                        'username': data.get('username', 'validUsername') or None,
                                        'email': data.get('email') or None,
                                        'email_verified': False if data.get('email') else None,
                                        'phone': data.get('phone') or None,
                                        'phone_verified': False if data.get('phone') else None
                                    }
                                )
                                self.client.logout()

                            # Undo this permutation's DB changes before the next one.
                            transaction.set_rollback(True)

    def test_method_patch_username_case_insensitivity(self):
        # A username differing only in case from an existing one must be rejected.
        from django_flex_user.models.user import FlexUser

        FlexUser.objects.create_user(username='validUsername2', password='<PASSWORD>')

        self.client.force_login(self.user, 'django_flex_user.backends.FlexUserModelBackend')

        data = {'username': 'VALIDUSERNAME2'}
        response = self.client.patch(self._REST_ENDPOINT_PATH, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.client.logout()

    def test_method_patch_duplicate_username(self):
        from django_flex_user.models.user import FlexUser

        FlexUser.objects.create_user(username='validUsername2', password='<PASSWORD>')

        self.client.force_login(self.user, 'django_flex_user.backends.FlexUserModelBackend')

        data = {'username': 'validUsername2'}
        response = self.client.patch(self._REST_ENDPOINT_PATH, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.client.logout()

    def test_method_patch_duplicate_email(self):
        from django_flex_user.models.user import FlexUser

        FlexUser.objects.create_user(email='<EMAIL>', password='<PASSWORD>')

        self.client.force_login(self.user, 'django_flex_user.backends.FlexUserModelBackend')

        data = {'email': '<EMAIL>'}
        response = self.client.patch(self._REST_ENDPOINT_PATH, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.client.logout()

    def test_method_patch_duplicate_phone(self):
        from django_flex_user.models.user import FlexUser

        FlexUser.objects.create_user(phone='+12025551234', password='<PASSWORD>')

        self.client.force_login(self.user, 'django_flex_user.backends.FlexUserModelBackend')

        data = {'phone': '+12025551234'}
        response = self.client.patch(self._REST_ENDPOINT_PATH, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.client.logout()

    def test_method_patch_ambiguous_username(self):
        """
        Verify that an email address or phone number cannot form a valid username.

        :return:
        """
        self.client.force_login(self.user, 'django_flex_user.backends.FlexUserModelBackend')

        data = {'username': '<EMAIL>'}
        response = self.client.patch(self._REST_ENDPOINT_PATH, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        data = {'username': '+12025551234'}
        response = self.client.patch(self._REST_ENDPOINT_PATH, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.client.logout()

    def test_method_patch_ambiguous_email(self):
        """
        Verify that a username or phone number cannot form a valid email.

        :return:
        """
        self.client.force_login(self.user, 'django_flex_user.backends.FlexUserModelBackend')

        data = {'email': 'validUsername'}
        response = self.client.patch(self._REST_ENDPOINT_PATH, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        data = {'email': '+12025551234'}
        response = self.client.patch(self._REST_ENDPOINT_PATH, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.client.logout()

    def test_method_patch_ambiguous_phone(self):
        """
        Verify that a username or email address cannot form a valid phone.

        :return:
        """
        self.client.force_login(self.user, 'django_flex_user.backends.FlexUserModelBackend')

        data = {'phone': 'validUsername'}
        response = self.client.patch(self._REST_ENDPOINT_PATH, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        data = {'phone': '<EMAIL>'}
        response = self.client.patch(self._REST_ENDPOINT_PATH, data=data)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

        self.client.logout()

    def test_method_patch_normalize_username(self):
        # Usernames must be unicode-normalized (NFKC) on save.
        self.client.force_login(self.user, 'django_flex_user.backends.FlexUserModelBackend')

        nfd = 'validUsérname'  # é = U+0065 U+0301
        nfkc = 'validUsérname'  # é = U+00e9

        data = {'username': nfd}
        response = self.client.patch(self._REST_ENDPOINT_PATH, data=data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['username'], nfkc)

        self.client.logout()

    def test_method_patch_normalize_email(self):
        # Email addresses must be normalized (e.g. lower-cased domain) on save.
        self.client.force_login(self.user, 'django_flex_user.backends.FlexUserModelBackend')

        data = {'email': '<EMAIL>'}
        response = self.client.patch(self._REST_ENDPOINT_PATH, data=data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['email'], '<EMAIL>')

        self.client.logout()

    def test_method_put(self):
        is_authenticated = self.client.login(username='validUsername', password='<PASSWORD>')
        self.assertIs(is_authenticated, True)

        response = self.client.put(self._REST_ENDPOINT_PATH)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

        self.client.logout()

    def test_method_delete(self):
        is_authenticated = self.client.login(username='validUsername', password='<PASSWORD>')
        self.assertIs(is_authenticated, True)

        response = self.client.delete(self._REST_ENDPOINT_PATH)
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

        self.client.logout()

    def test_method_options(self):
        is_authenticated = self.client.login(username='validUsername', password='<PASSWORD>')
        self.assertIs(is_authenticated, True)

        response = self.client.options(self._REST_ENDPOINT_PATH)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        self.client.logout()
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/

"""SageMaker training entry point: FastText sentiment classifier on privatized review data."""

import argparse
import os
from os.path import join
import json
import random
import time
import logging
import subprocess
from pathlib import Path
from glob import glob

import torch
from torch import nn
from torch import optim
from torchtext import data
from torchtext.data import Field, TabularDataset
from torchtext.vocab import Vectors

LOG = logging.getLogger()
LOG.setLevel(logging.INFO)


class FastText(nn.Module):
    # The model is taken from the excellent Torchtext tutorial at
    # https://github.com/bentrevett/pytorch-sentiment-analysis/
    def __init__(self, vocab_size, embedding_dim, output_dim, pad_idx):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)
        self.fc = nn.Linear(embedding_dim, output_dim)

    def forward(self, text):
        # text = [sent len, batch size]
        embedded = self.embedding(text)
        # embedded = [sent len, batch size, emb dim]
        embedded = embedded.permute(1, 0, 2)
        # embedded = [batch size, sent len, emb dim]
        pooled = nn.functional.avg_pool2d(embedded, (embedded.shape[1], 1)).squeeze(1)
        # pooled = [batch size, embedding_dim]
        return self.fc(pooled)


def binary_accuracy(preds, y):
    """
    Returns accuracy per batch, i.e. if you get 8/10 right, this returns 0.8, NOT 8
    """
    # round predictions to the closest integer
    rounded_preds = torch.round(torch.sigmoid(preds))
    correct = (rounded_preds == y).float()  # convert into float for division
    acc = correct.sum() / len(correct)
    return acc


def evaluate(model, iterator, criterion):
    """Return (mean loss, mean accuracy) of `model` over `iterator`, without training."""
    epoch_loss = 0
    epoch_acc = 0
    model.eval()
    # No gradients needed for evaluation — avoids building autograd graphs.
    with torch.no_grad():
        for batch in iterator:
            predictions = model(batch.review).squeeze(1)
            loss = criterion(predictions, batch.sentiment)
            acc = binary_accuracy(predictions, batch.sentiment)
            epoch_loss += loss.item()
            epoch_acc += acc.item()
    # Normalize by the number of batches, consistent with train_one_epoch.
    # (The original divided by iterator.iterations, an epoch-reset internal
    # counter that only incidentally equals len(iterator) after a full pass.)
    return epoch_loss / len(iterator), epoch_acc / len(iterator)


def epoch_time(start_time, end_time):
    """Split an elapsed span (seconds) into whole (minutes, seconds)."""
    elapsed_time = end_time - start_time
    elapsed_mins = int(elapsed_time / 60)
    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
    return elapsed_mins, elapsed_secs


def train_one_epoch(model, iterator, optimizer, criterion):
    """Run one optimization pass over `iterator`; return (mean loss, mean accuracy)."""
    epoch_loss = 0
    epoch_acc = 0
    model.train()
    for batch in iterator:
        optimizer.zero_grad()
        predictions = model(batch.review).squeeze(1)
        loss = criterion(predictions, batch.sentiment)
        acc = binary_accuracy(predictions, batch.sentiment)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        epoch_acc += acc.item()
    return epoch_loss / len(iterator), epoch_acc / len(iterator)


def train(model, train_iterator, valid_iterator, n_epochs, model_dir):
    """Train for `n_epochs`, checkpointing the best-validation-loss weights to model_dir/model.pt."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    optimizer = optim.Adam(model.parameters())
    criterion = nn.BCEWithLogitsLoss()
    model = model.to(device)
    criterion = criterion.to(device)

    best_valid_loss = float('inf')
    model_path = join(model_dir, "model.pt")
    for epoch in range(n_epochs):
        print(f'Epoch: {epoch + 1:02} started...')
        start_time = time.time()

        train_loss, train_acc = train_one_epoch(model, train_iterator, optimizer, criterion)
        valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)

        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)

        # keep only the best checkpoint seen so far
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            torch.save(model.state_dict(), model_path)

        print(f'Epoch: {epoch + 1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
        print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc * 100:.2f}%')
        print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc * 100:.2f}%')


def create_fields():
    """Create the torchtext Field objects for the review text and the sentiment label."""
    TEXT = Field(sequential=True, tokenize="basic_english")
    LABEL = data.LabelField(dtype=torch.float)
    return TEXT, LABEL


def create_iterators(train_data, valid_data, batch_size=64):
    """Create bucketed train/validation iterators.

    `batch_size` defaults to the previous hard-coded value of 64; it is now a
    parameter so the CLI's --batch-size argument is actually honored.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    train_iterator, valid_iterator = data.BucketIterator.splits(
        (train_data, valid_data),
        batch_size=batch_size,
        sort=False,
        device=device)
    return train_iterator, valid_iterator


def create_model(input_dimensions, embedding_size, pad_idx, unk_idx, pretrained_embeddings):
    """Build a FastText model initialized with pretrained embeddings."""
    model = FastText(input_dimensions, embedding_size, output_dim=1, pad_idx=pad_idx)
    model.embedding.weight.data.copy_(pretrained_embeddings)
    # Set <unk> and <pad> token vectors to all zero
    model.embedding.weight.data[unk_idx] = torch.zeros(embedding_size)
    model.embedding.weight.data[pad_idx] = torch.zeros(embedding_size)
    return model


if __name__ == '__main__':
    SEED = 42
    torch.manual_seed(SEED)
    torch.backends.cudnn.deterministic = True

    parser = argparse.ArgumentParser()

    # hyperparameters sent by the client are passed as command-line arguments to the script.
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--batch-size', type=int, default=64)
    parser.add_argument('--vocab-size', type=int, default=25_000)
    parser.add_argument('--embedding-size', type=int, default=300)

    # Data, model, and output directories
    parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--train-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
    parser.add_argument('--vectors-dir', type=str, default=os.environ['SM_CHANNEL_VECTORS'])
    parser.add_argument('--vectors-filename', type=str, default='glove.6B.300d.txt.gz')
    parser.add_argument('--train-filename', type=str, default='train_examples.csv')

    args, _ = parser.parse_known_args()

    LOG.info("Loading data...")
    TEXT, LABEL = create_fields()
    fields = [('review', TEXT), ('sentiment', LABEL)]

    # Torchtext expects a single file, so we concatenate the partial output files.
    # Use the parsed --train-dir (defaults to SM_CHANNEL_TRAIN) instead of reading
    # the environment again, so an explicit --train-dir override is respected.
    train_file = Path("{}/{}".format(args.train_dir, args.train_filename))
    if not train_file.exists():
        part_files = glob("{0}/part-*".format(args.train_dir))
        subprocess.check_call(["cat"] + part_files, stdout=train_file.open(mode='w'))
    assert train_file.exists()

    reviews = TabularDataset(
        path=str(train_file),
        format='csv',
        fields=fields,
        skip_header=True)
    train_data, valid_data = reviews.split(
        split_ratio=[.9, .1], random_state=random.seed(SEED))

    # Create vocabs
    MAX_VOCAB_SIZE = args.vocab_size
    vectors = Vectors(args.vectors_dir + "/" + args.vectors_filename)
    TEXT.build_vocab(train_data,
                     max_size=MAX_VOCAB_SIZE,
                     vectors=vectors,
                     unk_init=torch.Tensor.normal_)
    LABEL.build_vocab(train_data)

    # honor the --batch-size argument (previously parsed but ignored)
    train_iterator, valid_iterator = create_iterators(train_data, valid_data, args.batch_size)

    LOG.info("Instantiating model...")
    model = create_model(
        len(TEXT.vocab),
        args.embedding_size,
        TEXT.vocab.stoi[TEXT.pad_token],
        TEXT.vocab.stoi[TEXT.unk_token],
        TEXT.vocab.vectors)

    LOG.info("Starting training...")
    train(model, train_iterator, valid_iterator, args.epochs, args.model_dir)

    # Save vocab, we'll need them for testing later
    vocab_path = join(args.model_dir, "vocab.pt")
    torch.save(TEXT.vocab, vocab_path)

    # Keep track of experiment settings
    json_file = join(args.model_dir, "training-settings.json")
    with open(json_file, 'w') as f:
        f.write(json.dumps(vars(args)))
import torch import numpy as np import os import torch.nn as nn from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR from sklearn.metrics import confusion_matrix from utils import make_log_name class TrainerFactory: def __init__(self): pass @staticmethod def get_trainer(method, **kwargs): if method == 'scratch': import trainer.vanilla_train as trainer elif method == 'kd_hinton': import trainer.kd_hinton as trainer elif method == 'kd_fitnet': import trainer.kd_fitnet as trainer elif method == 'kd_at': import trainer.kd_at as trainer elif method == 'kd_nst': import trainer.kd_nst as trainer elif method == 'kd_mfd': import trainer.kd_mfd as trainer elif method == 'scratch_mmd': import trainer.scratch_mmd as trainer elif method == 'adv_debiasing': import trainer.adv_debiasing as trainer else: raise Exception('Not allowed method') return trainer.Trainer(**kwargs) class GenericTrainer: ''' Base class for trainer; to implement a new training routine, inherit from this. ''' def __init__(self, model, args, optimizer, teacher=None): self.get_inter = args.get_inter self.cuda = args.cuda self.device = args.device self.t_device = args.t_device self.term = args.term self.lr = args.lr self.parallel = args.parallel self.epochs = args.epochs self.method = args.method self.model = model self.teacher = teacher self.optimizer = optimizer self.optim_type = args.optimizer self.img_size = args.img_size if not 'cifar10' in args.dataset else 32 self.criterion=torch.nn.CrossEntropyLoss() self.scheduler = None self.log_name = make_log_name(args) self.log_dir = os.path.join(args.log_dir, args.date, args.dataset, args.method) self.save_dir = os.path.join(args.save_dir, args.date, args.dataset, args.method) if self.optim_type == 'Adam' and self.optimizer is not None: self.scheduler = ReduceLROnPlateau(self.optimizer) else: self.scheduler = MultiStepLR(self.optimizer, [30, 60, 90], gamma=0.1) def evaluate(self, model, loader, criterion, device=None, groupwise=False): model.eval() 
num_groups = loader.dataset.num_groups num_classes = loader.dataset.num_classes device = self.device if device is None else device eval_acc = 0 if not groupwise else torch.zeros(num_groups, num_classes).cuda(device) eval_loss = 0 if not groupwise else torch.zeros(num_groups, num_classes).cuda(device) eval_eopp_list = torch.zeros(num_groups, num_classes).cuda(device) eval_data_count = torch.zeros(num_groups, num_classes).cuda(device) if 'Custom' in type(loader).__name__: loader = loader.generate() with torch.no_grad(): for j, eval_data in enumerate(loader): # Get the inputs inputs, _, groups, classes, _ = eval_data # labels = classes if self.cuda: inputs = inputs.cuda(device) labels = labels.cuda(device) groups = groups.cuda(device) outputs = model(inputs) if groupwise: if self.cuda: groups = groups.cuda(device) loss = nn.CrossEntropyLoss(reduction='none')(outputs, labels) preds = torch.argmax(outputs, 1) acc = (preds == labels).float().squeeze() for g in range(num_groups): for l in range(num_classes): eval_loss[g, l] += loss[(groups == g) * (labels == l)].sum() eval_acc[g, l] += acc[(groups == g) * (labels == l)].sum() eval_data_count[g, l] += torch.sum((groups == g) * (labels == l)) else: loss = criterion(outputs, labels) eval_loss += loss.item() * len(labels) preds = torch.argmax(outputs, 1) acc = (preds == labels).float().squeeze() eval_acc += acc.sum() for g in range(num_groups): for l in range(num_classes): eval_eopp_list[g, l] += acc[(groups == g) * (labels == l)].sum() eval_data_count[g, l] += torch.sum((groups == g) * (labels == l)) eval_loss = eval_loss / eval_data_count.sum() if not groupwise else eval_loss / eval_data_count eval_acc = eval_acc / eval_data_count.sum() if not groupwise else eval_acc / eval_data_count eval_eopp_list = eval_eopp_list / eval_data_count eval_max_eopp = torch.max(eval_eopp_list, dim=0)[0] - torch.min(eval_eopp_list, dim=0)[0] eval_max_eopp = torch.max(eval_max_eopp).item() model.train() return eval_loss, eval_acc, eval_max_eopp 
def save_model(self, save_dir, log_name="", model=None):
    """Serialize a model's state_dict to ``<save_dir>/<log_name>.pt``.

    Args:
        save_dir: Directory the checkpoint is written into.
        log_name: Base filename (without the ``.pt`` extension).
        model: Model to save; defaults to ``self.model`` when None.
    """
    # Fall back to the trainer's own model when no explicit model is given.
    model_to_save = self.model if model is None else model
    model_savepath = os.path.join(save_dir, log_name + '.pt')
    # Only the parameters are persisted, not the module object itself.
    torch.save(model_to_save.state_dict(), model_savepath)
    print('Model saved to %s' % model_savepath)

def compute_confusion_matix(self, dataset='test', num_classes=2, dataloader=None, log_dir="", log_name=""):
    """Compute one confusion matrix per group and dump results as .mat files.

    Iterates ``dataloader`` once with the model in eval mode, accumulates a
    ``num_classes`` x ``num_classes`` confusion matrix for every group id seen,
    and saves two MATLAB files under ``log_dir``: ``*_<dataset>_confu`` (the
    per-group confusion matrices, keyed by the group id as a string) and
    ``*_<dataset>_pred`` (raw groups/targets/outputs, plus intermediate
    features when ``self.get_inter`` is set).

    Args:
        dataset: Label used only in filenames and log messages.
        num_classes: Number of classes (confusion matrix side length).
        dataloader: Iterable yielding ``(inputs, _, groups, targets, _)``
            batches — NOTE(review): assumed 5-tuple layout; confirm against
            the dataset class.
        log_dir: Output directory for the ``.mat`` files.
        log_name: Filename prefix for the saved files.

    Returns:
        defaultdict mapping group-id strings to numpy confusion matrices.
    """
    from scipy.io import savemat
    from collections import defaultdict
    self.model.eval()
    # Missing groups lazily get an all-zeros matrix on first access.
    confu_mat = defaultdict(lambda: np.zeros((num_classes, num_classes)))
    print('# of {} data : {}'.format(dataset, len(dataloader.dataset)))
    predict_mat = {}
    # Accumulators for the raw per-sample dump saved alongside the matrices.
    output_set = torch.tensor([])
    group_set = torch.tensor([], dtype=torch.long)
    target_set = torch.tensor([], dtype=torch.long)
    intermediate_feature_set = torch.tensor([])
    with torch.no_grad():
        for i, data in enumerate(dataloader):
            # Get the inputs
            inputs, _, groups, targets, _ = data
            labels = targets
            groups = groups.long()
            if self.cuda:
                inputs = inputs.cuda(self.device)
                labels = labels.cuda(self.device)
                # NOTE(review): `groups` deliberately stays on CPU here; the
                # masks below are built from it — confirm device mixing is OK
                # for the torch version in use.
            # forward
            outputs = self.model(inputs)
            if self.get_inter:
                # NOTE(review): second forward pass just to grab the
                # penultimate activation — could reuse the first pass.
                intermediate_feature = self.model.forward(inputs, get_inter=True)[-2]
            group_set = torch.cat((group_set, groups))
            target_set = torch.cat((target_set, targets))
            output_set = torch.cat((output_set, outputs.cpu()))
            if self.get_inter:
                intermediate_feature_set = torch.cat((intermediate_feature_set, intermediate_feature.cpu()))
            pred = torch.argmax(outputs, 1)
            # Only groups actually present in this batch get updated.
            group_element = list(torch.unique(groups).numpy())
            # NOTE(review): this inner `i` shadows the enumerate() index
            # above; harmless today (the outer `i` is unused) but fragile.
            for i in group_element:
                mask = groups == i
                if len(labels[mask]) != 0:
                    # Fixed `labels=` keeps the matrix num_classes-sized even
                    # when some classes are absent from the batch.
                    confu_mat[str(i)] += confusion_matrix(
                        labels[mask].cpu().numpy(), pred[mask].cpu().numpy(),
                        labels=[i for i in range(num_classes)])
    predict_mat['group_set'] = group_set.numpy()
    predict_mat['target_set'] = target_set.numpy()
    predict_mat['output_set'] = output_set.numpy()
    if self.get_inter:
        predict_mat['intermediate_feature_set'] = intermediate_feature_set.numpy()
    savepath = os.path.join(log_dir, log_name + '_{}_confu'.format(dataset))
    print('savepath', savepath)
    # appendmat=True lets savemat add the .mat extension automatically.
    savemat(savepath, confu_mat, appendmat=True)
    savepath_pred = os.path.join(log_dir, log_name + '_{}_pred'.format(dataset))
    savemat(savepath_pred, predict_mat, appendmat=True)
    print('Computed confusion matrix for {} dataset successfully!'.format(dataset))
    return confu_mat
import unittest

import matplotlib.pyplot as plt

from app.drl.holt_winters import HoltWinters


class THoltWinters(unittest.TestCase):
    """Exploratory tests for HoltWinters smoothing routines.

    These tests mostly print or plot their results rather than asserting
    exact values; expected values are recorded in comments where known.
    NOTE(review): the plt.show() calls block until the window is closed,
    which makes the suite unsuitable for unattended CI runs — confirm
    whether a non-interactive backend should be forced instead.
    """

    def test_weighted_average(self):
        """Weighted average of the last len(weights) points of a series."""
        holt_winters = HoltWinters()
        series = [3.0, 10.0, 12.0, 13.0, 12.0, 10.0, 12.0]
        weights = [0.1, 0.2, 0.3, 0.4]
        mean = holt_winters.weighted_average(series, weights)
        print('mean={0}'.format(mean))
        self.assertTrue(True)

    def test_expopential_smoothing(self):
        """Single exponential smoothing with a high alpha (0.9)."""
        series = [3.0, 10.0, 12.0, 13.0, 12.0, 10.0, 12.0]
        alpha = 0.9
        holt_winters = HoltWinters()
        result = holt_winters.expopential_smoothing(series, alpha)
        print(result)
        self.assertTrue(True)

    def test_expopential_smoothing_2(self):
        """Single exponential smoothing with a low alpha (0.1)."""
        series = [3.0, 10.0, 12.0, 13.0, 12.0, 10.0, 12.0]
        alpha = 0.1
        holt_winters = HoltWinters()
        result = holt_winters.expopential_smoothing(series, alpha)
        print(result)
        self.assertTrue(True)

    def test_expopential_smoothing_3(self):
        """Plot the series against both alpha=0.1 and alpha=0.9 smoothings."""
        holt_winters = HoltWinters()
        series = [3.0, 10.0, 12.0, 13.0, 12.0, 10.0, 12.0]
        alpha = 0.1
        r1 = holt_winters.expopential_smoothing(series, alpha)
        alpha = 0.9
        r2 = holt_winters.expopential_smoothing(series, alpha)
        fig, ax = plt.subplots()
        ax.plot(series, label='series')
        ax.plot(r1, label='a=0.1')
        ax.plot(r2, label='a=0.9')
        legend = ax.legend(loc='lower right', shadow=True, fontsize='medium')
        plt.show()

    def test_double_expopential_smoothing(self):
        """Plot double (level + trend) exponential smoothing predictions."""
        holt_winters = HoltWinters()
        series = [3.0, 10.0, 12.0, 13.0, 12.0, 10.0, 12.0]
        alpha = 0.9
        beta = 0.9
        result = holt_winters.double_exponential_smoothing(series, alpha, beta)
        fig, ax = plt.subplots()
        ax.plot(series, label='series')
        ax.plot(result, label='predict')
        legend = ax.legend(loc='lower right', shadow=True, fontsize='medium')
        plt.show()

    def test_initial_trend(self):
        """Initial trend estimate over a season length of L=12."""
        series = [30,21,29,31,40,48,53,47,37,39,31,29,17,9,20,24,27,35,41,38,
                  27,31,27,26,21,13,21,18,33,35,40,36,22,24,21,20,17,14,17,19,
                  26,29,40,31,20,24,18,26,17,9,17,21,28,32,46,33,23,28,22,27,
                  18,8,17,21,31,34,44,38,31,30,26,32]
        holt_winters = HoltWinters()
        L = 12
        result = holt_winters.initial_trend(series, L)
        print(result)
        # Expected result: -0.7847222222222222
        fig, ax = plt.subplots()
        ax.plot(series, label='series')
        legend = ax.legend(loc='lower right', shadow=True, fontsize='medium')
        plt.show()
        self.assertTrue(True)

    def test_initial_seasonal_components(self):
        """Initial per-season components for a season length of L=12."""
        series = [30,21,29,31,40,48,53,47,37,39,31,29,17,9,20,24,27,35,41,38,
                  27,31,27,26,21,13,21,18,33,35,40,36,22,24,21,20,17,14,17,19,
                  26,29,40,31,20,24,18,26,17,9,17,21,28,32,46,33,23,28,22,27,
                  18,8,17,21,31,34,44,38,31,30,26,32]
        holt_winters = HoltWinters()
        L = 12
        result = holt_winters.initial_seasonal_components(series, L)
        print(result)
        # Expected result: {0: -7.4305555555555545, 1: -15.097222222222221, 2: -7.263888888888888, 3: -5.097222222222222, 4: 3.402777777777778, 5: 8.069444444444445, 6: 16.569444444444446, 7: 9.736111111111112, 8: -0.7638888888888887, 9: 1.902777777777778, 10: -3.263888888888889, 11: -0.7638888888888887}
        fig, ax = plt.subplots()
        ax.plot(series, label='series')
        legend = ax.legend(loc='lower right', shadow=True, fontsize='medium')
        plt.show()
        self.assertTrue(True)

    def test_triple_exponential_smoothing(self):
        """Plot triple (Holt-Winters) smoothing with a 24-step forecast."""
        series = [30,21,29,31,40,48,53,47,37,39,31,29,17,9,20,24,27,35,41,38,
                  27,31,27,26,21,13,21,18,33,35,40,36,22,24,21,20,17,14,17,19,
                  26,29,40,31,20,24,18,26,17,9,17,21,28,32,46,33,23,28,22,27,
                  18,8,17,21,31,34,44,38,31,30,26,32]
        holt_winters = HoltWinters()
        L = 12
        alpha = 0.716
        beta = 0.029
        gamma = 0.993
        n_preds = 24
        result = holt_winters.triple_exponential_smoothing(series, L, alpha, beta, gamma, n_preds)
        fig, ax = plt.subplots()
        ax.plot(result, label='predict')
        ax.plot(series, label='series')
        legend = ax.legend(loc='lower right', shadow=True, fontsize='medium')
        plt.show()
        self.assertTrue(True)
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

import mock

from ._testing import _make_credentials
from google.cloud.bigtable.cluster import Cluster


class TestInstance(unittest.TestCase):
    """Unit tests for ``google.cloud.bigtable.instance.Instance``.

    The admin API surface is replaced with autospec'd mocks throughout, so
    no network traffic occurs; target classes are imported lazily inside
    helpers/tests so import errors surface per-test.
    """

    # Shared fixture constants.
    PROJECT = "project"
    INSTANCE_ID = "instance-id"
    INSTANCE_NAME = "projects/" + PROJECT + "/instances/" + INSTANCE_ID
    LOCATION_ID = "locid"
    LOCATION = "projects/" + PROJECT + "/locations/" + LOCATION_ID
    APP_PROFILE_PATH = (
        "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/appProfiles/"
    )
    DISPLAY_NAME = "display_name"
    LABELS = {"foo": "bar"}
    OP_ID = 8915
    OP_NAME = "operations/projects/{}/instances/{}operations/{}".format(
        PROJECT, INSTANCE_ID, OP_ID
    )
    TABLE_ID = "table_id"
    TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID

    @staticmethod
    def _get_target_class():
        """Return the class under test (imported lazily)."""
        from google.cloud.bigtable.instance import Instance

        return Instance

    def _make_one(self, *args, **kwargs):
        """Instantiate the class under test."""
        return self._get_target_class()(*args, **kwargs)

    @staticmethod
    def _get_target_client_class():
        """Return the Bigtable ``Client`` class (imported lazily)."""
        from google.cloud.bigtable.client import Client

        return Client

    def _make_client(self, *args, **kwargs):
        """Instantiate a Bigtable client for use as an instance parent."""
        return self._get_target_client_class()(*args, **kwargs)

    def test_constructor_defaults(self):
        """Default-constructed instance mirrors its ID and leaves rest None."""
        client = object()
        instance = self._make_one(self.INSTANCE_ID, client)
        self.assertEqual(instance.instance_id, self.INSTANCE_ID)
        self.assertEqual(instance.display_name, self.INSTANCE_ID)
        self.assertIsNone(instance.type_)
        self.assertIsNone(instance.labels)
        self.assertIs(instance._client, client)
        self.assertIsNone(instance.state)

    def test_constructor_non_default(self):
        """Explicit constructor arguments are stored verbatim."""
        from google.cloud.bigtable import enums

        instance_type = enums.Instance.Type.DEVELOPMENT
        state = enums.Instance.State.READY
        labels = {"test": "test"}
        client = object()
        instance = self._make_one(
            self.INSTANCE_ID,
            client,
            display_name=self.DISPLAY_NAME,
            instance_type=instance_type,
            labels=labels,
            _state=state,
        )
        self.assertEqual(instance.instance_id, self.INSTANCE_ID)
        self.assertEqual(instance.display_name, self.DISPLAY_NAME)
        self.assertEqual(instance.type_, instance_type)
        self.assertEqual(instance.labels, labels)
        self.assertIs(instance._client, client)
        self.assertEqual(instance.state, state)

    def test__update_from_pb_success(self):
        """_update_from_pb copies all fields from a populated protobuf."""
        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
        from google.cloud.bigtable import enums

        instance_type = enums.Instance.Type.PRODUCTION
        state = enums.Instance.State.READY
        instance_pb = data_v2_pb2.Instance(
            display_name=self.DISPLAY_NAME,
            type=instance_type,
            labels=self.LABELS,
            state=state,
        )

        instance = self._make_one(None, None)
        self.assertIsNone(instance.display_name)
        self.assertIsNone(instance.type_)
        self.assertIsNone(instance.labels)
        instance._update_from_pb(instance_pb)
        self.assertEqual(instance.display_name, self.DISPLAY_NAME)
        self.assertEqual(instance.type_, instance_type)
        self.assertEqual(instance.labels, self.LABELS)
        self.assertEqual(instance._state, state)

    def test__update_from_pb_success_defaults(self):
        """_update_from_pb falls back to proto defaults for unset fields."""
        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
        from google.cloud.bigtable import enums

        instance_pb = data_v2_pb2.Instance(display_name=self.DISPLAY_NAME)

        instance = self._make_one(None, None)
        self.assertIsNone(instance.display_name)
        self.assertIsNone(instance.type_)
        self.assertIsNone(instance.labels)
        instance._update_from_pb(instance_pb)
        self.assertEqual(instance.display_name, self.DISPLAY_NAME)
        self.assertEqual(instance.type_, enums.Instance.Type.UNSPECIFIED)
        self.assertFalse(instance.labels)

    def test__update_from_pb_no_display_name(self):
        """A protobuf without display_name is rejected."""
        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2

        instance_pb = data_v2_pb2.Instance()
        instance = self._make_one(None, None)
        self.assertIsNone(instance.display_name)
        with self.assertRaises(ValueError):
            instance._update_from_pb(instance_pb)

    def test_from_pb_success(self):
        """from_pb builds a fully populated Instance from a protobuf."""
        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
        from google.cloud.bigtable import enums

        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )

        instance_type = enums.Instance.Type.PRODUCTION
        state = enums.Instance.State.READY
        instance_pb = data_v2_pb2.Instance(
            name=self.INSTANCE_NAME,
            display_name=self.INSTANCE_ID,
            type=instance_type,
            labels=self.LABELS,
            state=state,
        )

        klass = self._get_target_class()
        instance = klass.from_pb(instance_pb, client)
        self.assertIsInstance(instance, klass)
        self.assertEqual(instance._client, client)
        self.assertEqual(instance.instance_id, self.INSTANCE_ID)
        self.assertEqual(instance.display_name, self.INSTANCE_ID)
        self.assertEqual(instance.type_, instance_type)
        self.assertEqual(instance.labels, self.LABELS)
        self.assertEqual(instance._state, state)

    def test_from_pb_bad_instance_name(self):
        """from_pb rejects a malformed instance resource name."""
        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2

        instance_name = "INCORRECT_FORMAT"
        instance_pb = data_v2_pb2.Instance(name=instance_name)

        klass = self._get_target_class()
        with self.assertRaises(ValueError):
            klass.from_pb(instance_pb, None)

    def test_from_pb_project_mistmatch(self):
        """from_pb rejects a protobuf whose project differs from the client's."""
        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2

        ALT_PROJECT = "ALT_PROJECT"
        credentials = _make_credentials()
        client = self._make_client(
            project=ALT_PROJECT, credentials=credentials, admin=True
        )

        self.assertNotEqual(self.PROJECT, ALT_PROJECT)

        instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME)

        klass = self._get_target_class()
        with self.assertRaises(ValueError):
            klass.from_pb(instance_pb, client)

    def test_name_property(self):
        """`name` is derived from the client's project and the instance ID."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client

        api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )

        # Patch the API method.
        client._instance_admin_client = api
        instance = self._make_one(self.INSTANCE_ID, client)
        self.assertEqual(instance.name, self.INSTANCE_NAME)

    def test___eq__(self):
        """Instances with the same ID and client compare equal."""
        client = object()
        instance1 = self._make_one(self.INSTANCE_ID, client)
        instance2 = self._make_one(self.INSTANCE_ID, client)
        self.assertEqual(instance1, instance2)

    def test___eq__type_differ(self):
        """An Instance never equals an unrelated object."""
        client = object()
        instance1 = self._make_one(self.INSTANCE_ID, client)
        instance2 = object()
        self.assertNotEqual(instance1, instance2)

    def test___ne__same_value(self):
        """__ne__ is False for equal instances."""
        client = object()
        instance1 = self._make_one(self.INSTANCE_ID, client)
        instance2 = self._make_one(self.INSTANCE_ID, client)
        comparison_val = instance1 != instance2
        self.assertFalse(comparison_val)

    def test___ne__(self):
        """Instances with different IDs/clients compare unequal."""
        instance1 = self._make_one("instance_id1", "client1")
        instance2 = self._make_one("instance_id2", "client2")
        self.assertNotEqual(instance1, instance2)

    def test_create_check_location_and_clusters(self):
        """create() rejects location_id combined with explicit clusters."""
        instance = self._make_one(self.INSTANCE_ID, None)
        with self.assertRaises(ValueError):
            instance.create(location_id=self.LOCATION_ID, clusters=[object(), object()])

    def test_create_check_serve_nodes_and_clusters(self):
        """create() rejects serve_nodes combined with explicit clusters."""
        instance = self._make_one(self.INSTANCE_ID, None)
        with self.assertRaises(ValueError):
            instance.create(serve_nodes=3, clusters=[object(), object()])

    def test_create_check_default_storage_type_and_clusters(self):
        """create() rejects default_storage_type combined with clusters."""
        instance = self._make_one(self.INSTANCE_ID, None)
        with self.assertRaises(ValueError):
            instance.create(default_storage_type=1, clusters=[object(), object()])

    def _instance_api_response_for_create(self):
        """Build a mocked instance-admin API whose create_instance returns an LRO."""
        import datetime
        from google.api_core import operation
        from google.longrunning import operations_pb2
        from google.protobuf.any_pb2 import Any
        from google.cloud._helpers import _datetime_to_pb_timestamp
        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
        from google.cloud.bigtable_admin_v2.proto import (
            bigtable_instance_admin_pb2 as messages_v2_pb2,
        )
        from google.cloud.bigtable_admin_v2.types import instance_pb2

        NOW = datetime.datetime.utcnow()
        NOW_PB = _datetime_to_pb_timestamp(NOW)
        metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB)
        type_url = "type.googleapis.com/{}".format(
            messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name
        )
        response_pb = operations_pb2.Operation(
            name=self.OP_NAME,
            metadata=Any(type_url=type_url, value=metadata.SerializeToString()),
        )
        response = operation.from_gapic(
            response_pb,
            mock.Mock(),
            instance_pb2.Instance,
            metadata_type=messages_v2_pb2.CreateInstanceMetadata,
        )
        project_path_template = "projects/{}"
        location_path_template = "projects/{}/locations/{}"
        instance_api = mock.create_autospec(
            bigtable_instance_admin_client.BigtableInstanceAdminClient
        )
        instance_api.create_instance.return_value = response
        # Path helpers must behave like real formatters for the asserts below.
        instance_api.project_path = project_path_template.format
        instance_api.location_path = location_path_template.format
        return instance_api, response

    def test_create(self):
        """create() with location/serve_nodes builds one default cluster."""
        from google.cloud.bigtable import enums
        from google.cloud.bigtable_admin_v2.types import instance_pb2

        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )
        instance = self._make_one(
            self.INSTANCE_ID,
            client,
            self.DISPLAY_NAME,
            enums.Instance.Type.PRODUCTION,
            self.LABELS,
        )
        instance_api, response = self._instance_api_response_for_create()
        client._instance_admin_client = instance_api
        serve_nodes = 3

        result = instance.create(location_id=self.LOCATION_ID, serve_nodes=serve_nodes)

        cluster_pb = instance_pb2.Cluster(
            location=instance_api.location_path(self.PROJECT, self.LOCATION_ID),
            serve_nodes=serve_nodes,
            default_storage_type=enums.StorageType.UNSPECIFIED,
        )
        instance_pb = instance_pb2.Instance(
            display_name=self.DISPLAY_NAME,
            type=enums.Instance.Type.PRODUCTION,
            labels=self.LABELS,
        )
        cluster_id = "{}-cluster".format(self.INSTANCE_ID)
        instance_api.create_instance.assert_called_once_with(
            parent=instance_api.project_path(self.PROJECT),
            instance_id=self.INSTANCE_ID,
            instance=instance_pb,
            clusters={cluster_id: cluster_pb},
        )
        self.assertIs(result, response)

    def test_create_w_clusters(self):
        """create() forwards explicitly supplied Cluster objects."""
        from google.cloud.bigtable import enums
        from google.cloud.bigtable_admin_v2.types import instance_pb2

        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )
        instance = self._make_one(
            self.INSTANCE_ID,
            client,
            self.DISPLAY_NAME,
            enums.Instance.Type.PRODUCTION,
            self.LABELS,
        )
        instance_api, response = self._instance_api_response_for_create()
        client._instance_admin_client = instance_api

        # Perform the method and check the result.
        cluster_id_1 = "cluster-1"
        cluster_id_2 = "cluster-2"
        location_id_1 = "location-id-1"
        location_id_2 = "location-id-2"
        serve_nodes_1 = 3
        serve_nodes_2 = 5
        clusters = [
            Cluster(
                cluster_id_1,
                instance,
                location_id=location_id_1,
                serve_nodes=serve_nodes_1,
            ),
            Cluster(
                cluster_id_2,
                instance,
                location_id=location_id_2,
                serve_nodes=serve_nodes_2,
            ),
        ]

        result = instance.create(clusters=clusters)

        cluster_pb_1 = instance_pb2.Cluster(
            location=instance_api.location_path(self.PROJECT, location_id_1),
            serve_nodes=serve_nodes_1,
            default_storage_type=enums.StorageType.UNSPECIFIED,
        )
        cluster_pb_2 = instance_pb2.Cluster(
            location=instance_api.location_path(self.PROJECT, location_id_2),
            serve_nodes=serve_nodes_2,
            default_storage_type=enums.StorageType.UNSPECIFIED,
        )
        instance_pb = instance_pb2.Instance(
            display_name=self.DISPLAY_NAME,
            type=enums.Instance.Type.PRODUCTION,
            labels=self.LABELS,
        )
        instance_api.create_instance.assert_called_once_with(
            parent=instance_api.project_path(self.PROJECT),
            instance_id=self.INSTANCE_ID,
            instance=instance_pb,
            clusters={cluster_id_1: cluster_pb_1, cluster_id_2: cluster_pb_2},
        )
        self.assertIs(result, response)

    def test_exists(self):
        """exists() is True on success, False on NotFound, raises otherwise."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
        from google.api_core import exceptions

        api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )

        # Create response_pb
        instance_name = client.instance_admin_client.instance_path(
            self.PROJECT, self.INSTANCE_ID
        )
        response_pb = data_v2_pb2.Instance(name=instance_name)

        # Patch the stub used by the API method.
        client._instance_admin_client = api
        instance_admin_client = client._instance_admin_client
        instance_stub = instance_admin_client.transport
        instance_stub.get_instance.side_effect = [
            response_pb,
            exceptions.NotFound("testing"),
            exceptions.BadRequest("testing"),
        ]

        # Perform the method and check the result.
        non_existing_instance_id = "instance-id-2"
        alt_instance_1 = self._make_one(self.INSTANCE_ID, client)
        alt_instance_2 = self._make_one(non_existing_instance_id, client)
        self.assertTrue(alt_instance_1.exists())
        self.assertFalse(alt_instance_2.exists())
        with self.assertRaises(exceptions.BadRequest):
            alt_instance_2.exists()

    def test_reload(self):
        """reload() refreshes the local fields from the server response."""
        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
        from google.cloud.bigtable import enums

        api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )
        instance = self._make_one(self.INSTANCE_ID, client)

        # Create response_pb
        DISPLAY_NAME = u"hey-hi-hello"
        instance_type = enums.Instance.Type.PRODUCTION
        response_pb = data_v2_pb2.Instance(
            display_name=DISPLAY_NAME, type=instance_type, labels=self.LABELS
        )

        # Patch the stub used by the API method.
        client._instance_admin_client = api
        bigtable_instance_stub = client._instance_admin_client.transport
        bigtable_instance_stub.get_instance.side_effect = [response_pb]

        # Create expected_result.
        expected_result = None  # reload() has no return value.

        # Check Instance optional config values before.
        self.assertEqual(instance.display_name, self.INSTANCE_ID)

        # Perform the method and check the result.
        result = instance.reload()
        self.assertEqual(result, expected_result)

        # Check Instance optional config values after.
        self.assertEqual(instance.display_name, DISPLAY_NAME)

    def _instance_api_response_for_update(self):
        """Build a mocked API whose partial_update_instance returns an LRO."""
        import datetime
        from google.api_core import operation
        from google.longrunning import operations_pb2
        from google.protobuf.any_pb2 import Any
        from google.cloud._helpers import _datetime_to_pb_timestamp
        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
        from google.cloud.bigtable_admin_v2.proto import (
            bigtable_instance_admin_pb2 as messages_v2_pb2,
        )
        from google.cloud.bigtable_admin_v2.types import instance_pb2

        NOW = datetime.datetime.utcnow()
        NOW_PB = _datetime_to_pb_timestamp(NOW)
        metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB)
        type_url = "type.googleapis.com/{}".format(
            messages_v2_pb2.UpdateInstanceMetadata.DESCRIPTOR.full_name
        )
        response_pb = operations_pb2.Operation(
            name=self.OP_NAME,
            metadata=Any(type_url=type_url, value=metadata.SerializeToString()),
        )
        response = operation.from_gapic(
            response_pb,
            mock.Mock(),
            instance_pb2.Instance,
            metadata_type=messages_v2_pb2.UpdateInstanceMetadata,
        )
        instance_path_template = "projects/{project}/instances/{instance}"
        instance_api = mock.create_autospec(
            bigtable_instance_admin_client.BigtableInstanceAdminClient
        )
        instance_api.partial_update_instance.return_value = response
        instance_api.instance_path = instance_path_template.format
        return instance_api, response

    def test_update(self):
        """update() sends a field mask covering display_name/type/labels."""
        from google.cloud.bigtable import enums
        from google.protobuf import field_mask_pb2
        from google.cloud.bigtable_admin_v2.types import instance_pb2

        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )
        instance = self._make_one(
            self.INSTANCE_ID,
            client,
            display_name=self.DISPLAY_NAME,
            instance_type=enums.Instance.Type.DEVELOPMENT,
            labels=self.LABELS,
        )
        instance_api, response = self._instance_api_response_for_update()
        client._instance_admin_client = instance_api

        result = instance.update()

        instance_pb = instance_pb2.Instance(
            name=instance.name,
            display_name=instance.display_name,
            type=instance.type_,
            labels=instance.labels,
        )
        update_mask_pb = field_mask_pb2.FieldMask(
            paths=["display_name", "type", "labels"]
        )
        instance_api.partial_update_instance.assert_called_once_with(
            instance=instance_pb, update_mask=update_mask_pb
        )
        self.assertIs(result, response)

    def test_update_empty(self):
        """update() on an unconfigured instance sends an empty field mask."""
        from google.protobuf import field_mask_pb2
        from google.cloud.bigtable_admin_v2.types import instance_pb2

        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )
        instance = self._make_one(None, client)
        instance_api, response = self._instance_api_response_for_update()
        client._instance_admin_client = instance_api

        result = instance.update()

        instance_pb = instance_pb2.Instance(
            name=instance.name,
            display_name=instance.display_name,
            type=instance.type_,
            labels=instance.labels,
        )
        update_mask_pb = field_mask_pb2.FieldMask()
        instance_api.partial_update_instance.assert_called_once_with(
            instance=instance_pb, update_mask=update_mask_pb
        )
        self.assertIs(result, response)

    def test_delete(self):
        """delete() calls the API with the instance name and returns None."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client

        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )
        instance = self._make_one(self.INSTANCE_ID, client)
        instance_api = mock.create_autospec(
            bigtable_instance_admin_client.BigtableInstanceAdminClient
        )
        instance_api.delete_instance.return_value = None
        client._instance_admin_client = instance_api

        result = instance.delete()

        instance_api.delete_instance.assert_called_once_with(instance.name)
        self.assertIsNone(result)

    def test_get_iam_policy(self):
        """get_iam_policy() wraps the returned protobuf in a Policy."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
        from google.iam.v1 import policy_pb2
        from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE

        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )
        instance = self._make_one(self.INSTANCE_ID, client)

        version = 1
        etag = b"etag_v1"
        members = ["serviceAccount:<EMAIL>", "user:<EMAIL>"]
        bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
        iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)

        # Patch the stub used by the API method.
        instance_api = mock.create_autospec(
            bigtable_instance_admin_client.BigtableInstanceAdminClient
        )
        client._instance_admin_client = instance_api
        instance_api.get_iam_policy.return_value = iam_policy

        # Perform the method and check the result.
        result = instance.get_iam_policy()

        instance_api.get_iam_policy.assert_called_once_with(resource=instance.name)
        self.assertEqual(result.version, version)
        self.assertEqual(result.etag, etag)
        admins = result.bigtable_admins
        self.assertEqual(len(admins), len(members))
        for found, expected in zip(sorted(admins), sorted(members)):
            self.assertEqual(found, expected)

    def test_set_iam_policy(self):
        """set_iam_policy() serializes the Policy and wraps the response."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
        from google.iam.v1 import policy_pb2
        from google.cloud.bigtable.policy import Policy
        from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE

        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )
        instance = self._make_one(self.INSTANCE_ID, client)

        version = 1
        etag = b"etag_v1"
        members = ["serviceAccount:<EMAIL>", "user:<EMAIL>"]
        bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
        iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)

        # Patch the stub used by the API method.
        instance_api = mock.create_autospec(
            bigtable_instance_admin_client.BigtableInstanceAdminClient
        )
        instance_api.set_iam_policy.return_value = iam_policy_pb
        client._instance_admin_client = instance_api

        # Perform the method and check the result.
        iam_policy = Policy(etag=etag, version=version)
        iam_policy[BIGTABLE_ADMIN_ROLE] = [
            Policy.user("<EMAIL>"),
            Policy.service_account("<EMAIL>"),
        ]

        result = instance.set_iam_policy(iam_policy)

        instance_api.set_iam_policy.assert_called_once_with(
            resource=instance.name,
            policy={"version": version, "etag": etag, "bindings": bindings},
        )
        self.assertEqual(result.version, version)
        self.assertEqual(result.etag, etag)
        admins = result.bigtable_admins
        self.assertEqual(len(admins), len(members))
        for found, expected in zip(sorted(admins), sorted(members)):
            self.assertEqual(found, expected)

    def test_test_iam_permissions(self):
        """test_iam_permissions() forwards and returns the permission list."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
        from google.iam.v1 import iam_policy_pb2

        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )
        instance = self._make_one(self.INSTANCE_ID, client)

        permissions = ["bigtable.tables.create", "bigtable.clusters.create"]

        response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)

        instance_api = mock.create_autospec(
            bigtable_instance_admin_client.BigtableInstanceAdminClient
        )
        instance_api.test_iam_permissions.return_value = response
        client._instance_admin_client = instance_api

        result = instance.test_iam_permissions(permissions)

        self.assertEqual(result, permissions)
        instance_api.test_iam_permissions.assert_called_once_with(
            resource=instance.name, permissions=permissions
        )

    def test_cluster_factory(self):
        """cluster() builds a Cluster bound to this instance."""
        from google.cloud.bigtable import enums

        CLUSTER_ID = "{}-cluster".format(self.INSTANCE_ID)
        LOCATION_ID = "us-central1-c"
        SERVE_NODES = 3
        STORAGE_TYPE = enums.StorageType.HDD

        instance = self._make_one(self.INSTANCE_ID, None)

        cluster = instance.cluster(
            CLUSTER_ID,
            location_id=LOCATION_ID,
            serve_nodes=SERVE_NODES,
            default_storage_type=STORAGE_TYPE,
        )
        self.assertIsInstance(cluster, Cluster)
        self.assertEqual(cluster.cluster_id, CLUSTER_ID)
        self.assertEqual(cluster.location_id, LOCATION_ID)
        self.assertIsNone(cluster._state)
        self.assertEqual(cluster.serve_nodes, SERVE_NODES)
        self.assertEqual(cluster.default_storage_type, STORAGE_TYPE)

    def test_list_clusters(self):
        """list_clusters() returns Cluster objects plus failed locations."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
        from google.cloud.bigtable_admin_v2.proto import (
            bigtable_instance_admin_pb2 as messages_v2_pb2,
        )
        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
        from google.cloud.bigtable.instance import Instance
        from google.cloud.bigtable.instance import Cluster

        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )
        instance = Instance(self.INSTANCE_ID, client)

        failed_location = "FAILED"
        cluster_id1 = "cluster-id1"
        cluster_id2 = "cluster-id2"
        cluster_path_template = "projects/{}/instances/{}/clusters/{}"
        cluster_name1 = cluster_path_template.format(
            self.PROJECT, self.INSTANCE_ID, cluster_id1
        )
        cluster_name2 = cluster_path_template.format(
            self.PROJECT, self.INSTANCE_ID, cluster_id2
        )

        # Create response_pb
        response_pb = messages_v2_pb2.ListClustersResponse(
            failed_locations=[failed_location],
            clusters=[
                data_v2_pb2.Cluster(name=cluster_name1),
                data_v2_pb2.Cluster(name=cluster_name2),
            ],
        )

        # Patch the stub used by the API method.
        instance_api = mock.create_autospec(
            bigtable_instance_admin_client.BigtableInstanceAdminClient
        )
        instance_api.list_clusters.side_effect = [response_pb]
        instance_api.cluster_path = cluster_path_template.format
        client._instance_admin_client = instance_api

        # Perform the method and check the result.
        clusters, failed_locations = instance.list_clusters()

        cluster_1, cluster_2 = clusters

        self.assertIsInstance(cluster_1, Cluster)
        self.assertEqual(cluster_1.name, cluster_name1)

        self.assertIsInstance(cluster_2, Cluster)
        self.assertEqual(cluster_2.name, cluster_name2)

        self.assertEqual(failed_locations, [failed_location])

    def test_table_factory(self):
        """table() builds a Table bound to this instance and app profile."""
        from google.cloud.bigtable.table import Table

        app_profile_id = "appProfileId1262094415"
        instance = self._make_one(self.INSTANCE_ID, None)

        table = instance.table(self.TABLE_ID, app_profile_id=app_profile_id)
        self.assertIsInstance(table, Table)
        self.assertEqual(table.table_id, self.TABLE_ID)
        self.assertEqual(table._instance, instance)
        self.assertEqual(table._app_profile_id, app_profile_id)

    def _list_tables_helper(self, table_name=None):
        """Exercise list_tables() against a mocked single-table response."""
        from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2
        from google.cloud.bigtable_admin_v2.proto import (
            bigtable_table_admin_pb2 as table_messages_v1_pb2,
        )
        from google.cloud.bigtable_admin_v2.gapic import (
            bigtable_table_admin_client,
            bigtable_instance_admin_client,
        )

        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
            mock.Mock()
        )
        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )
        instance = self._make_one(self.INSTANCE_ID, client)

        # Create response_pb
        if table_name is None:
            table_name = self.TABLE_NAME

        response_pb = table_messages_v1_pb2.ListTablesResponse(
            tables=[table_data_v2_pb2.Table(name=table_name)]
        )

        # Patch the stub used by the API method.
        client._table_admin_client = table_api
        client._instance_admin_client = instance_api
        bigtable_table_stub = client._table_admin_client.transport
        bigtable_table_stub.list_tables.side_effect = [response_pb]

        # Create expected_result.
        expected_table = instance.table(self.TABLE_ID)
        expected_result = [expected_table]

        # Perform the method and check the result.
        result = instance.list_tables()
        self.assertEqual(result, expected_result)

    def test_list_tables(self):
        """list_tables() succeeds for a well-formed table name."""
        self._list_tables_helper()

    def test_list_tables_failure_bad_split(self):
        """list_tables() rejects a table name that is not a resource path."""
        with self.assertRaises(ValueError):
            self._list_tables_helper(table_name="wrong-format")

    def test_list_tables_failure_name_bad_before(self):
        """list_tables() rejects a resource path with a bogus prefix."""
        BAD_TABLE_NAME = (
            "nonempty-section-before"
            + "projects/"
            + self.PROJECT
            + "/instances/"
            + self.INSTANCE_ID
            + "/tables/"
            + self.TABLE_ID
        )
        with self.assertRaises(ValueError):
            self._list_tables_helper(table_name=BAD_TABLE_NAME)

    def test_app_profile_factory(self):
        """app_profile() builds AppProfiles with the given routing policies."""
        from google.cloud.bigtable.enums import RoutingPolicyType

        APP_PROFILE_ID_1 = "app-profile-id-1"
        ANY = RoutingPolicyType.ANY
        DESCRIPTION_1 = "routing policy any"

        APP_PROFILE_ID_2 = "app-profile-id-2"
        SINGLE = RoutingPolicyType.SINGLE
        DESCRIPTION_2 = "routing policy single"
        ALLOW_WRITES = True
        CLUSTER_ID = "cluster-id"

        instance = self._make_one(self.INSTANCE_ID, None)

        app_profile1 = instance.app_profile(
            APP_PROFILE_ID_1, routing_policy_type=ANY, description=DESCRIPTION_1
        )
        app_profile2 = instance.app_profile(
            APP_PROFILE_ID_2,
            routing_policy_type=SINGLE,
            description=DESCRIPTION_2,
            cluster_id=CLUSTER_ID,
            allow_transactional_writes=ALLOW_WRITES,
        )
        self.assertEqual(app_profile1.app_profile_id, APP_PROFILE_ID_1)
        self.assertIs(app_profile1._instance, instance)
        self.assertEqual(app_profile1.routing_policy_type, ANY)
        self.assertEqual(app_profile1.description, DESCRIPTION_1)
        self.assertEqual(app_profile2.app_profile_id, APP_PROFILE_ID_2)
        self.assertIs(app_profile2._instance, instance)
        self.assertEqual(app_profile2.routing_policy_type, SINGLE)
        self.assertEqual(app_profile2.description, DESCRIPTION_2)
        self.assertEqual(app_profile2.cluster_id, CLUSTER_ID)
        self.assertEqual(app_profile2.allow_transactional_writes, ALLOW_WRITES)

    def test_list_app_profiles(self):
        """list_app_profiles() unwraps a paged iterator into AppProfiles."""
        from google.api_core.page_iterator import Iterator
        from google.api_core.page_iterator import Page
        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
        from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
        from google.cloud.bigtable.app_profile import AppProfile

        class _Iterator(Iterator):
            """Minimal page iterator serving a fixed list of pages."""

            def __init__(self, pages):
                super(_Iterator, self).__init__(client=None)
                self._pages = pages

            def _next_page(self):
                if self._pages:
                    page, self._pages = self._pages[0], self._pages[1:]
                    return Page(self, page, self.item_to_value)

        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT, credentials=credentials, admin=True
        )
        instance = self._make_one(self.INSTANCE_ID, client)

        # Setup Expected Response
        app_profile_path_template = "projects/{}/instances/{}/appProfiles/{}"
        app_profile_id1 = "app-profile-id1"
        app_profile_id2 = "app-profile-id2"
        app_profile_name1 = app_profile_path_template.format(
            self.PROJECT, self.INSTANCE_ID, app_profile_id1
        )
        app_profile_name2 = app_profile_path_template.format(
            self.PROJECT, self.INSTANCE_ID, app_profile_id2
        )
        routing_policy = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny()

        app_profiles = [
            data_v2_pb2.AppProfile(
                name=app_profile_name1, multi_cluster_routing_use_any=routing_policy
            ),
            data_v2_pb2.AppProfile(
                name=app_profile_name2, multi_cluster_routing_use_any=routing_policy
            ),
        ]
        iterator = _Iterator(pages=[app_profiles])

        # Patch the stub used by the API method.
        instance_api = mock.create_autospec(
            bigtable_instance_admin_client.BigtableInstanceAdminClient
        )
        client._instance_admin_client = instance_api
        instance_api.app_profile_path = app_profile_path_template.format
        instance_api.list_app_profiles.return_value = iterator

        # Perform the method and check the result.
        app_profiles = instance.list_app_profiles()

        app_profile_1, app_profile_2 = app_profiles

        self.assertIsInstance(app_profile_1, AppProfile)
        self.assertEqual(app_profile_1.name, app_profile_name1)

        self.assertIsInstance(app_profile_2, AppProfile)
        self.assertEqual(app_profile_2.name, app_profile_name2)
"""Configuration model for CSV plotting.

Parses plain mapping objects (e.g. loaded from YAML/JSON) into typed
configuration classes: ``PlotConfig`` -> ``SubplotConfig`` -> ``ColumnConfig``.
"""
from .utils import Range


def _get_or_default(cfg_obj, key, default=None, conv=None):
    """Return ``cfg_obj[key]`` (converted via ``conv``), or ``default``.

    The default is used both when the key is absent and when its value is
    explicitly ``None``.
    """
    if key not in cfg_obj or cfg_obj[key] is None:
        return default
    v = cfg_obj[key]
    if conv is not None:
        return conv(v)
    return v


def _assign_range(rng, obj):
    """Fill ``rng.start``/``rng.end`` from a 0-, 1- or 2-element sequence.

    Missing elements are set to ``None`` (open-ended range); extra elements
    beyond the second are ignored.
    """
    if len(obj) == 0:
        rng.start = None
        rng.end = None
    elif len(obj) == 1:
        rng.start = obj[0]
        rng.end = None
    else:
        rng.start = obj[0]
        rng.end = obj[1]


class PlotConfig(object):
    """Top-level plot configuration: files, x-range sharing and subplots."""

    def __init__(self):
        self.input_file = None
        self.output_file = None
        self.range = Range()
        self.share_x_axis = True
        self.subplots = []

    @classmethod
    def from_obj(cls, cfg_obj):
        """Build a ``PlotConfig`` from a plain mapping."""
        plot_cfg = cls()
        plot_cfg.input_file = _get_or_default(cfg_obj, 'input_file', conv=str)
        plot_cfg.output_file = _get_or_default(cfg_obj, 'output_file', conv=str)
        plot_cfg.range.divider = _get_or_default(cfg_obj, 'divider', 1, conv=int)
        plot_cfg.share_x_axis = _get_or_default(cfg_obj, 'share_x_axis', True,
                                                conv=bool)
        _assign_range(plot_cfg.range,
                      _get_or_default(cfg_obj, 'xlim', [None, None], conv=list))

        for p in _get_or_default(cfg_obj, 'plots', []):
            plot_cfg.add_subplot(SubplotConfig.from_obj(p))
        return plot_cfg

    def add_subplot(self, subplot_cfg):
        """Append a ``SubplotConfig`` to this plot."""
        self.subplots.append(subplot_cfg)

    def __repr__(self):
        return f'PlotConfig{{input_file={self.input_file!r}, output_file={self.output_file!r}, range={self.range!r}, subplots={self.subplots!r}}}'


class SubplotConfig(object):
    """Configuration for one subplot: axis labels, y-limits and columns."""

    def __init__(self):
        self.title = None
        self.xlabel = 'X'
        self.ylabel = 'Y'
        self.ylim = Range()
        self.alt_ylim = Range()
        self.alt_ylabel = None
        self.columns = []

    @classmethod
    def from_obj(cls, cfg_obj):
        """Build a ``SubplotConfig`` from a plain mapping."""
        subplot_cfg = cls()
        subplot_cfg.title = _get_or_default(cfg_obj, 'title', conv=str)
        subplot_cfg.xlabel = _get_or_default(cfg_obj, 'xlabel', 'X', conv=str)
        subplot_cfg.ylabel = _get_or_default(cfg_obj, 'ylabel', 'Y', conv=str)
        subplot_cfg.alt_ylabel = _get_or_default(cfg_obj, 'alt_ylabel', conv=str)
        _assign_range(subplot_cfg.ylim,
                      _get_or_default(cfg_obj, 'ylim', [None, None], conv=list))
        _assign_range(subplot_cfg.alt_ylim,
                      _get_or_default(cfg_obj, 'alt_ylim', [None, None],
                                      conv=list))

        for c in _get_or_default(cfg_obj, 'columns', []):
            subplot_cfg.add_column(ColumnConfig.from_obj(c))
        return subplot_cfg

    def add_column(self, column_cfg):
        """Append a ``ColumnConfig`` to this subplot."""
        self.columns.append(column_cfg)

    def __repr__(self):
        return f'SubplotConfig{{columns={self.columns!r}}}'


class ColumnConfig(object):
    """One data column: CSV column name, legend label and y-axis side."""

    def __init__(self, name=None, label=None, alt_y_axis=False):
        self.name = name
        self.alt_y_axis = alt_y_axis
        # A missing label falls back to the column name.
        self.label = label if label is not None else name

    @classmethod
    def from_obj(cls, cfg_obj):
        """Build a ``ColumnConfig`` from a plain mapping.

        FIX: the previous implementation assigned the attributes directly on
        a default-constructed instance, bypassing ``__init__`` and therefore
        losing the "label defaults to name" fallback — a config providing
        only ``name`` ended up with ``label=None``.  Constructing through
        ``cls(...)`` restores the documented default.
        """
        return cls(
            name=_get_or_default(cfg_obj, 'name', conv=str),
            label=_get_or_default(cfg_obj, 'label', conv=str),
            alt_y_axis=_get_or_default(cfg_obj, 'alt_y_axis', False, conv=bool),
        )

    def __repr__(self):
        return f'ColumnConfig{{name={self.name!r}, label={self.label!r}, alt_y_axis={self.alt_y_axis!r}}}'
"""DenseNet training/evaluation on CIFAR-10.

Ref https://github.com/bamos/densenet.pytorch
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable

import torchvision.models as models
import torchvision.datasets as dset
import torchvision.transforms as transforms
from torchvision.utils import save_image

from torch.utils.data import DataLoader

import math


class Bottleneck(nn.Module):
    """DenseNet-B layer: BN-ReLU-1x1conv -> BN-ReLU-3x3conv, concat with input."""

    def __init__(self, nChannels, growthRate):
        super(Bottleneck, self).__init__()
        interChannels = 4 * growthRate  # bottleneck width per the paper
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.Conv2d(nChannels, interChannels, kernel_size=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(interChannels)
        self.conv2 = nn.Conv2d(interChannels, growthRate, kernel_size=3,
                               padding=1, bias=False)

    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x)))
        out = self.conv2(F.relu(self.bn2(out)))
        # Dense connectivity: append the new feature maps to the input.
        out = torch.cat((x, out), 1)
        return out


class SingleLayer(nn.Module):
    """Plain DenseNet layer: BN-ReLU-3x3conv, concat with input."""

    def __init__(self, nChannels, growthRate):
        super(SingleLayer, self).__init__()
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.Conv2d(nChannels, growthRate, kernel_size=3,
                               padding=1, bias=False)

    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x)))
        out = torch.cat((x, out), 1)
        return out


class Transition(nn.Module):
    """Transition between dense blocks: 1x1 conv compression + 2x2 avg-pool."""

    def __init__(self, nChannels, nOutChannels):
        super(Transition, self).__init__()
        self.bn1 = nn.BatchNorm2d(nChannels)
        self.conv1 = nn.Conv2d(nChannels, nOutChannels, kernel_size=1,
                               bias=False)

    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x)))
        out = F.avg_pool2d(out, 2)
        return out


class DenseNet(nn.Module):
    """DenseNet(-BC) for 32x32 inputs: three dense blocks with transitions.

    Args:
        growthRate: feature maps added per layer (k in the paper).
        depth: total depth; layers per block = (depth - 4) / 3 (halved for
            bottleneck layers, which count double).
        reduction: compression factor of the transition layers (theta).
        nClasses: number of output classes.
        bottleneck: use Bottleneck layers (DenseNet-B) instead of SingleLayer.
    """

    def __init__(self, growthRate, depth, reduction, nClasses, bottleneck):
        super(DenseNet, self).__init__()

        nDenseBlocks = (depth - 4) // 3
        if bottleneck:
            # Each bottleneck layer has two convolutions.
            nDenseBlocks //= 2

        nChannels = 2 * growthRate
        self.conv1 = nn.Conv2d(3, nChannels, kernel_size=3, padding=1,
                               bias=False)

        self.dense1 = self._make_dense(nChannels, growthRate, nDenseBlocks,
                                       bottleneck)
        nChannels += nDenseBlocks * growthRate
        nOutChannels = int(math.floor(nChannels * reduction))
        self.trans1 = Transition(nChannels, nOutChannels)

        nChannels = nOutChannels
        self.dense2 = self._make_dense(nChannels, growthRate, nDenseBlocks,
                                       bottleneck)
        nChannels += nDenseBlocks * growthRate
        nOutChannels = int(math.floor(nChannels * reduction))
        self.trans2 = Transition(nChannels, nOutChannels)

        nChannels = nOutChannels
        self.dense3 = self._make_dense(nChannels, growthRate, nDenseBlocks,
                                       bottleneck)
        nChannels += nDenseBlocks * growthRate

        self.bn1 = nn.BatchNorm2d(nChannels)
        self.fc = nn.Linear(nChannels, nClasses)

        # He-style init for convolutions; unit/zero init for BN; zero bias.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def _make_dense(self, nChannels, growthRate, nDenseBlocks, bottleneck):
        """Stack nDenseBlocks densely-connected layers into a Sequential."""
        layers = []
        for i in range(int(nDenseBlocks)):
            if bottleneck:
                layers.append(Bottleneck(nChannels, growthRate))
            else:
                layers.append(SingleLayer(nChannels, growthRate))
            nChannels += growthRate
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.dense3(out)
        # Global 8x8 average pool (32 / 2 / 2 = 8 spatial) -> classifier.
        out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
        out = F.log_softmax(self.fc(out), dim=1)
        return out


def train(args, epoch, net, trainLoader, optimizer, logger=None, show=False):
    """Train one epoch; report per-batch loss/error to stdout and/or logger.

    NOTE(review): when neither `show` nor `logger` is set the whole epoch is
    skipped, not just the reporting — preserved from the original code.
    """
    if (not show) and (logger is None):
        return
    net.train()  # tells net to do training
    nProcessed = 0
    nTrain = len(trainLoader.dataset)
    for batch_idx, (data, target) in enumerate(trainLoader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = net(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        nProcessed += len(data)
        pred = output.data.max(1)[1]  # index of the max log-probability
        # .item(): make the count a Python int so the error is a true float
        # percentage rather than a 0-dim (integer) tensor.
        incorrect = pred.ne(target.data).cpu().sum().item()
        err = 100. * incorrect / len(data)
        partialEpoch = epoch + batch_idx / len(trainLoader) - 1
        if show:
            print('Train Epoch: {:.2f} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tError: {:.6f}'.format(
                partialEpoch, nProcessed, nTrain,
                100. * batch_idx / len(trainLoader), loss.item(), err))
        if logger is not None:
            logger.write('{},{},{}\n'.format(partialEpoch, loss.item(), err))
            logger.flush()


def test(args, epoch, net, testLoader, optimizer, logger=None, show=True):
    """Evaluate on the test set; report average loss and error rate."""
    if (not show) and (logger is None):
        return
    net.eval()  # tells net to do evaluating
    test_loss = 0
    incorrect = 0
    for data, target in testLoader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        with torch.no_grad():
            output = net(data)
            test_loss += F.nll_loss(output, target).item()
            pred = output.data.max(1)[1]  # index of the max log-probability
            incorrect += pred.ne(target.data).cpu().sum().item()

    test_loss /= len(testLoader)  # loss function already averages over batch size
    nTotal = len(testLoader.dataset)
    err = 100. * incorrect / nTotal
    if show:
        print('\nTest set: Average loss: {:.4f}, Error: {}/{} ({:.0f}%)\n'.format(
            test_loss, incorrect, nTotal, err))
    if logger is not None:
        logger.write('{},{},{}\n'.format(epoch, test_loss, err))
        logger.flush()


def adjust_opt(optAlg, optimizer, epoch):
    """Step-decay schedule for SGD: lr drops x10 at epochs 150 and 225."""
    if optAlg == 'sgd':
        if epoch < 150:
            lr = 1e-1
        elif epoch == 150:
            lr = 1e-2
        elif epoch == 225:
            lr = 1e-3
        else:
            return  # keep the current lr between the decay points

        for param_group in optimizer.param_groups:
            param_group['lr'] = lr


if __name__ == '__main__':
    import argparse
    import setproctitle
    import os
    import shutil

    """argparse"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchSz', type=int, default=128)
    parser.add_argument('--nEpochs', type=int, default=300)
    parser.add_argument('--no-cuda', action='store_true')
    parser.add_argument('--path')
    parser.add_argument('--no-load', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--opt', type=str, default='sgd',
                        choices=('sgd', 'adam', 'rmsprop'))
    args = parser.parse_args()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    args.path = args.path or 'data/base'
    setproctitle.setproctitle(args.path)

    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    os.makedirs(args.path, exist_ok=True)

    """normalization
    # Ref: https://github.com/bamos/densenet.pytorch
    """
    normMean = [0.49139968, 0.48215827, 0.44653124]
    normStd = [0.24703233, 0.24348505, 0.26158768]
    normTransform = transforms.Normalize(normMean, normStd)
    trainTransform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normTransform
    ])
    testTransform = transforms.Compose([
        transforms.ToTensor(),
        normTransform
    ])

    """data"""
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    trainLoader = DataLoader(
        dset.CIFAR10(root='cifar', train=True, download=True,
                     transform=trainTransform),
        batch_size=args.batchSz, shuffle=True, **kwargs)
    testLoader = DataLoader(
        dset.CIFAR10(root='cifar', train=False, download=True,
                     transform=testTransform),
        batch_size=args.batchSz, shuffle=False, **kwargs)

    """net"""
    net = DenseNet(growthRate=12, depth=100, reduction=0.5,
                   bottleneck=True, nClasses=10)

    print('>>> Number of params: {}'.format(
        sum([p.data.nelement() for p in net.parameters()])))
    if args.cuda:
        if torch.cuda.device_count() > 1:
            net = nn.DataParallel(net)
        net = net.cuda()

    # Load the checkpoint BEFORE constructing the optimizer.
    # FIX: the original built the optimizer from the freshly created net and
    # only then rebound `net` to the loaded checkpoint, so the optimizer kept
    # stepping the parameters of the discarded network while gradients
    # accumulated on the loaded one — resumed training never made progress.
    if not args.no_load:
        path_and_file = os.path.join(args.path, 'latest.pth')
        if os.path.isfile(path_and_file):
            print(">>> Load weights:", path_and_file)
            net = torch.load(path_and_file)
        else:
            print(">>> No pre-trained weights")

    if args.opt == 'sgd':
        optimizer = optim.SGD(net.parameters(), lr=1e-1, momentum=0.9)  # , weight_decay=1e-4)
    elif args.opt == 'adam':
        optimizer = optim.Adam(net.parameters())  # , weight_decay=1e-4)
    elif args.opt == 'rmsprop':
        optimizer = optim.RMSprop(net.parameters())  # , weight_decay=1e-4)

    # log files
    trainF = open(os.path.join(args.path, 'train.csv'), 'w')
    testF = open(os.path.join(args.path, 'test.csv'), 'w')

    """train and test"""
    for epoch in range(1, args.nEpochs + 1):
        adjust_opt(args.opt, optimizer, epoch)
        train(args, epoch, net, trainLoader, optimizer, show=True, logger=trainF)
        test(args, epoch, net, testLoader, optimizer, show=True, logger=testF)

        # save full model each epoch so interrupted runs can resume
        torch.save(net, os.path.join(args.path, 'latest.pth'))

    trainF.close()
    testF.close()
#! /usr/bin/env python3
# Nov-1-2018
# MD5 auto check on server
#
# Verifies the md5 checksum of every fastq file in the current directory
# (and one level of subdirectories) against the vendor-provided md5 listing
# files, using a pool of four worker threads.

import os, sys
import subprocess
import threading
# FIX: distutils was removed in Python 3.12; shutil.which is the stdlib
# replacement for distutils.spawn.find_executable (available since 3.3).
from shutil import which as find_executable


def plotform_check():
    """Return the platform's md5 executable (md5 on macOS, md5sum on Linux).

    Exits the interpreter when the OS is unsupported (code 2) or when no
    checksum executable is found (code 3).
    """
    operation_system = sys.platform
    if operation_system == "darwin":
        checksumCMD = find_executable("md5")
    elif operation_system.startswith("linux"):
        checksumCMD = find_executable("md5sum")
    else:
        sys.stdout.write("Do not support operation system except LINUX or macOS, exit!\n")
        sys.exit(2)
    if not checksumCMD:
        sys.stdout.write("Can not find executable md5/md5sum, exit!\n")
        sys.exit(3)
    return checksumCMD


def parse_md5_result(result):
    """Parse one line of md5 output into ``(basename, md5hex)``.

    Handles both GNU md5sum ("<md5>  <path>", 2 fields) and BSD md5
    ("MD5 (<path>) = <md5>", 4 fields).  Unknown formats are reported and
    yield ``('', '')``.
    """
    global lock
    line = result.strip().split()
    name, md5 = '', ''
    if len(line) == 2:
        md5, name = line
    elif len(line) == 4:
        _, name, _, md5 = line
        name = name[1:-1]  # strip the surrounding parentheses
    else:
        lock.acquire()
        sys.stdout.write('Undetermined md5checkoutput format: {}.\n'.format('\t'.join(line)))
        lock.release()
    return (name.split('/')[-1], md5)


def md5_compare(name, md5, sampleMD5):
    """Report whether the observed md5 matches the expected one for ``name``."""
    lock.acquire()
    try:
        if md5 == sampleMD5[name]:
            sys.stdout.write('Sample: {} is valid.\n'.format(name))
        else:
            sys.stdout.write('Sample: {} is not valid. Expect md5: {}. Observed md5: {}.\n'.format(name, sampleMD5[name], md5))
    except KeyError:
        sys.stdout.write('Sample: {} is not valid. Can not find expect md5 information. The observed md5 is: {}.\n'.format(name, md5))
    lock.release()


def get_md5():
    """Collect expected checksums from md5 listing files -> {basename: md5}."""
    sp = subprocess.Popen(
        '{command} {args}'.format(
            command='cat',
            args='*md5*txt *md5 MD5* */*md5*txt */*md5 */MD5*'),
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True,
        encoding='utf8')
    out, err = sp.communicate()
    sampleMD5 = dict(map(parse_md5_result, out.strip().split("\n")))
    return (sampleMD5)


def check(checksumCMD):
    """Worker loop: claim the next fastq file, checksum it, compare.

    FIX: the original released the lock right after ``index += 1`` and then
    read ``fastq_files[index - 1]`` through the *global* ``index``, so two
    workers could both increment before either read it — one file was then
    checked twice and another skipped.  The claimed slot is now captured in
    a local variable while the lock is still held.
    """
    global lock
    global failed_samples
    global index
    global fastq_files
    global sampleMD5
    while True:
        lock.acquire()
        if index < len(fastq_files):
            my_index = index  # claim this slot while the lock is held
            index += 1
            lock.release()
            sp = subprocess.Popen(
                '{command} {args}'.format(command=checksumCMD,
                                          args=fastq_files[my_index]),
                stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True,
                encoding='utf8')
            out, err = sp.communicate()
            name, md5 = parse_md5_result(out)
            md5_compare(name, md5, sampleMD5)
        else:
            lock.release()
            break


lock = threading.Lock()
checksumCMD = plotform_check()
sampleMD5 = get_md5()
sp = subprocess.Popen(" ".join(["ls", "*/*fastq.gz", "*fastq.gz", "*/*fq.gz", "*fq.gz"]),
                      stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True,
                      encoding='utf8')
out, err = sp.communicate()
fastq_files = out.strip().split()
index = 0
failed_samples = {}
for i in range(4):
    new_thread = threading.Thread(target=check, args=(checksumCMD,))
    new_thread.start()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Unit tests for flatstar.draw: limb-darkening laws, resampling, transits.

Each test checks that the drawn stellar intensity map integrates to 1 within
REQUIRED_INTENSITY_PRECISION (flatstar normalizes total flux).
"""

import numpy as np
from flatstar import draw

REQUIRED_INTENSITY_PRECISION = 1E-6

IMPLEMENTED_LD_LAWS = ["linear", "quadratic", "square-root", "log", "exp",
                       "sing", "claret"]
N_LAWS = len(IMPLEMENTED_LD_LAWS)
# One random coefficient set per law; sizes match each law's arity
# (linear: 1, sing: 3, claret: 4, the rest: 2).
TEST_COEFFICIENTS = [np.random.random(),
                     np.random.random(size=2),
                     np.random.random(size=2),
                     np.random.random(size=2),
                     np.random.random(size=2),
                     np.random.random(size=3),
                     np.random.random(size=4)]
IMPLEMENTED_SAMPLERS = ["nearest", "box", "bilinear", "hamming", "bicubic",
                        "lanczos"]
N_SAMPLERS = len(IMPLEMENTED_SAMPLERS)


# Test each limb-darkening law
def test_ld_laws(grid_size=101):
    """Every built-in law must yield a unit-normalized intensity map."""
    # (Removed dead leftovers `did_it_work`/`exception_in` — residue of a
    # pre-pytest version of this test.)
    for i in range(N_LAWS):
        star = draw.star(grid_size,
                         limb_darkening_law=IMPLEMENTED_LD_LAWS[i],
                         ld_coefficient=TEST_COEFFICIENTS[i])
        total_intensity = np.sum(star.intensity)
        obtained_precision = abs(1.0 - total_intensity)
        assert (obtained_precision < REQUIRED_INTENSITY_PRECISION)


# Test a custom limb-darkening law
def test_custom_ld(grid_size=200):
    """A user-supplied limb-darkening callable must also be normalized."""

    # Let's come up with a semi-arbitrary LD law here
    def custom_ld(mu, c, i0=1.0):
        c1, c2 = c
        attenuation = 1 - c1 * (1 - mu ** 0.9) - c2 * (1 - mu ** (3 / 2))
        i_mu = i0 * attenuation
        return i_mu

    star = draw.star(grid_size, limb_darkening_law='custom',
                     ld_coefficient=TEST_COEFFICIENTS[1],
                     custom_limb_darkening=custom_ld)
    total_intensity = np.sum(star.intensity)
    obtained_precision = abs(1.0 - total_intensity)
    assert (obtained_precision < REQUIRED_INTENSITY_PRECISION)


# Test no limb-darkening law
def test_no_ld(grid_size=200):
    """A star with no limb darkening must still be normalized."""
    star = draw.star(grid_size, limb_darkening_law=None)
    total_intensity = np.sum(star.intensity)
    obtained_precision = abs(1.0 - total_intensity)
    assert (obtained_precision < REQUIRED_INTENSITY_PRECISION)


# Test the supersampling and the resampling (aka downsampling)
# NOTE(review): the random default `factor` is evaluated once at import time,
# so every run within one session reuses the same factor.
def test_supersampling(grid_size=100, factor=np.random.randint(2, 10),
                       use_ld=6):
    """Normalization must survive supersampling + every resampling filter."""
    for i in range(N_SAMPLERS):
        star = draw.star(grid_size,
                         limb_darkening_law=IMPLEMENTED_LD_LAWS[use_ld],
                         ld_coefficient=TEST_COEFFICIENTS[use_ld],
                         supersampling=factor,
                         resample_method=IMPLEMENTED_SAMPLERS[i])
        total_intensity = np.sum(star.intensity)
        obtained_precision = abs(1.0 - total_intensity)
        assert (obtained_precision < REQUIRED_INTENSITY_PRECISION)


# Test the upscaling
def test_upscaling(grid_size=500, factor=np.random.random() * 10, use_ld=6):
    """Normalization must survive upscaling with every resampling filter."""
    for i in range(N_SAMPLERS):
        star = draw.star(grid_size,
                         limb_darkening_law=IMPLEMENTED_LD_LAWS[use_ld],
                         ld_coefficient=TEST_COEFFICIENTS[use_ld],
                         upscaling=factor,
                         resample_method=IMPLEMENTED_SAMPLERS[i])
        total_intensity = np.sum(star.intensity)
        obtained_precision = abs(1.0 - total_intensity)
        assert (obtained_precision < REQUIRED_INTENSITY_PRECISION)


# Test drawing a transit
def test_transit(grid_size=2001, planet_to_star_ratio=0.15,
                 transit_required_precision=1E-3):
    """A fully-transiting planet must block ~ (Rp/Rs)^2 of the flux."""
    star_grid = draw.star(grid_size)
    transit_grid = draw.planet_transit(star_grid, planet_to_star_ratio,
                                       rescaling_factor=0.5,
                                       resample_method="box")
    transit_depth = transit_grid.transit_depth
    obtained_precision = abs(transit_depth - planet_to_star_ratio ** 2)
    assert (obtained_precision < transit_required_precision)
#
# basic_psbt.py - yet another PSBT parser/serializer but used only for test cases.
#
# - history: taken from coldcard-firmware/testing/psbt.py
# - trying to minimize electrum code in here, and generally, dependancies.
#
# Implements a minimal reader/writer for BIP-174 (Partially Signed Bitcoin
# Transactions): a global section, then one key/value section per input and
# per output, each key prefixed by a one-byte type.
#
import io
import struct
from base64 import b64decode
from binascii import a2b_hex, b2a_hex
from struct import pack, unpack

from electrum_grlc.transaction import Transaction

# BIP-174 (aka PSBT) defined values
#
PSBT_GLOBAL_UNSIGNED_TX = (0)
PSBT_GLOBAL_XPUB = (1)

PSBT_IN_NON_WITNESS_UTXO = (0)
PSBT_IN_WITNESS_UTXO = (1)
PSBT_IN_PARTIAL_SIG = (2)
PSBT_IN_SIGHASH_TYPE = (3)
PSBT_IN_REDEEM_SCRIPT = (4)
PSBT_IN_WITNESS_SCRIPT = (5)
PSBT_IN_BIP32_DERIVATION = (6)
PSBT_IN_FINAL_SCRIPTSIG = (7)
PSBT_IN_FINAL_SCRIPTWITNESS = (8)

PSBT_OUT_REDEEM_SCRIPT = (0)
PSBT_OUT_WITNESS_SCRIPT = (1)
PSBT_OUT_BIP32_DERIVATION = (2)


# Serialization/deserialization tools
def ser_compact_size(l):
    """Serialize integer `l` as a Bitcoin CompactSize (varint)."""
    r = b""
    if l < 253:
        r = struct.pack("B", l)
    elif l < 0x10000:
        r = struct.pack("<BH", 253, l)
    elif l < 0x100000000:
        r = struct.pack("<BI", 254, l)
    else:
        r = struct.pack("<BQ", 255, l)
    return r


def deser_compact_size(f):
    """Read a CompactSize from file-like `f`; return None at end of file."""
    try:
        nit = f.read(1)[0]
    except IndexError:
        return None     # end of file

    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    return nit


def my_var_int(l):
    # Bitcoin serialization of integers... directly into binary!
    # FIX: this function duplicated ser_compact_size() branch for branch;
    # keep a single implementation and delegate (output is identical).
    return ser_compact_size(l)


class PSBTSection:
    """Base class for one key/value section (an input or an output).

    Subclasses implement defaults(), parse_kv() and serialize_kvs().
    """

    def __init__(self, fd=None, idx=None):
        self.defaults()
        self.my_index = idx

        if not fd:
            return

        # Read key/value pairs until the 0x00 section separator.
        while 1:
            ks = deser_compact_size(fd)
            if ks is None:
                break
            if ks == 0:
                break

            key = fd.read(ks)
            vs = deser_compact_size(fd)
            val = fd.read(vs)

            # First key byte is the record type; the rest is record key data.
            kt = key[0]
            self.parse_kv(kt, key[1:], val)

    def serialize(self, fd, my_idx):
        """Write this section's key/value pairs plus the 0x00 separator."""

        def wr(ktype, val, key=b''):
            fd.write(ser_compact_size(1 + len(key)))
            fd.write(bytes([ktype]) + key)
            fd.write(ser_compact_size(len(val)))
            fd.write(val)

        self.serialize_kvs(wr)

        fd.write(b'\0')


class BasicPSBTInput(PSBTSection):
    """Per-input PSBT records (BIP-174 PSBT_IN_*)."""

    def defaults(self):
        self.utxo = None
        self.witness_utxo = None
        self.part_sigs = {}
        self.sighash = None
        self.bip32_paths = {}
        self.redeem_script = None
        self.witness_script = None
        self.others = {}

    def __eq__(a, b):
        # Differing sighash values only count when both sides set one.
        if a.sighash != b.sighash:
            if a.sighash is not None and b.sighash is not None:
                return False
        rv = a.utxo == b.utxo and \
            a.witness_utxo == b.witness_utxo and \
            a.redeem_script == b.redeem_script and \
            a.witness_script == b.witness_script and \
            a.my_index == b.my_index and \
            a.bip32_paths == b.bip32_paths and \
            sorted(a.part_sigs.keys()) == sorted(b.part_sigs.keys())
        # NOTE: equality test on signatures requires parsing DER stupidness
        # and some maybe understanding of R/S values on curve that I don't have.
        return rv

    def parse_kv(self, kt, key, val):
        if kt == PSBT_IN_NON_WITNESS_UTXO:
            self.utxo = val
            assert not key
        elif kt == PSBT_IN_WITNESS_UTXO:
            self.witness_utxo = val
            assert not key
        elif kt == PSBT_IN_PARTIAL_SIG:
            self.part_sigs[key] = val
        elif kt == PSBT_IN_SIGHASH_TYPE:
            assert len(val) == 4
            self.sighash = struct.unpack("<I", val)[0]
            assert not key
        elif kt == PSBT_IN_BIP32_DERIVATION:
            self.bip32_paths[key] = val
        elif kt == PSBT_IN_REDEEM_SCRIPT:
            self.redeem_script = val
            assert not key
        elif kt == PSBT_IN_WITNESS_SCRIPT:
            self.witness_script = val
            assert not key
        elif kt in (PSBT_IN_FINAL_SCRIPTSIG, PSBT_IN_FINAL_SCRIPTWITNESS):
            # FIX: the original tuple also listed PSBT_IN_REDEEM_SCRIPT and
            # PSBT_IN_WITNESS_SCRIPT, which are unreachable here because the
            # dedicated branches above already consume them — the duplicates
            # wrongly suggested those keys could land in self.others.
            assert not key
            self.others[kt] = val
        else:
            raise KeyError(kt)

    def serialize_kvs(self, wr):
        if self.utxo:
            wr(PSBT_IN_NON_WITNESS_UTXO, self.utxo)
        if self.witness_utxo:
            wr(PSBT_IN_WITNESS_UTXO, self.witness_utxo)
        if self.redeem_script:
            wr(PSBT_IN_REDEEM_SCRIPT, self.redeem_script)
        if self.witness_script:
            wr(PSBT_IN_WITNESS_SCRIPT, self.witness_script)
        for pk, val in sorted(self.part_sigs.items()):
            wr(PSBT_IN_PARTIAL_SIG, val, pk)
        if self.sighash is not None:
            wr(PSBT_IN_SIGHASH_TYPE, struct.pack('<I', self.sighash))
        for k in self.bip32_paths:
            wr(PSBT_IN_BIP32_DERIVATION, self.bip32_paths[k], k)
        for k in self.others:
            wr(k, self.others[k])


class BasicPSBTOutput(PSBTSection):
    """Per-output PSBT records (BIP-174 PSBT_OUT_*)."""

    def defaults(self):
        self.redeem_script = None
        self.witness_script = None
        self.bip32_paths = {}

    def __eq__(a, b):
        return a.redeem_script == b.redeem_script and \
            a.witness_script == b.witness_script and \
            a.my_index == b.my_index and \
            a.bip32_paths == b.bip32_paths

    def parse_kv(self, kt, key, val):
        if kt == PSBT_OUT_REDEEM_SCRIPT:
            self.redeem_script = val
            assert not key
        elif kt == PSBT_OUT_WITNESS_SCRIPT:
            self.witness_script = val
            assert not key
        elif kt == PSBT_OUT_BIP32_DERIVATION:
            self.bip32_paths[key] = val
        else:
            raise ValueError(kt)

    def serialize_kvs(self, wr):
        if self.redeem_script:
            wr(PSBT_OUT_REDEEM_SCRIPT, self.redeem_script)
        if self.witness_script:
            wr(PSBT_OUT_WITNESS_SCRIPT, self.witness_script)
        for k in self.bip32_paths:
            wr(PSBT_OUT_BIP32_DERIVATION, self.bip32_paths[k], k)


class BasicPSBT:
    "Just? parse and store"

    def __init__(self):
        self.txn = None
        self.filename = None
        self.parsed_txn = None
        self.xpubs = []

        self.inputs = []
        self.outputs = []

    def __eq__(a, b):
        return a.txn == b.txn and \
            len(a.inputs) == len(b.inputs) and \
            len(a.outputs) == len(b.outputs) and \
            all(a.inputs[i] == b.inputs[i] for i in range(len(a.inputs))) and \
            all(a.outputs[i] == b.outputs[i] for i in range(len(a.outputs))) and \
            sorted(a.xpubs) == sorted(b.xpubs)

    def parse(self, raw, filename=None):
        """Parse a PSBT given as raw binary, hex or base64 bytes.

        Raises AssertionError on malformed input; returns self.
        """
        # auto-detect and decode Base64 and Hex.
        if raw[0:10].lower() == b'70736274ff':
            raw = a2b_hex(raw.strip())
        if raw[0:6] == b'cHNidP':
            raw = b64decode(raw)
        assert raw[0:5] == b'psbt\xff', "bad magic"

        self.filename = filename

        with io.BytesIO(raw[5:]) as fd:

            # globals
            while 1:
                ks = deser_compact_size(fd)
                if ks is None:
                    break
                if ks == 0:
                    break

                key = fd.read(ks)
                vs = deser_compact_size(fd)
                val = fd.read(vs)

                kt = key[0]
                if kt == PSBT_GLOBAL_UNSIGNED_TX:
                    self.txn = val
                    self.parsed_txn = Transaction(val.hex())
                    # Input/output counts come from the embedded transaction.
                    num_ins = len(self.parsed_txn.inputs())
                    num_outs = len(self.parsed_txn.outputs())
                elif kt == PSBT_GLOBAL_XPUB:
                    # key=(xpub) => val=(path)
                    self.xpubs.append((key, val))
                else:
                    raise ValueError('unknown global key type: 0x%02x' % kt)

            assert self.txn, 'missing reqd section'

            self.inputs = [BasicPSBTInput(fd, idx) for idx in range(num_ins)]
            self.outputs = [BasicPSBTOutput(fd, idx) for idx in range(num_outs)]

            sep = fd.read(1)
            assert sep == b''

        return self

    def serialize(self, fd):
        """Write this PSBT in binary form to file-like `fd`."""

        def wr(ktype, val, key=b''):
            fd.write(ser_compact_size(1 + len(key)))
            fd.write(bytes([ktype]) + key)
            fd.write(ser_compact_size(len(val)))
            fd.write(val)

        fd.write(b'psbt\xff')

        wr(PSBT_GLOBAL_UNSIGNED_TX, self.txn)

        for k, v in self.xpubs:
            wr(PSBT_GLOBAL_XPUB, v, key=k)

        # sep
        fd.write(b'\0')

        for idx, inp in enumerate(self.inputs):
            inp.serialize(fd, idx)

        for idx, outp in enumerate(self.outputs):
            outp.serialize(fd, idx)

    def as_bytes(self):
        """Return the serialized PSBT as a bytes object."""
        with io.BytesIO() as fd:
            self.serialize(fd)
            return fd.getvalue()

# EOF
""" Stack-In-A-WSGI: stackinawsgi.admin.admin.StackInAWsgiSessionManager """ import datetime import json import unittest import ddt from stackinabox.services.service import StackInABoxService from stackinabox.services.hello import HelloService from stackinawsgi.admin.admin import StackInAWsgiAdmin from stackinawsgi.session.service import ( global_sessions, StackInAWsgiSessionManager ) from stackinawsgi.wsgi.request import Request from stackinawsgi.wsgi.response import Response from stackinawsgi.test.helpers import make_environment @ddt.ddt class TestSessionManager(unittest.TestCase): """ Test the interaction of StackInAWSGI's Session Manager """ def setUp(self): """ configure env for the test """ self.manager = StackInAWsgiSessionManager() self.manager.register_service(HelloService) self.base_uri = 'test://testing-url' def tearDown(self): """ clean up after the test """ keys = tuple(global_sessions.keys()) for k in keys: del global_sessions[k] def test_construction(self): """ test basic construction of the admin interface """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) self.assertIsInstance(admin, StackInABoxService) self.assertEqual(id(self.manager), id(admin.manager)) self.assertTrue(admin.base_uri.startswith(self.base_uri)) def test_property_base_uri_with_no_slash(self): """ test basic construction of the admin interface """ base_uri = 'hello' admin = StackInAWsgiAdmin(self.manager, base_uri) self.assertIsInstance(admin, StackInABoxService) self.assertEqual(id(self.manager), id(admin.manager)) self.assertTrue(admin.base_uri.startswith(base_uri)) def test_property_base_uri_start_with_slash(self): """ test basic construction of the admin interface """ base_uri = '/hello' admin = StackInAWsgiAdmin(self.manager, base_uri) self.assertIsInstance(admin, StackInABoxService) self.assertEqual(id(self.manager), id(admin.manager)) self.assertTrue(admin.base_uri.startswith(base_uri[1:])) def test_property_base_uri_ends_with_slash(self): """ test the base uri 
property to ensure the trailing slash is removed """ base_uri = 'hello/' admin = StackInAWsgiAdmin(self.manager, base_uri) self.assertIsInstance(admin, StackInABoxService) self.assertEqual(id(self.manager), id(admin.manager)) self.assertTrue(admin.base_uri.startswith(base_uri[:-1])) def test_helper_get_session_id(self): """ test extracting the session-id from the headers """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) session_id = 'some-session-id' headers = { 'x-session-id': session_id } extracted_session_id = admin.helper_get_session_id(headers) self.assertEqual(session_id, extracted_session_id) def test_helper_get_session_id_no_session_id(self): """ test extracting the session-id from the headers """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) headers = {} extracted_session_id = admin.helper_get_session_id(headers) self.assertIsNone(extracted_session_id) def test_helper_get_uri(self): """ test building the URI """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) session_id = 'some-session-id' expected_uri = '{0}/{1}/'.format(self.base_uri, session_id) result_uri = admin.helper_get_uri(session_id) self.assertEqual(expected_uri, result_uri) def test_session_creation(self): """ test creating a new session """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) uri = u'/' environment = make_environment( self, method='POST', path=uri[1:] ) request = Request(environment) response = Response() result = admin.create_session( request, uri, response.headers ) response.from_stackinabox( result[0], result[1], result[2] ) # validate response self.assertEqual(response.status, 201) # validate header entries self.assertIn('x-session-id', response.headers) self.assertIn('location', response.headers) # validate x-session-id session_id = response.headers['x-session-id'] self.assertIn(session_id, global_sessions) # validate location self.assertEqual( '{0}/{1}/'.format(self.base_uri, session_id), response.headers['location'] ) def 
test_session_creation_with_session_id(self): """ test creating a new session with a session-id """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) session_id = 'my-session-id' uri = u'/' environment = make_environment( self, method='POST', path=uri[1:], headers={ 'x-session-id': session_id } ) request = Request(environment) self.assertIn('x-session-id', request.headers) self.assertEqual(session_id, request.headers['x-session-id']) response = Response() result = admin.create_session( request, uri, request.headers ) response.from_stackinabox( result[0], result[1], result[2] ) # validate response self.assertEqual(response.status, 201) # validate header entries self.assertIn('x-session-id', response.headers) self.assertIn('location', response.headers) # validate x-session-id extracted_session_id = response.headers['x-session-id'] self.assertEqual(session_id, extracted_session_id) self.assertIn(extracted_session_id, global_sessions) # validate location self.assertEqual( '{0}/{1}/'.format(self.base_uri, extracted_session_id), response.headers['location'] ) def test_session_remove(self): """ test removing a session """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) session_id = self.manager.create_session() uri = u'/' environment = make_environment( self, method='DELETE', path=uri[1:], headers={ 'x-session-id': session_id } ) request = Request(environment) self.assertIn('x-session-id', request.headers) self.assertEqual(session_id, request.headers['x-session-id']) response = Response() result = admin.remove_session( request, uri, request.headers ) response.from_stackinabox( result[0], result[1], result[2] ) # validate response self.assertEqual(response.status, 204) def test_session_remove_invalid_session_id(self): """ test removing a session with an invalid session id """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) session_id = 'my-session-id' uri = u'/' environment = make_environment( self, method='DELETE', path=uri[1:], headers={ 'x-session-id': 
session_id } ) request = Request(environment) self.assertIn('x-session-id', request.headers) self.assertEqual(session_id, request.headers['x-session-id']) response = Response() result = admin.remove_session( request, uri, request.headers ) response.from_stackinabox( result[0], result[1], result[2] ) # validate response self.assertEqual(response.status, 404) def test_session_reset(self): """ test resetting a session """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) session_id = self.manager.create_session() uri = u'/' environment = make_environment( self, method='PUT', path=uri[1:], headers={ 'x-session-id': session_id } ) request = Request(environment) self.assertIn('x-session-id', request.headers) self.assertEqual(session_id, request.headers['x-session-id']) response = Response() result = admin.reset_session( request, uri, request.headers ) response.from_stackinabox( result[0], result[1], result[2] ) # validate response self.assertEqual(response.status, 205) def test_session_reset_invalid_session_id(self): """ test resetting a session with an invalid session id """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) session_id = 'my-session-id' uri = u'/' environment = make_environment( self, method='PUT', path=uri[1:], headers={ 'x-session-id': session_id } ) request = Request(environment) self.assertIn('x-session-id', request.headers) self.assertEqual(session_id, request.headers['x-session-id']) response = Response() result = admin.reset_session( request, uri, request.headers ) response.from_stackinabox( result[0], result[1], result[2] ) # validate response self.assertEqual(response.status, 404) @ddt.data(0, 1, 2, 3, 5, 8, 13) def test_get_sessions(self, session_count): """ test get sessions """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) uri = u'/' environment = make_environment( self, method='GET', path=uri[1:], headers={} ) request = Request(environment) for _ in range(session_count): admin.manager.create_session() response = Response() 
result = admin.get_sessions( request, uri, request.headers ) response.from_stackinabox( result[0], result[1], result[2] ) # validate response self.assertEqual(response.status, 200) response_body = response.body session_data = json.loads(response_body) self.assertIn('base_url', session_data) self.assertEqual(session_data['base_url'], self.base_uri) self.assertIn('services', session_data) self.assertEqual(len(session_data['services']), 1) self.assertIn('hello', session_data['services']) self.assertEqual(session_data['services']['hello'], 'HelloService') self.assertIn('sessions', session_data) self.assertEqual(len(session_data['sessions']), session_count) def test_get_session_info(self): """ test resetting a session with an invalid session id """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) session_id = 'my-session-id' uri = u'/{0}'.format(session_id) environment = make_environment( self, method='GET', path=uri[1:], headers={ 'x-session-id': session_id } ) request = Request(environment) self.assertIn('x-session-id', request.headers) self.assertEqual(session_id, request.headers['x-session-id']) response_created = Response() result_create = admin.create_session( request, uri, request.headers ) response_created.from_stackinabox( result_create[0], result_create[1], result_create[2] ) self.assertEqual(response_created.status, 201) response = Response() result = admin.get_session_info( request, uri, request.headers ) response.from_stackinabox( result[0], result[1], result[2] ) # validate response self.assertEqual(response.status, 200) response_body = response.body session_data = json.loads(response_body) self.assertIn('base_url', session_data) self.assertEqual(session_data['base_url'], self.base_uri) self.assertIn('session_valid', session_data) self.assertTrue(session_data['session_valid']) self.assertIn('services', session_data) self.assertEqual(len(session_data['services']), 1) self.assertIn('hello', session_data['services']) 
self.assertEqual(session_data['services']['hello'], 'HelloService') self.assertIn('trackers', session_data) self.assertEqual(len(session_data['trackers']), 3) self.assertIn('created-time', session_data['trackers']) self.assertIsNotNone(session_data['trackers']['created-time']) created_time = datetime.datetime.strptime( session_data['trackers']['created-time'], "%Y-%m-%dT%H:%M:%S.%f" ) self.assertIn('accessed', session_data['trackers']) self.assertEqual(len(session_data['trackers']['accessed']), 2) self.assertIn('time', session_data['trackers']['accessed']) self.assertIsNotNone(session_data['trackers']['accessed']['time']) accessed_time = datetime.datetime.strptime( session_data['trackers']['accessed']['time'], "%Y-%m-%dT%H:%M:%S.%f" ) self.assertEqual(created_time, accessed_time) self.assertIn('count', session_data['trackers']['accessed']) self.assertIn('status', session_data['trackers']) self.assertEqual(len(session_data['trackers']['status']), 0) def test_get_session_info_invalid_session(self): """ test resetting a session with an invalid session id """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) session_id = 'my-session-id' uri = u'/{0}'.format(session_id) environment = make_environment( self, method='PUT', path=uri[1:], ) request = Request(environment) response = Response() result = admin.get_session_info( request, uri, request.headers ) response.from_stackinabox( result[0], result[1], result[2] ) # validate response self.assertEqual(response.status, 200) response_body = response.body session_data = json.loads(response_body) self.assertIn('base_url', session_data) self.assertEqual(session_data['base_url'], self.base_uri) self.assertIn('session_valid', session_data) self.assertFalse(session_data['session_valid']) self.assertIn('services', session_data) self.assertEqual(len(session_data['services']), 1) self.assertIn('hello', session_data['services']) self.assertEqual(session_data['services']['hello'], 'HelloService') self.assertIn('trackers', 
session_data) self.assertEqual(len(session_data['trackers']), 3) self.assertIn('created-time', session_data['trackers']) self.assertIsNone(session_data['trackers']['created-time']) self.assertIn('accessed', session_data['trackers']) self.assertEqual(len(session_data['trackers']['accessed']), 2) self.assertIn('time', session_data['trackers']['accessed']) self.assertIsNone(session_data['trackers']['accessed']['time']) self.assertIn('count', session_data['trackers']['accessed']) self.assertIn('status', session_data['trackers']) self.assertEqual(len(session_data['trackers']['status']), 0) def test_extract_session_from_uri(self): """ test extracting a session from the URI - positive test """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) session_id = 'my-session-id' uri = u'/{0}'.format(session_id) extracted_session_id = admin.helper_get_session_id_from_uri( uri ) self.assertEqual(session_id, extracted_session_id) def test_extract_session_from_uri_invalid(self): """ test extracting a session from the URI - negative test """ admin = StackInAWsgiAdmin(self.manager, self.base_uri) uri = u'/' extracted_session_id = admin.helper_get_session_id_from_uri( uri ) self.assertIsNone(extracted_session_id)
import sublime
import sublime_plugin

from collections import defaultdict
import re

# Views larger than this are never indexed (keeps typing responsive).
VIEW_TOO_BIG = 1000000
WORD_PATTERN = re.compile(r'(\w{2,})', re.S)  # Start from words of length 2

# Per-view word cache; value ``None`` marks a stale entry that must be
# re-indexed before use.
words_by_view = {}
# Union of the words of every view ever indexed (never pruned).
words_global = set()
# State of the current completion cycle.
last_view = None
initial_primer = ""
matching = []
last_index = 0
history = defaultdict(dict)  # type: Dict[sublime.Window, Dict[str, str]]


class HippieWordCompletionCommand(sublime_plugin.TextCommand):
    """Emacs-style hippie completion: repeatedly invoking the command
    cycles through fuzzy matches for the word fragment before the caret."""

    def run(self, edit):
        global last_view, matching, last_index, initial_primer
        window = self.view.window()
        assert window

        def word_start(region):
            # Region from the beginning of the word under ``region``
            # to the caret position.
            word_region = self.view.word(region)
            return sublime.Region(word_region.a, region.end())

        first_sel = self.view.sel()[0]
        primer = self.view.substr(word_start(first_sel))

        def _matching():
            yield primer  # Always be able to cycle back
            if primer in history[window]:
                # Most recently accepted completion for this primer first.
                yield history[window][primer]
            yield from fuzzyfind(primer, words_by_view[self.view])
            yield from fuzzyfind(primer, words_global)

        # Start a new cycle when the view changed, nothing is cached, or the
        # user typed something since the last completion.
        if last_view is not self.view or not matching \
                or primer != matching[last_index]:
            # BUGFIX: use .get() here. A view opened after plugin load has no
            # entry in words_by_view (only on_init/on_deactivated_async add
            # entries), so plain indexing raised KeyError on first use.
            if words_by_view.get(self.view) is None:
                index_view(self.view, exclude_sel=True)
            last_view = self.view
            initial_primer = primer
            matching = ldistinct(_matching())
            last_index = 0

        # Skip over the primer itself, wrapping around at the end.
        if matching[last_index] == primer:
            last_index += 1
        if last_index >= len(matching):
            last_index = 0

        for region in self.view.sel():
            self.view.replace(edit, word_start(region), matching[last_index])
        history[window][initial_primer] = matching[last_index]


class HippieListener(sublime_plugin.EventListener):
    """Keeps the word caches in sync with editor events."""

    def on_init(self, views):
        # Index everything that is open when the plugin loads.
        for view in views:
            index_view(view)

    def on_modified(self, view):
        words_by_view[view] = None  # Drop cached word set

    def on_deactivated_async(self, view):
        # Re-index in the background when the user leaves the view.
        index_view(view)


def index_view(view, exclude_sel=False):
    """Collect all words of *view* into the per-view and global caches.

    When *exclude_sel* is true the words currently under the selections are
    skipped so a half-typed primer does not complete to itself.
    """
    if view.size() > VIEW_TOO_BIG:
        return
    if exclude_sel:
        regions = invert_regions(view, map(view.word, view.sel()))
    else:
        regions = [sublime.Region(0, view.size())]
    words = set().union(*[WORD_PATTERN.findall(view.substr(region))
                          for region in regions])
    words_by_view[view] = words
    # NOTE(review): words_global only ever grows; stale words from closed
    # views are kept deliberately as low-priority candidates.
    words_global.update(words)


def invert_regions(view, regions):
    """Return the regions of *view* NOT covered by *regions*."""
    # NOTE: regions should be non-overlapping and ordered,
    # no check here for performance reasons
    start = 0
    end = view.size()
    result = []

    for r in regions:
        if r.a > start:
            result.append(sublime.Region(start, r.a))
        start = r.b

    if start < end:
        result.append(sublime.Region(start, end))

    return result


def fuzzyfind(primer, coll):
    """
    Fuzzy-filter and rank *coll* by *primer*.

    Args:
        primer: A partial string which is typically entered by a user.
        coll: A collection of strings which will be filtered based on
            the `primer`.
    """
    primer_lower = primer.lower()
    suggestions = [(score, item) for item in coll
                   if (score := fuzzy_score(primer_lower, item))]
    return [z[-1] for z in sorted(suggestions, key=lambda x: x[0])]


def fuzzy_score(primer, item, _abbr={}):
    """Score *item* against *primer*; abbreviation matches win over
    plain subsequence matches. ``_abbr`` memoizes computed abbreviations."""
    if item not in _abbr:
        _abbr[item] = make_abbr(item)
    if _abbr[item] and (abbr_score := _fuzzy_score(primer, _abbr[item])):
        return abbr_score
    return _fuzzy_score(primer, item)


def _fuzzy_score(primer, item):
    """Subsequence match score: (gap penalty, length); None when *primer*
    is not a case-insensitive subsequence of *item*. Lower is better."""
    start, pos, prev, score = -1, -1, 0, 1
    item_l = item.lower()
    for c in primer:
        pos = item_l.find(c, pos + 1)
        if pos == -1:
            return
        if start == -1:
            start = pos
        score += pos - prev
        prev = pos
    return (score, len(item))


def make_abbr(item):
    """Abbreviation of *item* from word-boundary characters, e.g.
    ``make_abbr`` -> ``ma``, ``CamelCase`` -> ``CC``; None when trivial."""
    abbr = item[0]
    for c, nc in zip(item, item[1:]):
        if c in "_-" or c.isupper() < nc.isupper():
            abbr += nc
    if len(abbr) > 1:
        return abbr


def ldistinct(seq):
    """Iterates over sequence skipping duplicates"""
    seen = set()
    res = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            res.append(item)
    return res
# Author: <NAME>
# Description: interface class to various data formats (locations, rucio,
# s3, ...)
import json


class Templater():
    """Evaluate strings against ``{tag}``-style templates read from a JSON
    configuration keyed by plugin type and host."""

    def __init__(self):
        self._template_configuration = None
        self._config_path = None
        self._eval_list = {}

    def Config(self, config_path):
        # Point at your favourite experimental configuration describing
        # the rucio catalogue layout, then load it immediately.
        self._config_path = config_path
        self.LoadConfig()

    def LoadConfig(self):
        """Parse the JSON configuration file into memory."""
        with open(self._config_path) as fh:
            self._template_configuration = json.load(fh)

    def FindOccurrences(self, test_string, test_char):
        """Return every index at which *test_char* occurs in *test_string*."""
        # https://stackoverflow.com/questions/13009675/find-all-the-occurrences-of-a-character-in-a-string
        return [idx for idx, letter in enumerate(test_string)
                if letter == test_char]

    def ExtractTagWords(self, test_string, beg_char, end_char):
        """Distinct words enclosed by *beg_char*..*end_char*, in order of
        first appearance (e.g. ``{a}/{b}`` -> ``['a', 'b']``)."""
        words = []
        begins = self.FindOccurrences(test_string, beg_char)
        ends = self.FindOccurrences(test_string, end_char)
        for j in range(len(begins)):
            word = test_string[begins[j] + 1:ends[j]]
            if word not in words:
                words.append(word)
        return words

    def ExtractSplit(self, test_string, beg_char, end_char):
        """Separator text between consecutive ``}...{`` pairs of the template.

        NOTE(review): ``extend`` adds the separator one character at a time,
        so a multi-character separator is stored as its characters —
        presumably separators are single characters; confirm with the
        configuration files in use.
        """
        parts = []
        begins = self.FindOccurrences(test_string, beg_char)
        ends = self.FindOccurrences(test_string, end_char)
        for j in range(len(begins) - 1):
            left = begins[j]
            right = ends[j]
            if left > right:
                # The j-th end precedes the j-th begin: the separator runs
                # up to the next opening character instead.
                right = ends[j + 1]
            parts.extend(test_string[left + 1:right])
        return parts

    def Eval(self, plugin=None, host=None, string=None):
        """Resolve the template for (plugin, host); when *string* is given,
        split it according to the template into ``self._eval_list``."""
        # Reset the outputs of a previous evaluation.
        self._structure = None
        self._eval_list = {}
        self._types = list(self._template_configuration.keys())
        if len(self._types) == 0:
            print("Upload types are not defined")

        # Find the overall file structure for the requested plugin/host
        # pair in the configuration (depends on experiment).
        for i_type in self._types:
            if i_type != plugin:
                continue
            i_levels = self._template_configuration[i_type]
            if host in list(i_levels.keys()):
                self._structure = i_levels[host]

        if string is None:
            return 0
        if self._structure is None:
            # check your config file
            return 0

        # Evaluate the string against the template: peel one separator at a
        # time from the right, assigning each taken piece to its tag.
        tag_words = list(reversed(
            self.ExtractTagWords(self._structure, "{", "}")))
        separators = reversed(self.ExtractSplit(self._structure, "}", "{"))
        for i, sep in enumerate(separators):
            take = string.split(sep)[-1]
            string = string.replace(sep + take, "")
            self._eval_list[tag_words[i]] = take
        # Whatever remains belongs to the leftmost tag.
        self._eval_list[tag_words[-1]] = string

    def GetTypes(self):
        """Plugin types found during the last Eval()."""
        return self._types

    def GetStructure(self, plugin=None, host=None):
        """Template string for (plugin, host), or None when unknown."""
        self.Eval(plugin=plugin, host=host, string=None)
        return self._structure

    def GetTemplateEval(self, plugin=None, host=None, string=None):
        """Mapping of template tags to the pieces extracted from *string*."""
        self.Eval(plugin=plugin, host=host, string=string)
        return self._eval_list
<gh_stars>0 # ---------------------------------------------------------------------------- # Copyright (c) 2016-2017, UniFrac development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- import unittest import os from io import StringIO from tempfile import gettempdir import pkg_resources import numpy as np import numpy.testing as npt from biom import Table, load_table from biom.util import biom_open from skbio import TreeNode import skbio.diversity from unifrac import ssu, faith_pd class UnifracAPITests(unittest.TestCase): package = 'unifrac.tests' def get_data_path(self, filename): # adapted from qiime2.plugin.testing.TestPluginBase return pkg_resources.resource_filename(self.package, 'data/%s' % filename) def test_unweighted_root_eval_issue_46(self): tree = self.get_data_path('crawford.tre') table = self.get_data_path('crawford.biom') table_inmem = load_table(table) tree_inmem = skbio.TreeNode.read(tree) ids = table_inmem.ids() otu_ids = table_inmem.ids(axis='observation') cnts = table_inmem.matrix_data.astype(int).toarray().T exp = skbio.diversity.beta_diversity('unweighted_unifrac', cnts, ids=ids, otu_ids=otu_ids, tree=tree_inmem) obs = ssu(table, tree, 'unweighted', False, 1.0, False, 1) npt.assert_almost_equal(obs.data, exp.data) def test_meta_unifrac(self): t1 = self.get_data_path('t1.newick') e1 = self.get_data_path('e1.biom') result = ssu(e1, t1, 'unweighted', False, 1.0, False, 1) u1_distances = np.array([[0, 10 / 16., 8 / 13.], [10 / 16., 0, 8 / 17.], [8 / 13., 8 / 17., 0]]) npt.assert_almost_equal(u1_distances, result.data) self.assertEqual(tuple('ABC'), result.ids) def test_ssu_bad_tree(self): e1 = self.get_data_path('e1.biom') with self.assertRaisesRegex(IOError, "Tree file not found."): ssu(e1, 'bad-file', 'unweighted', False, 1.0, False, 1) def test_ssu_bad_table(self): t1 = 
self.get_data_path('t1.newick') with self.assertRaisesRegex(IOError, "Table file not found."): ssu('bad-file', t1, 'unweighted', False, 1.0, False, 1) def test_ssu_bad_method(self): t1 = self.get_data_path('t1.newick') e1 = self.get_data_path('e1.biom') with self.assertRaisesRegex(ValueError, "Unknown method."): ssu(e1, t1, 'unweightedfoo', False, 1.0, False, 1) class EdgeCasesTests(unittest.TestCase): # These tests were mostly ported from skbio's # skbio/diversity/beta/tests/test_unifrac.py at SHA-256 ea901b3b6b0b # note that not all tests were kept since the APIs are different. # # The test cases below only exercise unweighted, weighted and weighted # normalized UniFrac. The C++ test suite verifies (against reference # implementations) the variance adjusted and generalized variants of the # algorithm. package = 'unifrac.tests' def _work(self, u_counts, v_counts, otu_ids, tree, method): data = np.array([u_counts, v_counts]).T bt = Table(data, otu_ids, ['u', 'v']) ta = os.path.join(gettempdir(), 'table.biom') tr = os.path.join(gettempdir(), 'tree.biom') self.files_to_delete.append(ta) self.files_to_delete.append(tr) with biom_open(ta, 'w') as fhdf5: bt.to_hdf5(fhdf5, 'Table for unit testing') tree.write(tr) # return value is a distance matrix, get the distance from u->v return ssu(ta, tr, method, False, 1.0, False, 1)['u', 'v'] def weighted_unifrac(self, u_counts, v_counts, otu_ids, tree, normalized=False): if normalized: method = 'weighted_normalized' else: method = 'weighted_unnormalized' return self._work(u_counts, v_counts, otu_ids, tree, method) def unweighted_unifrac(self, u_counts, v_counts, otu_ids, tree, normalized=False): return self._work(u_counts, v_counts, otu_ids, tree, 'unweighted') def setUp(self): self.b1 = np.array( [[1, 3, 0, 1, 0], [0, 2, 0, 4, 4], [0, 0, 6, 2, 1], [0, 0, 1, 1, 1], [5, 3, 5, 0, 0], [0, 0, 0, 3, 5]]) self.sids1 = list('ABCDEF') self.oids1 = ['OTU%d' % i for i in range(1, 6)] self.t1 = TreeNode.read( 
StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:' '0.75,OTU5:0.75):1.25):0.0)root;')) self.t1_w_extra_tips = TreeNode.read( StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:' '0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0' ')root;')) self.t2 = TreeNode.read( StringIO('((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)' 'root;')) self.oids2 = ['OTU%d' % i for i in range(1, 5)] self.files_to_delete = [] def tearDown(self): for f in self.files_to_delete: try: os.remove(f) except OSError: pass def test_ssu_table_not_subset_tree(self): tree = TreeNode.read(StringIO('((OTU1:0.5,OTU3:1.0):1.0)root;')) expected_message = "The table does not appear to be completely "\ "represented by the phylogeny." with self.assertRaisesRegex(ValueError, expected_message): self.unweighted_unifrac(self.b1[0], self.b1[1], self.oids1, tree) def test_unweighted_otus_out_of_order(self): # UniFrac API does not assert the observations are in tip order of the # input tree shuffled_ids = self.oids1[:] shuffled_b1 = self.b1.copy() shuffled_ids[0], shuffled_ids[-1] = shuffled_ids[-1], shuffled_ids[0] shuffled_b1[:, [0, -1]] = shuffled_b1[:, [-1, 0]] for i in range(len(self.b1)): for j in range(len(self.b1)): actual = self.unweighted_unifrac( self.b1[i], self.b1[j], self.oids1, self.t1) expected = self.unweighted_unifrac( shuffled_b1[i], shuffled_b1[j], shuffled_ids, self.t1) self.assertAlmostEqual(actual, expected) def test_weighted_otus_out_of_order(self): # UniFrac API does not assert the observations are in tip order of the # input tree shuffled_ids = self.oids1[:] shuffled_b1 = self.b1.copy() shuffled_ids[0], shuffled_ids[-1] = shuffled_ids[-1], shuffled_ids[0] shuffled_b1[:, [0, -1]] = shuffled_b1[:, [-1, 0]] for i in range(len(self.b1)): for j in range(len(self.b1)): actual = self.weighted_unifrac( self.b1[i], self.b1[j], self.oids1, self.t1) expected = self.weighted_unifrac( shuffled_b1[i], shuffled_b1[j], shuffled_ids, self.t1) 
self.assertAlmostEqual(actual, expected) def test_unweighted_extra_tips(self): # UniFrac values are the same despite unobserved tips in the tree for i in range(len(self.b1)): for j in range(len(self.b1)): actual = self.unweighted_unifrac( self.b1[i], self.b1[j], self.oids1, self.t1_w_extra_tips) expected = self.unweighted_unifrac( self.b1[i], self.b1[j], self.oids1, self.t1) self.assertAlmostEqual(actual, expected) def test_weighted_extra_tips(self): # UniFrac values are the same despite unobserved tips in the tree for i in range(len(self.b1)): for j in range(len(self.b1)): actual = self.weighted_unifrac( self.b1[i], self.b1[j], self.oids1, self.t1_w_extra_tips) expected = self.weighted_unifrac( self.b1[i], self.b1[j], self.oids1, self.t1) self.assertAlmostEqual(actual, expected) def test_unweighted_minimal_trees(self): # two tips tree = TreeNode.read(StringIO('(OTU1:0.25, OTU2:0.25)root;')) actual = self.unweighted_unifrac([1, 0], [0, 0], ['OTU1', 'OTU2'], tree) expected = 1.0 self.assertEqual(actual, expected) def test_unweighted_root_not_observed(self): # expected values computed with QIIME 1.9.1 and by hand # root node not observed, but branch between (OTU1, OTU2) and root # is considered shared actual = self.unweighted_unifrac([1, 1, 0, 0], [1, 0, 0, 0], self.oids2, self.t2) # for clarity of what I'm testing, compute expected as it would # based on the branch lengths. the values that compose shared was # a point of confusion for me here, so leaving these in for # future reference expected = 0.2 / (0.1 + 0.2 + 0.3) # 0.3333333333 self.assertAlmostEqual(actual, expected) # root node not observed, but branch between (OTU3, OTU4) and root # is considered shared actual = self.unweighted_unifrac([0, 0, 1, 1], [0, 0, 1, 0], self.oids2, self.t2) # for clarity of what I'm testing, compute expected as it would # based on the branch lengths. 
the values that compose shared was # a point of confusion for me here, so leaving these in for # future reference expected = 0.7 / (1.1 + 0.5 + 0.7) # 0.3043478261 self.assertAlmostEqual(actual, expected) def test_weighted_root_not_observed(self): # expected values computed by hand, these disagree with QIIME 1.9.1 # root node not observed, but branch between (OTU1, OTU2) and root # is considered shared actual = self.weighted_unifrac([1, 0, 0, 0], [1, 1, 0, 0], self.oids2, self.t2) expected = 0.15 self.assertAlmostEqual(actual, expected) # root node not observed, but branch between (OTU3, OTU4) and root # is considered shared actual = self.weighted_unifrac([0, 0, 1, 1], [0, 0, 1, 0], self.oids2, self.t2) expected = 0.6 self.assertAlmostEqual(actual, expected) def test_weighted_normalized_root_not_observed(self): # expected values computed by hand, these disagree with QIIME 1.9.1 # root node not observed, but branch between (OTU1, OTU2) and root # is considered shared actual = self.weighted_unifrac([1, 0, 0, 0], [1, 1, 0, 0], self.oids2, self.t2, normalized=True) expected = 0.1764705882 self.assertAlmostEqual(actual, expected) # root node not observed, but branch between (OTU3, OTU4) and root # is considered shared actual = self.weighted_unifrac([0, 0, 1, 1], [0, 0, 1, 0], self.oids2, self.t2, normalized=True) expected = 0.1818181818 self.assertAlmostEqual(actual, expected) def test_unweighted_unifrac_identity(self): for i in range(len(self.b1)): actual = self.unweighted_unifrac( self.b1[i], self.b1[i], self.oids1, self.t1) expected = 0.0 self.assertAlmostEqual(actual, expected) def test_unweighted_unifrac_symmetry(self): for i in range(len(self.b1)): for j in range(len(self.b1)): actual = self.unweighted_unifrac( self.b1[i], self.b1[j], self.oids1, self.t1) expected = self.unweighted_unifrac( self.b1[j], self.b1[i], self.oids1, self.t1) self.assertAlmostEqual(actual, expected) def test_unweighted_unifrac_non_overlapping(self): # these communities only share the root 
node actual = self.unweighted_unifrac( self.b1[4], self.b1[5], self.oids1, self.t1) expected = 1.0 self.assertAlmostEqual(actual, expected) actual = self.unweighted_unifrac( [1, 1, 1, 0, 0], [0, 0, 0, 1, 1], self.oids1, self.t1) expected = 1.0 self.assertAlmostEqual(actual, expected) def test_unweighted_unifrac(self): # expected results derived from QIIME 1.9.1, which # is a completely different implementation skbio's initial # unweighted unifrac implementation # sample A versus all actual = self.unweighted_unifrac( self.b1[0], self.b1[1], self.oids1, self.t1) expected = 0.238095238095 self.assertAlmostEqual(actual, expected) actual = self.unweighted_unifrac( self.b1[0], self.b1[2], self.oids1, self.t1) expected = 0.52 self.assertAlmostEqual(actual, expected) actual = self.unweighted_unifrac( self.b1[0], self.b1[3], self.oids1, self.t1) expected = 0.52 self.assertAlmostEqual(actual, expected) actual = self.unweighted_unifrac( self.b1[0], self.b1[4], self.oids1, self.t1) expected = 0.545454545455 self.assertAlmostEqual(actual, expected) actual = self.unweighted_unifrac( self.b1[0], self.b1[5], self.oids1, self.t1) expected = 0.619047619048 self.assertAlmostEqual(actual, expected) # sample B versus remaining actual = self.unweighted_unifrac( self.b1[1], self.b1[2], self.oids1, self.t1) expected = 0.347826086957 self.assertAlmostEqual(actual, expected) actual = self.unweighted_unifrac( self.b1[1], self.b1[3], self.oids1, self.t1) expected = 0.347826086957 self.assertAlmostEqual(actual, expected) actual = self.unweighted_unifrac( self.b1[1], self.b1[4], self.oids1, self.t1) expected = 0.68 self.assertAlmostEqual(actual, expected) actual = self.unweighted_unifrac( self.b1[1], self.b1[5], self.oids1, self.t1) expected = 0.421052631579 self.assertAlmostEqual(actual, expected) # sample C versus remaining actual = self.unweighted_unifrac( self.b1[2], self.b1[3], self.oids1, self.t1) expected = 0.0 self.assertAlmostEqual(actual, expected) actual = self.unweighted_unifrac( 
self.b1[2], self.b1[4], self.oids1, self.t1) expected = 0.68 self.assertAlmostEqual(actual, expected) actual = self.unweighted_unifrac( self.b1[2], self.b1[5], self.oids1, self.t1) expected = 0.421052631579 self.assertAlmostEqual(actual, expected) # sample D versus remaining actual = self.unweighted_unifrac( self.b1[3], self.b1[4], self.oids1, self.t1) expected = 0.68 self.assertAlmostEqual(actual, expected) actual = self.unweighted_unifrac( self.b1[3], self.b1[5], self.oids1, self.t1) expected = 0.421052631579 self.assertAlmostEqual(actual, expected) # sample E versus remaining actual = self.unweighted_unifrac( self.b1[4], self.b1[5], self.oids1, self.t1) expected = 1.0 self.assertAlmostEqual(actual, expected) def test_weighted_unifrac_identity(self): for i in range(len(self.b1)): actual = self.weighted_unifrac( self.b1[i], self.b1[i], self.oids1, self.t1) expected = 0.0 self.assertAlmostEqual(actual, expected) def test_weighted_unifrac_symmetry(self): for i in range(len(self.b1)): for j in range(len(self.b1)): actual = self.weighted_unifrac( self.b1[i], self.b1[j], self.oids1, self.t1) expected = self.weighted_unifrac( self.b1[j], self.b1[i], self.oids1, self.t1) self.assertAlmostEqual(actual, expected) def test_weighted_unifrac_non_overlapping(self): # expected results derived from QIIME 1.9.1, which # is a completely different implementation skbio's initial # weighted unifrac implementation # these communities only share the root node actual = self.weighted_unifrac( self.b1[4], self.b1[5], self.oids1, self.t1) expected = 4.0 self.assertAlmostEqual(actual, expected) def test_weighted_unifrac(self): # expected results derived from QIIME 1.9.1, which # is a completely different implementation skbio's initial # weighted unifrac implementation actual = self.weighted_unifrac( self.b1[0], self.b1[1], self.oids1, self.t1) expected = 2.4 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[0], self.b1[2], self.oids1, self.t1) expected = 
1.86666666667 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[0], self.b1[3], self.oids1, self.t1) expected = 2.53333333333 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[0], self.b1[4], self.oids1, self.t1) expected = 1.35384615385 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[0], self.b1[5], self.oids1, self.t1) expected = 3.2 self.assertAlmostEqual(actual, expected) # sample B versus remaining actual = self.weighted_unifrac( self.b1[1], self.b1[2], self.oids1, self.t1) expected = 2.26666666667 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[1], self.b1[3], self.oids1, self.t1) expected = 0.933333333333 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[1], self.b1[4], self.oids1, self.t1) expected = 3.2 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[1], self.b1[5], self.oids1, self.t1) expected = 0.8375 self.assertAlmostEqual(actual, expected) # sample C versus remaining actual = self.weighted_unifrac( self.b1[2], self.b1[3], self.oids1, self.t1) expected = 1.33333333333 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[2], self.b1[4], self.oids1, self.t1) expected = 1.89743589744 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[2], self.b1[5], self.oids1, self.t1) expected = 2.66666666667 self.assertAlmostEqual(actual, expected) # sample D versus remaining actual = self.weighted_unifrac( self.b1[3], self.b1[4], self.oids1, self.t1) expected = 2.66666666667 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[3], self.b1[5], self.oids1, self.t1) expected = 1.33333333333 self.assertAlmostEqual(actual, expected) # sample E versus remaining actual = self.weighted_unifrac( self.b1[4], self.b1[5], self.oids1, self.t1) expected = 4.0 self.assertAlmostEqual(actual, expected) def 
test_weighted_unifrac_identity_normalized(self): for i in range(len(self.b1)): actual = self.weighted_unifrac( self.b1[i], self.b1[i], self.oids1, self.t1, normalized=True) expected = 0.0 self.assertAlmostEqual(actual, expected) def test_weighted_unifrac_symmetry_normalized(self): for i in range(len(self.b1)): for j in range(len(self.b1)): actual = self.weighted_unifrac( self.b1[i], self.b1[j], self.oids1, self.t1, normalized=True) expected = self.weighted_unifrac( self.b1[j], self.b1[i], self.oids1, self.t1, normalized=True) self.assertAlmostEqual(actual, expected) def test_weighted_unifrac_non_overlapping_normalized(self): # these communities only share the root node actual = self.weighted_unifrac( self.b1[4], self.b1[5], self.oids1, self.t1, normalized=True) expected = 1.0 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( [1, 1, 1, 0, 0], [0, 0, 0, 1, 1], self.oids1, self.t1, normalized=True) expected = 1.0 self.assertAlmostEqual(actual, expected) def test_weighted_unifrac_normalized(self): # expected results derived from QIIME 1.9.1, which # is a completely different implementation skbio's initial # weighted unifrac implementation actual = self.weighted_unifrac( self.b1[0], self.b1[1], self.oids1, self.t1, normalized=True) expected = 0.6 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[0], self.b1[2], self.oids1, self.t1, normalized=True) expected = 0.466666666667 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[0], self.b1[3], self.oids1, self.t1, normalized=True) expected = 0.633333333333 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[0], self.b1[4], self.oids1, self.t1, normalized=True) expected = 0.338461538462 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[0], self.b1[5], self.oids1, self.t1, normalized=True) expected = 0.8 self.assertAlmostEqual(actual, expected) # sample B versus remaining actual = 
self.weighted_unifrac( self.b1[1], self.b1[2], self.oids1, self.t1, normalized=True) expected = 0.566666666667 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[1], self.b1[3], self.oids1, self.t1, normalized=True) expected = 0.233333333333 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[1], self.b1[4], self.oids1, self.t1, normalized=True) expected = 0.8 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[1], self.b1[5], self.oids1, self.t1, normalized=True) expected = 0.209375 self.assertAlmostEqual(actual, expected) # sample C versus remaining actual = self.weighted_unifrac( self.b1[2], self.b1[3], self.oids1, self.t1, normalized=True) expected = 0.333333333333 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[2], self.b1[4], self.oids1, self.t1, normalized=True) expected = 0.474358974359 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[2], self.b1[5], self.oids1, self.t1, normalized=True) expected = 0.666666666667 self.assertAlmostEqual(actual, expected) # sample D versus remaining actual = self.weighted_unifrac( self.b1[3], self.b1[4], self.oids1, self.t1, normalized=True) expected = 0.666666666667 self.assertAlmostEqual(actual, expected) actual = self.weighted_unifrac( self.b1[3], self.b1[5], self.oids1, self.t1, normalized=True) expected = 0.333333333333 self.assertAlmostEqual(actual, expected) # sample E versus remaining actual = self.weighted_unifrac( self.b1[4], self.b1[5], self.oids1, self.t1, normalized=True) expected = 1.0 self.assertAlmostEqual(actual, expected) class FaithPDEdgeCasesTests(unittest.TestCase): # These tests were mostly ported from skbio's # skbio/diversity/alpha/tests/test_fatih_pd.py at SHA-256 a8c086b # note that not all tests were kept since the APIs are different. 
    # Dotted package path used by test machinery to locate bundled data files.
    package = 'unifrac.tests'

    def write_table_tree(self, u_counts, otu_ids, sample_ids, tree):
        """Write a one-sample BIOM table and the tree to temp files.

        Returns the ``(table_path, tree_path)`` pair; both paths are
        registered in ``self.files_to_delete`` so tearDown removes them.
        """
        data = np.array([u_counts]).T
        bt = Table(data, otu_ids, sample_ids)

        ta = os.path.join(gettempdir(), 'table.biom')
        # NOTE(review): the tree is serialized with TreeNode.write (Newick),
        # so the '.biom' extension here is misleading but harmless.
        tr = os.path.join(gettempdir(), 'tree.biom')

        self.files_to_delete.append(ta)
        self.files_to_delete.append(tr)

        with biom_open(ta, 'w') as fhdf5:
            bt.to_hdf5(fhdf5, 'Table for unit testing')
        tree.write(tr)

        return ta, tr

    def faith_pd_work(self, u_counts, otu_ids, sample_ids, tree):
        """Round-trip helper: write inputs to disk, then run faith_pd on them."""
        ta, tr = self.write_table_tree(u_counts, otu_ids, sample_ids, tree)
        return faith_pd(ta, tr)

    def setUp(self):
        # Small fixture: 4 samples (A-D) over 5 OTUs, plus two Newick trees
        # (t1, and t1 with extra unobserved tips hanging off OTU5's branch).
        self.counts = np.array([0, 1, 1, 4, 2, 5, 2, 4, 1, 2])
        self.b1 = np.array([[1, 3, 0, 1, 0],
                            [0, 2, 0, 4, 4],
                            [0, 0, 6, 2, 1],
                            [0, 0, 1, 1, 1]])
        self.sids1 = list('ABCD')
        self.oids1 = ['OTU%d' % i for i in range(1, 6)]
        self.t1 = TreeNode.read(StringIO(
            '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
            '0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
        self.t1_w_extra_tips = TreeNode.read(
            StringIO('(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     '0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
                     ')root;'))
        self.files_to_delete = []

    def tearDown(self):
        # Best-effort cleanup of the temp files created by write_table_tree.
        for f in self.files_to_delete:
            try:
                os.remove(f)
            except OSError:
                pass

    def test_faith_pd_zero_branches_omitted(self):
        # Tree variant where some internal branch lengths are omitted;
        # zero/missing-length branches must not contribute to PD.
        t2 = TreeNode.read(StringIO(
            '((OTU1:0.5,OTU2:0.5),(OTU3:1.0,(OTU4:0.5,'
            'OTU5:0.75):1.0):1.0)root;'
        ))
        actual = self.faith_pd_work([1, 1, 0, 0, 0], self.oids1, ['foo'], t2)
        expected = 1.0
        self.assertAlmostEqual(actual[0], expected)

    def test_faith_pd_none_observed(self):
        # No taxa observed -> PD is 0.
        actual = self.faith_pd_work([0, 0, 0, 0, 0], self.oids1, ['foo'],
                                    self.t1)
        expected = 0.0
        self.assertAlmostEqual(actual.values, expected)

    def test_faith_pd_biom_table_empty(self):
        # An empty table is rejected outright.
        table, tree = self.write_table_tree([], [], [], self.t1)
        self.assertRaises(ValueError, faith_pd, table, tree)

    def test_faith_pd_table_not_subset_tree(self):
        # A table OTU missing from the phylogeny raises a descriptive error.
        tree = TreeNode.read(StringIO('((OTU1:0.5,OTU3:1.0):1.0)root;'))
        table_ids = ['OTU1', 'OTU2']
        table, tree = self.write_table_tree([1, 0], table_ids, ['foo'], tree)

        expected_message = "The table does not appear to be completely "\
                           "represented by the phylogeny."
        with self.assertRaisesRegex(ValueError, expected_message):
            faith_pd(table, tree)

    def test_faith_pd_all_observed(self):
        # With every taxon observed, PD equals the sum of all branch lengths,
        # independent of the actual (nonzero) counts.
        actual = self.faith_pd_work([1, 1, 1, 1, 1], self.oids1, ['foo'],
                                    self.t1)
        expected = sum(n.length for n in self.t1.traverse()
                       if n.length is not None)
        self.assertAlmostEqual(actual.values, expected)

        actual = self.faith_pd_work([1, 2, 3, 4, 5], self.oids1, ['foo'],
                                    self.t1)
        expected = sum(n.length for n in self.t1.traverse()
                       if n.length is not None)
        self.assertAlmostEqual(actual.values, expected)

    def test_faith_pd(self):
        # expected results derived from QIIME 1.9.1, which
        # is a completely different implementation unifrac's initial
        # phylogenetic diversity implementation
        actual = self.faith_pd_work(self.b1[0], self.oids1, [self.sids1[0]],
                                    self.t1)
        expected = 4.5
        self.assertAlmostEqual(actual.values, expected)

        actual = self.faith_pd_work(self.b1[1], self.oids1, [self.sids1[1]],
                                    self.t1)
        expected = 4.75
        self.assertAlmostEqual(actual.values, expected)

        actual = self.faith_pd_work(self.b1[2], self.oids1, [self.sids1[2]],
                                    self.t1)
        expected = 4.75
        self.assertAlmostEqual(actual.values, expected)

        actual = self.faith_pd_work(self.b1[3], self.oids1, [self.sids1[3]],
                                    self.t1)
        expected = 4.75
        self.assertAlmostEqual(actual.values, expected)

    def test_faith_pd_extra_tips(self):
        # results are the same despite presences of unobserved tips in tree
        actual = self.faith_pd_work(self.b1[0], self.oids1, [self.sids1[0]],
                                    self.t1_w_extra_tips)
        expected = self.faith_pd_work(self.b1[0], self.oids1,
                                      [self.sids1[0]], self.t1)
        self.assertAlmostEqual(actual.values, expected.values)

        actual = self.faith_pd_work(self.b1[1], self.oids1, [self.sids1[1]],
                                    self.t1_w_extra_tips)
        expected = self.faith_pd_work(self.b1[1], self.oids1,
                                      [self.sids1[1]], self.t1)
        self.assertAlmostEqual(actual.values, expected.values)

        actual = self.faith_pd_work(self.b1[2], self.oids1, [self.sids1[2]],
                                    self.t1_w_extra_tips)
        expected = self.faith_pd_work(self.b1[2], self.oids1,
                                      [self.sids1[2]], self.t1)
        self.assertAlmostEqual(actual.values, expected.values)

        actual = self.faith_pd_work(self.b1[3], self.oids1, [self.sids1[3]],
                                    self.t1_w_extra_tips)
        expected = self.faith_pd_work(self.b1[3], self.oids1,
                                      [self.sids1[3]], self.t1)
        self.assertAlmostEqual(actual.values, expected.values)

    def test_faith_pd_minimal(self):
        # two tips
        tree = TreeNode.read(StringIO('(OTU1:0.25, OTU2:0.25)root;'))
        actual = self.faith_pd_work([1, 0], ['OTU1', 'OTU2'], ['foo'], tree)
        expected = 0.25
        self.assertEqual(actual.values, expected)

    def test_faith_pd_series_name(self):
        # The returned pandas Series is named after the metric.
        tree = TreeNode.read(StringIO('(OTU1:0.25, OTU2:0.25)root;'))
        actual = self.faith_pd_work([1, 0], ['OTU1', 'OTU2'], ['foo'], tree)
        self.assertEqual("faith_pd", actual.name)

    def test_faith_pd_root_not_observed(self):
        # expected values computed by hand
        tree = TreeNode.read(
            StringIO('((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
                     'root;'))
        otu_ids = ['OTU%d' % i for i in range(1, 5)]
        # root node not observed, but branch between (OTU1, OTU2) and root
        # is considered observed
        actual = self.faith_pd_work([1, 1, 0, 0], otu_ids, ['foo'], tree)
        expected = 0.6
        self.assertAlmostEqual(actual[0], expected)
        # root node not observed, but branch between (OTU3, OTU4) and root
        # is considered observed
        actual = self.faith_pd_work([0, 0, 1, 1], otu_ids, ['foo'], tree)
        expected = 2.3
        self.assertAlmostEqual(actual[0], expected)

    def test_faith_pd_invalid_input(self):
        # tests are based of skbio tests, checking for duplicate ids,
        # negative counts are not included but should be incorporated
        # tree has duplicated tip ids
        tree = TreeNode.read(
            StringIO('((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
                     'root;'))
        otu_ids = ['OTU%d' % i for i in range(1, 5)]
        u_counts = [1, 1, 0, 0]

        data = np.array([u_counts]).T
        bt = Table(data, otu_ids, ['u'])

        ta = os.path.join(gettempdir(), 'table.biom')
        tr = os.path.join(gettempdir(), 'tree.biom')

        self.files_to_delete.append(ta)
        self.files_to_delete.append(tr)

        with biom_open(ta, 'w') as fhdf5:
            bt.to_hdf5(fhdf5, 'Table for unit testing')
        tree.write(tr)

        # Nonexistent table or tree paths must raise IOError.
        self.assertRaises(IOError, faith_pd, 'dne.biom', tr)
        self.assertRaises(IOError, faith_pd, ta, 'dne.tre')


if __name__ == "__main__":
    unittest.main()
# TODO: Implement session state to control test and next button
# TODO: Show results in the plot
# TODO: Conditional coloring in the plot
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams

from . import equity

rcParams["font.family"] = "monospace"

# Matplotlib color code per card suit: spades/hearts/clubs/diamonds.
SUITS_COLORS = {"s": "k", "h": "r", "c": "g", "d": "b"}


def ellipse(theta):
    """ Ellipse shape.

    Polar radius of the table outline at angle ``theta`` (radians),
    for semi-axes a=2 (horizontal) and b=1 (vertical).

    NOTE(review): a true ellipse's polar form is
    a*b / sqrt((a*sin(t))**2 + (b*cos(t))**2), i.e. the squares apply to
    a*sin and b*cos.  Here the squares apply only to sin/cos, giving a
    flatter oval.  This is presumably intentional (it only shapes the
    decorative table) — confirm before "fixing", since changing it alters
    every seat position.
    """
    a = 2
    b = 1
    return a * b / np.sqrt((a * np.sin(theta) ** 2) + (b * np.cos(theta) ** 2))


def hand(
    n_seats,
    pot,
    hero_name,
    hero_hand,
    hero_chips,
    villains_names,
    villains_ranges,
    villains_chips,
):
    """ Hand viewer.

    Draw a poker table on a polar axis with the hero and villains seated
    around it, dealer button, blinds/antes, and the hero's hole cards.

    Parameters
    ----------
    n_seats : number of seats drawn around the table.
    pot : current pot in big blinds; antes shown are ``pot - 1.5``.
    hero_name, hero_chips : hero label and stack (in BB).
    hero_hand : 4-char string like "AsKd" (rank+suit, rank+suit) —
        sliced as [:2]/[2:] below, so exactly two 2-char cards are assumed.
    villains_names, villains_ranges, villains_chips : per-villain label,
        range percentage and stack.

    Returns
    -------
    matplotlib Figure with the rendered hand.
    """
    # Table line
    theta_arr = np.linspace(start=0, stop=2 * np.pi, num=100, endpoint=True)
    radius_arr = [ellipse(t) for t in theta_arr]

    # Seats coordinates.  Negated so seats are laid out clockwise.
    seats_theta = -1 * np.linspace(start=0, stop=2 * np.pi, num=n_seats, endpoint=False)
    seats_radius = [ellipse(t) for t in seats_theta]

    # Place players into the seats. (+ 1 because of the hero).
    players_theta = [seats_theta[i] for i in range(len(villains_names) + 1)]
    players_radius = [seats_radius[i] for i in range(len(villains_names) + 1)]

    # Create figure and polar axes.
    fig = plt.figure(figsize=(8, 4))
    ax = fig.add_axes([0.1, 0, 0.8, 1], polar=True)

    # Create table.
    ax.plot(theta_arr, radius_arr, color="gainsboro", linewidth=5)
    ax.fill_between(theta_arr, radius_arr, color="whitesmoke")
    ax.axis("off")

    # Get hero hand percentage.
    hero_descr = equity.hand_to_descr(hero_hand)
    hero_percent = equity.descr_to_percentage(hero_descr)

    # Concatenate hero and villains.  Hero is always seat 0.
    players_name = np.concatenate([[hero_name], villains_names])
    players_range = np.concatenate([[hero_percent], villains_ranges])
    players_chips = np.concatenate([[hero_chips], villains_chips])

    iterator = zip(
        players_name,
        players_range,
        players_chips,
        players_theta,
        players_radius,
    )

    # Players: one boxed "name range%\nchips BB" annotation per seat.
    for name, rng, chips, theta, radius in iterator:
        bbox_props = dict(boxstyle="Round", fc="whitesmoke", ec="gray")
        ax.annotate(
            f"{name} {rng:.0f}%\n{chips} BB",
            xy=(theta, radius),
            ha="center",
            va="center",
            bbox=bbox_props,
        )

    # How much the small blind will be moved to avoid overlapping.
    move_small_blind = 0

    # Dealer index.  Seats are counted backwards from the blinds:
    # -1 = big blind, -2 = small blind, -3 = button (when 3+ players).
    if n_seats == 2:
        dealer_idx = -2
        move_small_blind = -0.2  # Displace small blind to not overlap the button.
    elif len(players_name) == 2:
        # Heads-up on a bigger table: no separate button is drawn.
        dealer_idx = None
    else:
        dealer_idx = -3

    if dealer_idx is not None:
        bbox_props = dict(boxstyle="Circle", fc="pink", ec="k")
        ax.annotate(
            "D",
            xy=(players_theta[dealer_idx], 0.7 * players_radius[dealer_idx]),
            ha="center",
            va="center",
            bbox=bbox_props,
        )

    # Blinds and antes.  Antes = pot minus the 1.5 BB posted as blinds.
    ax.annotate(f"Antes\n{pot - 1.5} BB", xy=(0, 0), ha="center", va="center")
    ax.annotate(
        "1 BB",
        xy=(players_theta[-1], 0.7 * players_radius[-1]),
        ha="center",
        va="center",
    )
    ax.annotate(
        ".5 BB",
        xy=(players_theta[-2], (0.7 + move_small_blind) * players_radius[-2]),
        ha="center",
        va="center",
    )

    # Hero cards.  hero_hand is split into two rank+suit pairs.
    card0 = hero_hand[:2]
    card1 = hero_hand[2:]
    rank0 = card0[0]
    rank1 = card1[0]
    suit0 = card0[1]
    suit1 = card1[1]

    bbox_props = dict(boxstyle="Round", fc="w", ec="gray")
    hero_cards_kwargs = dict(
        va="center",
        bbox=bbox_props,
        fontsize="x-large",
        fontweight="bold",
        annotation_clip=False,
    )
    # Cards drawn just outside the table edge (radius factors > 1).
    ax.annotate(
        rank0,
        xy=(players_theta[0], 1.35 * players_radius[0]),
        ha="right",
        color=SUITS_COLORS[suit0],
        **hero_cards_kwargs,
    )
    ax.annotate(
        rank1,
        xy=(players_theta[0], 1.41 * players_radius[0]),
        ha="left",
        color=SUITS_COLORS[suit1],
        **hero_cards_kwargs,
    )

    return fig
# weather.py (repo: kellyhirano/flp)
#!/usr/bin/env python3

# MQTT-driven weather/air-quality display for a Four Letter pHAT:
# subscribes to weewx and PurpleAir topics and cycles readings on the
# 4-character LED display.

import configparser
import json
import time
import paho.mqtt.client as mqtt
import fourletterphat as flp

# Global for data storage; maps MQTT topic -> last decoded JSON payload.
g_mqtt_data = {}


def on_connect(client, userdata, flags, rc):
    """The callback for when the client receives a CONNACK from the server."""
    print("Connected with result code "+str(rc))

    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    client.subscribe([("weewx/sensor", 0),
                      ("purpleair/sensor", 0),
                      ("purpleair/last_hour", 0)])


def on_message(client, userdata, msg):
    """The callback for when a PUBLISH message is received from the server."""
    global g_mqtt_data

    print(msg.topic+" -> "+str(msg.payload.decode('UTF-8')))
    message_data = json.loads(str(msg.payload.decode('UTF-8')))
    g_mqtt_data[msg.topic] = message_data

    # Flash the rightmost decimal to show receipt of a message
    # (two 0.5s blinks separated by a 0.25s gap).
    flp.set_decimal(3, True)
    flp.show()
    time.sleep(.5)
    flp.set_decimal(3, False)
    flp.show()
    time.sleep(.25)
    flp.set_decimal(3, True)
    flp.show()
    time.sleep(.5)
    flp.set_decimal(3, False)
    flp.show()


def display_message(titles, numbers, show_title_at_end=False,
                    number_type='str', float_decimal_digits=1,
                    number_sleep=1, title_sleep=.5):
    """Display messaages with different timings for titles vs numbers.

    Shows each title for title_sleep seconds, then each number for
    number_sleep seconds, optionally repeating the titles at the end.

    NOTE(review): number_type and float_decimal_digits are accepted but
    never used — every number is rendered via str().  Kept for interface
    compatibility; confirm before removing.
    """
    for title in titles:
        flp.clear()
        flp.print_str(title)
        flp.show()
        time.sleep(title_sleep)

    for number in numbers:
        flp.clear()
        flp.print_number_str(str(number))
        flp.show()
        time.sleep(number_sleep)

    if (show_title_at_end):
        for title in titles:
            flp.clear()
            flp.print_str(title)
            flp.show()
            time.sleep(title_sleep)


# --- Script body: read broker config, connect, and loop forever. ---
config = configparser.ConfigParser()
config.read('mqtt.conf')
mqtt_host = config.get('ALL', 'mqtt_host')
mqtt_host_port = int(config.get('ALL', 'mqtt_host_port'))

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
# Async connect + background network loop; on_message fills g_mqtt_data.
client.connect_async(mqtt_host, mqtt_host_port, 60)
client.loop_start()

while(1):
    # Wait until at least one weewx reading has arrived.
    if ('weewx/sensor' not in g_mqtt_data):
        time.sleep(5)
        client.loop()
        continue

    temp = g_mqtt_data['weewx/sensor']['outdoor_temperature']
    temp_change = g_mqtt_data['weewx/sensor']['outdoor_temp_change']
    temp_change_24h = g_mqtt_data['weewx/sensor']['outdoor_24h_temp_change']
    rain_rate = g_mqtt_data['weewx/sensor']['rain_rate']
    wind_gust = g_mqtt_data['weewx/sensor']['wind_gust']

    # AQI values default to 0 until PurpleAir messages arrive.
    aqi = 0
    last_1hr_aqi = 0
    if ('purpleair/sensor' in g_mqtt_data):
        aqi = g_mqtt_data['purpleair/sensor']['st_aqi']

    if ('purpleair/last_hour' in g_mqtt_data):
        last_1hr_aqi = g_mqtt_data['purpleair/last_hour']['st_aqi']

    # Daytime (07:00-23:59): cycle through alerts and temperatures.
    current_hour = int(time.strftime("%H", time.localtime()))
    if (current_hour >= 7 and current_hour <= 23):
        if (aqi >= 100):
            display_message(['AQI'], [aqi, last_1hr_aqi])

        if (wind_gust >= 10):
            display_message(['GUST'], [wind_gust])

        if (rain_rate > 0):
            display_message(['RAIN', 'RATE'], [rain_rate])

        display_message(['TEMP'], [temp])
        display_message(['1H'], [temp_change])
        display_message([], [temp])
        display_message(['24H'], [temp_change_24h])
        display_message([], [temp])

    # Give the main display a rest at night and show a blinky pattern
    else:
        flp.clear()
        for i in range(4):
            flp.set_decimal(i, True)
            flp.show()
            time.sleep(.1)
            flp.set_decimal(i, False)
            flp.show()
        time.sleep(8)  # Total = 10s of sleep

    time.sleep(2)
# pyRdfa/options.py
# -*- coding: utf-8 -*-
"""
L{Options} class: collect the possible options that govern the parsing possibilities.

The module also includes the L{ProcessorGraph} class that handles the processor graph, per RDFa 1.1
(i.e., the graph containing errors and warnings).

@summary: RDFa parser (distiller)
@requires: U{RDFLib<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{<NAME><a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""

"""
$Id: options.py,v 1.5 2011/11/15 10:03:13 ivan Exp $ $Date: 2011/11/15 10:03:13 $
"""

import sys, datetime

import rdflib
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib import Namespace
# NOTE(review): this is a *lexicographic* version comparison; it is correct
# for the 2.x/3.x split it was written for but would misclassify e.g. "10.0".
if rdflib.__version__ >= "3.0.0":
    from rdflib import Graph
    from rdflib import RDF as ns_rdf
    from rdflib import RDFS as ns_rdfs
else:
    from rdflib.Graph import Graph
    from rdflib.RDFS import RDFSNS as ns_rdfs
    from rdflib.RDF import RDFNS as ns_rdf

from pyRdfa.host import HostLanguage, MediaTypes, content_to_host_language
from pyRdfa import ns_xsd, ns_distill, ns_rdfa
from pyRdfa import RDFA_Error, RDFA_Warning, RDFA_Info

ns_dc = Namespace("http://purl.org/dc/terms/")
ns_ht = Namespace("http://www.w3.org/2006/http#")


class ProcessorGraph:
    """Wrapper around the 'processor graph', ie, the (RDF) Graph containing the warnings,
    error messages, and informational messages.
    """
    def __init__(self):
        self.graph = Graph()
        self.graph.bind("dcterm", ns_dc)
        self.graph.bind("pyrdfa", ns_distill)
        self.graph.bind("rdf", ns_rdf)

    def add_triples(self, msg, top_class, info_class, context, node):
        """
        Add an error structure to the processor graph: a bnode with a number of predicates. The structure
        follows U{the processor graph vocabulary<http://www.w3.org/2010/02/rdfa/wiki/Processor_Graph_Vocabulary>}
        as described on the RDFa WG Wiki page.

        @param msg: the core error message, added as an object to a dc:description
        @param top_class: Error, Warning, or Info; an explicit rdf:type added to the bnode
        @type top_class: URIRef
        @param info_class: An additional error class, added as an rdf:type to the bnode in case it is not None
        @type info_class: URIRef
        @param context: An additional information added, if not None, as an object with rdfa:context as a predicate
        @type context: either an URIRef or a URI String (an URIRef will be created in the second case)
        @param node: The node's element name that contains the error
        @type node: string
        @return: the bnode that serves as a subject for the errors. The caller may add additional information
        @rtype: BNode
        """
        bnode = BNode()

        if node is not None:
            try:
                # DOM nodes carry the element name in .nodeName ...
                full_msg = "[In element '%s'] %s" % (node.nodeName, msg)
            except Exception:
                # ... otherwise fall back on the node's string representation.
                full_msg = "[In element '%s'] %s" % (node, msg)
        else:
            full_msg = msg

        self.graph.add((bnode, ns_rdf["type"], top_class))
        if info_class:
            self.graph.add((bnode, ns_rdf["type"], info_class))
        self.graph.add((bnode, ns_dc["description"], Literal(full_msg)))
        self.graph.add((bnode, ns_dc["date"],
                        Literal(datetime.datetime.utcnow().isoformat(), datatype=ns_xsd["dateTime"])))
        if context:
            if not isinstance(context, URIRef):
                context = URIRef(context)
            self.graph.add((bnode, ns_rdfa["context"], context))
        return bnode

    def add_http_context(self, subj, http_code):
        """
        Attach an HTTP response context (an ht:Response bnode with the status code)
        to an existing message node via rdfa:context.

        @param subj: subject node (typically the bnode returned by L{add_triples})
        @param http_code: HTTP status code to record
        """
        self.graph.bind("ht", ns_ht)
        bnode = BNode()
        self.graph.add((subj, ns_rdfa["context"], bnode))
        self.graph.add((bnode, ns_rdf["type"], ns_ht["Response"]))
        self.graph.add((bnode, ns_ht["responseCode"], URIRef("http://www.w3.org/2006/http#%s" % http_code)))


class Options:
    """Settable options. An instance of this class is stored in
    the L{execution context<ExecutionContext>} of the parser.

    @ivar space_preserve: whether plain literals should preserve spaces at output or not
    @type space_preserve: Boolean
    @ivar output_default_graph: whether the 'default' graph should be returned to the user
    @type output_default_graph: Boolean
    @ivar output_processor_graph: whether the 'processor' graph should be returned to the user
    @type output_processor_graph: Boolean
    @ivar processor_graph: the 'processor' Graph
    @type processor_graph: L{ProcessorGraph}
    @ivar transformers: extra transformers
    @type transformers: list
    @ivar vocab_cache_report: whether the details of vocabulary file caching process should be reported as information (mainly for debug)
    @type vocab_cache_report: Boolean
    @ivar bypass_vocab_cache: whether the caching checks of vocabs should be by-passed, ie, if caches should be generated regardless of the stored date (important for vocab development)
    @type bypass_vocab_cache: Boolean
    @ivar hturtle: whether hturtle (ie, turtle in an HTML script element) should be extracted and added to the final graph. This is a non-standard option...
    @type hturtle: Boolean
    @ivar vocab_expansion: whether the @vocab elements should be expanded and a mini-RDFS processing should be done on the merged graph
    @type vocab_expansion: Boolean
    @ivar vocab_cache: whether the system should use the vocabulary caching mechanism when expanding via the mini-RDFS, or should just fetch the graphs every time
    @type vocab_cache: Boolean
    @ivar host_language: the host language for the RDFa attributes. Default is HostLanguage.xhtml, but it can be HostLanguage.rdfa_core and HostLanguage.html, or others...
    @type host_language: integer (logically: an enumeration)
    @ivar content_type: the content type of the host file. Default is None
    @type content_type: string (logically: an enumeration)
    """
    def __init__(self, output_default_graph=True,
                 output_processor_graph=False,
                 space_preserve=True,
                 transformers=None,
                 vocab_cache_report=False,
                 bypass_vocab_cache=False,
                 hturtle=False,
                 vocab_expansion=False,
                 vocab_cache=False):
        """
        @keyword space_preserve: whether plain literals should preserve spaces at output or not
        @type space_preserve: Boolean
        @keyword output_default_graph: whether the 'default' graph should be returned to the user
        @type output_default_graph: Boolean
        @keyword output_processor_graph: whether the 'processor' graph should be returned to the user
        @type output_processor_graph: Boolean
        @keyword transformers: extra transformers
        @type transformers: list
        """
        self.space_preserve = space_preserve
        # BUGFIX: the default used to be a shared mutable list (``transformers=[]``);
        # every Options instance created with the default shared — and could
        # mutate — the very same list object.
        self.transformers = [] if transformers is None else transformers
        self.processor_graph = ProcessorGraph()
        self.output_default_graph = output_default_graph
        self.output_processor_graph = output_processor_graph
        self.host_language = HostLanguage.rdfa_core
        self.vocab_cache_report = vocab_cache_report
        self.bypass_vocab_cache = bypass_vocab_cache
        self.hturtle = hturtle
        self.vocab_expansion = vocab_expansion
        self.vocab_cache = vocab_cache

    def set_host_language(self, content_type):
        """
        Set the host language for processing, based on the recognized types. What this means is that everything is considered to be
        'core' RDFa, except if XHTML or HTML is used; indeed, no other language defined a deviation to core (yet...)

        @param content_type: content type
        @type content_type: string
        """
        if content_type in content_to_host_language:
            self.host_language = content_to_host_language[content_type]
        else:
            self.host_language = HostLanguage.rdfa_core

    def __str__(self):
        retval = """Current options:
    preserve space                 : %s
    output processor graph         : %s
    output default graph           : %s
    host language                  : %s
    accept embedded turtle         : %s
    perfom semantic postprocessing : %s
    cache vocabulary graphs        : %s
"""
        # BUGFIX: this previously formatted self.rdfa_sem, an attribute that is
        # never set anywhere (the constructor stores it as vocab_expansion), so
        # str(options) raised an AttributeError.
        return retval % (self.space_preserve, self.output_processor_graph, self.output_default_graph,
                         self.host_language, self.hturtle, self.vocab_expansion, self.vocab_cache)

    def reset_processor_graph(self):
        """Empty the processor graph. This is necessary if the same options is reused
        for several RDFa sources, and new error messages should be generated.
        """
        self.processor_graph.graph.remove((None, None, None))

    def add_warning(self, txt, warning_type=None, context=None, node=None):
        """Add a warning to the processor graph.
        @param txt: the warning text.
        @keyword warning_type: Warning Class
        @type warning_type: URIRef
        @keyword context: possible context to be added to the processor graph
        @type context: URIRef or String
        """
        return self.processor_graph.add_triples(txt, RDFA_Warning, warning_type, context, node)

    def add_info(self, txt, info_type=None, context=None, node=None):
        """Add an informational comment to the processor graph.
        @param txt: the information text.
        @keyword info_type: Info Class
        @type info_type: URIRef
        @keyword context: possible context to be added to the processor graph
        @type context: URIRef or String
        """
        return self.processor_graph.add_triples(txt, RDFA_Info, info_type, context, node)

    def add_error(self, txt, err_type=None, context=None, node=None):
        """Add an error to the processor graph.
        @param txt: the information text.
        @keyword err_type: Error Class
        @type err_type: URIRef
        @keyword context: possible context to be added to the processor graph
        @type context: URIRef or String
        """
        return self.processor_graph.add_triples(txt, RDFA_Error, err_type, context, node)
# project_code/Spec Fitting/post_proc_specfit.py (repo: e-koch/Phys-595)

'''
Post-process spectral line fitting results
'''

import numpy as np
from pandas import read_csv, Series, concat
import shutil


def concat_csvs(file_list, output_name, save=True):
    '''
    Concatenate csv files.

    All inputs must share the same "Unnamed: 0" index column; that column
    from the first file becomes the index of the combined frame.

    Parameters
    ----------
    file_list : list of csv file names to concatenate column-wise.
    output_name : csv file to write when ``save`` is True.
    save : bool, optional
        Write to ``output_name`` when True; otherwise return the DataFrame.
    '''
    data = [read_csv(file_nm) for file_nm in file_list]

    index = data[0]["Unnamed: 0"]

    for dat in data:
        del dat['Unnamed: 0']

    data = concat(data, ignore_index=False, axis=1)
    data.index = index

    if save:
        data.to_csv(output_name)
    else:
        return data


def blank_the_crap(filename, min_amp_sn=3, min_width_sn=3, num_lines=12,
                   remove_failed=True):
    '''
    Function to blank bad fits. Since the fits performed are highly
    restricted, bad fits are assumed to be non-detections.

    Along with the minimum S/N inputs, any error less than 0 is blanked.
    Writes the cleaned table (without the error columns) next to the
    input as ``*_cleaned.csv``.

    Parameters
    ----------
    filename : csv of fit results; columns are ordered as
        [amps..., widths..., amp_errs..., width_errs...], num_lines each.
    min_amp_sn, min_width_sn : minimum amplitude/width S/N to keep a fit.
    num_lines : number of fitted lines (4 parameter columns per line).
    remove_failed : bool, optional
        Uses spurious_blanking to remove small number of failed fits.
    '''
    data = read_csv(filename, index_col=0)

    df = data.copy()

    # BUGFIX: remove_failed was previously ignored and spurious_blanking was
    # applied unconditionally; honor the documented flag (default unchanged).
    if remove_failed:
        df = spurious_blanking(df)

    # num_lines fitted lines and 4 parameters per line.
    columns = df.columns
    assert len(columns) == num_lines * 4

    for i in range(num_lines):
        amp = df[columns[i]]
        width = df[columns[i+num_lines]]
        amp_err = df[columns[i+2*num_lines]]
        width_err = df[columns[i+3*num_lines]]

        # Remove fits where either error is 0
        bad_errs = \
            df.index[np.where(np.logical_or(amp_err <= 0.0,
                                            width_err <= 0.0))]

        bad_sn_amp = df.index[np.where(np.abs(amp / amp_err) < min_amp_sn)]

        bad_sn_width = \
            df.index[np.where(np.abs(width / width_err) < min_width_sn)]

        # A fit failing any criterion zeroes both amplitude and width.
        amp[bad_errs] = 0.0
        amp[bad_sn_amp] = 0.0
        amp[bad_sn_width] = 0.0

        width[bad_errs] = 0.0
        width[bad_sn_amp] = 0.0
        width[bad_sn_width] = 0.0

        df[columns[i]] = amp
        df[columns[i+num_lines]] = width

    # Save cleaned version without the error columns
    df.iloc[:, :2*num_lines].to_csv(filename[:-4] + "_cleaned.csv")


def make_weighted_df(df):
    '''
    Weight by the inverse squared of the errors.

    TODO: not yet implemented.
    '''
    pass


def spurious_blanking(df):
    '''
    A small number of spectra have various systematic problems. Most just
    show up as terrible fits, and are removed, but a few completely fail.
    The hard-coded spectra below have their affected line parameters zeroed.
    '''
    # Spectra with failed H-alpha / [NII] fits.
    bad_halp_nii = \
        [u'spec-0931-52619-0176', u'spec-1240-52734-0249',
         u'spec-2153-54212-0176', u'spec-2478-54097-0218',
         u'spec-2003-53442-0250', u'spec-2286-53700-0250']

    for spec in bad_halp_nii:
        df.ix[spec]['NIIa Amplitude'] = 0.0
        df.ix[spec]['Halp Amplitude'] = 0.0
        df.ix[spec]['NIIb Amplitude'] = 0.0
        df.ix[spec]['NIIa Width'] = 0.0
        df.ix[spec]['Halp Width'] = 0.0
        df.ix[spec]['NIIb Width'] = 0.0

    # Spectra with failed Ca H & K fits.
    bad_ca_hk = \
        [u'spec-1085-52531-0278', u'spec-1053-52468-0523',
         u'spec-1320-52759-0280', u'spec-2642-54232-0522']

    for spec in bad_ca_hk:
        df.ix[spec]['Ca H Amplitude'] = 0.0
        df.ix[spec]['Ca K Amplitude'] = 0.0
        df.ix[spec]['Ca H Width'] = 0.0
        df.ix[spec]['Ca K Width'] = 0.0

    return df


def pad_spec_name(name):
    '''
    Zero-pad the plate number of an SDSS spectrum name.

    Spectrum files are named ``spec-PLATE-MJD-FIBER`` with a 4-digit,
    zero-padded plate number; table indices may carry the unpadded form
    (e.g. ``spec-931-52619-0176``).

    Parameters
    ----------
    name : spectrum name, possibly with an unpadded plate field.

    Returns
    -------
    The name with the plate field left-padded with zeros to 4 digits.
    '''
    parts = name.split("-")
    # BUGFIX: this used to compute 4 - len(name.split("-")) — i.e. four minus
    # the number of '-'-separated fields (always 0 for spec-P-M-F names) —
    # so the padding never happened.  Pad the plate field itself instead.
    if len(parts) > 1 and len(parts[1]) < 4:
        parts[1] = parts[1].zfill(4)
    return "-".join(parts)


def collect_spectra(filename, path='anomalies/', verbose=True):
    '''
    Given a dataframe with an index of files, find those files and copy
    them to a new directory.

    Each spectrum is looked up under /mnt/samples_1 .. /mnt/samples_4 in
    turn; a TypeError is raised when the file exists in none of them.
    '''
    df = read_csv(filename)

    # Spectra names
    names = df['Unnamed: 0.1'].drop_duplicates()

    # The files could be in any of 4 places
    prefixes = ["samples_1/", "samples_2/", "samples_3/", "samples_4/"]

    for name in names:
        # Need to make sure the filename is right (plate zero-padded).
        name = pad_spec_name(name)

        # BUGFIX: the previous while-loop incremented past the last prefix
        # (prefixes[4] -> IndexError) before its i > 3 guard could fire.
        for prefix in prefixes:
            try:
                shutil.copy("/mnt/" + prefix + name + ".fits",
                            "/mnt/" + path)
                break
            except IOError:
                continue
        else:
            raise TypeError("Cannot find spectrum named: " + name)
# EMG-based workout weight estimation: parses logged sensor channels into
# per-rep feature vectors and grid-searches an SVR over C/gamma with
# K-fold cross-validation.

import numpy
import os

from pylab import plot, show, bar
from scipy import stats
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing

K_NUM_CHANNELS = 8   # sensor channels per sample row
K_SEC_PER_REP = 2    # bucket width in seconds (one rep per bucket)
K_SKIPPED_REPS = 2   # leading buckets discarded as warm-up
K_NUM_FOLDS = 7      # cross-validation folds


def extract_features_from_window(values):
    """Return the fixed 20-element feature vector for one window of samples.

    Features: order statistics / moments of the raw values, of their
    absolute values, and occupancy ratios of absolute-value bands.
    """
    abs_values = [abs(v) for v in values]
    num_samples = len(values)

    min_val = min(values)
    max_val = max(values)
    pt5_val = numpy.percentile(values, 5)
    pt95_val = numpy.percentile(values, 95)
    mean_val = numpy.mean(values)
    var_val = numpy.var(values)
    diff_mean = numpy.mean(abs(numpy.diff(values)))

    max_abs_val = max(abs_values)
    pt95_abs_val = numpy.percentile(abs_values, 95)
    pt75_abs_val = numpy.percentile(abs_values, 75)
    pt50_abs_val = numpy.percentile(abs_values, 50)
    pt25_abs_val = numpy.percentile(abs_values, 25)
    pt5_abs_val = numpy.percentile(abs_values, 5)
    mean_abs_val = numpy.mean(abs_values)
    # NOTE(review): computed over `values`, not `abs_values`, despite the
    # name; left untouched because changing it would alter the trained
    # feature space — confirm intent.
    var_abs_val = numpy.var(values)

    cnt0_40 = len([v for v in abs_values if v <= 40])
    cnt0_80 = len([v for v in abs_values if v <= 80])
    cnt40_80 = len([v for v in abs_values if 40 <= v and v <= 80])
    cnt80_120 = len([v for v in abs_values if 80 <= v and v <= 120])
    # NOTE(review): the name suggests v >= 125, but the code counts v <= 125;
    # left untouched for the same reason as above — confirm intent.
    cnt125_inf = len([v for v in abs_values if v <= 125])
    ratio0_40 = cnt0_40 / num_samples
    ratio0_80 = cnt0_80 / num_samples
    ratio40_80 = cnt40_80 / num_samples
    ratio80_120 = cnt80_120 / num_samples
    ratio125_inf = cnt125_inf / num_samples

    #vector.extend(list(numpy.absolute(numpy.fft.fft(values)[:12])))
    return [min_val, max_val, pt5_val, pt95_val, mean_val, var_val, diff_mean,
            max_abs_val, pt95_abs_val, pt75_abs_val, pt50_abs_val,
            pt25_abs_val, pt5_abs_val, mean_abs_val, var_abs_val,
            #num_samples,
            ratio0_40, ratio0_80, ratio40_80, ratio80_120, ratio125_inf]


def parser(paths, symbol):
    """Parse log files into per-set (vectors, labels) pairs.

    Each file is one set: rows "timestamp_ms,?,ch1..ch8" are bucketed into
    K_SEC_PER_REP-second reps, cleaned, and turned into feature vectors;
    every vector in a file is labeled with `symbol` (the weight).
    """
    ret_Xy = []
    for filepath in paths:
        vectors = []
        with open(filepath, "r") as f:
            lines = f.readlines()

        # preprocessing: split CSV rows, rebase timestamps to seconds
        splitted = [l.strip().split(",") for l in lines]
        timestamps = [float(l[0]) for l in splitted]
        start_timestamp = timestamps[0]
        timestamps = [(t - start_timestamp) * 1e-3 for t in timestamps]
        features = [list(map(int, l))[2:] for l in splitted]

        # put into buckets (one bucket per rep), tracking each bucket's
        # covered time span
        buckets = {}
        bucket_time = {}
        for time, feature_row in zip(timestamps, features):
            if len(feature_row) != K_NUM_CHANNELS:
                continue
            bucket_idx = int(time / K_SEC_PER_REP)
            if bucket_idx < K_SKIPPED_REPS:
                continue
            if bucket_idx not in buckets:
                buckets[bucket_idx] = []
                bucket_time[bucket_idx] = (time, time)
            buckets[bucket_idx].append(feature_row)
            st, et = bucket_time[bucket_idx]
            bucket_time[bucket_idx] = min(st, time), max(et, time)

        # clean the buckets a bit: drop reps with poor time coverage or
        # too few samples
        bucket_idxs_to_be_removed = []
        for bidx in buckets:
            is_small_coverage = (bucket_time[bidx][1]
                                 - bucket_time[bidx][0] < 1.5)
            is_insufficient_samples = (len(buckets[bidx]) < 100)
            if is_small_coverage or is_insufficient_samples:
                bucket_idxs_to_be_removed.append(bidx)
        for bidx in bucket_idxs_to_be_removed:
            buckets.pop(bidx, None)

        for bidx in buckets:
            cur_bucket = buckets[bidx]
            cur_vector = []

            # channel dependent features
            for j in range(K_NUM_CHANNELS):
                values = [row[j] for row in cur_bucket]
                cur_vector.extend(extract_features_from_window(values))

            # aggregate channel features (mean across channels per sample)
            values = [numpy.mean(row) for row in cur_bucket]
            cur_vector.extend(extract_features_from_window(values))

            vectors.append(cur_vector)

        labels = [symbol for _ in range(len(vectors))]
        ret_Xy.append((vectors, labels))
    return ret_Xy


def get_file_paths(folders):
    """Collect the 'log_2017*' file paths found in each folder."""
    ret = []
    for folder in folders:
        ret.extend([os.path.join(folder, fname)
                    for fname in os.listdir(folder)
                    if fname.startswith('log_2017')])
    return ret


def merge_folds_for_training_Xy(folds):
    """Flatten a list of {weight: [(X, y), ...]} folds into one (X, y)."""
    X, y = [], []
    for fold in folds:
        for weight in fold:
            for tX, tY in fold[weight]:
                X.extend(tX)
                y.extend(tY)
    return (X, y)


def decide_set_label(pred_labels, method='majority vote'):
    """Collapse per-rep predictions into a single set-level label.

    'majority vote' takes the mode; 'mean' averages and rounds to the
    nearest 10 (weights come in 10 lb steps).
    """
    if method == 'majority vote':
        return int(stats.mode(pred_labels)[0][0])
    elif method == 'mean':
        return int(round(numpy.mean(pred_labels), -1))
    else:
        raise Exception('Non-existed method')


def evaluate_cross_validation(classifier, folds_of_lb_2_Xy_sets):
    """K-fold cross-validate `classifier`, returning rep- and set-level stats.

    For each fold: fit a StandardScaler + the classifier on the other
    folds, then score the held-out fold.  A prediction within 5 lbs of
    ground truth counts as correct.

    Returns (avg_rep_accuracy, correct_test_reps, total_test_reps,
             avg_rep_weight_error,
             avg_set_accuracy, correct_test_sets, total_test_sets).
    """
    total_test_reps = 0
    correct_test_reps = 0
    rep_weight_errors = []
    total_test_sets = 0
    correct_test_sets = 0
    for fold_idx in range(K_NUM_FOLDS):
        training_folds = [folds_of_lb_2_Xy_sets[i]
                          for i in range(K_NUM_FOLDS) if i != fold_idx]
        X_train, y_train = merge_folds_for_training_Xy(training_folds)
        scaler = preprocessing.StandardScaler().fit(X_train)
        X_train_scale = scaler.transform(X_train)
        classifier.fit(X_train_scale, y_train)

        testing_fold = folds_of_lb_2_Xy_sets[fold_idx]
        for lb in testing_fold:
            for X_test, y_test in testing_fold[lb]:
                X_test_scale = scaler.transform(X_test)
                # BUGFIX: this previously called the *global* clf rather than
                # the `classifier` parameter — correct only by accident
                # because the caller passes that same global in.
                prediction = classifier.predict(X_test_scale)
                correct_cnt = sum([abs(gnd - pred) < 5.0
                                   for gnd, pred in zip(y_test, prediction)])
                weight_errors = [abs(gnd - pred)
                                 for gnd, pred in zip(y_test, prediction)]
                correct_test_reps += correct_cnt
                total_test_reps += len(y_test)
                rep_weight_errors.extend(weight_errors)

                total_test_sets += 1
                correct_test_sets += (abs(decide_set_label(prediction,
                                                           method='mean')
                                          - y_test[0]) < 5.0)
    avg_rep_accuracy = correct_test_reps / total_test_reps
    avg_rep_weight_error = numpy.mean(rep_weight_errors)
    avg_set_accuracy = correct_test_sets / total_test_sets
    return (avg_rep_accuracy, correct_test_reps, total_test_reps,
            avg_rep_weight_error,
            avg_set_accuracy, correct_test_sets, total_test_sets)


### folder root
root_folder = '../Data'

### dates
dates = [
    '0401',
    '0404',
    '0404_night',
    '0405',
]

### Knob of type and weights
workout_type = 'barbell_bicep'
#workout_type = 'barbell_hold'
weights = [20, 30, 40, 50, 60, 70]

#workout_type = 'tricep_extension_machine'
#weights = []

### Knob of data for training and testing
person = 'renju'
#person = 'bo'

# Load every (date, weight) folder that exists into weight -> sets-of-(X, y).
lb_2_Xy_sets = {}
for weight in weights:
    weight_str = str(weight) + 'lbs'
    folders = []
    for date in dates:
        folder = os.path.join(root_folder, date, workout_type, person,
                              weight_str)
        if os.path.isdir(folder):
            folders.append(folder)
    paths = get_file_paths(folders)
    lb_2_Xy_sets[weight] = parser(paths, weight)

    num_total_reps = 0
    for X, _ in lb_2_Xy_sets[weight]:
        num_total_reps += len(X)
    print('%d lbs: %d sets, %d reps'
          % (weight, len(lb_2_Xy_sets[weight]), num_total_reps))

# Distribute each weight's sets round-robin-by-position into K folds.
folds_of_lb_2_Xy_sets = [{} for _ in range(K_NUM_FOLDS)]
for weight in weights:
    for fidx in range(K_NUM_FOLDS):
        folds_of_lb_2_Xy_sets[fidx][weight] = []
    Xy_sets = lb_2_Xy_sets[weight]
    for i, Xy in enumerate(Xy_sets):
        fold_idx = K_NUM_FOLDS * i // len(Xy_sets)
        folds_of_lb_2_Xy_sets[fold_idx][weight].append(Xy)

# Grid-search C and gamma for an RBF SVR, tracking the best rep-level and
# set-level configurations separately.
best_rep_accuracy, best_rep_weight_error, best_rep_params = -1, None, None
best_set_accuracy, best_set_params = -1, None
for p_C in [2**e for e in range(-8, 11)]:
    for p_gamma in [2**e for e in range(-8, 11)]:
        clf = svm.SVR(C=p_C, gamma=p_gamma)
        (avg_rep_accuracy, correct_test_reps, total_test_reps,
         avg_rep_weight_error,
         avg_set_accuracy, correct_test_sets,
         total_test_sets) = evaluate_cross_validation(
            clf, folds_of_lb_2_Xy_sets)
        print('C=%12f, gamma=%12f: rep accuracy=%3d/%3d (%f), rep weight error=%4.1f, set accuracy=%3d/%3d (%f)' % (
            p_C, p_gamma,
            correct_test_reps, total_test_reps, avg_rep_accuracy,
            avg_rep_weight_error,
            correct_test_sets, total_test_sets, avg_set_accuracy))

        params_str = 'Clf=SVR.rbf, C=%f, gamma=%f' % (p_C, p_gamma)
        if avg_rep_accuracy > best_rep_accuracy:
            best_rep_accuracy, best_rep_weight_error, best_rep_params = (
                avg_rep_accuracy, avg_rep_weight_error, params_str)
        if avg_set_accuracy > best_set_accuracy:
            best_set_accuracy, best_set_params = avg_set_accuracy, params_str

print('Final result rep: %s, accuracy=%f, weight error=%.1f' % (
    best_rep_params, best_rep_accuracy, best_rep_weight_error))
print('Final result sep: %s, accuracy=%f' % (
    best_set_params, best_set_accuracy))
###############################################################################
# Author: <NAME>
# Project: Multi-task Match Tensor: a Deep Relevance Model for Search
# Date Created: 7/29/2017
#
# File Description: This script contains code related to the sequence-to-sequence
# network.
###############################################################################

import torch, helper
import torch.nn as nn
import torch.nn.functional as f
from nn_layer import EmbeddingLayer, Encoder, ExactMatchChannel, EncoderCell, DecoderCell


class MatchTensor(nn.Module):
    """Multi-task match-tensor model.

    `forward` returns two losses: a click loss from ranking candidate documents
    against each session query (convolutional match tensor + exact-match channel),
    and a decoding loss from recommending the next query in the session with a
    session-level encoder/decoder.
    """

    def __init__(self, dictionary, embedding_index, args):
        """Constructor of the class.

        :param dictionary: vocabulary; its length sizes the embedding and decoder.
        :param embedding_index: pre-trained embeddings used to initialise the lookup table.
        :param args: configuration object (emsize, featsize, nhid_*, nchannels,
            nfilters, match_filter_size, bidirection, model, regularize, ...).
        """
        super(MatchTensor, self).__init__()
        self.dictionary = dictionary
        self.embedding_index = embedding_index
        self.config = args
        # Bidirectional encoders double the hidden size seen by the projections.
        self.num_directions = 2 if self.config.bidirection else 1

        self.embedding = EmbeddingLayer(len(self.dictionary), self.config)
        self.embedding.init_embedding_weights(self.dictionary, self.embedding_index, self.config.emsize)

        self.linear_projection = nn.Linear(self.config.emsize, self.config.featsize)
        self.query_encoder = Encoder(self.config.featsize, self.config.nhid_query, True, self.config)
        self.document_encoder = Encoder(self.config.featsize, self.config.nhid_doc, True, self.config)
        # Project query/doc encoder states into a common channel space for matching.
        self.query_projection = nn.Linear(self.config.nhid_query * self.num_directions, self.config.nchannels)
        self.document_projection = nn.Linear(self.config.nhid_doc * self.num_directions, self.config.nchannels)

        self.exact_match_channel = ExactMatchChannel()
        # Three parallel convolutions with widths 3/5/7 over the (nchannels + 1)-deep
        # match tensor; padding keeps the spatial size unchanged.
        self.conv1 = nn.Conv2d(self.config.nchannels + 1, self.config.nfilters, (3, 3), padding=1)
        self.conv2 = nn.Conv2d(self.config.nchannels + 1, self.config.nfilters, (3, 5), padding=(1, 2))
        self.conv3 = nn.Conv2d(self.config.nchannels + 1, self.config.nfilters, (3, 7), padding=(1, 3))
        self.relu = nn.ReLU()
        # 1x1 convolution fusing the three filter banks into match_filter_size maps.
        self.conv = nn.Conv2d(self.config.nfilters * 3, self.config.match_filter_size, (1, 1))
        self.output = nn.Linear(self.config.match_filter_size, 1)
        self.session_encoder = EncoderCell(self.config.nchannels, self.config.nhid_session, False, self.config)
        self.decoder = DecoderCell(self.config.emsize, self.config.nhid_session, len(dictionary), self.config)

    @staticmethod
    def compute_decoding_loss(logits, target, seq_idx, length, regularize):
        """
        Compute negative log-likelihood loss for a batch of predictions.
        :param logits: 2d tensor [batch_size x vocab_size]
        :param target: 1d tensor [batch_size]
        :param seq_idx: an integer represents the current index of the sequences
        :param length: 1d tensor [batch_size], represents each sequences' true length
        :param regularize: falsy to disable; otherwise a scalar weight on the
            entropy-style term logits.exp() * logits summed over the vocabulary.
        :return: total loss over the input mini-batch [autograd Variable] and number of loss elements
        """
        # Pick out -log p(target) per example, then zero positions past each
        # sequence's true length via the mask.
        losses = -torch.gather(logits, dim=1, index=target.unsqueeze(1)).squeeze()
        mask = helper.mask(length, seq_idx)  # mask: batch x 1
        losses = losses * mask.float()
        num_non_zero_elem = torch.nonzero(mask.data).size()
        if regularize:
            regularized_loss = logits.exp().mul(logits).sum(1).squeeze() * regularize
            loss = losses.sum() + regularized_loss.sum()
            if not num_non_zero_elem:
                # Empty size tuple => no active positions at this step.
                return loss, 0
            else:
                return loss, num_non_zero_elem[0]
        else:
            if not num_non_zero_elem:
                return losses.sum(), 0
            else:
                return losses.sum(), num_non_zero_elem[0]

    def forward(self, session_queries, session_query_length, rel_docs, rel_docs_length, doc_labels):
        """
        Forward function of the neural click model. Return average loss for a batch of sessions.
        :param session_queries: 3d tensor [batch_size x session_length x max_query_length]
        :param session_query_length: 2d tensor [batch_size x session_length]
        :param rel_docs: 4d tensor [batch_size x session_length x num_rel_docs_per_query x max_doc_length]
        :param rel_docs_length: 3d tensor [batch_size x session_length x num_rel_docs_per_query]
        :param doc_labels: 3d tensor [batch_size x session_length x num_rel_docs_per_query]
        :return: average loss over batch [autograd Variable]
        """
        # Fold the session dimension into the batch so queries/docs can be encoded flat.
        batch_queries = session_queries.view(-1, session_queries.size(-1))
        batch_docs = rel_docs.view(-1, *rel_docs.size()[2:])

        projected_queries = self.encode_query(batch_queries, session_query_length)  # (B*S) x L x H
        projected_docs = self.encode_document(batch_docs, rel_docs_length)
        score = self.document_ranker(projected_queries, projected_docs, batch_queries, batch_docs)
        click_loss = f.binary_cross_entropy_with_logits(score, doc_labels.view(-1, doc_labels.size(2)))

        # encoded_queries: batch_size x session_length x nhid_query
        # Max-pool over query tokens to get one vector per query for the session encoder.
        encoded_queries = projected_queries.max(1)[0].view(*session_queries.size()[:2], -1)
        decoding_loss = self.query_recommender(session_queries, session_query_length, encoded_queries)
        return click_loss, decoding_loss

    def query_recommender(self, session_queries, session_query_length, encoded_queries):
        """Teacher-forced next-query decoding loss for each session.

        :param session_queries: 3d tensor [batch_size x session_length x max_query_length]
        :param session_query_length: 2d tensor [batch_size x session_length]
        :param encoded_queries: 3d tensor of per-query vectors fed to the session encoder
        :return: decoding loss averaged over the number of contributing target tokens
        """
        # session level encoding
        sess_q_hidden = self.session_encoder.init_weights(encoded_queries.size(0))
        hidden_states, cell_states = [], []
        # loop over all the queries in a session
        for idx in range(encoded_queries.size(1)):
            # update session-level query encoder state using query representations
            sess_q_out, sess_q_hidden = self.session_encoder(encoded_queries[:, idx, :].unsqueeze(1), sess_q_hidden)
            # -1 stands for: only consider hidden states from the last layer
            if self.config.model == 'LSTM':
                # LSTM hidden state is an (h, c) pair; keep both.
                hidden_states.append(sess_q_hidden[0][-1])
                cell_states.append(sess_q_hidden[1][-1])
            else:
                hidden_states.append(sess_q_hidden[-1])

        hidden_states = torch.stack(hidden_states, 1)
        # remove the last hidden states which stand for the last queries in sessions
        hidden_states = hidden_states[:, :-1, :].contiguous().view(-1, hidden_states.size(-1)).unsqueeze(0)
        if self.config.model == 'LSTM':
            cell_states = torch.stack(cell_states, 1)
            cell_states = cell_states[:, :-1, :].contiguous().view(-1, cell_states.size(-1)).unsqueeze(0)
            # Initialize hidden states of decoder with the last hidden states of the session encoder
            decoder_hidden = (hidden_states, cell_states)
        else:
            # Initialize hidden states of decoder with the last hidden states of the session encoder
            decoder_hidden = hidden_states

        embedded_queries = self.embedding(session_queries.view(-1, session_queries.size(-1)))
        # train the decoder for all the queries in a session except the last
        embedded_queries = embedded_queries.view(*session_queries.size(), -1)
        decoder_input = embedded_queries[:, 1:, :, :].contiguous().view(-1, *embedded_queries.size()[2:])
        decoder_target = session_queries[:, 1:, :].contiguous().view(-1, session_queries.size(-1))
        target_length = session_query_length[:, 1:].contiguous().view(-1)

        decoding_loss, total_local_decoding_loss_element = 0, 0
        for idx in range(decoder_input.size(1) - 1):
            # Feed token idx, predict token idx + 1 (teacher forcing).
            input_variable = decoder_input[:, idx, :].unsqueeze(1)
            decoder_output, decoder_hidden = self.decoder(input_variable, decoder_hidden)
            local_loss, num_local_loss = self.compute_decoding_loss(decoder_output, decoder_target[:, idx + 1], idx,
                                                                    target_length, self.config.regularize)
            decoding_loss += local_loss
            total_local_decoding_loss_element += num_local_loss

        if total_local_decoding_loss_element > 0:
            decoding_loss = decoding_loss / total_local_decoding_loss_element
        return decoding_loss

    def document_ranker(self, projected_queries, projected_docs, batch_queries, batch_docs):
        """Score each (query, document) pair via the convolutional match tensor.

        Builds the elementwise query x doc interaction tensor, appends the exact-match
        channel, convolves, max-pools, and projects to one logit per document.
        :return: 2d tensor of logits shaped like batch_docs.size()[:2]
        """
        # step6: 2d product between projected query and doc vectors
        # Broadcast every query over its candidate documents before multiplying.
        projected_queries = projected_queries.unsqueeze(1).expand(projected_queries.size(0), batch_docs.size(1),
                                                                  *projected_queries.size()[1:])
        projected_queries = projected_queries.contiguous().view(-1, *projected_queries.size()[2:])
        projected_docs = projected_docs.view(-1, batch_docs.size(2), projected_docs.size()[-1])

        projected_queries = projected_queries.unsqueeze(2).expand(*projected_queries.size()[:2], batch_docs.size()[-1],
                                                                  projected_queries.size(2))
        projected_docs = projected_docs.unsqueeze(1).expand(projected_docs.size(0), batch_queries.size()[-1],
                                                            *projected_docs.size()[1:])
        query_document_product = projected_queries * projected_docs

        # step7: append exact match channel
        exact_match = self.exact_match_channel(batch_queries, batch_docs).unsqueeze(3)
        query_document_product = torch.cat((query_document_product, exact_match), 3)
        # Move channels to dim 1 as nn.Conv2d expects (N, C, H, W).
        query_document_product = query_document_product.transpose(2, 3).transpose(1, 2)

        # step8: run the convolutional operation, max-pooling and linear projection
        convoluted_feat1 = self.conv1(query_document_product)
        convoluted_feat2 = self.conv2(query_document_product)
        convoluted_feat3 = self.conv3(query_document_product)
        convoluted_feat = self.relu(torch.cat((convoluted_feat1, convoluted_feat2, convoluted_feat3), 1))
        convoluted_feat = self.conv(convoluted_feat).transpose(1, 2).transpose(2, 3)

        # Max-pool over both spatial dimensions, then score with the linear output layer.
        max_pooled_feat = torch.max(convoluted_feat, 2)[0].squeeze()
        max_pooled_feat = torch.max(max_pooled_feat, 1)[0].squeeze()
        return self.output(max_pooled_feat).squeeze().view(*batch_docs.size()[:2])

    def encode_query(self, batch_queries, session_query_length):
        """Embed, project and RNN-encode a flat batch of queries.

        :param batch_queries: 2d tensor [num_queries x max_query_length]
        :param session_query_length: true lengths, flattened for the encoder
        :return: per-token query representations projected to nchannels
        """
        # step1: apply embedding lookup
        embedded_queries = self.embedding(batch_queries)
        # step2: apply linear projection on embedded queries and documents
        embedded_queries = self.linear_projection(embedded_queries.view(-1, embedded_queries.size(-1)))
        # step3: transform the tensors so that they can be given as input to RNN
        embedded_queries = embedded_queries.view(*batch_queries.size(), self.config.featsize)
        # step4: pass the encoded query and doc through a bi-LSTM
        encoded_queries = self.query_encoder(embedded_queries, session_query_length.view(-1).data.cpu().numpy())
        # step5: apply linear projection on query hidden states
        projected_queries = self.query_projection(encoded_queries.view(-1, encoded_queries.size()[-1])).view(
            *batch_queries.size(), -1)
        return projected_queries

    def encode_document(self, batch_docs, rel_docs_length):
        """Embed, project and RNN-encode a flat batch of documents.

        Mirrors `encode_query` using the document encoder/projection; the result is
        left flat ([num_docs * max_doc_length x nchannels]) for `document_ranker`.
        """
        # step1: apply embedding lookup
        embedded_docs = self.embedding(batch_docs.view(-1, batch_docs.size(-1)))
        # step2: apply linear projection on embedded queries and documents
        embedded_docs = self.linear_projection(embedded_docs.view(-1, embedded_docs.size(-1)))
        # step3: transform the tensors so that they can be given as input to RNN
        embedded_docs = embedded_docs.view(-1, batch_docs.size()[-1], self.config.featsize)
        # step4: pass the encoded query and doc through a bi-LSTM
        encoded_docs = self.document_encoder(embedded_docs, rel_docs_length.view(-1).data.cpu().numpy())
        # step5: apply linear projection on query hidden states
        projected_docs = self.document_projection(encoded_docs.view(-1, encoded_docs.size()[-1]))
        return projected_docs
""" .. codeauthor:: <NAME> <<EMAIL>> """ import itertools import pytest from pathvalidate import ascii_symbols, replace_symbol, unprintable_ascii_chars, validate_symbol from pathvalidate._symbol import validate_unprintable from pathvalidate.error import ErrorReason, ValidationError from ._common import alphanum_chars class Test_validate_symbol: VALID_CHARS = alphanum_chars INVALID_CHARS = ascii_symbols @pytest.mark.parametrize( ["value"], [["abc" + valid_char + "hoge123"] for valid_char in VALID_CHARS] ) def test_normal(self, value): validate_symbol(value) @pytest.mark.parametrize(["value"], [["あいうえお"], ["シート"]]) def test_normal_multibyte(self, value): pytest.skip("TODO") validate_symbol(value) @pytest.mark.parametrize( ["value"], [ ["abc" + invalid_char + "hoge123"] for invalid_char in INVALID_CHARS + unprintable_ascii_chars ], ) def test_exception_invalid_char(self, value): with pytest.raises(ValidationError) as e: validate_symbol(value) assert e.value.reason == ErrorReason.INVALID_CHARACTER class Test_replace_symbol: TARGET_CHARS = ascii_symbols NOT_TARGET_CHARS = alphanum_chars REPLACE_TEXT_LIST = ["", "_"] @pytest.mark.parametrize( ["value", "replace_text", "expected"], [ ["A" + c + "B", rep, "A" + rep + "B"] for c, rep in itertools.product(TARGET_CHARS, REPLACE_TEXT_LIST) ] + [ ["A" + c + "B", rep, "A" + c + "B"] for c, rep in itertools.product(NOT_TARGET_CHARS, REPLACE_TEXT_LIST) ] + [["", "", ""]], ) def test_normal(self, value, replace_text, expected): assert replace_symbol(value, replace_text) == expected @pytest.mark.parametrize( ["value", "exclude_symbols", "expected"], [ ["/tmp/h!o|g$e.txt", ["/", "."], "/tmp/hoge.txt"], ["/tmp/h!o|g$e.txt", [], "tmphogetxt"], ["/tmp/h!o|g$e.txt", ["n", "o", "p"], "tmphogetxt"], ], ) def test_normal_exclude_symbols(self, value, exclude_symbols, expected): assert replace_symbol(value, exclude_symbols=exclude_symbols) == expected @pytest.mark.parametrize( ["value", "replace_text", "is_replace_consecutive_chars", 
"is_strip", "expected"], [ ["!a##b$$$c((((d]]]])", "_", True, True, "a_b_c_d"], ["!a##b$$$c((((d]]]])", "_", True, False, "_a_b_c_d_"], ["!a##b$$$c((((d]]]])", "_", False, True, "a__b___c____d"], ["!a##b$$$c((((d]]]])", "_", False, False, "_a__b___c____d_____"], ], ) def test_normal_consecutive( self, value, replace_text, is_replace_consecutive_chars, is_strip, expected ): assert ( replace_symbol( value, replace_text, is_replace_consecutive_chars=is_replace_consecutive_chars, is_strip=is_strip, ) == expected ) @pytest.mark.parametrize( ["value", "expected"], [[None, TypeError], [1, TypeError], [True, TypeError]] ) def test_abnormal(self, value, expected): with pytest.raises(expected): replace_symbol(value) class Test_validate_unprintable: VALID_CHARS = alphanum_chars INVALID_CHARS = unprintable_ascii_chars @pytest.mark.parametrize( ["value"], [["abc" + valid_char + "hoge123"] for valid_char in VALID_CHARS] ) def test_normal(self, value): validate_unprintable(value) @pytest.mark.parametrize(["value"], [["あいうえお"], ["シート"]]) def test_normal_multibyte(self, value): pytest.skip("TODO") validate_unprintable(value) @pytest.mark.parametrize( ["value"], [ ["abc" + invalid_char + "hoge123"] for invalid_char in INVALID_CHARS + unprintable_ascii_chars ], ) def test_exception_invalid_char(self, value): with pytest.raises(ValidationError) as e: validate_unprintable(value) assert e.value.reason == ErrorReason.INVALID_CHARACTER
from discord import Member

from Util import Translator, MessageUtils, Utils, Emoji


class ActionFailed(Exception):
    """Raised by an action handler to abort with a user-facing failure message."""

    def __init__(self, message) -> None:
        super().__init__()
        self.message = message


async def act(ctx, name, target, handler, allow_bots=True, require_on_server=True, send_message=True,
              check_bot_ability=True, **kwargs):
    """Resolve ``target`` to a user, run permission checks, then invoke ``handler``.

    :param name: action name used for translation keys (e.g. 'ban').
    :param handler: awaitable called as ``handler(ctx, user, **kwargs)``.
    :return: ``(success, error_message)``; ``error_message`` is None on success.
    """
    user = await Utils.get_member(ctx.bot, ctx.guild, target)
    if user is None:
        if require_on_server:
            message = Translator.translate('user_not_on_server', ctx.guild.id)
            if send_message:
                await ctx.send(f"{Emoji.get_chat_emoji('NO')} {message}")
            return False, message
        else:
            # Fall back to the bot's user cache when membership isn't required.
            user = ctx.bot.get_user(target)
    if user is None and not require_on_server:
        # Last resort: fetch the user from the API.
        user = await Utils.get_user(target)
        if user is None:
            return False, "Unknown user"
    allowed, message = can_act(name, ctx, user, require_on_server=require_on_server, action_bot=allow_bots,
                               check_bot_ability=check_bot_ability)
    if allowed:
        try:
            await handler(ctx, user, **kwargs)
            return True, None
        except ActionFailed as ex:
            return False, ex.message
    else:
        if send_message:
            await ctx.send(f"{Emoji.get_chat_emoji('NO')} {message}")
        return False, message


async def mass_action(ctx, name, targets, handler, allow_duplicates=False, allow_bots=True, max_targets=None,
                      require_on_server=True, **kwargs):
    """Run ``act`` for every target, collecting per-target failure messages.

    :return: list of "<target>: <reason>" strings for failed targets, or None when
        the request was rejected outright for exceeding ``max_targets``.
    """
    if max_targets is not None and len(targets) > max_targets:
        await MessageUtils.send_to(ctx, "NO", "mass_action_too_many_people", max=max_targets)
        return
    failed = []
    handled = set()
    # BUG FIX: use .get() — callers that never pass dm_action used to hit a KeyError here.
    if kwargs.get("dm_action") and len(targets) > 5:
        await MessageUtils.send_to(ctx, "NO", "mass_action_too_many_people_dm", max=5)
        kwargs["dm_action"] = False
    for target in targets:
        if not allow_duplicates and target in handled:
            failed.append(f"{target}: {Translator.translate('mass_action_duplicate', ctx)}")
        else:
            done, error = await act(ctx, name, target, handler, allow_bots, require_on_server=require_on_server,
                                    send_message=False, **kwargs)
            if not done:
                failed.append(f"{target}: {error}")
            else:
                handled.add(target)
    return failed


def can_act(action, ctx, user, require_on_server=True, action_bot=True, check_bot_ability=True):
    """Decide whether ``ctx.author`` may perform ``action`` on ``user``.

    :return: ``(allowed, reason)``; ``reason`` is a translated message when denied.
    """
    is_member = isinstance(user, Member)
    if not require_on_server and not is_member:
        # Off-server targets carry no role hierarchy to check.
        return True, None
    if (not is_member) and require_on_server:
        return False, Translator.translate("user_not_on_server", ctx.guild.id)
    # The bot itself must outrank the target to be able to act on them.
    if check_bot_ability and user.top_role >= ctx.guild.me.top_role:
        return False, Translator.translate(f'{action}_unable', ctx.guild.id, user=Utils.clean_user(user))
    # BUG FIX: check the bot restriction BEFORE granting permission; previously a bot
    # below the author in the role hierarchy slipped past allow_bots=False because the
    # hierarchy check returned True first.
    if user.bot and not action_bot:
        return False, Translator.translate(f"cant_{action}_bot", ctx.guild.id, user=user)
    # Author must outrank the target (or be the guild owner), and may never target
    # the guild owner, the bot, or themselves.
    if ((ctx.author != user and ctx.author.top_role > user.top_role) or (
            ctx.guild.owner == ctx.author)) and user != ctx.guild.owner and user != ctx.bot.user and ctx.author != user:
        return True, None
    return False, Translator.translate(f'{action}_not_allowed', ctx.guild.id, user=user)
<gh_stars>1-10 import random # stolen from https://github.com/arizonatribe/word-generator class Words: words = { "nouns": [ "aardvark", "aardwolf", "ability", "abroad", "abuse", "accentor", "access", "accident", "account", "act", "action", "active", "activity", "actor", "ad", "addax", "addition", "address", "administration", "adult", "advance", "advantage", "advertising", "advice", "affair", "affect", "african buffalo", "african wild ass", "african wild dog", "afternoon", "agama", "age", "agency", "agent", "agouti", "agreement", "air", "airline", "airport", "alarm", "albatross", "alcohol", "alligator", "alpaca", "alternative", "ambition", "american black bear", "american sparrow", "amount", "amur leopard", "anaconda", "analysis", "analyst", "andean mountain cat", "anemone", "angelfish", "anger", "angle", "anhinga", "animal", "annual", "anoa", "anole", "answer", "ant", "anteater", "antelope", "anxiety", "anybody", "anything", "anywhere", "apartment", "appeal", "appearance", "apple", "application", "appointment", "archerfish", "arctic fox", "area", "argument", "arm", "armadillo", "army", "arowana", "arrival", "art", "article", "asian black bear", "aside", "ask", "aspect", "assignment", "assist", "assistance", "assistant", "associate", "association", "assumption", "atmosphere", "attack", "attempt", "attention", "attitude", "audience", "auk", "author", "average", "avocet", "award", "awareness", "axolotl", "aye aye", "babirusa", "baboon", "baby", "back", "background", "bad", "badger", "bag", "bake", "balance", "bald eagle", "ball", "band", "bandicoot", "bank", "banteng", "bar", "barbet", "base", "baseball", "basilisk", "basis", "basket", "bat", "bat", "batfish", "bath", "bathroom", "battle", "beach", "bear", "bear", "bearded dragon", "beat", "beautiful", "beaver", "bed", "bed bug", "bedroom", "bee", "bee eater", "beer", "beetle", "beginning", "being", "bell", "belt", "bench", "bend", "benefit", "bet", "betta", "bettong", "beyond", "bicycle", "bid", "big", "bigeyes", 
"bike", "bilby", "bill", "binturong", "bird", "bird", "bird of paradise", "birth", "birthday", "bison", "bit", "bite", "bitter", "bitterling", "bittern", "black", "black footed cat", "black footed ferret", "blackdevil", "blame", "blank", "blind", "block", "blood", "blow", "blue", "blue sheep", "blue whale", "bluebird", "boa", "board", "boat", "bobcat", "body", "bone", "bongo", "bonus", "booby", "book", "boot", "border", "boss", "bother", "bottle", "bottom", "bowerbird", "bowl", "box", "boy", "boyfriend", "brain", "branch", "brave", "bread", "break", "breakfast", "breast", "breath", "brick", "bridge", "brief", "brilliant", "broad", "broadbill", "brother", "brown", "brown bear", "brush", "buddy", "budget", "budgie", "bug", "building", "bulbul", "bull", "bullfrog", "bunch", "bunny", "bunting", "burn", "bus", "bush dog", "bushbaby", "bushshrike", "business", "bustard", "butterfly", "button", "buy", "buyer", "buzzard", "cabinet", "cable", "caecilian", "cake", "calendar", "call", "calm", "camel", "camera", "camp", "campaign", "can", "cancel", "cancer", "candidate", "candle", "candy", "cap", "capital", "capybara", "car", "caracal", "caracara", "card", "cardinal", "cardinalfish", "care", "career", "caribou", "carp", "carpet", "carry", "case", "cash", "cassowary", "cat", "cat", "catch", "category", "caterpillar", "catfish", "cattle", "cause", "cavy", "celebration", "cell", "centipede", "chain", "chair", "challenge", "chameleon", "chamois", "champion", "championship", "chance", "change", "channel", "chapter", "character", "charge", "charity", "chart", "chat", "check", "cheek", "cheetah", "chemical", "chemistry", "chest", "chicken", "chicken", "child", "childhood", "chimaera", "chimpanzee", "chinchilla", "chip", "chipmunk", "chocolate", "choice", "chough", "chuckwalla", "church", "cicada", "cichlid", "cigarette", "city", "civet", "claim", "clam", "class", "classic", "classroom", "clerk", "click", "client", "climate", "climbing mouse", "climbing perch", "clock", "closet", 
"clothes", "cloud", "clouded leopard", "clownfish", "club", "clue", "coach", "coast", "coat", "coati", "cobra", "cockatiel", "cockatoo", "cockroach", "code", "coffee", "cold", "collar", "collection", "college", "colugo", "combination", "combine", "comfort", "comfortable", "command", "comment", "commercial", "commission", "committee", "common", "common genet", "communication", "community", "company", "comparison", "competition", "complaint", "complex", "computer", "concentrate", "concept", "concern", "concert", "conch", "conclusion", "condition", "conference", "confidence", "conflict", "confusion", "connection", "consequence", "consideration", "consist", "constant", "construction", "contest", "context", "contract", "contribution", "control", "conversation", "convert", "cook", "cookie", "coot", "copperhead", "copy", "cormorant", "corner", "cost", "cotinga", "cotton rat", "cougar", "count", "counter", "country", "county", "couple", "courage", "course", "courser", "court", "cousin", "cover", "cow", "cow", "coyote", "coypu", "crab", "crack", "craft", "crane", "crane fly", "crash", "crayfish", "crazy", "cream", "creative", "credit", "crew", "cricket", "criticism", "crocodile", "cross", "crow", "cry", "cuckoo", "culpeo", "culture", "cup", "curlew", "currency", "current", "curve", "cuscus", "customer", "cut", "cuttlefish", "cycle", "damage", "dance", "dare", "dark", "dartfish", "dassie rat", "data", "database", "date", "daughter", "day", "dead", "deal", "dealer", "dear", "death", "death adder", "debate", "debt", "decision", "deep", "deer", "deer mouse", "definition", "degree", "degu", "delay", "delivery", "demand", "department", "departure", "dependent", "deposit", "depression", "depth", "description", "design", "designer", "desire", "desk", "detail", "development", "device", "devil", "dhole", "diamond", "dibbler", "diet", "difference", "difficulty", "dig", "dik dik", "dikkop", "dimension", "dimetrodon", "dingo", "dinner", "dinosaur", "dipper", "direction", "director", 
"dirt", "disaster", "discipline", "discount", "discus", "discussion", "disease", "dish", "disk", "display", "distance", "distribution", "district", "divide", "doctor", "document", "dodo", "dog", "dog", "dolphin", "donkey", "door", "dormouse", "dot", "double", "doubt", "dove", "draft", "drag", "dragonfly", "drama", "draw", "drawer", "drawing", "dream", "dress", "drink", "drive", "driver", "drongo", "drop", "drunk", "duck", "due", "dugong", "duiker", "dump", "dunnart", "dust", "duty", "eagle", "eagle ray", "ear", "earth", "ease", "east", "eat", "echidna", "economics", "economy", "edge", "editor", "education", "eel", "effect", "effective", "efficiency", "effort", "egg", "egret", "eider", "election", "electric eel", "electric ray", "elephant", "elephant bird", "elevator", "elk", "emergency", "emotion", "emphasis", "employ", "employee", "employer", "employment", "emu", "end", "energy", "engine", "engineer", "engineering", "entertainment", "enthusiasm", "entrance", "entry", "environment", "equal", "equipment", "equivalent", "ermine", "error", "escape", "essay", "establishment", "estate", "estimate", "evening", "event", "evidence", "exam", "examination", "example", "exchange", "excitement", "excuse", "exercise", "exit", "experience", "expert", "explanation", "expression", "extension", "extent", "external", "extreme", "eye", "face", "fact", "factor", "fail", "failure", "falcon", "fall", "familiar", "family", "fan", "farm", "farmer", "fat", "father", "fault", "fear", "feature", "fee", "feed", "feedback", "feel", "feeling", "female", "fennec fox", "ferret", "few", "field", "fight", "figure", "file", "fill", "film", "final", "finance", "finch", "finding", "finger", "finish", "fire", "fish", "fish", "fisher", "fishing", "fishing cat", "fix", "flamingo", "flat headed cat", "flea", "flight", "floor", "flow", "flower", "flowerpecker", "fly", "fly", "flying fish", "flying frog", "focus", "fold", "following", "food", "foot", "football", "force", "forever", "form", "formal", 
"fortune", "fossa", "foundation", "fox", "frame", "freedom", "friend", "friendship", "frigatebird", "frog", "frogmouth", "front", "fruit", "fuel", "fulmar", "fun", "function", "funeral", "funny", "future", "gain", "galago", "gallinule", "game", "gannet", "gap", "gar", "garage", "garbage", "garden", "garter snake", "gas", "gate", "gather", "gaur", "gazelle", "gear", "gecko", "gene", "general", "geoffroy's cat", "gerbil", "gerenuk", "giant panda", "giant tortoise", "gibbon", "gift", "gila monster", "giraffe", "girl", "girlfriend", "give", "glad", "glass", "glove", "gnu", "go", "goal", "goat", "goatfish", "god", "gold", "goldfish", "golf", "good", "goose", "gopher", "goral", "gorilla", "gourami", "government", "grab", "grackle", "grade", "grand", "grandfather", "grandmother", "grass", "grasshopper", "gray wolf", "great", "greater glider", "grebe", "green", "green iguana", "grison", "grizzly bear", "grocery", "ground", "groundhog", "group", "grouse", "growth", "guanaco", "guarantee", "guard", "guess", "guest", "guidance", "guide", "guinea pig", "guitar", "gull", "gundi", "guy", "habit", "hair", "half", "hall", "hamster", "hand", "handle", "hang", "harm", "harrier", "hartebeest", "hat", "hate", "hawaiian honeycreeper", "hawk", "head", "health", "hearing", "heart", "heat", "heavy", "hedgehog", "height", "hell", "hello", "helmetshrike", "help", "hermit crab", "heron", "hide", "high", "highlight", "highway", "himalayan tahr", "hippopotamus", "hire", "hissing cockroach", "historian", "history", "hit", "hold", "hole", "holiday", "home", "homework", "honey", "honeyeater", "hook", "hope", "hornbill", "hornet", "horror", "horse", "horse", "hospital", "host", "hotel", "hour", "house", "housing", "hoverfly", "human", "hummingbird", "hunt", "hurry", "hurt", "husband", "hutia", "hyena", "hyrax", "iberian lynx", "ibex", "ibis", "ice", "icterid", "idea", "ideal", "if", "iguana", "illegal", "image", "imagination", "impact", "impala", "implement", "importance", "impress", "impression", 
"improvement", "incident", "income", "increase", "independence", "independent", "indication", "individual", "industry", "inevitable", "inflation", "influence", "information", "initial", "initiative", "injury", "insect", "insect", "inside", "inspection", "inspector", "instance", "instruction", "insurance", "intention", "interaction", "interest", "internal", "international", "internet", "interview", "introduction", "investment", "invite", "iron", "island", "issue", "it", "item", "jacana", "jack", "jackal", "jacket", "jaguar", "jaguarundi", "jay", "jellyfish", "jerboa", "job", "join", "joint", "joke", "judge", "judgment", "juice", "jump", "jungle cat", "junior", "jury", "kangaroo", "kangaroo rat", "keep", "kerodon", "kestrel", "key", "kick", "kid", "kill", "kind", "king", "king cobra", "kingbird", "kingfisher", "kinkajou", "kiss", "kitchen", "kite", "kitten", "kiwi", "klipspringer", "knee", "knife", "knifefish", "knowledge", "koala", "kodiak bear", "kodkod", "koi", "<NAME>", "kookaburra", "kowari", "kudu", "kultarr", "lab", "lack", "ladder", "lady", "ladybug", "lake", "lamb", "lamprey", "land", "landscape", "language", "lapwing", "laugh", "law", "lawyer", "lay", "layer", "lead", "leader", "leadership", "leading", "league", "leather", "leave", "lecture", "leech", "leg", "lemming", "lemur", "length", "leopard", "lesson", "let", "letter", "level", "library", "lie", "life", "lift", "liger", "light", "limit", "line", "link", "lion", "lionfish", "lip", "list", "listen", "literature", "living", "lizard", "llama", "loach", "load", "loan", "lobster", "local", "location", "lock", "log", "long", "long tailed tit", "longspur", "look", "loon", "loris", "lory", "loss", "love", "lovebird", "low", "luck", "lunch", "lynx", "lyrebird", "macaw", "machine", "magazine", "mail", "main", "maintenance", "major", "make", "male", "mall", "mallard", "mamba", "mammoth", "man", "management", "manager", "manakin", "manatee", "mandrill", "manner", "<NAME>", "<NAME>", "manufacturer", "many", "map", 
"mara", "march", "margay", "<NAME>", "<NAME>", "mark", "market", "marketing", "markhor", "marlin", "marmot", "marriage", "<NAME>", "marten", "master", "mastodon", "match", "mate", "material", "math", "matter", "maximum", "maybe", "meadowlark", "meal", "meaning", "measurement", "meat", "media", "medicine", "medium", "meerkat", "meet", "meeting", "megaloceros", "megapode", "member", "membership", "memory", "mention", "menu", "mess", "message", "metal", "method", "middle", "midnight", "might", "milk", "millipede", "mind", "mine", "miniature horse", "minimum", "mink", "minnow", "minor", "minute", "mirror", "miss", "mission", "mistake", "mix", "mixture", "mobile", "mockingbird", "mode", "model", "mole", "mole rat", "mom", "moment", "money", "mongoose", "monitor", "monitor lizard", "monkey", "month", "mood", "moorhen", "moose", "moray eel", "morning", "mortgage", "mosasaur", "mosquito", "most", "moth", "mother", "motmot", "motor", "mountain", "mountain goat", "mountain lion", "mouse", "mouse", "mouse deer", "mousebird", "mouth", "move", "movie", "mud", "mudpuppy", "mudskipper", "mullet", "muntjac", "muscle", "music", "muskox", "muskrat", "musky rat kangaroo", "nail", "naked mole rat", "name", "narwhal", "nasty", "nation", "national", "native", "natural", "nature", "nautilus", "neat", "necessary", "neck", "needlefish", "negative", "negotiation", "nerve", "net", "network", "news", "newspaper", "newt", "night", "nighthawk", "nightjar", "nobody", "noise", "normal", "north", "nose", "note", "nothing", "notice", "novel", "numbat", "number", "nurse", "nuthatch", "nutria", "object", "objective", "obligation", "occasion", "ocelot", "octopus", "offer", "office", "officer", "official", "oil", "okapi", "old world babbler", "old world flycatcher", "olingo", "onager", "opening", "operation", "opinion", "opossum", "opportunity", "opposite", "option", "orange", "orangutan", "orca", "order", "ordinary", "organization", "original", "oriole", "oryx", "osprey", "ostrich", "other", "otter", 
"outcome", "outside", "oven", "ovenbird", "owl", "owner", "oyster", "paca", "pace", "pack", "package", "paddlefish", "pademelon", "page", "pain", "paint", "painting", "pair", "pallas's cat", "panda", "pangolin", "panic", "panther", "paper", "parakeet", "parent", "park", "parking", "parrot", "parrotfish", "part", "particular", "partner", "party", "pass", "passage", "passenger", "passenger pigeon", "passion", "past", "path", "patience", "patient", "pattern", "pause", "pay", "payment", "peace", "peacock", "peafowl", "peak", "peccary", "pelican", "pen", "penalty", "penguin", "pension", "people", "percentage", "perception", "performance", "period", "permission", "permit", "person", "personal", "personality", "perspective", "phase", "pheasant", "philosophy", "phone", "photo", "phrase", "physical", "physics", "piano", "pick", "picture", "pie", "piece", "pig", "pigeon", "pika", "pike", "pin", "pipe", "piranha", "pitch", "pitohui", "pizza", "pizzly bear", "place", "plan", "plane", "plant", "plastic", "plate", "platform", "platypus", "play", "player", "pleasure", "plenty", "plover", "pocket gopher", "poem", "poet", "poetry", "pogona", "point", "poison dart frog", "polar bear", "police", "policy", "politics", "pollution", "pony", "pool", "pop", "population", "porcupine", "porpoise", "position", "positive", "possession", "possibility", "possible", "possum", "post", "pot", "potato", "potential", "potoo", "potoroo", "potto", "pouched rat", "pound", "power", "practice", "prairie dog", "prawn", "praying mantis", "preference", "preparation", "presence", "present", "presentation", "president", "press", "pressure", "price", "pride", "priest", "primary", "principle", "print", "prior", "priority", "private", "prize", "problem", "procedure", "process", "produce", "product", "profession", "professional", "professor", "profile", "profit", "program", "progress", "project", "promise", "promotion", "prompt", "pronghorn", "proof", "property", "proposal", "protection", "przewalski's horse", 
"psychology", "ptarmigan", "pterosaur", "public", "pudu", "puff adder", "puffer fish", "puffin", "pull", "puma", "punch", "puppy", "purchase", "purple", "purpose", "push", "put", "python", "qinling panda", "quagga", "quail", "quality", "quantity", "quarter", "queen", "question", "quetzal", "quiet", "quit", "quokka", "quoll", "quote", "rabbit", "raccoon", "raccoon dog", "race", "radio", "rail", "rain", "rainbowfish", "raise", "range", "rat", "rate", "ratio", "rattlesnake", "raven", "raw", "reach", "reaction", "read", "reading", "reality", "reason", "reception", "recipe", "recognition", "recommendation", "record", "recording", "recover", "red", "red panda", "red river hog", "reference", "reflection", "refrigerator", "refuse", "region", "register", "regret", "regular", "reindeer", "relation", "relationship", "relative", "release", "relief", "remote", "remove", "rent", "repair", "repeat", "replacement", "reply", "report", "representative", "republic", "reputation", "request", "requirement", "research", "reserve", "resident", "resist", "resolution", "resolve", "resort", "resource", "respect", "respond", "response", "responsibility", "rest", "restaurant", "result", "return", "reveal", "revenue", "review", "revolution", "reward", "rhea", "rhinoceros", "rice", "rich", "ride", "ring", "ringtail", "rip", "rise", "risk", "river", "river dolphin", "road", "roadrunner", "robin", "rock", "rock hyrax", "rockfish", "role", "roll", "roller", "roof", "rook", "room", "rope", "rough", "round", "routine", "row", "royal", "rub", "rufous rat kangaroo", "ruin", "rule", "run", "rush", "saber toothed cat", "sad", "safe", "safety", "sail", "sailfish", "salad", "salamander", "salary", "sale", "salmon", "salt", "sample", "sand", "sand cat", "sandgrouse", "sandwich", "satisfaction", "save", "savings", "sawfish", "scale", "scene", "schedule", "scheme", "school", "science", "score", "scorpion", "scratch", "screen", "screw", "script", "sea", "sea anemone", "sea cucumber", "sea duck", "sea gull", 
"sea lion", "sea otter", "sea snake", "seadragon", "seahorse", "seal", "search", "season", "seat", "second", "secret", "secretary", "section", "sector", "security", "selection", "self", "sell", "senior", "sense", "sensitive", "sentence", "series", "serval", "serve", "service", "session", "set", "setting", "sex", "shake", "shame", "shape", "share", "shark", "she", "shearwater", "sheep", "shelduck", "shelter", "shift", "shine", "ship", "shirt", "shock", "shoe", "shoebill", "shoot", "shop", "shopping", "shot", "shoulder", "show", "shower", "shrew", "shrimp", "sick", "side", "sign", "signal", "signature", "significance", "silly", "silver", "simple", "sing", "singer", "single", "sink", "sir", "sister", "site", "situation", "size", "skate", "skill", "skimmer", "skin", "skink", "skirt", "skua", "skunk", "sky", "sleep", "slice", "slide", "slip", "sloth", "sloth bear", "slow loris", "slug", "smell", "smile", "smoke", "snail", "snake", "snipe", "snow", "snow leopard", "society", "sock", "soft", "software", "soil", "solenodon", "solid", "solution", "somewhere", "son", "song", "songbird", "sort", "sound", "soup", "source", "south", "space", "spare", "sparrow", "speaker", "special", "specialist", "specific", "spectacled bear", "speech", "speed", "spell", "spend", "spider", "spiny lobster", "spiny mouse", "spiny rat", "spirit", "spiritual", "spite", "split", "sponge", "spoonbill", "sport", "spot", "spray", "spread", "spring", "springhare", "square", "squid", "squirrel", "squirrel glider", "stable", "staff", "stage", "stand", "standard", "star", "starfish", "starling", "start", "state", "statement", "station", "status", "stay", "steak", "steal", "step", "steppe lemming", "stick", "stick bug", "still", "stingray", "stoat", "stock", "stomach", "stone curlew", "stonefish", "stop", "storage", "store", "stork", "storm", "story", "strain", "stranger", "strategy", "street", "strength", "stress", "stretch", "strike", "string", "strip", "stroke", "structure", "struggle", "student", 
"studio", "study", "stuff", "stupid", "sturgeon", "style", "subject", "substance", "success", "suck", "sugar", "suggestion", "suit", "summer", "sun", "sun bear", "sunbird", "supermarket", "support", "surgeonfish", "surgery", "surprise", "surround", "survey", "suspect", "swallow", "swamphen", "swan", "sweet", "swift", "swim", "swimming", "swing", "switch", "swordfish", "sympathy", "system", "t rex", "table", "tackle", "tadpole", "takin", "tale", "talk", "tamandua", "tamarin", "tanager", "tank", "tap", "tapaculo", "tapir", "tarantula", "target", "tarpon", "tarsier", "task", "<NAME>", "<NAME>", "taste", "tax", "tayra", "tea", "teach", "teacher", "teaching", "team", "tear", "technology", "telephone", "television", "tell", "temperature", "temporary", "tennis", "tenrec", "tension", "term", "termite", "tern", "test", "tetra", "text", "thanks", "theme", "theory", "thick knee", "thing", "thornbill", "thought", "thrasher", "throat", "thrush", "ticket", "tie", "tiger", "till", "time", "tip", "tit", "title", "toad", "toadfish", "today", "toe", "tomorrow", "tone", "tongue", "tonight", "tool", "tooth", "top", "topic", "tortoise", "total", "toucan", "touch", "tough", "tour", "tourist", "towel", "tower", "town", "track", "trade", "tradition", "traffic", "train", "trainer", "training", "transition", "transportation", "trash", "travel", "treat", "tree", "treeshrew", "trick", "trip", "trogon", "trouble", "trout", "truck", "trumpeter", "trust", "truth", "try", "tuatara", "tuna", "tune", "turaco", "turkey", "turn", "turtle", "twist", "type", "tyrant flycatcher", "uncle", "understanding", "union", "unique", "unit", "university", "upper", "upstairs", "urchin", "use", "user", "usual", "vacation", "valuable", "value", "vanga", "vaquita", "variation", "variety", "vast", "vegetable", "vehicle", "version", "vicuna", "video", "view", "village", "viper", "virus", "viscacha", "visit", "visual", "voice", "vole", "volume", "vulture", "wader", "wagtail", "wait", "wake", "walk", "wall", "wallaby", 
"walleye", "walrus", "wapiti", "war", "warbler", "warning", "warthog", "wash", "wasp", "watch", "water", "water buffalo", "wave", "waxwing", "way", "weakness", "wealth", "wear", "weasel", "weather", "weaver", "weaver finch", "web", "wedding", "week", "weekend", "weight", "weird", "welcome", "west", "western", "whale", "wheel", "whereas", "while", "whistler", "whistling duck", "white", "white eye", "whole", "whydah", "widow spider", "wife", "wigeon", "wildcat", "wildebeest", "will", "win", "wind", "window", "wine", "wing", "winner", "winter", "wish", "witness", "wolf", "wolverine", "woman", "wombat", "wonder", "wongai ningaui", "wood", "woodchuck", "woodcock", "woodpecker", "woodswallow", "word", "work", "worker", "working", "world", "worm", "worry", "worth", "wrap", "wren", "writer", "writing", "x ray tetra", "x-ray", "xenophobe", "xenopoecilus", "xenops", "xerus", "xylophone", "yak", "yapok", "yard", "year", "yellow", "yellowjacket", "yesterday", "young", "youth", "zebra", "zebra", "zebu", "zone", "zzyzx" ], "adverbs": [ "aboard", "abnormally", "about", "abroad", "absentmindedly", "absolutely", "abundantly", "accidentally", "accordingly", "actively", "actually", "acutely", "admiringly", "affectionately", "affirmatively", "after", "afterwards", "agreeably", "almost", "already", "always", "amazingly", "angrily", "annoyingly", "annually", "anxiously", "anyhow", "anyplace", "anyway", "anywhere", "appreciably", "appropriately", "around", "arrogantly", "aside", "assuredly", "astonishingly", "away", "awfully", "awkwardly", "badly", "barely", "bashfully", "beautifully", "before", "begrudgingly", "believably", "bewilderedly", "bewilderingly", "bitterly", "bleakly", "blindly", "blissfully", "boldly", "boastfully", "boldly", "boyishly", "bravely", "briefly", "brightly", "brilliantly", "briskly", "brutally", "busily", "calmly", "candidly", "carefully", "carelessly", "casually", "cautiously", "certainly", "charmingly", "cheerfully", "chiefly", "childishly", "cleanly", 
"clearly", "cleverly", "closely", "cloudily", "clumsily", "coaxingly", "coincidentally", "coldly", "colorfully", "commonly", "comfortably", "compactly", "compassionately", "completely", "confusedly", "consequently", "considerably", "considerately", "consistently", "constantly", "continually", "continuously", "coolly", "correctly", "courageously", "covertly", "cowardly", "crazily", "crossly", "cruelly", "cunningly", "curiously", "currently", "customarily", "cutely", "daily", "daintily", "dangerously", "daringly", "darkly", "dastardly", "dearly", "decently", "deeply", "defiantly", "deftly", "deliberately", "delicately", "delightfully", "densely", "diagonally", "differently", "diligently", "dimly", "directly", "disorderly", "divisively", "docilely", "dopily", "doubtfully", "down", "dramatically", "dreamily", "during", "eagerly", "early", "earnestly", "easily", "efficiently", "effortlessly", "elaborately", "eloquently", "elegantly", "elsewhere", "emotionally", "endlessly", "energetically", "enjoyably", "enormously", "enough", "enthusiastically", "entirely", "equally", "especially", "essentially", "eternally", "ethically", "even", "evenly", "eventually", "evermore", "every", "everywhere", "evidently", "evocatively", "exactly", "exceedingly", "exceptionally", "excitedly", "exclusively", "explicitly", "expressly", "extensively", "externally", "extra", "extraordinarily", "extremely", "fairly", "faithfully", "famously", "far", "fashionably", "fast", "fatally", "favorably", "ferociously", "fervently", "fiercely", "fiery", "finally", "financially", "finitely", "fluently", "fondly", "foolishly", "forever", "formally", "formerly", "fortunately", "forward", "frankly", "frantically", "freely", "frequently", "frenetically", "fully", "furiously", "furthermore", "generally", "generously", "genuinely", "gently", "genuinely", "girlishly", "gladly", "gleefully", "gracefully", "graciously", "gradually", "gratefully", "greatly", "greedily", "grimly", "grudgingly", "habitually", 
"half-heartedly", "handily", "handsomely", "haphazardly", "happily", "hastily", "harmoniously", "harshly", "hastily", "hatefully", "hauntingly", "healthily", "heartily", "heavily", "helpfully", "hence", "highly", "hitherto", "honestly", "hopelessly", "horizontally", "hourly", "how", "however", "hugely", "humorously", "hungrily", "hurriedly", "hysterically", "icily", "identifiably", "idiotically", "imaginatively", "immeasurably", "immediately", "immensely", "impatiently", "impressively", "inappropriately", "incessantly", "incorrectly", "indeed", "independently", "indoors", "indubitably", "inevitably", "infinitely", "informally", "infrequently", "innocently", "inquisitively", "instantly", "intelligently", "intensely", "intently", "interestingly", "intermittently", "internally", "invariably", "invisibly", "inwardly", "ironically", "irrefutably", "irritably", "jaggedly", "jauntily", "jealously", "jovially", "joyfully", "joylessly", "joyously", "jubilantly", "judgmentally", "just", "justly", "keenly", "kiddingly", "kindheartedly", "kindly", "knavishly", "knottily", "knowingly", "knowledgeably", "kookily", "lastly", "late", "lately", "later", "lazily", "less", "lightly", "likely", "limply", "lithely", "lively", "loftily", "longingly", "loosely", "loudly", "lovingly", "loyally", "luckily", "luxuriously", "madly", "magically", "mainly", "majestically", "markedly", "materially", "meaningfully", "meanly", "meantime", "meanwhile", "measurably", "mechanically", "medically", "menacingly", "merely", "merrily", "methodically", "mightily", "miserably", "mockingly", "monthly", "morally", "more", "moreover", "mortally", "mostly", "much", "mysteriously", "nastily", "naturally", "naughtily", "nearby", "nearly", "neatly", "needily", "negatively", "nervously", "never", "nevertheless", "next", "nicely", "nightly", "noisily", "normally", "nosily", "not", "now", "nowadays", "numbly", "obediently", "obligingly", "obnoxiously", "obviously", "occasionally", "oddly", "offensively", 
"officially", "often", "ominously", "once", "only", "openly", "optimistically", "orderly", "ordinarily", "outdoors", "outrageously", "outwardly", "outwards", "overconfidently", "overseas", "painfully", "painlessly", "paradoxically", "partially", "particularly", "passionately", "patiently", "perfectly", "periodically", "perpetually", "persistently", "personally", "persuasively", "physically", "plainly", "playfully", "poetically", "poignantly", "politely", "poorly", "positively", "possibly", "potentially", "powerfully", "presently", "presumably", "prettily", "previously", "primly", "principally", "probably", "promptly", "properly", "proudly", "punctually", "puzzlingly", "quaintly", "queasily", "questionably", "questioningly", "quicker", "quickly", "quietly", "quirkily", "quite", "quizzically", "randomly", "rapidly", "rarely", "readily", "really", "reasonably", "reassuringly", "recently", "recklessly", "regularly", "reliably", "reluctantly", "remarkably", "repeatedly", "reproachfully", "reponsibly", "resentfully", "respectably", "respectfully", "restfully", "richly", "ridiculously", "righteously", "rightfully", "rightly", "rigidly", "roughly", "routinely", "rudely", "ruthlessly", "sadly", "safely", "scarcely", "scarily", "scientifically", "searchingly", "secretively", "securely", "sedately", "seemingly", "seldom", "selfishly", "selflessly", "separately", "seriously", "shakily", "shamelessly", "sharply", "sheepishly", "shoddily", "shortly", "shrilly", "significantly", "silently", "simply", "sincerely", "singularly", "shyly", "skillfully", "sleepily", "slightly", "slowly", "slyly", "smoothly", "so", "softly", "solely", "solemnly", "solidly", "silicitiously", "somehow", "sometimes", "somewhat", "somewhere", "soon", "specially", "specifically", "spectacularly", "speedily", "spiritually", "splendidly", "sporadically", "spasmodically", "startlingly", "steadily", "stealthily", "sternly", "still", "strenuously", "stressfully", "strictly", "structurally", "studiously", 
"stupidly", "stylishly", "subsequently", "substantially", "subtly", "successfully", "suddenly", "sufficiently", "suitably", "superficially", "supremely", "surely", "surprisingly", "suspiciously", "sweetly", "swiftly", "sympathetically", "systematically", "temporarily", "tenderly", "tensely", "tepidly", "terribly", "thankfully", "then", "there", "thereby", "thoroughly", "thoughtfully", "thus", "tightly", "today", "together", "tomorrow", "too", "totally", "touchingly", "tremendously", "truly", "truthfully", "twice", "ultimately", "unabashedly", "unanimously", "unbearably", "unbelievably", "unemotionally", "unethically", "unexpectedly", "unfailingly", "unfavorably", "unfortunately", "uniformly", "unilaterally", "unimpressively", "universally", "unnaturally", "unnecessarily", "unquestionably", "unwillingly", "up", "upbeat", "unkindly", "upliftingly", "upright", "unselfishly", "upside-down", "unskillfully", "upward", "upwardly", "urgently", "usefully", "uselessly", "usually", "utterly", "vacantly", "vaguely", "vainly", "valiantly", "vastly", "verbally", "vertically", "very", "viciously", "victoriously", "vigilantly", "vigorously", "violently", "visibly", "visually", "vivaciously", "voluntarily", "warmly", "weakly", "wearily", "weekly", "well", "wetly", "when", "where", "while", "whole-heartedly", "wholly", "why", "wickedly", "widely", "wiggly", "wildly", "willfully", "willingly", "wisely", "woefully", "wonderfully", "worriedly", "worthily", "wrongly", "yearly", "yearningly", "yesterday", "yet", "youthfully", "zanily", "zealously", "zestfully", "zestily" ], "adjectives": [ "abandoned", "able", "absolute", "adorable", "adventurous", "academic", "acceptable", "acclaimed", "accomplished", "accurate", "aching", "acidic", "acrobatic", "active", "actual", "adept", "admirable", "admired", "adolescent", "adorable", "adored", "advanced", "afraid", "affectionate", "aged", "aggravating", "aggressive", "agile", "agitated", "agonizing", "agreeable", "ajar", "alarmed", "alarming", 
"alert", "alienated", "alive", "all", "altruistic", "amazing", "ambitious", "ample", "amused", "amusing", "anchored", "ancient", "angelic", "angry", "anguished", "animated", "annual", "another", "antique", "anxious", "any", "apprehensive", "appropriate", "apt", "arctic", "arid", "aromatic", "artistic", "ashamed", "assured", "astonishing", "athletic", "attached", "attentive", "attractive", "austere", "authentic", "authorized", "automatic", "avaricious", "average", "aware", "awesome", "awful", "awkward", "babyish", "bad", "back", "baggy", "bare", "barren", "basic", "beautiful", "belated", "beloved", "beneficial", "better", "best", "bewitched", "big", "big-hearted", "biodegradable", "bite-sized", "bitter", "black", "black-and-white", "bland", "blank", "blaring", "bleak", "blind", "blissful", "blond", "blue", "blushing", "bogus", "boiling", "bold", "bony", "boring", "bossy", "both", "bouncy", "bountiful", "bowed", "brave", "breakable", "brief", "bright", "brilliant", "brisk", "broken", "bronze", "brown", "bruised", "bubbly", "bulky", "bumpy", "buoyant", "burdensome", "burly", "bustling", "busy", "buttery", "buzzing", "calculating", "calm", "candid", "canine", "capital", "carefree", "careful", "careless", "caring", "cautious", "cavernous", "celebrated", "charming", "cheap", "cheerful", "cheery", "chief", "chilly", "chubby", "circular", "classic", "clean", "clear", "clear-cut", "clever", "close", "closed", "cloudy", "clueless", "clumsy", "cluttered", "coarse", "cold", "colorful", "colorless", "colossal", "comfortable", "common", "compassionate", "competent", "complete", "complex", "complicated", "composed", "concerned", "concrete", "confused", "conscious", "considerate", "constant", "content", "conventional", "cooked", "cool", "cooperative", "coordinated", "corny", "corrupt", "costly", "courageous", "courteous", "crafty", "crazy", "creamy", "creative", "creepy", "criminal", "crisp", "critical", "crooked", "crowded", "cruel", "crushing", "cuddly", "cultivated", 
"cultured", "cumbersome", "curly", "curvy", "cute", "cylindrical", "damaged", "damp", "dangerous", "dapper", "daring", "darling", "dark", "dazzling", "dead", "deadly", "deafening", "dear", "dearest", "decent", "decimal", "decisive", "deep", "defenseless", "defensive", "defiant", "deficient", "definite", "definitive", "delayed", "delectable", "delicious", "delightful", "delirious", "demanding", "dense", "dental", "dependable", "dependent", "descriptive", "deserted", "detailed", "determined", "devoted", "different", "difficult", "digital", "diligent", "dim", "dimpled", "dimwitted", "direct", "disastrous", "discrete", "disfigured", "disgusting", "disloyal", "dismal", "distant", "downright", "dreary", "dirty", "disguised", "dishonest", "dismal", "distant", "distinct", "distorted", "dizzy", "dopey", "doting", "double", "downright", "drab", "drafty", "dramatic", "dreary", "droopy", "dry", "dual", "dull", "dutiful", "each", "eager", "earnest", "early", "easy", "easy-going", "ecstatic", "edible", "educated", "elaborate", "elastic", "elated", "elderly", "electric", "elegant", "elementary", "elliptical", "embarrassed", "embellished", "eminent", "emotional", "empty", "enchanted", "enchanting", "energetic", "enlightened", "enormous", "enraged", "entire", "envious", "equal", "equatorial", "essential", "esteemed", "ethical", "euphoric", "even", "evergreen", "everlasting", "every", "evil", "exalted", "excellent", "exemplary", "exhausted", "excitable", "excited", "exciting", "exotic", "expensive", "experienced", "expert", "extraneous", "extroverted", "extra-large", "extra-small", "fabulous", "failing", "faint", "fair", "faithful", "fake", "false", "familiar", "famous", "fancy", "fantastic", "far", "faraway", "far-flung", "far-off", "fast", "fat", "fatal", "fatherly", "favorable", "favorite", "fearful", "fearless", "feisty", "feline", "female", "feminine", "few", "fickle", "filthy", "fine", "finished", "firm", "first", "firsthand", "fitting", "fixed", "flaky", "flamboyant", 
"flashy", "flat", "flawed", "flawless", "flickering", "flimsy", "flippant", "flowery", "fluffy", "fluid", "flustered", "focused", "fond", "foolhardy", "foolish", "forceful", "forked", "formal", "forsaken", "forthright", "fortunate", "fragrant", "frail", "frank", "frayed", "free", "French", "fresh", "frequent", "friendly", "frightened", "frightening", "frigid", "frilly", "frizzy", "frivolous", "front", "frosty", "frozen", "frugal", "fruitful", "full", "fumbling", "functional", "funny", "fussy", "fuzzy", "gargantuan", "gaseous", "general", "generous", "gentle", "genuine", "giant", "giddy", "gigantic", "gifted", "giving", "glamorous", "glaring", "glass", "gleaming", "gleeful", "glistening", "glittering", "gloomy", "glorious", "glossy", "glum", "golden", "good", "good-natured", "gorgeous", "graceful", "gracious", "grand", "grandiose", "granular", "grateful", "grave", "gray", "great", "greedy", "green", "gregarious", "grim", "grimy", "gripping", "grizzled", "gross", "grotesque", "grouchy", "grounded", "growing", "growling", "grown", "grubby", "gruesome", "grumpy", "guilty", "gullible", "gummy", "hairy", "half", "handmade", "handsome", "handy", "happy", "happy-go-lucky", "hard", "hard-to-find", "harmful", "harmless", "harmonious", "harsh", "hasty", "hateful", "haunting", "healthy", "heartfelt", "hearty", "heavenly", "heavy", "hefty", "helpful", "helpless", "hidden", "hideous", "high", "high-level", "hilarious", "hoarse", "hollow", "homely", "honest", "honorable", "honored", "hopeful", "horrible", "hospitable", "hot", "huge", "humble", "humiliating", "humming", "humongous", "hungry", "hurtful", "husky", "icky", "icy", "ideal", "idealistic", "identical", "idle", "idiotic", "idolized", "ignorant", "ill", "illegal", "ill-fated", "ill-informed", "illiterate", "illustrious", "imaginary", "imaginative", "immaculate", "immaterial", "immediate", "immense", "impassioned", "impeccable", "impartial", "imperfect", "imperturbable", "impish", "impolite", "important", "impossible", 
"impractical", "impressionable", "impressive", "improbable", "impure", "inborn", "incomparable", "incompatible", "incomplete", "inconsequential", "incredible", "indelible", "inexperienced", "indolent", "infamous", "infantile", "infatuated", "inferior", "infinite", "informal", "innocent", "insecure", "insidious", "insignificant", "insistent", "instructive", "insubstantial", "intelligent", "intent", "intentional", "interesting", "internal", "international", "intrepid", "ironclad", "irresponsible", "irritating", "itchy", "jaded", "jagged", "jam-packed", "jaunty", "jealous", "jittery", "joint", "jolly", "jovial", "joyful", "joyous", "jubilant", "judicious", "juicy", "jumbo", "junior", "jumpy", "juvenile", "kaleidoscopic", "keen", "key", "kind", "kindhearted", "kindly", "klutzy", "knobby", "knotty", "knowledgeable", "knowing", "known", "kooky", "kosher", "lame", "lanky", "large", "last", "lasting", "late", "lavish", "lawful", "lazy", "leading", "lean", "leafy", "left", "legal", "legitimate", "light", "lighthearted", "likable", "likely", "limited", "limp", "limping", "linear", "lined", "liquid", "little", "live", "lively", "livid", "loathsome", "lone", "lonely", "long", "long-term", "loose", "lopsided", "lost", "loud", "lovable", "lovely", "loving", "low", "loyal", "lucky", "lumbering", "luminous", "lumpy", "lustrous", "luxurious", "mad", "made-up", "magnificent", "majestic", "major", "male", "mammoth", "married", "marvelous", "masculine", "massive", "mature", "meager", "mealy", "mean", "measly", "meaty", "medical", "mediocre", "medium", "meek", "mellow", "melodic", "memorable", "menacing", "merry", "messy", "metallic", "mild", "milky", "mindless", "miniature", "minor", "minty", "miserable", "miserly", "misguided", "misty", "mixed", "modern", "modest", "moist", "monstrous", "monthly", "monumental", "moral", "mortified", "motherly", "motionless", "mountainous", "muddy", "muffled", "multicolored", "mundane", "murky", "mushy", "musty", "muted", "mysterious", "naive", 
"narrow", "nasty", "natural", "naughty", "nautical", "near", "neat", "necessary", "needy", "negative", "neglected", "negligible", "neighboring", "nervous", "new", "next", "nice", "nifty", "nimble", "nippy", "nocturnal", "noisy", "nonstop", "normal", "notable", "noted", "noteworthy", "novel", "noxious", "numb", "nutritious", "nutty", "obedient", "obese", "oblong", "oily", "oblong", "obvious", "occasional", "odd", "oddball", "offbeat", "offensive", "official", "old", "old-fashioned", "only", "open", "optimal", "optimistic", "opulent", "orange", "orderly", "organic", "ornate", "ornery", "ordinary", "original", "other", "our", "outlying", "outgoing", "outlandish", "outrageous", "outstanding", "oval", "overcooked", "overdue", "overjoyed", "overlooked", "palatable", "pale", "paltry", "parallel", "parched", "partial", "passionate", "past", "pastel", "peaceful", "peppery", "perfect", "perfumed", "periodic", "perky", "personal", "pertinent", "pesky", "pessimistic", "petty", "phony", "physical", "piercing", "pink", "pitiful", "plain", "plaintive", "plastic", "playful", "pleasant", "pleased", "pleasing", "plump", "plush", "polished", "polite", "political", "pointed", "pointless", "poised", "poor", "popular", "portly", "posh", "positive", "possible", "potable", "powerful", "powerless", "practical", "precious", "present", "prestigious", "pretty", "precious", "previous", "pricey", "prickly", "primary", "prime", "pristine", "private", "prize", "probable", "productive", "profitable", "profuse", "proper", "proud", "prudent", "punctual", "pungent", "puny", "pure", "purple", "pushy", "putrid", "puzzled", "puzzling", "quaint", "qualified", "quarrelsome", "quarterly", "queasy", "querulous", "questionable", "quick", "quick-witted", "quiet", "quintessential", "quirky", "quixotic", "quizzical", "radiant", "ragged", "rapid", "rare", "rash", "raw", "recent", "reckless", "rectangular", "ready", "real", "realistic", "reasonable", "red", "reflecting", "regal", "regular", "reliable", 
"relieved", "remarkable", "remorseful", "remote", "repentant", "required", "respectful", "responsible", "repulsive", "revolving", "rewarding", "rich", "rigid", "right", "ringed", "ripe", "roasted", "robust", "rosy", "rotating", "rotten", "rough", "round", "rowdy", "royal", "rubbery", "rundown", "ruddy", "rude", "runny", "rural", "rusty", "sad", "safe", "salty", "same", "sandy", "sane", "sarcastic", "sardonic", "satisfied", "scaly", "scarce", "scared", "scary", "scented", "scholarly", "scientific", "scornful", "scratchy", "scrawny", "second", "secondary", "second-hand", "secret", "self-assured", "self-reliant", "selfish", "sentimental", "separate", "serene", "serious", "serpentine", "several", "severe", "shabby", "shadowy", "shady", "shallow", "shameful", "shameless", "sharp", "shimmering", "shiny", "shocked", "shocking", "shoddy", "short", "short-term", "showy", "shrill", "shy", "sick", "silent", "silky", "silly", "silver", "similar", "simple", "simplistic", "sinful", "single", "sizzling", "skeletal", "skinny", "sleepy", "slight", "slim", "slimy", "slippery", "slow", "slushy", "small", "smart", "smoggy", "smooth", "smug", "snappy", "snarling", "sneaky", "sniveling", "snoopy", "sociable", "soft", "soggy", "solid", "somber", "some", "spherical", "sophisticated", "sore", "sorrowful", "soulful", "soupy", "sour", "Spanish", "sparkling", "sparse", "specific", "spectacular", "speedy", "spicy", "spiffy", "spirited", "spiteful", "splendid", "spotless", "spotted", "spry", "square", "squeaky", "squiggly", "stable", "staid", "stained", "stale", "standard", "starchy", "stark", "starry", "steep", "sticky", "stiff", "stimulating", "stingy", "stormy", "straight", "strange", "steel", "strict", "strident", "striking", "striped", "strong", "studious", "stunning", "stupendous", "stupid", "sturdy", "stylish", "subdued", "submissive", "substantial", "subtle", "suburban", "sudden", "sugary", "sunny", "super", "superb", "superficial", "superior", "supportive", "sure-footed", "surprised", 
"suspicious", "svelte", "sweaty", "sweet", "sweltering", "swift", "sympathetic", "tall", "talkative", "tame", "tan", "tangible", "tart", "tasty", "tattered", "taut", "tedious", "teeming", "tempting", "tender", "tense", "tepid", "terrible", "terrific", "testy", "thankful", "that", "these", "thick", "thin", "third", "thirsty", "this", "thorough", "thorny", "those", "thoughtful", "threadbare", "thrifty", "thunderous", "tidy", "tight", "timely", "tinted", "tiny", "tired", "torn", "total", "tough", "traumatic", "treasured", "tremendous", "tragic", "trained", "tremendous", "triangular", "tricky", "trifling", "trim", "trivial", "troubled", "true", "trusting", "trustworthy", "trusty", "truthful", "tubby", "turbulent", "twin", "ugly", "ultimate", "unacceptable", "unaware", "uncomfortable", "uncommon", "unconscious", "understated", "unequaled", "uneven", "unfinished", "unfit", "unfolded", "unfortunate", "unhappy", "unhealthy", "uniform", "unimportant", "unique", "united", "unkempt", "unknown", "unlawful", "unlined", "unlucky", "unnatural", "unpleasant", "unrealistic", "unripe", "unruly", "unselfish", "unsightly", "unsteady", "unsung", "untidy", "untimely", "untried", "untrue", "unused", "unusual", "unwelcome", "unwieldy", "unwilling", "unwitting", "unwritten", "upbeat", "upright", "upset", "urban", "usable", "used", "useful", "useless", "utilized", "utter", "vacant", "vague", "vain", "valid", "valuable", "vapid", "variable", "vast", "velvety", "venerated", "vengeful", "verifiable", "vibrant", "vicious", "victorious", "vigilant", "vigorous", "villainous", "violet", "violent", "virtual", "virtuous", "visible", "vital", "vivacious", "vivid", "voluminous", "wan", "warlike", "warm", "warmhearted", "warped", "wary", "wasteful", "watchful", "waterlogged", "watery", "wavy", "wealthy", "weak", "weary", "webbed", "wee", "weekly", "weepy", "weighty", "weird", "welcome", "well-documented", "well-groomed", "well-informed", "well-lit", "well-made", "well-off", "well-to-do", "well-worn", 
"wet", "which", "whimsical", "whirlwind", "whispered", "white", "whole", "whopping", "wicked", "wide", "wide-eyed", "wiggly", "wild", "willing", "wilted", "winding", "windy", "winged", "wiry", "wise", "witty", "wobbly", "woeful", "wonderful", "wooden", "woozy", "wordy", "worldly", "worn", "worried", "worrisome", "worse", "worst", "worthless", "worthwhile", "worthy", "wrathful", "wretched", "writhing", "wrong", "wry", "yawning", "yearly", "yellow", "yellowish", "young", "youthful", "yummy", "zany", "zealous", "zesty", "zigzag" ], "verbs": [ "accept", "ache", "acknowledge", "act", "add", "admire", "admit", "admonish", "advise", "adopt", "affirm", "afford", "agree", "ail", "alert", "allege", "allow", "allude", "amuse", "analyze", "announce", "annoy", "answer", "apologize", "appeal", "appear", "applaud", "appreciate", "approve", "argue", "arrange", "arrest", "arrive", "articulate", "ask", "assert", "assure", "attach", "attack", "attempt", "attend", "attract", "auction", "avoid", "avow", "awake", "babble", "back", "bake", "balance", "balk", "ban", "bang", "bandage", "bar", "bare", "bargain", "bark", "barrage", "barter", "baste", "bat", "bathe", "battle", "bawl", "be", "beam", "bear", "beat", "become", "befriend", "beg", "begin", "behave", "believe", "bellow", "belong", "bend", "berate", "besiege", "bestow", "bet", "bid", "bite", "bleach", "bleed", "bless", "blind", "blink", "blot", "blow", "blurt", "blush", "boast", "bob", "boil", "bolt", "bomb", "book", "bore", "borrow", "bounce", "bow", "box", "brag", "brake", "branch", "brand", "break", "breathe", "breed", "bring", "broadcast", "broil", "bruise", "brush", "bubble", "build", "bump", "burn", "burnish", "bury", "buy", "buzz", "cajole", "calculate", "call", "camp", "care", "carry", "carve", "cause", "caution", "catch", "challenge", "change", "chant", "charge", "chase", "cheat", "check", "cheer", "chew", "chide", "chip", "choke", "chomp", "choose", "chop", "claim", "clap", "clean", "clear", "climb", "clip", "close", 
"coach", "coil", "collect", "color", "comb", "come", "comfort", "command", "comment", "communicate", "compare", "compete", "complain", "complete", "concede", "concentrate", "concern", "conclude", "concur", "confess", "confide", "confirm", "connect", "consent", "consider", "consist", "contain", "contend", "continue", "cook", "copy", "correct", "cost", "cough", "count", "counter", "cover", "covet", "crack", "crash", "crave", "crawl", "crochet", "cross", "criticize", "croak", "cross-examine", "crowd", "crush", "cry", "cure", "curl", "curse", "curve", "cut", "cycle", "dam", "damage", "dance", "dare", "deal", "debate", "decay", "deceive", "decide", "decipher", "declare", "decorate", "delay", "delight", "deliver", "demand", "deny", "depend", "describe", "desert", "deserve", "desire", "deter", "develop", "dial", "dictate", "die", "dig", "digress", "direct", "disclose", "dislike", "dive", "divide", "divorce", "divulge", "do", "dock", "dole", "dote", "double", "doubt", "drag", "drain", "draw", "dream", "dress", "drip", "drill", "drink", "drive", "drone", "drop", "drown", "dry", "dupe", "dump", "dust", "dye", "earn", "eat", "echo", "edit", "educate", "elope", "embarrass", "emigrate", "emit", "emphasize", "employ", "empty", "enchant", "encode", "encourage", "end", "enjoin", "enjoy", "enter", "entertain", "enunciate", "envy", "equivocate", "escape", "evacuate", "evaporate", "exaggerate", "examine", "excite", "exclaim", "excuse", "exercise", "exist", "expand", "expect", "expel", "exhort", "explain", "explode", "explore", "extend", "extoll", "face", "fade", "fail", "fall", "falter", "fasten", "favor", "fax", "fear", "feed", "feel", "fence", "fetch", "fight", "file", "fill", "film", "find", "fire", "fish", "fit", "fix", "flap", "flash", "flee", "float", "flood", "floss", "flow", "flower", "fly", "fold", "follow", "fool", "force", "foretell", "forget", "forgive", "form", "found", "frame", "freeze", "fret", "frighten", "fry", "fume", "garden", "gasp", "gather", "gaze", "gel", 
"get", "gild", "give", "glide", "glue", "gnaw", "go", "grab", "grate", "grease", "greet", "grill", "grin", "grip", "groan", "grow", "growl", "grumble", "grunt", "guarantee", "guard", "guess", "guide", "gurgle", "gush", "hail", "hammer", "hand", "handle", "hang", "happen", "harass", "harm", "harness", "hate", "haunt", "have", "head", "heal", "heap", "hear", "heat", "help", "hide", "highlight", "hijack", "hinder", "hint", "hiss", "hit", "hold", "hook", "hoot", "hop", "hope", "hover", "howl", "hug", "hum", "hunt", "hurry", "hurt", "ice", "identify", "ignore", "imagine", "immigrate", "imply", "implore", "impress", "improve", "include", "increase", "infect", "inflate", "influence", "inform", "infuse", "inject", "injure", "inquire", "insist", "inspect", "inspire", "instruct", "intend", "interest", "interfere", "interject", "interrupt", "introduce", "invent", "invest", "invite", "irritate", "iron", "itch", "jab", "jabber", "jail", "jam", "jeer", "jest", "jog", "join", "joke", "jolt", "judge", "juggle", "jump", "keep", "kick", "kill", "kiss", "kneel", "knit", "knock", "knot", "know", "label", "lament", "land", "last", "laugh", "lay", "lead", "lean", "learn", "leave", "lecture", "lend", "let", "level", "license", "lick", "lie", "lift", "light", "lighten", "like", "list", "listen", "live", "load", "loan", "lock", "long", "look", "loosen", "lose", "love", "lower", "mail", "maintain", "make", "man", "manage", "mar", "march", "mark", "marry", "marvel", "mate", "matter", "mean", "measure", "meet", "melt", "memorize", "mend", "mention", "merge", "milk", "mine", "miss", "mix", "moan", "moor", "mourn", "molt", "move", "mow", "mug", "multiply", "mumble", "murder", "mutter", "nag", "nail", "name", "nap", "need", "nest", "nod", "note", "notice", "number", "obey", "object", "observe", "obtain", "occur", "offend", "offer", "ogle", "oil", "omit", "open", "operate", "order", "overflow", "overrun", "owe", "own", "pack", "pad", "paddle", "paint", "pant", "park", "part", "pass", "paste", 
"pat", "pause", "pay", "peck", "pedal", "peel", "peep", "peer", "peg", "pelt", "perform", "permit", "pester", "pet", "phone", "pick", "pinch", "pine", "place", "plan", "plant", "play", "plead", "please", "pledge", "plow", "plug", "point", "poke", "polish", "ponder", "pop", "possess", "post", "postulate", "pour", "practice", "pray", "preach", "precede", "predict", "prefer", "prepare", "present", "preserve", "press", "pretend", "prevent", "prick", "print", "proceed", "proclaim", "produce", "profess", "program", "promise", "propose", "protect", "protest", "provide", "pry", "pull", "pump", "punch", "puncture", "punish", "push", "put", "question", "quilt", "quit", "quiz", "quote", "race", "radiate", "rain", "raise", "rant", "rain", "rate", "rave", "reach", "realize", "read", "rebuff", "recall", "receive", "recite", "recognize", "recommend", "record", "reduce", "reflect", "refuse", "regret", "reign", "reiterate", "reject", "rejoice", "relate", "relax", "release", "rely", "remain", "remember", "remind", "remove", "repair", "repeat", "replace", "reply", "report", "reprimand", "reproduce", "request", "rescue", "retire", "retort", "return", "reveal", "reverse", "rhyme", "ride", "ring", "rinse", "rise", "risk", "roar", "rob", "rock", "roll", "rot", "row", "rub", "ruin", "rule", "run", "rush", "sack", "sail", "satisfy", "save", "savor", "saw", "say", "scare", "scatter", "scoff", "scold", "scoot", "scorch", "scrape", "scratch", "scream", "screech", "screw", "scribble", "seal", "search", "see", "sell", "send", "sense", "separate", "serve", "set", "settle", "sever", "sew", "shade", "shampoo", "share", "shave", "shelter", "shift", "shiver", "shock", "shoot", "shop", "shout", "show", "shriek", "shrug", "shut", "sigh", "sign", "signal", "sin", "sing", "singe", "sip", "sit", "skate", "skateboard", "sketch", "ski", "skip", "slap", "sleep", "slice", "slide", "slip", "slow", "smash", "smell", "smile", "smoke", "snap", "snarl", "snatch", "sneak", "sneer", "sneeze", "snicker", "sniff", 
"snore", "snort", "snoop", "snooze", "snow", "soak", "sob", "soothe", "sound", "sow", "span", "spare", "spark", "sparkle", "speak", "speculate", "spell", "spend", "spill", "spin", "spoil", "spot", "spray", "sprout", "sputter", "squash", "squeeze", "stab", "stain", "stammer", "stamp", "stand", "star", "stare", "start", "stash", "state", "stay", "steer", "step", "stipulate", "stir", "stitch", "stop", "store", "strap", "storm", "stow", "strengthen", "stress", "stretch", "strip", "stroke", "stuff", "stutter", "stray", "strum", "strut", "stun", "stunt", "submerge", "succeed", "suffer", "suggest", "suit", "supply", "support", "suppose", "surmise", "surprise", "surround", "suspect", "suspend", "sway", "swear", "swim", "swing", "switch", "swoop", "sympathize", "talk", "take", "tame", "tap", "taste", "taunt", "teach", "tear", "tease", "telephone", "tell", "tempt", "terrify", "test", "testify", "thank", "thaw", "theorize", "think", "threaten", "throw", "thunder", "tick", "tickle", "tie", "time", "tip", "tire", "toast", "toss", "touch", "tour", "tow", "trace", "track", "trade", "train", "translate", "transport", "trap", "travel", "treat", "tremble", "trick", "trickle", "trim", "trip", "trot", "trouble", "trust", "trounce", "try", "tug", "tumble", "turn", "twist", "type", "understand", "undress", "unfasten", "unite", "unlock", "unpack", "uphold", "upset", "upstage", "urge", "untie", "use", "usurp", "utter", "vacuum", "value", "vanish", "vanquish", "venture", "visit", "voice", "volunteer", "vote", "vouch", "wail", "wait", "wake", "walk", "wallow", "wander", "want", "warm", "warn", "wash", "waste", "watch", "water", "wave", "waver", "wear", "weave", "wed", "weigh", "welcome", "whimper", "whine", "whip", "whirl", "whisper", "whistle", "win", "wink", "wipe", "wish", "wobble", "wonder", "work", "worry", "wrap", "wreck", "wrestle", "wriggle", "write", "writhe", "x-ray", "yawn", "yell", "yelp", "yield", "yodel", "zip", "zoom" ] } @staticmethod def sentence(rng=random.Random()): 
return rng.choice(Words.words['verbs']) + 'ing a ' + \ rng.choice(Words.words['adverbs']) + ' ' + \ rng.choice(Words.words['adjectives']) + ' ' + \ rng.choice(Words.words['nouns'])
import torch
import torch.nn.functional as F
from MultiOctConv.model import MultiOctaveConv


class M_OctConv_MNIST(torch.nn.Module):
    """MNIST classifier whose traditional convolutions are replaced by M-OctConv.

    Args:
        full: if True, the fully connected classification head (and the
            dropout layers) is added to the model and ``forward`` returns
            log-probabilities over the 10 digit classes; if False, the model
            stops after the last convolution and ``forward`` returns the
            high-frequency feature map.
    """

    def __init__(self, full=True):
        super(M_OctConv_MNIST, self).__init__()
        self.full = full
        # alpha/beta control how channels are split between the high, medium
        # and low frequency branches of each M-OctConv layer.
        self.conv1 = MultiOctaveConv(
            1, 12, 3,
            alpha_in=0., alpha_out=0.5, beta_in=0.0, beta_out=0.0,
            conv_args={"padding": 1, "bias": False},
            downsample=torch.nn.AvgPool2d(kernel_size=(2, 2), stride=2),
            upsample=torch.nn.Upsample(scale_factor=2, mode='nearest'),
        )
        self.conv2 = MultiOctaveConv(
            12, 12, 3,
            alpha_in=0.5, alpha_out=1 / 3, beta_in=0.0, beta_out=1 / 3,
            conv_args={"padding": 1, "bias": False},
            downsample=torch.nn.AvgPool2d(kernel_size=(2, 2), stride=2),
            upsample=torch.nn.Upsample(scale_factor=2, mode='nearest'),
        )
        self.conv3 = MultiOctaveConv(
            12, 12, 3,
            alpha_in=1 / 3, alpha_out=0.5, beta_in=1 / 3, beta_out=0.0,
            conv_args={"padding": 1, "bias": False},
            downsample=torch.nn.AvgPool2d(kernel_size=(2, 2), stride=2),
            upsample=torch.nn.Upsample(scale_factor=2, mode='nearest'),
        )
        self.conv4 = MultiOctaveConv(
            12, 15, 3,
            alpha_in=0.5, alpha_out=0.0, beta_in=0.0, beta_out=0.0,
            conv_args={"padding": 1, "bias": False},
            downsample=torch.nn.AvgPool2d(kernel_size=(2, 2), stride=2),
            upsample=torch.nn.Upsample(scale_factor=2, mode='nearest'),
        )
        if self.full:
            # 15 output channels * 7 * 7 spatial positions = 735 features.
            self.fc1 = torch.nn.Linear(735, 50)
            self.fc2 = torch.nn.Linear(50, 10)
            self.conv2_drop_h = torch.nn.Dropout2d()
            self.conv2_drop_m = torch.nn.Dropout2d()
            self.conv2_drop_l = torch.nn.Dropout2d()
            self.conv4_drop_h = torch.nn.Dropout2d()

    def partial_forward(self, x, layer):
        """Forward pass through only the first ``layer`` convolutions.

        Args:
            x: a tensor, or a 3-tuple of tensors representing the high,
                medium and low frequency feature maps of an M-OctConv (in
                that order). A level with no channels is represented by None.
            layer: index (1-4) of the last convolution ``x`` is passed through.

        Returns:
            A 3-tuple ``(x_h, x_m, x_l)`` after the requested layer, or only
            the high-frequency map after the final (4th) convolution.
        """
        x_h, x_m, x_l = self.conv1(x)
        x_h = F.relu(x_h)
        x_m = F.relu(x_m)
        if layer == 1:
            return (x_h, x_m, x_l)

        x_h, x_m, x_l = self.conv2((x_h, x_m, x_l))
        x_h = F.relu(x_h)
        x_m = F.relu(x_m)
        x_l = F.relu(x_l)
        if layer == 2:
            return (x_h, x_m, x_l)

        x_h, x_m, x_l = self.conv3((x_h, x_m, x_l))
        x_h = F.relu(F.avg_pool2d(x_h, 2))
        x_m = F.relu(F.avg_pool2d(x_m, 2))
        if layer == 3:
            return (x_h, x_m, x_l)

        # conv4 collapses everything into the high-frequency branch.
        x_h, x_m, x_l = self.conv4((x_h, x_m, x_l))
        x_h = F.relu(F.avg_pool2d(x_h, 2))
        return (x_h)

    def forward(self, x):
        """Full forward pass.

        Args:
            x: a tensor, or a 3-tuple (high, medium, low) of frequency
                feature maps; levels with no channels are None.

        Returns:
            Log-probabilities (``full=True``) or the final high-frequency
            feature map (``full=False``).
        """
        x_h, x_m, x_l = self.conv1(x)
        x_h = F.relu(x_h)
        x_m = F.relu(x_m)

        x_h, x_m, x_l = self.conv2((x_h, x_m, x_l))
        if self.full:
            # dropout is applied before the non-linearity, per-branch
            x_h = self.conv2_drop_h(x_h)
            x_m = self.conv2_drop_m(x_m)
            x_l = self.conv2_drop_l(x_l)
        x_h = F.relu(x_h)
        x_m = F.relu(x_m)
        x_l = F.relu(x_l)

        x_h, x_m, x_l = self.conv3((x_h, x_m, x_l))
        x_h = F.relu(F.avg_pool2d(x_h, 2))
        x_m = F.relu(F.avg_pool2d(x_m, 2))

        x_h, x_m, x_l = self.conv4((x_h, x_m, x_l))
        if self.full:
            x_h = self.conv4_drop_h(x_h)
        x_h = F.relu(F.avg_pool2d(x_h, 2))

        if self.full:
            x = x_h.view(-1, 735)
            x = F.relu(self.fc1(x))
            x = F.dropout(x, training=self.training)
            x = self.fc2(x)
            return F.log_softmax(x, dim=1)
        else:
            return x_h


class MNIST(torch.nn.Module):
    """A simple baseline MNIST classifier with plain convolutions."""

    def __init__(self):
        super(MNIST, self).__init__()
        self.conv1 = torch.nn.Conv2d(1, 12, kernel_size=3, padding=1, bias=False)
        self.conv2 = torch.nn.Conv2d(12, 12, kernel_size=3, padding=1, bias=False)
        self.conv3 = torch.nn.Conv2d(12, 12, kernel_size=3, padding=1, bias=False)
        self.conv4 = torch.nn.Conv2d(12, 15, kernel_size=3, padding=1, bias=False)
        self.conv2_drop = torch.nn.Dropout2d()
        self.conv4_drop = torch.nn.Dropout2d()
        # 15 channels * 7 * 7 spatial positions = 735 features.
        self.fc1 = torch.nn.Linear(735, 50)
        self.fc2 = torch.nn.Linear(50, 10)

    def fc_layers(self, x):
        """Forward pass through only the fully connected head.

        Args:
            x: a tensor reshapeable to ``[-1, 735]``.

        Returns:
            Log-probabilities over the 10 digit classes.
        """
        x = x.view(-1, 735)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        # FIX: log-softmax must be taken over the class dimension (dim=1),
        # as in ``forward``; dim=0 normalised across the batch instead.
        return F.log_softmax(x, dim=1)

    def partial_forward(self, x, layer):
        """Forward pass through only the first ``layer`` convolutions.

        Args:
            x: a tensor representing an MNIST image batch.
            layer: index (1-4) of the last convolution ``x`` is passed through.

        Returns:
            The feature map produced by the requested layer.
        """
        x = F.relu(self.conv1(x))
        if layer == 1:
            return x
        x = F.relu(self.conv2(x))
        if layer == 2:
            return x
        x = F.relu(F.avg_pool2d(self.conv3(x), 2))
        if layer == 3:
            return x
        x = F.relu(F.avg_pool2d(self.conv4(x), 2))
        return x

    def forward(self, x):
        """Full forward pass; returns log-probabilities over the 10 classes."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2_drop(self.conv2(x)))
        x = F.relu(F.avg_pool2d(self.conv3(x), 2))
        x = F.relu(F.avg_pool2d(self.conv4_drop(self.conv4(x)), 2))
        x = x.view(-1, 735)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
from bibgrafo.grafo_matriz_adj_dir import GrafoMatrizAdjacenciaDirecionado
from bibgrafo.grafo_exceptions import *
from copy import deepcopy, copy


class MeuGrafo(GrafoMatrizAdjacenciaDirecionado):

    def verticesAdjacentes(self, V=''):
        '''
        Provides a list of all vertices reachable from V through a single arc.
        :param V: the label of the source vertex
        :return: a list with the adjacent (successor) vertices
        '''
        matriz = self.matrizModificada()
        verticesAdjacentes = []
        for arco in matriz:
            # each row is [label, v1, v2, weight]
            if arco[1] == V:
                verticesAdjacentes.append(arco[2])
        return verticesAdjacentes

    def grau(self, V=''):
        '''
        Provides the (in-)degree of the given vertex: the number of non-empty
        cells in its column of the adjacency matrix.
        :param V: the label of the vertex to be analysed
        :return: an integer with the degree of the vertex
        :raises: VerticeInvalidoException if the vertex is not in the graph
        '''
        if V not in self.N:
            raise VerticeInvalidoException("Vértice não existe")
        indiceVertice = self.posicaoVertice(V)
        grau = 0
        for i in range(len(self.N)):
            # parallel arcs in one cell still count as a single entry,
            # as in the original implementation
            if self.M[i][indiceVertice] != {}:
                grau += 1
        return grau

    def matrizModificada(self):
        '''
        Flattens the adjacency matrix into a list of arcs.
        :return: a list of rows [label, v1, v2, weight], sorted by the numeric
                 suffix of the arc label (a1, a2, ..., a10, a11, ...) so that
                 arcs come out in creation order instead of matrix order.
                 Ex: [['a1', 'A', 'B', 3], ['a2', 'A', 'C', 4], ...]
        '''
        linhas = []
        for i in range(len(self.N)):
            for j in range(len(self.N)):
                if self.M[i][j]:
                    # a cell holds a dict of parallel arcs; only the first
                    # one is considered (same behaviour as before)
                    rotulo = list(self.M[i][j].keys())[0]
                    peso = self.M[i][j][rotulo].getPeso()
                    # keep the numeric suffix of the label as the sort key
                    linhas.append([int(rotulo[1:]), rotulo, self.N[i], self.N[j], peso])
        linhas.sort()
        # drop the sort key, keeping [label, v1, v2, weight]
        return [linha[1:] for linha in linhas]

    def pesoArco(self, v1, v2):
        '''
        :param v1: source vertex
        :param v2: target vertex
        :return: the weight of the arc v1->v2 if it exists, otherwise False
        '''
        for arco in self.matrizModificada():
            # arco = [label, v1, v2, weight]
            if arco[1] == v1 and arco[2] == v2:
                return arco[3]
        return False

    def pegarRotulo(self, v1, v2):
        '''
        :param v1: source vertex
        :param v2: target vertex
        :return: the label of the arc v1->v2 if it exists, otherwise False
        '''
        for arco in self.matrizModificada():
            # arco = [label, v1, v2, weight]
            if arco[1] == v1 and arco[2] == v2:
                return arco[0]
        return False

    def matrizAdjacenciaComPesos(self):
        '''
        Builds an adjacency matrix holding the weight of each arc (0 where
        there is no arc).
        :return: a matrix. Ex: [[3, 0, 0, 5], [0, 4, 0, 7], ...]
        '''
        matriz = []
        for i in range(len(self.M)):
            linha = []
            for j in range(len(self.M[0])):
                if self.M[i][j]:
                    linha.append(self.pesoArco(self.N[i], self.N[j]))
                else:
                    linha.append(0)
            matriz.append(linha)
        return matriz

    def matrizAdjacenciaBinaria(self):
        '''
        Builds a binary adjacency matrix (1 where an arc exists, else 0).
        :return: a matrix. Ex: [[1, 0, 0, 1], [0, 0, 0, 1], ...]
        '''
        matriz = []
        for i in range(len(self.M)):
            linha = []
            for j in range(len(self.M[0])):
                linha.append(1 if self.M[i][j] else 0)
            matriz.append(linha)
        return matriz

    def warshall(self):
        '''
        Uses Warshall's algorithm to compute the reachability matrix of the
        graph from its binary adjacency matrix.
        :return: the reachability matrix (0/1). Ex: [[1, 1, 0, 1], ...]
        '''
        matrizAlcancabilidade = self.matrizAdjacenciaBinaria()
        for i in range(len(self.N)):
            for j in range(len(self.N)):
                if matrizAlcancabilidade[j][i] == 1:
                    # row j reaches i, so it also reaches whatever i reaches
                    for k in range(len(self.N)):
                        matrizAlcancabilidade[j][k] = max(matrizAlcancabilidade[j][k],
                                                          matrizAlcancabilidade[i][k])
        return matrizAlcancabilidade

    def imprimirMatriz(self, matriz):
        '''
        Prints the given 0/1 matrix with vertex labels on both axes.
        :param matriz: a matrix of 0s and 1s
        '''
        print(" ", end='|')
        for k in range(len(self.N)):
            print(self.N[k], end='|')
        print()
        for i in range(len(matriz)):
            print(self.N[i], end='|')
            for j in range(len(matriz[0])):
                print(matriz[i][j], end=' ')
            print()

    def posicaoVertice(self, V):
        '''
        :param V: a vertex label
        :return: the index of the vertex in self.N, or False if absent
        '''
        for i in range(len(self.N)):
            if self.N[i] == V:
                return i
        return False

    def colocarArestas(self, listaVertices):
        '''
        Interleaves arc labels between consecutive vertices of a path.
        :param listaVertices: a list of vertices. Ex: ['A', 'J', 'K']
        :return: a list alternating vertices and arc labels.
                 Ex: ['A', 'a3', 'J', 'a5', 'K']
        '''
        listaFinal = []
        for k in range(len(listaVertices)):
            listaFinal.append(listaVertices[k])
            if k < len(listaVertices) - 1:
                # FIX: look the label up from the vertex pair itself instead of
                # slicing characters of an "A-B" string (j[0]/j[2]), which only
                # worked for single-character vertex names.
                listaFinal.append(self.pegarRotulo(listaVertices[k],
                                                   listaVertices[k + 1]))
        return listaFinal

    ############### DIJKSTRA ###############
    def setComPesos(self, lista):
        '''
        :param lista: a list of adjacent vertices
        :return: a dict mapping each vertex to None (weights filled in later)
        '''
        return dict.fromkeys(lista)

    def modificandoGrafoUtil(self):
        '''
        Converts the graph into a nested dict {v1: {v2: weight, ...}, ...}.
        :return: the graph as a dictionary
        '''
        grafoDict = {}
        for v in self.N:
            grafoDict[v] = self.setComPesos(self.verticesAdjacentes(v))
        # fill in the weight of every arc
        for v1 in grafoDict:
            for v2 in grafoDict[v1]:
                grafoDict[v1][v2] = self.pesoArco(v1, v2)
        return grafoDict

    def dijkstra(self, origem, destino):
        '''
        Uses Dijkstra's algorithm to build the shortest path between two
        vertices.
        :param origem: the source vertex
        :param destino: the target vertex
        :return: the shortest path (vertices interleaved with arc labels) as a
                 string, e.g. "['A', 'a2', 'C', 'a6', 'F']", or False when no
                 path exists or a vertex is unknown.
        '''
        novoGrafo = self.modificandoGrafoUtil()
        if origem == destino:  # trivial path from a vertex to itself
            return [origem + '-' + destino]
        if origem not in novoGrafo:
            return False
        if destino not in novoGrafo:
            return False

        rotulos = {}  # tentative shortest distance of each vertex
        ordem = {}    # predecessor of each relaxed vertex
        for v in novoGrafo.keys():
            # distance from the source to itself is 0; everything else starts
            # at infinity
            rotulos[v] = 0 if v == origem else float("inf")
        naoVisitado = copy(rotulos)

        while len(naoVisitado) > 0:
            # vertex with the smallest tentative distance
            minNo = min(naoVisitado, key=naoVisitado.get)
            for vizinho in novoGrafo[minNo]:
                if rotulos[vizinho] > rotulos[minNo] + novoGrafo[minNo][vizinho]:
                    rotulos[vizinho] = rotulos[minNo] + novoGrafo[minNo][vizinho]
                    naoVisitado[vizinho] = rotulos[vizinho]
                    ordem[vizinho] = minNo
            # once visited, a vertex is removed from the frontier
            del naoVisitado[minNo]

        # walk the predecessor chain back from the target to the source
        temp = copy(destino)
        auxCaminho = []
        while 1:
            auxCaminho.append(temp)
            if temp in ordem:
                temp = ordem[temp]
            else:
                return False  # target was never reached
            if temp == origem:
                auxCaminho.append(temp)
                break

        caminho = list(reversed(auxCaminho))
        # To return the weight of the shortest path instead, return
        # str(rotulos[destino]).
        caminho = str(self.colocarArestas(caminho))
        return caminho

    ############### KAHN ###############
    def kahnAlgoritmo(self):
        '''
        Uses Kahn's algorithm to build a topological ordering of the graph.
        :return: a list of the vertices in topological order.
                 Ex: ['5', '3', '7', '8', '11', '10', '9', '2']
        '''
        ordenacao = []  # resulting topological ordering
        graus = []      # current in-degree of each vertex (by index)
        fontes = []     # indices of vertices with no incoming arcs
        for n in self.N:
            graus.append(self.grau(n))
        for m in range(len(graus)):
            if graus[m] == 0:
                fontes.append(m)
        while fontes:
            v = fontes.pop()
            ordenacao.append(self.N[v])
            for adjacente in self.verticesAdjacentes(self.N[v]):
                u = self.posicaoVertice(adjacente)
                # the removed vertex no longer feeds its successors
                graus[u] -= 1
                if graus[u] == 0:
                    fontes.append(u)
        return ordenacao
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2020 <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), "../"))

from base.smart_log import smart_log
from config.gerrit_merged_config import GerritMergedConfig
from config.jira_server_config import JiraServerConfig
from config.user_config import UserConfig
from gerrit.bot_gerrit import BotGerrit
from im.we_chat import Bot
from issues.bot_jira import BotJira

# Template for one merged-patch entry in the report message.
merged_message = "{issue}: {title} \n" \
                 "---> 处理人: {owner}\n" \
                 "---> Jira地址: {issue_link}\n" \
                 "---> Gerrit地址: {patch_link}\n\n"


class BotGerritMerged(object):
    """Collects yesterday's merged Gerrit patches and reports them via WeChat."""

    def send_merged(self, config, who):
        """Query merged patches for one project/branch config and send a report.

        :param config: a GerritMergedConfig entry (project, branch, SQL templates)
        :param who: key of the recipient in the user config; the bot owner
                    always receives a copy
        :return: 0 when the Gerrit query failed; otherwise None
        """
        jira_server = JiraServerConfig.get_configs()
        bot_jira = BotJira(jira_server.service, jira_server.fields,
                           jira_server.user, jira_server.pwd)

        # report window: everything merged yesterday
        today = datetime.date.today()
        yesterday = today - datetime.timedelta(days=1)
        cmd = config.base_sql + config.time_sql.format(branch=config.branch,
                                                       status='merged',
                                                       yesterday=yesterday,
                                                       today=today)
        bot_patches = BotGerrit().search_patch(cmd)
        if bot_patches is None:
            smart_log("search merged patch error")
            return 0

        if len(bot_patches) > 0:
            message = "%s %s 模块合入%s分支问题数 = %d \n" \
                      "👇 👇 👇 👇 👇 👇 👇 👇 👇 👇 👇 👇 👇 \n" % (
                          config.project, yesterday, config.branch, len(bot_patches))
            for bot_patch in bot_patches:
                # enrich each patch with its Jira issue title
                bot_issue = bot_jira.search_issue(bot_patch.issue)
                message += merged_message.format(issue=bot_patch.issue,
                                                 issue_link=bot_patch.issue_link,
                                                 owner=bot_patch.owner_name,
                                                 patch_link=bot_patch.url,
                                                 title=bot_issue.title)
            smart_log(message)

            # send to the bot owner (use subscription instead of __getitem__)
            bot = Bot(UserConfig.get_configs()["bot_owner"])
            bot.set_text(message, type='text').send()

            # send to the requested recipient, unless it is the bot owner
            if who != "bot_owner":
                bot = Bot(UserConfig.get_configs()[who])
                bot.set_text(message, type='text').send()

    def fetch_merged(self, project, branch, who):
        """Dispatch ``send_merged`` for every matching project/branch config.

        :param project: project name to match against the configs
        :param branch: a specific branch name, or "all" for every branch
        :param who: recipient key forwarded to ``send_merged``
        """
        configs = GerritMergedConfig.get_configs()
        for config in configs:
            if project == config.project:
                if branch == "all":
                    self.send_merged(config, who)
                elif branch == config.branch:
                    self.send_merged(config, who)


# if __name__ == "__main__":
#     bot_gerrit_merged = BotGerritMerged()
#     bot_gerrit_merged.fetch_merged("all", "all", "bot_owner")
# -*- coding: utf-8 -*-
"""Tests for the 'Naive' numbering scheme of 3d CSCG forms and trace forms.

@author: <NAME>.
         Department of Aerodynamics
         Faculty of Aerospace Engineering
         TU Delft, Delft, Netherlands
"""
import sys
if './' not in sys.path:
    sys.path.append('./')

from root.config.main import *
from objects.CSCG._3d.master import MeshGenerator, SpaceInvoker, FormCaller
from objects.CSCG._3d.__tests__.Random.form_caller import random_FormCaller_of_total_load_around
import random


def test_Naive_Numbering_NO1_0form():
    """Check the Naive gathering matrix of a 0-form against a hand-computed benchmark."""
    if rAnk == mAster_rank:
        print("--- [test_Naive_Numbering_NO1_0form] ...... ", flush=True)

    mesh = MeshGenerator('crazy_periodic')([2, 2, 2], EDM='debug')
    space = SpaceInvoker('polynomials')([('Lobatto', 2), ('Lobatto', 1), ('Lobatto', 2)])
    FC = FormCaller(mesh, space)
    f0 = FC('0-f', is_hybrid=False, numbering_parameters='Naive')
    benchmark = np.array(
        [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
         [2, 18, 0, 5, 21, 3, 8, 19, 6, 11, 22, 9, 14, 20, 12, 17, 23, 15],
         [3, 4, 5, 0, 1, 2, 9, 10, 11, 6, 7, 8, 15, 16, 17, 12, 13, 14],
         [5, 21, 3, 2, 18, 0, 11, 22, 9, 8, 19, 6, 17, 23, 15, 14, 20, 12],
         [12, 13, 14, 15, 16, 17, 24, 26, 28, 25, 27, 29, 0, 1, 2, 3, 4, 5],
         [14, 20, 12, 17, 23, 15, 28, 30, 24, 29, 31, 25, 2, 18, 0, 5, 21, 3],
         [15, 16, 17, 12, 13, 14, 25, 27, 29, 24, 26, 28, 3, 4, 5, 0, 1, 2],
         [17, 23, 15, 14, 20, 12, 29, 31, 25, 28, 30, 24, 5, 21, 3, 2, 18, 0]]
    )
    # compare each local element's numbering with the benchmark row
    for i in f0.numbering.gathering:
        assert np.all(f0.numbering.gathering[i].full_vector == benchmark[i,:])
    return 1


def test_Naive_Numbering_NO2_1form():
    """Check the Naive gathering matrix of a 1-form against a hand-computed benchmark."""
    if rAnk == mAster_rank:
        print("--- [test_Naive_Numbering_NO2_1form] ...... ", flush=True)

    mesh = MeshGenerator('crazy_periodic')([2, 2, 2], EDM='debug')
    space = SpaceInvoker('polynomials')([('Lobatto', 2), ('Lobatto', 1), ('Lobatto', 2)])
    FC = FormCaller(mesh, space)
    f1 = FC('1-f', is_hybrid=False, numbering_parameters='Naive')
    benchmark = np.array(
        [[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
           22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32],
         [33, 39, 36, 42, 34, 40, 37, 43, 35, 41, 38, 44, 14, 45, 12, 17, 46, 15, 20, 47, 18, 23,
          48, 21, 26, 50, 24, 29, 49, 27, 32, 51, 30],
         [2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 52, 55, 58, 53, 56, 59, 54, 57, 60, 24,
          25, 26, 21, 22, 23, 30, 31, 32, 27, 28, 29],
         [36, 42, 33, 39, 37, 43, 34, 40, 38, 44, 35, 41, 58, 61, 52, 59, 62, 53, 60, 63, 54, 26,
          50, 24, 23, 48, 21, 32, 51, 30, 29, 49, 27],
         [8, 9, 10, 11, 64, 66, 65, 67, 0, 1, 2, 3, 18, 19, 20, 68, 69, 70, 12, 13, 14, 71,
          75, 79, 73, 77, 81, 72, 76, 80, 74, 78, 82],
         [35, 41, 38, 44, 83, 85, 84, 86, 33, 39, 36, 42, 20, 47, 18, 70, 87, 68, 14, 45, 12, 79,
          88, 71, 81, 90, 73, 80, 89, 72, 82, 91, 74],
         [10, 11, 8, 9, 65, 67, 64, 66, 2, 3, 0, 1, 54, 57, 60, 92, 93, 94, 52, 55, 58, 73,
          77, 81, 71, 75, 79, 74, 78, 82, 72, 76, 80],
         [38, 44, 35, 41, 84, 86, 83, 85, 36, 42, 33, 39, 60, 63, 54, 94, 95, 92, 58, 61, 52, 81,
          90, 73, 79, 88, 71, 82, 91, 74, 80, 89, 72]]
    )
    # compare each local element's numbering with the benchmark row
    for i in f1.numbering.gathering:
        assert np.all(f1.numbering.gathering[i].full_vector == benchmark[i,:])
    return 1


def test_Naive_Numbering_NO3_2form():
    """Check the Naive gathering matrix of a 2-form against a hand-computed benchmark."""
    if rAnk == mAster_rank:
        print("--- [test_Naive_Numbering_NO3_2form] ...... ", flush=True)

    mesh = MeshGenerator('crazy_periodic')([2, 2, 2], EDM='debug')
    space = SpaceInvoker('polynomials')([('Lobatto', 2), ('Lobatto', 1), ('Lobatto', 2)])
    FC = FormCaller(mesh, space)
    f2 = FC('2-f', is_hybrid=False, numbering_parameters='Naive')
    benchmark = np.array([
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
        [2, 20, 0, 5, 21, 3, 22, 26, 24, 28, 23, 27, 25, 29, 30, 33, 31, 34, 32, 35],
        [36, 38, 40, 37, 39, 41, 8, 9, 6, 7, 12, 13, 10, 11, 42, 45, 43, 46, 44, 47],
        [40, 48, 36, 41, 49, 37, 24, 28, 22, 26, 25, 29, 23, 27, 50, 53, 51, 54, 52, 55],
        [56, 58, 60, 57, 59, 61, 62, 66, 64, 68, 63, 67, 65, 69, 18, 19, 70, 71, 14, 15],
        [60, 72, 56, 61, 73, 57, 74, 78, 76, 80, 75, 79, 77, 81, 32, 35, 82, 83, 30, 33],
        [84, 86, 88, 85, 87, 89, 64, 68, 62, 66, 65, 69, 63, 67, 44, 47, 90, 91, 42, 45],
        [88, 92, 84, 89, 93, 85, 76, 80, 74, 78, 77, 81, 75, 79, 52, 55, 94, 95, 50, 53]
    ])
    # compare each local element's numbering with the benchmark row
    for i in f2.numbering.gathering:
        assert np.all(f2.numbering.gathering[i].full_vector == benchmark[i,:])
    return 1


def test_Naive_Numbering_NO5_0trace():
    """Check global contiguity and local consistency of a 0-trace-form Naive numbering."""
    if rAnk == mAster_rank:
        load = random.randint(10, 199)
        print(f"--- [test_Naive_Numbering_NO5_0trace] @ FC-load = {load} ...... ", flush=True)
    else:
        load= None
    load = cOmm.bcast(load, root=mAster_rank)
    FC = random_FormCaller_of_total_load_around(load)

    t0 = FC('0-t', numbering_parameters={'scheme_name': 'Naive',})

    GM_TEW = t0.numbering.trace_element_wise
    for i in GM_TEW:
        assert i in t0.mesh.trace.elements, "must be the case"
    assert len(set(GM_TEW.keys())) == t0.mesh.trace.elements.num, "must be the case"

    GM_TEW = cOmm.gather(GM_TEW, root=mAster_rank)
    if rAnk == mAster_rank:
        # globally, numbering must be contiguous across trace elements
        END = 0
        for i in range(t0.mesh.trace.elements.GLOBAL_num):# go through all (global) trace elements
            for gm_core in GM_TEW:
                if i in gm_core:
                    fv = gm_core[i].full_vector
                    assert fv[0] == END, f"must be the case."
                    NEW_END = fv[-1] + 1
                    END = NEW_END

    # locally, the number of dofs must match the set of used numbers
    num_of_dofs_in_this_core = t0.numbering.num_local_dofs
    LOCAL_NUMBERING = set()
    GM = t0.numbering.gathering
    for i in GM:
        LOCAL_NUMBERING.update(GM[i].full_vector)
    assert num_of_dofs_in_this_core == len(LOCAL_NUMBERING), "must be the case!"

    return 1


def test_Naive_Numbering_NO6_1trace():
    """Check global contiguity and local consistency of a 1-trace-form Naive numbering."""
    if rAnk == mAster_rank:
        load = random.randint(10, 199)
        print(f"--- [test_Naive_Numbering_NO6_1trace] @ FC-load = {load} ...... ", flush=True)
    else:
        load= None
    load = cOmm.bcast(load, root=mAster_rank)
    FC = random_FormCaller_of_total_load_around(load, EDM_pool=('chaotic',))

    t1 = FC('1-t', numbering_parameters={'scheme_name': 'Naive',})

    GM_TEW = t1.numbering.trace_element_wise
    for i in GM_TEW:
        assert i in t1.mesh.trace.elements, "must be the case"
    assert len(set(GM_TEW.keys())) == t1.mesh.trace.elements.num, "must be the case"

    GM_TEW = cOmm.gather(GM_TEW, root=mAster_rank)
    if rAnk == mAster_rank:
        # globally, numbering must be contiguous across trace elements
        END = 0
        for i in range(t1.mesh.trace.elements.GLOBAL_num):# go through all (global) trace elements
            for gm_core in GM_TEW:
                if i in gm_core:
                    fv = gm_core[i].full_vector
                    assert fv[0] == END, f"must be the case."
                    NEW_END = fv[-1] + 1
                    END = NEW_END

    # locally, the number of dofs must match the set of used numbers
    num_of_dofs_in_this_core = t1.numbering.num_local_dofs
    LOCAL_NUMBERING = set()
    GM = t1.numbering.gathering
    for i in GM:
        LOCAL_NUMBERING.update(GM[i].full_vector)
    assert num_of_dofs_in_this_core == len(LOCAL_NUMBERING), "must be the case!"

    return 1


def test_Naive_Numbering_NO4_2trace():
    """Check the Naive gathering matrix of a 2-trace-form against a hand-computed benchmark."""
    if rAnk == mAster_rank:
        print("--- [test_Naive_Numbering_NO4_2trace] ...... ", flush=True)

    mesh = MeshGenerator('crazy_periodic')([2, 2, 2], EDM='debug')
    space = SpaceInvoker('polynomials')([('Lobatto', 2), ('Lobatto', 3), ('Lobatto', 1)])
    FC = FormCaller(mesh, space)
    t2 = FC('2-t', numbering_parameters='Naive')
    GM = t2.numbering.gathering
    num_of_dofs_in_this_core = t2.numbering.num_local_dofs
    LOCAL_NUMBERING = set()
    benchmark = np.array(
        [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
         [3, 4, 5, 0, 1, 2, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37],
         [38, 39, 40, 41, 42, 43, 8, 9, 6, 7, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55],
         [41, 42, 43, 38, 39, 40, 24, 25, 22, 23, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67],
         [68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 16, 17, 18, 19, 20, 21, 10, 11, 12, 13, 14, 15],
         [71, 72, 73, 68, 69, 70, 78, 79, 80, 81, 32, 33, 34, 35, 36, 37, 26, 27, 28, 29, 30, 31],
         [82, 83, 84, 85, 86, 87, 76, 77, 74, 75, 50, 51, 52, 53, 54, 55, 44, 45, 46, 47, 48, 49],
         [85, 86, 87, 82, 83, 84, 80, 81, 78, 79, 62, 63, 64, 65, 66, 67, 56, 57, 58, 59, 60, 61]]
    )
    # compare each local element's numbering with the benchmark row and
    # collect all used numbers
    for i in GM:
        assert np.all(GM[i].full_vector == benchmark[i,:])
        LOCAL_NUMBERING.update(GM[i].full_vector)
    assert num_of_dofs_in_this_core == len(LOCAL_NUMBERING), "must be the case!"

    return 1


if __name__ == '__main__':
    # mpiexec -n 6 python _3dCSCG\TESTS\unittest_Naive_numbering.py
    # test_Naive_Numbering_NO4_2trace()
    test_Naive_Numbering_NO5_0trace()
    test_Naive_Numbering_NO6_1trace()
__package__ = 'archivebox.index'

import os
import shutil
import json as pyjson
from pathlib import Path
from itertools import chain
from typing import List, Tuple, Dict, Optional, Iterable
from collections import OrderedDict
from contextlib import contextmanager
from urllib.parse import urlparse

from django.db.models import QuerySet, Q

from ..util import (
    scheme,
    enforce_types,
    ExtendedEncoder,
)
from ..config import (
    ARCHIVE_DIR_NAME,
    SQL_INDEX_FILENAME,
    JSON_INDEX_FILENAME,
    OUTPUT_DIR,
    TIMEOUT,
    URL_BLACKLIST_PTN,
    stderr,
    OUTPUT_PERMISSIONS
)
from ..logging_util import (
    TimedProgress,
    log_indexing_process_started,
    log_indexing_process_finished,
    log_indexing_started,
    log_indexing_finished,
    log_parsing_finished,
    log_deduping_finished,
)
from .schema import Link, ArchiveResult
from .html import (
    write_html_link_details,
)
from .json import (
    parse_json_link_details,
    write_json_link_details,
)
from .sql import (
    write_sql_main_index,
    write_sql_link_details,
)

from ..search import search_backend_enabled, query_search_index

### Link filtering and checking

@enforce_types
def merge_links(a: Link, b: Link) -> Link:
    """deterministially merge two links, favoring longer field values over shorter,
    and "cleaner" values over worse ones.
    """
    assert a.base_url == b.base_url, f'Cannot merge two links with different URLs ({a.base_url} != {b.base_url})'

    # longest url wins (because a fuzzy url will always be shorter)
    url = a.url if len(a.url) > len(b.url) else b.url

    # best title based on length and quality
    possible_titles = [
        title
        for title in (a.title, b.title)
        if title and title.strip() and '://' not in title
    ]
    title = None
    if len(possible_titles) == 2:
        title = max(possible_titles, key=lambda t: len(t))
    elif len(possible_titles) == 1:
        title = possible_titles[0]

    # earliest valid timestamp
    timestamp = (
        a.timestamp
        if float(a.timestamp or 0) < float(b.timestamp or 0) else
        b.timestamp
    )

    # all unique, truthy tags
    tags_set = (
        set(tag.strip() for tag in (a.tags or '').split(','))
        | set(tag.strip() for tag in (b.tags or '').split(','))
    )
    tags = ','.join(tags_set) or None

    # all unique source entries
    sources = list(set(a.sources + b.sources))

    # all unique history entries for the combined archive methods
    # BUGFIX: was set(list(a.history.keys()) + list(a.history.keys())), which
    # read a.history twice and silently dropped any method present only in b.
    all_methods = set(list(a.history.keys()) + list(b.history.keys()))
    history = {
        method: (a.history.get(method) or []) + (b.history.get(method) or [])
        for method in all_methods
    }
    for method in all_methods:
        # dedupe results by round-tripping through canonical JSON
        deduped_jsons = {
            pyjson.dumps(result, sort_keys=True, cls=ExtendedEncoder)
            for result in history[method]
        }
        history[method] = list(reversed(sorted(
            (ArchiveResult.from_json(pyjson.loads(result)) for result in deduped_jsons),
            key=lambda result: result.start_ts,
        )))

    return Link(
        url=url,
        timestamp=timestamp,
        title=title,
        tags=tags,
        sources=sources,
        history=history,
    )


@enforce_types
def validate_links(links: Iterable[Link]) -> List[Link]:
    """filter out unarchivable links, then sort and dedupe the rest"""
    timer = TimedProgress(TIMEOUT * 4)
    try:
        links = archivable_links(links)     # remove chrome://, about:, mailto: etc.
        links = sorted_links(links)         # deterministically sort the links based on timstamp, url
        links = fix_duplicate_links(links)  # merge/dedupe duplicate timestamps & urls
    finally:
        timer.end()

    return list(links)


@enforce_types
def archivable_links(links: Iterable[Link]) -> Iterable[Link]:
    """remove chrome://, about:// or other schemed links that cant be archived"""
    for link in links:
        try:
            urlparse(link.url)
        except ValueError:
            continue
        if scheme(link.url) not in ('http', 'https', 'ftp'):
            continue
        if URL_BLACKLIST_PTN and URL_BLACKLIST_PTN.search(link.url):
            continue

        yield link


@enforce_types
def fix_duplicate_links(sorted_links: Iterable[Link]) -> Iterable[Link]:
    """
    ensures that all non-duplicate links have monotonically increasing timestamps
    """
    # from core.models import Snapshot

    unique_urls: OrderedDict[str, Link] = OrderedDict()

    for link in sorted_links:
        if link.url in unique_urls:
            # merge with any other links that share the same url
            link = merge_links(unique_urls[link.url], link)
        unique_urls[link.url] = link

    return unique_urls.values()


@enforce_types
def sorted_links(links: Iterable[Link]) -> Iterable[Link]:
    """sort links newest-first by integer part of timestamp, then by url"""
    sort_func = lambda link: (link.timestamp.split('.', 1)[0], link.url)
    return sorted(links, key=sort_func, reverse=True)


@enforce_types
def links_after_timestamp(links: Iterable[Link], resume: Optional[float]=None) -> Iterable[Link]:
    """yield only the links at-or-before the resume timestamp (links are sorted newest-first)"""
    if not resume:
        yield from links
        return

    for link in links:
        try:
            if float(link.timestamp) <= resume:
                yield link
        except (ValueError, TypeError):
            print('Resume value and all timestamp values must be valid numbers.')


@enforce_types
def lowest_uniq_timestamp(used_timestamps: OrderedDict, timestamp: str) -> str:
    """resolve duplicate timestamps by appending a decimal 1234, 1234 -> 1234.1, 1234.2"""

    timestamp = timestamp.split('.')[0]
    nonce = 0

    # first try 152323423 before 152323423.0
    if timestamp not in used_timestamps:
        return timestamp

    new_timestamp = '{}.{}'.format(timestamp, nonce)
    while new_timestamp in used_timestamps:
        nonce += 1
        new_timestamp = '{}.{}'.format(timestamp, nonce)

    return new_timestamp


### Main Links Index

@contextmanager
@enforce_types
def timed_index_update(out_path: Path):
    """context manager that logs + times an index write and asserts the file exists after"""
    log_indexing_started(out_path)
    timer = TimedProgress(TIMEOUT * 2, prefix=' ')
    try:
        yield
    finally:
        timer.end()

    assert out_path.exists(), f'Failed to write index file: {out_path}'
    log_indexing_finished(out_path)


@enforce_types
def write_main_index(links: List[Link], out_dir: Path=OUTPUT_DIR) -> None:
    """Writes links to sqlite3 file for a given list of links"""

    log_indexing_process_started(len(links))

    try:
        with timed_index_update(out_dir / SQL_INDEX_FILENAME):
            write_sql_main_index(links, out_dir=out_dir)
            os.chmod(out_dir / SQL_INDEX_FILENAME, int(OUTPUT_PERMISSIONS, base=8))  # set here because we don't write it with atomic writes
    except (KeyboardInterrupt, SystemExit):
        # finish the write anyway so the index isn't left half-written
        stderr('[!] Warning: Still writing index to disk...', color='lightyellow')
        stderr('    Run archivebox init to fix any inconsistencies from an ungraceful exit.')
        with timed_index_update(out_dir / SQL_INDEX_FILENAME):
            write_sql_main_index(links, out_dir=out_dir)
            os.chmod(out_dir / SQL_INDEX_FILENAME, int(OUTPUT_PERMISSIONS, base=8))  # set here because we don't write it with atomic writes
        raise SystemExit(0)

    log_indexing_process_finished()


@enforce_types
def load_main_index(out_dir: Path=OUTPUT_DIR, warn: bool=True) -> List[Link]:
    """parse and load existing index with any new links from import_path merged in"""
    from core.models import Snapshot
    try:
        return Snapshot.objects.all()
    except (KeyboardInterrupt, SystemExit):
        raise SystemExit(0)


@enforce_types
def load_main_index_meta(out_dir: Path=OUTPUT_DIR) -> Optional[dict]:
    """load the metadata dict (without links) from the legacy JSON index, if present"""
    index_path = out_dir / JSON_INDEX_FILENAME
    if index_path.exists():
        with open(index_path, 'r', encoding='utf-8') as f:
            meta_dict = pyjson.load(f)
            meta_dict.pop('links')
            return meta_dict

    return None


@enforce_types
def parse_links_from_source(source_path: str, root_url: Optional[str]=None) -> List[Link]:
    """parse a downloaded/imported file into a validated list of Links"""
    # NOTE: annotation fixed from Tuple[List[Link], List[Link]] — the function
    # has always returned a single list.

    from ..parsers import parse_links

    new_links: List[Link] = []

    # parse and validate the import file
    raw_links, parser_name = parse_links(source_path, root_url=root_url)
    new_links = validate_links(raw_links)

    if parser_name:
        num_parsed = len(raw_links)
        log_parsing_finished(num_parsed, parser_name)

    return new_links


@enforce_types
def fix_duplicate_links_in_index(snapshots: QuerySet, links: Iterable[Link]) -> Iterable[Link]:
    """
    Given a list of in-memory Links, dedupe and merge them with any conflicting Snapshots in the DB.
    """
    unique_urls: OrderedDict[str, Link] = OrderedDict()

    for link in links:
        index_link = snapshots.filter(url=link.url)
        if index_link:
            link = merge_links(index_link[0].as_link(), link)

        unique_urls[link.url] = link

    return unique_urls.values()


@enforce_types
def dedupe_links(snapshots: QuerySet, new_links: List[Link]) -> List[Link]:
    """
    The validation of links happened at a different stage. This method will
    focus on actual deduplication and timestamp fixing.
    """

    # merge existing links in out_dir and new links
    dedup_links = fix_duplicate_links_in_index(snapshots, new_links)

    new_links = [
        link for link in new_links
        if not snapshots.filter(url=link.url).exists()
    ]

    dedup_links_dict = {link.url: link for link in dedup_links}

    # Replace links in new_links with the dedup version
    for i, link in enumerate(new_links):
        if link.url in dedup_links_dict:
            new_links[i] = dedup_links_dict[link.url]
    log_deduping_finished(len(new_links))

    return new_links


### Link Details Index

@enforce_types
def write_link_details(link: Link, out_dir: Optional[str]=None, skip_sql_index: bool=False) -> None:
    """write the per-link JSON/HTML detail indexes (and optionally the SQL row)"""
    out_dir = out_dir or link.link_dir

    write_json_link_details(link, out_dir=out_dir)
    write_html_link_details(link, out_dir=out_dir)
    if not skip_sql_index:
        write_sql_link_details(link)


@enforce_types
def load_link_details(link: Link, out_dir: Optional[str]=None) -> Link:
    """check for an existing link archive in the given directory,
    and load+merge it into the given link dict
    """
    out_dir = out_dir or link.link_dir

    existing_link = parse_json_link_details(out_dir)
    if existing_link:
        return merge_links(existing_link, link)

    return link


LINK_FILTERS = {
    'exact': lambda pattern: Q(url=pattern),
    'substring': lambda pattern: Q(url__icontains=pattern),
    'regex': lambda pattern: Q(url__iregex=pattern),
    'domain': lambda pattern: Q(url__istartswith=f"http://{pattern}") | Q(url__istartswith=f"https://{pattern}") | Q(url__istartswith=f"ftp://{pattern}"),
    'tag': lambda pattern: Q(tags__name=pattern),
}


@enforce_types
def q_filter(snapshots: QuerySet, filter_patterns: List[str], filter_type: str='exact') -> QuerySet:
    """OR together one Q object per pattern using the given LINK_FILTERS type"""
    q_filter = Q()
    for pattern in filter_patterns:
        try:
            q_filter = q_filter | LINK_FILTERS[filter_type](pattern)
        except KeyError:
            stderr()
            stderr(
                f'[X] Got invalid pattern for --filter-type={filter_type}:',
                color='red',
            )
            stderr(f'    {pattern}')
            raise SystemExit(2)
    return snapshots.filter(q_filter)


def search_filter(snapshots: QuerySet, filter_patterns: List[str], filter_type: str='search') -> QuerySet:
    """filter snapshots through the full-text search backend"""
    if not search_backend_enabled():
        stderr()
        stderr(
            '[X] The search backend is not enabled, set config.USE_SEARCHING_BACKEND = True',
            color='red',
        )
        raise SystemExit(2)
    from core.models import Snapshot

    qsearch = Snapshot.objects.none()
    for pattern in filter_patterns:
        try:
            qsearch |= query_search_index(pattern)
        except Exception:
            # BUGFIX: was a bare `except:` which also caught KeyboardInterrupt
            raise SystemExit(2)

    return snapshots & qsearch


@enforce_types
def snapshot_filter(snapshots: QuerySet, filter_patterns: List[str], filter_type: str='exact') -> QuerySet:
    """dispatch to q_filter or search_filter depending on the filter type"""
    if filter_type != 'search':
        return q_filter(snapshots, filter_patterns, filter_type)
    else:
        return search_filter(snapshots, filter_patterns, filter_type)


def get_indexed_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """indexed links without checking archive status or data directory validity"""
    links = [snapshot.as_link_with_details() for snapshot in snapshots.iterator()]
    return {
        link.link_dir: link
        for link in links
    }


def get_archived_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """indexed links that are archived with a valid data directory"""
    links = [snapshot.as_link_with_details() for snapshot in snapshots.iterator()]
    return {
        link.link_dir: link
        for link in filter(is_archived, links)
    }


def get_unarchived_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """indexed links that are unarchived with no data directory or an empty data directory"""
    links = [snapshot.as_link_with_details() for snapshot in snapshots.iterator()]
    return {
        link.link_dir: link
        for link in filter(is_unarchived, links)
    }


def get_present_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs that actually exist in the archive/ folder"""

    all_folders = {}

    for entry in (out_dir / ARCHIVE_DIR_NAME).iterdir():
        if entry.is_dir():
            link = None
            try:
                # BUGFIX: was parse_json_link_details(entry.path), but
                # pathlib.Path has no .path attribute, so the AttributeError
                # was silently swallowed and link always stayed None.
                link = parse_json_link_details(str(entry))
            except Exception:
                pass

            all_folders[entry.name] = link

    return all_folders


def get_valid_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs with a valid index matched to the main index and archived content"""
    links = [snapshot.as_link_with_details() for snapshot in snapshots.iterator()]
    return {
        link.link_dir: link
        for link in filter(is_valid, links)
    }


def get_invalid_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs that are invalid for any reason: corrupted/duplicate/orphaned/unrecognized"""
    # BUGFIX: these previously hard-coded out_dir=OUTPUT_DIR, ignoring the
    # out_dir parameter this function was called with.
    duplicate = get_duplicate_folders(snapshots, out_dir=out_dir)
    orphaned = get_orphaned_folders(snapshots, out_dir=out_dir)
    corrupted = get_corrupted_folders(snapshots, out_dir=out_dir)
    unrecognized = get_unrecognized_folders(snapshots, out_dir=out_dir)
    return {**duplicate, **orphaned, **corrupted, **unrecognized}


def get_duplicate_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs that conflict with other directories that have the same link URL or timestamp"""
    by_url = {}
    by_timestamp = {}
    duplicate_folders = {}

    data_folders = (
        str(entry)
        for entry in (Path(out_dir) / ARCHIVE_DIR_NAME).iterdir()
        if entry.is_dir() and not snapshots.filter(timestamp=entry.name).exists()
    )

    for path in chain(snapshots.iterator(), data_folders):
        link = None
        if not isinstance(path, str):
            # path is a Snapshot model instance, not a filesystem path
            path = path.as_link().link_dir

        try:
            link = parse_json_link_details(path)
        except Exception:
            pass

        if link:
            # link folder has same timestamp as different link folder
            by_timestamp[link.timestamp] = by_timestamp.get(link.timestamp, 0) + 1
            if by_timestamp[link.timestamp] > 1:
                duplicate_folders[path] = link

            # link folder has same url as different link folder
            by_url[link.url] = by_url.get(link.url, 0) + 1
            if by_url[link.url] > 1:
                duplicate_folders[path] = link
    return duplicate_folders


def get_orphaned_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs that contain a valid index but aren't listed in the main index"""
    orphaned_folders = {}

    for entry in (Path(out_dir) / ARCHIVE_DIR_NAME).iterdir():
        if entry.is_dir():
            link = None
            try:
                link = parse_json_link_details(str(entry))
            except Exception:
                pass

            if link and not snapshots.filter(timestamp=entry.name).exists():
                # folder is a valid link data dir with index details, but it's not in the main index
                orphaned_folders[str(entry)] = link

    return orphaned_folders


def get_corrupted_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs that don't contain a valid index and aren't listed in the main index"""
    corrupted = {}
    for snapshot in snapshots.iterator():
        link = snapshot.as_link()
        if is_corrupt(link):
            corrupted[link.link_dir] = link
    return corrupted


def get_unrecognized_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs that don't contain recognizable archive data and aren't listed in the main index"""
    unrecognized_folders: Dict[str, Optional[Link]] = {}

    for entry in (Path(out_dir) / ARCHIVE_DIR_NAME).iterdir():
        if entry.is_dir():
            index_exists = (entry / "index.json").exists()
            link = None
            try:
                link = parse_json_link_details(str(entry))
            except KeyError:
                # Try to fix index
                if index_exists:
                    try:
                        # Last attempt to repair the detail index
                        link_guessed = parse_json_link_details(str(entry), guess=True)
                        write_json_link_details(link_guessed, out_dir=str(entry))
                        link = parse_json_link_details(str(entry))
                    except Exception:
                        pass

            if index_exists and link is None:
                # index exists but it's corrupted or unparseable
                unrecognized_folders[str(entry)] = link

            elif not index_exists:
                # link details index doesn't exist and the folder isn't in the main index
                timestamp = entry.name
                if not snapshots.filter(timestamp=timestamp).exists():
                    unrecognized_folders[str(entry)] = link

    return unrecognized_folders


def is_valid(link: Link) -> bool:
    """True if the link's data dir exists and its index.json matches the link's url"""
    dir_exists = Path(link.link_dir).exists()
    index_exists = (Path(link.link_dir) / "index.json").exists()
    if not dir_exists:
        # unarchived links are not included in the valid list
        return False
    if dir_exists and not index_exists:
        return False
    if dir_exists and index_exists:
        try:
            parsed_link = parse_json_link_details(link.link_dir, guess=True)
            return link.url == parsed_link.url
        except Exception:
            pass
    return False


def is_corrupt(link: Link) -> bool:
    """True if the link's data dir exists but isn't a valid archive of that link"""
    if not Path(link.link_dir).exists():
        # unarchived links are not considered corrupt
        return False

    if is_valid(link):
        return False

    return True


def is_archived(link: Link) -> bool:
    """True if the link is valid and has at least one archived artifact"""
    return is_valid(link) and link.is_archived


def is_unarchived(link: Link) -> bool:
    """True if the link has no data dir yet, or has one without archived output"""
    if not Path(link.link_dir).exists():
        return True
    return not link.is_archived


def fix_invalid_folder_locations(out_dir: Path=OUTPUT_DIR) -> Tuple[List[str], List[str]]:
    """move data dirs whose folder name doesn't match their index timestamp; returns (fixed, cant_fix)"""
    fixed = []
    cant_fix = []
    for entry in os.scandir(out_dir / ARCHIVE_DIR_NAME):
        if entry.is_dir(follow_symlinks=True):
            if (Path(entry.path) / 'index.json').exists():
                try:
                    link = parse_json_link_details(entry.path)
                except KeyError:
                    link = None
                if not link:
                    continue

                if not entry.path.endswith(f'/{link.timestamp}'):
                    dest = out_dir / ARCHIVE_DIR_NAME / link.timestamp
                    if dest.exists():
                        cant_fix.append(entry.path)
                    else:
                        shutil.move(entry.path, dest)
                        fixed.append(dest)
                        timestamp = entry.path.rsplit('/', 1)[-1]
                        assert link.link_dir == entry.path
                        assert link.timestamp == timestamp
                        write_json_link_details(link, out_dir=entry.path)

    return fixed, cant_fix
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase, Client
from django.urls import reverse

from braindump.models import CardPlacement
from cards.models import Card
from categories.models import Category, ShareContract


class CategoryTestCase(TestCase):
    def setUp(self):
        """Set up test scenario
        """
        self.test_user = User.objects.create_user('test')
        self.test_category = Category.objects.create(name='Category 1', description='Description 1',
                                                     owner=self.test_user)
        self.client = Client()
        self.client.force_login(self.test_user)

        self.foreign_test_user = User.objects.create_user('category foreigner')
        self.foreign_test_category = Category.objects.create(name='Category Foreign',
                                                             description='Description Foreign',
                                                             owner=self.foreign_test_user)
        self.foreign_client = Client()
        self.foreign_client.force_login(self.foreign_test_user)

    def _create_test_card(self, suffix='', category=False):
        """Create a single test card

        BUGFIX: the fields were built with 'Question'.format(suffix) etc.,
        which has no placeholder and silently discarded the suffix.
        """
        if not category:
            category = self.test_category
        card = Card.objects.create(
            question='Question{}'.format(suffix),
            answer='Answer{}'.format(suffix),
            hint='Hint{}'.format(suffix),
            category=category,
        )
        return card

    def test_list(self):
        """Test if the category list is displayed successfully
        """
        url = reverse('category-list')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_detail(self):
        """Test if the category detail page is displayed successfully
        """
        test_card = self._create_test_card()
        url = reverse('category-detail', args=(test_card.category.pk,))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_foreign_category_detail(self):
        """Test if the user has no access to foreign categories
        """
        test_user = User.objects.create_user('foreigner')
        test_category = Category.objects.create(
            name='Category 1337',
            description='Description 1337',
            owner=test_user
        )
        url = reverse('category-detail', args=(test_category.pk,))

        # the owner can see the category:
        foreign_client = Client()
        foreign_client.force_login(test_user)
        foreign_response = foreign_client.get(url)
        self.assertEqual(foreign_response.status_code, 200)

        # other users get a 404:
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    def test_delete_empty_category(self):
        """Test if the "Delete" button works for an empty category
        """
        test_category = Category.objects.create(name='Category 2', description='Description 2',
                                                owner=self.test_user)
        url = reverse('category-delete', args=(test_category.pk,))
        response = self.client.post(url)
        self.assertEqual(response.status_code, 302)

        with self.assertRaises(ObjectDoesNotExist):
            Category.objects.get(pk=test_category.pk)

    def test_share_contract_list(self):
        """Test if the share contract list is displayed successfully
        """
        url = reverse('category-share-contract-list', args=(self.test_category.pk,))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_share_contract_request_valid_user(self):
        """Test if a share contract for a valid user works
        """
        url = reverse('category-share-contract-request', args=(self.test_category.pk,))
        response = self.client.post(url, data={'username': self.foreign_test_user.username})
        self.assertEqual(response.status_code, 302)

        share_contract = ShareContract.user_category_objects.all(user=self.foreign_test_user,
                                                                 category=self.test_category)
        self.assertTrue(share_contract.exists())

    def test_share_contract_request_owner(self):
        """Test if a share contract fails if the target user is equal to the owner of the category
        """
        url = reverse('category-share-contract-request', args=(self.test_category.pk,))
        response = self.client.post(url, data={'username': self.test_user.username})
        self.assertEqual(response.status_code, 200)

        share_contract = ShareContract.user_category_objects.all(user=self.test_user,
                                                                 category=self.test_category)
        self.assertFalse(share_contract.exists())

    def test_share_contract_request_invalid_user(self):
        """Test if a share contract for an invalid user doesn't work
        (but should respond exactly like a valid user)
        """
        url = reverse('category-share-contract-request', args=(self.test_category.pk,))
        response = self.client.post(url, data={'username': 'cake'})
        self.assertEqual(response.status_code, 302)

        share_contract = ShareContract.user_category_objects.all(user=self.foreign_test_user,
                                                                 category=self.test_category)
        self.assertFalse(share_contract.exists())

    def test_share_contract_request_foreign_category(self):
        """Test if a share contract for an invalid category doesn't work
        """
        url = reverse('category-share-contract-request', args=(self.foreign_test_category.pk,))
        response = self.client.post(url, data={'username': self.test_user.username})
        self.assertEqual(response.status_code, 404)

        share_contract = ShareContract.user_category_objects.all(user=self.foreign_test_user,
                                                                 category=self.foreign_test_category)
        self.assertFalse(share_contract.exists())

    def test_share_contract_accept(self):
        """Test if a share contract can be accepted
        """
        test_category = Category.objects.create(name='Category', description='Description',
                                                owner=self.test_user)
        test_card = self._create_test_card(category=test_category)
        share_contract = ShareContract.objects.create(user=self.foreign_test_user, category=test_category)

        url = reverse('category-share-contract-accept', args=(share_contract.pk,))
        response = self.foreign_client.post(url, data={'decision': 'Accept'})
        self.assertEqual(response.status_code, 302)

        refreshed_share_contract = ShareContract.objects.get(pk=share_contract.pk)
        self.assertTrue(refreshed_share_contract.accepted)

        # accepting should place the category's cards on the new user's desk:
        test_card_placement = CardPlacement.objects.filter(card=test_card, user=self.foreign_test_user)
        self.assertTrue(test_card_placement.exists())

    def test_share_contract_decline(self):
        """Test if a share contract can be declined
        """
        share_contract = ShareContract.objects.create(user=self.foreign_test_user,
                                                      category=self.test_category)

        url = reverse('category-share-contract-accept', args=(share_contract.pk,))
        response = self.foreign_client.post(url, data={'decision': 'Decline'})
        self.assertEqual(response.status_code, 302)

        # declining removes the contract entirely:
        refreshed_share_contract = ShareContract.objects.filter(pk=share_contract.pk).all()
        self.assertFalse(refreshed_share_contract.exists())

    def test_share_contract_revoke(self):
        """Test if a share contract can be revoked
        """
        rand = User.objects.make_random_password(length=32)
        test_category = Category.objects.create(name=rand, description='Description',
                                                owner=self.test_user)
        test_card = self._create_test_card(category=test_category)
        share_contract = ShareContract.objects.create(user=self.foreign_test_user, category=test_category)
        share_contract.accept()
        card_placement = CardPlacement.objects.get(user=self.foreign_test_user, card=test_card)

        url = reverse('category-share-contract-revoke', args=(test_category.pk, share_contract.pk,))
        response = self.client.post(url)
        self.assertEqual(response.status_code, 302)

        refreshed_share_contract = ShareContract.objects.filter(pk=share_contract.pk).all()
        self.assertFalse(refreshed_share_contract.exists())

        # revoking removes the foreign user's placements for this category:
        refreshed_card_placement = CardPlacement.objects.filter(
            user=self.foreign_test_user,
            card__category__name=rand,
        ).first()
        self.assertNotEqual(card_placement.card, refreshed_card_placement.card)
# -*- coding: utf-8 -*-
# (c) Copyright 2022 Sensirion AG, Switzerland

import logging
log = logging.getLogger(__name__)


class Sen5xMassConcentration:
    """
    Particulate matter mass concentration reading from a SEN5x device.

    The raw sensor value is kept in :py:attr:`ticks` and the converted
    physical value in µg/m³ in :py:attr:`physical`. Check
    :py:attr:`available` to see whether the device reported a value at all.
    """

    def __init__(self, ticks):
        """
        Creates an instance from the received raw data.

        :param int ticks: The read ticks as received from the device.
        """
        super(Sen5xMassConcentration, self).__init__()
        #: The ticks (int) as received from the device.
        self.ticks = int(ticks)
        #: Flag (bool) whether the received value is available or not
        #: (0xFFFF is the device's "not available" marker).
        self.available = (self.ticks != 0xFFFF)
        #: The converted physical value (float) in µg/m³, or NaN if the
        #: value is not available.
        if self.available:
            self.physical = ticks / 10.0
        else:
            self.physical = float('nan')

    def __str__(self):
        if not self.available:
            return 'N/A'
        return '{:0.1f} µg/m^3'.format(self.physical)


class Sen5xHumidity:
    """
    Relative humidity reading from a SEN5x device.

    The raw sensor value is kept in :py:attr:`ticks` and the converted
    value in %RH in :py:attr:`percent_rh`. Check :py:attr:`available` to
    see whether the device reported a value at all.
    """

    def __init__(self, ticks):
        """
        Creates an instance from the received raw data.

        :param int ticks: The read ticks as received from the device.
        """
        super(Sen5xHumidity, self).__init__()
        #: The ticks (int) as received from the device.
        self.ticks = int(ticks)
        #: Flag (bool) whether the received value is available or not
        #: (0x7FFF is the device's "not available" marker).
        self.available = (self.ticks != 0x7FFF)
        #: The converted humidity (float) in %RH, or NaN if the value is
        #: not available.
        if self.available:
            self.percent_rh = ticks / 100.0
        else:
            self.percent_rh = float('nan')

    def __str__(self):
        if not self.available:
            return 'N/A'
        return '{:0.2f} %RH'.format(self.percent_rh)


class Sen5xTemperature:
    """
    Temperature reading from a SEN5x device.

    The raw sensor value is kept in :py:attr:`ticks`; converted values are
    offered both as :py:attr:`degrees_celsius` and
    :py:attr:`degrees_fahrenheit`. Check :py:attr:`available` to see
    whether the device reported a value at all.
    """

    def __init__(self, ticks):
        """
        Creates an instance from the received raw data.

        :param int ticks: The read ticks as received from the device.
        """
        super(Sen5xTemperature, self).__init__()
        #: The ticks (int) as received from the device.
        self.ticks = int(ticks)
        #: Flag (bool) whether the received value is available or not
        #: (0x7FFF is the device's "not available" marker).
        self.available = (self.ticks != 0x7FFF)
        if self.available:
            #: The converted temperature (float) in °C, or NaN if the
            #: value is not available.
            self.degrees_celsius = ticks / 200.0
            #: The converted temperature (float) in °F, or NaN if the
            #: value is not available.
            self.degrees_fahrenheit = (self.degrees_celsius * 9.0 / 5.0) + 32.0
        else:
            self.degrees_celsius = float('nan')
            self.degrees_fahrenheit = float('nan')

    def __str__(self):
        if not self.available:
            return 'N/A'
        return '{:0.2f} °C'.format(self.degrees_celsius)


class Sen5xAirQualityIndex:
    """
    Air quality index reading from a SEN5x device.

    The raw sensor value is kept in :py:attr:`ticks` and the converted
    index in :py:attr:`scaled`. Check :py:attr:`available` to see whether
    the device reported a value at all.
    """

    def __init__(self, ticks):
        """
        Creates an instance from the received raw data.

        :param int ticks: The read ticks as received from the device.
        """
        super(Sen5xAirQualityIndex, self).__init__()
        #: The ticks (int) as received from the device.
        self.ticks = int(ticks)
        #: Flag (bool) whether the received value is available or not
        #: (0x7FFF is the device's "not available" marker).
        self.available = (self.ticks != 0x7FFF)
        #: The converted/scaled index (float), or NaN if the value is not
        #: available.
        if self.available:
            self.scaled = ticks / 10.0
        else:
            self.scaled = float('nan')

    def __str__(self):
        if not self.available:
            return 'N/A'
        return '{:.1f}'.format(self.scaled)


class Sen5xDeviceStatus:
    """
    Decoded SEN5x device status register.

    The raw register value is kept in :py:attr:`value`; the names of all
    set flags are collected in :py:attr:`flags`, and each flag is also
    exposed as an individual bool attribute.
    """

    def __init__(self, value):
        """
        Creates an instance from the received raw data.

        :param int value: The raw device status value as received from the
                          device.
        """
        super(Sen5xDeviceStatus, self).__init__()
        #: The value (int) as received from the device.
        self.value = value
        #: All currently set flags as a list of flag names, i.e. list(str)
        self.flags = []
        #: Flag (bool) whether a fan error occurred.
        self.fan_error = self._record_flag(4, 'fan_error')
        #: Flag (bool) whether a laser error occurred.
        self.laser_error = self._record_flag(5, 'laser_error')
        #: Flag (bool) whether an SHT error occurred.
        self.sht_error = self._record_flag(6, 'sht_error')
        #: Flag (bool) whether an SGP error occurred.
        self.sgp_error = self._record_flag(7, 'sgp_error')
        #: Flag (bool) whether the fan cleaning is currently active.
        self.fan_cleaning = self._record_flag(19, 'fan_cleaning')
        #: Flag (bool) whether the fan speed is currently out of specs.
        self.fan_speed_out_of_specs = self._record_flag(21, 'fan_speed_out_of_specs')

    def _record_flag(self, index, name):
        # Test bit `index` of the raw value; if set, remember `name` in
        # self.flags. Returns the bit state so callers can store it.
        bit_set = bool(self.value & (1 << index))
        if bit_set:
            self.flags.append(name)
        return bit_set

    def __str__(self):
        if self.flags:
            detail = ', '.join(self.flags)
        else:
            detail = 'OK'
        return "0x{:08X} [{}]".format(self.value, detail)


class Sen5xFirmwareVersion:
    """
    Class representing the firmware version of a device.
    """

    def __init__(self, major, minor, debug):
        """
        Constructor.

        :param byte major: Major version.
        :param byte minor: Minor version.
        :param bool debug: Debug flag (False for official releases).
        """
        super(Sen5xFirmwareVersion, self).__init__()
        self.major = major
        self.minor = minor
        self.debug = debug

    def __str__(self):
        suffix = '-debug' if self.debug else ''
        return '{}.{}{}'.format(self.major, self.minor, suffix)


class Sen5xHardwareVersion:
    """
    Class representing the hardware version of a device.
    """

    def __init__(self, major, minor):
        """
        Constructor.

        :param byte major: Major version.
        :param byte minor: Minor version.
        """
        super(Sen5xHardwareVersion, self).__init__()
        self.major = major
        self.minor = minor

    def __str__(self):
        return '{}.{}'.format(self.major, self.minor)


class Sen5xProtocolVersion:
    """
    Class representing the I2C protocol version of an I2C device.
    """

    def __init__(self, major, minor):
        """
        Constructor.

        :param byte major: Major version.
        :param byte minor: Minor version.
        """
        super(Sen5xProtocolVersion, self).__init__()
        self.major = major
        self.minor = minor

    def __str__(self):
        return '{}.{}'.format(self.major, self.minor)


class Sen5xVersion:
    """
    Class representing all version numbers of an I2C device. This is used
    for the "Get Version" command.
    """

    def __init__(self, firmware, hardware, protocol):
        """
        Constructor.

        :param ~sensirion_i2c_sen5x.response_types.Sen5xFirmwareVersion firmware:
            Firmware version.
        :param ~sensirion_i2c_sen5x.response_types.Sen5xHardwareVersion hardware:
            Hardware version.
        :param ~sensirion_i2c_sen5x.response_types.Sen5xProtocolVersion protocol:
            SHDLC protocol version.
        """
        super(Sen5xVersion, self).__init__()
        self.firmware = firmware
        self.hardware = hardware
        self.protocol = protocol

    def __str__(self):
        return 'Firmware {}, Hardware {}, Protocol {}'.format(
            self.firmware, self.hardware, self.protocol
        )
import os
import copy

from gym import spaces
import numpy as np
import pybullet as p

from .env import RobotEnv
from .env_description import ObservationShapes, ActionShapes, RewardFunctions


class ReachingEnv(RobotEnv):
    """Jaco-arm reaching task.

    The agent drives the 7-DoF arm so that the end effector reaches a target
    position (and optionally orientation). The target can be fixed, randomly
    sampled, or moving; an optional obstacle ("circular window") can be
    spawned between the arm and the target.
    """

    def __init__(
            self,
            random_position,
            random_orientation,
            moving_target,
            target_type,
            goal_oriented,
            obstacle,
            obs_type,
            reward_type,
            action_type,
            alpha_reward,
            action_amplitude,
            observation_amplitude,
            robot_gains):
        """Configure the reaching task.

        :param random_position: sample a random goal position on reset.
        :param random_orientation: sample a random goal orientation on reset.
        :param moving_target: translate the target along +y every step.
        :param target_type: "arrow" or "sphere" target URDF.
        :param goal_oriented: not implemented yet.
        :param obstacle: None, "circular_window" or "circular_window_small".
        :param obs_type: not implemented yet.
        :param reward_type: integer selecting RewardFunctions.get_reward<N>.
        :param action_type: not implemented yet.
        :param alpha_reward: weighting factor forwarded to RewardFunctions.
        :param action_amplitude: action scaling passed to RobotEnv.
        :param observation_amplitude: observation scaling passed to RobotEnv.
        :param robot_gains: PD gains used both for control and action bounds.
        """
        super(ReachingEnv, self).__init__(
            action_amp=action_amplitude,
            obs_amp=observation_amplitude,
            frame_skip=5,
            time_step=0.02,
            action_robot_len=7,
            obs_robot_len=17)

        self.random_position = random_position
        self.random_orientation = random_orientation
        self.moving_target = moving_target
        self.target_type = target_type
        self.goal_oriented = goal_oriented  # Not implemented yet
        self.obstacle = obstacle
        self.obs_type = obs_type  # Not implemented yet
        self.reward_type = reward_type
        self.action_type = action_type  # Not implemented yet
        self.alpha_reward = alpha_reward
        self.robot_gains = robot_gains
        self.robot_forces = 1.0
        self.task_success_threshold = 0.03
        self.fixed_goal_coord = np.array([0.7, 0.0, 1.0])
        self.obstacle_pos = np.array([0.6, 0.0, 1.0])
        self.obstacle_orient = np.array([0, np.pi/2, 0])
        # Distances/orientation errors cached between steps so the reward
        # can use the previous step's deltas.
        self.dist = 0
        self.old_dist = 0
        self.orient = 0
        self.old_orient = 0
        self.term1 = 0
        self.term2 = 0
        self.delta_pos = 0
        self.delta_dist = 0
        self.target_speed = 0.01
        self.endeffector_pos = np.zeros(3)
        self.old_endeffector_pos = np.zeros(3)
        self.endeffector_orient = np.zeros(4)  # this is a quaternion!
        self.old_endeffector_orient = np.zeros(4)  # this is a quaternion!
        self.torso_pos = np.zeros(3)
        self.torso_orient = np.zeros(4)
        self.end_torso_pos = np.zeros(3)
        self.end_goal_pos = np.zeros(3)
        self.end_torso_orient = np.zeros(4)
        self.end_goal_orient = np.zeros(4)
        self.delta_orient = np.zeros(3)
        self.delta_endeff_orient = np.zeros(3)
        self.goal_pos = np.zeros(3)
        self.goal_orient = np.zeros(3)
        self.target_object_orient = np.zeros(3)
        self.joint_positions = np.zeros(7)
        self.new_joint_positions = np.zeros(7)
        self.pybullet_action_min = - np.array([self.robot_gains] * self.action_robot_len)
        self.pybullet_action_max = np.array([self.robot_gains] * self.action_robot_len)

    def step(self, action):
        """Advance the simulation by one control step.

        :param action: joint-space action for the 7 arm joints.
        :returns: ``(obs, reward, done, info)``; ``done`` is always False
            (episode termination is left to wrappers).
        """
        # Pose and errors *before* taking the action.
        self.old_dist = np.linalg.norm(self.endeffector_pos - self.goal_pos)
        self.old_endeffector_pos = self.endeffector_pos
        self.old_orient = np.linalg.norm(self.endeffector_orient - self.goal_orient)
        self.old_endeffector_orient = self.endeffector_orient

        # Update target position and move the target object.
        if self.moving_target:
            self.goal_pos[1] += self.target_speed
            p.resetBasePositionAndOrientation(
                self.target_object,
                self.goal_pos,
                p.getQuaternionFromEuler(self.target_object_orient))

        # Execute action.
        self.take_step(
            action=action,
            gains=self.robot_gains,
            forces=self.robot_forces,
            indices=self.robot_arm_joint_indices,
            upper_limit=self.robot_upper_limits,
            lower_limit=self.robot_lower_limits,
            robot=self.robot)

        # Refresh cached poses/joints and build the observation.
        self._get_general_obs()
        obs = self.get_obs1()

        # Position and orientation error after the action.
        self.dist = np.linalg.norm(self.endeffector_pos - self.goal_pos)
        self.orient = np.linalg.norm(self.endeffector_orient - self.goal_orient)

        # NOTE: self.delta_dist and self.delta_pos still hold the values of
        # the *previous* step here; they are refreshed only after the reward
        # is computed. This preserves the original update order.
        self.reward_function = RewardFunctions(
            self.dist,
            self.alpha_reward,
            action,
            self.delta_dist,
            self.delta_pos,
            self.orient,
            self._detect_collision()
        )
        # Dispatch to RewardFunctions.get_reward<N> by name instead of the
        # former 27-branch if/elif chain. An unknown reward_type now fails
        # with a clear ValueError instead of an unbound-variable NameError.
        try:
            reward_getter = getattr(
                self.reward_function,
                'get_reward{}'.format(self.reward_type))
        except AttributeError:
            raise ValueError(
                'Unsupported reward_type: {!r}'.format(self.reward_type))
        reward = reward_getter()

        # Refresh the per-step deltas (consumed by next step's reward).
        self.delta_dist = self.old_dist - self.dist
        self.delta_pos = np.linalg.norm(self.old_endeffector_pos - self.endeffector_pos)
        self.delta_orient = self.old_orient - self.orient
        self.delta_endeff_orient = np.linalg.norm(
            self.old_endeffector_orient - self.endeffector_orient)

        info = {}
        info['task_success'] = int(self.dist <= self.task_success_threshold)
        info['action_robot_len'] = self.action_robot_len
        info['obs_robot_len'] = self.obs_robot_len
        info['distance'] = self.dist
        info['goal_pos'] = self.goal_pos
        info['endeffector_pos'] = self.endeffector_pos
        info['orientation'] = self.orient
        info['goal_orient'] = self.goal_orient
        info['endeffector_orient'] = self.endeffector_orient
        info['joint_pos'] = self.joint_positions
        info['joint_vel'] = self.joint_vel
        info['joint_tor'] = self.joint_torques
        info['desired_joint_pos'] = self.robot_joint_positions
        info['joint_min'] = self.robot_lower_limits
        info['joint_max'] = self.robot_upper_limits
        info['term1'] = self.term1
        info['term2'] = self.term2
        info['action'] = action
        info['action_min'] = self.action_space.low
        info['action_max'] = self.action_space.high
        info['pybullet_action'] = self.action_robot
        info['pybullet_action_min'] = self.pybullet_action_min
        info['pybullet_action_max'] = self.pybullet_action_max
        # According to the Pybullet documentation, 1 timestep = 240 Hz
        info['vel_dist'] = self.delta_dist / self.time_step
        info['vel_pos'] = self.delta_pos / self.time_step
        info['collision'] = self._detect_collision()

        done = False

        return obs, reward, done, info

    def _detect_collision(self):
        """
        Detect any collision with the arm (require physics enabled)
        """
        return len(p.getContactPoints(self.robot)) > 0

    def _get_general_obs(self):
        """
        Get information for generating observation array
        """
        self.endeffector_pos = self._get_end_effector_position()
        self.endeffector_orient = self._get_end_effector_orientation()
        self.torso_pos = self._get_torso_position()
        self.torso_orient = self._get_torso_orientation()
        self.joint_positions, self.joint_vel, self.joint_rf, self.joint_torques = self._get_joint_info()
        # Relative quantities consumed by get_obs1().
        self.end_torso_pos = self.endeffector_pos - self.torso_pos
        self.end_goal_pos = self.endeffector_pos - self.goal_pos
        self.end_torso_orient = self.endeffector_orient - self.torso_orient
        self.end_goal_orient = self.endeffector_orient - self.goal_orient

    def _get_joint_info(self):
        """
        Return current joint positions, velocities, reaction forces and torques
        """
        info = p.getJointStates(
            self.robot,
            jointIndices=self.robot_arm_joint_indices,
            physicsClientId=self.id)
        pos, vel, rf, t = [], [], [], []
        for joint_info in info:
            pos.append(joint_info[0])
            vel.append(joint_info[1])
            rf.append(joint_info[2])
            t.append(joint_info[3])
        return np.array(pos), np.array(vel), np.array(rf), np.array(t)

    def _get_end_effector_position(self):
        """
        Get end effector coordinates
        """
        return np.array(p.getLinkState(
            self.robot,
            linkIndex=8,
            computeForwardKinematics=True,
            physicsClientId=self.id)[0])

    def _get_end_effector_orientation(self):
        """
        Get end effector orientation (as a quaternion)
        """
        orient_quat = p.getLinkState(
            self.robot,
            linkIndex=8,
            computeForwardKinematics=True,
            physicsClientId=self.id)[1]
        return np.array(orient_quat)

    def _get_torso_position(self):
        """
        Get torso coordinates
        """
        return np.array(p.getLinkState(
            self.robot,
            linkIndex=0,
            computeForwardKinematics=True,
            physicsClientId=self.id)[0])

    def _get_torso_orientation(self):
        """
        Get torso orientation (as a quaternion)
        """
        orient_quat = p.getLinkState(
            self.robot,
            linkIndex=0,
            computeForwardKinematics=True,
            physicsClientId=self.id)[1]
        return np.array(orient_quat)

    def get_obs1(self):
        """Concatenate the 17-dim observation: end-effector offsets from
        torso and goal, joint positions, and end-effector quaternion."""
        robot_obs = np.concatenate(
            [self.end_torso_pos,
             self.end_goal_pos,
             self.joint_positions,
             self.endeffector_orient]).ravel()
        return robot_obs

    def reset(self):
        """Rebuild the world, sample/spawn the goal (and obstacle), place the
        robot, and return the initial observation."""
        self.setup_timing()
        self.task_success = 0
        self.contact_points_on_arm = {}
        self.robot, self.robot_lower_limits, self.robot_upper_limits, self.robot_arm_joint_indices = self.world_creation.create_new_world(print_joints=False)
        self.robot_lower_limits = self.robot_lower_limits[self.robot_arm_joint_indices]
        self.robot_upper_limits = self.robot_upper_limits[self.robot_arm_joint_indices]
        self.reset_robot_joints(robot=self.robot)

        # Disable gravity
        # p.setGravity(0, 0, -9.81, physicsClientId=self.id)
        p.setGravity(0, 0, 0, physicsClientId=self.id)

        # Initialise goal position
        if self.random_position:
            self.goal_pos = self.sample_random_position()
        else:
            # deepcopy is necessary to avoid changing the value of fixed_goal_coord
            self.goal_pos = copy.deepcopy(self.fixed_goal_coord)

        # Initialise goal orientation
        if self.random_orientation:
            self.goal_orient = self.sample_random_orientation()
        else:
            self.goal_orient = p.getQuaternionFromEuler(
                np.array([0, np.pi/2.0, 0]), physicsClientId=self.id)

        # Spawn target object
        path = os.path.abspath(os.path.dirname(__file__))
        if self.target_type == "arrow":
            self.target_object = p.loadURDF(
                os.path.join(path, "assets/URDFs/arrow.urdf"),
                useFixedBase=True)
        elif self.target_type == "sphere":
            self.target_object = p.loadURDF(
                os.path.join(path, "assets/URDFs/sphere.urdf"),
                useFixedBase=True)
        p.resetBasePositionAndOrientation(
            self.target_object,
            self.goal_pos,
            p.getQuaternionFromEuler(self.target_object_orient))

        # Spawn obstacle
        if self.obstacle == "circular_window":
            self.obstacle_object = p.loadURDF(
                os.path.join(path, "assets/URDFs/circular_window.urdf"),
                useFixedBase=True)
        elif self.obstacle == "circular_window_small":
            self.obstacle_object = p.loadURDF(
                os.path.join(path, "assets/URDFs/circular_window_small_vhacd.urdf"),
                useFixedBase=True)
        if self.obstacle is not None:
            # NOTE(review): assumes self.obstacle is one of the two URDF names
            # above whenever it is not None; otherwise obstacle_object is
            # undefined here -- confirm with callers.
            p.resetBasePositionAndOrientation(
                self.obstacle_object,
                self.obstacle_pos,
                p.getQuaternionFromEuler(self.obstacle_orient))

        # Jaco
        _, _, _ = self.position_robot_toc(
            robot=self.robot,
            joints=8,
            joint_indices=self.robot_arm_joint_indices,
            lower_limits=self.robot_lower_limits,
            upper_limits=self.robot_upper_limits,
            pos_offset=np.array([0, 0, 0.6]))
        self.world_creation.set_gripper_open_position(
            self.robot, position=1.1, set_instantly=True)

        # load tool
        self.tool = self.world_creation.init_tool(
            self.robot,
            pos_offset=[-0.01, 0, 0.03],
            orient_offset=p.getQuaternionFromEuler(
                [0, -np.pi/2.0, 0], physicsClientId=self.id))

        # Load a nightstand in the environment for the jaco arm
        self.nightstand_scale = 0.275
        visual_filename = os.path.join(self.world_creation.directory, 'nightstand', 'nightstand.obj')
        collision_filename = os.path.join(self.world_creation.directory, 'nightstand', 'nightstand.obj')
        nightstand_visual = p.createVisualShape(
            shapeType=p.GEOM_MESH,
            fileName=visual_filename,
            meshScale=[self.nightstand_scale]*3,
            rgbaColor=[0.5, 0.5, 0.5, 1.0],
            physicsClientId=self.id)
        nightstand_collision = p.createCollisionShape(
            shapeType=p.GEOM_MESH,
            fileName=collision_filename,
            meshScale=[self.nightstand_scale]*3,
            physicsClientId=self.id)
        nightstand_pos = np.array([0, 0, 0])
        nightstand_orient = p.getQuaternionFromEuler(
            np.array([np.pi/2.0, 0, 0]), physicsClientId=self.id)
        self.nightstand = p.createMultiBody(
            baseMass=0,
            baseCollisionShapeIndex=nightstand_collision,
            baseVisualShapeIndex=nightstand_visual,
            basePosition=nightstand_pos,
            baseOrientation=nightstand_orient,
            baseInertialFramePosition=[0, 0, 0],
            useMaximalCoordinates=False,
            physicsClientId=self.id)

        # Enable rendering
        p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)

        return self.get_obs1()

    def sample_random_position(self):
        """Sample a random goal position.

        Assuming that the max reach of the Jaco is 0.7, the target is
        generated randomly inside the inscribed cube of the reaching sphere
        (upper half only).
        """
        goal_pos = np.array([0, 0, 0.95]) + np.array([
            self.np_random.uniform(-0.7*np.cos(np.pi/4), 0.7*np.cos(np.pi/4)),
            self.np_random.uniform(-0.7*np.cos(np.pi/4), 0.7*np.cos(np.pi/4)),
            self.np_random.uniform(0, 0.7*np.cos(np.pi/4))])
        return goal_pos

    def sample_random_orientation(self):
        """
        Sample random target orientation (returned as a quaternion).
        """
        MIN_GOAL_ORIENTATION = np.array([-np.pi, np.pi/2.0, 0.0])
        MAX_GOAL_ORIENTATION = np.array([np.pi, np.pi/2.0, 0.0])
        # NOTE(review): uses np.random (global RNG) rather than
        # self.np_random, so this is not controlled by the env seed --
        # confirm whether that is intended.
        euler_orient = np.random.uniform(
            low=MIN_GOAL_ORIENTATION, high=MAX_GOAL_ORIENTATION)
        return p.getQuaternionFromEuler(euler_orient, physicsClientId=self.id)
# Copyright 2015 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import types

import mock

from cloudkittyclient import client
from cloudkittyclient.tests import fakes
from cloudkittyclient.tests import utils
from cloudkittyclient.v1 import client as v1client

# Canned environment for client.get_client().
FAKE_ENV = {
    'username': 'username',
    'password': 'password',
    'tenant_name': 'tenant_name',
    'auth_url': 'http://no.where',
    'os_endpoint': 'http://no.where',
    'auth_plugin': 'fake_auth',
    'token': '<PASSWORD>',
    'user_domain_name': 'default',
    'project_domain_name': 'default',
}


class ClientTest(utils.BaseTestCase):
    """Tests for the cloudkittyclient.client.get_client() factory."""

    @staticmethod
    def create_client(env, api_version=1, endpoint=None, exclude=()):
        """Build a client from *env*, dropping keys listed in *exclude*.

        ``exclude`` now defaults to an immutable tuple instead of a mutable
        list (classic mutable-default pitfall); it is only membership-tested,
        so callers are unaffected. ``endpoint`` is accepted but unused,
        matching the original signature.
        """
        env = dict((k, v) for k, v in env.items() if k not in exclude)
        return client.get_client(api_version, **env)

    def setUp(self):
        super(ClientTest, self).setUp()

    def test_client_version(self):
        """api_version=1 yields a v1 Client."""
        c1 = self.create_client(env=FAKE_ENV, api_version=1)
        self.assertIsInstance(c1, v1client.Client)

    def test_client_auth_lambda(self):
        """A callable token is accepted."""
        env = FAKE_ENV.copy()
        env['token'] = lambda: env['token']
        self.assertIsInstance(env['token'], types.FunctionType)
        c1 = self.create_client(env)
        self.assertIsInstance(c1, v1client.Client)

    def test_client_auth_non_lambda(self):
        """A plain string token is accepted."""
        env = FAKE_ENV.copy()
        env['token'] = "<PASSWORD>"
        self.assertIsInstance(env['token'], str)
        c1 = self.create_client(env)
        self.assertIsInstance(c1, v1client.Client)

    @mock.patch('keystoneclient.v2_0.client', fakes.FakeKeystone)
    def test_client_without_auth_plugin(self):
        """Without an auth_plugin, a default AuthPlugin is constructed."""
        env = FAKE_ENV.copy()
        del env['auth_plugin']
        c = self.create_client(env, api_version=1, endpoint='fake_endpoint')
        self.assertIsInstance(c.auth_plugin, client.AuthPlugin)

    def test_client_without_auth_plugin_keystone_v3(self):
        """The default AuthPlugin receives the full keystone v3 kwargs."""
        env = FAKE_ENV.copy()
        del env['auth_plugin']
        expected = {
            'username': 'username',
            'endpoint': 'http://no.where',
            'tenant_name': 'tenant_name',
            'service_type': None,
            'token': '<PASSWORD>',
            'endpoint_type': None,
            'auth_url': 'http://no.where',
            'tenant_id': None,
            'cacert': None,
            'password': 'password',
            'user_domain_name': 'default',
            'user_domain_id': None,
            'project_domain_name': 'default',
            'project_domain_id': None,
        }
        with mock.patch('cloudkittyclient.client.AuthPlugin') as auth_plugin:
            self.create_client(env, api_version=1)
            auth_plugin.assert_called_with(**expected)

    def test_client_with_auth_plugin(self):
        """A caller-supplied auth_plugin is passed through unchanged."""
        c = self.create_client(FAKE_ENV, api_version=1)
        self.assertIsInstance(c.auth_plugin, str)

    def test_v1_client_timeout_invalid_value(self):
        """Non-integer timeouts are rejected."""
        env = FAKE_ENV.copy()
        env['timeout'] = 'abc'
        self.assertRaises(ValueError, self.create_client, env)
        env['timeout'] = '1.5'
        self.assertRaises(ValueError, self.create_client, env)

    def _test_v1_client_timeout_integer(self, timeout, expected_value):
        """Helper: an integer *timeout* maps to *expected_value* on HTTPClient."""
        env = FAKE_ENV.copy()
        env['timeout'] = timeout
        expected = {
            'auth_plugin': 'fake_auth',
            'timeout': expected_value,
            'original_ip': None,
            'http': None,
            'region_name': None,
            'verify': True,
            'timings': None,
            'keyring_saver': None,
            'cert': None,
            'endpoint_type': None,
            'user_agent': None,
            'debug': None,
        }
        cls = 'cloudkittyclient.openstack.common.apiclient.client.HTTPClient'
        with mock.patch(cls) as mocked:
            self.create_client(env)
            mocked.assert_called_with(**expected)

    def test_v1_client_timeout_zero(self):
        # A timeout of 0 means "no timeout" (None).
        self._test_v1_client_timeout_integer(0, None)

    def test_v1_client_timeout_valid_value(self):
        self._test_v1_client_timeout_integer(30, 30)

    def test_v1_client_cacert_in_verify(self):
        """cacert ends up as the underlying HTTP client's verify value."""
        env = FAKE_ENV.copy()
        env['cacert'] = '/path/to/cacert'
        # Local renamed from 'client' to avoid shadowing the imported module.
        c = self.create_client(env)
        self.assertEqual('/path/to/cacert', c.client.verify)

    def test_v1_client_certfile_and_keyfile(self):
        """cert_file/key_file form the client certificate tuple."""
        env = FAKE_ENV.copy()
        env['cert_file'] = '/path/to/cert'
        env['key_file'] = '/path/to/keycert'
        c = self.create_client(env)
        self.assertEqual(('/path/to/cert', '/path/to/keycert'), c.client.cert)
# Pomito - Pomodoro timer in steroids
# A simple console UI plugin

import cmd
import logging

import click

from pomito.plugins import ui

# pylint: disable=invalid-name
logger = logging.getLogger("pomito.plugins.ui.console")

# Module-level handle to the pomodoro service, shared by the click commands
# (click command callbacks cannot easily carry instance state).
_POMODORO_SERVICE = None


def _get_pomodoro_service():
    """Gets pomodoro service."""
    if _POMODORO_SERVICE is None:
        raise RuntimeError("Console.pomodoro_service is not initialized.")
    return _POMODORO_SERVICE


def _set_pomodoro_service(pomodoro_service):
    """Sets pomodoro service."""
    # pylint: disable=global-statement
    global _POMODORO_SERVICE
    _POMODORO_SERVICE = pomodoro_service


@click.group()
def pomito_shell():
    """Command group for pomito interactive shell."""
    pass


@pomito_shell.command("start")
@click.argument('task_id', type=int)
def _pomito_start(task_id):
    """Starts a pomito session."""
    pomodoro_service = _get_pomodoro_service()
    task = pomodoro_service.get_task_by_id(task_id)
    pomodoro_service.start_session(task)


@pomito_shell.command("stop")
def _pomito_stop():
    """Stops a pomito session."""
    pomodoro_service = _get_pomodoro_service()
    pomodoro_service.stop_session()


@pomito_shell.command("list")
@click.argument('task_filter', type=str, required=False)
def _pomito_list(task_filter=None):
    """Lists available tasks."""
    pomodoro_service = _get_pomodoro_service()
    tasks = pomodoro_service.get_tasks_by_filter(task_filter)
    count = 0
    for t in tasks:
        # Without an explicit filter, cap the listing at 10 tasks.
        # (was `count > 10`, which printed 11 tasks before the message;
        # the message also lacked a space before "to show all tasks")
        if task_filter is None and count >= 10:
            click.echo("\nShowing first 10 tasks, use `list *` "
                       "to show all tasks.")
            break
        click.echo(t)
        count += 1


@pomito_shell.command("quit")
@click.pass_context
def _pomito_quit(ctx):
    """Quit pomito shell."""
    click.echo("Good bye!")
    ctx.exit(1)


@pomito_shell.command("EOF")
@click.pass_context
def __pomito_eof(ctx):
    """Quit pomito shell for ^D."""
    click.echo("Good bye!")
    ctx.exit(1)


class Console(ui.UIPlugin, cmd.Cmd):
    """Interactive shell for pomito app.

    Every line entered is rewritten by precmd() into a `parse` command, so
    do_parse() dispatches everything through the click group above.
    """

    intro = "Welcome to Pomito shell.\n\
Type 'help' or '?' to list available commands."
    # TODO should the prompt be two lines
    # [<timer> <Task>]
    # >
    prompt = "pomito> "

    def __init__(self, pomodoro_service):
        self._stop_signalled = False
        self._message_queue = []
        _set_pomodoro_service(pomodoro_service)
        cmd.Cmd.__init__(self)

    def initialize(self):
        pass

    def do_parse(self, args):
        """Parse pomito shell commands."""
        try:
            pomito_shell.main(args=args.split())
        except SystemExit as e:
            # quit/EOF exit click with code 1: signal the cmd loop to stop.
            if e.code == 1:
                self._stop_signalled = True
        return

    def completenames(self, text, *ignored):
        """Complete command names from the `_pomito_*` callbacks above."""
        import sys
        cmdname = "_pomito_" + text
        # Strip the "_pomito_" prefix (8 chars) from matching module names.
        return [c[8:] for c in dir(sys.modules[__name__])
                if c.startswith(cmdname)]

    def precmd(self, line):
        # Route every input line through do_parse -> click.
        return "parse {0}".format(line)

    def postcmd(self, stop, line):
        # Stop the loop once quit/EOF has been parsed.
        return self._stop_signalled

    def _print_message(self, msg):
        print(msg)
        return

    def notify_session_started(self):
        self._print_message("Pomodoro session started.")
        self._message_queue.append("Pomodoro session started.")
        return

    def notify_session_end(self, reason):
        self._print_message("Pomodoro session ended.")
        self._message_queue.append("Pomodoro session ended.")
        return

    def notify_break_started(self, break_type):
        self._print_message("Pomodoro break started: {0}".format(break_type))
        self._message_queue.append(
            "Pomodoro break started: {0}".format(break_type))
        return

    def notify_break_end(self, reason):
        self._print_message("Pomodoro break ended: {0}".format(reason))
        self._message_queue.append(
            "Pomodoro break ended: {0}".format(reason))
        return

    def run(self):
        # Drain one pending notification before entering the loop.
        if self._message_queue:
            print("-- msg: {0}".format(self._message_queue.pop()))
        try:
            self.cmdloop()
        except KeyboardInterrupt:
            self._print_message("Got keyboard interrupt.")
        return
# Author: <NAME>

# this class handles to run the counterfactual generation
# it is needed because this process takes time enough to freeze the interface,
# so, this class is used to be instantiated in another thread
from .CounterfactualInterfaceEnums import CounterfactualInterfaceEnums
from CounterFactualParameters import FeatureType
from CounterFactualParameters import BinaryDecisionVariables, TreeConstraintsType
from RandomForestCounterFactual import RandomForestCounterFactualMilp

import numpy as np

from PyQt5.QtCore import QObject, pyqtSignal


class CounterfactualInferfaceWorker(QObject):
    """Qt worker that builds and solves the counterfactual MILP off the UI
    thread, reporting progress and results through signals.

    NOTE(review): the class name keeps the original "Inferface" spelling,
    since external code imports it by this name.
    """

    # Status/progress text for the UI.
    progress = pyqtSignal(str)
    # Predicted class of the found counterfactual (as text).
    couterfactualClass = pyqtSignal(str)
    # List of [feature, original value, counterfactual value] rows.
    tableCounterfactualValues = pyqtSignal(object)
    finished = pyqtSignal()

    def __init__(self, controller):
        super().__init__()

        # Lazy import guarded by sys.modules to avoid a circular import with
        # the controller module.
        import sys
        modulename = '.CounterfactualInterfaceController'
        if modulename not in sys.modules:
            from .CounterfactualInterfaceController import CounterfactualInterfaceController
        # NOTE(review): if the module is already in sys.modules, the name
        # CounterfactualInterfaceController is never bound here and this
        # assert would raise NameError -- confirm intended.
        assert isinstance(controller, CounterfactualInterfaceController)

        self.__controller = controller

    def run(self):
        """Build the MILP, add the user's per-feature constraints, solve it,
        and emit the resulting counterfactual (or an error status)."""
        # showing the steps
        self.progress.emit(CounterfactualInterfaceEnums.Status.STEP3.value)

        # instantiating the optimization model; the desired class is the
        # complement of the originally predicted (binary) class
        randomForestMilp = RandomForestCounterFactualMilp(
            self.__controller.randomForestClassifier,
            [self.__controller.transformedChosenDataPoint],
            1 - self.__controller.predictedOriginalClass[0],
            isolationForest=self.__controller.isolationForest,
            constraintsType=TreeConstraintsType.LinearCombinationOfPlanes,
            objectiveNorm=0,
            mutuallyExclusivePlanesCutsActivated=True,
            strictCounterFactual=True,
            verbose=True,
            binaryDecisionVariables=BinaryDecisionVariables.PathFlow_y,
            featuresActionnability=self.__controller.model.transformedFeaturesActionability,
            featuresType=self.__controller.model.transformedFeaturesType,
            featuresPossibleValues=self.__controller.model.transformedFeaturesPossibleValues)
        randomForestMilp.buildModel()

        # adding the user constraints over the optimization model;
        # constraintIndex walks the *transformed* (one-hot encoded) columns,
        # so binary/numeric features advance it by 1 and categorical
        # features advance it once per possible value
        constraintIndex = 0
        for feature in self.__controller.model.features:
            if feature != 'Class':
                if self.__controller.model.featuresType[feature] is FeatureType.Binary:
                    # Forbid the disallowed binary value by pinning the
                    # decision variable to the opposite level.
                    notAllowedValue = self.__controller.featuresConstraints[feature]['notAllowedValue']
                    if notAllowedValue == self.__controller.model.featuresInformations[feature]['value0']:
                        randomForestMilp.model.addConstr(
                            randomForestMilp.x_var_sol[constraintIndex] == 1,
                            notAllowedValue + ' not allowed')
                    elif notAllowedValue == self.__controller.model.featuresInformations[feature]['value1']:
                        randomForestMilp.model.addConstr(
                            randomForestMilp.x_var_sol[constraintIndex] == 0,
                            notAllowedValue + ' not allowed')
                    constraintIndex += 1
                elif self.__controller.model.featuresType[feature] is FeatureType.Discrete or self.__controller.model.featuresType[feature] is FeatureType.Numeric:
                    # Box constraints on ordered features.
                    selectedMinimum = self.__controller.featuresConstraints[feature]['selectedMinimum']
                    selectedMaximum = self.__controller.featuresConstraints[feature]['selectedMaximum']
                    randomForestMilp.model.addConstr(
                        randomForestMilp.x_var_sol[constraintIndex] >= selectedMinimum,
                        feature + ' minimum constraint')
                    randomForestMilp.model.addConstr(
                        randomForestMilp.x_var_sol[constraintIndex] <= selectedMaximum,
                        feature + ' maximum constraint')
                    constraintIndex += 1
                elif self.__controller.model.featuresType[feature] is FeatureType.Categorical:
                    # Zero out the one-hot column of each disallowed value.
                    notAllowedValues = self.__controller.featuresConstraints[feature]['notAllowedValues']
                    for value in self.__controller.model.featuresInformations[feature]['possibleValues']:
                        if value in notAllowedValues:
                            randomForestMilp.model.addConstr(
                                randomForestMilp.x_var_sol[constraintIndex] == 0,
                                feature + '_' + value + ' not allowed')
                        constraintIndex += 1

        randomForestMilp.solveModel()

        counterfactualResult = randomForestMilp.x_sol
        # If the "solution" equals the original point, the model found no
        # strict counterfactual: treat as infeasible.
        if (np.array(counterfactualResult) == np.array([self.__controller.transformedChosenDataPoint])).all():
            self.progress.emit('Model is infeasible')
        elif counterfactualResult is not None:
            counterfactualResultClass = self.__controller.randomForestClassifier.predict(counterfactualResult)
            result = self.__controller.model.invertTransformedDataPoint(counterfactualResult[0])

            # Build [feature, original, counterfactual] rows for the table.
            counterfactualComparison = []
            for index, feature in enumerate(self.__controller.model.features):
                if feature != 'Class':
                    item1 = self.__controller.chosenDataPoint[index]
                    item2 = result[index]
                    if isinstance(item2, float):
                        # Align types so both values display consistently.
                        item1 = float(item1)
                    counterfactualComparison.append([feature, str(item1), str(item2)])

            # showing the steps
            self.progress.emit(CounterfactualInterfaceEnums.Status.STEP4.value)
            # showing the counterfactual class
            self.couterfactualClass.emit(str(counterfactualResultClass[0]))
            # showing the steps
            self.progress.emit(CounterfactualInterfaceEnums.Status.STEP5.value)
            # showing the comparisson between the selected and the counterfactual values
            self.tableCounterfactualValues.emit(counterfactualComparison)
        else:
            # showing the steps
            self.progress.emit(CounterfactualInterfaceEnums.Status.ERROR_MSG.value)

        self.finished.emit()
<filename>Movement/config.py<gh_stars>1-10 ''' Copyright HiWonder LewanSoul Bus Servo Communication Protocol 1.Summary Using an asynchronous serial communication bus, theoretically, up to 253 robot Bus Servos can be daisy chain connected into the bus, you can control them individually through the UART asynchronous serial interfaces. Each Bus Servo can be set to a different node (ID) address, and multiple Bus Servos can be grouped or controlled individually. Communicating with the user's host computer software (micro controller or PC) through the asynchronous serial interface, you can set the Bus Servo's parameters, control functions and movement. Sending instructions through the asynchronous serial interface, the Bus Servo can be set to motor control mode (continuous rotation) or position control mode (limited angle servo). In motor control mode, the Bus Servo can be used as a DC reduction motor with adjustable speed; In position control mode, the Bus Servo has 240° of rotation range with plus ± 30° adjustable offset available, with high precise positional control & adjustable speed. Parameters can be read back from the Bus Servo including servo position, input voltage, internal temperature. Any half-duplex UART asynchronous serial interface which conforms to the protocol can communicate with the Bus Servos and control the Bus Servos in a variety of ways. 2.UART Interface The Bus Servo uses program code to perform the timing control to the UART asynchronous serial interface, achieving the half-duplex asynchronous serial bus communication, the communication baud rate is 115200bps, and the interface is simple, the protocol is equally simple. In the users own controller design, the UART interface for communication with the Bus Servo must be handled as shown below. 3.Command Packet Command packet format Table 1: Header | ID number | Data Length | Command | Parameters | Checksum 0x55 0x55 | ID | Length | Cmd | Prm 1... 
Prm N | Checksum Header: Two consecutive 0x55 are received, indicating the arrival of a data packet. ID:Each Bus Servo has an ID number. ID numbers range from 0 ~ 253, converted to hexadecimal 0x00 ~ 0xFD. Additionally, there is a broadcast ID: ID No. 254 (0xFE). If the ID number transmitted by the controller is 254 (0xFE), all Bus Servos will receive the instruction, but they do not perform the command nor return a response message, (except in the case of reading the Bus Servo ID number, in that case the servo returns it's servo ID. In this case, only 1 servo can be attached to the bus at once. Please refer to the following instructions for details) to prevent bus conflict. Length(data): Equal to the length of the data to be sent (including its own one byte). That is, the length of the data plus 3 is equal to the length of this command packet, including the header and checksum. Command: The numeric control code to control the various instructions of the Bus Servo, such as parameter setting, position, speed control, reading back parameters, etc. Parameters: In addition to commands, parameters are control information that needs to be added. Checksum: The calculation method is as follows: Checksum = ~(ID + Length + Cmd + Prm1 +...PrmN) If any of the numbers in the brackets are calculated and exceeded 255, then use the least significant byte. "~" means Negation. 4.Command type There are two kinds of commands, write commands and read commands. Write commands: are normally followed by parameters. Transmit the parameters of the function to the Bus Servo to complete the corresponding action. Read commands: are normally, not followed by parameters, when the Bus Servo receives a read command, it will return the corresponding parameters immediately, the returned command value is the same as the read command value that was sent to the Bus Servo. So the host software must immediately prepare to change itself to “read mode” as soon as it sends a read command. 
The following table contains a list of the available commands for the Bus
Servos. Specific details on each command follow the table.
Table 2:
Command name                Command     Length
'''

# LewanSoul bus-servo command codes.
# The trailing "# N" comment on each line is the packet Length field from
# Table 2 in the protocol notes above.
BS_MOVE_TIME_WRITE = 1  # 7
BS_MOVE_TIME_READ = 2  # 3
# No commands from 3 - 6
BS_MOVE_TIME_WAIT_WRITE = 7  # 7
BS_MOVE_TIME_WAIT_READ = 8  # 3
# No commands 9 or 10
BS_MOVE_START = 11  # 3
BS_MOVE_STOP = 12  # 3
# BS_ID_WRITE = 13 # 4 Too dangerous to be included
# BS_ID_READ = 14 # 3 Useless in a multi servo environment
# No commands 15 or 16
BS_ANGLE_OFFSET_ADJUST = 17  # 4
BS_ANGLE_OFFSET_WRITE = 18  # 3
BS_ANGLE_OFFSET_READ = 19  # 3
BS_ANGLE_LIMIT_WRITE = 20  # 7
BS_ANGLE_LIMIT_READ = 21  # 3
BS_VIN_LIMIT_WRITE = 22  # 7
BS_VIN_LIMIT_READ = 23  # 3
BS_TEMP_LIMIT_WRITE = 24  # 4
BS_TEMP_LIMIT_READ = 25  # 3
BS_TEMP_READ = 26  # 3
BS_VIN_READ = 27  # 3
BS_POS_READ = 28  # 3
BS_MOTOR_MODE_WRITE = 29  # 7
BS_MOTOR_MODE_READ = 30  # 3
BS_LOAD_MODE_WRITE = 31  # 4
BS_LOAD_MODE_READ = 32  # 3
BS_LED_CTRL_WRITE = 33  # 4
BS_LED_CTRL_READ = 34  # 3
BS_LED_ERROR_WRITE = 35  # 4
BS_LED_ERROR_READ = 36  # 3
# Parameter count per command, indexed by (command value - 1);
# -1 marks unused command codes.
BS_num_params = (2,0,-1,-1,-1,-1,2,0,-1,-1,0,0,-1,-1,-1,-1,1,0,0,2,0,2,0,1,0,0,0,0,2,0,1,0,1,0,1,0)
'''
Command name: For easy identification, the user can also set their own
according to their preference. Command name suffix "_WRITE" represents a
write command, and the suffix "_READ" represents a read command.

Command value: The command Cmd value specified in the command packet of
Table 1.

Length: The length of the expected command packet (data length) in Table 1.

Detailed individual command instructions

1: Command name: BS_MOVE_TIME_WRITE. Command value: 1 Length: 7
When this command is sent to the Bus Servo, the Bus Servo will be rotated
from its current angle to a new angle specified by parameter 1 and 2, at
uniform speed, to arrive within the period specified by the interval in
parameter 3 and 4. After the command reaches the Bus Servo, the Bus Servo
will rotate immediately.
Parameter 1: lower 8 bits of the new angle value. Parameter 2: higher 8 bits of the new angle value. range 0~1000. Corresponding to Bus Servo angles of 0 ~ 240 °, this means the minimum angle the Bus Servo can be varied by is 0.24 degree. Parameter 3: lower 8 bits of the time interval value. Parameter 4: higher 8 bits of the time interval value. the range of time is 0~30000ms. The Bus Servo's have a maximum speed of rotation of 0.20sec/60°(@ VIn 11.1V). 2. Command name: BS_MOVE_TIME_READ. Command value: 2 Length: 3 When this command is sent to the Bus Servo, the Bus Servo will return the angle and time value which was last sent by BS_MOVE_TIME_WRITE to the Bus Servo. For details of the command packet that the Bus Servo returns to the host computer, please refer to the description in Table 4 below. 3-6. There are no Bus Servo commands with codes between 3 and 6. 7. Command name: BS_MOVE_TIME_WAIT_WRITE. Command value: 7 Length : 7 When this command is sent to the Bus Servo, the function is similar to the BS_MOVE_TIME_WRITE command. However, the Bus Servo will not immediately rotate when the command arrives at the Bus Servo, the Bus Servo's rotation will be triggered by BS_MOVE_START (command code 11) command being sent to Bus Servo (command value 11), then the Bus Servo will be rotated from the current angle to the specified angle at uniform speed to arrive within the specified time interval. Parameter 1: lower 8 bits of the new angle value. Parameter 2: higher 8 bits of the new angle value. Range 0~1000. Corresponding to Bus Servo angles of 0 ~ 240 °, this means the minimum angle the Bus Servo can be varied by is 0.24 degree. Parameter 3: lower 8 bits of the time interval value. Parameter 4: higher 8 bits of the time interval value. Range 0~30000ms. The Bus Servo's have a maximum speed of rotation of 0.20sec/60°(@ VIn 11.1V). 8. Command name: BS_MOVE_TIME_WAIT_READ. 
Command value: 8 Length: 3
When this command is sent to the Bus Servo, the Bus Servo returns the preset
angle and preset time value which were sent last by the
BS_MOVE_TIME_WAIT_WRITE command to the Bus Servo. For details of the command
packet that the Bus Servo returns to the host computer, please refer to the
description in Table 4 below.

9-10. There are no Bus Servo commands with codes 9 or 10.

11. Command name: BS_MOVE_START.
Command value: 11 Length: 3
When this command is sent to the Bus Servo, it triggers the movement defined
in the last BS_MOVE_TIME_WAIT_WRITE (command code 7) command transmitted to
the Bus Servo.

12. Command name: BS_MOVE_STOP.
Command value: 12 Length: 3
When this command is sent to the Bus Servo, it will stop rotating immediately.

13. Command name: BS_ID_WRITE.
Command value: 13 Length: 4
When this command is sent to the Bus Servo, it changes the ID number of the
Bus Servo. Care should be taken as it is possible to assign an ID number
already assigned to another Bus Servo attached to the UART. In this case, from
this point forward, both of the Bus Servos now assigned to that ID number will
respond to commands intended for either of the Bus Servos. Any attempt to
reassign the Bus Servo back to its original ID number will ALSO affect both
Bus Servos too. YOU SEE THE PROBLEM HERE, RIGHT? The new ID will be written to
non-volatile memory and survive powering-down.
Parameter 1: The NEW Bus Servo ID, range 0 ~ 253; the command will default to
1 if no new ID is specified.

14. Command name: BS_ID_READ.
Command value: 14 Length: 3
When this command is sent to the Bus Servo, the Bus Servo will return the
servo ID. This command is only useful in determining the ID of an unknown Bus
Servo. In this case, transmit the broadcast servo ID 254 (0xFE) and the Bus
Servo will return its own ID. This can only be achieved with one Bus Servo
attached to the UART.
For the details of the command package that the Bus Servo returns to the host computer, please refer to the description in Table 4 below. 17. Command name: BS_ANGLE_OFFSET_ADJUST. Command value: 17 Length: 4 When this command is sent to the Bus Servo, it adjusts the offset of the Bus Servo rotation. The new value is not saved when the Bus Servo is powered-down, if you want the value to survive powering-down follow this command with command 18 below. Parameter 1: Bus Servo offset, range -125 ~ +125, The corresponding angle of -30 ° ~ 30 °, when this command reaches the Bus Servo, the Bus Servo will immediately rotate to correct for the new offset value. Because the parameter is a signed integer, and the command packets to be sent are bytes, before sending, the parameter must be converted to unsigned data and then appended to the command packet. 18. Command name: BS_ANGLE_OFFSET_WRITE. Command value: 18 Length: 3 When this command is sent to the Bus Servo, it saves the current offset value into non volatile memory to survive power-down. The adjustment of the offset is achieved using command 17, above. 19. Command name: BS_ANGLE_OFFSET_READ. Command value: 19 Length: 3 When this command is sent to the Bus Servo, the Bus Servo returns the offset value. For details of the command packet that the Bus Servo returns to the host computer, please refer to the description in Table 4 below. Because the parameter is a signed integer, and the command packets to be sent are bytes, after receiving, the parameter must be converted from unsigned data when it stripped from the command packet. 20. Command name: BS_ANGLE_LIMIT_WRITE. Command value: 20 Length: 7 When this command is sent to the Bus Servo, it sets the minimum and maximum limits on the rotation of the Bus Servo. The minimum angle value should always be less than the maximum angle value. The values will be saved into non volatile memory to survive power-down. 
Parameter 1: lower 8 bits of minimum angle
Parameter 2: higher 8 bits of minimum angle, range 0 ~ 1000
Parameter 3: lower 8 bits of maximum angle
Parameter 4: higher 8 bits of maximum angle, range 0 ~ 1000, > lower limit

21. Command name: BS_ANGLE_LIMIT_READ.
Command value: 21 Length: 3
When this command is sent to the Bus Servo, the Bus Servo returns both of the
angle limit values of the Bus Servo. For the details of the instruction packet
that the Bus Servo returns to the host computer, please refer to the
description in Table 4 below.

22. Command name: BS_VIN_LIMIT_WRITE.
Command value: 22 Length: 7
When this command is sent to the Bus Servo, it sets the lower and upper
voltage alarm values. Voltage supplies outside these limits will cause the Bus
Servo to transmit an error code, cause the LED to flash and alarm (if an LED
alarm is set). In order to protect the Bus Servo, the motor will switch to
"unloaded", and the Bus Servo will not output torque. The values will be saved
into non-volatile memory to survive power-down. The minimum input voltage
should always be less than the maximum input voltage. The working voltage
range of the HiWonder LX-824HV Bus Servo is 9-12.6 volts.
Parameter 1: lower 8 bits of minimum input voltage
Parameter 2: higher 8 bits of minimum input voltage, range 4500~12000mv
Parameter 3: lower 8 bits of maximum input voltage
Parameter 4: higher 8 bits of maximum input voltage, range 4500~12000mv,
> lower limit.

23. Command name: BS_VIN_LIMIT_READ.
Command value: 23 Length: 3
When this command is sent to the Bus Servo, the Bus Servo returns both the
lower and higher voltage input limit values. For the details of the
instruction packet that the Bus Servo returns to the host computer, please
refer to the description in Table 4 below.

24. Command name: BS_TEMP_LIMIT_WRITE.
Command value: 24 Length: 4
When this command is sent to the Bus Servo, it sets the upper temperature
alarm value.
High temperatures inside the Bus Servo are caused by the Bus Servo working against high loads. Temperatures within the Bus Servo in excess of this limit will cause the Bus Servo to transmit an error code, cause the LED to flash and alarm (if an LED alarm is set). In order to protect the Bus Servo, the motor will switch to "unloaded", and the Bus Servo will not output torque until the Bus Servo measures a temperature below this limit, then the Bus Servo will switch back to "loaded". The value will be saved into non volatile memory to survive power-down. Parameter 1: The maximum temperature limit inside the Bus Servo. Range 50~100°C. The default value is 85°C. 25. Command name: BS_TEMP_LIMIT_READ. Command value: 25 Length: 3 When this command is sent to the Bus Servo, the Bus Servo returns the maximum internal temperature limit of the Bus Servo, for details of the command package that the Bus Servo returns to the host computer, please refer to the description in Table 4 below. 26. Command name: BS_TEMP_READ. Command value: 26 Length: 3 When this command is sent to the Bus Servo, the Bus Servo returns the real-time temperature inside the Bus Servo, for details of the instruction packet that the Bus Servo returns to host computer, please refer to the description in Table 4 below. 27. Command name: BS_VIN_READ. Command value: 27 Length: 3 When this command is sent to the Bus Servo, the Bus Servo returns real time input voltage of the Bus Servo, for the details of the instruction packet that the SB returns to the host computer, please refer to the description in Table 4 below. 28. Command name: BS_POS_READ. Command value: 28 Length: 3 When this command is sent to the Bus Servo, the Bus Servo returns the real time angle value of the Bus Servo, for the details of the instruction packet that the Bus Servo returns to the host computer, please refer to the description in Table 4 below. 29. Command name: BS_MOTOR_MODE_WRITE. 
Command value: 29 Length: 7
When this command is sent to the Bus Servo, it switches the Bus Servo between
servo mode and motor mode. In motor mode, the Bus Servo can be used as a DC
reduction motor with adjustable speed; in position control mode, the Bus Servo
has 240° of rotation range with ±30° adjustable offset available, with highly
precise position control & adjustable speed. Since the rotation speed is a
signed short integer, it must be forcibly converted to unsigned bytes before
sending the command packet.
Parameter 1: Bus Servo mode. 0 for position control (servo) mode, 1 for motor
control (continuous rotation) mode, default 0.
Parameter 2: null value
Parameter 3: lower 8 bits of rotation speed value
Parameter 4: higher 8 bits of rotation speed value, range -1000 ~ +1000. This
is only valid for motor (continuous rotation) control mode, to control the
rotation speed of the motor. Negative values represent reverse speeds,
positive values represent forward speeds. The motor mode and rotation speed do
not survive power-down.

30. Command name: BS_MOTOR_MODE_READ.
Command value: 30 Length: 3
When this command is sent to the Bus Servo, the Bus Servo returns the motor
mode and rotation speed values of the Bus Servo. Because the rotation
parameter is a signed integer, and the command packets to be sent are bytes,
after receiving, the parameter must be converted from unsigned data when it is
stripped from the command packet. For the details of the command package that
the Bus Servo returns to the host computer, please refer to the description in
Table 4 below.

31. Command name: BS_LOAD_MODE_WRITE.
Command value: 31 Length: 4
When this command is sent to the Bus Servo, it sets the Bus Servo to unloaded
(no torque output) or loaded (high torque output).
Parameter 1: Range 0 or 1. 0 represents unloaded, the Bus Servo has no torque
output. 1 represents loaded, the Bus Servo has high torque output; the default
value is 0. The loaded mode does not survive power-down.

32.
Command name: BS_LOAD_MODE_READ Command value: 32 Length: 3 When this command is sent to the Bus Servo, the Bus Servo returns the load state of the Bus Servo. For details of the command package that the Bus Servo returns to host computer, please refer to the description of Table 4 below. 33. Command name: BS_LED_CTRL_WRITE. Command value: 33 Length: 4 When this command is sent to the Bus Servo, it switches the status of the LED. Parameter 1: LED light/off state. Range 0 or 1. 0 represents LED always on. 1 represents LED off. The default is 0. The value will be saved into non volatile memory to survive power-down. 34. Command name: BS_LED_CTRL_READ Command value: 34 Length: 3 When this command is sent to the Bus Servo, the Bus Servo returns the state of the LED. Range 0 or 1. 0 represents LED always on. 1 represents LED off. For the details of the command packet that the Bus Servo returns to host computer, please refer to the description of Table 4 below. 35. Command name: BS_LED_ERROR_WRITE Command value: 35 Length: 4 When this command is sent to the Bus Servo, it determines what faults will cause the LED to flash alarm. Parameter 1: Range 0~7 There are three types of faults that cause the LED to flash and alarm, regardless of whether the LED is in or off mode. The first fault is that internal temperature of the Bus Servo exceeds the maximum temperature limit (this value is set with command 24, the limit is read with command 25 and the real time value is read with command 26). The second fault is that the Bus Servo input voltage exceeds the limit values (this value is set with command 22, the limit is read with command 23 and the real time value is read with command 27). The third fault is when the Bus Servo rotor becomes locked. 
The fault codes are as shown below:

Table 3:
0  No alarm
1  Over temperature
2  Over voltage
3  Over temperature and over voltage
4  Locked rotor
5  Over temperature and locked rotor
6  Over voltage and locked rotor
7  Over temperature, over voltage and locked rotor

36. Command name: BS_LED_ERROR_READ
Command value: 36 Length: 3
Read the Bus Servo fault alarm value. The values are as above. For the details
of the command packet that the Bus Servo returns to the host computer, please
refer to the description of Table 4 below.

List of read commands:

Table 4:
Command name               Command   Length
BS_MOVE_TIME_READ              2       7
BS_MOVE_TIME_WAIT_READ         8       7
BS_ID_READ                    14       4
BS_ANGLE_OFFSET_READ          19       4
BS_ANGLE_LIMIT_READ           21       7
BS_VIN_LIMIT_READ             23       7
BS_TEMP_LIMIT_READ            25       4
BS_TEMP_READ                  26       4
BS_VIN_READ                   27       5
BS_POS_READ                   28       5
BS_MOTOR_MODE_READ            30       7
BS_LOAD_MODE_READ             32       4
BS_LED_CTRL_READ              34       4
BS_LED_ERROR_READ             36       4
'''
# Command codes that never occur in servo reply packets — presumably every
# code NOT listed in Table 4 above (write commands and unused codes).
BS_inactive_rcmds = (1,3,4,5,6,7,9,10,11,12,13,15,16,17,18,20,22,24,29,31,33,35)
'''
Table 4 lists the commands that the Bus Servo returns to the host computer.
These commands will only be returned when the host computer has sent a read
command to the Bus Servo. What's more, the returned command value is
consistent with the read command that the host computer sent to the Bus Servo.
The difference is that the returned command has parameters. The format of the
returned data command packet is the same as the command packet that the host
computer sent to the Bus Servo, as in Table 1.

Detailed individual command instructions

2. Command name: BS_MOVE_TIME_READ.
Command value: 2 Length: 7
The Bus Servo returns the angle and time value which was last sent by
BS_MOVE_TIME_WRITE to the host computer.
Parameter 1: lower 8 bits of angle value
Parameter 2: higher 8 bits of angle, range 0 ~ 1000
Parameter 3: lower 8 bits of time value
Parameter 4: higher 8 bits of time value, range 0 ~ 30000ms

8. Command name: BS_MOVE_TIME_WAIT_READ.
Command value: 8 Length: 7 The Bus Servo returns the preset angle and preset time value which were sent last by the BS_MOVE_TIME_WAIT_WRITE command to the host computer. Parameter 1: lower 8 bits of preset angle value Parameter 2: higher 8 bits of preset angle, range 0 ~ 1000 Parameter 3: lower 8 bits of preset time value Parameter 4: higher 8 bits of preset time value, range 0 ~ 30000ms 14. Command name: BS_ID_READ. Command value: 14 Length: 4 The Bus Servo returns the servo ID. ID read is a little bit special compared with other read commands. If the command packet ID is the broadcast ID 254 (0xFE), the Bus Servo will return the response information. Other read commands will not respond when the ID is the broadcast ID. The purpose of this design is to acquire the Bus Servo ID number of Bus Servos whose ID is unknown. However the limit is that the bus can only be attached to a single Bus Servo, or it will cause a bus conflict. Parameter 1: Bus Servo ID value 19. Command name: BS_ANGLE_OFFSET_READ. Command value: 19 Length: 4 The Bus Servo returns the offset value. Because the parameter is a signed integer, and the command packets to be sent are bytes, after receiving, the parameter must be converted from unsigned data when it stripped from the command packet. Parameter 1: The offset set by the Bus Servo, range -125 ~ +125 21. Command name: BS_ANGLE_LIMIT_READ. Command value: 21 Length: 7 The Bus Servo returns both of the angle limit values of the Bus Servo. Parameter 1: lower 8 bits of minimum angle value Parameter 2: higher 8 bits of minimum angle, range 0 ~ 1000 Parameter 3: lower 8 bits of maximum angle value Parameter 4: higher 8 bits of maximum angle value, range 0 ~ 1000 23. Command name: BS_VIN_LIMIT_READ. Command value: 23 Length: 7 The Bus Servo returns both the lower and upper voltage input limit values. 
Parameter 1: lower 8 bits of input voltage value Parameter 2: higher 8 bits of input voltage value ,range 4500~12000mv Parameter 3: lower 8 bits of maximum input voltage value Parameter 4: higher 8 bits of maximum input voltage value,range 4500~12000mv 25. Command name: BS_TEMP_LIMIT_READ. Command value: 25 Length: 4 The Bus Servo returns the maximum internal temperature limit of the Bus Servo. Parameter 1: The maximum temperature limit inside the Bus Servo, range 50~100°C 26. Command name: BS_TEMP_READ. Command value: 26 Length: 4 The Bus Servo returns the real-time temperature inside the Bus Servo. Parameter 1: The real time temperature inside the Bus Servo 27. Command name: BS_VIN_READ. Command value: 27 Length: 5 The Bus Servo returns real time input voltage of the Bus Servo. Parameter 1: lower 8 bits of current input voltage value Parameter 2: higher 8 bits of current input voltage value, no default 28. Command name BS_POS_READ. Command value: 28 Length: 5 The Bus Servo returns the real time angle value of the Bus Servo. Parameter 1: lower 8 bits of current Bus Servo position value Parameter 2: higher 8 bits of current Bus Servo position value 30. Command name: BS_MOTOR_MODE_READ Command value: 30 Length: 7 The Bus Servo returns the motor mode and rotation speed values of the Bus Servo. Because the rotation parameter is a signed integer, and the command packets to be sent are bytes, after receiving, the parameter must be converted from unsigned data when it stripped from the command packet. Parameter 1: The current mode of the Bus Servo, 0 for the position control (servo) mode, 1 for the motor control (continuous rotation) mode, the default 0 Parameter 2: Null, set to 0 Parameter 3: lower 8 bits of rotation speed value Parameter 4: higher 8 bits of rotation speed value. Range -1000 ~ +1000. Only valid in motor control mode. Negative values represent reverse rotation, positive values represent forward rotation. 32. Command name: BS_LOAD_MODE_READ. 
Command value: 32 Length: 4
The Bus Servo returns the load state of the Bus Servo.
Parameter 1: Whether the Bus Servo is loaded/unloaded. Range 0 or 1.
0 represents unloaded, no torque output. 1 represents loaded, high torque
output.

34. Command name: BS_LED_CTRL_READ.
Command value: 34 Length: 4
The Bus Servo returns the state of the LED. Range 0 or 1. 0 represents LED
always on. 1 represents LED off.
Parameter 1: LED light on/off state.

36. Command name: BS_LED_ERROR_READ.
Command value: 36 Length: 4
The Bus Servo returns the fault alarm code. The values are as below.
Parameter 1: What faults in the Bus Servo are causing the LED to flash and
alarm, range 0~7.
'''
# Fault/alarm codes returned by BS_LED_ERROR_READ (see Table 3).
BS_no_alarm = 0
BS_over_temp_alarm = 1
BS_over_volt_alarm = 2
BS_over_temp_volt_alarm = 3
BS_locked_rotor_alarm = 4
BS_over_temp_locked_rotor_alarm = 5
BS_over_voltage_locked_rotor_alarm = 6
BS_over_temp_volt_locked_rotor = 7
''' The corresponding relationship between the numerical value and the faults
are shown in Table 3.'''

# Bus Servo parameters
# NOTE(review): BS_servo_type says "LX-224HV" but several comments in this
# file reference the "LX-824HV" — confirm which model is actually fitted.
BS_servo_type = "LX-224HV" # Manufacturer/model of the servo
BS_num_servos = 18 # Number of servos of this type on the robot
BS_rotate_limits = (0, 1000) # Minimum & maximum values for full deflection
BS_cont_speed = (-1000, 1000) # Range of continuous rotation speeds
BS_max_speed = 1250 # HiWonder LX-824HV Bus Servos have a maximum speed of rotation of 0.20sec/60°(@ VIn 11.1V).
                    # With a maximum rotation of 0 ~ 240° defined as a range of 1-1000.
BS_time_limits = (0, 30000) # Minimum & maximum time durations (ms)
BS_offset_limits = (-125, 125) # Minimum & maximum offset values
                               # Offsets can be set between angles of -30° ~ 30°
BS_Vin_limits = (9000, 12600) # Voltage limits specified in mV
# Servos allow voltage limits to be set from 4500mV to 14000mV but the operating voltage of the
# servos is 9-12.6V
BS_temp_limits = (50, 85) # Temperature limit in °C. The limit can be set between 50 ~ 100°C.
BS_default_pos = 500 # The default position for 50% rotation # PWM servo parameters PWM_servo_type = "PWM_generic" # Manufacturer/model of the servo PWM_num_servos = 2 # Number of servos of this type on the robot PWM_rotate_limits = (500, 2500) # Miminum & maximum number of pulses for full defection PWM_time_limits = (20, 5000) # Minimum & maximum time to reach destination PWM_offset_limits = (-300, 300) # Minimum & maximum offset values PWM_offsets = (0,0) # The PWM servos have no internal memory so the offsets have to be set here PWM_freq = 50 # PWM frequency PWM_default_pos = 1500 # The default position for straight ahead if __name__ == '__main__': print("This file just contains environment variables. It doesn't do anything")
# otdd/pytorch/functionals.py
################################################################################
############### COLLECTION OF FUNCTIONALS ON DATASETS ##########################
################################################################################

import numpy as np
import torch


class Functional():
    """Defines a JKO functional over measures, implicitly, by defining it over
    individual particles (points).

    The input should be a full dataset: points X (n x d) with labels Y (n x 1).
    Optionally, the means/variances associated with each class can be passed.

    Args:
        V: potential-energy functional 𝒱, called as V(x, y, μ, Σ)
        W: interaction-energy bi-linear form 𝒲 on ZxZ, called as W(x, y, μ, Σ)
        f: scalar-valued function in the internal energy term ℱ
        weights: optional weights (stored for callers; unused here)
    """
    def __init__(self, V=None, W=None, f=None, weights=None):
        self.V = V  # The functional on Z space in potential energy 𝒱
        self.W = W  # The bi-linear form on ZxZ spaces in interaction energy 𝒲
        self.f = f  # The scalar-valued function in the internal energy term ℱ
        self.weights = weights

    def __call__(self, x, y, μ=None, Σ=None):
        """Evaluates the sum of all configured energy terms at (x, y).

        Bug fixes vs. the original: `self` was missing from the signature
        (so any call mis-bound the arguments), and the internal-energy
        branch referenced the nonexistent attribute `self.F` instead of
        `self.f`.
        """
        total = 0  # renamed from `sum` to avoid shadowing the builtin
        if self.f is not None:
            total += self.f(x, y, μ, Σ)
        if self.V is not None:
            total += self.V(x, y, μ, Σ)
        if self.W is not None:
            total += self.W(x, y, μ, Σ)
        return total


################################################################################
####### Potential energy functionals (denoted by V in the paper)       #########
################################################################################

def affine_feature_norm(X, Y=None, A=None, b=None, threshold=None, weight=1.0):
    """A simple (feature-only) potential energy based on affine transform + norm:

            v(x,y) = || Ax - b ||, so that V(ρ) = ∫|| Ax - b || dρ(x,y)

    where the integral is approximated by the empirical expectation (mean).

    Args:
        X: (n x d) tensor of points
        Y: unused, accepted for interface uniformity with other potentials
        A: optional (d x d') linear map
        b: optional offset vector
        threshold: if given, norms at or below it are zeroed out
        weight: scalar multiplier on the result
    """
    if A is None and b is None:
        norm = X.norm(dim=1)
    elif A is None:
        norm = (X - b).norm(dim=1)
    elif b is None:
        # Bug fix: the original computed (X - b) in this branch even though
        # b is None here (TypeError). With no offset, ||X@A|| is intended.
        norm = (X @ A).norm(dim=1)
    else:
        norm = (X @ A - b).norm(dim=1)
    if threshold:
        # Values <= threshold are replaced by 0
        norm = torch.nn.functional.threshold(norm, threshold, 0)
    return weight * norm.mean()


def binary_hyperplane_margin(X, Y, w, b, weight=1.0):
    """A potential function based on margin separation according to a (given
    and fixed) hyperplane:

        v(x,y) = max(0, 1 - y(x'w - b)), so that
        V(ρ) = ∫ max(0, y(x'w - b)) dρ(x,y)

    Returns 0 if all points are at least 1 away from the margin.

    Note that y is expected to be {0,1}. Needs the separation hyperplane to be
    determined by the (w, b) parameters.
    """
    Y_hat = 2 * Y - 1  # To map Y to {-1, 1}, required by the SVM-type margin obj we use
    margin = torch.relu(1 - Y_hat * (torch.matmul(X, w) - b))
    return weight * margin.mean()


def dimension_collapse(X, Y, dim=1, v=None, weight=1.0):
    """Potential function to induce a dimension collapse: penalizes squared
    deviation of feature `dim` from the target value `v` (default 0)."""
    if v is None:
        v = 0
    deviation = (X[:, dim] - v) ** 2
    return weight * deviation.mean()


def cluster_repulsion(X, Y):
    """Unimplemented placeholder for a cluster-repulsion potential.

    Bug fix: the original body was `pdb.set_trace()` without importing pdb,
    which raised NameError; fail explicitly instead.
    """
    raise NotImplementedError("cluster_repulsion is not implemented yet")


################################################################################
######## Interaction energy functionals (denoted by W in the paper)    #########
################################################################################

def interaction_fun(X, Y, weight=1.0):
    """Pairwise interaction energy on Z = (X, Y) enforcing cluster repulsion:
    averages a negative norm over pairwise feature differences, masked so that
    only pairs with differing labels contribute."""
    Z = torch.cat((X, Y.float().unsqueeze(1)), -1)
    n, d = Z.shape
    # (n x n x d) tensor of pairwise differences z_i - z_j
    Diffs = Z.repeat(n, 1, 1).transpose(0, 1) - Z.repeat(n, 1, 1)

    def _f(δz):
        # Enforces cluster repulsion:
        δx, δy = torch.split(δz, [δz.shape[-1] - 1, 1], dim=-1)
        δy = torch.abs(δy / δy.max()).ceil()  # Hacky way to get 0/1 loss for δy
        return -(δx * δy).norm(dim=-1).mean(dim=-1)

    val = _f(Diffs).mean()
    return val * weight


def binary_cluster_margin(X, Y, μ=None, weight=1.0):
    """Similar to binary_hyperplane_margin but does not require a separating
    hyperplane to be provided in advance.

    Instead, computes one based on the current datapoints as the hyperplane
    through the midpoint of their means, so it requires point-to-point
    comparison (interaction).
    """
    μ_0 = X[Y == 0].mean(0)
    μ_1 = X[Y == 1].mean(0)
    n, d = X.shape
    diffs_x = X.repeat(n, 1, 1).transpose(0, 1) - X.repeat(n, 1, 1)
    diffs_x = torch.nn.functional.normalize(diffs_x, dim=2, p=2)

    # Per-point class mean; dtype/device follow X so mixed-precision inputs work
    μ = torch.zeros(n, d, dtype=X.dtype, device=X.device)
    μ[Y == 0, :] = μ_0
    μ[Y == 1, :] = μ_1
    diffs_μ = μ.repeat(n, 1, 1).transpose(0, 1) - μ.repeat(n, 1, 1)
    diffs_μ = torch.nn.functional.normalize(diffs_μ, dim=2, p=2)

    inner_prod = torch.einsum("ijk,ijl->ij", diffs_x, diffs_μ)
    # (debug prints removed from the original implementation)
    margin = torch.exp(torch.relu(-inner_prod + 1))
    return weight * margin.mean()
"""Tests the encoding of domain information into the embedding""" # The tests are verbose, but they are not intended to read exhaustively # anyway. When reading particular failing examples, verbosity is good. # pylint:disable=too-many-lines from pytest import approx from infrastructure import InfrastructureNetwork from overlay import OverlayNetwork from embedding import PartialEmbedding, ENode # Sometimes its nicer to start variable names with uppercase letters # pylint: disable=invalid-name def take_action(embedding, action, expect_success=True): """Takes an action by text, to ease testing""" possibilities = embedding.possibilities() for possibility in possibilities: if str(possibility) == action: embedding.take_action(*possibility) return True if expect_success: raise Exception( f"Action {action} not in possibilities: {possibilities}" ) return False def test_path_loss(): """ Tests that an embedding over impossible distances is recognized as invalid. """ infra = InfrastructureNetwork() # Two nodes, 1km apart. The transmitting node has a transmission # power of 1dBm (=1.26mW). With a path loss over 1km of *at least* # 30dBm, less than ~-30dBm (approx. 10^-3 = 0.001mW = 1uW) arrives # at the target. That is a very optimistic approximation and is not # nearly enough to send any reasonable signal. source_node = infra.add_source(pos=(0, 0), transmit_power_dbm=1) infra.set_sink(pos=(1000, 0), transmit_power_dbm=0) overlay = OverlayNetwork() source_block = overlay.add_source() sink_block = overlay.set_sink() overlay.add_link(source_block, sink_block) embedding = PartialEmbedding( infra, overlay, source_mapping=[(source_block, source_node)] ) assert len(embedding.possibilities()) == 0 def test_trivial_possibilities(): """ Tests that a single reasonable option is correctly generated in a trivial case. """ infra = InfrastructureNetwork() # Two nodes, 1m apart. 
    # The transmitting node's power should easily cover the distance of
    # 1m without any noise.
    # NOTE(review): the original comment claims 30dBm (similar to a regular
    # router) but the source below is created with 0.1dBm — confirm intent.
    source_node = infra.add_source(pos=(0, 0), transmit_power_dbm=0.1)
    infra.set_sink(pos=(1, 0), transmit_power_dbm=0)

    overlay = OverlayNetwork()
    source_block = overlay.add_source()
    sink_block = overlay.set_sink()
    overlay.add_link(source_block, sink_block)

    embedding = PartialEmbedding(
        infra, overlay, source_mapping=[(source_block, source_node)]
    )

    # can only embed B2 into N2
    assert len(embedding.possibilities()) == 1


def test_manually_verified_sinr():
    """
    Tests that the SINR calculation agrees with a manually verified
    example.
    """
    infra = InfrastructureNetwork(noise_floor_dbm=15)

    # 2 sources, 2 intermediaries, 1 sink
    n_source1 = infra.add_source(pos=(0, 0), transmit_power_dbm=30)
    n_interm1 = infra.add_intermediate(pos=(1, 0), transmit_power_dbm=30)
    n_source2 = infra.add_source(pos=(0, 2), transmit_power_dbm=30)
    n_interm2 = infra.add_intermediate(pos=(1, 2), transmit_power_dbm=30)
    n_sink = infra.set_sink(pos=(3, 1), transmit_power_dbm=0)

    overlay = OverlayNetwork()
    b_source1 = overlay.add_source()
    b_interm1 = overlay.add_intermediate()
    b_source2 = overlay.add_source()
    b_interm2 = overlay.add_intermediate()
    b_sink = overlay.set_sink()

    overlay.add_link(b_source1, b_interm1)
    overlay.add_link(b_source2, b_interm2)
    # just to make the embedding complete
    overlay.add_link(b_interm1, b_sink)
    overlay.add_link(b_interm2, b_sink)

    embedding = PartialEmbedding(
        infra,
        overlay,
        source_mapping=[(b_source1, n_source1), (b_source2, n_source2)],
    )

    # this doesn't actually do anything, just makes the next step more
    # convenient
    e_source1 = ENode(b_source1, n_source1)
    e_source2 = ENode(b_source2, n_source2)
    e_interm1 = ENode(b_interm1, n_interm1)
    e_interm2 = ENode(b_interm2, n_interm2)
    e_sink = ENode(b_sink, n_sink)

    # Here is the important part: Two signals in parallel
    embedding.take_action(e_source1, e_interm1, 0)
    embedding.take_action(e_source2, e_interm2, 0)

    # We don't really care what is going on in other timeslots, this is
    # just to make the embedding valid.
    embedding.take_action(e_interm1, e_sink, 1)
    embedding.take_action(e_interm2, e_sink, 2)

    # Now we have a clean model to work with in timeslot 1: Two parallel
    # communications, one signal and one noise.
    # Let's calculate the SINR of signal1.

    # source1 sends with 30dBm. There is a distance of 1m to interm1.
    # According to the log path loss model with a loss exponent of 2
    # (appropriate for a building), the signal will incur a loss of
    # 2 * distance_decibel dBm
    # Where distance_decibel is the distance in relation to 1m, i.e. 0
    # in this case. That means there is *no loss*, at least according to
    # the model.

    # It follows that interm1 receives a signal of 30dBm. Now on to the
    # received noise. source2 also sends with 30dBm and has a distance
    # of sqrt(1^2 + 2^2) ~= 2.24m to interm1. According to the log path
    # loss model:
    # distance_decibel = 10 * lg(2.24) ~= 3.50
    # => path_loss = 2 * 3.50 ~= 7 dBm
    # So interm1 receives roughly 30 - 7 = 23 dBm of noise. Lets assume
    # a base noise of 15dB. We have to add those two. Care must be taken
    # here because of the logarithmic scale. Naive addition would result
    # in multiplication of the actual power in watts. So we need to
    # convert back to watts first, then add, then convert back:
    # base_noise_milliwatts = 10^(1.5) ~= 31.62 mW
    # com_noise_milliwatts = 10^(2.3) ~= 199.53 mW
    # => total_noise = 31.62 + 199.53 = 231.15 mW
    # The total noise is 231.15 mW, which equals
    # 10*lg(231.15) ~= 23.64 dB
    # That is way less than the naively calculated sum of the dB values.
    # NOTE(review): the original said "16 + 15 = 31 dB" here; presumably it
    # meant 23 + 15 = 38 dB — confirm.
    # That means the SINR should be
    # sinr = 30dBm - 23.64 = 6.36dB
    # Here the subtraction actually *should* represent a division of the
    # powers.
    sinr = embedding.known_sinr(n_source1, n_interm1, 0)
    assert sinr == approx(6.36, abs=0.1)


def test_invalidating_earlier_choice_impossible():
    """
    Tests that an action that would invalidate an earlier action is
    impossible.
    """
    infra = InfrastructureNetwork()

    # Two sources, one sink. Equal distance from both sources to sink.
    # One source with moderate transmit power (but enough to cover the
    # distance), one source with excessive transmit power.
    source_node_silent = infra.add_source(
        pos=(0, 0), transmit_power_dbm=20, name="Silent"
    )
    source_node_screamer = infra.add_source(
        pos=(3, 0), transmit_power_dbm=100, name="Screamer"
    )
    node_sink = infra.set_sink(pos=(1, 3), transmit_power_dbm=0, name="Sink")

    overlay = OverlayNetwork()
    esource_silent = ENode(overlay.add_source(), source_node_silent)
    esource_screamer = ENode(overlay.add_source(), source_node_screamer)
    esink = ENode(overlay.set_sink(), node_sink)

    overlay.add_link(esource_silent.block, esink.block)
    overlay.add_link(esource_screamer.block, esink.block)

    embedding = PartialEmbedding(
        infra,
        overlay,
        source_mapping=[
            (esource_silent.block, esource_silent.node),
            (esource_screamer.block, esource_screamer.node),
        ],
    )

    action_to_be_invalidated = (esource_screamer, esink, 0)

    # make sure the action is an option in the first place
    assert action_to_be_invalidated in embedding.possibilities()

    # embed the link from the silent node to the sink
    embedding.take_action(esource_silent, esink, 0)

    # first assert that action would be valid by itself
    screamer_sinr = embedding.known_sinr(source_node_screamer, node_sink, 0)
    assert screamer_sinr > 2.0

    new_possibilities = embedding.possibilities()
    # but since the action would make the first embedding invalid (a
    # node cannot receive two signals at the same time), it should still
    # not be possible
    assert action_to_be_invalidated not in new_possibilities

    # since there are no options left in the first timeslot, there are
    # now exactly 2 (screamer -> silent as relay, screamer -> sink
    # embedded) options left in the newly created second timeslot
    assert len(new_possibilities) == 2


def test_no_unnecessary_options():
    """
    Tests that no unnecessary connections are offered.
    """
    infra = InfrastructureNetwork()

    # One source, one sink, with enough transmit power (30dBm, similar
    # to a regular router) to easily cover the distance without noise.
    source_node = infra.add_source(
        pos=(0, 0), transmit_power_dbm=30, name="Source"
    )
    sink_node = infra.set_sink(pos=(1, 3), transmit_power_dbm=0, name="Sink")

    overlay = OverlayNetwork()
    esource = ENode(overlay.add_source(), source_node)
    esink = ENode(overlay.set_sink(), sink_node)
    overlay.add_link(esource.block, esink.block)

    embedding = PartialEmbedding(
        infra, overlay, source_mapping=[(esource.block, esource.node)]
    )

    assert len(embedding.possibilities()) == 1

    # embed the sink
    embedding.take_action(esource, esink, 0)

    # Now it would still be *feasible* to add a connection in the other
    # timeslot. It shouldn't be possible however, since all outgoing
    # connections are already embedded.
    assert len(embedding.possibilities()) == 0


def test_all_viable_options_offered():
    """
    Tests that all manually verified options are offered in a concrete
    example.
""" infra = InfrastructureNetwork() # Two sources, one sink, one intermediate, one relay # Enough transmit power so that it doesn't need to be taken into account nso1 = infra.add_source( pos=(0, 0), # transmit power should not block anything in this example transmit_power_dbm=100, name="nso1", ) nso2 = infra.add_source(pos=(1, 0), transmit_power_dbm=100, name="nso2") _nrelay = infra.add_intermediate( pos=(0, 1), transmit_power_dbm=100, name="nr" ) _ninterm = infra.add_intermediate( pos=(2, 0), transmit_power_dbm=100, name="ni" ) _nsink = infra.set_sink(pos=(1, 1), transmit_power_dbm=100, name="nsi") overlay = OverlayNetwork() bso1 = overlay.add_source(name="bso1") bso2 = overlay.add_source(name="bso2") bsi = overlay.set_sink(name="bsi") bin_ = overlay.add_intermediate(name="bin") eso1 = ENode(bso1, nso1) eso2 = ENode(bso2, nso2) # source1 connects to the sink over the intermediate source2 # connects both to the sink and to source1. overlay.add_link(bso1, bin_) overlay.add_link(bin_, bsi) overlay.add_link(bso2, bsi) overlay.add_link(bso2, bso1) embedding = PartialEmbedding( infra, overlay, source_mapping=[(bso1, eso1.node), (bso2, eso2.node)] ) # source1 can connect to the intermediate, which could be embedded # in any node (5). It could also connect to any other node as a # relay (4) -> 9. source2 can connect to the sink (1) or the other # source (1). It could also connect to any other node as a relay for # either of its two links (2 * 3) -> 8 No timeslot is used yet, so # there is just one timeslot option. 
assert len(embedding.possibilities()) == 9 + 8 def test_timeslots_dynamically_created(): """Tests the dynamic creation of new timeslots as needed""" infra = InfrastructureNetwork() nso1 = infra.add_source( name="nso1", pos=(0, 0), # transmits so loudly that no other node can realistically # transmit in the same timeslot transmit_power_dbm=1000, ) nso2 = infra.add_source(name="nso2", pos=(1, 0), transmit_power_dbm=1000) nsi = infra.set_sink(name="nsi", pos=(1, 1), transmit_power_dbm=1000) overlay = OverlayNetwork() bso1 = overlay.add_source(name="bso1") bso2 = overlay.add_source(name="bso2") bsi = overlay.set_sink(name="bsi") eso1 = ENode(bso1, nso1) esi = ENode(bsi, nsi) overlay.add_link(bso1, bsi) overlay.add_link(bso2, bsi) embedding = PartialEmbedding( infra, overlay, source_mapping=[(bso1, nso1), (bso2, nso2)] ) # nothing used yet assert embedding.used_timeslots == 0 # it would be possible to create a new timeslot and embed either # link in it (2) or go to a relay from either source (2) assert len(embedding.possibilities()) == 4 # Take an action. nosurce1 will transmit so strongly that nso2 # cannot send at the same timelot assert embedding.take_action(eso1, esi, 0) # timeslot 0 is now used assert embedding.used_timeslots == 1 # New options (for creating timeslot 1) were created accordingly. # The second source could now still send to the other source as a # relay or to to the sink directly, it will just have to do it in a # new timeslot. assert len(embedding.possibilities()) == 2 def test_completion_detection(): """ Tests that the completeness of an embedding is accurately detected in a simple example. """ infra = InfrastructureNetwork() # One source, one sink, one relay. 
# Enough transmit power so that it doesn't need to be taken into account nsource = infra.add_source( pos=(0, 0), # transmit power should not block anything in this example transmit_power_dbm=100, ) _nrelay = infra.add_intermediate(pos=(0, 1), transmit_power_dbm=100) nsink = infra.set_sink(pos=(1, 1), transmit_power_dbm=100) overlay = OverlayNetwork() esource = ENode(overlay.add_source(), nsource) esink = ENode(overlay.set_sink(), nsink) overlay.add_link(esource.block, esink.block) embedding = PartialEmbedding( infra, overlay, source_mapping=[(esource.block, esource.node)] ) assert not embedding.is_complete() embedding.take_action(esource, esink, 0) assert embedding.is_complete() def test_parallel_receive_impossible(): """ Tests that receiving from two sender nodes at the same time is impossible """ infra = InfrastructureNetwork() nsource1 = infra.add_source(pos=(0, 0), transmit_power_dbm=30) nsource2 = infra.add_source(pos=(3, 0), transmit_power_dbm=30) nsink = infra.set_sink(pos=(2, 0), transmit_power_dbm=30) overlay = OverlayNetwork() esource1 = ENode(overlay.add_source(), nsource1) esource2 = ENode(overlay.add_source(), nsource2) esink = ENode(overlay.set_sink(), nsink) # two incoming connections to sink overlay.add_link(esource1.block, esink.block) overlay.add_link(esource2.block, esink.block) embedding = PartialEmbedding( infra, overlay, source_mapping=[ (esource1.block, esource1.node), (esource2.block, esource2.node), ], ) # Try to send two signals to sink at the same timeslot. This should # fail, as either one signal should overshadow the other. 
embedding.take_action(esource1, esink, 0) assert not embedding.take_action(esource2, esink, 0) def test_broadcast_possible(): """Tests that broadcast is possible despite SINR constraints""" infra = InfrastructureNetwork() # One source, one sink, one intermediate nsource = infra.add_source(pos=(0, 0), transmit_power_dbm=30) ninterm = infra.add_intermediate(pos=(1, 2), transmit_power_dbm=30) nsink = infra.set_sink(pos=(2, 0), transmit_power_dbm=30) overlay = OverlayNetwork() esource = ENode(overlay.add_source(), nsource) einterm = ENode(overlay.add_intermediate(), ninterm) esink = ENode(overlay.set_sink(), nsink) # fork overlay.add_link(esource.block, einterm.block) overlay.add_link(esource.block, esink.block) # make complete overlay.add_link(einterm.block, esink.block) embedding = PartialEmbedding( infra, overlay, source_mapping=[(esource.block, esource.node)] ) # Broadcast from source to sink and intermediate sinr_before = embedding.known_sinr(esource.node, esink.node, timeslot=0) assert embedding.take_action(esource, esink, 0) # Easiest way to test this, easy to change if internals change. 
# pylint: disable=protected-access power_at_sink = embedding.infra.power_at_node( esink.node, frozenset(embedding._nodes_sending_in[0]) ) assert embedding.take_action(esource, einterm, 0) # Make sure the broadcasting isn't counted twice new_power = embedding.infra.power_at_node( esink.node, frozenset(embedding._nodes_sending_in[0]) ) assert new_power == power_at_sink # Make sure the broadcasts do not interfere with each other assert sinr_before == embedding.known_sinr( esource.node, esink.node, timeslot=0 ) def test_count_timeslots_multiple_sources(): """Tests correct counting behaviour with multiple sources""" infra = InfrastructureNetwork() nsource1 = infra.add_source(pos=(0, -1), transmit_power_dbm=30) nsource2 = infra.add_source(pos=(0, 1), transmit_power_dbm=30) nsink = infra.set_sink(pos=(1, 0), transmit_power_dbm=30) overlay = OverlayNetwork() esource1 = ENode(overlay.add_source(), nsource1) esource2 = ENode(overlay.add_source(), nsource2) esink = ENode(overlay.set_sink(), nsink) overlay.add_link(esource1.block, esink.block) overlay.add_link(esource2.block, esink.block) embedding = PartialEmbedding( infra, overlay, source_mapping=[ (esource1.block, esource1.node), (esource2.block, esource2.node), ], ) assert not embedding.is_complete() assert embedding.used_timeslots == 0 assert embedding.take_action(esource1, esink, 0) assert not embedding.is_complete() assert embedding.used_timeslots == 1 assert embedding.take_action(esource2, esink, 1) assert embedding.is_complete() assert embedding.used_timeslots == 2 def test_count_timeslots_parallel(): """Tests correct counting behaviour with parallel connections""" infra = InfrastructureNetwork() # One source, one sink, two intermediates nsource = infra.add_source( pos=(0, 0), transmit_power_dbm=30, name="nsource" ) ninterm1 = infra.add_intermediate( pos=(1, 2), transmit_power_dbm=30, name="ninterm1" ) ninterm2 = infra.add_intermediate( pos=(1, -2), transmit_power_dbm=30, name="ninterm2" ) nsink = 
infra.set_sink(pos=(2, 0), transmit_power_dbm=30, name="nsink") overlay = OverlayNetwork() esource = ENode(overlay.add_source(name="bsource"), nsource) einterm1 = ENode(overlay.add_intermediate(name="binterm1"), ninterm1) einterm2 = ENode(overlay.add_intermediate(name="binterm2"), ninterm2) esink = ENode(overlay.set_sink(name="bsink"), nsink) # fork overlay.add_link(esource.block, einterm1.block) overlay.add_link(esource.block, einterm2.block) overlay.add_link(einterm1.block, esink.block) overlay.add_link(einterm2.block, esink.block) embedding = PartialEmbedding( infra, overlay, source_mapping=[(esource.block, esource.node)] ) assert not embedding.is_complete() assert embedding.used_timeslots == 0 assert embedding.take_action(esource, einterm1, 0) assert embedding.take_action(esource, einterm2, 0) assert not embedding.is_complete() assert embedding.used_timeslots == 1 assert embedding.take_action(einterm1, esink, 1) assert not embedding.is_complete() assert embedding.used_timeslots == 2 assert embedding.take_action(einterm2, esink, 2) assert embedding.is_complete() assert embedding.used_timeslots == 3 def test_count_timeslots_loop(): """Tests reasonable counting behaviour with loops""" infra = InfrastructureNetwork() # One source, one sink, two intermediates nsource = infra.add_source(pos=(0, 0), transmit_power_dbm=30, name="nso") ninterm1 = infra.add_intermediate( pos=(2, 1), transmit_power_dbm=5, name="ni1" ) ninterm2 = infra.add_intermediate( pos=(0, -1), transmit_power_dbm=5, name="ni2" ) nsink = infra.set_sink(pos=(2, 0), transmit_power_dbm=30, name="nsi") overlay = OverlayNetwork() esource = ENode(overlay.add_source(name="bso"), nsource) einterm1 = ENode(overlay.add_intermediate(name="bi1"), ninterm1) einterm2 = ENode(overlay.add_intermediate(name="bi2"), ninterm2) esink = ENode(overlay.set_sink(name="bsi"), nsink) overlay.add_link(esource.block, einterm1.block) overlay.add_link(einterm1.block, esink.block) overlay.add_link(esink.block, einterm2.block) 
overlay.add_link(einterm2.block, esource.block) embedding = PartialEmbedding( infra, overlay, source_mapping=[(esource.block, esource.node)] ) assert not embedding.is_complete() assert embedding.used_timeslots == 0 assert embedding.take_action(esource, einterm1, 0) assert not embedding.is_complete() assert embedding.used_timeslots == 1 assert embedding.take_action(einterm1, esink, 1) assert not embedding.is_complete() assert embedding.used_timeslots == 2 assert embedding.take_action(esink, einterm2, 2) assert not embedding.is_complete() assert embedding.used_timeslots == 3 assert embedding.take_action(einterm2, esource, 1) assert embedding.is_complete() assert embedding.used_timeslots == 3 def test_outlinks_limited(): """ Tests that the number of possible outlinks is limited by the number of outlinks to embed for that block. """ infra = InfrastructureNetwork() nsource = infra.add_source(pos=(0, 0), transmit_power_dbm=1, name="nso") nrelay = infra.add_intermediate( pos=(1, 0), transmit_power_dbm=1, name="nr" ) # The sink is way out of reach, embedding is not possible _nsink = infra.set_sink(pos=(1, 1), transmit_power_dbm=1, name="nsi") overlay = OverlayNetwork() bsource = overlay.add_source(name="bso") bsink = overlay.set_sink(name="bsi") esource = ENode(bsource, nsource) overlay.add_link(bsource, bsink) embedding = PartialEmbedding( infra, overlay, source_mapping=[(esource.block, esource.node)] ) erelay = ENode(bsource, nrelay, bsink) assert embedding.take_action(esource, erelay, 0) possibilities_from_source = [ (source, target, timeslot) for (source, target, timeslot) in embedding.possibilities() if source == esource ] # the source block has one outgoing edge, one outlink is already # embedded (although the link is not embedded completely) assert len(possibilities_from_source) == 0 possibilities_from_relay = [ (source, target, timeslot) for (source, target, timeslot) in embedding.possibilities() if source == erelay ] # yet the link can be continued from the relay 
assert len(possibilities_from_relay) > 0 def test_loop_within_infra_possible(): """ Tests that a loop within the infrastructure is always possible and does not interfere with other connections. This can be used to embed multiple consecutive blocks within one node. """ infra = InfrastructureNetwork() nsource = infra.add_source(pos=(0, 0), transmit_power_dbm=30, name="nso") nsink = infra.set_sink(pos=(1, 0), transmit_power_dbm=30, name="nsi") overlay = OverlayNetwork() esource = ENode(overlay.add_source(name="bso"), nsource) einterm = ENode(overlay.add_intermediate(name="bin"), nsource) esink = ENode(overlay.set_sink(name="bsi"), nsink) overlay.add_link(esource.block, einterm.block) overlay.add_link(einterm.block, esink.block) embedding = PartialEmbedding( infra, overlay, source_mapping=[(esource.block, esource.node)] ) sinr_before = embedding.known_sinr(nsource, nsink, 0) assert embedding.take_action(esource, einterm, 0) sinr_after = embedding.known_sinr(nsource, nsink, 0) assert sinr_before == sinr_after assert embedding.take_action(einterm, esink, 0) assert embedding.is_complete() def test_link_edges_cannot_be_embedded_twice(): """Tests that edges completing a link that is already embedded are removed or not even added when creating a new timestep""" infra = InfrastructureNetwork() nso = infra.add_source(pos=(0, 0), transmit_power_dbm=30, name="nso") nsi = infra.set_sink(pos=(2, 0), transmit_power_dbm=30, name="nsi") _nint = infra.add_intermediate( pos=(1, -1), transmit_power_dbm=30, name="nint" ) overlay = OverlayNetwork() bso = overlay.add_source(name="bso") bsi = overlay.set_sink(name="bsi") bint = overlay.add_intermediate(name="bint") overlay.add_link(bso, bsi) overlay.add_link(bso, bint) overlay.add_link(bint, bsi) embedding = PartialEmbedding(infra, overlay, source_mapping=[(bso, nso)]) eso = ENode(bso, nso) esi = ENode(bsi, nsi) # now the link from source to sink is already embedded, only the one # from source to intermediate should be left assert 
embedding.take_action(eso, esi, 0) # so embedding it again should not be possible assert not embedding.take_action(ENode(bso, nso), ENode(bsi, nsi), 1) def test_unnecessary_links_removed_in_other_timeslots(): """ Tests that links in other timeslots are removed if they are embedded in one timeslot. """ infra = InfrastructureNetwork() nfaraway_1 = infra.add_source( pos=(999999998, 99999999), transmit_power_dbm=5, name="nfaraway_1" ) nfaraway_2 = infra.add_intermediate( pos=(999999999, 99999999), transmit_power_dbm=5, name="nfaraway_2" ) nsi = infra.set_sink(pos=(9, 5), transmit_power_dbm=12, name="nsi") nso = infra.add_source(pos=(8, 3), transmit_power_dbm=3, name="nso") overlay = OverlayNetwork() bsi = overlay.set_sink(name="bsi") bso = overlay.add_source(name="bso") bfaraway_1 = overlay.add_source(name="bfaraway_1") bfaraway_2 = overlay.add_intermediate(name="bfaraway_2", datarate=0) overlay.add_link(bso, bsi) overlay.add_link(bfaraway_1, bfaraway_2) # just to make it correct overlay.add_link(bfaraway_2, bsi) embedding = PartialEmbedding( infra, overlay, source_mapping=[(bso, nso), (bfaraway_1, nfaraway_1)] ) esi = ENode(bsi, nsi) eso = ENode(bso, nso) efaraway_1 = ENode(bfaraway_1, nfaraway_1) efaraway_2 = ENode(bfaraway_2, nfaraway_2) # make sure a second timeslot is created assert embedding.take_action(efaraway_1, efaraway_2, 0) # make sure embedding is possible in ts1 assert (eso, esi, 1) in embedding.possibilities() # embed the link in ts 0 assert embedding.take_action(eso, esi, 0) # now no embedding in another timeslot should be possible anymore possible_outlinks_from_eso = [ pos for pos in embedding.possibilities() if pos[0] == eso ] assert len(possible_outlinks_from_eso) == 0 def test_block_embedding_is_unique(): """Tests that other embedding options are removed once one of them is chosen""" infra = InfrastructureNetwork() nso1 = infra.add_source(pos=(0, 0), transmit_power_dbm=26, name="nso1") nso2 = infra.add_source(pos=(2, 0), transmit_power_dbm=26, 
name="nso2") _nsi = infra.set_sink(pos=(0, 1), transmit_power_dbm=16, name="nsi") n1 = infra.add_intermediate(pos=(1, 0), transmit_power_dbm=16, name="n1") _n2 = infra.add_intermediate(pos=(1, 1), transmit_power_dbm=16, name="n2") overlay = OverlayNetwork() bso1 = overlay.add_source(name="bso1") bso2 = overlay.add_source(name="bso2") binterm = overlay.add_intermediate(name="binterm") bsi = overlay.set_sink(name="bsi") overlay.add_link(bso1, binterm) # there are multiple in-edges to binterm, which could lead to # multiple different embeddings overlay.add_link(bso2, binterm) overlay.add_link(binterm, bsi) embedding = PartialEmbedding( infra, overlay, source_mapping=[(bso1, nso1), (bso2, nso2)] ) eso1 = ENode(bso1, nso1) def embeddings_for_block(block): count = 0 for node in embedding.graph.nodes(): if node.block == block: count += 1 return count # could embed binterm in multiple blocks assert embeddings_for_block(binterm) > 1 # decide for one embedding assert embedding.take_action(eso1, ENode(binterm, n1), 0) # other options are removed assert embeddings_for_block(binterm) == 1 def test_block_capacity(): """Tests that per-node capacity is respected for each timeslot""" infra = InfrastructureNetwork() nso = infra.add_source(pos=(0, 0), transmit_power_dbm=30, name="nso") nin1 = infra.add_intermediate( pos=(-1, 1), transmit_power_dbm=30, capacity=42, name="nin1" ) nin2 = infra.add_intermediate( pos=(1, 1), transmit_power_dbm=30, capacity=5, name="nin2" ) _nsi = infra.set_sink(pos=(0, 1), transmit_power_dbm=30, name="nsi") overlay = OverlayNetwork() # ignore sinr constraints -> 0 datarate requirements bso = overlay.add_source(name="bso", datarate=0) bin1 = overlay.add_intermediate(requirement=40, name="bin1", datarate=0) bin2 = overlay.add_intermediate(requirement=5, name="bin2", datarate=0) bsi = overlay.set_sink(name="bsi", datarate=0) overlay.add_link(bso, bin1) overlay.add_link(bso, bin2) overlay.add_link(bin1, bsi) overlay.add_link(bin2, bsi) embedding = 
PartialEmbedding(infra, overlay, source_mapping=[(bso, nso)]) eso = ENode(bso, nso) possibilities = embedding.possibilities() # bin1 can be embedded in nin1, because 42>=40 assert (eso, ENode(bin1, nin1), 0) in possibilities # but not in nin2 because it does not have enough capacity assert (eso, ENode(bin1, nin2), 0) not in possibilities # bin2 has less requirements and can be embedded in either one assert (eso, ENode(bin2, nin1), 0) in possibilities assert (eso, ENode(bin2, nin2), 0) in possibilities # embed bin1 in nin1 assert embedding.take_action(ENode(bso, nso), ENode(bin1, nin1), 0) possibilities = embedding.possibilities() # pylint:disable=protected-access # The easiest way to test this, not too hard to adjust when # internals change. assert embedding._capacity_used[nin1] == 40 # which means bin2 can no longer be embedded in it assert (eso, ENode(bin2, nin1), 0) not in possibilities # while it can still be embedded in nin2 assert (eso, ENode(bin2, nin2), 0) in possibilities def test_source_and_sink_capacity_check(): """Tests that an embedding with invalid source or sink capacity cannot be created""" infra = InfrastructureNetwork() nso = infra.add_source( pos=(0, 0), transmit_power_dbm=30, capacity=0, name="nso" ) _nsi = infra.set_sink( pos=(1, 0), transmit_power_dbm=30, capacity=0, name="nsi" ) def embedding_fails(overlay): source_block = list(overlay.sources)[0] print(f"bso is {source_block}") failed = False try: _embedding = PartialEmbedding( infra, overlay, source_mapping=[(source_block, nso)] ) except AssertionError as _: failed = True return failed # this is fine overlay = OverlayNetwork() bso = overlay.add_source(name="bso", requirement=0) bsi = overlay.set_sink(name="bin", requirement=0) overlay.add_link(bso, bsi) assert not embedding_fails(overlay) # source requirement not met overlay = OverlayNetwork() bso = overlay.add_source(name="bso", requirement=1) bsi = overlay.set_sink(name="bin", requirement=0) overlay.add_link(bso, bsi) assert 
embedding_fails(overlay) # sink requirement not met overlay = OverlayNetwork() bso = overlay.add_source(name="bso", requirement=0) bsi = overlay.set_sink(name="bin", requirement=1) overlay.add_link(bso, bsi) assert embedding_fails(overlay) def test_non_broadcast_parallel_communications_impossible(): """Tests that non-broadcast parallel communications *do* affect the SINR.""" infra = InfrastructureNetwork() nso1 = infra.add_source(pos=(1, 0), transmit_power_dbm=30, name="nso1") nso2 = infra.add_source(pos=(-1, 0), transmit_power_dbm=30, name="nso2") nin = infra.add_intermediate(pos=(1, 0), transmit_power_dbm=30, name="nin") nsi = infra.set_sink(pos=(2, 0), transmit_power_dbm=30, name="nsi") overlay = OverlayNetwork() bso1 = overlay.add_source(name="bso1") bso2 = overlay.add_source(name="bso2") bsi = overlay.set_sink(name="bsi") overlay.add_link(bso1, bsi) overlay.add_link(bso2, bsi) embedding = PartialEmbedding( infra, overlay, source_mapping=[(bso1, nso1), (bso2, nso2)] ) # both sources use nin as a relay eso1 = ENode(bso1, nso1) eso2 = ENode(bso2, nso2) esi = ENode(bsi, nsi) ein1 = ENode(bso1, nin, bsi) ein2 = ENode(bso2, nin, bsi) assert embedding.take_action(eso1, ein1, 0) assert embedding.take_action(eso2, ein2, 1) assert embedding.take_action(ein1, esi, 2) assert (ein2, esi, 2) not in embedding.possibilities() def test_relay_circles_impossible(): """Tests that each relay node can be taken at most once in a path""" infra = InfrastructureNetwork() N2 = infra.add_source(name="N2", pos=(4.8, 5.7), transmit_power_dbm=29.7) N4 = infra.add_intermediate( name="N4", pos=(4.7, 8.8), transmit_power_dbm=13.4 ) _N1 = infra.set_sink(name="N1", pos=(7.7, 5.2), transmit_power_dbm=22.9) overlay = OverlayNetwork() B2 = overlay.add_source(name="B2", requirement=0, datarate=0) _B5 = overlay.add_intermediate(name="B5", requirement=0, datarate=0) B4 = overlay.add_intermediate(name="B4", requirement=0, datarate=0) B1 = overlay.set_sink(name="B1", requirement=0, datarate=0) 
overlay.add_link(B2, B4) overlay.add_link(B4, B1) embedding = PartialEmbedding(infra, overlay, source_mapping=[(B2, N2)]) eso = ENode(B2, N2) ein1 = ENode(B2, N4, B4) ein2 = ENode(B2, N2, B4) assert embedding.take_action(eso, ein1, 0) # n2 was already visited, circle assert not embedding.take_action(ein1, ein2, 1) def test_same_connection_not_possible_twice(): """Tests that the same connection cannot be taken twice""" infra = InfrastructureNetwork() N2 = infra.add_source(name="N2", pos=(2.3, 2.2), transmit_power_dbm=26.9) N3 = infra.add_intermediate(name="N3", pos=(0, 4), transmit_power_dbm=11) _N1 = infra.set_sink(name="N1", pos=(9.4, 9.5), transmit_power_dbm=26.1) overlay = OverlayNetwork() B2 = overlay.add_source(name="B2") B3 = overlay.add_intermediate(name="B3") B1 = overlay.set_sink(name="B1") overlay.add_link(B2, B1) overlay.add_link(B2, B3) overlay.add_link(B3, B1) embedding = PartialEmbedding(infra, overlay, source_mapping=[(B2, N2)]) eso = ENode(B2, N2) ein = ENode(B2, N3, B1) assert embedding.take_action(eso, ein, 0) # this connection has already been taken assert not embedding.take_action(eso, ein, 1) def test_half_duplex(): """Tests that a node cannot send and receive at the same time""" infra = InfrastructureNetwork() nso = infra.add_source(name="nso", pos=(0, 0), transmit_power_dbm=30) ni = infra.add_intermediate(name="ni", pos=(1, 0), transmit_power_dbm=30) nsi = infra.set_sink(name="nsi", pos=(2, 0), transmit_power_dbm=30) overlay = OverlayNetwork() # links have no datarate requirements, so SINR concerns don't apply bso = overlay.add_source(name="bso", datarate=0) bsi = overlay.set_sink(name="bsi", datarate=0) overlay.add_link(bso, bsi) embedding = PartialEmbedding(infra, overlay, source_mapping=[(bso, nso)]) eso = ENode(bso, nso) esi = ENode(bsi, nsi) ein = ENode(bso, ni, bsi) print(embedding.possibilities()) assert embedding.take_action(eso, ein, 0) assert not embedding.take_action(ein, esi, 0) def test_connection_within_node_always_possible(): 
"""Tests that a node cannot send and receive at the same time""" infra = InfrastructureNetwork() nso = infra.add_source(name="nso", pos=(0, 0), transmit_power_dbm=30) nsi = infra.set_sink(name="nsi", pos=(2, 0), transmit_power_dbm=30) overlay = OverlayNetwork() bso = overlay.add_source(name="bso", datarate=0, requirement=0) bin_ = overlay.add_intermediate(name="bin", datarate=0, requirement=0) bsi = overlay.set_sink(name="bsi", datarate=0, requirement=0) overlay.add_link(bso, bin_) overlay.add_link(bin_, bsi) embedding = PartialEmbedding(infra, overlay, source_mapping=[(bso, nso)]) eso = ENode(bso, nso) ein = ENode(bin_, nsi) esi = ENode(bsi, nsi) assert embedding.take_action(eso, ein, 0) # even though nsi is already receiving in ts 0 assert embedding.take_action(ein, esi, 0) def test_self_loop_does_not_interfere(): """Tests self-loop does not interfere with other connections""" infra = InfrastructureNetwork() nso1 = infra.add_source(name="nso1", pos=(0, 0), transmit_power_dbm=30) nso2 = infra.add_source(name="nso2", pos=(0, 1), transmit_power_dbm=30) nsi = infra.set_sink(name="nsi", pos=(2, 0), transmit_power_dbm=30) overlay = OverlayNetwork() bso1 = overlay.add_source(name="bso1", datarate=5, requirement=0) bso2 = overlay.add_source(name="bso2", datarate=5, requirement=0) bin_ = overlay.add_intermediate(name="bin", datarate=5, requirement=0) bsi = overlay.set_sink(name="bsi", datarate=5, requirement=0) overlay.add_link(bso1, bin_) overlay.add_link(bin_, bsi) overlay.add_link(bso2, bsi) embedding = PartialEmbedding( infra, overlay, source_mapping=[(bso1, nso1), (bso2, nso2)] ) eso1 = ENode(bso1, nso1) eso2 = ENode(bso2, nso2) ein = ENode(bin_, nsi) esi = ENode(bsi, nsi) assert embedding.take_action(eso1, ein, 0) # self loop at node esi, ts 1 assert embedding.take_action(ein, esi, 1) # can still send to that node at the same ts assert embedding.take_action(eso2, esi, 1)
<reponame>raphiz/bsAbstimmungen
from . import utils
from datetime import datetime
import os
import re
import requests
import logging
from bs4 import BeautifulSoup
from ..exceptions import ParserException, AlreadyImportedException

logger = logging.getLogger(__name__)


def fetch(db, fromDate, toDate, directory='build/cache'):
    """Scrape all votes in [fromDate, toDate], download the vote PDFs
    into *directory* and import them into *db*.

    Votes that were already imported are skipped (logged, not raised).
    """
    scraper = VotingScraper()
    logger.info('Searching for votes....')
    docs = scraper.find(fromDate, toDate)
    logger.info('Found {0} votes'.format(len(docs)))
    saved = _download(docs, directory)
    parser = VotingParser(db)
    for current in saved:
        try:
            parser.parse(current)
        except AlreadyImportedException as e:
            logger.info(e)


# TODO: spit - one part into fetch, the other into utils
def _download(docs, directory):
    """Download every URL in *docs* into *directory* (created if
    missing) and return the list of local file paths."""
    if not os.path.exists(directory):
        os.makedirs(directory)
    saved = []
    for doc in docs:
        # use the last URL path segment as the local file name
        local_filename = os.path.join(directory, doc.split('/')[-1])
        utils.download(doc, local_filename)
        saved.append(local_filename)
    return saved


class VotingScraper:
    """Scrapes vote-PDF URLs from the Basel Grosser Rat voting archive
    by walking legislation pages -> meeting-date pages -> vote links."""

    BASE = 'http://abstimmungen.grosserrat-basel.ch/'

    def find(self, fromDate, toDate):
        """Return all vote document URLs between fromDate and toDate."""
        legislations = self._fetch_legislations(fromDate, toDate)
        dates = self._fetch_meeting_dates(legislations, fromDate, toDate)
        return self._fetch_votings(dates)

    def _fetch_votings(self, dates):
        # Vote links are the all-numeric anchors on a meeting-date page.
        result = []
        for date in dates:
            soup = BeautifulSoup(self._page(date), "html.parser")
            for link in soup.find_all('a'):
                if re.match('^[0-9]*$', link.string):
                    result.append(self.BASE + link['href'])
        return result

    def _fetch_meeting_dates(self, legislations, fromDate, toDate):
        # Meeting-date links look like "dd.mm.yyyy".
        # NOTE(review): the pattern is not a raw string; '\.' works today
        # but raw strings (r'...') would avoid invalid-escape warnings.
        result = []
        for legislation in legislations:
            soup = BeautifulSoup(self._page(legislation), "html.parser")
            for link in soup.find_all('a'):
                if re.match('^[0-9]{2}\.[0-9]{2}\.[0-9]{4}$', link.string):
                    date = self._date(link.string)
                    if self._in_timespan(fromDate, toDate, date):
                        result.append(link['href'])
        return result

    def _fetch_legislations(self, fromDate, toDate):
        # Legislation links start with 'Amtsjahr'; their begin/end dates
        # are sliced from fixed positions in the surrounding span text.
        # NOTE(review): the [21:31] / [34:44] slices assume a fixed page
        # layout — verify against the live archive page if it changes.
        soup = BeautifulSoup(self._page('index_archiv_v2.php'), "html.parser")
        result = []
        for span in soup.find_all('span'):
            link = span.find('a')
            if link is None:
                continue
            startswith = 'Amtsjahr'
            if len(link.string) > len(startswith) and \
                    link.string[:len(startswith)] == startswith:
                begin = self._date(span.text[21:31])
                end = self._date(span.text[34:44])
                if self._in_timespan(fromDate, toDate, begin, end):
                    result.append(link['href'])
        return result

    def _page(self, page):
        # Fetch a page relative to BASE and return its HTML text.
        return requests.get(self.BASE + page).text

    def _in_timespan(self, fromDate, toDate, begin, end=None):
        """Return True iff [begin, end] overlaps [fromDate, toDate].

        With end=None a single date *begin* is tested.
        """
        if end is None:
            end = begin
        # If the end date was earlier than the fromDate
        # or If begin date was later than the toDate
        if end < fromDate or begin > toDate:
            return False
        return True

    def _date(self, string):
        # Parse a 'dd.mm.yyyy' date string.
        return datetime.strptime(string, '%d.%m.%Y')


class VotingParser:
    """Parses a vote-result PDF (as a flat list of text lines) into the
    'votes' and 'councillors' collections of a MongoDB-style *db*."""

    # Maps the PDF's single-letter vote markers to readable values.
    enum_mapping = {'J': 'yes', 'N': 'no', 'E': 'abstain',
                    'A': 'away', 'P': 'president'}
    # Current PDF content as a list of lines; set per parse() call.
    lines = None

    def __init__(self, db):
        self.db = db

    def parse(self, file):
        """Parse one vote PDF and store the vote plus the individual
        councillor votings.

        Raises AlreadyImportedException if this source file (or the same
        vote nr/timestamp) was imported before.
        """
        self.idx = -1
        basename = os.path.basename(file)
        # NOTE(review): Cursor.count() is deprecated in modern pymongo;
        # count_documents() is the replacement — verify driver version.
        if self.db['votes'].find({'source': basename}).count() > 0:
            raise AlreadyImportedException(
                'The vote from source {0} is already imported'.format(basename))
        self.lines = utils.get_pdf_content(file)
        vote = self._parseVote(basename)
        # NOTE(review): hard-coded 100 councillor rows per vote PDF —
        # presumably the fixed size of the Grosser Rat; confirm.
        for i in range(0, 100):
            self._parseVoting(vote)

    def _parseVoting(self, vote):
        # A voting row is "<full name> (<fraction>)" followed by the
        # single-letter vote marker on the next token.
        match = re.fullmatch('([^\(\)]*)\s\(([a-zA-Z\/]*)\)', self.token())
        fullname = match.group(1)
        fraction = match.group(2)
        self._create_councillor_if_not_exists(fullname, fraction)
        self.db['councillors'].update_one(
            {'fullname': fullname, 'fraction': fraction},
            {'$push': {'votings': {
                'vote': vote['_id'],
                'voting': self.enum_mapping.get(self.token(), None)
            }}
            })

    def _create_councillor_if_not_exists(self, fullname, fraction):
        # Insert a councillor document on first sight of the name.
        councillor = self.db['councillors'].find_one({"fullname": fullname})
        if councillor is None:
            councillor = {"fullname": fullname,
                          "fraction": fraction,
                          "votings": []}
            # NOTE(review): Collection.insert() is deprecated in modern
            # pymongo in favour of insert_one().
            self.db['councillors'].insert(councillor)

    def _parseVote(self, basename):
        """Consume the PDF header tokens and build + store the vote
        document; returns the stored dict (with '_id' after insert)."""
        vote = {
            'source': basename
        }
        self._assertToken('<NAME>')
        self._assertToken('R<PASSWORD>')
        self._assertToken('Nr')
        vote['nr'] = int(self.token())
        # date token and time token are consumed in order
        vote['timestamp'] = datetime.strptime(
            '%s %s' % (self.token(), self.token()),
            '%d.%m.%Y %H:%M:%S'
        )
        vote['type'] = self.token()
        query = self.db['votes'].find(
            {'nr': vote['nr'], 'timestamp': vote['timestamp']})
        if query.count() > 0:
            raise AlreadyImportedException(
                'The vote nr {0} from {1} is already imported'.format(
                    vote['nr'], vote['timestamp']))
        self._assertToken('Ergebnis der Abstimmung')
        self._assertToken('G<PASSWORD>')
        vote['affair'] = self._parseUntil('Gegenstand / Antrag')
        vote['proposal'] = self._parseUntil('Abstimmungsfrage')
        # question ends at the first "<name> (<FRACTION>)" voting row
        vote['question'] = self._parseUntil('([^\(\)\s]*\s)*\([A-Z]*\)', True)
        # un-consume the voting row so _parseVoting sees it again
        self._step_back()
        self.db['votes'].insert(vote)
        return vote

    def _parseUntil(self, stopper, regex=False):
        """Concatenate tokens (space-separated) until *stopper* matches;
        the stopper token itself is consumed but not included."""
        text = ''
        nextLine = self.token()
        while(
            (not regex and nextLine != stopper) or
            (regex and re.fullmatch(stopper, nextLine) is None)
        ):
            # TODO: handle words with a dash
            if(len(text) == 0):
                text = nextLine
            else:
                text = '%s %s' % (text, nextLine)
            nextLine = self.token()
        return text

    def _assertToken(self, expected):
        # Consume one token and fail loudly if it is not the expected one.
        actual = self.token()
        if(actual != expected):
            raise ParserException(
                'Unexpected message in PDF. Excepted "%s" but recieved "%s"'
                % (expected, actual)
            )

    def _step_back(self):
        # Rewind the token cursor by one.
        self.idx = self.idx-1

    def token(self):
        # Advance the cursor and return the next PDF line.
        self.idx = self.idx+1
        return self.lines[self.idx]
#!/usr/bin/env python
"""
Fraunhofer IML
Department Automation and Embedded Systems
Tabsize : 4
Charset : UTF-8
"""

__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"

import os

from MARSEntity import MARSEntity
from NodeLaunchException import NodeLaunchException

import rospy
import roslaunch

# General mars node parameter
_PARAM_NAME_ID = "id"
_PARAM_NAME_DESCRIPTION = "description"
_PARAM_NAME_X_POS = "x_pos"
_PARAM_NAME_Y_POS = "y_pos"
_PARAM_NAME_IS_LOCKED = "is_locked"
_PARAM_NAME_TYPE = "type"
_PARAM_NAME_FRAME_ID = "frame_id"
_PARAM_NAME_NODE_RATE = "node_rate"
_PARAM_NAME_DRAWING_RATE = "drawing_rate"
_PARAM_NAME_FORBIDDEN_HAZARD_TYPES = "forbidden_hazard_types"
_PARAM_NAME_FORBIDDEN_VEHICLE_TYPES = "forbidden_vehicle_types"
_PARAM_NAME_MAXIMUM_HEIGHT = "maximum_height"
_PARAM_NAME_MAXIMUM_TOTAL_WEIGHT = "maximum_total_weight"

_TOPOLOGY_NODE_RATE = 1000
_TOPOLOGY_DRAWING_RATE = 0.1
_TOPOLOGY_FRAME_ID = "map"

# Vertex specific parameters
_PARAM_NAME_INGOING_EDGE_IDS = "ingoing_edge_ids"
_PARAM_NAME_OUTGOING_EDGE_IDS = "outgoing_edge_ids"

# Edge specific parameters
_PARAM_NAME_LENGTH = "length"
_PARAM_NAME_ORIGIN_VERTEX_ID = "origin_id"
_PARAM_NAME_DESTINATION_VERTEX_ID = "destination_id"
_PARAM_NAME_DIRECTION = "direction"
_PARAM_NAME_VELOCITIES = "velocities"
_PARAM_NAME_FOOTPRINT_X = "footprint_x"
_PARAM_NAME_FOOTPRINT_Y = "footprint_y"
_PARAM_NAME_MAXIMUM_LINEAR_VELOCITY = "maximum_linear_velocity"
_PARAM_NAME_MAXIMUM_ANGULAR_VELOCITY = "maximum_angular_velocity"
_PARAM_NAME_MAXIMUM_LINEAR_ACCELERATION = "maximum_linear_acceleration"
_PARAM_NAME_MAXIMUM_ANGULAR_ACCELERATION = "maximum_angular_acceleration"

_EDGE_DIRECTION_UNIDIRECTIONAL = 0
_EDGE_DIRECTION_BIDIRECTIONAL = 2


class MARSTopologieNode():
    """Launches MARS topology entity nodes via roslaunch and publishes
    their parameters on the ROS parameter server."""

    def __init__(self):
        # Start a scriptapi launcher once; it is reused for every node
        # launched through _run_node.
        self.__launch = roslaunch.scriptapi.ROSLaunch()
        self.__launch.start()

    def _set_general_params_on_parameter_server(self, mars_entity, node_name,
                                                ns):
        """Publish the parameters common to all MARS entities (id,
        description, position, lock state, type, node rate) for
        *node_name* under namespace *ns*."""
        self._set_param_on_parameter_server(str(mars_entity.get_id().get_id()),
                                            _PARAM_NAME_ID, node_name, ns)
        self._set_param_on_parameter_server(
            mars_entity.get_id().get_description(),
            _PARAM_NAME_DESCRIPTION, node_name, ns)
        # NOTE(review): x_pos is explicitly cast to float but y_pos is
        # not — presumably both should be floats; confirm before changing.
        self._set_param_on_parameter_server(float(mars_entity.get_x_position()),
                                            _PARAM_NAME_X_POS, node_name, ns)
        self._set_param_on_parameter_server(mars_entity.get_y_position(),
                                            _PARAM_NAME_Y_POS, node_name, ns)
        self._set_param_on_parameter_server(mars_entity.get_lock(),
                                            _PARAM_NAME_IS_LOCKED,
                                            node_name, ns)
        self._set_param_on_parameter_server(mars_entity.get_type().get_value(),
                                            _PARAM_NAME_TYPE, node_name, ns)
        self._set_param_on_parameter_server(_TOPOLOGY_NODE_RATE,
                                            _PARAM_NAME_NODE_RATE,
                                            node_name, ns)

    def _set_param_on_parameter_server(self, value, param_name, node_name,
                                       ns=""):
        """Set '<ns>/<node_name>/<param_name>' = value on the ROS
        parameter server. A missing leading slash on *ns* is added."""
        # Add beginning slash to ns
        if len(ns) > 0 and not ns[:1] == '/':
            ns = '/' + ns
        rospy.logdebug("set_param: " + param_name)
        rospy.set_param(ns + '/' + node_name + '/' + param_name, value)

    def _create_string_ids_from_id_array(self, id_array):
        """Return the ids of *id_array* as a list of strings (the ROS
        parameter server has no native id type)."""
        return [str(entity_id.get_id()) for entity_id in id_array]

    def _create_arrays_from_mars_paths(self, mars_paths):
        """Split *mars_paths* into three parallel lists: origin vertex
        ids, destination vertex ids and maximum velocities."""
        origin_vertex_ids = []
        dst_vertex_ids = []
        edge_velocities = []
        for mars_path in mars_paths:
            origin_vertex_ids.append(mars_path.get_origin_vertex_id())
            dst_vertex_ids.append(mars_path.get_dst_vertex_id())
            edge_velocities.append(mars_path.get_max_velocity())
        return origin_vertex_ids, dst_vertex_ids, edge_velocities

    def _run_node(self, node_pkg, node_type, node_name, ns):
        """
        Starts a ros node.

        Starts a ros node inside a given namespace etc.

        Args:
            node_pkg: name of the ros package.
            node_type: file inside the package which has to be started.
            node_name: name of the node.
            ns: namespace for the node which has to be started.

        Returns:

        Raises:
            Raises a 'NodeLaunchException' if the node can't be started.
        """
        node = roslaunch.core.Node(
            node_pkg, node_type, name=node_name, namespace=ns)

        process = self.__launch.launch(node)
        rospy.logdebug("[class MARSTopologieNode][_run_node] Launched node: "
                       + ns)

        if not process.is_alive():
            # BUGFIX: os.linesep previously raised a NameError here
            # because 'os' was never imported, masking the real launch
            # failure; 'import os' was added at module level.
            raise NodeLaunchException(
                "[class MARSTopologieNode][_run_node] Can't run ros node: "
                + os.linesep
                + "ns: " + str(ns) + os.linesep
                + "node_pkg: " + str(node_pkg) + os.linesep
                + "node_name: " + str(node_name) + os.linesep
                + "node_type: " + str(node_type))
import math
import os
import random
import shutil

import pandas as pd


def build_package_index():
    """Walk the VistA-M source tree and index every routine file.

    Returns:
        index: file name -> package name.  NOTE: a file name occurring in
            several packages keeps only the last package walked.
        packages: list of package directory names.
        code_path: file name -> absolute path of the file.
    """
    # Raw strings so backslashes in the Windows path are never treated as escapes.
    vista_code = r"G:\Download\VistA-M-master\Packages"
    index = {}
    code_path = {}
    packages = os.listdir(vista_code)
    for pk in packages:
        pk_path = os.path.join(vista_code, pk)
        for dir_name, _subdirs, file_list in os.walk(pk_path):
            for fname in file_list:
                index[fname] = pk
                code_path[fname] = os.path.join(dir_name, fname)
    return index, packages, code_path


def readReqTopicIndex():
    """Read the requirements spreadsheet.

    Returns:
        req_index: regulation id -> sub-section (topic).
        topics: the raw 'Sub-section' column.
        req_content: regulation id -> requirement text ('' -> ' ' for non-str).
    """
    req_path = r"G:\Download\Vista\VistA RequirementsHierarchy.xlsx"
    df = pd.read_excel(req_path, sheet_name="Sheet1")
    req_index = {}
    req_content = {}
    req_ids = df["Regulation ID"]
    topics = df["Sub-section"]
    contents = df["Requirement"]
    for req_id, topic, content in zip(req_ids, topics, contents):
        req_index[req_id] = topic
        if not isinstance(content, str):
            # NaN / numeric cells become a single space so downstream
            # tokenization never sees a non-string.
            content = " "
        req_content[req_id] = content
    return req_index, topics, req_content


def get_high_value_tokens(code_vec, req_vec, vsm):
    """
    Find the terms with high weight and rank them according to the weight

    :param code_vec: tf-idf vector of the code document as (token_id, score) pairs
    :param req_vec: tf-idf vector of the requirement as (token_id, score) pairs
    :param vsm: VSM-like object exposing ``tfidf_model.id2word``
    :return: list of (word, req_score, code_score, req_score * code_score),
        sorted by req_score descending
    """
    code_dict = dict(code_vec)
    req_dict = dict(req_vec)
    res = []
    for tk in code_dict:
        if tk in req_dict:
            req_score = req_dict[tk]
            code_score = code_dict[tk]
            tk_value = req_score * code_score
            word = vsm.tfidf_model.id2word[tk]
            res.append((word, req_score, code_score, tk_value))
    # NOTE(review): sorted by the requirement-side score (x[1]), not the
    # combined tk_value — presumably intentional; confirm with consumers.
    res = sorted(res, key=lambda x: x[1], reverse=True)
    return res


if __name__ == "__main__":
    # Project-local imports are deferred to the entry point so the helper
    # functions above can be imported without these modules installed.
    from Preprocessor import Preprocessor
    from VSM import VSM

    code_index, packages, code_path = build_package_index()  # key is file name, value is the package name
    req_index, topics, req_content = readReqTopicIndex()
    code_list = random.sample(list(code_index.keys()), 10)
    code_list.append("OOPSGUIT.m")
    code_content = {}
    is_reuse = False  # flip to True to reuse the cached csv files below
    code_df_file_name = "code_df.csv"
    req_df_file_name = "req_df.csv"
    default_df_schema = ["id", "content"]
    if os.path.isfile(code_df_file_name) and os.path.isfile(req_df_file_name) and is_reuse:
        code_df = pd.read_csv(code_df_file_name)
        req_df = pd.read_csv(req_df_file_name)
        # BUG FIX: fillna's result was assigned to a misspelled name
        # ('code_def'), so code_df silently kept its NaN entries.
        code_df = code_df.fillna("")
        req_df = req_df.fillna("")
        code_content = dict((id, content) for id, content in zip(code_df.id, code_df.content))
        req_content = dict((id, content) for id, content in zip(req_df.id, req_df.content))
    else:
        preprocessor = Preprocessor()
        for code_id in code_list:
            with open(code_path[code_id], encoding='utf8', errors="ignore") as fin:
                code_content[code_id] = " ".join(preprocessor.get_tokens(fin.read(), "en"))
        code_df = pd.DataFrame(list(code_content.items()), columns=default_df_schema)
        code_df.to_csv(code_df_file_name)
        for req_id in req_content:
            # NOTE(review): requirements are stemmed while code is only
            # tokenized — confirm this asymmetry is intended.
            req_content[req_id] = " ".join(preprocessor.get_stemmed_tokens(req_content[req_id], "en"))
        req_df = pd.DataFrame(list(req_content.items()), columns=default_df_schema)
        req_df.to_csv(req_df_file_name)

    vsm = VSM("en")
    docs = []
    docs.extend(list(req_content.values()))
    docs.extend(list(code_content.values()))
    vsm.build_model(docs)

    cnt = 0
    req_vec_dict = dict()  # cache of requirement tf-idf vectors
    for code_id in code_content:
        cnt += 1
        output_dir = os.path.join("res", str(cnt))
        if os.path.isdir(output_dir):
            shutil.rmtree(output_dir)
        os.makedirs(output_dir)
        links = []
        code_tokens = code_content[code_id].split()
        code_vec = vsm.tfidf_model[vsm.tfidf_model.id2word.doc2bow(code_tokens)]
        for req_id in req_content:
            req_tokens = req_content[req_id].split()
            score = vsm._get_doc_similarity(code_tokens, req_tokens)
            req_vec = req_vec_dict.get(req_id,
                                       vsm.tfidf_model[vsm.tfidf_model.id2word.doc2bow(req_tokens)])
            high_value_tokens = get_high_value_tokens(code_vec, req_vec, vsm)
            links.append((req_id, score, req_vec, code_vec, high_value_tokens))
        links = sorted(links, key=lambda x: x[1], reverse=True)
        link_df = pd.DataFrame()
        link_df["req_id"] = [x[0] for x in links]
        link_df["score"] = [x[1] for x in links]
        link_df["high_value_tokens"] = [x[4] for x in links]
        link_df.to_csv(os.path.join(output_dir, "{}_links.csv".format(code_id)))
    print("Finish")
# galaxydb/statics.py
"""Low-level helpers for the galaxydb on-disk format: sizing integers,
big-endian (de)serialization, and chunked searching inside file objects."""


def bytes_needed(num):
    """Return the minimal number of bytes able to hold *num* unsigned."""
    i = 1
    while True:
        if num < 2**(8*i):
            return i
        i += 1


def zeros_needed_fmt(num):
    """Return a zero-padded format string wide enough for any *num*-byte value."""
    num = 2**(8*num)
    zeros = str(num)
    return r"{:0"+str(len(zeros))+r"d}"


def max_int_bytes(b):
    # NOTE(review): this is 2**(8*b), i.e. one past the largest unsigned value
    # storable in b bytes — presumably used as an exclusive bound; verify callers.
    return 2**(8*b)


def print_bytes(seq):
    """Render a byte sequence as a list of two-digit uppercase hex strings."""
    return ["{:02X}".format(i) for i in seq]


def to_bytes_e(num, size=2):
    """Big-endian encode *num*; size=0 means 'smallest size that fits'."""
    if size == 0:
        size = bytes_needed(num)
    return num.to_bytes(size, byteorder='big')


def from_bytes_e(num):
    """Big-endian decode a byte sequence to an int."""
    return int.from_bytes(num, byteorder='big')


def find_seq(f, seq, start_pos=None):
    """Scan file *f* for *seq*, starting at *start_pos* (default: current position).

    Returns (match_offset, end_of_scanned_region); match_offset is -1 when the
    sequence is absent.  The file position is restored before returning.
    """
    # this function returns files to the point where it were when the function was called
    seek_back = f.tell()
    if start_pos is not None:
        f.seek(start_pos)
    else:
        start_pos = seek_back
    buf = 64*1024
    # Bug fixes vs. the previous version:
    #  * keep len(seq)-1 trailing bytes between chunks so a match spanning a
    #    64 KiB chunk boundary is not missed;
    #  * advance the chunk's base offset by the size of the chunk just
    #    searched (it used to advance by the size of the *next* chunk, which
    #    mis-reported offsets once a short final chunk was read).
    overlap = len(seq) - 1
    chunk_start = start_pos
    part = f.read(buf)
    bytes_walked = len(part)
    while part:
        pos = part.find(seq)
        if pos != -1:
            f.seek(seek_back)
            return (chunk_start + pos, start_pos + bytes_walked)
        nxt = f.read(buf)
        if not nxt:
            break
        bytes_walked += len(nxt)
        tail = part[len(part) - overlap:] if overlap > 0 else part[:0]
        chunk_start += len(part) - len(tail)
        part = tail + nxt
    f.seek(seek_back)
    return (-1, start_pos + bytes_walked)


def read_until(f, seq, start_pos=None, none_not_found=True):
    """Read from *f* up to (excluding) the next occurrence of *seq*.

    When *seq* is absent: return None if *none_not_found*, else the whole
    remainder of the file from the current position.
    """
    where = f.tell()
    pos = find_seq(f, seq, start_pos)
    if pos[0] > -1:
        return f.read(pos[0] - where)
    if none_not_found:
        return None
    # Bug fix: the old fallback did `while r: r += f.read(...)`, which loops
    # forever once EOF returns an empty read (r stays truthy, never grows).
    f.seek(where)
    r = f.read(64*1024)
    while True:
        chunk = f.read(64*1024)
        if not chunk:
            return r
        r += chunk


def seek_until(f, seq, start_pos=None):
    """Position *f* at the next occurrence of *seq* (no-op when absent)."""
    pos = find_seq(f, seq, start_pos)
    if pos[0] > -1:
        f.seek(pos[0])
    # returns the tuple of find_seq for any desired purpose
    return pos


def find_all_seq(f, seq, start_pos=None, return_only_pos=True):
    """Return every occurrence of *seq*; restores the file position on exit."""
    seek_back = f.tell()
    x = find_seq(f, seq, start_pos)
    ret = []
    while not x[0] == -1:
        ret.append(x[0] if return_only_pos else x)
        # resume just past the match so overlapping hits are not re-reported
        x = find_seq(f, seq, x[0] + len(seq))
    f.seek(seek_back)
    return ret


def logic_oper(comp, data, val_b):
    """Evaluate SQL-ish comparison *comp*.

    :param data: the value returned from the database
    :param val_b: the value tested
    """
    if comp == '=':
        return data == val_b
    elif comp == '>':
        return data > val_b
    elif comp == '<':
        return data < val_b
    elif comp == '!=':
        return data != val_b
    elif comp == '<=':
        return data <= val_b
    elif comp == '>=':
        return data >= val_b
    elif comp == 'LIKE%':
        return data.find(val_b) == 0
    elif comp == '%LIKE':
        return data[-len(val_b):] == val_b
    elif comp == 'LIKE':
        return data.find(val_b) > -1
    # Bug fix: an unrecognised operator used to raise UnboundLocalError.
    raise ValueError("unknown comparison operator: " + repr(comp))
# Copyright 2019, <NAME>, mailto:<<EMAIL>>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Details see below in class definition.
"""
import os
import re
import shutil
import sys
from logging import info

from nuitka import Options
from nuitka.plugins.PluginBase import NuitkaPluginBase
from nuitka.utils.FileOperations import makePath
from nuitka.utils.Utils import isWin32Windows

# ------------------------------------------------------------------------------
# The following code is largely inspired by PyInstaller hook_numpy.core.py
# ------------------------------------------------------------------------------
# START
# ------------------------------------------------------------------------------


def get_sys_prefix():
    """ Return sys.prefix as guaranteed abspath format.
    """
    # Prefer the real interpreter prefix when running inside a virtualenv.
    sys_prefix = getattr(sys, "real_prefix", getattr(sys, "base_prefix", sys.prefix))
    sys_prefix = os.path.abspath(sys_prefix)
    return sys_prefix


def get_scipy_core_binaries(module):
    """ Return binaries from the extra-dlls folder (Windows only).

    Returns:
        list of (abspath, suffix_start) pairs; suffix_start strips the
        directory prefix so the files land in the dist root.
    """
    binaries = []

    scipy_dir = module.getCompileTimeDirectory()
    extra_dll = os.path.join(scipy_dir, "extra-dll")
    if not os.path.isdir(extra_dll):
        return binaries

    netto_bins = os.listdir(extra_dll)
    suffix_start = len(extra_dll) + 1  # this will put the files in dist root

    for f in netto_bins:
        if not f.endswith(".dll"):
            continue
        binaries.append((os.path.join(extra_dll, f), suffix_start))

    return binaries


def get_numpy_core_binaries(module):
    """ Return any binaries in numpy/core and/or numpy/.libs, whether or not
    actually used by our script.

    Notes:
        This covers the special cases like MKL binaries, which cannot be
        detected by dependency managers.

    Returns:
        tuple of abspaths of binaries
    """
    numpy_dir = module.getCompileTimeDirectory()
    numpy_core_dir = os.path.join(numpy_dir, "core")
    base_prefix = get_sys_prefix()

    binaries = []

    # first look in numpy/.libs for binaries
    libdir = os.path.join(numpy_dir, ".libs")
    suffix_start = len(libdir) + 1
    if os.path.isdir(libdir):
        dlls_pkg = [f for f in os.listdir(libdir)]
        binaries += [[os.path.join(libdir, f), suffix_start] for f in dlls_pkg]

    # then look for libraries in numpy.core package path
    # should already return the MKL files in ordinary cases
    re_anylib = re.compile(r"\w+\.(?:dll|so|dylib)", re.IGNORECASE)
    dlls_pkg = [f for f in os.listdir(numpy_core_dir) if re_anylib.match(f)]
    binaries += [[os.path.join(numpy_core_dir, f), suffix_start] for f in dlls_pkg]

    # Also look for MKL libraries in folder "above" numpy.
    # This should meet the layout of Anaconda installs.
    if isWin32Windows():
        lib_dir = os.path.join(base_prefix, "Library", "bin")
        suffix_start = len(lib_dir) + 1
    else:
        lib_dir = os.path.join(base_prefix, "lib")
        suffix_start = len(lib_dir) + 1
    if not os.path.isdir(lib_dir):
        return binaries

    re_mkllib = re.compile(r"^(?:lib)?mkl\w+\.(?:dll|so|dylib)", re.IGNORECASE)

    for f in os.listdir(lib_dir):
        if isWin32Windows():
            # On Windows, Anaconda ships MKL helpers under these prefixes.
            if not (f.startswith(("libi", "libm", "mkl")) and f.endswith(".dll")):
                continue
        else:
            if not re_mkllib.match(f):
                continue
        binaries.append([os.path.join(lib_dir, f), suffix_start])

    return binaries


# ------------------------------------------------------------------------------
# END PyInstaller inspired code
# ------------------------------------------------------------------------------


class NumpyPlugin(NuitkaPluginBase):
    """ This class represents the main logic of the plugin.

    This is a plugin to ensure scripts using numpy, scipy, matplotlib, pandas,
    scikit-learn, etc. work well in standalone mode.

    While there already are relevant entries in the "ImplicitImports.py" plugin,
    this plugin copies any additional binary or data files required by many
    installations.

    Args:
        NuitkaPluginBase: plugin template class we are inheriting.
    """

    plugin_name = "numpy"  # Nuitka knows us by this name
    plugin_desc = "Required for numpy, scipy, pandas, matplotlib, etc."

    def __init__(self):
        self.enabled_plugins = None  # list of active standard plugins
        self.numpy_copied = False  # indicator: numpy files copied
        self.matplotlib = self.getPluginOptionBool("matplotlib", False)
        self.scipy = self.getPluginOptionBool("scipy", False)

        # When scipy support is off, pretend its files were already copied so
        # considerExtraDlls skips that branch entirely.
        if self.scipy:
            self.scipy_copied = False  # indicator: scipy files copied
        else:
            self.scipy_copied = True

    def considerExtraDlls(self, dist_dir, module):
        """ Copy extra shared libraries or data for this installation.

        Args:
            dist_dir: the name of the program's dist folder
            module: module object
        Returns:
            empty tuple
        """
        full_name = module.getFullName()

        if full_name == "numpy" and not self.numpy_copied:
            self.numpy_copied = True
            binaries = get_numpy_core_binaries(module)

            for f in binaries:
                bin_file, idx = f  # (filename, pos. prefix + 1)
                back_end = bin_file[idx:]
                tar_file = os.path.join(dist_dir, back_end)
                makePath(  # create any missing intermediate folders
                    os.path.dirname(tar_file)
                )
                shutil.copyfile(bin_file, tar_file)

            bin_total = len(binaries)  # anything there at all?
            if bin_total > 0:
                msg = "Copied %i %s from 'numpy' installation." % (
                    bin_total,
                    "file" if bin_total < 2 else "files",
                )
                info(msg)

        if full_name == "scipy" and not self.scipy_copied:
            self.scipy_copied = True
            binaries = get_scipy_core_binaries(module)

            for f in binaries:
                bin_file, idx = f  # (filename, pos. prefix + 1)
                back_end = bin_file[idx:]
                tar_file = os.path.join(dist_dir, back_end)
                makePath(  # create any missing intermediate folders
                    os.path.dirname(tar_file)
                )
                shutil.copyfile(bin_file, tar_file)

            bin_total = len(binaries)
            if bin_total > 0:
                msg = "Copied %i %s from 'scipy' installation." % (
                    bin_total,
                    "file" if bin_total < 2 else "files",
                )
                info(msg)

        return ()

    def onModuleEncounter(self, module_filename, module_name, module_kind):
        # Decide whether a module must be kept (True), dropped (False), or
        # left to other plugins (None / fall-through).
        # pylint: disable=too-many-branches,too-many-return-statements
        elements = module_name.split(".")
        if not self.scipy and elements[0] in ("scipy", "sklearn"):
            return False, "Omit unneeded components"
        if module_name == "scipy.sparse.csgraph._validation":
            return True, "Replicate implicit import"
        if elements[0] == "mpl_toolkits" and self.matplotlib is True:
            return True, "Needed by matplotlib"
        if module_name.getPackageName() is None:
            return None
        if module_name in ("cv2", "cv2.cv2", "cv2.data"):
            return True, "Needed for OpenCV"
        if module_name in (
            "sklearn.utils.sparsetools._graph_validation",
            "sklearn.utils.sparsetools._graph_tools",
        ):
            return True, "Needed by sklearn"
        if module_name in (
            "sklearn.utils.lgamma",
            "sklearn.utils.weight_vector",
            "sklearn.utils._unittest_backport",
        ):
            return True, "Needed by sklearn"

        # sklearn's vendored joblib/loky picks its process backend per OS.
        posix = (
            "sklearn.externals.joblib.externals.loky.backend.managers",
            "sklearn.externals.joblib.externals.loky.backend.synchronize",
            "sklearn.externals.joblib.externals.loky.backend.compat_posix",
            "sklearn.externals.joblib.externals.loky.backend._posix_reduction",
            "sklearn.externals.joblib.externals.loky.backend.popen_loky_posix",
        )
        win32 = (
            "sklearn.externals.joblib.externals.loky.backend.managers",
            "sklearn.externals.joblib.externals.loky.backend.synchronize",
            "sklearn.externals.joblib.externals.loky.backend._win_wait",
            "sklearn.externals.joblib.externals.loky.backend._win_reduction",
            "sklearn.externals.joblib.externals.loky.backend.popen_loky_win32",
        )
        if isWin32Windows():
            valid_list = win32
        else:
            valid_list = posix
        if module_name in valid_list:
            return True, "Needed by sklearn"
        if module_name == "sklearn.externals.joblib.externals.cloudpickle.dumps":
            return True, "Needed by sklearn"

        # some special handling for matplotlib:
        # keep certain modules depending on whether Tk or Qt plugins are enabled
        if self.enabled_plugins is None:
            self.enabled_plugins = [p for p in Options.getPluginsEnabled()]
        if "tk-inter" in self.enabled_plugins:
            if module_name in (
                "matplotlib.backends.backend_tk",
                "matplotlib.backends.backend_tkagg",
                "matplotlib.backend.tkagg",
            ):
                return True, "Needed for tkinter interaction"
        if "qt-plugins" in self.enabled_plugins:
            if module_name == "matplotlib.backends.backend_qt":
                return True, "Needed for Qt interaction"
        if module_name == "matplotlib.backends.backend_agg":
            return True, "Needed as standard backend"
        # Implicit None: no opinion on any other module.


class NumpyPluginDetector(NuitkaPluginBase):
    """ Only used if plugin is NOT activated.

    Notes:
        We are given the chance to issue a warning if we think we may be required.
    """

    plugin_name = "numpy"  # Nuitka knows us by this name

    @staticmethod
    def isRelevant():
        """ This method is called one time only to check, whether the plugin might make sense at all.

        Returns:
            True if this is a standalone compilation.
        """
        return Options.isStandaloneMode()

    def onModuleDiscovered(self, module):
        """ This method checks whether a numpy module is imported.

        Notes:
            For this we check whether its full name contains the string "numpy".
        Args:
            module: the module object
        Returns:
            None
        """
        full_name = module.getFullName().split(".")
        if "numpy" in full_name:
            self.warnUnusedPlugin("numpy support.")
'''
Author: <NAME>
GitHub: https://github.com/josephlyu

The dash app and layout for the index page.
'''

import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output

from layouts import layout_uk, layout_world, layout_map
from figures.figures_uk import (api_uk_last_update, api_uk_date, today_uk_newcases,
                                today_uk_newdeaths, today_uk_cumcases, today_uk_cumdeaths,
                                fig_index_cases, fig_index_deaths)

##### DEFINE GLOBAL VARIABLES #####
# Fixed sidebar on the left; page content fills the remaining width.
SIDEBAR_STYLE = {'position':'fixed', 'top':0, 'left':0, 'bottom':0,
                 'width':'22rem', 'padding':'1.5rem 1rem'}
CONTENT_STYLE = {'margin-left':'22rem', 'margin-right':'0rem',
                 'padding':'1rem 1rem', 'background-color':'#e6ecec'}
# Shared color palette for the sidebar widgets.
COLORS = {'side_title':'#ba3a0a', 'side_blue':'#3b6b7b', 'side_red':'#c65d35',
          'side_dark':'#666666', 'side_text':'#9e9e9e', 'side_time':'#2196f3'}

##### CREATE APP AND SERVER #####
# suppress_callback_exceptions: page layouts are injected dynamically, so
# callback targets are not all present at startup.
app = dash.Dash(__name__, title='COVID-19', update_title='Loading...',
                external_stylesheets=[dbc.themes.BOOTSTRAP],
                suppress_callback_exceptions=True)
application = app.server  # WSGI entry point for deployment

##### INDEX PAGE LAYOUT #####
# Static sidebar: logo, navigation buttons, latest UK figures, two mini charts.
sidebar = html.Div([
    html.Img(src='/assets/logo.png', style={'width':'10rem', 'margin-left':'4.83rem'}),
    html.Hr(),
    html.Span('COVID-19', style={'color':COLORS['side_title'], 'font-size':20, 'font-weight':'bold'}),
    html.P('Visualization and Forecasting', style={'color':COLORS['side_dark'], 'font-weight':'bold'}),
    html.Hr(),
    html.P([
        html.Span('Last update: ', style={'color':COLORS['side_text'], 'font-size':13}),
        html.Span(api_uk_last_update, style={'color':COLORS['side_time'], 'font-size':10})
    ]),
    dbc.Row([
        dbc.Col(
            dbc.ButtonGroup([
                dbc.Button('UK', href='/uk', id='uk-link', color='light',
                           style={'width':80, 'color':COLORS['side_blue'], 'font-weight':'bold'}),
                dbc.Button('World', href='/world', id='world-link', color='light',
                           style={'width':80, 'color':COLORS['side_blue'], 'font-weight':'bold'})
            ], size='sm')
        ),
        dbc.Col(
            dbc.Button('Map', href='/map', id='map-link', color='light', size='sm',
                       style={'width':80, 'color':COLORS['side_red'], 'font-weight':'bold', 'float':'right'}))
    ]),
    html.Hr(),
    html.P([
        html.Span('Latest Figures (UK)', style={'color':COLORS['side_text']}),
        html.P(api_uk_date, style={'color':COLORS['side_blue'], 'font-size':25, 'font-weight':'bold'})
    ]),
    dbc.Row([
        dbc.Col(html.Span('New cases', style={'color':COLORS['side_text']})),
        dbc.Col(html.Span('New deaths', style={'color':COLORS['side_text']}))
    ]),
    dbc.Row([
        dbc.Col(html.P(today_uk_newcases, style={'color':COLORS['side_dark'], 'font-size':25, 'font-weight':'bold'})),
        dbc.Col(html.P(today_uk_newdeaths, style={'color':COLORS['side_red'], 'font-size':25, 'font-weight':'bold'}))
    ]),
    dbc.Row([
        dbc.Col(html.Span('Total cases', style={'color':COLORS['side_text']})),
        dbc.Col(html.Span('Total deaths', style={'color':COLORS['side_text']}))
    ]),
    dbc.Row([
        dbc.Col(html.Span(today_uk_cumcases, style={'color':COLORS['side_dark'], 'font-size':25, 'font-weight':'bold'})),
        dbc.Col(html.Span(today_uk_cumdeaths, style={'color':COLORS['side_red'], 'font-size':25, 'font-weight':'bold'}))
    ]),
    html.Hr(),
    html.P('The Curve of Cases', style={'color':COLORS['side_text']}),
    dcc.Graph(figure=fig_index_cases, config={'displayModeBar':False}),
    html.Hr(),
    html.P('The Curve of Deaths', style={'color':COLORS['side_text']}),
    dcc.Graph(figure=fig_index_deaths, config={'displayModeBar':False})
], style=SIDEBAR_STYLE)

# Placeholder swapped out by render_page_content on navigation.
content = html.Div(id='page-content', style=CONTENT_STYLE)

app.layout = html.Div([dcc.Location(id='url'), sidebar, content])

##### INDEX PAGE CALLBACKS #####
@app.callback(
    [Output(f'{s}-link', 'active') for s in ['uk', 'world', 'map']],
    [Input('url', 'pathname')])
def toggle_active_links(pathname):
    """Highlight the nav button matching the current path ('/' maps to UK)."""
    if pathname == '/':
        return True, False, False
    return [pathname == f'/{s}' for s in ['uk', 'world', 'map']]

@app.callback(
    Output('page-content', 'children'),
    [Input('url', 'pathname')])
def render_page_content(pathname):
    """Swap in the layout for the requested page; unknown paths get a 404 card."""
    if pathname in ['/', '/uk']:
        return layout_uk
    elif pathname == '/world':
        return layout_world
    elif pathname == '/map':
        return layout_map
    return dbc.Jumbotron([
        html.H1('404: Not found', className='text-danger'),
        html.Hr(),
        html.P(f'The pathname {pathname} was not recognised...')
    ])

##### MAIN FUNCTION #####
if __name__ == '__main__':
    app.run_server(debug=True)
import time


# the pounce language runtime
def run(pl, debug = False, test_value_stack = []):
    """Interpret the program list *pl* and return the final value stack.

    Plain values (numbers, booleans, non-word strings, arrays, records) are
    pushed onto the value stack; any term naming an entry in the global
    ``words`` table is executed.  'true'/'false' literals become Python bools.
    """
    global words
    vs = []
    while pl != None and len(pl) > 0:
        next = pl[0]
        pl = pl[1:]
        if debug:
            print('about to', vs, next)
            time.sleep(0.3)
        if isValue(next, words) or isArray(next) or isRecord(next):
            if next == 'true':
                vs.append(True)
            elif next == 'false':
                vs.append(False)
            else:
                vs.append(next)
        elif next in words.keys():
            if debug:
                print('applying', vs, next, pl)
                time.sleep(0.3)
            if isfunction(words[next]):
                # built-in word: a Python callable over (value stack, program list)
                (vs, pl) = words[next](vs, pl)
            elif isinstance(words[next], str):
                # word defined as source text: parse and prepend to the program.
                # NOTE(review): ``jp`` (the pounce parser) must be importable
                # before any string-defined word is executed — confirm wiring.
                pl = jp.parse(words[next]) + pl
            elif isRecord(words[next]):
                if 'args' in words[next].keys():
                    # Bug fix: the record entries were read with attribute
                    # syntax (.args/.func) and the undefined name ``s``;
                    # use item access and the actual value stack ``vs``.
                    # NOTE(review): popping 'args' is destructive — the word
                    # loses its arg list after the first call; confirm intent.
                    arg_rec = {}
                    while len(words[next]['args']) > 0:
                        arg_rec[words[next]['args'].pop()] = vs.pop()
                    (vs, pl) = words[next]['func'](vs, pl, arg_rec)
            elif isArray(words[next]):
                # Bug fix: words defined as a quotation via 'def' (a list of
                # terms) previously fell through every branch and were
                # silently ignored; inline the definition into the program.
                pl = words[next] + pl
        else:
            print('unknown term or word:', next)
    return vs


def isValue(e, fun):
    """A term is a value if it is a number/bool, or a string not naming a word."""
    return (isinstance(e, int) or isinstance(e, float) or isinstance(e, bool)
            or (isinstance(e, str) and not e in fun.keys()))

def isNumber(e):
    return isinstance(e, int) or isinstance(e, float)

def isArray(a):
    return isinstance(a, (list,))

def isRecord(a):
    return isinstance(a, (dict,))

def isfunction(candidate):
    # Bug fix: the old check ("not str and not list") also classified dicts
    # as functions, so record-valued words crashed before reaching the
    # isRecord branch in run().  callable() is the accurate test.
    return callable(candidate)


def _halt(s, pl):
    # Sets the module-level halt flag; run() itself does not consult it yet.
    global halt
    halt = True
    return [s, pl]

def _def(s, pl):
    global words
    # usage: [words that the define a new-function] new-funcion-name def
    new_word = s.pop()
    new_definition = s.pop()
    words[new_word] = new_definition
    return [s, pl]

def _define(s, pl):
    global words
    # [param1 param2] [words the define a new-function] new-funcion define
    new_word = s.pop()
    new_definition = s.pop()
    new_params = s.pop()
    # TODO(review): new_params is currently discarded — parameterised words
    # are not yet implemented.
    words[new_word] = new_definition
    return [s, pl]

def _str_first(s, pl):
    # ( str -- str first-char ) ; peeks, does not pop
    a_str = s[-1]
    s.append(a_str[0:1])
    return [s, pl]

def _str_last(s, pl):
    # ( str -- str last-char ) ; peeks, does not pop
    a_str = s[-1]
    s.append(a_str[-1:])  # bug fix: [-2:-1] returned the second-to-last char
    return [s, pl]

def _str_length(s, pl):
    a_str = s.pop()
    s.append(len(a_str))
    return [s, pl]

def _str_append(s, pl):
    # ( str-a str-b -- str-ab )
    a_str = s.pop()
    s[-1] = s[-1] + a_str
    return [s, pl]

def _push(s, pl):
    # ( list item -- list ) append item to the list beneath it (in place)
    item = s.pop()
    target = s[-1]
    target.append(item)  # bug fix: Python lists have append(), not push()
    return [s, pl]

def _pop(s, pl):
    # ( list -- list item )
    target = s[-1]
    if isArray(target):
        item = target.pop()
        s.append(item)  # bug fix: was s.push(item), which doesn't exist
    else:
        # bug fix: 'runtime.log' was undefined; report like run() does
        print({'word': 'pop', 'error': "unable to 'pop' from non-Array"})
    return [s, pl]

def _dup(s, pl):
    # NOTE(review): duplicates by reference — arrays/records share identity.
    a = s[-1]
    s.append(a)
    return [s, pl]

def _add(s, pl):
    a = s.pop()
    b = s.pop()
    s.append(a + b)
    return [s, pl]

def _sub(s, pl):
    a = s.pop()
    b = s.pop()
    s.append(b - a)
    return [s, pl]

def _prod(s, pl):
    a = s.pop()
    b = s.pop()
    s.append(a * b)
    return [s, pl]

def _n_prod(s, pl):
    # Multiply the top two items while both are numbers, re-queueing itself so
    # it folds a whole run of numbers on the stack.
    if len(s) >= 2:
        a = s.pop()
        b = s.pop()
        if isNumber(a) and isNumber(b):
            s.append(a * b)
            pl.insert(0, 'n*')
            return [s, pl]
        s.append(b)
        s.append(a)
    # Bug fix: the len(s) < 2 case used to fall off the end and return None,
    # which crashed the interpreter's unpacking.
    return [s, pl]

def _eq(s, pl):
    a = s.pop()
    b = s.pop()
    s.append(a == b)
    return [s, pl]

def _gt(s, pl):
    a = s.pop()
    b = s.pop()
    s.append(b > a)
    return [s, pl]

def _lt(s, pl):
    a = s.pop()
    b = s.pop()
    s.append(b < a)
    return [s, pl]

def _ift(s, pl):
    # ( bool then-quotation -- ) run the quotation when the bool is truthy
    then_block = s.pop()
    expression = s.pop()
    if expression:
        if isArray(then_block):
            pl = then_block + pl
        else:
            pl.insert(0, then_block)
    return [s, pl]

def _ifte(s, pl):
    # ( bool then-quotation else-quotation -- ) two-armed conditional
    else_block = s.pop()
    then_block = s.pop()
    expression = s.pop()
    if expression:
        if isArray(then_block):
            pl = then_block + pl
        else:
            pl.insert(0, then_block)
    else:
        if isArray(else_block):
            pl = else_block + pl
        else:
            pl.insert(0, else_block)
    return [s, pl]

def _get(s, l):
    # (dict key -- dict value)
    key = s.pop()
    dictionary = s[-1]
    s.append(dictionary[key])
    return [s, l]

def _set(s, l):
    # (dict value key -- dict)
    key = s.pop()
    value = s.pop()
    dictionary = s[-1]
    dictionary[key] = value
    return [s, l]

def _apply(s, l):
    # (dict key fun -- dict) ; expands to: get fun... key set
    fun = s.pop()
    key = s[-1]
    l.insert(0, 'set')
    l.insert(0, key)
    l = fun + l  # concat arrays so that the program list (l) has words on it, not a list
    l.insert(0, 'get')
    return [s, l]

def _swap(s, l):
    a = s.pop()
    b = s.pop()
    s.append(a)
    s.append(b)
    return [s, l]

def _drop(s, l):
    s.pop()
    return [s, l]

def _dip(s, l):
    # ( a quotation -- ) run the quotation with 'a' temporarily removed
    f = s.pop()
    a = s.pop()
    l.insert(0, a)
    l = f + l
    return [s, l]


words = {
    'halt': _halt,
    'def': _def,
    'define': _define,
    'str-first': _str_first,
    'str-last': _str_last,
    'str-length': _str_length,
    'str-append': _str_append,
    'push': _push,
    'pop': _pop,
    'dup': _dup,
    '+': _add,
    '-': _sub,
    '*': _prod,
    'n*': _n_prod,
    '==': _eq,
    '<': _lt,
    '>': _gt,
    'if': _ift,
    'if-else': _ifte,
    'get': _get,
    'set': _set,
    'app': _apply,
    'swap': _swap,
    'drop': _drop,
    'dip': _dip
}

#def runScript(program_script, vs):
#    pl = jp.parse(program_script)
#    return run(pl, vs)
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,unused-import,reimported
"""Unit tests for scale_html_map_area_coords: scaling of <area coords="...">
values inside HTML image maps, plus the error paths of both public entry
points."""
import pathlib
from unittest import mock

# pylint: disable=no-name-in-module
import pytest  # type: ignore

import scale_html_map_area_coords.scale_html_map_area_coords as do


# --- apply_scaling: happy paths -------------------------------------------

def test_apply_scaling_ok_string():
    # Lines without a coords attribute pass through unchanged.
    assert do.apply_scaling(2, "imension is implicit") == "imension is implicit"

def test_apply_scaling_ok_empty_string():
    assert do.apply_scaling(2, '') == ''

def test_apply_scaling_ok_string_with_target():
    # 0 // 2 == 0, so an all-zero coords list is unchanged.
    assert do.apply_scaling(2, ' coords="0,0" no_rstrip') == ' coords="0,0" no_rstrip'

def test_apply_scaling_ok_string_with_non_zero_target_values():
    # Integer (floor) division: 1 // 2 == 0, 3 // 2 == 1.
    assert do.apply_scaling(2, ' coords="1,3" no_rstrip') == ' coords="0,1" no_rstrip'


# --- apply_scaling: malformed input ---------------------------------------

def test_apply_scaling_nok_string_with_target_start_but_no_end():
    # A coords attribute without a trailing delimiter cannot be split in two.
    message = r"not enough values to unpack \(expected 2, got 1\)"
    with pytest.raises(ValueError, match=message):
        do.apply_scaling(2, ' coords="0,0"no_rstrip')

def test_apply_scaling_nok_string_with_target_start_and_end_but_empty_list():
    message = r"invalid literal for int\(\) with base 10: ''"
    with pytest.raises(ValueError, match=message):
        do.apply_scaling(2, ' coords="" no_rstrip')

def test_apply_scaling_nok_invalid_reduction_type_string_with_target_ok():
    message = r"unsupported operand type\(s\) for //: 'int' and 'NoneType'"
    with pytest.raises(TypeError, match=message):
        do.apply_scaling(None, ' coords="0,0" no_rstrip')

def test_apply_scaling_nok_invalid_reduction_string_with_target_ok():
    message = r"integer division or modulo by zero"
    with pytest.raises(ZeroDivisionError, match=message):
        do.apply_scaling(0, ' coords="0,0" no_rstrip')

def test_apply_scaling_nok_list_of_ints():
    message = r"'list' object has no attribute 'rstrip'"
    with pytest.raises(AttributeError, match=message):
        do.apply_scaling(2, [1, 2, 3])

def test_apply_scaling_nok_int():
    message = r"'int' object has no attribute 'rstrip'"
    with pytest.raises(AttributeError, match=message):
        do.apply_scaling(2, 42)


# --- scale_html_map_area_coords: error paths ------------------------------
# NOTE(review): "gibven" in the test names below is a typo for "given"; kept
# as-is here since renaming tests is out of scope for a doc pass.

def test_scale_html_map_area_coords_nok_reduction_gibven_but_empty_path():
    message = r"path to html file missing"
    with pytest.raises(ValueError, match=message):
        do.scale_html_map_area_coords(2, '')

def test_scale_html_map_area_coords_nok_reduction_gibven_but_invalid_path():
    sequence_of_ints = [1, 2, 3]
    message = r"expected str, bytes or os.PathLike object, not list"
    with pytest.raises(TypeError, match=message):
        do.scale_html_map_area_coords(2, sequence_of_ints)

def test_scale_html_map_area_coords_nok_reduction_gibven_but_non_existing_path():
    nef = non_existing_file_path = 'file_does_not_exist'
    # Guard the precondition so the FileNotFoundError below is meaningful.
    assert pathlib.Path(nef).is_file() is False, f"Unexpected file {nef} exists which breaks this test"
    message = f"\\[Errno 2\\] No such file or directory: '{non_existing_file_path}'"
    with pytest.raises(FileNotFoundError, match=message):
        do.scale_html_map_area_coords(2, non_existing_file_path)


# --- scale_html_map_area_coords: happy path via mocked file I/O -----------

@mock.patch('builtins.open', mock.mock_open(read_data=' coords="0,0" no_rstrip'))
def test_scale_html_map_area_coords_ok_with_file_mock():
    # The function re-emits each processed line with a trailing newline.
    assert do.scale_html_map_area_coords(2, ' coords="0,0" no_rstrip') == ' coords="0,0" no_rstrip\n'
# dppy/exotic_dpps_core.py
# coding: utf8
""" Core functions for

- Uniform spanning trees

    * :func:`ust_sampler_wilson`
    * :func:`ust_sampler_aldous_broder`:

- Descent procresses :class:`Descent`:

    * :func:`uniform_permutation`

- :class:`PoissonizedPlancherel` measure

    * :func:`uniform_permutation`
    * :func:`RSK`: Robinson-Schensted-Knuth correspondande
    * :func:`xy_young_ru` young diagram -> russian convention coordinates
    * :func:`limit_shape`

.. seealso:

    `Documentation on ReadTheDocs <https://dppy.readthedocs.io/en/latest/exotic_dpps/index.html>`_
"""

import functools  # used for decorators to pass docstring

import numpy as np

from itertools import chain  # create graph edges from path

# For class PoissonizedPlancherel
from bisect import bisect_right  # for RSK

# NOTE: networkx and dppy.utils are imported lazily inside the functions that
# need them, so the pure-numpy utilities (RSK, xy_young_ru, limit_shape) stay
# importable without those packages.


def ust_sampler_wilson(list_of_neighbors, root=None, random_state=None):
    """Sample a uniform spanning tree with Wilson's loop-erased random walk
    and return it as a ``networkx.Graph``."""
    import networkx as nx
    from dppy.utils import check_random_state

    rng = check_random_state(random_state)

    # Initialize the tree
    wilson_tree_graph = nx.Graph()
    nb_nodes = len(list_of_neighbors)

    # Initialize the root, if root not specified start from any node.
    # Bug fix: 'root if root else ...' treated node 0 as "not specified".
    n0 = root if root is not None else rng.choice(nb_nodes)  # size=1)[0]

    # -1 = not visited / 0 = in path / 1 = in tree
    state = -np.ones(nb_nodes, dtype=int)
    state[n0] = 1
    nb_nodes_in_tree = 1

    path, branches = [], []  # branches of tree, temporary path

    while nb_nodes_in_tree < nb_nodes:  # |Tree| = |V| - 1

        # visit a neighbor of n0 uniformly at random
        n1 = rng.choice(list_of_neighbors[n0])  # size=1)[0]

        if state[n1] == -1:  # not visited => continue the walk
            path.append(n1)  # add it to the path
            state[n1] = 0  # mark it as in the path
            n0 = n1  # continue the walk

        elif state[n1] == 0:  # loop on the path => erase the loop
            # (was a plain 'if' that re-ran as a no-op right after the branch
            # above; 'elif' preserves behavior and skips the wasted pass)
            knot = path.index(n1)  # find 1st appearence of n1 in the path
            nodes_loop = path[knot + 1:]  # identify nodes forming the loop
            del path[knot + 1:]  # erase the loop
            state[nodes_loop] = -1  # mark loopy nodes as not visited
            n0 = n1  # continue the walk

        elif state[n1] == 1:  # hits the tree => new branch
            if nb_nodes_in_tree == 1:
                branches.append([n1] + path)  # initial branch of the tree
            else:
                branches.append(path + [n1])  # path as a new branch

            state[path] = 1  # mark nodes in path as in the tree
            nb_nodes_in_tree += len(path)

            # Restart the walk from a random node among those not visited
            nodes_not_visited = np.where(state == -1)[0]
            if nodes_not_visited.size:
                n0 = rng.choice(nodes_not_visited)  # size=1)[0]
                path = [n0]

    # Flatten each branch into consecutive (u, v) edge pairs.
    tree_edges = list(chain.from_iterable(
        map(lambda x: zip(x[:-1], x[1:]), branches)))
    wilson_tree_graph.add_edges_from(tree_edges)

    return wilson_tree_graph


def ust_sampler_aldous_broder(list_of_neighbors, root=None, random_state=None):
    """Sample a uniform spanning tree with the Aldous-Broder random-walk
    algorithm and return it as a ``networkx.Graph``."""
    import networkx as nx
    from dppy.utils import check_random_state

    rng = check_random_state(random_state)

    # Initialize the tree
    aldous_tree_graph = nx.Graph()
    nb_nodes = len(list_of_neighbors)

    # Initialize the root, if root not specified start from any node.
    # Bug fix: 'root if root else ...' treated node 0 as "not specified".
    n0 = root if root is not None else rng.choice(nb_nodes)  # size=1)[0]

    visited = np.zeros(nb_nodes, dtype=bool)
    visited[n0] = True
    nb_nodes_in_tree = 1

    # Bug fix: np.int was deprecated and removed from NumPy (>= 1.24);
    # the builtin int is the documented replacement.
    tree_edges = np.zeros((nb_nodes - 1, 2), dtype=int)

    while nb_nodes_in_tree < nb_nodes:

        # visit a neighbor of n0 uniformly at random
        n1 = rng.choice(list_of_neighbors[n0])  # size=1)[0]

        if visited[n1]:
            pass  # continue the walk
        else:  # create edge (n0, n1) and continue the walk
            tree_edges[nb_nodes_in_tree - 1] = [n0, n1]
            visited[n1] = True  # mark it as in the tree
            nb_nodes_in_tree += 1

        n0 = n1

    aldous_tree_graph.add_edges_from(tree_edges)

    return aldous_tree_graph


def uniform_permutation(N, random_state=None):
    """ Draw a perputation :math:`\\sigma \\in \\mathfrak{S}_N` uniformly at random using Fisher-Yates' algorithm

    .. seealso::

        - `Fisher–Yates_shuffle <https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle>_
        - `Numpy shuffle <https://github.com/numpy/numpy/blob/d429f0fe16c0407509b1f20d997bf94f1027f61b/numpy/random/mtrand.pyx#L4027>_`
    """
    from dppy.utils import check_random_state

    rng = check_random_state(random_state)

    sigma = np.arange(N)
    for i in range(N - 1, 0, -1):  # reversed(range(1, N))
        j = rng.randint(0, i + 1)
        if j == i:
            continue
        sigma[j], sigma[i] = sigma[i], sigma[j]

    return sigma


def RSK(sequence):
    """Apply Robinson-Schensted-Knuth correspondence on a sequence of reals, e.g. a permutation, and return the corresponding insertion and recording tableaux.

    :param sequence: Sequence of real numbers
    :type sequence: array_like

    :return: :math:`P, Q` insertion and recording tableaux
    :rtype: list

    .. seealso::

        `RSK Wikipedia <https://en.wikipedia.org/wiki/Robinson%E2%80%93Schensted%E2%80%93Knuth_correspondence>`_
    """
    P, Q = [], []  # Insertion/Recording tableau

    for it, x in enumerate(sequence, start=1):
        # Iterate along the rows of the tableau P to find a place for the
        # bouncing x and record the position where it is inserted
        for row_P, row_Q in zip(P, Q):
            # If x finds a place at the end of a row of P
            if x >= row_P[-1]:
                row_P.append(x)  # add the element at the end of the row of P
                row_Q.append(it)  # record its position in the row of Q
                break
            else:
                # find place for x in the row of P to keep the row ordered
                ind_insert = bisect_right(row_P, x)
                # Swap x with the value in place
                x, row_P[ind_insert] = row_P[ind_insert], x
        # If no room for x at the end of any row of P create a new row
        else:
            P.append([x])
            Q.append([it])

    return P, Q


def xy_young_ru(young_diag):
    """ Compute the xy coordinates of the boxes defining the young diagram, using the russian convention.

    :param young_diag:
        points
    :type young_diag:
        array_like

    :return:
        :math:`\\omega(x)`
    :rtype:
        array_like
    """
    def intertwine(arr_1, arr_2):
        # Interleave two equal-length arrays element by element.
        inter = np.empty((arr_1.size + arr_2.size,), dtype=arr_1.dtype)
        inter[0::2], inter[1::2] = arr_1, arr_2
        return inter

    # horizontal lines
    x_hor = intertwine(np.zeros_like(young_diag), young_diag)
    y_hor = np.repeat(np.arange(1, young_diag.size + 1), repeats=2)

    # vertical lines
    uniq, ind = np.unique(young_diag[::-1], return_index=True)
    gaps = np.ediff1d(uniq, to_begin=young_diag[-1])

    x_vert = np.repeat(np.arange(1, 1 + gaps.sum()), repeats=2)
    y_vert = np.repeat(young_diag.size - ind, repeats=gaps)
    y_vert = intertwine(np.zeros_like(y_vert), y_vert)

    xy_young_fr = np.column_stack(
        [np.hstack([x_hor, x_vert]), np.hstack([y_hor, y_vert])])

    # Rotate the French-convention coordinates by 45° (and scale by sqrt(2))
    # to obtain the Russian convention.
    rot_45_and_scale = np.array([[1.0, -1.0],
                                 [1.0, 1.0]])

    return xy_young_fr.dot(rot_45_and_scale.T)


def limit_shape(x):
    """ Evaluate :math:`\\omega(x)` the limit-shape function :cite:`Ker96`

    .. math::

        \\omega(x) =
        \\begin{cases}
            |x|, &\\text{if } |x|\\geq 2\\
            \\frac{2}{\\pi} \\left(x \\arcsin\\left(\\frac{x}{2}\\right) + \\sqrt{4-x^2} \\right) &\\text{otherwise }
        \\end{cases}

    :param x:
        points
    :type x:
        array_like

    :return:
        :math:`\\omega(x)`
    :rtype:
        array_like

    .. seealso::

        - :func:`plot_diagram <plot_diagram>`
        - :cite:`Ker96`
    """
    w_x = np.zeros_like(x)

    abs_x_gt2 = np.abs(x) >= 2.0
    w_x[abs_x_gt2] = np.abs(x[abs_x_gt2])

    w_x[~abs_x_gt2] = x[~abs_x_gt2] * np.arcsin(0.5 * x[~abs_x_gt2])\
                      + np.sqrt(4.0 - x[~abs_x_gt2]**2)
    w_x[~abs_x_gt2] *= 2.0 / np.pi

    return w_x
from testing.test_interpreter import BaseTestInterpreter
import py

py.test.skip("hash module unavailable")


# Expected hex digest of PHP hash(<algo>, "php") for every supported algorithm.
# PHP's multi-pass algorithms are spelled "<name>,<passes>" (e.g. "tiger128,3").
HEX_DIGESTS = {
    'md2': '0c486bdcb26ab27500a3b9430551e230',
    'md4': '765b066d927c8e234164f014d3afcf87',
    'md5': 'e1bfd762321e409cee4ac0b6e841963c',
    'sha1': '47425e4490d1548713efea3b8a6f5d778e4b1766',
    'sha224': 'b62baeb0f0fdbc2341908ae75b84446c698dd847c0526693869605d2',
    'sha256': '12a5d18ee896e59954bdce0f4acc7212eebe03dae1834ef4ce160ac5afa5c4a8',
    'sha384': '7ecc1178406415a12747674afe7b57424731a4b476f0bab701ebbb15d31233bc1434d83c283a1cc40bc479e1e63ca046',
    'sha512': '146179738dd42a9eafe5d2740bdf36d584d38d5f42e6fede12e828d5bf97a57a180bfa90c157c173e09a5c71876695807c06d39985c4ddf7b7d9800cb84ab9d7',
    'ripemd128': '5b9a0b079b5abd97e8e3dfea15012213',
    'ripemd160': 'c78245cd5ed139bac66a99cb94afe97661eb8a3f',
    'ripemd256': '8fdcc792f25ba6bcda5125a94e2ea5013e9ffd48141e843452da7d3f7fc36d23',
    'ripemd320': '050331efe71e6514b6ed8bea97cf5225f1c642b4cbef7e5091ddb35ec4602b905599eeda86e6b8b2',
    'whirlpool': 'ccfc01f76cea38fa16f045f5c1697d857eb938892b34ef5e2233548a5a1211d807dd8d7bc56e8facd9aa4a4921641c444505b65b0772ac73be937db0910d945e',
    'tiger128,3': 'f8f4e87befd50e044ad21f78aac71726',
    'tiger160,3': 'f8f4e87befd50e044ad21f78aac717262a0adbed',
    'tiger192,3': 'f8f4e87befd50e044ad21f78aac717262a0adbed86dc6772',
    'tiger128,4': '1c2703aed137786154befc6868da46bc',
    'tiger160,4': '1c2703aed137786154befc6868da46bc9f5dcbb2',
    'tiger192,4': '1c2703aed137786154befc6868da46bc9f5dcbb28198dc31',
    'snefru': 'bae6ce42009e5ee7c1e9294d21efc9790bcb80a9953bad3e629e5c597b0638a1',
    'snefru256': 'bae6ce42009e5ee7c1e9294d21efc9790bcb80a9953bad3e629e5c597b0638a1',
    'gost': 'ed8039b2a8b81b44f7903b55d51dc9dda1af2529ee4647ad90046cb0d68e4ace',
    'adler32': '02930149',
    'crc32': '70ff3613',
    'crc32b': '569121d1',
    'fnv132': '43775e3f',
    'fnv164': 'd8a9ca186b84547f',
    'joaat': 'b1e186c8',
    'haval128,3': '7b1e887549ba960e9a0dfdade24ac1df',
    'haval160,3': '4155833e9234a40858e377d0a7034f388774f742',
    'haval192,3': 'a6538a66c878c8c2790d93ed3bae5095d0b1aa9cfbec7f39',
    'haval224,3': 'b58a5076078a812afc8ef6266faa41300f6c90ae90a6c7d89dc2fec3',
    'haval256,3': 'a74b42d01eb6bccf218415f16144ee57f04f44555b4f947f7c97bc4224aba62e',
    'haval128,4': 'cb3eeb48e51dadf6c4ebe6a5a6e3981e',
    'haval160,4': '49ba3a12be36735856dd6bf590512968e02190db',
    'haval192,4': 'e76c8953c9fb4f2ea95533665050c160caa964a85ad5d880',
    'haval224,4': 'e56d724fde10d1c30ab2031843f374d14323b57aa9001780c6e0bc5a',
    'haval256,4': 'b4ce192d464cbac646fc5ea834d1518d1b96e5b704a2c38c47065286e584ad79',
    'haval128,5': '32e4cc681d7499f7df1e61c0470b79bf',
    'haval160,5': '4e3f1e83c100db4857cc141d1f1ada68d6f4b588',
    'haval192,5': '586e134724506783724e9f690fa05c35cd2472272b6250fe',
    'haval224,5': '0e70ba3ba5651b906ad92ffcf80745f88d181eeabfe9481b4e3c77b0',
    'haval256,5': '7f5c091f50d1b4948c8f23121f41d795cd2d14f802c8f1a09664333b8b890b98',
}

# For hash(<algo>, "php", 1) the output is raw binary, so the fixtures store
# the md5 hexdigest of that raw output instead of the bytes themselves.
RAW_MD5_DIGESTS = {
    'md2': '40f5ce306cba15d1256182f620b3fdd1',
    'md4': 'adc24d0e0eb0a8b26f21f9a1b95f361d',
    'md5': 'e62e824131574bc7a294c8b39b014d4a',
    'sha1': 'c409c0762e20b61fca8a5661415495b8',
    'sha224': 'bc8f445f2c6e8aa8a5bb89a93a8e8350',
    'sha256': '1e8fa266b5f865c7af37cdb282ddbe3e',
    'sha384': '6050d50d373918261786c4f1c76ac7c7',
    'sha512': '986cc253fa4aa169e17b0cfbd665cdf8',
    'ripemd128': 'acdd2aad79a7bcac805d5337f2cd35bd',
    'ripemd160': '3d38785a7bfcd1e1db75e405b8f1306a',
    'ripemd256': '863981d5498801af945d6190b0917ef8',
    'ripemd320': 'f3d561cb6341bcf757281f53a59f180a',
    'whirlpool': '283fe7911095bd756cc906aabe775ba6',
    'tiger128,3': '7d63dd40ff0200141d4a69b4ff930114',
    'tiger160,3': '73be9e8d36fd5ff2e353857981ec4b78',
    'tiger192,3': 'ff23845c8f60bb75a98d3dca436e707b',
    'tiger128,4': 'c42a8cab92fc867699224ed093b6306e',
    'tiger160,4': 'ec4c4a5c28831b6779d89d032f4e6cfa',
    'tiger192,4': '063af2a4356b5923b67282889677be54',
    'snefru': 'd8e633280559c1ef3c599d1f3a65c705',
    'snefru256': 'd8e633280559c1ef3c599d1f3a65c705',
    'gost': 'b705d06553558fc96e88d7ff5617801c',
    'adler32': '7a429283defc2ccb629bdef40d0c8cc1',
    'crc32': '3c220b09edf64e4fe846f3d71f5906cb',
    'crc32b': '3bd5fb0055bb0973a2d96a6db810bfde',
    'fnv132': 'a967f2a52f4eefd087378623d61072e0',
    'fnv164': '7f79ae6a90e6a12feb4c481741e9073f',
    'joaat': 'f1fc17fb616cc24a0faaeaf82c545f07',
    'haval128,3': '151ad1a9bd18adc7feb3371d3d340ecf',
    'haval160,3': '5329b3cf6b631dd588ff2f78df6acc0e',
    'haval192,3': 'dcf654cdb175c5a6bd959618e2347053',
    'haval224,3': 'c60a340ca3e74dcf086fc77583ed2233',
    'haval256,3': '9c39f5961dcd02fb2ae993bc51aedd42',
    'haval128,4': 'da5d36509b00852d607be559ba44d968',
    'haval160,4': 'f73214863b3520dfd3060aed083fe068',
    'haval192,4': '57de554aa22a8e8abd1662e18de219b5',
    'haval224,4': '9be92af3727d8d1f4c4402c1a5fe5a9b',
    'haval256,4': '8bd8017f638f7cf49da010d72fee5569',
    'haval128,5': '10fcf5dd6babefbf19e61db412e1ea53',
    'haval160,5': 'c67314a46666d06e7521d0564cc1ec0a',
    'haval192,5': 'ee212b74620ac4ef74848de0a2b54567',
    'haval224,5': '93647955087942cf227a53eb43a81026',
    'haval256,5': '96eae5ce76b317c20a75e8b821361c1e',
}


class TestArray(BaseTestInterpreter):
    """Tests for the PHP hash() family of functions.

    One test method per algorithm is generated from the fixture tables above
    (see _install_generated_tests below); the generated names match the old
    hand-written ones, e.g. test_tiger128_3 and test_raw_tiger128_3, so test
    selection by name keeps working.
    """

    def _check_hex(self, algo, expected):
        # Run hash(<algo>, "php") through the interpreter and compare the
        # hex digest it echoes.
        output = self.run('''
        echo hash("%s", "php");
        ''' % algo)
        assert self.space.str_w(output[0]) == expected

    def _check_raw(self, algo, expected_md5):
        # Python 2 stdlib md5 module; the raw digest is binary, so we compare
        # its md5 fingerprint rather than the bytes themselves.
        import md5
        output = self.run('''
        echo hash("%s", "php", 1);
        ''' % algo)
        raw = self.space.str_w(output[0])
        assert md5.new(raw).hexdigest() == expected_md5

    def test_hmac_md5(self):
        output = self.run('''
        echo hash_hmac("md5", "php", "php");
        ''')
        assert self.space.str_w(output[0]) == "720f7625c909264cf7c38921852d695b"

    def test_hash_init_with_hmac(self):
        # Incremental API with an HMAC context (RFC 2202 test vector key).
        output = self.run('''
        $ctx = hash_init('md5',HASH_HMAC,str_repeat(chr(0x0b), 16));
        hash_update($ctx, 'Hi There');
        echo hash_final($ctx);
        ''')
        assert self.space.str_w(output[0]) == "9294727a3638bb1c13f48ef8158bfc9d"


def _install_generated_tests():
    # Materialize one test method per fixture entry so py.test still collects
    # them individually under the historical names ("," becomes "_").
    def make_hex_test(algo, expected):
        def test(self):
            self._check_hex(algo, expected)
        return test

    def make_raw_test(algo, expected):
        def test(self):
            self._check_raw(algo, expected)
        return test

    for algo, expected in HEX_DIGESTS.items():
        name = 'test_' + algo.replace(',', '_')
        test = make_hex_test(algo, expected)
        test.__name__ = name
        setattr(TestArray, name, test)

    for algo, expected in RAW_MD5_DIGESTS.items():
        name = 'test_raw_' + algo.replace(',', '_')
        test = make_raw_test(algo, expected)
        test.__name__ = name
        setattr(TestArray, name, test)


_install_generated_tests()
import os
import time

from Settings import Settings
from BrightnessManager import BrightnessManager
from Battery import Battery


class PowerSaver:
    """Polls the battery and fades the screen backlight to the brightness
    configured (in a settings profile) for the current battery level and
    charge state."""

    def __init__(self, args=None):
        self.setup(args)

    def setup(self, args=None):
        """Merge user arguments over the defaults and initialize the Battery,
        BrightnessManager and settings profile.

        :param args: optional dict overriding any of: verbose, manual, fade,
                     time, profile. Unknown keys are silently ignored.
        """
        arguments = {
            "verbose": False,
            "manual": False,
            "fade": .25,
            "time": 2,
            "profile": None
        }

        if args is not None:
            # Only accept known keys so a malformed dict cannot inject state.
            for arg in args.keys():
                if arg in arguments:
                    arguments[arg] = args[arg]

        self.arguments = arguments

        if self.arguments["verbose"]:
            print("Arguments", flush=True)
            print("=====================")
            for key, value in self.arguments.items():
                print(key, ":", value, flush=True)
            print("=====================\n")

        self.brightness_manager = BrightnessManager()
        self.battery = Battery()

        self.brightness = self.brightness_manager.get_brightness()
        self.charging = self.battery.is_charging()
        self.percent = self.battery.percent()

        # Named battery level ("low"/"medium"/"high" per the profile) and the
        # percent range it covers; resolved on the first poll.
        self.level = None
        self.min_percent = None
        self.max_percent = None

        if self.arguments["profile"] is None:
            # Fall back to the settings.json shipped next to this module.
            cur_dir = os.path.abspath(os.path.dirname(__file__))
            if self.arguments["verbose"]:
                print("Default settings loaded", flush=True)
            self.settings = Settings(os.path.join(cur_dir, "settings.json"))
        else:
            self.settings = Settings(arguments["profile"])

    def _fade_to(self, target_brightness):
        """Step the backlight from self.brightness to target_brightness one
        unit at a time, sleeping arguments["fade"] seconds between steps.
        No-op when the target already matches the cached brightness."""
        if target_brightness == self.brightness:
            return
        if target_brightness < self.brightness:
            levels = reversed(range(target_brightness, self.brightness + 1))
        else:
            levels = range(self.brightness, target_brightness + 1)
        for brightness_level in levels:
            self.brightness_manager.set_brightness(brightness_level)
            if self.arguments["verbose"]:
                print("Setting Brightness:", brightness_level, flush=True)
            time.sleep(self.arguments["fade"])

    def poll(self):
        """Poll the battery and brightness forever. When the battery level
        defined in the settings profile (or the charge status) changes,
        fade the screen brightness to the configured value."""
        poll_time = self.arguments["time"]

        while True:
            time.sleep(poll_time)
            update = False

            # Get percent, charge status, and brightness
            self.percent = self.battery.percent()
            charging = self.battery.is_charging()
            brightness = self.brightness_manager.get_brightness()

            # Close the program if the brightness was changed manually and
            # --manual was not set on the command line.
            if brightness != self.brightness:
                if not self.arguments["manual"]:
                    if self.arguments["verbose"]:
                        print("Brightness Manually Changed, Exiting")
                    # Same effect as exit(1) without relying on site builtins.
                    raise SystemExit(1)

            if self.level is None:
                # First pass: the level has not been resolved yet.
                if self.arguments["verbose"]:
                    print("Battery Level Initializing.", flush=True)
                update = True
            elif self.percent not in range(self.min_percent, self.max_percent + 1):
                # Percent left the cached level range.
                # NOTE(review): `in range(...)` assumes percent is an int;
                # a float percent would always trigger an update — confirm
                # Battery.percent()'s return type.
                if self.arguments["verbose"]:
                    print("Battery level changed.", flush=True)
                update = True
            elif charging != self.charging:
                # Charger plugged or unplugged.
                if self.arguments["verbose"]:
                    print("Charging status changed:", charging, flush=True)
                update = True

            if self.arguments["verbose"]:
                print(self.percent, flush=True)

            # Only touch the backlight when one of the triggers above fired.
            if update:
                self.charging = charging

                # Resolve which named level the current percent falls into and
                # cache its [min, max] range for the cheap check next poll.
                for battery_level, battery_range in self.settings.contents["levels"].items():
                    if self.percent in range(battery_range[0], battery_range[1] + 1):
                        self.level = battery_level
                        self.min_percent, self.max_percent = battery_range
                        if self.arguments["verbose"]:
                            print("Battery Level: ", self.level, flush=True)
                        break

                # The charging and discharging paths are identical except for
                # which settings table supplies the target brightness.
                key = "on_charge_brightness" if self.charging else "on_battery_brightness"
                self._fade_to(self.settings.contents[key][self.level])

            # Re-sync the cached brightness after everything has changed.
            self.brightness = self.brightness_manager.get_brightness()


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-v", "--verbose",
        help="Display messages in the terminal each time the battery is polled.\n"
             "Default: Off",
        action="store_true"
    )
    parser.add_argument(
        "-m", "--manual",
        help="Keep the program open if the brightness is manually changed.\n"
             "Default: Off",
        action="store_true"
    )
    parser.add_argument(
        "-f", "--fade",
        help="The speed to fade the brightness in or out.\n"
             "Higher is slower. Default: .25",
        type=float,
        default=.25
    )
    parser.add_argument(
        "-t", "--time",
        help="The time to sleep between each poll on the battery. (in seconds)\n"
             "Default: 2",
        type=float,
        default=2
    )
    parser.add_argument(
        "-p", "--profile",
        help="The json file to use for battery levels and percentages.",
        type=str
    )
    args = parser.parse_args()

    arguments = {
        "verbose": args.verbose,
        "manual": args.manual,
        "fade": args.fade,
        "time": args.time,
        "profile": None if not args.profile else args.profile,
    }

    powersaver = PowerSaver(arguments)
    powersaver.poll()
# store/http.py
"""HTTP layer of the store service: JSON metadata API under /api and raw
object upload/download under /store, served with fooster.web."""
import multiprocessing
import os
import sys
import time

import fooster.web
import fooster.web.file
import fooster.web.json
import fooster.web.page

from store import config, lock, storage


fooster.web.file.max_file_size = config.max_size

# Regex fragments reused when registering routes below.
alias = '(?P<alias>[a-zA-Z0-9._-]+)'
namespace = '(?P<namespace>/[a-zA-Z0-9._/-]*)/'

# Server and lock are created by start() and torn down by stop().
http = None
global_lock = None

routes = {}
error_routes = {}


def create(entry, body, date):
    """Populate a fresh storage entry from a JSON request body.

    Raises HTTPError 400 for a non-integer size (and, via update(), for bad
    expire/locked fields) and 413 when size exceeds config.max_size.
    """
    entry.filename = body['filename']
    entry.type = body['type']

    try:
        entry.size = int(body['size'])
    except ValueError:
        raise fooster.web.HTTPError(400, status_message='Size Must Be In Bytes')

    if entry.size > config.max_size:
        raise fooster.web.HTTPError(413, status_message='Object Too Large')

    entry.date = date

    # Apply the optional fields (expire, locked) through the shared helper.
    update(entry, body)

    return entry


def update(entry, body):
    """Apply the optional, mutable fields of a JSON body to an entry.

    Raises HTTPError 400 when expire is not a float timestamp or locked is
    not a bool.
    """
    if 'expire' in body:
        try:
            entry.expire = float(body['expire'])
        except ValueError:
            raise fooster.web.HTTPError(400, status_message='Time Must Be In Seconds Since The Epoch')

    if 'locked' in body:
        if not isinstance(body['locked'], bool):
            raise fooster.web.HTTPError(400, status_message='Locked Must Be A Bool')

        entry.locked = body['locked']


def output(entry):
    """Serialize a storage entry to the dict shape returned by the JSON API."""
    return {'alias': entry.alias, 'filename': entry.filename, 'type': entry.type, 'size': entry.size, 'date': entry.date, 'expire': entry.expire, 'locked': entry.locked}


class GlobalLockMixIn:
    # Accepts the process-wide lock injected by start() via
    # HTTPHandlerWrapper; stored for handlers that need it.
    def __init__(self, *args, **kwargs):
        self.global_lock = kwargs.pop('global_lock', None)

        super().__init__(*args, **kwargs)


class Page(fooster.web.page.PageHandler):
    # Static landing page rendered from the configured template directory.
    directory = config.template
    page = 'index.html'


class Namespace(GlobalLockMixIn, fooster.web.json.JSONHandler):
    """JSON collection endpoint: list a namespace or create a new entry."""

    def respond(self):
        # Canonicalize: namespaces always end with a slash...
        if not self.request.resource.endswith('/'):
            self.response.headers['Location'] = self.request.resource + '/'

            return 307, ''

        self.namespace = self.groups['namespace']

        # ...and must be in normalized path form; otherwise redirect.
        norm_request = fooster.web.file.normpath(self.namespace)
        if self.namespace != norm_request:
            self.response.headers.set('Location', '/api' + norm_request)

            return 307, ''

        return super().respond()

    def do_get(self):
        # List every entry in the namespace; unknown namespace -> 404.
        try:
            return 200, list(output(value) for value in storage.values(self.namespace))
        except KeyError:
            raise fooster.web.HTTPError(404)

    def do_post(self):
        # Create an entry with a server-generated alias.
        if self.request.headers.get('Content-Type') != 'application/json':
            raise fooster.web.HTTPError(400, status_message='Body Must Be JSON')

        entry = storage.create(self.namespace)

        try:
            create(entry, self.request.body, time.time())
        except KeyError:
            # Missing required JSON fields: roll back the placeholder entry.
            storage.remove(self.namespace, entry.alias)

            raise fooster.web.HTTPError(400, status_message='Not Enough Fields')
        except fooster.web.HTTPError:
            # Validation failed inside create(): roll back, then re-raise.
            storage.remove(self.namespace, entry.alias)

            raise

        if entry.locked:
            lock.acquire(self.request, self.namespace, entry.alias, True)

        self.response.headers['Location'] = self.request.resource + entry.alias

        return 201, output(entry)


class Interface(GlobalLockMixIn, fooster.web.json.JSONHandler):
    """JSON item endpoint: read, create/update, or delete a single entry."""

    def respond(self):
        self.namespace = self.groups['namespace']
        self.alias = self.groups['alias']

        # Redirect to the normalized form of the path if needed.
        norm_request = fooster.web.file.normpath(self.namespace + '/' + self.alias)
        if self.namespace + '/' + self.alias != norm_request:
            self.response.headers.set('Location', '/api' + norm_request)

            return 307, ''

        return super().respond()

    def do_get(self):
        try:
            return 200, output(storage.retrieve(self.namespace, self.alias))
        except KeyError:
            raise fooster.web.HTTPError(404)

    def do_put(self):
        # PUT is create-or-update: storage.create raising KeyError signals
        # that the alias already exists, in which case we update instead.
        if self.request.headers.get('Content-Type') != 'application/json':
            raise fooster.web.HTTPError(400, status_message='Body Must Be JSON')

        try:
            entry = storage.create(self.namespace, self.alias)

            try:
                create(entry, self.request.body, time.time())
            except KeyError:
                storage.remove(self.namespace, entry.alias)

                raise fooster.web.HTTPError(400, status_message='Not Enough Fields')
            except fooster.web.HTTPError:
                storage.remove(self.namespace, entry.alias)

                raise

            if entry.locked:
                lock.acquire(self.request, self.namespace, entry.alias, True)

            return 201, output(entry)
        except KeyError:
            # Alias already exists: update the mutable fields only.
            entry = storage.retrieve(self.namespace, self.alias)

            if entry.locked:
                raise fooster.web.HTTPError(403)

            try:
                update(entry, self.request.body)
            except KeyError:
                raise fooster.web.HTTPError(400, status_message='Not Enough Fields')

            return 200, output(entry)

    def do_delete(self):
        try:
            entry = storage.retrieve(self.namespace, self.alias)

            if entry.locked:
                raise fooster.web.HTTPError(403)

            storage.remove(self.namespace, entry.alias)

            return 204, ''
        except KeyError:
            raise fooster.web.HTTPError(404)


class Store(GlobalLockMixIn, fooster.web.file.ModifyMixIn, fooster.web.file.PathHandler):
    """Raw object endpoint: download/upload the bytes behind an entry."""

    local = storage.path
    remote = '/store'

    def get_body(self):
        # The body is streamed by the file handler, not buffered here.
        return False

    def respond(self):
        if 'namespace' not in self.groups or 'alias' not in self.groups:
            self.response.headers['Location'] = self.request.resource + '/'

            return 307, ''

        self.namespace = self.groups['namespace']
        self.alias = self.groups['alias']

        self.pathstr = self.namespace + '/' + self.alias

        return super().respond()

    def do_get(self):
        try:
            entry = storage.retrieve(self.namespace, self.alias)
        except KeyError:
            raise fooster.web.HTTPError(404)

        # Surface the stored metadata as response headers before streaming.
        if entry.type is not None:
            self.response.headers['Content-Type'] = entry.type

        if entry.filename is not None:
            # Strip characters that cannot be encoded in the HTTP header.
            self.response.headers['Content-Filename'] = entry.filename.encode(fooster.web.http_encoding, 'ignore').decode()

        self.response.headers['Last-Modified'] = fooster.web.mktime(time.gmtime(entry.date))
        self.response.headers['Expires'] = fooster.web.mktime(time.gmtime(entry.expire))

        return super().do_get()

    def do_put(self):
        try:
            entry = storage.retrieve(self.namespace, self.alias)
        except KeyError:
            raise fooster.web.HTTPError(404)

        # A locked entry with its file already present cannot be overwritten.
        if entry.locked and os.path.isfile(self.filename):
            raise fooster.web.HTTPError(403)

        # The upload must match the metadata registered via the JSON API.
        if self.request.headers.get('Content-Length') != str(entry.size):
            raise fooster.web.HTTPError(400, status_message='Content-Length Does Not Match Database Size')

        if 'Content-Type' in self.request.headers and self.request.headers['Content-Type'] != entry.type:
            raise fooster.web.HTTPError(400, status_message='Content-Type Does Not Match Database Type')

        response = super().do_put()

        return response


routes.update({'/': Page, '/api': Namespace, '/api' + namespace: Namespace, '/api' + namespace + alias: Interface, '/store': Store, '/store' + namespace + alias: Store})
error_routes.update(fooster.web.json.new_error())


def start():
    """Create the shared lock and start the HTTP server."""
    global http, global_lock

    # spawn is only reliably available/configurable from 3.7 on.
    if sys.version_info >= (3, 7):
        global_lock = multiprocessing.get_context('spawn').Lock()
    else:
        global_lock = multiprocessing.get_context('fork').Lock()

    # Inject the lock into every handler that mixes in GlobalLockMixIn.
    run_routes = {}
    for route, handler in routes.items():
        if issubclass(handler, GlobalLockMixIn):
            run_routes[route] = fooster.web.HTTPHandlerWrapper(handler, global_lock=global_lock)
        else:
            run_routes[route] = handler

    http = fooster.web.HTTPServer(config.addr, run_routes, error_routes, timeout=60, keepalive=60)
    http.start()


def stop():
    """Stop the HTTP server and drop the server/lock references."""
    global http, global_lock

    http.stop()
    http = None

    global_lock = None


def join():
    """Block until the HTTP server shuts down."""
    global http

    http.join()
#!/usr/bin/python
# -*- coding: utf-8 -*-
# cython: language_level=3
"""
Example using WrapBokeh

Login page: renders username/password widgets with Bokeh and handles the
submit/signup/recover buttons on POST-back.
"""
import logging
logger = logging.getLogger("TMI.login")

from bokeh.layouts import row, layout, Spacer, widgetbox, column
from bokeh.models.widgets.inputs import TextInput, PasswordInput
from bokeh.models.widgets.buttons import Button
from bokeh.models.widgets import Div

from flask import redirect, abort, Blueprint, session
from flask import request
from flask import current_app as app

from pywrapbokeh import WrapBokeh

from app.css import url_page_css, index_toolbar_menu, index_menu_redirect
from app.urls import *
from app.app_hooks import login
from common.models import User

PAGE_URL = COMMON_URL_LOGIN

common_login = Blueprint('common_login', __name__)


@common_login.route(PAGE_URL, methods=['GET', 'POST'])
def common__login():
    """Render the login form and process its button callbacks.

    Returns either a redirect (menu navigation, signup/recover buttons, or a
    successful login) or the rendered Bokeh document.
    """
    w = WrapBokeh(PAGE_URL, logger)

    # Register widgets with WrapBokeh so their state survives POST-backs.
    # NOTE(review): css_classes 'tin_lname' on the username input looks like a
    # typo for 'tin_uname' — confirm against the stylesheet before changing.
    w.add("tin_uname", TextInput(title="Login Name:", placeholder="", css_classes=['tin_lname']))
    w.add("tin_lpw", PasswordInput(title="Password:", placeholder="", css_classes=['tin_lpw']))
    w.add("b_submit", Button(label="Submit", css_classes=['b_submit']))
    w.add("b_signup", Button(label="Sign Up", css_classes=['b_signup']))
    w.add("b_recover", Button(label="Recover Password", css_classes=['b_recover']))

    w.init()

    # Create a dominate document, see https://github.com/Knio/dominate
    # this line should go after any "return redirect" statements
    w.dominate_document(title=session["title"])
    url_page_css(w.dom_doc, PAGE_URL)
    args, _redirect_page_metrics = w.process_req(request)
    # First hit has no widget state yet; WrapBokeh asks for a metrics redirect.
    if not args: return _redirect_page_metrics
    logger.info("{} : args {}".format(PAGE_URL, args))

    # Indent the form by 10% of the reported browser window width.
    left_margin = int(int(args.get("windowWidth", 800)) * 0.1)

    redir, url = index_menu_redirect(args)
    if redir: return redirect(url)

    if args.get("b_signup", False):
        return redirect(COMMON_URL_LOGIN_SIGNUP)

    if args.get("b_recover", False):
        return redirect(COMMON_URL_LOGIN_RECOVER)

    login_failed = False
    if args.get("b_submit", False):
        uname = args.get("tin_uname", None)
        pw = w.get("tin_lpw").value
        if uname is not None and pw is not None:
            # User.login returns the user on success, None otherwise.
            user = User.login(uname, pw)
            if user is not None:
                logger.info("{} {}".format(user.username, user.id))
                session['user_id'] = user.id
                login(user.id)
                return redirect(COMMON_URL_LAND)
            else:
                logger.info("Login failed for {}".format(uname))
                login_failed = True

    doc_layout = layout(sizing_mode='scale_width')
    index_toolbar_menu(w, doc_layout, args)

    if login_failed:
        # Offer the recovery button inline under the failure message.
        doc_layout.children.append(row(Spacer(width=left_margin),
                                       column([Div(text="""<p>Login failed, Recover Password?</p>"""),
                                               w.get("b_recover")])))

    w.add_css("b_submit", {'button': {'background-color': '#98FB98', 'min-width': '60px'}})
    w.add_css("b_signup", {'button': {'background-color': '#98FB98', 'min-width': '60px'}})
    w.add_css("tin_uname", {'input': {'width': '90%'}})
    w.add_css("tin_lpw", {'input': {'width': '90%'}})

    # Only show the Sign Up button when signups are enabled in the app config.
    if app.config["app"]["user"]["signup_enabled"]:
        wbox = widgetbox(w.get("tin_uname"), w.get("tin_lpw"), w.get("b_submit"), w.get("b_signup"))
    else:
        wbox = widgetbox(w.get("tin_uname"), w.get("tin_lpw"), w.get("b_submit"))

    doc_layout.children.append(row([Spacer(width=left_margin), wbox]))

    return w.render(doc_layout)
import discord, random, httpx
from discord.ext import commands
from lxml import html


class MmangaModule(commands.Cog):
    """Discord cog that looks up manga information on mavimanga.com.

    ``mmanga`` scrapes the site live; ``srcdb`` reads a pre-scraped row
    from the bot's Postgres ``manga`` table. Both reply with an embed.
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name='mmanga', aliases=['mavimanga'])
    async def mSearch(self, ctx, *, mname):
        """Scrape mavimanga.com for *mname* and post an info embed."""
        # The site uses dash-separated slugs in its manga URLs.
        mname = mname.replace(" ", "-")
        post = "https://mavimanga.com/manga/" + mname
        source = httpx.get(post)
        page = html.fromstring(source.content)
        title = random.choice(page.xpath(f"//meta[@property='og:title']/@content"))
        # Strip the 13-character site-name suffix from the og:title.
        title = title[:-13]
        if not title.startswith('Sayfa bulunamadı'):  # Turkish "page not found"
            imgURL = page.xpath(f"//meta[@property='og:image']/@content")
            if imgURL:
                imgURL = imgURL[0]
            else:
                # Placeholder cover image when the page has none.
                imgURL = "https://i.resimyukle.xyz/1yx268.png"
            durum = random.choice(page.xpath(f"//span[@class='mangasc-stat']/text()"))
            mangaka = random.choice(page.xpath(f"//td[./b/text()='Mangaka:']/text()"))
            bolum = random.choice(page.xpath("//td[./b/text()='Bölüm Sayısı:']/text()"))
            turler = page.xpath("//td[./b/text()='Türler:']/ul/li/a/text()")
            turlerS = ", ".join(str(x) for x in turler)
            diger = random.choice(page.xpath("//td[./b/text()='Diğer Adları:']/text()"))
            cikis = random.choice(page.xpath("//td[./b/text()='Çıkış Yılı:']/text()"))
            konu = random.choice(page.xpath("//meta[@property='og:description']/@content"))
            latestL = page.xpath("(//a[@class='mangaep-episode'])[1]/@href")
            if latestL:
                latestL = random.choice(latestL)
                latestN = page.xpath("(//a[@class='mangaep-episode'])[1]/text()")
                latestN = latestN[0]
            else:
                latestN = "Bölüm Yok"
                # BUGFIX: original assigned the undefined name `b` here
                # (NameError on the no-episodes path). Fall back to linking
                # the manga page itself so the embed field stays valid.
                latestL = post
            embed = discord.Embed(title=f'__**{title}**__', url=post, colour=0x64C6E9)
            embed.set_thumbnail(url=imgURL)
            embed.add_field(name='Türler', value=turlerS, inline=False)
            embed.add_field(name='Diğer Adlar', value=diger)
            embed.add_field(name='Çıkış Yılı', value=cikis, inline=True)
            embed.add_field(name='Mangaka', value=mangaka)
            embed.add_field(name='Toplam Bölüm', value=bolum)
            embed.add_field(name='Durum', value=durum)
            embed.add_field(name='Son Bölüm', value=f'[{latestN}]({latestL})', inline=True)
            embed.add_field(name='Konusu', value=konu)
            await ctx.channel.send(embed=embed)
        else:
            await ctx.channel.send("Aradığınız mangaya ulaşılamadı, lütfen başka bir isim seçin. <:lul:536833872076210198>")

    # BUGFIX: this decorator was corrupted in the source ("<EMAIL>own(1, 5, ...)");
    # restored to the standard discord.py per-user cooldown: 1 use per 5 seconds.
    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name='srcdb', aliases=['mmdb'])
    async def mangaSearch(self, ctx, *, mName: str):
        """Look *mName* up in the bot's Postgres `manga` table and post an embed.

        NOTE(review): `fetchrow` returns None when no row matches — the
        subscriptions below would then raise TypeError; confirm whether the
        bot's global error handler covers this case.
        """
        manga = await self.bot.pg_con.fetchrow(f'SELECT * FROM manga WHERE name=$1', mName)
        title = manga['name']
        mangaka = manga['author']
        cikis = manga['releaseDate']
        durum = manga['status']
        turlerS = manga['genre']
        konu = manga['konu']
        latestN = manga['latestN']
        latestL = manga['latestL']
        post = manga['url']
        imgURL = manga['img']
        bolum = manga['total']
        diger = manga['alias']
        embed = discord.Embed(title=f'__**{title}**__', url=post, colour=0x64C6E9)
        embed.set_thumbnail(url=imgURL)
        embed.add_field(name='Türler', value=turlerS, inline=False)
        embed.add_field(name='Diğer Adlar', value=diger)
        embed.add_field(name='Çıkış Yılı', value=cikis, inline=True)
        embed.add_field(name='Mangaka', value=mangaka)
        embed.add_field(name='Toplam Bölüm', value=bolum)
        embed.add_field(name='Durum', value=durum)
        embed.add_field(name='Son Bölüm', value=f'[{latestN}]({latestL})', inline=True)
        embed.add_field(name='Konusu', value=konu)
        await ctx.channel.send(embed=embed)


def setup(bot):
    bot.add_cog(MmangaModule(bot))
# -- coding: utf-8 --
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from sympy import Symbol, Integral, factorial
from sympy import gamma, hyper, exp_polar, I, pi, log
from scipy.special import betainc, betaincinv
import numpy as np
from warnings import warn


class HansonKoopmans(object):

    def __init__(self, p, g, n, j, method='secant', max_iter=200, tol=1e-5,
                 step_size=1e-4):
        r"""
        An object to solve for the Hanson-Koopmans bound.

        Solve the Hanson-Koopmans [1] bound for any percentile, confidence
        level, and number of samples. This assumes the lowest value is the
        first order statistic, but you can specify the index of the second
        order statistic as j.

        Parameters
        ----------
        p : float
            Percentile where p < 0.5 and p > 0.
        g : float
            Confidence level where g > 0. and g < 1.
        n : int
            Number of samples.
        j : int
            Index of the second value to use for the second order statistic.
        method : string, optional
            Which rootfinding method to use to solve for the Hanson-Koopmans
            bound. Default is method='secant' which appears to converge
            quickly. Other choices include 'newton-raphson' and 'halley'.
        max_iter : int, optional
            Maximum number of iterations for the root finding method.
        tol : float, optional
            Tolerance for the root finding method to converge.
        step_size : float, optional
            Step size for the secant solver. Default step_size = 1e-4.

        Attributes
        ----------
        b : float_like
            Hanson-Koopmans bound.
        un_conv : bool
            Unconvergence status. If un_conv, then the method did not
            converge.
        count : int
            Number of iterations used in the root finding method.
        fall_back : bool
            Whether to fall back to the traditional non-parametric method.

        Raises
        ------
        ValueError
            Incorrect input, or unable to comptue the Hanson-Koopmans bound.

        References
        ----------
        [1] <NAME>., & <NAME>. (1964). Tolerance Limits for the
            Class of Distributions with Increasing Hazard Rates. Ann. Math.
            Statist., 35(4), 1561–1570.
            https://doi.org/10.1214/aoms/1177700380
        [2] <NAME>. (1994). One-sided nonparametric tolerance limits.
            Communications in Statistics - Simulation and Computation, 23(4),
            1137–1154. https://doi.org/10.1080/03610919408813222
        """
        self.max_iter = max_iter
        self.tol = tol
        self.step_size = step_size
        # create a dummy variable v (sympy integration variable used by the
        # pi(B) integrals below)
        self.v = Symbol('v', nonzero=True, rational=True, positive=True)
        # check that p, g, n, j are valid
        if not(p < 0.5 and p > 0.):
            self.invalid_value(p, 'p')
        else:
            self.p = p
        if not(g > 0. and g < 1.):
            self.invalid_value(g, 'g')
        else:
            self.g = g
        self.n = int(n)
        if not(j < n and j > -1):
            self.invalid_value(j, 'j')
        else:
            self.j = int(j)
        # compute the stuff that doesn't depend on b
        # NOTE: method name contains a historical typo ("vales"); kept as-is
        # because renaming would change the public interface.
        self.constant_vales()
        # compute b = 1
        pi_B_1 = self.piB(0)
        # remember that b - 1 = B; b = B + 1
        if pi_B_1 >= self.g:
            # b = 1 already satisfies the confidence level: defer to the
            # traditional non-parametric method instead of root finding.
            self.fall_back = True
            # raise ValueError('b = 1, defer to traditional methods...')
            # raise RunTimeWarning?
        else:
            self.fall_back = False
            # Vangel's approximation [2] supplies the initial guess for b.
            b_guess = self.vangel_approx(p=float(self.p))
            # print(float(b_guess))
            if np.isnan(b_guess):
                raise RuntimeError('Bad Vangel Approximation is np.nan')
            elif b_guess <= 0:
                # clamp non-positive guesses to a small positive start point
                b_guess = 1e-2
            # print(b_guess)
            self.b_guess = b_guess
            # all solvers work in B = b - 1 space
            if method == 'secant':
                B, status, count = self.secant_solver(b_guess - 1.)
            elif method == 'newton-raphson':
                B, status, count = self.nr_solver(b_guess - 1.)
            elif method == 'halley':
                B, status, count = self.halley_solver(b_guess - 1.)
            else:
                raise ValueError(str(method) + ' is not a valid method!')
            self.b = B + 1.
            self.un_conv = status
            self.count = count
            if self.un_conv:
                war = 'HansonKoopmans root finding method failed to converge!'
                warn(war, RuntimeWarning)
                # This should raise RuntimeError if not converged!

    def invalid_value(self, value, variable):
        # Raise a uniform ValueError for any out-of-range constructor input.
        err = str(value) + ' was not a valid value for ' + variable
        raise ValueError(err)

    def constant_vales(self):
        # Precompute the B-independent pieces of pi(B): the combinatorial
        # factor A and the left integral (evaluated once, symbolically).
        self.nj = self.n-self.j-1
        self.A = factorial(self.n) / (factorial(self.nj) * factorial(self.j-1))
        # compute the left integral
        int_left = (self.p*self.p**self.j*gamma(self.j + 1) *
                    hyper((-self.nj, self.j + 1), (self.j + 2,),
                          self.p*exp_polar(2*I*pi)) /
                    (self.j*gamma(self.j + 2)))
        self.int_left = int_left.evalf()  # evaluates to double precision

    def piB(self, B):
        # pi(B): confidence achieved by bound factor b = B + 1; the root of
        # pi(B) - g = 0 is what the solvers below search for.
        int_right_exp = (self.v**self.j*(1 - self.v)**self.nj/self.j -
                         (1 - self.v)**self.nj*(-self.p**(1/(B + 1)) *
                         self.v**(B/(B + 1)) + self.v)**self.j/self.j)
        int_right = Integral(int_right_exp, (self.v, self.p, 1)).evalf()
        return (self.int_left + int_right)*self.A

    def dpiB(self, B):
        # First derivative of pi(B) w.r.t. B (used by Newton-Raphson/Halley).
        d_int_right_exp_B = (self.v**self.j*(1 - self.v)**self.nj/self.j -
                             (1 - self.v)**self.nj*(-self.p**(1/(B + 1)) *
                             self.v**(B/(B + 1))*(-B/(B + 1)**2 + 1/(B + 1)) *
                             log(self.v) + self.p**(1/(B + 1))*self.v **
                             (B / (B + 1))*log(self.p)/(B + 1)**2 + self.v) **
                             self.j / self.j)
        d_int_right = Integral(d_int_right_exp_B, (self.v, self.p, 1)).evalf()
        return d_int_right*self.A

    def d2piB2(self, B):
        # Second derivative of pi(B) w.r.t. B (used by the Halley solver).
        d2_int_r_B = (-(1 - self.v)**self.nj*(-self.p**(1/(B + 1))*self.v **
                      (B/(B + 1))*(-B/(B + 1)**2 + 1/(B + 1))*log(self.v) +
                      self.p**(1/(B + 1))*self.v**(B/(B + 1))*log(self.p) /
                      (B + 1)**2 + self.v)**self.j*(-self.p**(1/(B + 1)) *
                      self.v**(B/(B + 1))*(2*B/(B + 1)**3 - 2/(B + 1)**2) *
                      log(self.v) - self.p**(1/(B + 1))*self.v**(B/(B + 1)) *
                      (-B/(B + 1)**2 + 1/(B + 1))**2*log(self.v)**2 + 2 *
                      self.p**(1/(B + 1))*self.v**(B/(B + 1)) *
                      (-B/(B + 1)**2 + 1/(B + 1)) *
                      log(self.p)*log(self.v)/(B + 1)**2 - 2 *
                      self.p**(1/(B + 1))*self.v**(B/(B + 1))*log(self.p) /
                      (B + 1)**3 - self.p**(1/(B + 1))*self.v**(B/(B + 1)) *
                      log(self.p)**2/(B + 1)**4)/(-self.p**(1/(B + 1)) *
                      self.v**(B/(B + 1))*(-B/(B + 1)**2 + 1/(B + 1)) *
                      log(self.v) + self.p**(1/(B + 1))*self.v ** (B/(B + 1)) *
                      log(self.p)/(B + 1)**2 + self.v))
        d2_int_right = Integral(d2_int_r_B, (self.v, self.p, 1)).evalf()
        return d2_int_right*self.A

    def vangel_approx(self, n=None, i=None, j=None, p=None, g=None):
        # Closed-form approximation of b from Vangel (1994) [2]; defaults to
        # the instance's own n, j, p, g with order statistics (i, j) = (1, j+1).
        if n is None:
            n = self.n
        if i is None:
            i = 1
        if j is None:
            j = self.j+1
        if p is None:
            p = self.p
        if g is None:
            g = self.g
        betatmp = betainc(j, n-j+1, p)
        a = g - betatmp
        b = 1.0 - betatmp
        q = betaincinv(i, j-i, a/b)
        return np.log(((p)*(n+1))/j) / np.log(q)

    def secant_solver(self, B_guess, max_iter=None, tol=None, step_size=None):
        # Secant method on f(B) = pi(B) - g, using a forward-difference slope
        # with the configured step_size. Returns (B, un_conv, iterations).
        if max_iter is None:
            max_iter = self.max_iter
        if tol is None:
            tol = self.tol
        if step_size is None:
            step_size = self.step_size
        count = 0
        f = self.piB(B_guess) - self.g
        f1 = self.piB(B_guess + step_size) - self.g
        dfdx = (f1 - f) / step_size
        B_next = B_guess - (f/dfdx)
        un_conv = np.abs(B_next - B_guess) > tol
        while un_conv and count < max_iter:
            B_guess = B_next
            f = self.piB(B_guess) - self.g
            f1 = self.piB(B_guess + step_size) - self.g
            dfdx = (f1 - f) / step_size
            B_next = B_guess - (f/dfdx)
            un_conv = np.abs(B_next - B_guess) > tol
            count += 1
        return B_next, un_conv, count

    def nr_solver(self, B_guess, max_iter=None, tol=None):
        # Newton-Raphson on f(B) = pi(B) - g with the analytic derivative.
        # Returns (B, un_conv, iterations).
        if max_iter is None:
            max_iter = self.max_iter
        if tol is None:
            tol = self.tol
        count = 0
        f = self.piB(B_guess) - self.g
        dfdx = self.dpiB(B_guess)
        B_next = B_guess - (f/dfdx)
        un_conv = np.abs(B_next - B_guess) > tol
        while un_conv and count < max_iter:
            B_guess = B_next
            f = self.piB(B_guess) - self.g
            dfdx = self.dpiB(B_guess)
            B_next = B_guess - (f/dfdx)
            un_conv = np.abs(B_next - B_guess) > tol
            count += 1
        return B_next, un_conv, count

    def halley_solver(self, B_guess, max_iter=None, tol=None):
        # Halley's method on f(B) = pi(B) - g using first and second
        # analytic derivatives. Returns (B, un_conv, iterations).
        if max_iter is None:
            max_iter = self.max_iter
        if tol is None:
            tol = self.tol
        count = 0
        f = self.piB(B_guess) - self.g
        dfdx = self.dpiB(B_guess)
        d2fdx2 = self.d2piB2(B_guess)
        B_next = B_guess - ((2*f*dfdx) / (2*(dfdx**2) - (f*d2fdx2)))
        un_conv = np.abs(B_next - B_guess) > tol
        while un_conv and count < max_iter:
            B_guess = B_next
            f = self.piB(B_guess) - self.g
            dfdx = self.dpiB(B_guess)
            d2fdx2 = self.d2piB2(B_guess)
            B_next = B_guess - ((2*f*dfdx) / (2*(dfdx**2) - (f*d2fdx2)))
            un_conv = np.abs(B_next - B_guess) > tol
            count += 1
        return B_next, un_conv, count
import sys
import OmniDB_app.include.OmniDatabase as OmniDatabase
import OmniDB_app.include.Spartacus.Utils as Utils
from django.contrib.auth.models import User
from OmniDB_app.models.main import *
from datetime import datetime
from django.utils.timezone import make_aware
import django.db.transaction as transaction
from django.utils import timezone


def log_message(p_logger, p_type, p_message):
    """Echo *p_message* to stdout and route it to the logger by severity."""
    print(p_message, flush=True)
    if p_type == 'error':
        p_logger.error(p_message)
    else:
        p_logger.info(p_message)


def migration_main(p_old_db_file, p_interactive, p_logger):
    """Migrate users, connections, snippets and monitoring units from an
    OmniDB 2 SQLite file (*p_old_db_file*) into the OmniDB 3 Django models.

    The whole import runs in a single transaction; on any error it is rolled
    back and the process exits. When *p_interactive* is true the user is
    prompted before re-running a migration that was already completed.
    """
    config_object = Config.objects.all()[0]
    perform_migration = False
    if not config_object.mig_2_to_3_done:
        perform_migration = True
    elif p_interactive:
        value = input('Target database already migrated from OmniDB 2, continue anyway? (y/n) ')
        if value.lower() == 'y':
            perform_migration = True
    if perform_migration:
        log_message(p_logger, 'info', 'Attempting to migrate users, connections and monitoring units and snippets from OmniDB 2 to 3...')

        database = OmniDatabase.Generic.InstantiateDatabase(
            'sqlite', '', '', p_old_db_file, '', '', '0', ''
        )

        # Check that OmniDB 2 required tables exist
        migration_enabled = True
        try:
            database.v_connection.Query('''
                select count(1) from users
            ''')
        except Exception:
            migration_enabled = False
        try:
            database.v_connection.Query('''
                select count(1) from connections
            ''')
        except Exception:
            migration_enabled = False

        if not migration_enabled:
            log_message(p_logger, 'info', 'Source database file does not contain the required tables, skipping...')
            config_object.mig_2_to_3_done = True
            config_object.save()
        else:
            try:
                # Everything below commits atomically at the end.
                transaction.set_autocommit(False)

                # OmniDB 2 stored connection fields encrypted with this key.
                cryptor = Utils.Cryptor('omnidb', 'iso-8859-1')

                v_users = database.v_connection.Query('''
                    select user_id as userid,
                           user_name as username,
                           super_user as superuser
                    from users
                    order by user_id
                ''')
                for user in v_users.Rows:
                    # Try to get existing user
                    try:
                        log_message(p_logger, 'info', "Creating user '{0}'...".format(user['username']))
                        user_object = User.objects.get(username=user['username'])
                        log_message(p_logger, 'info', "User '{0}' already exists.".format(user['username']))
                    except Exception:
                        # Creating the user. NOTE(review): '<PASSWORD>' is a
                        # redacted placeholder from the published source —
                        # replace with the intended default before use.
                        user_object = User.objects.create_user(username=user['username'],
                                                               password='<PASSWORD>',
                                                               email='',
                                                               last_login=timezone.now(),
                                                               is_superuser=user['superuser'] == 1,
                                                               first_name='',
                                                               last_name='',
                                                               is_staff=False,
                                                               is_active=True,
                                                               date_joined=timezone.now())
                        log_message(p_logger, 'info', "User '{0}' created.".format(user['username']))

                    # User connections
                    v_connections = database.v_connection.Query('''
                        select coalesce(dbt_st_name,'') as dbt_st_name,
                               coalesce(server,'') as server,
                               coalesce(port,'') as port,
                               coalesce(service,'') as service,
                               coalesce(user,'') as user,
                               coalesce(alias,'') as alias,
                               coalesce(ssh_server,'') as ssh_server,
                               coalesce(ssh_port,'') as ssh_port,
                               coalesce(ssh_user,'') as ssh_user,
                               coalesce(ssh_password,'') as ssh_password,
                               coalesce(ssh_key,'') as ssh_key,
                               coalesce(use_tunnel,0) as use_tunnel,
                               coalesce(conn_string,'') as conn_string
                        from connections
                        where user_id = {0}
                        order by dbt_st_name, conn_id
                    '''.format(user['userid']))
                    if len(v_connections.Rows) == 0:
                        log_message(p_logger, 'info', "User '{0}' does not contain connections in the source database.".format(user['username']))
                    else:
                        log_message(p_logger, 'info', "Attempting to create connections of user '{0}'...".format(user['username']))
                        for r in v_connections.Rows:
                            # Each field may or may not be encrypted in the
                            # source DB: try to decrypt, fall back to raw.
                            try:
                                v_server = cryptor.Decrypt(r["server"])
                            except Exception as exc:
                                v_server = r["server"]
                            try:
                                v_port = cryptor.Decrypt(r["port"])
                            except Exception as exc:
                                v_port = r["port"]
                            try:
                                v_service = cryptor.Decrypt(r["service"])
                            except Exception as exc:
                                v_service = r["service"]
                            try:
                                v_user = cryptor.Decrypt(r["user"])
                            except Exception as exc:
                                v_user = r["user"]
                            try:
                                v_alias = cryptor.Decrypt(r["alias"])
                            except Exception as exc:
                                v_alias = r["alias"]
                            try:
                                v_conn_string = cryptor.Decrypt(r["conn_string"])
                            except Exception as exc:
                                v_conn_string = r["conn_string"]
                            # SSH Tunnel information
                            try:
                                v_ssh_server = cryptor.Decrypt(r["ssh_server"])
                            except Exception as exc:
                                v_ssh_server = r["ssh_server"]
                            try:
                                v_ssh_port = cryptor.Decrypt(r["ssh_port"])
                            except Exception as exc:
                                v_ssh_port = r["ssh_port"]
                            try:
                                v_ssh_user = cryptor.Decrypt(r["ssh_user"])
                            except Exception as exc:
                                v_ssh_user = r["ssh_user"]
                            try:
                                v_ssh_password = cryptor.Decrypt(r["ssh_password"])
                            except Exception as exc:
                                v_ssh_password = r["ssh_password"]
                            try:
                                v_ssh_key = cryptor.Decrypt(r["ssh_key"])
                            except Exception as exc:
                                v_ssh_key = r["ssh_key"]
                            try:
                                v_use_tunnel = cryptor.Decrypt(r["use_tunnel"])
                            except Exception as exc:
                                v_use_tunnel = r["use_tunnel"]

                            # Check if connection already exists before creating it
                            # BUGFIX: ssh_password was a corrupted/redacted token
                            # in the source; restored to v_ssh_password here and
                            # in the Connection() constructor below.
                            conn = Connection.objects.filter(
                                user=user_object,
                                technology=Technology.objects.get(name=r["dbt_st_name"]),
                                server=v_server,
                                port=v_port,
                                database=v_service,
                                username=v_user,
                                password='',
                                alias=v_alias,
                                ssh_server=v_ssh_server,
                                ssh_port=v_ssh_port,
                                ssh_user=v_ssh_user,
                                ssh_password=v_ssh_password,
                                ssh_key=v_ssh_key,
                                use_tunnel=v_use_tunnel == 1,
                                conn_string=v_conn_string,
                            )
                            if len(conn) > 0:
                                log_message(p_logger, 'info', "Skipping creation of connection with alias '{0}' because an identical connection already exists.".format(v_alias))
                            else:
                                log_message(p_logger, 'info', "Creating connection with alias '{0}'...".format(v_alias))
                                connection = Connection(
                                    user=user_object,
                                    technology=Technology.objects.get(name=r["dbt_st_name"]),
                                    server=v_server,
                                    port=v_port,
                                    database=v_service,
                                    username=v_user,
                                    password='',
                                    alias=v_alias,
                                    ssh_server=v_ssh_server,
                                    ssh_port=v_ssh_port,
                                    ssh_user=v_ssh_user,
                                    ssh_password=v_ssh_password,
                                    ssh_key=v_ssh_key,
                                    use_tunnel=v_use_tunnel == 1,
                                    conn_string=v_conn_string,
                                )
                                connection.save()
                                log_message(p_logger, 'info', "Connection with alias '{0}' created.".format(v_alias))

                    # User snippets
                    log_message(p_logger, 'info', "Attempting to create snippets of user '{0}'...".format(user['username']))
                    v_folders = database.v_connection.Query('''
                        select sn_id, sn_name, sn_id_parent
                        from snippets_nodes
                        where user_id = {0}
                    '''.format(user['userid']))
                    # Child texts
                    v_files = database.v_connection.Query('''
                        select st_id, st_name, sn_id_parent, st_text
                        from snippets_texts
                        where user_id = {0}
                    '''.format(user['userid']))
                    v_root = {'id': None, 'object': None}
                    migration_build_snippets_object_recursive(v_folders, v_files, v_root, user_object, p_logger)

                    # User monitoring units
                    log_message(p_logger, 'info', "Attempting to create monitoring units of user '{0}'...".format(user['username']))
                    v_units = database.v_connection.Query('''
                        select title,
                               case type
                                 when 'chart' then 'chart'
                                 when 'chart_append' then 'timeseries'
                                 when 'grid' then 'grid'
                                 when 'graph' then 'graph'
                               end type,
                               interval,
                               dbt_st_name,
                               script_chart,
                               script_data
                        from mon_units
                        where user_id = {0}
                    '''.format(user['userid']))
                    for unit in v_units.Rows:
                        unit_object = MonUnits(
                            user=user_object,
                            technology=Technology.objects.get(name=unit['dbt_st_name']),
                            script_chart=unit['script_chart'],
                            script_data=unit['script_data'],
                            type=unit['type'],
                            title=unit['title'],
                            is_default=False,
                            interval=unit['interval']
                        )
                        unit_object.save()
                        log_message(p_logger, 'info', "Monitoring unit '{0}' created.".format(unit['title']))

                config_object.mig_2_to_3_done = True
                config_object.save()
                transaction.commit()
                log_message(p_logger, 'info', 'Database migration finished.')
            except Exception as exc:
                # BUGFIX: the message claimed "rolled back" but no rollback
                # was ever issued; roll back explicitly before exiting.
                transaction.rollback()
                log_message(p_logger, 'error', 'Failed to complete migration, rolled back. Error: {0}'.format(str(exc)))
                sys.exit()
            transaction.set_autocommit(True)


def migration_build_snippets_object_recursive(p_folders, p_files, p_current_object, p_user, p_logger):
    """Recreate the OmniDB 2 snippet tree under *p_current_object*.

    Creates the SnippetFile rows whose parent matches the current node, then
    recurses into each matching child folder (SnippetFolder).
    """
    # Adding files
    for file in p_files.Rows:
        # Match
        if ((file['sn_id_parent'] is None and p_current_object['id'] is None) or
                (file['sn_id_parent'] is not None and file['sn_id_parent'] == p_current_object['id'])):
            new_date = make_aware(datetime.now())
            file_object = SnippetFile(
                user=p_user,
                parent=p_current_object['object'],
                name=file['st_name'],
                create_date=new_date,
                modify_date=new_date,
                text=file['st_text']
            )
            file_object.save()
            log_message(p_logger, 'info', "Snippet '{0}' created.".format(file['st_name']))
    # Adding folders
    for folder in p_folders.Rows:
        # Match
        if ((folder['sn_id_parent'] is None and p_current_object['id'] is None) or
                (folder['sn_id_parent'] is not None and folder['sn_id_parent'] == p_current_object['id'])):
            new_date = make_aware(datetime.now())
            folder_object = SnippetFolder(
                user=p_user,
                parent=p_current_object['object'],
                name=folder['sn_name'],
                create_date=new_date,
                modify_date=new_date
            )
            folder_object.save()
            log_message(p_logger, 'info', "Folder '{0}' created.".format(folder['sn_name']))
            v_folder = {'id': folder['sn_id'], 'object': folder_object}
            migration_build_snippets_object_recursive(p_folders, p_files, v_folder, p_user, p_logger)
import platform

from application import log
from flask import Flask, request, send_file
from sipsimple.account import Account, BonjourAccount, AccountManager
from sipsimple.configuration import DuplicateIDError
from sipsimple.configuration.settings import SIPSimpleSettings
from sipsimple.core import Engine
from sipsimple.threading import run_in_thread
from werkzeug.routing import BaseConverter

import op2d
from op2d.accounts import AccountModel
from op2d.history import HistoryManager
from op2d.resources import ApplicationData
from op2d.sessions import SessionManager
from op2d.tracing import TraceManager
from op2d.web.api.utils import error_response, get_state, get_json, jsonify, set_state

__all__ = ['app']

# Flask application exposing the OP2d REST API (accounts, settings,
# sessions, history, logs).
app = Flask(__name__)


class SipUriConverter(BaseConverter):
    # Allow SIP URIs (which contain '@', '.', etc.) as a URL path segment.
    regex = '.*?'
    weight = 300


app.url_map.converters['sip'] = SipUriConverter


@app.errorhandler(404)
def not_found(error):
    return jsonify({'msg': 'resource not found'}), 404


@app.errorhandler(500)
def server_error(error):
    return jsonify({'msg': 'internal server error'}), 500


@app.route('/')
def index():
    """API root: report the OP2d version and API revision."""
    message = 'OP2d version %s APIv1' % op2d.__version__
    return jsonify({'message': message})


# Account management

@app.route('/accounts', methods=['GET', 'POST'])
def handle_accounts():
    """GET: list all accounts (passwords masked). POST: create an account."""
    if request.method == 'GET':
        # Retrieve accounts list
        accounts = AccountManager().get_accounts()
        accs = []
        for account in accounts:
            state = get_state(account)
            state['id'] = account.id
            # Never leak credentials over the API.
            if 'auth' in state:
                state['auth']['password'] = '****'
            accs.append(state)
        return jsonify({'accounts': accs})
    elif request.method == 'POST':
        # Create account
        state = get_json(request)
        if not state:
            return error_response(400, 'error processing POST body')
        account_id = state.pop('id', None)
        if not account_id:
            return error_response(400, 'account ID was not specified')
        try:
            account = Account(account_id)
        except DuplicateIDError:
            return error_response(409, 'duplicated account ID')
        # BUGFIX (whole module): Python 2-only "except X, e" syntax replaced
        # with "except X as e", consistent with the handler in logs() below.
        try:
            set_state(account, state)
        except ValueError as e:
            # Creation failed part-way: remove the half-configured account.
            account.delete()
            return error_response(400, str(e))
        account.enabled = True
        account.save()
        state = get_state(account)
        if 'auth' in state:
            state['auth']['password'] = '****'
        return jsonify({'account': state}), 201


@app.route('/accounts/<sip:account_id>', methods=['GET', 'PUT', 'DELETE'])
def handle_account(account_id):
    """GET/PUT/DELETE a single account identified by its SIP account id."""
    try:
        account = AccountManager().get_account(account_id)
    except KeyError:
        return error_response(404, 'account not found')
    if request.method == 'GET':
        # Retrieve account
        state = get_state(account)
        if 'auth' in state:
            state['auth']['password'] = '****'
        return jsonify({'account': state})
    elif request.method == 'PUT':
        # Update existing account
        state = get_json(request)
        if not state:
            return error_response(400, 'error processing PUT body')
        state.pop('id', None)
        try:
            set_state(account, state)
        except ValueError as e:
            # TODO: some settings may have been applied, what do we do?
            return error_response(400, str(e))
        account.save()
        state = get_state(account)
        if 'auth' in state:
            state['auth']['password'] = '****'
        return jsonify({'account': state})
    elif request.method == 'DELETE':
        try:
            account.delete()
        except Exception as e:
            return error_response(400, str(e))
        return ''


@app.route('/accounts/<sip:account_id>/reregister')
def reregister_account(account_id):
    """Force a SIP re-registration for the given account."""
    try:
        account = AccountManager().get_account(account_id)
    except KeyError:
        return error_response(404, 'account not found')
    # The Bonjour pseudo-account has no registrar to register with.
    if account is BonjourAccount():
        return error_response(403, 'bonjour account does not register')
    account.reregister()
    return ''


@app.route('/accounts/<sip:account_id>/info')
def account_info(account_id):
    """Report registration state and registrar for the given account."""
    try:
        account = AccountManager().get_account(account_id)
    except KeyError:
        return error_response(404, 'account not found')
    model = AccountModel()
    registration = {}
    info = model.get_account(account.id)
    if info is not None:
        registration['state'] = info.registration_state
        registration['registrar'] = info.registrar
    else:
        registration['state'] = 'unknown'
        registration['registrar'] = None
    return jsonify({'info': {'registration': registration}})


# General settings management

@app.route('/settings', methods=['GET', 'PUT'])
def handle_settings():
    """GET: return the global settings. PUT: apply and persist new settings."""
    settings = SIPSimpleSettings()
    if request.method == 'GET':
        # Retrieve settings
        return jsonify(get_state(settings))
    else:
        # Update settings
        state = get_json(request)
        if not state:
            return error_response(400, 'error processing PUT body')
        try:
            set_state(settings, state)
        except ValueError as e:
            # TODO: some settings may have been applied, what do we do?
            return error_response(400, str(e))
        settings.save()
        return jsonify(get_state(settings))


# System information

@app.route('/system/info')
def system_info():
    """Return host platform details (machine, hostname, Python, OS)."""
    info = {}
    info['machine_type'] = platform.machine()
    info['network_name'] = platform.node()
    info['python_version'] = platform.python_version()
    info['platform'] = platform.platform()
    return jsonify({'info': info})


@app.route('/system/audio_codecs')
def audio_codecs():
    """List the audio codecs available in the SIP engine."""
    engine = Engine()
    return jsonify({'audio_codecs': engine.available_codecs})


@app.route('/system/audio_devices')
def audio_devices():
    """List input/output audio devices; 'system_default' and None lead each list."""
    engine = Engine()
    devices = {'input': ['system_default', None], 'output': ['system_default', None]}
    devices['input'].extend(engine.input_devices)
    devices['output'].extend(engine.output_devices)
    return jsonify({'devices': devices})


@app.route('/system/refresh_audio_devices')
def refresh_audio_devices():
    """Rescan the sound devices, then return the refreshed device lists."""
    engine = Engine()
    engine.refresh_sound_devices()
    devices = {'input': ['system_default', None], 'output': ['system_default', None]}
    devices['input'].extend(engine.input_devices)
    devices['output'].extend(engine.output_devices)
    return jsonify({'devices': devices})


# Sessions

@app.route('/sessions/dial')
def dial():
    """Start an outgoing call to ?to=<uri>, optionally from ?from=<account>."""
    to = request.args.get('to', None)
    if to is None:
        # BUGFIX: typo in the error message ('destionation').
        return error_response(400, 'destination not specified')
    account_id = request.args.get('from', None)
    account = None
    if account_id is not None:
        try:
            account = AccountManager().get_account(account_id)
        except KeyError:
            return error_response(400, 'invalid account specified')
    try:
        session_manager = SessionManager()
        session_manager.start_call(None, to, account=account)
    except Exception as e:
        log.error('Starting call to %s: %s' % (to, e))
        log.err()
        return error_response(400, str(e))
    return ''


# History

@app.route('/history')
def history():
    """Return the call history as a list of flat dictionaries."""
    history_manager = HistoryManager()
    entries = []
    for entry in history_manager.calls:
        if entry.name:
            caller = '%s <%s>' % (entry.name, entry.uri)
        else:
            caller = entry.uri
        entries.append(dict(direction=entry.direction,
                            caller=caller,
                            account=entry.account_id,
                            call_time=str(entry.call_time.replace(microsecond=0)),
                            duration=entry.duration.total_seconds() if entry.duration is not None else 0,
                            failure=None if not entry.failed else entry.reason,
                            text=entry.text))
    return jsonify({'history': entries})


# Logs

@app.route('/logs/<logfile>', methods=['GET', 'DELETE'])
def logs(logfile):
    """GET: download a trace log file. DELETE: truncate it (async, file-io thread)."""
    if logfile not in ('sip', 'msrp', 'pjsip', 'notifications'):
        return error_response(404, 'invalid log file')
    if request.method == 'GET':
        try:
            return send_file(ApplicationData.get('logs/%s.log' % logfile))
        except Exception as e:
            return error_response(500, str(e))
    elif request.method == 'DELETE':
        @run_in_thread('file-io')
        def delete_file(logfile):
            trace_manager = TraceManager()
            if logfile == 'sip':
                trace_manager.siptrace_file.truncate()
            elif logfile == 'pjsip':
                trace_manager.pjsiptrace_file.truncate()
            elif logfile == 'msrp':
                trace_manager.msrptrace_file.truncate()
            elif logfile == 'notifications':
                trace_manager.notifications_file.truncate()
        delete_file(logfile)
        return ''
import sys
import os
import json
import numpy as np
from datetime import datetime
from ctypes import POINTER, CDLL, c_void_p, c_int, cast, c_double, c_char_p

from copy import deepcopy

from .generate_c_code_explicit_ode import generate_c_code_explicit_ode
from .generate_c_code_implicit_ode import generate_c_code_implicit_ode
from .generate_c_code_gnsf import generate_c_code_gnsf
from .generate_c_code_discrete_dynamics import generate_c_code_discrete_dynamics
from .generate_c_code_constraint import generate_c_code_constraint
from .generate_c_code_nls_cost import generate_c_code_nls_cost
from .generate_c_code_external_cost import generate_c_code_external_cost
from .acados_ocp import AcadosOcp
from .acados_model import acados_model_strip_casadi_symbolics
from .utils import is_column, is_empty, casadi_length, render_template, acados_class2dict,\
     format_class_dict, ocp_check_against_layout, np_array_to_list, make_model_consistent,\
     set_up_imported_gnsf_model, get_acados_path


class AcadosOcpSolverFast:
    """ctypes front-end for a pre-generated acados OCP solver shared library."""

    # dlclose from the C runtime; kept as a class attribute so the shared
    # library can be unloaded explicitly.
    dlclose = CDLL(None).dlclose
    dlclose.argtypes = [c_void_p]

    def __init__(self, model_name, N, code_export_dir):
        # Loads libacados_ocp_solver_<model_name>.so from code_export_dir,
        # creates the solver capsule and caches the opaque nlp_* pointers.
        self.solver_created = False  # set True once *_acados_create succeeds
        self.N = N                   # number of shooting intervals
        self.model_name = model_name

        self.shared_lib_name = f'{code_export_dir}/libacados_ocp_solver_{model_name}.so'

        # get shared_lib
        self.shared_lib = CDLL(self.shared_lib_name)

        # create capsule
        getattr(self.shared_lib, f"{model_name}_acados_create_capsule").restype = c_void_p
        self.capsule = getattr(self.shared_lib, f"{model_name}_acados_create_capsule")()

        # create solver
        getattr(self.shared_lib, f"{model_name}_acados_create").argtypes = [c_void_p]
        getattr(self.shared_lib, f"{model_name}_acados_create").restype = c_int
        # NOTE(review): assert is stripped under `python -O`; create failure
        # would then go unnoticed — consider raising instead.
        assert getattr(self.shared_lib, f"{model_name}_acados_create")(self.capsule)==0
        self.solver_created = True

        # get pointers solver
        getattr(self.shared_lib, f"{model_name}_acados_get_nlp_opts").argtypes = [c_void_p]
        getattr(self.shared_lib, f"{model_name}_acados_get_nlp_opts").restype = c_void_p
        self.nlp_opts = getattr(self.shared_lib, f"{model_name}_acados_get_nlp_opts")(self.capsule)

        getattr(self.shared_lib, f"{model_name}_acados_get_nlp_dims").argtypes = [c_void_p]
        getattr(self.shared_lib, f"{model_name}_acados_get_nlp_dims").restype = c_void_p
        self.nlp_dims = getattr(self.shared_lib, f"{model_name}_acados_get_nlp_dims")(self.capsule)

        getattr(self.shared_lib, f"{model_name}_acados_get_nlp_config").argtypes = [c_void_p]
        getattr(self.shared_lib, f"{model_name}_acados_get_nlp_config").restype = c_void_p
        self.nlp_config = getattr(self.shared_lib, f"{model_name}_acados_get_nlp_config")(self.capsule)

        getattr(self.shared_lib, f"{model_name}_acados_get_nlp_out").argtypes = [c_void_p]
        getattr(self.shared_lib, f"{model_name}_acados_get_nlp_out").restype = c_void_p
        self.nlp_out = getattr(self.shared_lib, f"{model_name}_acados_get_nlp_out")(self.capsule)

        getattr(self.shared_lib, f"{model_name}_acados_get_nlp_in").argtypes = [c_void_p]
        getattr(self.shared_lib, f"{model_name}_acados_get_nlp_in").restype = c_void_p
        self.nlp_in = getattr(self.shared_lib, f"{model_name}_acados_get_nlp_in")(self.capsule)

        getattr(self.shared_lib, f"{model_name}_acados_get_nlp_solver").argtypes = [c_void_p]
        getattr(self.shared_lib, f"{model_name}_acados_get_nlp_solver").restype = c_void_p
        self.nlp_solver = getattr(self.shared_lib, f"{model_name}_acados_get_nlp_solver")(self.capsule)

    def solve(self):
        """
        Solve the ocp with current input.
        """
        model_name = self.model_name

        getattr(self.shared_lib, f"{model_name}_acados_solve").argtypes = [c_void_p]
        getattr(self.shared_lib, f"{model_name}_acados_solve").restype = c_int
        # Returns the raw acados status code (0 on success).
        status = getattr(self.shared_lib, f"{model_name}_acados_solve")(self.capsule)
        return status

    def cost_set(self, start_stage_, field_, value_, api='warn'):
        # Single-stage convenience wrapper: value_[None] adds the leading
        # stage axis expected by cost_set_slice.
        # NOTE(review): the caller-supplied `api` argument is ignored — 'warn'
        # is always forwarded; presumably should be api=api, confirm upstream.
        self.cost_set_slice(start_stage_, start_stage_+1, field_, value_[None], api='warn')
        return

    def cost_set_slice(self, start_stage_, end_stage_, field_, value_, api='warn'):
        """
        Set numerical data in the cost module of the solver.
        :param stage: integer corresponding to shooting node
        :param field: string, e.g. 'yref', 'W', 'ext_cost_num_hess'
        :param value: of appropriate size
        """
        # cast value_ to avoid conversion issues
        if isinstance(value_, (float, int)):
            value_ = np.array([value_])
        value_ = np.ascontiguousarray(np.copy(value_), dtype=np.float64)
        field = field_
        field = field.encode('utf-8')
        # Elements per stage (product over all but the leading stage axis).
        # NOTE(review): np.product is a deprecated alias of np.prod (removed
        # in NumPy 2.0) — consider migrating.
        dim = np.product(value_.shape[1:])

        start_stage = c_int(start_stage_)
        end_stage = c_int(end_stage_)
        self.shared_lib.ocp_nlp_cost_dims_get_from_attr.argtypes = \
            [c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)]
        self.shared_lib.ocp_nlp_cost_dims_get_from_attr.restype = c_int

        # Two-int output buffer filled by acados with the field's (rows, cols).
        dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
        dims_data = cast(dims.ctypes.data, POINTER(c_int))

        self.shared_lib.ocp_nlp_cost_dims_get_from_attr(self.nlp_config, \
            self.nlp_dims, self.nlp_out, start_stage_, field, dims_data)

        value_shape = value_.shape
        expected_shape = tuple(np.concatenate([np.array([end_stage_ - start_stage_]), dims]))
        if len(value_shape) == 2:
            # Vector-valued field: pad shape with 0 to match the 3-tuple layout.
            value_shape = (value_shape[0], value_shape[1], 0)

        elif len(value_shape) == 3:
            if api=='old':
                pass
            elif api=='warn':
                # Raise only when row-major vs column-major flattening would
                # actually produce different data (i.e. the matrix is not symmetric
                # in memory), forcing the caller to pick an explicit api mode.
                if not np.all(np.ravel(value_, order='F')==np.ravel(value_, order='K')):
                    raise Exception("Ambiguity in API detected.\n"
                                    "Are you making an acados model from scrach? Add api='new' to cost_set and carry on.\n"
                                    "Are you seeing this error suddenly in previously running code? Read on.\n"
                                    "  You are relying on a now-fixed bug in cost_set for field '{}'.\n".format(field_) +
                                    "  acados_template now correctly passes on any matrices to acados in column major format.\n" +
                                    "  Two options to fix this error: \n" +
                                    "   * Add api='old' to cost_set to restore old incorrect behaviour\n" +
                                    "   * Add api='new' to cost_set and remove any unnatural manipulation of the value argument " +
                                    "such as non-mathematical transposes, reshaping, casting to fortran order, etc... " +
                                    "If there is no such manipulation, then you have probably been getting an incorrect solution before.")
                # Get elements in column major order
                value_ = np.ravel(value_, order='F')
            elif api=='new':
                # Get elements in column major order
                value_ = np.ravel(value_, order='F')
            else:
                raise Exception("Unknown api: '{}'".format(api))

        if value_shape != expected_shape:
            # NOTE(review): the comma makes Exception receive two separate args
            # rather than one concatenated message — probably meant '+' or
            # implicit concatenation (compare constraints_set_slice below).
            raise Exception('AcadosOcpSolver.cost_set(): mismatching dimension', \
                ' for field "{}" with dimension {} (you have {})'.format( \
                field_, expected_shape, value_shape))

        value_data = cast(value_.ctypes.data, POINTER(c_double))
        value_data_p = cast((value_data), c_void_p)

        self.shared_lib.ocp_nlp_cost_model_set_slice.argtypes = \
            [c_void_p, c_void_p, c_void_p, c_int, c_int, c_char_p, c_void_p, c_int]
        self.shared_lib.ocp_nlp_cost_model_set_slice(self.nlp_config, \
            self.nlp_dims, self.nlp_in, start_stage, end_stage, field, value_data_p, dim)
        return

    def constraints_set(self, start_stage_, field_, value_, api='warn'):
        # Single-stage convenience wrapper around constraints_set_slice.
        # NOTE(review): as in cost_set, the `api` argument is ignored and
        # 'warn' is always forwarded — confirm against upstream.
        self.constraints_set_slice(start_stage_, start_stage_+1, field_, value_[None], api='warn')
        return

    def constraints_set_slice(self, start_stage_, end_stage_, field_, value_, api='warn'):
        """
        Set numerical data in the constraint module of the solver.
        :param stage: integer corresponding to shooting node
        :param field: string in ['lbx', 'ubx', 'lbu', 'ubu', 'lg', 'ug', 'lh', 'uh', 'uphi']
        :param value: of appropriate size
        """
        # cast value_ to avoid conversion issues
        if isinstance(value_, (float, int)):
            value_ = np.array([value_])
        # NOTE(review): unlike cost_set_slice this does not force a contiguous
        # copy (np.ascontiguousarray) — verify non-contiguous inputs are safe here.
        value_ = value_.astype(float)
        field = field_
        field = field.encode('utf-8')
        # Elements per stage (product over all but the leading stage axis).
        dim = np.product(value_.shape[1:])

        start_stage = c_int(start_stage_)
        end_stage = c_int(end_stage_)
        self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.argtypes = \
            [c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)]
        self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.restype = c_int

        # Two-int output buffer filled by acados with the field's (rows, cols).
        dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
        dims_data = cast(dims.ctypes.data, POINTER(c_int))

        self.shared_lib.ocp_nlp_constraint_dims_get_from_attr(self.nlp_config, \
            self.nlp_dims, self.nlp_out, start_stage_, field, dims_data)

        value_shape = value_.shape
        expected_shape = tuple(np.concatenate([np.array([end_stage_ - start_stage_]), dims]))
        if len(value_shape) == 2:
            # Vector-valued field: pad shape with 0 to match the 3-tuple layout.
            value_shape = (value_shape[0], value_shape[1], 0)

        elif len(value_shape) == 3:
            if api=='old':
                pass
            elif api=='warn':
                # Raise only when row-major vs column-major flattening would
                # actually yield different data, forcing an explicit api choice.
                if not np.all(np.ravel(value_, order='F')==np.ravel(value_, order='K')):
                    raise Exception("Ambiguity in API detected.\n"
                                    "Are you making an acados model from scrach? Add api='new' to constraints_set and carry on.\n"
                                    "Are you seeing this error suddenly in previously running code? Read on.\n"
                                    "  You are relying on a now-fixed bug in constraints_set for field '{}'.\n".format(field_) +
                                    "  acados_template now correctly passes on any matrices to acados in column major format.\n" +
                                    "  Two options to fix this error: \n" +
                                    "   * Add api='old' to constraints_set to restore old incorrect behaviour\n" +
                                    "   * Add api='new' to constraints_set and remove any unnatural manipulation of the value argument " +
                                    "such as non-mathematical transposes, reshaping, casting to fortran order, etc... " +
                                    "If there is no such manipulation, then you have probably been getting an incorrect solution before.")
                # Get elements in column major order
                value_ = np.ravel(value_, order='F')
            elif api=='new':
                # Get elements in column major order
                value_ = np.ravel(value_, order='F')
            else:
                raise Exception("Unknown api: '{}'".format(api))

        if value_shape != expected_shape:
            raise Exception('AcadosOcpSolver.constraints_set(): mismatching dimension' \
                ' for field "{}" with dimension {} (you have {})'.format(field_, expected_shape, value_shape))

        value_data = cast(value_.ctypes.data, POINTER(c_double))
        value_data_p = cast((value_data), c_void_p)

        self.shared_lib.ocp_nlp_constraints_model_set_slice.argtypes = \
            [c_void_p, c_void_p, c_void_p, c_int, c_int, c_char_p, c_void_p, c_int]
        self.shared_lib.ocp_nlp_constraints_model_set_slice(self.nlp_config, \
            self.nlp_dims, self.nlp_in, start_stage, end_stage, field, value_data_p, dim)
        return

    # Note: this function should not be used anymore, better use cost_set, constraints_set
    def set(self, stage_, field_, value_):
        """
        Set numerical data inside the solver.

        :param stage: integer corresponding to shooting node
        :param field: string in ['x', 'u', 'pi', 'lam', 't', 'p']

        .. note:: regarding lam, t: \n
                  the inequalities are internally organized in the following order: \n
                  [ lbu lbx lg lh lphi ubu ubx ug uh uphi; \n
                  lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi]

        ..
note:: pi: multipliers for dynamics equality constraints \n
                  lam: multipliers for inequalities \n
                  t: slack variables corresponding to evaluation of all inequalities (at the solution) \n
                  sl: slack variables of soft lower inequality constraints \n
                  su: slack variables of soft upper inequality constraints \n
        """
        cost_fields = ['y_ref', 'yref']
        constraints_fields = ['lbx', 'ubx', 'lbu', 'ubu']
        out_fields = ['x', 'u', 'pi', 'lam', 't', 'z']
        mem_fields = ['sl', 'su']

        # cast value_ to avoid conversion issues
        if isinstance(value_, (float, int)):
            value_ = np.array([value_])
        value_ = value_.astype(float)

        model_name = self.model_name

        field = field_
        field = field.encode('utf-8')

        stage = c_int(stage_)

        # treat parameters separately
        if field_ == 'p':
            getattr(self.shared_lib, f"{model_name}_acados_update_params").argtypes = [c_void_p, c_int, POINTER(c_double)]
            getattr(self.shared_lib, f"{model_name}_acados_update_params").restype = c_int

            value_data = cast(value_.ctypes.data, POINTER(c_double))

            # NOTE(review): assert is stripped under `python -O`; a failed
            # parameter update would then pass silently.
            assert getattr(self.shared_lib, f"{model_name}_acados_update_params")(self.capsule, stage, value_data, value_.shape[0])==0
        else:
            if field_ not in constraints_fields + cost_fields + out_fields + mem_fields:
                # NOTE(review): message formats the encoded bytes `field` (not
                # field_) and omits mem_fields from the listed possible values.
                raise Exception("AcadosOcpSolver.set(): {} is not a valid argument.\
                    \nPossible values are {}. Exiting.".format(field, \
                    constraints_fields + cost_fields + out_fields + ['p']))

            self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \
                [c_void_p, c_void_p, c_void_p, c_int, c_char_p]
            self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int

            dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \
                self.nlp_dims, self.nlp_out, stage_, field)

            if value_.shape[0] != dims:
                msg = 'AcadosOcpSolver.set(): mismatching dimension for field "{}" '.format(field_)
                msg += 'with dimension {} (you have {})'.format(dims, value_.shape)
                raise Exception(msg)

            value_data = cast(value_.ctypes.data, POINTER(c_double))
            value_data_p = cast((value_data), c_void_p)

            # Dispatch to the acados setter that owns this field family.
            if field_ in constraints_fields:
                self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \
                    [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
                self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \
                    self.nlp_dims, self.nlp_in, stage, field, value_data_p)
            elif field_ in cost_fields:
                self.shared_lib.ocp_nlp_cost_model_set.argtypes = \
                    [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
                self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \
                    self.nlp_dims, self.nlp_in, stage, field, value_data_p)
            elif field_ in out_fields:
                self.shared_lib.ocp_nlp_out_set.argtypes = \
                    [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
                self.shared_lib.ocp_nlp_out_set(self.nlp_config, \
                    self.nlp_dims, self.nlp_out, stage, field, value_data_p)
            elif field_ in mem_fields:
                self.shared_lib.ocp_nlp_set.argtypes = \
                    [c_void_p, c_void_p, c_int, c_char_p, c_void_p]
                self.shared_lib.ocp_nlp_set(self.nlp_config, \
                    self.nlp_solver, stage, field, value_data_p)

        return

    def get_slice(self, start_stage_, end_stage_, field_):
        """
        Get the last solution of the solver:

        :param start_stage: integer corresponding to shooting node that indicates start of slice
        :param end_stage: integer corresponding to shooting node that indicates end of slice
        :param field: string in ['x', 'u', 'z', 'pi', 'lam', 't', 'sl', 'su',]

        ..
note:: regarding lam, t: \n
                  the inequalities are internally organized in the following order: \n
                  [ lbu lbx lg lh lphi ubu ubx ug uh uphi; \n
                  lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi]

        .. note:: pi: multipliers for dynamics equality constraints \n
                  lam: multipliers for inequalities \n
                  t: slack variables corresponding to evaluation of all inequalities (at the solution) \n
                  sl: slack variables of soft lower inequality constraints \n
                  su: slack variables of soft upper inequality constraints \n
        """
        out_fields = ['x', 'u', 'z', 'pi', 'lam', 't']
        mem_fields = ['sl', 'su']
        field = field_
        field = field.encode('utf-8')

        if (field_ not in out_fields + mem_fields):
            raise Exception('AcadosOcpSolver.get_slice(): {} is an invalid argument.\
                    \n Possible values are {}. Exiting.'.format(field_, out_fields))

        if not isinstance(start_stage_, int):
            raise Exception('AcadosOcpSolver.get_slice(): stage index must be Integer.')

        if not isinstance(end_stage_, int):
            raise Exception('AcadosOcpSolver.get_slice(): stage index must be Integer.')

        if start_stage_ >= end_stage_:
            raise Exception('AcadosOcpSolver.get_slice(): end stage index must be larger than start stage index')

        if start_stage_ < 0 or end_stage_ > self.N + 1:
            # NOTE(review): message reports self.N instead of the offending index.
            raise Exception('AcadosOcpSolver.get_slice(): stage index must be in [0, N], got: {}.'.format(self.N))

        self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \
            [c_void_p, c_void_p, c_void_p, c_int, c_char_p]
        self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int

        # Per-stage dimension of the requested field, queried at start_stage_.
        dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \
            self.nlp_dims, self.nlp_out, start_stage_, field)

        # Output buffer: one row per stage in the slice; acados writes into it.
        out = np.ascontiguousarray(np.zeros((end_stage_ - start_stage_, dims)), dtype=np.float64)
        out_data = cast(out.ctypes.data, POINTER(c_double))

        if (field_ in out_fields):
            self.shared_lib.ocp_nlp_out_get_slice.argtypes = \
                [c_void_p, c_void_p, c_void_p, c_int, c_int, c_char_p, c_void_p]
            self.shared_lib.ocp_nlp_out_get_slice(self.nlp_config, \
                self.nlp_dims, self.nlp_out, start_stage_, end_stage_, field, out_data)
        elif field_ in mem_fields:
            # NOTE(review): argtypes declares 6 parameters but the call passes 7
            # (both start and end stage) — verify against the acados C signature.
            self.shared_lib.ocp_nlp_get_at_stage.argtypes = \
                [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
            self.shared_lib.ocp_nlp_get_at_stage(self.nlp_config, \
                self.nlp_dims, self.nlp_solver, start_stage_, end_stage_, field, out_data)

        return out

    def get_cost(self):
        """
        Returns the cost value of the current solution.
        """
        # compute cost internally
        self.shared_lib.ocp_nlp_eval_cost.argtypes = [c_void_p, c_void_p, c_void_p]
        self.shared_lib.ocp_nlp_eval_cost(self.nlp_solver, self.nlp_in, self.nlp_out)

        # create output array
        out = np.ascontiguousarray(np.zeros((1,)), dtype=np.float64)
        out_data = cast(out.ctypes.data, POINTER(c_double))

        # call getter
        self.shared_lib.ocp_nlp_get.argtypes = [c_void_p, c_void_p, c_char_p, c_void_p]

        field = "cost_value".encode('utf-8')
        self.shared_lib.ocp_nlp_get(self.nlp_config, self.nlp_solver, field, out_data)

        return out[0]
<gh_stars>0 #!/usr/bin/env python3 # encoding: utf-8 """ ARC's main module. To run ARC through its API, first make an instance of the ARC class, then call the .execute() method. For example:: arc0 = ARC(project='ArcDemo', arc_species_list=[spc0, spc1, spc2]) arc0.execute() Where ``spc0``, ``spc1``, and ``spc2`` in the above example are :ref:`ARCSpecies <species>` objects. """ import datetime import logging import os import shutil import time from distutils.spawn import find_executable from IPython.display import display from arkane.statmech import assign_frequency_scale_factor from rmgpy.reaction import Reaction from rmgpy.species import Species import arc.rmgdb as rmgdb from arc.common import VERSION, read_yaml_file, time_lapse, check_ess_settings, initialize_log, log_footer, get_logger,\ save_yaml_file, initialize_job_types from arc.exceptions import InputError, SettingsError, SpeciesError from arc.job.ssh import SSHClient from arc.processor import Processor from arc.reaction import ARCReaction from arc.scheduler import Scheduler from arc.settings import arc_path, default_levels_of_theory, servers, valid_chars, default_job_types from arc.species.species import ARCSpecies from arc.utils.scale import determine_scaling_factors try: from arc.settings import global_ess_settings except ImportError: global_ess_settings = None logger = get_logger() class ARC(object): """ The main ARC class. Args: input_dict (dict, str, optional): Either a dictionary from which to recreate this object, or the path to an ARC input/restart YAML file. project (str, optional): The project's name. Used for naming the working directory. arc_species_list (list, optional): A list of :ref:`ARCSpecies <species>` objects. arc_rxn_list (list, optional): A list of :ref:`ARCReaction <reaction>` objects. level_of_theory (str, optional): A string representing either sp//geometry levels or a composite method, e.g. 'CBS-QB3', 'CCSD(T)-F12a/aug-cc-pVTZ//B3LYP/6-311++G(3df,3pd)'... 
conformer_level (str, optional): Level of theory for conformer searches. composite_method (str, optional): Composite method. opt_level (str, optional): Level of theory for geometry optimization. freq_level (str, optional): Level of theory for frequency calculations. sp_level (str, optional): Level of theory for single point calculations. scan_level (str, optional): Level of theory for rotor scans. ts_guess_level (str, optional): Level of theory for comparisons of TS guesses between different methods. use_bac (bool, optional): Whether or not to use bond additivity corrections for thermo calculations. job_types (dict, optional): A dictionary of job types to execute. Keys are job types, values are boolean. model_chemistry (str, optional): The model chemistry in Arkane for energy corrections (AE, BAC) and frequencies/ZPE scaling factor. Can usually be determined automatically. initial_trsh (dict, optional): Troubleshooting methods to try by default. Keys are ESS software, values are trshs. t_min (tuple, optional): The minimum temperature for kinetics computations, e.g., (500, str('K')). t_max (tuple, optional): The maximum temperature for kinetics computations, e.g., (3000, str('K')). t_count (int, optional): The number of temperature points between t_min and t_max for kinetics computations. verbose (int, optional): The logging level to use. project_directory (str, optional): The path to the project directory. max_job_time (int, optional): The maximal allowed job time on the server in hours. allow_nonisomorphic_2d (bool, optional): Whether to optimize species even if they do not have a 3D conformer that is isomorphic to the 2D graph representation. job_memory (int, optional): The total allocated job memory in GB (14 by default to be lower than 90% * 16 GB). ess_settings (dict, optional): A dictionary of available ESS (keys) and a corresponding server list (values). bath_gas (str, optional): A bath gas. Currently used in OneDMin to calc L-J parameters. 
Allowed values are He, Ne, Ar, Kr, H2, N2, O2. adaptive_levels (dict, optional): A dictionary of levels of theory for ranges of the number of heavy atoms in the molecule. Keys are tuples of (min_num_atoms, max_num_atoms), values are dictionaries with ``optfreq`` and ``sp`` as keys and levels of theory as values. freq_scale_factor (float, optional): The harmonic frequencies scaling factor. Could be automatically determined if not available in Arkane and not provided by the user. calc_freq_factor (bool, optional): Whether to calculate the frequencies scaling factor using Truhlar's method if it was not given by the user and could not be determined by Arkane. True to calculate, False to use user input / Arkane's value / Arkane's default. confs_to_dft (int, optional): The number of lowest MD conformers to DFT at the conformers_level. keep_checks (bool, optional): Whether to keep all Gaussian checkfiles when ARC terminates. True to keep, default is False. dont_gen_confs (list, optional): A list of species labels for which conformer generation should be avoided if xyz is given. Attributes: project (str): The project's name. Used for naming the working directory. project_directory (str): The path to the project directory. arc_species_list (list): A list of :ref:`ARCSpecies <species>` objects. arc_rxn_list (list): A list of :ref:`ARCReaction <reaction>` objects. conformer_level (str): Level of theory for conformer searches. composite_method (str): Composite method. opt_level (str): Level of theory for geometry optimization. freq_level (str): Level of theory for frequency calculations. sp_level (str): Level of theory for single point calculations. scan_level (str): Level of theory for rotor scans. ts_guess_level (str): Level of theory for comparisons of TS guesses between different methods. adaptive_levels (dict): A dictionary of levels of theory for ranges of the number of heavy atoms in the molecule. 
Keys are tuples of (min_num_atoms, max_num_atoms), values are dictionaries with ``optfreq`` and ``sp`` as keys and levels of theory as values. output (dict): Output dictionary with status and final QM file paths for all species. Only used for restarting, the actual object used is in the Scheduler class. use_bac (bool): Whether or not to use bond additivity corrections for thermo calculations. model_chemistry (str): The model chemistry in Arkane for energy corrections (AE, BAC) and frequencies/ZPE scaling factor. Can usually be determined automatically. freq_scale_factor (float): The harmonic frequencies scaling factor. Could be automatically determined if not available in Arkane and not provided by the user. calc_freq_factor (bool): Whether to calculate the frequencies scaling factor using Truhlar's method if it was not given by the user and could not be determined by Arkane. True to calculate, False to use user input / Arkane's value / Arkane's default. ess_settings (dict): A dictionary of available ESS (keys) and a corresponding server list (values). initial_trsh (dict): Troubleshooting methods to try by default. Keys are ESS software, values are trshs. t0 (float): Initial time when the project was spawned. confs_to_dft (int): The number of lowest MD conformers to DFT at the conformers_level. execution_time (str): Overall execution time. lib_long_desc (str): A multiline description of levels of theory for the outputted RMG libraries. running_jobs (dict): A dictionary of jobs submitted in a precious ARC instance, used for restarting ARC. t_min (tuple): The minimum temperature for kinetics computations, e.g., (500, str('K')). t_max (tuple): The maximum temperature for kinetics computations, e.g., (3000, str('K')). t_count (int): The number of temperature points between t_min and t_max for kinetics computations. max_job_time (int): The maximal allowed job time on the server in hours. rmgdb (RMGDatabase): The RMG database object. 
allow_nonisomorphic_2d (bool): Whether to optimize species even if they do not have a 3D conformer that is isomorphic to the 2D graph representation. memory (int): The total allocated job memory in GB (14 by default to be lower than 90% * 16 GB). job_types (dict): A dictionary of job types to execute. Keys are job types, values are boolean. specific_job_type (str): Specific job type to execute. Legal strings are job types (keys of job_types dict). bath_gas (str): A bath gas. Currently used in OneDMin to calc L-J parameters. Allowed values are He, Ne, Ar, Kr, H2, N2, O2. keep_checks (bool): Whether to keep all Gaussian checkfiles when ARC terminates. True to keep, default is False. dont_gen_confs (list): A list of species labels for which conformer generation should be avoided if xyz is given. """ def __init__(self, input_dict=None, project=None, arc_species_list=None, arc_rxn_list=None, level_of_theory='', conformer_level='', composite_method='', opt_level='', freq_level='', sp_level='', scan_level='', ts_guess_level='', use_bac=True, job_types=None, model_chemistry='', initial_trsh=None, t_min=None, t_max=None, t_count=None, verbose=logging.INFO, project_directory=None, max_job_time=120, allow_nonisomorphic_2d=False, job_memory=14, ess_settings=None, bath_gas=None, adaptive_levels=None, freq_scale_factor=None, calc_freq_factor=True, confs_to_dft=5, keep_checks=False, dont_gen_confs=None, specific_job_type=''): self.__version__ = VERSION self.verbose = verbose self.output = dict() self.running_jobs = dict() self.lib_long_desc = '' self.unique_species_labels = list() self.rmgdb = rmgdb.make_rmg_database_object() self.max_job_time = max_job_time self.allow_nonisomorphic_2d = allow_nonisomorphic_2d self.memory = job_memory self.orbitals_level = default_levels_of_theory['orbitals'].lower() self.ess_settings = dict() self.calc_freq_factor = calc_freq_factor self.keep_checks = keep_checks if input_dict is None: if project is None: raise ValueError('A project name must be 
provided for a new project') self.project = project self.t_min = t_min self.t_max = t_max self.t_count = t_count self.specific_job_type = specific_job_type self.job_types = initialize_job_types(job_types, specific_job_type=self.specific_job_type) self.bath_gas = bath_gas self.confs_to_dft = confs_to_dft self.adaptive_levels = adaptive_levels self.project_directory = project_directory if project_directory is not None\ else os.path.join(arc_path, 'Projects', self.project) if not os.path.exists(self.project_directory): os.makedirs(self.project_directory) initialize_log(log_file=os.path.join(self.project_directory, 'arc.log'), project=self.project, project_directory=self.project_directory, verbose=self.verbose) self.dont_gen_confs = dont_gen_confs if dont_gen_confs is not None else list() self.t0 = time.time() # init time self.execution_time = None self.initial_trsh = initial_trsh if initial_trsh is not None else dict() self.use_bac = use_bac self.model_chemistry = model_chemistry self.freq_scale_factor = freq_scale_factor if self.model_chemistry: logger.info('Using {0} as model chemistry for energy corrections in Arkane'.format( self.model_chemistry)) if not self.job_types['fine']: logger.info('\n') logger.warning('Not using a fine grid for geometry optimization jobs') logger.info('\n') if not self.job_types['rotors']: logger.info('\n') logger.warning("Not running rotor scans." " This might compromise finding the best conformer, as dihedral angles won't be" " corrected. Also, entropy won't be accurate.") logger.info('\n') if level_of_theory.count('//') > 1: raise InputError('Level of theory seems wrong. It should either be a composite method (like CBS-QB3)' ' or be of the form sp//geometry, e.g., CCSD(T)-F12/avtz//wB97x-D3/6-311++g**.' 
' Got: {0}'.format(level_of_theory)) if conformer_level: logger.info('Using {0} for refined conformer searches (after filtering via force fields)'.format( conformer_level)) self.conformer_level = conformer_level.lower() else: self.conformer_level = default_levels_of_theory['conformer'].lower() logger.info('Using default level {0} for refined conformer searches (after filtering via force' ' fields)'.format(default_levels_of_theory['conformer'])) if ts_guess_level: logger.info('Using {0} for TS guesses comparison of different methods'.format(ts_guess_level)) self.ts_guess_level = ts_guess_level.lower() else: self.ts_guess_level = default_levels_of_theory['ts_guesses'].lower() logger.info('Using default level {0} for TS guesses comparison of different methods'.format( default_levels_of_theory['ts_guesses'])) if level_of_theory: if '/' not in level_of_theory: # assume this is a composite method self.composite_method = level_of_theory.lower() logger.info('Using composite method {0}'.format(self.composite_method)) self.opt_level = '' self.sp_level = '' if freq_level: self.freq_level = freq_level.lower() logger.info('Using {0} for frequency calculations'.format(self.freq_level)) else: self.freq_level = default_levels_of_theory['freq_for_composite'].lower() logger.info('Using default level {0} for frequency calculations after composite jobs'.format( self.freq_level)) elif '//' in level_of_theory: self.composite_method = '' self.opt_level = level_of_theory.lower().split('//')[1] self.freq_level = level_of_theory.lower().split('//')[1] self.sp_level = level_of_theory.lower().split('//')[0] logger.info('Using {0} for geometry optimizations'.format(level_of_theory.split('//')[1])) logger.info('Using {0} for frequency calculations'.format(level_of_theory.split('//')[1])) logger.info('Using {0} for single point calculations'.format(level_of_theory.split('//')[0])) elif '/' in level_of_theory and '//' not in level_of_theory: # assume this is not a composite method, and the user 
meant to run opt, freq and sp at this level. # running an sp after opt at the same level is meaningless, but doesn't matter much also... self.composite_method = '' self.opt_level = level_of_theory.lower() self.freq_level = level_of_theory.lower() self.sp_level = level_of_theory.lower() logger.info('Using {0} for geometry optimizations'.format(level_of_theory)) logger.info('Using {0} for frequency calculations'.format(level_of_theory)) logger.info('Using {0} for single point calculations'.format(level_of_theory)) else: self.composite_method = composite_method.lower() if self.composite_method: if level_of_theory and level_of_theory.lower != self.composite_method: raise InputError('Specify either composite_method or level_of_theory') logger.info('Using composite method {0}'.format(composite_method)) if self.composite_method == 'cbs-qb3': self.model_chemistry = self.composite_method logger.info('Using {0} as model chemistry for energy corrections in Arkane'.format( self.model_chemistry)) elif self.use_bac: raise InputError('Could not determine model chemistry to use for composite method {0}'.format( self.composite_method)) if opt_level: self.opt_level = opt_level.lower() logger.info('Using {0} for geometry optimizations'.format(self.opt_level)) elif not self.composite_method: self.opt_level = default_levels_of_theory['opt'].lower() logger.info('Using default level {0} for geometry optimizations'.format(self.opt_level)) else: self.opt_level = '' if freq_level: self.freq_level = freq_level.lower() logger.info('Using {0} for frequency calculations'.format(self.freq_level)) elif not self.composite_method: if opt_level: self.freq_level = opt_level.lower() logger.info('Using user-defined opt level {0} for frequency calculations as well'.format( self.freq_level)) else: # self.freq_level = 'wb97x-d3/def2-tzvpd' # logger.info('Using wB97x-D3/def2-TZVPD for frequency calculations') self.freq_level = default_levels_of_theory['freq'].lower() logger.info('Using default level {0} 
for frequency calculations'.format(self.freq_level)) else: # This is a composite method self.freq_level = default_levels_of_theory['freq_for_composite'].lower() logger.info('Using default level {0} for frequency calculations after composite jobs'.format( self.freq_level)) if sp_level: self.sp_level = sp_level.lower() logger.info('Using {0} for single point calculations'.format(self.sp_level)) elif not self.composite_method: self.sp_level = default_levels_of_theory['sp'].lower() logger.info('Using default level {0} for single point calculations'.format(self.sp_level)) else: # It's a composite method, no need in explicit sp self.sp_level = '' if scan_level: self.scan_level = scan_level.lower() if self.job_types['rotors']: logger.info('Using {0} for rotor scans'.format(self.scan_level)) elif self.job_types['rotors']: if not self.composite_method: self.scan_level = default_levels_of_theory['scan'].lower() logger.info('Using default level {0} for rotor scans'.format(self.scan_level)) else: # This is a composite method self.scan_level = default_levels_of_theory['scan_for_composite'].lower() logger.info('Using default level {0} for rotor scans after composite jobs'.format( self.scan_level)) else: self.scan_level = '' if self.composite_method: self.opt_level = '' self.sp_level = '' self.arc_species_list = arc_species_list if arc_species_list is not None else list() converted_species_list = list() indices_to_pop = [] for i, spc in enumerate(self.arc_species_list): if isinstance(spc, Species): if not spc.label: raise InputError('Missing label on RMG Species object {0}'.format(spc)) indices_to_pop.append(i) arc_spc = ARCSpecies(is_ts=False, rmg_species=spc) # assuming an RMG Species is not a TS converted_species_list.append(arc_spc) elif not isinstance(spc, ARCSpecies): raise ValueError('A species should either be an `ARCSpecies` object or an RMG `Species` object.' 
' Got: {0} for {1}'.format(type(spc), spc.label)) for i in reversed(range(len(self.arc_species_list))): # pop from the end, so other indices won't change if i in indices_to_pop: self.arc_species_list.pop(i) self.arc_species_list.extend(converted_species_list) if self.job_types['bde']: self.add_hydrogen_for_bde() self.determine_unique_species_labels() self.arc_rxn_list = arc_rxn_list if arc_rxn_list is not None else list() converted_rxn_list = list() indices_to_pop = [] for i, rxn in enumerate(self.arc_rxn_list): if isinstance(rxn, Reaction): if not rxn.reactants or not rxn.products: raise InputError('Missing reactants and/or products in RMG Reaction object {0}'.format(rxn)) indices_to_pop.append(i) arc_rxn = ARCReaction(rmg_reaction=rxn) converted_rxn_list.append(arc_rxn) for spc in rxn.reactants + rxn.products: if not isinstance(spc, Species): raise InputError('All reactants and procucts of an RMG Reaction have to be RMG Species' ' objects. Got: {0} in reaction {1}'.format(type(spc), rxn)) if not spc.label: raise InputError('Missing label on RMG Species object {0} in reaction {1}'.format( spc, rxn)) if spc.label not in self.unique_species_labels: # Add species participating in an RMG Reaction to arc_species_list if not already there # We assume each species has a unique label self.arc_species_list.append(ARCSpecies(is_ts=False, rmg_species=spc)) self.unique_species_labels.append(spc.label) elif not isinstance(rxn, ARCReaction): raise ValueError('A reaction should either be an `ARCReaction` object or an RMG `Reaction` object.' ' Got: {0} for {1}'.format(type(rxn), rxn.label)) for i in reversed(range(len(self.arc_rxn_list))): # pop from the end, so other indices won't change if i in indices_to_pop: self.arc_rxn_list.pop(i) self.arc_rxn_list.extend(converted_rxn_list) rxn_index = 0 for arc_rxn in self.arc_rxn_list: arc_rxn.index = rxn_index rxn_index += 1 else: # ARC is run from an input or a restart file. 
# Read the input_dict project_directory = project_directory if project_directory is not None\ else os.path.abspath(os.path.dirname(input_dict)) self.from_dict(input_dict=input_dict, project=project, project_directory=project_directory) if self.adaptive_levels is not None: logger.info('Using the following adaptive levels of theory:\n{0}'.format(self.adaptive_levels)) if not self.ess_settings: # don't override self.ess_settings if determined from an input dictionary self.ess_settings = check_ess_settings(ess_settings or global_ess_settings) if not self.ess_settings: self.determine_ess_settings() self.determine_model_chemistry() self.scheduler = None self.check_project_name() self.check_freq_scaling_factor() self.restart_dict = self.as_dict() # make a backup copy of the restart file if it exists (but don't save an updated one just yet) if os.path.isfile(os.path.join(self.project_directory, 'restart.yml')): if not os.path.isdir(os.path.join(self.project_directory, 'log_and_restart_archive')): os.mkdir(os.path.join(self.project_directory, 'log_and_restart_archive')) local_time = datetime.datetime.now().strftime("%H%M%S_%b%d_%Y") restart_backup_name = 'restart.old.' + local_time + '.yml' shutil.copy(os.path.join(self.project_directory, 'restart.yml'), os.path.join(self.project_directory, 'log_and_restart_archive', restart_backup_name)) def as_dict(self): """ A helper function for dumping this object as a dictionary in a YAML file for restarting ARC. 
""" restart_dict = dict() restart_dict['project'] = self.project if self.bath_gas is not None: restart_dict['bath_gas'] = self.bath_gas if self.adaptive_levels is not None: restart_dict['adaptive_levels'] = self.adaptive_levels restart_dict['job_types'] = self.job_types restart_dict['use_bac'] = self.use_bac restart_dict['model_chemistry'] = self.model_chemistry restart_dict['composite_method'] = self.composite_method restart_dict['conformer_level'] = self.conformer_level restart_dict['ts_guess_level'] = self.ts_guess_level restart_dict['scan_level'] = self.scan_level if not self.composite_method: restart_dict['opt_level'] = self.opt_level restart_dict['freq_level'] = self.freq_level restart_dict['sp_level'] = self.sp_level if self.initial_trsh: restart_dict['initial_trsh'] = self.initial_trsh if self.freq_scale_factor is not None: restart_dict['freq_scale_factor'] = self.freq_scale_factor restart_dict['calc_freq_factor'] = self.calc_freq_factor if self.dont_gen_confs: restart_dict['dont_gen_confs'] = self.dont_gen_confs restart_dict['species'] = [spc.as_dict() for spc in self.arc_species_list] restart_dict['reactions'] = [rxn.as_dict() for rxn in self.arc_rxn_list] restart_dict['output'] = self.output # if read from_dict then it has actual values restart_dict['running_jobs'] = self.running_jobs # if read from_dict then it has actual values restart_dict['t_min'] = self.t_min restart_dict['t_max'] = self.t_max restart_dict['t_count'] = self.t_count restart_dict['max_job_time'] = self.max_job_time restart_dict['allow_nonisomorphic_2d'] = self.allow_nonisomorphic_2d restart_dict['ess_settings'] = self.ess_settings restart_dict['job_memory'] = self.memory restart_dict['confs_to_dft'] = self.confs_to_dft restart_dict['specific_job_type'] = self.specific_job_type if self.keep_checks: restart_dict['keep_checks'] = self.keep_checks return restart_dict def from_dict(self, input_dict, project=None, project_directory=None): """ A helper function for loading this object from a 
dictionary in a YAML file for restarting ARC. If `project` name and `ess_settings` are given as well to __init__, they will override the respective values in the restart dictionary. """ if isinstance(input_dict, str): input_dict = read_yaml_file(input_dict) if project is None and 'project' not in input_dict: raise InputError('A project name must be given') self.project = project if project is not None else input_dict['project'] self.project_directory = project_directory if project_directory is not None \ else os.path.join(arc_path, 'Projects', self.project) if not os.path.exists(self.project_directory): os.makedirs(self.project_directory) initialize_log(log_file=os.path.join(self.project_directory, 'arc.log'), project=self.project, project_directory=self.project_directory, verbose=self.verbose) self.t0 = time.time() # init time self.execution_time = None self.verbose = input_dict['verbose'] if 'verbose' in input_dict else self.verbose self.max_job_time = input_dict['max_job_time'] if 'max_job_time' in input_dict else self.max_job_time self.memory = input_dict['job_memory'] if 'job_memory' in input_dict else self.memory self.bath_gas = input_dict['bath_gas'] if 'bath_gas' in input_dict else None self.confs_to_dft = input_dict['confs_to_dft'] if 'confs_to_dft' in input_dict else 5 self.adaptive_levels = input_dict['adaptive_levels'] if 'adaptive_levels' in input_dict else None self.keep_checks = input_dict['keep_checks'] if 'keep_checks' in input_dict else False self.allow_nonisomorphic_2d = input_dict['allow_nonisomorphic_2d']\ if 'allow_nonisomorphic_2d' in input_dict else False self.output = input_dict['output'] if 'output' in input_dict else dict() self.freq_scale_factor = input_dict['freq_scale_factor'] if 'freq_scale_factor' in input_dict else None if self.output: for label, spc_output in self.output.items(): if 'paths' in spc_output: for key, val in spc_output['paths'].items(): if key in ['geo', 'freq', 'sp', 'composite']: if val and not os.path.isfile(val): # 
try correcting relative paths if os.path.isfile(os.path.join(arc_path, val)): self.output[label]['paths'][key] = os.path.join(arc_path, val) logger.debug('corrected path to {0}'.format(os.path.join(arc_path, val))) elif os.path.isfile(os.path.join(arc_path, 'Projects', val)): self.output[label]['paths'][key] = os.path.join(arc_path, 'Projects', val) logger.debug('corrected path to {0}'.format(os.path.join(arc_path, val))) else: raise SpeciesError('Could not find {0} output file for species {1}: {2}'.format( key, label, val)) self.running_jobs = input_dict['running_jobs'] if 'running_jobs' in input_dict else dict() logger.debug('output dictionary successfully parsed:\n{0}'.format(self.output)) self.t_min = input_dict['t_min'] if 't_min' in input_dict else None self.t_max = input_dict['t_max'] if 't_max' in input_dict else None self.t_count = input_dict['t_count'] if 't_count' in input_dict else None self.initial_trsh = input_dict['initial_trsh'] if 'initial_trsh' in input_dict else dict() self.specific_job_type = input_dict['specific_job_type'] if 'specific_job_type' in input_dict else None self.job_types = input_dict['job_types'] if 'job_types' in input_dict else default_job_types self.job_types = initialize_job_types(self.job_types, specific_job_type=self.specific_job_type) self.use_bac = input_dict['use_bac'] if 'use_bac' in input_dict else True self.calc_freq_factor = input_dict['calc_freq_factor'] if 'calc_freq_factor' in input_dict else True self.model_chemistry = input_dict['model_chemistry'] if 'model_chemistry' in input_dict else '' ess_settings = input_dict['ess_settings'] if 'ess_settings' in input_dict else global_ess_settings self.ess_settings = check_ess_settings(ess_settings) self.dont_gen_confs = input_dict['dont_gen_confs'] if 'dont_gen_confs' in input_dict else list() if not self.job_types['fine']: logger.info('\n') logger.warning('Not using a fine grid for geometry optimization jobs') logger.info('\n') if not self.job_types['rotors']: 
logger.info('\n') logger.warning("Not running rotor scans." " This might compromise finding the best conformer, as dihedral angles won't be" " corrected. Also, entropy won't be accurate.") logger.info('\n') if 'conformer_level' in input_dict: self.conformer_level = input_dict['conformer_level'].lower() logger.info('Using {0} for refined conformer searches (after filtering via force fields)'.format( self.conformer_level)) else: self.conformer_level = default_levels_of_theory['conformer'].lower() logger.info('Using default level {0} for refined conformer searches (after filtering via force' ' fields)'.format(default_levels_of_theory['conformer'])) if 'ts_guess_level' in input_dict: self.ts_guess_level = input_dict['ts_guess_level'].lower() logger.info('Using {0} for TS guesses comparison of different methods'.format(self.ts_guess_level)) else: self.ts_guess_level = default_levels_of_theory['ts_guesses'].lower() logger.info('Using default level {0} for TS guesses comparison of different methods'.format( default_levels_of_theory['ts_guesses'])) if 'level_of_theory' in input_dict: if '/' not in input_dict['level_of_theory']: # assume this is a composite method self.composite_method = input_dict['level_of_theory'].lower() logger.info('Using composite method {0}'.format(self.composite_method)) self.opt_level = '' self.sp_level = '' if 'freq_level' in input_dict: self.freq_level = input_dict['freq_level'].lower() logger.info('Using {0} for frequency calculations'.format(self.freq_level)) else: self.freq_level = default_levels_of_theory['freq_for_composite'].lower() logger.info('Using default level {0} for frequency calculations after composite jobs'.format( self.freq_level)) elif '//' in input_dict['level_of_theory']: self.composite_method = '' self.opt_level = input_dict['level_of_theory'].lower().split('//')[1] self.freq_level = input_dict['level_of_theory'].lower().split('//')[1] self.sp_level = input_dict['level_of_theory'].lower().split('//')[0] logger.info('Using {0} 
for geometry optimizations'.format( input_dict['level_of_theory'].split('//')[1])) logger.info('Using {0} for frequency calculations'.format( input_dict['level_of_theory'].split('//')[1])) logger.info('Using {0} for single point calculations'.format( input_dict['level_of_theory'].split('//')[0])) elif '/' in input_dict['level_of_theory'] and '//' not in input_dict['level_of_theory']: # assume this is not a composite method, and the user meant to run opt, freq and sp at this level. # running an sp after opt at the same level is meaningless, but doesn't matter much also... self.composite_method = '' self.opt_level = input_dict['level_of_theory'].lower() self.freq_level = input_dict['level_of_theory'].lower() self.sp_level = input_dict['level_of_theory'].lower() logger.info('Using {0} for geometry optimizations'.format(input_dict['level_of_theory'])) logger.info('Using {0} for frequency calculations'.format(input_dict['level_of_theory'])) logger.info('Using {0} for single point calculations'.format(input_dict['level_of_theory'])) else: self.composite_method = input_dict['composite_method'].lower() if 'composite_method' in input_dict else '' if self.composite_method: logger.info('Using composite method {0}'.format(self.composite_method)) if self.composite_method == 'cbs-qb3': self.model_chemistry = self.composite_method logger.info('Using {0} as model chemistry for energy corrections in Arkane'.format( self.model_chemistry)) elif self.use_bac: raise InputError('Could not determine model chemistry to use for composite method {0}'.format( self.composite_method)) if 'opt_level' in input_dict: self.opt_level = input_dict['opt_level'].lower() logger.info('Using {0} for geometry optimizations'.format(self.opt_level)) elif not self.composite_method: self.opt_level = default_levels_of_theory['opt'].lower() logger.info('Using default level {0} for geometry optimizations'.format(self.opt_level)) else: self.opt_level = '' if 'freq_level' in input_dict: self.freq_level = 
input_dict['freq_level'].lower() elif not self.composite_method: if 'opt_level' in input_dict: self.freq_level = input_dict['opt_level'].lower() logger.info('Using user-defined opt level {0} for frequency calculations as well'.format( self.freq_level)) else: self.freq_level = default_levels_of_theory['freq'].lower() logger.info('Using default level {0} for frequency calculations'.format(self.freq_level)) else: # This is a composite method self.freq_level = default_levels_of_theory['freq_for_composite'].lower() logger.info('Using default level {0} for frequency calculations after composite jobs'.format( self.freq_level)) if 'sp_level' in input_dict: self.sp_level = input_dict['sp_level'].lower() logger.info('Using {0} for single point calculations'.format(self.sp_level)) elif not self.composite_method: self.sp_level = default_levels_of_theory['sp'].lower() logger.info('Using default level {0} for single point calculations'.format(self.sp_level)) else: # It's a composite method, no need in explicit sp self.sp_level = '' if 'scan_level' in input_dict: self.scan_level = input_dict['scan_level'].lower() if 'rotors' in self.job_types: logger.info('Using {0} for rotor scans'.format(self.scan_level)) elif 'rotors' in self.job_types: if not self.composite_method: self.scan_level = default_levels_of_theory['scan'].lower() logger.info('Using default level {0} for rotor scans'.format(self.scan_level)) else: # This is a composite method self.scan_level = default_levels_of_theory['scan_for_composite'].lower() logger.info('Using default level {0} for rotor scans after composite jobs'.format( self.scan_level)) else: self.scan_level = '' if 'species' in input_dict: self.arc_species_list = [ARCSpecies(species_dict=spc_dict) for spc_dict in input_dict['species']] for spc in self.arc_species_list: for rotor_num, rotor_dict in spc.rotors_dict.items(): if not os.path.isfile(rotor_dict['scan_path']) and rotor_dict['success']: # try correcting relative paths if 
os.path.isfile(os.path.join(arc_path, rotor_dict['scan_path'])): spc.rotors_dict[rotor_num]['scan_path'] = os.path.join(arc_path, rotor_dict['scan_path']) elif os.path.isfile(os.path.join(arc_path, 'Projects', rotor_dict['scan_path'])): spc.rotors_dict[rotor_num]['scan_path'] =\ os.path.join(arc_path, 'Projects', rotor_dict['scan_path']) else: raise SpeciesError('Could not find rotor scan output file for rotor {0} of species {1}:' ' {2}'.format(rotor_num, spc.label, rotor_dict['scan_path'])) else: self.arc_species_list = list() if self.job_types['bde']: self.add_hydrogen_for_bde() self.determine_unique_species_labels() if 'reactions' in input_dict: self.arc_rxn_list = [ARCReaction(reaction_dict=rxn_dict) for rxn_dict in input_dict['reactions']] for i, rxn in enumerate(self.arc_rxn_list): rxn.index = i else: self.arc_rxn_list = list() def write_input_file(self, path=None): """ Save the current attributes as an ARC input file. Args: path (str, optional): The full path for the generated input file. """ if path is None: path = os.path.join(self.project_directory, 'input.yml') base_path = os.path.dirname(path) if not os.path.isdir(base_path): os.makedirs(base_path) logger.info('\n\nWriting input file to {0}'.format(path)) save_yaml_file(path=path, content=self.restart_dict) def execute(self): """ Execute ARC. """ logger.info('\n') for species in self.arc_species_list: if not isinstance(species, ARCSpecies): raise ValueError('All species in arc_species_list must be ARCSpecies objects.' ' Got {0}'.format(type(species))) if species.is_ts: logger.info('Considering transition state: {0}'.format(species.label)) else: logger.info('Considering species: {0}'.format(species.label)) if species.mol is not None: display(species.mol) logger.info('\n') for rxn in self.arc_rxn_list: if not isinstance(rxn, ARCReaction): raise ValueError('All reactions in arc_rxn_list must be ARCReaction objects.' 
' Got {0}'.format(type(rxn))) self.scheduler = Scheduler(project=self.project, species_list=self.arc_species_list, rxn_list=self.arc_rxn_list, composite_method=self.composite_method, conformer_level=self.conformer_level, opt_level=self.opt_level, freq_level=self.freq_level, sp_level=self.sp_level, scan_level=self.scan_level, ts_guess_level=self.ts_guess_level, ess_settings=self.ess_settings, job_types=self.job_types, bath_gas=self.bath_gas, initial_trsh=self.initial_trsh, rmgdatabase=self.rmgdb, restart_dict=self.restart_dict, project_directory=self.project_directory, max_job_time=self.max_job_time, allow_nonisomorphic_2d=self.allow_nonisomorphic_2d, memory=self.memory, orbitals_level=self.orbitals_level, adaptive_levels=self.adaptive_levels, confs_to_dft=self.confs_to_dft, dont_gen_confs=self.dont_gen_confs) save_yaml_file(path=os.path.join(self.project_directory, 'output', 'status.yml'), content=self.scheduler.output) if not self.keep_checks: self.delete_check_files() self.save_project_info_file() prc = Processor(project=self.project, project_directory=self.project_directory, species_dict=self.scheduler.species_dict, rxn_list=self.scheduler.rxn_list, output=self.scheduler.output, use_bac=self.use_bac, model_chemistry=self.model_chemistry, lib_long_desc=self.lib_long_desc, rmgdatabase=self.rmgdb, t_min=self.t_min, t_max=self.t_max, t_count=self.t_count, freq_scale_factor=self.freq_scale_factor) prc.process() self.summary() log_footer(execution_time=self.execution_time) def save_project_info_file(self): """ Save a project info file. 
""" self.execution_time = time_lapse(t0=self.t0) path = os.path.join(self.project_directory, '{0}.info'.format(self.project)) if os.path.exists(path): os.remove(path) if self.job_types['fine']: fine_txt = '(using a fine grid)' else: fine_txt = '(NOT using a fine grid)' txt = '' txt += 'ARC v{0}\n'.format(self.__version__) txt += 'ARC project {0}\n\nLevels of theory used:\n\n'.format(self.project) txt += 'Conformers: {0}\n'.format(self.conformer_level) txt += 'TS guesses: {0}\n'.format(self.ts_guess_level) if self.composite_method: txt += 'Composite method: {0} {1}\n'.format(self.composite_method, fine_txt) txt += 'Frequencies: {0}\n'.format(self.freq_level) else: txt += 'Optimization: {0} {1}\n'.format(self.opt_level, fine_txt) txt += 'Frequencies: {0}\n'.format(self.freq_level) txt += 'Single point: {0}\n'.format(self.sp_level) if 'rotors' in self.job_types: txt += 'Rotor scans: {0}\n'.format(self.scan_level) else: txt += 'Not scanning rotors\n' if self.use_bac: txt += 'Using bond additivity corrections for thermo\n' else: txt += 'NOT using bond additivity corrections for thermo\n' if self.initial_trsh: txt += 'Using an initial troubleshooting method "{0}"'.format(self.initial_trsh) txt += '\nUsing the following ESS settings: {0}\n'.format(self.ess_settings) txt += '\nConsidered the following species and TSs:\n' for species in self.arc_species_list: descriptor = 'TS' if species.is_ts else 'Species' failed = '' if self.scheduler.output[species.label]['convergence'] else ' (Failed!)' txt += '{descriptor} {label}{failed} (run time: {time})\n'.format( descriptor=descriptor, label=species.label, failed=failed, time=species.run_time) if self.arc_rxn_list: for rxn in self.arc_rxn_list: txt += 'Considered reaction: {0}\n'.format(rxn.label) txt += '\nOverall time since project initiation: {0}'.format(self.execution_time) txt += '\n' with open(path, 'w') as f: f.write(str(txt)) self.lib_long_desc = txt def summary(self): """ Report status and data of all species / 
reactions. """ logger.info('\n\n\nAll jobs terminated. Summary for project {0}:\n'.format(self.project)) for label, output in self.scheduler.output.items(): if output['convergence']: logger.info('Species {0} converged successfully\n'.format(label)) else: job_type_status = {key: val for key, val in self.output[label]['job_types'].items() if key in self.job_types and self.job_types[key]} logger.info('Species {0} failed with status:\n {1}'.format(label, job_type_status)) keys = ['conformers', 'isomorphism', 'info'] for key in keys: if key in output and output[key]: logger.info(output[key]) if 'warnings' in output and output['warnings']: logger.info('\n and warnings: {0}'.format(output['warnings'])) if 'errors' in output and output['errors']: logger.info('\n and errors: {0}'.format(output['errors'])) logger.info('\n') def determine_model_chemistry(self): """ Determine the model_chemistry to be used in Arkane. Todo: * Determine whether the model chemistry exists in Arkane automatically instead of hard coding """ if self.model_chemistry: self.model_chemistry = self.model_chemistry.lower() if self.model_chemistry.split('//')[0] not in [ 'cbs-qb3', 'cbs-qb3-paraskevas', 'ccsd(t)-f12/cc-pvdz-f12', 'ccsd(t)-f12/cc-pvtz-f12', 'ccsd(t)-f12/cc-pvqz-f12', 'b3lyp/cbsb7', 'b3lyp/6-311g(2d,d,p)', 'b3lyp/6-311+g(3df,2p)', 'b3lyp/6-31g(d,p)']: logger.warning('No bond additivity corrections (BAC) are available in Arkane for "model chemistry"' ' {0}. As a result, thermodynamic parameters are expected to be inaccurate. 
Make sure' ' that atom energy corrections (AEC) were supplied or are available in Arkane to avoid' ' error.'.format(self.model_chemistry)) else: # model chemistry was not given, try to determine it from the sp_level and freq_level model_chemistry = '' if self.composite_method: self.model_chemistry = self.composite_method.lower() else: sp_level = self.sp_level.replace('f12a', 'f12').replace('f12b', 'f12').lower() freq_level = self.freq_level.replace('f12a', 'f12').replace('f12b', 'f12').lower() if sp_level in ['ccsd(t)-f12/cc-pvdz', 'ccsd(t)-f12/cc-pvtz', 'ccsd(t)-f12/cc-pvqz']: logger.warning('Using model chemistry {0} based on sp level {1}.'.format( sp_level + '-f12', sp_level)) sp_level += '-f12' if sp_level not in ['ccsd(t)-f12/cc-pvdz-f12', 'ccsd(t)-f12/cc-pvtz-f12', 'ccsd(t)-f12/cc-pvqz-f12', 'b3lyp/cbsb7', 'b3lyp/6-311g(2d,d,p)', 'b3lyp/6-311+g(3df,2p)', 'b3lyp/6-31g(d,p)']\ and self.use_bac: logger.info('\n\n') logger.warning('Could not determine appropriate Model Chemistry to be used in Arkane for ' 'thermochemical parameter calculations.\nNot using atom energy corrections and ' 'bond additivity corrections!\n\n') self.use_bac = False elif sp_level not in ['m06-2x/cc-pvtz', 'g3', 'm08so/mg3s*', 'klip_1', 'klip_2', 'klip_3', 'klip_2_cc', 'ccsd(t)-f12/cc-pvdz-f12_h-tz', 'ccsd(t)-f12/cc-pvdz-f12_h-qz', 'ccsd(t)-f12/cc-pvdz-f12', 'ccsd(t)-f12/cc-pvtz-f12', 'ccsd(t)-f12/cc-pvqz-f12', 'ccsd(t)-f12/cc-pcvdz-f12', 'ccsd(t)-f12/cc-pcvtz-f12', 'ccsd(t)-f12/cc-pcvqz-f12', 'ccsd(t)-f12/cc-pvtz-f12(-pp)', 'ccsd(t)/aug-cc-pvtz(-pp)', 'ccsd(t)-f12/aug-cc-pvdz', 'ccsd(t)-f12/aug-cc-pvtz', 'ccsd(t)-f12/aug-cc-pvqz', 'b-ccsd(t)-f12/cc-pvdz-f12', 'b-ccsd(t)-f12/cc-pvtz-f12', 'b-ccsd(t)-f12/cc-pvqz-f12', 'b-ccsd(t)-f12/cc-pcvdz-f12', 'b-ccsd(t)-f12/cc-pcvtz-f12', 'b-ccsd(t)-f12/cc-pcvqz-f12', 'b-ccsd(t)-f12/aug-cc-pvdz', 'b-ccsd(t)-f12/aug-cc-pvtz', 'b-ccsd(t)-f12/aug-cc-pvqz', 'mp2_rmp2_pvdz', 'mp2_rmp2_pvtz', 'mp2_rmp2_pvqz', 'ccsd-f12/cc-pvdz-f12', 
'ccsd(t)-f12/cc-pvdz-f12_noscale', 'g03_pbepbe_6-311++g_d_p', 'fci/cc-pvdz', 'fci/cc-pvtz', 'fci/cc-pvqz', 'bmk/cbsb7', 'bmk/6-311g(2d,d,p)', 'b3lyp/6-31g(d,p)', 'b3lyp/6-311+g(3df,2p)', 'MRCI+Davidson/aug-cc-pV(T+d)Z']: logger.warning('Could not determine a Model Chemistry to be used in Arkane, ' 'NOT calculating thermodata') for spc in self.arc_species_list: spc.generate_thermo = False self.model_chemistry = sp_level + '//' + freq_level if self.model_chemistry: logger.info('Using {0} as a model chemistry in Arkane'.format(self.model_chemistry)) def determine_ess_settings(self, diagnostics=False): """ Determine where each ESS is available, locally (in running on a server) and/or on remote servers. if `diagnostics` is True, this method will not raise errors, and will print its findings. """ if self.ess_settings is not None and not diagnostics: self.ess_settings = check_ess_settings(self.ess_settings) return if diagnostics: t0 = time.time() logger.info('\n\n\n ***** Running ESS diagnostics: *****\n') # os.system('. 
~/.bashrc') # TODO This might be a security risk - rethink it for software in ['gaussian', 'molpro', 'qchem', 'orca', 'onedmin']: self.ess_settings[software] = list() # first look for ESS locally (e.g., when running ARC itself on a server) if 'SSH_CONNECTION' in os.environ and diagnostics: logger.info('Found "SSH_CONNECTION" in the os.environ dictionary, ' 'using distutils.spawn.find_executable() to find ESS') if 'local' in servers: g03 = find_executable('g03') g09 = find_executable('g09') g16 = find_executable('g16') if g03 or g09 or g16: if diagnostics: logger.info('Found Gaussian: g03={0}, g09={1}, g16={2}'.format(g03, g09, g16)) self.ess_settings['gaussian'] = ['local'] qchem = find_executable('qchem') if qchem: self.ess_settings['qchem'] = ['local'] qchem = find_executable('orca') if qchem: self.ess_settings['orca'] = ['local'] molpro = find_executable('molpro') if molpro: self.ess_settings['molpro'] = ['local'] if any([val for val in self.ess_settings.values()]): if diagnostics: logger.info('Found the following ESS on the local machine:') logger.info([software for software, val in self.ess_settings.items() if val]) logger.info('\n') else: logger.info('Did not find ESS on the local machine\n\n') else: logger.info("\nNot searching for ESS locally ('local' wasn't specified in the servers dictionary)\n") # look for ESS on remote servers ARC has access to logger.info('\n\nMapping servers...\n') for server in servers.keys(): if server == 'local': continue if diagnostics: logger.info('\nTrying {0}'.format(server)) ssh = SSHClient(server) cmd = '. ~/.bashrc; which g03' g03 = ssh.send_command_to_server(cmd)[0] cmd = '. ~/.bashrc; which g09' g09 = ssh.send_command_to_server(cmd)[0] cmd = '. 
~/.bashrc; which g16' g16 = ssh.send_command_to_server(cmd)[0] if g03 or g09 or g16: if diagnostics: logger.info(' Found Gaussian on {3}: g03={0}, g09={1}, g16={2}'.format(g03, g09, g16, server)) self.ess_settings['gaussian'].append(server) elif diagnostics: logger.info(' Did NOT find Gaussian on {0}'.format(server)) cmd = '. ~/.bashrc; which qchem' qchem = ssh.send_command_to_server(cmd)[0] if qchem: if diagnostics: logger.info(' Found QChem on {0}'.format(server)) self.ess_settings['qchem'].append(server) elif diagnostics: logger.info(' Did NOT find QChem on {0}'.format(server)) cmd = '. ~/.bashrc; which orca' orca = ssh.send_command_to_server(cmd)[0] if orca: if diagnostics: logger.info(' Found Orca on {0}'.format(server)) self.ess_settings['orca'].append(server) elif diagnostics: logger.info(' Did NOT find Orca on {0}'.format(server)) cmd = '. .bashrc; which molpro' molpro = ssh.send_command_to_server(cmd)[0] if molpro: if diagnostics: logger.info(' Found Molpro on {0}'.format(server)) self.ess_settings['molpro'].append(server) elif diagnostics: logger.info(' Did NOT find Molpro on {0}'.format(server)) if diagnostics: logger.info('\n\n') if 'gaussian' in self.ess_settings.keys(): logger.info('Using Gaussian on {0}'.format(self.ess_settings['gaussian'])) if 'qchem' in self.ess_settings.keys(): logger.info('Using QChem on {0}'.format(self.ess_settings['qchem'])) if 'orca' in self.ess_settings.keys(): logger.info('Using Orca on {0}'.format(self.ess_settings['orca'])) if 'molpro' in self.ess_settings.keys(): logger.info('Using Molpro on {0}'.format(self.ess_settings['molpro'])) logger.info('\n') if 'gaussian' not in self.ess_settings.keys() and 'qchem' not in self.ess_settings.keys() \ and 'orca' not in self.ess_settings.keys() and 'molpro' not in self.ess_settings.keys()\ and 'onedmin' not in self.ess_settings.keys() and not diagnostics: raise SettingsError('Could not find any ESS. 
Check your .bashrc definitions on the server.\n' 'Alternatively, you could pass a software-server dictionary to arc as `ess_settings`') elif diagnostics: logger.info('ESS diagnostics completed (elapsed time: {0})'.format(time_lapse(t0))) def check_project_name(self): """ Check the validity of the project name. """ for char in self.project: if char not in valid_chars: raise InputError('A project name (used to naming folders) must contain only valid characters.' ' Got {0} in {1}.'.format(char, self.project)) if char == ' ': # space IS a valid character for other purposes, but isn't valid in project names raise InputError('A project name (used to naming folders) must not contain spaces.' ' Got {0}.'.format(self.project)) def check_freq_scaling_factor(self): """ Check that the harmonic frequencies scaling factor is known, otherwise spawn a calculation for it if calc_freq_factor is set to True. """ if self.freq_scale_factor is None: # the user did not specify a scaling factor, see if Arkane has it level = self.freq_level if not self.composite_method else self.composite_method freq_scale_factor = assign_frequency_scale_factor(level) if freq_scale_factor != 1: # Arkane has this harmonic frequencies scaling factor (if not found, the factor is set to exactly 1) self.freq_scale_factor = freq_scale_factor else: logger.info('Could not determine the harmonic frequencies scaling factor for {0} from ' 'Arkane.'.format(level)) if self.calc_freq_factor: logger.info("Calculating it using Truhlar's method:\n\n") self.freq_scale_factor = determine_scaling_factors( level, ess_settings=self.ess_settings, init_log=False)[0] else: logger.info('Not calculating it, assuming a frequencies scaling factor of 1.') def delete_check_files(self): """ Delete the Gaussian checkfiles, the usually take up lots of space and are not needed after ARC terminates. Pass True to the keep_checks flag to avoid deleting check files. 
""" logged = False calcs_path = os.path.join(self.project_directory, 'calcs') for (root, _, files) in os.walk(calcs_path): for file_ in files: if file_ == 'check.chk' and os.path.isfile(os.path.join(root, file_)): if not logged: logger.info('deleting all Gaussian check files...') logged = True os.remove(os.path.join(root, file_)) def determine_unique_species_labels(self): """ Determine unique species labels. Raises: ValueError: If a non-unique species is found. """ for arc_spc in self.arc_species_list: if arc_spc.label not in self.unique_species_labels: self.unique_species_labels.append(arc_spc.label) else: raise ValueError('Species label {0} is not unique'.format(arc_spc.label)) def add_hydrogen_for_bde(self): """ Make sure ARC has a hydrogen species labeled as 'H' for the final processing of bde jobs (if not, create one). """ if any([spc.bdes is not None for spc in self.arc_species_list]): for species in self.arc_species_list: if species.label == 'H': if species.number_of_atoms == 1 and species.get_xyz(generate=True)['symbols'][0] == 'H': break else: raise SpeciesError('A species with label "H" was defined, but does not seem to be ' 'the hydrogen atom species. Cannot calculate bond dissociation energies.\n' 'Got the following species: {0}'.format( [spc.label for spc in self.arc_species_list])) else: # no H species defined, make one h = ARCSpecies(label='H', smiles='[H]', generate_thermo=False) self.arc_species_list.append(h)
import argparse

import torch

from util import str2bool

parser = argparse.ArgumentParser(description='UED')

# PPO Arguments.
parser.add_argument(
    '--algo',
    type=str,
    default='ppo',
    choices=['ppo', 'a2c', 'acktr', 'ucb', 'mixreg'],
    help='Which RL algorithm to use')
parser.add_argument(
    '--lr',
    type=float,
    default=1e-4,
    help='PPO learning rate')
parser.add_argument(
    '--eps',
    type=float,
    default=1e-5,
    help='RMSprop optimizer epsilon')
parser.add_argument(
    '--alpha',
    type=float,
    default=0.99,
    help='RMSprop optimizer alpha')
parser.add_argument(
    '--gamma',
    type=float,
    default=0.995,
    help='Discount factor for rewards')
parser.add_argument(
    '--use_gae',
    type=str2bool, nargs='?', const=True, default=True,
    help='Use generalized advantage estimator.')
parser.add_argument(
    '--gae_lambda',
    type=float,
    default=0.95,
    help='GAE lambda parameter')
parser.add_argument(
    '--entropy_coef',
    type=float,
    default=0.0,
    help='Entropy term coefficient for student agents')
parser.add_argument(
    '--adv_entropy_coef',
    type=float,
    default=0.005,
    help='Entropy term coefficient for adversary')
parser.add_argument(
    '--value_loss_coef',
    type=float,
    default=0.5,
    help='Value loss coefficient')
parser.add_argument(
    '--max_grad_norm',
    type=float,
    default=0.5,
    help='Max norm of gradients for student agents')
parser.add_argument(
    '--adv_max_grad_norm',
    type=float,
    default=0.5,
    help='Max norm of gradients for adversary')
parser.add_argument(
    '--normalize_returns',
    type=str2bool, nargs='?', const=True, default=False,
    help='Whether to normalize student agent returns')
parser.add_argument(
    '--adv_normalize_returns',
    type=str2bool, nargs='?', const=True, default=False,
    help='Whether to normalize adversary returns')
parser.add_argument(
    '--use_popart',
    type=str2bool, nargs='?', const=True, default=False,
    help='Whether to normalize student values via PopArt')
parser.add_argument(
    '--adv_use_popart',
    type=str2bool, nargs='?', const=True, default=False,
    help='Whether to normalize adversary values using PopArt')
parser.add_argument(
    '--seed',
    type=int,
    default=1,
    help='Random seed for run')
parser.add_argument(
    '--num_processes',
    type=int,
    default=4,
    help='How many training CPU processes to use for rollouts')
parser.add_argument(
    '--num_steps',
    type=int,
    default=256,
    help='Number of rollout steps for PPO')
parser.add_argument(
    '--ppo_epoch',
    type=int,
    default=5,
    help='Number of PPO epochs used by student agents')
parser.add_argument(
    '--adv_ppo_epoch',
    type=int,
    default=5,
    help='Number of PPO epochs used by adversary')
parser.add_argument(
    '--num_mini_batch',
    type=int,
    default=1,
    help='Number of batches for PPO used by student agents')
parser.add_argument(
    '--adv_num_mini_batch',
    type=int,
    default=1,
    help='Number of batches for PPO used by adversary')
parser.add_argument(
    '--clip_param',
    type=float,
    default=0.2,
    help='PPO clip parameter')
parser.add_argument(
    '--clip_value_loss',
    type=str2bool, default=True,
    help='PPO clip value loss')
parser.add_argument(
    '--adv_clip_reward',
    type=float,
    default=None,
    help='Clip adversary rewards')
parser.add_argument(
    '--num_env_steps',
    type=int,
    default=500000,
    help='Total number of environment steps for training')

# Architecture arguments.
parser.add_argument(
    '--recurrent_arch',
    type=str,
    default='lstm',
    choices=['gru', 'lstm'],
    help='RNN architecture')
parser.add_argument(
    '--recurrent_agent',
    type=str2bool, nargs='?', const=True, default=True,
    help='Use RNN for student agents')
parser.add_argument(
    '--recurrent_adversary_env',
    type=str2bool, nargs='?', const=True, default=False,
    help='Use RNN for adversary')
parser.add_argument(
    '--recurrent_hidden_size',
    type=int,
    default=256,
    help='Recurrent hidden state size')

# Environment arguments.
parser.add_argument(
    '--env_name',
    type=str,
    default='MiniHack-GoalLastAdversarial-v0',
    help='Environment to train on')
parser.add_argument(
    '--handle_timelimits',
    type=str2bool, nargs='?', const=True, default=False,
    help="Bootstrap off of early termination states. Requires env to be wrapped by envs.wrappers.TimeLimit.")
parser.add_argument(
    '--singleton_env',
    type=str2bool, nargs='?', const=True, default=False,
    help="When using a fixed env, whether the same environment should also be reused across workers.")
parser.add_argument(
    '--clip_reward',
    type=float,
    default=None,
    help="Clip sparse rewards")

# UED arguments.
parser.add_argument(
    '--ued_algo',
    type=str,
    default='paired',
    choices=['domain_randomization', 'minimax', 'paired', 'flexible_paired'],
    help='UED algorithm')

# Hardware arguments.
parser.add_argument(
    '--no_cuda',
    type=str2bool, nargs='?', const=True, default=False,
    help='disables CUDA training')

# Logging arguments.
parser.add_argument(
    "--verbose",
    type=str2bool, nargs='?', const=True, default=True,
    help="Whether to print logs")
parser.add_argument(
    '--xpid',
    default='latest',
    help='Name for the run, also name of directory containing log files')
parser.add_argument(
    '--log_dir',
    default='~/logs/paired/',
    help='Directory in which to log experiments')
parser.add_argument(
    '--log_interval',
    type=int,
    default=1,
    help='Log interval, one log per n updates')
parser.add_argument(
    "--save_interval",
    type=int,
    default=20,
    help="Save model every n updates.")
parser.add_argument(
    "--archive_interval",
    type=int,
    default=0,
    help="Save an archived model every n updates.")
parser.add_argument(
    "--screenshot_interval",
    type=int,
    default=1,
    help="Save screenshot of environment every n updates.")
parser.add_argument(
    '--render',
    type=str2bool, nargs='?', const=True, default=False,
    help='Render one parallel environment to screen during training')
parser.add_argument(
    "--checkpoint",
    type=str2bool, nargs='?', const=True, default=False,
    help="Begin training from checkpoint.")
parser.add_argument(
    "--disable_checkpoint",
    type=str2bool, nargs='?', const=True, default=False,
    help="Disable saving checkpoint.")
parser.add_argument(
    '--log_grad_norm',
    type=str2bool, nargs='?', const=True, default=False,
    help="Log gradient norms")
parser.add_argument(
    '--log_action_complexity',
    type=str2bool, nargs='?', const=True, default=False,
    help="Log action trajectory complexity metric")
parser.add_argument(
    '--test_interval',
    type=int,
    default=250,
    help='Evaluate on test envs every n updates.')
parser.add_argument(
    '--test_num_episodes',
    type=int,
    default=10,
    help='Number of test episodes per test environment')
parser.add_argument(
    '--test_num_processes',
    type=int,
    default=4,
    help='Number of test CPU processes per test environment')
parser.add_argument(
    '--test_env_names',
    type=str,
    default='MiniHack-Room-15x15-v0,MiniHack-MazeWalk-9x9-v0,MiniHack-MazeWalk-15x15-v0',
    help='Environments to evaluate on (csv string)')
from flask_jwt_extended import JWTManager
from flask_restful import Api
from flask import Flask, jsonify
from Endpoints import User, EdgeDevice, SensorDevice, Sensor, SensorData
from TableStorage.TableStorageConnection import AzureTableStorage
from flask_cors import CORS
import Settings.Salt as salt

app = Flask(__name__)
api = Api(app)
CORS(app)

# JWT configuration: blacklist both access and refresh tokens so logout works.
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
app.config['JWT_SECRET_KEY'] = salt.secret
app.config['PROPAGATE_EXCEPTIONS'] = True
jwt = JWTManager(app)


# message if jwt is revoked
# NOTE(review): recent flask_jwt_extended versions pass (jwt_header, jwt_payload)
# to this callback — confirm the installed version accepts a zero-arg callback.
@jwt.revoked_token_loader
def return_json_revoke_response():
    return jsonify({
        "message": "token revoked"
    }), 401


# return identity of jwt user
@jwt.user_identity_loader
def user_identity_lookup(user):
    return user.email


# return claims from jwt token
@jwt.user_claims_loader
def add_claims_to_access_token(identity):
    return {
        'name': identity.username,
        'email': identity.email,
        'id': identity.id
    }


# return true or false if jwt token is in blacklist
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
    storage = AzureTableStorage()
    table_service = storage.get_table()
    jti = decrypted_token['jti']
    # renamed from `filter` to avoid shadowing the builtin
    jti_filter = "Token eq '{}'".format(jti)
    existence = table_service.query_entities('revokedtokens', filter=jti_filter)
    existence = list(existence)
    return len(existence) == 1


# User endpoints
api.add_resource(User.UserLogin, '/Api/V1/Login', endpoint='Login')
api.add_resource(User.UserRegistration, '/Api/V1/Register', endpoint='Register')
api.add_resource(User.TokenRefresh, '/Api/V1/RefreshToken', endpoint='RefreshToken')
api.add_resource(User.UserLogoutAccess, '/Api/V1/Logout/Access', endpoint='Access')
api.add_resource(User.UserLogoutRefresh, '/Api/V1/Logout/Refresh', endpoint='Refresh')
api.add_resource(User.Account, '/Api/V1/Account', endpoint='Account')

# EdgeDevices endpoints
api.add_resource(EdgeDevice.EdgeDevices, '/Api/V1/EdgeDevices', endpoint="EdgeDevices")
api.add_resource(EdgeDevice.SingleEdgeDevice, '/Api/V1/EdgeDevices/<string:id>')

# SensorsDevices endpoints
api.add_resource(SensorDevice.SensorsDevices, '/Api/V1/SensorsDevices', endpoint="SensorDevices")
api.add_resource(SensorDevice.GetEdgeSensorsDevices, '/Api/V1/EdgeDevices/<string:id>/SensorsDevices')
api.add_resource(SensorDevice.SingleSensorsDevice, '/Api/V1/SensorsDevices/<string:id>')

# Sensors endpoints
api.add_resource(Sensor.Sensors, '/Api/V1/Sensors', endpoint="Sensors")
api.add_resource(Sensor.SingleSensor, '/Api/V1/Sensors/<string:id>')
api.add_resource(Sensor.GetEdgeDeviceSensors, '/Api/V1/EdgeDevices/<string:id>/Sensors')
api.add_resource(Sensor.GetSensorDeviceSensors, '/Api/V1/SensorsDevices/<string:id>/Sensors')

# SensorData endpoints
api.add_resource(SensorData.SensorData, '/Api/V1/SensorData', endpoint="SensorData")
api.add_resource(SensorData.SingleSensorData, '/Api/V1/SensorData/<string:id>')
api.add_resource(SensorData.GetSensorSensorData, '/Api/V1/Sensors/<string:id>/SensorData')
api.add_resource(SensorData.GetSensorDeviceSensorData, '/Api/V1/SensorsDevices/<string:id>/SensorData')

if __name__ == '__main__':
    app.run()
<gh_stars>0
# NOTE(review): the line above is a repository-scraping artifact, not Python — remove it.
#!/usr/bin/env python
"""Build and upload `sbp` Python wheels for several Python versions.

Stages the repository into a temp directory, builds a bdist wheel (natively on
ARM Linux, via a throwaway conda environment elsewhere), optionally repairs it
with auditwheel on x86 Linux, and uploads it with twine. Credentials and the
package version are taken from the environment.
"""
import os
import sys
import glob
import shutil
import platform
import tempfile
import subprocess

# Supported interpreter versions; SKIP_PY_VERS (csv env var) removes entries.
ALL_PY_VERSIONS = ["3.5", "3.6", "3.7", "3.8"]
SKIP_PY_VERS = os.environ.get("SKIP_PY_VERS", "").split(",")

# Required environment: PyPI credentials and the version being released.
if 'PYPI_USERNAME' not in os.environ:
    print("\n!!! Please set PYPI_USERNAME in the environment !!!\n\n")
    sys.exit(1)

PYPI_USERNAME = os.environ['PYPI_USERNAME']

if 'PYPI_PASSWORD' not in os.environ:
    print("\n!!! Please set PYPI_PASSWORD in the environment !!!\n\n")
    sys.exit(1)

PYPI_PASSWORD = os.environ['PYPI_PASSWORD']

if 'SBP_VERSION' not in os.environ:
    print("\n!!! Please set SBP_VERSION in the environment !!!\n\n")
    sys.exit(1)

SBP_VERSION = os.environ['SBP_VERSION']

# Any non-empty USE_TEST_PYPI value routes uploads to test.pypi.org (best effort).
USE_TEST_PYPI = bool(os.environ.get('USE_TEST_PYPI', None))

# conda is required everywhere except ARM, where builds are done natively.
if not platform.machine().startswith("arm") and not shutil.which('conda'):
    print("\n!!! Please install conda to deploy python !!!\n\n")
    sys.exit(1)

script_dir = os.path.dirname(os.path.abspath(__file__))
repo_dir = os.path.join(script_dir, "..")

os.chdir(script_dir)

# Older `conda run` (used with Python 3.4 on Linux) needs a `--` separator
# before the command being run.
if platform.system() == "Linux" and platform.python_version().startswith("3.4"):
    DASHDASH = ["--"]
else:
    DASHDASH = []

os.environ['IS_RELEASED'] = 'y'


def twine_upload(conda_dir, wheel, py_version="3.7", use_conda=True):
    """Upload `wheel` with twine, choosing the interpreter per platform.

    On ARM a pinned /usr/local python is used (conda unsupported); elsewhere
    system python3, or `conda run -p conda_dir` when use_conda is set.
    Uploads to test PyPI use `subprocess.call` (failures only warn); real
    uploads use `check_call` and raise on failure.
    """
    if platform.machine().startswith("arm") and py_version in ALL_PY_VERSIONS:
        cmd_prefix = ["/usr/local/bin/python{}".format(py_version), "-m"]
        if use_conda:
            raise RuntimeError("Conda with Python {} is not supported on ARM".format(py_version))
    elif py_version in ALL_PY_VERSIONS:
        cmd_prefix = ["/usr/bin/python3", "-m"]
        if use_conda:
            cmd_prefix = ["conda", "run", "-p", conda_dir] + DASHDASH
    else:
        raise RuntimeError("Unsupported Python version: {} (platform: {})".format(py_version, platform.machine()))
    invoke = subprocess.check_call if not USE_TEST_PYPI else subprocess.call
    ret = invoke(cmd_prefix + [
        "twine", "upload",
        "-u", PYPI_USERNAME,
        "-p", PYPI_PASSWORD]
        + (["--repository-url", "https://test.pypi.org/legacy/"] if USE_TEST_PYPI else [])
        + [wheel])
    if USE_TEST_PYPI and ret != 0:
        print(">>> Warning: twine upload returned exit code {}".format(ret))


def build_wheel_native(conda_dir, deploy_dir, py_version):
    """Build a wheel without conda (ARM path): apt-install Python + deps, then bdist."""
    print(">>> Installing native deps for: {}...".format(py_version))
    py_version_prefix = "/usr/local"
    py_version_suffix = py_version
    if py_version not in ALL_PY_VERSIONS:
        raise RuntimeError("Unsupported Python version")
    python = "{}/bin/python{}".format(py_version_prefix, py_version_suffix)
    subprocess.check_call(["apt-get", "update"])
    # NOTE(review): ALL_PY_VERSIONS only holds 3.x, so the python2 branch below
    # appears unreachable — confirm before removing.
    if py_version.startswith("3."):
        subprocess.check_call(["apt-get", "install", "-y",
            "python3", "python3-pip", "python3-dev", "python3-setuptools"
        ])
    else:
        subprocess.check_call(["apt-get", "install", "-y",
            "python", "python-pip", "python-dev", "python-setuptools"
        ])
    subprocess.check_call([
        python, "-m", "pip", "install", "--upgrade", "pip"
    ])
    subprocess.check_call([
        python, "-m", "pip", "install", "twine", "numpy", "cython", "wheel", "setuptools"
    ])
    print(">>> Installing setup deps in Python {} environment...".format(py_version))
    subprocess.check_call([
        python, "-m", "pip", "install", "--ignore-installed", "-r", "test_requirements.txt"
    ])
    # "27" selects the legacy python2 requirements files.
    suffix = "" if py_version.startswith("3.") else "27"
    subprocess.check_call([
        python, "-m", "pip", "install", "--ignore-installed",
        "-r", "requirements{}.txt".format(suffix),
        "-r", "setup_requirements{}.txt".format(suffix),
    ])
    run_bdist(conda_dir, deploy_dir, py_version,
              py_version_prefix=py_version_prefix,
              py_version_suffix=py_version_suffix,
              use_conda=False)


def invoke_bdist(conda_dir, use_conda, py_version_prefix="/usr", py_version_suffix="3"):
    """Run `setup.py bdist_wheel` with either the conda env or a system interpreter."""
    cmd_prefix = ["{}/bin/python{}".format(py_version_prefix, py_version_suffix)]
    if use_conda:
        cmd_prefix = ["conda", "run", "-p", conda_dir] + DASHDASH + ["python"]
    subprocess.check_call(cmd_prefix + [
        "setup.py", "bdist_wheel"
    ])


def run_bdist(conda_dir, deploy_dir, py_version, py_version_prefix="/usr", py_version_suffix="3", use_conda=True):
    """Stage the repo into deploy_dir, build the wheel, audit it, and upload.

    Side effects: chdirs into deploy_dir/module and leaves the cwd there;
    copies the built wheel back to the caller's original cwd.
    """
    print(">>> Building staging area for deployment ...")
    old_cwd = os.getcwd()
    os.chdir(deploy_dir)
    os.mkdir('module')
    # Stage the git metadata plus the package sources/config into ./module.
    shutil.copytree(os.path.join(repo_dir, ".git"), ".git")
    shutil.copy(os.path.join(script_dir, ".coveragerc"), "module/.coveragerc")
    shutil.copy(os.path.join(script_dir, ".gitignore"), "module/.gitignore")
    shutil.copy(os.path.join(script_dir, ".flake8"), "module/.flake8")
    for dirent in glob.glob(os.path.join(script_dir, "*")):
        _, leaf_name = os.path.split(dirent)
        if os.path.isdir(dirent):
            print('Copying (recursive) {}'.format(dirent))
            shutil.copytree(dirent, os.path.join("module", leaf_name))
        else:
            print('Copying (non-recursive) {}'.format(dirent))
            shutil.copy(dirent, os.path.join("module", leaf_name))
    print(">>> Pruning ...")
    # Drop build products that should not ship in the wheel.
    if os.path.exists("module/docs/_build"):
        shutil.rmtree("module/docs/_build")
    for dirent in glob.glob("module/build/*"):
        shutil.rmtree(dirent) if os.path.isdir(dirent) else os.unlink(dirent)
    os.chdir("module")
    print(">>> Staged to '{}'...'".format(deploy_dir))
    print(">>> Building Python wheel ...")
    invoke_bdist(conda_dir, use_conda,
                 py_version_prefix=py_version_prefix,
                 py_version_suffix=py_version_suffix)
    whl_pattern = "dist/sbp-{}-*.whl".format(SBP_VERSION)
    print(">>> Uploading Python wheel (glob: {})...".format(whl_pattern))
    wheels = glob.glob(whl_pattern)
    if not wheels:
        print("\n!!! No Python wheel (.whl) file found...\n\n")
        sys.exit(1)
    wheel = wheels[0]
    print(">>> Found wheel (of {} matches): {}".format(len(wheels), wheel))
    if platform.system() == "Linux" and platform.machine().startswith("x86"):
        # auditwheel rewrites the platform tag; the rename below matches its output.
        print(">>> Running 'auditwheel' against wheel: {}".format(wheel))
        subprocess.check_call([
            "python3", "-m", "auditwheel", "repair", "-w", "dist", wheel
        ])
    print(">>> Copying wheel {} to {}".format(wheel, old_cwd))
    shutil.copy(wheel, old_cwd)
    wheel = wheel.replace("-linux_x86_64", "-manylinux1_x86_64")
    twine_upload(conda_dir, wheel, py_version, use_conda)


def build_wheel_conda(conda_dir, deploy_dir, py_version):
    """Create a throwaway conda env for py_version, install deps, and build the wheel."""
    print(">>> Creating conda environment for Python version: {}...".format(py_version))
    subprocess.check_call([
        "conda", "create", "--yes", "-p", conda_dir, "python={}".format(py_version)])
    if platform.system() == 'Linux' and platform.machine() == 'x86_64':
        # Compilers for the C extension on x86 Linux.
        subprocess.check_call([
            "conda", "install", "--yes", "-p", conda_dir, "gcc_linux-64", "gxx_linux-64"
        ])
    print(">>> Installing build deps in Python {} conda environment...".format(py_version))
    subprocess.check_call([
        "conda", "install", "-p", conda_dir, "--yes", "cython", "wheel", "setuptools"
    ])
    subprocess.check_call([
        "conda", "run", "-p", conda_dir] + DASHDASH + [
        "python", "-m", "pip", "install", "--upgrade", "pip"
    ])
    subprocess.check_call([
        "conda", "run", "-p", conda_dir] + DASHDASH + [
        "python", "-m", "pip", "install", "twine", "numpy"
    ])
    if platform.system() == "Linux" and platform.machine().startswith("x86"):
        # auditwheel runs under the system python3, not the conda env.
        subprocess.check_call([
            "python3", "-m", "pip", "install", "auditwheel"
        ])
    print(">>> Installing setup deps in Python {} conda environment...".format(py_version))
    subprocess.check_call([
        "conda", "run", "-p", conda_dir] + DASHDASH + [
        "python", "-m", "pip", "install", "--ignore-installed",
        "-r", "setup_requirements.txt",
        "-r", "test_requirements.txt",
    ])
    suffix = "" if py_version.startswith("3.") else "27"
    subprocess.check_call([
        "conda", "run", "-p", conda_dir] + DASHDASH + [
        "python", "-m", "pip", "install", "--ignore-installed",
        "-r", "requirements{}.txt".format(suffix),
        "-r", "setup_requirements{}.txt".format(suffix),
    ])
    run_bdist(conda_dir, deploy_dir, py_version, use_conda=True)


def build_native_on_arm(py_version):
    """Return True when building natively (ARM Linux); py_version is currently unused."""
    if platform.system() != "Linux":
        return False
    return platform.machine().startswith("arm")


def build_wheel(conda_dir, deploy_dir, py_version):
    """Dispatch to the native (ARM) or conda build path."""
    if build_native_on_arm(py_version):
        build_wheel_native(conda_dir, deploy_dir, py_version)
    else:
        build_wheel_conda(conda_dir, deploy_dir, py_version)


def py_versions():
    """Yield the Python versions to build, honoring LIBSBP_BUILD_ANY and SKIP_PY_VERS."""
    def _py_versions():
        if os.environ.get('LIBSBP_BUILD_ANY', None):
            return ["3.7"]
        return ALL_PY_VERSIONS
    for pyver in _py_versions():
        if pyver in SKIP_PY_VERS:
            continue
        yield pyver


# Main loop: one fresh conda env + staging dir per version, cleaned up afterwards.
for py_version in py_versions():
    print(">>> Building wheel for Python {}...".format(py_version))
    conda_tmp_dir = tempfile.mkdtemp()
    conda_dir = os.path.join(conda_tmp_dir, "conda")
    deploy_dir = tempfile.mkdtemp()
    try:
        build_wheel(conda_dir, deploy_dir, py_version)
    finally:
        os.chdir(script_dir)
        # Workaround a permission denied errors that happens on Windows
        if platform.system() == "Windows":
            subprocess.check_call(["rmdir", "/s", "/q", conda_dir], shell=True)
        else:
            subprocess.check_call(["rm", "-rf", conda_dir])
        if platform.system() == "Windows":
            subprocess.check_call(["rmdir", "/s", "/q", deploy_dir], shell=True)
        else:
            subprocess.check_call(["rm", "-rf", deploy_dir])
'''reports details about a virtual boinc farm'''

# standard library modules
import argparse
import collections
import datetime
import logging
import socket
import sys
import urllib

# third-party module(s)
import dateutil.parser
import lxml.etree  # `import lxml` alone does not make lxml.etree importable
import pandas as pd
import pymongo
import requests

# neocortix module(s)
import devicePerformance
import ncs

logger = logging.getLogger(__name__)


def anyFound(a, b):
    '''return true iff any items from iterable a is found in iterable b'''
    for x in a:
        if x in b:
            return True
    return False


def datetimeIsAware(dt):
    '''return True if dt is timezone-aware, False if naive, None if dt is falsy'''
    if not dt:
        return None
    return (dt.tzinfo is not None) and (dt.tzinfo.utcoffset(dt) is not None)


def universalizeDateTime(dt):
    '''convert dt to an aware datetime in UTC (None passes through)'''
    if not dt:
        return None
    if datetimeIsAware(dt):
        return dt.astimezone(datetime.timezone.utc)
    return dt.replace(tzinfo=datetime.timezone.utc)


def interpretDateTimeField(field):
    '''coerce a datetime or a parseable string to an aware UTC datetime'''
    if isinstance(field, datetime.datetime):
        return universalizeDateTime(field)
    elif isinstance(field, str):
        return universalizeDateTime(dateutil.parser.parse(field))
    else:
        raise TypeError('datetime or parseable string required')


def isNumber(sss):
    '''return True iff sss parses as a float'''
    try:
        float(sss)
        return True
    except ValueError:
        return False


def instanceDpr(inst):
    '''compute a device performance rating from an instance record's cpu info'''
    # cpuarch: string like "aarch64" or "armv7l"
    # cpunumcores: int
    # cpuspeeds: list of floats of length cpunumcores, each a clock frequency in GHz
    # cpufamily: list of strings of length cpunumcores
    cpuarch = inst['cpu']['arch']
    cpunumcores = len(inst['cpu']['cores'])
    cpuspeeds = []
    cpufamily = []
    for core in inst['cpu']['cores']:
        cpuspeeds.append(core['freq'] / 1e9)
        cpufamily.append(core['family'])
    dpr = devicePerformance.devicePerformanceRating(cpuarch, cpunumcores, cpuspeeds, cpufamily)
    return dpr


def getStartedInstances(db):
    '''collect started/stopped instances from all launchedInstances_* collections,
    annotating each with a dpr (device performance rating) if missing'''
    collNames = db.list_collection_names(filter={'name': {'$regex': r'^launchedInstances_.*'}})
    startedInstances = []
    for collName in collNames:
        launchedColl = db[collName]
        # fully iterate the cursor, getting all records
        inRecs = list(launchedColl.find({}, {'device-id': 1, 'cpu': 1, 'instanceId': 1, 'state': 1}))
        if len(inRecs) <= 0:
            # logger.warn is a deprecated alias of logger.warning
            logger.warning('no launched instances found in %s', collName)
        for inRec in inRecs:
            if 'instanceId' not in inRec:
                logger.warning('no instance ID in input record')
            if 'dpr' in inRec:
                dpr = inRec['dpr']
                if dpr < 24:
                    logger.info('low dpr %.1f %s', dpr, inRec)
            else:
                dpr = instanceDpr(inRec)
                if dpr < 24:
                    logger.info('low dpr computed %.1f %s', dpr, inRec)
                inRec['dpr'] = round(dpr)
        startedInstances.extend([inst for inst in inRecs if inst['state'] in ['started', 'stopped']])
    return startedInstances


def getInstallerRecs(db):
    '''return a dict of the last terminal installer event (status annotated) per instance'''
    instRecs = {}
    colls = db.list_collection_names(filter={'name': {'$regex': r'^startBoinc_.*'}})
    colls = sorted(colls, reverse=False)
    for collName in colls:
        found = db[collName].find({"instanceId": {"$ne": "<master>"}})
        for event in found:
            iid = event['instanceId']
            if anyFound(['exception', 'returncode', 'timeout'], event):
                if iid in instRecs:
                    logger.warning('already had instRec for %s', iid)
                if 'exception' in event:
                    event['status'] = 'exception'
                    event['exceptionType'] = event['exception']['type']  # redundant
                elif 'returncode' in event:
                    event['status'] = 'failed' if event['returncode'] else 'ok'
                elif 'timeout' in event:
                    event['status'] = 'timeout'
                instRecs[iid] = event
    return instRecs


def parseTaskLines(lines):
    '''parse `boinccmd --get_tasks`-style output lines into a list of task dicts'''
    tasks = []
    curTask = {}
    firstLine = True
    for line in lines:
        line = line.rstrip()
        if not line:
            continue
        if firstLine:
            firstLine = False
            # skip the "== Tasks ==" header, but only when it is the first
            # non-empty line (previously the flag was never cleared, so any
            # later matching line was silently dropped)
            if '== Tasks ==' in line:
                continue
        if line[0] != ' ':
            # an unindented "<n>) ..." line starts a new task
            numPart = line.split(')')[0]
            taskNum = int(numPart)
            curTask = {'num': taskNum}
            tasks.append(curTask)
            continue
        if ':' in line:
            # extract a key:value pair from this line
            stripped = line.strip()
            parts = stripped.split(':', 1)  # only the first colon is significant
            # convert to a numeric type, if appropriate
            val = parts[1].strip()
            if val.isnumeric():
                val = int(val)
            elif isNumber(val):
                val = float(val)
            curTask[parts[0]] = val
            continue
        logger.info('> %s', line)
    return tasks


def collectTaskMetrics(db):
    '''return a DataFrame of parsed task records from all get_tasks_* collections'''
    frames = []
    collNames = sorted(db.list_collection_names(filter={'name': {'$regex': r'^get_tasks_.*'}}))
    for collName in collNames:
        logger.info('getting data from %s', collName)
        coll = db[collName]
        # get the tag from the collection name e.g. 'get_tasks_2020-04-09_230320'
        dateTimeTag = collName.split('_', 2)[2]
        # iterate over records, each containing output for an instance
        for inRec in coll.find():
            iid = inRec['instanceId']
            eventDateTime = inRec['dateTime']
            taskLines = []
            if iid == '<master>':
                pass
            else:
                events = inRec['events']
                for event in events:
                    if 'stdout' in event:
                        stdoutStr = event['stdout']
                        taskLines.append(stdoutStr)
            tasks = pd.DataFrame(parseTaskLines(taskLines))
            tasks['dateTimeTag'] = dateTimeTag
            tasks['eventDateTime'] = eventDateTime
            tasks['instanceId'] = iid
            frames.append(tasks)
    # DataFrame.append was removed in pandas 2.0; concat once at the end
    return pd.concat(frames, sort=False) if frames else pd.DataFrame()


def reportExceptionRecoveriesHackerly(db):
    '''print/log instances that hit ConnectionRefusedError and later recovered'''
    instHistories = {}
    taskColls = db.list_collection_names(filter={'name': {'$regex': r'^get_tasks_.*'}})
    taskColls = sorted(taskColls, reverse=False)
    for collName in taskColls:
        tcoll = db[collName]
        found = tcoll.find({"instanceId": {"$ne": "<master>"}})
        for instRec in found:
            iid = instRec['instanceId']
            if iid not in instHistories:
                instHistories[iid] = []
            for event in instRec['events']:
                if 'exception' in event:
                    if event['exception']['type'] == 'ConnectionRefusedError':
                        instHistories[iid].append(event)
                elif 'returncode' in event:
                    if event['returncode'] == 0:
                        instHistories[iid].append(event)
    logger.info('scanning event histories')
    for iid, history in instHistories.items():
        hadExcept = False
        for event in history:
            if 'exception' in event:
                if not hadExcept:
                    print(iid, event['dateTime'])
                hadExcept = True
            elif 'returncode' in event:
                if hadExcept:
                    logger.info('recovered %s %s', iid[0:16], event['dateTime'][0:16])


def maybeNumber(txt):
    '''return txt converted to int or float when possible, else unchanged (None passes through)'''
    if txt is None:
        return None
    elif txt.isnumeric():
        return int(txt)
    elif isNumber(txt):
        return float(txt)
    else:
        return txt


def retrieveAccountInfo(projUrl, authStr):
    '''fetch boinc account info (including per-host records) as a dict; {} on parse failure'''
    reqUrl = projUrl + 'show_user.php?format=xml&auth=' + authStr
    resp = requests.get(reqUrl)
    try:
        tree = lxml.etree.fromstring(resp.content)
    except Exception as exc:
        logger.error('exception from lxml (%s) %s', type(exc), exc)
        return {}
    hostInfos = []
    accountInfo = {}
    logger.info('%d elements in xml tree', len(tree))
    for urlElem in tree:
        # Element.getchildren() is deprecated/removed; iterate the element instead
        children = list(urlElem)
        if urlElem.tag == 'host':
            hostInfo = {}
            for child in children:
                tag = child.tag
                hostInfo[tag] = maybeNumber(child.text)
            hostInfos.append(hostInfo)
        else:
            accountInfo[urlElem.tag] = maybeNumber(urlElem.text)
    accountInfo['hosts'] = hostInfos
    return accountInfo


if __name__ == "__main__":
    logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'
    logDateFmt = '%Y/%m/%d %H:%M:%S'
    formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt)
    logging.basicConfig(format=logFmt, datefmt=logDateFmt)
    logger.setLevel(logging.INFO)

    ap = argparse.ArgumentParser(description=__doc__, fromfile_prefix_chars='@',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('--authToken', help='the NCS authorization token to use (required for launch or terminate)')
    ap.add_argument('--farm', required=True, help='the name of the virtual boinc farm')
    ap.add_argument('--projectUrl', help='the URL to the boinc science project',
                    default='https://boinc.bakerlab.org/rosetta/')
    ap.add_argument('--projectKey', required=True, help='the authorization key for the boinc project')
    ap.add_argument('--mongoHost', help='the host of mongodb server', default='localhost')
    ap.add_argument('--mongoPort', help='the port of mongodb server', default=27017)
    args = ap.parse_args()

    farm = args.farm  # e.g. 'rosetta_2'
    logger.info('connecting to database for %s', farm)
    mclient = pymongo.MongoClient(args.mongoHost)
    dbName = 'boinc_' + farm
    db = mclient[dbName]

    lookbackMinutes = 120
    thresholdDateTime = datetime.datetime.now(datetime.timezone.utc) \
        - datetime.timedelta(minutes=lookbackMinutes)
    thresholdDateTimeStr = thresholdDateTime.isoformat()
    dateTimeTagFormat = '%Y-%m-%d_%H%M%S'
    ancientDateTime = datetime.datetime(2020, 4, 12, tzinfo=datetime.timezone.utc)

    logger.info('getting startedInstances')
    startedInstances = getStartedInstances(db)
    instancesByIid = {inst['instanceId']: inst for inst in startedInstances}

    # rosetta
    projUrl = args.projectUrl
'https://boinc.bakerlab.org/rosetta/' authStr = args.projectKey # account key for boinc project ("weak" key may not work) logger.info( 'retrieving account info from %s', projUrl ) accountInfo = retrieveAccountInfo( projUrl, authStr ) hostInfos = accountInfo.get('hosts') if hostInfos: hostInfoByName = { host['domain_name']: host for host in hostInfos } else: hostInfoByName = {} if not True: taskMetrics = collectTaskMetrics( db ) nCheckedInstances = 0 nSuccessfulInstances = 0 coll = db['checkedInstances'] # iterate fully into a list, so we can freely random-access it checkedInstances = list(coll.find().sort('checkedDateTime', -1) ) eventCounter = collections.Counter() eventsByIid = {} pausings = [] # this first loop does a little massaging of the records for inst in checkedInstances: iid = inst['_id'] abbrevIid = iid[0:16] inst['instanceId'] = iid inst['checkedDateTime'] = interpretDateTimeField( inst['checkedDateTime'] ) instEvents = None if 'events' in inst: instEvents = inst['events'] elif inst['checkedDateTime'] >= ancientDateTime: # '2020-04-12': logger.info( 'querying NCS for %s', abbrevIid ) response = ncs.queryNcsSc( 'instances/%s' % iid, args.authToken ) if response['statusCode'] == 200: instX = response['content'] instEvents = instX['events'] inst['events'] = instX['events'] coll.update_one( {'_id': iid}, { "$set": { "events": instX['events'] } } ) if instEvents: eventsByIid[ iid ] = instEvents lastEventStr = instEvents[-1]['category'] + '.' 
+ instEvents[-1]['event'] #logger.info( '%s had %d events, including "%s"', # abbrevIid, len(instEvents), lastEventStr ) inst['lastEvent'] = lastEventStr unplugged = False for event in instEvents: if event['event'] == 'unplugged': unplugged = True elif event['event'] == 'paused': event['instanceId'] = iid pausings.append( event ) inst['paused'] = True elif event['event'] == 'resumed': event['instanceId'] = iid pausings.append( event ) inst['paused'] = False elif event['event'] == 'disconnected': inst['disconnected'] = True eventCounter[ event['category'] + '.' + event['event'] ] += 1 inst['unplugged'] = unplugged instHost = inst['ssh']['host'] inst['hostName'] = instHost #if 'ztka' in instHost: # logger.warning( 'DOWNLOADERR %s', inst['_id'] ) if instHost and instHost in hostInfoByName: inst['totCredit'] = hostInfoByName[instHost]['total_credit'] inst['RAC'] = hostInfoByName[instHost]['expavg_credit'] else: inst['totCredit'] = 0 inst['RAC'] = 0 # this loop prints info for recently-checked terminated instances print( 'recently terminated instances for farm', farm ) for inst in checkedInstances: nCheckedInstances += 1 cdt = inst['checkedDateTime'] ldtField = inst['launchedDateTime'] ldt = universalizeDateTime( dateutil.parser.parse( ldtField ) ) inst['launchedDateTime'] = universalizeDateTime( ldt ) if inst.get('terminatedDateTime'): inst['terminatedDateTime'] = interpretDateTimeField( inst['terminatedDateTime'] ) inst['uptime'] = (cdt-ldt).total_seconds() iid = inst['_id'] inst['instanceId'] = iid inst['dpr'] = instancesByIid[iid]['dpr'] instSucceeded = inst.get('nTasksComputed',0) > 0 if instSucceeded: nSuccessfulInstances += 1 if cdt >= thresholdDateTime and inst['state'] == 'terminated': cdtAbbrev = cdt.strftime( dateTimeTagFormat )[5:-2] abbrevIid = iid[0:16] print( '%s, %d, %d, %d, %d, %d, %s, %s, %.1f, %s' % (inst['state'], inst['devId'], inst.get('nTasksComputed',0), inst['nCurTasks'], inst.get('nFailures',0), inst.get('nExceptions',0), cdtAbbrev, abbrevIid, 
inst['uptime']/3600, inst.get('unplugged','')) ) #else: # print( 'older' ) checkedInstancesDf = pd.DataFrame( checkedInstances ) print() print(datetime.datetime.now( datetime.timezone.utc )) projName = projUrl.split('/')[-2] projNetLoc = urllib.parse.urlparse( projUrl ).netloc #print( 'project', projNetLoc, 'stats' ) print( 'stats for project %s (%s)' % (projName, projNetLoc) ) if 'total_credit' not in accountInfo: print( 'not available' ) print( accountInfo.get( 'error_msg', '' ) ) else: print( 'total credit:', round(accountInfo['total_credit']) ) print( 'RAC:', round(accountInfo['expavg_credit']) ) # "recent avgerage credit" nCurrentlyRunning = len( checkedInstancesDf[ checkedInstancesDf.state=='checked' ] ) print( '%d currently running instances' % (nCurrentlyRunning ) ) currentlySuccessful = checkedInstancesDf[ (checkedInstancesDf.state=='checked') & (checkedInstancesDf.nTasksComputed>0) ] print( '%d of those had finished tasks' % len(currentlySuccessful) ) print( 'historically, %d out of %d instances had finished tasks' % (nSuccessfulInstances, nCheckedInstances) ) print() # print details for the best instances with finished tasks if len( checkedInstancesDf ) > 20: bestCreditThresh = max( sorted(checkedInstancesDf.totCredit)[-20], 1 ) else: bestCreditThresh = 1 print( 'best instances' ) for inst in checkedInstances: #if inst.get('nTasksComputed',0) > 0: if inst.get('totCredit',0) >= bestCreditThresh: # 12000 150 300 cdt = inst['checkedDateTime'] cdtAbbrev = cdt.strftime( dateTimeTagFormat )[5:-2] iid = inst['_id'] abbrevIid = iid[0:16] host = inst['ssh']['host'] hostId = inst.get( 'bpsHostId' ) abbrevHost = host.split('.')[0] print( '%s, %d, %d, %d, %d, %d, %s, %s, %s, %.1f' % (inst['state'], inst['devId'], inst['totCredit'], inst.get('nTasksComputed',0), inst.get('nFailures',0), inst.get('nExceptions',0), cdtAbbrev, abbrevHost, abbrevIid, inst['uptime']/3600) ) print() sys.stdout.flush() if False: logger.info( 'checking dns for instances' ) nChecked = 0 
badGais = [] for inst in checkedInstances: if inst['state'] == 'checked': nChecked += 1 iid = inst['_id'] abbrevIid = iid[0:16] #logger.info( 'checking dns for %s', abbrevIid ) host = inst['ssh']['host'] port = inst['ssh']['port'] try: info = socket.getaddrinfo( host, port ) except Exception as exc: logger.warning( 'gai failed for host "%s", port %d, %s', host, port, iid ) logger.warning( 'error (%d) %s', exc.errno, exc ) if exc.errno != socket.EAI_NONAME: logger.warning( '(unusual error)' ) badGais.append( (inst, exc )) logger.info( '%d bad gai out of %d checked', len(badGais), nChecked) # collect ram totals #ramByDevId = {} #for inst in startedInstances: # ramByDevId[ inst['device-id'] ] = inst['ram']['total'] / 1000000 lowRiders = checkedInstancesDf[(checkedInstancesDf.dpr<38) & (checkedInstancesDf.dpr>=24)] if 'totCredit' in lowRiders: print( 'silver (sub-38) total credit:', round(lowRiders.totCredit.sum())) if False: reportExceptionRecoveriesHackerly( db ) if False: instRecs = getInstallerRecs( db ) logger.info( 'found %d installation attempts', len(instRecs) ) installerDf = pd.DataFrame( instRecs.values() ) installerDf['devId'] = installerDf.instanceId.map( lambda x: instancesByIid[x]['device-id'] ) if False: # find instance events related to stoppage #for inst in startedInstances: for inst in checkedInstances: if inst['nTasksComputed'] <= 0: iid = inst['instanceId'] abbrevIid = iid[0:16] response = ncs.queryNcsSc( 'instances/%s' % iid, args.authToken ) if response['statusCode'] == 200: inst = response['content'] instEvents = inst['events'] for event in instEvents: if event['category'] == 'charger': logger.info( '%s %s', abbrevIid, event )
import math
from bridges.graph_adj_list import *


class OsmEdge:
    """
    @brief Class that hold Open Street Map edges

    Class that holds Open Street Map edges from https://openstreetmap.org

    This object is generally not created by the user, to see how its created check out
    bridges::data_src_dependent::data_source::get_osm_data()

    @sa For an example, check out https://bridgesuncc.github.io/tutorials/Data_OSM.html

    @author <NAME>, <NAME>, <NAME>, <NAME>
    @date 2019, 12/28/20
    """

    @property
    def source(self) -> int:
        """
        @brief Get source vertex (int)
        Returns:
            source vertex of edge
        """
        return self._source

    @source.setter
    def source(self, source: int):
        """
        @brief Set source vertex
        Args:
            source: source vertex to set
        """
        # Coerce to int; only ValueError is translated here.
        # NOTE(review): int(None) raises TypeError, which propagates unchanged.
        try:
            value = int(source)
        except ValueError:
            raise ValueError("Source must be an int")
        self._source = value

    @source.deleter
    def source(self):
        """
        @brief delete source vertex
        """
        del self._source

    @property
    def destination(self) -> int:
        """
        @brief Get destination vertex
        Returns:
            destination vertex of edge
        """
        return self._destination

    @destination.setter
    def destination(self, destination: int):
        """
        @brief Set destination vertex
        Args:
            destination: destination vertex to set (int)
        """
        try:
            value = int(destination)
        except ValueError:
            raise ValueError("Destination must be an int")
        self._destination = value

    @destination.deleter
    def destination(self):
        """
        @brief delete destination vertex
        """
        del self._destination

    @property
    def distance(self) -> float:
        """
        @brief Get distance between two vertices of the edge (float)
        Returns:
            edge length
        """
        return self._distance

    @distance.setter
    def distance(self, distance: float):
        """
        @brief Set distance between source and destination
        Args:
            distance: distance to set (float)
        """
        try:
            value = float(distance)
        except ValueError:
            raise ValueError("Distance must be a float")
        self._distance = value

    @distance.deleter
    def distance(self):
        """
        @brief Delete distance between edge vertices
        """
        del self._distance

    def __init__(self, source: int, destination: int, distance: float):
        """
        @brief Constructor
        Args:
            source: source vertex of edge
            destination: destination vertex of edge
            distance: distance between the source and destination vertices
        """
        # Initialize the backing fields first, then go through the property
        # setters so the type validation above also runs for constructor args.
        self._source = 0
        self._destination = 0
        self._distance = 0.0
        self.source = source
        self.destination = destination
        self.distance = distance


class OsmVertex:
    """
    @brief Class that hold Open Street Map vertices

    Class that holds Open Street Map vertices from https://openstreetmap.org

    This object is generally not created by the user, to see how its created check out
    bridges::data_src_dependent::data_source::get_osm_data()

    @sa For an example, check out https://bridgesuncc.github.io/tutorials/Data_OSM.html

    @author <NAME>, <NAME>, <NAME>, <NAME>
    @date 2/14/19, 12/29/20
    """

    @property
    def latitude(self) -> float:
        """
        @brief Get latitude of vertex position
        Returns:
            latitude of vertex
        """
        return self._latitude

    @latitude.setter
    def latitude(self, latitude: float):
        """
        @brief Set latitude of vertex position

        Also refreshes the cached Cartesian coordinates; requires
        self._longitude and self.cartesian_coord to already exist.

        Args:
            latitude: vertex latitude to set (float)
        """
        try:
            value = float(latitude)
        except ValueError:
            raise ValueError("latitude must be a float")
        self._latitude = value
        self._to_cartesian_coord()

    @latitude.deleter
    def latitude(self):
        """
        @brief Delete latitude of vertex position
        """
        del self._latitude

    @property
    def longitude(self) -> float:
        """
        @brief Get longitude of vertex position
        Returns:
            longitude of vertex
        """
        return self._longitude

    @longitude.setter
    def longitude(self, longitude: float):
        """
        @brief Set longitude of vertex position

        Also refreshes the cached Cartesian coordinates; requires
        self._latitude and self.cartesian_coord to already exist.

        Args:
            longitude: vertex latitude to set (float)
        """
        try:
            value = float(longitude)
        except ValueError:
            raise ValueError("longitude must be a float")
        self._longitude = value
        self._to_cartesian_coord()

    @longitude.deleter
    def longitude(self):
        """
        @brief Delete longitude of vertex position
        """
        del self._longitude

    def _to_cartesian_coord(self):
        """
        @brief convert lat/long to Cartesian coordinates

        Projects (latitude, longitude) in degrees onto a sphere of Earth's
        radius (km) and stores the x/y components in self.cartesian_coord.
        """
        earth_radius = 6378
        lat_rad = self.latitude * math.pi/180
        longit_rad = self.longitude * math.pi/180
        self.cartesian_coord[0] = earth_radius * math.cos(lat_rad) * math.cos(longit_rad)
        self.cartesian_coord[1] = earth_radius * math.cos(lat_rad) * math.sin(longit_rad)

    def __init__(self, latitude: float, longitude: float):
        """OSM vertex, represents vertex using open street map data
        :param latitude: float, latitude of vertex
        :param longitude: float, longitude of vertex
        """
        # Order matters: cartesian_coord must exist before the latitude /
        # longitude setters run, because each setter calls
        # _to_cartesian_coord(), which writes into cartesian_coord.
        self._latitude = 0
        self._longitude = 0
        self.cartesian_coord = [0, 0]
        self.latitude = latitude
        self.longitude = longitude
        self._to_cartesian_coord()


class OsmData:
    """
    @brief Class that hold Open Street Map Data

    Class that holds Open Street Map data, from https://openstreetmap.org

    This object is generally not created by the user, to see how its created check out
    bridges::data_src_dependent::data_source::get_osm_data()

    @sa For an example, check out https://bridgesuncc.github.io/tutorials/Data_OSM.html

    @author <NAME>, <NAME>, <NAME>, <NAME>
    @date 2/16/19, 12/28/20, 1/6/21
    """
    from typing import List
    VertexList = List[OsmVertex]
    EdgeList = List[OsmEdge]

    @property
    def vertices(self) -> VertexList:
        """
        @brief get vertices of the dataset (list)
        Returns:
            list of vertices
        """
        return self._vertices

    @vertices.setter
    def vertices(self, vertices: VertexList):
        """
        @brief Set vertices of the dataset (VertexList)

        Also recomputes the lat/lon and Cartesian bounding ranges over the
        new vertex list.

        Args:
            vertices: VertexList to set
        """
        if not isinstance(vertices, list):
            raise ValueError("vertices must be a list of OsmVertex objects")
        # Ranges start at (+inf, -inf) so any real value narrows them.
        # NOTE(review): an empty list leaves the ranges at +/-inf.
        lat_range = [math.inf, -math.inf]
        lon_range = [math.inf, -math.inf]
        cart_range_x = [math.inf, -math.inf]
        cart_range_y = [math.inf, -math.inf]
        # find the ranges of lat/lon and cartesian coordinates from new vertices
        for vertex in vertices:
            if not isinstance(vertex, OsmVertex):
                raise ValueError("vertices must be a list of OsmVertex objects")
            lat = vertex.latitude
            lon = vertex.longitude
            cart_x = vertex.cartesian_coord[0]
            cart_y = vertex.cartesian_coord[1]
            lat_range[0] = lat if lat_range[0] > lat else lat_range[0]
            lat_range[1] = lat if lat_range[1] < lat else lat_range[1]
            lon_range[0] = lon if lon_range[0] > lon else lon_range[0]
            lon_range[1] = lon if lon_range[1] < lon else lon_range[1]
            cart_range_x[0] = cart_x if cart_range_x[0] > cart_x else cart_range_x[0]
            cart_range_x[1] = cart_x if cart_range_x[1] < cart_x else cart_range_x[1]
            cart_range_y[0] = cart_y if cart_range_y[0] > cart_y else cart_range_y[0]
            cart_range_y[1] = cart_y if cart_range_y[1] < cart_y else cart_range_y[1]
        self.latitude_range = lat_range
        self.longitude_range = lon_range
        self.cartesian_range_x = cart_range_x
        self.cartesian_range_y = cart_range_y
        self._vertices = vertices

    @vertices.deleter
    def vertices(self):
        """
        Delete vertices of the dataset
        """
        del self._vertices

    @property
    def edges(self) -> EdgeList:
        """
        @brief Get edges of the dataset (EdgeList)
        Returns:
            edges of the OSM data
        """
        return self._edges

    @edges.setter
    def edges(self, edges: EdgeList):
        """
        @brief Set edges of the dataset
        Args:
            edges: edges to set
        """
        if not isinstance(edges, list):
            raise ValueError("edges must be a list of OsmVertex objects")
        for edge in edges:
            if not isinstance(edge, OsmEdge):
                raise ValueError("edges must be a list of OsmEdge objects")
        self._edges = edges

    @edges.deleter
    def edges(self):
        """
        @brief delete the edges in the dataset
        """
        del self._edges

    def get_graph(self) -> GraphAdjList:
        """
        @brief Construct a graph out of the vertex and edge
        data of the OSM object.

        The graph will associate the length of the edge to the
        graph edge. No data is bound to the vertices

        The vertices of the graph will be located at
        the location where given in the data set
        converted to cartesian coordinates

        Returns:
            adjacency list based graph
        """
        ret_graph = GraphAdjList()
        # Vertices are keyed by their index in self.vertices; edges reference
        # those indices via OsmEdge.source / OsmEdge.destination.
        for k, vertex in enumerate(self.vertices):
            ret_graph.add_vertex(k, data=vertex)
            ret_graph.get_vertex(k).visualizer.set_location(vertex.cartesian_coord[0], vertex.cartesian_coord[1])
            ret_graph.get_vertex(k).visualizer.color = ("green")
        for k, edge in enumerate(self.edges):
            ret_graph.add_edge(edge.source, edge.destination, data=edge.distance)
        return ret_graph

    def __init__(self):
        """
        @brief constructor
        """
        self._vertices = []             # list of OsmVertex
        self._edges = []                # list of OsmEdge
        self.latitude_range = []        # [min, max] latitude, set by vertices setter
        self.longitude_range = []       # [min, max] longitude, set by vertices setter
        self.cartesian_range_x = []     # [min, max] cartesian x, set by vertices setter
        self.cartesian_range_y = []     # [min, max] cartesian y, set by vertices setter
        self.name = None                # optional dataset name
import contextlib
import logging
import threading
import uuid
from concurrent.futures.thread import ThreadPoolExecutor
from io import BytesIO

from keeper.storage.storage import Storage
from keeper.storage.streams import WriteOnlyStream, ReadOnlyStream

logger = logging.getLogger(__name__)


class WriteCacheStorage(Storage):
    """A write-behind cache wrapped around another Storage.

    Data writes are buffered in memory and flushed to the underlying storage
    asynchronously on a thread pool; reads are served from the in-memory
    buffer while a flush is pending, falling back to the underlying storage
    afterwards. Metadata operations are passed through uncached.
    """

    def __init__(self, storage: Storage):
        """Wrap *storage*.

        Raises:
            ValueError: If the underlying storage is already closed.
        """
        if storage.closed:
            raise ValueError(f"Underlying storage is {storage} is closed")
        self._storage = storage
        # Separate locks guard the two buffer dicts: _temp_data for anonymous
        # temporaries, _data for keyed entries pending flush.
        self._temp_buffer_lock = threading.RLock()
        self._temp_data = {}
        self._data_lock = threading.RLock()
        self._data = {}
        self._executor = ThreadPoolExecutor()

    def close(self):
        """Shut down the flush executor (waits for pending flushes) and
        release the underlying storage."""
        self._executor.shutdown()
        self._storage = None

    @property
    def closed(self):
        # closed is signalled by dropping the storage reference in close()
        return self._storage is None

    @property
    def storage(self):
        """The wrapped storage; raises ValueError once closed."""
        if self.closed:
            raise ValueError(f"Operation on closed storage {self}")
        return self._storage

    def keys(self):
        """An iterator over all keys."""
        # Snapshot pending keys under the lock, then merge with the
        # underlying storage's keys, de-duplicating against the snapshot.
        with self._data_lock:
            pending_keys = set(self._data.keys())
        yield from pending_keys
        for key in self.storage.keys():
            if key not in pending_keys:
                yield key

    @contextlib.contextmanager
    def openin_meta(self, key):
        # Metadata is not cached
        with self._storage.openin_meta(key) as meta_file:
            yield meta_file

    @contextlib.contextmanager
    def openout_meta(self, key):
        # Metadata is not cached
        with self._storage.openout_meta(key) as meta_file:
            yield meta_file

    @contextlib.contextmanager
    def openout_data(self, key):
        """Yield a write-only stream for *key*.

        The written bytes are buffered in memory and flushed to the
        underlying storage asynchronously after the stream is closed.
        """
        logger.debug(
            "%s opening write-only data file for key %r",
            type(self).__name__,
            key,
        )
        with BytesIO() as buffer:
            with WriteOnlyStream(buffer, name=key) as stream:
                yield stream
            # Publish the completed buffer before scheduling the flush so the
            # background _write_buffer always finds it.
            with self._data_lock:
                self._data[key] = buffer.getvalue()
        self._executor.submit(self._write_buffer, key)

    def _write_buffer(self, key):
        """Background task: flush the buffered bytes for *key* to the
        underlying storage, then drop them from the cache."""
        with self._data_lock:
            buffer = self._data[key]
        with self.storage.openout_data(key) as out:
            out.write(buffer)
        with self._data_lock:
            del self._data[key]

    @contextlib.contextmanager
    def openin_data(self, key):
        """Yield a read-only stream for *key*, preferring the in-memory
        buffer over the underlying storage."""
        # NOTE(review): a KeyError raised by the caller inside the yielded
        # block would also be caught here and trigger the fallback — confirm
        # that is acceptable.
        try:
            with self._data_lock:
                data = self._data[key]
            with BytesIO(data) as buffer:
                with ReadOnlyStream(buffer, name=key) as stream:
                    yield stream
        except KeyError:
            with self.storage.openin_data(key) as stream:
                yield stream

    @contextlib.contextmanager
    def openout_temp(self):
        """Yield a write-only stream for an anonymous temporary buffer.

        The generated handle is exposed as the stream's name; the bytes are
        kept in memory until promote_temp()/remove_temp().
        """
        handle = str(uuid.uuid4())
        with BytesIO() as buffer:
            with WriteOnlyStream(buffer, name=handle) as stream:
                yield stream
            data = buffer.getvalue()
        with self._temp_buffer_lock:
            self._temp_data[handle] = data

    @contextlib.contextmanager
    def openin_temp(self, handle):
        """Yield a read-only stream over the temporary buffer *handle*."""
        with self._temp_buffer_lock:
            data = self._temp_data[handle]
        with BytesIO(data) as buffer:
            with ReadOnlyStream(buffer, name=handle) as stream:
                yield stream

    def promote_temp(self, name, key):
        """Asynchronously promote temporary *name* to keyed entry *key*."""
        self._executor.submit(self._promote_temp_buffer, name, key)

    def _promote_temp_buffer(self, name, key):
        """Background task: copy temp buffer *name* into the keyed cache and
        the underlying storage as *key*, then drop both in-memory copies."""
        with self._temp_buffer_lock:
            data = self._temp_data[name]
        # Make the data readable under `key` while the underlying promote is
        # in flight.
        with self._data_lock:
            self._data[key] = data
        with self.storage.openout_temp() as underlying_temp:
            underlying_temp.write(data)
        self.storage.promote_temp(underlying_temp.name, key)
        with self._temp_buffer_lock:
            del self._temp_data[name]
        with self._data_lock:
            del self._data[key]

    def remove_temp(self, name):
        """Discard the temporary buffer *name* (KeyError if unknown)."""
        with self._temp_buffer_lock:
            del self._temp_data[name]

    def discard(self, key):
        """Remove *key* from both the cache and the underlying storage."""
        with self._data_lock:
            try:
                del self._data[key]
            except KeyError:
                pass
        self.storage.discard(key)

    def __repr__(self):
        return f"{type(self).__name__}(storage={self.storage})"
from PyQt5.QtWidgets import (
    QApplication, QWidget, QHBoxLayout, QVBoxLayout, QDesktopWidget, QPushButton, QLabel,
    QTabWidget, QMenu, QAction, QTextEdit, QFileDialog, QListWidget, QListWidgetItem, QCheckBox)
from PyQt5.QtGui import QPixmap, QCursor
from PyQt5.QtCore import QSize, QThread, pyqtSignal, Qt, QPoint
from PyWeChatSpy import WeChatSpy
from lxml import etree
import requests
import sys
from queue import Queue
from time import sleep
from threading import Thread
import os
import re

# Global UI / contact state shared across widgets.
FRIEND_LIST = []
GROUP_LIST = []
OFFICE_LIST = []
cb_contact_list = []            # contact checkboxes created by ContactWidget
contact_need_details = []
current_row = 0
msg_queue = Queue()             # messages handed from the spy callback to the UI thread
wxid_contact = {}               # wxid -> contact dict
contact_filter = ("qmessage", "qqmail", "tmessage", "medianote", "floatbottle", "fmessage")
key = "18d421169d93611a5584affac335e690"
# An optional local file named "key" overrides the built-in key.
if os.path.exists("key"):
    with open("key", "r") as rf:
        key = rf.read()


def parser(data: dict):
    """Spy callback: enqueue incoming data for the UI thread."""
    msg_queue.put(data)


class MsgThread(QThread):
    """Worker thread that drains msg_queue and re-emits each message as a
    Qt signal so it is handled on the GUI thread."""
    signal = pyqtSignal(dict)

    def __init__(self):
        super().__init__()

    def run(self):
        # Poll the queue; sleep briefly when empty to avoid busy-waiting.
        while True:
            if not msg_queue.empty():
                msg = msg_queue.get()
                self.signal.emit(msg)
            else:
                sleep(0.1)


def download_image(url: str, output: str):
    """Download *url* to file *output*; return True on HTTP 200."""
    resp = requests.get(url)
    if resp.status_code == 200:
        with open(output, "wb") as wf:
            wf.write(resp.content)
        return True
    return False


class ContactWidget(QWidget):
    """List-row widget for a contact: checkbox + avatar + nickname label."""

    def __init__(self, contact: dict, select_changed: classmethod):
        super().__init__()
        layout = QHBoxLayout(self)
        checkbox_contact = QCheckBox()
        # Stash the wxid on the checkbox so select_changed can identify it.
        checkbox_contact.__setattr__("wxid", contact["wxid"])
        checkbox_contact.setFixedSize(20, 20)
        checkbox_contact.stateChanged[int].connect(select_changed)
        cb_contact_list.append(checkbox_contact)
        layout.addWidget(checkbox_contact)
        label_profilephoto = QLabel(self)
        label_profilephoto.setFixedSize(32, 32)
        # Fall back to the default avatar when no cached photo exists.
        profilephoto_path = "profilephotos/default.jpg"
        if os.path.exists(f"profilephotos/{contact['wxid']}.jpg"):
            profilephoto_path = f"profilephotos/{contact['wxid']}.jpg"
        default_profilephoto = QPixmap(profilephoto_path).scaled(32, 32)
        label_profilephoto.setPixmap(default_profilephoto)
        layout.addWidget(label_profilephoto)
        label_nickname = QLabel(self)
        # Display as "nickname(remark)[member_count]" where available.
        nickname = contact["nickname"]
        if remark := contact.get("remark"):
            nickname = f"{nickname}({remark})"
        if count := contact.get("member_count"):
            nickname = f"{nickname}[{count}]"
        label_nickname.setText(nickname)
        layout.addWidget(label_nickname)


class ContactSearchWidget(QWidget):
    """Search-result row for a contact: avatar + nickname (no checkbox)."""

    def __init__(self, contact: dict):
        super().__init__()
        layout = QHBoxLayout(self)
        label_profilephoto = QLabel(self)
        label_profilephoto.setFixedSize(32, 32)
        profilephoto_path = "profilephotos/default.jpg"
        if os.path.exists(f"profilephotos/{contact['wxid']}.jpg"):
            profilephoto_path = f"profilephotos/{contact['wxid']}.jpg"
        default_profilephoto = QPixmap(profilephoto_path).scaled(32, 32)
        label_profilephoto.setPixmap(default_profilephoto)
        layout.addWidget(label_profilephoto)
        label_nickname = QLabel(self)
        nickname = contact["nickname"]
        if remark := contact.get("remark"):
            nickname = f"{nickname}({remark})"
        if count := contact.get("member_count"):
            nickname = f"{nickname}[{count}]"
        label_nickname.setText(nickname)
        layout.addWidget(label_nickname)


class MessageWidget(QWidget):
    """Chat-bubble widget for one message: avatar, speaker name and content,
    right-aligned for own messages, left-aligned for others."""

    def __init__(self, message: dict):
        super().__init__()
        layout_main = QHBoxLayout(self)
        layout_side = QVBoxLayout(self)
        label_content = QLabel(self)
        label_content.setWordWrap(True)
        label_content.adjustSize()
        label_content.setFixedWidth(300)
        label_speaker = QLabel(self)
        # message["self"] marks messages sent by the logged-in account.
        if message["self"]:
            layout_main.setAlignment(Qt.AlignRight)
            label_content.setAlignment(Qt.AlignRight)
            label_speaker.setAlignment(Qt.AlignRight)
        else:
            layout_main.setAlignment(Qt.AlignLeft)
            label_content.setAlignment(Qt.AlignLeft)
            label_speaker.setAlignment(Qt.AlignLeft)
        label_profilephoto = QLabel(self)
        label_profilephoto.setFixedSize(32, 32)
        profilephoto_path = "profilephotos/default.jpg"
        if os.path.exists(f"profilephotos/{message['wxid1']}.jpg"):
            profilephoto_path = f"profilephotos/{message['wxid1']}.jpg"
        default_profilephoto = QPixmap(profilephoto_path).scaled(32, 32)
        label_profilephoto.setPixmap(default_profilephoto)
        # Resolve the speaker's display name from the global contact map.
        speaker = ""
        wxid1 = message["wxid1"]
        if contact := wxid_contact.get(wxid1):
            speaker = contact["nickname"]
            if remark := contact.get("remark"):
                speaker = f"{speaker}({remark})"
        label_speaker.setText(speaker)
        layout_side.addWidget(label_speaker)
        # msg_type dispatch: 1=text, 3=image, 43=video, 47=sticker,
        # 49=mini-program/share; anything else is shown as unsupported.
        if message["msg_type"] == 1:
            label_content.setText(message["content"])
        elif message["msg_type"] == 3:
            label_content.setText("图片消息,请在手机上查看")
        elif message["msg_type"] == 43:
            if message.get("content"):
                label_content.setText("不支持的消息类型,请在手机上查看")
            else:
                label_content.setText("视频消息,请在手机上查看")
        elif message["msg_type"] == 47:
            label_content.setText("表情包消息,请在手机上查看")
        elif message["msg_type"] == 49:
            label_content.setText("小程序或其他分享消息,请在手机上查看")
        else:
            label_content.setText("不支持的消息类型,请在手机上查看")
        layout_side.addWidget(label_content)
        # Own messages: text first, avatar on the right; others: reversed.
        if message["self"]:
            layout_main.addLayout(layout_side)
            layout_main.addWidget(label_profilephoto)
        else:
            layout_main.addWidget(label_profilephoto)
            layout_main.addLayout(layout_side)


class SettingWidget(QWidget):
    """Settings window with a side-tabbed list of options."""

    def __init__(self, parent):
        super().__init__()
        self.setWindowTitle("设置")
        self.parent = parent
        self.tab_widget = QTabWidget(self)
        self.tab_widget.setTabPosition(QTabWidget.West)
        self.tab_widget.setFixedSize(300, 200)
        self.tab_common = QListWidget(self)
        self.tab_widget.addTab(self.tab_common, "通用")
        item = QListWidgetItem()
        item.setSizeHint(QSize(200, 50))
        # "auto-accept friend requests" toggle
        self.cb_auto_accept = QCheckBox("自动通过好友请求")
        self.tab_common.addItem(item)
        self.tab_common.setItemWidget(item, self.cb_auto_accept)


class SendTextEdit(QTextEdit):
    """Message input box: Enter sends, Ctrl+Enter inserts a newline."""

    def __init__(self, parent):
        super().__init__()
        self.parent = parent

    def keyPressEvent(self, event):
        QTextEdit.keyPressEvent(self, event)
        if event.key() == Qt.Key_Return:
            if QApplication.keyboardModifiers() == Qt.ControlModifier:
                self.append("")
            else:
                # NOTE(review): parent.send_msg is not defined in the visible
                # SpyUI class — confirm it exists on the real parent.
                self.parent.send_msg()


class SpyUI(QWidget):
    """Main application window."""

    def __init__(self):
        super().__init__()
        self.layout_main = QHBoxLayout(self)
        self.setting_widget = SettingWidget(self)
        self.wxid = ""
        self.init_ui()

    def init_ui(self):
        """Center and show the main window; position and hide the settings
        window at the same spot."""
        fg = self.frameGeometry()
        center = QDesktopWidget().availableGeometry().center()
        fg.moveCenter(center)
        # settings window
        self.setting_widget.resize(300, 200)
        self.setting_widget.move(fg.topLeft())
        self.setting_widget.hide()
        # main window
        self.resize(858, 608)
        self.move(fg.topLeft())
        self.setWindowTitle("PyWeChatSpyUI Beta 1.3.3")
        self.show()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    spy = SpyUI()
    sys.exit(app.exec_())
#!/usr/bin/env python3
"""doit build file: deploy the autocert flask app via docker-compose."""

import os
import re
import pwd

from doit import get_var
from ruamel import yaml

from api.config import _update_config, CONFIG_YML, DOT_CONFIG_YML
from utils.format import fmt, pfmt
from utils.timestamp import utcnow, datetime2int

DIR = os.path.dirname(os.path.abspath(__file__))
UID = os.getuid()
GID = pwd.getpwuid(UID).pw_gid
USER = pwd.getpwuid(UID).pw_name

ENV = dict(AC_UID=UID, AC_GID=GID, AC_USER=USER)
LOGDIR = 'oldlogs'
MINIMUM_DOCKER_COMPOSE_VERSION = '1.6'

LOG_LEVELS = [
    'DEBUG',
    'INFO',
    'WARNING',
    'ERROR',
    'CRITICAL',
]

DOIT_CONFIG = {
    'default_tasks': ['pull', 'deploy', 'rmimages', 'rmvolumes', 'count'],
    'verbosity': 2,
}

ENVS = ' '.join([
    'PYTHONPATH=.:api:$PYTHONPATH',
])


class UnknownPkgmgrError(Exception):
    """Raised when no supported package manager (dpkg/rpm/brew) is found."""
    def __init__(self):
        super(UnknownPkgmgrError, self).__init__('unknown pkgmgr!')


class UnknownLogLevelError(Exception):
    """Raised when LOG_LEVEL is not one of LOG_LEVELS.

    FIX: this class was referenced by task_config() but never defined,
    so an invalid LOG_LEVEL raised NameError instead of this error.
    """
    def __init__(self, log_level):
        super(UnknownLogLevelError, self).__init__('unknown log level: ' + str(log_level))


def get_user_uid_gid():
    """Return AC_USER/AC_UID/AC_GID environment assignments as strings."""
    return [
        fmt('AC_USER={USER}'),
        fmt('AC_UID={UID}'),
        fmt('AC_GID={GID}'),
    ]


def get_env_vars(regex=None):
    """Return 'KEY=VALUE' strings for os.environ entries whose key matches
    *regex* (all entries when regex is None)."""
    # FIX: identity-style `regex == None` -> idiomatic `is None`
    return [key+'='+value for key, value in os.environ.items() if regex is None or regex.search(key)]


def check_hash(program):
    """Return True if *program* is on PATH (via the shell builtin `hash`)."""
    from subprocess import check_call, CalledProcessError, PIPE
    try:
        check_call(fmt('hash {program}'), shell=True, stdout=PIPE, stderr=PIPE)
        return True
    except CalledProcessError:
        return False


def get_pkgmgr():
    """Detect the system package manager; raise UnknownPkgmgrError if none."""
    if check_hash('dpkg'):
        return 'deb'
    elif check_hash('rpm'):
        return 'rpm'
    elif check_hash('brew'):
        return 'brew'
    raise UnknownPkgmgrError


def task_count():
    '''
    use the cloc utility to count lines of code
    '''
    excludes = [
        'dist',
        'venv',
        '__pycache__',
        'auto_cert_cli.egg-info',
    ]
    excludes = '--exclude-dir=' + ','.join(excludes)
    scandir = os.path.dirname(__file__)
    return {
        'actions': [
            fmt('cloc {excludes} {scandir}'),
        ],
        # skip silently when cloc is not installed
        'uptodate': [
            lambda: not check_hash('cloc'),
        ],
    }


def task_checkpath():
    '''
    check for required path /data/autocert/certs
    '''
    return {
        'actions': [
            'test -d /data/autocert/certs',
        ],
    }


def task_checkreqs():
    '''
    check for required software
    '''
    DEBS = [
        'docker-ce',
    ]
    RPMS = [
        'docker-ce',
    ]
    # choose the check matching the detected package manager
    return {
        'deb': {
            'actions': ['dpkg -s ' + deb for deb in DEBS],
        },
        'rpm': {
            'actions': ['rpm -q ' + rpm for rpm in RPMS],
        },
        'brew': {
            'actions': ['true'],
        }
    }[get_pkgmgr()]


def task_dockercompose():
    '''
    assert docker-compose version ({0}) or higher
    '''
    # interpolate the minimum version into this task's docstring for doit help
    from utils.function import format_docstr
    format_docstr(task_dockercompose, MINIMUM_DOCKER_COMPOSE_VERSION)
    def check_docker_compose():
        import re
        from subprocess import check_output
        from packaging.version import parse as version_parse
        pattern = '(docker-compose version) ([0-9.]+(-rc[0-9])?)(, build [a-z0-9]+)'
        output = check_output('docker-compose --version', shell=True).decode('utf-8').strip()
        regex = re.compile(pattern)
        match = regex.search(output)
        version = match.groups()[1]
        assert version_parse(version) >= version_parse(MINIMUM_DOCKER_COMPOSE_VERSION)
    return {
        'actions': [
            check_docker_compose,
        ],
    }


def task_noroot():
    '''
    make sure script isn't run as root
    '''
    then = 'echo "  DO NOT RUN AS ROOT!"; echo; exit 1'
    bash = 'if [[ $(id -u) -eq 0 ]]; then {0}; fi'.format(then)
    return {
        'actions': [
            'bash -c \'{0}\''.format(bash),
        ],
    }


def task_pull():
    '''
    do a safe git pull
    '''
    test = '`git diff-index --quiet HEAD --`'
    pull = 'git pull --rebase'
    dirty = fmt('echo "refusing to \'{pull}\' because the tree is dirty"')
    return {
        'actions': [
            fmt('if {test}; then {pull}; else {dirty}; exit 1; fi'),
        ],
    }


def task_test():
    '''
    setup venv and run pytest
    '''
    return {
        'task_dep': [
            'noroot',
            'config',
        ],
        'actions': [
            'virtualenv --python=$(which python3) venv',
            'venv/bin/pip3 install --upgrade pip',
            'venv/bin/pip install -r api/requirements.txt',
            'venv/bin/pip install -r tests/requirements.txt',
            fmt('{ENVS} venv/bin/pytest -s -vv tests/'),
        ],
    }


def task_version():
    '''
    write git describe to VERSION file
    '''
    return {
        'actions': [
            'git describe --abbrev=7 | xargs echo -n > api/VERSION',
        ],
    }


def task_savelogs():
    '''
    save the logs to a timestamped file
    '''
    timestamp = datetime2int(utcnow())
    return {
        'task_dep': [
            'checkreqs',
            'dockercompose'
        ],
        'actions': [
            fmt('mkdir -p {LOGDIR}'),
            fmt('docker-compose logs > {LOGDIR}/{timestamp}.log'),
        ]
    }


def task_environment():
    '''
    set the env vars to be used inside of the container
    '''
    def add_env_vars():
        # rewrite docker-compose.yml from the env-less template, injecting
        # user/uid/gid and any *_proxy vars into every service
        print('docker-compose.yml.wo-envs -> docker-compose.yml')
        print('adding env vars to docker-compose.yml file')
        dcy = yaml.safe_load(open('docker-compose.yml.wo-envs'))
        for svc in dcy['services'].keys():
            envs = dcy['services'][svc].get('environment', [])
            envs += get_user_uid_gid()
            envs += get_env_vars(re.compile('(no|http|https)_proxy', re.IGNORECASE))
            pfmt('{svc}:')
            for env in envs:
                pfmt('  - {env}')
            dcy['services'][svc]['environment'] = envs
        with open('docker-compose.yml', 'w') as f:
            yaml.dump(dcy, f, default_flow_style=False)
    return {
        'task_dep': [
        ],
        'actions': [
            add_env_vars,
        ]
    }


def task_deploy():
    '''
    deloy flask app via docker-compose
    '''
    return {
        'task_dep': [
            'noroot',
            'checkpath',
            'checkreqs',
            'version',
            'test',
            'config',
            'dockercompose',
            'environment',
            'savelogs',
        ],
        'actions': [
            'docker-compose build',
            fmt('docker-compose up --remove-orphans -d'),
        ],
    }


def task_rmimages():
    '''
    remove dangling docker images
    '''
    query = '`docker images -q -f dangling=true`'
    return {
        'actions': [
            fmt('docker rmi {query}'),
        ],
        'uptodate': [
            fmt('[ -z "{query}" ] && exit 0 || exit 1'),
        ],
    }


def task_rmvolumes():
    '''
    remove dangling docker volumes
    '''
    query = '`docker volume ls -q -f dangling=true`'
    return {
        'actions': [
            fmt('docker volume rm {query}'),
        ],
        'uptodate': [
            fmt('[ -z "{query}" ] && exit 0 || exit 1'),
        ],
    }


def task_logs():
    '''
    simple wrapper that calls 'docker-compose logs'
    '''
    return {
        'actions': [
            'docker-compose logs',
        ],
    }


def task_config():
    '''
    write config.yml -> .config.yml
    '''
    # precedence: doit var LOG_LEVEL > ./LOG_LEVEL file > WARNING default
    log_level = 'WARNING'
    filename = '{0}/LOG_LEVEL'.format(os.path.dirname(__file__))
    if os.path.isfile(filename):
        log_level = open(filename).read().strip()
    log_level = get_var('LOG_LEVEL', log_level)
    if log_level not in LOG_LEVELS:
        raise UnknownLogLevelError(log_level)
    punch = fmt('''
logging:
  loggers:
    api:
      level: {log_level}
  handlers:
    console:
      level: {log_level}
''')
    return {
        'actions': [
            fmt('echo "cp {CONFIG_YML}\n-> {DOT_CONFIG_YML}"'),
            fmt('echo "setting LOG_LEVEL={log_level}"'),
            fmt('cp {CONFIG_YML} {DOT_CONFIG_YML}'),
            lambda: _update_config(DOT_CONFIG_YML, yaml.safe_load(punch)),
        ]
    }


def task_example():
    '''
    cp|strip config.yml -> config.yml.example
    '''
    # scrub real apikeys out of the example config
    apikey = '82_CHAR_APIKEY'
    punch = fmt('''
authorities:
  digicert:
    apikey: {apikey}
destinations:
  zeus:
    apikey: {apikey}
''')
    return {
        'actions': [
            fmt('cp {CONFIG_YML}.example {CONFIG_YML}.bak'),
            fmt('cp {CONFIG_YML} {CONFIG_YML}.example'),
            lambda: _update_config(CONFIG_YML+'.example', yaml.safe_load(punch)),
        ],
    }


def task_tidy():
    '''
    delete cached files
    '''
    TIDY_FILES = [
        '.doit.db/',
        'venv/',
        'api/VERSION',
    ]
    return {
        'actions': [
            'rm -rf ' + ' '.join(TIDY_FILES),
            'find . | grep -E "(__pycache__|\.pyc$)" | xargs rm -rf',
        ],
    }


def task_nuke():
    '''
    git clean and reset
    '''
    return {
        'task_dep': ['tidy'],
        'actions': [
            'docker-compose kill',
            'docker-compose rm -f',
            'git clean -fd',
            'git reset --hard HEAD',
        ],
    }


def task_setup():
    '''
    setup venv
    '''
    from utils.version import get_version
    return {
        'actions': [
            'rm -rf auto_cert_cli.egg-info/ venv/ dist/ __pycache__/',
            'virtualenv --python=python3 venv',
            'venv/bin/pip3 install --upgrade pip',
            'venv/bin/pip3 install -r cli/requirements.txt',
            'venv/bin/python3 ./setup.py install',
            'unzip -l venv/lib/python3.5/site-packages/auto_cert_cli-{0}-py3.5.egg'.format(get_version()),
        ],
    }


def task_prune():
    '''
    prune stopped containers
    '''
    return {
        'actions': ['docker rm `docker ps -q -f "status=exited"`'],
        'uptodate': ['[ -n "`docker ps -q -f status=exited`" ] && exit 1 || exit 0']
    }


def task_zeus():
    '''
    launch zeus containers
    '''
    # generator task: yields one subtask per test container
    image = 'zeus17.3'
    for container in [fmt('{image}_test{num}') for num in (1, 2)]:
        yield {
            'task_dep': ['prune'],
            'name': container,
            'actions': [fmt('docker run -d --name {container} {image}')],
            'uptodate': [fmt('[ -n "`docker ps -q -f name={container}`" ] && exit 0 || exit 1')]
        }


if __name__ == '__main__':
    print('should be run with doit installed')
    import doit
    doit.run(globals())
# -*- coding: utf-8 -*- import unittest from mock import Mock import cloud4rpi from cloud4rpi.errors import InvalidConfigError from cloud4rpi.errors import UnexpectedVariableTypeError from cloud4rpi.errors import UnexpectedVariableValueTypeError class ApiClientMock(object): def __init__(self): def noop_on_command(cmd): pass self.publish_config = Mock() self.publish_data = Mock() self.publish_diag = Mock() self.on_command = noop_on_command def assert_publish_data_called_with(self, expected): return self.publish_data.assert_called_with(expected, data_type='cr') def raise_on_command(self, cmd): self.on_command(cmd) class MockSensor(object): def __init__(self, value=42): self.read = Mock(return_value=value) self.__innerValue__ = value def get_state(self): return self.__innerValue__ def get_updated_state(self, value): self.__innerValue__ = value return self.__innerValue__ def get_incremented_state(self, value): return self.__innerValue__ + value class TestDevice(unittest.TestCase): def testDeclareVariables(self): api = ApiClientMock() device = cloud4rpi.Device(api) device.declare({ 'CPUTemp': { 'type': 'numeric', 'bind': MockSensor() } }) cfg = device.read_config() self.assertEqual(cfg, [{'name': 'CPUTemp', 'type': 'numeric'}]) def testDeclareVariablesValidation(self): api = ApiClientMock() device = cloud4rpi.Device(api) with self.assertRaises(UnexpectedVariableTypeError): device.declare({ 'CPUTemp': { 'type': 'number', 'bind': MockSensor() } }) def testDeclareDiag(self): api = ApiClientMock() device = cloud4rpi.Device(api) device.declare_diag({ 'IPAddress': '8.8.8.8', 'Host': 'hostname', }) diag = device.read_diag() self.assertEqual(diag, {'IPAddress': '8.8.8.8', 'Host': 'hostname'}) def testReadConfig(self): api = ApiClientMock() device = cloud4rpi.Device(api) device.declare({ 'SomeVar': { 'type': 'string' } }) device.declare({ 'CPUTemp': { 'type': 'numeric', 'bind': MockSensor() } }) cfg = device.read_config() self.assertEqual(cfg, [{'name': 'CPUTemp', 'type': 
'numeric'}]) def testReadConfigIfNotDeclared(self): api = ApiClientMock() device = cloud4rpi.Device(api) self.assertEqual(device.read_config(), []) def testReadVariables(self): handler = {} temperature_sensor = MockSensor(73) api = ApiClientMock() device = cloud4rpi.Device(api) device.declare({ 'LEDOn': { 'type': 'bool', 'value': False, 'bind': handler }, 'Temperature': { 'type': 'numeric', 'value': True, 'bind': temperature_sensor } }) data = device.read_data() self.assertEqual(data, { 'LEDOn': False, 'Temperature': 73 }) def testReadVariablesDoesNotContainsEmptyVars(self): api = ApiClientMock() device = cloud4rpi.Device(api) self.assertEqual(device.read_data(), {}) def testReadVariablesFromClassMethod(self): api = ApiClientMock() device = cloud4rpi.Device(api) sensor = MockSensor(10) device.declare({ 'MyParam': { 'type': 'numeric', 'bind': sensor.get_state }, }) data = device.read_data() self.assertEqual(data, { 'MyParam': 10, }) def testReadVariablesFromClassMethodWithCurrent(self): api = ApiClientMock() device = cloud4rpi.Device(api) sensor = MockSensor(10) device.declare({ 'MyParam': { 'type': 'numeric', 'value': 1, 'bind': sensor.get_incremented_state }, }) data = device.read_data() self.assertEqual(data, { 'MyParam': 11, }) def testReadDiag(self): temperature_sensor = MockSensor(73) api = ApiClientMock() device = cloud4rpi.Device(api) device.declare_diag({ 'CPUTemperature': temperature_sensor, 'IPAddress': lambda x: '8.8.8.8', 'OSName': lambda x: 'Linux', 'Host': 'weather_station' }) diag = device.read_diag() self.assertEqual(diag, { 'CPUTemperature': 73, 'IPAddress': '8.8.8.8', 'OSName': 'Linux', 'Host': 'weather_station' }) def testPublishConfig(self): api = ApiClientMock() device = cloud4rpi.Device(api) cfg = [ {'name': 'CPUTemp', 'type': 'numeric'}, {'name': 'Cooler', 'type': 'bool'} ] device.publish_config(cfg) api.publish_config.assert_called_with(cfg) def testReadBeforePublishConfig(self): api = ApiClientMock() device = cloud4rpi.Device(api) 
device.declare({ 'CPUTemp': { 'type': 'numeric', 'bind': MockSensor() } }) device.publish_config() cfg = [{'name': 'CPUTemp', 'type': 'numeric'}] api.publish_config.assert_called_with(cfg) def testPublishConfigFail_NotAnArray(self): api = ApiClientMock() device = cloud4rpi.Device(api) cfg = {'name': 'CPUTemp', 'type': 'numeric'} with self.assertRaises(InvalidConfigError): device.publish_config(cfg) api.publish_config.assert_not_called() def testPublishConfigFail_UnexpectedVariableType(self): api = ApiClientMock() device = cloud4rpi.Device(api) cfg = [{'name': 'CPUTemp', 'type': 'number'}] with self.assertRaises(UnexpectedVariableTypeError): device.publish_config(cfg) api.publish_config.assert_not_called() def testPublishDiag(self): api = ApiClientMock() device = cloud4rpi.Device(api) diag = { 'IPAddress': '8.8.8.8', 'Host': 'hostname' } device.publish_diag(diag) api.publish_diag.assert_called_with(diag) def testReadBeforePublishDiag(self): temperature_sensor = MockSensor(24) api = ApiClientMock() device = cloud4rpi.Device(api) device.declare_diag({ 'CPUTemperature': temperature_sensor, 'IPAddress': lambda x: '8.8.8.8', }) device.publish_diag() diag = {'IPAddress': '8.8.8.8', 'CPUTemperature': 24} api.publish_diag.assert_called_with(diag) def testPublishVariablesOnlyData(self): api = ApiClientMock() device = cloud4rpi.Device(api) device.declare({ 'Temperature': { 'type': 'numeric' }, 'Cooler': { 'type': 'bool', } }) data = { 'Temperature': 36.6, 'Cooler': True, 'TheAnswer': 42 } device.publish_data(data) api.publish_data.assert_called_with({ 'Temperature': 36.6, 'Cooler': True }) def testPublishNotDeclaredVariables(self): api = ApiClientMock() device = cloud4rpi.Device(api) data = { 'Temperature': 36.6, 'Cooler': True, 'TheAnswer': 42 } device.publish_data(data) api.publish_data.assert_called_with({}) def testReadBeforePublishData(self): temperature_sensor = MockSensor(24) api = ApiClientMock() device = cloud4rpi.Device(api) device.declare({ 'Temperature': { 'type': 
'numeric', 'value': True, 'bind': temperature_sensor } }) device.publish_data() data = {'Temperature': 24} api.publish_data.assert_called_with(data) def testDataReadValidation_Bool(self): api = ApiClientMock() device = cloud4rpi.Device(api) device.declare({ 'CoolerOn': { 'type': 'bool', 'value': True, 'bind': lambda x: 100 } }) device.publish_data() data = {'CoolerOn': True} api.publish_data.assert_called_with(data) def testDataReadValidation_Numeric(self): api = ApiClientMock() device = cloud4rpi.Device(api) device.declare({ 'ReadyState': { 'type': 'numeric', 'value': True, 'bind': lambda x: True } }) device.publish_data() data = {'ReadyState': 1} api.publish_data.assert_called_with(data) def testDataReadValidation_String(self): api = ApiClientMock() device = cloud4rpi.Device(api) device.declare({ 'ReadyState': { 'type': 'string', 'value': True, 'bind': lambda x: True } }) device.publish_data() data = {'ReadyState': 'true'} api.publish_data.assert_called_with(data) def testDataReadValidation_Location(self): api = ApiClientMock() device = cloud4rpi.Device(api) device.declare({ 'MyLocation': { 'type': 'location', 'value': True, 'bind': lambda x: {'lat': 37.89, 'lng': 75.43} } }) device.publish_data() data = {'MyLocation': {'lat': 37.89, 'lng': 75.43}} api.publish_data.assert_called_with(data) class CommandHandling(unittest.TestCase): def setUp(self): super(CommandHandling, self).setUp() self.api = ApiClientMock() self.device = cloud4rpi.Device(self.api) def testCallsBoundFunction(self): handler = Mock(return_value=True) self.device.declare({ 'LEDOn': { 'type': 'bool', 'value': False, 'bind': handler } }) self.api.raise_on_command({'LEDOn': True}) handler.assert_called_with(True) def testCallsBoundFunctionWithAnArgument(self): sensor = MockSensor(0) self.device.declare({ 'Status': { 'type': 'numeric', 'value': 10, 'bind': sensor.get_updated_state } }) self.api.raise_on_command({'Status': 20}) self.api.assert_publish_data_called_with({'Status': 20}) def 
testBindIsNotCallableFunction(self): self.device.declare({ 'LEDOn': { 'type': 'bool', 'value': False, 'bind': 'this is not a function' } }) expected = {'LEDOn': True} self.api.raise_on_command(expected) self.api.assert_publish_data_called_with(expected) data = self.device.read_data() self.assertEqual(data, expected) def testDirectUpdateVariableValue(self): self.device.declare({ 'LEDOn': { 'type': 'bool', 'value': False, } }) expected = {'LEDOn': True} self.api.raise_on_command(expected) self.api.assert_publish_data_called_with(expected) data = self.device.read_data() self.assertEqual(data, expected) def testSkipUnknownVariable(self): self.device.declare({ 'LEDOn': { 'type': 'bool', 'value': False, 'bind': lambda x: x } }) self.api.raise_on_command({'Other': True}) self.api.publish_data.assert_not_called() def testAllowPublishNullValue(self): self.device.declare({ 'LEDOn': { 'type': 'bool', 'value': False, 'bind': lambda x: None } }) self.api.raise_on_command({'LEDOn': True}) self.api.assert_publish_data_called_with({'LEDOn': None}) def testValidateCommandValueForBool(self): self.device.declare({ 'LEDOn': { 'type': 'bool', 'value': False, 'bind': lambda x: x } }) with self.assertRaises(UnexpectedVariableValueTypeError): self.api.raise_on_command({'LEDOn': 'false'}) def testValidateCommandValueStringToNumeric(self): self.device.declare({ 'Status': { 'type': 'numeric', 'value': 0, 'bind': lambda x: x } }) self.api.raise_on_command({'Status': '100'}) self.api.assert_publish_data_called_with({'Status': 100}) def testValidateCommandValueUnicodeToNumeric(self): self.device.declare({ 'Status': { 'type': 'numeric', 'value': 0, 'bind': lambda x: x } }) unicode_val = u'38.5' self.api.raise_on_command({'Status': unicode_val}) self.api.assert_publish_data_called_with({'Status': 38.5}) def testValidateCommandValueBoolToNumeric(self): self.device.declare({ 'Status': { 'type': 'numeric', 'value': 0, 'bind': lambda x: x } }) self.api.raise_on_command({'Status': True}) 
self.api.assert_publish_data_called_with({'Status': 1}) def testValidateCommandValueUnicodeToString(self): self.device.declare({ 'Percent': { 'type': 'string', 'value': 0, 'bind': lambda x: x } }) unicode_val = u'38.5%' self.api.raise_on_command({'Percent': unicode_val}) self.api.assert_publish_data_called_with({'Percent': '38.5%'}) def testPublishBackUpdatedVariableValues(self): sensor = MockSensor(36.6) self.device.declare({ 'LEDOn': { 'type': 'bool', 'value': False, 'bind': lambda x: x }, 'Cooler': { 'type': 'bool', 'value': True, 'bind': lambda x: x }, 'Status': { 'type': 'numeric', 'value': 0, 'bind': lambda x: 42 }, 'Temp': { 'type': 'numeric', 'value': 24.4, 'bind': sensor } }) self.api.raise_on_command({'LEDOn': True, 'Cooler': False, 'Status': 2, 'Temp': 36.6}) expected = { 'Cooler': False, 'Status': 42, 'LEDOn': True, 'Temp': 36.6 } self.api.assert_publish_data_called_with(expected) def testPublishBackOnlyCommandVariables(self): self.device.declare({ 'Actuator': { 'type': 'string', 'value': 'to be updated and published', 'bind': lambda x: x }, 'Sensor': { 'type': 'string', 'value': None, 'bind': 'do not updated by a command' }, }) self.api.raise_on_command({'Actuator': 'ON'}) self.api.assert_publish_data_called_with({'Actuator': 'ON'}) class PayloadValidation(unittest.TestCase): def setUp(self): super(PayloadValidation, self).setUp() self.api = ApiClientMock() self.device = cloud4rpi.Device(self.api) def testNumeric(self): self.device.declare({'Temp': {'type': 'numeric'}}) self.device.publish_data({'Temp': 36.3}) self.api.publish_data.assert_called_with({'Temp': 36.3}) def testNumericAsNull(self): self.device.declare({'Temp': {'type': 'numeric'}}) self.device.publish_data({'Temp': None}) self.api.publish_data.assert_called_with({'Temp': None}) def testNumericAsInt(self): self.device.declare({'Temp': {'type': 'numeric'}}) self.device.publish_data({'Temp': 36}) self.api.publish_data.assert_called_with({'Temp': 36}) def testNumericAsFloat(self): 
self.device.declare({'Temp': {'type': 'numeric'}}) self.device.publish_data({'Temp': 36.6}) self.api.publish_data.assert_called_with({'Temp': 36.6}) def testNumericAsString(self): self.device.declare({'Temp': {'type': 'numeric'}}) self.device.publish_data({'Temp': "36.6"}) self.api.publish_data.assert_called_with({'Temp': 36.6}) def testNumericAsBool(self): self.device.declare({'Temp': {'type': 'numeric'}}) self.device.publish_data({'Temp': True}) self.api.publish_data.assert_called_with({'Temp': 1.0}) def testNumericAsNaN(self): self.device.declare({'Temp': {'type': 'numeric'}}) self.device.publish_data({'Temp': float('NaN')}) self.api.publish_data.assert_called_with({'Temp': None}) def testNumericAsPositiveInfinity(self): self.device.declare({'Temp': {'type': 'numeric'}}) self.device.publish_data({'Temp': float('Inf')}) self.api.publish_data.assert_called_with({'Temp': None}) def testNumericAsNegativeInfinity(self): self.device.declare({'Temp': {'type': 'numeric'}}) self.device.publish_data({'Temp': -float('Inf')}) self.api.publish_data.assert_called_with({'Temp': None}) def testBool(self): self.device.declare({'PowerOn': {'type': 'bool'}}) self.device.publish_data({'PowerOn': True}) self.api.publish_data.assert_called_with({'PowerOn': True}) def testBoolAsNull(self): self.device.declare({'PowerOn': {'type': 'bool'}}) self.device.publish_data({'PowerOn': None}) self.api.publish_data.assert_called_with({'PowerOn': None}) def testBoolAsString(self): self.device.declare({'PowerOn': {'type': 'bool'}}) with self.assertRaises(UnexpectedVariableValueTypeError): self.device.publish_data({'PowerOn': "True"}) def testBoolAsPositiveNumber(self): self.device.declare({'PowerOn': {'type': 'bool'}}) self.device.publish_data({'PowerOn': 24.1}) self.api.publish_data.assert_called_with({'PowerOn': True}) def testBoolAsNegativeNumber(self): self.device.declare({'PowerOn': {'type': 'bool'}}) self.device.publish_data({'PowerOn': -10.1}) 
self.api.publish_data.assert_called_with({'PowerOn': True}) def testBoolAsZeroNumber(self): self.device.declare({'PowerOn': {'type': 'bool'}}) self.device.publish_data({'PowerOn': 0}) self.api.publish_data.assert_called_with({'PowerOn': False}) def testBoolAsNaN(self): self.device.declare({'PowerOn': {'type': 'bool'}}) self.device.publish_data({'PowerOn': float('NaN')}) self.api.publish_data.assert_called_with({'PowerOn': True}) def testBoolAsPositiveInfinity(self): self.device.declare({'PowerOn': {'type': 'bool'}}) self.device.publish_data({'PowerOn': float('Inf')}) self.api.publish_data.assert_called_with({'PowerOn': True}) def testBoolAsNegativeInfinity(self): self.device.declare({'PowerOn': {'type': 'bool'}}) self.device.publish_data({'PowerOn': -float('Inf')}) self.api.publish_data.assert_called_with({'PowerOn': True}) def testString(self): self.device.declare({'Status': {'type': 'string'}}) self.device.publish_data({'Status': '100'}) self.api.publish_data.assert_called_with({'Status': '100'}) def testStringAsNull(self): self.device.declare({'Status': {'type': 'string'}}) self.device.publish_data({'Status': None}) self.api.publish_data.assert_called_with({'Status': None}) def testStringAsNumeric(self): self.device.declare({'Status': {'type': 'string'}}) self.device.publish_data({'Status': 100.100}) self.api.publish_data.assert_called_with({'Status': '100.1'}) def testStringAsNaN(self): self.device.declare({'Status': {'type': 'string'}}) self.device.publish_data({'Status': float('NaN')}) self.api.publish_data.assert_called_with({'Status': 'nan'}) def testStringAsPositiveInfinity(self): self.device.declare({'Status': {'type': 'string'}}) self.device.publish_data({'Status': float('Inf')}) self.api.publish_data.assert_called_with({'Status': 'inf'}) def testStringAsNegativeInfinity(self): self.device.declare({'Status': {'type': 'string'}}) self.device.publish_data({'Status': -float('Inf')}) self.api.publish_data.assert_called_with({'Status': '-inf'}) def 
testStringAsInt(self): self.device.declare({'Status': {'type': 'string'}}) self.device.publish_data({'Status': 100}) self.api.publish_data.assert_called_with({'Status': '100'}) def testStringAsBool(self): self.device.declare({'Status': {'type': 'string'}}) self.device.publish_data({'Status': True}) self.api.publish_data.assert_called_with({'Status': 'true'}) def testLocation(self): location = {'lat': 37.89, 'lng': 75.43} self.device.declare({'Pos': {'type': 'location'}}) self.device.publish_data({'Pos': location}) self.api.publish_data.assert_called_with({'Pos': location}) def testLocation_Filtering(self): obj = {'some': 'foo', 'lng': 75.43, 'lat': 37.89, 'other': 42} self.device.declare({'Pos': {'type': 'location'}}) self.device.publish_data({'Pos': obj}) location = {'lat': 37.89, 'lng': 75.43} self.api.publish_data.assert_called_with({'Pos': location}) def testLocationAsNull(self): self.device.declare({'Pos': {'type': 'location'}}) self.device.publish_data({'Pos': None}) self.api.publish_data.assert_called_with({'Pos': None}) def testLocationAsNaN(self): self.device.declare({'Pos': {'type': 'location'}}) with self.assertRaises(UnexpectedVariableValueTypeError): self.device.publish_data({'Pos': float('NaN')}) def testLocationAsInfinity(self): self.device.declare({'Pos': {'type': 'location'}}) with self.assertRaises(UnexpectedVariableValueTypeError): self.device.publish_data({'Pos': float('Inf')}) def testLocationAsEmptyObject(self): self.device.declare({'Pos': {'type': 'location'}}) with self.assertRaises(UnexpectedVariableValueTypeError): self.device.publish_data({'Pos': {}}) def testLocationWithIncorrectFields(self): location = {'Latitude': 37.89, 'LNG': 75.43} self.device.declare({'Pos': {'type': 'location'}}) with self.assertRaises(UnexpectedVariableValueTypeError): self.device.publish_data({'Pos': location}) def testLocationWithoutLatitude(self): location = {'lng': 75.43} self.device.declare({'Pos': {'type': 'location'}}) with 
self.assertRaises(UnexpectedVariableValueTypeError): self.device.publish_data({'Pos': location}) def testLocationWithoutLongitude(self): location = {'lat': 37.89} self.device.declare({'Pos': {'type': 'location'}}) with self.assertRaises(UnexpectedVariableValueTypeError): self.device.publish_data({'Pos': location})
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from openstack.tests.unit import base from openstack.tests.unit import test_resource from openstack import exceptions from openstack.orchestration.v1 import stack from openstack import resource FAKE_ID = '<KEY>' FAKE_NAME = 'test_stack' FAKE = { 'capabilities': '1', 'creation_time': '2015-03-09T12:15:57.233772', 'deletion_time': '2015-03-09T12:15:57.233772', 'description': '3', 'disable_rollback': True, 'environment': {'var1': 'val1'}, 'environment_files': [], 'files': {'file1': 'content'}, 'files_container': 'dummy_container', 'id': FAKE_ID, 'links': [{ 'href': 'stacks/%s/%s' % (FAKE_NAME, FAKE_ID), 'rel': 'self'}], 'notification_topics': '7', 'outputs': '8', 'parameters': {'OS::stack_id': '9'}, 'name': FAKE_NAME, 'status': '11', 'status_reason': '12', 'tags': ['FOO', 'bar:1'], 'template_description': '13', 'template_url': 'http://www.example.com/wordpress.yaml', 'timeout_mins': '14', 'updated_time': '2015-03-09T12:30:00.000000', } FAKE_CREATE_RESPONSE = { 'stack': { 'id': FAKE_ID, 'links': [{ 'href': 'stacks/%s/%s' % (FAKE_NAME, FAKE_ID), 'rel': 'self'}]} } FAKE_UPDATE_PREVIEW_RESPONSE = { 'unchanged': [ { 'updated_time': 'datetime', 'resource_name': '', 'physical_resource_id': '{resource id or ''}', 'resource_action': 'CREATE', 'resource_status': 'COMPLETE', 'resource_status_reason': '', 'resource_type': 'restype', 'stack_identity': '{stack_id}', 'stack_name': '{stack_name}' } ], 'updated': [ { 'updated_time': 
'datetime', 'resource_name': '', 'physical_resource_id': '{resource id or ''}', 'resource_action': 'CREATE', 'resource_status': 'COMPLETE', 'resource_status_reason': '', 'resource_type': 'restype', 'stack_identity': '{stack_id}', 'stack_name': '{stack_name}' } ], 'replaced': [ { 'updated_time': 'datetime', 'resource_name': '', 'physical_resource_id': '{resource id or ''}', 'resource_action': 'CREATE', 'resource_status': 'COMPLETE', 'resource_status_reason': '', 'resource_type': 'restype', 'stack_identity': '{stack_id}', 'stack_name': '{stack_name}' } ], 'added': [ { 'updated_time': 'datetime', 'resource_name': '', 'physical_resource_id': '{resource id or ''}', 'resource_action': 'CREATE', 'resource_status': 'COMPLETE', 'resource_status_reason': '', 'resource_type': 'restype', 'stack_identity': '{stack_id}', 'stack_name': '{stack_name}' } ], 'deleted': [ { 'updated_time': 'datetime', 'resource_name': '', 'physical_resource_id': '{resource id or ''}', 'resource_action': 'CREATE', 'resource_status': 'COMPLETE', 'resource_status_reason': '', 'resource_type': 'restype', 'stack_identity': '{stack_id}', 'stack_name': '{stack_name}' } ] } class TestStack(base.TestCase): def test_basic(self): sot = stack.Stack() self.assertEqual('stack', sot.resource_key) self.assertEqual('stacks', sot.resources_key) self.assertEqual('/stacks', sot.base_path) self.assertTrue(sot.allow_create) self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = stack.Stack(**FAKE) self.assertEqual(FAKE['capabilities'], sot.capabilities) self.assertEqual(FAKE['creation_time'], sot.created_at) self.assertEqual(FAKE['deletion_time'], sot.deleted_at) self.assertEqual(FAKE['description'], sot.description) self.assertEqual(FAKE['environment'], sot.environment) self.assertEqual(FAKE['environment_files'], sot.environment_files) self.assertEqual(FAKE['files'], sot.files) 
self.assertEqual(FAKE['files_container'], sot.files_container) self.assertTrue(sot.is_rollback_disabled) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['links'], sot.links) self.assertEqual(FAKE['notification_topics'], sot.notification_topics) self.assertEqual(FAKE['outputs'], sot.outputs) self.assertEqual(FAKE['parameters'], sot.parameters) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['status'], sot.status) self.assertEqual(FAKE['status_reason'], sot.status_reason) self.assertEqual(FAKE['tags'], sot.tags) self.assertEqual(FAKE['template_description'], sot.template_description) self.assertEqual(FAKE['template_url'], sot.template_url) self.assertEqual(FAKE['timeout_mins'], sot.timeout_mins) self.assertEqual(FAKE['updated_time'], sot.updated_at) @mock.patch.object(resource.Resource, 'create') def test_create(self, mock_create): sess = mock.Mock() sot = stack.Stack(FAKE) res = sot.create(sess) mock_create.assert_called_once_with(sess, prepend_key=False, base_path=None) self.assertEqual(mock_create.return_value, res) @mock.patch.object(resource.Resource, 'commit') def test_commit(self, mock_commit): sess = mock.Mock() sot = stack.Stack(FAKE) res = sot.commit(sess) mock_commit.assert_called_once_with(sess, prepend_key=False, has_body=False, base_path=None) self.assertEqual(mock_commit.return_value, res) def test_check(self): sess = mock.Mock() sot = stack.Stack(**FAKE) sot._action = mock.Mock() body = {'check': ''} sot.check(sess) sot._action.assert_called_with(sess, body) def test_fetch(self): sess = mock.Mock() sess.default_microversion = None sot = stack.Stack(**FAKE) sess.get = mock.Mock() sess.get.side_effect = [ test_resource.FakeResponse( {'stack': {'stack_status': 'CREATE_COMPLETE'}}, 200), test_resource.FakeResponse( {'stack': {'stack_status': 'CREATE_COMPLETE'}}, 200), exceptions.ResourceNotFound(message='oops'), test_resource.FakeResponse( {'stack': {'stack_status': 'DELETE_COMPLETE'}}, 200) ] self.assertEqual(sot, 
sot.fetch(sess)) sess.get.assert_called_with( 'stacks/{id}'.format(id=sot.id), microversion=None) sot.fetch(sess, resolve_outputs=False) sess.get.assert_called_with( 'stacks/{id}?resolve_outputs=False'.format(id=sot.id), microversion=None) ex = self.assertRaises(exceptions.ResourceNotFound, sot.fetch, sess) self.assertEqual('oops', str(ex)) ex = self.assertRaises(exceptions.ResourceNotFound, sot.fetch, sess) self.assertEqual('No stack found for %s' % FAKE_ID, str(ex)) def test_abandon(self): sess = mock.Mock() sess.default_microversion = None mock_response = mock.Mock() mock_response.status_code = 200 mock_response.headers = {} mock_response.json.return_value = {} sess.delete = mock.Mock(return_value=mock_response) sot = stack.Stack(**FAKE) sot.abandon(sess) sess.delete.assert_called_with( 'stacks/%s/%s/abandon' % (FAKE_NAME, FAKE_ID), ) def test_update(self): sess = mock.Mock() sess.default_microversion = None mock_response = mock.Mock() mock_response.status_code = 200 mock_response.headers = {} mock_response.json.return_value = {} sess.put = mock.Mock(return_value=mock_response) sot = stack.Stack(**FAKE) body = sot._body.dirty.copy() sot.update(sess) sess.put.assert_called_with( '/stacks/%s/%s' % (FAKE_NAME, FAKE_ID), headers={}, microversion=None, json=body ) def test_update_preview(self): sess = mock.Mock() sess.default_microversion = None mock_response = mock.Mock() mock_response.status_code = 200 mock_response.headers = {} mock_response.json.return_value = FAKE_UPDATE_PREVIEW_RESPONSE.copy() sess.put = mock.Mock(return_value=mock_response) sot = stack.Stack(**FAKE) body = sot._body.dirty.copy() ret = sot.update(sess, preview=True) sess.put.assert_called_with( 'stacks/%s/%s/preview' % (FAKE_NAME, FAKE_ID), headers={}, microversion=None, json=body ) self.assertEqual( FAKE_UPDATE_PREVIEW_RESPONSE['added'], ret.added) self.assertEqual( FAKE_UPDATE_PREVIEW_RESPONSE['deleted'], ret.deleted) self.assertEqual( FAKE_UPDATE_PREVIEW_RESPONSE['replaced'], ret.replaced) 
self.assertEqual( FAKE_UPDATE_PREVIEW_RESPONSE['unchanged'], ret.unchanged) self.assertEqual( FAKE_UPDATE_PREVIEW_RESPONSE['updated'], ret.updated)
# NOTE(review): a stray "<reponame>..." dataset artifact preceded the docstring
# and made the module a syntax error; it has been removed.
"""This is a test script for emoFeatExtract.py"""
# In this script, after the feature extraction, the K-folds cross-validation
# technique is used, where K == 24 is the number of different speakers on the
# RAVDESS database. Currently we classify the samples on binary Activation
# and binary Valence.

#Current Score
#Binary Activation
#[0.71666667 0.76666667 0.76666667 0.68333333 0.71666667 0.78333333
# 0.73333333 0.81666667 0.56666667 0.73333333 0.7        0.71666667
# 0.73333333 0.78333333 0.71666667 0.73333333 0.75       0.65
# 0.56666667 0.7        0.68333333 0.71666667 0.78333333 0.8       ]
#0.7215277777777778
#Binary Valence
#[0.71666667 0.53333333 0.68333333 0.6        0.56666667 0.5
# 0.65       0.65       0.56666667 0.61666667 0.58333333 0.56666667
# 0.55       0.66666667 0.61666667 0.6        0.63333333 0.68333333
# 0.68333333 0.68333333 0.6        0.53333333 0.58333333 0.68333333]
#0.6145833333333334

import os

from pyAudioAnalysis import audioBasicIO
from emoFeatExtract import emoFeatExtract
import numpy as np
from sklearn import svm
from sklearn.model_selection import cross_val_score as cvs

# RAVDESS filenames encode metadata at fixed character offsets; the helpers
# below slice those fixed positions out of a filename string.

def getEmotionLabel(x):
    """Return the two-character emotion code from a RAVDESS filename."""
    return x[6:8]

def getSpeakerLabel(x):
    """Return the two-character speaker (actor) id from a RAVDESS filename."""
    return x[18:20]

def trainortest(x):
    """Return the train/test field of the filename (not used at the moment)."""
    return x[15:17]

def getFourth(val):
    """Sort key: fourth element of a combined record (the speaker id)."""
    return val[3]

# fix: the dataset path was hard-coded twice (os.chdir and os.listdir);
# hoist it into a single constant so the two calls cannot drift apart.
DATA_DIR = ('C:/Users/konst_000/Desktop/Σχολή/6ο Εξάμηνο/ΨΕΣ/'
            'Speech Emotion Recognition/Audio Database/Complete')
os.chdir(DATA_DIR)
fileList = os.listdir(DATA_DIR)

featureList = []   # extracted feature vector per training sample
labelListAct = []  # binary activation label per sample ('Low'/'High')
labelListVal = []  # binary valence label per sample ('Negative'/'Positive')
speakerList = []   # speaker identity per sample (drives fold grouping)

# RAVDESS emotion codes: 01 neutral, 02 calm, 03 happy, 04 sad, 05 angry,
# 06 fearful, 07 disgust, 08 surprised -- TODO confirm mapping.
LOW_ACTIVATION = {'01', '02', '04', '07'}
NEGATIVE_VALENCE = {'04', '05', '06', '07'}

for f in fileList:
    label = getEmotionLabel(f)
    [Fs, sample] = audioBasicIO.readAudioFile(f)
    # Feature extraction can be performed only on mono signals.
    sample = audioBasicIO.stereo2mono(sample)
    speaker = getSpeakerLabel(f)
    # 50 ms windows with 25 ms step, expressed in samples.
    features = emoFeatExtract(sample, Fs, 0.050 * Fs, 0.025 * Fs)
    featureList.append(features)
    labelListAct.append('Low' if label in LOW_ACTIVATION else 'High')
    labelListVal.append('Negative' if label in NEGATIVE_VALENCE else 'Positive')
    speakerList.append(speaker)

# Sort the samples by speaker so the 24 cross-validation folds line up with
# the 24 speakers (approximate leave-one-speaker-out validation).
final = sorted(
    ([feat, act, val, spk]
     for feat, act, val, spk in
     zip(featureList, labelListAct, labelListVal, speakerList)),
    key=getFourth)

featureList = [record[0] for record in final]
labelListAct = [record[1] for record in final]
labelListVal = [record[2] for record in final]

clf = svm.SVC(gamma = 'auto')
predictionsAct = cvs(clf, featureList, labelListAct, cv = 24)
predictionsVal = cvs(clf, featureList, labelListVal, cv = 24)

print('Binary Activation')
print(predictionsAct)
print(np.mean(predictionsAct))
print('Binary Valence')
print(predictionsVal)
print(np.mean(predictionsVal))
# torque/commands/configure.py
# NOTE(review): a stray "<filename>..." dataset artifact preceded the imports
# and made the module a syntax error; it has been converted to a comment.
"""Implements the `torque configure` CLI sub-commands (set/list/remove)."""
import getpass
import logging

from docopt import DocoptExit

from torque.client import TorqueClient
from torque.commands.base import BaseCommand
from torque.constants import TorqueConfigKeys
from torque.exceptions import ConfigFileMissingError
from torque.parsers.global_input_parser import GlobalInputParser
from torque.services.config import TorqueConfigProvider
from torque.view.configure_list_view import ConfigureListView
from torque.view.view_helper import mask_token

logger = logging.getLogger(__name__)


class ConfigureCommand(BaseCommand):
    """
    usage: torque configure set [options]
           torque configure list
           torque configure remove <profile>
           torque configure [--help|-h]

    options:
       -P --profile <profile>   Set profile name

       -a --account <name>      Set account name

       -s --space <space>       Set space name

       -t --token <token>       Set token

       -l --login               Retrieves an authentication token from server using account, email and password. Does not work for SSO

       -e --email <email>       Set email for authentication (when --login is set)

       -p --password <password> Set password for authentication (when --login is set)

       -h --help                Show this message
    """

    def get_actions_table(self) -> dict:
        """Map the docopt sub-command keyword to its handler method."""
        return {"set": self.do_configure, "list": self.do_list, "remove": self.do_remove}

    def do_list(self):
        """Render all configured profiles as a table.

        Exits with a docopt usage error when no config file exists yet;
        any other failure is logged and reported as a generic failure.
        """
        try:
            config_file = GlobalInputParser.get_config_path()
            config = TorqueConfigProvider(config_file).load_all()
            result_table = ConfigureListView(config).render()
        except ConfigFileMissingError:
            raise DocoptExit("Config file doesn't exist. Use 'torque configure set' to configure Torque CLI.")
        except Exception as e:
            logger.exception(e, exc_info=False)
            return self.die()

        self.message(result_table)
        return self.success()

    def do_remove(self):
        """Delete the named profile from the config file."""
        profile_to_remove = self.input_parser.configure_remove.profile
        if not profile_to_remove:
            raise DocoptExit("Please provide a profile name to remove")
        try:
            config_file = GlobalInputParser.get_config_path()
            config_provider = TorqueConfigProvider(config_file)
            config_provider.remove_profile(profile_to_remove)
        except Exception as e:
            logger.exception(e, exc_info=False)
            return self.die()
        return self.success()

    def do_configure(self):
        """Interactively create or update a profile.

        Values come from command-line options when given, otherwise the user
        is prompted; empty answers fall back to the existing profile values.
        With --login, a long-lived token is fetched using email/password;
        otherwise the token is entered directly.
        """
        config_file = GlobalInputParser.get_config_path()
        login = self.input_parser.configure_set.login
        config_provider = TorqueConfigProvider(config_file)
        config = {}
        try:
            config = config_provider.load_all()
        except Exception:
            # Best effort: a missing/corrupt config simply means no defaults.
            pass

        # read profile
        profile = self.input_parser.configure_set.profile or input("Profile Name [default]: ")
        profile = profile or "default"

        # if profile exists set current values from profile
        current_account = config.get(profile, {}).get(TorqueConfigKeys.ACCOUNT, "")
        current_space = config.get(profile, {}).get(TorqueConfigKeys.SPACE, "")
        current_token = config.get(profile, {}).get(TorqueConfigKeys.TOKEN, "")

        # read account
        # fix: the first branch was a plain string, so the prompt printed the
        # literal text "{current_account}"; it must be an f-string too.
        login_msg = (
            f"Torque Account [{current_account}]: "
            if login
            else f"Torque Account (optional) [{current_account}]: "
        )
        account = self.input_parser.configure_set.account or input(login_msg)
        if login and not account:
            # required if login using email and password
            return self.die("Account cannot be empty")
        account = account or current_account

        # read space name
        space = self.input_parser.configure_set.space or input(f"Torque Space [{current_space}]: ")
        space = space or current_space
        if not space:
            return self.die("Space cannot be empty")

        if login:
            # read email
            email = self.input_parser.configure_set.email or input("Email: ")
            if not email:
                return self.die("Email cannot be empty")
            # read password
            password = self.input_parser.configure_set.password or getpass.getpass("Password: ")
            # get token
            try:
                client = TorqueClient()
                access_token = client.login(account, email, password)
                client.session.init_bearer_auth(access_token)
                token = client.longtoken()
            except Exception as e:
                logger.exception(e, exc_info=False)
                return self.die()
        else:
            # read token; mask the stored one when showing it as the default
            token = self.input_parser.configure_set.token or getpass.getpass(
                f"Torque Token [{mask_token(current_token)}]: "
            )
            token = token or current_token
        if not token:
            return self.die("Token cannot be empty")

        # save user inputs
        config_provider.save_profile(profile, token, space, account)
        return self.success()
import sys

import numpy as np
import pandas as pd

# How to process duplicates ("first" keeps the first occurrence of each duplicate row)
keepDuplicatesStrategy = "first"


def load_data(messages_filepath, categories_filepath):
    """
    Load the data from the passed paths to csv files.

    Args:
        messages_filepath: Path to the messages csv file.
        categories_filepath: Path to the categories csv file.

    Returns:
        Tuple (messages, categories) of DataFrames.
    """
    messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)
    return messages, categories


def getInvalidColumns(df):
    """
    Get columns with only one class or more than two classes.

    F.i. column "child_alone" has exclusively 0 values. This can throw an
    error later when we fit a multilabel classifier (f.i. SVC: Expected more
    than one classes, got 1). Also, if only 0 values are present, there is
    nothing to learn and nothing to predict.

    NOTE(review): this helper is not called by the pipeline itself; it is a
    diagnostic utility (clean_data currently hard-codes the known offender).

    Args:
        df: The dataframe to check for invalid columns.

    Returns:
        Tuple (singleClassColumns, classesExceededColumns): column names with
        a single class, and column names with more than two distinct values.
    """
    singleClassColumns = []
    classesExceededColumns = []
    for categoryColumn in df.columns:
        if ((df[categoryColumn] == 0).all() or (df[categoryColumn] == 1).all()):
            singleClassColumns.append(categoryColumn)
        if (len(np.unique(df[categoryColumn])) > 2):
            classesExceededColumns.append(categoryColumn)
    return singleClassColumns, classesExceededColumns


def clean_data(df):
    """
    Clean the dataframe.

    Drop duplicates, fill missing original messages, replace the invalid
    "related" class value 2 with 1, and remove the obsolete single-class
    "child_alone" column.

    Args:
        df: The dataframe to clean.

    Returns:
        The cleaned dataframe (also mutated in place).
    """
    df.drop_duplicates(keep=keepDuplicatesStrategy, inplace=True)

    # "related" contains a handful of 2s; treat them as the positive class.
    df["related"] = df["related"].replace(2, 1)

    # Fill missing values of original untranslated message with translated message and appended id.
    # Reasoning:
    # 1. We are not using original now, but maybe it's important later to see the original message,
    #    so we keep it for now.
    # 2. Better to remove as many NaN values as possible.
    # BUGFIX: the old code used ";id-{}".format(df["id"]), which formats the
    # repr of the *whole* id Series into a single string and appends that to
    # every row. Build the suffix element-wise instead.
    df["original"] = df["original"].fillna(df["message"] + ";id-" + df["id"].astype(str))

    # SVC will not accept singular class, remove offending column
    df.drop(["child_alone"], axis="columns", inplace=True)
    return df


def transform_data(messages, categories):
    """
    Transform the data.

    Merges the datasets, creates category column names, one-hot encodes the
    categories, and replaces their string values with binary values.

    Args:
        messages: The messages dataframe.
        categories: The categories dataframe, where most of the work is done.

    Returns:
        The merged dataframe with one numeric column per category.
    """
    df = messages.merge(categories)
    categories = df["categories"].str.split(";", expand=True)

    # Select the first row of the categories dataframe to derive column names
    # (each entry looks like "related-1;request-0;...").
    categoryNameParts = df["categories"][0].split(";")
    categoryColumnNames = []
    for categoryNamePart in categoryNameParts:
        categoryColumnNames.append(categoryNamePart.split("-")[0])
    categories.columns = categoryColumnNames

    for column in categories:
        # set each value to be the last character of the string
        categories[column] = categories[column].str.split("-").str[1]
        # convert column from string to numeric
        categories[column] = pd.to_numeric(categories[column])

    df.drop(["categories"], axis="columns", inplace=True)
    df = pd.concat([df, categories], axis="columns", sort=False)
    return df


def save_data(df, database_filename):
    """
    Save the dataframe to a sqlite database, replacing any previous table.

    Args:
        df: The dataframe to save back to the database.
        database_filename: Name of the database file to save to.
    """
    # Imported lazily so the load/clean/transform helpers stay usable in
    # environments without sqlalchemy installed.
    from sqlalchemy import create_engine

    engine = create_engine(f"sqlite:///{database_filename}")
    # if_exists="replace" drops and rewrites the table - the safest way to
    # deal with an existing table (replaces the former raw "DROP TABLE" SQL,
    # which also breaks under SQLAlchemy 2.0's string-statement rules).
    df.to_sql('CategorizedResponse', engine, index=False, if_exists="replace")


def main():
    """
    Perform the ETL pipeline process with the passed command-line parameters:
    messages csv path, categories csv path, output database path.
    """
    if len(sys.argv) == 4:

        messages_filepath, categories_filepath, database_filepath = sys.argv[1:]

        print('Loading data...\n    MESSAGES: {}\n    CATEGORIES: {}'
              .format(messages_filepath, categories_filepath))
        messages, categories = load_data(messages_filepath, categories_filepath)

        print('Transforming and cleaning data...')
        df = clean_data(transform_data(messages, categories))

        print('Saving data...\n    DATABASE: {}'.format(database_filepath))
        save_data(df, database_filepath)

        print('Cleaned data saved to database!')

    else:
        print('Please provide the filepaths of the messages and categories '
              'datasets as the first and second argument respectively, as '
              'well as the filepath of the database to save the cleaned data '
              'to as the third argument. \n\nExample: python process_data.py '
              'disaster_messages.csv disaster_categories.csv '
              'DisasterResponse.db')


if __name__ == '__main__':
    main()
from os.path import dirname, realpath, join
from datetime import date
from tkinter import (Tk, Frame, Button,
                     Label, Spinbox, font, PhotoImage)

# Resources live next to this script, independent of the working directory.
CURRENT_DIR = dirname(realpath(__file__))
CALC_PNG = join(CURRENT_DIR, 'imgs', 'calc.png')
USER_PNG = join(CURRENT_DIR, 'imgs', 'user.png')


def get_formated_date():
    """Return today's date formatted as dd/mm/YYYY."""
    current = date.today()
    formated_date = current.strftime('%d/%m/%Y')
    return formated_date


def get_current_year():
    """Return the current four-digit year."""
    return date.today().year


class Program(Tk):
    """Small Tk window that computes a person's age from their birth year."""

    BACKGROUND = '#fff'

    def __init__(self, *args, **kwargs):
        super(Program, self).__init__(*args, **kwargs)
        self.option_add('*Font', 'Georgia 14 normal')
        self.create_current_date_field()
        self.create_input_birth()
        self.create_button()
        self.create_output_result()
        self.settings()

    def settings(self):
        """Set window title, fixed geometry and background color."""
        self.title('Calcular idade')
        self.geometry('550x380+0+0')
        self['background'] = self.BACKGROUND

    def execute(self):
        """Enter the Tk main loop (blocks until the window is closed)."""
        self.mainloop()

    def create_current_date_field(self):
        """Label with today's date in the top-left corner."""
        label = Label(self, background=self.BACKGROUND)
        label['text'] = get_formated_date()
        label.pack(side='top', padx=5, pady=5, anchor='nw')

    def create_input_birth(self):
        """Spinbox for the birth year plus a decorative user image."""
        # creating frames
        main_frame = Frame(self, background=self.BACKGROUND)
        left_frame = Frame(main_frame, background=self.BACKGROUND)
        right_frame = Frame(main_frame, background=self.BACKGROUND)
        # widgets for left_frame
        label = Label(left_frame, text='Ano de nascimento', background=self.BACKGROUND)
        self.input_birth = Spinbox(left_frame, from_=0, to=9999)
        # widgets for right_frame
        img = PhotoImage(file=USER_PNG)
        label_img = Label(right_frame, image=img)
        # Keep a reference on the widget, otherwise the image is garbage collected.
        label_img.img = img
        # packing
        label.pack(side='left', anchor='sw')
        self.input_birth.pack(side='left', anchor='sw')
        label_img.pack(side='bottom', anchor='s')
        left_frame.pack(side='left', fill='both', expand=True)
        right_frame.pack(side='left', fill='both', expand=True)
        main_frame.pack(fill='both', expand=True, padx=5, pady=5)
        left_frame.pack_propagate(False)
        right_frame.pack_propagate(False)
        main_frame.pack_propagate(False)

    def create_button(self):
        """'Calcular' button wired to button_click."""
        img = PhotoImage(file=CALC_PNG)
        button = Button(self, text='Calcular', image=img, compound='left')
        # Keep a reference on the widget, otherwise the image is garbage collected.
        button.image = img
        button['background'] = '#eee'
        button['relief'] = 'raised'
        button['borderwidth'] = 4
        button['command'] = self.button_click
        button.pack(fill='x', expand=False, padx=5, pady=5)

    def create_output_result(self):
        """Label that receives the computed age."""
        self.output_result = Label(self, text='Resultado', background=self.BACKGROUND)
        self.output_result.pack(padx=5, pady=5)

    def button_click(self):
        """Compute the age from the Spinbox value and display it.

        BUGFIX: the Spinbox is freely editable, so int() used to raise an
        uncaught ValueError on non-numeric input; show an error message in
        the result label instead of crashing.
        """
        user_input = self.input_birth.get()
        try:
            years_old = get_current_year() - int(user_input)
        except ValueError:
            self.output_result['text'] = 'Entrada inválida.'
            return
        self.output_result['text'] = f'{years_old} anos de idade.'


def main():
    program = Program()
    program.execute()


if __name__ == '__main__':
    main()
from FINE.component import Component, ComponentModeling
from FINE import utils
import warnings
import pyomo.environ as pyomo
import pandas as pd


class Transmission(Component):
    """
    Transmission component which transfers a commodity between two locations of
    the energy system. All capacity, eligibility and cost data are
    2-dimensional, i.e. indexed by an (origin, destination) location pair.
    """
    def __init__(self, esM, name, commodity, losses=0, distances=None,
                 hasCapacityVariable=True, capacityVariableDomain='continuous', capacityPerPlantUnit=1,
                 hasIsBuiltBinaryVariable=False, bigM=None,
                 operationRateMax=None, operationRateFix=None, tsaWeight=1,
                 locationalEligibility=None, capacityMin=None, capacityMax=None, sharedPotentialID=None,
                 capacityFix=None, isBuiltFix=None,
                 investPerCapacity=0, investIfBuilt=0, opexPerOperation=0, opexPerCapacity=0,
                 opexIfBuilt=0, interestRate=0.08, economicLifetime=10):
        """Validate and store all component data (checks delegated to utils)."""
        # TODO add unit checks
        # Set general component data
        utils.checkCommodities(esM, {commodity})
        self._name, self._commodity = name, commodity
        self._distances = utils.checkAndSetDistances(esM, distances)
        self._losses = utils.checkAndSetTransmissionLosses(esM, losses, distances)

        # Set design variable modeling parameters
        utils.checkDesignVariableModelingParameters(capacityVariableDomain, hasCapacityVariable,
                                                    hasIsBuiltBinaryVariable, bigM)
        self._hasCapacityVariable = hasCapacityVariable
        self._capacityVariableDomain = capacityVariableDomain
        self._capacityPerPlantUnit = capacityPerPlantUnit
        self._hasIsBuiltBinaryVariable = hasIsBuiltBinaryVariable
        self._bigM = bigM

        # Set economic data
        self._investPerCapacity = utils.checkAndSetCostParameter(esM, name, investPerCapacity, '2dim')
        self._investIfBuilt = utils.checkAndSetCostParameter(esM, name, investIfBuilt, '2dim')
        self._opexPerOperation = utils.checkAndSetCostParameter(esM, name, opexPerOperation, '2dim')
        self._opexPerCapacity = utils.checkAndSetCostParameter(esM, name, opexPerCapacity, '2dim')
        self._opexIfBuilt = utils.checkAndSetCostParameter(esM, name, opexIfBuilt, '2dim')
        self._interestRate = utils.checkAndSetCostParameter(esM, name, interestRate, '2dim')
        self._economicLifetime = utils.checkAndSetCostParameter(esM, name, economicLifetime, '2dim')
        self._CCF = self.getCapitalChargeFactor()

        # Set location-specific operation parameters
        # (a fixed operation rate makes an upper bound redundant)
        if operationRateMax is not None and operationRateFix is not None:
            operationRateMax = None
            warnings.warn('If operationRateFix is specified, the operationRateMax parameter is not required.\n' +
                          'The operationRateMax time series was set to None.')
        utils.checkOperationTimeSeriesInputParameters(esM, operationRateMax, locationalEligibility, '2dim')
        utils.checkOperationTimeSeriesInputParameters(esM, operationRateFix, locationalEligibility, '2dim')

        self._fullOperationRateMax = utils.setFormattedTimeSeries(operationRateMax)
        self._aggregatedOperationRateMax = None
        self._operationRateMax = utils.setFormattedTimeSeries(operationRateMax)

        self._fullOperationRateFix = utils.setFormattedTimeSeries(operationRateFix)
        self._aggregatedOperationRateFix = None
        self._operationRateFix = utils.setFormattedTimeSeries(operationRateFix)

        self._tsaWeight = tsaWeight

        # Set location-specific design parameters
        self._sharedPotentialID = sharedPotentialID
        utils.checkLocationSpecficDesignInputParams(esM, hasCapacityVariable, hasIsBuiltBinaryVariable,
                                                    capacityMin, capacityMax, capacityFix,
                                                    locationalEligibility, isBuiltFix, sharedPotentialID,
                                                    '2dim')
        self._capacityMin, self._capacityMax, self._capacityFix = capacityMin, capacityMax, capacityFix
        self._isBuiltFix = isBuiltFix

        # Set locational eligibility
        operationTimeSeries = operationRateFix if operationRateFix is not None else operationRateMax
        self._locationalEligibility = utils.setLocationalEligibility(esM, locationalEligibility, capacityMax,
                                                                     capacityFix, isBuiltFix,
                                                                     hasCapacityVariable, operationTimeSeries,
                                                                     '2dim')

        # Variables at optimum (set after optimization)
        self._capacityVariablesOptimum = None
        self._isBuiltVariablesOptimum = None
        self._operationVariablesOptimum = None

    def getCapitalChargeFactor(self):
        """ Computes and returns capital charge factor (inverse of annuity factor) """
        # CCF = (1 - (1+i)^-n) / i, written element-wise for the cost tables
        return 1 / self._interestRate - 1 / (pow(1 + self._interestRate, self._economicLifetime)
                                             * self._interestRate)

    def addToEnergySystemModel(self, esM):
        """Register this component (and its modeling class) with the model."""
        esM._isTimeSeriesDataClustered = False
        if self._name in esM._componentNames:
            if esM._componentNames[self._name] == TransmissionModeling.__name__:
                warnings.warn('Component identifier ' + self._name + ' already exists. Data will be overwritten.')
            else:
                raise ValueError('Component name ' + self._name + ' is not unique.')
        else:
            esM._componentNames.update({self._name: TransmissionModeling.__name__})
        mdl = TransmissionModeling.__name__
        if mdl not in esM._componentModelingDict:
            esM._componentModelingDict.update({mdl: TransmissionModeling()})
        esM._componentModelingDict[mdl]._componentsDict.update({self._name: self})

    def setTimeSeriesData(self, hasTSA):
        """Switch between aggregated and full time series depending on TSA use."""
        self._operationRateMax = self._aggregatedOperationRateMax if hasTSA else self._fullOperationRateMax
        self._operationRateFix = self._aggregatedOperationRateFix if hasTSA else self._fullOperationRateFix

    def getDataForTimeSeriesAggregation(self):
        """Flatten the (origin, destination) time series into uniquely named
        columns for the time series aggregation step; returns (data, weights)."""
        fullOperationRate = self._fullOperationRateFix if self._fullOperationRateFix is not None \
            else self._fullOperationRateMax
        if fullOperationRate is not None:
            fullOperationRate = fullOperationRate.copy()
            uniqueIdentifiers = [self._name + "_operationRate_" + locationIn + '_' + locationOut
                                 for locationIn, locationOut in fullOperationRate.columns]
            compData = pd.DataFrame(index=fullOperationRate.index, columns=uniqueIdentifiers)
            compDict = {}
            for locationIn, locationOut in fullOperationRate.columns:
                uniqueIdentifier = self._name + "_operationRate_" + locationIn + '_' + locationOut
                compData[uniqueIdentifier] = fullOperationRate.pop((locationIn, locationOut))
                compDict.update({uniqueIdentifier: self._tsaWeight})
            return compData, compDict
        else:
            return None, {}

    def setAggregatedTimeSeriesData(self, data):
        """Restore the (origin, destination) column structure from the
        aggregated data and store it on the matching attribute."""
        fullOperationRate = self._fullOperationRateFix if self._fullOperationRateFix is not None \
            else self._fullOperationRateMax
        if fullOperationRate is not None:
            # (a dead intermediate copy of data[uniqueIdentifiers] was removed here)
            compData = pd.DataFrame(index=data.index, columns=fullOperationRate.columns)
            for locationIn, locationOut in compData.columns:
                compData.loc[:, (locationIn, locationOut)] = \
                    data.loc[:, self._name + "_operationRate_" + locationIn + '_' + locationOut]
            if self._fullOperationRateFix is not None:
                self._aggregatedOperationRateFix = compData
            else:
                self._aggregatedOperationRateMax = compData


class TransmissionModeling(ComponentModeling):
    """ Modeling class of all Transmission components: declares the pyomo
    sets, variables and constraints and the contributions to the commodity
    balance and objective function. """
    def __init__(self):
        self._componentsDict = {}
        self._capacityVariablesOptimum = None
        self._isBuiltVariablesOptimum = None
        self._operationVariablesOptimum = None

    ####################################################################################################################
    #                                            Declare sparse index sets                                             #
    ####################################################################################################################

    def declareSets(self, esM, pyM):
        """ Declares sets and dictionaries """
        compDict = self._componentsDict

        ################################################################################################################
        #                                        Declare design variables sets                                         #
        ################################################################################################################

        def initDesignVarSet(pyM):
            return ((loc, loc_, compName) for loc in esM._locations for loc_ in esM._locations
                    for compName, comp in compDict.items()
                    if comp._locationalEligibility[loc][loc_] == 1 and comp._hasCapacityVariable)
        pyM.designDimensionVarSet_trans = pyomo.Set(dimen=3, initialize=initDesignVarSet)

        def initContinuousDesignVarSet(pyM):
            return ((loc, loc_, compName) for loc, loc_, compName, in pyM.designDimensionVarSet_trans
                    if compDict[compName]._capacityVariableDomain == 'continuous')
        pyM.continuousDesignDimensionVarSet_trans = pyomo.Set(dimen=3, initialize=initContinuousDesignVarSet)

        def initDiscreteDesignVarSet(pyM):
            return ((loc, loc_, compName) for loc, loc_, compName in pyM.designDimensionVarSet_trans
                    if compDict[compName]._capacityVariableDomain == 'discrete')
        pyM.discreteDesignDimensionVarSet_trans = pyomo.Set(dimen=3, initialize=initDiscreteDesignVarSet)

        def initDesignDecisionVarSet(pyM):
            return ((loc, loc_, compName) for loc, loc_, compName in pyM.designDimensionVarSet_trans
                    if compDict[compName]._hasIsBuiltBinaryVariable)
        pyM.designDecisionVarSet_trans = pyomo.Set(dimen=3, initialize=initDesignDecisionVarSet)

        ################################################################################################################
        #                                      Declare operation variables sets                                        #
        ################################################################################################################

        def initOpVarSet(pyM):
            return ((loc, loc_, compName) for loc in esM._locations for loc_ in esM._locations
                    for compName, comp in compDict.items() if comp._locationalEligibility[loc][loc_] == 1)
        pyM.operationVarSet_trans = pyomo.Set(dimen=3, initialize=initOpVarSet)

        # Lookup dictionaries: per location, which components transmit out of /
        # into that location from/to each other location.
        pyM.operationVarDict_transOut = {loc: {loc_: {compName for compName in compDict
                                                      if (loc, loc_, compName) in pyM.operationVarSet_trans}
                                               for loc_ in esM._locations} for loc in esM._locations}
        pyM.operationVarDict_transIn = {loc: {loc_: {compName for compName in compDict
                                                     if (loc_, loc, compName) in pyM.operationVarSet_trans}
                                              for loc_ in esM._locations} for loc in esM._locations}

        ################################################################################################################
        #                           Declare sets for case differentiation of operating modes                           #
        ################################################################################################################

        def initOpConstrSet1(pyM):
            return ((loc, loc_, compName) for loc, loc_, compName in pyM.operationVarSet_trans
                    if compDict[compName]._hasCapacityVariable and
                    compDict[compName]._operationRateMax is None and
                    compDict[compName]._operationRateFix is None)
        pyM.opConstrSet1_trans = pyomo.Set(dimen=3, initialize=initOpConstrSet1)

        def initOpConstrSet2(pyM):
            return ((loc, loc_, compName) for loc, loc_, compName in pyM.operationVarSet_trans
                    if compDict[compName]._hasCapacityVariable and
                    compDict[compName]._operationRateFix is not None)
        pyM.opConstrSet2_trans = pyomo.Set(dimen=3, initialize=initOpConstrSet2)

        def initOpConstrSet3(pyM):
            return ((loc, loc_, compName) for loc, loc_, compName in pyM.operationVarSet_trans
                    if compDict[compName]._hasCapacityVariable and
                    compDict[compName]._operationRateMax is not None)
        pyM.opConstrSet3_trans = pyomo.Set(dimen=3, initialize=initOpConstrSet3)

        def initOpConstrSet4(pyM):
            return ((loc, loc_, compName) for loc, loc_, compName in pyM.operationVarSet_trans
                    if not compDict[compName]._hasCapacityVariable and
                    compDict[compName]._operationRateFix is not None)
        pyM.opConstrSet4_trans = pyomo.Set(dimen=3, initialize=initOpConstrSet4)

        def initOpConstrSet5(pyM):
            return ((loc, loc_, compName) for loc, loc_, compName in pyM.operationVarSet_trans
                    if not compDict[compName]._hasCapacityVariable and
                    compDict[compName]._operationRateMax is not None)
        pyM.opConstrSet5_trans = pyomo.Set(dimen=3, initialize=initOpConstrSet5)

        # Group components by their shared-potential identifier.
        potentialDict = {}  # TODO adapt for 2dim components
        for compName, comp in compDict.items():
            if comp._sharedPotentialID is not None:
                potentialDict.setdefault(comp._sharedPotentialID, []).append(compName)
        pyM.sharedPotentialTransmissionDict = potentialDict

    ####################################################################################################################
    #                                          Declare variables                                                       #
    ####################################################################################################################

    def declareVariables(self, esM, pyM):
        """ Declares design and operation variables """

        # Function for setting lower and upper capacity bounds
        def capBounds(pyM, loc, loc_, compName):
            comp = self._componentsDict[compName]
            return (comp._capacityMin[loc][loc_]
                    if (comp._capacityMin is not None and not comp._hasIsBuiltBinaryVariable) else 0,
                    comp._capacityMax[loc][loc_] if comp._capacityMax is not None else None)

        # Capacity of components [powerUnit]
        pyM.cap_trans = pyomo.Var(pyM.designDimensionVarSet_trans, domain=pyomo.NonNegativeReals,
                                  bounds=capBounds)
        # Number of components [-]
        pyM.nbReal_trans = pyomo.Var(pyM.continuousDesignDimensionVarSet_trans, domain=pyomo.NonNegativeReals)
        # Number of components [-]
        pyM.nbInt_trans = pyomo.Var(pyM.discreteDesignDimensionVarSet_trans, domain=pyomo.NonNegativeIntegers)
        # Binary variables [-], indicate if a component is considered at a location or not
        pyM.designBin_trans = pyomo.Var(pyM.designDecisionVarSet_trans, domain=pyomo.Binary)
        # Operation of component [energyUnit]
        pyM.op_trans = pyomo.Var(pyM.operationVarSet_trans, pyM.timeSet, domain=pyomo.NonNegativeReals)

    ####################################################################################################################
    #                                          Declare component constraints                                           #
    ####################################################################################################################

    def declareComponentConstraints(self, esM, pyM):
        """ Declares time independent and dependent constraints"""
        compDict = self._componentsDict

        ################################################################################################################
        #                                    Declare time independent constraints                                      #
        ################################################################################################################

        # Determine the components' capacities from the number of installed units
        def capToNbReal_trans(pyM, loc, loc_, compName):
            return pyM.cap_trans[loc, loc_, compName] == \
                   pyM.nbReal_trans[loc, loc_, compName] * compDict[compName]._capacityPerPlantUnit
        pyM.ConstrCapToNbReal_trans = pyomo.Constraint(pyM.continuousDesignDimensionVarSet_trans,
                                                       rule=capToNbReal_trans)

        # Determine the components' capacities from the number of installed units
        def capToNbInt_trans(pyM, loc, loc_, compName):
            return pyM.cap_trans[loc, loc_, compName] == \
                   pyM.nbInt_trans[loc, loc_, compName] * compDict[compName]._capacityPerPlantUnit
        pyM.ConstrCapToNbInt_trans = pyomo.Constraint(pyM.discreteDesignDimensionVarSet_trans,
                                                      rule=capToNbInt_trans)

        # Enforce the consideration of the binary design variables of a component
        def bigM_trans(pyM, loc, loc_, compName):
            return pyM.cap_trans[loc, loc_, compName] <= \
                   compDict[compName]._bigM * pyM.designBin_trans[loc, loc_, compName]
        pyM.ConstrBigM_trans = pyomo.Constraint(pyM.designDecisionVarSet_trans, rule=bigM_trans)

        # Enforce the consideration of minimum capacities for components with design decision variables
        def capacityMinDec_trans(pyM, loc, loc_, compName):
            return (pyM.cap_trans[loc, loc_, compName] >= compDict[compName]._capacityMin[loc][loc_] *
                    pyM.designBin_trans[loc, loc_, compName]
                    if compDict[compName]._capacityMin is not None else pyomo.Constraint.Skip)
        pyM.ConstrCapacityMinDec_trans = pyomo.Constraint(pyM.designDecisionVarSet_trans,
                                                          rule=capacityMinDec_trans)

        # Sets, if applicable, the installed capacities of a component
        def capacityFix_trans(pyM, loc, loc_, compName):
            return (pyM.cap_trans[loc, loc_, compName] == compDict[compName]._capacityFix[loc][loc_]
                    if compDict[compName]._capacityFix is not None else pyomo.Constraint.Skip)
        pyM.ConstrCapacityFix_trans = pyomo.Constraint(pyM.designDimensionVarSet_trans, rule=capacityFix_trans)

        # Sets, if applicable, the binary design variables of a component
        def designBinFix_trans(pyM, loc, loc_, compName):
            return (pyM.designBin_trans[loc, loc_, compName] == compDict[compName]._isBuiltFix[loc][loc_]
                    if compDict[compName]._isBuiltFix is not None else pyomo.Constraint.Skip)
        pyM.ConstrDesignBinFix_trans = pyomo.Constraint(pyM.designDecisionVarSet_trans,
                                                        rule=designBinFix_trans)

        # Components sharing the same potential must not exceed it in total
        # (sum of used capacity fractions is at most one).
        def sharedPotentialTransmission(pyM, key, loc, loc_):
            # BUGFIX: the rule previously (a) accessed the non-existent public
            # attribute 'capacityMax' instead of '_capacityMax' and (b) returned
            # a bare expression without a relation, which pyomo rejects as a
            # constraint rule.
            return sum(pyM.cap_trans[loc, loc_, compName] / compDict[compName]._capacityMax[loc][loc_]
                       for compName in compDict if compDict[compName]._sharedPotentialID == key
                       and (loc, loc_, compName) in pyM.designDimensionVarSet_trans) <= 1
        pyM.ConstSharedPotential_trans = \
            pyomo.Constraint(pyM.sharedPotentialTransmissionDict.keys(), esM._locations, esM._locations,
                             rule=sharedPotentialTransmission)

        # Capacity between two locations is the same in both directions
        def symmetricalCapacity_trans(pyM, loc, loc_, compName):
            return pyM.cap_trans[loc, loc_, compName] == pyM.cap_trans[loc_, loc, compName]
        pyM.ConstrSymmetricalCapacity_trans = \
            pyomo.Constraint(pyM.designDimensionVarSet_trans, rule=symmetricalCapacity_trans)

        ################################################################################################################
        #                                     Declare time dependent constraints                                       #
        ################################################################################################################

        # Operation [energyUnit] limited by the installed capacity [powerUnit] multiplied by the hours per time step
        def op1_trans(pyM, loc, loc_, compName, p, t):
            return pyM.op_trans[loc, loc_, compName, p, t] <= \
                   pyM.cap_trans[loc, loc_, compName] * esM._hoursPerTimeStep
        pyM.ConstrOperation1_trans = pyomo.Constraint(pyM.opConstrSet1_trans, pyM.timeSet, rule=op1_trans)

        # Operation [energyUnit] equal to the installed capacity [powerUnit] multiplied by operation time series
        # [powerUnit/powerUnit] and the hours per time step [h])
        def op2_trans(pyM, loc, loc_, compName, p, t):
            return pyM.op_trans[loc, loc_, compName, p, t] == pyM.cap_trans[loc, loc_, compName] * \
                   compDict[compName]._operationRateFix[loc, loc_][p, t] * esM._hoursPerTimeStep
        pyM.ConstrOperation2_trans = pyomo.Constraint(pyM.opConstrSet2_trans, pyM.timeSet, rule=op2_trans)

        # Operation [energyUnit] limited by the installed capacity [powerUnit] multiplied by operation time series
        # [powerUnit/powerUnit] and the hours per time step [h])
        def op3_trans(pyM, loc, loc_, compName, p, t):
            return pyM.op_trans[loc, loc_, compName, p, t] <= pyM.cap_trans[loc, loc_, compName] * \
                   compDict[compName]._operationRateMax[loc, loc_][p, t] * esM._hoursPerTimeStep
        pyM.ConstrOperation3_trans = pyomo.Constraint(pyM.opConstrSet3_trans, pyM.timeSet, rule=op3_trans)

        # Operation [energyUnit] equal to the operation time series [energyUnit]
        def op4_trans(pyM, loc, loc_, compName, p, t):
            return pyM.op_trans[loc, loc_, compName, p, t] == compDict[compName]._operationRateFix[loc, loc_][p, t]
        pyM.ConstrOperation4_trans = pyomo.Constraint(pyM.opConstrSet4_trans, pyM.timeSet, rule=op4_trans)

        # Operation [energyUnit] limited by the operation time series [energyUnit]
        def op5_trans(pyM, loc, loc_, compName, p, t):
            return pyM.op_trans[loc, loc_, compName, p, t] <= compDict[compName]._operationRateMax[loc, loc_][p, t]
        pyM.ConstrOperation5_trans = pyomo.Constraint(pyM.opConstrSet5_trans, pyM.timeSet, rule=op5_trans)

    ####################################################################################################################
    #        Declare component contributions to basic EnergySystemModel constraints and its objective function         #
    ####################################################################################################################

    def getSharedPotentialContribution(self, pyM, key, loc):
        # Shared potentials of transmission components are enforced by the
        # 2-dimensional ConstSharedPotential_trans constraint above, so there
        # is no contribution to the 1-dimensional shared potential constraint.
        return 0

    def hasOpVariablesForLocationCommodity(self, esM, loc, commod):
        """Return True if any component transmits the commodity into or out of loc."""
        return any([comp._commodity == commod and (comp._locationalEligibility[loc][loc_] == 1 or
                                                   comp._locationalEligibility[loc_][loc] == 1)
                    for comp in self._componentsDict.values() for loc_ in esM._locations])

    def getCommodityBalanceContribution(self, pyM, commod, loc, p, t):
        """ Gets contribution to a commodity balance: inflow (reduced by
        distance-dependent losses) minus outflow at location loc. """
        # TODO losses connected to distances
        # BUGFIX: the commodity match used the substring test
        # 'commod in ..._commodity', which e.g. lets 'elec' wrongly match
        # 'electricity'; use equality like hasOpVariablesForLocationCommodity.
        return sum(pyM.op_trans[loc_, loc, compName, p, t] *
                   (1 - self._componentsDict[compName]._losses[loc_][loc] *
                    self._componentsDict[compName]._distances[loc_][loc])
                   for loc_ in pyM.operationVarDict_transIn[loc].keys()
                   for compName in pyM.operationVarDict_transIn[loc][loc_]
                   if commod == self._componentsDict[compName]._commodity) - \
               sum(pyM.op_trans[loc, loc_, compName, p, t]
                   for loc_ in pyM.operationVarDict_transOut[loc].keys()
                   for compName in pyM.operationVarDict_transOut[loc][loc_]
                   if commod == self._componentsDict[compName]._commodity)

    def getObjectiveFunctionContribution(self, esM, pyM):
        """ Gets contribution to the objective function: annualized capex plus
        capacity- and operation-dependent opex of all transmission components.
        The factor 0.5 compensates for each connection appearing in both
        directions. """
        # TODO replace 0.5 with factor which is one when non-directional and 0.5 when bi-directional
        compDict = self._componentsDict

        capexDim = sum(compDict[compName]._investPerCapacity[loc][loc_] * pyM.cap_trans[loc, loc_, compName] *
                       compDict[compName]._distances[loc][loc_] / compDict[compName]._CCF[loc][loc_]
                       for loc, loc_, compName in pyM.cap_trans) * 0.5

        capexDec = sum(compDict[compName]._investIfBuilt[loc][loc_] * pyM.designBin_trans[loc, loc_, compName] *
                       compDict[compName]._distances[loc][loc_] / compDict[compName]._CCF[loc][loc_]
                       for loc, loc_, compName in pyM.designBin_trans) * 0.5

        opexDim = sum(compDict[compName]._opexPerCapacity[loc][loc_] * pyM.cap_trans[loc, loc_, compName] *
                      compDict[compName]._distances[loc][loc_]
                      for loc, loc_, compName in pyM.cap_trans) * 0.5

        opexDec = sum(compDict[compName]._opexIfBuilt[loc][loc_] * pyM.designBin_trans[loc, loc_, compName] *
                      compDict[compName]._distances[loc][loc_]
                      for loc, loc_, compName in pyM.designBin_trans) * 0.5

        opexOp = sum(compDict[compName]._opexPerOperation[loc][loc_] *
                     sum(pyM.op_trans[loc, loc_, compName, p, t] * esM._periodOccurrences[p]
                         for p, t in pyM.timeSet)
                     for loc, subDict in pyM.operationVarDict_transOut.items()
                     for loc_, compNames in subDict.items()
                     for compName in compNames) / esM._numberOfYears

        return capexDim + capexDec + opexDim + opexDec + opexOp

    def setOptimalValues(self, esM, pyM):
        """ Extracts the optimal variable values from the pyomo model and
        stores them on the modeling instance and on each component. """
        # NOTE(review): '1dim' is passed here although transmission data is
        # 2-dimensional elsewhere in this module - confirm against
        # utils.formatOptimizationOutput.
        optVal = utils.formatOptimizationOutput(pyM.cap_trans.get_values(), 'designVariables', '1dim')
        self._capacityVariablesOptimum = optVal
        utils.setOptimalComponentVariables(optVal, '_capacityVariablesOptimum', self._componentsDict)

        optVal = utils.formatOptimizationOutput(pyM.designBin_trans.get_values(), 'designVariables', '1dim')
        self._isBuiltVariablesOptimum = optVal
        utils.setOptimalComponentVariables(optVal, '_isBuiltVariablesOptimum', self._componentsDict)

        optVal = utils.formatOptimizationOutput(pyM.op_trans.get_values(), 'operationVariables', '1dim',
                                                esM._periodsOrder)
        self._operationVariablesOptimum = optVal
        utils.setOptimalComponentVariables(optVal, '_operationVariablesOptimum', self._componentsDict)

    def getOptimalCapacities(self):
        # BUGFIX: previously returned the non-existent attribute
        # '_capacitiesOpt'; the capacities are stored in
        # '_capacityVariablesOptimum' (see setOptimalValues).
        return self._capacityVariablesOptimum
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 31 09:43:04 2019

@author: NG7a8f3

Travel-time, travel-cost and provisioning helpers for a tabletop campaign,
plus formatting of money amounts in Dukaten/Silber/Heller/Kreuzer
(base-10 currency: 10 Kreuzer = 1 Heller, 10 Heller = 1 Silber,
10 Silber = 1 Dukaten).
"""
import random


def Geldrechner(money, level, rundungslevel):
    """Format an amount of Kreuzer as 'X Dukaten Y Silber Z Heller W Kreuzer'.

    `rundungslevel` names the smallest denomination to display; at that
    denomination the value is rounded and formatting stops.  `level` is
    unused but kept for interface compatibility.
    """
    moneystring = ''
    money = int(money)
    Kreuzer = money % 10
    money = money / 10
    Heller = money % 10
    money = money / 10
    Silber = money % 10
    Dukaten = money / 10
    if Dukaten >= 1:
        if rundungslevel == 'Dukaten':
            moneystring = str(int(round(Dukaten, 0))) + ' Dukaten '
            return moneystring
        else:
            moneystring = str(int(Dukaten)) + ' Dukaten '
    if Silber >= 1:
        if rundungslevel == 'Silber' and rundungslevel != 'Dukaten':
            moneystring = moneystring + str(int(round(Silber, 0))) + ' Silber '
            return moneystring
        else:
            moneystring = moneystring + str(int(Silber)) + ' Silber '
    if Heller >= 1 and rundungslevel != 'Silber' and rundungslevel != 'Dukaten':
        if rundungslevel == 'Heller':
            moneystring = moneystring + str(int(round(Heller, 0))) + ' Heller '
            return moneystring
        else:
            moneystring = moneystring + str(int(Heller)) + ' Heller '
    if Kreuzer >= 1 and rundungslevel != 'Silber' and rundungslevel != 'Dukaten' and rundungslevel != 'Heller':
        moneystring = moneystring + str(int(round(Kreuzer, 0))) + ' Kreuzer '
    return moneystring


def GeldrechnerKurz(money, level, rundungslevel):
    """Same as Geldrechner, but the string uses the abbreviations
    'D', 'S', 'H', 'K' instead of the full denomination names."""
    moneystring = ''
    money = int(money)
    Kreuzer = money % 10
    money = money / 10
    Heller = money % 10
    money = money / 10
    Silber = money % 10
    Dukaten = money / 10
    if Dukaten >= 1:
        if rundungslevel == 'Dukaten':
            moneystring = str(int(round(Dukaten, 0))) + ' D '
            return moneystring
        else:
            moneystring = str(int(Dukaten)) + ' D '
    if Silber >= 1:
        if rundungslevel == 'Silber' and rundungslevel != 'Dukaten':
            moneystring = moneystring + str(int(round(Silber, 0))) + ' S '
            return moneystring
        else:
            moneystring = moneystring + str(int(Silber)) + ' S '
    if Heller >= 1 and rundungslevel != 'Silber' and rundungslevel != 'Dukaten':
        if rundungslevel == 'Heller':
            moneystring = moneystring + str(int(round(Heller, 0))) + ' H '
            return moneystring
        else:
            moneystring = moneystring + str(int(Heller)) + ' H '
    if Kreuzer >= 1 and rundungslevel != 'Silber' and rundungslevel != 'Dukaten' and rundungslevel != 'Heller':
        moneystring = moneystring + str(int(round(Kreuzer, 0))) + ' K '
    return moneystring


# Constants

# Speed in miles per day
speedFuß = 30
speedFlusskahn = 50
speedReiseKutsche = 40
speedShip = 100
speedHorse = 50

# Cost in Kreuzer (per 100 miles and person, see reisedauerrechnung)
kostenFlusskahn = 25 / 2
kostenReisekutsche = 1200
kostenSeereiseHängematte = 800
kostenSeereiseKabine = 1500

# Provision cost per day in Kreuzer
foodPerDay = 50
waterPerDay = 3


def berechne_chance(wegZustand, simulation):
    """Return the daily progress chance for a given road condition.

    Simulation mode uses harsher odds than the deterministic estimate.
    NOTE(review): an unrecognized `wegZustand` leaves `chance` unbound and
    raises UnboundLocalError - behavior kept as-is; confirm intent.
    """
    if simulation:
        if wegZustand == 'Perfekt':
            chance = 1
        if wegZustand == 'Gut':
            chance = 0.68
        if wegZustand == 'Mittel':
            chance = 0.40
        if wegZustand == 'Schlecht':
            chance = 0.25
    else:
        if wegZustand == 'Perfekt':
            chance = 1
        if wegZustand == 'Gut':
            chance = 0.90
        if wegZustand == 'Mittel':
            chance = 0.74
        if wegZustand == 'Schlecht':
            chance = 0.65
    return chance


def simuliere_reise(chance, reisewege):
    """Simulate a journey day by day and return the number of travel days.

    `reisewege` is a 6-tuple of remaining distances per means of travel
    (horse, foot, river barge, coach, sea/hammock, sea/cabin).
    """
    pferdReise, fußWeg, FlusskahnWeg, reiseKutschenWeg, seeReiseWegM, seeReiseWegK = reisewege
    daysToTravel = 0
    # BUGFIX: random.randint requires integer bounds; chance * 100 is a float
    # (e.g. 0.68 * 100 == 68.00000000000001), so every call raised ValueError.
    lower = round(chance * 100)
    # Each day is simulated individually, as long as distance remains.
    while pferdReise > 0:
        # Compute the slow-down factor.
        # NOTE(review): unlike all loops below, this uses the *inverse* factor
        # (>= 1), i.e. worse roads make the horse cover more distance per day.
        # Looks suspicious, but behavior is kept as-is - confirm intent.
        daySlow = 1 / (random.randint(lower, 100) / 100)
        daysToTravel += 1
        # Remaining distance shrinks by speed * factor per day.
        pferdReise = pferdReise - (speedHorse * daySlow)
    while fußWeg > 0:
        daySlow = (random.randint(lower, 100) / 100)
        daysToTravel += 1
        fußWeg = fußWeg - (speedFuß * daySlow)
    while FlusskahnWeg > 0:
        daySlow = (random.randint(lower, 100) / 100)
        daysToTravel += 1
        FlusskahnWeg = FlusskahnWeg - (speedFlusskahn * daySlow)
    while reiseKutschenWeg > 0:
        daySlow = (random.randint(lower, 100) / 100)
        daysToTravel += 1
        reiseKutschenWeg = reiseKutschenWeg - (speedReiseKutsche * daySlow)
    while seeReiseWegM > 0:
        daySlow = (random.randint(lower, 100) / 100)
        daysToTravel += 1
        seeReiseWegM = seeReiseWegM - (speedShip * daySlow)
    while seeReiseWegK > 0:
        daySlow = (random.randint(lower, 100) / 100)
        daysToTravel += 1
        seeReiseWegK = seeReiseWegK - (speedShip * daySlow)
    return daysToTravel


def reisedauerrechnung(reisewege, rundung, gruppengröße, wegZustand, simulation):
    """Return (travel days, total transport cost, per-vehicle costs).

    `rundung` is unused but kept for interface compatibility.
    """
    pferdReise, fußWeg, FlusskahnWeg, reiseKutschenWeg, seeReiseWegM, seeReiseWegK = reisewege
    chance = berechne_chance(wegZustand, simulation)
    # Transport cost per means of travel (used for trading)
    FlusskahnKosten = FlusskahnWeg / 100 * kostenFlusskahn * gruppengröße
    ReiseKutscheKosten = reiseKutschenWeg / 100 * kostenReisekutsche * gruppengröße
    SeereiseKostenM = seeReiseWegM / 100 * kostenSeereiseHängematte * gruppengröße
    SeereiseKostenK = seeReiseWegK / 100 * kostenSeereiseKabine * gruppengröße
    transportMittelKosten = (FlusskahnKosten, ReiseKutscheKosten, SeereiseKostenM, SeereiseKostenK)
    # Total transport cost scales with distance and party size
    transportCost = gruppengröße * (FlusskahnWeg * kostenFlusskahn + reiseKutschenWeg * kostenReisekutsche +
                                    seeReiseWegM * kostenSeereiseHängematte +
                                    seeReiseWegK * kostenSeereiseKabine) / 100
    if simulation and wegZustand != 'Perfekt':
        # Simulate the journey day by day.
        daysToTravel = simuliere_reise(chance, reisewege)
    else:
        # Without simulation: plain distance/speed, scaled by the chance factor.
        daysToTravel = (pferdReise / speedHorse + fußWeg / speedFuß + FlusskahnWeg / speedFlusskahn +
                        reiseKutschenWeg / speedReiseKutsche + seeReiseWegM / speedShip +
                        seeReiseWegK / speedShip) * (1 / chance)
    return daysToTravel, transportCost, transportMittelKosten


def berechne_nahrungsbedarf(daysToTravel, gruppengröße, wegZustand, simulation):
    """Return the (food, water) need in Kreuzer for the whole journey."""
    food = 0
    water = 0
    chance = berechne_chance(wegZustand, simulation)
    if simulation and wegZustand != 'Perfekt':
        # BUGFIX: random.randint requires integer bounds (chance * 100 is a float).
        lower = round(chance * 100)
        for x in range(int(daysToTravel)):
            # Hard days (factor > 1) increase the daily consumption.
            dayHard = 1 / (random.randint(lower, 100) / 100)
            foodNeed = foodPerDay * gruppengröße * dayHard
            waterNeed = waterPerDay * gruppengröße * dayHard
            food += foodNeed
            water += waterNeed
    else:
        food = foodPerDay * gruppengröße * daysToTravel * (1 / chance)
        water = waterPerDay * gruppengröße * daysToTravel * (1 / chance)
    return food, water


def wuerfeln(wuerfelSeiten, anzahl):
    """Roll `anzahl` dice with `wuerfelSeiten` sides each and return the sum."""
    wurf = 0
    for i in range(anzahl):
        wurf = wurf + random.randint(1, wuerfelSeiten)
    return wurf
import os import shlex import shutil import subprocess from typing import List, Dict from xml.etree import ElementTree class OutputParser: def __init__(self, xml: str): self.xml = xml def get_addresses(self) -> List[Dict[str, str]]: """ Several things need to happen for an address to be included: 1. Host is up 2. Port is TCP 22 3. Port status is open Otherwise the iterator will not be filled :return: List of dictionaries It is possible to have multiple PTR records assigned to different IP addresses [josevnz@dmaf5 EnableSysadmin]$ nslookup dmaf5.home Server: 127.0.0.53 Address: 127.0.0.53#53 Non-authoritative answer: Name: dmaf5.home Address: 192.168.1.26 Name: dmaf5.home Address: 192.168.1.25 Name: dmaf5.home Address: fd22:4e39:e630:1:1937:89d4:5cbc:7a8d Name: dmaf5.home Address: fd22:4e39:e630:1:e711:3539:b731:10dd """ addresses = [] root = ElementTree.fromstring(self.xml) for host in root.findall('host'): name = None for hostnames in host.findall('hostnames'): for hostname in hostnames: name = hostname.attrib['name'] break if not name: continue is_up = True for status in host.findall('status'): if status.attrib['state'] == 'down': is_up = False break if not is_up: continue port_22_open = False for ports in host.findall('ports'): for port in ports.findall('port'): if port.attrib['portid'] == '22': for state in port.findall('state'): if state.attrib['state'] == "open": # Up not the same as open, we want SSH access! 
port_22_open = True break if not port_22_open: continue address = None for address_data in host.findall('address'): address = address_data.attrib['addr'] break addresses.append({name: address}) return addresses class NmapRunner: def __init__(self, hosts: str): self.nmap_report_file = None found_nmap = shutil.which('nmap', mode=os.F_OK | os.X_OK) if not found_nmap: raise ValueError(f"Nmap is missing!") self.nmap = found_nmap self.hosts = hosts def __iter__(self): command = [self.nmap] command.extend(__NMAP__FLAGS__) command.append(self.hosts) completed = subprocess.run( command, capture_output=True, shell=False, check=True ) completed.check_returncode() out_par = OutputParser(completed.stdout.decode('utf-8')) self.addresses = out_par.get_addresses() return self def __next__(self): try: return self.addresses.pop() except IndexError: raise StopIteration """ Convert the args for proper usage on the Nmap CLI Also, do not use the -n flag. We need to resolve IP addresses to hostname, even if we sacrifice a little bit of speed """ NMAP_DEFAULT_FLAGS = { '-p22': 'Port 22 scanning', '-T4': 'Aggressive timing template', '-PE': 'Enable this echo request behavior. Good for internal networks', '--disable-arp-ping': 'No ARP or ND Ping', '--max-hostgroup 50': 'Hostgroup (batch of hosts scanned concurrently) size', '--min-parallelism 50': 'Number of probes that may be outstanding for a host group', '--osscan-limit': 'Limit OS detection to promising targets', '--max-os-tries 1': 'Maximum number of OS detection tries against a target', '-oX -': 'Send XML output to STDOUT, avoid creating a temp file' } __NMAP__FLAGS__ = shlex.split(" ".join(NMAP_DEFAULT_FLAGS.keys()))
from fractions import Fraction
from functools import reduce
from itertools import chain
import logging
from math import gcd, copysign, floor, log, log2
from operator import add, mul
import random
import sys
import time

from mpyc.runtime import mpc
from mpyc.sectypes import SecureInteger
from mpyc.finfields import GF
from mpyc.mpctools import reduce as mpyc_reduce

logger_sd = logging.getLogger("secure_division")
logger_sd.setLevel(logging.INFO)
logger_sm = logging.getLogger("secure_montgomery_exponentiation")
logger_sm.setLevel(logging.INFO)
logger_sx = logging.getLogger("secure_xgcd")
logger_sx.setLevel(logging.INFO)
logger_sb = logging.getLogger("secure_binary_xgcd")
logger_sb.setLevel(logging.DEBUG)

# Sign of x as +1/-1 (0 maps to +1).
sign = lambda x: (1, -1)[x < 0]


def extended_euclid_xgcd(a, b):
    """
    Returns d, u, v = xgcd(a,b)
    Where d = ua + vb = gcd(a, b)
    """
    s = 0
    old_s = 1
    t = 1
    old_t = 0
    r = b
    old_r = a
    while r != 0:
        quotient = old_r // r
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s
        old_t, t = t, old_t - quotient * t
    d, u, v = old_r, old_s, old_t
    return d, u, v


def binary_xgcd(x, y):
    """Binary extended GCD algorithm.

    Handbook of Applied Cryptography
    [http://cacr.uwaterloo.ca/hac/about/chap14.pdf]
    by Menezes, van Oorschot, and Vanstone, CRC Press, 1996.
    Following Section 14.61, Algorithm Binary extended gcd algorithm.

    The number of bits needed to represent either u or v decreases by
    (at least) 1 after at most two iterations of steps 4-7; thus, the
    algorithm takes at most 2*(floor(lg(x)) + floor(lg(y)) + 2) such
    iterations.
    """
    g = 1
    while (x | y) & 1 == 0:
        g = 2 * g
        x >>= 1
        y >>= 1
    u = x
    v = y
    A, B = 1, 0
    C, D = 0, 1
    while u != 0:
        while (u & 1) == 0:
            u >>= 1
            if (A | B) & 1 == 0:
                A >>= 1
                B >>= 1
            else:
                A = (A + y) >> 1
                B = (B - x) >> 1
        while (v & 1) == 0:
            v >>= 1
            if (C | D) & 1 == 0:
                C >>= 1
                D >>= 1
            else:
                C = (C + y) >> 1
                D = (D - x) >> 1
        if u >= v:
            u = u - v
            A = A - C
            B = B - D
        else:
            v = v - u
            C = C - A
            D = D - B
    a = C
    b = D
    return g * v, a, b


def shortgcd2(f, g):
    """Original shortgcd2 algorithm from [BY19].

    Invariant: f should be odd.
    See paper: https://eprint.iacr.org/2019/266
    """
    delta = 1
    assert f & 1
    m = 4 + 3 * max(f.bit_length(), g.bit_length())
    for _ in range(m):
        if delta > 0 and g & 1:
            delta, f, g = -delta, g, -f
        delta, g = 1 + delta, (g + (g & 1) * f) // 2
    return abs(f)


@mpc.coroutine
async def sec_shortgcd2(f, g):
    """Secure version of shortgcd2 from [BY19].

    Invariant: f should be odd at start. f remains odd throughout.
    Therefore, we only calculate lsb_g once per loop.
    See paper: https://eprint.iacr.org/2019/266
    """
    assert isinstance(f, SecureInteger)
    assert isinstance(g, SecureInteger)
    secint = type(g)
    await mpc.returnType(secint)
    delta = secint(1)
    assert await mpc.output(mpc.lsb(f))
    m = 4 + 3 * max(f.bit_length, g.bit_length)
    for i in range(m):
        lsb_g = mpc.lsb(g)
        # Optimized version of: check = (delta > 0) * lsb_g
        check = (1 - mpc.sgn(delta - 1, LT=True, l=(i + 1).bit_length() + 1)) * lsb_g
        delta, f, g = mpc.if_else(check, [-delta, g, -f], [delta, f, g])
        # truediv is less expensive and allowed here
        delta, g = 1 + delta, (g + lsb_g * f) / 2
    return mpc.abs(f)


@mpc.coroutine
async def to_bits_approx(a, l=None):
    """Secure extraction of l (or all) least significant bits of a,
    correct up to and including the least significant 1 (if any).
    """
    stype = type(a)  # secint
    if l is None:
        l = stype.bit_length
    await mpc.returnType(stype, l)
    field = stype.field
    r_bits = await mpc.random_bits(field, l)
    r_modl = 0
    for r_i in reversed(r_bits):
        r_modl <<= 1
        r_modl += r_i.value
    k = mpc.options.sec_param
    r_divl = mpc._random(field, 1 << (stype.bit_length + k - l)).value
    a = await mpc.gather(a)
    c = await mpc.output(a + ((1 << stype.bit_length) + (r_divl << l) + r_modl))
    c = c.value % (1 << l)
    return [1 - r if (c >> i) & 1 else r for i, r in enumerate(r_bits)]


def secure_even_gcd(a, b):
    """Securely compute the largest power of 2 dividing both a and b."""
    x = to_bits_approx(a)  # low to high bits, correct up to and including the first 1 (if any)
    y = to_bits_approx(b)  # low to high bits
    z = mpc.vector_sub(mpc.vector_add(x, y), mpc.schur_prod(x, y))
    return secure_norm(z, msb=False, power=True)[0]  # TODO: clean up secure_norm()


@mpc.coroutine
async def secure_gcd(a, b):
    """Secure safegcd, generalized to both odd and even inputs.

    safegcd2 as defined in BY19 requires odd a. This protocol extends the
    functionality to even a 'by first finding the number of shared powers
    of 2 in a and b and then reducing to the odd case.' ([BY19])
    """
    secint = type(a)
    await mpc.returnType(secint)
    pow_of_2 = secure_even_gcd(a, b)
    a = a / pow_of_2
    b = b / pow_of_2
    # If a is even, swap a and b
    a, b = mpc.if_else(mpc.lsb(a), [a, b], [b, a])
    gcd_of_remainder = sec_shortgcd2(a, b)
    result = pow_of_2 * gcd_of_remainder
    return result


def truncate(f, t):
    """Truncate input f to t coefficients.

    Source: Bernstein & Yang 2019, 'Fast constant-time gcd computation
    and modular inversion'
    Link to paper: https://eprint.iacr.org/2019/266
    Link to original SAGE code: https://gcd.cr.yp.to/safegcd/11.sage
    """
    if t == 0:
        return 0
    twot = 1 << (t - 1)
    return ((f + twot) & (2 * twot - 1)) - twot


def divsteps2(n, t, delta, f, g):
    """Simple fast constant-time "division steps".

    'When division steps are iterated a constant number of times, they
    reveal the gcd of the inputs, the modular reciprocal when the inputs
    are coprime, etc.'

    Source: Bernstein & Yang 2019, 'Fast constant-time gcd computation
    and modular inversion'
    Link to paper: https://eprint.iacr.org/2019/266
    Link to original SAGE code: https://gcd.cr.yp.to/safegcd/11.sage
    """
    assert t >= n and n >= 0
    f, g = truncate(f, t), truncate(g, t)
    u, v, q, r = Fraction(1, 1), Fraction(0, 1), Fraction(0, 1), Fraction(1, 1)
    while n > 0:
        f = truncate(f, t)
        if delta > 0 and g & 1:
            delta, f, g, u, v, q, r = -delta, g, -f, q, r, -u, -v
        g0 = g & 1
        delta, g, q, r = (
            1 + delta,
            (g + g0 * f) // 2,
            (q + g0 * u) / 2,
            (r + g0 * v) / 2,
        )
        n, t = n - 1, t - 1
        g = truncate(int(g), t)
    return delta, f, g, [[u, v], [q, r]]


def jumpdivsteps2(n, t, delta, f, g):
    """Divide-and-conquer variant of divsteps2 (see BY19, section on jump
    divsteps); composes the transition matrices of the two halves.
    """
    assert t >= n and n >= 0
    if n <= 1:
        return divsteps2(n, t, delta, f, g)
    j = n // 2
    delta, f1, g1, P1 = jumpdivsteps2(j, j, delta, f, g)
    f, g = P1[0][0] * f + P1[0][1] * g, P1[1][0] * f + P1[1][1] * g
    f, g = truncate(int(f), t - j), truncate(int(g), t - j)
    delta, f2, g2, P2 = jumpdivsteps2(n - j, n - j, delta, f, g)
    f, g = P2[0][0] * f + P2[0][1] * g, P2[1][0] * f + P2[1][1] * g
    # NOTE(review): asymmetric truncation (t-n+1 vs t-n) kept as in the
    # original -- confirm against the BY19 SAGE code.
    f, g = truncate(int(f), t - n + 1), truncate(int(g), t - n)
    P3 = [
        [sum(a * b for a, b in zip(P2_row, P1_col)) for P1_col in zip(*P1)]
        for P2_row in P2
    ]
    return delta, f, g, P3


def iterations(d):
    """Bound on number of iterations for divstep. See Theorem 11.2 in BY19.

    Source: Bernstein & Yang 2019, 'Fast constant-time gcd computation
    and modular inversion'
    Link to paper: https://eprint.iacr.org/2019/266
    Link to original SAGE code: https://gcd.cr.yp.to/safegcd/11.sage
    """
    return (49*d + 80) // 17 if d < 46 else (49*d + 57) // 17


def gcd2(f, g, jumpdivsteps=False):
    """Compute gcd in constant time for odd f.

    Source: Bernstein & Yang 2019, 'Fast constant-time gcd computation
    and modular inversion'
    Link to paper: https://eprint.iacr.org/2019/266
    Link to original SAGE code: https://gcd.cr.yp.to/safegcd/11.sage
    """
    assert f & 1
    d = max(f.bit_length(), g.bit_length())
    m = iterations(d)
    if jumpdivsteps:
        delta, fm, gm, P = jumpdivsteps2(m, m + d, 1, f, g)
    else:
        delta, fm, gm, P = divsteps2(m, m + d, 1, f, g)
    assert gm == 0
    return abs(fm)


def recip2(f, g, jumpdivsteps=False):
    """Computes reciprocal of g modulo f.

    Requires f, g coprime; does not notice if not the case (see p. 26 of BY)
    """
    assert f & 1
    d = max(f.bit_length(), g.bit_length())
    m = iterations(d)
    precomp = (
        ((f + 1) // 2) ** (m - 1)
    ) % f  # // is allowed because f is odd (invariant)
    if jumpdivsteps:
        delta, fm, gm, P = jumpdivsteps2(m, m + 1, 1, f, g)
    else:
        delta, fm, gm, P = divsteps2(m, m + 1, 1, f, g)
    assert gm == 0
    p = P[0][1] * (2 ** (m - 1))
    V = int(copysign(1, fm)) * int(p)
    result = V * precomp
    return result % f


def extended_safegcd(f, g):
    """Extended GCD (v, a, b with a*f + b*g == v) for odd f, via BY19."""
    v = gcd2(f, g)
    f_prime = f // v
    g_prime = g // v
    r = recip2(f // v, g // v)  # recip2 computes reciprocal of g mod f.
    v2 = gcd2(f_prime, g_prime)
    assert v2 == 1
    b = r % (f // v)
    a = (1 - (b * g_prime)) // (f_prime)
    assert a * f + b * g == v
    return v, a, b


def general_extended_safegcd(f, g):
    """Expand extended_safegcd for even f."""
    # Divide by common powers of 2.
    pow_of_2 = 1
    while (f | g) & 1 == 0:
        pow_of_2 = 2 * pow_of_2
        f >>= 1
        g >>= 1
    # If f is even, swap f and g
    swapped = 0
    if f & 1 == 0:
        swapped = 1
        f, g = g, f
    # Run extended gcd for odd f
    v, a, b = extended_safegcd(f, g)
    # Swap f and g back if needed
    if swapped:
        a, b = b, a
        f, g = g, f
    return v * pow_of_2, a, b


def divsteps22(a, b, l=None):
    """New divsteps without 2-adic computation."""
    delta, f, v, g, r = 1, a, 0, b, 1
    for i in range(iterations(l)):
        delta_gt0 = delta > 0
        g_0 = g % 2
        if delta_gt0 * g_0:
            delta, f, v, g, r = -delta, g, r, -f, -v
        if g_0:
            g, r = g + f, r + v
        if r % 2:
            r = r - a
        delta, g, r = delta + 1, g // 2, r // 2
    s = sign(f)
    f, v = s * f, s * v
    u = (f - v * b) // a
    return f, u, v


def even_gcd(x, y):
    """Largest power of 2 dividing both x and y (plain integers)."""
    g = 1
    while (x | y) & 1 == 0:
        g = 2 * g
        x >>= 1
        y >>= 1
    return g


def safe_xgcd(a, b, l=None):
    """Extended GCD based on BY.

    Bit length l for a and b, if known.
    Generalized to both odd and even inputs.
    """
    pow_of_2 = even_gcd(a, b)
    a, b = a // pow_of_2, b // pow_of_2
    swap = 1 - a % 2
    # If a is even, swap a and b
    if swap:
        a, b = b, a
    if not l:
        l = int(log2(max(abs(a), abs(b)))) + 1
    g, u, v = divsteps22(a, b, l)
    if swap:
        u, v = v, u
    # BUG FIX: was `g = f * pow_of_2`, but `f` is undefined here (the
    # first result of divsteps22 was unpacked as `g`) -- a NameError.
    g = g * pow_of_2
    return g, u, v


def secure_divsteps2(n, f, g):
    """Secure version of divsteps2: n oblivious division steps on (f, g).

    Returns (f, g, P) with P the accumulated 2x2 transition matrix.
    """
    secint = type(g)
    delta = secint(1)
    u, v, q, r = secint(1), secint(0), secint(0), secint(1)
    for i in range(n):
        lsb_g = mpc.lsb(g)
        delta_gt0_g_odd = (
            1 - mpc.sgn(delta - 1, LT=True, l=(i + 1).bit_length() + 1)
        ) * lsb_g
        delta, f, g, u, v, q, r = mpc.if_else(
            delta_gt0_g_odd, [-delta, g, -f, q, r, -u, -v], [delta, f, g, u, v, q, r]
        )
        delta, g, q, r = (
            1 + delta,
            (g + lsb_g * f) / 2,
            (q + lsb_g * u) / 2,
            (r + lsb_g * v) / 2,
        )
    return f, g, [[u, v], [q, r]]


async def secure_recip2(f, g, bit_length=None):
    """Computes reciprocal of g modulo f for secret f and g.

    Requires f, g coprime; does not notice if not the case (see p. 26 of BY)
    """
    secint = type(g)
    if not bit_length:
        d = max(f.bit_length, g.bit_length)
    else:
        d = bit_length
    m = iterations(d)
    precomp = (1 - hensel_inverse_mod2k(f, m - 1) * f) / 2 ** (m - 1)
    fm, gm, P = secure_divsteps2(m, f, g)
    V = fm * P[0][1] * (2 ** (m - 1))  # NB: fm in {1, -1}
    _, recip = secure_division(V * precomp, f)  # reduction mod f
    return recip


def secure_gcd2(f, g, bit_length=None):
    """Secure gcd for secret odd f. d, m are public values."""
    secint = type(g)
    if not bit_length:
        d = max(f.bit_length, g.bit_length)
    else:
        d = bit_length
    m = iterations(d)
    fm, gm, P = secure_divsteps2(m, f, g)
    return mpc.abs(fm)


def secure_divsteps22(a, b, l=None):
    """New secure divsteps without 2-adic computation."""
    secint = type(a)
    l = l or secint.bit_length
    delta, f, v, g, r = secint(1), a, secint(0), b, secint(1)
    for i in range(iterations(l)):
        delta_gt0 = 1 - mpc.sgn(delta - 1, LT=True, l=(i + 1).bit_length() + 1)
        g_0 = g % 2
        delta, f, v, g, r = mpc.if_else(
            delta_gt0 * g_0, [-delta, g, r, -f, -v], [delta, f, v, g, r]
        )
        g, r = mpc.if_else(g_0, [g + f, r + v], [g, r])
        r = mpc.if_else(r % 2, r - a, r)
        delta, g, r = delta + 1, g / 2, r / 2
    s = 1 - 2 * (f < 0)  # sign of f
    f, v = mpc.scalar_mul(s, [f, v])
    u = (f - v * b) / a
    return f, u, v


def secure_xgcd(a, b, l=None):
    """Secure extended GCD based on BY.

    Bit length l for a and b, if known.
    Generalized to both odd and even inputs. safegcd2 as defined in BY19
    requires odd a. This protocol extends the functionality to even a
    'by first finding the common factors of 2 in a and b and then
    reducing to the odd case.' ([BY19])
    """
    # Divide by common powers of 2.
    pow_of_2 = secure_even_gcd(a, b)
    a, b = mpc.scalar_mul(1 / pow_of_2, [a, b])
    swap = 1 - a % 2
    # If a is even, swap a and b
    a, b = mpc.if_else(swap, [b, a], [a, b])
    f, u, v = secure_divsteps22(a, b, l)
    u, v = mpc.if_else(swap, [v, u], [u, v])
    return f * pow_of_2, u, v


def flip(x):
    """Logical NOT for secure/plain bits in {0, 1}."""
    return 1 - x


def secure_norm(x, msb=True, power=True):
    """Determine most or least significant bit.

    Logarithmic round complexity and low computational overhead.

    Args:
        x (list): List of secure integers in {0, 1} (i.e. bits).
        msb (boolean): Flag to set most or least significant bit mode.
        power (boolean): Flag to return 2^pos or pos, where pos is the
            position of msb/lsb.
    """
    if msb:
        x = list(reversed(x))
    if power:
        nz, i = mpc.find(x, 1, e=None, cs_f=lambda b, i: (b + 1) << i)
    else:
        nz, i = mpc.find(x, 1, e=None, cs_f=lambda b, i: b + i)
    if msb:
        if power:
            i = (1 << len(x)) / i  # proper division
        else:
            i = len(x) - i
    return i, 1 - nz


@mpc.coroutine
async def secure_binary_xgcd(x, y, progress_bar=False, allow_negative=True, bit_length=None):
    """Secure binary extended GCD algorithm."""
    secint = type(x)
    await mpc.returnType(secint, 3)
    # Securely calculate bit_length if not provided.
    if not bit_length:
        x_bl = secure_norm(mpc.to_bits(x), msb=True, power=False)[0]
        y_bl = secure_norm(mpc.to_bits(y), msb=True, power=False)[0]
        x_bl = int(await mpc.output(x_bl)) + 1
        y_bl = int(await mpc.output(y_bl)) + 1
        print("x_bl=", x_bl)
        print("y_bl=", y_bl)
        bit_length = max(x_bl, y_bl)
        logger_sx.debug(
            f"In secure_binary_xgcd, bit_length securely calculated (at performance penalty) = {bit_length}"
        )
    l = bit_length
    # The number of bits needed to represent either u or v decreases by
    # (at least) 1 after at most two iterations of steps 4-7; thus, the
    # algorithm takes at most 2*(lg(x)+lg(y)+2) such iterations.
    # (See Handbook of Applied Cryptography, p. 610.)
    m = 2 * 2 * bit_length
    if progress_bar:
        toolbar_width = m
        sys.stdout.write("Constructing circuit: [%s]" % (" " * toolbar_width))
        sys.stdout.flush()
        sys.stdout.write(
            "\b" * (toolbar_width + 1)
        )  # return to start of line, after '['
    if allow_negative:
        x_nonneg = x >= 0
        y_nonneg = y >= 0
        x = mpc.if_else(x_nonneg, x, -x)
        y = mpc.if_else(y_nonneg, y, -y)
    g = secure_even_gcd(x, y)
    x = x / g
    y = y / g
    u = x
    v = y
    A, B = secint(1), secint(0)
    C, D = secint(0), secint(1)
    for i in range(m):
        not_done = flip(mpc.is_zero(u))
        for j in range(l):
            not_done *= flip(mpc.lsb(u))  # TODO: avoid separate calls
            AB_both_even = flip(mpc.lsb(A)) * flip(mpc.lsb(B))
            u, A, B = mpc.if_else(
                not_done,
                [u / 2] + mpc.if_else(AB_both_even, [A / 2, B / 2], [(A + y) / 2, (B - x) / 2]),
                [u, A, B],
            )
        # TODO: avoid unnecessary execution of both loops by obliviously swapping inputs
        not_done = secint(1)
        for j in range(l):
            not_done *= flip(mpc.lsb(v))
            CD_both_even = flip(mpc.lsb(C)) * flip(mpc.lsb(D))
            v, C, D = mpc.if_else(
                not_done,
                [v / 2] + mpc.if_else(CD_both_even, [C / 2, D / 2], [(C + y) / 2, (D - x) / 2]),
                [v, C, D],
            )
        # TODO: consider using bit representation and sgn?
        u, A, B, v, C, D = mpc.if_else(
            u >= v, [u - v, A - C, B - D, v, C, D], [u, A, B, v - u, C - A, D - B]
        )
        if progress_bar:
            sys.stdout.write("-")
            sys.stdout.flush()
    a = C
    b = D
    if progress_bar:
        sys.stdout.write("]\n")  # this ends the progress bar
    if allow_negative:
        a = mpc.if_else(x_nonneg, a, -a)
        b = mpc.if_else(y_nonneg, b, -b)
    return g * v, a, b


def pow_list(a, x, n):
    """Return [a, ax, ax^2, ..., ax^(n-1)].

    Runs in O(log n) rounds using minimal number of n-1 secure
    multiplications.

    NB: equivalent to
    list(mpyc.mpctools.accumulate([x] * (n-1), f=operator.mul, iv=a)),
    which also runs in O(log n) rounds but using O(n log n) secure
    multiplications.
    """
    if n == 1:
        powers = [a]
    elif n == 2:
        powers = [a, a * x]
    else:
        even_powers = pow_list(a, x * x, (n + 1) // 2)
        if n % 2:
            d = even_powers.pop()
        odd_powers = mpc.scalar_mul(x, even_powers)
        powers = [t for _ in zip(even_powers, odd_powers) for t in _]
        if n % 2:
            powers.append(d)
    return powers


@mpc.coroutine
async def prefix_mul(x):
    """WORK IN PROGRESS: Prefix multiplication for vector x of sectypes.

    Follows Protocol 4.17 from De Hoogh's PhD thesis, "Design of large
    scale applications of secure multiparty computation", TU Eindhoven,
    2012. See: https://pure.tue.nl/ws/files/3430368/735328.pdf
    """
    sectype = type(x[0])
    k = len(x)
    await mpc.returnType(sectype, k)
    u = [None] * k
    r = [None] * k
    s = [None] * k
    # TODO: parallellize
    # generate all random at once and check if the vector
    for i in range(k):
        u[i] = 0
        while u[i] == 0:
            r[i] = mpc._random(sectype)
            s[i] = mpc._random(sectype)
            u[i] = await mpc.output(
                r[i] * s[i]
            )  # TODO: use Schur product to parallellize; avoid Schur; no resharing
    v = [None] * k
    for i in range(1, k):
        v[i] = r[i] * s[i - 1]  # TODO: use Schur product to parallellize
    w = [None] * k
    w[0] = r[0]
    for i in range(1, k):
        w[i] = v[i] * (
            1 / u[i - 1]
        )  # TODO: save 1/u for reuse below. # TODO: use Schur product to parallellize
    m = [None] * k
    # TODO: parallellize
    for i in range(k):
        m[i] = await mpc.output(x[i] * w[i])  # TODO: use Schur product to parallellize
    y = [None] * k
    y[0] = x[0]
    for i in range(1, k):
        y[i] = s[i] * (1 / u[i]) * reduce(mul, m[0 : i + 1])  # note: typo in DH12.
    return y


@mpc.coroutine
async def secure_poly_A(omega, x):
    """Evaluate 1 + x + x^2 + ... + x^omega securely via prefix_mul."""
    sectype = type(x)
    await mpc.returnType(sectype)
    p = prefix_mul([x] * omega)
    return sum(p) + 1


@mpc.coroutine
async def prefix_or(b):
    """Prefix or for vector b of secure field/int elements in {0,1}.

    Reversed to align it with definition of prefix-OR of DNT12.
    Follows Protocol 4.18 from De Hoogh's PhD thesis, "Design of large
    scale applications of secure multiparty computation", TU Eindhoven,
    2012. See: https://pure.tue.nl/ws/files/3430368/735328.pdf
    """
    sectype = type(b[0])
    k = len(b)
    await mpc.returnType(sectype, k)
    b = b[::-1]
    z = prefix_mul([b_i + 1 for b_i in b])
    x = [None] * k
    x[0] = b[0]  # note: typo in DH12.
    for i in range(1, k):
        x[i] = 1 - mpc.lsb(z[i])
    x = x[::-1]
    return x


@mpc.coroutine
async def secure_2_pow_bit_length(a):
    """Securely compute 2^bit_length(a)."""
    sectype = type(a)
    await mpc.returnType(sectype)
    a_bits = mpc.to_bits(a)
    y = prefix_or(a_bits)
    return 1 + sum([y_i * 2 ** i for i, y_i in enumerate(y)])


@mpc.coroutine
async def secure_bit_length(a):
    """Securely compute bit_length(a)."""
    sectype = type(a)
    await mpc.returnType(sectype)
    a_bits = mpc.to_bits(a)
    y = prefix_or(a_bits)
    return sum(y)


@mpc.coroutine
async def secure_division(a, b, l=None, l_d=None):
    """Integer division divmod(a, b) via NR. Ignores parameters l, l_d.

    (An earlier Taylor-series implementation following Dahl, Ning &
    Toft 2012, "On Secure Two-Party Integer Division", was removed.)
    """
    secint = type(a)
    await mpc.returnType(secint, 2)
    # assert await mpc.output(b>0)
    secfxp = mpc.SecFxp(2 * secint.bit_length + 2)
    a1, b1 = mpc.convert([a, b], secfxp)
    q = a1 / b1
    q = mpc.convert(q, secint)
    r = a - b * q
    q, r = mpc.if_else(r < 0, [q - 1, r + b], [q, r])  # correction using one <
    # q, r = mpc.if_else(r >= b, [q + 1, r - b], [q, r])  # correction using one <
    # assert await mpc.output(a == b * q + r), await mpc.output([q, r, a, b])
    # assert await mpc.output(0 <= r), await mpc.output([q, r, a, b])
    # assert await mpc.output(r < b), await mpc.output([q, r, a, b])
    return q, r


def hensel_inverse_mod2k(a, k):
    """Apply Hensel lifting to calculate modular inverse of a mod 2^k, k>=1.

    Solves f(x) = ax - 1 = 0 modulo 2^k iteratively, one bit per step
    (linear convergence), using one secure multiplication and one secure
    lsb per iteration when a is a secure integer.  Requires odd a.
    """
    y = type(a)(1)
    for i in range(1, k):
        # compute y one bit at a time, using one secure multiplication and one secure lsb
        y += (((a * y - 1) / (1 << i)) % 2) * (1 << i)  # NB: a*y - 1 equal to 0 or 2**i
    return y


def mont(a, b, r, n, n_prime):
    """Montgomery multiplication.

    Compute Montgomery reduction of the product of two integers.
    Taken from:
    https://www.microsoft.com/en-us/research/wp-content/uploads/1996/01/j37acmon.pdf

    Args:
        a, b (int): Integers to multiply and apply Montgomery reduction to.
        r (int): Auxiliary modulus.
        n (int): original modulus.
        n_prime (int): -1 * modular inverse of n mod r.
    """
    t = a * b
    t_np_mod_r = (t * n_prime) % r
    u = (t + (t_np_mod_r) * n) // r
    if u >= n:
        return u - n
    else:
        return u


def montgomery_exponentiation(a, e, r, n, n_prime):
    """Modular exponentiation, a^e mod n, using Montgomery exponentiation.

    Algorithm follows Algorithm 14.94 from Handbook of Applied
    Cryptography, p. 620.
    """
    a_tilde = mont(a, r ** 2 % n, r, n, n_prime)
    result = r % n
    if e == 0:
        return result
    for i in range(e.bit_length() - 1, -1, -1):
        result = mont(result, result, r, n, n_prime)
        if (e >> i) & 1:
            result = mont(result, a_tilde, r, n, n_prime)
    result = mont(result, 1, r, n, n_prime)
    return result


async def secure_mont(a, b, r, n, n_prime):
    """Secure Montgomery multiplication.

    See mont() docstring for details.
    """
    assert type(a).bit_length >= 3 * (r.bit_length())  # Ensure a*b*n_prime fits
    t = a * b
    t_np_mod_r = t * n_prime % r
    u = (t + t_np_mod_r * n) / r
    return mpc.if_else(u >= n, u - n, u)


async def secure_montgomery_exponentiation(a, e, r, n, n_prime):
    """Secure modular exponentiation, a^e mod n, using Montgomery exponentiation.

    Algorithm follows Algorithm 14.94 from Handbook of Applied
    Cryptography, p. 620.
    """
    secint = type(a)
    _, r2modn = secure_division(secint(r ** 2), n)
    _, result = secure_division(secint(r), n)
    # Convert to sectype with larger bit-length if needed.
    conversion_required = False
    safe_bit_length = 3 * (r.bit_length())  # for a*b*n_prime in secure_mont()
    if secint.bit_length < safe_bit_length:
        logger_sd.debug(
            f"In secure_montgomery_exponentiation(), original sectype bit_length {secint.bit_length} insufficient, convert to sectype of bit_length {safe_bit_length} (excl. headroom)."
        )
        conversion_required = True
        original_secint = type(a)
        secint = mpc.SecInt(safe_bit_length)
        r2modn = mpc.convert(r2modn, secint)
        result = mpc.convert(result, secint)
        a = mpc.convert(a, secint)
        n = mpc.convert(n, secint)
        n_prime = mpc.convert(n_prime, secint)
    a_tilde = await secure_mont(a, r2modn, r, n, n_prime)
    if await mpc.output(a == (n + 1) / 2):
        # The following shows that we can remove the computation for
        # r2modn entirely.  This saves the probably most expensive secure
        # integer division to compute r**2 / n.  Only the division to
        # compute r / n to get result remains.  This can be done when
        # a = 1/2 mod n, which is the case of interest when we compute
        # the secure reciprocal modulo n, see secure_recip2().  We
        # compute a_tilde = (a * r) mod n = (r / 2) mod n from result
        # using only one lsb (%2).
        new_a_tilde = (result + (result % 2) * n) / 2
        assert await mpc.output(a_tilde == new_a_tilde)  # confirms new approach
    if e == 0:
        return result
    for i in range(e.bit_length() - 1, -1, -1):
        result = await secure_mont(result, result, r, n, n_prime)
        if (e >> i) & 1:
            result = await secure_mont(result, a_tilde, r, n, n_prime)
    result = await secure_mont(result, 1, r, n, n_prime)
    # Convert back to original (smaller) sectype.
    if conversion_required:
        result = mpc.convert(result, original_secint)
    return result


async def secure_modular_exp(a, e, modulus, bit_length):
    """Wraps secure Montgomery exponentiation: a^e mod modulus.

    Calculates r and n_prime before calling Montgomery exponentiation.
    """
    r = 2 ** bit_length
    n_prime = -hensel_inverse_mod2k(modulus, bit_length)
    return await secure_montgomery_exponentiation(a, e, r, modulus, n_prime)


def secgcdKnuthAlgB(u, v):
    """Secure GCD following Knuth's Algorithm B (binary gcd)."""
    m = type(u).bit_length
    g = secure_even_gcd(u, v)
    u /= g
    v /= g
    t = mpc.if_else(mpc.lsb(u), -v, u)
    for _ in range(m):
        x = mpc.to_bits(t)
        s = x[-1]  # sign of t
        gg, nz = secure_norm(x, msb=False, power=True)
        t /= gg
        t, u, v = mpc.if_else(
            s,
            [t + u, u, -t],  # t < 0
            mpc.if_else(nz, [t - v, t, v], [t, u, v]),  # t > 0
        )  # t = 0
    return g * v


# TODO's
# 2: Remove unnecessary "async" and "await" statements (secure_recip2, _divsteps2, _gcd2, etc.)
<reponame>MiWeiss/probability
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for MarkovChain."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized

import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf

from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.distributions import log_prob_ratio
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util


@test_util.test_graph_and_eager_modes
class MarkovChainTest(test_util.TestCase):
  """Tests for tfd.MarkovChain: shapes, log_prob, sampling, validation."""

  def test_error_when_transition_modifies_batch_shape(self):
    # A transition that *collapses* batch dims (via Independent) must be
    # rejected at log_prob time.
    loses_batch_shape = tfd.MarkovChain(
        initial_state_prior=tfd.Normal(loc=0., scale=[1., 1.]),
        transition_fn=lambda _, x: tfd.Independent(  # pylint: disable=g-long-lambda
            tfd.Normal(loc=0., scale=tf.ones_like(x)),
            reinterpreted_batch_ndims=1),
        num_steps=5)
    x = self.evaluate(loses_batch_shape.sample([2], seed=test_util.test_seed()))
    with self.assertRaisesRegexp(ValueError, 'batch shape is incorrect'):
      loses_batch_shape.log_prob(x)

    # Conversely, a transition that *adds* batch dims relative to the prior
    # must also be rejected.
    gains_batch_shape = tfd.MarkovChain(
        initial_state_prior=tfd.Independent(
            tfd.Normal(loc=0., scale=[1., 1.]),
            reinterpreted_batch_ndims=1),
        transition_fn=lambda _, x: tfd.Normal(loc=0., scale=tf.ones_like(x)),
        num_steps=5)
    x = self.evaluate(gains_batch_shape.sample([2], seed=test_util.test_seed()))
    with self.assertRaisesRegexp(ValueError, 'batch shape is incorrect'):
      gains_batch_shape.log_prob(x)

  def test_log_prob_matches_linear_gaussian_ssm(self):
    # Cross-check MarkovChain.log_prob against the independently-implemented
    # LinearGaussianStateSpaceModel on a random linear-Gaussian process.
    dim = 2
    batch_shape = [3, 1]
    seed, *model_seeds = samplers.split_seed(test_util.test_seed(), n=6)

    # Sample a random linear Gaussian process.
    prior_loc = self.evaluate(
        tfd.Normal(0., 1.).sample(batch_shape + [dim], seed=model_seeds[0]))
    prior_scale = self.evaluate(
        tfd.InverseGamma(1., 1.).sample(batch_shape + [dim],
                                        seed=model_seeds[1]))
    transition_matrix = self.evaluate(
        tfd.Normal(0., 1.).sample([dim, dim], seed=model_seeds[2]))
    transition_bias = self.evaluate(
        tfd.Normal(0., 1.).sample(batch_shape + [dim], seed=model_seeds[3]))
    transition_scale_tril = self.evaluate(
        tf.linalg.cholesky(
            tfd.WishartTriL(df=dim, scale_tril=tf.eye(dim)).sample(
                seed=model_seeds[4])))

    initial_state_prior = tfd.MultivariateNormalDiag(
        loc=prior_loc, scale_diag=prior_scale, name='initial_state_prior')

    lgssm = tfd.LinearGaussianStateSpaceModel(
        num_timesteps=7,
        transition_matrix=transition_matrix,
        transition_noise=tfd.MultivariateNormalTriL(
            loc=transition_bias, scale_tril=transition_scale_tril),
        # Trivial observation model to pass through the latent state.
        observation_matrix=tf.eye(dim),
        observation_noise=tfd.MultivariateNormalDiag(loc=tf.zeros(dim),
                                                     scale_diag=tf.zeros(dim)),
        initial_state_prior=initial_state_prior)

    markov_chain = tfd.MarkovChain(
        initial_state_prior=initial_state_prior,
        transition_fn=lambda _, x: tfd.MultivariateNormalTriL(  # pylint: disable=g-long-lambda
            loc=tf.linalg.matvec(transition_matrix, x) + transition_bias,
            scale_tril=transition_scale_tril),
        num_steps=7)

    x = markov_chain.sample(5, seed=seed)
    self.assertAllClose(lgssm.log_prob(x), markov_chain.log_prob(x))

  @test_util.numpy_disable_test_missing_functionality(
      'JointDistributionNamedAutoBatched')
  def test_docstring_example_autoregressive_process(self):

    def transition_fn(_, previous_state):
      return tfd.JointDistributionNamedAutoBatched(
          # The previous state may include batch dimensions. Since the log
          # scale is a scalar quantity, its shape is the batch shape.
          batch_ndims=ps.rank(previous_state['log_scale']),
          model={
              # The autoregressive coefficients and the `log_scale` each follow
              # an independent slow-moving random walk.
              'coefs': tfd.Normal(loc=previous_state['coefs'], scale=0.01),
              'log_scale': tfd.Normal(loc=previous_state['log_scale'],
                                      scale=0.01),
              # The level is a linear combination of the previous *two* levels,
              # with additional noise of scale `exp(log_scale)`.
              'level': lambda coefs, log_scale: tfd.Normal(  # pylint: disable=g-long-lambda
                  loc=(coefs[..., 0] * previous_state['level'] +
                       coefs[..., 1] * previous_state['previous_level']),
                  scale=tf.exp(log_scale)),
              # Store the previous level to access at the next step.
              'previous_level': tfd.Deterministic(previous_state['level'])})

    process = tfd.MarkovChain(
        # For simplicity, define the prior as a 'transition' from fixed values.
        initial_state_prior=transition_fn(
            0, previous_state={
                'coefs': [0.7, -0.2],
                'log_scale': -1.,
                'level': 0.,
                'previous_level': 0.}),
        transition_fn=transition_fn,
        num_steps=100)

    self.assertAllEqualNested(process.event_shape,
                              {'coefs': [100, 2],
                               'log_scale': [100],
                               'level': [100],
                               'previous_level': [100]})
    self.assertAllEqual(process.batch_shape, [])

    x = process.sample(5, seed=test_util.test_seed())
    self.assertAllEqual(x['coefs'].shape, [5, 100, 2])
    self.assertAllEqual(x['log_scale'].shape, [5, 100])
    self.assertAllEqual(x['level'].shape, [5, 100])
    self.assertAllEqual(x['previous_level'].shape, [5, 100])
    lp = process.log_prob(x)
    self.assertAllEqual(lp.shape, [5])

    x2, lp2 = process.experimental_sample_and_log_prob(
        2, seed=test_util.test_seed())
    self.assertAllClose(lp2, process.log_prob(x2))

  @parameterized.named_parameters(
      ('float32_dynamic', tf.float32, True),
      ('float64_static', tf.float64, False))
  def test_docstring_example_batch_gaussian_walk(
      self, float_dtype, use_dynamic_shapes):
    if tf.executing_eagerly() and use_dynamic_shapes:
      self.skipTest('No dynamic shapes in eager mode.')

    # Helper to cast to the parameterized dtype and, in graph mode,
    # optionally hide the static shape behind a placeholder.
    def _as_tensor(x, dtype=None):
      x = ps.cast(x, dtype=dtype if dtype else float_dtype)
      if use_dynamic_shapes:
        x = tf1.placeholder_with_default(x, shape=None)
      return x

    scales = _as_tensor([0.5, 0.3, 0.2, 0.2, 0.3, 0.2, 0.7])
    batch_gaussian_walk = tfd.MarkovChain(
        # The prior distribution determines the batch shape for the chain.
        # Transitions must respect this batch shape.
        initial_state_prior=tfd.Normal(loc=_as_tensor([-10., 0., 10.]),
                                       scale=_as_tensor([1., 1., 1.])),
        transition_fn=lambda t, x: tfd.Normal(  # pylint: disable=g-long-lambda
            loc=x,
            # The `num_steps` dimension will always be leftmost in `x`, so we
            # pad the scale to the same rank as `x` so that the shapes line up.
            scale=tf.reshape(
                tf.gather(scales, t),
                ps.concat([[-1],
                           ps.ones(ps.rank(x) - 1, dtype=tf.int32)],
                          axis=0))),
        # Limit to eight steps since we only specified scales for seven
        # transitions.
        num_steps=8)

    self.assertAllEqual(batch_gaussian_walk.event_shape_tensor(), [8])
    self.assertAllEqual(batch_gaussian_walk.batch_shape_tensor(), [3])

    x = batch_gaussian_walk.sample(5, seed=test_util.test_seed())
    self.assertAllEqual(ps.shape(x), [5, 3, 8])
    lp = batch_gaussian_walk.log_prob(x)
    self.assertAllEqual(ps.shape(lp), [5, 3])

    x2, lp2 = batch_gaussian_walk.experimental_sample_and_log_prob(
        [2], seed=test_util.test_seed())
    self.assertAllClose(lp2, batch_gaussian_walk.log_prob(x2))

  def test_docstring_example_gaussian_walk(self):
    gaussian_walk = tfd.MarkovChain(
        initial_state_prior=tfd.Normal(loc=0., scale=1.),
        transition_fn=lambda _, x: tfd.Normal(loc=x, scale=1.),
        num_steps=100)

    self.assertAllEqual(gaussian_walk.event_shape, [100])
    self.assertAllEqual(gaussian_walk.batch_shape, [])

    x = gaussian_walk.sample(5, seed=test_util.test_seed())
    self.assertAllEqual(x.shape, [5, 100])
    lp = gaussian_walk.log_prob(x)
    self.assertAllEqual(lp.shape, [5])

    # For a standard Gaussian walk the log_prob factors into the prior term
    # plus independent unit-Normal increments; verify against that closed form.
    n = tfd.Normal(0., 1.)
    expected_lp = (n.log_prob(x[:, 0]) +
                   tf.reduce_sum(n.log_prob(x[:, 1:] - x[:, :-1]), axis=-1))
    self.assertAllClose(lp, expected_lp)

    x2, lp2 = gaussian_walk.experimental_sample_and_log_prob(
        [2], seed=test_util.test_seed())
    self.assertAllClose(lp2, gaussian_walk.log_prob(x2))

  def test_non_autobatched_joint_distribution(self):
    # Same AR model as the autobatched docstring test, but with explicit
    # Independent wrappers; batch_shape is then a nested structure.

    def transition_fn(_, previous_state):
      return tfd.JointDistributionNamed(
          {
              # The autoregressive coefficients and the `log_scale` each follow
              # an independent slow-moving random walk.
              'coefs': tfd.Independent(
                  tfd.Normal(loc=previous_state['coefs'], scale=0.01),
                  reinterpreted_batch_ndims=1),
              'log_scale': tfd.Normal(loc=previous_state['log_scale'],
                                      scale=0.01),
              # The level is a linear combination of the previous *two* levels,
              # with additional noise of scale `exp(log_scale)`.
              'level': lambda coefs, log_scale: tfd.Normal(  # pylint: disable=g-long-lambda
                  loc=(coefs[..., 0] * previous_state['level'] +
                       coefs[..., 1] * previous_state['previous_level']),
                  scale=tf.exp(log_scale)),
              # Store the previous level to access at the next step.
              'previous_level': tfd.Deterministic(previous_state['level'])})

    process = tfd.MarkovChain(
        # For simplicity, define the prior as a 'transition' from fixed values.
        initial_state_prior=transition_fn(
            0, previous_state={
                'coefs': [0.7, -0.2],
                'log_scale': -1.,
                'level': 0.,
                'previous_level': 0.}),
        transition_fn=transition_fn,
        num_steps=100)

    self.assertAllEqualNested(process.event_shape,
                              {'coefs': [100, 2],
                               'log_scale': [100],
                               'level': [100],
                               'previous_level': [100]})
    self.assertAllEqual(process.batch_shape,
                        {'coefs': [],
                         'log_scale': [],
                         'level': [],
                         'previous_level': []})

    x = process.sample(5, seed=test_util.test_seed())
    self.assertAllEqual(x['coefs'].shape, [5, 100, 2])
    self.assertAllEqual(x['log_scale'].shape, [5, 100])
    self.assertAllEqual(x['level'].shape, [5, 100])
    self.assertAllEqual(x['previous_level'].shape, [5, 100])
    lp = process.log_prob(x)
    self.assertAllEqual(lp.shape, [5])

    x2, lp2 = process.experimental_sample_and_log_prob(
        2, seed=test_util.test_seed())
    self.assertAllClose(lp2, process.log_prob(x2))

  def test_log_prob_ratio(self):
    p = tfd.MarkovChain(
        initial_state_prior=tfd.Normal(0., 1.),
        transition_fn=lambda _, x: tfd.Normal(x, tf.nn.softplus(x)),
        num_steps=10)
    q = tfd.MarkovChain(
        initial_state_prior=tfd.Normal(-10, 3.),
        transition_fn=lambda _, x: tfd.Normal(x, tf.abs(x)),
        num_steps=10)
    x = self.evaluate(p.sample(4, seed=test_util.test_seed()))
    y = self.evaluate(q.sample(4, seed=test_util.test_seed()))
    # The specialized log_prob_ratio path must agree with the naive
    # difference of log_probs.
    self.assertAllClose(
        p.log_prob(x) - q.log_prob(y),
        log_prob_ratio.log_prob_ratio(p, x, q, y),
        atol=1e-5)

  def test_unexpected_num_steps_raises(self):
    p = tfd.MarkovChain(
        initial_state_prior=tfd.Normal(0., 1.),
        transition_fn=lambda _, x: tfd.Normal(x, tf.nn.softplus(x)),
        num_steps=10,
        validate_args=True)
    # An 11-step sample passed to a 10-step chain must be rejected.
    with self.assertRaisesRegex(
        (ValueError, tf.errors.InvalidArgumentError),
        'does not match the expected num_steps'):
      p.log_prob(tf.zeros([11]))


@test_util.test_graph_and_eager_modes
class MarkovChainBijectorTest(test_util.TestCase):
  """Tests for MarkovChain's default event-space bijector."""

  # pylint: disable=g-long-lambda
  @parameterized.named_parameters(
      dict(testcase_name='deterministic_prior',
           prior_fn=lambda: tfd.Deterministic([-100., 0., 100.]),
           transition_fn=lambda _, x: tfd.Normal(loc=x, scale=1.)),
      dict(testcase_name='deterministic_transition',
           prior_fn=lambda: tfd.Normal(loc=[-100., 0., 100.], scale=1.),
           transition_fn=lambda _, x: tfd.Deterministic(x)),
      dict(testcase_name='fully_deterministic',
           prior_fn=lambda: tfd.Deterministic([-100., 0., 100.]),
           transition_fn=lambda _, x: tfd.Deterministic(x)),
      dict(testcase_name='mvn_diag',
           prior_fn=(
               lambda: tfd.MultivariateNormalDiag(loc=[[2.], [2.]],
                                                  scale_diag=[1.])),
           transition_fn=lambda _, x: tfd.VectorDeterministic(x)),
      dict(testcase_name='docstring_dirichlet',
           prior_fn=lambda: tfd.JointDistributionNamedAutoBatched(
               {'probs': tfd.Dirichlet([1., 1.])}),
           transition_fn=lambda _, x: tfd.JointDistributionNamedAutoBatched(
               {'probs': tfd.MultivariateNormalDiag(loc=x['probs'],
                                                    scale_diag=[0.1, 0.1])},
               batch_ndims=ps.rank(x['probs']))),
      dict(testcase_name='uniform_step',
           prior_fn=lambda: tfd.Exponential(tf.ones([4, 1])),
           transition_fn=lambda _, x: tfd.Uniform(low=x, high=x + 1.)),
      dict(testcase_name='joint_distribution',
           prior_fn=lambda: tfd.JointDistributionNamedAutoBatched(
               batch_ndims=2,
               model={
                   'a': tfd.Gamma(tf.zeros([5]), 1.),
                   'b': lambda a: (
                       tfb.Reshape(
                           event_shape_in=[4, 3],
                           event_shape_out=[2, 3, 2])(
                               tfd.Independent(
                                   tfd.Normal(
                                       loc=tf.zeros([5, 4, 3]),
                                       scale=a[..., tf.newaxis, tf.newaxis]),
                                   reinterpreted_batch_ndims=2)))}),
           transition_fn=lambda _, x: tfd.JointDistributionNamedAutoBatched(
               batch_ndims=ps.rank_from_shape(x['a'].shape),
               model={'a': tfd.Normal(loc=x['a'], scale=1.),
                      'b': lambda a: tfd.Deterministic(
                          x['b'] + a[..., tf.newaxis, tf.newaxis,
                                     tf.newaxis])})),
      dict(testcase_name='nested_chain',
           prior_fn=lambda: tfd.MarkovChain(
               initial_state_prior=tfb.Split(2)(
                   tfd.MultivariateNormalDiag(0., [1., 2.])),
               transition_fn=lambda _, x: tfb.Split(2)(
                   tfd.MultivariateNormalDiag(x[0], [1., 2.])),
               num_steps=6),
           transition_fn=(
               lambda _, x: tfd.JointDistributionSequentialAutoBatched(
                   [
                       tfd.MultivariateNormalDiag(x[0], [1.]),
                       tfd.MultivariateNormalDiag(x[1], [1.])],
                   batch_ndims=ps.rank(x[0])))))
  # pylint: enable=g-long-lambda
  def test_default_bijector(self, prior_fn, transition_fn):
    chain = tfd.MarkovChain(initial_state_prior=prior_fn(),
                            transition_fn=transition_fn,
                            num_steps=7)
    y = self.evaluate(chain.sample(seed=test_util.test_seed()))
    bijector = chain.experimental_default_event_space_bijector()

    self.assertAllEqual(chain.batch_shape_tensor(),
                        bijector.experimental_batch_shape_tensor())

    # Round-trip through the bijector must reproduce the sample.
    x = bijector.inverse(y)
    yy = bijector.forward(
        tf.nest.map_structure(tf.identity, x))  # Bypass bijector cache.
    self.assertAllCloseNested(y, yy)

    chain_event_ndims = tf.nest.map_structure(
        ps.rank_from_shape, chain.event_shape_tensor())
    self.assertAllEqualNested(bijector.inverse_min_event_ndims,
                              chain_event_ndims)

    # Forward and inverse log-det-Jacobians must be negatives of each other.
    ildj = bijector.inverse_log_det_jacobian(
        tf.nest.map_structure(tf.identity, y),  # Bypass bijector cache.
        event_ndims=chain_event_ndims)
    if not bijector.is_constant_jacobian:
      self.assertAllEqual(ildj.shape, chain.batch_shape)
    fldj = bijector.forward_log_det_jacobian(
        tf.nest.map_structure(tf.identity, x),  # Bypass bijector cache.
        event_ndims=bijector.inverse_event_ndims(chain_event_ndims))
    self.assertAllClose(ildj, -fldj)

    # Verify that event shapes are passed through and flattened/unflattened
    # correctly.
    inverse_event_shapes = bijector.inverse_event_shape(chain.event_shape)
    x_event_shapes = tf.nest.map_structure(
        lambda t, nd: t.shape[ps.rank(t) - nd:],
        x, bijector.forward_min_event_ndims)
    self.assertAllEqualNested(inverse_event_shapes, x_event_shapes)
    forward_event_shapes = bijector.forward_event_shape(inverse_event_shapes)
    self.assertAllEqualNested(forward_event_shapes, chain.event_shape)

    # Verify that the outputs of other methods have the correct structure.
    inverse_event_shape_tensors = bijector.inverse_event_shape_tensor(
        chain.event_shape_tensor())
    self.assertAllEqualNested(inverse_event_shape_tensors, x_event_shapes)
    forward_event_shape_tensors = bijector.forward_event_shape_tensor(
        inverse_event_shape_tensors)
    self.assertAllEqualNested(forward_event_shape_tensors,
                              chain.event_shape_tensor())


if __name__ == '__main__':
  tf.test.main()
<reponame>zte-lhg/chromium_org
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# hostsutil.py: Start a TUI session of `Hosts Setup Utility`.
#
# Copyleft (C) 2014 - huhamhire <<EMAIL>>
# =====================================================================
# Licensed under the GNU General Public License, version 3. You should
# have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# =====================================================================

__author__ = "huhamhire <<EMAIL>>"

import os
from zipfile import BadZipfile

from curses_d import CursesDaemon

import sys
sys.path.append("..")
from util import CommonUtil, RetrieveData


class HostsUtil(CursesDaemon):
    """
    HostsUtil class in :mod:`tui` module is the main entrance to the
    Text-based User Interface (TUI) mode of `Hosts Setup Utility`.
    This class contains methods to start a TUI session of `Hosts Setup
    Utility`.

    .. note:: This class is subclass of :class:`~tui.curses_d.CursesDaemon`
        class.

    .. inheritance-diagram:: tui.hostsutil.HostsUtil
        :parts: 2

    Typical usage to start a TUI session::

        import tui
        util = tui.HostsUtil()
        util.start()

    :ivar str platform: Platform of current operating system. The value
        could be `Windows`, `Linux`, `Unix`, `OS X`, and of course
        `Unknown`.
    :ivar str hostname: The hostname of current operating system.

        .. note:: This attribute would only be used on linux.

    :ivar str hosts_path: The absolute path to the hosts file on current
        operating system.

    .. seealso:: :attr:`platform`, :attr:`hostname`, :attr:`hosts_path` in
        :class:`~tui.curses_d.CursesDaemon` class.

    :ivar str sys_eol: The End-Of-Line marker. This marker could typically
        be one of `CR`, `LF`, or `CRLF`.

        .. seealso:: :attr:`sys_eol` in :class:`~tui.curses_ui.CursesUI`
            class.
    """
    # Class-level defaults; overwritten per-instance by set_platform().
    platform = ""
    hostname = ""
    hosts_path = ""
    sys_eol = ""

    def __init__(self):
        """
        Initialize a new TUI session.

        * Load server list from a configuration file under working
          directory.
        * Try to load the hosts data file under working directory if it
          exists.

        .. note:: If hosts data file does not exist correctly in current
            working directory, a warning message box would popup. And
            operations to change the hosts file on current system could be
            done only until a new data file has been downloaded.

        .. seealso:: :meth:`~tui.curses_d.CursesDaemon.session_daemon`
            method in :class:`~tui.curses_d.CursesDaemon`.

        .. seealso:: :meth:`~gui.hostsutil.HostsUtil.init_main` in
            :class:`~gui.hostsutil.HostsUtil` class.
        """
        super(HostsUtil, self).__init__()
        # Set mirrors
        # NOTE(review): self.settings is presumably initialized by
        # CursesDaemon.__init__ — confirm its layout matches [0][2] here.
        self.settings[0][2] = CommonUtil.set_network("network.conf")
        # Read data file and set function list
        try:
            self.set_platform()
            RetrieveData.unpack()
            RetrieveData.connect_db()
            self.set_info()
            self.set_func_list()
        except IOError:
            # Data file missing: warn, but keep the session usable so the
            # user can download a new data file.
            self.messagebox("No data file found! Press F6 to get data file "
                            "first.", 1)
        except BadZipfile:
            # Data file corrupt: same best-effort handling as above.
            self.messagebox("Incorrect Data file! Press F6 to get a new data "
                            "file first.", 1)

    def __del__(self):
        """
        Reset the terminal and clear up the temporary data file while TUI
        session is finished.
        """
        super(HostsUtil, self).__del__()
        try:
            RetrieveData.clear()
        except:
            # Deliberate best-effort cleanup: a failure to remove the temp
            # data file must not raise during interpreter shutdown.
            pass

    def start(self):
        """
        Start the TUI session.

        .. note:: This method is the trigger to start a TUI session of
            `Hosts Setup Utility`.
        """
        while True:
            # Reload: a truthy return from session_daemon() means the user
            # requested a restart, so tear down and re-initialize.
            if self.session_daemon():
                self.__del__()
                self.__init__()
            else:
                break

    def set_platform(self):
        """
        Set the information about current operating system.
        """
        system, hostname, path, encode, flag = CommonUtil.check_platform()
        # Status color: GREEN when the platform is supported, RED otherwise.
        color = "GREEN" if flag else "RED"
        self.platform = system
        self.statusinfo[1][1] = system
        self.hostname = hostname
        self.hosts_path = path
        self.statusinfo[1][2] = color
        # Pick the line ending matching the detected platform encoding.
        if encode == "win_ansi":
            self.sys_eol = "\r\n"
        else:
            self.sys_eol = "\n"

    def set_func_list(self):
        """
        Set the function selection list in TUI session.
        """
        # One pass per IP protocol version (presumably 0 = IPv4, 1 = IPv6
        # — confirm against RetrieveData.get_choice()).
        for ip in range(2):
            choice, defaults, slices = RetrieveData.get_choice(ip)
            # NOTE(review): self.custom is not defined in this class;
            # assumed to be a path attribute inherited from CursesDaemon —
            # verify.
            if os.path.isfile(self.custom):
                # Prepend a "customize" entry and shift the slice offsets
                # to account for it.
                choice.insert(0, [4, 1, 0, "customize"])
                defaults[0x04] = [1]
                for i in range(len(slices)):
                    slices[i] += 1
                slices.insert(0, 0)
            self.choice[ip] = choice
            self.slices[ip] = slices
            # Mark each function 1/0 depending on whether it is enabled by
            # default for its group.
            funcs = []
            for func in choice:
                if func[1] in defaults[func[0]]:
                    funcs.append(1)
                else:
                    funcs.append(0)
            self._funcs[ip] = funcs

    def set_info(self):
        """
        Set the information of the current local data file.
        """
        info = RetrieveData.get_info()
        build = info["Buildtime"]
        self.hostsinfo["Version"] = info["Version"]
        self.hostsinfo["Release"] = CommonUtil.timestamp_to_date(build)


if __name__ == "__main__":
    main = HostsUtil()
    main.start()