input
stringlengths
2.65k
237k
output
stringclasses
1 value
<reponame>nrccua/aioradio """Generic functions related to working with files or the file system.""" # pylint: disable=broad-except # pylint: disable=consider-using-enumerate # pylint: disable=invalid-name # pylint: disable=logging-fstring-interpolation # pylint: disable=too-many-arguments # pylint: disable=too-many-boolean-expressions # pylint: disable=too-many-branches # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-lines # pylint: disable=too-many-nested-blocks # pylint: disable=too-many-public-methods import asyncio import functools import json import logging import os import re import time import zipfile from asyncio import sleep from collections import defaultdict from dataclasses import dataclass from dataclasses import field as dc_field from datetime import datetime, timezone, tzinfo from pathlib import Path from types import coroutine from typing import Any, Dict, List import mandrill import numpy as np from smb.base import SharedFile from smb.smb_structs import OperationFailure from smb.SMBConnection import SMBConnection from aioradio.aws.secrets import get_secret from aioradio.psycopg2 import establish_psycopg2_connection from aioradio.pyodbc import establish_pyodbc_connection DIRECTORY = Path(__file__).parent.absolute() LOG = logging.getLogger('file_ingestion') @dataclass class EFIParse: """EnrollmentFileIngestion parse class.""" filename: str fice_enrolled_logic: set = dc_field(default_factory=set) entry_year_filter: dict = dc_field(default_factory=dict) def __post_init__(self): if not self.fice_enrolled_logic: self.fice_enrolled_logic = { "001100", "001397", "001507", "001526", "002120", "002122", "002180", "002760", "002778", "002795", "002907", "003301", "003450", "003505", "003535", "003688", "003709" } if not self.entry_year_filter: self.entry_year_filter = { "start": "2021", "end": "2025" } self.field_to_max_widths = { "StudentID": 50, "LastName": 64, "FirstName": 64, "Gender": 1, "GPA": 20, "Address1": 150, "Address2": 
150, "City": 50, "StateCode": 50, "ZipCode": 20, "BirthDate": 10, "EntryTerm": 14, "EntryYear": 4, "HSGradYear": 4, "SrcCode": 256, "SrcDate": 10, "Inquired": 10, "Applied": 10, "Completed": 10, "Admitted": 10, "Confirmed": 10, "Enrolled": 10, "Canceled": 10, "Dropped": 10, "Graduated": 10, "AcademicProgram": 256, "StudentAthlete": 50, "CampusLocation": 50, "Email": 75 } self.gender_map = { "MALE/MAN": "M", "MALE": "M", "MAN": "M", "M": "M", "FEMALE/WOMAN": "F", "FEMALE": "F", "WOMAN": "F", "F": "F" } self.grades_map = { "A+": "4.0", "A": "4.0", "A-": "3.667", "B+": "3.333", "B": "3.0", "B-": "2.667", "C+": "2.333", "C": "2.0", "C-": "1.667", "D+": "1.333", "D": "1.0", "D-": "0.667", "F+": "0.333", "F": "0.0", "F-": "0.0" } self.date_formats = [ "%m/%d/%y", "%m/%d/%Y", "%-m/%-d/%Y", "%-m/%-d/%y", "%Y-%m-%d", "%Y%m%d", "%d-%b-%Y", "%m-%d-%y", "%m-%d-%Y", "%b %d, %Y", "%Y-%m-%dT%H:%M:%SZ", "%d-%b-%y", "%Y-%m-%dT%H:%M:%S", "%-d-%b-%y", "%-d-%b-%Y", "%Y/%m/%d" ] self.student_athlete_map = { "0": "N", "1": "Y", "CHRL": "Y", "NO": "N", "WLAX": "Y", "YES": "Y", "TRUE": "Y", "MWR": "Y", "MTR": "Y", "VB": "Y", "N": "N", "BASEB": "Y", "MSWIM": "Y", "WWR": "Y", "Y": "Y", "FALSE": "N", "FB": "Y" } self.season_year_map = { "FA19": "2019", "FA20": "2020", "FA21": "2021", "FA22": "2022", "FA23": "2023", "FA24": "2024", "FA25": "2025", "FA26": "2026", "FA27": "2027", "19FA": "2019", "20FA": "2020", "21FA": "2021", "22FA": "2022", "23FA": "2023", "24FA": "2024", "25FA": "2025", "26FA": "2026", "27FA": "2027" } self.seasons_map = { "SPRING": "SPRING", "SUMMER": "SUMMER", "FALL": "FALL", "WINTER": "WINTER", "FA": "FALL" } self.state_to_statecode = { "ALABAMA": "AL", "ALASKA": "AK", "AMERICAN SAMOA": "AS", "ARIZONA": "AZ", "ARKANSAS": "AR", "CALIFORNIA": "CA", "COLORADO": "CO", "CONNECTICUT": "CT", "DELAWARE": "DE", "DISTRICT OF COLUMBIA": "DC", "FEDERATED STATES OF MICRONESIA": "FM", "FLORIDA": "FL", "GEORGIA": "GA", "GUAM": "GU", "HAWAII": "HI", "IDAHO": "ID", "ILLINOIS": "IL", 
"INDIANA": "IN", "IOWA": "IA", "KANSAS": "KS", "KENTUCKY": "KY", "LOUISIANA": "LA", "MAINE": "ME", "MARSHALL ISLANDS": "MH", "MARYLAND": "MD", "MASSACHUSETTS": "MA", "MICHIGAN": "MI", "MINNESOTA": "MN", "MISSISSIPPI": "MS", "MISSOURI": "MO", "MONTANA": "MT", "NEBRASKA": "NE", "NEVADA": "NV", "NEW HAMPSHIRE": "NH", "NEW JERSEY": "NJ", "NEW MEXICO": "NM", "NEW YORK": "NY", "NORTH CAROLINA": "NC", "NORTH DAKOTA": "ND", "NORTHERN MARIANA ISLANDS": "MP", "OHIO": "OH", "OKLAHOMA": "OK", "OREGON": "OR", "PALAU": "PW", "PENNSYLVANIA": "PA", "PUERTO RICO": "PR", "RHODE ISLAND": "RI", "SOUTH CAROLINA": "SC", "SOUTH DAKOTA": "SD", "TENNESSEE": "TN", "TEXAS": "TX", "U.S. ARMED FORCES - AMERICAS": "AA", "U.S. ARMED FORCES - EUROPE": "AE", "U.S. ARMED FORCES - PACIFIC": "AP", "UTAH": "UT", "VERMONT": "VT", "VIRGIN ISLANDS": "VI", "VIRGINIA": "VA", "WASHINGTON": "WA", "WEST VIRGINIA": "WV", "WISCONSIN": "WI", "WYOMING": "WY" } self.cache = { 'year': {}, 'sort_date': {}, 'date': {}, 'bad_date': set() } self.year_formats = [ '%Y', '%y' ] self.apt_to_compiled = { "apt": re.compile(re.escape("apt"), re.IGNORECASE), "avenue": re.compile(re.escape("avenue"), re.IGNORECASE), "ave": re.compile(re.escape("ave"), re.IGNORECASE), "blvd": re.compile(re.escape("blvd"), re.IGNORECASE), "circle": re.compile(re.escape("circle"), re.IGNORECASE), "cir": re.compile(re.escape("cir"), re.IGNORECASE), "court": re.compile(re.escape("court"), re.IGNORECASE), "drive": re.compile(re.escape("drive"), re.IGNORECASE), "lane": re.compile(re.escape("lane"), re.IGNORECASE), "parkway": re.compile(re.escape("parkway"), re.IGNORECASE), "place": re.compile(re.escape("place"), re.IGNORECASE), "road": re.compile(re.escape("road"), re.IGNORECASE), "street": re.compile(re.escape("street"), re.IGNORECASE), "way": re.compile(re.escape("way"), re.IGNORECASE) } self.addr_suffix_list = [ "ct", "dr", "pl", "rd", "st" ] self.addr_unit_to_compiled = { " unit ": re.compile(re.escape(" unit "), re.IGNORECASE), " bldg ": 
re.compile(re.escape(" bldg "), re.IGNORECASE), " ste ": re.compile(re.escape(" ste "), re.IGNORECASE), " # ": re.compile(re.escape(" # "), re.IGNORECASE), " #": re.compile(re.escape(" #"), re.IGNORECASE) } #### Used by EFI exclusively #### self.non_prospect_row_idxs = set() self.enrollment_funnel_fields = { 'Inquired', 'Applied', 'Completed', 'Admitted', 'Confirmed', 'Enrolled', 'Canceled', 'Dropped', 'Graduated' } self.non_prospect_fields = self.enrollment_funnel_fields - {'Dropped', 'Graduated'} self.season_year_map = defaultdict(str, self.season_year_map) self.filtered = { 'entryyear': 0, 'prospects': 0 } def check_width(self, value: str, field: str, row_idx: int) -> str: """Check field value and truncate if it is longer than expected. Args: value (str): Value field (str): Column header field value row_idx (int): Row index Returns: str: [description] """ if len(value) > self.field_to_max_widths[field]: new_value = value[:self.field_to_max_widths[field]].rstrip() LOG.warning(f"[{self.filename}] [row:{row_idx}] [{field}] - '{value}' " f"exceeds max width of {self.field_to_max_widths[field]}. Trimming value to {new_value}") value = new_value return value def check_name(self, value: str, field: str, row_idx: int) -> str: """Check FirstName | LastName logic. Args: value (str): Name value field (str): Column header field value row_idx (int): Row number in file Returns: str: Name value """ if value != '': value = value.replace('"', '') value = self.check_width(value, field, row_idx) return value def check_gender(self, value: str) -> str: """Check Gender logic. Args: value (str): Gender value Returns: str: Gender value """ if value != '': value_upper = value.upper() value = self.gender_map[value_upper] if value_upper in self.gender_map else '' return value def check_gpa(self, value: str, field: str, row_idx: int) -> str: """Check GPA logic. 
Args: value (str): GPA value field (str): Column header field value row_idx (int): Row number in file Returns: str: GPA value """ if value != '': try: value = '' if not (0 <= float(value) <= 200) else self.check_width(value, field, row_idx) except ValueError: value_upper = value.upper() value = self.grades_map[value_upper] if value_upper in self.grades_map else '' return value def check_statecode(self, value: str, field: str, row_idx: int) -> str: """Check StateCode logic. Args: value (str): StateCode value field (str): Column header field value row_idx (int): Row number in file Returns: str: StateCode value """ if value != '': value_upper = value.upper() if value_upper in self.state_to_statecode: value = self.state_to_statecode[value_upper] value = self.check_width(value, field, row_idx) return value def check_date(self, value: str, field: str, past: datetime, future: datetime, row_idx: int) -> str: """Check date conforms to expected date within time range. Args: value (str): Date value field (str): Column header field value past (datetime): Past datetime threshold future (datetime): Future datetime threshold row_idx (int): Row number in file Returns: str: Date value """ if value != '': index = value.find(' ') if index != -1: value = value[:index] if value in self.cache['date']: value = self.cache['date'][value] elif value in self.cache['bad_date']: value = '' else: for idx, pattern in enumerate(self.date_formats): try: val = datetime.strptime(value, pattern) if idx != 0: self.date_formats[0], self.date_formats[idx] = self.date_formats[idx], self.date_formats[0] if past <= val <= future: val = val.strftime('%Y/%m/%d') self.cache['date'][value] = val self.cache['sort_date'][val] = f"{val[5:7]}/{val[8:10]}/{val[:4]}" value = val else: LOG.warning(f"[{self.filename}] [row:{row_idx}] [{field}] - {val.date()}" f" not between range of {past.date()} to {future.date()}") value = '' break except ValueError: pass else: self.cache['bad_date'].add(value) value = '' return 
value def check_year(self, value: str, field: str, past: datetime, future: datetime, row_idx: int) -> str: """Check year conforms to expected year within time range. Args: value (str): Year value field (str): Column header field value past (datetime): Past datetime threshold future (datetime): Future datetime threshold row_idx (int): Row number in file Returns: str: Year value """ if value != '': if value in self.cache['year']: value = self.cache['year'][value] else: for idx, pattern in enumerate(self.year_formats): try: val = datetime.strptime(value, pattern).year if idx != 0: self.year_formats[0], self.year_formats[idx] = self.year_formats[idx], self.year_formats[0] if past <= val <= future: val = str(val) self.cache['year'][value] = val value = val else: LOG.warning(f"[{self.filename}] [row:{row_idx}] [{field}] - {val} not between range of {past} to {future}") self.cache['year'][value] = '' value = '' break except ValueError: pass else: if field != 'EntryYear': self.cache['year'][value] = '' value = '' return value def check_srccode(self, value: str, field: str, row_idx: int) -> str: """Check SrcCode logic. Args: value (str): SrcCode value field (str): Column header field
at most a single -1 which indicates a dimension that should be derived from the input shape. # Returns The new output shape with a -1 replaced with its computed value. Raises a ValueError if the total array size of the output_shape is different then the input_shape, or more then one unknown dimension is specified. ''' output_shape = list(output_shape) msg = 'total size of new array must be unchanged' known, unknown = 1, None for index, dim in enumerate(output_shape): if dim < 0: if unknown is None: unknown = index else: raise ValueError('can only specify one unknown dimension') else: known *= dim original = np.prod(input_shape, dtype=int) if unknown is not None: if known == 0 or original % known != 0: raise ValueError(msg) output_shape[unknown] = original // known elif original != known: raise ValueError(msg) return tuple(output_shape) @property def output_shape(self): return (self.input_shape[0],) + self._fix_unknown_dimension(self.input_shape[1:], self.dims) def get_output(self, train=False): X = self.get_input(train) return K.reshape(X, (-1,) + self.output_shape[1:]) def get_config(self): config = {'name': self.__class__.__name__, 'dims': self.dims} base_config = super(Reshape, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Permute(Layer): '''Permute the dimensions of the input according to a given pattern. Useful for e.g. connecting RNNs and convnets together. # Input shape Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. # Output shape Same as the input shape, but with the dimensions re-ordered according to the specified pattern. # Arguments dims: Tuple of integers. Permutation pattern, does not include the samples dimension. Indexing starts at 1. For instance, `(2, 1)` permutes the first and second dimension of the input. 
''' def __init__(self, dims, **kwargs): super(Permute, self).__init__(**kwargs) self.dims = tuple(dims) @property def output_shape(self): input_shape = list(self.input_shape) output_shape = copy.copy(input_shape) for i, dim in enumerate(self.dims): target_dim = input_shape[dim] output_shape[i+1] = target_dim return tuple(output_shape) def get_output(self, train=False): X = self.get_input(train) return K.permute_dimensions(X, (0,) + self.dims) def get_config(self): config = {'name': self.__class__.__name__, 'dims': self.dims} base_config = super(Permute, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Flatten(Layer): '''Flatten the input. Does not affect the batch size. # Input shape Arbitrary, although all dimensions in the input shape must be fixed. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. # Output shape `(batch_size,)` ''' def __init__(self, **kwargs): super(Flatten, self).__init__(**kwargs) @property def output_shape(self): input_shape = self.input_shape if not all(input_shape[1:]): raise Exception('The shape of the input to "Flatten" ' 'is not fully defined ' '(got ' + str(input_shape[1:]) + '. ' 'Make sure to pass a complete "input_shape" ' 'or "batch_input_shape" argument to the first ' 'layer in your model.') return (input_shape[0], np.prod(input_shape[1:])) def get_output(self, train=False): X = self.get_input(train) return K.flatten(X) class RepeatVector(Layer): '''Repeat the input n times. # Input shape 2D tensor of shape `(nb_samples, features)`. # Output shape 3D tensor of shape `(nb_samples, n, features)`. # Arguments n: integer, repetition factor. 
''' def __init__(self, n, **kwargs): super(RepeatVector, self).__init__(**kwargs) self.n = n @property def output_shape(self): input_shape = self.input_shape return (input_shape[0], self.n, input_shape[1]) def get_output(self, train=False): X = self.get_input(train) return K.repeat(X, self.n) def get_config(self): config = {'name': self.__class__.__name__, 'n': self.n} base_config = super(RepeatVector, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Dense(Layer): '''Just your regular fully connected NN layer. # Input shape 2D tensor with shape: `(nb_samples, input_dim)`. # Output shape 2D tensor with shape: `(nb_samples, output_dim)`. # Arguments output_dim: int > 0. init: name of initialization function for the weights of the layer (see [initializations](../initializations.md)), or alternatively, Theano function to use for weights initialization. This parameter is only relevant if you don't pass a `weights` argument. activation: name of activation function to use (see [activations](../activations.md)), or alternatively, elementwise Theano function. If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x). weights: list of numpy arrays to set as initial weights. The list should have 1 element, of shape `(input_dim, output_dim)`. W_regularizer: instance of [WeightRegularizer](../regularizers.md) (eg. L1 or L2 regularization), applied to the main weights matrix. b_regularizer: instance of [WeightRegularizer](../regularizers.md), applied to the bias. activity_regularizer: instance of [ActivityRegularizer](../regularizers.md), applied to the network output. W_constraint: instance of the [constraints](../constraints.md) module (eg. maxnorm, nonneg), applied to the main weights matrix. b_constraint: instance of the [constraints](../constraints.md) module, applied to the bias. input_dim: dimensionality of the input (integer). 
This argument (or alternatively, the keyword argument `input_shape`) is required when using this layer as the first layer in a model. ''' input_ndim = 2 def __init__(self, output_dim, init='glorot_uniform', activation='linear', weights=None, W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None, input_dim=None, **kwargs): self.init = initializations.get(init) self.activation = activations.get(activation) self.output_dim = output_dim self.W_regularizer = regularizers.get(W_regularizer) self.b_regularizer = regularizers.get(b_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.W_constraint = constraints.get(W_constraint) self.b_constraint = constraints.get(b_constraint) self.constraints = [self.W_constraint, self.b_constraint] self.initial_weights = weights self.input_dim = input_dim if self.input_dim: kwargs['input_shape'] = (self.input_dim,) self.input = K.placeholder(ndim=2) super(Dense, self).__init__(**kwargs) def build(self): input_dim = self.input_shape[1] self.W = self.init((input_dim, self.output_dim)) self.b = K.zeros((self.output_dim,)) self.params = [self.W, self.b] self.regularizers = [] if self.W_regularizer: self.W_regularizer.set_param(self.W) self.regularizers.append(self.W_regularizer) if self.b_regularizer: self.b_regularizer.set_param(self.b) self.regularizers.append(self.b_regularizer) if self.activity_regularizer: self.activity_regularizer.set_layer(self) self.regularizers.append(self.activity_regularizer) if self.initial_weights is not None: self.set_weights(self.initial_weights) del self.initial_weights @property def output_shape(self): return (self.input_shape[0], self.output_dim) def get_output(self, train=False): X = self.get_input(train) output = self.activation(K.dot(X, self.W) + self.b) return output def get_config(self): config = {'name': self.__class__.__name__, 'output_dim': self.output_dim, 'init': self.init.__name__, 'activation': 
self.activation.__name__, 'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None, 'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None, 'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None, 'W_constraint': self.W_constraint.get_config() if self.W_constraint else None, 'b_constraint': self.b_constraint.get_config() if self.b_constraint else None, 'input_dim': self.input_dim} base_config = super(Dense, self).get_config() return dict(list(base_config.items()) + list(config.items())) class ActivityRegularization(Layer): '''Layer that passes through its input unchanged, but applies an update to the cost function based on the activity. # Input shape Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. # Output shape Same shape as input. # Arguments l1: L1 regularization factor. l2: L2 regularization factor. ''' def __init__(self, l1=0., l2=0., **kwargs): super(ActivityRegularization, self).__init__(**kwargs) self.l1 = l1 self.l2 = l2 activity_regularizer = ActivityRegularizer(l1=l1, l2=l2) activity_regularizer.set_layer(self) self.regularizers = [activity_regularizer] def get_output(self, train=False): return self.get_input(train) def get_config(self): config = {'name': self.__class__.__name__, 'l1': self.l1, 'l2': self.l2} base_config = super(ActivityRegularization, self).get_config() return dict(list(base_config.items()) + list(config.items())) class TimeDistributedDense(MaskedLayer): '''Apply a same Dense layer for each dimension[1] (time_dimension) input. Especially useful after a recurrent network with 'return_sequence=True'. # Input shape 3D tensor with shape `(nb_sample, time_dimension, input_dim)`. # Output shape 3D tensor with shape `(nb_sample, time_dimension, output_dim)`. # Arguments output_dim: int > 0. 
init: name of initialization function for the weights of the layer (see [initializations](../initializations.md)), or alternatively, Theano function to use for weights initialization. This parameter is only relevant if you don't pass a `weights` argument. activation: name of activation function to use (see [activations](../activations.md)), or alternatively, elementwise Theano function. If you don't specify anything, no activation is applied (ie. "linear" activation: a(x) = x). weights: list of numpy arrays to set as initial weights. The list should have 1 element, of shape `(input_dim, output_dim)`. W_regularizer: instance of [WeightRegularizer](../regularizers.md) (eg. L1 or L2 regularization), applied to the main weights matrix. b_regularizer: instance of [WeightRegularizer](../regularizers.md), applied to the bias. activity_regularizer: instance of [ActivityRegularizer](../regularizers.md), applied to the network output. W_constraint: instance of the [constraints](../constraints.md) module (eg. maxnorm, nonneg), applied to the main weights matrix. b_constraint: instance of the [constraints](../constraints.md) module, applied to the bias. input_dim: dimensionality of the input (integer). This argument (or alternatively, the keyword argument `input_shape`) is required when using this layer as the first layer in a model. 
''' input_ndim = 3 def __init__(self, output_dim, init='glorot_uniform', activation='linear', weights=None, W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None, b_constraint=None, input_dim=None, input_length=None, **kwargs): self.output_dim = output_dim self.init = initializations.get(init) self.activation = activations.get(activation) self.W_regularizer = regularizers.get(W_regularizer) self.b_regularizer = regularizers.get(b_regularizer) self.activity_regularizer = regularizers.get(activity_regularizer) self.W_constraint = constraints.get(W_constraint) self.b_constraint = constraints.get(b_constraint) self.constraints = [self.W_constraint, self.b_constraint] self.initial_weights = weights self.input_dim = input_dim self.input_length = input_length if self.input_dim: kwargs['input_shape'] = (self.input_length, self.input_dim) self.input =
###################################################################### ###################################################################### # Copyright <NAME>, Cambridge Dialogue Systems Group, 2017 # ###################################################################### ###################################################################### import theano import numpy as np import os import operator from math import log, log10, exp, pow from copy import deepcopy import sys import random import time import itertools import pickle as pk from ast import literal_eval import gc from nnsds import NNSDS from utils.tools import setWordVector from utils.nlp import normalize from utils.bleu import sentence_bleu_4 from loader.DataReader import * from loader.GentScorer import * from ConfigParser import SafeConfigParser from api.Interact import Interact theano.gof.compilelock.set_lock_status(False) class NNDial(object): ''' Main interface class for the model. This class takes charge of save/load hyperparameters from the config file and trained models. It delegates the data preprocessing to DataReader module and delegates the learning to NNSDS module. It implements training based on early stopping and testing and interactive interfaces. 
''' ####################################################################### # all variables that needs to be save and load from model file, indexed # by their names ####################################################################### learn_vars = ['self.lr','self.lr_decay','self.stop_count','self.l2', 'self.seed','self.min_impr','self.debug','self.llogp', 'self.grad_clip','self.valid_logp','self.params', 'self.cur_stop_count','self.learn_mode'] file_vars = ['self.corpusfile','self.dbfile','self.semidictfile', 'self.ontologyfile','self.modelfile'] data_vars = ['self.split','self.percent','self.shuffle','self.lengthen'] gen_vars = ['self.topk','self.beamwidth','self.verbose', 'self.repeat_penalty','self.token_reward'] n2n_vars = ['self.enc','self.trk','self.dec'] enc_vars = ['self.vocab_size','self.input_hidden'] dec_vars = ['self.output_hidden','self.seq_wvec_file','self.dec_struct'] trk_vars = ['self.trkinf','self.trkreq','self.belief','self.inf_dimensions', 'self.req_dimensions','self.trk_enc','self.trk_wvec_file'] ply_vars = ['self.policy','self.latent'] def __init__(self,config=None,opts=None): if config==None and opts==None: print "Please specify command option or config file ..." return # config parser parser = SafeConfigParser() parser.read(config) # model file name self.modelfile = parser.get('file','model') # get current mode from command argument if opts: self.mode = opts.mode # loading pretrained model if any if os.path.isfile(self.modelfile): if not opts: self.loadNet(parser,None) else: self.loadNet(parser,opts.mode) else: # init network from scrach self.initNet(config,opts) self.initBackupWeights() def initNet(self,config,opts=None): print '\n\ninit net from scrach ... ' # config parser parser = SafeConfigParser() parser.read(config) # Setting default learn from config file self.debug = parser.getboolean('learn','debug') if self.debug: print 'loading model settings from config file ...' 
self.lr = parser.getfloat('learn','lr') self.lr_decay = parser.getfloat('learn','lr_decay') self.stop_count = parser.getint('learn','stop_count') self.cur_stop_count = parser.getint('learn','cur_stop_count') self.l2 = parser.getfloat('learn','l2') self.seed = parser.getint('learn','random_seed') self.min_impr = parser.getfloat('learn','min_impr') self.llogp = parser.getfloat('learn','llogp') self.grad_clip = parser.getfloat('learn','grad_clip') # Setting file paths self.dbfile = parser.get('file','db') self.ontologyfile = parser.get('file','ontology') self.corpusfile = parser.get('file','corpus') self.semidictfile = parser.get('file','semidict') # setting data manipulations self.split = literal_eval(parser.get('data','split')) self.lengthen = parser.getint('data','lengthen') self.shuffle = parser.get('data','shuffle') self.percent = parser.get('data','percent') # Setting generation specific parameters self.verbose = parser.getint('gen','verbose') self.topk = parser.getint('gen','topk') self.beamwidth = parser.getint('gen','beamwidth') self.repeat_penalty = parser.get('gen','repeat_penalty') self.token_reward = parser.getboolean('gen','token_reward') # setting n2n components self.enc = parser.get('n2n','encoder') self.trk = parser.get('n2n','tracker') self.dec = parser.get('n2n','decoder') # setting encoder structure self.input_hidden = parser.getint('enc','ihidden') # setting policy structure self.policy = parser.get('ply','policy') self.latent = parser.getint('ply','latent')\ if self.policy=='latent' else 0 # setting decoder structure self.output_hidden = parser.getint('dec','ohidden') self.seq_wvec_file = parser.get('dec','wvec') self.dec_struct = parser.get('dec','struct') self.use_snapshot = parser.getboolean('dec','snapshot') # setting tracker structure self.trkinf = parser.getboolean('trk','informable') self.trkreq = parser.getboolean('trk','requestable') self.belief = parser.get('trk','belief') self.trk_enc = parser.get('trk','trkenc') self.trk_wvec_file = 
parser.get('trk','wvec') # setting learnable parameters self.learn_mode = parser.get('mode','learn_mode') # set random seed np.random.seed(self.seed) random.seed(self.seed) np.set_printoptions(precision=4) # setting data reader, processors, and lexicon self.reader = DataReader( self.corpusfile, self.dbfile, self.semidictfile, self.ontologyfile, self.split, self.lengthen, self.percent, self.shuffle, self.trk_enc, self.verbose, opts.mode, self.policy, self.latent) # network size according to dataset self.vocab_size = len(self.reader.vocab) self.inf_dimensions = self.reader.infoseg self.req_dimensions = self.reader.reqseg # logp for validation set self.valid_logp = 0.0 # start setting networks self.ready() def ready(self): ################################################################# ################### THEANO CONFIGURATION ######################## ################################################################# # initialise network model if self.debug: print 'setting network structures using theano variables ...' self.model = NNSDS(self.enc, self.dec, self.policy, self.trk, self.trkinf, self.trkreq, self.belief, self.trk_enc, self.use_snapshot, self.dec_struct, self.vocab_size, self.input_hidden, self.output_hidden, self.inf_dimensions, self.req_dimensions, self.grad_clip, self.learn_mode, len(self.reader.snapshots), self.latent) # setput theano variables self.model.config_theano() if self.debug: numofparams, trainable = self.model.numOfParams() print '\t\tnumber of parameters : %8d' % numofparams print '\t\tnumber of training parameters : %8d' % trainable ################################################################# ############################ END ################################ ################################################################# def testNet(self): # testing generation np.random.seed(self.seed) if self.debug: print 'generating dialogue responses for trained network ...' 
# evaluator bscorer = BLEUScorer() parallel_corpus = [] best_corpus = [] # load testing data testset = self.reader.iterate(mode=self.mode) # statistics for calulating semi performance stats = self._statsTable() start_time = time.time() # gate stats gstats = np.zeros((4)) num_sent = 0.0 # for each dialog for cnt in range(len(testset)): # initial state if self.verbose>0: print '='*25 + ' Dialogue '+ str(cnt) +' '+ '='*28 #print '##############################################################' # read one example source, source_len, masked_source, masked_source_len,\ target, target_len, masked_target, masked_target_len,\ snapshot, change_label, goal, inf_trk_label, req_trk_label,\ db_degree, srcfeat, tarfeat, finished, utt_group = testset[cnt] # initial selection selected_venue = -1 venue_offered = None # initial belief flatten_belief_tm1 = np.zeros((self.inf_dimensions[-1])) for i in range(len(self.inf_dimensions)-1): flatten_belief_tm1[self.inf_dimensions[i+1]-1] = 1.0 # for each turn reqs = [] generated_utt_tm1 = '' for t in range(len(source)): if self.verbose>0: print '-'*28 + ' Turn '+ str(t) +' '+ '-'*28 # extract source and target sentence for that turn source_t = source[t][:source_len[t]] masked_source_t = masked_source[t][:masked_source_len[t]] masked_target_t = masked_target[t][:masked_target_len[t]] # this turn features srcfeat_t = srcfeat[t] # previous target masked_target_tm1, target_tm1, starpos_tm1, vtarpos_tm1, offer = \ self.reader.extractSeq(generated_utt_tm1,type='target') tarfeat_tm1 = [starpos_tm1,vtarpos_tm1] # utterance preparation source_utt = ' '.join([self.reader.vocab[w] for w in source_t]) masked_source_utt= ' '.join([self.reader.vocab[w] for w in masked_source_t]) masked_target_utt= ' '.join([self.reader.vocab[w] for w in masked_target_t]) # read and understand user sentence masked_intent_t = self.model.read( masked_source_t ) full_belief_t, belief_t = self.model.track( flatten_belief_tm1, masked_source_t, masked_target_tm1, srcfeat_t, 
tarfeat_tm1 ) flatten_belief_t = np.concatenate(full_belief_t,axis=0) # search DB db_degree_t, query = self._searchDB(flatten_belief_t) # score table scoreTable = self._genScoreTable(full_belief_t) # generation generated,sample_t,_ = self.model.talk( masked_intent_t,belief_t, db_degree_t, masked_source_t, masked_target_t, scoreTable) # choose venue venues = [i for i, e in enumerate(db_degree_t[:-6]) if e != 0 ] # keep the current venue if selected_venue in venues: pass else: # choose the first match as default index if len(venues)!=0: selected_venue = random.choice(venues) # no matched venues else: selected_venue = None # lexicalise generated utterance generated_utts = [] for gen in generated: generated_utt = ' '.join([self.reader.vocab[g] for g in gen[0]]) generated_utts.append(generated_utt) gennerated_utt = generated_utts[0] # calculate semantic match rate twords = [self.reader.vocab[w] for w in masked_target_t] for gen in generated: gwords = [self.reader.vocab[g] for g in gen[0]] for gw in gwords: if gw.startswith('[VALUE_') or gw.startswith('[SLOT_'): if gw in twords: # match target semi token stats['approp'][0] += 1.0 stats['approp'][1] += 1.0 #gstats += np.mean( np.array(gen[2][1:]),axis=0 ) num_sent += 1 # update history belief flatten_belief_tm1 = flatten_belief_t[:self.inf_dimensions[-1]] # for calculating success: check requestable slots match requestables = ['phone','address','postcode','food','area','pricerange'] for requestable in requestables: if '[VALUE_'+requestable.upper()+']' in gennerated_utt: reqs.append(self.reader.reqs.index(requestable+'=exist')) # check offered venue if '[VALUE_NAME]' in generated_utt and selected_venue!=None: venue_offered = self.reader.db2inf[selected_venue] ############################### debugging ############################ if self.verbose>0: print 'User Input :\t%s'% source_utt print ' :\t%s'% masked_source_utt print if self.trk=='rnn' and self.trkinf==True: if self.verbose>1: print 'Belief Tracker :' print ' | 
%16s%13s%20s|' % ('','Informable','') print ' | %16s\t%5s\t%20s |' % ('Prediction','Prob.','Ground Truth') print ' | %16s\t%5s\t%20s |' % ('------------','-----','------------') for i in range(len(self.inf_dimensions)-1): bn = self.inf_dimensions[i] psem = self.reader.infovs[np.argmax(np.array(full_belief_t[i]))+bn] ysem = self.reader.infovs[np.argmax(np.array(\ inf_trk_label[t][bn:self.inf_dimensions[i+1]+bn]))+bn] prob = full_belief_t[i][np.argmax(np.array(full_belief_t[i]))] #print '%20s\t%.3f\t%20s' % (psem,prob,ysem) if self.verbose>1: print ' | %16s\t%.3f\t%20s |' % (psem,prob,ysem) # counting stats slt,val = ysem.split('=') if 'none' not in ysem: if psem==ysem: # true positive stats['informable'][slt][0] += 1.0 else: # false negative stats['informable'][slt][1] += 1.0 else: if psem==ysem: # true negative stats['informable'][slt][2] += 1.0 else: # false positive stats['informable'][slt][3] += 1.0 if self.trk=='rnn' and self.trkreq==True: if self.verbose>1: print ' | %16s%13s%20s|' % ('','Requestable','') print ' | %16s\t%5s\t%20s |' % ('Prediction','Prob.','Ground Truth') print ' | %16s\t%5s\t%20s |' % ('------------','-----','------------') infbn = 3 if self.trkinf else 0 for i in range(len(self.req_dimensions)-1): bn = self.req_dimensions[i] ysem = self.reader.reqs[np.argmax(np.array(\ req_trk_label[t][bn:self.req_dimensions[i+1]+bn]))+bn] psem = self.reader.reqs[ \ np.argmax(np.array(full_belief_t[infbn+i])) +\ self.req_dimensions[i] ] prob = np.max(np.array(full_belief_t[infbn+i])) if self.verbose>1: print ' | %16s\t%.3f\t%20s |' % (psem,prob,ysem) # counting stats slt,val = ysem.split('=') if slt+'=exist'==ysem: if psem==ysem: # true positive stats['requestable'][slt][0] += 1.0 else: # false negative stats['requestable'][slt][1] += 1.0 else: if psem==ysem: # true negative stats['requestable'][slt][2] += 1.0 else: # false positive stats['requestable'][slt][3] += 1.0 # offer change tracker bn = self.req_dimensions[-1] psem = 0 if full_belief_t[-1][0]>=0.5 
else 1 ysem = np.argmax(change_label[t]) if ysem==0: if psem==ysem: stats['requestable']['change'][0] += 1.0 else: stats['requestable']['change'][1]
layer d_model = long_cart_embs.shape[-1] long_cart_padding_mask_list = padding_mask(long_cart) long_buy_padding_mask_list = padding_mask(long_buy) long_cart_transformer = Encoder(1, d_model, 4, 256, cfg.long_seq_len, True) long_buy_transformer = Encoder(1, d_model, 4, 256, cfg.long_seq_len, True) long_cart_output = long_cart_transformer(long_cart_embs, long_cart_padding_mask_list) long_buy_output = long_buy_transformer(long_buy_embs, long_buy_padding_mask_list) print("long_buy_output", long_buy_output) # 2. short sequence short_click_embs = tf.concat([sum_p_short_click_embed_masked, sum_p_short_click_level2_embed_masked], -1) short_cart_embs = tf.concat([sum_p_short_cart_embed_masked, sum_p_short_cart_level2_embed_masked], -1) short_buy_embs = tf.concat([sum_p_short_buy_embed_masked, sum_p_short_buy_level2_embed_masked], -1) d_model = short_cart_embs.shape[-1] short_cart_padding_mask_list = padding_mask(short_cart) short_buy_padding_mask_list = padding_mask(short_buy) short_cart_transformer = Encoder(1, d_model, 4, 256, cfg.short_seq_len, True) short_buy_transformer = Encoder(1, d_model, 4, 256, cfg.short_seq_len, True) short_cart_output = short_cart_transformer(short_cart_embs, short_cart_padding_mask_list) short_buy_output = short_buy_transformer(short_buy_embs, short_buy_padding_mask_list) print("short_buy_output", short_buy_output) # target attention long_click_din = get_weight_sum_embed(cfg.long_seq_len, cfg.embed_dim * 2)([target_embs, long_click_embs]) long_cart_din = get_weight_sum_embed(cfg.long_seq_len, cfg.embed_dim * 2)([target_embs, long_cart_output]) long_buy_din = get_weight_sum_embed(cfg.long_seq_len, cfg.embed_dim * 2)([target_embs, long_buy_output]) short_click_din = get_weight_sum_embed(cfg.short_seq_len, cfg.embed_dim * 2)([target_embs, short_click_embs]) short_cart_din = get_weight_sum_embed(cfg.short_seq_len, cfg.embed_dim * 2)([target_embs, short_cart_output]) short_buy_din = get_weight_sum_embed(cfg.short_seq_len, cfg.embed_dim * 
2)([target_embs, short_buy_output]) # for dense pooling_long_click_din = tf.squeeze(long_click_din, 1) pooling_long_cart_din = tf.squeeze(long_cart_din, 1) pooling_long_buy_din = tf.squeeze(long_buy_din, 1) pooling_short_click_din = tf.squeeze(short_click_din, 1) pooling_short_cart_din = tf.squeeze(short_cart_din, 1) pooling_short_buy_din = tf.squeeze(short_buy_din, 1) print("pooling_short_buy_din", pooling_short_buy_din) ## ----------------- 行为序列 end!! ----------------- ## ----------------- 2.2 sparse embedding --------------- ## 离散特征处理 pair_offsets = tf.expand_dims(tf.cumsum([0] + cfg.pair_field_dims[:-1], axis=0), axis=0) ## [1, n] goods_offsets = tf.expand_dims(tf.cumsum([0] + cfg.goods_field_dims[:-1], axis=0), axis=0) ## [1, n] bucket_user_offsets = tf.expand_dims(tf.cumsum([0] + cfg.bucket_user_dims[:-1], axis=0), axis=0) ## [1, n] bucket_goods_offsets = tf.expand_dims(tf.cumsum([0] + cfg.bucket_goods_dims[:-1], axis=0), axis=0) ## [1, n] bucket_pair_offsets = tf.expand_dims(tf.cumsum([0] + cfg.bucket_pair_dims[:-1], axis=0), axis=0) ## [1, n] context_offsets = tf.expand_dims(tf.cumsum([0] + cfg.context_dims[:-1], axis=0), axis=0) ## [1, n] bucket_user_cspu_offsets = tf.expand_dims(tf.cumsum([0] + cfg.bucket_user_cspu_dims[:-1], axis=0), axis=0) ## [1, n] bucket_ozid_cspu_offsets = tf.expand_dims(tf.cumsum([0] + cfg.bucket_ozid_cspu_dims[:-1], axis=0), axis=0) ## [1, n] bucket_user_behavior_offsets = tf.expand_dims(tf.cumsum([0] + cfg.bucket_user_behavior_dims[:-1], axis=0), axis=0) ## [1, n] bucket_goods_gross_offsets = tf.expand_dims(tf.cumsum([0] + cfg.bucket_goods_gross_dims[:-1], axis=0), axis=0) ## [1, n] pair_feature = pair_feature + pair_offsets goods_sparse = goods_sparse + goods_offsets bucket_user_box_obj = bucket_user_box_obj + bucket_user_offsets bucket_goods_box_obj = bucket_goods_box_obj + bucket_goods_offsets bucket_pair_box_obj = bucket_pair_box_obj + bucket_pair_offsets context_features = context_features + context_offsets 
bucket_user_cspu_obj = bucket_user_cspu_obj + bucket_user_cspu_offsets bucket_ozid_cspu_obj = bucket_ozid_cspu_obj + bucket_ozid_cspu_offsets bucket_user_behavior_obj = bucket_user_behavior_obj + bucket_user_behavior_offsets bucket_goods_gross_obj = bucket_goods_gross_obj + bucket_goods_gross_offsets pair_feature = tf.clip_by_value(pair_feature, clip_value_min=0, clip_value_max=sum(cfg.pair_field_dims) - 1) goods_sparse = tf.clip_by_value(goods_sparse, clip_value_min=0, clip_value_max=sum(cfg.goods_field_dims) - 1) bucket_user_box_obj = tf.clip_by_value(bucket_user_box_obj, clip_value_min=0, clip_value_max=sum(cfg.bucket_user_dims) - 1) bucket_goods_box_obj = tf.clip_by_value(bucket_goods_box_obj, clip_value_min=0, clip_value_max=sum(cfg.bucket_goods_dims) - 1) bucket_pair_box_obj = tf.clip_by_value(bucket_pair_box_obj, clip_value_min=0, clip_value_max=sum(cfg.bucket_pair_dims) - 1) bucket_user_cspu_obj = tf.clip_by_value(bucket_user_cspu_obj, clip_value_min=0, clip_value_max=sum(cfg.bucket_user_cspu_dims) - 1) bucket_ozid_cspu_obj = tf.clip_by_value(bucket_ozid_cspu_obj, clip_value_min=0, clip_value_max=sum(cfg.bucket_ozid_cspu_dims) - 1) bucket_user_behavior_obj = tf.clip_by_value(bucket_user_behavior_obj, clip_value_min=0, clip_value_max=sum(cfg.bucket_user_behavior_dims) - 1) bucket_goods_gross_obj = tf.clip_by_value(bucket_goods_gross_obj, clip_value_min=0, clip_value_max=sum(cfg.bucket_goods_gross_dims) - 1) ## ----------------- 2.3 实时特征 --------------- ## 实时离散特征处理 ### ------------ 实时特征等距分箱处理 --------- ## 商品特征进行log分桶,用户和pair都进行等距分桶,间隔3为一个桶,最大10个桶。 ### ----------------------------------------- # realtime_user = tf.clip_by_value(realtime_user, clip_value_min=0, clip_value_max=29) realtime_pair_click = tf.cast(tf.clip_by_value(realtime_pair_click, clip_value_min=0, clip_value_max=2), tf.int32) # realtime_cross_category_front = tf.clip_by_value(realtime_cross_category_front, clip_value_min=0, clip_value_max=29) realtime_back_category = 
tf.clip_by_value(realtime_back_category, clip_value_min=0, clip_value_max=29) realtime_goods = tf.clip_by_value(realtime_goods, clip_value_min=0, clip_value_max=100000000) realtime_passtime = tf.clip_by_value(realtime_passtime, clip_value_min=0, clip_value_max=129600) realtime_user_group = tf.clip_by_value(realtime_user_group, clip_value_min=0, clip_value_max=129600) # realtime_user = tf.cast(tf.math.floordiv(realtime_user, 5), tf.int32) # realtime_cross_category_front = tf.cast(tf.floordiv(realtime_cross_category_front, 5), tf.int32) realtime_back_category = tf.cast(tf.math.floordiv(realtime_back_category, 5), tf.int32) realtime_goods = tf.cast(tf.math.log(realtime_goods + 1), tf.int32) realtime_goods = tf.clip_by_value(realtime_goods, clip_value_min=0, clip_value_max=30) realtime_passtime_hour = tf.math.floordiv(realtime_passtime, 60, name="pst_hour") realtime_passtime_day = tf.add(tf.math.floordiv(realtime_passtime, 1440), 23, name="pst_day") realtime_passtime = tf.cast(tf.where(realtime_passtime < 1440.0, realtime_passtime_hour, realtime_passtime_day), tf.int32) realtime_user_group = tf.cast(tf.math.log(realtime_user_group + 1), tf.int32) realtime_user_group = tf.clip_by_value(realtime_user_group, clip_value_min=0, clip_value_max=30) ### ------------ 实时特征等距分箱处理 done --------- realtime_back_category_offsets = tf.expand_dims(tf.cumsum([0] + cfg.realtime_back_category_dims[:-1], axis=0), axis=0) ## [1, n] realtime_back_category = realtime_back_category + realtime_back_category_offsets realtime_goods_offsets = tf.expand_dims(tf.cumsum([0] + cfg.realtime_goods_dims[:-1], axis=0), axis=0) realtime_goods = realtime_goods + realtime_goods_offsets realtime_passtime_offsets = tf.expand_dims(tf.cumsum([0] + cfg.realtime_passtime_dims[:-1], axis=0), axis=0) realtime_passtime = realtime_passtime + realtime_passtime_offsets realtime_user_group_offsets = tf.expand_dims(tf.cumsum([0] + cfg.realtime_user_group_dims[:-1], axis=0), axis=0) realtime_user_group = 
realtime_user_group + realtime_user_group_offsets ## ----------------- 2.4 各种embedding --------------- ## embedding pair_feature_embed = layers.Embedding(input_dim=sum(cfg.pair_field_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(pair_feature) goods_sparse_embed = layers.Embedding(input_dim=sum(cfg.goods_field_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(goods_sparse) bucket_user_box_obj_embed = layers.Embedding(input_dim=sum(cfg.bucket_user_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_user_box_obj) bucket_goods_box_obj_embed = layers.Embedding(input_dim=sum(cfg.bucket_goods_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_goods_box_obj) bucket_pair_box_obj_embed = layers.Embedding(input_dim=sum(cfg.bucket_pair_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_pair_box_obj) realtime_back_category_embed = layers.Embedding(input_dim=sum(cfg.realtime_back_category_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(realtime_back_category) realtime_goods_embed = layers.Embedding(input_dim=sum(cfg.realtime_goods_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(realtime_goods) realtime_pair_click_embed = layers.Embedding(input_dim=3, output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(realtime_pair_click) realtime_pair_click_embed = tf.expand_dims(realtime_pair_click_embed, 1) 
context_embed = layers.Embedding(input_dim=sum(cfg.context_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(context_features) realtime_passtime_embed = layers.Embedding(input_dim=sum(cfg.realtime_passtime_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(realtime_passtime) realtime_user_group_embed = layers.Embedding(input_dim=sum(cfg.realtime_user_group_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(realtime_user_group) bucket_user_cspu_obj_embed = layers.Embedding(input_dim=sum(cfg.bucket_user_cspu_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_user_cspu_obj) bucket_ozid_cspu_obj_embed = layers.Embedding(input_dim=sum(cfg.bucket_ozid_cspu_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_ozid_cspu_obj) bucket_user_behavior_obj_embed = layers.Embedding(input_dim=sum(cfg.bucket_user_behavior_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_user_behavior_obj) bucket_goods_gross_obj_embed = layers.Embedding(input_dim=sum(cfg.bucket_goods_gross_dims), output_dim=cfg.embed_dim, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_goods_gross_obj) ## --------- all embedding concat ----------- embed_fm = tf.concat([realtime_passtime_embed, context_embed, realtime_back_category_embed, realtime_goods_embed, realtime_pair_click_embed, realtime_user_group_embed, pair_feature_embed, goods_sparse_embed, bucket_user_box_obj_embed, bucket_goods_box_obj_embed, bucket_pair_box_obj_embed, cspu_embed, 
supplier_embed, lv2_embed, bucket_user_cspu_obj_embed, bucket_ozid_cspu_obj_embed, bucket_user_behavior_obj_embed, bucket_goods_gross_obj_embed], axis=1, name="all_embed_concat") embed_deep = embed_fm deep_fc = tf.reshape(embed_deep, shape=(-1, embed_deep.shape[1] * embed_deep.shape[2])) deep_fc = tf.concat([deep_fc, pooling_long_click_din, pooling_long_cart_din, pooling_long_buy_din, pooling_short_click_din, pooling_short_cart_din, pooling_short_buy_din], -1) print("concat embed", embed_fm) print("deep_fc", deep_fc) ### ------------------------------------------------------------- ### 三、 linear ### ------------------------------------------------------------- linear1 = layers.Dense(1, activation=None, kernel_regularizer=cfg.kernel_regular, use_bias=True)(bucket_goods_raw) linear2 = layers.Embedding(input_dim=sum(cfg.pair_field_dims), output_dim=1, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(pair_feature) linear3 = layers.Embedding(input_dim=sum(cfg.realtime_back_category_dims), output_dim=1, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(realtime_back_category) linear4 = layers.Embedding(input_dim=sum(cfg.realtime_goods_dims), output_dim=1, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(realtime_goods) linear5 = layers.Embedding(input_dim=3, output_dim=1, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(realtime_pair_click) linear6 = layers.Embedding(input_dim=sum(cfg.bucket_pair_dims), output_dim=1, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_pair_box_obj) ## [1, 62, embed_dim] linear7 = layers.Embedding(input_dim=sum(cfg.bucket_goods_dims), output_dim=1, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_goods_box_obj) ## [1, 62, 
embed_dim] linear8 = layers.Embedding(input_dim=sum(cfg.realtime_passtime_dims), output_dim=1, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(realtime_passtime) linear9 = layers.Embedding(input_dim=sum(cfg.bucket_user_cspu_dims), output_dim=1, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_user_cspu_obj) linear10 = layers.Embedding(input_dim=sum(cfg.bucket_ozid_cspu_dims), output_dim=1, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_ozid_cspu_obj) linear11 = layers.Embedding(input_dim=sum(cfg.bucket_user_behavior_dims), output_dim=1, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_user_behavior_obj) linear12 = layers.Embedding(input_dim=sum(cfg.bucket_goods_gross_dims), output_dim=1, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(bucket_goods_gross_obj) linear13 = layers.Embedding(input_dim=sum(cfg.realtime_user_group_dims), output_dim=1, embeddings_initializer=cfg.embed_init, embeddings_regularizer=cfg.kernel_regular, input_length=None)(realtime_user_group) linear2 = tf.reduce_sum(tf.squeeze(linear2, 2), 1, keepdims=True) linear3 = tf.reduce_sum(tf.squeeze(linear3, 2), 1, keepdims=True) linear4 = tf.reduce_sum(tf.squeeze(linear4, 2), 1, keepdims=True) linear6 = tf.reduce_sum(tf.squeeze(linear6, 2), 1, keepdims=True) linear7 = tf.reduce_sum(tf.squeeze(linear7, 2), 1, keepdims=True) linear8 = tf.reduce_sum(tf.squeeze(linear8, 2), 1, keepdims=True) linear9 = tf.reduce_sum(tf.squeeze(linear9, 2), 1, keepdims=True) linear10 = tf.reduce_sum(tf.squeeze(linear10, 2), 1, keepdims=True) linear11 = tf.reduce_sum(tf.squeeze(linear11, 2), 1, keepdims=True) linear12 = tf.reduce_sum(tf.squeeze(linear12, 2), 1, keepdims=True) linear13 = tf.reduce_sum(tf.squeeze(linear13, 2), 1, keepdims=True) 
linear = tf.concat( [linear1, linear2, linear3, linear4, linear5, linear6, linear7, linear8, linear9, linear10, linear11, linear12, linear13], axis=1, name="linear_concat") ## 不要想加 ### ------------------------------------------------------------- ### 四、 fm & deep ### ------------------------------------------------------------- ## ---------------------- fm -------------------- summed_features_emb = tf.reduce_sum(embed_fm, 1) summed_features_emb_square = tf.square(summed_features_emb) # square_sum part squared_features_emb = tf.square(embed_fm) squared_sum_features_emb = tf.reduce_sum(squared_features_emb, 1) # second order fm = 0.5 * tf.subtract(summed_features_emb_square, squared_sum_features_emb) ## (None, embed_dim) ## --------------- deep ------------------ deep_fc = tf.concat([bucket_user_raw, bucket_goods_raw, deep_fc], axis=-1, name="deep_input") print("deep 输入tensor: ", deep_fc) # deep_fc = layers.Dropout(rate=0.3)(deep_fc) deep_fc = layers.Dense(128, activation=None, kernel_regularizer=cfg.kernel_regular, use_bias=True)(deep_fc) deep_fc = layers.BatchNormalization(axis=-1, momentum=0.99)(deep_fc) deep_fc = layers.ReLU()(deep_fc) # deep_fc = layers.Dropout(rate=0.2)(deep_fc) deep_fc = layers.Dense(64, activation=None, kernel_regularizer=cfg.kernel_regular, use_bias=True)(deep_fc) deep_fc = layers.BatchNormalization(axis=-1, momentum=0.99)(deep_fc) deep_fc = layers.ReLU()(deep_fc) deep_fc = layers.Dense(32, activation=None, kernel_regularizer=cfg.kernel_regular, use_bias=True)(deep_fc) deep_fc = layers.BatchNormalization(axis=-1, momentum=0.99)(deep_fc) deep_fc = layers.ReLU()(deep_fc) concat_all = tf.concat([linear, fm, deep_fc], axis=1) concat_all = layers.Dense(1, kernel_regularizer=cfg.kernel_regular, activation=None, use_bias=True)(concat_all) output = tf.nn.sigmoid(concat_all, name="output") print("output:", output) return keras.Model(inputs=[inputs], outputs=[output]) def list_hdfs_tfrecords_file(path): cat = subprocess.Popen(["hdfs", "dfs", "-ls", 
"{}".format(path)], stdout=subprocess.PIPE) parquet_list = [] # print(cat) pattern = re.compile(r"/user/.+part-r-\d+") for line in cat.stdout: if re.search(pattern, str(line)) is not None: # print(str(line)) parquet_list.append(re.search(pattern, str(line)).group(0)) return parquet_list def dateRange(start, end, step=1, format="%Y-%m-%d"): ## dateRange("2017-01-01", "2017-01-03") strptime, strftime = datetime.datetime.strptime, datetime.datetime.strftime days = (strptime(end, format) - strptime(start, format)).days return [strftime(strptime(start, format) + datetime.timedelta(i), format) for i in range(0, days + 1, step)] def get_hdfs_path_list(path, train_data_end_time, days): train_data_start_time = datetime.datetime.strptime(train_data_end_time, '%Y-%m-%d') - datetime.timedelta(days) ## 7天数据训练 train_data_start_time = train_data_start_time.strftime('%Y-%m-%d') date_list = dateRange(train_data_start_time, train_data_end_time) dir_list = [os.path.join(path, t) for t in date_list] print("train parquet dataset dir: ", dir_list) hdfs_path = [] for dir_tmp in dir_list: hdfs_path += list_hdfs_tfrecords_file(dir_tmp) hdfs_path = ["hdfs://difed{}".format(s) for s in hdfs_path] random.shuffle(hdfs_path) train_path = hdfs_path[:-10] ## 训练集 val_path = hdfs_path[-10:] ## 验证集 random.shuffle(train_path) random.shuffle(val_path) return train_path, val_path def get_hdfs_path_list_test(path, train_data_end_time, days): train_data_start_time = datetime.datetime.strptime(train_data_end_time, '%Y-%m-%d') - datetime.timedelta(days) ## 7天数据训练 train_data_start_time = train_data_start_time.strftime('%Y-%m-%d') date_list = dateRange(train_data_start_time, train_data_end_time) dir_list = [os.path.join(path, t) for t in date_list] print("train parquet dataset dir: ", dir_list) hdfs_path = [] for dir_tmp in dir_list: hdfs_path += list_hdfs_tfrecords_file(dir_tmp) hdfs_path = ["hdfs://difed{}".format(s) for s in hdfs_path] random.shuffle(hdfs_path) test_path = hdfs_path[:-10] ## 训练集 val_path 
= hdfs_path[-10:] ## 验证集 return test_path, val_path def dataset_pipeline(hdfs_path_list, epochs): features = { 'user_id': tf.io.FixedLenFeature([1], tf.string), 'goods_id': tf.io.FixedLenFeature([1], tf.string), 'label': tf.io.FixedLenFeature([1], tf.float32), 'context_feature': tf.io.FixedLenFeature([cfg.field_length['context_feature']], tf.float32), 'realtime_features': tf.io.FixedLenFeature([cfg.field_length['realtime_features']], tf.float32), 'features_pair_4': tf.io.FixedLenFeature([cfg.field_length['features_pair_4']], tf.int64), 'goods_sparse_features': tf.io.FixedLenFeature([cfg.field_length['goods_sparse_features']], tf.int64), 'sequence': tf.io.FixedLenFeature([cfg.field_length['sequence']], tf.int64), 'bucket_user_features': tf.io.FixedLenFeature([cfg.field_length['bucket_user_features']], tf.float32), 'bucket_goods_features': tf.io.FixedLenFeature([cfg.field_length['bucket_goods_features']], tf.float32), 'bucket_pair_features': tf.io.FixedLenFeature([cfg.field_length['bucket_pair_features']], tf.float32), 'bucket_user_cspu_features': tf.io.FixedLenFeature([cfg.field_length['bucket_user_cspu_features']], tf.float32), 'bucket_ozid_cspu_features': tf.io.FixedLenFeature([cfg.field_length['bucket_ozid_cspu_features']], tf.float32), 'bucket_user_behavior_features':
# Copyright 2015-2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Flask blueprint streaming MineMeld output feeds in several formats.

Feed indicators live in a Redis sorted set named after the feed (accessed
via the SR client); the JSON-encoded attributes of each indicator live in
the companion hash '<feed>.value'.  Every generate_* function below is a
generator that pages through the sorted set FEED_INTERVAL entries at a
time and yields chunks of the feed in one specific output format.
"""

import cStringIO
import json
import re
from collections import defaultdict
from contextlib import contextmanager

import unicodecsv
from flask import request, jsonify, Response, stream_with_context
from flask.ext.login import current_user
from gevent import sleep
from netaddr import IPRange, IPNetwork, IPSet, AddrFormatError

from .aaa import MMBlueprint
from .cbfeed import CbFeedInfo, CbReport
from .logger import LOG
from .mmrpc import MMMaster
from .redisclient import SR

__all__ = ['BLUEPRINT']

# number of indicators retrieved from Redis per round trip; also the page
# size used by every generator's pagination loop below
FEED_INTERVAL = 100

# leading URL scheme prefix (e.g. "http://", "//") to strip
_PROTOCOL_RE = re.compile(r'^(?:[a-z]+:)*//')
# wildcard token mixed with literal characters; collapsed to a bare '*'
_INVALID_TOKEN_RE = re.compile(
    r'(?:[^\./+=\?&]+\*[^\./+=\?&]*)|(?:[^\./+=\?&]*\*[^\./+=\?&]+)'
)
_IPV4_MASK_RE = re.compile(
    r'^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}(\/[0-9]+)?$'
)
_IPV4_RANGE_RE = re.compile(
    r'^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
    r'-[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$'
)

BLUEPRINT = MMBlueprint('feeds', __name__, url_prefix='/feeds')


def _translate_ip_ranges(indicator, value=None):
    """Expand an IPv4 range indicator ("a.b.c.d-e.f.g.h") into CIDRs.

    Indicators whose stored value says they are not IPv4, and strings
    that do not parse as a range, are returned unchanged inside a list.
    """
    if value is not None and value['type'] != 'IPv4':
        return [indicator]

    try:
        ip_range = IPRange(*indicator.split('-', 1))
    except (AddrFormatError, ValueError, TypeError):
        return [indicator]

    # single-address networks are emitted as a plain IP, not "x.x.x.x/32"
    return [str(c) if c.size != 1 else str(c.network)
            for c in ip_range.cidrs()]


@contextmanager
def _buffer():
    """Yield a cStringIO buffer that is always closed on exit."""
    result = cStringIO.StringIO()
    try:
        yield result
    finally:
        result.close()


def generate_panosurl_feed(feed, start, num, desc, value, **kwargs):
    """Stream the feed as a PAN-OS URL list, one normalized URL per line.

    URLs are lowercased, stripped of their scheme and cleaned of invalid
    wildcard tokens before being emitted.
    """
    zrange = SR.zrevrange if desc else SR.zrange
    if num is None:
        num = (1 << 32) - 1

    cstart = start
    while cstart < (start + num):
        ilist = zrange(feed, cstart,
                       cstart - 1 + min(start + num - cstart, FEED_INTERVAL))

        for i in ilist:
            i = i.lower()
            i = _PROTOCOL_RE.sub('', i)
            i = _INVALID_TOKEN_RE.sub('*', i)
            yield i + '\n'

        # short page => the sorted set is exhausted
        # (was a hard-coded 100; use FEED_INTERVAL like generate_csv_feed)
        if len(ilist) < FEED_INTERVAL:
            break
        cstart += FEED_INTERVAL


def generate_plain_feed(feed, start, num, desc, value, **kwargs):
    """Stream the feed as plain text, one indicator per line.

    If the 'translate_ip_ranges' feed argument is set, IPv4 range
    indicators are expanded into their covering CIDRs.
    """
    zrange = SR.zrevrange if desc else SR.zrange
    if num is None:
        num = (1 << 32) - 1

    translate_ip_ranges = kwargs.pop('translate_ip_ranges', False)

    cstart = start
    while cstart < (start + num):
        ilist = zrange(feed, cstart,
                       cstart - 1 + min(start + num - cstart, FEED_INTERVAL))

        # test the stop condition on the raw Redis page size: range
        # translation below can grow the list past FEED_INTERVAL even on
        # the final (short) page, which previously produced one extra
        # iteration yielding a spurious blank chunk
        last_page = len(ilist) < FEED_INTERVAL

        if translate_ip_ranges:
            ilist = [xi for i in ilist for xi in _translate_ip_ranges(i)]

        yield '\n'.join(ilist) + '\n'

        if last_page:
            break
        cstart += FEED_INTERVAL


def generate_json_feed(feed, start, num, desc, value, **kwargs):
    """Stream the feed as JSON.

    value == 'json'     -> one JSON array of records
    value == 'json-seq' -> one record per line, each prefixed with the
                           RS character '\\x1e' (RFC 7464 JSON sequence)

    Each record has the shape {"indicator": ..., "value": <attrs|null>}.
    """
    zrange = SR.zrevrange if desc else SR.zrange
    if num is None:
        num = (1 << 32) - 1

    translate_ip_ranges = kwargs.pop('translate_ip_ranges', False)

    if value == 'json':
        yield '[\n'

    cstart = start
    firstelement = True
    while cstart < (start + num):
        ilist = zrange(feed, cstart,
                       cstart - 1 + min(start + num - cstart, FEED_INTERVAL))

        # _buffer guarantees the cStringIO buffer is released even if the
        # consumer abandons the generator mid-stream
        with _buffer() as result:
            for indicator in ilist:
                v = SR.hget(feed + '.value', indicator)

                xindicators = [indicator]
                if translate_ip_ranges and '-' in indicator:
                    xindicators = _translate_ip_ranges(
                        indicator,
                        None if v is None else json.loads(v)
                    )

                if v is None:
                    v = 'null'

                for i in xindicators:
                    if value == 'json' and not firstelement:
                        result.write(',\n')
                    if value == 'json-seq':
                        result.write('\x1E')
                    # v is already a JSON document as stored in Redis, so
                    # it is spliced in verbatim
                    result.write('{"indicator":"')
                    result.write(i)
                    result.write('","value":')
                    result.write(v)
                    result.write('}')
                    if value == 'json-seq':
                        result.write('\n')
                    firstelement = False

            yield result.getvalue()

        if len(ilist) < FEED_INTERVAL:
            break
        cstart += FEED_INTERVAL

    if value == 'json':
        yield ']\n'


def generate_csv_feed(feed, start, num, desc, value, **kwargs):
    """Stream the feed as UTF-8 CSV.

    Optional feed arguments:
    f    : field to emit, as 'attribute' or 'attribute|column name';
           may appear multiple times.  Defaults to just 'indicator'.
    h    : 0/1, emit the header row (default: 1)
    ubom : 0/1, emit a UTF-8 BOM before the data (default: 0)
    """
    def _is_atomic_type(fv):
        # values writable as a single CSV cell without JSON encoding
        return (isinstance(fv, unicode) or
                isinstance(fv, str) or
                isinstance(fv, int) or
                isinstance(fv, bool))

    def _format_field_value(fv):
        if _is_atomic_type(fv):
            return fv
        if isinstance(fv, list):
            ok = True
            for fve in fv:
                ok &= _is_atomic_type(fve)
            if ok:
                return ','.join(fv)
        # nested structures fall back to JSON text
        return json.dumps(fv)

    zrange = SR.zrevrange if desc else SR.zrange
    if num is None:
        num = (1 << 32) - 1

    translate_ip_ranges = kwargs.pop('translate_ip_ranges', False)

    # extract attribute names and the CSV column names for them
    columns = []
    fields = []
    for addf in kwargs.pop('f', []):
        if '|' in addf:
            fname, cname = addf.rsplit('|', 1)
        else:
            fname = addf
            cname = addf
        columns.append(cname)
        fields.append(fname)

    # if no fields are specified, only the indicator is generated
    if len(fields) == 0:
        fields = ['indicator']
        columns = ['indicator']

    # check if the header row should be generated
    header = kwargs.pop('h', None)
    if header is None:
        header = True
    else:
        header = int(header[0])

    # check if a UTF-8 BOM should be generated
    ubom = kwargs.pop('ubom', None)
    if ubom is None:
        ubom = False
    else:
        ubom = int(ubom[0])

    cstart = start

    if ubom:
        LOG.debug('BOM')
        yield '\xef\xbb\xbf'

    with _buffer() as current_line:
        w = unicodecsv.DictWriter(
            current_line,
            fieldnames=columns,
            encoding='utf-8'
        )

        if header:
            w.writeheader()
            yield current_line.getvalue()

        while cstart < (start + num):
            ilist = zrange(feed, cstart,
                           cstart - 1 + min(start + num - cstart,
                                            FEED_INTERVAL))

            for indicator in ilist:
                v = SR.hget(feed + '.value', indicator)
                v = None if v is None else json.loads(v)

                xindicators = [indicator]
                if translate_ip_ranges and '-' in indicator:
                    xindicators = _translate_ip_ranges(indicator, v)

                for i in xindicators:
                    fieldvalues = {}
                    for f, c in zip(fields, columns):
                        if f == 'indicator':
                            fieldvalues[c] = i
                            continue
                        if v is not None and f in v:
                            fieldvalues[c] = _format_field_value(v[f])

                    # cStringIO.truncate(0) also rewinds, so the buffer
                    # holds exactly one CSV row per yield
                    current_line.truncate(0)
                    w.writerow(fieldvalues)
                    yield current_line.getvalue()

            if len(ilist) < FEED_INTERVAL:
                break
            cstart += FEED_INTERVAL


def generate_mwg_feed(feed, start, num, desc, value, **kwargs):
    """Stream the feed as a McAfee Web Gateway external list.

    Optional feed arguments:
    t : list type emitted in the 'type=' header (default: 'string');
        't=ip' also forces IPv4 range translation.

    Each entry is '"<indicator>" "<sources>"' with embedded quotes escaped.
    """
    zrange = SR.zrevrange if desc else SR.zrange
    if num is None:
        num = (1 << 32) - 1

    translate_ip_ranges = kwargs.pop('translate_ip_ranges', False)

    type_ = kwargs.get('t', None)
    if type_ is None:
        type_ = 'string'
    else:
        type_ = type_[0]
    translate_ip_ranges |= type_ == 'ip'

    yield 'type={}\n'.format(type_)

    cstart = start
    while cstart < (start + num):
        ilist = zrange(feed, cstart,
                       cstart - 1 + min(start + num - cstart, FEED_INTERVAL))

        for indicator in ilist:
            v = SR.hget(feed + '.value', indicator)
            v = None if v is None else json.loads(v)

            xindicators = [indicator]
            if translate_ip_ranges and '-' in indicator:
                xindicators = _translate_ip_ranges(indicator, v)

            sources = 'from minemeld'
            if v is not None:
                sources = v.get('sources', 'from minemeld')
                if isinstance(sources, list):
                    sources = ','.join(sources)

            for i in xindicators:
                yield '"{}" "{}"\n'.format(
                    i.replace('"', '\\"'),
                    sources.replace('"', '\\"')
                )

        # was a hard-coded 100; use FEED_INTERVAL like generate_csv_feed
        if len(ilist) < FEED_INTERVAL:
            break
        cstart += FEED_INTERVAL


# This formatter implements BlueCoat custom URL format as described at
# https://www.bluecoat.com/documents/download/a366dc73-d455-4859-b92a-c96bd034cb4c/f849f1e3-a906-4ee8-924e-a2061dfe3cdf
# It expects the value 'bc_category' in the indicator. The value can be either a single string or a list of strings.
# Optional feed arguments:
# ca : Indicator's attribute that hosts the BlueCoat category. Defaults to 'bc_category'
# cd : Default BlueCoat category for indicators that do not have 'catattr'. This argument can appear multiple
#      times and it will be handled as a list of categories the indicator belongs to. If not present then
#      indicators without 'catattr' will be discarded.
def generate_bluecoat_feed(feed, start, num, desc, value, **kwargs):
    """Yield the whole feed as BlueCoat 'define category' blocks.

    Groups normalized indicators by category and emits one
    'define category <name> ... end' block per category.
    NOTE: start/num/desc are ignored — the entire sorted set is read.
    """
    zrange = SR.zrange
    ilist = zrange(feed, 0, (1 << 32) - 1)

    bc_dict = defaultdict(list)
    # 'cd': list of default categories for value-less indicators (or None)
    flag_category_default = kwargs.get('cd', None)
    # 'ca': name of the indicator attribute holding the category
    flag_category_attr = kwargs.get('ca', ['bc_category'])[0]

    for i in ilist:
        sleep(0)  # cooperative yield between indicators

        v = SR.hget(feed + '.value', i)
        v = None if v is None else json.loads(v)

        # normalize: lowercase, strip protocol prefix, mask invalid tokens
        i = i.lower()
        i = _PROTOCOL_RE.sub('', i)
        i = _INVALID_TOKEN_RE.sub('*', i)

        if v is None:
            if flag_category_default is None:
                continue  # no value and no default category: drop it
            else:
                bc_cat_list = flag_category_default
        else:
            bc_cat_attr = v.get(flag_category_attr, None)
            if isinstance(bc_cat_attr, list):
                bc_cat_list = bc_cat_attr
            elif isinstance(bc_cat_attr, basestring):  # Python 2 str/unicode
                bc_cat_list = [bc_cat_attr]
            elif flag_category_default is not None:
                bc_cat_list = flag_category_default
            else:
                continue

        for bc_cat in bc_cat_list:
            bc_dict[bc_cat].append(i)

    # emit one category block per collected category
    for key, value in bc_dict.iteritems():
        yield 'define category {}\n'.format(key)
        for ind in value:
            yield ind + '\n'
        yield 'end\n'


def generate_carbon_black(feed, start, num, desc, value, **kwargs):
    """Stream the feed as a Carbon Black JSON feed document.

    Optional feed arguments: 'rt' (report title), 'rs' (report score).
    NOTE(review): definition continues past this chunk — body is truncated.
    """
    zrange = SR.zrange
    ilist = zrange(feed, 0, (1 << 32) - 1)

    # map MineMeld indicator types to Carbon Black IOC types
    mm_to_cb = {"IPv4": "ipv4", "domain": "dns", "md5": "md5"}
    ind_by_type = {"dns": [], "md5": []}

    # Let's stream the information as soon as we have it
    yield "{\n\"feedinfo\": {\n"
    cb_feed_info = CbFeedInfo(name=feed)
    for cb_info_parts in cb_feed_info.iterate():
        yield " " + cb_info_parts
    yield "\n},\n\"reports\": [{"

    report_args = dict()
    report_args["id"] = feed + "_report"
    # NOTE(review): "MieneMeld" typo below is a runtime string default —
    # left untouched here; fixing it would change emitted output.
    report_title = kwargs.get('rt', ["MieneMeld Generated Report"])
    if report_title is not None:
        report_title = report_title[0]
    report_args["title"] = report_title

    report_score = kwargs.get('rs', None)
    if report_score is not None:
        try:
            report_score = int(report_score[0])
        except ValueError:
            report_score = None
    report_args["score"] = report_score

    cb_report = CbReport(**report_args)
    for cb_report_parts in cb_report.iterate():
        yield " " + cb_report_parts
    yield ", \"iocs\": {"
    yield " \"ipv4\": ["
    # Loop through all indicators
    # Only indicators of type IPv4, domain and md5
import os,shutil,sys
try:
    from scenegraphUSD.Utility import queue
    from scenegraphUSD import Logging
    from scenegraphUSD.Setting import *
except ImportError:
    ## Development environment fallback: add the repo checkout to sys.path
    sys.path.append("/home/xukai/Git/git_repo/scenegraphUSD/python")
    from scenegraphUSD.Utility import queue
    from scenegraphUSD import Logging
    from scenegraphUSD.Setting import *
try:
    import maya.cmds as cmds
    import pymel.core as pm
    from pxr import Usd,Sdf,UsdGeom,Kind
except ImportError:
    print "Can`t find maya.cmds"
    print "Can`t find pymel.core"
    print "Can`t find Pxiar`s USD"


class SGUSD(object):
    """Bridge between a Maya DAG hierarchy and a Pixar USD stage.

    Wraps stage creation/saving and translates Maya DAG paths
    ('|a|b|c') into USD prim paths ('/a/b/c').
    NOTE(review): original source had its indentation stripped; block
    structure below was reconstructed from statement semantics — verify
    against the upstream repository.
    """

    def __init__(self):
        # __stage__: the open Usd.Stage (None until __create__ runs)
        # __exist__: whether the target .usda already existed on disk
        # __destination__: path the stage will be saved to
        self.__stage__ = None
        self.__exist__ = None
        self.__destination__=None

    def __create__(self, destination=None,postfix=None, force=False):
        '''
        Create USD destination path; if destination is None, find it in the project path.
        '''
        if not destination:
            destination=self.fetchPathSceneGraphUSDSaving(LCA_USD_SEARCH_PATH,postfix)
        if os.path.isfile(destination):
            if force:
                # overwrite: remove the stale file and start a fresh stage
                os.remove(destination)
                self.__exist__ = False
                stage = Usd.Stage.CreateNew(destination)
                self.__stage__ = stage
            else:
                self.__exist__ = True
                # stage = Usd.Stage.Open(destination)
        else:
            self.__exist__ = False
            stage = Usd.Stage.CreateNew(destination)
            self.__stage__ = stage
        self.__destination__=destination

    def __export__(self):
        # Return the wrapped stage object.
        return self.__stage__

    def __save__(self):
        # Flush the root layer to disk and log the destination.
        self.__stage__.GetRootLayer().Save()
        Logging.scenegraphLogging("LCA USD file Saving to: %s"%self.__destination__)

    def __clear__(self):
        # Drop the stage reference.
        self.__stage__ = None

    def __defineUSDReferencePrim__(self, dagnode_path , reference_path):
        '''
        Create a prim and add the reference on it.
        '''
        scenegraph_path = self.fetchNameSceneGraphPrim(dagnode_path)
        reference_gprim = self.__stage__.DefinePrim(scenegraph_path,'Xform')
        # the actual reference lives on a child prim named "master"
        reference_master_gprim = self.__stage__.DefinePrim(os.path.join(scenegraph_path,"master"))
        reference_master_gprim.GetPrim().GetReferences().AddReference(reference_path)

    def __defineUSDXformPrim__(self, dagnode_path):
        '''
        Create the UsdPrim whose type is Xform.
        '''
        scenegraph_path = self.fetchNameSceneGraphPrim(dagnode_path)
        gprim = Usd.ModelAPI(UsdGeom.Xform.Define(self.__stage__, scenegraph_path))

    def __defineUSDXformPrimByLoops__(self, dagnode_path):
        '''
        Create the UsdPrim typed as Xform, looping to create every ancestor prim one by one.
        '''
        scenegraph_path = self.fetchNameSceneGraphPrim(dagnode_path)
        scenegraph_tree = self.fetchListUSDPrim(dagnode_path)
        for prim_node in scenegraph_tree:
            Usd.ModelAPI(UsdGeom.Xform.Define(self.__stage__, prim_node))

    def __defineUSDDefaultSetting__(self, dagnode_path, rangeTimeCode=None):
        '''
        Set the stage defaults: TimeCode range, DefaultPrim and UpAxis.
        '''
        # set USD default setting
        if rangeTimeCode:
            self.__stage__.SetStartTimeCode(rangeTimeCode[0])
            self.__stage__.SetEndTimeCode(rangeTimeCode[1])
        scenegraph_path = self.fetchNameSceneGraphPrim(dagnode_path)
        root_prim = self.__stage__.GetPrimAtPath( self.fetchNameUsdRoot(scenegraph_path))
        self.__stage__.SetDefaultPrim(root_prim)
        UsdGeom.SetStageUpAxis(self.__stage__, UsdGeom.Tokens.y)

    def __defineStaticUSDPrimTransform__(self,dagnode_path):
        '''
        Define a static prim's xform information from the Maya node.
        '''
        scenegraph_path = self.fetchNameSceneGraphPrim(dagnode_path)
        scenegraph_data = self.fetchDataStaticMayaXform(dagnode_path)
        prim = self.__stage__.GetPrimAtPath( scenegraph_path )
        if scenegraph_data["visibility"]:
            UsdGeom.Imageable(prim).MakeVisible()
        else:
            UsdGeom.Imageable(prim).MakeInvisible()
        rotateXYZ = scenegraph_data["rotateXYZ"]
        UsdGeom.XformCommonAPI(prim).SetRotate(tuple(rotateXYZ),UsdGeom.XformCommonAPI.RotationOrderXYZ)
        scale = scenegraph_data["scale"]
        UsdGeom.XformCommonAPI(prim).SetScale(tuple(scale))
        translate = scenegraph_data["translate"]
        UsdGeom.XformCommonAPI(prim).SetTranslate(tuple(translate))

    def traverse(self, node, stuckAR=False):
        '''
        Traverse all DAG nodes under the input parameter node (BFS via queue).
        '''
        parent_layer = self.staticMayaRelatives(node)
        ensemble = queue(parent_layer)
        all_descendents = [] # the list containing all DAG nodes found
        while True:
            if not ensemble.knocked():
                break
            all_descendents.append(ensemble.front)
            if (stuckAR):
                # if current node is assemblyReference, we never dig into it:
                # we stop at this branch and start the next one
                if ( not pm.PyNode(ensemble.front).type()=="assemblyReference"):
                    if ( self.staticMayaRelatives(ensemble.front) ):
                        ensemble.enExpand( self.staticMayaRelatives(ensemble.front) )
            else:
                # traverse every DAG node regardless of type
                if ( self.staticMayaRelatives(ensemble.front) ):
                    ensemble.enExpand( self.staticMayaRelatives(ensemble.front) )
            ensemble.deQueue()
        return all_descendents

    def useDefaultSearchPath(self, reference_path):
        # Strip the project / search-path prefix, returning a relative path.
        # NOTE(review): returns None when neither prefix matches — confirm
        # callers handle that.
        if reference_path.startswith(LCA_PROJ_PATH):
            return reference_path[len(LCA_PROJ_PATH)+1:]
        if reference_path.startswith(LCA_USD_SEARCH_PATH):
            return reference_path[len(LCA_USD_SEARCH_PATH)+1:]

    def useCurrentFolderPath(self, reference_path):
        # Rebase the path onto the current folder ("./<basename>").
        return os.path.join(".",reference_path.split("/")[-1])

    def useOriginalSceneGraphName(self,dagnode_path):
        # replace Maya namespace colons; in the USD world we use double underscore
        dagnode_path = dagnode_path.replace(":","__")
        return dagnode_path

    def isUSDFileExist(self):
        # NOTE(review): BUG — 'postfix' is undefined here (NameError when
        # called); it is a parameter of __create__, not of this method.
        destination=self.fetchPathSceneGraphUSDSaving(LCA_USD_SEARCH_PATH,postfix)
        if os.path.isfile(destination):
            return True
        else:
            return False

    def isAssembleReference(self,dagnode_path):
        '''
        Check whether this DAG node is an assemblyReference.
        '''
        node = pm.PyNode(dagnode_path)
        if node.type() == "assemblyReference":
            return True
        else:
            return False

    def isDAGNodeBeMoved(self, dagnode_path):
        '''
        Check whether this DAG node has a non-identity transform.
        '''
        # compares the stringified matrix against the identity matrix
        default_matrix = "[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]"
        node = pm.PyNode(dagnode_path)
        visibility = node.attr('visibility').get()
        ## To DO: we should find out if attribute visibility is being animated.
        matrix = node.attr("matrix").get()
        if str(matrix) == default_matrix:
            return False
        else:
            return True

    def fetchListUSDPrim(self,dagnode_path):
        # Build the list of cumulative prim paths: /a, /a/b, /a/b/c ...
        scenegraph_list = self.fetchNameSceneGraphPrim(dagnode_path,combine=False)
        result = []
        current_node = ""
        for node in scenegraph_list:
            current_node = current_node+"/"+node
            result.append( current_node )
        return result

    def fetchPathAssembleReferece(self,node):
        '''
        Input a DAG node whose type is "assemblyReference" and find the USD file path.
        '''
        pynode = pm.PyNode(node)
        definition = pynode.attr("definition").get()
        namespace = pynode.attr("repNamespace").get()
        usd_path = self.fetchPathSceneGraphUSDSaving2(definition)
        return usd_path

    def fetchPathSceneGraphUSDSaving(self, specific_path=None,postfix=None):
        '''
        Find the location the USD file should be written to, from the Maya
        scene path, and pick a published version number for the asset.
        '''
        maya_path = cmds.file(sceneName=True,query=True)
        file_name = maya_path.split("/")[-1][:-3]
        if postfix:
            file_name = maya_path.split("/")[-1][:-3] + postfix
        publish_path = maya_path[:maya_path.find("/publish/") + len("/publish")]
        prefix_proj_path = maya_path[len(LCA_PROJ_PATH)+1:-len(maya_path.split("/")[-1])-len(maya_path.split("/")[-2])-2]
        all_version = sorted(os.listdir(publish_path))
        ## special situation: if the <flo> folder keeps a stereo folder, skip it
        # (loop leaves last_version = last non-stereo entry, i.e. the latest version)
        for i in range(len(all_version)):
            if all_version[i].find(".stereo.") > 0:
                continue
            last_version = all_version[i]
        if specific_path:
            usd_saving_path = os.path.join(specific_path, prefix_proj_path, last_version, "usd", file_name+".usda")
        else:
            usd_saving_path = os.path.join(LCA_PROJ_PATH, prefix_proj_path, last_version, "usd", file_name+".usda")
        return usd_saving_path

    def fetchPathSceneGraphUSDSaving2(self,maya_path, specific_path=None):
        '''
        Given an assemblyReference definition path, find the USD file path.
        '''
        file_name = maya_path.split("/")[-1][:-3]
        publish_path = maya_path[:maya_path.find("/publish/") + len("/publish")]
        all_version = sorted(os.listdir(publish_path))
        ## special situation: if the <flo> folder keeps a stereo folder, skip it
        for i in range(len(all_version)):
            if all_version[i].find(".stereo.") > 0:
                continue
            last_version = all_version[i]
        if specific_path:
            usd_saving_path = os.path.join(specific_path, publish_path, last_version, "usd", file_name+".usda")
        else:
            usd_saving_path = os.path.join(LCA_PROJ_PATH, publish_path, last_version, "usd", file_name+".usda")
        return usd_saving_path

    def fetchNameUsdRoot(self,scenegraph_path):
        # "/a/b/c" -> "/a"
        return "/"+scenegraph_path.split("/")[1]

    def fetchNameMayaRoot(self,dagnode_path):
        # "|a|b|c" -> "|a"
        return "|"+dagnode_path.split("|")[1]

    def fetchNameSceneGraphPrim(self,dagnode_path,combine=True):
        '''
        Generate the scenegraph tree (similar to an XML path) from a DAG path.
        Returns a "/"-joined string when combine is True, else the list of parts.
        '''
        result = []
        next_path = dagnode_path
        while True:
            current_path = next_path
            current_path = self.mathCurrentNode(current_path)
            ##################################
            # We use the original scenegraph node name at this place,
            # but maybe we should change this to adapt to our pipeline
            current_path = self.useOriginalSceneGraphName(current_path)
            # insert element at the beginning of list
            result.insert(0,current_path)
            next_path = self.mathPreviousNode(next_path)
            # if next_path is empty, break the loop
            if not next_path:
                break
        if combine:
            final_path = ""
            for element in result:
                final_path += ("/" + element)
            result = final_path
        return result

    def fetchDataStaticMayaXform(self,dagnode_path):
        # Snapshot the node's current transform into a plain dict.
        node = pm.PyNode(dagnode_path)
        # the struct result = {{frame}:{{"visibility":bool,"translation":[],"rotation":[],"scale":[]}}}
        result = {}
        visibility = node.attr('visibility').get()
        matrix = node.attr("matrix").get()
        translation = node.attr("translate").get()
        rotation = node.attr("rotate").get()
        scale = node.attr("scale").get()
        result = {}
        result["visibility"] = visibility # a bool value
        result["rotateXYZ"] = rotation # a list of x y z
        result["scale"] = scale # a list of x y z
        result["translate"] = translation # a list of x y z
        result["xformOpOrder"] = ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"]
        return result

    def fetchDataDynamicMayaXform(self,dagnode_path,rangeTimeCode,motionSample):
        '''
        # arg "rangeTimeCode" should be a tuple like (1001,1010)
        # arg "motionSample" should be a tuple like (-0.15,0,0.15)
        Sample the node's transform at every frame+offset; returns
        {timesample: {"visibility", "rotateXYZ", "scale", "translate", "xformOpOrder"}}.
        '''
        node = pm.PyNode(dagnode_path)
        # the struct result = {{frame}:{{"visibility":bool,"translation":[],"rotation":[],"scale":[]}}}
        result = {}
        for current_frame in range(rangeTimeCode[0],rangeTimeCode[1]+1):
            for current_sample in motionSample:
                # move the timeline so attribute reads reflect this sample
                cmds.currentTime( current_frame+current_sample )
                current_timesamples = current_frame+current_sample
                visibility = node.attr('visibility').get()
                matrix = node.attr("matrix").get()
                translation = node.attr("translate").get()
                rotation = node.attr("rotate").get()
                scale = node.attr("scale").get()
                temporary = {}
                temporary["visibility"] = visibility # a bool value
                temporary["rotateXYZ"] = rotation # a list of x y z
                temporary["scale"] = scale # a list of x y z
                temporary["translate"] = translation # a list of x y z
                temporary["xformOpOrder"] = ["xformOp:translate", "xformOp:rotateXYZ", "xformOp:scale"]
                result[current_timesamples] = temporary
        # restore the timeline to the start frame
        cmds.currentTime( rangeTimeCode[0] )
        return result

    @staticmethod
    def mathPreviousNode(asset_name):
        '''
        The asset name should look like this: u'|assets|prp|brazier_tower_above:master'
        Returns the parent DAG path (everything before the last "|segment").
        '''
        all_pieces = asset_name.split("|")
        last_one = all_pieces[-1]
        end_point = len(last_one) + 1
        return asset_name[:-end_point]

    @staticmethod
    def mathCurrentNode(asset_name):
        '''
        The asset name should look like this: u'|assets|prp|brazier_tower_above:master'
        Returns the leaf segment (after the last "|").
        '''
        all_pieces = asset_name.split("|")
        last_one = all_pieces[-1]
        return last_one

    @staticmethod
    def staticMayaRelatives(node):
        # listRelatives returns None for leaves and raises TypeError for
        # invalid nodes; normalize both to an empty list.
        try:
            result = cmds.listRelatives(node,fullPath=True)
            if not result:
                result = []
        except TypeError:
            result = []
        return result


# NOTE(review): next class definition (SGUSDExporter) continues past this chunk.
class
SGUSDExporter(SGUSD): def __init__(self): super(SGUSDExporter, self).__init__() self.sublayers = [] def __useDebugSearchPath__(self, input_path): output_path = input_path.replace(LCA_PROJ_PATH,LCA_USD_SEARCH_PATH) return output_path def __getPathAssemblyRefUSDCache__(self,dag_node): pass def __getPathMayaRefUSDCache__(self,dag_node): namespace = cmds.referenceQuery(dag_node,ns=1)[1:] # filename = cmds.referenceQuery(dag_node,filename=1).split("/")[-1].replace(".ma",".usda").replace(".mb",".usda") filename = cmds.referenceQuery(dag_node,filename=1).split("/")[-1].replace(".ma",".xml").replace(".mb",".xml") file_path = cmds.file(sceneName=True,q=True) # file_path = self.__useDebugSearchPath__(file_path) current_path = file_path[:-len(file_path.split("/")[-1])-1] shot_path = file_path[:file_path.find("/publish/")][:-4] cfx_path = os.path.join(shot_path,"cfx","publish") ani_path = os.path.join(shot_path,"ani","publish") flo_path = os.path.join(shot_path,"flo","publish") publish_list = [{"path":cfx_path,"key":".cloth."}, {"path":ani_path,"key":".animation."}, {"path":flo_path,"key":".final_layout."}] final_path = None for publish_node in publish_list: path = publish_node["path"] key = publish_node["key"] if not os.path.isdir(path): continue all_version = sorted(os.listdir(path)) last_version = None for version in all_version: if not version.find(key) > 0: continue if last_version: if int(version[-3:]) > int(last_version[-3:]): last_version = version else: last_version = version if last_version: # final_path = os.path.join(path,last_version, "cache", namespace, "usd", filename) final_path = os.path.join(path,last_version, "cache", namespace, "geo", filename) if os.path.isfile(final_path): break else: continue final_path = self.__useDebugSearchPath__(final_path) return final_path def __getPathMayaRefUSDCache2__(self,dag_node): namespace = cmds.referenceQuery(dag_node,ns=1)[1:] file_path = cmds.file(sceneName=True,q=True) shot_path = 
file_path[:file_path.find("/publish/")][:-4].split("/")[-1] import production.pipeline.lcProdProj as clpp cp=clpp.lcProdProj() cp.setProj('pws') # print cp.getAniAsset('f40140', 'brazier_tower_above') cache_path = cp.getAniAsset(shot_path, namespace) cache_path = cache_path.replace("/geo/","/usd/").replace(".xml",".usda") cache_path = self.__useDebugSearchPath__(cache_path) return cache_path def __getDataFrameRange__(self):
    # NOTE(review): tail of a plotting function whose definition starts
    # before this chunk — writes the figure next to `savepath`.
    plt.savefig(savepath+".jpg")


def sample_data_from_total(total_data,total_label,sample_rate=0.5):
    """Randomly sample a fraction of (data, label) rows without replacement."""
    # total_data : numpy ndarray
    # total_label: numpy ndarray
    idx = list(range(total_data.shape[0]))
    sample_len=int(len(idx) * sample_rate)
    sample_idx=np.random.choice(idx,size=sample_len,replace=False)
    return total_data[sample_idx], total_label[sample_idx]


def sample_data_2_tnse_plot(total_data,total_label,sample_rate=0.5,title='t-SNE embedding',savepath=""):
    """Subsample the dataset and draw its t-SNE embedding plot."""
    sample_data,sample_label = sample_data_from_total(total_data,total_label,sample_rate=sample_rate)
    tnse_plot_embedding(sample_data,sample_label,title=title,savepath=savepath)


# ========================= optuna ========================== #
def register_search_space_by_parameters(parameters,prefix = "search_",postfix = "_list"):
    """Build the optuna search space from flag/list parameter pairs.

    For every truthy "search_<name>" flag in `parameters`, registers
    search_space[<name>] = parameters["<name>_list"] (the candidate values).

    Args:
        parameters: dict mixing plain values, "search_*" boolean flags and
            "*_list" candidate lists.
        prefix: flag-key prefix marking a searchable parameter.
        postfix: suffix of the key holding the candidate list.
    """
    search_space = {}
    for key in parameters.keys():
        if key.startswith(prefix) and parameters[key]:
            search_space[key[len(prefix):]] = parameters[key[len(prefix):]+postfix]
    return search_space


def get_parameters_by_trial_or_not(parameters,trial,prefix = "search_",postfix = "_list"):
    """Resolve parameters: searched keys via the optuna trial, others as-is."""
    return_dict = {}
    search_space = register_search_space_by_parameters(parameters,prefix,postfix)
    # searched keys: ask the trial to suggest a value from the candidates
    for key in search_space:
        return_dict[key] = trial_auto_generator(search_space[key],trial,key)
    # everything else (flags and plain values) passes through unchanged
    for key in parameters.keys():
        if key not in return_dict:
            return_dict[key] = parameters[key]
    return return_dict


def trial_auto_generator(parameter_list,trial,name):
    """Pick the optuna suggest_* call from the candidate list's element type.

    int/float lists are searched as a [min, max] range; anything else is
    treated as a categorical choice.
    """
    if type(parameter_list[0]) == int:
        return trial.suggest_int(name,min(parameter_list),max(parameter_list))
    elif type(parameter_list[0]) == float:
        return trial.suggest_float(name,min(parameter_list),max(parameter_list))
    else:
        return trial.suggest_categorical(name,parameter_list)


def prity_print_dict(dict_):
    """Format a dict as aligned ">>>> key: value" lines for logging."""
    res = []
    content = ""
    for i in dict_.keys():
        content = ">>>> {:50}: \t {}".format(i , str(dict_[i]))
        res.append(content)
    return "\n".join(res)
# ================================================================ # debug = True if debug: path = "output/datasets_phase1v2/predict_modality/openproblems_bmmc_cite_phase1v2_mod2/" logging.basicConfig(level=logging.INFO,filename="./log/log.log",filemode='w') else: path = sys.argv[1] logging.basicConfig(level=logging.INFO) pathlist = os.listdir(path) if "sample_data" not in path: train_mod1_path = path + [i for i in pathlist if "output_train_mod1" in i ][0] train_mod2_path = path + [i for i in pathlist if "output_train_mod2" in i][0] test_mod1_path = path + [i for i in pathlist if "output_test_mod1" in i][0] test_mod2_path = path + [i for i in pathlist if "output_test_mod2" in i][0] else: train_mod1_path = path + [i for i in pathlist if "train_mod1" in i ][0] train_mod2_path = path + [i for i in pathlist if "train_mod2" in i][0] test_mod1_path = path + [i for i in pathlist if "test_mod1" in i][0] test_mod2_path = path + [i for i in pathlist if "test_mod2" in i][0] # test_mod1_path = path + [i for i in pathlist if "output_test_mod1" in i][0] output_path_dir = "output/predictions/predict_modality/"+path.split("/")[-2]+"/" if not debug: os.mkdir(output_path_dir) output_path = output_path_dir +path.split("/")[-2]+ ".output.h5ad" par = { 'input_train_mod1': train_mod1_path, 'input_train_mod2': train_mod2_path, 'input_test_mod1': test_mod1_path, 'input_test_mod2' : test_mod2_path, 'distance_method': 'minkowski', 'output': output_path, 'n_pcs': 50, } method_id = "python_starter_kit" logging.info('Reading `h5ad` files...') input_train_mod1 = ad.read_h5ad(par['input_train_mod1']) input_train_mod2 = ad.read_h5ad(par['input_train_mod2']) input_test_mod1 = ad.read_h5ad(par['input_test_mod1']) input_test_mod2 = ad.read_h5ad(par['input_test_mod2']) # TODO: implement own method double_ae_loss_weight_list = [(0.5,0.5,1.0), (0.4,0.6,1.0), (0.3,0.7,1.0), (0.6,0.4,1.0), (0.7, 0.3, 1.0), (0.8,0.2,1.0), (0.9, 0.1, 1.0)] # double_ae_loss_weight_list = [(0.7, 0.3, 1.0), (0.8,0.2,1.0), 
(0.9, 0.1, 1.0)] parameters = { "search_double_ae_loss_weight":True, "double_ae_loss_weight_list":list(range(len(double_ae_loss_weight_list))), "double_ae_loss_weight":0, } search_space = register_search_space_by_parameters(parameters) logging.info("\nsearch_space:\n"+prity_print_dict(search_space)+"\n\n") unique_save_tag = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) # 标识符号 不然不知道是那个文件产生的 logging.info("\n\n\n ===========unique save tag {}============ \n\n\n".format(unique_save_tag)) def objective(trial): # hyperparameters parameters_with_trail = get_parameters_by_trial_or_not(parameters,trial) num_epochs = 50 # train double autoencoder mlp_fit_epochs = 30 # train mlp ae_learning_rate = 0.001 mlp_learning_rate= 0.001 mod1ae_learning_rate=0.001 test_ae_path="test_ae_model.pth" latent_space_dim= 100 mlp_hidden_dim = 50 latent_dim = 50 batch_size = 320 val_split_rate = 0.2 num_mod1_epochs = 50 # test train batch = batch_size double_ae_loss_weight = double_ae_loss_weight_list[parameters_with_trail['double_ae_loss_weight']] # get description of ds ishape,oshape= input_train_mod1.X.shape[1],input_train_mod2.X.shape[1] # model path def get_score(x,ys): matrix = (x-ys).abs().pow(2).mean().sqrt().item() return matrix def total_train(): logging.info("\n\n\n ============== train ============== \n\n\n") model_ae_path = '{}-{}ae.pth'.format(ishape,oshape) model_mlp_path = '{}-{}mlp.pth'.format(ishape,oshape) mod_obs = input_train_mod1.obs.batch.values.tolist() # get train input batch information batch_dim = len(set(mod_obs)) # get length mod_obs_dict = {v:k for k,v in enumerate(set(mod_obs))} # map it into number logging.info("mod_obs_dict: "+str(mod_obs_dict)) mod_obs = np.array([mod_obs_dict[i] for i in mod_obs]) # test_obs = input_test_mod1.obs.batch.values.tolist() train_inputs = torch.from_numpy(np.array(input_train_mod1.X.toarray())) train_targets= torch.from_numpy(np.array(input_train_mod2.X.toarray())) 
sample_data_2_tnse_plot(train_inputs.numpy(),mod_obs,sample_rate=0.2, title="source data sample 0.2 t-sne embedding", savepath="log/souce_mod1_data_tsne") sample_data_2_tnse_plot(train_targets.numpy(),mod_obs,sample_rate=0.2, title="mod1 source data sample 0.2 t-sne embedding", savepath="log/souce_mod2_data_tsne") # split train val idx = list(range(train_inputs.shape[0])) val_len = int(len(idx) * val_split_rate) val_idx = np.random.choice(idx,size=val_len,replace=False) train_idx = np.array([i for i in idx if i not in val_idx]) train_obs = mod_obs train_ds = pairDataset(train_inputs[train_idx], train_targets[train_idx], obs=train_obs[train_idx]) val_ds = pairDataset(train_inputs[val_idx], train_targets[val_idx],obs=train_obs[val_idx]) train_dl = DataLoader(train_ds, batch_size, shuffle=True,drop_last=False) val_dl = DataLoader(val_ds, batch_size, shuffle=False,drop_last=False) # get model and lossfn model = AutoEncoder(ishape,oshape,batch_dim,latent_space_dim) loss_fn = double_autoencoder_loss logging.info('Start to build the model') opt = torch.optim.Adam(params=model.parameters(),lr=ae_learning_rate) model.cuda() def train(epoch): model.train() step = 0 for q,y in train_dl: # Generate predictions q[0] = q[0].cuda() q[1] = q[1].cuda() y = F.one_hot(y,batch_dim).cuda() x = [q[0],q[1],y] pred = model(x) eloss = loss_fn(pred, x, weights=double_ae_loss_weight) loss = eloss if step % 20 == 1: logging.info("epoch {}; step: {}; loss {}: ".format(epoch,step,loss.item())) step += 1 opt.zero_grad() loss.backward() opt.step() def validation(): model.eval() step = 0 total_loss = [] logging.info("validation phrase ") for q,y in val_dl: # Generate predictions q[0] = q[0].cuda() q[1] = q[1].cuda() y = F.one_hot(y,batch_dim).cuda() x = [q[0],q[1],y] pred = model(x) eloss = loss_fn(pred, x, weights=double_ae_loss_weight) loss = eloss total_loss.append(loss.item()) step += 1 mean_loss = sum(total_loss) / len(total_loss) logging.info("validation mean loss: {}".format(mean_loss)) 
return mean_loss def fit(epoches,early_stop=True): mean_loss = 99999999999999999999999 for epoch in range(epoches): train(epoch) score = validation() if score < mean_loss: mean_loss = score torch.save(model.state_dict(),model_ae_path) # save cpu result logging.info('Running Auto encoder prediction...') fit(num_epochs,False) model.load_state_dict(torch.load(model_ae_path)) model.cpu() model.eval() torch.save(model.state_dict(),model_ae_path) # save cpu result # step 2 train mlp model.load_state_dict(torch.load(model_ae_path)) model.cuda() model.eval() mlp = LatentMLP(latent_space_dim,mlp_hidden_dim) mlp.cuda() mlp_loss_fn = mlp_loss logging.info('Start to build the model') mlp_opt = torch.optim.Adam(params=mlp.parameters(),lr=mlp_learning_rate) def collect_ae_latent_representation(): total_predict = [] total_mod2_predict = [] total_label = [] for q,y in train_dl: q[0] = q[0].cuda() q[1] = q[1].cuda() total_label.append(y) # collect y = F.one_hot(y,batch_dim).cuda() x = [q[0],q[1],y] # construct model input with torch.no_grad(): pred = model.get_encoder(x) # pred : [encode1,encoder2] total_predict.append(pred[0].cpu()) total_mod2_predict.append(pred[1].cpu()) total_predict = torch.cat(total_predict,dim=0) total_mod2_predict = torch.cat(total_mod2_predict,dim=0) total_label = torch.cat(total_label, dim=0) return total_predict,total_mod2_predict,total_label def latent_representation_sample_tsne_plot(): total_predict,total_mod2_predict,total_label = collect_ae_latent_representation() sample_data_2_tnse_plot(total_predict.numpy(),total_label.numpy(), sample_rate=0.2, title="ae embedding (sample rate 0.2) tsne embedding", savepath="./log/{}ae_embedding_mod1_tsne".format(unique_save_tag)) sample_data_2_tnse_plot(total_mod2_predict.numpy(),total_label.numpy(), sample_rate=0.2, title="ae embedding (sample rate 0.2) tsne embedding", savepath="./log/{}ae_embedding_mod2_tsne".format(unique_save_tag)) latent_representation_sample_tsne_plot() def mlp_train(epoch): mlp.train() 
step = 0 for q,y in train_dl: # Generate predictions q[0] = q[0].cuda() q[1] = q[1].cuda() y = F.one_hot(y,batch_dim).cuda() x = [q[0],q[1],y] # construct model input with torch.no_grad(): pred = model.get_encoder(x) # pred : [encode1,encoder2] mlp_pred = mlp(pred[0]) eloss = mlp_loss_fn(mlp_pred,pred[1]) loss = eloss if step % 20 == 1: logging.info(" mlp epoch {}; step: {}; loss {}: ".format(epoch,step,loss.item())) step += 1 mlp_opt.zero_grad() loss.backward() mlp_opt.step() def mlp_validation(): mlp.eval() step = 0 total_loss = [] logging.info("mlp validation phase") for q,y in val_dl: # Generate predictions q[0] = q[0].cuda() q[1] = q[1].cuda() y = F.one_hot(y,batch_dim).cuda() x = [q[0],q[1],y] # construct model input with torch.no_grad(): pred = model.get_encoder(x) # pred : [encode1,encoder2] mlp_pred = mlp(pred[0]) eloss = mlp_loss_fn(mlp_pred,pred[1]) loss = eloss total_loss.append(loss.item()) mean_loss = sum(total_loss) / len(total_loss) logging.info("mlp validation mean loss : {}".format(mean_loss)) return mean_loss def mlp_fit(epochs): mean_loss_pre = 9999999999999999999999999999 for epoch in range(epochs): mlp_train(epoch) score = mlp_validation() if score < mean_loss_pre: torch.save(mlp.state_dict(),model_mlp_path) mlp_fit(mlp_fit_epochs) mlp.load_state_dict(torch.load(model_mlp_path)) mlp.cpu() mlp.eval() torch.save(mlp.state_dict(),model_mlp_path) # save cpu result def total_test(): logging.info("\n\n\n ============== test ============== \n\n\n") mod_obs = input_test_mod1.obs.batch.values.tolist() batch_dim = len(set(mod_obs)) mod_obs_dict = {v:k for k,v in enumerate(set(mod_obs))} logging.info("test mod batch dict "+str(mod_obs_dict)) mod_obs = np.array([mod_obs_dict[i] for i in mod_obs]) # test_obs = input_test_mod1.obs.batch.values.tolist() model_ae_path = '{}-{}ae.pth'.format(ishape,oshape) model_mlp_path = '{}-{}mlp.pth'.format(ishape,oshape) # idx = range(input_train_mod1.X.shape[0]) # val_len=int(len(idx) * 0.2) # 
val_idx=np.random.choice(idx,size=val_len,replace=False) # train_idx=[ i for i in idx if i not in val_idx] # test phase model apply train_obs = mod_obs train_inputs = torch.from_numpy(np.array(input_test_mod1.X.toarray())) # 这里是为了方便 就没有改变量名 test_len = train_inputs.shape[0] train_targets= torch.from_numpy(np.array(input_test_mod2.X.toarray())) idx = list(range(train_inputs.shape[0])) val_len = int(len(idx) * val_split_rate) val_idx = np.random.choice(idx,size=val_len,replace=False) train_idx = np.array([i for i in idx if i not in val_idx]) # train_ds = pairDataset(train_inputs, obs=train_obs) # train_dl = DataLoader(train_ds, batch_size, shuffle=True,drop_last=False) train_ds = pairDataset(train_inputs[train_idx], obs=train_obs[train_idx]) val_ds = pairDataset(train_inputs[val_idx], obs=train_obs[val_idx]) train_dl = DataLoader(train_ds, batch_size, shuffle=True,drop_last=False) val_dl = DataLoader(val_ds, batch_size, shuffle=False,drop_last=False) mod1ae = Mod1AutoEncoderFinetune(ishape,batch_dim,latent_space_dim) # mod1 autoencoder for mod1 2 mod1 # load parameters ae_static_dict = torch.load(model_ae_path) parameter_modify(ae_static_dict) # set parameters not grad Mod1AutoEncoderFinetuneParameterSetting(mod1ae,ae_static_dict) # update parameters which need grad mod1ae_opt = torch.optim.Adam(filter(lambda p: p.requires_grad, mod1ae.parameters()),lr=mod1ae_learning_rate) mod1ae_lossfn = rec_loss def total_test_train(epoch): # train mod1 2 mod1 to get batch effect result mod1ae.train() step = 0 for p,y in train_dl: p = p[0] y = F.one_hot(y,batch_dim) x = [p,y] # construct mod1 result pred = mod1ae(x) loss = mod1ae_lossfn(pred,p) if step % 1 == 0: logging.info("epoch {}; step: {}; loss {}: ".format(epoch,step,loss.item())) step += 1 mod1ae_opt.zero_grad() loss.backward() mod1ae_opt.step() def total_test_validation(): logging.info("test validation phrase") total_loss = [] for p,y
- **TotalDataInMegaBytes** *(integer) --* Describes the total amount of data to be transferred in megabytes. - **DataTransferredInMegaBytes** *(integer) --* Describes the total amount of data that has been transferred in MB's.
- **ManualSnapshotRetentionPeriod** *(integer) --* The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely. The value must be either -1 or an integer between 1 and 3,653.
To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide. If this option is ``true`` , enhanced VPC routing is enabled. Default: false - **IamRoles** *(list) --* A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services. - *(dict) --* An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services. - **IamRoleArn** *(string) --* The Amazon Resource Name (ARN) of the IAM role, for example, ``arn:aws:iam::123456789012:role/RedshiftCopyUnload`` . - **ApplyStatus** *(string) --* A value that describes the status of the IAM role's association with an Amazon Redshift cluster. The following are possible statuses and descriptions. * ``in-sync`` : The role is available for use by the cluster. * ``adding`` : The role is in the process of being associated with the cluster. * ``removing`` : The role is in the process of being disassociated with the cluster. - **PendingActions** *(list) --* Cluster operations that are waiting to be started. - *(string) --* - **MaintenanceTrackName** *(string) --* The name of the maintenance track for the cluster. - **ElasticResizeNumberOfNodeOptions** *(string) --* The number of nodes that you can resize the cluster to with the elastic resize method. - **DeferredMaintenanceWindows** *(list) --* Describes a group of ``DeferredMaintenanceWindow`` objects. - *(dict) --* Describes a deferred maintenance window - **DeferMaintenanceIdentifier** *(string) --* A unique identifier for the maintenance window. - **DeferMaintenanceStartTime** *(datetime) --* A timestamp for the beginning of the time period when we defer maintenance. 
- **DeferMaintenanceEndTime** *(datetime) --* A timestamp for the end of the time period when we defer maintenance. - **SnapshotScheduleIdentifier** *(string) --* A unique identifier for the cluster snapshot schedule. - **SnapshotScheduleState** *(string) --* The current state of the cluster snapshot schedule. - **ResizeInfo** *(dict) --* Returns the following: * AllowCancelResize: a boolean value indicating if the resize operation can be cancelled. * ResizeType: Returns ClassicResize - **ResizeType** *(string) --* Returns the value ``ClassicResize`` . - **AllowCancelResize** *(boolean) --* A boolean value indicating if the resize operation can be cancelled. :type ClusterIdentifier: string :param ClusterIdentifier: **[REQUIRED]** The identifier of the cluster to be deleted. Constraints: * Must contain lowercase characters. * Must contain from 1 to 63 alphanumeric characters or hyphens. * First character must be a letter. * Cannot end with a hyphen or contain two consecutive hyphens. :type SkipFinalClusterSnapshot: boolean :param SkipFinalClusterSnapshot: Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If ``true`` , a final cluster snapshot is not created. If ``false`` , a final cluster snapshot is created before the cluster is deleted. .. note:: The *FinalClusterSnapshotIdentifier* parameter must be specified if *SkipFinalClusterSnapshot* is ``false`` . Default: ``false`` :type FinalClusterSnapshotIdentifier: string :param FinalClusterSnapshotIdentifier: The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, *SkipFinalClusterSnapshot* must be ``false`` . Constraints: * Must be 1 to 255 alphanumeric characters. * First character must be a letter. * Cannot end with a hyphen or contain two consecutive hyphens. 
:type FinalClusterSnapshotRetentionPeriod: integer :param FinalClusterSnapshotRetentionPeriod: The number of days that a manual snapshot is retained. If the value is -1, the manual snapshot is retained indefinitely. The value must be either -1 or an integer between 1 and 3,653. The default value is -1. :rtype: dict :returns: """ pass def delete_cluster_parameter_group(self, ParameterGroupName: str): """ Deletes a specified Amazon Redshift parameter group. .. note:: You cannot delete a parameter group if it is associated with a cluster. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DeleteClusterParameterGroup>`_ **Request Syntax** :: response = client.delete_cluster_parameter_group( ParameterGroupName='string' ) :type ParameterGroupName:
<reponame>vjFaLk/shipstation-client list_tags = '[{"color": "#FFFFFF", "name": "Amazon Prime Order", "tagId": 12345}]' list_marketplaces = """ [ { "canConfirmShipments":true, "canRefresh":true, "marketplaceId":23, "name":"3dcart", "supportsCustomMappings":true, "supportsCustomStatuses":false }, { "canConfirmShipments":true, "canRefresh":true, "marketplaceId":115, "name":"Acumatica", "supportsCustomMappings":false, "supportsCustomStatuses":true } ] """ list_stores = """ [ { "accountName":"<KEY>", "active":true, "autoRefresh":true, "companyName":"411Seasons", "createDate":"2017-03-01T13:57:49.033", "email":"", "integrationUrl":"", "lastRefreshAttempt":"2020-06-23T11:56:51.627", "marketplaceId":2, "marketplaceName":"Amazon", "modifyDate":"2017-05-15T07:28:54.357", "phone":"", "publicEmail":"", "refreshDate":"2020-06-23T11:56:51.377", "statusMappings":"", "storeId": 34567, "storeName":"Mexico Amazon Store", "website":"" }, { "accountName":"DEF123456789", "active":true, "autoRefresh":true, "companyName":"", "createDate":"2017-01-25T14:38:33.793", "email":"", "integrationUrl":"", "lastRefreshAttempt":"2020-06-23T07:12:32.217", "marketplaceId":2, "marketplaceName":"Amazon", "modifyDate":"2017-01-25T14:39:25.003", "phone":"", "publicEmail":"", "refreshDate":"2020-06-23T07:12:31.95", "statusMappings":"", "storeId":23456, "storeName":"CA Amazon Store", "website":"" }, { "accountName":"GHI123456789", "active":true, "autoRefresh":true, "companyName":"", "createDate":"2017-01-23T13:55:39.643", "email":"", "integrationUrl":"", "lastRefreshAttempt":"2020-06-23T11:43:20.947", "marketplaceId":2, "marketplaceName":"Amazon", "modifyDate":"2018-06-05T06:48:42.98", "phone":"", "publicEmail":"", "refreshDate":"2020-06-23T11:43:20.713", "statusMappings":"", "storeId": 12345, "storeName":"US Amazon Store", "website":"" } ] """ get_store = """ { "accountName":"GHI123456789", "active":true, "autoRefresh":true, "companyName":"", "createDate":"2017-01-23T13:55:39.643", "email":"", 
"integrationUrl":"", "lastRefreshAttempt":"2020-06-23T11:43:20.947", "marketplaceId":2, "marketplaceName":"Amazon", "modifyDate":"2018-06-05T06:48:42.98", "phone":"", "publicEmail":"", "refreshDate":"2020-06-23T11:43:20.713", "statusMappings":"", "storeId": 12345, "storeName":"US Amazon Store", "website":"" } """ list_users = """ [ { "name": "Merchandising", "userId": "57f4e49d-777e-4708-8b26-fd836fc975e6", "userName": "<EMAIL>" }, { "name": "Marketing", "userId": "0dbc3f54-5cd4-4054-b2b5-92427e18d6cd", "userName": "<EMAIL>" } ] """ list_warehouses = """ [ { "createDate":"2020-04-07T12:03:46.4000000", "extInventoryIdentity":"", "isDefault":false, "originAddress": { "addressVerified":"", "city":"Anywhere", "company":"", "country":"US", "name":"Warehouse 1", "phone":"18005551234", "postalCode":"12345", "residential":false, "state":"WA", "street1":"123 Long St", "street2":"Unit 4", "street3":"" }, "registerFedexMeter":"", "returnAddress": { "addressVerified":"", "city":"Anywhere", "company":"Test Company", "country":"US", "name":"", "phone":"18005553214", "postalCode":"23456", "residential":"", "state":"ID", "street1":"0 Short St", "street2":"", "street3":"" }, "sellerIntegrationId":"", "warehouseId":456789, "warehouseName":"Test Company" }, { "createDate":"2015-10-23T10:03:36.3130000", "extInventoryIdentity":"", "isDefault":false, "originAddress": { "addressVerified":"", "city":"Big City", "company":"Another LLC", "country":"US", "name":"Virtual Warehouse", "phone":"5555555555", "postalCode":"98765", "residential":false, "state":"OR", "street1":"150000000 900th Street SE", "street2":"Suite C", "street3":"" }, "registerFedexMeter":"", "returnAddress": { "addressVerified":"", "city":"Big City", "company":"Another LLC", "country":"US", "name":"Virtual Warehouse", "phone":"5555555555", "postalCode":"98765", "residential":false, "state":"OR", "street1":"150000000 900th Street SE", "street2":"Suite C", "street3":"" }, "sellerIntegrationId":"", "warehouseId":123456, 
"warehouseName":"Another LLC Warehouse" } ] """ list_webhooks = """{"webhooks": []}""" list_carriers = """ [ { "accountNumber": "abc123456789", "balance": 15.01, "code": "stamps_com", "name": "Stamps.com", "nickname": "", "primary": true, "requiresFundedAccount": true, "shippingProviderId": 35725 }, { "accountNumber": "36598-7894", "balance": 0.21, "code": "ups", "name": "UPS", "nickname": "UPS", "primary": true, "requiresFundedAccount": false, "shippingProviderId": 57765 } ] """ list_services = """ [ { "carrierCode": "stamps_com", "code": "usps_first_class_mail", "domestic": true, "international": false, "name": "USPS First Class Mail" }, { "carrierCode": "stamps_com", "code": "usps_media_mail", "domestic": true, "international": false, "name": "USPS Media Mail" }, { "carrierCode": "stamps_com", "code": "usps_parcel_select", "domestic": true, "international": false, "name": "USPS Parcel Select Ground" } ] """ list_packages = """ [ { "carrierCode": "stamps_com", "code": "package", "domestic": true, "international": true, "name": "Package" }, { "carrierCode": "stamps_com", "code": "flat_rate_envelope", "domestic": true, "international": true, "name": "Flat Rate Envelope" } ] """ list_customers = """ {"customers": [ { "addressVerified": "Verified", "city": "BIG TOWN", "company": "", "countryCode": "US", "createDate": "2017-12-16T18:49:16.0070000", "customerId": 123456789, "email": "<EMAIL>", "marketplaceUsernames": [ { "createDate": "2017-12-16T18:49:16.0200000", "customerId": 123456789, "customerUserId": 123456789, "marketplace": "Amazon", "marketplaceId": 2, "modifyDate": "2017-12-16T18:49:16.0200000", "username": "<EMAIL>" } ], "modifyDate": "2017-12-16T18:49:16.0070000", "name": "<NAME>", "phone": "", "postalCode": "99999-1234", "state": "FL", "street1": "1 E 1ST ST", "street2": "", "tags": "" }, { "addressVerified": "Verified", "city": "KEY WEST", "company": "", "countryCode": "US", "createDate": "2017-05-26T06:12:02.6230000", "customerId": 987654321, "email": 
"<EMAIL>", "marketplaceUsernames": [ { "createDate": "2017-05-26T06:12:02.6400000", "customerId": 987654321, "customerUserId": 987654321, "marketplace": "Amazon", "marketplaceId": 2, "modifyDate": "2017-05-26T06:12:02.6400000", "username": "<EMAIL>" } ], "modifyDate": "2017-05-26T06:12:02.6230000", "name": "<NAME>", "phone": "123 568 1234", "postalCode": "99999-1234", "state": "FL", "street1": "6000 Beach St", "street2": "", "tags": "" } ] } """ get_carrier = """ { "accountNumber": "example", "balance": 15.01, "code": "stamps_com", "name": "Stamps.com", "nickname": "", "primary": true, "requiresFundedAccount": true, "shippingProviderId": 35725 } """ get_customer = """ { "addressVerified": "Verified", "city": "BIG TOWN", "company": "", "countryCode": "US", "createDate": "2017-12-16T18:49:16.0070000", "customerId": 123456789, "email": "<EMAIL>", "marketplaceUsernames": [ { "createDate": "2017-12-16T18:49:16.0200000", "customerId": 123456789, "customerUserId": 123456789, "marketplace": "Amazon", "marketplaceId": 2, "modifyDate": "2017-12-16T18:49:16.0200000", "username": "<EMAIL>" } ], "modifyDate": "2017-12-16T18:49:16.0070000", "name": "<NAME>", "phone": "", "postalCode": "99999-1234", "state": "FL", "street1": "1 E 1ST ST", "street2": "", "tags": "" } """ list_orders = """ { "orders": [ { "advancedOptions": { "billToAccount": null, "billToCountryCode": null, "billToMyOtherAccount": null, "billToParty": null, "billToPostalCode": null, "containsAlcohol": false, "customField1": "EARLY BIRD SPECIAL", "customField2": "", "customField3": null, "mergedIds": [], "mergedOrSplit": false, "nonMachinable": false, "parentId": null, "saturdayDelivery": false, "source": "Manual Orders", "storeId": 12345, "warehouseId": 12345 }, "amountPaid": 0.0, "billTo": { "addressVerified": null, "city": null, "company": null, "country": null, "name": "<NAME>", "phone": null, "postalCode": null, "residential": null, "state": null, "street1": null, "street2": null, "street3": null }, 
"carrierCode": "stamps_com", "confirmation": "none", "createDate": "2015-06-29T13:05:13.4930000", "customerEmail": "<EMAIL>", "customerId": null, "customerNotes": null, "customerUsername": null, "dimensions": null, "externallyFulfilled": false, "externallyFulfilledBy": null, "gift": false, "giftMessage": null, "holdUntilDate": null, "insuranceOptions": { "insureShipment": false, "insuredValue": 0.0, "provider": null }, "internalNotes": null, "internationalOptions": { "contents": null, "customsItems": null, "nonDelivery": null }, "items": [], "labelMessages": null, "modifyDate": "2015-06-29T14:30:07.8970000", "orderDate": "2015-05-19T15:35:00.0000000", "orderId": 123456789, "orderKey": "123456789", "orderNumber": "123456789", "orderStatus": "shipped", "orderTotal": 7.0, "packageCode": "package", "paymentDate": "2015-06-29T13:05:13.4930000", "paymentMethod": null, "requestedShippingService": "USPS First Class Mail", "serviceCode": "usps_first_class_mail", "shipByDate": null, "shipDate": "2015-06-29", "shipTo": { "addressVerified": "Address validated successfully", "city": "ST MUNICIPALITY", "company": null, "country": "US", "name": "<NAME>", "phone": null, "postalCode": "12345-9876", "residential": true, "state": "IA", "street1": "9 DUG ST", "street2": "", "street3": null }, "shippingAmount": 0.0, "tagIds": null, "taxAmount": 0.0, "userId": null, "weight": { "WeightUnits": 1, "units": "ounces", "value": 5.0 } }, { "advancedOptions": { "billToAccount": null, "billToCountryCode": null, "billToMyOtherAccount": null, "billToParty": null, "billToPostalCode": null, "containsAlcohol": false, "customField1": "EARLY BIRD SPECIAL", "customField2": "", "customField3": null, "mergedIds": [], "mergedOrSplit": false, "nonMachinable": false, "parentId": null, "saturdayDelivery": false, "source": null, "storeId": 12345, "warehouseId": 12345 }, "amountPaid": 0.0, "billTo": { "addressVerified": null, "city": null, "company": null, "country": null, "name": "<NAME>", "phone": null, 
"postalCode": null, "residential": null, "state": null, "street1": null, "street2": null, "street3": null }, "carrierCode": "express_1", "confirmation": "none", "createDate": "2015-06-30T15:20:26.7230000", "customerEmail": "<EMAIL>", "customerId": null, "customerNotes": null, "customerUsername": null, "dimensions": null, "externallyFulfilled": false, "externallyFulfilledBy": null, "gift": false, "giftMessage": null, "holdUntilDate": null, "insuranceOptions": { "insureShipment": false, "insuredValue": 0.0, "provider": null }, "internalNotes": null, "internationalOptions": { "contents": "merchandise", "customsItems": [ { "countryOfOrigin": "CN", "customsItemId": 13461125, "description": "Suspicious Munitions", "harmonizedTariffCode": "", "quantity": 1, "value": 7.0 } ], "nonDelivery": "return_to_sender" }, "items": [], "labelMessages": null, "modifyDate": "2015-06-30T18:47:11.8870000", "orderDate": "2015-05-19T21:24:00.0000000", "orderId": 123456789, "orderKey": "123456789", "orderNumber": "123456789", "orderStatus": "shipped", "orderTotal": 0.0, "packageCode": "package", "paymentDate": "2015-06-30T15:20:26.7230000", "paymentMethod": null, "requestedShippingService": "USPS First Class Mail Intl", "serviceCode": "usps_first_class_package_international", "shipByDate": null, "shipDate": "2015-07-01", "shipTo": { "addressVerified": "Address not yet validated", "city": "Large City", "company": null, "country": "IN", "name": "<NAME>", "phone": null, "postalCode": "100000", "residential": false, "state": null, "street1": "6 Bustling St", "street2": "Near Flower Market", "street3": "Maharashtra" }, "shippingAmount": 0.0, "tagIds": null, "taxAmount": 0.0, "userId": null, "weight": { "WeightUnits": 1, "units": "ounces", "value": 5.0 } } ], "page": 1, "pages": 1, "total": 2 } """ get_order = """ { "advancedOptions": { "billToAccount": null, "billToCountryCode": null, "billToMyOtherAccount": null, "billToParty": null, "billToPostalCode": null, "containsAlcohol": false, 
"customField1": "EARLY BIRD SPECIAL", "customField2": "", "customField3": null, "mergedIds": [], "mergedOrSplit": false, "nonMachinable": false, "parentId": null, "saturdayDelivery": false, "source": null, "storeId": 12345, "warehouseId": 12345 }, "amountPaid": 0.0, "billTo": { "addressVerified": null, "city": null, "company": null, "country": null, "name": "<NAME>", "phone": null, "postalCode": null, "residential": null, "state": null, "street1": null, "street2": null, "street3": null }, "carrierCode": "stamps_com", "confirmation": "delivery", "createDate": "2015-06-30T15:20:26.7230000", "customerEmail": "<EMAIL>", "customerId": null, "customerNotes": null, "customerUsername": null, "dimensions": null, "externallyFulfilled": false, "externallyFulfilledBy": null, "gift": false, "giftMessage": null, "holdUntilDate": null, "insuranceOptions": { "insureShipment": false, "insuredValue": 0.0, "provider": null }, "internalNotes": null, "internationalOptions": { "contents": "merchandise", "customsItems": [ { "countryOfOrigin": "CN", "customsItemId": 123456789, "description": "Example Item", "harmonizedTariffCode": "", "quantity": 1, "value": 7.0 } ], "nonDelivery": "return_to_sender" }, "items": [], "labelMessages": null, "modifyDate": "2015-06-30T18:47:32.9330000", "orderDate": "2015-06-02T05:58:00.0000000", "orderId": 123456789, "orderKey": "123456789", "orderNumber": "123456789", "orderStatus": "shipped", "orderTotal": 0.0, "packageCode": "package", "paymentDate": "2015-06-30T15:20:26.7230000", "paymentMethod": null, "requestedShippingService": "USPS First Class Mail Intl", "serviceCode": "usps_first_class_package_international", "shipByDate": null, "shipDate": "2015-07-01", "shipTo": { "addressVerified": "Address not yet validated", "city": "Luzern", "company": null, "country": "CH", "name": "<NAME>", "phone": null, "postalCode": "5000", "residential": false, "state": "", "street1": "Weystrasse", "street2": "0", "street3": null }, "shippingAmount": 0.0, "tagIds": 
null, "taxAmount": 0.0, "userId": null, "weight": { "WeightUnits": 1, "units": "ounces", "value": 5.0 } } """ get_product = """ { "active": true, "aliases": null, "createDate": "2016-10-31T07:43:00.203", "customsCountryCode": null, "customsDescription": null, "customsTariffNo": null, "customsValue": null, "defaultCarrierCode": null, "defaultConfirmation": null, "defaultCost": null, "defaultIntlCarrierCode": null, "defaultIntlConfirmation": null, "defaultIntlPackageCode": null, "defaultIntlServiceCode": null, "defaultPackageCode": null, "defaultServiceCode": null, "fulfillmentSku": "019372892403", "height": null, "internalNotes": null, "length": null, "modifyDate": "2017-01-16T06:58:26.05", "name": "<NAME>", "noCustoms": null, "price": 0.0, "productCategory": null, "productId": 123456789, "productType": null, "sku":
""" A module for finding instantons between vacua in multiple field dimensions. The basic strategy is an iterative process: 1. Make an ansatz for the path along which the field will travel. 2. Split up the equations of motion into components that are parallel and perpendicular to the direction of travel along the path. 3. The direction of motion parallel to the path reduces to a one-dimensional equation of motion, which can be solved using the overshoot / undershoot techniques in :mod:`.tunneling1D`. Solve it. 4. Treating the motion of the field as a classical particle moving in an inverted potential, calculate the normal forces that would need to act on the particle to keep it on the path. If this forces are (close enough to) zero, the ansatz was correctly. Otherwise iteratively deform the path in the direction of the normal forces, stopping when the forces go to zero. 5. Loop back to step 3 until no further deformation is necessary. The classes :class:`Deformation_Spline` and :class:`Deformation_Points` will perform step 3, while :func:`fullTunneling` will run the entire loop. For more explicit details, see the original paper `Comput. Phys. Commun. 183 (2012)`_ [`arXiv:1109.4189`_]. .. _`Comput. Phys. Commun. 183 (2012)`: http://dx.doi.org/10.1016/j.cpc.2012.04.004 .. _`arXiv:1109.4189`: http://arxiv.org/abs/1109.4189 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from scipy import optimize, interpolate, integrate from collections import namedtuple from . import tunneling1D from . import helper_functions import sys if sys.version_info >= (3,0): xrange = range class DeformationError(Exception): """Raised when path deformation fails.""" pass class Deformation_Spline: """ Deform a path in the presence of a potential such that the normal forces along the path are zero. 
This class fits a spline to the points, and does the actual deformation on the spline rather than on the points themselves. This make the path somewhat smoother than it would otherwise be (which is generally desirable), but it does make it difficult to resolve sharp turns in the path. Parameters ---------- phi : array_like The list of points that constitutes the original path. Should have shape ``(n_points, n_dimensions)``. dphidr : array_like The 'speed' along the path at the initial points. This does not change as the path deforms. Should have shape ``(n_points,)``. Gets saved into the attribute `v2` as ``v2 = dphidr[:,np.newaxis]**2``. dV : callable The potential gradient as a function of phi. The output shape should be the same as the input shape, which will be ``(..., n_dimensions)``. nb : int, optional Number of basis splines to use. kb : int, optional Order of basis splines. v2min : float, optional The smallest the square of dphidr is allowed to be, relative to the characteristic force exterted by F_ext. Note that the self-correcting nature of the deformation goes away when dphidr=0. fix_start, fix_end : bool, optional If True, the force on the first/last point along the path is set to zero, so the point will not change in the deformation step. save_all_steps : bool, optional If True, each step gets saved into ``self.phi_list`` and ``self.F_list``. Attributes ---------- phi : array_like Set during initialization, and then rewritten at each step. num_steps : int Total number of steps taken. """ """ Additional (private) attributes ------------------------------- _L : float Total length of the path, set during initialization. _t : array_like Array from (0,1] marking the locations of each point. _X, _dX, _d2X : array_like Spline basis functions and their derivatives evaluated at `_t`. Set during initialization. _beta : array_like The spline coefficients for each dimension. Recalculated each step. 
_F_prev, _phi_prev : array_like The normal force and the path points at the last step. """ def __init__(self, phi, dphidr, dV, nb=10, kb=3, v2min=0.0, fix_start=False, fix_end=False, save_all_steps=False): # First step: convert phi to a set of path lengths. phi = np.asanyarray(phi) dphi = phi[1:]-phi[:-1] dL = np.sqrt(np.sum(dphi*dphi,axis=-1)) y = np.cumsum(dL) self._L = y[-1] self._t = np.append(0,y)/self._L self._t[0] = 1e-100 # Without this, the first data point isn't in # any bin (this matters for dX). # Create the starting spline: # make the knots and then the spline matrices at each point t t0 = np.append(np.append([0.]*(kb-1), np.linspace(0,1,nb+3-kb)), [1.]*(kb-1)) self._X,self._dX,self._d2X = helper_functions.Nbspld2(t0, self._t, kb) self._t = self._t[:,np.newaxis] # Shape (n, 1) # subtract off the linear component. phi0, phi1 = phi[:1], phi[-1:] # These are shape (1,N) phi_lin = phi0 + (phi1-phi0)*self._t self._beta, residues, rank, s = np.linalg.lstsq(self._X, phi-phi_lin) # save the points for future use. self.phi = np.asanyarray(phi) # shape (n,N) self.v2 = np.asanyarray(dphidr)[:,np.newaxis]**2 # shape (n,1) self.dV = dV self.F_list = [] self.phi_list = [] self._phi_prev = self._F_prev = None self.save_all_steps = save_all_steps self.fix_start, self.fix_end = fix_start, fix_end self.num_steps = 0 # ensure that v2 isn't too small: v2 = dphidr**2 v2min *= np.max(np.sum(dV(self.phi)**2, -1)**.5*self._L/nb) v2[v2 < v2min] = v2min self.v2 = v2[:,np.newaxis] _forces_rval = namedtuple("forces_rval", "F_norm dV") def forces(self): """ Calculate the normal force and potential gradient on the path. Returns ------- F_norm, dV : array_like """ X, dX, d2X = self._X, self._dX, self._d2X beta = self._beta """First find phi, dphi, and d2phi. 
Note that dphi needs to get a linear component added in, while d2phi does not.""" phi = self.phi dphi = np.sum(beta[np.newaxis,:,:]*dX[:,:,np.newaxis], axis=1) \ + (self.phi[-1]-self.phi[1])[np.newaxis,:] d2phi = np.sum(beta[np.newaxis,:,:]*d2X[:,:,np.newaxis], axis=1) """Compute dphi/ds, where s is the path length instead of the path parameter t. This is just the direction along the path.""" dphi_sq = np.sum(dphi*dphi, axis=-1)[:,np.newaxis] dphids = dphi/np.sqrt(dphi_sq) """Then find the acceleration along the path, i.e. d2phi/ds2:""" d2phids2 = (d2phi - dphi * np.sum(dphi*d2phi, axis=-1)[:,np.newaxis] / dphi_sq)/dphi_sq """Now we have the path at the points t, as well its derivatives with respect to it's path length. We still need to get the normal force acting on the path.""" dV = self.dV(phi) dV_perp = dV - np.sum(dV*dphids, axis=-1)[:,np.newaxis]*dphids F_norm = d2phids2 * self.v2 - dV_perp if (self.fix_start): F_norm[0] = 0.0 if (self.fix_end): F_norm[-1] = 0.0 return self._forces_rval(F_norm, dV) _step_rval = namedtuple("step_rval", "stepsize step_reversed fRatio") def step(self, lastStep, maxstep=.1, minstep=1e-4, reverseCheck=.15, stepIncrease=1.5, stepDecrease=5., checkAfterFit=True, verbose=False): """ Deform the path one step. Each point is pushed in the direction of the normal force - the force that the path exerts on a classical particle moving with speed `dphidr` in a potential with gradient `dV` such that the particle stays on the path. A stepsize of 1 corresponds to moving the path an amount ``L*N/(dV_max)``, where `L` is the length of the (original) path, `N` is the normal force, and `dV_max` is the maximum force exerted by the potential along the path. Parameters ---------- lastStep : float Size of the last step. maxstep, minstep : float, optional reverseCheck : float, optional Percentage of points for which the force can reverse direcitons (relative to the last step) before the stepsize is decreased. 
If ``reverseCheck >= 1``, the stepsize is kept at `lastStep`. stepIncrease, stepDecrease : float, optional The amount to increase or decrease stepsize over the last step. Both should be bigger than 1. checkAfterFit : bool, optional If True, the convergence test is performed after the points are fit to a spline. If False, it's done beforehand. verbose : bool, optional If True, output is printed at each step. Returns ------- stepsize : float The stepsize used for this step. step_reversed : bool True if this step was reversed, otherwise False fRatio : float The ratio of the maximum normal force to the maximum potential gradient. When the path is a perfect fit, this should go to zero. If ``checkAfterFit == True``, the normal force in this ratio is defined by the change in phi this step *after* being fit to a spline. Note that if the spline does a poor job of fitting the points after the deformation in this step (which might be the case if there are not enough basis functions), and if ``checkAfterFit == False``, this ratio can be non-zero or large even if there is no change in `phi`. Notes ----- In prior versions
# -*- coding: utf-8 -*- ''' Author: <NAME> <<EMAIL>> Date: 2012-08-25 This example file implements 5 variations of the negative binomial regression model for count data: NB-P, NB-1, NB-2, geometric and left-truncated. The NBin class inherits from the GenericMaximumLikelihood statsmodels class which provides automatic numerical differentiation for the score and hessian. NB-1, NB-2 and geometric are implemented as special cases of the NB-P model described in Greene (2008) Functional forms for the negative binomial model for count data. Economics Letters, v99n3. Tests are included to check how NB-1, NB-2 and geometric coefficient estimates compare to equivalent models in R. Results usually agree up to the 4th digit. The NB-P and left-truncated model results have not been compared to other implementations. Note that NB-P appears to only have been implemented in the LIMDEP software. ''' import numpy as np from numpy.testing import assert_almost_equal from scipy.special import digamma from scipy.stats import nbinom import pandas import patsy from statsmodels.compat.python import urlopen from statsmodels.base.model import GenericLikelihoodModel from statsmodels.base.model import GenericLikelihoodModelResults #### Negative Binomial Log-likelihoods #### def _ll_nbp(y, X, beta, alph, Q): r''' Negative Binomial Log-likelihood -- type P References: Greene, W. 2008. "Functional forms for the negtive binomial model for count data". Economics Letters. Volume 99, Number 3, pp.585-590. <NAME>. 2011. "Negative binomial regression". Cambridge University Press. Following notation in Greene (2008), with negative binomial heterogeneity parameter :math:`\alpha`: .. 
math::

        \lambda_i = exp(X\beta)\\
        \theta = 1 / \alpha \\
        g_i = \theta \lambda_i^Q \\
        w_i = g_i/(g_i + \lambda_i) \\
        r_i = \theta / (\theta+\lambda_i) \\
        ln \mathcal{L}_i = ln \Gamma(y_i+g_i) - ln \Gamma(1+y_i) + g_iln (r_i) + y_i ln(1-r_i)
    '''
    mu = np.exp(np.dot(X, beta))
    # theta = 1/alph; g_i = theta*lambda_i**Q is scipy's nbinom "size".
    size = 1/alph*mu**Q
    # r_i = theta/(theta + lambda_i) is scipy's nbinom success probability.
    prob = size/(size+mu)
    ll = nbinom.logpmf(y, size, prob)
    return ll


def _ll_nb1(y, X, beta, alph):
    '''Negative Binomial regression (type 1 likelihood)'''
    # NB-1 is the NB-P model with Q fixed at 1.
    ll = _ll_nbp(y, X, beta, alph, Q=1)
    return ll


def _ll_nb2(y, X, beta, alph):
    '''Negative Binomial regression (type 2 likelihood)'''
    # NB-2 is the NB-P model with Q fixed at 0.
    ll = _ll_nbp(y, X, beta, alph, Q=0)
    return ll


def _ll_geom(y, X, beta):
    '''Geometric regression'''
    # Geometric model: NB-2 with the heterogeneity parameter alph fixed at 1.
    ll = _ll_nbp(y, X, beta, alph=1, Q=0)
    return ll


def _ll_nbt(y, X, beta, alph, C=0):
    r'''
    Negative Binomial (truncated)

    Truncated densities for count models (Cameron & Trivedi, 2005, 680):

    .. math::

        f(y|\beta, y \geq C+1) = \frac{f(y|\beta)}{1-F(C|\beta)}
    '''
    Q = 0
    mu = np.exp(np.dot(X, beta))
    size = 1/alph*mu**Q
    prob = size/(size+mu)
    # Renormalize the NB-2 pmf by the probability mass above the cut-point C.
    ll = nbinom.logpmf(y, size, prob) - np.log(1 - nbinom.cdf(C, size, prob))
    return ll


#### Model Classes ####
class NBin(GenericLikelihoodModel):
    '''
    Negative Binomial regression

    Parameters
    ----------
    endog : array-like
        1-d array of the response variable.
    exog : array-like
        `exog` is an n x p array where n is the number of observations and p
        is the number of regressors including the intercept if one is
        included in the data.
    ll_type: string
        log-likelihood type
        `nb2`: Negative Binomial type-2 (most common)
        `nb1`: Negative Binomial type-1
        `nbp`: Negative Binomial type-P (Greene, 2008)
        `nbt`: Left-truncated Negative Binomial (type-2)
        `geom`: Geometric regression model
    C: integer
        Cut-point for `nbt` model
    '''
    def __init__(self, endog, exog, ll_type='nb2', C=0, **kwds):
        # C is only consulted by the left-truncated ('nbt') likelihood.
        self.exog = np.array(exog)
        self.endog = np.array(endog)
        self.C = C
        super(NBin, self).__init__(endog, exog, **kwds)
        # Check user input
        if ll_type not in ['nb2', 'nb1', 'nbp', 'nbt', 'geom']:
            raise NameError('Valid ll_type are: nb2, nb1, nbp, nbt, geom')
        self.ll_type = ll_type

        # Starting values (assumes first column of exog is constant)
        if ll_type == 'geom':
            self.start_params_default = np.zeros(self.exog.shape[1])
        elif ll_type == 'nbp':
            # Greene recommends starting NB-P at NB-2
            start_mod = NBin(endog, exog, 'nb2')
            start_res = start_mod.fit(disp=False)
            self.start_params_default = np.append(start_res.params, 0)
        else:
            self.start_params_default = np.append(np.zeros(self.exog.shape[1]),
                                                  .5)
        # Initialize the constant's coefficient at log of the mean count.
        self.start_params_default[0] = np.log(self.endog.mean())

        # Define loglik based on ll_type argument
        if ll_type == 'nb1':
            self.ll_func = _ll_nb1
        elif ll_type == 'nb2':
            self.ll_func = _ll_nb2
        elif ll_type == 'geom':
            self.ll_func = _ll_geom
        elif ll_type == 'nbp':
            self.ll_func = _ll_nbp
        elif ll_type == 'nbt':
            self.ll_func = _ll_nbt

    def nloglikeobs(self, params):
        # Per-observation negative log-likelihood. Parameter layout:
        # regression coefficients first, then (for 'nbp') Q at params[-2],
        # with the heterogeneity parameter alph always last.
        alph = params[-1]
        beta = params[:self.exog.shape[1]]
        if self.ll_type == 'geom':
            return -self.ll_func(self.endog, self.exog, beta)
        elif self.ll_type == 'nbt':
            return -self.ll_func(self.endog, self.exog, beta, alph, self.C)
        elif self.ll_type == 'nbp':
            Q = params[-2]
            return -self.ll_func(self.endog, self.exog, beta, alph, Q)
        else:
            return -self.ll_func(self.endog, self.exog, beta, alph)

    def fit(self, start_params=None, maxiter=10000, maxfun=5000, **kwds):
        # Use the model-specific default starting values when the caller
        # does not supply any.
        if start_params is None:
            countfit = super(NBin, self).fit(
                start_params=self.start_params_default,
                maxiter=maxiter, maxfun=maxfun, **kwds)
        else:
countfit = super(NBin, self).fit(start_params=start_params, maxiter=maxiter, maxfun=maxfun, **kwds) countfit = CountResults(self, countfit) return countfit class CountResults(GenericLikelihoodModelResults): def __init__(self, model, mlefit): self.model = model self.__dict__.update(mlefit.__dict__) def summary(self, yname=None, xname=None, title=None, alpha=.05, yname_list=None): top_left = [('Dep. Variable:', None), ('Model:', [self.model.__class__.__name__]), ('Method:', ['MLE']), ('Date:', None), ('Time:', None), ('Converged:', ["%s" % self.mle_retvals['converged']])] top_right = [('No. Observations:', None), ('Log-Likelihood:', None), ] if title is None: title = self.model.__class__.__name__ + ' ' + "Regression Results" #boiler plate from statsmodels.iolib.summary import Summary smry = Summary() # for top of table smry.add_table_2cols(self, gleft=top_left, gright=top_right, #[], yname=yname, xname=xname, title=title) # for parameters, etc smry.add_table_params(self, yname=yname_list, xname=xname, alpha=alpha, use_t=True) return smry #### Score function for NB-P #### def _score_nbp(y, X, beta, thet, Q): r''' Negative Binomial Score -- type P likelihood from Greene (2007) .. 
math:: \lambda_i = exp(X\beta)\\ g_i = \theta \lambda_i^Q \\ w_i = g_i/(g_i + \lambda_i) \\ r_i = \theta / (\theta+\lambda_i) \\ A_i = \left [ \Psi(y_i+g_i) - \Psi(g_i) + ln w_i \right ] \\ B_i = \left [ g_i (1-w_i) - y_iw_i \right ] \\ \partial ln \mathcal{L}_i / \partial \begin{pmatrix} \lambda_i \\ \theta \\ Q \end{pmatrix}= [A_i+B_i] \begin{pmatrix} Q/\lambda_i \\ 1/\theta \\ ln(\lambda_i) \end{pmatrix} -B_i \begin{pmatrix} 1/\lambda_i\\ 0 \\ 0 \end{pmatrix} \\ \frac{\partial \lambda}{\partial \beta} = \lambda_i \mathbf{x}_i \\ \frac{\partial \mathcal{L}_i}{\partial \beta} = \left (\frac{\partial\mathcal{L}_i}{\partial \lambda_i} \right ) \frac{\partial \lambda_i}{\partial \beta} ''' lamb = np.exp(np.dot(X, beta)) g = thet * lamb**Q w = g / (g + lamb) r = thet / (thet+lamb) A = digamma(y+g) - digamma(g) + np.log(w) B = g*(1-w) - y*w dl = (A+B) * Q/lamb - B * 1/lamb dt = (A+B) * 1/thet dq = (A+B) * np.log(lamb) db = X * (dl * lamb)[:,np.newaxis] sc = np.array([dt.sum(), dq.sum()]) sc = np.concatenate([db.sum(axis=0), sc]) return sc #### Tests #### medpar = pandas.read_csv(urlopen('https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/csv/COUNT/medpar.csv')) mdvis = pandas.read_csv(urlopen('https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/csv/COUNT/mdvis.csv')) # NB-2 ''' # R v2.15.1 library(MASS) library(COUNT) data(medpar) f <- los~factor(type)+hmo+white mod <- glm.nb(f, medpar) summary(mod) Call: glm.nb(formula = f, data = medpar, init.theta = 2.243376203, link = log) Deviance Residuals: Min 1Q Median 3Q Max -2.4671 -0.9090 -0.2693 0.4320 3.8668 Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) 2.31028 0.06745 34.253 < 2e-16 *** factor(type)2 0.22125 0.05046 4.385 1.16e-05 *** factor(type)3 0.70616 0.07600 9.292 < 2e-16 *** hmo -0.06796 0.05321 -1.277 0.202 white -0.12907 0.06836 -1.888 0.059 . --- Signif. 
codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for Negative Binomial(2.2434) family taken to be 1) Null deviance: 1691.1 on 1494 degrees of freedom Residual deviance: 1568.1 on 1490 degrees of freedom AIC: 9607 Number of Fisher Scoring iterations: 1 Theta: 2.2434 Std. Err.: 0.0997 2 x log-likelihood: -9594.9530 ''' def test_nb2(): y, X = patsy.dmatrices('los ~ C(type) + hmo + white', medpar) y = np.array(y)[:,0] nb2 = NBin(y,X,'nb2').fit(maxiter=10000, maxfun=5000) assert_almost_equal(nb2.params, [2.31027893349935, 0.221248978197356, 0.706158824346228, -0.067955221930748, -0.129065442248951, 0.4457567], decimal=2) # NB-1 ''' # R v2.15.1 # COUNT v1.2.3 library(COUNT) data(medpar) f <- los~factor(type)+hmo+white ml.nb1(f, medpar) Estimate SE Z LCL UCL (Intercept) 2.34918407 0.06023641 38.9994023 2.23112070 2.46724744 factor(type)2 0.16175471 0.04585569 3.5274735 0.07187757 0.25163186 factor(type)3 0.41879257 0.06553258 6.3906006 0.29034871 0.54723643 hmo -0.04533566 0.05004714 -0.9058592 -0.14342805 0.05275673 white -0.12951295 0.06071130 -2.1332593 -0.24850710 -0.01051880 alpha 4.57898241 0.22015968 20.7984603 4.14746943 5.01049539 ''' #def test_nb1(): #y, X = patsy.dmatrices('los ~ C(type) + hmo + white', medpar) #y = np.array(y)[:,0] ## TODO: Test fails with some of the other optimization methods #nb1 = NBin(y,X,'nb1').fit(method='ncg', maxiter=10000, maxfun=5000) #assert_almost_equal(nb1.params, #[2.34918407014186, 0.161754714412848, 0.418792569970658, # -0.0453356614650342, -0.129512952033423, 4.57898241219275], #decimal=2) # NB-Geometric ''' MASS v7.3-20 R v2.15.1 library(MASS) data(medpar) f <- los~factor(type)+hmo+white mod <- glm(f, family=negative.binomial(1), data=medpar) summary(mod) Call: glm(formula = f, family = negative.binomial(1), data = medpar) Deviance Residuals: Min 1Q Median 3Q Max -1.7942 -0.6545 -0.1896 0.3044 2.6844 Coefficients: Estimate Std. 
Error t value Pr(>|t|) (Intercept) 2.30849 0.07071 32.649 < 2e-16 *** factor(type)2 0.22121 0.05283 4.187 2.99e-05 *** factor(type)3 0.70599 0.08092 8.724 < 2e-16 *** hmo -0.06779 0.05521 -1.228 0.2197 white -0.12709 0.07169 -1.773 0.0765 . --- Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1 (Dispersion parameter for Negative Binomial(1) family taken to be 0.5409721) Null deviance: 872.29 on 1494 degrees of freedom Residual deviance: 811.95 on 1490 degrees of freedom AIC: 9927.3 Number of Fisher Scoring iterations: 5 ''' #def test_geom(): #y, X = patsy.dmatrices('los ~ C(type) + hmo + white', medpar) #y = np.array(y)[:,0]
else: flux = self['flux'] """ Check linetype """ if linetype == 'abs': pm = -1. labva = 'top' elif linetype == 'em' or linetype == 'strongem': pm = 1. labva = 'bottom' else: print('') print("ERROR: linetype must be either 'abs', 'em', or 'strongem'") print('') return None, None """ Set up the tick parameters""" tmpmask = np.fabs(self['wav'] - lam) < (labww / 2.) if linetype == 'em' or linetype == 'strongem': specflux = flux[tmpmask].max() else: specflux = flux[tmpmask].min() tickstart = specflux + pm * tickfac * ticklen tickend = tickstart + pm * ticklen labstart = tickstart + pm * 1.5 * ticklen """ Draw the tick mark """ axes.plot([lam, lam], [tickstart, tickend], 'k') """ Return relevant info for plotting """ return labstart, labva # ----------------------------------------------------------------------- def mark_lines(self, linetype, z, usesmooth=False, marktype='tick', labww=20., labfs=12, tickfrac=0.05, tickfac=0.75, showz=True, zstr='z', zfs=16, labloc='default', labcolor='k', namepos='top', markatm=True, fig=None): """ A generic routine for marking spectral lines in the plotted spectrum. The required linetype parameter can be either 'abs' or 'em' and will determine whether absorption or emission lines are marked. Inputs: linetype - Must be either 'abs' or 'em' to mark absorption or emission lines, respectively. A third option, 'strongem' only marks strong emission lines z - redshift to be marked labww - width in pixels of the window used to set the vertical location of the tickmark (location is set from the minimum or maximum value within the window). 
labfs - font size for labels, in points ticklen - override of auto-determination of tick length if > 0 """ """ Check linetype """ if fig is None: self.fig = plt.gcf() else: self.fig = fig self.ax = self.fig.gca() if linetype == 'abs': labva = 'top' elif linetype == 'em' or linetype == 'strongem': labva = 'bottom' else: print('') print("ERROR: linetype must be either 'abs', 'em', or 'strongem'") print('') return """ Set the display limits """ lammin, lammax = self['wav'].min(), self['wav'].max() x0, x1 = self.ax.get_xlim() y0, y1 = self.ax.get_ylim() if x0 > lammin: lammin = x0 if x1 < lammax: lammax = x1 # xdiff = x1 - x0 ydiff = y1 - y0 # dlocwin = labww / 2. """ Select lines within current display range """ zlines = (z + 1.0) * self.lineinfo['wavelength'] zmask = np.logical_and(zlines > lammin, zlines < lammax) tmptype = self.lineinfo['type'] if linetype == 'em': tmask = tmptype > 0 elif linetype == 'strongem': tmask = tmptype > 2 else: tmask = (tmptype < 2) | (tmptype == 3) mask = zmask & tmask tmplines = self.lineinfo[mask] zlines = (z + 1.0) * tmplines['wavelength'] print('') print('Line lambda_rest lambda_obs') print('---------- ----------- -----------') for i in range(len(tmplines)): line = tmplines[i] print('%-10s %8.2f %8.2f' % (line['name'], line['wavelength'], zlines[i])) """ Set the length of the ticks """ ticklen = tickfrac * ydiff print('') if (len(tmplines) == 0): print('') print('No lines of the requested type within the wavelength') print(' range covered by this spectrum.') print('') return xarr = tmplines['wavelength'] * (z + 1.) 
""" Mark the location of the spectral lines with either tickmarks (default) or vertical dashed lines """ for i in range(len(tmplines)): info = tmplines[i] if marktype == 'tick': labstart, labva = \ self.draw_tick(xarr[i], linetype, ticklen, usesmooth=usesmooth, labww=labww, tickfac=tickfac, axes=self.ax) # tmpmask = np.fabs(self['wav']-xarr[i]) < dlocwin # if linetype == 'em' or linetype == 'strongem': # specflux = flux[tmpmask].max() # else: # specflux = flux[tmpmask].min() # tickstart = specflux + pm * tickfac*ticklen # tickend = tickstart + pm * ticklen # labstart = tickstart + pm * 1.5*ticklen # plt.plot([xarr[i], xarr[i]], [tickstart, tickend], 'k') labha = 'center' else: self.ax.axvline(xarr[i], color='k', ls='--') labha = 'right' if namepos == 'bottom': labstart = y0 + 0.05 * ydiff else: labstart = y1 - 0.05 * ydiff labva = 'top' """ Label the lines """ if info['plot']: self.ax.text(xarr[i] + info['dxlab'], labstart, info['label'], rotation='vertical', ha=labha, va=labva, color=labcolor, fontsize=labfs) """ Label the plot with the redshift, if requested """ if showz: if labloc == 'topright': labx = 0.99 laby = 0.9 ha = 'right' else: labx = 0.01 laby = 0.99 ha = 'left' self.ax.text(labx, laby, '%s = %5.3f' % (zstr, z), ha=ha, va='top', color=labcolor, fontsize=zfs, transform=self.ax.transAxes) # ----------------------------------------------------------------------- def apply_wavecal_linear(self, lambda0, dlambda, outfile=None, outformat='text', doplot=True): """ Applies a very simple linear mapping from pixels to wavelength and saves the output if desired. The required inputs provide an intercept (lambda0) and a slope (dlambda) that are used to define the linear mapping, i.e., wavelength = lambda0 + pix * dlambda Required inputs: lambda0: Intercept value in Angstrom dlambda: Slope (dispersion) in Angstrom/pix Optional inputs: outfile: Name of output file, if one is desired. 
The default value (None) means no output file is produced outformat: Format of output file (see help file for Spec1d.save for the possible values). Default value is 'text' doplot: Plot the spectrum with the new wavelength calibration if desired. Default value (True) means make the plot. """ """ Make the new wavelength vector """ x = np.arange(self['wav'].size) self['wav'] = lambda0 + dlambda * x """ Plot the spectrum if desired """ if doplot: self.plot() """ Save the wavelength-calibrated spectrum if desired """ if outfile is not None: self.save(outfile, outformat=outformat) # ----------------------------------------------------------------------- def check_wavecal(self, modsmooth='default', verbose=True): """ Plots the observed wavelength-calibrated sky spectrum on top of a smoothed a priori model of the night sky emission so that the quality of the wavelength calibration can be evaluated. Inputs: modsmooth - Smoothing kernel for the model sky spectrum in Angstrom?? The default value is set under the assumption that the dispersion of the spectrum gives three pixels across the FWHM of the spectral resolution. Therefore the smoothing kernel should be: sigma = fwhm / sqrt{2 ln 2} ~ fwhm / 1.177 meaning that: sigma ~ 3. * dispersion / 1.177 ~ 2.55 * dispersion """ """ For the observed sky spectrum use either: 1. The actual sky spectrum, if it exists (preferred) 2. 
The square root of the variance spectrum, if it exists """ """ Plot the observed sky spectrum """ try: self.plot_sky() except KeyError: return if self.sky: skyflux = self['sky'] elif 'var' in self.colnames: skyflux = np.sqrt(self['var']) mask = np.isfinite(skyflux) skyflux = skyflux[mask] """ Create the model sky spectrum, with the appropriate smoothing """ print('') if modsmooth == 'default': modsmooth = 2.55 * self.dispave print('Smoothing sky spectrum with default value of %6.3f Ang' % modsmooth) elif isinstance(modsmooth, float): print('Smoothing sky spectrum with passed value of %6.3f Ang' % modsmooth) else: print('ERROR: modsmooth parameter must be a float') raise TypeError waveobs = self['wav'].copy() skymod = make_sky_model(self['wav'], smooth=modsmooth) """ Scale the sky spectrum to roughly be 75% of the amplitude of the observed spectrum """ ymin, ymax = plt.ylim() deltaobs = ymax - ymin deltamod = skymod['flux'].max() - skymod['flux'].min() print(deltaobs, deltamod) print(skyflux.mean(), skymod['flux'].mean()) skymod['flux'] *= 0.75 * deltaobs / deltamod skymod['flux'] += skyflux.mean() - skymod['flux'].mean() """ Make the plot """ wrange = waveobs.max() - waveobs.min() xmin = waveobs.min() - 0.05*wrange xmax = waveobs.max() + 0.05*wrange fig = plt.gcf() ax = plt.gca() skymod.plot(color='r', label='Model sky', fig=fig, ax=ax) plt.legend() plt.xlim(xmin, xmax) """ Clean up """ del waveobs, skymod # ----------------------------------------------------------------------- def resample(self, owave=None, verbose=True): """ Resample the spectrum onto a new wavelength grid. There are two possibilities for the output wavelength vector that sets where the interpolation happens. They are: 1. owave = None [default] A linearized set of spacings between the minimum and maximum values in the input wavelength vector 2. 
owave is set to an array A user-defined x array that has been passed through the owave parameter This is just a specialized call to the Data1d.resamp method """ self.rswav, self.rsflux = self.resamp(xout=owave, verbose=verbose) # ----------------------------------------------------------------------- def mask_line(self, linereg, bkgdwidth, mode='input', atm_corr=False, **kwargs): """ Replaces the region of a spectrum containing a spectral line with a simple model of the continuum level in the location of that line. This
<reponame>anthem-ai/fhir-types from typing import Any, List, Literal, TypedDict from .FHIR_boolean import FHIR_boolean from .FHIR_canonical import FHIR_canonical from .FHIR_code import FHIR_code from .FHIR_CodeableConcept import FHIR_CodeableConcept from .FHIR_ContactDetail import FHIR_ContactDetail from .FHIR_date import FHIR_date from .FHIR_dateTime import FHIR_dateTime from .FHIR_Element import FHIR_Element from .FHIR_id import FHIR_id from .FHIR_Identifier import FHIR_Identifier from .FHIR_markdown import FHIR_markdown from .FHIR_Measure_Group import FHIR_Measure_Group from .FHIR_Measure_SupplementalData import FHIR_Measure_SupplementalData from .FHIR_Meta import FHIR_Meta from .FHIR_Narrative import FHIR_Narrative from .FHIR_Period import FHIR_Period from .FHIR_Reference import FHIR_Reference from .FHIR_RelatedArtifact import FHIR_RelatedArtifact from .FHIR_string import FHIR_string from .FHIR_uri import FHIR_uri from .FHIR_UsageContext import FHIR_UsageContext # The Measure resource provides the definition of a quality measure. FHIR_Measure = TypedDict( "FHIR_Measure", { # This is a Measure resource "resourceType": Literal["Measure"], # The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. "id": FHIR_id, # The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource. "meta": FHIR_Meta, # A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc. "implicitRules": FHIR_uri, # Extensions for implicitRules "_implicitRules": FHIR_Element, # The base language in which the resource is written. 
"language": FHIR_code, # Extensions for language "_language": FHIR_Element, # A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety. "text": FHIR_Narrative, # These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. "contained": List[Any], # May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. "extension": List[Any], # May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions.Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). 
"modifierExtension": List[Any], # An absolute URI that is used to identify this measure when it is referenced in a specification, model, design or an instance; also called its canonical identifier. This SHOULD be globally unique and SHOULD be a literal address at which at which an authoritative instance of this measure is (or will be) published. This URL can be the target of a canonical reference. It SHALL remain the same when the measure is stored on different servers. "url": FHIR_uri, # Extensions for url "_url": FHIR_Element, # A formal identifier that is used to identify this measure when it is represented in other formats, or referenced in a specification, model, design or an instance. "identifier": List[FHIR_Identifier], # The identifier that is used to identify this version of the measure when it is referenced in a specification, model, design or instance. This is an arbitrary value managed by the measure author and is not expected to be globally unique. For example, it might be a timestamp (e.g. yyyymmdd) if a managed version is not available. There is also no expectation that versions can be placed in a lexicographical sequence. To provide a version consistent with the Decision Support Service specification, use the format Major.Minor.Revision (e.g. 1.0.0). For more information on versioning knowledge assets, refer to the Decision Support Service specification. Note that a version is required for non-experimental active artifacts. "version": FHIR_string, # Extensions for version "_version": FHIR_Element, # A natural language name identifying the measure. This name should be usable as an identifier for the module by machine processing applications such as code generation. "name": FHIR_string, # Extensions for name "_name": FHIR_Element, # A short, descriptive, user-friendly title for the measure. 
"title": FHIR_string, # Extensions for title "_title": FHIR_Element, # An explanatory or alternate title for the measure giving additional information about its content. "subtitle": FHIR_string, # Extensions for subtitle "_subtitle": FHIR_Element, # The status of this measure. Enables tracking the life-cycle of the content. "status": Literal["draft", "active", "retired", "unknown"], # Extensions for status "_status": FHIR_Element, # A Boolean value to indicate that this measure is authored for testing purposes (or education/evaluation/marketing) and is not intended to be used for genuine usage. "experimental": FHIR_boolean, # Extensions for experimental "_experimental": FHIR_Element, # The intended subjects for the measure. If this element is not provided, a Patient subject is assumed, but the subject of the measure can be anything. "subjectCodeableConcept": FHIR_CodeableConcept, # The intended subjects for the measure. If this element is not provided, a Patient subject is assumed, but the subject of the measure can be anything. "subjectReference": FHIR_Reference, # The date (and optionally time) when the measure was published. The date must change when the business version changes and it must change if the status code changes. In addition, it should change when the substantive content of the measure changes. "date": FHIR_dateTime, # Extensions for date "_date": FHIR_Element, # The name of the organization or individual that published the measure. "publisher": FHIR_string, # Extensions for publisher "_publisher": FHIR_Element, # Contact details to assist a user in finding and communicating with the publisher. "contact": List[FHIR_ContactDetail], # A free text natural language description of the measure from a consumer's perspective. "description": FHIR_markdown, # Extensions for description "_description": FHIR_Element, # The content was developed with a focus and intent of supporting the contexts that are listed. 
These contexts may be general categories (gender, age, ...) or may be references to specific programs (insurance plans, studies, ...) and may be used to assist with indexing and searching for appropriate measure instances. "useContext": List[FHIR_UsageContext], # A legal or geographic region in which the measure is intended to be used. "jurisdiction": List[FHIR_CodeableConcept], # Explanation of why this measure is needed and why it has been designed as it has. "purpose": FHIR_markdown, # Extensions for purpose "_purpose": FHIR_Element, # A detailed description, from a clinical perspective, of how the measure is used. "usage": FHIR_string, # Extensions for usage "_usage": FHIR_Element, # A copyright statement relating to the measure and/or its contents. Copyright statements are generally legal restrictions on the use and publishing of the measure. "copyright": FHIR_markdown, # Extensions for copyright "_copyright": FHIR_Element, # The date on which the resource content was approved by the publisher. Approval happens once when the content is officially approved for usage. "approvalDate": FHIR_date, # Extensions for approvalDate "_approvalDate": FHIR_Element, # The date on which the resource content was last reviewed. Review happens periodically after approval but does not change the original approval date. "lastReviewDate": FHIR_date, # Extensions for lastReviewDate "_lastReviewDate": FHIR_Element, # The period during which the measure content
<filename>mvpa2/mappers/fx.py # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the PyMVPA package for the # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## """Transform data by applying a function along samples or feature axis.""" __docformat__ = 'restructuredtext' import numpy as np import inspect from mvpa2.base import warning from mvpa2.base.node import Node from mvpa2.base.param import Parameter from mvpa2.base.constraints import * from mvpa2.datasets import Dataset from mvpa2.base.dochelpers import _str, _repr_attrs from mvpa2.mappers.base import Mapper from mvpa2.misc.support import array_whereequal from mvpa2.base.dochelpers import borrowdoc from mvpa2.misc.transformers import sum_of_abs, max_of_abs, subtract_mean if __debug__: from mvpa2.base import debug class FxMapper(Mapper): """Apply a custom transformation to (groups of) samples or features. """ is_trained = True """Indicate that this mapper is always trained.""" def __init__(self, axis, fx, fxargs=None, uattrs=None, attrfx='merge', order='uattrs'): """ Parameters ---------- axis : {'samples', 'features'} fx : callable fxargs : tuple Passed as *args to ``fx`` uattrs : list List of attribute names to consider. All possible combinations of unique elements of these attributes are used to determine the sample groups to operate on. attrfx : callable Functor that is called with each sample attribute elements matching the respective samples group. By default the unique value is determined. If the content of the attribute is not uniform for a samples group a unique string representation is created. If `None`, attributes are not altered. order : {'uattrs', 'occurrence', None} If which order groups should be merged together. 
If `None` (default before 2.3.1), the order is imposed only by the order of `uattrs` as keys in the dictionary, thus can vary from run to run. If `'occurrence'`, groups will be ordered by the first occurrence of group samples in original dataset. If `'uattrs'`, groups will be sorted by the values of uattrs with follow-up attr having higher importance for ordering (e .g. `uattrs=['targets', 'chunks']` would order groups first by `chunks` and then by `targets` within each chunk). """ Mapper.__init__(self) if not axis in ['samples', 'features']: raise ValueError("%s `axis` arguments can only be 'samples' or " "'features' (got: '%s')." % repr(axis)) self.__axis = axis self.__uattrs = uattrs self.__fx = fx if fxargs is not None: self.__fxargs = fxargs else: self.__fxargs = () if attrfx == 'merge': self.__attrfx = _uniquemerge2literal else: self.__attrfx = attrfx assert(order in (None, 'uattrs', 'occurrence')) self.__order = order @borrowdoc(Mapper) def __repr__(self, prefixes=None): if prefixes is None: prefixes = [] return super(FxMapper, self).__repr__( prefixes=prefixes + _repr_attrs(self, ['axis', 'fx', 'uattrs']) + _repr_attrs(self, ['fxargs'], default=()) + _repr_attrs(self, ['attrfx'], default='merge') + _repr_attrs(self, ['order'], default='uattrs') ) def __str__(self): return _str(self, fx=self.__fx.__name__) def _train(self, ds): # right now it needs no training, if anything is added here make sure to # remove is_trained class attribute pass def __smart_apply_along_axis(self, data): # because apply_along_axis could be very much slower than a # direct invocation of native functions capable of operating # along specific axis, let's make it smarter for those we know # could do that. 
fx = None naxis = {'samples': 0, 'features': 1}[self.__axis] try: # if first argument is 'axis' -- just proceed with a native call if inspect.getargs(self.__fx.__code__).args[1] == 'axis': fx = self.__fx elif __debug__: debug('FX', "Will apply %s via apply_along_axis", (self.__fx)) except Exception, e: if __debug__: debug('FX', "Failed to deduce either %s has 'axis' argument: %s", (self.__fx, repr(e))) pass if fx is not None: if __debug__: debug('FX', "Applying %s directly to data giving axis=%d", (self.__fx, naxis)) mdata = fx(data, naxis, *self.__fxargs) else: # either failed to deduce signature or just didn't # have 'axis' second # apply fx along naxis for each sample/feature mdata = np.apply_along_axis(self.__fx, naxis, data, *self.__fxargs) assert(mdata.ndim in (data.ndim, data.ndim-1)) return mdata @borrowdoc(Mapper) def _forward_data(self, data): if self.__uattrs is not None: raise RuntimeError("%s does not support forward-mapping of plain " "data when data grouping based on attributes " "is requested" % self.__class__.__name__) mdata = self.__smart_apply_along_axis(data) if self.__axis == 'features': if len(mdata.shape) == 1: # in case we only have a scalar per sample we need to transpose # it properly, to keep the length of the samples axis intact mdata = np.atleast_2d(mdata).T return np.atleast_2d(mdata) @borrowdoc(Mapper) def _forward_dataset(self, ds): if self.__uattrs is None: mdata, sattrs = self._forward_dataset_full(ds) else: mdata, sattrs = self._forward_dataset_grouped(ds) samples = np.atleast_2d(mdata) # return early if there is no attribute treatment desired if self.__attrfx is None: out = ds.copy(deep=False) out.samples = samples return out # not copying the samples attributes, since they have to be modified # anyway if self.__axis == 'samples': out = ds.copy(deep=False, sa=[]) col = out.sa incol = ds.sa col.set_length_check(samples.shape[0]) else: out = ds.copy(deep=False, fa=[]) col = out.fa incol = ds.fa 
col.set_length_check(samples.shape[1]) # assign samples to do COW out.samples = samples for attr in sattrs: a = sattrs[attr] # TODO -- here might puke if e.g it is a list where some items # are empty lists... I guess just wrap in try/except and # do dtype=object if catch a = np.atleast_1d(a) # make sure we do not inflate the number of dimensions for no reason # this could happen if there was only one unique value for an # attribute and the default 'uniquemerge2literal' attrfx was given if len(a.shape) > 1 and a.shape[-1] == 1 and attr in incol \ and len(a.shape) > len(incol[attr].value.shape): a.shape = a.shape[:-1] col[attr] = a return out def _forward_dataset_grouped(self, ds): mdata = [] # list of samples array pieces if self.__axis == 'samples': col = ds.sa axis = 0 elif self.__axis == 'features': col = ds.fa axis = 1 else: raise RuntimeError("This should not have happened!") attrs = dict(zip(col.keys(), [[] for i in col])) # create a dictionary for all unique elements in all attribute this # mapper should operate on self.__attrcombs = dict(zip(self.__uattrs, [col[attr].unique for attr in self.__uattrs])) # let it generate all combinations of unique elements in any attr order = self.order order_keys = [] for comb in _orthogonal_permutations(self.__attrcombs): selector = reduce(np.multiply, [array_whereequal(col[attr].value, value) for attr, value in comb.iteritems()]) # process the samples if axis == 0: samples = ds.samples[selector] else: samples = ds.samples[:, selector] # check if there were any samples for such a combination, # if not -- warning and skip the rest of the loop body if not len(samples): warning('There were no samples for combination %s. It might be ' 'a sign of a disbalanced dataset %s.' 
% (comb, ds)) continue fxed_samples = self.__smart_apply_along_axis(samples) mdata.append(fxed_samples) if self.__attrfx is not None: # and now all samples attributes for i, attr in enumerate(col): fxed_attr = self.__attrfx(col[attr].value[selector]) attrs[attr].append(fxed_attr) # possibly take care about collecting information to have groups ordered if order == 'uattrs': # reverse order as per docstring -- most of the time we have # used uattrs=['targets', 'chunks'] and did expect chunks being # groupped together. order_keys.append([comb[a] for a in self.__uattrs[::-1]]) elif order == 'occurrence': # First index should be sufficient since we are dealing # with unique non-overlapping groups here (AFAIK ;) ) order_keys.append(np.where(selector)[0][0]) if order: # reorder our groups using collected "order_keys" # data order_idxs = argsort(order_keys) mdata = [mdata[i] for i in order_idxs] # and attributes attrs = dict((k, [v[i] for i in order_idxs]) for k,v in attrs.iteritems()) if axis == 0: mdata = np.vstack(mdata) else: mdata = np.vstack(np.transpose(mdata)) return mdata, attrs def _forward_dataset_full(self, ds): # simply map the all of the data mdata = self._forward_data(ds.samples) # if the attributes should not be handled, don't handle them if self.__attrfx is None: return mdata, None # and now all attributes if self.__axis == 'samples': attrs = dict(zip(ds.sa.keys(), [self.__attrfx(ds.sa[attr].value) for attr in ds.sa])) if self.__axis == 'features': attrs = dict(zip(ds.fa.keys(), [self.__attrfx(ds.fa[attr].value) for attr in ds.fa])) return mdata, attrs axis = property(fget=lambda self:self.__axis) fx = property(fget=lambda self:self.__fx) fxargs = property(fget=lambda self:self.__fxargs) uattrs = property(fget=lambda self:self.__uattrs) attrfx = property(fget=lambda self:self.__attrfx) order
                duration, reason))
        if response:
            self.out_SERVERMSG(client, '%s' % response)

    def in_BANSPECIFIC(self, client, arg, duration, reason):
        # arg might be a username(->user_id), ip, or email; ban it
        good, response = self.bandb.ban_specific(client, duration, reason, arg)
        if good:
            self.broadcast_Moderator("%s banned-specific <%s> for %s days (%s)" % (client.username, arg, duration, reason))
        if response:
            self.out_SERVERMSG(client, '%s' % response)

    def in_UNBAN(self, client, arg):
        # arg might be a username(->user_id), ip, or email; remove all associated bans
        good, response = self.bandb.unban(client, arg)
        if good:
            self.broadcast_Moderator("%s unbanned <%s>" % (client.username, arg))
        if response:
            self.out_SERVERMSG(client, '%s' % response)

    def in_BLACKLIST(self, client, domain, reason=""):
        # add somedomain.xyz to the blacklist
        good, response = self.bandb.blacklist(client, domain, reason)
        if good:
            self.broadcast_Moderator("%s blacklisted '%s' (%s)" % (client.username, domain, reason))
        if response:
            self.out_SERVERMSG(client, '%s' % response)

    def in_UNBLACKLIST(self, client, domain):
        # remove somedomain.xyz from the blacklist
        good, response = self.bandb.unblacklist(client, domain)
        if good:
            self.broadcast_Moderator("%s un-blacklisted '%s'" % (client.username, domain))
        if response:
            self.out_SERVERMSG(client, '%s' % response)

    def in_LISTBANS(self, client):
        # send the banlist
        banlist = self.bandb.list_bans()
        if banlist:
            self.out_SERVERMSG(client, '-- Banlist --')
            for entry in banlist:
                self.out_SERVERMSG(client, "%s, %s, %s :: '%s' :: ends %s (%s)" % (entry['username'], entry['ip'], entry['email'], entry['reason'], entry['end_date'], entry['issuer']))
            self.out_SERVERMSG(client, '-- End Banlist --')
            return
        self.out_SERVERMSG(client, 'Banlist is empty')

    def in_LISTBLACKLIST(self, client):
        # send the blacklist of domains for email verification
        blacklist = self.bandb.list_blacklist()
        if blacklist:
            self.out_SERVERMSG(client, '-- Blacklist --')
            for entry in blacklist:
                self.out_SERVERMSG(client, "%s :: '%s' (%s)" % (entry['domain'], entry['reason'], entry['issuer']))
            self.out_SERVERMSG(client, '-- End Blacklist--')
            return
        self.out_SERVERMSG(client, 'Blacklist is empty')

    def in_SETACCESS(self, client, username, access):
        # set the access level of target user.
        user = self.clientFromUsername(username, True)
        if not user:
            self.out_SERVERMSG(client, "User not found.")
            return
        if not access in ('user', 'mod', 'admin'):
            self.out_SERVERMSG(client, "Invalid access mode, only user, mod, admin is valid.")
            return
        user.access = access
        # if the target is currently online, refresh and broadcast their status
        if username in self._root.usernames:
            self._calc_access_status(user)
            self._root.broadcast('CLIENTSTATUS %s %d' % (username, user.status))
        self.userdb.save_user(user)
        self.out_OK(client, "SETACCESS")
        # remove the new mod/admin from everyones ignore list and notify affected users
        if access in ('mod', 'admin'):
            userIds = self.userdb.globally_unignore_user(user.user_id)
            for userId in userIds:
                userThatIgnored = self.clientFromID(userId)
                if userThatIgnored:
                    userThatIgnored.ignored.pop(user.user_id)
                    userThatIgnored.Send('UNIGNORE userName=%s' % (username))

    def in_STATS(self, client):
        # admin-only: dump server stats to the logfile
        if not 'admin' in client.accesslevels:
            return
        self._root.stats()
        self.out_SERVERMSG(client, 'Stats were printed in the server logfile')

    def in_LISTMODS(self, client):
        # mod-only: list admins and moderators
        if not 'mod' in client.accesslevels:
            return
        admins, mods = self.userdb.list_mods()
        self.out_SERVERMSG(client, "Admins: %s" % admins)
        self.out_SERVERMSG(client, "Mods: %s" % mods)

    def in_RELOAD(self, client):
        # admin-only: reload server code/config; note the broadcast fires
        # before the access check (visible to mods even on denied attempts)
        self.broadcast_Moderator('Reload initiated by <%s>' % client.username)
        if not 'admin' in client.accesslevels:
            return
        ret = self._root.reload(client)
        self.broadcast_Moderator(ret)
        self.out_SERVERMSG(client, ret)

    def in_CLEANUP(self, client):
        # admin-only entry point for the consistency sweep below
        if not 'admin' in client.accesslevels:
            return
        self.cleanup(client)

    def cleanup(self, client=None):
        # keep calm, delete all inconsistencies, and carry on
        if client:
            self.broadcast_Moderator('Cleanup initiated by <%s>' % (client.username))
logging.info('Cleanup initiated by <%s>' % (client.username)) else: self.broadcast_Moderator('Cleanup initiated by server error') logging.error("Cleanup initiated by server error") logging.error(traceback.print_exc()) n_client = 0 n_username = 0 n_user_id = 0 n_bridged_location = 0 n_bridged_username = 0 n_bridged_user_id = 0 n_bridge_external_id = 0 n_bridge_location = 0 n_battle = 0 n_battle_user = 0 n_battle_pending_user = 0 n_channel = 0 n_channel_user = 0 n_channel_bridged_user = 0 n_mismatch = 0 root = self._root try: # cleanup clients/sessions dupcheck = set() todel = [] for session_id in root.clients: c = root.clients[session_id] if not c.connected: logging.error("client not connected: %s %d" % (c.username, c.session_id)) todel.append(c) continue if c.username in dupcheck: logging.error("client username failed dup check: %s %d" % (c.username, c.session_id)) todel.append(c) continue dupcheck.add(c.username) if c.username not in root.usernames: logging.error("client with missing username: %s %d" % (c.username, c.session_id)) todel.append(c) continue d = root.usernames[c.username] if d.session_id != c.session_id: logging.error("missmatched session_id: (%s %d) (%s %d)" % (c.username, c.session_id, d.username, d.session_id)) for c in todel: del root.clients[c.session_id] logging.error("deleted invalid client: %s %d" % (c.username, c.session_id)) n_client = n_client + 1 # cleanup usernames todel = [] for username in root.usernames: c = root.usernames[username] if not c.session_id in root.clients: logging.error("username with missing client: %s %d" % (c.username, c.session_id)) todel.append(username) continue d = root.clients[c.session_id] if d.username != c.username: logging.error("missmatched username: (%s %d) (%s %d)" % (d.username, d.session_id, c.username, c.session_id)) cs.n_mismatch = cs.n_mismatch + 1 for username in todel: del root.usernames[username] logging.error("deleted invalid username: %s" % username) n_username = n_username + 1 # cleanup user_ids 
todel = [] for user_id in root.user_ids: c = root.user_ids[user_id] if not c.session_id in root.clients: logging.error("user_id with missing client: %d<%s> %d" % (c.user_id, c.username, c.session_id)) todel.append(user_id) continue d = root.clients[c.session_id] if d.user_id != c.user_id: logging.error("missmatched user_id: (%d<%s> %d) (%d<%s> %d)" % (d.user_id, d.username, d.session_id, c.user_id, c.username, c.session_id)) n_mismatch = n_mismatch + 1 for user_id in todel: del root.user_ids[user_id] logging.error("deleted invalid user_id: %d" % user_id) n_user_id = n_user_id + 1 # cleanup bridged locations todel = [] bridged_locations = set() for location in root.bridged_locations: bridge_user_id = root.bridged_locations[location] c = root.user_ids[bridge_user_id] if not location in c.bridge: logging.error("location with missing bridge: %s %s" % (location, c.username)) todel.append(location) bridged_locations.add(location) for location in todel: del root.bridged_locations[location] logging.error("deleted invalid bridged location: %s" % location) n_bridged_location = n_bridged_location + 1 # cleanup bridge locations for session_id in root.clients: c = root.clients[session_id] todel = [] for location in c.bridge: if not location in root.bridged_locations: logging.error("bridge contains invalid location: %s %s" % (c.username, location)) todel.append(location) for location in todel: del c.bridge[location] logging.error("deleted invalid location from bridge: %s %s" % (c.username, location)) n_bridge_location = n_bridge_location + 1 # cleanup bridged usernames todel = [] for bridged_username in root.bridged_usernames: b = root.bridged_usernames[bridged_username] if not b.bridge_user_id or not b.bridge_user_id in root.user_ids: logging.error("bridged username with missing bridge: %s %d" % (b.username, b.bridge_user_id)) todel.append(bridged_username) continue bridge_user = root.user_ids[b.bridge_user_id] bridge = bridge_user.bridge if not b.location in bridge: 
logging.error("bridged_username has location missing from bridge: %d<%s> %s %s %s" % (b.bridged_id, b.username, b.location, b.external_id, bridge_user.username)) todel.append(bridged_username) continue if not b.external_id in bridge[b.location]: logging.error("bridged_username has external_id missing from bridge: %d<%s> %s %s %s" % (b.bridged_id, b.username, b.location, b.external_id, bridge_user.username)) todel.append(bridged_username) for bridged_username in todel: del root.bridged_usernames[bridged_username] logging.error("deleted invalid bridged_username: %s" % bridged_username) n_bridged_username = n_bridged_username + 1 # cleanup bridged_ids todel = [] for bridged_id in root.bridged_ids: b = root.bridged_ids[bridged_id] if not b.bridge_user_id or not b.bridge_user_id in root.user_ids: logging.error("bridged_id with missing bridge: %d<%s> %d" % (b.bridged_id, b.username, b.bridge_user_id)) todel.append(bridged_id) continue bridge_user = root.user_ids[b.bridge_user_id] bridge = bridge_user.bridge if not b.location in bridge: logging.error("bridged_id has location missing from bridge: %d<%s> %s %s %s" % (b.bridged_id, b.username, b.location, b.external_id, bridge_user.username)) todel.append(bridged_id) continue if not b.external_id in bridge[b.location]: logging.error("bridged_id has external_id missing from bridge: %d<%s> %s %s %s" % (b.bridged_id, b.username, b.location, b.external_id, bridge_user.username)) todel.append(bridged_id) for bridged_id in todel: del root.bridged_ids[bridged_id] logging.error("deleted invalid bridged_id: %s" % bridged_id) n_bridged_user_id = n_bridged_user_id + 1 # cleanup bridge external_ids for session_id in root.clients: c = root.clients[session_id] for location in c.bridge: todel = [] for external_id in c.bridge[location]: bridged_id = c.bridge[location][external_id] if not bridged_id in root.bridged_ids: logging.error("bridge has external_id with missing bridged_id: %s %s %s %d" % (c.username, location, external_id, 
bridged_id)) todel.append(external_id) for external_id in todel: del c.bridge[location][external_id] logging.error("deleted invalid external_id from bridge: %s %s %s" % (c.username, location, external_id)) n_bridge_external_id = n_bridge_external_id + 1 # cleanup battle users for battle_id, battle in root.battles.items(): for session_id in battle.users.copy(): if not session_id in root.clients: battle.users.remove(session_id) logging.error("deleted invalid session %d from battle %d" % (session_id, battle_id)) n_battle_user = n_battle_user + 1 for session_id in battle.pending_users.copy(): if not session_id in root.clients: battle.pending_users.remove(session_id) logging.error("deleted invalid session %d from pending users for battle %d" % (session_id, battle_id)) n_battle_pending_user = n_battle_pending_user + 1 # cleanup battles for battle_id in root.battles.copy(): battle = root.battles[battle_id] if not battle.host in root.clients: del root.battles[battle_id] logging.error("deleted battle %d with invalid host %d" % (battle_id, battle.host)) n_battle = n_battle + 1 continue if len(battle.users) == 0: del root.battles[battle_id] logging.error("deleted battle %d, empty" % battle_id) n_battle = n_battle + 1 # cleanup channel users & channels for channel in root.channels.copy(): for session_id in root.channels[channel].users.copy(): if not session_id in root.clients: root.channels[channel].users.remove(session_id) logging.error("deleted invalid session_id %d from channel %s" % (session_id, channel)) n_channel_user = n_channel_user + 1 for bridged_id in root.channels[channel].bridged_users.copy(): if not bridged_id in root.bridged_ids: root.channels[channel].bridged_users.remove(bridged_id) logging.error("deleted invalid bridged_id %d from channel %s" % (bridged_id, channel)) n_channel_bridged_user = n_channel_bridged_user + 1 if len(root.channels[channel].users) == 0: if len(root.channels[channel].bridged_users) > 0: logging.error("warning: empty channel %s contains 
%d bridged users" % (channel, len(root.channels[channel].bridged_users))) del root.channels[channel] logging.error("deleted empty channel %s" % channel) n_channel = n_channel + 1 except Exception as e: logging.error("Cleanup failed: " + str(e)) logging.error(traceback.format_exc()) return cleaned_info = "deleted:" cleaned_info += "\n %d clients, %d usernames, %d user_ids" % (n_client, n_username, n_user_id) cleaned_info += "\n %d bridged_locations, %d bridged_usernames, %d bridged_user_ids, %d bridge_external_ids, %d bridge_locations" % (n_bridged_location, n_bridged_username, n_bridged_user_id, n_bridge_external_id, n_bridge_location) cleaned_info += "\n %d battles, %d battle_users, %d battle_pending_users" % (n_battle, n_battle_user, n_battle_pending_user) cleaned_info += "\n %d channels, %d channel_users, %d channel_bridged_users" % (n_channel, n_channel_user, n_channel_bridged_user) cleaned_info += "\n found %d mismatches" % (n_mismatch) logging.info(cleaned_info) n_delete = n_client + n_username + n_user_id + n_bridged_location + n_bridged_username + n_bridged_user_id + n_bridge_external_id + n_bridge_location + n_battle + n_battle_user + n_battle_pending_user + n_channel + n_channel_user + n_channel_bridged_user cleaned_msg = 'Cleanup complete: %s deletions, %s mismatches' % (n_delete, n_mismatch) if client: self.out_SERVERMSG(client, cleaned_msg) self.broadcast_Moderator(cleaned_msg) def in_CHANGEEMAILREQUEST(self, client, newmail): # request to be sent a verification code for changing email address if not self.verificationdb.active(): client.Send("CHANGEEMAILREQUESTDENIED email verification is currently turned off, a blank verification code will be accepted!") return newmail = newmail.lower() found,_ = self.userdb.get_user_id_with_email(newmail) if found and not client.bot: client.Send("CHANGEEMAILREQUESTDENIED another user is already registered to the email address '%s'" % newmail) return reason = "requested to change your email address for the account 
<%s> on on the SpringRTS lobbyserver" % client.username good, reason = self.verificationdb.check_and_send(client.user_id, newmail, 4, reason) if not good: client.Send("CHANGEEMAILREQUESTDENIED " + reason) return client.Send("CHANGEEMAILREQUESTACCEPTED") def in_CHANGEEMAIL(self, client, newmail, verification_code=""): # client requests to change their own email address, with verification code if necessary newmail = newmail.lower() found,_ = self.userdb.get_user_id_with_email(newmail) if found and not client.bot: # bots should share email addr with the bot owner client.Send("CHANGEEMAILDENIED another user is already registered to the email address '%s'" % newmail) return good, reason = self.verificationdb.verify(client.user_id, newmail, verification_code) if not good: client.Send("CHANGEEMAILDENIED " + reason) return client.email = newmail self.userdb.save_user(client) self.out_SERVERMSG(client, "Your email address has been changed to "
<filename>glasses/models/classification/resnet/__init__.py from __future__ import annotations from torch import nn from torch import Tensor from glasses.nn.blocks.residuals import ResidualAdd from glasses.nn.blocks import Conv2dPad, BnActConv, ConvBnAct from collections import OrderedDict from typing import List from functools import partial from glasses.utils.PretrainedWeightsProvider import Config, pretrained from ....models.base import Encoder, VisionModule """Implementation of ResNet proposed in `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>` """ ReLUInPlace = partial(nn.ReLU, inplace=True) class ResNetShorcut(nn.Module): """Shorcut function applied by ResNet to upsample the channel when residual and output features do not match Args: in_features (int): features (channels) of the input out_features (int): features (channels) of the desidered output """ def __init__(self, in_features: int, out_features: int, stride: int = 2): super().__init__() self.conv = Conv2dPad(in_features, out_features, kernel_size=1, stride=stride, bias=False) self.bn = nn.BatchNorm2d(out_features) def forward(self, x: Tensor) -> Tensor: x = self.conv(x) x = self.bn(x) return x class ResNetShorcutD(nn.Sequential): """Shorcut function proposed in `Bag of Tricks for Image Classification with Convolutional Neural Networks <https://arxiv.org/pdf/1812.01187.pdf>`_ It applies average pool instead of `stride=2` in the convolution layer Args: in_features (int): features (channels) of the input out_features (int): features (channels) of the desidered output """ def __init__(self, in_features: int, out_features: int, stride: int = 2): super().__init__(OrderedDict({ 'pool': nn.AvgPool2d((2, 2)) if stride == 2 else nn.Identity(), 'conv': Conv2dPad(in_features, out_features, kernel_size=1, bias=False), 'bn': nn.BatchNorm2d(out_features) })) class ResNetBasicBlock(nn.Module): """Basic ResNet block composed by two 3x3 convs with residual connection. .. 
image:: https://github.com/FrancescoSaverioZuppichini/glasses/blob/develop/docs/_static/images/resnet/ResNetBasicBlock.png?raw=true *The residual connection is showed as a black line* The output of the layer is defined as: :math:`x' = F(x) + x` Args: out_features (int): Number of input features out_features (int): Number of output features activation (nn.Module, optional): [description]. Defaults to ReLUInPlace. stride (int, optional): [description]. Defaults to 1. conv (nn.Module, optional): [description]. Defaults to nn.Conv2d. """ def __init__(self, in_features: int, out_features: int, activation: nn.Module = ReLUInPlace, stride: int = 1, shortcut: nn.Module = ResNetShorcut, **kwargs): super().__init__() self.should_apply_shortcut = in_features != out_features or stride != 1 self.block = nn.Sequential( OrderedDict( { 'conv1': Conv2dPad(in_features, out_features, kernel_size=3, stride=stride, bias=False, **kwargs), 'bn1': nn.BatchNorm2d(out_features), 'act1': activation(), 'conv2': Conv2dPad(out_features, out_features, kernel_size=3, bias=False), 'bn2': nn.BatchNorm2d(out_features), } )) self.shortcut = shortcut( in_features, out_features, stride=stride) if self.should_apply_shortcut else nn.Identity() self.act = activation() def forward(self, x: Tensor) -> Tensor: res = x x = self.block(x) res = self.shortcut(res) x += res x = self.act(x) return x class ResNetBottleneckBlock(ResNetBasicBlock): """ResNet Bottleneck block based on the torchvision implementation. Even if the paper says that the first conv1x1 compresses the features. We followed the original implementation that expands the `out_features` by a factor equal to `reduction`. The stride is applied into the 3x3 conv, `this improves https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch` .. 
image:: https://github.com/FrancescoSaverioZuppichini/glasses/blob/develop/docs/_static/images/resnet/ResNetBottleNeckBlock.png?raw=true *The residual connection is showed as a black line* Args: out_features (int): Number of input features out_features (int): Number of output features activation (nn.Module, optional): [description]. Defaults to ReLUInPlace. stride (int, optional): [description]. Defaults to 1. conv (nn.Module, optional): [description]. Defaults to nn.Conv2d. reduction (int, optional): [description]. Defaults to 4. """ def __init__(self, in_features: int, out_features: int, features: int = None, activation: nn.Module = ReLUInPlace, reduction: int = 4, stride=1, shortcut=ResNetShorcut, **kwargs): super().__init__(in_features, out_features, activation, stride, shortcut=shortcut) print self.features = out_features // reduction if features is None else features self.block = nn.Sequential( ConvBnAct(in_features, self.features, activation=activation, kernel_size=1), ConvBnAct(self.features, self.features, activation=activation, kernel_size=3, stride=stride, **kwargs), ConvBnAct(self.features, out_features, activation=None, kernel_size=1), ) class ResNetBasicPreActBlock(ResNetBasicBlock): reduction: int = 1 """Pre activation ResNet basic block proposed in `Identity Mappings in Deep Residual Networks <https://arxiv.org/pdf/1603.05027.pdf>`_ Args: out_features (int): Number of input features out_features (int): Number of ouimport inspect """ def __init__(self, in_features: int, out_features: int, activation: nn.Module = ReLUInPlace, stride: int = 1, shortcut: nn.Module = ResNetShorcut, **kwargs): super().__init__(in_features, out_features, activation, stride=stride, shortcut=shortcut, **kwargs) self.block = nn.Sequential( OrderedDict( { 'bn1': nn.BatchNorm2d(in_features), 'act1': activation(), 'conv1': Conv2dPad(in_features, out_features, kernel_size=3, bias=False, stride=stride, **kwargs), 'bn2': nn.BatchNorm2d(out_features), 'act2': activation(), 'conv2': 
Conv2dPad(out_features, out_features, kernel_size=3, bias=False), } )) self.act = nn.Identity() class ResNetBottleneckPreActBlock(ResNetBottleneckBlock): reduction: int = 4 """Pre activation ResNet bottleneck block proposed in `Identity Mappings in Deep Residual Networks <https://arxiv.org/pdf/1603.05027.pdf>`_ Args: out_features (int): Number of input features out_features (int): Number of output features activation (nn.Module, optional): [description]. Defaults to ReLUInPlace. stride (int, optional): [description]. Defaults to 1. conv (nn.Module, optional): [description]. Defaults to nn.Conv2d. """ def __init__(self, in_features: int, out_features: int, features: int = None, activation: nn.Module = ReLUInPlace, reduction: int = 4, stride=1, shortcut=ResNetShorcut, **kwargs): super().__init__(in_features, out_features, features, activation, stride=stride, shortcut=shortcut, **kwargs) # TODO I am not sure it is correct features = out_features // reduction self.block = nn.Sequential( BnActConv(in_features, self.features, activation=activation, kernel_size=1, bias=False), BnActConv(self.features, self.features, activation=activation, bias=False, kernel_size=3, stride=stride, **kwargs), BnActConv(self.features, out_features, activation=activation, bias=False, kernel_size=1) ) self.act = nn.Identity() class ResNetLayer(nn.Module): def __init__(self, in_features: int, out_features: int, block: nn.Module = ResNetBasicBlock, depth: int = 1, stride: int = 2, *args, **kwargs): super().__init__() # 'We perform stride directly by convolutional layers that have a stride of 2.' 
        # only the first block downsamples; the rest keep the resolution
        self.block = nn.Sequential(
            block(in_features, out_features, stride=stride, **kwargs),
            *[block(out_features, out_features, **kwargs)
              for _ in range(depth - 1)]
        )

    def forward(self, x: Tensor) -> Tensor:
        x = self.block(x)
        return x


class ResNetStem(nn.Sequential):
    # Classic ResNet stem: 7x7 stride-2 conv + BN + act, then 3x3 max pool.
    def __init__(self, in_features: int, out_features: int, activation: nn.Module = ReLUInPlace):
        super().__init__(
            ConvBnAct(in_features, out_features, activation=activation, kernel_size=7, stride=2),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )


class ResNetStemC(nn.Sequential):
    """ Modified stem proposed in `Bag of Tricks for Image Classification with Convolutional Neural Networks <https://arxiv.org/pdf/1812.01187.pdf>`_

    The observation is that the computational cost of a convolution is quadratic to the kernel width or height. A 7 × 7 convolution is 5.4 times more expensive than a 3 × 3 convolution. So this tweak replacing the 7 × 7 convolution in the input stem with three conservative 3 × 3 convolution
    """

    def __init__(self, in_features: int, out_features: int, activation: nn.Module = ReLUInPlace):
        super().__init__(
            ConvBnAct(in_features, out_features // 2, activation=activation, kernel_size=3, stride=2),
            ConvBnAct(out_features // 2, out_features // 2, activation=activation, kernel_size=3),
            ConvBnAct(out_features // 2, out_features, activation=activation, kernel_size=3),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )


class ResNetEncoder(Encoder):
    """
    ResNet encoder composed by increasing different layers with increasing features.

    Args:
        in_channels (int, optional): [description]. Defaults to 3.
        start_features (int, optional): [description]. Defaults to 64.
        widths (List[int], optional): [description]. Defaults to [64, 128, 256, 512].
        depths (List[int], optional): [description]. Defaults to [2, 2, 2, 2].
        activation (nn.Module, optional): [description]. Defaults to ReLUInPlace.
        block (nn.Module, optional): Block used, there are several ones such as `ResNetBasicBlock` and `ResNetBottleneckBlock` .
        Defaults to ResNetBasicBlock.
        stem (nn.Module, optional): Stem used. Defaults to ResNetStem.
    """

    # NOTE(review): `widths` and `depths` use mutable (list) default
    # arguments shared across calls; they are only read here, but a
    # `None`-sentinel default would be safer — confirm no caller mutates them.
    def __init__(self, in_channels: int = 3, start_features: int = 64, widths: List[int] = [64, 128, 256, 512], depths: List[int] = [2, 2, 2, 2],
                 activation: nn.Module = ReLUInPlace, block: nn.Module = ResNetBasicBlock, stem: nn.Module = ResNetStem, downsample_first: bool = False, **kwargs):
        super().__init__()
        self.widths = widths
        self.start_features = start_features
        # consecutive (in, out) width pairs for stages after the first
        self.in_out_widths = list(zip(widths, widths[1:]))

        self.stem = stem(in_channels, start_features, activation)

        # first stage optionally downsamples; later stages always stride=2
        # (ResNetLayer default)
        self.layers = nn.ModuleList([
            ResNetLayer(start_features, widths[0], depth=depths[0], activation=activation, block=block, stride=2 if downsample_first else 1, **kwargs),
            *[ResNetLayer(in_features, out_features, depth=n, activation=activation, block=block, **kwargs)
              for (in_features, out_features), n in zip(self.in_out_widths, depths[1:])]
        ])

    def forward(self, x):
        x = self.stem(x)
        for block in self.layers:
            x = block(x)
        return x

    @property
    def stages(self):
        # modules producing intermediate feature maps (stem conv + all but last layer)
        return [self.stem[-2], *self.layers[:-1]]

    @property
    def features_widths(self):
        # channel widths matching `stages`
        return [self.start_features, *self.widths[:-1]]


class ResNetHead(nn.Sequential):
    """
    This class represents the tail of ResNet. It performs a global pooling and maps the output to the
    correct class by using a fully connected layer.
    """

    def __init__(self, in_features: int, n_classes: int):
        super().__init__()
        self.add_module('pool', nn.AdaptiveAvgPool2d((1, 1)))
        self.add_module('flat', nn.Flatten())
        self.add_module('fc', nn.Linear(in_features, n_classes))


class ResNet(VisionModule):
    """Implementation of ResNet proposed in `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_

    Examples:

        Default models

        >>> ResNet.resnet18()
        >>> ResNet.resnet26()
        >>> ResNet.resnet34()
        >>> ResNet.resnet50()
        >>> ResNet.resnet101()
        >>> ResNet.resnet152()
        >>> ResNet.resnet200()

        Variants (d) proposed in `Bag of Tricks for Image Classification with Convolutional Neural Networks <https://arxiv.org/pdf/1812.01187.pdf>`_

        >>> ResNet.resnet26d()
        >>> ResNet.resnet34d()
        >>> ResNet.resnet50d()
        >>> # You can construct your own one by chaning `stem` and `block`
        >>> resnet101d = ResNet.resnet101(stem=ResNetStemC, block=partial(ResNetBottleneckBlock, shortcut=ResNetShorcutD))

        You can easily customize your model

        >>> # change activation
        >>> ResNet.resnet18(activation = nn.SELU)
        >>> # change number of classes (default is 1000 )
        >>> ResNet.resnet18(n_classes=100)
        >>> # pass a different block
        >>> ResNet.resnet18(block=SENetBasicBlock)
        >>> # change the steam
        >>> model = ResNet.resnet18(stem=ResNetStemC)
        >>> change shortcut
        >>> model = ResNet.resnet18(block=partial(ResNetBasicBlock, shortcut=ResNetShorcutD))
        >>> # store each feature
        >>> x = torch.rand((1, 3, 224,
from __future__ import division

import json
from collections import OrderedDict
from datetime import timedelta
from decimal import Decimal, ROUND_UP

import numpy as np
from django.conf import settings
from django.contrib.auth.models import User
from django.db import connection, transaction
from django.db.models import F, Q
from django.utils import timezone
from ws4redis.publisher import RedisPublisher
from ws4redis.redis_store import RedisMessage

import constants
from crowdsourcing import models
from crowdsourcing.crypto import to_hash
from crowdsourcing.emails import send_notifications_email, send_new_tasks_email, send_task_returned_email, \
    send_task_rejected_email, send_project_completed
from crowdsourcing.payment import Stripe
from crowdsourcing.redis import RedisProvider
from crowdsourcing.utils import hash_task
from csp.celery import app as celery_app
from mturk.tasks import get_provider


def _expire_returned_tasks():
    """Mark RETURNED assignments as EXPIRED once their grace period has elapsed.

    Skips weekend runs entirely; weekday runs extend the deadline for rows
    returned late in the week (see the CASE expression below).
    """
    now = timezone.now()
    if now.weekday() in [5, 6]:
        # Saturday/Sunday: do nothing so the grace window excludes weekends.
        return 'WEEKEND'
    # noinspection SqlResolve
    query = '''
        with task_workers as (
            SELECT * FROM (
                SELECT tw.id,
                    CASE WHEN EXTRACT(DOW FROM now()) <= %(dow)s
                        THEN tw.returned_at + INTERVAL %(exp_days)s
                    ELSE tw.returned_at END returned_at
                FROM crowdsourcing_taskworker tw
                INNER JOIN crowdsourcing_task t ON tw.task_id = t.id
                WHERE tw.status = %(status)s) r
            WHERE (now() - INTERVAL %(exp_days)s)::timestamp > r.returned_at
        )
        UPDATE crowdsourcing_taskworker tw_up
        SET status=%(expired)s, updated_at=now()
        FROM task_workers
        WHERE task_workers.id=tw_up.id
        RETURNING tw_up.id, tw_up.worker_id
    '''
    cursor = connection.cursor()
    # NOTE(review): 'dow' (compared against EXTRACT(DOW ...), i.e. a day-of-week
    # 0-6) is bound to settings.EXPIRE_RETURNED_TASKS, which also feeds the
    # '{} day' interval — confirm these are really meant to share one setting.
    cursor.execute(query, {
        'status': models.TaskWorker.STATUS_RETURNED,
        'expired': models.TaskWorker.STATUS_EXPIRED,
        'exp_days': '{} day'.format(settings.EXPIRE_RETURNED_TASKS),
        'dow': settings.EXPIRE_RETURNED_TASKS
    })
    workers = cursor.fetchall()
    cursor.close()
    # Split the RETURNING rows into worker ids (for cache updates) and
    # task-worker id dicts (for refunds).
    worker_list = []
    task_workers = []
    for w in workers:
        worker_list.append(w[1])
        task_workers.append({'id': w[0]})
refund_task.delay(task_workers) update_worker_cache.delay(worker_list, constants.TASK_EXPIRED) return 'SUCCESS' @celery_app.task(ignore_result=True) def expire_tasks(): cursor = connection.cursor() # noinspection SqlResolve query = ''' WITH taskworkers AS ( SELECT tw.id, p.id project_id FROM crowdsourcing_taskworker tw INNER JOIN crowdsourcing_task t ON tw.task_id = t.id INNER JOIN crowdsourcing_project p ON t.project_id = p.id INNER JOIN crowdsourcing_taskworkersession sessions ON sessions.task_worker_id = tw.id WHERE tw.status=%(in_progress)s GROUP BY tw.id, p.id HAVING sum(coalesce(sessions.ended_at, now()) - sessions.started_at) > coalesce(p.timeout, INTERVAL '24 hour')) UPDATE crowdsourcing_taskworker tw_up SET status=%(expired)s FROM taskworkers WHERE taskworkers.id=tw_up.id RETURNING tw_up.id, tw_up.worker_id ''' cursor.execute(query, {'in_progress': models.TaskWorker.STATUS_IN_PROGRESS, 'expired': models.TaskWorker.STATUS_EXPIRED}) workers = cursor.fetchall() cursor.close() worker_list = [] task_workers = [] for w in workers: worker_list.append(w[1]) task_workers.append({'id': w[0]}) refund_task.delay(task_workers) update_worker_cache.delay(worker_list, constants.TASK_EXPIRED) _expire_returned_tasks() return 'SUCCESS' @celery_app.task(ignore_result=True) def auto_approve_tasks(): now = timezone.now() # if now.weekday() in [5, 6]: # return 'WEEKEND' # if now.weekday() == 0 and now.hour < 15: # return 'MONDAY' cursor = connection.cursor() # noinspection SqlResolve query = ''' WITH taskworkers AS ( SELECT tw.id, p.id project_id, p.group_id project_gid, tw.task_id, u.id user_id, u.username, u_worker.username worker_username FROM crowdsourcing_taskworker tw INNER JOIN crowdsourcing_task t ON tw.task_id = t.id INNER JOIN crowdsourcing_project p ON t.project_id = p.id INNER JOIN auth_user u ON p.owner_id = u.id INNER JOIN auth_user u_worker ON tw.worker_id = u_worker.id WHERE tw.submitted_at + INTERVAL %(auto_approve_freq)s < NOW() AND tw.status=%(submitted)s) 
UPDATE crowdsourcing_taskworker tw_up SET status=%(accepted)s, approved_at = %(approved_at)s, auto_approved=TRUE FROM taskworkers WHERE taskworkers.id=tw_up.id RETURNING tw_up.id, tw_up.worker_id, taskworkers.task_id, taskworkers.user_id, taskworkers.username, taskworkers.project_gid, taskworkers.worker_username ''' cursor.execute(query, {'submitted': models.TaskWorker.STATUS_SUBMITTED, 'accepted': models.TaskWorker.STATUS_ACCEPTED, 'approved_at': now, 'auto_approve_freq': '{} hour'.format(settings.AUTO_APPROVE_FREQ)}) task_workers = cursor.fetchall() for w in task_workers: task_workers.append({'id': w[0]}) post_approve.delay(w[2], 1) redis_publisher = RedisPublisher(facility='notifications', users=[w[4], w[6]]) message = RedisMessage( json.dumps({"event": 'TASK_APPROVED', "project_gid": w[5], "project_key": to_hash(w[5])})) redis_publisher.publish_message(message) cursor.close() return 'SUCCESS' @celery_app.task(ignore_result=True) def update_worker_cache(workers, operation, key=None, value=None): provider = RedisProvider() for worker in workers: name = provider.build_key('worker', worker) if operation == constants.TASK_ACCEPTED: provider.hincrby(name, 'in_progress', 1) elif operation == constants.TASK_SUBMITTED: provider.hincrby(name, 'in_progress', -1) provider.hincrby(name, 'submitted', 1) elif operation == constants.TASK_REJECTED: provider.hincrby(name, 'submitted', -1) provider.hincrby(name, 'rejected', 1) elif operation == constants.TASK_RETURNED: provider.hincrby(name, 'submitted', -1) provider.hincrby(name, 'returned', 1) elif operation == constants.TASK_APPROVED: provider.hincrby(name, 'submitted', -1) provider.hincrby(name, 'approved', 1) elif operation in [constants.TASK_EXPIRED, constants.TASK_SKIPPED]: provider.hincrby(name, 'in_progress', -1) elif operation == constants.ACTION_GROUP_ADD: provider.set_add(name + ':worker_groups', value) elif operation == constants.ACTION_GROUP_REMOVE: provider.set_remove(name + ':worker_groups', value) elif operation 
== constants.ACTION_UPDATE_PROFILE:
            provider.set_hash(name, key, value)
    return 'SUCCESS'


@celery_app.task(ignore_result=True)
def email_notifications():
    """Email each user a digest of their unread messages.

    On the first run for a user (EmailNotification just created) all unread
    messages are included; afterwards only those received since the last digest.
    """
    users = User.objects.all()
    url = '%s/%s/' % (settings.SITE_HOST, 'messages')
    users_notified = []
    for user in users:
        email_notification, created = models.EmailNotification.objects.get_or_create(recipient=user)
        if created:
            # First digest ever: all unread messages (excluding self-sent).
            message_recipients = models.MessageRecipient.objects.filter(
                status__lt=models.MessageRecipient.STATUS_READ,
                recipient=user
            ).exclude(message__sender=user)
        else:
            # Only unread messages that arrived since the last notification.
            message_recipients = models.MessageRecipient.objects.filter(
                status__lt=models.MessageRecipient.STATUS_READ,
                created_at__gt=email_notification.updated_at,
                recipient=user
            ).exclude(message__sender=user)
        message_recipients = message_recipients.order_by('-created_at') \
            .select_related('message', 'recipient', 'message__sender') \
            .values('created_at', 'message__body', 'recipient__username', 'message__sender__username')
        result = OrderedDict()
        # Group messages by sender, preserving most-recent-first order.
        for message_recipient in message_recipients:
            if message_recipient['message__sender__username'] in result:
                result[message_recipient['message__sender__username']].append(message_recipient)
            else:
                result[message_recipient['message__sender__username']] = [message_recipient]
        messages = [{'sender': k, 'messages': v} for k, v in result.items()]
        if len(messages) > 0:
            # send email
            send_notifications_email(email=user.email, url=url, messages=messages)
            users_notified.append(user)
    # Stamp everyone notified so the next run only picks up newer messages.
    models.EmailNotification.objects.filter(recipient__in=users_notified).update(updated_at=timezone.now())
    return 'SUCCESS'


@celery_app.task(bind=True, ignore_result=True)
def create_tasks(self, tasks):
    """Bulk-create Task rows for a project; retried up to twice on failure."""
    try:
        with transaction.atomic():
            task_obj = []
            x = 0
            for task in tasks:
                x += 1
                # Content hash lets later revisions detect unchanged rows.
                hash_digest = hash_task(task['data'])
                t = models.Task(data=task['data'], hash=hash_digest, project_id=task['project_id'],
                                row_number=x)
                task_obj.append(t)
            models.Task.objects.bulk_create(task_obj)
            # Fresh tasks get their own id as group id.
            models.Task.objects.filter(project_id=tasks[0]['project_id']).update(group_id=F('id'))
    except Exception as e:
        self.retry(countdown=4, exc=e, max_retries=2)
    return 'SUCCESS'


@celery_app.task(bind=True, ignore_result=True)
def create_tasks_for_project(self, project_id, file_deleted):
    """Rebuild a project's tasks from its batch file, reusing group ids from
    identical rows of the previous revision; retried up to twice on failure.

    Args:
        project_id: Primary key of the project to rebuild tasks for.
        file_deleted: True when the batch file was removed, in which case a
            single empty placeholder task is created instead.
    """
    project = models.Project.objects.filter(pk=project_id).first()
    if project is None:
        return 'NOOP'
    # Latest earlier revision in the same group, used to diff rows and carry
    # over group ids.
    previous_rev = models.Project.objects.prefetch_related('batch_files', 'tasks').filter(~Q(id=project.id),
                                                                                          group_id=project.group_id) \
        .order_by('-id').first()
    previous_batch_file = previous_rev.batch_files.first() if previous_rev else None
    models.Task.objects.filter(project=project).delete()
    if file_deleted:
        # NOTE(review): tasks were already deleted unconditionally just above, so
        # this second delete is redundant — confirm whether the first delete was
        # meant to be conditional.
        models.Task.objects.filter(project=project).delete()
        task_data = {
            "project_id": project_id,
            "data": {}
        }
        task = models.Task.objects.create(**task_data)
        if previous_batch_file is None and previous_rev is not None:
            task.group_id = previous_rev.tasks.all().first().group_id
        else:
            task.group_id = task.id
        task.save()
        # price_data = models.Task.objects.filter(project_id=project_id, price__isnull=False).values_list('price',
        #                                                                                                 flat=True)
        _set_aux_attributes(project, [])
        return 'SUCCESS'
    try:
        with transaction.atomic():
            data = project.batch_files.first().parse_csv()
            task_obj = []
            x = 0
            previous_tasks = previous_rev.tasks.all().order_by('row_number') if previous_batch_file else []
            previous_count = len(previous_tasks)
            for row in data:
                x += 1
                hash_digest = hash_task(row)
                price = None
                if project.allow_price_per_task and project.task_price_field is not None:
                    price = row.get(project.task_price_field)
                t = models.Task(data=row, hash=hash_digest, project_id=int(project_id), row_number=x, price=price)
                if previous_batch_file is not None and x <= previous_count:
                    # Identical row content => keep the old group id so history links up.
                    if len(set(row.items()) ^ set(previous_tasks[x - 1].data.items())) == 0:
                        t.group_id = previous_tasks[x - 1].group_id
                task_obj.append(t)
            models.Task.objects.bulk_create(task_obj)
            price_data =
models.Task.objects.filter(project_id=project_id, price__isnull=False).values_list('price', flat=True) _set_aux_attributes(project, price_data) models.Task.objects.filter(project_id=project_id, group_id__isnull=True) \ .update(group_id=F('id')) except Exception as e: self.retry(countdown=4, exc=e, max_retries=2) return 'SUCCESS' def _set_aux_attributes(project, price_data): if project.aux_attributes is None: project.aux_attributes = {} if not len(price_data): max_price = float(project.price) min_price = float(project.price) median_price = float(project.price) else: max_price = float(np.max(price_data)) min_price = float(np.min(price_data)) median_price = float(np.median(price_data)) project.aux_attributes.update({"min_price": min_price, "max_price": max_price, "median_price": median_price}) project.save() @celery_app.task(ignore_result=True) def pay_workers(): workers = User.objects.all() payment = Stripe() # total = 0 # for worker in workers: task_workers = models.TaskWorker.objects.prefetch_related('task__project') \ .filter(worker=worker, status=models.TaskWorker.STATUS_ACCEPTED, is_paid=False) for tw in task_workers: payment.pay_worker(tw) def single_payout(amount, user): return 'OBSOLETE METHOD' @celery_app.task(ignore_result=True) def post_approve(task_id, num_workers): task = models.Task.objects.prefetch_related('project').get(pk=task_id) latest_revision = models.Project.objects.filter(~Q(status=models.Project.STATUS_DRAFT), group_id=task.project.group_id) \ .order_by('-id').first() latest_revision.amount_due -= Decimal(num_workers * latest_revision.price) latest_revision.save() return 'SUCCESS' def create_transaction(sender_id, recipient_id, amount, reference): return 'OBSOLETE METHOD' @celery_app.task(ignore_result=True) def refund_task(task_worker_in): return 'OBSOLETE METHOD' @celery_app.task(ignore_result=True) def update_feed_boomerang(): logs = [] cursor = connection.cursor() last_update = timezone.now() - 
timedelta(minutes=settings.HEART_BEAT_BOOMERANG) projects = models.Project.objects.filter(status=models.Project.STATUS_IN_PROGRESS, min_rating__gt=1.0, enable_boomerang=True, rating_updated_at__lt=last_update) for project in projects: if project.min_rating == 3.0: project.min_rating = 2.0 project.previous_min_rating = 3.0 elif project.min_rating == 2.0: project.min_rating = 1.99 project.previous_min_rating = 2.0 elif project.min_rating == 1.99: project.min_rating = 1.0 project.previous_min_rating = 1.99 project.rating_updated_at = timezone.now() project.save() logs.append( models.BoomerangLog(object_id=project.group_id, min_rating=project.min_rating, rating_updated_at=project.rating_updated_at, reason='DEFAULT')) # noinspection SqlResolve email_query = ''' SELECT available.id, available.group_id, owner_profile.handle, u_workers.id, sum(available) available_count, u_workers.email, available.name, coalesce((available.aux_attributes ->> 'median_price') :: NUMERIC, available.price) FROM ( SELECT p.id, p.group_id, p.name, owner_id, p.min_rating, p.price, p.aux_attributes, sum(1) available FROM crowdsourcing_task t INNER JOIN (SELECT group_id, max(id) id FROM crowdsourcing_task WHERE deleted_at IS NULL GROUP BY group_id) t_max ON t_max.id = t.id INNER JOIN crowdsourcing_project p ON p.id = t.project_id INNER JOIN ( SELECT t.group_id, sum(t.done) done FROM ( SELECT t.group_id, CASE WHEN (tw.worker_id IS NOT NULL) AND tw.status NOT IN (4, 6, 7) THEN 1 ELSE 0 END done FROM crowdsourcing_task t LEFT OUTER JOIN crowdsourcing_taskworker tw ON t.id = tw.task_id WHERE t.exclude_at IS NULL AND t.deleted_at IS NULL) t GROUP BY t.group_id) t_count ON t_count.group_id = t.group_id AND t_count.done < p.repetition WHERE p.status = 3 AND p.deleted_at IS NULL GROUP BY p.id, p.name, owner_id, p.min_rating, p.group_id, p.price, aux_attributes) available INNER JOIN auth_user u_workers ON TRUE INNER JOIN crowdsourcing_userprofile p_workers ON p_workers.user_id = u_workers.id AND 
p_workers.is_worker IS TRUE INNER JOIN get_worker_ratings(u_workers.id) worker_ratings ON worker_ratings.requester_id = available.owner_id AND (coalesce(worker_ratings.worker_rating, 1.99) >= available.min_rating) LEFT OUTER JOIN crowdsourcing_WorkerProjectNotification n ON n.project_id = available.group_id AND n.worker_id = u_workers.id INNER JOIN crowdsourcing_userpreferences pref ON pref.user_id = u_workers.id INNER JOIN auth_user owner ON owner.id = available.owner_id INNER JOIN crowdsourcing_userprofile owner_profile ON owner_profile.user_id = owner.id LEFT OUTER JOIN ( SELECT p.id, tw.worker_id, count(tw.id) tasks_done FROM crowdsourcing_project p INNER JOIN crowdsourcing_task t ON p.id = t.project_id LEFT OUTER JOIN crowdsourcing_taskworker tw ON tw.task_id = t.id GROUP BY p.id, tw.worker_id ) worker_project ON worker_project.id = available.id AND worker_project.worker_id = u_workers.id WHERE
# Force any command line keys and values that are bytes to unicode. k = k.decode() if isinstance(k, bytes) else k v = v.decode() if isinstance(v, bytes) else v self._flag_values.setdefault(k, v) @staticmethod def _is_valid_key(key): """Return True if key is a valid configuration key.""" return key and key[0].islower() def __setattr__(self, attr, value): """Provide a useful error when attempting to set a value via setattr().""" if self._is_valid_key(attr): raise AttributeError("Can't set conf values by attribute, use load()") # __slots__ is defined above, so this will raise an AttributeError if the # attribute isn't one we expect; this limits the number of ways to abuse the # conf module singleton instance. Also note that we can't use super() # normally here because of the sys.modules swap (Configuration is no longer # defined, and evaluates to None if used here). # pylint: disable=bad-super-call super(type(self), self).__setattr__(attr, value) # Don't use synchronized on this one, because __getitem__ handles it. def __getattr__(self, attr): # pylint: disable=invalid-name """Get a config value via attribute access.""" if self._is_valid_key(attr): return self[attr] # Config keys all begin with a lowercase letter, so treat this normally. raise AttributeError("'%s' object has no attribute '%s'" % (type(self).__name__, attr)) @threads.synchronized def __getitem__(self, item): # pylint: disable=invalid-name """Get a config value via item access. Order of precedence is: - Value provided via --config-value flag. - Value loaded via load*() methods. - Default value as declared with conf.declare() Args: item: Config key name to get. 
""" if item not in self._declarations: raise self.UndeclaredKeyError('Configuration key not declared', item) if item in self._flag_values: if item in self._loaded_values: self._logger.warning( 'Overriding loaded value for %s (%s) with flag value: %s', item, self._loaded_values[item], self._flag_values[item]) return self._flag_values[item] if item in self._loaded_values: return self._loaded_values[item] if self._declarations[item].has_default: return self._declarations[item].default_value raise self.UnsetKeyError( 'Configuration value not set and has no default', item) @threads.synchronized def __contains__(self, name): # pylint: disable=invalid-name """True if we have a value for name.""" return (name in self._declarations and (self._declarations[name].has_default or name in self._loaded_values or name in self._flag_values)) @threads.synchronized def declare(self, name, description=None, **kwargs): """Declare a configuration key with the given name. Args: name: Configuration key to declare, must not have been already declared. description: If provided, use this as the description for this key. **kwargs: Other kwargs to pass to the Declaration, only default_value is currently supported. """ if not self._is_valid_key(name): raise self.InvalidKeyError( 'Invalid key name, must begin with a lowercase letter', name) if name in self._declarations: raise self.KeyAlreadyDeclaredError( 'Configuration key already declared', name) self._declarations[name] = self.Declaration( name, description=description, **kwargs) @threads.synchronized def reset(self): """Reset the loaded state of the configuration to what it was at import. Note that this does *not* reset values set by commandline flags or loaded from --config-file (in fact, any values loaded from --config-file that have been overridden are reset to their value from --config-file). """ # Populate loaded_values with values from --config-file, if it was given. 
self._loaded_values = {} if self._flags.config_file is not None: self.load_from_file(self._flags.config_file, _allow_undeclared=True) def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False): """Loads the configuration from a file. Parsed contents must be a single dict mapping config key to value. Args: yamlfile: The opened file object to load configuration from. See load_from_dict() for other args' descriptions. Raises: ConfigurationInvalidError: If configuration file can't be read, or can't be parsed as either YAML (or JSON, which is a subset of YAML). """ self._logger.info('Loading configuration from file: %s', yamlfile) try: parsed_yaml = self._modules['yaml'].safe_load(yamlfile.read()) except self._modules['yaml'].YAMLError as exception: raise self.ConfigurationInvalidError( 'Failed to load from %s as YAML' % yamlfile, exception) if not isinstance(parsed_yaml, dict): # Parsed YAML, but it's not a dict. raise self.ConfigurationInvalidError( 'YAML parsed, but wrong type, should be dict', parsed_yaml) self._logger.debug('Configuration loaded from file: %s', parsed_yaml) self.load_from_dict( parsed_yaml, _override=_override, _allow_undeclared=_allow_undeclared) def load(self, _override=True, _allow_undeclared=False, **kwargs): """load configuration values from kwargs, see load_from_dict().""" self.load_from_dict( kwargs, _override=_override, _allow_undeclared=_allow_undeclared) @threads.synchronized def load_from_dict(self, dictionary, _override=True, _allow_undeclared=False): """Loads the config with values from a dictionary instead of a file. This is meant for testing and bin purposes and shouldn't be used in most applications. Args: dictionary: The dictionary containing config keys/values to update. _override: If True, new values will override previous values. _allow_undeclared: If True, silently load undeclared keys, otherwise warn and ignore the value. Typically used for loading config files before declarations have been evaluated. 
""" undeclared_keys = [] for key, value in self._modules['six'].iteritems(dictionary): # Warn in this case. We raise if you try to access a config key that # hasn't been declared, but we don't raise here so that you can use # configuration files that are supersets of required configuration for # any particular test station. if key not in self._declarations and not _allow_undeclared: undeclared_keys.append(key) continue if key in self._loaded_values: if _override: self._logger.info( 'Overriding previously loaded value for %s (%s) with value: %s', key, self._loaded_values[key], value) else: self._logger.info( 'Ignoring new value (%s), keeping previous value for %s: %s', value, key, self._loaded_values[key]) continue # Force any keys and values that are bytes to unicode. key = key.decode() if isinstance(key, bytes) else key value = value.decode() if isinstance(value, bytes) else value self._loaded_values[key] = value if undeclared_keys: self._logger.warning('Ignoring undeclared configuration keys: %s', undeclared_keys) @threads.synchronized def _asdict(self): """Create a dictionary snapshot of the current config values.""" # Start with any default values we have, and override with loaded values, # and then override with flag values. retval = {key: self._declarations[key].default_value for key in self._declarations if self._declarations[key].has_default} retval.update(self._loaded_values) # Only update keys that are declared so we don't allow injecting # un-declared keys via commandline flags. 
for key, value in self._modules['six'].iteritems(self._flag_values): if key in self._declarations: retval[key] = value return retval @property def help_text(self): """Return a string with all config keys and their descriptions.""" result = [] for name in sorted(self._declarations.keys()): result.append(name) result.append('-' * len(name)) decl = self._declarations[name] if decl.description: result.append(decl.description.strip()) else: result.append('(no description found)') if decl.has_default: result.append('') quotes = '"' if type(decl.default_value) is str else '' result.append(' default_value={quotes}{val}{quotes}'.format( quotes=quotes, val=decl.default_value)) result.append('') result.append('') return '\n'.join(result) def save_and_restore(self, _func=None, **config_values): """Decorator for saving conf state and restoring it after a function. This decorator is primarily for use in tests, where conf keys may be updated for individual test cases, but those values need to be reverted after the test case is done. Examples: conf.declare('my_conf_key') @conf.save_and_restore def MyTestFunc(): conf.load(my_conf_key='baz') SomeFuncUnderTestThatUsesMyConfKey() conf.load(my_conf_key='foo') MyTestFunc() print conf.my_conf_key # Prints 'foo', *NOT* 'baz' # Without the save_and_restore decorator, MyTestFunc() would have had the # side effect of altering the conf value of 'my_conf_key' to 'baz'. # Config keys can also be initialized for the context inline at decoration # time. This is the same as setting them at the beginning of the # function, but is a little clearer syntax if you know ahead of time what # config keys and values you need to set. @conf.save_and_restore(my_conf_key='baz') def MyOtherTestFunc(): print conf.my_conf_key # Prints 'baz' MyOtherTestFunc() print conf.my_conf_key # Prints 'foo' again, for the same reason. Args: _func: The function to wrap. The returned wrapper will invoke the function and restore the config to the state it was in at invocation. 
**config_values: Config keys can be set inline at decoration time, see examples. Note that config keys can't begin with underscore, so there can be no name collision with _func. Returns: Wrapper to replace _func, as per Python decorator semantics. """ functools = self._modules['functools'] # pylint: disable=redefined-outer-name if not _func: return functools.partial(self.save_and_restore, **config_values) @functools.wraps(_func) def _saving_wrapper(*args, **kwargs): saved_config = dict(self._loaded_values) try: self.load_from_dict(config_values) return _func(*args, **kwargs) finally: self._loaded_values = saved_config # pylint: disable=attribute-defined-outside-init return _saving_wrapper def inject_positional_args(self, method): """Decorator for injecting positional arguments from the configuration. This decorator wraps the given method, so that any positional arguments are passed with corresponding values from the configuration. The name of the positional argument must match the configuration key. Keyword arguments are *NEVER* modified, even if their names match configuration keys. Avoid naming keyword args names that are also configuration keys to avoid confusion. Additional positional arguments may be used
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['ApplianceArgs', 'Appliance'] @pulumi.input_type class ApplianceArgs: def __init__(__self__, *, managed_resource_group_id: pulumi.Input[str], resource_group_name: pulumi.Input[str], appliance_definition_id: Optional[pulumi.Input[str]] = None, appliance_name: Optional[pulumi.Input[str]] = None, identity: Optional[pulumi.Input['IdentityArgs']] = None, kind: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, managed_by: Optional[pulumi.Input[str]] = None, parameters: Optional[Any] = None, plan: Optional[pulumi.Input['PlanArgs']] = None, sku: Optional[pulumi.Input['SkuArgs']] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, ui_definition_uri: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a Appliance resource. :param pulumi.Input[str] managed_resource_group_id: The managed resource group Id. :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive. :param pulumi.Input[str] appliance_definition_id: The fully qualified path of appliance definition Id. :param pulumi.Input[str] appliance_name: The name of the appliance. :param pulumi.Input['IdentityArgs'] identity: The identity of the resource. :param pulumi.Input[str] kind: The kind of the appliance. Allowed values are MarketPlace and ServiceCatalog. :param pulumi.Input[str] location: Resource location :param pulumi.Input[str] managed_by: ID of the resource that manages this resource. :param Any parameters: Name and value pairs that define the appliance parameters. 
It can be a JObject or a well formed JSON string. :param pulumi.Input['PlanArgs'] plan: The plan information. :param pulumi.Input['SkuArgs'] sku: The SKU of the resource. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags :param pulumi.Input[str] ui_definition_uri: The blob URI where the UI definition file is located. """ pulumi.set(__self__, "managed_resource_group_id", managed_resource_group_id) pulumi.set(__self__, "resource_group_name", resource_group_name) if appliance_definition_id is not None: pulumi.set(__self__, "appliance_definition_id", appliance_definition_id) if appliance_name is not None: pulumi.set(__self__, "appliance_name", appliance_name) if identity is not None: pulumi.set(__self__, "identity", identity) if kind is not None: pulumi.set(__self__, "kind", kind) if location is not None: pulumi.set(__self__, "location", location) if managed_by is not None: pulumi.set(__self__, "managed_by", managed_by) if parameters is not None: pulumi.set(__self__, "parameters", parameters) if plan is not None: pulumi.set(__self__, "plan", plan) if sku is not None: pulumi.set(__self__, "sku", sku) if tags is not None: pulumi.set(__self__, "tags", tags) if ui_definition_uri is not None: pulumi.set(__self__, "ui_definition_uri", ui_definition_uri) @property @pulumi.getter(name="managedResourceGroupId") def managed_resource_group_id(self) -> pulumi.Input[str]: """ The managed resource group Id. """ return pulumi.get(self, "managed_resource_group_id") @managed_resource_group_id.setter def managed_resource_group_id(self, value: pulumi.Input[str]): pulumi.set(self, "managed_resource_group_id", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group. The name is case insensitive. 
""" return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="applianceDefinitionId") def appliance_definition_id(self) -> Optional[pulumi.Input[str]]: """ The fully qualified path of appliance definition Id. """ return pulumi.get(self, "appliance_definition_id") @appliance_definition_id.setter def appliance_definition_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "appliance_definition_id", value) @property @pulumi.getter(name="applianceName") def appliance_name(self) -> Optional[pulumi.Input[str]]: """ The name of the appliance. """ return pulumi.get(self, "appliance_name") @appliance_name.setter def appliance_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "appliance_name", value) @property @pulumi.getter def identity(self) -> Optional[pulumi.Input['IdentityArgs']]: """ The identity of the resource. """ return pulumi.get(self, "identity") @identity.setter def identity(self, value: Optional[pulumi.Input['IdentityArgs']]): pulumi.set(self, "identity", value) @property @pulumi.getter def kind(self) -> Optional[pulumi.Input[str]]: """ The kind of the appliance. Allowed values are MarketPlace and ServiceCatalog. """ return pulumi.get(self, "kind") @kind.setter def kind(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "kind", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ Resource location """ return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter(name="managedBy") def managed_by(self) -> Optional[pulumi.Input[str]]: """ ID of the resource that manages this resource. 
""" return pulumi.get(self, "managed_by") @managed_by.setter def managed_by(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "managed_by", value) @property @pulumi.getter def parameters(self) -> Optional[Any]: """ Name and value pairs that define the appliance parameters. It can be a JObject or a well formed JSON string. """ return pulumi.get(self, "parameters") @parameters.setter def parameters(self, value: Optional[Any]): pulumi.set(self, "parameters", value) @property @pulumi.getter def plan(self) -> Optional[pulumi.Input['PlanArgs']]: """ The plan information. """ return pulumi.get(self, "plan") @plan.setter def plan(self, value: Optional[pulumi.Input['PlanArgs']]): pulumi.set(self, "plan", value) @property @pulumi.getter def sku(self) -> Optional[pulumi.Input['SkuArgs']]: """ The SKU of the resource. """ return pulumi.get(self, "sku") @sku.setter def sku(self, value: Optional[pulumi.Input['SkuArgs']]): pulumi.set(self, "sku", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ Resource tags """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @property @pulumi.getter(name="uiDefinitionUri") def ui_definition_uri(self) -> Optional[pulumi.Input[str]]: """ The blob URI where the UI definition file is located. 
""" return pulumi.get(self, "ui_definition_uri") @ui_definition_uri.setter def ui_definition_uri(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "ui_definition_uri", value) class Appliance(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, appliance_definition_id: Optional[pulumi.Input[str]] = None, appliance_name: Optional[pulumi.Input[str]] = None, identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None, kind: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, managed_by: Optional[pulumi.Input[str]] = None, managed_resource_group_id: Optional[pulumi.Input[str]] = None, parameters: Optional[Any] = None, plan: Optional[pulumi.Input[pulumi.InputType['PlanArgs']]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, ui_definition_uri: Optional[pulumi.Input[str]] = None, __props__=None): """ Information about appliance. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] appliance_definition_id: The fully qualified path of appliance definition Id. :param pulumi.Input[str] appliance_name: The name of the appliance. :param pulumi.Input[pulumi.InputType['IdentityArgs']] identity: The identity of the resource. :param pulumi.Input[str] kind: The kind of the appliance. Allowed values are MarketPlace and ServiceCatalog. :param pulumi.Input[str] location: Resource location :param pulumi.Input[str] managed_by: ID of the resource that manages this resource. :param pulumi.Input[str] managed_resource_group_id: The managed resource group Id. :param Any parameters: Name and value pairs that define the appliance parameters. It can be a JObject or a well formed JSON string. 
:param pulumi.Input[pulumi.InputType['PlanArgs']] plan: The plan information. :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive. :param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The SKU of the resource. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags :param pulumi.Input[str] ui_definition_uri: The blob URI where the UI definition file is located. """ ... @overload def __init__(__self__, resource_name: str, args: ApplianceArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Information about appliance. :param str resource_name: The name of the resource. :param ApplianceArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(ApplianceArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, appliance_definition_id: Optional[pulumi.Input[str]] = None, appliance_name: Optional[pulumi.Input[str]] = None, identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None, kind: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, managed_by: Optional[pulumi.Input[str]] = None, managed_resource_group_id: Optional[pulumi.Input[str]] = None, parameters: Optional[Any] = None, plan: Optional[pulumi.Input[pulumi.InputType['PlanArgs']]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, ui_definition_uri: Optional[pulumi.Input[str]] = None, __props__=None): if 
opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = ApplianceArgs.__new__(ApplianceArgs) __props__.__dict__["appliance_definition_id"] = appliance_definition_id __props__.__dict__["appliance_name"] = appliance_name __props__.__dict__["identity"] = identity __props__.__dict__["kind"] = kind __props__.__dict__["location"] = location __props__.__dict__["managed_by"] = managed_by if managed_resource_group_id is None and not opts.urn: raise TypeError("Missing required property 'managed_resource_group_id'") __props__.__dict__["managed_resource_group_id"] = managed_resource_group_id __props__.__dict__["parameters"] = parameters __props__.__dict__["plan"] = plan if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["sku"] = sku __props__.__dict__["tags"] = tags __props__.__dict__["ui_definition_uri"] = ui_definition_uri __props__.__dict__["name"] = None __props__.__dict__["outputs"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["type"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:solutions/v20160901preview:Appliance"), pulumi.Alias(type_="azure-native:solutions:Appliance"), pulumi.Alias(type_="azure-nextgen:solutions:Appliance"), pulumi.Alias(type_="azure-native:solutions/v20170901:Appliance"), pulumi.Alias(type_="azure-nextgen:solutions/v20170901:Appliance"), pulumi.Alias(type_="azure-native:solutions/v20180601:Appliance"), pulumi.Alias(type_="azure-nextgen:solutions/v20180601:Appliance"), 
pulumi.Alias(type_="azure-native:solutions/v20190701:Appliance"), pulumi.Alias(type_="azure-nextgen:solutions/v20190701:Appliance"), pulumi.Alias(type_="azure-native:solutions/v20200821preview:Appliance"), pulumi.Alias(type_="azure-nextgen:solutions/v20200821preview:Appliance")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(Appliance, __self__).__init__( 'azure-native:solutions/v20160901preview:Appliance', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'Appliance': """ Get an existing Appliance resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = ApplianceArgs.__new__(ApplianceArgs) __props__.__dict__["appliance_definition_id"] = None __props__.__dict__["identity"] = None __props__.__dict__["kind"] = None __props__.__dict__["location"] =
import random
import shutil
import itertools
import colorsys

from PIL import Image, ImageDraw

__all__ = [
    "STANDARD_MODES",
    "SPECIAL_MODES",
    "ALL_MODES",
    "hex_to_rgb",
    "hex_to_rgba",
    "rgb_to_hex",
    "rgba_to_hex",
    "random_color",
    "iter_pixels",
    "color_distance",
    "rough_color_distance",
    "eval_pixel",
    "mix",
    "show_cli",
    "rgb_to_hsv",
    "hsv_to_rgb",
    "colorize",
    "align_bbox",
    "round_corner",
    "round_rectangle",
    "luma",
]

STANDARD_MODES = ("1", "L", "P", "RGB", "YCbCr", "LAB", "HSV", "RGBA", "CMYK", "I", "F")
SPECIAL_MODES = (
    "LA",
    "PA",
    "RGBX",
    "RGBa",
    "La",
    "I;16",
    "I;16L",
    "I;16B",
    "I;16N",
    "BGR;15",
    "BGR;16",
    "BGR;24",
    "BGR;32",
)
ALL_MODES = STANDARD_MODES + SPECIAL_MODES


def hex_to_rgb(rgb):
    """Convert a 6-digit hexadecimal RGB number into an ``(r, g, b)`` tuple.

    Args:
        rgb (:obj:`int`): 6-digit hex number to convert to a tuple.

    Returns:
        Tuple[:obj:`int`]: RGB tuple.

    Note:
        This converts an ``int`` into a tuple of ints; it does not parse
        strings.
    """
    if not 0 <= rgb <= 0xFFFFFF:
        raise ValueError(f"{rgb!r} is not an RGB number.")
    # Peel one byte per channel, most significant first.
    return ((rgb >> 16) & 0xFF, (rgb >> 8) & 0xFF, rgb & 0xFF)


def hex_to_rgba(rgba):
    """Convert an 8-digit hexadecimal RGBA number into an ``(r, g, b, a)`` tuple.

    Args:
        rgba (:obj:`int`): 8-digit hex number to convert to a tuple.

    Returns:
        Tuple[:obj:`int`]: RGBA tuple.
    """
    if not 0 <= rgba <= 0xFFFFFFFF:
        raise ValueError(f"{rgba!r} is not an RGBA number.")
    return ((rgba >> 24) & 0xFF, (rgba >> 16) & 0xFF, (rgba >> 8) & 0xFF, rgba & 0xFF)


def rgb_to_hex(rgb):
    """Convert an RGB tuple into a 6-digit hexadecimal RGB number.

    Args:
        rgb (Tuple[:obj:`int`]): Tuple to convert to hex.

    Returns:
        :obj:`int`: RGB hex.
    """
    # Validate every band before checking the length, mirroring a strict
    # "all ints in range, exactly three of them" contract.
    if not all(isinstance(band, int) and 0 <= band < 256 for band in rgb) or len(rgb) != 3:
        raise ValueError(f"{rgb!r} is not an RGB tuple.")
    r, g, b = rgb
    return (r << 16) | (g << 8) | b


def rgba_to_hex(rgba):
    """Convert an RGBA tuple into an 8-digit hexadecimal RGBA number.

    Args:
        rgba (Tuple[:obj:`int`]): Tuple to convert to hex.

    Returns:
        :obj:`int`: RGBA hex.
    """
    if not all(isinstance(band, int) and 0 <= band < 256 for band in rgba) or len(rgba) != 4:
        raise ValueError(f"{rgba!r} is not an RGBA tuple.")
    r, g, b, a = rgba
    return (r << 24) | (g << 16) | (b << 8) | a


def _raise_unsupported_mode(mode):
    # Distinguish a recognized-but-unsupported mode from a typo / unknown one.
    if mode in ALL_MODES:
        raise ValueError(f"Mode {mode!r} is currently not supported.")
    raise ValueError(f"Unknown mode {mode!r}. Make sure capitalization is correct.")


def random_color(mode="RGB"):
    """Generate a random color in the specified `mode`.

    Args:
        mode (:obj:`str`): Mode the generated colour should be in.
            Defaults to ``"RGB"``.

    Returns:
        Union[:obj:`int`, Tuple[:obj:`int`]]: Random colour.
    """
    if mode == "1":
        return random.randint(0, 1)
    if mode in ("L", "P"):
        return random.randint(0, 255)
    if mode in ("RGB", "YCbCr", "LAB", "HSV"):
        return tuple(random.randint(0, 255) for _ in range(3))
    if mode in ("RGBA", "CMYK"):
        return tuple(random.randint(0, 255) for _ in range(4))
    if mode == "I":
        return random.randint(-(2 ** 31), 2 ** 31 - 1)
    _raise_unsupported_mode(mode)


def iter_pixels(img):
    """Iterate over every pixel of an image, yielding ``(x, y, color)``
    tuples row by row.

    Args:
        img (:obj:`PIL.Image.Image`): Image whose pixels are iterated.

    Yields:
        Tuple[`x`, `y`, `pixel colour`]: Coordinate and colour of each pixel.
    """
    for y, x in itertools.product(range(img.height), range(img.width)):
        yield (x, y, img.getpixel((x, y)))


def color_distance(col1, col2):
    """Calculate the Euclidean distance between two colors of equal modes.

    Args:
        col1: First colour to compare.
        col2: Second colour to compare.

    Returns:
        Union[:obj:`int`, :obj:`float`]: Distance between the colours.
    """
    # Single-band colours are plain numbers; compare directly.
    if isinstance(col1, (int, float)):
        return float(abs(col1 - col2))
    squared = sum((b1 - b2) ** 2 for b1, b2 in zip(col1, col2))
    return squared ** 0.5


def rough_color_distance(col1, col2):
    """Same as :func:`color_distance`, but without the square root.

    Does not give the exact distance value, but is good enough to compare
    different distances cheaply.

    Args:
        col1: First colour to compare.
        col2: Second colour to compare.

    Returns:
        Union[:obj:`int`, :obj:`float`]: Rough distance between the colours.
    """
    if isinstance(col1, (int, float)):
        return float(abs(col1 - col2))
    return sum((b1 - b2) ** 2 for b1, b2 in zip(col1, col2))
def eval_pixel(func, img):
    """Evaluate `func` at every pixel of `img` and return a new Image with
    those modified values.

    `func` should take one argument representing the original color in the
    mode of `img` and return a new color of the same mode. Unlike
    :obj:`PIL.Image.eval`, the function is applied to the whole pixel, not
    to each band separately.

    Args:
        func (Callable[[`pixel colour`], `pixel colour`]): Function applied
            to each pixel colour value.
        img (:obj:`PIL.Image.Image`): Image to modify the pixels of.

    Returns:
        :obj:`PIL.Image.Image`: Modified image object.
    """
    # Memoize per input colour so `func` runs once per distinct colour.
    memo = {}
    out = img.copy()
    for x, y, col in iter_pixels(img):
        if col not in memo:
            memo[col] = func(col)
        out.putpixel((x, y), memo[col])
    return out


def mix(col1, col2, p=0.5):
    """Mix two colors according to percentage ``p`` where

    - ``p=0`` returns `col1`
    - ``p=1`` returns `col2`
    - ``p=0.5`` returns an equal mix of `col1` and `col2`
    - ``p=0.25`` returns a color containing 75% of `col1` and 25% of `col2`

    Args:
        col1: First colour to be mixed.
        col2: Second colour to be mixed.
        p (:obj:`float`): Mix proportion, clamped to ``[0, 1]``.

    Returns:
        Tuple[:obj:`int`]: New colour after mixing.
    """
    p = min(max(0, p), 1)
    # Single-band colours keep their numeric type.
    if isinstance(col1, (int, float)):
        return type(col1)(col1 * (1 - p) + col2 * p)
    return tuple(int(b1 * (1 - p) + b2 * p) for b1, b2 in zip(col1, col2))


def show_cli(img):
    """NOTE: This experimental function is for debug purposes only.
    It may not work in all terminals."""
    rgb = img.convert("RGB")
    term_w, _term_h = shutil.get_terminal_size((80, 24))
    # Two terminal cells per pixel keeps the aspect ratio roughly square.
    new_w = term_w // 2
    new_h = rgb.height * new_w // rgb.width
    rgb = rgb.resize((new_w, new_h))
    for y in range(rgb.height):
        cells = []
        for x in range(rgb.width):
            r, g, b = rgb.getpixel((x, y))
            # 24-bit background colour escape, then reset.
            cells.append(f"\33[48;2;{r};{g};{b}m \33[49m")
        print("".join(cells))


def rgb_to_hsv(rgb):
    """Convert an RGB tuple to an HSV tuple for the same color.

    Both tuples obey PIL rules, i.e. three integers each ranging 0-255.

    Args:
        rgb (Tuple[:obj:`int`]): RGB tuple to convert to HSV.

    Returns:
        Tuple[:obj:`int`]: HSV tuple.
    """
    scaled = tuple(band / 255 for band in rgb)
    return tuple(round(band * 255) for band in colorsys.rgb_to_hsv(*scaled))


def hsv_to_rgb(hsv):
    """Convert an HSV tuple to an RGB tuple for the same color.

    Both tuples obey PIL rules, i.e. three integers each ranging 0-255.

    Args:
        hsv (Tuple[:obj:`int`]): HSV tuple to convert to RGB.

    Returns:
        Tuple[:obj:`int`]: RGB tuple.
    """
    scaled = tuple(band / 255 for band in hsv)
    return tuple(round(band * 255) for band in colorsys.hsv_to_rgb(*scaled))


def colorize(img, color):
    """Colorize an image with `color` (an RGB tuple). Always returns an RGB
    image."""
    hue = rgb_to_hsv(color)[0]
    hsv_img = img.convert("HSV")
    # Keep each pixel's saturation/value; replace only the hue band.
    recolored = eval_pixel(lambda px: (hue, px[1], px[2]), hsv_img)
    return recolored.convert("RGB")
`align` can have any integer value from 1 to 9 corresponding to alignments based on the common number pad layout: 7 8 9 4 5 6 1 2 3 If `suppress_wrong_size` is `True`, the function will not raise an error if the box does not fit inside the frame. It will instead return a box placement outside of the frame. Note that this may mean negative coordinates.""" fx0, fy0, fx1, fy1 = frame fx0 += margin fy0
"""The prompt_toolkit based xonsh shell.""" import os import re import sys from functools import wraps from types import MethodType from prompt_toolkit import ANSI from prompt_toolkit.auto_suggest import AutoSuggestFromHistory from prompt_toolkit.cursor_shapes import ModalCursorShapeConfig from prompt_toolkit.enums import EditingMode from prompt_toolkit.formatted_text import PygmentsTokens, to_formatted_text from prompt_toolkit.history import ThreadedHistory from prompt_toolkit.key_binding.bindings.emacs import ( load_emacs_shift_selection_bindings, ) from prompt_toolkit.key_binding.bindings.named_commands import get_by_name from prompt_toolkit.key_binding.key_bindings import merge_key_bindings from prompt_toolkit.shortcuts import CompleteStyle from prompt_toolkit.shortcuts import print_formatted_text as ptk_print from prompt_toolkit.shortcuts.prompt import PromptSession from prompt_toolkit.styles import Style, merge_styles from prompt_toolkit.styles.pygments import pygments_token_to_classname from xonsh.base_shell import BaseShell from xonsh.built_ins import XSH from xonsh.events import events from xonsh.lazyimps import pyghooks, pygments, winutils from xonsh.platform import HAS_PYGMENTS, ON_POSIX, ON_WINDOWS from xonsh.ptk_shell.completer import PromptToolkitCompleter from xonsh.ptk_shell.formatter import PTKPromptFormatter from xonsh.ptk_shell.history import PromptToolkitHistory, _cust_history_matches from xonsh.ptk_shell.key_bindings import load_xonsh_bindings from xonsh.pygments_cache import get_all_styles from xonsh.shell import transform_command from xonsh.style_tools import DEFAULT_STYLE_DICT, _TokenType, partial_color_tokenize from xonsh.tools import carriage_return, print_exception, print_warning try: from prompt_toolkit.clipboard import DummyClipboard from prompt_toolkit.clipboard.pyperclip import PyperclipClipboard HAVE_SYS_CLIPBOARD = True except ImportError: HAVE_SYS_CLIPBOARD = False CAPITAL_PATTERN = re.compile(r"([a-z])([A-Z])") Token = 
_TokenType()  # NOTE(review): completes the `Token = ` assignment cut off on the previous chunk line

events.transmogrify("on_ptk_create", "LoadEvent")
events.doc(
    "on_ptk_create",
    """
on_ptk_create(prompter: PromptSession, history: PromptToolkitHistory, completer: PromptToolkitCompleter, bindings: KeyBindings) ->

Fired after prompt toolkit has been initialized
""",
)


def tokenize_ansi(tokens):
    """Checks a list of (token, str) tuples for ANSI escape sequences and
    extends the token list with the new formatted entries.
    During processing tokens are converted to ``prompt_toolkit.FormattedText``.
    Returns a list of similar (token, str) tuples.
    """
    formatted_tokens = to_formatted_text(tokens)
    ansi_tokens = []
    for style, text in formatted_tokens:
        if "\x1b" in text:
            # The text embeds raw ANSI escapes: re-parse it and merge
            # consecutive segments that share the same ANSI style.
            formatted_ansi = to_formatted_text(ANSI(text))
            ansi_text = ""
            prev_style = ""
            for ansi_style, ansi_text_part in formatted_ansi:
                if prev_style == ansi_style:
                    ansi_text += ansi_text_part
                else:
                    # Flush the accumulated run; fall back to the outer style
                    # when the ANSI segment carried none.
                    ansi_tokens.append((prev_style or style, ansi_text))
                    prev_style = ansi_style
                    ansi_text = ansi_text_part
            # Flush the final run.
            ansi_tokens.append((prev_style or style, ansi_text))
        else:
            ansi_tokens.append((style, text))
    return ansi_tokens


def _pygments_token_to_classname(token):
    """Converts pygments Tokens, token names (strings) to PTK style names."""
    if token and isinstance(token, str):
        # if starts with non capital letter => leave it as it is
        if token[0].islower():
            return token
        # if starts with capital letter => pygments token name
        if token.startswith("Token."):
            token = token[6:]
        # short colors - all caps
        if token == token.upper():
            token = "color." + token
        return "pygments." + token.lower()
    return pygments_token_to_classname(token)


def _style_from_pygments_dict(pygments_dict):
    """Custom implementation of ``style_from_pygments_dict`` that supports PTK
    specific (``Token.PTK``) styles.
    """
    pygments_style = []
    for token, style in pygments_dict.items():
        # if ``Token.PTK`` then add it as "native" PTK style too
        if str(token).startswith("Token.PTK"):
            # e.g. Token.PTK.CompletionMenu -> "completion-menu"
            key = CAPITAL_PATTERN.sub(r"\1-\2", str(token)[10:]).lower()
            pygments_style.append((key, style))
        pygments_style.append((_pygments_token_to_classname(token), style))
    return Style(pygments_style)


def _style_from_pygments_cls(pygments_cls):
    """Custom implementation of ``style_from_pygments_cls`` that supports PTK
    specific (``Token.PTK``) styles.
    """
    return _style_from_pygments_dict(pygments_cls.styles)


def disable_copy_on_deletion():
    # Wraps PTK's kill/delete key bindings so they use a dummy clipboard,
    # i.e. deleting text no longer clobbers the system clipboard.
    dummy_clipboard = DummyClipboard()
    ignored_actions = [
        "kill-line",
        "kill-word",
        "unix-word-rubout",
        "unix-line-discard",
        "backward-kill-word",
    ]

    def handle_binding(name):
        try:
            binding = get_by_name(name)
        except KeyError:
            print_warning(f"Failed to disable clipboard for ptk action {name!r}")
            return

        if getattr(binding, "xonsh_disabled_clipboard", False):
            # binding's clipboard has already been disabled
            return
        binding.xonsh_disabled_clipboard = True

        original_handler = binding.handler

        # this needs to be defined inside a function so that ``binding`` will be the correct one
        @wraps(original_handler)
        def wrapped_handler(event):
            # Temporarily swap in the dummy clipboard for the duration of the
            # original handler, restoring the real one afterwards.
            app = event.app
            prev = app.clipboard
            app.clipboard = dummy_clipboard
            try:
                return original_handler(event)
            finally:
                app.clipboard = prev

        binding.handler = wrapped_handler

    for _name in ignored_actions:
        handle_binding(_name)


class PromptToolkitShell(BaseShell):
    """The xonsh shell for prompt_toolkit v2 and later."""

    # Maps $COMPLETIONS_DISPLAY values to PTK completion menu styles.
    completion_displays_to_styles = {
        "multi": CompleteStyle.MULTI_COLUMN,
        "single": CompleteStyle.COLUMN,
        "readline": CompleteStyle.READLINE_LIKE,
        "none": None,
    }

    def __init__(self, **kwargs):
        ptk_args = kwargs.pop("ptk_args", {})
        super().__init__(**kwargs)
        if ON_WINDOWS:
            winutils.enable_virtual_terminal_processing()
        self._first_prompt = True
        self.history = ThreadedHistory(PromptToolkitHistory())
        ptk_args.setdefault("history", self.history)
        # NOTE(review): the condition below continues on the next chunk line
        # (`XSH.env.get("XONSH_COPY_ON_DELETE", False)`).
        if not
XSH.env.get("XONSH_COPY_ON_DELETE", False):  # NOTE(review): completes the `if not` cut on the previous chunk line
            disable_copy_on_deletion()
        if HAVE_SYS_CLIPBOARD:
            ptk_args.setdefault("clipboard", PyperclipClipboard())
        self.prompter: PromptSession = PromptSession(**ptk_args)
        self.prompt_formatter = PTKPromptFormatter(self)
        self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx, self)
        ptk_bindings = self.prompter.app.key_bindings
        self.key_bindings = load_xonsh_bindings(ptk_bindings)
        self._overrides_deprecation_warning_shown = False
        # Store original `_history_matches` in case we need to restore it
        self._history_matches_orig = self.prompter.default_buffer._history_matches
        # This assumes that PromptToolkitShell is a singleton
        events.on_ptk_create.fire(
            prompter=self.prompter,
            history=self.history,
            completer=self.pt_completer,
            bindings=self.key_bindings,
        )
        # Goes at the end, since _MergedKeyBindings objects do not have
        # an add() function, which is necessary for on_ptk_create events
        self.key_bindings = merge_key_bindings(
            [self.key_bindings, load_emacs_shift_selection_bindings()]
        )

    def get_lazy_ptk_kwargs(self):
        """These are non-essential attributes for the PTK shell to start.

        Lazy loading these later would save some startup time.
        """
        if not XSH.env.get("COLOR_INPUT"):
            return
        if HAS_PYGMENTS:
            # these imports slowdown a little
            from prompt_toolkit.lexers import PygmentsLexer

            yield "lexer", PygmentsLexer(pyghooks.XonshLexer)
        events.on_timingprobe.fire(name="on_pre_prompt_style")
        yield "style", self.get_prompt_style()
        events.on_timingprobe.fire(name="on_post_prompt_style")

    def get_prompt_style(self):
        # Build the PTK Style from the active pygments style (or the default
        # style dict), then layer any user-supplied overrides on top.
        env = XSH.env
        style_overrides_env = env.get("PTK_STYLE_OVERRIDES", {}).copy()
        if (
            len(style_overrides_env) > 0
            and not self._overrides_deprecation_warning_shown
        ):
            print_warning(
                "$PTK_STYLE_OVERRIDES is deprecated, use $XONSH_STYLE_OVERRIDES instead!"
            )
            self._overrides_deprecation_warning_shown = True
        # Deprecated variable is still honored, but the new one wins.
        style_overrides_env.update(env.get("XONSH_STYLE_OVERRIDES", {}))
        if HAS_PYGMENTS:
            style = _style_from_pygments_cls(pyghooks.xonsh_style_proxy(self.styler))
            if len(self.styler.non_pygments_rules) > 0:
                try:
                    style = merge_styles(
                        [
                            style,
                            _style_from_pygments_dict(self.styler.non_pygments_rules),
                        ]
                    )
                except (AttributeError, TypeError, ValueError) as style_exception:
                    print_warning(
                        f"Error applying style override!\n{style_exception}\n"
                    )
        else:
            style = _style_from_pygments_dict(DEFAULT_STYLE_DICT)
        if len(style_overrides_env) > 0:
            try:
                style = merge_styles(
                    [style, _style_from_pygments_dict(style_overrides_env)]
                )
            except (AttributeError, TypeError, ValueError) as style_exception:
                print_warning(f"Error applying style override!\n{style_exception}\n")
        return style

    def singleline(
        self, auto_suggest=None, enable_history_search=True, multiline=True, **kwargs
    ):
        """Reads a single line of input from the shell. The store_in_history
        kwarg flags whether the input should be stored in PTK's in-memory
        history.
""" events.on_pre_prompt_format.fire() env = XSH.env mouse_support = env.get("MOUSE_SUPPORT") auto_suggest = auto_suggest if env.get("AUTO_SUGGEST") else None refresh_interval = env.get("PROMPT_REFRESH_INTERVAL") refresh_interval = refresh_interval if refresh_interval > 0 else None complete_in_thread = env.get("COMPLETION_IN_THREAD") completions_display = env.get("COMPLETIONS_DISPLAY") complete_style = self.completion_displays_to_styles[completions_display] complete_while_typing = env.get("UPDATE_COMPLETIONS_ON_KEYPRESS") if complete_while_typing: # PTK requires history search to be none when completing while typing enable_history_search = False if HAS_PYGMENTS: self.styler.style_name = env.get("XONSH_COLOR_STYLE") completer = None if completions_display == "none" else self.pt_completer events.on_timingprobe.fire(name="on_pre_prompt_tokenize") # clear prompt level cache env["PROMPT_FIELDS"].reset() get_bottom_toolbar_tokens = self.bottom_toolbar_tokens if env.get("UPDATE_PROMPT_ON_KEYPRESS"): get_prompt_tokens = self.prompt_tokens get_rprompt_tokens = self.rprompt_tokens else: get_prompt_tokens = self.prompt_tokens() get_rprompt_tokens = self.rprompt_tokens() if get_bottom_toolbar_tokens: get_bottom_toolbar_tokens = get_bottom_toolbar_tokens() events.on_timingprobe.fire(name="on_post_prompt_tokenize") if env.get("VI_MODE"): editing_mode = EditingMode.VI else: editing_mode = EditingMode.EMACS if env.get("XONSH_HISTORY_MATCH_ANYWHERE"): self.prompter.default_buffer._history_matches = MethodType( _cust_history_matches, self.prompter.default_buffer ) elif ( self.prompter.default_buffer._history_matches is not self._history_matches_orig ): self.prompter.default_buffer._history_matches = self._history_matches_orig menu_rows = env.get("COMPLETIONS_MENU_ROWS", None) if menu_rows: # https://github.com/xonsh/xonsh/pull/4477#pullrequestreview-767982976 menu_rows += 1 prompt_args = { "mouse_support": mouse_support, "auto_suggest": auto_suggest, "message": get_prompt_tokens, 
"rprompt": get_rprompt_tokens, "bottom_toolbar": get_bottom_toolbar_tokens, "completer": completer, "multiline": multiline, "editing_mode": editing_mode, "prompt_continuation": self.continuation_tokens, "enable_history_search": enable_history_search, "reserve_space_for_menu": menu_rows, "key_bindings": self.key_bindings, "complete_style": complete_style, "complete_while_typing": complete_while_typing, "include_default_pygments_style": False, "refresh_interval": refresh_interval, "complete_in_thread": complete_in_thread, } if env["ENABLE_ASYNC_PROMPT"]: # once the prompt is done, update it in background as each future is completed prompt_args["pre_run"] = self.prompt_formatter.start_update else: for attr, val in self.get_lazy_ptk_kwargs(): prompt_args[attr] = val if editing_mode == EditingMode.VI: prompt_args["cursor"] = ModalCursorShapeConfig() events.on_pre_prompt.fire() line = self.prompter.prompt(**prompt_args) events.on_post_prompt.fire() return line def _push(self, line): """Pushes a line onto the buffer and compiles the code in a way that enables multiline input. 
""" code = None self.buffer.append(line) if self.need_more_lines: return None, code src = "".join(self.buffer) src = transform_command(src) try: code = self.execer.compile(src, mode="single", glbs=self.ctx, locs=None) self.reset_buffer() except Exception: # pylint: disable=broad-except self.reset_buffer() print_exception() return src, None return src, code def cmdloop(self, intro=None): """Enters a loop that reads and execute input from user.""" if intro: print(intro) auto_suggest = AutoSuggestFromHistory() self.push = self._push while not XSH.exit: try: line = self.singleline(auto_suggest=auto_suggest) if not line: self.emptyline() else: raw_line = line line = self.precmd(line) self.default(line, raw_line) except (KeyboardInterrupt, SystemExit): self.reset_buffer() except EOFError: if XSH.env.get("IGNOREEOF"): print('Use "exit" to leave the shell.', file=sys.stderr) else: break def _get_prompt_tokens(self, env_name: str, prompt_name: str, **kwargs): env = XSH.env # type:ignore p = env.get(env_name) if not p and "default" in kwargs: return kwargs.pop("default") try: p = self.prompt_formatter( template=p, threaded=env["ENABLE_ASYNC_PROMPT"], prompt_name=prompt_name ) except Exception: # pylint: disable=broad-except print_exception() toks = partial_color_tokenize(p) return tokenize_ansi(PygmentsTokens(toks)) def prompt_tokens(self): """Returns a list of (token, str) tuples for the current prompt.""" if self._first_prompt: carriage_return() self._first_prompt = False tokens = self._get_prompt_tokens("PROMPT", "message") self.settitle() return tokens def rprompt_tokens(self): """Returns a list of (token, str) tuples for the current right prompt. """ return self._get_prompt_tokens("RIGHT_PROMPT", "rprompt", default=[]) def _bottom_toolbar_tokens(self): """Returns a list of (token, str) tuples for the current bottom toolbar. 
""" return self._get_prompt_tokens("BOTTOM_TOOLBAR", "bottom_toolbar", default=None) @property def bottom_toolbar_tokens(self): """Returns self._bottom_toolbar_tokens if it would yield a result""" if XSH.env.get("BOTTOM_TOOLBAR"): return self._bottom_toolbar_tokens def continuation_tokens(self, width, line_number, is_soft_wrap=False): """Displays dots in multiline prompt""" if is_soft_wrap: return "" width -= 1 dots = XSH.env.get("MULTILINE_PROMPT") dots = dots() if callable(dots) else dots if not dots: return "" basetoks = self.format_color(dots) baselen = sum(len(t[1]) for t in basetoks) if baselen == 0: return [(Token, " " * (width + 1))] toks = basetoks * (width // baselen) n = width % baselen count = 0 for tok in basetoks: slen =
self.status == 6: return "killing" elif self.status == 7: return "butchering" elif self.status == 8: return "assassinating" elif self.status == 9: return "executing" elif self.status == 10: self.gold += random.choice(range(0,20)) return "selling loot..." else: return "deleting enemies from existence..." def enchantment_string(self): if self.enchantment == 0: return "Weak Apathy" elif self.enchantment == 1: return "Apathy" elif self.enchantment == 2: return "Strong Apathy" elif self.enchantment == 3: return "Strength" elif self.enchantment == 4: return "Ironhide" elif self.enchantment == 5: return "Steelhide" elif self.enchantment == 6: return "Titaniumhide" elif self.enchantment == 7: return "Minor Deflect Spell" elif self.enchantment == 8: return "Deflect Spell" elif self.enchantment == 9: return "Major Deflect Spell" elif self.enchantment == 10: return "Minor Absorb Spell" elif self.enchantment == 11: return "Absorb Spell" elif self.enchantment == 12: return "Major Absorb Spell" elif self.enchantment == 13: return "Magical Drain" elif self.enchantment == 14: return "Vampiric Magical Drain" elif self.enchantment == 15: return "Howling Vampiric Magical Drain" elif self.enchantment == 16: return "Zombification" elif self.enchantment == 17: return "Resurrection" elif self.enchantment == 18: return "Holy Grail" elif self.enchantment == 19: return "Telepathy" elif self.enchantment == 20: return "Telekinesis" elif self.enchantment == 21: return "Clairvoyance" elif self.enchantment == 22: return "Pyrokenesis" elif self.enchantment == 23: return "Retrocognition" elif self.enchantment == 24: return "Psychic Link" elif self.enchantment == 25: return "Demonic Rage" elif self.enchantment == 26: return "Demonic Hunger" elif self.enchantment == 27: return "Soul Sucker" elif self.enchantment == 28: return "Soul Summoner" elif self.enchantment == 29: return "We are Legion" elif self.enchantment == 30: return "Weak Prescience" elif self.enchantment == 31: return "Prescience" elif 
self.enchantment == 32: return "Great Prescience" else: return "Magnificent Prescience" def weapon_string(self): if self.weapon == 0: return "Stick" elif self.weapon == 1: return "Sharpened Stick" elif self.weapon == 2: return "Really Sharp Stick" elif self.weapon == 3: return "Blunt Spear" elif self.weapon == 4: return "Spear" elif self.weapon == 5: return "Sharp Spear" elif self.weapon == 6: return "Light Club" elif self.weapon == 7: return "Club" elif self.weapon == 8: return "Heavy Club" elif self.weapon == 9: return "Blunt and Light Axe" elif self.weapon == 10: return "Blunt Axe" elif self.weapon == 11: return "Heavy Blunt Axe" elif self.weapon == 12: return "Heavy Axe" elif self.weapon == 13: return "Heavy and Sharp Axe" elif self.weapon == 14: return "Light Dagger" elif self.weapon == 15: return "Dagger" elif self.weapon == 16: return "Sharp Dagger" elif self.weapon == 17: return "Light Sword" elif self.weapon == 18: return "Sword" elif self.weapon == 19: return "Sharp Sword" elif self.weapon == 20: return "Sharp Longsword" elif self.weapon == 21: return "Small Wand" elif self.weapon == 22: return "Wand" elif self.weapon == 23: return "Long Wand" elif self.weapon == 24: return "Short Magical Staff" elif self.weapon == 25: return "Magical Staff" elif self.weapon == 26: return "Large Magical Staff" elif self.weapon == 27: return "Short Mage's Staff" elif self.weapon == 28: return "Mage's Staff" elif self.weapon == 29: return "Large Mage's Staff" elif self.weapon == 30: return "Short Wizard's Staff" elif self.weapon == 31: return "Wizard's Staff" elif self.weapon == 32: return "Large Wizard's Staff" elif self.weapon == 33: return "Short Warlock's Staff" elif self.weapon == 34: return "Warlock's Staff" elif self.weapon == 35: return "Large Warlock's Staff" elif self.weapon == 36: return "Scroll of Fireball" elif self.weapon == 37: return "Scroll of Hurricane" elif self.weapon == 38: return "Scroll of Tornado" elif self.weapon == 39: return "Scroll of Boulder" 
elif self.weapon == 40: return "Scroll of Burning Tornado" elif self.weapon == 41: return "Scroll of Burning Tsunami" elif self.weapon == 42: return "Scroll of Earthquake" elif self.weapon == 43: return "Scroll of Volcano" elif self.weapon == 44: return "Scroll of Torture" elif self.weapon == 45: return "Scroll of Mind Control" elif self.weapon == 46: return "Scroll of Death" elif self.weapon == 47: return "Scroll of Zomibification" elif self.weapon == 48: return "Scroll of Summon Ghouls" elif self.weapon == 49: return "Scroll of Summon Deadpool" elif self.weapon == 50: return "Scroll of Summon Wyrm" elif self.weapon == 51: return "Scroll of Summon Fire Wyrm" elif self.weapon == 52: return "Scroll of Summon Imperial Paladin" elif self.weapon == 53: return "Scroll of Summon Necromancer" elif self.weapon == 54: return "Scroll of Summon Bat Swarm" elif self.weapon == 55: return "Scroll of Summon Death" elif self.weapon == 56: return "Shard of Infinity" elif self.weapon == 57: return "Infinity Stone" elif self.weapon == 58: return "Magic Ring" elif self.weapon == 59: return "One Ring" elif self.weapon == 60: return "Infinity Dagger" elif self.weapon == 61: return "Sands of Time" elif self.weapon == 62: return "Poseidon's Trident" elif self.weapon == 63: return "Hades' Fork" elif self.weapon == 64: return "Zeus' Lightning" elif self.weapon == 65: return "Deadpool's Fist" elif self.weapon == 66: return "Dragon Talon" elif self.weapon == 67: return "Insane Hobbit" elif self.weapon == 68: return "Ghostly Army" elif self.weapon == 69: return "Dead Army" elif self.weapon == 70: return "Damned Army" elif self.weapon == 71: return "Robot Damned Army" elif self.weapon == 72: return "Living Sword" elif self.weapon == 73: return "Infinity Gauntlet" elif self.weapon == 74: return "Hypercube" elif self.weapon == 75: return "Tesseract" elif self.weapon == 76: return "Penteract" elif self.weapon == 77: return "Hexeract" elif self.weapon == 78: return "Hepteract" elif self.weapon == 
79: return "Octeract" elif self.weapon == 80: return "Enneract" elif self.weapon == 81: return "Dekeract" elif self.weapon == 82: return "N-Dimensional Cube" elif self.weapon == 83: return "N-Dimensional Space" elif self.weapon == 84: return "N-Dimensional Time" elif self.weapon == 85: return "2n-gonal Space-Time" elif self.weapon == 86: return "Dwarf Star" elif self.weapon == 87: return "White Dwarf Star" elif self.weapon == 88: return "Red Dwarf Star" elif self.weapon == 89: return "Black Hole" elif self.weapon == 90: return "Super Black Hole" elif self.weapon == 91: return "Super Massive Black Hole" elif self.weapon == 92: return "White Hole" elif self.weapon == 93: return "Hawking Radiation" elif self.weapon == 94: return "Singularity" elif self.weapon == 95: return "Mathematical Distortion Field" elif self.weapon == 96: return "Quantum Entanglement Field" elif self.weapon == 97: return "Ignore Physics Field" elif self.weapon == 98: return "Hyperdimensional Convolution Field" elif self.weapon == 99: return "Deadpool Science" else: return "Antiparticle Accelerator" def level_string(self): if self.level == 0: return "Homeless Wanderer" elif self.level == 1: return "Hermit" elif self.level == 2: return "Pub Brawler" elif self.level == 3: return "Fighter" elif self.level == 4: return "Boxer" elif self.level == 5: return "Martial Artist" elif self.level == 6: return "Homeowner" elif self.level == 7: return "Homewrecker" elif self.level == 8: return "Bandit" elif self.level == 9: return "Scout" elif self.level == 10: return "Squire" elif self.level == 11: return "Archer" elif self.level == 12: return "Crossbowman" elif self.level == 13: return "Knight" elif self.level == 14: return "Paladin" elif self.level == 15: return "Imperial Scout" elif self.level == 16: return "Imperial Squire" elif self.level == 17: return "Imperial Archer" elif self.level == 18: return "Imperial Crossbowman" elif self.level == 19: return "Imperial Knight" elif self.level == 20: return 
"Imperial Paladin" elif self.level == 21: return "Samurai Scout" elif self.level == 22: return "Samurai Archer" elif self.level == 23: return "Samurai Squire" elif self.level == 24: return "Samurai Knight" elif self.level == 25: return "Samurai Paladin" elif self.level == 26: return "Manslayer Scout" elif self.level == 27: return "Manslayer Archer" elif self.level == 28: return "Manslayer Squire" elif self.level == 29: return "Manslayer Knight" elif self.level == 30: return "Manslayer Paladin" elif self.level == 31: return "True Manslayer" elif self.level == 32: return "Apprentice Mage" elif self.level == 33: return "Mage" elif self.level == 34: return "Wizard" elif self.level == 35: return "Warlock" elif self.level == 36: return "Imperial Mage" elif self.level == 37: return "Imperial Wizard" elif self.level == 38: return
from torch_rgcn.utils import *
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
from torch import nn
import math
import torch


class DistMult(Module):
    """ DistMult scoring function (from https://arxiv.org/pdf/1412.6575.pdf)

    Scores a triple (s, p, o) as sum_i s_i * r_i * o_i, where r is a learned
    per-relation vector.  Optionally adds learned subject/predicate/object
    biases when `b_init` is truthy.
    """
    def __init__(self, indim, outdim, num_nodes, num_rel, w_init='standard-normal', w_gain=False, b_init=None):
        super(DistMult, self).__init__()
        # Stored so initialise_parameters() can pick the init scheme later.
        self.w_init = w_init
        self.w_gain = w_gain
        self.b_init = b_init

        # Create weights & biases
        # NOTE(review): relation embeddings are (indim, outdim); indexed along
        # dim 0 by predicate id in forward(), so indim presumably equals the
        # number of relations -- confirm against callers.
        self.relations = nn.Parameter(torch.FloatTensor(indim, outdim))
        if b_init:
            self.sbias = Parameter(torch.FloatTensor(num_nodes))
            self.obias = Parameter(torch.FloatTensor(num_nodes))
            self.pbias = Parameter(torch.FloatTensor(num_rel))
        else:
            # Registering None keeps the attribute present but parameter-free.
            self.register_parameter('sbias', None)
            self.register_parameter('obias', None)
            self.register_parameter('pbias', None)

        self.initialise_parameters()

    def initialise_parameters(self):
        """ Initialise weights and biases

        Options for initialising weights include:
            glorot-uniform - glorot (aka xavier) initialisation using a uniform distribution
            glorot-normal - glorot (aka xavier) initialisation using a normal distribution
            schlichtkrull-uniform - schlichtkrull initialisation using a uniform distribution
            schlichtkrull-normal - schlichtkrull initialisation using a normal distribution
            normal - using a standard normal distribution
            uniform - using a uniform distribution

        Options for initialising biases include:
            ones - setting all values to one
            zeros - setting all values to zero
            normal - using a standard normal distribution
            uniform - using a uniform distribution
        """
        # Weights: select_w_init comes from torch_rgcn.utils (star import).
        init = select_w_init(self.w_init)
        if self.w_gain:
            gain = nn.init.calculate_gain('relu')
            init(self.relations, gain=gain)
        else:
            init(self.relations)

        # Checkpoint 6
        # print('min', torch.min(self.relations))
        # print('max', torch.max(self.relations))
        # print('mean', torch.mean(self.relations))
        # print('std', torch.std(self.relations))
        # print('size', self.relations.size())

        # Biases (only exist when b_init was truthy at construction)
        if self.b_init:
            init = select_b_init(self.b_init)
            init(self.sbias)
            init(self.pbias)
            init(self.obias)

    def s_penalty(self, triples, nodes):
        """ Compute Schlichtkrull L2 penalty for the decoder

        Mean of squared embeddings of the subjects, predicates and objects
        referenced by `triples` (not the full embedding tables).
        """
        s_index, p_index, o_index = split_spo(triples)
        s, p, o = nodes[s_index, :], self.relations[p_index, :], nodes[o_index, :]

        return s.pow(2).mean() + p.pow(2).mean() + o.pow(2).mean()

    def forward(self, triples, nodes):
        """ Score candidate triples

        Returns one scalar score per triple; higher means more plausible.
        """
        s_index, p_index, o_index = split_spo(triples)
        s, p, o = nodes[s_index, :], self.relations[p_index, :], nodes[o_index, :]
        scores = (s * p * o).sum(dim=-1)
        if self.b_init:
            scores = scores + (self.sbias[s_index] + self.pbias[p_index] + self.obias[o_index])
        return scores


class RelationalGraphConvolutionNC(Module):
    """ Relational Graph Convolution (RGC) Layer for Node Classification
    (as described in https://arxiv.org/abs/1703.06103) """

    def __init__(self,
                 triples=None,
                 num_nodes=None,
                 num_relations=None,
                 in_features=None,
                 out_features=None,
                 edge_dropout=None,
                 edge_dropout_self_loop=None,
                 bias=True,
                 decomposition=None,
                 vertical_stacking=False,
                 diag_weight_matrix=False,
                 reset_mode='glorot_uniform'):
        super(RelationalGraphConvolutionNC, self).__init__()

        # NOTE(review): this assert uses 'or', so it passes when ANY one of the
        # arguments is given, while the message implies ALL are required --
        # probable bug (should likely be 'and'); confirm before changing.
        assert (triples is not None or num_nodes is not None or num_relations is not None or out_features is not None), \
            "The following must be specified: triples, number of nodes, number of relations and output dimension!"

        # If featureless, use number of nodes instead as input dimension
        in_dim = in_features if in_features is not None else num_nodes
        out_dim = out_features

        # Unpack arguments
        weight_decomp = decomposition['type'] if decomposition is not None and 'type' in decomposition else None
        num_bases = decomposition['num_bases'] if decomposition is not None and 'num_bases' in decomposition else None
        num_blocks = decomposition['num_blocks'] if decomposition is not None and 'num_blocks' in decomposition else None

        self.triples = triples
        self.num_nodes = num_nodes
        self.num_relations = num_relations
        self.in_features = in_features
        self.out_features = out_features
        self.weight_decomp = weight_decomp
        self.num_bases = num_bases
        self.num_blocks = num_blocks
        self.vertical_stacking = vertical_stacking
        self.diag_weight_matrix = diag_weight_matrix
        self.edge_dropout = edge_dropout
        self.edge_dropout_self_loop = edge_dropout_self_loop

        # If this flag is active, the weight matrix is a diagonal matrix:
        # one vector per relation instead of a full (in, out) matrix.
        # Forces out_features == in_features, disables decomposition and bias.
        if self.diag_weight_matrix:
            self.weights = torch.nn.Parameter(torch.empty((self.num_relations, self.in_features)), requires_grad=True)
            self.out_features = self.in_features
            self.weight_decomp = None
            bias = False
        # Instantiate weights
        elif self.weight_decomp is None:
            self.weights = Parameter(torch.FloatTensor(num_relations, in_dim, out_dim))
        elif self.weight_decomp == 'basis':
            # Weight Regularisation through Basis Decomposition
            assert num_bases > 0, \
                'Number of bases should be set to higher than zero for basis decomposition!'
            self.bases = Parameter(torch.FloatTensor(num_bases, in_dim, out_dim))
            self.comps = Parameter(torch.FloatTensor(num_relations, num_bases))
        elif self.weight_decomp == 'block':
            # Weight Regularisation through Block Diagonal Decomposition
            assert self.num_blocks > 0, \
                'Number of blocks should be set to a value higher than zero for block diagonal decomposition!'
            assert in_dim % self.num_blocks == 0 and out_dim % self.num_blocks == 0, \
                f'For block diagonal decomposition, input dimensions ({in_dim}, {out_dim}) must be divisible ' \
                f'by number of blocks ({self.num_blocks})'
            self.blocks = nn.Parameter(
                torch.FloatTensor(num_relations, self.num_blocks, in_dim // self.num_blocks, out_dim // self.num_blocks))
        else:
            raise NotImplementedError(f'{self.weight_decomp} decomposition has not been implemented')

        # Instantiate biases
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters(reset_mode)

    def reset_parameters(self, reset_mode='glorot_uniform'):
        """ Initialise biases and weights (glorot_uniform or uniform)

        Raises NotImplementedError for any other reset_mode.
        """
        if reset_mode == 'glorot_uniform':
            if self.weight_decomp == 'block':
                nn.init.xavier_uniform_(self.blocks, gain=nn.init.calculate_gain('relu'))
            elif self.weight_decomp == 'basis':
                nn.init.xavier_uniform_(self.bases, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.comps, gain=nn.init.calculate_gain('relu'))
            else:
                nn.init.xavier_uniform_(self.weights, gain=nn.init.calculate_gain('relu'))

            if self.bias is not None:
                torch.nn.init.zeros_(self.bias)
        elif reset_mode == 'schlichtkrull':
            # NOTE(review): this branch is byte-identical to the
            # 'glorot_uniform' branch above -- the Schlichtkrull scheme does
            # not appear to be implemented here; confirm whether that is
            # intentional.
            if self.weight_decomp == 'block':
                nn.init.xavier_uniform_(self.blocks, gain=nn.init.calculate_gain('relu'))
            elif self.weight_decomp == 'basis':
                nn.init.xavier_uniform_(self.bases, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.comps, gain=nn.init.calculate_gain('relu'))
            else:
                nn.init.xavier_uniform_(self.weights, gain=nn.init.calculate_gain('relu'))

            if self.bias is not None:
                torch.nn.init.zeros_(self.bias)
        elif reset_mode == 'uniform':
            # NOTE(review): reads self.weights.size(1) unconditionally; under
            # 'basis'/'block' decomposition self.weights does not exist, so
            # this branch would raise AttributeError -- confirm.
            stdv = 1.0 / math.sqrt(self.weights.size(1))
            if self.weight_decomp == 'block':
                self.blocks.data.uniform_(-stdv, stdv)
            elif self.weight_decomp == 'basis':
                self.bases.data.uniform_(-stdv, stdv)
                self.comps.data.uniform_(-stdv, stdv)
            else:
                self.weights.data.uniform_(-stdv, stdv)

            if self.bias is not None:
                self.bias.data.uniform_(-stdv, stdv)
        else:
            raise NotImplementedError(f'{reset_mode} parameter initialisation method has not been implemented')

    def forward(self, features=None):
        """ Perform a single pass of message propagation

        features: optional (num_nodes, in_features) node feature matrix.
        Must be given iff in_features was given at construction; when absent
        the layer is "featureless" and the node identity acts as the feature.
        Returns a (num_nodes, out_features) tensor.
        """
        assert (features is None) == (self.in_features is None), "in_features not provided!"

        in_dim = self.in_features if self.in_features is not None else self.num_nodes
        triples = self.triples
        out_dim = self.out_features
        edge_dropout = self.edge_dropout
        weight_decomp = self.weight_decomp
        num_nodes = self.num_nodes
        num_relations = self.num_relations
        vertical_stacking = self.vertical_stacking
        # NOTE(review): assumes self.triples contains each non-self-loop edge
        # in both directions plus one self-loop per node -- confirm against
        # the data loader.
        general_edge_count = int((triples.size(0) - num_nodes)/2)
        self_edge_count = num_nodes

        # Choose weights
        if weight_decomp is None:
            weights = self.weights
        elif weight_decomp == 'basis':
            # Recombine the shared bases into per-relation weight matrices.
            weights = torch.einsum('rb, bio -> rio', self.comps, self.bases)
        elif weight_decomp == 'block':
            weights = block_diag(self.blocks)
        else:
            raise NotImplementedError(f'{weight_decomp} decomposition has not been implemented')

        # Determine whether to use cuda or not
        if weights.is_cuda:
            device = 'cuda'
        else:
            device = 'cpu'

        # Stack adjacency matrices either vertically or horizontally
        adj_indices, adj_size = stack_matrices(
            triples,
            num_nodes,
            num_relations,
            vertical_stacking=vertical_stacking,
            device=device
        )

        num_triples = adj_indices.size(0)
        vals = torch.ones(num_triples, dtype=torch.float, device=device)

        # Apply normalisation (vertical-stacking -> row-wise sum & horizontal-stacking -> column-wise sum)
        sums = sum_sparse(adj_indices, vals, adj_size, row_normalisation=vertical_stacking, device=device)
        if not vertical_stacking:
            # Rearrange column-wise normalised value to reflect original order (because of transpose-trick)
            n = general_edge_count
            i = self_edge_count
            sums = torch.cat([sums[n:2 * n], sums[:n], sums[-i:]], dim=0)

        vals = vals / sums

        # Construct adjacency matrix
        # NOTE(review): torch.cuda.sparse.FloatTensor / torch.sparse.FloatTensor
        # are legacy constructors (deprecated in favour of
        # torch.sparse_coo_tensor) -- kept as-is here.
        if device == 'cuda':
            adj = torch.cuda.sparse.FloatTensor(indices=adj_indices.t(), values=vals, size=adj_size)
        else:
            adj = torch.sparse.FloatTensor(indices=adj_indices.t(), values=vals, size=adj_size)

        if self.diag_weight_matrix:
            assert weights.size() == (num_relations, in_dim)
        else:
            assert weights.size() == (num_relations, in_dim, out_dim)

        if self.in_features is None:
            # Message passing if no features are given
            output = torch.mm(adj, weights.view(num_relations * in_dim, out_dim))
        elif self.diag_weight_matrix:
            # Per-relation elementwise scaling of features, then sparse matmul.
            fw = torch.einsum('ij,kj->kij', features, weights)
            fw = torch.reshape(fw, (self.num_relations * self.num_nodes, in_dim))
            output = torch.mm(adj, fw)
        elif self.vertical_stacking:
            # Message passing if the adjacency matrix is vertically stacked
            af = torch.spmm(adj, features)
            af = af.view(self.num_relations, self.num_nodes, in_dim)
            output = torch.einsum('rio, rni -> no', weights, af)
        else:
            # Message passing if the adjacency matrix is horizontally stacked
            fw = torch.einsum('ni, rio -> rno', features, weights).contiguous()
            output = torch.mm(adj, fw.view(self.num_relations * self.num_nodes, out_dim))

        assert output.size() == (self.num_nodes, out_dim)

        if self.bias is not None:
            output = torch.add(output, self.bias)

        return output


class RelationalGraphConvolutionLP(Module):
    """ Relational Graph Convolution (RGC) Layer for Link Prediction
    (as described in https://arxiv.org/abs/1703.06103) """

    def __init__(self,
                 num_nodes=None,
                 num_relations=None,
                 in_features=None,
                 out_features=None,
                 edge_dropout=None,
                 edge_dropout_self_loop=None,
                 decomposition=None,
                 vertical_stacking=False,
                 w_init='glorot-normal',
                 w_gain=False,
                 b_init=None):
        super(RelationalGraphConvolutionLP, self).__init__()

        # NOTE(review): same 'or'-vs-'and' concern as in the NC layer's assert.
        assert (num_nodes is not None or num_relations is not None or out_features is not None), \
            "The following must be specified: number of nodes, number of relations and output dimension!"
device = 'cuda' if torch.cuda.is_available() else 'cpu' # If featureless, use number of nodes instead as feature input dimension in_dim = in_features if in_features is not None else num_nodes out_dim = out_features # Unpack arguments weight_decomp = decomposition['type'] if decomposition is not None and 'type' in decomposition else None num_bases = decomposition['num_bases'] if decomposition is not None and 'num_bases' in decomposition else None num_blocks = decomposition['num_blocks'] if decomposition is not None and 'num_blocks' in decomposition else None self.num_nodes = num_nodes self.num_relations = num_relations self.in_features = in_dim self.out_features = out_dim self.weight_decomp = weight_decomp self.num_bases = num_bases self.num_blocks = num_blocks self.vertical_stacking = vertical_stacking self.edge_dropout = edge_dropout self.edge_dropout_self_loop = edge_dropout_self_loop self.w_init = w_init self.w_gain = w_gain self.b_init = b_init # Create weight parameters if self.weight_decomp is None: self.weights = Parameter(torch.FloatTensor(num_relations, in_dim, out_dim).to(device)) elif self.weight_decomp == 'basis': # Weight Regularisation through Basis Decomposition assert num_bases >
list of futures
        # tag -> list of futures waiting on events with that tag prefix
        self.tag_map = defaultdict(list)

        # request_obj -> list of (tag, future)
        self.request_map = defaultdict(list)

    def clean_timeout_futures(self, request):
        '''
        Remove all futures that were waiting for request `request` since it is done waiting

        Each such future is failed with TimeoutException and dropped from
        tag_map; empty tag buckets are removed entirely.
        '''
        if request not in self.request_map:
            return
        for tag, future in self.request_map[request]:
            # TODO: log, this shouldn't happen...
            if tag not in self.tag_map:
                continue
            # mark the future done
            future.set_exception(TimeoutException())
            self.tag_map[tag].remove(future)
            # if that was the last of them, remove the key all together
            if len(self.tag_map[tag]) == 0:
                del self.tag_map[tag]

    def get_event(self, request, tag='', callback=None):
        '''
        Get an event (async of course) return a future that will get it later

        Registers the future under `tag` and under `request` (so it can be
        timed out when the request finishes).  If `callback` is given it is
        scheduled on the io_loop with the event once the future resolves.
        '''
        future = Future()
        if callback is not None:
            def handle_future(future):
                response = future.result()
                self.io_loop.add_callback(callback, response)
            future.add_done_callback(handle_future)
        # add this tag and future to the callbacks
        self.tag_map[tag].append(future)
        self.request_map[request].append((tag, future))

        return future

    def iter_events(self):
        '''
        Iterate over all events that could happen

        Polls the salt event bus without blocking, resolves every pending
        future whose tag prefix matches, then re-schedules itself.
        '''
        try:
            data = self.event.get_event_noblock()
            # see if we have any futures that need this info:
            # NOTE(review): deletes from tag_map while iterating .items() --
            # safe on Python 2 (items() returns a list, as this module's
            # print statements imply), but would break on Python 3.
            for tag_prefix, futures in self.tag_map.items():
                if data['tag'].startswith(tag_prefix):
                    for future in futures:
                        if future.done():
                            continue
                        future.set_result(data)
                    del self.tag_map[tag_prefix]

            # call yourself back!
            tornado.ioloop.IOLoop.instance().add_callback(self.iter_events)

        except zmq.ZMQError as e:
            # TODO: not sure what other errors we can get...
            # NOTE(review): raising a bare Exception() discards the original
            # ZMQError (errno, message) -- consider re-raising `e`.
            if e.errno != zmq.EAGAIN:
                raise Exception()
            # add callback in the future (to avoid spinning)
            # TODO: configurable timeout
            tornado.ioloop.IOLoop.instance().add_timeout(time.time() + 0.1, self.iter_events)
        except:
            # Broad catch keeps the listener loop alive at all costs.
            logging.critical('Uncaught exception in the event_listener: {0}'.format(sys.exc_info()))
            # TODO: configurable timeout
            tornado.ioloop.IOLoop.instance().add_timeout(time.time() + 0.1, self.iter_events)


# TODO: move to a utils function within salt-- the batching stuff is a bit tied together
def get_batch_size(batch, num_minions):
    '''
    Return the batch size that you should have

    `batch` may be an absolute count ("3") or a percentage ("10%"/"%10") of
    `num_minions`; percentages below one minion are rounded up to 1.
    Returns None after printing when `batch` is unparsable.
    '''
    # figure out how many we can keep in flight
    partition = lambda x: float(x) / 100.0 * num_minions
    try:
        if '%' in batch:
            res = partition(float(batch.strip('%')))
            if res < 1:
                return int(math.ceil(res))
            else:
                return int(res)
        else:
            return int(batch)
    except ValueError:
        print(('Invalid batch data sent: {0}\nData must be in the form'
               'of %10, 10% or 3').format(batch))


class BaseSaltAPIHandler(tornado.web.RequestHandler):
    # (content-type, serializer) pairs tried in order against the Accept header
    ct_out_map = (
        ('application/json', json.dumps),
        ('application/x-yaml', functools.partial(
            yaml.safe_dump, default_flow_style=False)),
    )

    def _verify_client(self, client):
        '''
        Verify that the client is in fact one we have
        '''
        if client not in saltclients:
            self.set_status(400)
            self.write('We don\'t serve your kind here')
            self.finish()

    @property
    def token(self):
        '''
        The token used for the request
        '''
        # find the token (cookie or headers)
        if AUTH_TOKEN_HEADER in self.request.headers:
            return self.request.headers[AUTH_TOKEN_HEADER]
        else:
            return self.get_cookie(AUTH_COOKIE_NAME)

    def _verify_auth(self):
        '''
        Boolean whether the request is auth'd
        '''
        return self.token and bool(self.application.auth.get_tok(self.token))

    def prepare(self):
        '''
        Run before get/posts etc. Pre-flight checks:
            - verify that we can speak back to them (compatible accept header)
        '''
        # verify the content type
        found = False
        for content_type, dumper in self.ct_out_map:
            if fnmatch.fnmatch(content_type, self.request.headers['Accept']):
                found = True
                break

        # better return message?
        # NOTE(review): send_error(406) is not followed by a return, so the
        # method keeps running and uses whatever content_type/dumper the loop
        # last left bound -- confirm intent.
        if not found:
            self.send_error(406)

        self.content_type = content_type
        self.dumper = dumper

        # do the common parts
        self.start = time.time()
        self.connected = True

        self.lowstate = self._get_lowstate()

    def timeout_futures(self):
        '''
        timeout a session
        '''
        # TODO: set a header or something??? so we know it was a timeout
        self.application.event_listener.clean_timeout_futures(self)

    def on_finish(self):
        '''
        When the job has been done, lets cleanup
        '''
        # timeout all the futures
        self.timeout_futures()

    def on_connection_close(self):
        '''
        If the client disconnects, lets close out
        '''
        self.finish()

    def serialize(self, data):
        '''
        Serialize the output based on the Accept header
        '''
        self.set_header('Content-Type', self.content_type)
        return self.dumper(data)

    def _form_loader(self, _):
        '''
        function to get the data from the urlencoded forms
        ignore the data passed in and just get the args from wherever they are
        '''
        data = {}
        for key, val in self.request.arguments.iteritems():
            if len(val) == 1:
                data[key] = val[0]
            else:
                data[key] = val
        return data

    def deserialize(self, data):
        '''
        Deserialize the data based on request content type headers

        Returns [] when the Content-Type header is missing (KeyError path).
        '''
        # NOTE(review): yaml.safe_load does not accept default_flow_style --
        # that kwarg belongs to dump; these partials would raise if exercised.
        ct_in_map = {
            'application/x-www-form-urlencoded': self._form_loader,
            'application/json': json.loads,
            'application/x-yaml': functools.partial(
                yaml.safe_load, default_flow_style=False),
            'text/yaml': functools.partial(
                yaml.safe_load, default_flow_style=False),
            # because people are terrible and dont mean what they say
            'text/plain': json.loads
        }

        try:
            # NOTE(review): send_error(406) without return -- the unknown
            # content type then raises KeyError on the lookup below and is
            # swallowed into the `return []` path.
            if self.request.headers['Content-Type'] not in ct_in_map:
                self.send_error(406)
            return ct_in_map[self.request.headers['Content-Type']](data)
        except KeyError:
            return []

    def _get_lowstate(self):
        '''
        Format the incoming data into a lowstate object

        Form posts are wrapped into a single-chunk lowstate list with 'arg'
        normalised to a list; structured bodies are used as-is.
        '''
        data = self.deserialize(self.request.body)
        self.raw_data = copy(data)

        if self.request.headers.get('Content-Type') == 'application/x-www-form-urlencoded':
            if 'arg' in data and not isinstance(data['arg'], list):
                data['arg'] = [data['arg']]
            lowstate = [data]
        else:
            lowstate = data
        return lowstate


class SaltAuthHandler(BaseSaltAPIHandler):
    '''
    Handler for login requests
    '''
    def get(self):
        '''
        We don't allow gets on the login path, so lets send back a nice message
        '''
        self.set_status(401)
        self.set_header('WWW-Authenticate', 'Session')

        ret = {'status': '401 Unauthorized',
               'return': 'Please log in'}

        self.write(self.serialize(ret))
        self.finish()

    # TODO: make async? Underlying library isn't... and we ARE making disk calls :(
    def post(self):
        '''
        Authenticate against Salt's eauth system

        {"return": {"start": 1395507384.320007, "token": "<KEY>", "expire": 1395550584.320007, "name": "jacksontj", "eauth": "pam"}}
        {"return": [{"perms": ["*.*"], "start": 1395507675.396021, "token": "dea<PASSWORD>", "expire": 1395550875.396021, "user": "jacksontj", "eauth": "pam"}]}
        '''
        creds = {'username': self.get_arguments('username')[0],
                 'password': self.get_arguments('password')[0],
                 'eauth': self.get_arguments('eauth')[0],
                 }

        token = self.application.auth.mk_token(creds)
        if not 'token' in token:
            # TODO: nicer error message
            # 'Could not authenticate using provided credentials')
            self.send_error(401)
            # return since we don't want to execute any more
            return

        # Grab eauth config for the current backend for the current user
        # NOTE(review): the dict lookups here raise KeyError, which is NOT in
        # the except tuple below; also send_error(500) is not followed by a
        # return, so `perms` may be unbound when building `ret` -- confirm.
        try:
            perms = self.application.opts['external_auth'][token['eauth']][token['name']]
        except (AttributeError, IndexError):
            logging.debug("Configuration for external_auth malformed for "
                          "eauth '{0}', and user '{1}'."
                          .format(token.get('eauth'), token.get('name')), exc_info=True)
            # TODO better error -- 'Configuration for external_auth could not be read.'
            self.send_error(500)

        ret = {'return': [{
            'token': token['token'],
            'expire': token['expire'],
            'start': token['start'],
            'user': token['name'],
            'eauth': token['eauth'],
            'perms': perms,
        }]}

        self.write(self.serialize(ret))
        self.finish()


class SaltAPIHandler(BaseSaltAPIHandler):
    '''
    Main API handler for base "/"
    '''
    def get(self):
        '''
        return data about what clients you have
        '''
        ret = {"clients": saltclients.keys(),
               "return": "Welcome"}
        self.write(self.serialize(ret))
        self.finish()

    @tornado.web.asynchronous
    def post(self):
        '''
        This function takes in all the args for dispatching requests

        **Example request**::

            % curl -si https://localhost:8000 \\
                    -H "Accept: application/x-yaml" \\
                    -H "X-Auth-Token: <PASSWORD>" \\
                    -d client=local \\
                    -d tgt='*' \\
                    -d fun='test.sleep' \\
                    -d arg=1
        '''
        # if you aren't authenticated, redirect to login
        if not self._verify_auth():
            self.redirect('/login')
            return

        client = self.get_arguments('client')[0]
        self._verify_client(client)
        self.disbatch(client)

    def disbatch(self, client):
        '''
        Disbatch a lowstate job to the appropriate client

        Re-checks auth per chunk and rejects chunks carrying their own
        'eauth'; dispatches to self._disbatch_<client>().
        '''
        self.client = client

        for low in self.lowstate:
            if (not self._verify_auth() or 'eauth' in low):
                # TODO: better error?
                self.set_status(401)
                self.finish()
                return

        # disbatch to the correct handler
        try:
            getattr(self, '_disbatch_{0}'.format(self.client))()
        except AttributeError:
            # TODO set the right status... this means we didn't implement it...
            self.set_status(500)
            self.finish()

    @tornado.gen.coroutine
    def _disbatch_local_batch(self):
        '''
        Disbatch local client batched commands

        Runs each lowstate chunk against its target minions at most
        `batch` at a time, collecting per-minion returns as events arrive.
        '''
        self.ret = []

        for chunk in self.lowstate:
            f_call = salt.utils.format_call(saltclients['local_batch'], chunk)

            timeout = float(chunk.get('timeout', self.application.opts['timeout']))
            # set the timeout
            timeout_obj = tornado.ioloop.IOLoop.instance().add_timeout(time.time() + timeout, self.timeout_futures)

            # ping all the minions (to see who we have to talk to)
            # TODO: actually ping them all? this just gets the pub data
            minions = saltclients['local'](chunk['tgt'],
                                           'test.ping',
                                           [],
                                           expr_form=f_call['kwargs']['expr_form'])['minions']

            chunk_ret = {}
            maxflight = get_batch_size(f_call['kwargs']['batch'], len(minions))
            inflight_futures = []
            # do this batch
            while len(minions) > 0:
                # if you have more to go, lets disbatch jobs
                # NOTE(review): inner loop pops without checking that minions
                # is non-empty; when fewer minions remain than maxflight this
                # pops from an empty list (IndexError) -- confirm.
                while len(inflight_futures) < maxflight:
                    minion_id = minions.pop(0)
                    f_call['args'][0] = minion_id  # TODO: list??
                    f_call['kwargs']['expr_form'] = 'glob'
                    pub_data = saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {}))
                    print pub_data
                    tag = tagify([pub_data['jid'], 'ret', minion_id], 'job')
                    future = self.application.event_listener.get_event(self, tag=tag)
                    inflight_futures.append(future)

                # wait until someone is done
                finished_future = yield Any(inflight_futures)
                try:
                    event = finished_future.result()
                except TimeoutException:
                    break
                print event
                chunk_ret[event['data']['id']] = event['data']['return']
                inflight_futures.remove(finished_future)

            self.ret.append(chunk_ret)

            # if we finish in time, cancel the timeout
            tornado.ioloop.IOLoop.instance().remove_timeout(timeout_obj)

        self.write(self.serialize({'return': self.ret}))
        self.finish()

    @tornado.gen.coroutine
    def _disbatch_local(self):
        '''
        Disbatch local client commands
        '''
        self.ret = []

        for chunk in self.lowstate:
            timeout = float(chunk.get('timeout', self.application.opts['timeout']))
            # set the timeout
            # NOTE(review): add_timeout is called twice here; the first handle
            # is discarded, so two timeout callbacks get scheduled and only
            # the second can ever be cancelled -- looks like a duplicated
            # line; confirm.
            tornado.ioloop.IOLoop.instance().add_timeout(time.time() + timeout, self.timeout_futures)
            timeout_obj = tornado.ioloop.IOLoop.instance().add_timeout(time.time() + timeout, self.timeout_futures)

            # TODO: not sure why....
we already verify auth, probably for ACLs # require token or eauth chunk['token'] = self.token chunk_ret = {} f_call = salt.utils.format_call(saltclients[self.client], chunk) # fire a job off pub_data = saltclients[self.client](*f_call.get('args', ()), **f_call.get('kwargs', {})) # get the tag that we are looking for tag = tagify([pub_data['jid'], 'ret'], 'job') minions_remaining = pub_data['minions'] #
== "variant" and n1!=n2: sb.append("\t\t\t\t<y:ArcEdge>\n") else: sb.append("\t\t\t\t<y:PolyLineEdge>\n") sb.append("\t\t\t\t\t<y:LineStyle ") if not dashed: sb.append("type=\"line\"") else: sb.append("type=\"dashed\"") #sb.append(" width=\"2.0\" ") sb.append(" width=\"5.0\" ") sb.append("color=\"") sb.append(color) sb.append("\"/>\n") sb.append("\t\t\t\t\t<y:Path sx=\"0.0\" sy=\"0") sb.append(".0\" tx=\"0.0\" ty=\"0") sb.append(".0\"/>\n") sb.append("\t\t\t\t\t<y:BendStyle smoothed=\"true\"/>\n") sb.append("\t\t\t\t\t<y:Arrows source=\"none\" target=\"") sb.append("none") sb.append("\"/>\n") sb.append("\t\t\t\t\t<y:EdgeLabel ") sb.append("y=\"4\" ") sb.append("visible=\"true\"") #sb.append(" fontSize=\"20\"") sb.append(" fontSize=\"45\"") sb.append(" preferredPlacement=") sb.append("\"target\">") sb.append(str(count)) sb.append("</y:EdgeLabel>\n") if kind=="variant" and n1!=n2: sb.append("\t\t\t\t</y:ArcEdge>\n") else: sb.append("\t\t\t\t</y:PolyLineEdge>\n") sb.append("\t\t\t</data>\n") sb.append("\t\t</edge>\n") return "".join(sb) def getFooter(): sb = [] sb.append("\t</graph>\n") sb.append("</graphml>\n") return "".join(sb) ################################################# def alternatingBFS(adj1,adj2,s): #print "Running BFS from ", s n = len(adj1) q = [] visited = [False for i in range(n)] d = [-1 for i in range(n)] p = [None for i in range(n)] visited[s] = True d[s] = 0 p[s] = None q.append(s) while len(q)>0: u = q.pop(0) if d[u]%2==0: adj = adj1 else: adj = adj2 for v in adj[u]: if not visited[v]: visited[v] = True d[v] = d[u] + 1 p[v] = u q.append(v) possible = range(n) for u in range(n): if u==s or d[u]==-1: continue if d[u]%2==1 and u in adj2[s]: v = u #print "Cycle:" while v!=s: if v in possible: possible.remove(v) #print v v=p[v] #print s if s in possible: possible.remove(s) possible = sorted(possible) return possible def impossiblePairs(source,target): #n =source.n extremities = source.extAndTels sourceAdj = source.adj sourceCn = source.cn targetAdj = 
target.adj targetCn = target.cn #leftTelExt = leftTelomereExt() #rightTelExt = rightTelomereExt(n) pairs = [] for e1 in extremities: for e2 in extremities: if e2<e1: continue pairs.append((e1,e2)) #pairs.remove((leftTelExt,rightTelExt)) for t1 in source.telExt: for t2 in source.telExt: if t2<t1: continue pairs.remove((t1,t2)) sourceAdjList = adjMatToLists(sourceAdj) targetAdjList = adjMatToLists(targetAdj) impossible = [] for e1,e2 in pairs: if (sourceAdj[e1,e2] == 0 and targetAdj[e1,e2] ==0 and sourceCn[getGeneFromExt(e1)] == sourceCn[getGeneFromExt(e2)] and sourceCn[getGeneFromExt(e2)] == targetCn[getGeneFromExt(e1)] and targetCn[getGeneFromExt(e1)] == targetCn[getGeneFromExt(e2)] and #((sourceAdj[e1,:]>0)==(targetAdj[e1,:]>0)).all() and ((sourceAdj[e2,:]>0)==(targetAdj[e2,:]>0)).all() and e2 in alternatingBFS(sourceAdjList,targetAdjList,e1) and e2 in alternatingBFS(targetAdjList,sourceAdjList,e1)): impossible.append((e1,e2)) print "Zeroed",len(impossible),"pairs and that's", 100.0*len(impossible)/len(pairs), "%" return impossible #np.sum(sourceAdj[e1,:])==np.sum(targetAdj[e1,:]) and np.sum(sourceAdj[e2,:])==np.sum(targetAdj[e2,:]) and \ def shortestSimplePath(s,t,genome): n = nToNumberExtWithTels(len(genome)) adj = adjMatToLists(genome.adj) q = [] visited = [False for i in range(n)] d = [-1 for i in range(n)] p = [None for i in range(n)] prevEdge = [None for i in range(n)] visited[s] = True d[s] = 0 p[s] = None prevEdge[s] = None def updateDS(v,u): visited[v] = True d[v] = d[u] + 1 p[v] = u q.append(v) q.append(s) while len(q)>0: u = q.pop(0) for v in adj[u]: if not visited[v] and prevEdge[u]!="adj": updateDS(v,u) prevEdge[v] = "adj" v = getOtherExt(u) if v>=n: continue if not visited[v] and len(adj[u])>0 and prevEdge[u]!="gene": updateDS(v,u) prevEdge[v] = "gene" path = [] u = t while u!=s: path.append(u) path.append(prevEdge[u]) u = p[u] path.append(u) path.reverse() #print path return path def pathHasCnBound(path,genome,bound=1): for i in 
range(0,len(path)-2,2):  # continuation of pathHasCnBound(path,genome,bound=1)
        # walk the interleaved [vertex, kind, vertex, ...] path and check
        # every traversed adjacency/gene has copy number >= bound
        u,kind,v = path[i:i+3]
        cn = 0
        if kind=="adj":
            cn = genome.adj[u,v]
        if kind=="gene":
            cn = genome.cn[getGeneFromExt(u)]
        if cn<bound:
            return False
    return True


def hasDuplicatedChomosomes(genome):
    '''True when the telomere-to-telomere path is covered with CN >= 2,
    i.e. a whole chromosome appears at least twice.
    (NOTE(review): "Chomosomes" typo is part of the public name; callers
    elsewhere may rely on it, so it is not renamed here.)
    '''
    path = shortestSimplePath(leftTelomereExt(),rightTelomereExt(genome.n),genome)
    return pathHasCnBound(path,genome,2)


def hasTheSameChromosome(g1,g2):
    '''True when a telomere-to-telomere path of g1 exists with CN >= 1 in g2.'''
    path = shortestSimplePath(leftTelomereExt(),rightTelomereExt(g1.n),g1)
    return pathHasCnBound(path,g2)


def decomposeChromosomes(genome):
    '''Split a multi-chromosome genome into per-copy chromosomes with a
    Gurobi ILP: flow variables fAdj/fGene route one telomere-to-telomere
    unit of flow per chromosome copy, conserving flow at extremities and
    partitioning the observed adjacency/CN counts across the copies.

    The objective (for 2 copies) minimises the squared difference between
    copy 0 and copy 1, i.e. prefers identical copies.  On success returns
    (objective value, genome minus the first extracted chromosome); if the
    copies differ (objVal != 0) the original genome is returned instead.
    Returns -1 when the model is infeasible.

    NOTE(review): the early exit returns (n, genome) while the normal exits
    return (objVal, genome) — looks inconsistent; confirm with callers.
    '''
    n = genome.n
    genesOnly = genome.genes
    extOnly = genome.extremities
    extAndTels = genome.extAndTels
    adj = genome.adj
    cn = genome.cn
    leftTelExt = leftTelomereExt()
    rightTelExt = rightTelomereExt(n)
    telExt = genome.telExt
    chroms = cn[0]  # copy number of the leftmost telomere == chromosome count
    print "before:",genome
    if chroms<2:
        return n,genome
    model = Model("decomposeChromosomes")
    # fAdj[k,e1,e2]: how many of adjacency (e1,e2) belong to chromosome copy k
    fAdj = {}
    for k in range(chroms):
        for e1 in extAndTels:
            for e2 in extAndTels:
                fAdj[k,e1,e2] = model.addVar(obj=0, vtype=GRB.INTEGER, lb=0, ub=adj[e1,e2], name='fAdj['+str(k)+','+str(e1)+','+str(e2)+']')
    # fGene[k,tail,head] / [k,head,tail]: directed gene traversals in copy k
    fGene = {}
    for k in range(chroms):
        for g in genesOnly:
            fGene[k,getGeneTail(g),getGeneHead(g)] = model.addVar(obj=0, vtype=GRB.INTEGER, lb=0, ub=cn[g], name='fGene['+str(k)+','+str(getGeneTail(g))+','+str(getGeneHead(g))+']')
            fGene[k,getGeneHead(g),getGeneTail(g)] = model.addVar(obj=0, vtype=GRB.INTEGER, lb=0, ub=cn[g], name='fGene['+str(k)+','+str(getGeneHead(g))+','+str(getGeneTail(g))+']')
    model.modelSense = GRB.MINIMIZE
    model.setParam('OutputFlag', False )
    model.update()
    #constraints
    for k in range(chroms):
        # each copy is a single telomere-to-telomere walk: one unit leaves
        # the left telomere and one unit enters the right telomere
        model.addConstr(quicksum(fAdj[k,leftTelExt,e] for e in extAndTels)==1)
        model.addConstr(quicksum(fAdj[k,e,rightTelExt] for e in extAndTels)==1)
        for e1 in extOnly:
            # flow conservation: adjacency flow out of e1 equals gene flow into e1
            model.addConstr(quicksum(fAdj[k,e1,e2] for e2 in extAndTels)==fGene[k,getOtherExt(e1),e1])
            model.addConstr(quicksum(fAdj[k,e2,e1] for e2 in extAndTels)==fGene[k,e1,getOtherExt(e1)])
    # the copies together must account for every observed adjacency...
    for e1 in extAndTels:
        for e2 in extAndTels:
            if e1 != e2:
                model.addConstr(quicksum(fAdj[k,e1,e2]+fAdj[k,e2,e1] for k in range(chroms))==adj[e1,e2])
            else:
                model.addConstr(quicksum(fAdj[k,e1,e1] for k in range(chroms))==adj[e1,e1])
    # ...and every observed gene copy
    for g in genesOnly:
        model.addConstr(quicksum(fGene[k,getGeneTail(g),getGeneHead(g)]+fGene[k,getGeneHead(g),getGeneTail(g)] for k in range(chroms))==cn[g])
    if chroms>1:
        # prefer identical copies: minimise squared copy-0 vs copy-1 difference
        model.setObjective(quicksum((fGene[0,getGeneTail(g),getGeneHead(g)]-fGene[1,getGeneTail(g),getGeneHead(g)])*(fGene[0,getGeneTail(g),getGeneHead(g)]-fGene[1,getGeneTail(g),getGeneHead(g)]) for g in genesOnly)+
                           quicksum((fAdj[0,e1,e2]-fAdj[1,e1,e2])*(fAdj[0,e1,e2]-fAdj[1,e1,e2]) for e1 in extAndTels for e2 in extAndTels),GRB.MINIMIZE)
    model.optimize()
    if model.status == GRB.status.INFEASIBLE:
        print "Infeasible!!!"
        #model.computeIIS()
        #model.write(r"D:\ronzeira\model.ilp")
        return -1
    # materialise each copy's adjacency/CN matrices from the solved flows
    chrom_list = []
    for k in range(chroms):
        #print "---------------------------------"
        #print "Chromosome copy ",k
        adjNew = adj.copy()
        cnNew = cn.copy()
        for e1 in extAndTels:
            for e2 in extAndTels:
                adjNew[e1,e2] = fAdj[k,e1,e2].x+fAdj[k,e2,e1].x
        for g in genesOnly:
            cnNew[g] = fGene[k,getGeneTail(g),getGeneHead(g)].x+fGene[k,getGeneHead(g),getGeneTail(g)].x
        cnNew[0] = 1
        cnNew[-1] = 1
        cc = chromosomeContainer()
        cc.setMatrices(cnNew,adjNew)
        chrom_list.append(cc)
        #print cc
    #print "---------------------------------"
    #print "Genome without first chromosome "
    new = genome-chrom_list[0]
    #print model.objVal
    print "after:",new
    if model.objVal==0:
        return model.objVal,new
    else:
        return model.objVal,genome


def removeLongestDuplicatedChromosomes(genome):
    '''Find (via a Gurobi ILP) the longest chromosome present in at least two
    copies and return it as a chromosomeContainer; -1 when there are <= 2
    chromosomes or the model is infeasible.

    A unit of flow is routed from an artificial source s=-1 through one
    telomere, along alternating gene/adjacency edges, and back to s; every
    edge used must exist in >= 2 copies (the `2*x <= count` constraints),
    and MAXIMIZE of the obj=1 gene/telomere variables selects the longest
    such doubly-covered chromosome.
    '''
    genesOnly = genome.genes
    extOnly = genome.extremities
    extAndTels = genome.extAndTels
    adj = genome.adj
    cn = genome.cn
    telExt = genome.telExt
    telGenes = genome.telGenes
    chroms = sum(cn[telGenes])/2  # two telomeres per chromosome (py2 int div)
    if chroms<=2:
        return -1
    print "before:",genome
    model = Model("removeLongestDuplicatedChromosomes")
    s = -1  # artificial circulation source/sink node
    fAdj = {}
    for e1 in extAndTels:
        for e2 in extAndTels:
            fAdj[e1,e2] = model.addVar(obj=0, vtype=GRB.INTEGER, lb=0, ub=min(adj[e1,e2],1))
    for t in telExt:
        fAdj[s,t] = model.addVar(obj=1, vtype=GRB.INTEGER, lb=0, ub=min(cn[getGeneFromExt(t)],1))
        fAdj[t,s] = model.addVar(obj=1, vtype=GRB.INTEGER, lb=0, ub=min(cn[getGeneFromExt(t)],1))
    fGene = {}
    for g in genesOnly:
        fGene[getGeneTail(g),getGeneHead(g)] = model.addVar(obj=1, vtype=GRB.INTEGER, lb=0, ub=min(cn[g],1))
        fGene[getGeneHead(g),getGeneTail(g)] = model.addVar(obj=1, vtype=GRB.INTEGER, lb=0, ub=min(cn[g],1))
    model.modelSense = GRB.MAXIMIZE
    model.setParam('OutputFlag', False )
    model.update()
    #constraints
    # exactly one unit leaves and one unit re-enters the artificial source
    model.addConstr(quicksum(fAdj[s,t] for t in telExt)==1)
    model.addConstr(quicksum(fAdj[t,s] for t in telExt)==1)
    for e1 in extOnly:
        # flow conservation at ordinary extremities
        model.addConstr(quicksum(fAdj[e1,e2] for e2 in extAndTels)==fGene[getOtherExt(e1),e1])
        model.addConstr(quicksum(fAdj[e2,e1] for e2 in extAndTels)==fGene[e1,getOtherExt(e1)])
    for t in telExt:
        # telomeres route their flow to/from the artificial source
        model.addConstr(quicksum(fAdj[t,e] for e in extOnly)==fAdj[s,t])
        model.addConstr(quicksum(fAdj[e,t] for e in extOnly)==fAdj[t,s])
    # duplication requirement: everything used must exist in >= 2 copies
    for e1 in extAndTels:
        for e2 in extAndTels:
            if e1 != e2:
                model.addConstr(2*(fAdj[e1,e2]+fAdj[e2,e1])<=adj[e1,e2])
            else:
                model.addConstr(2*fAdj[e1,e1]<=adj[e1,e1])
    for g in genesOnly:
        model.addConstr(2*(fGene[getGeneTail(g),getGeneHead(g)]+fGene[getGeneHead(g),getGeneTail(g)])<=cn[g])
    for t in telExt:
        model.addConstr(2*(fAdj[s,t]+fAdj[t,s])<=cn[getGeneFromExt(t)])
    model.optimize()
    if model.status == GRB.status.INFEASIBLE:
        print "Infeasible!!!"
        #model.computeIIS()
        #model.write(r"D:\ronzeira\model.ilp")
        return -1
    print "objective " , model.objVal
    print "---------------------------------"
    print "Chromosome "
    # rebuild the selected chromosome's matrices from the solution
    adjNew = adj.copy()
    cnNew = cn.copy()
    for e1 in extAndTels:
        for e2 in extAndTels:
            if e1!=e2:
                adjNew[e1,e2] = (fAdj[e1,e2].x+fAdj[e2,e1].x)
            else:
                adjNew[e1,e2] = fAdj[e1,e2].x
    for g in genesOnly:
        cnNew[g] = (fGene[getGeneTail(g),getGeneHead(g)].x+fGene[getGeneHead(g),getGeneTail(g)].x)
    # locate the entry (t1) and exit (t2) telomeres of the selected chromosome
    for t in telExt:
        if fAdj[s,t].x>0:
            t1 = t
        if fAdj[t,s].x>0:
            t2 = t
    if t1!=t2:
        cnNew[getGeneFromExt(t1)] = 1
        cnNew[getGeneFromExt(t2)] = 1
    else:
        cnNew[getGeneFromExt(t1)] = 2
    for t in telExt:
        if t!=t1 and t!=t2:
            cnNew[getGeneFromExt(t)] = 0
    cc = chromosomeContainer()
    cc.setMatrices(cnNew,adjNew,telGenes)
    print cc
    #print "---------------------------------"
    #print "Genome without one chromosome "
    #new = genome-cc
    #print "after:",new
    #return new
    return cc

#########################################################################

def twoTupleToThreeTuple(k,t):
    '''Prepend index k to a 2-tuple, yielding (k, t[0], t[1]).'''
    return k,t[0],t[1]

def orderPair(e1,e2):
    '''Return the pair in non-decreasing order.'''
    if e1<=e2:
        return e1,e2
    else:
        return e2,e1

def createPairList(l):
    '''All unordered pairs (i,j), i <= j, drawn from iterable l.'''
    pairs = []
    for i in l:
        for j in l:
            if j<i:
                continue
            pairs.append((i,j))
    return pairs

def kAndOrderedPair(k,e1,e2):
    '''Return (k, min(e1,e2), max(e1,e2)) — canonical keyed pair.'''
    t1,t2 = orderPair(e1,e2)
    return k,t1,t2

####################################################
## The main function to search for a scenario from source to target with distance d
def multiStepSortingIlp(source,target,d=1,check=False,impossible=[],graphFileName=""):
    '''Build the multi-step sorting ILP from `source` to `target` with d steps.

    NOTE(review): `impossible=[]` is a mutable default argument — if any code
    path appends to it, the list is shared across calls; should be
    `impossible=None` with an in-body default.  Left as-is because the
    function body continues past this chunk and cannot be safely rewritten.
    '''
    #get data from inputs
    #n = source.n
    genesOnly = source.genes
    extOnly = source.extremities
    extAndTels = source.extAndTels
    numNodes = len(extAndTels)
    genesAndTels = source.geneAndTels
    sourceAdj = source.adj
    sourceCn = source.cn
    targetAdj = target.adj
    targetCn = target.cn
    allNodePairs = createPairList(extAndTels)
    extNodePairs = createPairList(extOnly)
    #leftTelExt = leftTelomereExt()
    #rightTelExt = rightTelomereExt(n)
    telExt = source.telExt
    telNodePairs = createPairList(telExt)
    model = Model("MultiStep")
    #Variables:
    #Amplifications
    # dup_adj/dup_gene: whether adjacency (e1,e2) / gene g is duplicated at step k
    dup_adj = {}
    for k in range(d):
        for e1,e2 in allNodePairs:
            dup_adj[k,e1,e2] = model.addVar(obj=0, vtype=GRB.BINARY, name='dup_adj['+str(k)+','+str(e1)+','+str(e2)+']')
    dup_gene = {}
    for k in range(d):
        for g in genesAndTels:
            dup_gene[k,g] = model.addVar(obj=0, vtype=GRB.BINARY, name='dup_gene['+str(k)+','+str(g)+']')
    # duplication[k,e1,e2]: a duplication event between the two breakpoints (obj=1)
    duplication = {}
    for k in range(d):
        for e1,e2 in extNodePairs:
            duplication[k,e1,e2] = model.addVar(obj=1, vtype=GRB.BINARY, name='duplication['+str(k)+','+str(e1)+','+str(e2)+']')
        for t1,t2 in telNodePairs:
            duplication[k,t1,t2] = model.addVar(obj=1, vtype=GRB.BINARY, name='duplication['+str(k)+','+str(t1)+','+str(t2)+']')
    #deletion
    del_adj = {}
    for k in range(d):
        for e1,e2 in allNodePairs:
            del_adj[k,e1,e2] = model.addVar(obj=0, vtype=GRB.BINARY, name='del_adj['+str(k)+','+str(e1)+','+str(e2)+']')
        for t1,t2 in telNodePairs:
            del_adj[k,t1,t2] = 0  # telomere-telomere adjacencies cannot be deleted
    del_gene = {}
    for k in range(d):
        for g in genesAndTels:
            del_gene[k,g] = model.addVar(obj=0, vtype=GRB.BINARY, name='del_gene['+str(k)+','+str(g)+']')
    deletion = {}
    for k in range(d):
        for e1,e2 in allNodePairs:
            deletion[k,e1,e2] = model.addVar(obj=1, vtype=GRB.BINARY, name='deletion['+str(k)+','+str(e1)+','+str(e2)+']')
    #DCJ
    # cut/join: half-weight (0.25 each) DCJ breakpoint operations at step k
    cut = {}
    for k in range(d):
        for e1 in extAndTels:
            for e2 in extAndTels:
                cut[k,e1,e2] = model.addVar(obj=0.25, vtype=GRB.BINARY, name='cut['+str(k)+','+str(e1)+','+str(e2)+']')
    join = {}
    for k in range(d):
        for e1 in extAndTels:
            for e2 in extAndTels:
                join[k,e1,e2] = model.addVar(obj=0.25, vtype=GRB.BINARY, name='join['+str(k)+','+str(e1)+','+str(e2)+']')
    addTel = {}
    for k in range(d):
        for t1,t2 in telNodePairs:
            # NOTE(review): the variable NAME interpolates e1/e2 (stale values
            # left over from the loops above) instead of t1/t2 — the dict key
            # is correct, but the Gurobi names collide/mislabel; confirm intent.
            addTel[k,t1,t2] = model.addVar(obj=0, vtype=GRB.BINARY, name='addTel['+str(e1)+','+str(e2)+']')
    #CN and adj
    # (definition truncated at the chunk boundary — continues past this view)
    cnVec = {}
    for k in range(1,d):
        for g in genesAndTels:
            cnVec[k,g] = model.addVar(obj=0,
""" bgasync.api - BGAPI classes, constants, and utility functions. """ # This file is auto-generated. Edit at your own risk! from struct import Struct from collections import namedtuple from enum import Enum from .apibase import * class event_system_boot(Decodable): decoded_type = namedtuple('event_system_boot_type', ( 'major', 'minor', 'patch', 'build', 'll_version', 'protocol_version', 'hw', )) decode_struct = Struct('<HHHHHBB') class event_system_debug(Decodable): decoded_type = namedtuple('event_system_debug_type', ( 'data', )) decode_struct = Struct('<B') ends_with_uint8array = True class event_system_endpoint_watermark_rx(Decodable): decoded_type = namedtuple('event_system_endpoint_watermark_rx_type', ( 'endpoint', 'data', )) decode_struct = Struct('<BB') class event_system_endpoint_watermark_tx(Decodable): decoded_type = namedtuple('event_system_endpoint_watermark_tx_type', ( 'endpoint', 'data', )) decode_struct = Struct('<BB') class event_system_script_failure(Decodable): decoded_type = namedtuple('event_system_script_failure_type', ( 'address', 'reason', )) decode_struct = Struct('<HH') class event_system_no_license_key(Decodable): decoded_type = namedtuple('event_system_no_license_key_type', ( )) class command_system_reset(CommandEncoder): __slots__ = ("boot_in_dfu",) _id, _struct, _ends_with_uint8array = ((0, 0, 0), Struct('<B'), False) def __init__(self, boot_in_dfu): super(command_system_reset, self).__init__(boot_in_dfu) class command_system_hello(CommandEncoder): __slots__ = () _id, _struct, _ends_with_uint8array = ((0, 0, 1), Struct('<'), False) def __init__(self, ): super(command_system_hello, self).__init__() class response_system_hello(Decodable): decoded_type = namedtuple('response_system_hello_type', ( )) class command_system_address_get(CommandEncoder): __slots__ = () _id, _struct, _ends_with_uint8array = ((0, 0, 2), Struct('<'), False) def __init__(self, ): super(command_system_address_get, self).__init__() class 
response_system_address_get(Decodable): decoded_type = namedtuple('response_system_address_get_type', ( 'address', )) decode_struct = Struct('<6s') class command_system_reg_write(CommandEncoder): __slots__ = ("address", "value",) _id, _struct, _ends_with_uint8array = ((0, 0, 3), Struct('<HB'), False) def __init__(self, address, value): super(command_system_reg_write, self).__init__(address, value) class response_system_reg_write(Decodable): decoded_type = namedtuple('response_system_reg_write_type', ( 'result', )) decode_struct = Struct('<H') class command_system_reg_read(CommandEncoder): __slots__ = ("address",) _id, _struct, _ends_with_uint8array = ((0, 0, 4), Struct('<H'), False) def __init__(self, address): super(command_system_reg_read, self).__init__(address) class response_system_reg_read(Decodable): decoded_type = namedtuple('response_system_reg_read_type', ( 'address', 'value', )) decode_struct = Struct('<HB') class command_system_get_counters(CommandEncoder): __slots__ = () _id, _struct, _ends_with_uint8array = ((0, 0, 5), Struct('<'), False) def __init__(self, ): super(command_system_get_counters, self).__init__() class response_system_get_counters(Decodable): decoded_type = namedtuple('response_system_get_counters_type', ( 'txok', 'txretry', 'rxok', 'rxfail', 'mbuf', )) decode_struct = Struct('<BBBBB') class command_system_get_connections(CommandEncoder): __slots__ = () _id, _struct, _ends_with_uint8array = ((0, 0, 6), Struct('<'), False) def __init__(self, ): super(command_system_get_connections, self).__init__() class response_system_get_connections(Decodable): decoded_type = namedtuple('response_system_get_connections_type', ( 'maxconn', )) decode_struct = Struct('<B') class command_system_read_memory(CommandEncoder): __slots__ = ("address", "length",) _id, _struct, _ends_with_uint8array = ((0, 0, 7), Struct('<IB'), False) def __init__(self, address, length): super(command_system_read_memory, self).__init__(address, length) class 
response_system_read_memory(Decodable): decoded_type = namedtuple('response_system_read_memory_type', ( 'address', 'data', )) decode_struct = Struct('<IB') ends_with_uint8array = True class command_system_get_info(CommandEncoder): __slots__ = () _id, _struct, _ends_with_uint8array = ((0, 0, 8), Struct('<'), False) def __init__(self, ): super(command_system_get_info, self).__init__() class response_system_get_info(Decodable): decoded_type = namedtuple('response_system_get_info_type', ( 'major', 'minor', 'patch', 'build', 'll_version', 'protocol_version', 'hw', )) decode_struct = Struct('<HHHHHBB') class command_system_endpoint_tx(CommandEncoder): __slots__ = ("endpoint", "data",) _id, _struct, _ends_with_uint8array = ((0, 0, 9), Struct('<BB'), True) def __init__(self, endpoint, data): super(command_system_endpoint_tx, self).__init__(endpoint, data) class response_system_endpoint_tx(Decodable): decoded_type = namedtuple('response_system_endpoint_tx_type', ( 'result', )) decode_struct = Struct('<H') class command_system_whitelist_append(CommandEncoder): __slots__ = ("address", "address_type",) _id, _struct, _ends_with_uint8array = ((0, 0, 10), Struct('<6sB'), False) def __init__(self, address, address_type): super(command_system_whitelist_append, self).__init__(address, address_type) class response_system_whitelist_append(Decodable): decoded_type = namedtuple('response_system_whitelist_append_type', ( 'result', )) decode_struct = Struct('<H') class command_system_whitelist_remove(CommandEncoder): __slots__ = ("address", "address_type",) _id, _struct, _ends_with_uint8array = ((0, 0, 11), Struct('<6sB'), False) def __init__(self, address, address_type): super(command_system_whitelist_remove, self).__init__(address, address_type) class response_system_whitelist_remove(Decodable): decoded_type = namedtuple('response_system_whitelist_remove_type', ( 'result', )) decode_struct = Struct('<H') class command_system_whitelist_clear(CommandEncoder): __slots__ = () _id, _struct, 
_ends_with_uint8array = ((0, 0, 12), Struct('<'), False) def __init__(self, ): super(command_system_whitelist_clear, self).__init__() class response_system_whitelist_clear(Decodable): decoded_type = namedtuple('response_system_whitelist_clear_type', ( )) class command_system_endpoint_rx(CommandEncoder): __slots__ = ("endpoint", "size",) _id, _struct, _ends_with_uint8array = ((0, 0, 13), Struct('<BB'), False) def __init__(self, endpoint, size): super(command_system_endpoint_rx, self).__init__(endpoint, size) class response_system_endpoint_rx(Decodable): decoded_type = namedtuple('response_system_endpoint_rx_type', ( 'result', 'data', )) decode_struct = Struct('<HB') ends_with_uint8array = True class command_system_endpoint_set_watermarks(CommandEncoder): __slots__ = ("endpoint", "rx", "tx",) _id, _struct, _ends_with_uint8array = ((0, 0, 14), Struct('<BBB'), False) def __init__(self, endpoint, rx, tx): super(command_system_endpoint_set_watermarks, self).__init__(endpoint, rx, tx) class response_system_endpoint_set_watermarks(Decodable): decoded_type = namedtuple('response_system_endpoint_set_watermarks_type', ( 'result', )) decode_struct = Struct('<H') class system_endpoints(Enum): endpoint_api = 0 endpoint_test = 1 endpoint_script = 2 endpoint_usb = 3 endpoint_uart0 = 4 endpoint_uart1 = 5 class event_flash_ps_key(Decodable): decoded_type = namedtuple('event_flash_ps_key_type', ( 'key', 'value', )) decode_struct = Struct('<HB') ends_with_uint8array = True class command_flash_ps_defrag(CommandEncoder): __slots__ = () _id, _struct, _ends_with_uint8array = ((0, 1, 0), Struct('<'), False) def __init__(self, ): super(command_flash_ps_defrag, self).__init__() class response_flash_ps_defrag(Decodable): decoded_type = namedtuple('response_flash_ps_defrag_type', ( )) class command_flash_ps_dump(CommandEncoder): __slots__ = () _id, _struct, _ends_with_uint8array = ((0, 1, 1), Struct('<'), False) def __init__(self, ): super(command_flash_ps_dump, self).__init__() class 
response_flash_ps_dump(Decodable): decoded_type = namedtuple('response_flash_ps_dump_type', ( )) class command_flash_ps_erase_all(CommandEncoder): __slots__ = () _id, _struct, _ends_with_uint8array = ((0, 1, 2), Struct('<'), False) def __init__(self, ): super(command_flash_ps_erase_all, self).__init__() class response_flash_ps_erase_all(Decodable): decoded_type = namedtuple('response_flash_ps_erase_all_type', ( )) class command_flash_ps_save(CommandEncoder): __slots__ = ("key", "value",) _id, _struct, _ends_with_uint8array = ((0, 1, 3), Struct('<HB'), True) def __init__(self, key, value): super(command_flash_ps_save, self).__init__(key, value) class response_flash_ps_save(Decodable): decoded_type = namedtuple('response_flash_ps_save_type', ( 'result', )) decode_struct = Struct('<H') class command_flash_ps_load(CommandEncoder): __slots__ = ("key",) _id, _struct, _ends_with_uint8array = ((0, 1, 4), Struct('<H'), False) def __init__(self, key): super(command_flash_ps_load, self).__init__(key) class response_flash_ps_load(Decodable): decoded_type = namedtuple('response_flash_ps_load_type', ( 'result', 'value', )) decode_struct = Struct('<HB') ends_with_uint8array = True class command_flash_ps_erase(CommandEncoder): __slots__ = ("key",) _id, _struct, _ends_with_uint8array = ((0, 1, 5), Struct('<H'), False) def __init__(self, key): super(command_flash_ps_erase, self).__init__(key) class response_flash_ps_erase(Decodable): decoded_type = namedtuple('response_flash_ps_erase_type', ( )) class command_flash_erase_page(CommandEncoder): __slots__ = ("page",) _id, _struct, _ends_with_uint8array = ((0, 1, 6), Struct('<B'), False) def __init__(self, page): super(command_flash_erase_page, self).__init__(page) class response_flash_erase_page(Decodable): decoded_type = namedtuple('response_flash_erase_page_type', ( 'result', )) decode_struct = Struct('<H') class command_flash_write_words(CommandEncoder): __slots__ = ("address", "words",) _id, _struct, _ends_with_uint8array = ((0, 1, 
7), Struct('<HB'), True) def __init__(self, address, words): super(command_flash_write_words, self).__init__(address, words) class response_flash_write_words(Decodable): decoded_type = namedtuple('response_flash_write_words_type', ( )) class event_attributes_value(Decodable): decoded_type = namedtuple('event_attributes_value_type', ( 'connection', 'reason', 'handle', 'offset', 'value', )) decode_struct = Struct('<BBHHB') ends_with_uint8array = True class event_attributes_user_read_request(Decodable): decoded_type = namedtuple('event_attributes_user_read_request_type', ( 'connection', 'handle', 'offset', 'maxsize', )) decode_struct = Struct('<BHHB') class event_attributes_status(Decodable): decoded_type = namedtuple('event_attributes_status_type', ( 'handle', 'flags', )) decode_struct = Struct('<HB') class command_attributes_write(CommandEncoder): __slots__ = ("handle", "offset", "value",) _id, _struct, _ends_with_uint8array = ((0, 2, 0), Struct('<HBB'), True) def __init__(self, handle, offset, value): super(command_attributes_write, self).__init__(handle, offset, value) class response_attributes_write(Decodable): decoded_type = namedtuple('response_attributes_write_type', ( 'result', )) decode_struct = Struct('<H') class command_attributes_read(CommandEncoder): __slots__ = ("handle", "offset",) _id, _struct, _ends_with_uint8array = ((0, 2, 1), Struct('<HH'), False) def __init__(self, handle, offset): super(command_attributes_read, self).__init__(handle, offset) class response_attributes_read(Decodable): decoded_type = namedtuple('response_attributes_read_type', ( 'handle', 'offset', 'result', 'value', )) decode_struct = Struct('<HHHB') ends_with_uint8array = True class command_attributes_read_type(CommandEncoder): __slots__ = ("handle",) _id, _struct, _ends_with_uint8array = ((0, 2, 2), Struct('<H'), False) def __init__(self, handle): super(command_attributes_read_type, self).__init__(handle) class response_attributes_read_type(Decodable): decoded_type = 
namedtuple('response_attributes_read_type_type', ( 'handle', 'result', 'value', )) decode_struct = Struct('<HHB') ends_with_uint8array = True class command_attributes_user_read_response(CommandEncoder): __slots__ = ("connection", "att_error", "value",) _id, _struct, _ends_with_uint8array = ((0, 2, 3), Struct('<BBB'), True) def __init__(self, connection, att_error, value): super(command_attributes_user_read_response, self).__init__(connection, att_error, value) class response_attributes_user_read_response(Decodable): decoded_type = namedtuple('response_attributes_user_read_response_type', ( )) class command_attributes_user_write_response(CommandEncoder): __slots__ = ("connection", "att_error",) _id, _struct, _ends_with_uint8array = ((0, 2, 4), Struct('<BB'), False) def __init__(self, connection, att_error): super(command_attributes_user_write_response, self).__init__(connection, att_error) class response_attributes_user_write_response(Decodable): decoded_type = namedtuple('response_attributes_user_write_response_type', ( )) class attributes_attribute_change_reason(Enum): write_request = 0 write_command = 1 write_request_user = 2 class attributes_attribute_status_flag(Enum): notify = 1 indicate = 2 class event_connection_status(Decodable): decoded_type = namedtuple('event_connection_status_type', ( 'connection', 'flags', 'address', 'address_type', 'conn_interval', 'timeout', 'latency', 'bonding', )) decode_struct = Struct('<BB6sBHHHB') class event_connection_version_ind(Decodable): decoded_type = namedtuple('event_connection_version_ind_type', ( 'connection', 'vers_nr', 'comp_id', 'sub_vers_nr', )) decode_struct = Struct('<BBHH') class event_connection_feature_ind(Decodable): decoded_type = namedtuple('event_connection_feature_ind_type', ( 'connection', 'features', )) decode_struct = Struct('<BB') ends_with_uint8array = True class event_connection_raw_rx(Decodable): decoded_type = namedtuple('event_connection_raw_rx_type', ( 'connection', 'data', )) decode_struct = 
Struct('<BB') ends_with_uint8array = True class event_connection_disconnected(Decodable): decoded_type = namedtuple('event_connection_disconnected_type', ( 'connection', 'reason', )) decode_struct = Struct('<BH') class command_connection_disconnect(CommandEncoder): __slots__ = ("connection",) _id, _struct, _ends_with_uint8array = ((0, 3, 0), Struct('<B'), False) def __init__(self, connection): super(command_connection_disconnect, self).__init__(connection) class response_connection_disconnect(Decodable): decoded_type = namedtuple('response_connection_disconnect_type', ( 'connection', 'result', )) decode_struct = Struct('<BH') class command_connection_get_rssi(CommandEncoder): __slots__ = ("connection",) _id, _struct, _ends_with_uint8array = ((0, 3, 1), Struct('<B'), False) def __init__(self, connection): super(command_connection_get_rssi, self).__init__(connection) class response_connection_get_rssi(Decodable): decoded_type = namedtuple('response_connection_get_rssi_type', ( 'connection', 'rssi', )) decode_struct = Struct('<Bb') class command_connection_update(CommandEncoder): __slots__ = ("connection", "interval_min", "interval_max", "latency", "timeout",) _id, _struct, _ends_with_uint8array = ((0, 3, 2), Struct('<BHHHH'), False) def __init__(self, connection, interval_min, interval_max, latency, timeout): super(command_connection_update, self).__init__(connection, interval_min, interval_max, latency, timeout) class response_connection_update(Decodable): decoded_type = namedtuple('response_connection_update_type', ( 'connection', 'result', )) decode_struct = Struct('<BH') class command_connection_version_update(CommandEncoder): __slots__ = ("connection",) _id, _struct, _ends_with_uint8array = ((0, 3, 3), Struct('<B'), False) def __init__(self, connection): super(command_connection_version_update, self).__init__(connection) class response_connection_version_update(Decodable): decoded_type = namedtuple('response_connection_version_update_type', ( 'connection', 
'result', )) decode_struct = Struct('<BH') class command_connection_channel_map_get(CommandEncoder): __slots__ = ("connection",) _id, _struct, _ends_with_uint8array = ((0, 3, 4), Struct('<B'), False) def __init__(self, connection): super(command_connection_channel_map_get, self).__init__(connection) class response_connection_channel_map_get(Decodable): decoded_type = namedtuple('response_connection_channel_map_get_type', ( 'connection', 'map', )) decode_struct = Struct('<BB') ends_with_uint8array = True class command_connection_channel_map_set(CommandEncoder): __slots__ = ("connection", "map",) _id, _struct, _ends_with_uint8array = ((0, 3, 5), Struct('<BB'), True) def __init__(self, connection, map): super(command_connection_channel_map_set, self).__init__(connection, map) class response_connection_channel_map_set(Decodable): decoded_type = namedtuple('response_connection_channel_map_set_type', ( 'connection', 'result', )) decode_struct = Struct('<BH') class command_connection_features_get(CommandEncoder): __slots__ = ("connection",) _id, _struct, _ends_with_uint8array = ((0,
        the indexes.
        '''
        # NOTE(review): the opening of this `load` method (signature and the
        # start of its docstring) lies before this chunk.  The handle below is
        # closed manually — a `with open(...)` block would be safer on error,
        # but that is a code change, not made here.
        f = open(file_name, encoding='utf-8')
        for line in f:
            # This is effectively the documentation for the file format of the file
            values = line.rstrip('\n').split('\t')
            (pubchemid, CAS, formula, MW, smiles, InChI, InChI_key, iupac_name, common_name) = values[0:9]
            CAS = int(CAS.replace('-', '')) # Store as int for easier lookup
            # NOTE(review): synonyms deliberately start at column 7, so the
            # IUPAC and common names are themselves searchable as synonyms.
            synonyms = values[7:]
            pubchemid = int(pubchemid)
            obj = ChemicalMetadata(pubchemid, CAS, formula, float(MW), smiles,
                                   InChI, InChI_key, iupac_name, common_name,
                                   synonyms)
            # Lookup indexes
            self.CAS_index[CAS] = obj
            self.pubchem_index[pubchemid] = obj
            self.smiles_index[smiles] = obj
            self.InChI_index[InChI] = obj
            self.InChI_key_index[InChI_key] = obj
            for name in synonyms:
                self.name_index[name] = obj
            self.formula_index[obj.formula] = obj
        f.close()

    def __iter__(self):
        '''Iterate over all loaded chemicals (triggers a full database load
        on first use so the iteration is complete).
        '''
        if not self.finished_loading:
            self.autoload_main_db()
        return iter(i for i in self.InChI_key_index.values())

    @property
    def finished_loading(self):
        '''Whether or not the database has loaded the main database.
        '''
        return not (not self.loaded_main_db and self.main_db is not None)

    def autoload_main_db(self):
        '''Load the main database when needed.

        Loads the main database, all user databases, and the element data,
        then marks the instance as fully loaded.  Always returns True.
        '''
        self.load(self.main_db)
        for db in self.user_dbs:
            self.load(db)
        self.load_elements()
        self.loaded_main_db = True
        return True

    def _search_autoload(self, identifier, index, autoload=True):
        '''Look `identifier` up in `index`; on a miss, optionally load the
        main database once and retry.  Returns the metadata object or False.
        (The retry cannot recurse indefinitely: after autoload_main_db()
        `finished_loading` is True, so the second pass simply misses.)
        '''
        if index:
            if identifier in index:
                return index[identifier]
            else:
                if autoload and not self.finished_loading:
                    self.autoload_main_db()
                    return self._search_autoload(identifier, index, autoload)
        return False

    def search_pubchem(self, pubchem, autoload=True):
        '''Search for a chemical by its pubchem number. Accepts strings or
        ints.
        '''
        return self._search_autoload(int(pubchem), self.pubchem_index, autoload=autoload)

    def search_CAS(self, CAS, autoload=True):
        '''Search for a chemical by its CAS number. Accepts strings or ints.
        '''
        # normalise the dashed string form to the int key used by the index
        if type(CAS) != int:
            CAS = CAS_to_int(CAS)
        return self._search_autoload(CAS, self.CAS_index, autoload=autoload)

    def search_smiles(self, smiles, autoload=True):
        '''Search for a chemical by its smiles string.
        '''
        return self._search_autoload(smiles, self.smiles_index, autoload=autoload)

    def search_InChI(self, InChI, autoload=True):
        '''Search for a chemical by its InChI string.
        '''
        return self._search_autoload(InChI, self.InChI_index, autoload=autoload)

    def search_InChI_key(self, InChI_key, autoload=True):
        '''Search for a chemical by its InChI key.
        '''
        return self._search_autoload(InChI_key, self.InChI_key_index, autoload=autoload)

    def search_name(self, name, autoload=True):
        '''Search for a chemical by its name.
        '''
        return self._search_autoload(name, self.name_index, autoload=autoload)

    def search_formula(self, formula, autoload=True):
        '''Search for a chemical by its serialized formula.
        '''
        return self._search_autoload(formula, self.formula_index, autoload=autoload)


@mark_numba_incompatible
def CAS_from_any(ID, autoload=False, cache=True):
    """Wrapper around `search_chemical` which returns the CAS number of the
    found chemical directly.

    Parameters
    ----------
    ID : str
        One of the name formats described by `search_chemical`, [-]
    autoload : bool, optional
        Whether to load new chemical databanks during the search if a hit
        is not immediately found, [-]
    cache : bool, optional
        Whether or not to cache the search for faster lookup in subsequent
        queries, [-]

    Returns
    -------
    CASRN : str
        A three-piece, dash-separated set of numbers

    Notes
    -----
    An exception is raised if the name cannot be identified. The PubChem
    database includes a wide variety of other synonyms, but these may not be
    present for all chemicals. See `search_chemical` for more details.

    Examples
    --------
    >>> CAS_from_any('water')
    '7732-18-5'
    >>> CAS_from_any('InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3')
    '64-17-5'
    >>> CAS_from_any('CCCCCCCCCC')
    '124-18-5'
    >>> CAS_from_any('InChIKey=L<KEY>')
    '64-17-5'
    >>> CAS_from_any('pubchem=702')
    '64-17-5'
    >>> CAS_from_any('O') # only elements can be specified by symbol
    '17778-80-2'
    """
    return search_chemical(ID, autoload=autoload, cache=cache).CASs

@mark_numba_incompatible
def MW(ID, autoload=False, cache=True):
    """Wrapper around `search_chemical` which returns the molecular weight of
    the found chemical directly.

    Parameters
    ----------
    ID : str
        One of the name formats described by `search_chemical`

    Returns
    -------
    MW : float
        Molecular weight of chemical, [g/mol]

    Notes
    -----
    An exception is raised if the name cannot be identified. The PubChem
    database includes a wide variety of other synonyms, but these may not be
    present for all chemicals. See `search_chemical` for more details.

    Examples
    --------
    >>> MW('water')
    18.01528
    >>> MW('InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3')
    46.06844
    >>> MW('CCCCCCCCCC')
    142.286
    >>> MW('InChIKey=L<KEY>')
    46.06844
    >>> MW('pubchem=702')
    46.06844
    >>> MW('O') # only elements can be specified by symbol
    15.9994
    """
    return search_chemical(ID, autoload=autoload, cache=cache).MW

# module-level LRU-ish cache for search_chemical; bounded below by evicting
# the oldest entry (relies on insertion-ordered dicts, Python 3.7+)
chemical_search_cache = {}
chemical_search_cache_max_size = 200

@mark_numba_incompatible
def search_chemical(ID, autoload=False, cache=True):
    """Looks up metadata about a chemical by searching and testing for the
    input string being any of the following types of chemical identifiers:

    * Name, in IUPAC form or common form or a synonym registered in PubChem
    * InChI name, prefixed by 'InChI=1S/' or 'InChI=1/'
    * InChI key, prefixed by 'InChIKey='
    * PubChem CID, prefixed by 'PubChem='
    * SMILES (prefix with 'SMILES=' to ensure smiles parsing; ex.
      'C' will return Carbon as it is an element whereas the SMILES
      interpretation for 'C' is methane)
    * CAS number (obsolete numbers may point to the current number)

    If the input is an ID representing an element, the following additional
    inputs may be specified as

    * Atomic symbol (ex 'Na')
    * Atomic number (as a string)

    Parameters
    ----------
    ID : str
        One of the name formats described above
    autoload : bool, optional
        Whether to load new chemical databanks during the search if a hit
        is not immediately found, [-]
    cache : bool, optional
        Whether or not to cache the search for faster lookup in subsequent
        queries, [-]

    Returns
    -------
    chemical_metadata : ChemicalMetadata
        A class containing attributes which describe the chemical's metadata,
        [-]

    Notes
    -----
    An exception is raised if the name cannot be identified. The PubChem
    database includes a wide variety of other synonyms, but these may not be
    present for all chemicals.

    Examples
    --------
    >>> search_chemical('water')
    <ChemicalMetadata, name=water, formula=H2O, smiles=O, MW=18.0153>
    >>> search_chemical('InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3')
    <ChemicalMetadata, name=ethanol, formula=C2H6O, smiles=CCO, MW=46.0684>
    >>> search_chemical('CCCCCCCCCC')
    <ChemicalMetadata, name=DECANE, formula=C10H22, smiles=CCCCCCCCCC, MW=142.286>
    >>> search_chemical('InChIKey=<KEY>')
    <ChemicalMetadata, name=ethanol, formula=C2H6O, smiles=CCO, MW=46.0684>
    >>> search_chemical('pubchem=702')
    <ChemicalMetadata, name=ethanol, formula=C2H6O, smiles=CCO, MW=46.0684>
    >>> search_chemical('O') # only elements can be specified by symbol
    <ChemicalMetadata, name=oxygen, formula=O, smiles=[O], MW=15.9994>
    """
    if cache and ID in chemical_search_cache:
        return chemical_search_cache[ID]
    if not _pubchem_db_loaded: get_pubchem_db() # pragma: no cover
    hit = _search_chemical(ID, autoload)
    if cache:
        # NOTE(review): the check is `>` not `>=`, so the cache can briefly
        # hold max_size + 1 entries after insertion — harmless but inexact.
        if len(chemical_search_cache) > chemical_search_cache_max_size:
            # invalidate cache by time - first entry is removed relying on
            # dict ordering new in Python 3.7
            chemical_search_cache.pop(next(chemical_search_cache.keys().__iter__()))
        chemical_search_cache[ID] = hit
    return hit

def _search_chemical(ID, autoload):
    # Core lookup behind search_chemical(): tries, in order, the periodic
    # table, CAS numbers, InChI / InChIKey prefixes, then PubChem CIDs.
    # Raises ValueError when a recognized identifier is absent from the DB.
    ID_arg = ID
    ID = ID.strip()
    ID_lower = ID.lower()
    if ID in periodic_table:
        '''Special handling for homonuclear elements. Search '1'> H, 'H'> H,
        monotomic CAS > H but "Hydrogen"> H2.
        pubchem_db does not contain atomic numbers, so searching in the
        periodic table is necessary.
        '''
        if (ID in periodic_table._symbol_to_elements or ID in periodic_table._number_to_elements
            or ID in periodic_table._CAS_to_elements):
            obj = pubchem_db.search_CAS(periodic_table[ID].CAS)
        else:
            obj = pubchem_db.search_CAS(periodic_table[ID].CAS_standard)
        return obj
    if check_CAS(ID):
        CAS_lookup = pubchem_db.search_CAS(ID, autoload)
        if CAS_lookup:
            return CAS_lookup
        # handle the case of synonyms
        CAS_alternate_loopup = pubchem_db.search_name(ID, autoload)
        if CAS_alternate_loopup:
            return CAS_alternate_loopup
        # retry once with the full database loaded before giving up
        if not autoload:
            return search_chemical(ID, autoload=True)
        raise ValueError('A valid CAS number (%s) was recognized, but is not in the database' %(ID))
    ID_len = len(ID)
    if ID_len > 9:
        inchi_search = False
        # normal upper case is 'InChI=1S/'
        if ID_lower[0:9] == 'inchi=1s/':
            inchi_search = ID[9:]
        elif ID_lower[0:8] == 'inchi=1/':
            inchi_search = ID[8:]
        if inchi_search:
            inchi_lookup = pubchem_db.search_InChI(inchi_search, autoload)
            if inchi_lookup:
                return inchi_lookup
            else:
                if not autoload:
                    return search_chemical(ID, autoload=True)
                raise ValueError('A valid InChI name (%s) was recognized, but it is not in the database' %(inchi_search))
        if ID_lower[0:9] == 'inchikey=':
            inchi_key_lookup = pubchem_db.search_InChI_key(ID[9:], autoload)
            if inchi_key_lookup:
                return inchi_key_lookup
            else:
                if not autoload:
                    obj = search_chemical(ID, autoload=True)
                    return obj
                # NOTE(review): this message interpolates `inchi_key_lookup`
                # (falsy here) rather than the searched key `ID[9:]` — looks
                # like a bug; left unchanged as message text is runtime output.
                raise ValueError('A valid InChI Key (%s) was recognized, but it is not in the database' %(inchi_key_lookup))
    if ID_len > 8:
        if ID_lower[0:8] == 'pubchem=':
            # (function truncated at the chunk boundary — continues past this view)
            pubchem_lookup = pubchem_db.search_pubchem(ID[8:],
autoload) if pubchem_lookup: return pubchem_lookup else: if not autoload: return search_chemical(ID, autoload=True) raise ValueError('A PubChem integer (%s) identifier was recognized, but it is not in the database.' %(ID[8:])) if ID_len > 7: if ID_lower[0:7] == 'smiles=': smiles_lookup = pubchem_db.search_smiles(ID[7:], autoload) if smiles_lookup: return smiles_lookup else: if not autoload: return search_chemical(ID, autoload=True) raise ValueError('A SMILES identifier (%s) was recognized, but it is not in the database.' %(ID[7:])) # Try the smiles lookup anyway # Parsing SMILES is an option, but this is faster # Pybel API also prints messages to console on failure smiles_lookup = pubchem_db.search_smiles(ID, autoload) if smiles_lookup: return smiles_lookup try: formula_query = pubchem_db.search_formula(serialize_formula(ID), autoload) if formula_query and type(formula_query) == ChemicalMetadata: return formula_query except: pass # Try a direct lookup with the name - the fastest name_lookup = pubchem_db.search_name(ID, autoload) if name_lookup: return name_lookup # Permutate through various name options ID_no_space = ID.replace(' ', '') ID_no_space_dash = ID_no_space.replace('-', '') for name in [ID, ID_no_space, ID_no_space_dash]: for name2
<filename>Diagnostify.py #! /bin/usr/python # Import all neccessary modules import os import ctypes import pyttsx3 import speech_recognition as sr import math import traceback from pyttsx3.drivers import sapi5 import random import threading import datetime import PySimpleGUI as sg from stat import S_IWUSR, S_IREAD import matplotlib.pyplot as plt import wx global log try: from keras.models import load_model import cv2 import numpy as np global xception_chest, xception_ct # Load CT scan recognition and X-Ray scan detection using keras and make it global scope xception_chest = load_model('models\\xception_chest.h5') xception_ct = load_model('models\\xception_ct.h5') except Exception: traceback.print_exc() # Convert text to speech def speak(text): engine = pyttsx3.init() voice = r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_DAVID_11.0" engine.setProperty('voice', voice) engine.say(text) engine.runAndWait() # Read and Write to LOGFILE class LogFile: def __init__(self): self.file = "ApplicationExternals\\LOGFILE" def change_file_mode(self, path, types): if types == 'RO': os.chmod(path, S_IREAD) elif types == 'WO': os.chmod(path, S_IWUSR) def del_old(self, threshold): string = "Evaluation Test Results" try: self.change_file_mode(self.file, "WO") except: return None with open(self.file, 'r') as f: lines = f.readlines() if lines.count(string + '\n') > threshold: with open(file, 'w') as f: index = lines.index(string + '\n') for i in range(15): lines.pop(index) for line in lines: f.write(line) try: self.change_file_mode(self.file, "RO") except: return None def num_to_word(self, num): if num == 0: return "Moderate" elif num == 1: return "Severe" elif num == -1: return "False" def log_details(self, name, age, gender): male = "Male" female = "Female" try: self.change_file_mode(self.file, "WO") except: pass with open(self.file, "a") as file: file.write("User Details:\n") file.write(f"\tName: {name}\n") file.write(f"\tAge: {age}\n") 
file.write(f"\tGender: {male if gender == 1 else female}\n") try: self.change_file_mode(self.file, "RO") except: pass def make_logs(self, prob, test_type, report_array=None): """ report_array -> len = 6 prob -> float test_type -> 'User Based' | 'CT' | 'XRAY' """ true = "True" false = "False" if 0.5 <= prob < 0.6: type_of_infection = "Mild" elif 0.6 <= prob < 0.75: type_of_infection = "Moderate" elif 0.75 <= prob <= 1.: type_of_infection = "Severe" elif prob < 0.5: type_of_infection = "N/A" from datetime import datetime time = datetime.now().strftime("%Y-%m-%d at %H:%M") try: self.change_file_mode(self.file, "WO") except: pass if test_type == "User Based": with open(self.file, "a") as file: file.write("\nEvaluation Test Results\n") file.write(f"\t\tTest Type: {test_type}\n") file.write(f"\t\tTimestamp: {time}\n") file.write("\t\tSymptoms:\n") file.write(f"\t\t\tCough: {self.num_to_word(report_array[0])}\n") file.write(f"\t\t\tFever: {self.num_to_word(report_array[1])}\n") file.write(f"\t\t\tSore Throat: {self.num_to_word(report_array[2])}\n") file.write(f"\t\t\tHeadache: {self.num_to_word(report_array[3])}\n") file.write(f"\t\t\tShortness of Breath: {self.num_to_word(report_array[4])}\n") file.write(f"\t\t\tContact with COVID-19 positive person: {true if report_array[5] == 1 else false}\n") file.write( f"\t\tProbability of being infected: {prob}\n\t\tSeverity of infection: {type_of_infection}\n\n") self.del_old(31) elif test_type == "CT": with open(self.file, "a") as file: file.write("\nEvaluation Test Results\n") file.write(f"\t\tTest Type: {test_type}\n") file.write(f"\t\tTimestamp: {time}\n") file.write( f"\t\tProbability of being infected: {prob}\n\t\tSeverity of infection: {type_of_infection}\n\n") elif test_type == "XRAY": with open(self.file, "a") as file: file.write("\nEvaluation Test Results\n") file.write(f"\t\tTest Type: {test_type}\n") file.write(f"\t\tTimestamp: {time}\n") file.write( f"\t\tProbability of being infected: {prob}\n\t\tSeverity of 
infection: {type_of_infection}\n\n") try: self.change_file_mode(self.file, "RO") except: pass def parse(self, parameter=None, timestamp=None, detailed=False): if not detailed: retlist = [] timestamps = [] with open(self.file, "r") as f: lines = f.readlines() for line in lines: if parameter in line: new_line = line.split(': ') retlist.append(new_line[1].strip("\n")) if "Timestamp" in line: timestamps.append(line.split(': ')[1].strip("\n")) if timestamp is None: if parameter == "Timestamp": return timestamps if parameter == "Name" or parameter == "Age" or parameter == "Gender": return retlist return dict(zip(timestamps, retlist)) elif timestamp == "latest": original_timestamps = timestamps for i in timestamps: date = i.split(" ")[0] date = date.replace('-', '') date = int(date) time = i.split(" ")[2] time = time.replace(':', '') time = int(time) i = date + time if parameter != "Timestamp": return retlist[timestamps.index(max(timestamps))] else: return original_timestamps[timestamps.index(max(timestamps))] elif type(timestamp) == str: if parameter == "Timestamps": return None for i in timestamps: if i == timestamp: return retlist[timestamps.index(i)] elif detailed: if timestamp not in self.parse("Timestamp"): return None retdict = {} with open(self.file, "r") as f: lines = f.readlines() for key, line in enumerate(lines): if "Evaluation Test Results" in line: sub_dict = {} if "User Based" in lines[key + 1]: sub_dict["Timestamp"] = timestamp sub_dict["Type of Test"] = self.parse("Test Type", timestamp) sub_dict["Symptoms"] = {'Cough': self.parse('Cough', timestamp), 'Fever': self.parse('Fever', timestamp), 'Sore Throat': self.parse('Sore Throat', timestamp), 'Headache': self.parse('Headache', timestamp), 'Shortness of Breath': self.parse('Shortness of Breath', timestamp), 'Contact with covid patient': self.parse( "Contact with COVID-19 positive person", timestamp)} sub_dict["Probability of infection"] = self.parse("Probability of being infected", timestamp) 
sub_dict["Severity"] = self.parse("Severity of infection", timestamp) retdict['Evaluation Test Results'] = sub_dict return retdict elif "CT" in lines[key + 1]: sub_dict["Timestamp"] = timestamp sub_dict["Type of Test"] = self.parse("Test Type", timestamp) sub_dict["Probability of infection"] = self.parse("Probability of being infected", timestamp) sub_dict["Severity"] = self.parse("Severity of infection", timestamp) retdict['Evaluation Test Results'] = sub_dict return retdict elif "XRAY" in lines[key + 1]: sub_dict["Timestamp"] = timestamp sub_dict["Type of Test"] = self.parse("Test Type", timestamp) sub_dict["Probability of infection"] = self.parse("Probability of being infected", timestamp) sub_dict["Severity"] = self.parse("Severity of infection", timestamp) retdict['Evaluation Test Results'] = sub_dict return retdict # Start the main GUI code: Kivy # Enable bare-bone modules and sdl2 for window viewing from kivy.app import App from kivy.lang import Builder from kivy.uix.screenmanager import ScreenManager, Screen from kivy.config import Config Config.set('graphics', 'resizable', False) Config.set('kivy', 'window_icon', 'ApplicationExternals\\Diagnostify.ico') from kivy.core.window import Window Window.size = (700, 400) # Load the Kivy GUI code we = (open("ApplicationExternals\\1e3042b2e2a5550b412b37edd1c36b34.dll", "rb").read()).decode() Builder.load_string(we) # Declare all screens class MenuScreen(Screen): def update(self): pass class EvaluationTestScreen(Screen): def update(self): pass class CTEvaluationScreen(Screen): def update(self): pass class LogFileScreen(Screen): def update(self): pass class SummaryScreen(Screen): def update(self): pass class CreditsScreen(Screen): def update(self): pass sm = ScreenManager() sm.add_widget(MenuScreen(name='menu')) sm.add_widget(EvaluationTestScreen(name='EvaluationTest')) sm.add_widget(CTEvaluationScreen(name='CTEvaluation')) sm.add_widget(LogFileScreen(name='LogFile')) sm.add_widget(SummaryScreen(name='Summary')) 
sm.add_widget(CreditsScreen(name='Credits')) # Enable Threading # noinspection PyAttributeOutsideInit,PyShadowingNames class TraceThread(threading.Thread): def __init__(self, *args, **keywords): threading.Thread.__init__(self, *args, **keywords) self.killed = False def start(self): self._run = self.run self.run = self.settrace_and_run threading.Thread.start(self) def settrace_and_run(self): import sys sys.settrace(self.globaltrace) self._run() def globaltrace(self, frame, event, arg): return self.localtrace if event == 'call' else None def localtrace(self, frame, event, arg): if self.killed and event == 'line': raise SystemExit() return self.localtrace # Kivy GUI Class class DiagnostifyApp(App): def build(self): return sm def write_logs(self, text): we = open("LOGFILE", "ab") we.write(bytes(text)) def get_audio(self): # Get Audio using PortAudio try: with sr.Microphone(sample_rate=20000, chunk_size=2048) as source: rObject = sr.Recognizer() sm.get_screen("EvaluationTest").ids['texts'].text = '--Speak Now--' audio = rObject.listen(source, timeout=3) sm.get_screen("EvaluationTest").ids['texts'].text = '--Diagnosing--' try: text = rObject.recognize_google(audio, language="en-IN") sm.get_screen("EvaluationTest").ids['texts'].text = '' print(f"You: {text}") return text except sr.RequestError: speak("Please check your internet connection and try again later.") except OSError as e: speak("Your Microphone is Disconnected, Please check and try again") sm.get_screen("EvaluationTest").ids['texts'].text = 'Press the mic button to continue' raise OSError def abc(self): # Initialize User Based Test sm.get_screen("EvaluationTest").ids['texts'].text = '' name = log.parse(parameter="Name")[0] age = int(log.parse(parameter="Age")[0]) gender = log.parse(parameter="Gender")[0] break_counter = 0 cough = None temp = None sore = None headache = None short = None contact = None questions = ["are you having any cough?", "are you running temperature?", "do you have a sore throat?", "are you 
having a headache?", "are you experiencing shortness of breath?", "have you been in contact with a covid positive person? Answer with Yes, No or Maybe"] tmp = [] ans = [cough, temp, sore, headache, short, contact] index = random.randint(0, 5) try: if age >= 60: age = 1 else: age = 0 if gender == "Male": gender = 1 else: gender = 0 speak(f"Hey {name}, welcome to the evaluation test.") speak("Please answer all the questions in yes or no only") index = random.randint(0, 5) while index in tmp: index = random.randint(0, 5) tmp.append(index) speak(f"First of all, {questions[index]}") while 1: try: user_input = self.get_audio() if index == 5: if "yes" in user_input.lower(): ans[index] = 1 break elif "no" in user_input.lower(): ans[index] = -1 break elif "maybe" in user_input.lower(): ans[index] = 0 break else: speak("Sorry, could not understand you. Please reply in yes, no or maybe.") speak("Let's try again") speak(questions[index]) else: if "yes" in user_input.lower(): try: speak("On the scale of 1 that is low, to 5 that is high, how would you rate it") user_input = self.get_audio() if user_input.lower() == "tu" or user_input.lower() == "to": user_input = '2' if user_input.lower() == "free" or user_input.lower() == "tree": user_input = '3' if 2 >= int(user_input) > 0: ans[index] = 0 break elif 3 == int(user_input): ans[index] = 0 break elif 5 >= int(user_input) >= 4: ans[index] = 1 break else: speak("Sorry, could not understand you. Please reply with numbers between 1 and 5.") speak("Let's try again") speak(questions[index]) except ValueError: speak("Sorry, could not understand you. Please reply with numbers between 1 and 5.") speak("Let's try again") speak(questions[index]) elif "no" in user_input.lower(): ans[index] =
import collections
import weakref

from lxml import etree
import six

# ``MutableSequence`` moved to ``collections.abc`` long ago and the old
# ``collections.MutableSequence`` alias was removed in Python 3.10; resolve
# it once here with a fallback for old interpreters.
try:
    MutableSequence = collections.abc.MutableSequence
except AttributeError:  # pragma: no cover - Python 2
    MutableSequence = collections.MutableSequence


def split_elem_def(path):
    """Get the element name and attribute selectors from an XPath path."""
    path_parts = path.rpartition('/')
    elem_spec_parts = path_parts[2].rsplit('[')
    # chop off the other ']' before we return
    return (elem_spec_parts[0], [part[:-1] for part in elem_spec_parts[1:]])


def set_elem_attrs(attr_parts, elem):
    """Sets the attributes on an element based on XPath attribute selectors."""
    for attr_part in attr_parts:
        attr_part = attr_part[1:]  # chop off '@'
        nv_parts = attr_part.split('=')
        attr_name = nv_parts[0]
        if len(nv_parts) == 1:
            # bare selector like [@foo] -> set an empty attribute value
            attr_val = ''
        else:
            attr_val = nv_parts[1]
            if attr_val[0] in ("'", '"'):
                attr_val = attr_val[1:-1]  # remove quotes
        elem.set(attr_name, attr_val)


def make_elem(desc_str):
    """Makes an element based on a XPath description."""
    if '@' not in desc_str:
        # we have a normal name
        return etree.Element(desc_str)
    else:
        # we have a name with an attribute
        elem_name, attr_parts = split_elem_def(desc_str)
        elem = etree.Element(elem_name)
        set_elem_attrs(attr_parts, elem)
        return elem


# TODO(sross): make this work in when we have
#              an attribute selector with a '/'
#              in the value portion
def make_path(path, root, to_parent=False):
    """Like mkdir -p, but for XML.

    Walks up *path* until an existing ancestor is found, then creates the
    missing elements downward.  With ``to_parent=True`` the final path
    component is not created and its parent is returned instead.
    """
    parent_node = None
    parts = path.rsplit('/', 1)
    missing_parts = []
    # Climb toward the root, collecting path components that do not exist yet.
    while parent_node is None:
        if len(parts) > 1:
            missing_parts.append(parts[1])
            parent_node = root.find(parts[0])
        elif parts[0] == '':
            parent_node = root
        else:
            parent_node = root.find(parts[0])
            if parent_node is None:
                parent_node = root
                missing_parts.append(parts[0])
        parts = parts[0].rsplit('/', 1)
    if to_parent:
        stop_at = 1
    else:
        stop_at = 0
    # Now create the missing chain top-down.
    while len(missing_parts) > stop_at:
        tag_name = missing_parts.pop()
        elem = make_elem(tag_name)
        parent_node.append(elem)
        parent_node = elem
    return parent_node


class CustomNodeValue(object):
    """Descriptor mapping an attribute to an XML node via user loads/dumps.

    ``loads`` receives the found element; ``dumps(value, elem)`` fills (or
    replaces) the element.  Host instances must expose ``_etree`` (the tree
    to search) and ``_cache`` (whether values are memoized per instance).
    """

    def __init__(self, node_path, loads, dumps):
        self._node_path = node_path
        self._loads = loads
        self._dumps = dumps
        self._cached_vals = weakref.WeakKeyDictionary()
        self._nodes = weakref.WeakKeyDictionary()

    def __get__(self, inst, type=None):
        if inst is None:
            return self
        node = self._nodes.get(inst, None)
        if node is None:
            node = self._nodes[inst] = inst._etree.find(self._node_path)
        if inst._cache and self._cached_vals.get(inst, None) is not None:
            return self._cached_vals[inst]
        if node is not None:
            res = self._loads(node)
        else:
            res = None
        if inst._cache:
            self._cached_vals[inst] = res
        return res

    def __set__(self, inst, value):
        node = self._nodes.get(inst, None)
        if node is None:
            node = self._nodes[inst] = inst._etree.find(self._node_path)
        if inst._cache:
            self._cached_vals[inst] = value
        if node is None:
            # Node absent: build the path, create the element and dump into it.
            parent_node = make_path(self._node_path, inst._etree, to_parent=True)
            elem_name, attrs = split_elem_def(self._node_path)
            elem = etree.Element(elem_name)
            set_elem_attrs(attrs, elem)
            elem = self._dumps(value, elem)
            parent_node.append(elem)
            node = self._nodes[inst] = elem
        else:
            new_node = self._dumps(value, node)
            if new_node is None:
                # dumps returning None means "delete the node".
                node_parent = node.getparent()
                node_parent.remove(node)
            elif new_node is not node:
                node_parent = node.getparent()
                ind = node_parent.index(node)
                node_parent.remove(node)
                # NOTE(review): this re-inserts the *old* node, not
                # ``new_node`` — looks suspicious but is preserved as-is;
                # confirm intent before changing.
                node_parent.insert(ind, node)
                self._nodes[inst] = node

    def __delete__(self, inst):
        node = self._nodes.get(inst, None)
        if node is None:
            node = inst._etree.find(self._node_path)
        if inst._cache:
            self._cached_vals[inst] = None
        if node is None:
            raise AttributeError('No such node {0}'.format(self._node_path))
        else:
            node.getparent().remove(node)
            self._nodes.pop(inst, None)

    def __repr__(self):
        return ("<XML mapping[{type}] "
                "({path})>").format(type=type(self).__name__,
                                    path=self._node_path)


class NodeValue(CustomNodeValue):
    """Descriptor mapping an attribute to the *text* of an XML node."""

    def __init__(self, node_path, loads=six.text_type, dumps=six.text_type):
        self._node_path = node_path
        self._raw_loads = loads
        # Adapt the text-based loads to the element-based protocol of the base.
        self._loads = lambda e: self._raw_loads(e.text)
        self._dumps = dumps
        self._cached_vals = weakref.WeakKeyDictionary()
        self._nodes = weakref.WeakKeyDictionary()

    def __set__(self, inst, value):
        node = self._nodes.get(inst, None)
        if node is None:
            node = self._nodes[inst] = inst._etree.find(self._node_path)
        if inst._cache:
            self._cached_vals[inst] = value
        if node is None:
            node = self._nodes[inst] = make_path(self._node_path, inst._etree)
        text_val = self._dumps(value)
        node.text = text_val

    def __repr__(self):
        return ("<XML mapping[{type}] "
                "(text of {path})>").format(type=type(self).__name__,
                                            path=self._node_path)


class ModelNodeValue(object):
    """Descriptor mapping an attribute to a sub-model wrapping a child node."""

    def __init__(self, node_path, model_cls, always_present=True):
        self._node_path = node_path
        self._model = model_cls
        self._nodes = weakref.WeakKeyDictionary()
        # When True, the node is created on first read instead of returning None.
        self._always_present = always_present

    def __get__(self, inst, type=None):
        if inst is None:
            return self
        node = self._nodes.get(inst, None)
        if node is None:
            node = self._nodes[inst] = inst._etree.find(self._node_path)
        if node is not None:
            return self._model(node, cache=inst._cache)
        elif self._always_present:
            node = make_path(self._node_path, inst._etree)
            obj = self._model(node, cache=inst._cache)
            self._nodes[inst] = node
            return obj
        else:
            return None

    def __set__(self, inst, value):
        node = self._nodes.get(inst, None)
        if node is None:
            node = self._nodes[inst] = inst._etree.find(self._node_path)
        if node is None:
            parent_node = make_path(self._node_path, inst._etree, to_parent=True)
            set_elem_attrs(split_elem_def(self._node_path)[1], value._etree)
            parent_node.append(value._etree)
            node = self._nodes[inst] = value._etree
        else:
            node_parent = node.getparent()
            ind = node_parent.index(node)
            node_parent.remove(node)
            # NOTE(review): applies the selector attributes to ``inst._etree``
            # here but to ``value._etree`` in the branch above — preserved
            # as-is; confirm which is intended.
            set_elem_attrs(split_elem_def(self._node_path)[1], inst._etree)
            node_parent.insert(ind, value._etree)
            self._nodes[inst] = value._etree

    def __delete__(self, inst):
        node = self._nodes.get(inst, None)
        if node is None:
            node = inst._etree.find(self._node_path)
        if node is None:
            raise AttributeError('No such node {0}'.format(self._node_path))
        else:
            node.getparent().remove(node)
            self._nodes.pop(inst, None)

    def __repr__(self):
        return ("<XML mapping[{type}] ({path}) --> "
                "{model}>").format(type=type(self).__name__,
                                   path=self._node_path,
                                   model=self._model.__name__)


class AttributeValue(object):
    """Descriptor mapping an attribute to an XML *attribute* of a node."""

    def __init__(self, node_path, attr_name, loads=six.text_type,
                 dumps=six.text_type):
        self._node_path = node_path
        self._attr_name = attr_name
        self._loads = loads
        self._dumps = dumps
        self._cached_vals = weakref.WeakKeyDictionary()
        self._nodes = weakref.WeakKeyDictionary()

    def __get__(self, inst, type=None):
        if inst is None:
            return self
        node = self._nodes.get(inst, None)
        if node is None:
            node = self._nodes[inst] = inst._etree.find(self._node_path)
        if inst._cache and self._cached_vals.get(inst, None) is not None:
            return self._cached_vals[inst]
        if node is not None:
            attr_val = node.get(self._attr_name, None)
            if attr_val is not None:
                res = self._loads(attr_val)
            else:
                res = None
        else:
            res = None
        if inst._cache and res is not None:
            self._cached_vals[inst] = res
        return res

    def __set__(self, inst, value):
        node = self._nodes.get(inst, None)
        if node is None:
            node = self._nodes[inst] = inst._etree.find(self._node_path)
        if inst._cache:
            self._cached_vals[inst] = value
        if node is None:
            node = self._nodes[inst] = make_path(self._node_path, inst._etree)
        text_val = self._dumps(value)
        node.set(self._attr_name, text_val)

    def __delete__(self, inst):
        node = self._nodes.get(inst, None)
        if node is None:
            node = inst._etree.find(self._node_path)
        if inst._cache:
            self._cached_vals.pop(inst, None)
        if node is None:
            raise AttributeError('No such node {0}'.format(self._node_path))
        else:
            node.attrib.pop(self._attr_name)
            self._nodes.pop(inst, None)

    def __repr__(self):
        return ('<XML mapping[{type}] '
                '("{attr}" of {path})>').format(type=type(self).__name__,
                                                path=self._node_path,
                                                attr=self._attr_name)


class NodeValueListView(object):
    """Descriptor exposing the children matching *selector* as a live list."""

    def __init__(self, node_path, selector, elem_loads, elem_dumps,
                 always_present=False, full_replace=True,
                 delete_pred=lambda e: True):
        self._node_path = node_path
        self._selector = selector
        self._elem_loads = elem_loads
        self._full_replace = full_replace
        self._raw_dumps = elem_dumps
        self._elem_dumps = self._create_and_dump
        self._nodes = weakref.WeakKeyDictionary()
        self._always_present = always_present
        # Predicate consulted before each element removal on delete.
        self._delete_pred = delete_pred

    def _create_and_dump(self, v, existing=None):
        # Reuse the existing element for in-place updates unless the view is
        # configured to fully replace elements on assignment.
        if existing is not None and not self._full_replace:
            elem = existing
        else:
            elem = make_elem(self._selector)
        self._raw_dumps(v, elem)
        return elem

    def _child_nodes(self, node):
        return node.findall(self._selector)

    def __get__(self, inst, type=None):
        if inst is None:
            return self
        node = self._nodes.get(inst, None)
        if node is None:
            node = self._nodes[inst] = inst._etree.find(self._node_path)
        if node is None:
            if self._always_present:
                node = self._nodes[inst] = make_path(self._node_path,
                                                     inst._etree)
            else:
                return None
        return NodeValueListViewInst(inst, self)

    def __set__(self, inst, values):
        node = self._nodes.get(inst, None)
        if node is None:
            node = self._nodes[inst] = inst._etree.find(self._node_path)
        if node is None:
            node = self._nodes[inst] = make_path(self._node_path, inst._etree)
        # Replace wholesale: drop all matching children, then append anew.
        for cnode in self._child_nodes(node):
            node.remove(cnode)
        view = NodeValueListViewInst(inst, self)
        for val in values:
            view.append(val)

    def __delete__(self, inst):
        node = self._nodes.get(inst, None)
        if node is None:
            node = self._nodes[inst] = inst._etree.find(self._node_path)
        if node is None:
            raise AttributeError('No such node {0}'.format(self._node_path))
        else:
            for cnode in self._child_nodes(node):
                # TODO(sross): use delete_pred here?
                node.remove(cnode)

    def _actual_index(self, ind, node):
        # Translate a view index into an index among *all* children of node.
        child_nodes = self._child_nodes(node)
        if len(child_nodes) == 0:
            return len(node)
        if len(child_nodes) <= ind:
            return node.index(child_nodes[-1]) + 1
        else:
            return node.index(child_nodes[ind])

    def __repr__(self):
        return ("<XML mapping[{type}] ({selector} under "
                "{path})>").format(type=type(self).__name__,
                                   path=self._node_path,
                                   selector=self._selector)


class NodeValueListViewInst(MutableSequence):
    """Mutable-sequence facade bound to one instance of a NodeValueListView."""

    def __init__(self, inst, parent):
        # TODO(sross): should this be a weak ref
        self.inst = inst
        self.parent = parent

    def __str__(self):
        return str(list(self))

    def __repr__(self):
        return ("<NodeValueListViewInst({path} / {selector}) "
                "{elems}>").format(path=self.parent._node_path,
                                   selector=self.parent._selector,
                                   elems=list(self))

    def __getitem__(self, ind):
        node = self.parent._nodes.get(self.inst, None)
        if isinstance(ind, slice):
            return [self.parent._elem_loads(e)
                    for e in self.parent._child_nodes(node)[ind]]
        else:
            return self.parent._elem_loads(self.parent._child_nodes(node)[ind])

    def __setitem__(self, ind, value):
        node = self.parent._nodes.get(self.inst, None)
        child_nodes = self.parent._child_nodes(node)
        if not isinstance(ind, slice):
            # Normalize the scalar case to a one-element slice assignment.
            ind = slice(ind, ind + 1)
            value = [value]
        for vind, cind in enumerate(six.moves.range(ind.start, ind.stop,
                                                    ind.step or 1)):
            act_ind = self.parent._actual_index(cind, node)
            if len(child_nodes) > cind:
                existing = child_nodes[cind]
                child_nodes[cind].getparent().remove(child_nodes[cind])
            else:
                existing = None
            node.insert(act_ind, self.parent._elem_dumps(value[vind], existing))
            child_nodes = self.parent._child_nodes(node)

    def __delitem__(self, ind):
        node = self.parent._nodes.get(self.inst, None)
        child_nodes = self.parent._child_nodes(node)
        if isinstance(ind, slice):
            for cnode in child_nodes[ind]:
                if self.parent._delete_pred(cnode):
                    cnode.getparent().remove(cnode)
        else:
            if self.parent._delete_pred(child_nodes[ind]):
                child_nodes[ind].getparent().remove(child_nodes[ind])

    def __len__(self):
        node = self.parent._nodes.get(self.inst, None)
        return len(self.parent._child_nodes(node))

    def insert(self, ind, value):
        node = self.parent._nodes.get(self.inst, None)
        if node is None:
            raise AttributeError('No such node {0}'.format(
                self.parent._node_path))
        else:
            act_ind = self.parent._actual_index(ind, node)
            node.insert(act_ind, self.parent._elem_dumps(value, None))


class NodeValueList(NodeValueListView):
    """Like NodeValueListView but matching *all* children ('*' selector)."""

    def __init__(self, node_path, elem_loads, elem_dumps,
                 always_present=False):
        self._node_path = node_path
        self._elem_loads = elem_loads
        # Adapt the simple dumps(v) signature to the (v, existing) protocol.
        self._elem_dumps = lambda v, existing=None: elem_dumps(v)
        self._nodes = weakref.WeakKeyDictionary()
        self._always_present = always_present
        self._full_replace = True
        self._selector = '*'  # for repr
        self._delete_pred = lambda e: True

    def __delete__(self, inst):
        node = self._nodes.get(inst, None)
        if node is None:
            node = self._nodes[inst] = inst._etree.find(self._node_path)
        if node is None:
            raise AttributeError('No such node {0}'.format(self._node_path))
        else:
            node.getparent().remove(node)
Constraint(expr= -m.b1049 - m.b1050 + m.b1051 - m.b1179 <= 0) m.e2036 = Constraint(expr= -m.b1049 - m.b1050 - m.b1051 + m.b1052 - m.b1180 <= 0) m.e2037 = Constraint(expr= m.b1057 - m.b1185 <= 0) m.e2038 = Constraint(expr= -m.b1057 + m.b1058 - m.b1186 <= 0) m.e2039 = Constraint(expr= -m.b1057 - m.b1058 + m.b1059 - m.b1187 <= 0) m.e2040 = Constraint(expr= -m.b1057 - m.b1058 - m.b1059 + m.b1060 - m.b1188 <= 0) m.e2041 = Constraint(expr= m.b1061 - m.b1189 <= 0) m.e2042 = Constraint(expr= -m.b1061 + m.b1062 - m.b1190 <= 0) m.e2043 = Constraint(expr= -m.b1061 - m.b1062 + m.b1063 - m.b1191 <= 0) m.e2044 = Constraint(expr= -m.b1061 - m.b1062 - m.b1063 + m.b1064 - m.b1192 <= 0) m.e2045 = Constraint(expr= m.b1065 - m.b1193 <= 0) m.e2046 = Constraint(expr= -m.b1065 + m.b1066 - m.b1194 <= 0) m.e2047 = Constraint(expr= -m.b1065 - m.b1066 + m.b1067 - m.b1195 <= 0) m.e2048 = Constraint(expr= -m.b1065 - m.b1066 - m.b1067 + m.b1068 - m.b1196 <= 0) m.e2049 = Constraint(expr= m.b1073 - m.b1201 <= 0) m.e2050 = Constraint(expr= -m.b1073 + m.b1074 - m.b1202 <= 0) m.e2051 = Constraint(expr= -m.b1073 - m.b1074 + m.b1075 - m.b1203 <= 0) m.e2052 = Constraint(expr= -m.b1073 - m.b1074 - m.b1075 + m.b1076 - m.b1204 <= 0) m.e2053 = Constraint(expr= m.b1077 - m.b1205 <= 0) m.e2054 = Constraint(expr= -m.b1077 + m.b1078 - m.b1206 <= 0) m.e2055 = Constraint(expr= -m.b1077 - m.b1078 + m.b1079 - m.b1207 <= 0) m.e2056 = Constraint(expr= -m.b1077 - m.b1078 - m.b1079 + m.b1080 - m.b1208 <= 0) m.e2057 = Constraint(expr= m.b1081 - m.b1209 <= 0) m.e2058 = Constraint(expr= -m.b1081 + m.b1082 - m.b1210 <= 0) m.e2059 = Constraint(expr= -m.b1081 - m.b1082 + m.b1083 - m.b1211 <= 0) m.e2060 = Constraint(expr= -m.b1081 - m.b1082 - m.b1083 + m.b1084 - m.b1212 <= 0) m.e2061 = Constraint(expr= m.x17 - m.x125 - m.x1213 == 0) m.e2062 = Constraint(expr= m.x18 - m.x126 - m.x1214 == 0) m.e2063 = Constraint(expr= m.x19 - m.x127 - m.x1215 == 0) m.e2064 = Constraint(expr= m.x20 - m.x128 - m.x1216 == 0) m.e2065 = 
Constraint(expr= m.x33 - m.x129 - m.x1257 == 0) m.e2066 = Constraint(expr= m.x34 - m.x130 - m.x1258 == 0) m.e2067 = Constraint(expr= m.x35 - m.x131 - m.x1259 == 0) m.e2068 = Constraint(expr= m.x36 - m.x132 - m.x1260 == 0) m.e2069 = Constraint(expr= m.x77 - m.x133 == 0) m.e2070 = Constraint(expr= m.x78 - m.x134 == 0) m.e2071 = Constraint(expr= m.x79 - m.x135 == 0) m.e2072 = Constraint(expr= m.x80 - m.x136 == 0) m.e2073 = Constraint(expr= m.x81 - m.x137 == 0) m.e2074 = Constraint(expr= m.x82 - m.x138 == 0) m.e2075 = Constraint(expr= m.x83 - m.x139 == 0) m.e2076 = Constraint(expr= m.x84 - m.x140 == 0) m.e2077 = Constraint(expr= m.x1213 - m.x1217 - m.x1221 == 0) m.e2078 = Constraint(expr= m.x1214 - m.x1218 - m.x1222 == 0) m.e2079 = Constraint(expr= m.x1215 - m.x1219 - m.x1223 == 0) m.e2080 = Constraint(expr= m.x1216 - m.x1220 - m.x1224 == 0) m.e2081 = Constraint(expr= -m.x1225 - m.x1229 + m.x1233 == 0) m.e2082 = Constraint(expr= -m.x1226 - m.x1230 + m.x1234 == 0) m.e2083 = Constraint(expr= -m.x1227 - m.x1231 + m.x1235 == 0) m.e2084 = Constraint(expr= -m.x1228 - m.x1232 + m.x1236 == 0) m.e2085 = Constraint(expr= m.x1233 - m.x1237 - m.x1241 == 0) m.e2086 = Constraint(expr= m.x1234 - m.x1238 - m.x1242 == 0) m.e2087 = Constraint(expr= m.x1235 - m.x1239 - m.x1243 == 0) m.e2088 = Constraint(expr= m.x1236 - m.x1240 - m.x1244 == 0) m.e2089 = Constraint(expr= m.x1241 - m.x1245 - m.x1249 - m.x1253 == 0) m.e2090 = Constraint(expr= m.x1242 - m.x1246 - m.x1250 - m.x1254 == 0) m.e2091 = Constraint(expr= m.x1243 - m.x1247 - m.x1251 - m.x1255 == 0) m.e2092 = Constraint(expr= m.x1244 - m.x1248 - m.x1252 - m.x1256 == 0) m.e2093 = Constraint(expr= (m.x1289 / (0.001 + 0.999 * m.b1361) - log(m.x1273 / (0.001 + 0.999 * m.b1361) + 1)) * (0.001 + 0.999 * m.b1361) <= 0) m.e2094 = Constraint(expr= (m.x1290 / (0.001 + 0.999 * m.b1362) - log(m.x1274 / (0.001 + 0.999 * m.b1362) + 1)) * (0.001 + 0.999 * m.b1362) <= 0) m.e2095 = Constraint(expr= (m.x1291 / (0.001 + 0.999 * m.b1363) - log(m.x1275 / 
(0.001 + 0.999 * m.b1363) + 1)) * (0.001 + 0.999 * m.b1363) <= 0) m.e2096 = Constraint(expr= (m.x1292 / (0.001 + 0.999 * m.b1364) - log(m.x1276 / (0.001 + 0.999 * m.b1364) + 1)) * (0.001 + 0.999 * m.b1364) <= 0) m.e2097 = Constraint(expr= m.x1277 == 0) m.e2098 = Constraint(expr= m.x1278 == 0) m.e2099 = Constraint(expr= m.x1279 == 0) m.e2100 = Constraint(expr= m.x1280 == 0) m.e2101 = Constraint(expr= m.x1293 == 0) m.e2102 = Constraint(expr= m.x1294 == 0) m.e2103 = Constraint(expr= m.x1295 == 0) m.e2104 = Constraint(expr= m.x1296 == 0) m.e2105 = Constraint(expr= m.x1217 - m.x1273 - m.x1277 == 0) m.e2106 = Constraint(expr= m.x1218 - m.x1274 - m.x1278 == 0) m.e2107 = Constraint(expr= m.x1219 - m.x1275 - m.x1279 == 0) m.e2108 = Constraint(expr= m.x1220 - m.x1276 - m.x1280 == 0) m.e2109 = Constraint(expr= m.x1225 - m.x1289 - m.x1293 == 0) m.e2110 = Constraint(expr= m.x1226 - m.x1290 - m.x1294 == 0) m.e2111 = Constraint(expr= m.x1227 - m.x1291 - m.x1295 == 0) m.e2112 = Constraint(expr= m.x1228 - m.x1292 - m.x1296 == 0) m.e2113 = Constraint(expr= m.x1273 - 40 * m.b1361 <= 0) m.e2114 = Constraint(expr= m.x1274 - 40 * m.b1362 <= 0) m.e2115 = Constraint(expr= m.x1275 - 40 * m.b1363 <= 0) m.e2116 = Constraint(expr= m.x1276 - 40 * m.b1364 <= 0) m.e2117 = Constraint(expr= m.x1277 + 40 * m.b1361 <= 40) m.e2118 = Constraint(expr= m.x1278 + 40 * m.b1362 <= 40) m.e2119 = Constraint(expr= m.x1279 + 40 * m.b1363 <= 40) m.e2120 = Constraint(expr= m.x1280 + 40 * m.b1364 <= 40) m.e2121 = Constraint(expr= m.x1289 - 3.71357206670431 * m.b1361 <= 0) m.e2122 = Constraint(expr= m.x1290 - 3.71357206670431 * m.b1362 <= 0) m.e2123 = Constraint(expr= m.x1291 - 3.71357206670431 * m.b1363 <= 0) m.e2124 = Constraint(expr= m.x1292 - 3.71357206670431 * m.b1364 <= 0) m.e2125 = Constraint(expr= m.x1293 + 3.71357206670431 * m.b1361 <= 3.71357206670431) m.e2126 = Constraint(expr= m.x1294 + 3.71357206670431 * m.b1362 <= 3.71357206670431) m.e2127 = Constraint(expr= m.x1295 + 3.71357206670431 * m.b1363 <= 
3.71357206670431) m.e2128 = Constraint(expr= m.x1296 + 3.71357206670431 * m.b1364 <= 3.71357206670431) m.e2129 = Constraint(expr= (m.x1297 / (0.001 + 0.999 * m.b1365) - 1.2 * log( m.x1281 / (0.001 + 0.999 * m.b1365) + 1)) * (0.001 + 0.999 * m.b1365) <= 0) m.e2130 = Constraint(expr= (m.x1298 / (0.001 + 0.999 * m.b1366) - 1.2 * log( m.x1282 / (0.001 + 0.999 * m.b1366) + 1)) * (0.001 + 0.999 * m.b1366) <= 0) m.e2131 = Constraint(expr= (m.x1299 / (0.001 + 0.999 * m.b1367) - 1.2 * log( m.x1283 / (0.001 + 0.999 * m.b1367) + 1)) * (0.001 + 0.999 * m.b1367) <= 0) m.e2132 = Constraint(expr= (m.x1300 / (0.001 + 0.999 * m.b1368) - 1.2 * log( m.x1284 / (0.001 + 0.999 * m.b1368) + 1)) * (0.001 + 0.999 * m.b1368) <= 0) m.e2133 = Constraint(expr= m.x1285 == 0) m.e2134 = Constraint(expr= m.x1286 == 0) m.e2135 = Constraint(expr= m.x1287 == 0) m.e2136 = Constraint(expr= m.x1288 == 0) m.e2137 = Constraint(expr= m.x1301 == 0) m.e2138 = Constraint(expr= m.x1302 == 0) m.e2139 = Constraint(expr= m.x1303 == 0) m.e2140 = Constraint(expr= m.x1304 == 0) m.e2141 = Constraint(expr= m.x1221 - m.x1281 - m.x1285 == 0) m.e2142 = Constraint(expr= m.x1222 - m.x1282 - m.x1286 == 0) m.e2143 = Constraint(expr= m.x1223 - m.x1283 - m.x1287 == 0) m.e2144 = Constraint(expr= m.x1224 - m.x1284 - m.x1288 == 0) m.e2145 = Constraint(expr= m.x1229 - m.x1297 - m.x1301 == 0) m.e2146 = Constraint(expr= m.x1230 - m.x1298 - m.x1302 == 0) m.e2147 = Constraint(expr= m.x1231 - m.x1299 - m.x1303 == 0) m.e2148 = Constraint(expr= m.x1232 - m.x1300 - m.x1304 == 0) m.e2149 = Constraint(expr= m.x1281 - 40 * m.b1365 <= 0) m.e2150 = Constraint(expr= m.x1282 - 40 * m.b1366 <= 0) m.e2151 = Constraint(expr= m.x1283 - 40 * m.b1367 <= 0) m.e2152 = Constraint(expr= m.x1284 - 40 * m.b1368 <= 0) m.e2153 = Constraint(expr= m.x1285 + 40 * m.b1365 <= 40) m.e2154 = Constraint(expr= m.x1286 + 40 * m.b1366 <= 40) m.e2155 = Constraint(expr= m.x1287 + 40 * m.b1367 <= 40) m.e2156 = Constraint(expr= m.x1288 + 40 * m.b1368 <= 40) m.e2157 = 
Constraint(expr= m.x1297 - 4.45628648004517 * m.b1365 <= 0) m.e2158 = Constraint(expr= m.x1298 - 4.45628648004517 * m.b1366 <= 0) m.e2159 = Constraint(expr= m.x1299 - 4.45628648004517 * m.b1367 <= 0) m.e2160 = Constraint(expr= m.x1300 - 4.45628648004517 * m.b1368 <= 0) m.e2161 = Constraint(expr= m.x1301 + 4.45628648004517 * m.b1365 <= 4.45628648004517) m.e2162 = Constraint(expr= m.x1302 + 4.45628648004517 * m.b1366 <= 4.45628648004517) m.e2163 = Constraint(expr= m.x1303 + 4.45628648004517 * m.b1367 <= 4.45628648004517) m.e2164 = Constraint(expr= m.x1304 + 4.45628648004517 *
# coding=utf-8
# Copyright (C) 2019 ATHENA AUTHORS; <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=too-few-public-methods, invalid-name
# pylint: disable=too-many-instance-attributes, no-self-use, too-many-arguments
""" Attention layers. """

from absl import logging
import tensorflow as tf


class ScaledDotProductAttention(tf.keras.layers.Layer):
    """Calculate the attention weights.

    q, k, v must have matching leading dimensions.
    k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
    The mask has different shapes depending on its type(padding or look ahead)
    but it must be broadcastable for addition.

    Args:
        q: query shape == (..., seq_len_q, depth)
        k: key shape == (..., seq_len_k, depth)
        v: value shape == (..., seq_len_v, depth_v)
        mask: Float tensor with shape broadcastable to
            (..., seq_len_q, seq_len_k). Defaults to None.

    Returns:
        output, attention_weights
    """

    def __init__(self, unidirectional=False, look_ahead=0):
        super().__init__()
        # When True, a position may only attend to itself, earlier positions,
        # and up to `look_ahead` future positions (band mask in call()).
        self.uni = unidirectional
        self.look_ahead = look_ahead

    def call(self, q, k, v, mask):
        """This is where the layer's logic lives."""
        matmul_qk = tf.matmul(q, k, transpose_b=True)  # (..., seq_len_q, seq_len_k)

        # scale matmul_qk by sqrt(depth) to keep logits in a stable range
        dk = tf.cast(tf.shape(k)[-1], tf.float32)
        scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)

        if self.uni:
            # Keep the lower triangle plus `look_ahead` super-diagonals;
            # everything else receives a large negative logit so softmax
            # assigns it ~zero weight.
            uni_mask = tf.ones(tf.shape(scaled_attention_logits))
            uni_mask = tf.linalg.band_part(uni_mask, -1, self.look_ahead)
            scaled_attention_logits += (1 - uni_mask) * -1e9

        # add the mask to the scaled tensor.
        if mask is not None:
            scaled_attention_logits += mask * -1e9

        # softmax is normalized on the last axis (seq_len_k) so that the scores
        # add up to 1.
        # (..., seq_len_q, seq_len_k)
        attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)

        output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)
        return output, attention_weights


class MultiHeadAttention(tf.keras.layers.Layer):
    """ Multi-head attention

    Multi-head attention consists of four parts:
    * Linear layers and split into heads.
    * Scaled dot-product attention.
    * Concatenation of heads.
    * Final linear layer.
    Each multi-head attention block gets three inputs; Q (query), K (key),
    V (value). These are put through linear (Dense) layers and split up into
    multiple heads.
    The scaled_dot_product_attention defined above is applied to each head
    (broadcasted for efficiency). An appropriate mask must be used in the
    attention step. The attention output for each head is then concatenated
    (using tf.transpose, and tf.reshape) and put through a final Dense layer.
    Instead of one single attention head, Q, K, and V are split into multiple
    heads because it allows the model to jointly attend to information at
    different positions from different representational spaces.
    After the split each head has a reduced dimensionality, so the total
    computation cost is the same as a single head attention with full
    dimensionality.
    """

    def __init__(self, d_model, num_heads, unidirectional=False, look_ahead=0):
        super().__init__()
        self.num_heads = num_heads
        self.d_model = d_model

        # The model width must split evenly across the heads.
        assert d_model % self.num_heads == 0

        self.depth = d_model // self.num_heads

        self.wq = tf.keras.layers.Dense(
            d_model,
            kernel_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.02),
            input_shape=(d_model,),
        )
        self.wk = tf.keras.layers.Dense(
            d_model,
            kernel_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.02),
            input_shape=(d_model,),
        )
        self.wv = tf.keras.layers.Dense(
            d_model,
            kernel_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.02),
            input_shape=(d_model,),
        )
        self.attention = ScaledDotProductAttention(unidirectional, look_ahead=look_ahead)
        self.dense = tf.keras.layers.Dense(
            d_model,
            kernel_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.02),
            input_shape=(d_model,),
        )

    def split_heads(self, x, batch_size):
        """Split the last dimension into (num_heads, depth).

        Transpose the result such that the shape is
        (batch_size, num_heads, seq_len, depth)
        """
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask):
        """ call function """
        batch_size = tf.shape(q)[0]

        q = self.wq(q)  # (batch_size, seq_len, hiddn_dim)
        k = self.wk(k)  # (batch_size, seq_len, hiddn_dim)
        v = self.wv(v)  # (batch_size, seq_len, hiddn_dim)

        q = self.split_heads(q, batch_size)  # (batch_size, num_heads, seq_len_q, depth)
        k = self.split_heads(k, batch_size)  # (batch_size, num_heads, seq_len_k, depth)
        v = self.split_heads(v, batch_size)  # (batch_size, num_heads, seq_len_v, depth)

        # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
        # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
        scaled_attention, attention_weights = self.attention(q, k, v, mask)

        # (batch_size, seq_len_q, num_heads, depth)
        scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])

        # (batch_size, seq_len_q, d_model)
        concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model))

        output = self.dense(concat_attention)  # (batch_size, seq_len_q, d_model)
        return output, attention_weights


class BahdanauAttention(tf.keras.Model):
    """ the Bahdanau Attention """

    def __init__(self, units, input_dim=1024):
        super().__init__()
        self.W1 = tf.keras.layers.Dense(
            units,
            kernel_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.02),
            input_shape=(input_dim,),
        )
        self.W2 = tf.keras.layers.Dense(
            units,
            kernel_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.02),
            input_shape=(input_dim,),
        )
        self.V = tf.keras.layers.Dense(
            1,
            kernel_initializer=tf.compat.v1.truncated_normal_initializer(stddev=0.02),
            input_shape=(units,),
        )

    def call(self, query, values):
        """ call function """
        # hidden shape == (batch_size, hidden size)
        # hidden_with_time_axis shape == (batch_size, 1, hidden_size)
        # we are doing this to perform addition to calculate the score
hidden_with_time_axis = tf.expand_dims(query, 1) # (64, 1, 1024) # score shape == (batch_size, max_length, 1) # we get 1 at the last axis because we are applying score to self.V # the shape of the tensor before applying self.V is (batch_size, max_length, units) score = self.V(tf.nn.tanh(self.W1(values) + self.W2(hidden_with_time_axis))) # attention_weights shape == (batch_size, max_length, 1) attention_weights = tf.nn.softmax(score, axis=1) # context_vector shape after sum == (batch_size, hidden_size) context_vector = attention_weights * values context_vector = tf.reduce_sum(context_vector, axis=1) return context_vector, attention_weights class HanAttention(tf.keras.layers.Layer): """ Refer to [Hierarchical Attention Networks for Document Classification] (https://www.cs.cmu.edu/~hovy/papers/16HLT-hierarchical-attention-networks.pdf) wrap `with tf.variable_scope(name, reuse=tf.AUTO_REUSE):` Input shape: (Batch size, steps, features) Output shape: (Batch size, features) """ def __init__( self, W_regularizer=None, u_regularizer=None, b_regularizer=None, W_constraint=None, u_constraint=None, b_constraint=None, use_bias=True, **kwargs ): super().__init__(**kwargs) self.supports_masking = True self.init = tf.keras.initializers.get("glorot_uniform") self.W_regularizer = tf.keras.regularizers.get(W_regularizer) self.u_regularizer = tf.keras.regularizers.get(u_regularizer) self.b_regularizer = tf.keras.regularizers.get(b_regularizer) self.W_constraint = tf.keras.constraints.get(W_constraint) self.u_constraint = tf.keras.constraints.get(u_constraint) self.b_constraint = tf.keras.constraints.get(b_constraint) self.use_bias = use_bias def build(self, input_shape): """ build in keras layer """ # pylint: disable=attribute-defined-outside-init assert len(input_shape) == 3 self.W = self.add_weight( name="{}_W".format(self.name), shape=(int(input_shape[-1]), int(input_shape[-1]),), initializer=self.init, regularizer=self.W_regularizer, constraint=self.W_constraint, ) if 
self.use_bias: self.b = self.add_weight( name="{}_b".format(self.name), shape=(int(input_shape[-1]),), initializer="zero", regularizer=self.b_regularizer, constraint=self.b_constraint, ) self.attention_context_vector = self.add_weight( name="{}_att_context_v".format(self.name), shape=(int(input_shape[-1]),), initializer=self.init, regularizer=self.u_regularizer, constraint=self.u_constraint, ) self.built = True def call(self, inputs, training=None, mask=None): """ call function in keras """ batch_size = tf.shape(inputs)[0] W_3d = tf.tile(tf.expand_dims(self.W, axis=0), tf.stack([batch_size, 1, 1])) # [batch_size, steps, features] input_projection = tf.matmul(inputs, W_3d) if self.use_bias: input_projection += self.b input_projection = tf.tanh(input_projection) # [batch_size, steps, 1] similarities = tf.reduce_sum( tf.multiply(input_projection, self.attention_context_vector), axis=2, keep_dims=True, ) # [batch_size, steps, 1] if mask is not None: attention_weights = self._masked_softmax(similarities, mask, axis=1) else: attention_weights = tf.nn.softmax(similarities, axis=1) # [batch_size, features] attention_output = tf.reduce_sum(tf.multiply(inputs, attention_weights), axis=1) return attention_output # pylint: disable=no-self-use def compute_output_shape(self, input_shape): """compute output shape""" return input_shape[0], input_shape[-1] def _masked_softmax(self, logits, mask, axis): """Compute softmax with input mask.""" e_logits = tf.exp(logits) masked_e = tf.multiply(e_logits, mask) sum_masked_e = tf.reduce_sum(masked_e, axis, keep_dims=True) ones = tf.ones_like(sum_masked_e) # pay attention to a situation that if len of mask is zero, # denominator should be set to 1 sum_masked_e_safe = tf.where(tf.equal(sum_masked_e, 0), ones, sum_masked_e) return masked_e / sum_masked_e_safe class MatchAttention(tf.keras.layers.Layer): """ Refer to [Learning Natural Language Inference with LSTM] (https://www.aclweb.org/anthology/N16-1170) wrap `with tf.variable_scope(name, 
reuse=tf.AUTO_REUSE):` Input shape: (Batch size, steps, features) Output shape: (Batch size, steps, features) """ def __init__(self, config, **kwargs): super().__init__(**kwargs) logging.info("Initialize MatchAttention {}...".format(self.name)) self.fc_num_units = config["model"]["net"]["structure"]["fc_num_units"] self.middle_layer = tf.keras.layers.Dense(self.fc_num_units, activation="tanh") self.attn = tf.keras.layers.Dense(1) # pylint: disable=arguments-differ def call(self, tensors): """Attention layer.""" left, right = tensors len_left = left.shape[1] len_right = right.shape[1] tensor_left = tf.expand_dims(left, axis=2) tensor_right = tf.expand_dims(right, axis=1) tensor_left = tf.tile(tensor_left, [1, 1, len_right, 1]) tensor_right = tf.tile(tensor_right, [1, len_left, 1, 1]) tensor_merged = tf.concat([tensor_left, tensor_right], axis=-1) middle_output = self.middle_layer(tensor_merged) attn_scores = self.attn(middle_output) attn_scores = tf.squeeze(attn_scores, axis=3) exp_attn_scores = tf.exp( attn_scores - tf.reduce_max(attn_scores, axis=-1, keepdims=True) ) exp_sum = tf.reduce_sum(exp_attn_scores, axis=-1, keepdims=True) attention_weights = exp_attn_scores / exp_sum return tf.matmul(attention_weights, right) class LocationAttention(tf.keras.layers.Layer): """location-aware attention Reference: Attention-Based Models for Speech Recognition (https://arxiv.org/pdf/1506.07503.pdf) """ def __init__(self, attn_dim, conv_channel, aconv_filts, scaling=1.0): super().__init__() layers = tf.keras.layers self.attn_dim = attn_dim self.value_dense_layer = layers.Dense(attn_dim) self.query_dense_layer = layers.Dense(attn_dim, use_bias=False) self.location_dense_layer = layers.Dense(attn_dim, use_bias=False) self.location_conv = layers.Conv1D(filters=conv_channel, kernel_size=2*aconv_filts+1, strides=1, padding="same", use_bias=False, data_format="channels_last") self.score_dense_layer = layers.Dense(1, name='score_dense_layer') self.score_function = None # scaling: 
used to scale softmax scores self.scaling = scaling def compute_score(self, value, value_length, query, accum_attn_weight): """ Args: value_length: the length of value, shape: [batch] max_len: the maximun length Returns: initialized_weights: initializes to uniform distributions, shape: [batch,
import numpy as np

# Number of tiles in one player's track, home and goal included.
TOTAL_NUMBER_OF_TAILES = 60
# Rolling this value lets a piece leave the home area.
DICE_MOVE_OUT_OF_HOME = 6
NO_ENEMY = -1
# Rule A: if there are two pieces on a field, the last one to arrive has to
# return to start.
PLAY_WITH_RULE_A = True

# Tile type codes stored in BORD_TILES.
TAILE_FREE = 0
TAILE_HOME = 1
TAILE_START = 2
TAILE_GLOB = 3
TAILE_GOAL_AREAL = 4
TAILE_STAR = 5
TAILE_GOAL = 6
TAILE_ENEMY_1_GLOB = 7
TAILE_ENEMY_2_GLOB = 8
TAILE_ENEMY_3_GLOB = 9
LIST_TAILE_ENEMY_GLOBS = [TAILE_ENEMY_1_GLOB, TAILE_ENEMY_2_GLOB, TAILE_ENEMY_3_GLOB]

# Board positions, all expressed in the current player's own frame.
NULL_POS = -1
HOME_INDEX = 0
START_INDEX = 1
STAR_INDEXS = [5, 12, 18, 25, 31, 38, 44, 51]
HOME_AREAL_INDEXS = [53, 54, 55, 56, 57, 58]
GOAL_INDEX = 59
GLOB_INDEXS = [9, 22, 35, 48]
ENEMY_1_GLOB_INDX = 14
ENEMY_2_GLOB_INDX = 27
ENEMY_3_GLOB_INDX = 40
LIST_ENEMY_GLOB_INDEX = [ENEMY_1_GLOB_INDX, ENEMY_2_GLOB_INDX, ENEMY_3_GLOB_INDX]
STAR_AT_GOAL_AREAL_INDX = STAR_INDEXS[-1]

# One entry per position, holding the tile's type code.
BORD_TILES = np.full(TOTAL_NUMBER_OF_TAILES, TAILE_FREE)
BORD_TILES[HOME_INDEX] = TAILE_HOME
BORD_TILES[START_INDEX] = TAILE_START
BORD_TILES[STAR_INDEXS] = TAILE_STAR
BORD_TILES[GLOB_INDEXS] = TAILE_GLOB
BORD_TILES[HOME_AREAL_INDEXS] = TAILE_GOAL_AREAL
BORD_TILES[GOAL_INDEX] = TAILE_GOAL
BORD_TILES[ENEMY_1_GLOB_INDX] = TAILE_ENEMY_1_GLOB
BORD_TILES[ENEMY_2_GLOB_INDX] = TAILE_ENEMY_2_GLOB
BORD_TILES[ENEMY_3_GLOB_INDX] = TAILE_ENEMY_3_GLOB

# Where each enemy's home maps to in this player's frame of reference.
ENEMY_1_INDX_AT_HOME = 40  # HOME_AREAL_INDEXS[0] - 6 - i * 13 # i = 1
ENEMY_2_INDX_AT_HOME = 27  # HOME_AREAL_INDEXS[0] - 6 - i * 13 # i = 2
ENEMY_3_INDX_AT_HOME = 14  # HOME_AREAL_INDEXS[0] - 6 - i * 13 # i = 3


def enemy_pos_at_pos(pos):
    """
    Returns the index's the other players has to be in to be in the same location
    as the one given in pos

    :param pos: The location to check for
    :type pos: int
    :return enemy_pos: The locations the enemy's pieces has to be at
    :rtype enemy_pos: list of list
    """
    enemy_pos = []

    for enemy_start_pos, enemy_pos_at_start in [[ENEMY_1_GLOB_INDX, ENEMY_1_INDX_AT_HOME],
                                                [ENEMY_2_GLOB_INDX, ENEMY_2_INDX_AT_HOME],
                                                [ENEMY_3_GLOB_INDX, ENEMY_3_INDX_AT_HOME]]:
        # Offsets that translate a shared-ring position between the two frames.
        post_offset = enemy_start_pos - 1
        pre_offset = enemy_pos_at_start - 1

        if pos == enemy_start_pos:
            # The enemy's start glob corresponds to two positions in its frame.
            pos_enemy = [START_INDEX, HOME_AREAL_INDEXS[0]]
        elif pos < 0:
            pos_enemy = [max(enemy_pos_at_start + pos, -1)]
        elif START_INDEX <= pos < enemy_start_pos:
            pos_enemy = [pos + pre_offset]
        elif pos > HOME_AREAL_INDEXS[0] or pos == HOME_INDEX:
            # Inside this player's goal area, or at home: no enemy can be here.
            pos_enemy = [-1]
        else:
            pos_enemy = [pos - post_offset]
        enemy_pos.append(pos_enemy)

    return enemy_pos


def get_enemy_at_pos(pos, enemys):
    """
    Returns the enemy's and the pieces they have at the given location

    :param pos: The location to check for
    :type pos: int
    :param enemys: The locations for the enemy's pieces in a list of 4 lists
    :returns:
        - enemy_at_pos: The enemy's there are at the location
        - enemy_pieces_at_pos: The pieces the enemy's has at the location
    :rtype enemy_at_pos: list
    :rtype enemy_pieces_at_pos: list of list
    """
    # Get the pos the enemy's has to be at to be at the same pos
    other_enemy_pos_at_pos = enemy_pos_at_pos(pos)

    # Check if there is a enemy and how many pieces the enemy has there
    enemy_at_pos = NO_ENEMY
    enemy_pieces_at_pos = []

    for enemy_i, other_enemy_pos in enumerate(other_enemy_pos_at_pos):
        # Check if there already is found a enemy at pos.
        if enemy_at_pos != NO_ENEMY:
            # If there is then stop checking for more (there can only be one)
            break

        for o_pos in other_enemy_pos:
            if o_pos == NULL_POS:
                continue

            for enemy_pice, enemy_pos in enumerate(enemys[enemy_i]):
                if enemy_pos == o_pos:
                    enemy_pieces_at_pos.append(enemy_pice)
                    enemy_at_pos = enemy_i

    return enemy_at_pos, enemy_pieces_at_pos


class Player:
    """
    A class used by the Game class.
This class is not needed for normal use """ def __init__(self): """ Makes a player with 4 pieces all at the home locations """ self.pieces = [] self.number_of_pieces = 4 self.set_all_pieces_to_home() def get_pieces_that_can_move(self, dice): """ Return the pieces that can move with the given dice :param dice: The dice the move will be done with :type dice: int :return: movable_pieces: A list with the pieces that can be moved :rtype movable_pieces: list """ movable_pieces = [] # Go though all the pieces for piece_i, piece_place in enumerate(self.pieces): # If the piece is a goal then the piece can't move if BORD_TILES[piece_place] == TAILE_GOAL: continue # If the piece is at home and the dice is DICE_MOVE_OUT_OF_HOME then the dice can move out of the home place elif BORD_TILES[piece_place] == TAILE_HOME and dice == DICE_MOVE_OUT_OF_HOME: movable_pieces.append(piece_i) # If the piece is not at home or at the goal it can move elif BORD_TILES[piece_place] != TAILE_HOME: movable_pieces.append(piece_i) return movable_pieces def player_winner(self): """ Returns rather the player is a winner or not :return: winner: A bool that indicate rather the player is a winner or not :rtype winner: bool """ # Go though all the pieces for piece_place in self.pieces: # If a piece is not at the goal is not the winner if BORD_TILES[piece_place] != TAILE_GOAL: return False # If no piece was not at the goal the player is the winner return True def set_pieces(self, pieces): """ Sets the players pieces :param pieces: The pieces to set the players pieces to """ self.pieces = np.copy(pieces) def get_pieces(self): """ Returns the players pieces :return pieces: The players pieces :rtype pieces: list """ return np.copy(self.pieces) def move_piece(self, piece, dice, enemys): """ Move the players piece the given dice following the game rules. 
Returns the new locations of the enemy's pieces :param piece: The piece to move :type piece: int :param dice: The dice to make the move with :type dice: int :param enemys: The enemy's pieces :type enemys: list with 4 lists each with 4 int's :return enemys: The new locations of the enemy's pieces :rtype enemys: list with 4 lists each with 4 int's """ enemys_new = enemys.copy() old_piece_pos = self.pieces[piece] new_piece_pos = old_piece_pos + dice move_enemy_home_from_poss = [] do_not_check_rule_a = False enemy_at_pos, enemy_pieces_at_pos = get_enemy_at_pos(new_piece_pos, enemys) # If the dice is 0 then no movement can be done if dice == 0: pass # At goal elif BORD_TILES[old_piece_pos] == TAILE_GOAL: # The piece can not move pass # Goal areal elif BORD_TILES[old_piece_pos] == TAILE_GOAL_AREAL: if new_piece_pos <= GOAL_INDEX: self.pieces[piece] = new_piece_pos else: overshoot = new_piece_pos - GOAL_INDEX new_piece_pos_corrected = old_piece_pos - overshoot self.pieces[piece] = new_piece_pos_corrected # The Home areal elif BORD_TILES[old_piece_pos] == TAILE_HOME: if dice == DICE_MOVE_OUT_OF_HOME: self.pieces[piece] = START_INDEX # Set the enemy there might be at START_INDEX to moved do_not_check_rule_a = True move_enemy_home_from_poss.append(START_INDEX) # Star before the home areal elif new_piece_pos == STAR_AT_GOAL_AREAL_INDX: self.pieces[piece] = GOAL_INDEX # Set the enemy there might be at STAR_AT_GOAL_AREAL_INDX to moved move_enemy_home_from_poss.append(new_piece_pos) # The other stars elif BORD_TILES[new_piece_pos] == TAILE_STAR: present_star_staridx = STAR_INDEXS.index(new_piece_pos) next_star_staridx = present_star_staridx + 1 if next_star_staridx >= len(STAR_INDEXS): next_star_staridx = 0 next_star_pos = STAR_INDEXS[next_star_staridx] self.pieces[piece] = next_star_pos # Set the enemy there might be at first star or the start there will be jump to to be moved if enemy_at_pos != NO_ENEMY: move_enemy_home_from_poss.append(new_piece_pos) next_star_enemy_at_pos, 
next_star_enemy_pieces_at_pos = get_enemy_at_pos(next_star_pos, enemys) if next_star_enemy_at_pos != NO_ENEMY: move_enemy_home_from_poss.append(next_star_pos) # Globs there are not own by enemy elif BORD_TILES[new_piece_pos] == TAILE_GLOB: if enemy_at_pos != NO_ENEMY: self.pieces[piece] = HOME_INDEX else: self.pieces[piece] = new_piece_pos # Globs there are own by enemy elif BORD_TILES[new_piece_pos] in LIST_TAILE_ENEMY_GLOBS: # Get the enemy there own the glob globs_enemy = LIST_TAILE_ENEMY_GLOBS.index(BORD_TILES[new_piece_pos]) # Check if there is a enemy at the glob if enemy_at_pos != NO_ENEMY: # If there is a other enemy then send them home and move there if enemy_at_pos != globs_enemy: move_enemy_home_from_poss.append(new_piece_pos) self.pieces[piece] = new_piece_pos # If it is the same enemy there is there them move there else: self.pieces[piece] = HOME_INDEX # If there ant any enemy at the glob then move there else: self.pieces[piece] = new_piece_pos # If it is a TAILE_FREE or if we move from a GLOB/STAR to a not done case elif BORD_TILES[old_piece_pos] == TAILE_FREE or \ BORD_TILES[new_piece_pos] == TAILE_FREE or \ BORD_TILES[old_piece_pos] == TAILE_GLOB or \ BORD_TILES[old_piece_pos] == TAILE_STAR: if enemy_at_pos != NO_ENEMY: move_enemy_home_from_poss.append(new_piece_pos) self.pieces[piece] = new_piece_pos # If the case was not caught then there is an error else: print("\nold_piece_pos:", old_piece_pos, "\nnew_piece_pos", new_piece_pos, "\nBORD_TILES[old_piece_pos]:", BORD_TILES[old_piece_pos], "\nBORD_TILES[new_piece_pos]:", BORD_TILES[new_piece_pos], "\ndice:", dice) raise RuntimeError("The new_piece_pos case was not handel") # Check if there is any enemy there has to be moved if len(move_enemy_home_from_poss): # Go through the pos where enemy has to be moved from for pos in move_enemy_home_from_poss: # Get the enemy at the pos enemy_at_pos, enemy_pieces_at_pos = get_enemy_at_pos(pos, enemys) # Check if there was a enemy at the pos if enemy_at_pos != 
NO_ENEMY: # If there is only one enemy then move the enemy home. if not do_not_check_rule_a and not PLAY_WITH_RULE_A or len(enemy_pieces_at_pos)
= StringResources.SuccessText() # 错误码 ErrorCode = 0 # 返回显示的文本 def ToMessageShowString( self ): '''获取错误代号及文本描述''' return StringResources.ErrorCode() + ":" + str(self.ErrorCode) + "\r\n" + StringResources.TextDescription() + ":" + self.Message def CopyErrorFromOther(self, result): '''从另一个结果类中拷贝错误信息''' if result != None: self.ErrorCode = result.ErrorCode self.Message = result.Message @staticmethod def CreateFailedResult( result ): '''创建一个失败的结果对象''' failed = OperateResult() failed.ErrorCode = result.ErrorCode failed.Message = result.Message return failed @staticmethod def CreateSuccessResult(Content1=None,Content2=None,Content3=None,Content4=None,Content5=None,Content6=None,Content7=None,Content8=None,Content9=None,Content10=None): '''创建一个成功的对象''' success = OperateResult() success.IsSuccess = True success.Message = StringResources.SuccessText() if(Content2 == None and Content3 == None and Content4 == None and Content5 == None and Content6 == None and Content7 == None and Content8 == None and Content9 == None and Content10 == None) : success.Content = Content1 else: success.Content1 = Content1 success.Content2 = Content2 success.Content3 = Content3 success.Content4 = Content4 success.Content5 = Content5 success.Content6 = Content6 success.Content7 = Content7 success.Content8 = Content8 success.Content9 = Content9 success.Content10 = Content10 return success class SoftIncrementCount: '''一个简单的不持久化的序号自增类,采用线程安全实现,并允许指定最大数字,到达后清空从指定数开始''' start = 0 current = 0 maxValue = 100000000000000000000000000 hybirdLock = threading.Lock() def __init__(self, maxValue, start): '''实例化一个自增信息的对象,包括最大值''' self.maxValue = maxValue self.start = start def GetCurrentValue( self ): '''获取自增信息''' value = 0 self.hybirdLock.acquire() value = self.current self.current = self.current + 1 if self.current > self.maxValue: self.current = 0 self.hybirdLock.release() return value class INetMessage: '''数据消息的基本基类''' def ProtocolHeadBytesLength(self): '''协议头数据长度,也即是第一次接收的数据长度''' return 0 def 
GetContentLengthByHeadBytes(self): '''二次接收的数据长度''' return 0 def CheckHeadBytesLegal(self,toke): '''令牌检查是否成功''' return False def GetHeadBytesIdentity(self): '''获取头子节里的消息标识''' return 0 HeadBytes = bytes(0) ContentBytes = bytes(0) SendBytes = bytes(0) class S7Message (INetMessage): '''西门子s7协议的消息接收规则''' def ProtocolHeadBytesLength(self): '''协议头数据长度,也即是第一次接收的数据长度''' return 4 def GetContentLengthByHeadBytes(self): '''二次接收的数据长度''' if self.HeadBytes != None: return self.HeadBytes[2]*256 + self.HeadBytes[3]-4 else: return 0 def CheckHeadBytesLegal(self,token): '''令牌检查是否成功''' if self.HeadBytes != None: if self.HeadBytes[0] == 0x03 and self.HeadBytes[1] == 0x00: return True else: return False else: return False class MelsecA1EBinaryMessage(INetMessage): '''三菱的A兼容1E帧协议解析规则''' def ProtocolHeadBytesLength(self): '''协议头数据长度,也即是第一次接收的数据长度''' return 2 def GetContentLengthByHeadBytes(self): '''二次接收的数据长度''' contentLength = 0 if self.HeadBytes[1] == 0x5B: contentLength = 2 else: length = 0 if self.SendBytes[10] % 2 == 0: length = self.SendBytes[10] else: length = self.SendBytes[10] + 1 if self.HeadBytes[0] == 0x80: contentLength = int(length / 2) elif self.HeadBytes[0] == 0x81: contentLength = self.SendBytes[10] * 2 elif self.HeadBytes[0] == 0x82: contentLength = 0 elif self.HeadBytes[0] == 0x83: contentLength = 0 # 在A兼容1E协议中,写入值后,若不发生异常,只返回副标题 + 结束代码(0x00) # 这已经在协议头部读取过了,后面要读取的长度为0(contentLength=0) return contentLength def CheckHeadBytesLegal(self,token): '''令牌检查是否成功''' if self.HeadBytes != None: if self.HeadBytes[0] - self.SendBytes[0] == 0x80: return True else: return False else: return False class MelsecQnA3EBinaryMessage(INetMessage): '''三菱的Qna兼容3E帧协议解析规则''' def ProtocolHeadBytesLength(self): '''协议头数据长度,也即是第一次接收的数据长度''' return 9 def GetContentLengthByHeadBytes(self): '''二次接收的数据长度''' if self.HeadBytes != None: return self.HeadBytes[8] * 256 + self.HeadBytes[7] else: return 0 def CheckHeadBytesLegal(self,token): '''令牌检查是否成功''' if self.HeadBytes != None: if self.HeadBytes[0] == 0xD0 
and self.HeadBytes[1] == 0x00: return True else: return False else: return False class MelsecQnA3EAsciiMessage(INetMessage): '''三菱的Qna兼容3E帧的ASCII协议解析规则''' def ProtocolHeadBytesLength(self): '''协议头数据长度,也即是第一次接收的数据长度''' return 18 def GetContentLengthByHeadBytes(self): '''二次接收的数据长度''' if self.HeadBytes != None: return int(self.HeadBytes[14:18].decode('ascii'),16) else: return 0 def CheckHeadBytesLegal(self,token): '''令牌检查是否成功''' if self.HeadBytes != None: if self.HeadBytes[0] == ord('D') and self.HeadBytes[1] == ord('0') and self.HeadBytes[2] == ord('0') and self.HeadBytes[3] == ord('0'): return True else: return False else: return False class ModbusTcpMessage (INetMessage): '''Modbus-Tcp协议的信息''' def ProtocolHeadBytesLength(self): '''协议头数据长度,也即是第一次接收的数据长度''' return 6 def GetContentLengthByHeadBytes(self): '''二次接收的数据长度''' if self.HeadBytes != None: return self.HeadBytes[4] * 256 + self.HeadBytes[5] else: return 0 def CheckHeadBytesLegal(self,token): '''令牌检查是否成功''' return True def GetHeadBytesIdentity(self): '''获取头子节里的消息标识''' return self.HeadBytes[0] * 256 + self.HeadBytes[1] class HslMessage (INetMessage): '''本组件系统使用的默认的消息规则,说明解析和反解析规则的''' def ProtocolHeadBytesLength(self): '''协议头数据长度,也即是第一次接收的数据长度''' return 32 def GetContentLengthByHeadBytes(self): '''二次接收的数据长度''' if self.HeadBytes != None: buffer = bytearray(4) buffer[0:4] = self.HeadBytes[28:32] return struct.unpack('<i',buffer)[0] else: return 0 def GetHeadBytesIdentity(self): '''获取头子节里的消息标识''' if self.HeadBytes != None: buffer = bytearray(4) buffer[0:4] = self.HeadBytes[4:8] return struct.unpack('<i',buffer)[0] else: return 0 def CheckHeadBytesLegal(self,token): '''令牌检查是否成功''' if self.HeadBytes == None: return False else: return SoftBasic.IsTwoBytesEquel(self.HeadBytes,12,token,0,16) class DataFormat(Enum): '''应用于多字节数据的解析或是生成格式''' ABCD = 0 BADC = 1 CDAB = 2 DCBA = 3 class ByteTransform: '''数据转换类的基础,提供了一些基础的方法实现.''' DataFormat = DataFormat.DCBA def TransBool(self, buffer, index ): '''将buffer数组转化成bool对象''' return 
((buffer[index] & 0x01) == 0x01) def TransBoolArray(self, buffer, index, length ): '''将buffer数组转化成bool数组对象,需要转入索引,长度''' data = bytearray(length) for i in range(length): data[i]=buffer[i+index] return SoftBasic.ByteToBoolArray( data, length * 8 ) def TransByte( self, buffer, index ): '''将buffer中的字节转化成byte对象,需要传入索引''' return buffer[index] def TransByteArray( self, buffer, index, length ): '''将buffer中的字节转化成byte数组对象,需要传入索引''' data = bytearray(length) for i in range(length): data[i]=buffer[i+index] return data def TransInt16( self, buffer, index ): '''从缓存中提取short结果''' data = self.TransByteArray(buffer,index,2) return struct.unpack('<h',data)[0] def TransInt16Array( self, buffer, index, length ): '''从缓存中提取short数组结果''' tmp = [] for i in range(length): tmp.append( self.TransInt16( buffer, index + 2 * i )) return tmp def TransUInt16(self, buffer, index ): '''从缓存中提取ushort结果''' data = self.TransByteArray(buffer,index,2) return struct.unpack('<H',data)[0] def TransUInt16Array(self, buffer, index, length ): '''从缓存中提取ushort数组结果''' tmp = [] for i in range(length): tmp.append( self.TransUInt16( buffer, index + 2 * i )) return tmp def TransInt32(self, buffer, index ): '''从缓存中提取int结果''' data = self.ByteTransDataFormat4(self.TransByteArray(buffer,index,4)) return struct.unpack('<i',data)[0] def TransInt32Array(self, buffer, index, length ): '''从缓存中提取int数组结果''' tmp = [] for i in range(length): tmp.append( self.TransInt32( buffer, index + 4 * i )) return tmp def TransUInt32(self, buffer, index ): '''从缓存中提取uint结果''' data = self.ByteTransDataFormat4(self.TransByteArray(buffer,index,4)) return struct.unpack('<I',data)[0] def TransUInt32Array(self, buffer, index, length ): '''从缓存中提取uint数组结果''' tmp = [] for i in range(length): tmp.append( self.TransUInt32( buffer, index + 4 * i )) return tmp def TransInt64(self, buffer, index ): '''从缓存中提取long结果''' data = self.ByteTransDataFormat8(self.TransByteArray(buffer,index,8)) return struct.unpack('<q',data)[0] def TransInt64Array(self, buffer, index, 
length): '''从缓存中提取long数组结果''' tmp = [] for i in range(length): tmp.append( self.TransInt64( buffer, index + 8 * i )) return tmp def TransUInt64(self, buffer, index ): '''从缓存中提取ulong结果''' data = self.ByteTransDataFormat8(self.TransByteArray(buffer,index,8)) return struct.unpack('<Q',data)[0] def TransUInt64Array(self, buffer, index, length): '''从缓存中提取ulong数组结果''' tmp = [] for i in range(length): tmp.append( self.TransUInt64( buffer, index + 8 * i )) return tmp def TransSingle(self, buffer, index ): '''从缓存中提取float结果''' data = self.ByteTransDataFormat4(self.TransByteArray(buffer,index,4)) return struct.unpack('<f',data)[0] def TransSingleArray(self, buffer, index, length): '''从缓存中提取float数组结果''' tmp = [] for i in range(length): tmp.append( self.TransSingle( buffer, index + 4 * i )) return tmp def TransDouble(self, buffer, index ): '''从缓存中提取double结果''' data = self.ByteTransDataFormat8(self.TransByteArray(buffer,index,8)) return struct.unpack('<d',data)[0] def TransDoubleArray(self, buffer, index, length): '''从缓存中提取double数组结果''' tmp = [] for i in range(length): tmp.append( self.TransDouble( buffer, index + 8 * i )) return tmp def TransString( self, buffer, index, length, encoding ): '''从缓存中提取string结果,使用指定的编码''' data = self.TransByteArray(buffer,index,length) return data.decode(encoding) def BoolArrayTransByte(self, values): '''bool数组变量转化缓存数据,需要传入bool数组''' if (values == None): return None return SoftBasic.BoolArrayToByte( values ) def BoolTransByte(self, value): '''bool变量转化缓存数据,需要传入bool值''' return self.BoolArrayTransByte([value]) def ByteTransByte(self, value ): '''byte变量转化缓存数据,需要传入byte值''' buffer = bytearray(1) buffer[0] = value return buffer def Int16ArrayTransByte(self, values ): '''short数组变量转化缓存数据,需要传入short数组''' if (values == None) : return None buffer = bytearray(len(values) * 2) for i in range(len(values)): buffer[(i*2): (i*2+2)] = struct.pack('<h',values[i]) return buffer def Int16TransByte(self, value ): '''short数组变量转化缓存数据,需要传入short值''' return 
self.Int16ArrayTransByte([value]) def UInt16ArrayTransByte(self, values ): '''ushort数组变量转化缓存数据,需要传入ushort数组''' if (values == None) : return None buffer = bytearray(len(values) * 2) for i in range(len(values)): buffer[(i*2): (i*2+2)] = struct.pack('<H',values[i]) return buffer def UInt16TransByte(self, value ): '''ushort变量转化缓存数据,需要传入ushort值''' return self.UInt16ArrayTransByte([value]) def Int32ArrayTransByte(self, values ): '''int数组变量转化缓存数据,需要传入int数组''' if (values == None) : return None buffer = bytearray(len(values) * 4) for i in range(len(values)): buffer[(i*4): (i*4+4)] = self.ByteTransDataFormat4(struct.pack('<i',values[i])) return buffer def Int32TransByte(self, value ): '''int变量转化缓存数据,需要传入int值''' return self.Int32ArrayTransByte([value]) def UInt32ArrayTransByte(self, values ): '''uint数组变量转化缓存数据,需要传入uint数组''' if (values == None) : return None buffer = bytearray(len(values) * 4) for i in range(len(values)): buffer[(i*4): (i*4+4)] = self.ByteTransDataFormat4(struct.pack('<I',values[i])) return buffer def UInt32TransByte(self, value ): '''uint变量转化缓存数据,需要传入uint值''' return self.UInt32ArrayTransByte([value]) def Int64ArrayTransByte(self, values ): '''long数组变量转化缓存数据,需要传入long数组''' if (values == None) : return None buffer = bytearray(len(values) * 8) for i in range(len(values)): buffer[(i*8): (i*8+8)] = self.ByteTransDataFormat8(struct.pack('<q',values[i])) return buffer def Int64TransByte(self, value ): '''long变量转化缓存数据,需要传入long值''' return self.Int64ArrayTransByte([value]) def UInt64ArrayTransByte(self, values ): '''ulong数组变量转化缓存数据,需要传入ulong数组''' if (values == None) : return None buffer = bytearray(len(values) * 8) for i in range(len(values)): buffer[(i*8): (i*8+8)] = self.ByteTransDataFormat8(struct.pack('<Q',values[i])) return buffer def UInt64TransByte(self, value ): '''ulong变量转化缓存数据,需要传入ulong值''' return self.UInt64ArrayTransByte([value]) def FloatArrayTransByte(self, values ): '''float数组变量转化缓存数据,需要传入float数组''' if (values == None) : return None buffer = 
bytearray(len(values) * 4) for i in range(len(values)): buffer[(i*4): (i*4+4)] = self.ByteTransDataFormat4(struct.pack('<f',values[i])) return buffer def FloatTransByte(self, value ): '''float变量转化缓存数据,需要传入float值''' return self.FloatArrayTransByte([value]) def DoubleArrayTransByte(self, values ): '''double数组变量转化缓存数据,需要传入double数组''' if (values == None) : return None buffer = bytearray(len(values) * 8) for i in range(len(values)): buffer[(i*8): (i*8+8)] = self.ByteTransDataFormat8(struct.pack('<d',values[i])) return buffer def DoubleTransByte(self, value ): '''double变量转化缓存数据,需要传入double值''' return self.DoubleArrayTransByte([value]) def StringTransByte(self, value:str, encoding:str ): '''使用指定的编码字符串转化缓存数据,需要传入string值及编码信息''' return value.encode(encoding) def ByteTransDataFormat4(self, value, index = 0 ): '''反转多字节的数据信息''' buffer = bytearray(4) if self.DataFormat == DataFormat.ABCD: buffer[0] = value[index + 3] buffer[1] = value[index + 2] buffer[2] = value[index + 1] buffer[3] = value[index + 0] elif self.DataFormat == DataFormat.BADC: buffer[0] = value[index + 2] buffer[1] = value[index + 3] buffer[2] = value[index + 0] buffer[3] = value[index + 1] elif self.DataFormat == DataFormat.CDAB: buffer[0] = value[index + 1] buffer[1] = value[index + 0] buffer[2] = value[index + 3] buffer[3] = value[index + 2] elif self.DataFormat == DataFormat.DCBA: buffer[0] = value[index + 0] buffer[1] = value[index + 1] buffer[2] = value[index + 2] buffer[3] = value[index + 3] return buffer def ByteTransDataFormat8(self, value, index = 0 ): '''反转多字节的数据信息''' buffer = bytearray(8) if self.DataFormat == DataFormat.ABCD: buffer[0] = value[index + 7] buffer[1] = value[index + 6] buffer[2] = value[index + 5] buffer[3] = value[index + 4] buffer[4] = value[index + 3] buffer[5] = value[index + 2] buffer[6] = value[index + 1] buffer[7] = value[index + 0] elif self.DataFormat == DataFormat.BADC: buffer[0] = value[index + 6] buffer[1] = value[index + 7] buffer[2] = value[index + 4] buffer[3] = 
value[index + 5] buffer[4] = value[index + 2] buffer[5] = value[index + 3] buffer[6] = value[index + 0] buffer[7] = value[index + 1] elif self.DataFormat == DataFormat.CDAB: buffer[0] = value[index + 1] buffer[1] = value[index + 0] buffer[2] = value[index + 3] buffer[3] = value[index + 2] buffer[4] = value[index + 5] buffer[5] = value[index + 4] buffer[6] = value[index + 7] buffer[7] = value[index + 6] elif self.DataFormat == DataFormat.DCBA: buffer[0] = value[index + 0] buffer[1] = value[index + 1] buffer[2] = value[index + 2] buffer[3] = value[index + 3] buffer[4] = value[index + 4] buffer[5] = value[index + 5] buffer[6] = value[index + 6] buffer[7] = value[index + 7] return buffer class RegularByteTransform(ByteTransform): '''常规的字节转换类''' def __init__(self): return class ReverseBytesTransform(ByteTransform): '''字节倒序的转换类''' def TransInt16(self, buffer, index ): '''从缓存中提取short结果''' data = self.TransByteArray(buffer,index,2) return struct.unpack('>h',data)[0] def TransUInt16(self, buffer, index ): '''从缓存中提取ushort结果''' data = self.TransByteArray(buffer,index,2) return struct.unpack('>H',data)[0] def TransInt32(self, buffer, index ): '''从缓存中提取int结果''' data = self.TransByteArray(buffer,index,4) return struct.unpack('>i',data)[0] def TransUInt32(self, buffer, index ): '''从缓存中提取uint结果''' data = self.TransByteArray(buffer,index,4) return struct.unpack('>I',data)[0] def TransInt64(self, buffer, index ): '''从缓存中提取long结果''' data = self.TransByteArray(buffer,index,8) return struct.unpack('>q',data)[0] def TransUInt64(self, buffer, index ): '''从缓存中提取ulong结果''' data = self.TransByteArray(buffer,index,8) return struct.unpack('>Q',data)[0] def TransSingle(self, buffer, index ): '''从缓存中提取float结果''' data = self.TransByteArray(buffer,index,4) return struct.unpack('>f',data)[0] def TransDouble(self, buffer, index ): '''从缓存中提取double结果''' data = self.TransByteArray(buffer,index,8) return struct.unpack('>d',data)[0] def Int16ArrayTransByte(self, values ): '''short数组变量转化缓存数据,需要传入short数组 
-> bytearray''' if (values == None) : return None buffer = bytearray(len(values) * 2) for i in range(len(values)): buffer[(i*2): (i*2+2)] = struct.pack('>h',values[i]) return buffer def UInt16ArrayTransByte(self, values ): '''ushort数组变量转化缓存数据,需要传入ushort数组 -> bytearray''' if (values == None) : return None buffer = bytearray(len(values) * 2) for i in range(len(values)): buffer[(i*2): (i*2+2)] = struct.pack('>H',values[i]) return buffer def Int32ArrayTransByte(self, values ): '''int数组变量转化缓存数据,需要传入int数组 -> bytearray''' if (values == None) : return None buffer = bytearray(len(values) * 4) for i in range(len(values)): buffer[(i*4): (i*4+4)] = struct.pack('>i',values[i]) return buffer def UInt32ArrayTransByte(self, values ): '''uint数组变量转化缓存数据,需要传入uint数组 -> bytearray''' if (values == None) : return None buffer = bytearray(len(values) * 4) for i in range(len(values)): buffer[(i*4): (i*4+4)] = struct.pack('>I',values[i]) return buffer def Int64ArrayTransByte(self, values ): '''long数组变量转化缓存数据,需要传入long数组 -> bytearray''' if (values == None) : return None buffer = bytearray(len(values) * 8) for i in range(len(values)): buffer[(i*8): (i*8+8)] = struct.pack('>q',values[i]) return buffer def UInt64ArrayTransByte(self, values ): '''ulong数组变量转化缓存数据,需要传入ulong数组 -> bytearray''' if (values == None) : return None buffer = bytearray(len(values) * 8) for i in range(len(values)): buffer[(i*8): (i*8+8)] = struct.pack('>Q',values[i]) return buffer def FloatArrayTransByte(self, values ): '''float数组变量转化缓存数据,需要传入float数组 -> bytearray''' if (values == None) : return None buffer = bytearray(len(values) *
# assignments/ps05/experiment.py
"""Problem Set 5: Object Tracking and Pedestrian Detection"""

import cv2
import ps5

import os
import numpy as np

# I/O directories
input_dir = "input_images"
output_dir = "output"

NOISE_1 = {'x': 2.5, 'y': 2.5}
NOISE_2 = {'x': 7.5, 'y': 7.5}


# Helper code
def run_particle_filter(filter_class, imgs_dir, template_rect,
                        save_frames={}, **kwargs):
    """Run a particle filter on a given video and template.

    Creates an object of type ``filter_class``, passing in the initial video
    frame, the template (extracted from the first frame using
    ``template_rect``), and any keyword arguments.

    Do not modify this function except for the debugging flag.

    Args:
        filter_class (object): particle filter class to instantiate
            (e.g. ParticleFilter).
        imgs_dir (str): path to input images.
        template_rect (dict): template bounds (x, y, w, h), as float or int.
        save_frames (dict): frames to save
            {<frame number>|'template': <filename>}.  Read-only here, so the
            mutable default is harmless.
        **kwargs: arbitrary keyword arguments passed on to the particle
            filter class.

    Returns:
        None.
    """
    imgs_list = [f for f in os.listdir(imgs_dir)
                 if f[0] != '.' and f.endswith('.jpg')]
    imgs_list.sort()

    # Initialize objects
    template = None
    pf = None
    frame_num = 0

    # Loop over video (till last frame or Ctrl+C is pressed)
    for img in imgs_list:
        frame = cv2.imread(os.path.join(imgs_dir, img))

        # Extract template and initialize (one-time only)
        if template is None:
            template = frame[int(template_rect['y']):
                             int(template_rect['y'] + template_rect['h']),
                             int(template_rect['x']):
                             int(template_rect['x'] + template_rect['w'])]
            if 'template' in save_frames:
                cv2.imwrite(save_frames['template'], template)
            pf = filter_class(frame, template, **kwargs)

        # Process frame
        pf.process(frame)

        if True:  # For debugging, it displays every frame
            out_frame = frame.copy()
            pf.render(out_frame)
            cv2.imshow('Tracking', out_frame)
            cv2.waitKey(1)

        # Render and save output, if indicated
        if frame_num in save_frames:
            frame_out = frame.copy()
            pf.render(frame_out)
            cv2.imwrite(save_frames[frame_num], frame_out)

        # Update frame number
        frame_num += 1
        if frame_num % 20 == 0:
            # parenthesised print: valid in both Python 2 and 3 (the original
            # bare print statement is a SyntaxError under Python 3)
            print('Working on frame %d' % frame_num)


def run_kalman_filter(kf, imgs_dir, noise, sensor, save_frames={},
                      template_loc=None):
    """Run a Kalman filter over a directory of frames with a chosen sensor.

    Args:
        kf: Kalman filter instance exposing ``process(z_x, z_y)``.
        imgs_dir (str): path to input images.
        noise (dict): measurement noise std-devs {'x': ..., 'y': ...}.
        sensor (str): 'hog' (pedestrian detector) or 'matching' (template).
        save_frames (dict): {<frame number>: <filename>} outputs (read-only).
        template_loc (dict): template bounds, required for 'matching'.

    Raises:
        ValueError: if ``sensor`` is neither 'hog' nor 'matching'.
    """
    imgs_list = [f for f in os.listdir(imgs_dir)
                 if f[0] != '.' and f.endswith('.jpg')]
    imgs_list.sort()

    frame_num = 0

    if sensor == "hog":
        hog = cv2.HOGDescriptor()
        hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    elif sensor == "matching":
        frame = cv2.imread(os.path.join(imgs_dir, imgs_list[0]))
        template = frame[template_loc['y']:
                         template_loc['y'] + template_loc['h'],
                         template_loc['x']:
                         template_loc['x'] + template_loc['w']]
    else:
        raise ValueError("Unknown sensor name. Choose between 'hog' or "
                         "'matching'")

    for img in imgs_list:
        frame = cv2.imread(os.path.join(imgs_dir, img))

        # Sensor
        if sensor == "hog":
            (rects, weights) = hog.detectMultiScale(frame, winStride=(4, 4),
                                                    padding=(8, 8),
                                                    scale=1.05)
            if len(weights) > 0:
                max_w_id = np.argmax(weights)
                z_x, z_y, z_w, z_h = rects[max_w_id]

                z_x += z_w // 2
                z_y += z_h // 2

                z_x += np.random.normal(0, noise['x'])
                z_y += np.random.normal(0, noise['y'])

        elif sensor == "matching":
            # NOTE(review): cv2.cv.* is the OpenCV 2.x namespace; on OpenCV
            # 3+ this constant is cv2.TM_SQDIFF -- confirm the target
            # environment before changing.
            corr_map = cv2.matchTemplate(frame, template, cv2.cv.CV_TM_SQDIFF)
            z_y, z_x = np.unravel_index(np.argmin(corr_map), corr_map.shape)

            z_w = template_loc['w']
            z_h = template_loc['h']

            z_x += z_w // 2 + np.random.normal(0, noise['x'])
            z_y += z_h // 2 + np.random.normal(0, noise['y'])

        x, y = kf.process(z_x, z_y)

        if False:  # For debugging, it displays every frame
            out_frame = frame.copy()
            cv2.circle(out_frame, (int(z_x), int(z_y)), 20, (0, 0, 255), 2)
            cv2.circle(out_frame, (int(x), int(y)), 10, (255, 0, 0), 2)
            cv2.rectangle(out_frame,
                          (int(z_x) - z_w // 2, int(z_y) - z_h // 2),
                          (int(z_x) + z_w // 2, int(z_y) + z_h // 2),
                          (0, 0, 255), 2)
            cv2.imshow('Tracking', out_frame)
            cv2.waitKey(1)

        # Render and save output, if indicated
        if frame_num in save_frames:
            frame_out = frame.copy()
            cv2.circle(frame_out, (int(x), int(y)), 10, (255, 0, 0), 2)
            cv2.imwrite(save_frames[frame_num], frame_out)

        # Update frame number
        frame_num += 1
        if frame_num % 20 == 0:
            print('Working on frame %d' % frame_num)


def part_1b():
    """Kalman filter on the circle sequence with the matching sensor."""
    print("Part 1b")

    template_loc = {'y': 72, 'x': 140, 'w': 50, 'h': 50}

    # Define process and measurement arrays if you want to use other than the
    # default. Pass them to KalmanFilter.
    Q = None  # Process noise array
    R = None  # Measurement noise array

    kf = ps5.KalmanFilter(template_loc['x'], template_loc['y'])

    save_frames = {10: os.path.join(output_dir, 'ps5-1-b-1.png'),
                   30: os.path.join(output_dir, 'ps5-1-b-2.png'),
                   59: os.path.join(output_dir, 'ps5-1-b-3.png'),
                   99: os.path.join(output_dir, 'ps5-1-b-4.png')}

    run_kalman_filter(kf, os.path.join(input_dir, "circle"),
                      NOISE_2, "matching", save_frames, template_loc)


def part_1c():
    """Kalman filter on the walking sequence with the HOG sensor."""
    print("Part 1c")

    init_pos = {'x': 311, 'y': 217}

    # Define process and measurement arrays if you want to use other than the
    # default. Pass them to KalmanFilter.
    Q = None  # Process noise array
    R = None  # Measurement noise array

    kf = ps5.KalmanFilter(init_pos['x'], init_pos['y'])

    save_frames = {10: os.path.join(output_dir, 'ps5-1-c-1.png'),
                   33: os.path.join(output_dir, 'ps5-1-c-2.png'),
                   84: os.path.join(output_dir, 'ps5-1-c-3.png'),
                   159: os.path.join(output_dir, 'ps5-1-c-4.png')}

    run_kalman_filter(kf, os.path.join(input_dir, "walking"),
                      NOISE_1, "hog", save_frames)


def part_2a():
    """Basic particle filter on the circle sequence."""
    template_loc = {'y': 72, 'x': 140, 'w': 50, 'h': 50}

    save_frames = {10: os.path.join(output_dir, 'ps5-2-a-1.png'),
                   30: os.path.join(output_dir, 'ps5-2-a-2.png'),
                   59: os.path.join(output_dir, 'ps5-2-a-3.png'),
                   99: os.path.join(output_dir, 'ps5-2-a-4.png')}

    num_particles = 200  # Define the number of particles
    sigma_mse = 10  # Sigma for the measurement exponential equation
    sigma_dyn = 10  # Sigma for the particles movement (dynamics)

    run_particle_filter(ps5.ParticleFilter,  # particle filter model class
                        os.path.join(input_dir, "circle"),
                        template_loc,
                        save_frames,
                        num_particles=num_particles, sigma_exp=sigma_mse,
                        sigma_dyn=sigma_dyn,
                        template_coords=template_loc)  # Add more if you need to


def part_2b():
    """Basic particle filter on the noisy debate sequence."""
    template_loc = {'x': 360, 'y': 141, 'w': 127, 'h': 179}

    save_frames = {10: os.path.join(output_dir, 'ps5-2-b-1.png'),
                   33: os.path.join(output_dir, 'ps5-2-b-2.png'),
                   84: os.path.join(output_dir, 'ps5-2-b-3.png'),
                   99: os.path.join(output_dir, 'ps5-2-b-4.png')}

    num_particles = 200  # Define the number of particles
    sigma_mse = 10  # Sigma for the measurement exponential equation
    sigma_dyn = 10  # Sigma for the particles movement (dynamics)

    run_particle_filter(ps5.ParticleFilter,  # particle filter model class
                        os.path.join(input_dir, "pres_debate_noisy"),
                        template_loc,
                        save_frames,
                        num_particles=num_particles, sigma_exp=sigma_mse,
                        sigma_dyn=sigma_dyn,
                        template_coords=template_loc)  # Add more if you need to


def part_3():
    """Appearance-model particle filter on the debate sequence."""
    template_rect = {'x': 538, 'y': 377, 'w': 73, 'h': 117}

    save_frames = {22: os.path.join(output_dir, 'ps5-3-a-1.png'),
                   50: os.path.join(output_dir, 'ps5-3-a-2.png'),
                   160: os.path.join(output_dir, 'ps5-3-a-3.png')}

    num_particles = 500  # Define the number of particles
    sigma_mse = 6  # Sigma for the measurement exponential equation
    sigma_dyn = 30  # Sigma for the particles movement (dynamics)
    alpha = 0.95  # Set a value for alpha

    run_particle_filter(ps5.AppearanceModelPF,  # particle filter model class
                        os.path.join(input_dir, "pres_debate"),  # input video
                        template_rect,
                        save_frames,
                        num_particles=num_particles, sigma_exp=sigma_mse,
                        sigma_dyn=sigma_dyn, alpha=alpha,
                        template_coords=template_rect)  # Add more if you need to


def part_4():
    """Multi-scale particle filter on the pedestrians sequence."""
    template_rect = {'x': 210, 'y': 37, 'w': 103, 'h': 285}

    save_frames = {40: os.path.join(output_dir, 'ps5-4-a-1.png'),
                   100: os.path.join(output_dir, 'ps5-4-a-2.png'),
                   240: os.path.join(output_dir, 'ps5-4-a-3.png'),
                   300: os.path.join(output_dir, 'ps5-4-a-4.png')}

    num_particles = 250  # Define the number of particles
    sigma_md = 10  # Sigma for the measurement exponential equation
    sigma_dyn = 4  # Sigma for the particles movement (dynamics)
    alpha = 0.1

    run_particle_filter(ps5.MDParticleFilter,
                        os.path.join(input_dir, "pedestrians"),
                        template_rect,
                        save_frames,
                        num_particles=num_particles, sigma_exp=sigma_md,
                        sigma_dyn=sigma_dyn,
                        template_coords=template_rect, alpha=alpha,
                        scale=0.995)  # Add more if you need to


def part_5():
    """Tracking multiple Targets.

    Use either a Kalman or particle filter to track multiple targets as they
    move through the given video. Uses the sequence of images in the
    TUD-Campus directory; the third target only appears around frame 32.
    """
    save_frames = {29: os.path.join(output_dir, 'ps5-5-a-1.png'),
                   56: os.path.join(output_dir, 'ps5-5-a-2.png'),
                   71: os.path.join(output_dir, 'ps5-5-a-3.png')}

    t1 = {'x': 60, 'y': 200, 'w': 100, 'h': 100}
    t2 = {'x': 414, 'y': 220, 'w': 100, 'h': 100}
    t3 = {'x': 20, 'y': 172, 'w': 60, 'h': 150}

    kwargs1 = {'num_particles': 400, 'sigma_exp': 5, 'sigma_dyn': 15,
               'alpha': 0.05}
    kwargs2 = {'num_particles': 250, 'sigma_exp': 5, 'sigma_dyn': 10,
               'alpha': 0.}
    kwargs3 = {'num_particles': 150, 'sigma_exp': 5, 'sigma_dyn': 15,
               'alpha': 0.05}

    imgs_dir = os.path.join(input_dir, "TUD-Campus")
    imgs_list = [f for f in os.listdir(imgs_dir)
                 if f[0] != '.' and f.endswith('.jpg')]
    imgs_list = sorted(imgs_list)

    # Initialize objects
    templates = []
    pf1 = None
    pf2 = None
    pf3 = None
    frame_num = 1

    for img in imgs_list:
        frame = cv2.imread(os.path.join(imgs_dir, img))

        # Extract templates and initialize (one-time only)
        if len(templates) < 1:
            template1 = frame[int(t1['y']): int(t1['y'] + t1['h']),
                              int(t1['x']): int(t1['x'] + t1['w'])]
            template2 = frame[int(t2['y']): int(t2['y'] + t2['h']),
                              int(t2['x']): int(t2['x'] + t2['w'])]
            templates.append(template1)
            templates.append(template2)
            pf1 = ps5.AppearanceModelPF(frame, template=template1,
                                        template_coords=t1, **kwargs1)
            pf2 = ps5.AppearanceModelPF(frame, template=template2,
                                        template_coords=t2, **kwargs2)

        # The third pedestrian enters the scene at frame 32
        if frame_num == 32:
            template3 = frame[int(t3['y']): int(t3['y'] + t3['h']),
                              int(t3['x']): int(t3['x'] + t3['w'])]
            templates.append(template3)
            pf3 = ps5.AppearanceModelPF(frame, template=template3,
                                        template_coords=t3, **kwargs3)

        # Process frame
        pf1.process(frame)
        if frame_num <= 29:
            pf2.process(frame)
        if frame_num >= 32:
            pf3.process(frame)

        if True:  # For debugging, it displays every frame
            out_frame = frame.copy()
            pf1.render(out_frame)
            if frame_num <= 29:
                pf2.render(out_frame)
            # NOTE(review): the source is truncated here mid-statement
            # ("if frame_num"); the remainder of part_5 (rendering pf3,
            # saving frames, incrementing frame_num, __main__ guard) is
            # lost and has not been reconstructed.
the geottansform data of the bottom left corner Parameters ---------- nc : [netcdf object] netcdf object . Var : [string], optional the variable you want to read from the netcdf file if None is given the last variable in the file will be read. The default is None. Returns ------- 1-geo : [tuple] geotransform data of the netcdf file 2-epsg : [integer] epsg number 3-size_X : [integer] number of coordinates in x direction 4-size_Y : [integer] number of coordinates in y direction 5-size_Z : [integer] number of coordinates in z direction 6-Time : [integer] time varialble in the netcdf file """ # list if variables if Var is None: Var = list(nc.variables.keys())[-1] data = nc.variables[Var] # nodatavalue try: NoDataValue = data._FillValue except AttributeError: NoDataValue = data.missing_value # data type try: datatype = data.datatype except AttributeError: datatype = data.dtype size_Y, size_X = np.int_(data.shape[-2:]) # if there is a stack of layers in the file (3d array) if len(data.shape) == 3 and data.shape[0] > 1 : size_Z = np.int_(data.shape[0]) try: TimeVar = nc.variables['time'] Time = TimeVar[:] # convert time numbers to dates Time = netCDF4.num2date(Time[:],TimeVar.units) except: Time = nc.variables['t'][:] # Time = nc.variables['t'].units[11:] else: # if there is only one layer(2D array) size_Z = 1 Time = -9999 # get lats and lons try: lats = nc.variables['latitude'][:] # Geo6 = nc.variables['latitude'].res except: lats = nc.variables['lat'][:] # Geo6 = nc.variables['lat'].res try: lons = nc.variables['longitude'][:] except: lons = nc.variables['lon'][:] # Geo2 = nc.variables['lon'].size # try to get the resolutio of the file try: try: Geo2 = nc.variables['longitude'].res except: try: Geo2 = nc.variables['lon'].res except: Geo2 = lons[1] - lons[0] except: assert False, "the netcdf file does not hae a resolution attribute" # Lower left corner corner coordinates Geo4 = np.min(lats) + Geo2/2 Geo1 = np.min(lons) - Geo2/2 try: crso = nc.variables['crs'] proj = 
crso.projection epsg = Raster.GetEpsg(proj, extension = 'GEOGCS') except: epsg = 4326 geo = tuple([Geo1, Geo2, 0, Geo4, 0, Geo2]) return geo, epsg, size_X, size_Y, size_Z, Time, NoDataValue, datatype @staticmethod def NCtoTiff(input_nc, SaveTo, Separator='_'): """ ========================================================= NCtoTiff(input_nc, SaveTo) ========================================================= Parameters ---------- input_nc : [string/list] a path of the netcdf file of a list of the netcdf files' names. SaveTo : TYPE Path to where you want to save the files. Separator : [string] separator in the file name that separate the name from the date. Default is "_" Returns ------- None. """ if type(input_nc) == str: nc = netCDF4.Dataset(input_nc) elif type(input_nc) == list: nc = netCDF4.MFDataset(input_nc) # get the variable Var = list(nc.variables.keys())[-1] # extract the data All_Data = nc[Var] # get the details of the file geo, epsg, size_X, size_Y, size_Z, Time, NoDataValue, datatype = Raster.NCdetails(nc) # Create output folder if needed if not os.path.exists(SaveTo): os.mkdir(SaveTo) for i in range(0,size_Z): if All_Data.shape[0] and All_Data.shape[0] > 1 :#type(Time) == np.ndarray: #not Time == -9999 time_one = Time[i] # d = dt.date.fromordinal(int(time_one)) name = os.path.splitext(os.path.basename(input_nc))[0] nameparts = name.split(Separator)[0] # [0:-2] name_out = os.path.join(SaveTo + "/" + nameparts + '_%d.%02d.%02d.tif' %(time_one.year, time_one.month, time_one.day)) data = All_Data[i,:,:] else: name=os.path.splitext(os.path.basename(input_nc))[0] name_out = os.path.join(SaveTo, name + '.tif') data = All_Data[0,:,:] driver = gdal.GetDriverByName("GTiff") # driver = gdal.GetDriverByName("MEM") if datatype == np.float32: dst = driver.Create(name_out,int(data.shape[1]), int(data.shape[0]), 1, gdal.GDT_Float32, ['COMPRESS=LZW']) elif datatype == np.float64: dst = driver.Create(name_out,int(data.shape[1]), int(data.shape[0]), 1, gdal.GDT_Float64) elif 
datatype == np.uint16: dst = driver.Create(name_out,int(data.shape[1]), int(data.shape[0]), 1, gdal.GDT_UInt16, ['COMPRESS=LZW']) elif datatype == np.uint32: dst = driver.Create(name_out,int(data.shape[1]), int(data.shape[0]), 1, gdal.GDT_UInt32, ['COMPRESS=LZW']) elif datatype == np.int16: dst = driver.Create(name_out,int(data.shape[1]), int(data.shape[0]), 1, gdal.GDT_Int16, ['COMPRESS=LZW']) elif datatype == np.int32: dst = driver.Create(name_out,int(data.shape[1]), int(data.shape[0]), 1, gdal.GDT_Int32, ['COMPRESS=LZW']) srse = osr.SpatialReference() if epsg == '': srse.SetWellKnownGeogCS("WGS84") else: try: if not srse.SetWellKnownGeogCS(epsg) == 6: srse.SetWellKnownGeogCS(epsg) else: try: srse.ImportFromEPSG(int(epsg)) except: srse.ImportFromWkt(epsg) except: try: srse.ImportFromEPSG(int(epsg)) except: srse.ImportFromWkt(epsg) # set the geotransform dst.SetGeoTransform(geo) # set the projection dst.SetProjection(srse.ExportToWkt()) # setting the NoDataValue does not accept double precision numbers try: dst.GetRasterBand(1).SetNoDataValue(NoDataValue) # initialize the band with the nodata value instead of 0 dst.GetRasterBand(1).Fill(NoDataValue) except: NoDataValue = -9999 dst.GetRasterBand(1).SetNoDataValue(NoDataValue) dst.GetRasterBand(1).Fill(NoDataValue) # assert False, "please change the NoDataValue in the source raster as it is not accepted by Gdal" print("the NoDataValue in the source Netcdf is double precission and as it is not accepted by Gdal") print("the NoDataValue now is et to -9999 in the raster") dst.GetRasterBand(1).WriteArray(data) dst.FlushCache() dst = None def Convert_nc_to_tiff(input_nc, output_folder): """ This function converts the nc file into tiff files Keyword Arguments: input_nc -- name, name of the adf file output_folder -- Name of the output tiff file """ #All_Data = Raster.Open_nc_array(input_nc) if type(input_nc) == str: nc = netCDF4.Dataset(input_nc) elif type(input_nc) == list: nc = netCDF4.MFDataset(input_nc) Var = 
nc.variables.keys()[-1] All_Data = nc[Var] geo_out, epsg, size_X, size_Y, size_Z, Time = Raster.Open_nc_info(input_nc) if epsg == 4326: epsg = 'WGS84' # Create output folder if needed if not os.path.exists(output_folder): os.mkdir(output_folder) for i in range(0,size_Z): if not Time == -9999: time_one = Time[i] d = dt.fromordinal(time_one) name = os.path.splitext(os.path.basename(input_nc))[0] nameparts = name.split('_')[0:-2] name_out = os.path.join(output_folder, '_'.join(nameparts) + '_%d.%02d.%02d.tif' %(d.year, d.month, d.day)) Data_one = All_Data[i,:,:] else: name=os.path.splitext(os.path.basename(input_nc))[0] name_out = os.path.join(output_folder, name + '.tif') Data_one = All_Data[:,:] Raster.CreateRaster(name_out, Data_one, geo_out, epsg) return() def Convert_grb2_to_nc(input_wgrib, output_nc, band): # Get environmental variable WA_env_paths = os.environ["WA_PATHS"].split(';') GDAL_env_path = WA_env_paths[0] GDAL_TRANSLATE_PATH = os.path.join(GDAL_env_path, 'gdal_translate.exe') # Create command fullCmd = ' '.join(['"%s" -of netcdf -b %d' %(GDAL_TRANSLATE_PATH, band), input_wgrib, output_nc]) # -r {nearest} Raster.Run_command_window(fullCmd) return() def Convert_adf_to_tiff(input_adf, output_tiff): """ This function converts the adf files into tiff files Keyword Arguments: input_adf -- name, name of the adf file output_tiff -- Name of the output tiff file """ # Get environmental variable WA_env_paths = os.environ["WA_PATHS"].split(';') GDAL_env_path = WA_env_paths[0] GDAL_TRANSLATE_PATH = os.path.join(GDAL_env_path, 'gdal_translate.exe') # convert data from ESRI GRID to GeoTIFF fullCmd = ('"%s" -co COMPRESS=DEFLATE -co PREDICTOR=1 -co ' 'ZLEVEL=1 -of GTiff %s %s') % (GDAL_TRANSLATE_PATH, input_adf, output_tiff) Raster.Run_command_window(fullCmd) return(output_tiff) def Convert_bil_to_tiff(input_bil, output_tiff): """ This function converts the bil files into tiff files Keyword Arguments: input_bil -- name, name of the bil file output_tiff -- Name of the 
output tiff file """ gdal.GetDriverByName('EHdr').Register() dest = gdal.Open(input_bil, gdalconst.GA_ReadOnly) Array = dest.GetRasterBand(1).ReadAsArray() geo_out = dest.GetGeoTransform() Raster.CreateRaster(output_tiff, Array, geo_out, "WGS84") return(output_tiff) def Convert_hdf5_to_tiff(inputname_hdf, Filename_tiff_end, Band_number, scaling_factor, geo_out): """ This function converts the hdf5 files into tiff files Keyword Arguments: input_adf -- name, name of the adf file output_tiff -- Name of the output tiff file Band_number -- bandnumber of the hdf5 that needs to be converted scaling_factor -- factor multipied by data is the output array geo -- [minimum lon, pixelsize, rotation, maximum lat, rotation, pixelsize], (geospatial dataset) """ # Open the hdf file g = gdal.Open(inputname_hdf, gdal.GA_ReadOnly) # Define temporary file out and band name in name_in = g.GetSubDatasets()[Band_number][0] # Get environmental variable WA_env_paths = os.environ["WA_PATHS"].split(';') GDAL_env_path = WA_env_paths[0] GDAL_TRANSLATE = os.path.join(GDAL_env_path, 'gdal_translate.exe') # run gdal translate command FullCmd = '%s -of GTiff %s %s' %(GDAL_TRANSLATE, name_in, Filename_tiff_end) Raster.Run_command_window(FullCmd) # Get the data array dest = gdal.Open(Filename_tiff_end) Data = dest.GetRasterBand(1).ReadAsArray() dest = None # If the band data is not SM change the DN values into PROBA-V values and write into the spectral_reflectance_PROBAV Data_scaled = Data * scaling_factor # Save the PROBA-V as a tif file Raster.CreateRaster(Filename_tiff_end, Data_scaled, geo_out, "WGS84") return() # def Extract_Data(input_file, output_folder): # """ # This function extract the zip files # Keyword Arguments: # output_file -- name, name of the file that must be unzipped # output_folder -- Dir, directory where the unzipped data must be # stored # """ # # extract the data # z = zipfile.ZipFile(input_file, 'r') # z.extractall(output_folder) # z.close() @staticmethod def 
ExtractFromGZ(InputFile, OutputFile, delete=False): """ ============================================================ ExtractFromGZ(zip_filename, outfilename) ============================================================ ExtractFromGZ method extract data from the zip/.gz files, save the data Parameters ---------- zip_filename : [str] zipped file name . outfilename : [str] directory where the unzipped data must be stored. delete : [bool] True if you want to delete the zipped file after the extracting the data Returns ------- None. """ with gzip.GzipFile(InputFile, 'rb') as zf: content = zf.read() save_file_content = open(OutputFile,
""" Testing using the Test Client The test client is a class that can act like a simple browser for testing purposes. It allows the user to compose GET and POST requests, and obtain the response that the server gave to those requests. The server Response objects are annotated with the details of the contexts and templates that were rendered during the process of serving the request. ``Client`` objects are stateful - they will retain cookie (and thus session) details for the lifetime of the ``Client`` instance. This is not intended as a replacement for Twill, Selenium, or other browser automation frameworks - it is here to allow testing against the contexts and templates produced by a view, rather than the HTML rendered to the end-user. """ import itertools import tempfile from unittest import mock from django.contrib.auth.models import User from django.core import mail from django.http import HttpResponse, HttpResponseNotAllowed from django.test import ( AsyncRequestFactory, Client, RequestFactory, SimpleTestCase, TestCase, modify_settings, override_settings, ) from django.urls import reverse_lazy from django.utils.decorators import async_only_middleware from .views import TwoArgException, get_view, post_view, trace_view def middleware_urlconf(get_response): def middleware(request): request.urlconf = 'tests.test_client.urls_middleware_urlconf' return get_response(request) return middleware @async_only_middleware def async_middleware_urlconf(get_response): async def middleware(request): request.urlconf = 'tests.test_client.urls_middleware_urlconf' return await get_response(request) return middleware @override_settings(ROOT_URLCONF='test_client.urls') class ClientTest(TestCase): @classmethod def setUpTestData(cls): cls.u1 = User.objects.create_user(username='testclient', password='password') cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False) def test_get_view(self): "GET a view" # The data is ignored, but let's check it 
doesn't crash the system # anyway. data = {'var': '\xf2'} response = self.client.get('/get_view/', data) # Check some response details self.assertContains(response, 'This is a test') self.assertEqual(response.context['var'], '\xf2') self.assertEqual(response.templates[0].name, 'GET Template') def test_query_string_encoding(self): # WSGI requires latin-1 encoded strings. response = self.client.get('/get_view/?var=1\ufffd') self.assertEqual(response.context['var'], '1\ufffd') def test_get_data_none(self): msg = ( "Cannot encode None for key 'value' in a query string. Did you " "mean to pass an empty string or omit the value?" ) with self.assertRaisesMessage(TypeError, msg): self.client.get('/get_view/', {'value': None}) def test_get_post_view(self): "GET a view that normally expects POSTs" response = self.client.get('/post_view/', {}) # Check some response details self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, 'Empty GET Template') self.assertTemplateUsed(response, 'Empty GET Template') self.assertTemplateNotUsed(response, 'Empty POST Template') def test_empty_post(self): "POST an empty dictionary to a view" response = self.client.post('/post_view/', {}) # Check some response details self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, 'Empty POST Template') self.assertTemplateNotUsed(response, 'Empty GET Template') self.assertTemplateUsed(response, 'Empty POST Template') def test_post(self): "POST some data to a view" post_data = { 'value': 37 } response = self.client.post('/post_view/', post_data) # Check some response details self.assertContains(response, 'Data received') self.assertEqual(response.context['data'], '37') self.assertEqual(response.templates[0].name, 'POST Template') def test_post_data_none(self): msg = ( "Cannot encode None for key 'value' as POST data. Did you mean " "to pass an empty string or omit the value?" 
) with self.assertRaisesMessage(TypeError, msg): self.client.post('/post_view/', {'value': None}) def test_json_serialization(self): """The test client serializes JSON data.""" methods = ('post', 'put', 'patch', 'delete') tests = ( ({'value': 37}, {'value': 37}), ([37, True], [37, True]), ((37, False), [37, False]), ) for method in methods: with self.subTest(method=method): for data, expected in tests: with self.subTest(data): client_method = getattr(self.client, method) method_name = method.upper() response = client_method('/json_view/', data, content_type='application/json') self.assertContains(response, 'Viewing %s page.' % method_name) self.assertEqual(response.context['data'], expected) def test_json_encoder_argument(self): """The test Client accepts a json_encoder.""" mock_encoder = mock.MagicMock() mock_encoding = mock.MagicMock() mock_encoder.return_value = mock_encoding mock_encoding.encode.return_value = '{"value": 37}' client = self.client_class(json_encoder=mock_encoder) # Vendored tree JSON content types are accepted. 
client.post('/json_view/', {'value': 37}, content_type='application/vnd.api+json') self.assertTrue(mock_encoder.called) self.assertTrue(mock_encoding.encode.called) def test_put(self): response = self.client.put('/put_view/', {'foo': 'bar'}) self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, 'PUT Template') self.assertEqual(response.context['data'], "{'foo': 'bar'}") self.assertEqual(response.context['Content-Length'], '14') def test_trace(self): """TRACE a view""" response = self.client.trace('/trace_view/') self.assertEqual(response.status_code, 200) self.assertEqual(response.context['method'], 'TRACE') self.assertEqual(response.templates[0].name, 'TRACE Template') def test_response_headers(self): "Check the value of HTTP headers returned in a response" response = self.client.get("/header_view/") self.assertEqual(response.headers['X-DJANGO-TEST'], 'Slartibartfast') def test_response_attached_request(self): """ The returned response has a ``request`` attribute with the originating environ dict and a ``wsgi_request`` with the originating WSGIRequest. """ response = self.client.get("/header_view/") self.assertTrue(hasattr(response, 'request')) self.assertTrue(hasattr(response, 'wsgi_request')) for key, value in response.request.items(): self.assertIn(key, response.wsgi_request.environ) self.assertEqual(response.wsgi_request.environ[key], value) def test_response_resolver_match(self): """ The response contains a ResolverMatch instance. """ response = self.client.get('/header_view/') self.assertTrue(hasattr(response, 'resolver_match')) def test_response_resolver_match_redirect_follow(self): """ The response ResolverMatch instance contains the correct information when following redirects. 
""" response = self.client.get('/redirect_view/', follow=True) self.assertEqual(response.resolver_match.url_name, 'get_view') def test_response_resolver_match_regular_view(self): """ The response ResolverMatch instance contains the correct information when accessing a regular view. """ response = self.client.get('/get_view/') self.assertEqual(response.resolver_match.url_name, 'get_view') @modify_settings(MIDDLEWARE={'prepend': 'test_client.tests.middleware_urlconf'}) def test_response_resolver_match_middleware_urlconf(self): response = self.client.get('/middleware_urlconf_view/') self.assertEqual(response.resolver_match.url_name, 'middleware_urlconf_view') def test_raw_post(self): "POST raw data (with a content type) to a view" test_doc = """<?xml version="1.0" encoding="utf-8"?> <library><book><title>Blink</title><author><NAME></author></book></library> """ response = self.client.post('/raw_post_view/', test_doc, content_type='text/xml') self.assertEqual(response.status_code, 200) self.assertEqual(response.templates[0].name, "Book template") self.assertEqual(response.content, b"Blink - <NAME>") def test_insecure(self): "GET a URL through http" response = self.client.get('/secure_view/', secure=False) self.assertFalse(response.test_was_secure_request) self.assertEqual(response.test_server_port, '80') def test_secure(self): "GET a URL through https" response = self.client.get('/secure_view/', secure=True) self.assertTrue(response.test_was_secure_request) self.assertEqual(response.test_server_port, '443') def test_redirect(self): "GET a URL that redirects elsewhere" response = self.client.get('/redirect_view/') self.assertRedirects(response, '/get_view/') def test_redirect_with_query(self): "GET a URL that redirects with given GET parameters" response = self.client.get('/redirect_view/', {'var': 'value'}) self.assertRedirects(response, '/get_view/?var=value') def test_redirect_with_query_ordering(self): """assertRedirects() ignores the order of query string 
parameters.""" response = self.client.get('/redirect_view/', {'var': 'value', 'foo': 'bar'}) self.assertRedirects(response, '/get_view/?var=value&foo=bar') self.assertRedirects(response, '/get_view/?foo=bar&var=value') def test_permanent_redirect(self): "GET a URL that redirects permanently elsewhere" response = self.client.get('/permanent_redirect_view/') self.assertRedirects(response, '/get_view/', status_code=301) def test_temporary_redirect(self): "GET a URL that does a non-permanent redirect" response = self.client.get('/temporary_redirect_view/') self.assertRedirects(response, '/get_view/', status_code=302) def test_redirect_to_strange_location(self): "GET a URL that redirects to a non-200 page" response = self.client.get('/double_redirect_view/') # The response was a 302, and that the attempt to get the redirection # location returned 301 when retrieved self.assertRedirects(response, '/permanent_redirect_view/', target_status_code=301) def test_follow_redirect(self): "A URL that redirects can be followed to termination." response = self.client.get('/double_redirect_view/', follow=True) self.assertRedirects(response, '/get_view/', status_code=302, target_status_code=200) self.assertEqual(len(response.redirect_chain), 2) def test_follow_relative_redirect(self): "A URL with a relative redirect can be followed." response = self.client.get('/accounts/', follow=True) self.assertEqual(response.status_code, 200) self.assertEqual(response.request['PATH_INFO'], '/accounts/login/') def test_follow_relative_redirect_no_trailing_slash(self): "A URL with a relative redirect with no trailing slash can be followed." response = self.client.get('/accounts/no_trailing_slash', follow=True) self.assertEqual(response.status_code, 200) self.assertEqual(response.request['PATH_INFO'], '/accounts/login/') def test_follow_307_and_308_redirect(self): """ A 307 or 308 redirect preserves the request method after the redirect. 
""" methods = ('get', 'post', 'head', 'options', 'put', 'patch', 'delete', 'trace') codes = (307, 308) for method, code in itertools.product(methods, codes): with self.subTest(method=method, code=code): req_method = getattr(self.client, method) response = req_method('/redirect_view_%s/' % code, data={'value': 'test'}, follow=True) self.assertEqual(response.status_code, 200) self.assertEqual(response.request['PATH_INFO'], '/post_view/') self.assertEqual(response.request['REQUEST_METHOD'], method.upper()) def test_follow_307_and_308_preserves_query_string(self): methods = ('post', 'options', 'put', 'patch', 'delete', 'trace') codes = (307, 308) for method, code in itertools.product(methods, codes): with self.subTest(method=method, code=code): req_method = getattr(self.client, method) response = req_method( '/redirect_view_%s_query_string/' % code, data={'value': 'test'}, follow=True, ) self.assertRedirects(response, '/post_view/?hello=world', status_code=code) self.assertEqual(response.request['QUERY_STRING'], 'hello=world') def test_follow_307_and_308_get_head_query_string(self): methods = ('get', 'head') codes = (307, 308) for method, code in itertools.product(methods, codes): with self.subTest(method=method, code=code): req_method = getattr(self.client, method) response = req_method( '/redirect_view_%s_query_string/' % code, data={'value': 'test'}, follow=True, ) self.assertRedirects(response, '/post_view/?hello=world', status_code=code) self.assertEqual(response.request['QUERY_STRING'], 'value=test') def test_follow_307_and_308_preserves_post_data(self): for code in (307, 308): with self.subTest(code=code): response = self.client.post('/redirect_view_%s/' % code, data={'value': 'test'}, follow=True) self.assertContains(response, 'test is the value') def test_follow_307_and_308_preserves_put_body(self): for code in (307, 308): with self.subTest(code=code): response = self.client.put('/redirect_view_%s/?to=/put_view/' % code, data='a=b', follow=True) 
self.assertContains(response, 'a=b is the body') def test_follow_307_and_308_preserves_get_params(self): data = {'var': 30, 'to': '/get_view/'} for code in (307, 308): with self.subTest(code=code): response = self.client.get('/redirect_view_%s/' % code, data=data, follow=True) self.assertContains(response, '30 is the value') def test_redirect_http(self): "GET a URL that redirects to an http URI" response = self.client.get('/http_redirect_view/', follow=True) self.assertFalse(response.test_was_secure_request) def test_redirect_https(self): "GET a URL that redirects to an https URI" response = self.client.get('/https_redirect_view/', follow=True) self.assertTrue(response.test_was_secure_request) def test_notfound_response(self): "GET a URL that responds as '404:Not Found'" response = self.client.get('/bad_view/') self.assertContains(response, 'MAGIC', status_code=404) def test_valid_form(self): "POST valid data to a form" post_data = { 'text': '<NAME>', 'email': '<EMAIL>', 'value': 37, 'single': 'b', 'multi': ('b', 'c', 'e') } response = self.client.post('/form_view/', post_data) self.assertEqual(response.status_code, 200) self.assertTemplateUsed(response, "Valid POST Template") def test_valid_form_with_hints(self): "GET a form, providing hints in the GET data" hints = { 'text': '<NAME>', 'multi': ('b', 'c', 'e') } response = self.client.get('/form_view/', data=hints) # The multi-value data has been rolled out ok self.assertContains(response, 'Select a valid choice.', 0) self.assertTemplateUsed(response, "Form GET Template") def test_incomplete_data_form(self): "POST incomplete data to a form" post_data = { 'text': '<NAME>', 'value': 37 } response = self.client.post('/form_view/', post_data) self.assertContains(response, 'This field is required.', 3) self.assertTemplateUsed(response, "Invalid POST Template") self.assertFormError(response, 'form', 'email', 'This field is required.') self.assertFormError(response, 'form', 'single', 'This field is
## @ PatchFv.py
#
# Copyright (c) 2014 - 2015, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials are licensed and made available under
# the terms and conditions of the BSD License that accompanies this distribution.
# The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php.
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##

import os
import re
import sys


def readDataFromFile (binfile, offset, len=1):
    """Read a little-endian integer of 'len' bytes from 'binfile' at 'offset'.

    A 32-bit offset with the top bit set is interpreted as a negative
    offset from the end of the file.  Returns the value as an int.

    NOTE: the parameter is named 'len' (shadowing the builtin) to stay
    backward-compatible with existing keyword-argument callers.
    """
    fsize = os.path.getsize(binfile)
    offval = offset & 0xFFFFFFFF
    if offval & 0x80000000:
        # "Negative" offset: address the file relative to its end.
        offval = fsize - (0xFFFFFFFF - offval + 1)
    # Open read-only (the original used "r+b" but never writes) and use a
    # context manager so the handle cannot leak on an I/O error.
    with open(binfile, "rb") as fd:
        fd.seek(offval)
        # bytearray yields ints on both Python 2 and 3; the original
        # ord()-per-char loop only worked on Python 2 str data.
        data = bytearray(fd.read(len))
    value = 0
    # Bytes are stored little-endian, so fold them in from the highest one.
    for byte in reversed(data):
        value = (value << 8) | byte
    return value

def IsFspHeaderValid (binfile):
    """Check that 'binfile' carries valid FSP component headers.

    Requires the 'FSPH' and 'FSPP' signatures within the first 0x200 bytes;
    for header revision > 1 the 'FSPE' signature is also required.
    Returns True when the headers look valid, False otherwise.
    """
    with open(binfile, "rb") as fd:
        # bytearray allows byte-string searching and int indexing on
        # both Python 2 and 3 (the original str-based search broke on 3).
        bindat = bytearray(fd.read(0x200))
    HeaderList = [b'FSPH', b'FSPP', b'FSPE']
    OffsetList = []
    for each in HeaderList:
        # A missing signature is recorded as offset 0; note this also treats
        # a signature at file offset 0 as missing (preserved original quirk).
        OffsetList.append(bindat.index(each) if each in bindat else 0)
    if not OffsetList[0] or not OffsetList[1]:
        return False
    # Header revision lives 0x0B bytes past the 'FSPH' signature.
    Revision = bindat[OffsetList[0] + 0x0B]
    if Revision > 1 and not OffsetList[2]:
        return False
    return True

def patchDataInFile (binfile, offset, value, len=1):
    """Write 'value' as a little-endian integer of 'len' bytes into 'binfile'.

    Negative 32-bit offsets address the file from its end, mirroring
    readDataFromFile.  Returns the number of bytes written ('len').

    NOTE: the parameter is named 'len' (shadowing the builtin) to stay
    backward-compatible with existing keyword-argument callers.
    """
    fsize = os.path.getsize(binfile)
    offval = offset & 0xFFFFFFFF
    if offval & 0x80000000:
        offval = fsize - (0xFFFFFFFF - offval + 1)
    # Serialize little-endian into a bytearray; writing a bytearray works on
    # both Python 2 and 3 (the original chr()-join only worked on 2) and
    # avoids shadowing the 'bytearray' builtin as the original local did.
    out = bytearray()
    for _ in range(len):
        out.append(value & 0xFF)
        value >>= 8
    with open(binfile, "r+b") as fd:
        fd.seek(offval)
        fd.write(out)
    return len

class Symbols:
    def __init__(self):
        # Symbol name ("Module:Symbol") -> address string.
        self.dictSymbolAddress = {}
        # GUID (upper-case) -> module name, from Guid.xref.
        self.dictGuidNameXref  = {}
        # FFS GUID -> offset string within the FD.
        self.dictFfsOffset     = {}
        # Expression variables (e.g. FDSIZE, FDBASE).
        self.dictVariable      = {}
        # "Module:BASE/ENTRY/TEXT/DATA" -> address.
        self.dictModBase       = {}
        self.fdFile = None
        # Expression-parser state: current input string and cursor.
        self.string = ""
        self.fdBase = 0xFFFFFFFF
        self.fdSize = 0
        self.index  = 0
self.parenthesisOpenSet = '([{<' self.parenthesisCloseSet = ')]}>' def getFdFile (self): return self.fdFile def getFdSize (self): return self.fdSize def createDicts (self, fvDir, fvNames): if not os.path.isdir(fvDir): raise Exception ("'%s' is not a valid directory!" % FvDir) xrefFile = os.path.join(fvDir, "Guid.xref") if not os.path.exists(xrefFile): raise Exception("Cannot open GUID Xref file '%s'!" % xrefFile) self.dictGuidNameXref = {} self.parseGuidXrefFile(xrefFile) fvList = fvNames.split(":") fdBase = fvList.pop() if len(fvList) == 0: fvList.append(fdBase) fdFile = os.path.join(fvDir, fdBase.strip() + ".fd") if not os.path.exists(fdFile): raise Exception("Cannot open FD file '%s'!" % fdFile) self.fdFile = fdFile self.fdSize = os.path.getsize(fdFile) infFile = os.path.join(fvDir, fvList[0].strip()) + ".inf" if not os.path.exists(infFile): raise Exception("Cannot open INF file '%s'!" % infFile) self.parseInfFile(infFile) self.dictVariable = {} self.dictVariable["FDSIZE"] = self.fdSize self.dictVariable["FDBASE"] = self.fdBase self.dictSymbolAddress = {} self.dictFfsOffset = {} for file in fvList: fvFile = os.path.join(fvDir, file.strip()) + ".Fv" mapFile = fvFile + ".map" if not os.path.exists(mapFile): raise Exception("Cannot open MAP file '%s'!" % mapFile) self.parseFvMapFile(mapFile) fvTxtFile = fvFile + ".txt" if not os.path.exists(fvTxtFile): raise Exception("Cannot open FV TXT file '%s'!" 
% fvTxtFile) self.parseFvTxtFile(fvTxtFile) ffsDir = os.path.join(fvDir, "Ffs") if (os.path.isdir(ffsDir)): for item in os.listdir(ffsDir): if len(item) <= 0x24: continue mapFile =os.path.join(ffsDir, item, "%s.map" % item[0:0x24]) if not os.path.exists(mapFile): continue self.parseModMapFile(item[0x24:], mapFile) return 0 def getFvOffsetInFd(self, fvFile): fvHandle = open(fvFile, "r+b") fdHandle = open(self.fdFile, "r+b") offset = fdHandle.read().find(fvHandle.read(0x70)) fvHandle.close() fdHandle.close() if offset == -1: raise Exception("Could not locate FV file %s in FD!" % fvFile) return offset def parseInfFile(self, infFile): fvOffset = self.getFvOffsetInFd(infFile[0:-4] + ".Fv") fdIn = open(infFile, "r") rptLine = fdIn.readline() self.fdBase = 0xFFFFFFFF while (rptLine != "" ): #EFI_BASE_ADDRESS = 0xFFFDF400 match = re.match("^EFI_BASE_ADDRESS\s*=\s*(0x[a-fA-F0-9]+)", rptLine) if match is not None: self.fdBase = int(match.group(1), 16) - fvOffset rptLine = fdIn.readline() fdIn.close() if self.fdBase == 0xFFFFFFFF: raise Exception("Could not find EFI_BASE_ADDRESS in INF file!" 
% fvFile) return 0 def parseFvTxtFile(self, fvTxtFile): fvOffset = self.getFvOffsetInFd(fvTxtFile[0:-4]) fdIn = open(fvTxtFile, "r") rptLine = fdIn.readline() while (rptLine != "" ): match = re.match("(0x[a-fA-F0-9]+)\s([0-9a-fA-F\-]+)", rptLine) if match is not None: self.dictFfsOffset[match.group(2)] = "0x%08X" % (int(match.group(1), 16) + fvOffset) rptLine = fdIn.readline() fdIn.close() return 0 def parseFvMapFile(self, mapFile): fdIn = open(mapFile, "r") rptLine = fdIn.readline() modName = "" while (rptLine != "" ): if rptLine[0] != ' ': #DxeIpl (Fixed Flash Address, BaseAddress=0x00fffb4310, EntryPoint=0x00fffb4958) #(GUID=86D70125-BAA3-4296-A62F-602BEBBB9081 .textbaseaddress=0x00fffb4398 .databaseaddress=0x00fffb4178) match = re.match("([_a-zA-Z0-9\-]+)\s\(.+BaseAddress=(0x[0-9a-fA-F]+),\s+EntryPoint=(0x[0-9a-fA-F]+)\)", rptLine) if match is not None: modName = match.group(1) if len(modName) == 36: modName = self.dictGuidNameXref[modName.upper()] self.dictModBase['%s:BASE' % modName] = int (match.group(2), 16) self.dictModBase['%s:ENTRY' % modName] = int (match.group(3), 16) match = re.match("\(GUID=([A-Z0-9\-]+)\s+\.textbaseaddress=(0x[0-9a-fA-F]+)\s+\.databaseaddress=(0x[0-9a-fA-F]+)\)", rptLine) if match is not None: modName = match.group(1) if len(modName) == 36: modName = self.dictGuidNameXref[modName.upper()] self.dictModBase['%s:TEXT' % modName] = int (match.group(2), 16) self.dictModBase['%s:DATA' % modName] = int (match.group(3), 16) else: # 0x00fff8016c __ModuleEntryPoint match = re.match("^\s+(0x[a-z0-9]+)\s+([_a-zA-Z0-9]+)", rptLine) if match is not None: self.dictSymbolAddress["%s:%s"%(modName, match.group(2))] = match.group(1) rptLine = fdIn.readline() fdIn.close() return 0 def parseModMapFile(self, moduleName, mapFile): modSymbols = {} fdIn = open(mapFile, "r") reportLine = fdIn.readline() if reportLine.strip().find("Archive member included") != -1: #GCC # 0x0000000000001d55 IoRead8 patchMapFileMatchString = 
"\s+(0x[0-9a-fA-F]{16})\s+([^\s][^0x][_a-zA-Z0-9\-]+)\s" matchKeyGroupIndex = 2 matchSymbolGroupIndex = 1 moduleEntryPoint = "_ModuleEntryPoint" else: #MSFT #0003:00000190 _gComBase 00007a50 SerialPo patchMapFileMatchString = "^\s[0-9a-fA-F]{4}:[0-9a-fA-F]{8}\s+(\w+)\s+([0-9a-fA-F]{8}\s+)" matchKeyGroupIndex = 1 matchSymbolGroupIndex = 2 moduleEntryPoint = "__ModuleEntryPoint" while (reportLine != "" ): match = re.match(patchMapFileMatchString, reportLine) if match is not None: modSymbols[match.group(matchKeyGroupIndex)] = match.group(matchSymbolGroupIndex) reportLine = fdIn.readline() fdIn.close() if not moduleEntryPoint in modSymbols: return 1 modEntry = '%s:%s' % (moduleName,moduleEntryPoint) if not modEntry in self.dictSymbolAddress: modKey = '%s:ENTRY' % moduleName if modKey in self.dictModBase: baseOffset = self.dictModBase['%s:ENTRY' % moduleName] - int(modSymbols[moduleEntryPoint], 16) else: return 2 else: baseOffset = int(self.dictSymbolAddress[modEntry], 16) - int(modSymbols[moduleEntryPoint], 16) for symbol in modSymbols: fullSym = "%s:%s" % (moduleName, symbol) if not fullSym in self.dictSymbolAddress: self.dictSymbolAddress[fullSym] = "0x00%08x" % (baseOffset+ int(modSymbols[symbol], 16)) return 0 def parseGuidXrefFile(self, xrefFile): fdIn = open(xrefFile, "r") rptLine = fdIn.readline() while (rptLine != "" ): match = re.match("([0-9a-fA-F\-]+)\s([_a-zA-Z0-9]+)", rptLine) if match is not None: self.dictGuidNameXref[match.group(1).upper()] = match.group(2) rptLine = fdIn.readline() fdIn.close() return 0 def getCurr(self): try: return self.string[self.index] except Exception: return '' def isLast(self): return self.index == len(self.string) def moveNext(self): self.index += 1 def skipSpace(self): while not self.isLast(): if self.getCurr() in ' \t': self.moveNext() else: return def parseValue(self): self.skipSpace() var = '' while not self.isLast(): char = self.getCurr() if char.lower() in 
'_ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789:-': var += char self.moveNext() else: break if ':' in var: partList = var.split(':') if len(partList) != 2: raise Exception("Unrecognized expression %s" % var) modName = partList[0] modOff = partList[1] if ('-' not in modName) and (modOff[0] in '0123456789'): # MOD: OFFSET var = self.getModGuid(modName) + ":" + modOff if '-' in var: # GUID:OFFSET value = self.getGuidOff(var) else: value = self.getSymbols(var) self.synUsed = True else: if var[0] in '0123456789': value = self.getNumber(var) else: value = self.getVariable(var) return int(value) def parseSingleOp(self): self.skipSpace() char = self.getCurr() if char == '~': self.moveNext() return ~self.parseBrace() else: return self.parseValue() def parseBrace(self): self.skipSpace() char = self.getCurr() parenthesisType = self.parenthesisOpenSet.find(char) if parenthesisType >= 0: self.moveNext() value = self.parseExpr() self.skipSpace() if self.getCurr() != self.parenthesisCloseSet[parenthesisType]: raise Exception("No closing brace") self.moveNext() if parenthesisType == 1: # [ : Get content value = self.getContent(value) elif parenthesisType == 2: # { : To address value = self.toAddress(value) elif parenthesisType == 3: # < : To offset value = self.toOffset(value) return value else: return self.parseSingleOp() def parseMul(self): values = [self.parseBrace()] while True: self.skipSpace() char = self.getCurr() if char == '*': self.moveNext() values.append(self.parseBrace()) else: break value = 1 for each in values: value *= each return value def parseAndOr(self): values = [self.parseMul()] op = None value = 0xFFFFFFFF while True: self.skipSpace() char = self.getCurr() if char == '&': self.moveNext() values.append(self.parseMul()) op = char elif char == '|': div_index = self.index self.moveNext() values.append(self.parseMul()) value = 0 op = char else: break for each in values: if op == '|': value |= each else: value &= each return value def 
parseAddMinus(self): values = [self.parseAndOr()] while True: self.skipSpace() char = self.getCurr() if char == '+': self.moveNext() values.append(self.parseAndOr()) elif char == '-': self.moveNext() values.append(-1 * self.parseAndOr()) else: break return sum(values) def parseExpr(self): return self.parseAddMinus() def getResult(self): value = self.parseExpr() self.skipSpace() if not self.isLast(): raise Exception("Unexpected character found '%s'" % self.getCurr()) return value def getModGuid(self, var): guid
-------- data :: dictionary data dictionary with cases in cases list updated or added ''' data_hist_arrs = ['NDump','time(mins)', 'time(secs)'] for case in cases: data[case] = {} data[case]['path'] = dir+case+'/prfs' data[case]['rp'] = RprofSet(data[case]['path']) data[case]['rph'] = data[case]['rp'].get_history() data[case]['NDump'] = data[case]['rph'].get('NDump') data[case]['time(mins)'] = data[case]['rph'].get('time(mins)') data[case]['time(secs)'] = data[case]['rph'].get('time(secs)') data[case]['rp_one'] = data[case]['rp'].get_dump( data[case]['NDump'][0]) data[case]['X_Lfactors'] = data[case]['rp_one'].get( 'totallum')/nominal_heat data[case]['grids'] = data[case]['rp_one'].get('Nx') data[case]['eos'] = eos # make history arrays monotonic NDump_mono = np.copy(data[case]['NDump']) while sum(diff(NDump_mono) <= 0) > 0: for thing in data_hist_arrs: data[case][thing] = delete(data[case][thing][:-1],(diff(NDump_mono) <= 0)) NDump_mono = np.copy(data[case]['NDump']) return data def cases_table(data, latex = False, cases = None): ''' Print table of runs Optionally table is Latex formatted. 
Call example: cases_table(data,latex=False) Parameters: ----------- data :: dictionary contains data from initialize_cases latex :: boolean print latex formatted table if true cases :: list defaults to all cases in data, else provide subset of cases ''' if cases is None: cases = list(data.keys()) if latex: print(" ID & grid & $t_\mathrm{end}/\hour$ & $\log L/L_\mathrm{nominal}$ \\\ ") for case in cases: last_dump=data[case]['NDump'][-1] print(" {:4s} & {:4d} & {:5.0f} & {:3.1f} \\\ ".format(case,data[case]['rp'].get('Nx',last_dump),data[case]['time(mins)'][-1]/60.,log10(data[case]['X_Lfactors']))) else: print("{:40s} {:8s} {:5s} {:8s} {:8s}".format('case','X heat','grids','max dump','max t_hr')) for i,case in enumerate(cases): print("{:40s} {:7.1f} {:5d} {:8d} {:8.1f}".format(case,data[case]['X_Lfactors'], \ data[case]['grids'],data[case]['NDump'][-1],data[case]['time(mins)'][-1]/60.)) def where_near(t,tt_arr,num_arr=None): '''Finds n in num that corresponds closest to t in tt_sec Parameters: ----------- t :: scalar, float time in seconds we seek the dump number for tt_arr : array, float time in seconds for each dump num_arr :: array, int or float dump number array The default is that num_arr is the grid number, and then the input, for example, (R, Rgrid) will return the neares grid number of R in the Rgrid array. 
Returns: -------- num_t :: int/float depending on type of NDump interpolated value in num_arr corresponding to t in tt_arr ''' if num_arr is None: num_arr = arange(0,len(tt_arr),1) f_int = scipy.interpolate.interp1d(tt_arr,num_arr,kind='linear',fill_value="extrapolate") num_t = float(f_int(t)) if type(num_arr[0]) in [int64,int]: num_t = int(round(num_t,0)) return num_t def index_nearest_value(a,afind): '''Return index of value in a which is closest to afind Parameters ---------- a : array afind : scalar ''' aabs = abs(a-afind) return where(min(aabs) == aabs)[0][0] def set_nice_params(): fsize=14 params = {'axes.labelsize': fsize, # 'font.family': 'serif', 'font.family': 'Times New Roman', 'figure.facecolor': 'white', 'font.size': fsize, 'legend.fontsize': fsize*0.75, 'xtick.labelsize': fsize*0.8, 'ytick.labelsize': fsize*0.8, 'ytick.minor.pad': 8, 'ytick.major.pad': 8, 'xtick.minor.pad': 8, 'xtick.major.pad': 8, 'text.usetex': False} pl.rcParams.update(params) def set_YProf_path(path,YProf_fname='YProfile-01-0000.bobaaa'): ''' Set path to location where YProfile directories can be found. For example, set path to the swj/PPM/RUNS_DIR VOSpace directory as a global variable, so that it need only be set once during an interactive session; instances can then be loaded by refering to the directory name that contains YProfile files. ppm.ppm_path: contains path ppm.cases: contains dirs in path that contain file with name YProf_fname usually used to determine dirs with YProfile files ''' global ppm_path, cases ppm_path = path cases = [] for thing in os.listdir(ppm_path): dir_thing = os.path.join(ppm_path,thing) if os.path.isdir(dir_thing) and \ os.path.isfile(os.path.join(ppm_path,thing,YProf_fname)): cases.append(thing) def prof_compare(cases,ndump=None,yaxis_thing='FV H+He',ifig=None,num_type='ndump', labels=None,logy=True): """ Compare profiles of quantities from multiple PPM Yprofile instances at a given time of nump number. 
Parameters ---------- cases : list list containing the Yprofile instances that you want to compare ndump : string or int, optional The filename, Ndump or time, if None it defaults to the last NDump. The default is None. yaxis_thing : string, optional What quantity to plot on the y-axis. The default is 'FV H+He' ifig : int, optional Figure number. If None, chose automatically. The default is None. num_type : string, optional Designates how this function acts and how it interprets fname. If numType is 'file', this function will get the desired attribute from that file. If numType is 'ndump' function will look at the cycle with that ndump. If numType is 'T' or 'time' function will find the _cycle with the closest time stamp. The default is "ndump". labels : list, optional List of labels; one for each of the cases. If None, labels are simply indices. The default is None. logy : boolean, optional Should the y-axis have a logarithmic scale? The default is True. Examples -------- .. ipython:: In [136]: from ppmpy import ppm .....: data_dir = '/data/ppm_rpod2/YProfiles/' .....: project = 'O-shell-M25' .....: ppm.set_YProf_path(data_dir+project) @savefig prof_compare.png width=6in In [136]: D2=ppm.yprofile('D2') .....: D1=ppm.yprofile('D1') .....: ppm.prof_compare([D2,D1],10,labels = ['D1','D2']) """ fsize=14 params = {'axes.labelsize': fsize, # 'font.family': 'serif', 'font.family': 'Times New Roman', 'figure.facecolor': 'white', 'text.fontsize': fsize, 'legend.fontsize': fsize, 'xtick.labelsize': fsize*0.8, 'ytick.labelsize': fsize*0.8, 'text.usetex': False} pl.rcParams.update(params) jline_offset=6 if labels is None: labels=[]*len(cases) if ifig is None: pl.figure() else: pl.figure(ifig) labels = zeros(len(cases)) i=0 for Y in cases: j=i+jline_offset if labels is None: labels[i] = str(i) Y.plot('Y',yaxis_thing,fname=ndump,numtype=num_type,legend=labels[i],\ logy=logy,shape=utils.linestyle(j)[0],markevery=utils.linestyle(j)[1]) i += 1 def cdiff(x): # compute 2nd order 
centred differences dx = (np.roll(x, -1) - np.roll(x, 1))/2. # 1st order differences to correct the boundaries dx[0] = x[1] - x[0] dx[-1] = x[-1] - x[-2] return dx def interpolate(x, y, x_new, kind='linear'): inverse_order = (x[-1] < x[0]) xx = x[::-1] if inverse_order else x yy = y[::-1] if inverse_order else y if kind == 'linear': int_func = scipy.interpolate.interp1d(xx, yy, fill_value='extrapolate') y_new = int_func(x_new) elif kind == 'cubic': cs = scipy.interpolate.CubicSpline(xx, yy, extrapolate=False) y_new = cs(x_new) else: print("Error: Unknown interpolation kind '{:s}'.".format(kind)) return None return y_new def any2list(arg): if isinstance(arg, str): return [arg, ] try: return list(arg) except TypeError: return [arg, ] class PPMtools: def __init__(self, verbose=3): ''' Init method. Parameters ---------- verbose: integer Verbosity level as defined in class Messenger. ''' self.__messenger = Messenger(verbose=verbose) self.__isyprofile = isinstance(self, yprofile) self.__isRprofSet= isinstance(self, RprofSet) # This sets which method computes which quantity. self.__compute_methods = {'enuc_C12pg':self.compute_enuc_C12pg, \ 'Hp':self.compute_Hp, \ 'nabla_rho':self.compute_nabla_rho, \ 'nabla_rho_ad':self.compute_nabla_rho_ad, \ 'prad':self.compute_prad, \ 'pgas_by_ptot':self.compute_pgas_by_ptot, \ 'g':self.compute_g, \ 'N2':self.compute_N2, \ 'm':self.compute_m, \ 'mt':self.compute_mt, \ 'r4rho2':self.compute_r4rho2, \ 'rhodot_C12pg':self.compute_rhodot_C12pg, \ 'T9':self.compute_T9, \ 'T9corr':self.compute_T9corr, \ '|Ur|':self.compute_Ur, \ 'Xcld':self.compute_Xcld, \ 'Xdot_C12pg':self.compute_Xdot_C12pg} self.__computable_quantities = self.__compute_methods.keys() def isyprofile(self): return self.__isyprofile def isRprofSet(self): return self.__isRprofSet def get_computable_quantities(self): ''' Returns a list of computable quantities. ''' # Return a copy. 
return list(self.__computable_quantities) def compute(self, quantity, fname, num_type='ndump', extra_args={}): if quantity in self.__computable_quantities: print("Quantity: ",quantity) self.quantity = quantity m = self.__compute_methods[quantity] return m(fname, num_type=num_type) else: self.__messenger.error("Unknown quantity '{:s}'.".format(quantity)) print('The following quantities can be computed:') print(self.get_computable_quantities()) def compute_enuc_C12pg(self, fname, num_type='ndump', fkair=None, fkcld=None, \ atomicnoair=None, atomicnocld=None, airmu=None, cldmu=None, \ T9corr_params={}, Q=1.944, corr_fact=1.): if self.__isyprofile: fv = self.get('FV H+He', fname=fname, num_type=num_type, resolution='l') rho = self.get('Rho', fname=fname, num_type=num_type, resolution='l') rhocld = self.get('Rho H+He', fname=fname, num_type=num_type, \ resolution='l') rhoair = self.get('RHOconv', fname=fname, num_type = num_type, \ resolution='l') if fkair is None or fkcld is None or atomicnoair is None or \ atomicnocld is None: self.__messenger.error('Yprofiles do not contain the values of fkair, ' 'fkcld, atomicnoair, and atomicnocld. You have ' 'to provide via optional parameters.') return None if self.__isRprofSet: fv = self.get('FV', fname=fname, num_type=num_type, resolution='l') rho = self.get('Rho0', fname=fname, num_type=num_type, resolution='l') + \ self.get('Rho1', fname=fname, num_type=num_type, resolution='l') # We allow the user to replace the following values read from the .rprof # files should those ever become wrong/unavailable. 
if fkair is None: fkair = self.get('fkair', fname, num_type=num_type) if fkcld is None: fkcld = self.get('fkcld', fname, num_type=num_type) if atomicnoair is None: atomicnoair = self.get('atomicnoair', fname, num_type=num_type) if atomicnocld is None: atomicnocld = self.get('atomicnocld', fname, num_type=num_type) if airmu is None: airmu = self.get('airmu', fname, num_type=num_type) if cldmu is None: cldmu = self.get('cldmu', fname, num_type=num_type) # Individual densities of the two ideal gases assuming thermal and # pressure equlibrium. rhocld = rho/(fv + (1. - fv)*airmu/cldmu) rhoair = rho/(fv*cldmu/airmu + (1. - fv)) # compute_T9corr() returns the uncorrected temperature if no correction # parameters are supplied. T9 = self.compute_T9corr(fname=fname, num_type=num_type, airmu=airmu, \ cldmu=cldmu, **T9corr_params) TP13 = T9**(1./3.) TP23 = TP13*TP13 TP12 = np.sqrt(T9) TP14 = np.sqrt(TP12) TP32 = T9*TP12 TM13 = 1./TP13 TM23 = 1./TP23 TM32 = 1./TP32 T9inv = 1. / T9 thyng = 2.173913043478260869565 * T9 vc12pg = 20000000.*TM23 * np.exp(-13.692*TM13 - thyng*thyng) vc12pg = vc12pg * (1. + T9*(9.89-T9*(59.8
then please use Rational."%(row_sum)) def _work_out_state_index(self, state_index, given_condition, trans_probs): """ Helper function to extract state space if there is a random symbol in the given condition. """ # if given condition is None, then there is no need to work out # state_space from random variables if given_condition != None: rand_var = list(given_condition.atoms(RandomSymbol) - given_condition.atoms(RandomIndexedSymbol)) if len(rand_var) == 1: state_index = rand_var[0].pspace.set # `not None` is `True`. So the old test fails for symbolic sizes. # Need to build the statement differently. sym_cond = not isinstance(self.number_of_states, (int, Integer)) cond1 = not sym_cond and len(state_index) != trans_probs.shape[0] if cond1: raise ValueError("state space is not compatible with the transition probabilities.") if not isinstance(trans_probs.shape[0], Symbol): state_index = FiniteSet(*[i for i in range(trans_probs.shape[0])]) return state_index @cacheit def _preprocess(self, given_condition, evaluate): """ Helper function for pre-processing the information. 
""" is_insufficient = False if not evaluate: # avoid pre-processing if the result is not to be evaluated return (True, None, None, None) # extracting transition matrix and state space trans_probs, state_index, given_condition = self._extract_information(given_condition) # given_condition does not have sufficient information # for computations if trans_probs == None or \ given_condition == None: is_insufficient = True else: # checking transition probabilities if isinstance(self, DiscreteMarkovChain): self._check_trans_probs(trans_probs, row_sum=1) elif isinstance(self, ContinuousMarkovChain): self._check_trans_probs(trans_probs, row_sum=0) # working out state space state_index = self._work_out_state_index(state_index, given_condition, trans_probs) return is_insufficient, trans_probs, state_index, given_condition def replace_with_index(self, condition): if isinstance(condition, Relational): lhs, rhs = condition.lhs, condition.rhs if not isinstance(lhs, RandomIndexedSymbol): lhs, rhs = rhs, lhs condition = type(condition)(self.index_of.get(lhs, lhs), self.index_of.get(rhs, rhs)) return condition def probability(self, condition, given_condition=None, evaluate=True, **kwargs): """ Handles probability queries for Markov process. Parameters ========== condition: Relational given_condition: Relational/And Returns ======= Probability If the information is not sufficient. Expr In all other cases. Note ==== Any information passed at the time of query overrides any information passed at the time of object creation like transition probabilities, state space. Pass the transition matrix using TransitionMatrixOf, generator matrix using GeneratorMatrixOf and state space using StochasticStateSpaceOf in given_condition using & or And. 
""" check, mat, state_index, new_given_condition = \ self._preprocess(given_condition, evaluate) if check: return Probability(condition, new_given_condition) if isinstance(self, ContinuousMarkovChain): trans_probs = self.transition_probabilities(mat) elif isinstance(self, DiscreteMarkovChain): trans_probs = mat condition = self.replace_with_index(condition) given_condition = self.replace_with_index(given_condition) new_given_condition = self.replace_with_index(new_given_condition) if isinstance(condition, Relational): if isinstance(new_given_condition, And): gcs = new_given_condition.args else: gcs = (new_given_condition, ) min_key_rv = list(new_given_condition.atoms(RandomIndexedSymbol)) rv = list(condition.atoms(RandomIndexedSymbol)) if len(min_key_rv): min_key_rv = min_key_rv[0] for r in rv: if min_key_rv.key > r.key: return Probability(condition) else: min_key_rv = None return Probability(condition) if len(rv) > 1: rv = rv[:2] if rv[0].key < rv[1].key: rv[0], rv[1] = rv[1], rv[0] s = Rational(0, 1) n = len(self.state_space) if isinstance(condition, Eq) or isinstance(condition, Ne): for i in range(0, n): s += self.probability(Eq(rv[0], i), Eq(rv[1], i)) * self.probability(Eq(rv[1], i), new_given_condition) return s if isinstance(condition, Eq) else 1 - s else: upper = 0 greater = False if isinstance(condition, Ge) or isinstance(condition, Lt): upper = 1 if isinstance(condition, Gt) or isinstance(condition, Ge): greater = True for i in range(0, n): if i <= n//2: for j in range(0, i + upper): s += self.probability(Eq(rv[0], i), Eq(rv[1], j)) * self.probability(Eq(rv[1], j), new_given_condition) else: s += self.probability(Eq(rv[0], i), new_given_condition) for j in range(i + upper, n): s -= self.probability(Eq(rv[0], i), Eq(rv[1], j)) * self.probability(Eq(rv[1], j), new_given_condition) return s if greater else 1 - s rv = rv[0] states = condition.as_set() prob, gstate = dict(), None for gc in gcs: if gc.has(min_key_rv): if gc.has(Probability): p, gp = (gc.rhs, 
gc.lhs) if isinstance(gc.lhs, Probability) \ else (gc.lhs, gc.rhs) gr = gp.args[0] gset = Intersection(gr.as_set(), state_index) gstate = list(gset)[0] prob[gset] = p else: _, gstate = (gc.lhs.key, gc.rhs) if isinstance(gc.lhs, RandomIndexedSymbol) \ else (gc.rhs.key, gc.lhs) if any((k not in self.index_set) for k in (rv.key, min_key_rv.key)): raise IndexError("The timestamps of the process are not in it's index set.") states = Intersection(states, state_index) if not isinstance(self.number_of_states, Symbol) else states for state in Union(states, FiniteSet(gstate)): if not isinstance(state, (int, Integer)) or Ge(state, mat.shape[0]) is True: raise IndexError("No information is available for (%s, %s) in " "transition probabilities of shape, (%s, %s). " "State space is zero indexed." %(gstate, state, mat.shape[0], mat.shape[1])) if prob: gstates = Union(*prob.keys()) if len(gstates) == 1: gstate = list(gstates)[0] gprob = list(prob.values())[0] prob[gstates] = gprob elif len(gstates) == len(state_index) - 1: gstate = list(state_index - gstates)[0] gprob = S.One - sum(prob.values()) prob[state_index - gstates] = gprob else: raise ValueError("Conflicting information.") else: gprob = S.One if min_key_rv == rv: return sum([prob[FiniteSet(state)] for state in states]) if isinstance(self, ContinuousMarkovChain): return gprob * sum([trans_probs(rv.key - min_key_rv.key).__getitem__((gstate, state)) for state in states]) if isinstance(self, DiscreteMarkovChain): return gprob * sum([(trans_probs**(rv.key - min_key_rv.key)).__getitem__((gstate, state)) for state in states]) if isinstance(condition, Not): expr = condition.args[0] return S.One - self.probability(expr, given_condition, evaluate, **kwargs) if isinstance(condition, And): compute_later, state2cond, conds = [], dict(), condition.args for expr in conds: if isinstance(expr, Relational): ris = list(expr.atoms(RandomIndexedSymbol))[0] if state2cond.get(ris, None) is None: state2cond[ris] = S.true state2cond[ris] &= expr 
else: compute_later.append(expr) ris = [] for ri in state2cond: ris.append(ri) cset = Intersection(state2cond[ri].as_set(), state_index) if len(cset) == 0: return S.Zero state2cond[ri] = cset.as_relational(ri) sorted_ris = sorted(ris, key=lambda ri: ri.key) prod = self.probability(state2cond[sorted_ris[0]], given_condition, evaluate, **kwargs) for i in range(1, len(sorted_ris)): ri, prev_ri = sorted_ris[i], sorted_ris[i-1] if not isinstance(state2cond[ri], Eq): raise ValueError("The process is in multiple states at %s, unable to determine the probability."%(ri)) mat_of = TransitionMatrixOf(self, mat) if isinstance(self, DiscreteMarkovChain) else GeneratorMatrixOf(self, mat) prod *= self.probability(state2cond[ri], state2cond[prev_ri] & mat_of & StochasticStateSpaceOf(self, state_index), evaluate, **kwargs) for expr in compute_later: prod *= self.probability(expr, given_condition, evaluate, **kwargs) return prod if isinstance(condition, Or): return sum([self.probability(expr, given_condition, evaluate, **kwargs) for expr in condition.args]) raise NotImplementedError("Mechanism for handling (%s, %s) queries hasn't been " "implemented yet."%(condition, given_condition)) def expectation(self, expr, condition=None, evaluate=True, **kwargs): """ Handles expectation queries for markov process. Parameters ========== expr: RandomIndexedSymbol, Relational, Logic Condition for which expectation has to be computed. Must contain a RandomIndexedSymbol of the process. condition: Relational, Logic The given conditions under which computations should be done. Returns ======= Expectation Unevaluated object if computations cannot be done due to insufficient information. Expr In all other cases when the computations are successful. Note ==== Any information passed at the time of query overrides any information passed at the time of object creation like transition probabilities, state space. 
Pass the transition matrix using TransitionMatrixOf, generator matrix using GeneratorMatrixOf and state space using StochasticStateSpaceOf in given_condition using & or And. """ check, mat, state_index, condition = \ self._preprocess(condition, evaluate) if check: return Expectation(expr, condition) rvs = random_symbols(expr) if isinstance(expr, Expr) and isinstance(condition, Eq) \ and len(rvs) == 1: # handle queries similar to E(f(X[i]), Eq(X[i-m], <some-state>)) condition=self.replace_with_index(condition) state_index=self.replace_with_index(state_index) rv = list(rvs)[0] lhsg, rhsg = condition.lhs, condition.rhs if not isinstance(lhsg, RandomIndexedSymbol): lhsg, rhsg = (rhsg, lhsg) if rhsg not in state_index: raise ValueError("%s state is not in the state space."%(rhsg)) if rv.key < lhsg.key: raise ValueError("Incorrect given condition is given, expectation " "time %s < time %s"%(rv.key, rv.key)) mat_of = TransitionMatrixOf(self, mat) if isinstance(self, DiscreteMarkovChain) else GeneratorMatrixOf(self, mat) cond = condition & mat_of & \ StochasticStateSpaceOf(self, state_index) func = lambda s: self.probability(Eq(rv, s), cond) * expr.subs(rv, self._state_index[s]) return sum([func(s) for s in state_index]) raise NotImplementedError("Mechanism for handling (%s, %s) queries hasn't been " "implemented yet."%(expr, condition)) class DiscreteMarkovChain(DiscreteTimeStochasticProcess, MarkovProcess): """ Represents a finite discrete time-homogeneous Markov chain. This type of Markov Chain can be uniquely characterised by its (ordered) state space and its one-step transition probability matrix. 
Parameters ========== sym: The name given to the Markov Chain state_space: Optional, by default, Range(n) trans_probs: Optional, by default, MatrixSymbol('_T', n, n) Examples ======== >>> from sympy.stats import DiscreteMarkovChain, TransitionMatrixOf, P, E >>> from sympy import Matrix, MatrixSymbol, Eq, symbols >>> T = Matrix([[0.5, 0.2, 0.3],[0.2, 0.5, 0.3],[0.2, 0.3, 0.5]]) >>> Y = DiscreteMarkovChain("Y", [0, 1, 2], T) >>> YS = DiscreteMarkovChain("Y") >>> Y.state_space FiniteSet(0, 1, 2) >>> Y.transition_probabilities Matrix([ [0.5, 0.2, 0.3], [0.2, 0.5, 0.3], [0.2, 0.3, 0.5]]) >>> TS
cidx)) for cidx in idx_chunks] res_cnt = 0 while result: tmp = result.pop(0).get() for i, j in enumerate(tmp[1]): if options.verbose: log_progress(res_cnt, gene_counts.shape[0]) res_cnt += 1 pval[j] = tmp[0][i] if options.verbose: log_progress(gene_counts.shape[0], gene_counts.shape[0]) print('') pool.terminate() pool.join() except KeyboardInterrupt: print('Keyboard Interrupt - exiting', file=sys.stderr) pool.terminate() pool.join() sys.exit(1) else: (pval, _) = test_count_chunk(gene_counts, disp_adj, sf, dmatrix0, dmatrix1, options, test_idx, np.arange(gene_counts.shape[0]), log=options.verbose) if options.verbose: print('') return pval def adj_pval(pvals, options): """ Perform multiple testing correction. """ pvals_adj = pvals.copy() idx = ~np.isnan(pvals) if options.correction == 'BH': method = 'fdr_bh' elif options.correction == 'Bonferroni': method = 'bonferroni' elif options.correction == 'Holm': method = 'holm' elif options.correction == 'Hochberg': method = 'simes-hochberg' elif options.correction == 'Hommel': method = 'hommel' elif options.correction == 'BY': method = 'fdr_by' elif options.correction == 'TSBH': method = 'tsbh' else: sys.stderr.write('ERROR: The methods for multiple test correction can only accept \'Bonferroni\', \'Holm\', \'Hochberg\', \'Hommel\', \'BH\', \'BY\' or \'TSBH\' as its input.\n') sys.exit() mtc = sms.stats.multicomp.multipletests(pvals[idx], alpha=0.1, method=method, returnsorted=False) pvals_adj[idx] = mtc[1] return pvals_adj def calculate_varPrior(disp_raw, disp_fitted, idx, varLogDispSamp): logRes = np.log(disp_raw[idx]) - np.log(disp_fitted[idx]) stdLogRes = np.median(abs(logRes - np.median(logRes))) * 1.4826 varLogRes = stdLogRes ** 2 varPrior = varLogRes - varLogDispSamp return max(varPrior, 0.1) def run_testing(cov, dmatrix0, dmatrix1, sf, options, event_type, test_idx, r_idx=None): ### estimate dispersion (disp_raw, disp_raw_conv) = estimate_dispersion(cov, dmatrix1, sf, options, test_idx, event_type) ### fit dispersion 
(disp_fitted, Lambda, disp_idx) = fit_dispersion(cov, disp_raw, (disp_raw_conv[:, 0] & test_idx)[:, np.newaxis], sf, options, dmatrix1, event_type) ### adjust dispersion estimates (disp_adj, disp_adj_conv) = adjust_dispersion(cov, dmatrix1, disp_raw, disp_fitted, disp_idx, sf, options, event_type) ### do test pvals = test_count(cov, disp_adj, sf, dmatrix0, dmatrix1, options, test_idx) ### revert from unique if r_idx is not None: pvals = pvals[r_idx] ### reshape and adjust p-values pvals = pvals.reshape((2, int(pvals.shape[0] / 2))).T m_idx = np.zeros(shape=(pvals.shape[0],), dtype='int') #for i in range(pvals.shape[0]): # if np.all(np.isnan(pvals[i, :])): # continue # elif np.isnan(pvals[i, 0]): # m_idx[i] = 1 # elif np.isnan(pvals[i, 1]): # m_idx[i] = 0 # else: # m_idx[i] = np.argmin(pvals[i, :]) #pvals = 2 * np.array([pvals[i, m_idx[i]] for i in range(pvals.shape[0])], dtype='float') for i in range(pvals.shape[0]): if np.all(np.isnan(pvals[i, :])): continue elif np.isnan(pvals[i, 0]): #pvals[i, 1] = np.nan m_idx[i] = 1 elif np.isnan(pvals[i, 1]): #pvals[i, 0] = np.nan m_idx[i] = 0 else: m_idx[i] = np.argmax(pvals[i, :]) #m_idx[i] = np.argmin(pvals[i, :]) #pvals[i, m_idx[i]] = min(1, 2*pvals[i, m_idx[i]]) pvals = np.array([pvals[i, m_idx[i]] for i in range(pvals.shape[0])], dtype='float') offset = int(cov.shape[0] / 2) cov_used = np.array([cov[i, :] if m_idx[i] == 0 else cov[i + offset, :] for i in range(pvals.shape[0])], dtype=cov.dtype) disp_raw_used = np.array([disp_raw[i] if m_idx[i] == 0 else disp_raw[i + offset] for i in range(pvals.shape[0])], dtype=disp_raw.dtype) disp_adj_used = np.array([disp_adj[i] if m_idx[i] == 0 else disp_adj[i + offset] for i in range(pvals.shape[0])], dtype=disp_adj.dtype) pvals[pvals > 1] = 1 return (pvals, cov_used, disp_raw_used, disp_adj_used) def spladder_test(options): ### parse parameters from options object options = settings.parse_args(options, identity='test') options.use_exon_counts = False ### make sure spladder has 
been run in the correct mode if not os.path.exists(os.path.join(options.outdir, 'spladder', 'genes_graph_conf%i.merge_graphs.pickle' % options.confidence)): sys.stderr.write('\nERROR: Testing mode can only be run on SplAdder outputs generated using the default merging strategy: --merge-strat merge_graphs\n\n') sys.exit(1) non_alt_tag = '' if options.non_alt_norm: non_alt_tag = '.non_alt' ### generate output directory outdir = os.path.join(options.outdir, 'testing%s' % non_alt_tag) if options.timestamp == 'y': outdir = '%s_%s' % (outdir, str(datetime.datetime.now()).replace(' ', '_')) if options.labelA != 'condA' and options.labelB != 'condB': outdir = '%s_%s_vs_%s' % (outdir, options.labelA, options.labelB) if options.out_tag != '-': outdir += '_%s' % options.out_tag if not os.path.exists(outdir): os.makedirs(outdir) if options.diagnose_plots: options.plot_dir = os.path.join(outdir, 'plots') if not os.path.exists(options.plot_dir): os.makedirs(options.plot_dir) val_tag = '' if options.validate_sg: val_tag = '.validated' options.fname_genes = os.path.join(options.outdir, 'spladder', 'genes_graph_conf%i.%s%s.pickle' % (options.confidence, options.merge, val_tag)) options.fname_count_in = os.path.join(options.outdir, 'spladder', 'genes_graph_conf%i.%s%s.count.hdf5' % (options.confidence, options.merge, val_tag)) options.fname_exp_hdf5 = os.path.join(options.outdir, 'spladder', 'genes_graph_conf%i.%s%s.gene_exp%s.hdf5' % (options.confidence, options.merge, val_tag, non_alt_tag)) condition_strains = None if options.subset_samples: condition_strains = np.unique(np.r_[np.array(options.conditionA), np.array(options.conditionB)]) _hash = hashlib.sha256() _hash.update(np.unique(condition_strains)) options.fname_exp_hdf5 = os.path.join(options.outdir, 'spladder', 'genes_graph_conf%i.%s%s.gene_exp%s.%s.hdf5' % (options.confidence, options.merge, val_tag, non_alt_tag, _hash.hexdigest())) if os.path.exists(options.fname_exp_hdf5): if options.verbose: print('Loading expression 
counts from %s' % options.fname_exp_hdf5) IN = h5py.File(options.fname_exp_hdf5, 'r') gene_counts = IN['raw_count'][:] gene_strains = decodeUTF8(IN['strains'][:]) gene_strains_all = decodeUTF8(IN['all_strains'][:]) gene_ids = decodeUTF8(IN['genes'][:]) IN.close() else: gene_counts, gene_strains_all, gene_strains, gene_ids = get_gene_expression(options, fn_out=options.fname_exp_hdf5, strain_subset=condition_strains) gene_strains = np.array([x.split(':')[1] if ':' in x else x for x in gene_strains]) gene_strains_all = np.array([x.split(':')[1] if ':' in x else x for x in gene_strains_all]) ### get index of samples for difftest idx1 = np.where(np.in1d(gene_strains, options.conditionA))[0] idx2 = np.where(np.in1d(gene_strains, options.conditionB))[0] idx1_all = np.where(np.in1d(gene_strains_all, options.conditionA))[0] idx2_all = np.where(np.in1d(gene_strains_all, options.conditionB))[0] ### subset expression counts to tested samples gene_counts = gene_counts[:, np.r_[idx1, idx2]] gene_strains = gene_strains[np.r_[idx1, idx2]] ### estimate size factors for library size normalization sf_ge = get_size_factors(gene_counts, options) ### handle outliers (mask with capped value) if options.cap_exp_outliers: outlier_cnt = 0 for gidx in range(gene_counts.shape[0]): log_counts = np.log2(gene_counts[gidx, :] / sf_ge + 1) p25 = scoreatpercentile(log_counts, 25) p75 = scoreatpercentile(log_counts, 75) iqr = (p75 - p25) if iqr > 0: o_idx = np.where(log_counts > p75+(1.5*iqr))[0] if o_idx.shape[0] > 0: cap = 2**(p75+(1.5*iqr))-1 gene_counts[gidx, o_idx] = (cap * sf_ge[o_idx]) outlier_cnt += o_idx.shape[0] if outlier_cnt > 0: total_cnt = gene_counts.shape[0] * gene_counts.shape[1] sys.stdout.write('\nCapped %i/%i outlier expression counts (%.2f percent)\n' % (outlier_cnt, total_cnt, float(outlier_cnt) / total_cnt * 100)) ### test each event type individually for event_type in options.event_types: if options.verbose: print('Testing %s events' % event_type) options.fname_events = 
os.path.join(options.outdir, 'merge_graphs_%s_C%i.counts.hdf5' % (event_type, options.confidence)) ### check whether we have any events at all with h5py.File(options.fname_events, 'r') as IN: if not 'conf_idx' in IN: print('SKIPPING: no events of type %s available for testing in file %s\n' % (event_type, options.fname_events), file=sys.stderr) continue ### quantify events (cov, gene_idx, event_idx, event_ids, event_strains) = quantify.quantify_from_counted_events(options.fname_events, idx1_all, idx2_all, event_type, options, gen_event_ids=False, high_mem=options.high_memory) if options.cap_outliers: log_counts = np.log2(cov[0] + 1) p25 = scoreatpercentile(log_counts, 25, axis=1) p75 = scoreatpercentile(log_counts, 75, axis=1) iqr = (p75 - p25) cap = 2**(p75 + (3*iqr)) - 1 for c in np.where(iqr > 0)[0]: cov[0][c, log_counts[c, :] > (p75[c] + 3*iqr[c])] = cap[c] log_counts = np.log2(cov[1] + 1) p25 = scoreatpercentile(log_counts, 25, axis=1) p75 = scoreatpercentile(log_counts, 75, axis=1) iqr = (p75 - p25) cap = 2**(p75 + (3*iqr)) - 1 for c in np.where(iqr > 0)[0]: cov[1][c, log_counts[c, :] > (p75[c] + 3*iqr[c])] = cap[c] ### estimate size factors sf_ev = get_size_factors(np.vstack(cov), options) sf = np.r_[sf_ev, sf_ge] assert(np.all(gene_strains == event_strains)) ### map gene expression to event order curr_gene_counts = gene_counts[gene_idx, :] ### filter for min expression k_idx1 = ((np.mean(cov[0][:, idx1.shape[0]:] <= 1, axis=1) <= options.max_0_frac) | \ (np.mean(cov[0][:, :idx1.shape[0]] <= 1, axis=1) <= options.max_0_frac)) k_idx2 = ((np.mean(cov[1][:, idx1.shape[0]:] <= 1, axis=1) <= options.max_0_frac) | \ (np.mean(cov[1][:, :idx1.shape[0]] <= 1, axis=1) <= options.max_0_frac)) k_idx = np.where(k_idx1 | k_idx2)[0] if options.verbose: print('Exclude %i of %i %s events (%.2f percent) from testing due to low coverage' % (cov[0].shape[0] - k_idx.shape[0], cov[0].shape[0], event_type, (1 - float(k_idx.shape[0]) / cov[0].shape[0]) * 100)) if k_idx.shape[0] == 
0: print('All events of type %s were filtered out due to low coverage. Please try re-running with less stringent filter criteria' % event_type) continue cov[0] = cov[0][k_idx, :] cov[1] = cov[1][k_idx, :] curr_gene_counts = curr_gene_counts[k_idx, :] event_idx = event_idx[k_idx] gene_idx = gene_idx[k_idx] if not event_ids is None: event_ids = [x[k_idx] for x in event_ids] k_idx1 = k_idx1[k_idx] k_idx2 = k_idx2[k_idx] cov[0] = np.around(np.hstack([cov[0], curr_gene_counts])) cov[1] = np.around(np.hstack([cov[1], curr_gene_counts])) cov = np.vstack(cov) if not event_ids is None: event_ids = np.hstack(event_ids) test_idx = np.r_[k_idx1, k_idx2] tidx = np.arange(idx1.shape[0]) #if options.debug: # for i in range(cov.shape[0]): # fig = plt.figure(figsize=(8, 6), dpi=100) # ax = fig.add_subplot(111) # ax.hist(cov[i, :] * sf, 50, histtype='bar', rwidth=0.8) # #ax.plot(np.arange(cov.shape[1]), sorted(cov[i, :]), 'bo') # ax.set_title('Count Distribution - Sample %i' % i ) # plt.savefig('count_dist.%i.pdf' % i, format='pdf', bbox_inches='tight') # plt.close(fig) ### build design matrix for testing dmatrix1 = np.zeros((cov.shape[1], 4), dtype='int') dmatrix1[:, 0] = 1 # intercept dmatrix1[tidx, 1] = 1 # delta splice dmatrix1[tidx, 2] = 1 # delta gene exp dmatrix1[tidx + (idx1.shape[0] + idx2.shape[0]), 2] = 1 # delta gene exp dmatrix1[(idx1.shape[0] + idx2.shape[0]):, 3] = 1 # is gene exp #dmatrix1[:(idx1.shape[0] + idx2.shape[0]), 4] = 1
stock in cip_df have a sector? ie. ETF? return None assert set(df.columns) == set(["sum", "asx_code", "sector_name"]) df["increasing"] = df.apply( lambda row: "up" if row["sum"] >= 0.0 else "down", axis=1 ) sector_names = ( df["sector_name"].value_counts().index.tolist() ) # sort bars by value count (ascending) sector_names_cat = pd.Categorical(df["sector_name"], categories=sector_names) df = df.assign(sector_name_cat=sector_names_cat) # print(df) plot = ( p9.ggplot(df, p9.aes(x="factor(sector_name_cat)", fill="factor(increasing)")) + p9.geom_bar() + p9.coord_flip() ) return user_theme( plot, x_axis_label="Sector", y_axis_label="Number of stocks", subplots_adjust={"left": 0.2, "right": 0.85}, legend_title=p9.element_blank(), asxtrade_want_fill_d=True, ) def plot_heatmap( timeframe: Timeframe, ld: LazyDictionary, bin_cb=price_change_bins ) -> p9.ggplot: """ Plot the specified data matrix as binned values (heatmap) with X axis being dates over the specified timeframe and Y axis being the percentage change on the specified date (other metrics may also be used, but you will likely need to adjust the bins) :rtype: p9.ggplot instance representing the heatmap """ df = ld["cip_df"] bins, labels = bin_cb() # print(df.columns) # print(bins) try: # NB: this may fail if no prices are available so we catch that error and handle accordingly... 
for date in df.columns: df["bin_{}".format(date)] = pd.cut(df[date], bins, labels=labels) sentiment_plot = make_sentiment_plot( df, plot_text_labels=timeframe.n_days <= 30 ) # show counts per bin iff not too many bins return sentiment_plot except KeyError: return None def plot_sector_performance(dataframe: pd.DataFrame, descriptor: str): assert len(dataframe) > 0 dataframe["date"] = pd.to_datetime(dataframe["date"], format="%Y-%m-%d") # now do the plot labels = [ "Number of stocks up >5%", "Number of stocks down >5%", "Remaining stocks", ] # print(dataframe) dataframe.columns = labels + ["date"] melted_df = dataframe.melt(value_vars=labels, id_vars="date") plot = ( p9.ggplot( melted_df, p9.aes("date", "value", colour="variable", group="factor(variable)"), ) + p9.facet_wrap("~variable", ncol=1, scales="free_y") + p9.geom_line(size=1.3) ) return user_theme(plot) def auto_dates(): locator = mdates.AutoDateLocator() formatter = mdates.ConciseDateFormatter(locator) formatter.formats = [ "%y", # ticks are mostly years "%b", # ticks are mostly months "%d", # ticks are mostly days "%H:%M", # hrs "%H:%M", # min "%S.%f", ] # secs # these are mostly just the level above... 
formatter.zero_formats = [""] + formatter.formats[:-1] # ...except for ticks that are mostly hours, then it is nice to have # month-day: formatter.zero_formats[3] = "%d-%b" formatter.offset_formats = [ "", "%Y", "%b %Y", "%d %b %Y", "%d %b %Y", "%d %b %Y %H:%M", ] return (locator, formatter) def relative_strength(prices, n=14): # see https://stackoverflow.com/questions/20526414/relative-strength-index-in-python-pandas assert n > 0 assert prices is not None # Get the difference in price from previous step delta = prices.diff() # Get rid of the first row, which is NaN since it did not have a previous # row to calculate the differences delta = delta[1:] # Make the positive gains (up) and negative gains (down) Series up, down = delta.copy(), delta.copy() up[up < 0] = 0 down[down > 0] = 0 # Calculate the EWMA roll_up1 = up.ewm(span=n).mean() roll_down1 = down.abs().ewm(span=n).mean() # Calculate the RSI based on EWMA rs = roll_up1 / roll_down1 rsi = 100.0 - (100.0 / (1.0 + rs)) # NB: format is carefully handled here, so downstream code doesnt break new_date = datetime.strftime( datetime.now(), "%Y-%m-%d " ) # make sure it is not an existing date # print(new_date) rsi.at[new_date] = np.nan # ensure data series are the same length for matplotlib # print(len(rsi), " ", len(prices)) # assert len(rsi) == len(prices) return rsi @timing def plot_momentum(stock: str, timeframe: Timeframe, ld: LazyDictionary) -> plt.Figure: assert len(stock) > 0 assert "stock_df" in ld or "stock_df_200" in ld start_date = timeframe.earliest_date stock_df = ld["stock_df_200"] if "stock_df_200" in ld else ld["stock_df"] last_price = stock_df["last_price"] volume = stock_df["volume"] day_low_price = stock_df["day_low_price"] day_high_price = stock_df["day_high_price"] # print(last_price) # print(volume) # print(day_low_price) # print(day_high_price) plt.rc("axes", grid=True) plt.rc("grid", color="0.75", linestyle="-", linewidth=0.5) textsize = 8 left, width = 0.1, 0.8 rect1 = [left, 0.7, width, 
0.2] rect2 = [left, 0.3, width, 0.4] rect3 = [left, 0.1, width, 0.2] fig = plt.figure(facecolor="white", figsize=(12, 6)) axescolor = "#f6f6f6" # the axes background color ax1 = fig.add_axes(rect1, facecolor=axescolor) # left, bottom, width, height ax2 = fig.add_axes(rect2, facecolor=axescolor, sharex=ax1) ax2t = ax2.twinx() ax3 = fig.add_axes(rect3, facecolor=axescolor, sharex=ax1) fig.autofmt_xdate() # plot the relative strength indicator rsi = relative_strength(last_price) # print(len(rsi)) fillcolor = "darkgoldenrod" timeline = pd.to_datetime(last_price.index, format="%Y-%m-%d") ax1.plot(timeline, rsi, color=fillcolor) ax1.axhline(70, color="darkgreen") ax1.axhline(30, color="darkgreen") ax1.fill_between( timeline, rsi, 70, where=(rsi >= 70), facecolor=fillcolor, edgecolor=fillcolor ) ax1.fill_between( timeline, rsi, 30, where=(rsi <= 30), facecolor=fillcolor, edgecolor=fillcolor ) ax1.text( 0.6, 0.9, ">70 = overbought", va="top", transform=ax1.transAxes, fontsize=textsize, ) ax1.text(0.6, 0.1, "<30 = oversold", transform=ax1.transAxes, fontsize=textsize) ax1.set_ylim(0, 100) ax1.set_yticks([30, 70]) ax1.text( 0.025, 0.95, "RSI (14)", va="top", transform=ax1.transAxes, fontsize=textsize ) # ax1.set_title('{} daily'.format(stock)) # plot the price and volume data dx = 0.0 low = day_low_price + dx high = day_high_price + dx deltas = np.zeros_like(last_price) deltas[1:] = np.diff(last_price) up = deltas > 0 ax2.vlines(timeline[up], low[up], high[up], color="black", label="_nolegend_") ax2.vlines(timeline[~up], low[~up], high[~up], color="black", label="_nolegend_") ma20 = last_price.rolling(window=20).mean() ma200 = last_price.rolling(window=200, min_periods=50).mean() # timeline = timeline.to_list() (linema20,) = ax2.plot(timeline, ma20, color="blue", lw=2, label="MA (20)") (linema200,) = ax2.plot(timeline, ma200, color="red", lw=2, label="MA (200)") assert linema20 is not None assert linema200 is not None props = font_manager.FontProperties(size=10) leg = 
ax2.legend(loc="lower left", shadow=True, fancybox=True, prop=props) leg.get_frame().set_alpha(0.5) volume = (last_price * volume) / 1e6 # dollar volume in millions # print(volume) vmax = np.nanmax(volume) # print(vmax) poly = ax2t.fill_between( timeline, volume.to_list(), 0, alpha=0.5, label="Volume", facecolor=fillcolor, edgecolor=fillcolor, ) assert poly is not None # avoid unused variable from pylint ax2t.set_ylim(0, 5 * vmax) ax2t.set_yticks([]) # compute the MACD indicator fillcolor = "darkslategrey" n_fast = 12 n_slow = 26 n_ema = 9 emafast = last_price.ewm(span=n_fast, adjust=False).mean() emaslow = last_price.ewm(span=n_slow, adjust=False).mean() macd = emafast - emaslow nema = macd.ewm(span=n_ema, adjust=False).mean() ax3.plot(timeline, macd, color="black", lw=2) ax3.plot(timeline, nema, color="blue", lw=1) ax3.fill_between( timeline, macd - nema, 0, alpha=0.3, facecolor=fillcolor, edgecolor=fillcolor ) ax3.text( 0.025, 0.95, "MACD ({}, {}, {})".format(n_fast, n_slow, n_ema), va="top", transform=ax3.transAxes, fontsize=textsize, ) ax3.set_yticks([]) locator, formatter = auto_dates() for ax in ax1, ax2, ax2t, ax3: ax.xaxis.set_major_locator(locator) ax.xaxis.set_major_formatter(formatter) plt.xticks(fontsize=8) try: plt.xlim(left=datetime.strptime(start_date, "%Y-%m-%d")) except IndexError: print("WARNING: unable to set plot start_date - things may look weird") plt.plot() fig = plt.gcf() plt.close(fig) return fig @timing def plot_trend(sample_period="M", ld: LazyDictionary = None) -> str: """ Given a dataframe of a single stock from company_prices() this plots the highest price in each month over the time period of the dataframe. """ assert "stock_df" in ld def inner_date_fmt(dates_to_format): results = [] for d in dates_to_format: d -= timedelta( weeks=4 ) # breaks are set to the end of the month rather than the start... 
so results.append(d.strftime("%Y-%m")) return results stock_df = ld["stock_df"] # print(stock_df) dataframe = stock_df.filter(items=["last_price"]) dataframe.index = pd.to_datetime(dataframe.index, format="%Y-%m-%d") dataframe = dataframe.resample(sample_period).max() # print(dataframe) plot = ( p9.ggplot( dataframe, p9.aes( x="dataframe.index", y=dataframe.columns[0], fill=dataframe.columns[0] ), ) + p9.geom_bar(stat="identity", alpha=0.7) + p9.scale_x_datetime( labels=inner_date_fmt ) # dont print day (always 1st day of month due to resampling) ) return user_theme(plot, y_axis_label="$ AUD", asxtrade_want_fill_continuous=True) def plot_points_by_rule(net_points_by_rule: defaultdict(int)) -> p9.ggplot: if net_points_by_rule is None or len(net_points_by_rule) < 1: return None rows = [] for k, v in net_points_by_rule.items(): rows.append({"rule": str(k), "net_points": v}) df = pd.DataFrame.from_records(rows) plot = ( p9.ggplot(df, p9.aes(x="rule", y="net_points", fill="net_points")) + p9.geom_bar(stat="identity", alpha=0.7) + p9.coord_flip() ) return user_theme( plot, x_axis_label="Rule", y_axis_label="Contributions to points by rule", subplots_adjust={"left": 0.2}, asxtrade_want_fill_continuous=True, ) def plot_boxplot_series(df, normalisation_method=None): """ Treating each column as a separate boxplot and each row as an independent observation (ie. different company) render a series of box plots to identify a shift in performance from the observations. normalisation_method should be one of the values present in SectorSentimentSearchForm.normalisation_choices """ # and plot the normalised data if normalisation_method is None or normalisation_method == "1": normalized_df = df y_label = "Percentage change" elif normalisation_method == "2": normalized_df = (df - df.min()) / (df.max() - df.min()) y_label = "Percentage change (min/max. scaled)" else: normalized_df = df / df.max(axis=0) # div by max if all else fails... 
y_label = "Percentage change (normalised by dividing by max)" n_inches = len(df.columns) / 5 melted = normalized_df.melt(ignore_index=False).dropna() plot = ( p9.ggplot(melted, p9.aes(x="fetch_date", y="value")) + p9.geom_boxplot(outlier_colour="blue") + p9.coord_flip() ) return user_theme(plot, y_axis_label=y_label, figure_size=(12, n_inches)) def plot_sector_field(df: pd.DataFrame, field, n_col=3): # print(df.columns) # assert set(df.columns) == set(['sector', 'date', 'mean_pe', 'sum_pe', 'sum_eps', 'mean_eps', 'n_stocks'])
#!/usr/bin/python # -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*- """ Role ==== The ``PluginManager`` loads plugins that enforce the `Plugin Description Policy`_, and offers the most simple methods to activate and deactivate the plugins once they are loaded. .. note:: It may also classify the plugins in various categories, but this behaviour is optional and if not specified elseway all plugins are stored in the same default category. .. note:: It is often more useful to have the plugin manager behave like singleton, this functionality is provided by ``PluginManagerSingleton`` Plugin Description Policy ========================= When creating a ``PluginManager`` instance, one should provide it with a list of directories where plugins may be found. In each directory, a plugin should contain the following elements: For a *Standard* plugin: ``myplugin.yapsy-plugin`` A *plugin info file* identical to the one previously described. ``myplugin`` A directory ontaining an actual Python plugin (ie with a ``__init__.py`` file that makes it importable). The upper namespace of the plugin should present a class inheriting the ``IPlugin`` interface (the same remarks apply here as in the previous case). For a *Single file* plugin: ``myplugin.yapsy-plugin`` A *plugin info file* which is identified thanks to its extension, see the `Plugin Info File Format`_ to see what should be in this file. The extension is customisable at the ``PluginManager``'s instanciation, since one may usually prefer the extension to bear the application name. ``myplugin.py`` The source of the plugin. This file should at least define a class inheriting the ``IPlugin`` interface. This class will be instanciated at plugin loading and it will be notified the activation/deactivation events. Plugin Info File Format ----------------------- The plugin info file gathers, as its name suggests, some basic information about the plugin. 
- it gives crucial information needed to be able to load the plugin

- it provides some documentation like information like the plugin
  author's name and a short description of the plugin functionality.

Here is an example of what such a file should contain::

   [Core]
   Name = My plugin Name
   Module = the_name_of_the_plugin_to_load_with_no_py_ending

   [Documentation]
   Description = What my plugin broadly does
   Author = My very own name
   Version = 0.1
   Website = My very own website

.. note:: From such plugin descriptions, the ``PluginManager`` will
          build its own representations of the plugins as instances of
          the :doc:`PluginInfo` class.

Extensibility
=============

Several mechanisms have been put up to help extending the basic
functionalities of the provided classes.

A few *hints* to help you extend those classes:

If the new functionalities do not overlap the ones already implemented,
then they should be implemented as a Decorator class of the base plugin.
This should be done by inheriting the ``PluginManagerDecorator``.

If this previous way is not possible, then the functionalities should be
added as a subclass of ``PluginManager``.

.. note:: The first method is highly preferred since it makes it
          possible to have a more flexible design where one can pick
          several functionalities and literally *add* them to get an
          object corresponding to one's precise needs.

API
===

"""

import sys
import os
import logging
import ConfigParser  # NOTE(review): Python 2 module; renamed 'configparser' in Python 3

from relo.yapsy.IPlugin import IPlugin
from relo.yapsy.PluginInfo import PluginInfo


PLUGIN_NAME_FORBIDDEN_STRING=";;"
"""
.. warning:: This string (';;' by default) is forbidden in plugin names,
             and will be usable to describe lists of plugins for
             instance (see :doc:`ConfigurablePluginManager`)
"""


class PluginManager(object):
	"""
	Manage several plugins by ordering them in categories.
	The mechanism for searching and loading the plugins is already
	implemented in this class so that it can be used directly (hence it
	can be considered as a bit more than a mere interface).

	The file describing a plugin must be written in the syntax compatible
	with Python's ConfigParser module as in the `Plugin Info File Format`_
	"""

	def __init__(self,
				 categories_filter={"Default":IPlugin},
				 directories_list=None,
				 plugin_info_ext="yapsy-plugin"):
		"""
		Initialize the mapping of the categories and set the list of
		directories where plugins may be.

		This can also be set by direct call the methods:

		- ``setCategoriesFilter`` for ``categories_filter``
		- ``setPluginPlaces`` for ``directories_list``
		- ``setPluginInfoExtension`` for ``plugin_info_ext``

		You may look at these function's documentation for the meaning
		of each corresponding arguments.
		"""
		# NOTE(review): categories_filter is a mutable default argument,
		# shared across calls; it appears harmless here because
		# setCategoriesFilter copies it — confirm before changing.
		self.setPluginInfoClass(PluginInfo)
		self.setCategoriesFilter(categories_filter)
		self.setPluginPlaces(directories_list)
		self.setPluginInfoExtension(plugin_info_ext)

	def setCategoriesFilter(self, categories_filter):
		"""
		Set the categories of plugins to be looked for as well as the
		way to recognise them.

		The ``categories_filter`` first defines the various categories
		in which the plugins will be stored via its keys and it also
		defines the interface that has to be inherited by the actual
		plugin class belonging to each category.
		"""
		self.categories_interfaces = categories_filter.copy()
		# prepare the mapping from categories to plugin lists
		self.category_mapping = {}
		# also maps the plugin info files (useful to avoid loading
		# twice the same plugin...)
		self._category_file_mapping = {}
		for categ in categories_filter:
			self.category_mapping[categ] = []
			self._category_file_mapping[categ] = []

	def setPluginInfoClass(self,picls):
		"""
		Set the class that holds PluginInfo.

		The class should inherit from ``PluginInfo``.
		"""
		self._plugin_info_cls = picls

	def getPluginInfoClass(self):
		"""
		Get the class that holds PluginInfo.
The class should inherit from ``PluginInfo``. """ return self._plugin_info_cls def setPluginPlaces(self, directories_list): """ Set the list of directories where to look for plugin places. """ if directories_list is None: directories_list = [os.path.dirname(__file__)] self.plugins_places = directories_list def setPluginInfoExtension(self,plugin_info_ext): """ Set the extension that identifies a plugin info file. The ``plugin_info_ext`` is the extension that will have the informative files describing the plugins and that are used to actually detect the presence of a plugin (see ``collectPlugins``). """ self.plugin_info_ext = plugin_info_ext def getCategories(self): """ Return the list of all categories. """ return self.category_mapping.keys() def removePluginFromCategory(self,plugin,category_name): """ Remove a plugin from the category where it's assumed to belong. """ self.category_mapping[category_name].remove(plugin) def appendPluginToCategory(self,plugin,category_name): """ Append a new plugin to the given category. """ self.category_mapping[category_name].append(plugin) def getPluginsOfCategory(self,category_name): """ Return the list of all plugins belonging to a category. """ return self.category_mapping[category_name][:] def getAllPlugins(self): """ Return the list of all plugins (belonging to all categories). """ allPlugins = [] for pluginsOfOneCategory in self.category_mapping.itervalues(): allPlugins.extend(pluginsOfOneCategory) return allPlugins def _gatherCorePluginInfo(self, directory, filename): """ Gather the core information (name, and module to be loaded) about a plugin described by it's info file (found at 'directory/filename'). Return an instance of ``self.plugin_info_cls`` and the config_parser used to gather the core data *in a tuple*, if the required info could be localised, else return ``(None,None)``. .. note:: This is supposed to be used internally by subclasses and decorators. 
""" # now we can consider the file as a serious candidate candidate_infofile = os.path.join(directory,filename) # parse the information file to get info about the plugin config_parser = ConfigParser.SafeConfigParser() try: config_parser.read(candidate_infofile) except: logging.debug("Could not parse the plugin file %s" % candidate_infofile) return (None, None) # check if the basic info is available if not config_parser.has_section("Core"): logging.debug("Plugin info file has no 'Core' section (in %s)" % candidate_infofile) return (None, None) if not config_parser.has_option("Core","Name") or not config_parser.has_option("Core","Module"): logging.debug("Plugin info file has no 'Name' or 'Module' section (in %s)" % candidate_infofile) return (None, None) # check that the given name is valid name = config_parser.get("Core", "Name") name = name.strip() if PLUGIN_NAME_FORBIDDEN_STRING in name: logging.debug("Plugin name contains forbiden character: %s (in %s)" % (PLUGIN_NAME_FORBIDDEN_STRING, candidate_infofile)) return (None, None) # start collecting essential info plugin_info = self._plugin_info_cls(name, os.path.join(directory,config_parser.get("Core", "Module"))) return (plugin_info,config_parser) def gatherBasicPluginInfo(self, directory,filename): """ Gather some basic documentation about the plugin described by it's info file (found at 'directory/filename'). Return an instance of ``self.plugin_info_cls`` gathering the required informations. 
See also: ``self._gatherCorePluginInfo`` """ plugin_info,config_parser = self._gatherCorePluginInfo(directory, filename) if plugin_info is None: return None # collect additional (but usually quite usefull) information if config_parser.has_section("Documentation"): if config_parser.has_option("Documentation","Author"): plugin_info.author = config_parser.get("Documentation", "Author") if config_parser.has_option("Documentation","Version"): plugin_info.setVersion(config_parser.get("Documentation", "Version")) if config_parser.has_option("Documentation","Website"): plugin_info.website = config_parser.get("Documentation", "Website") if config_parser.has_option("Documentation","Copyright"): plugin_info.copyright = config_parser.get("Documentation", "Copyright") if config_parser.has_option("Documentation","Description"): plugin_info.description = config_parser.get("Documentation", "Description") return plugin_info def getPluginCandidates(self): """ Return the list of possible plugins. Each possible plugin (ie a candidate) is described by a 3-uple: (info file path, python file path, plugin info instance) .. warning: locatePlugins must be called before ! """ if not hasattr(self, '_candidates'): raise ValueError("locatePlugins must be called before getPluginCandidates") return self._candidates[:] def removePluginCandidate(self,candidateTuple): """ Remove a given candidate from the list of plugins that should be loaded. The candidate must be represented by the same tuple described
title = self.tr("Recent Workflows") dialog.setWindowTitle(title) template = ( '<h3 style="font-size: 26px">\n' #'<img height="26" src="canvas_icons:Recent.svg">\n' "{0}\n" "</h3>" ) dialog.setHeading(template.format(title)) dialog.setModel(model) model.delayedScanUpdate() status = dialog.exec_() index = dialog.currentIndex() dialog.deleteLater() model.deleteLater() if status == QDialog.Accepted: if not self.pre_close_save(): return QDialog.Rejected selected = model.item(index) self.load_scheme(str(selected.path())) return status def tutorial_scheme(self, *args): """Browse a collection of tutorial schemes. Returns QDialog.Rejected if the user canceled the dialog else loads the selected scheme into the canvas and returns QDialog.Accepted. """ tutors = workflows.example_workflows() items = [previewmodel.PreviewItem(path=t.abspath()) for t in tutors] model = previewmodel.PreviewModel(items=items) dialog = previewdialog.PreviewDialog(self) title = self.tr("Example Workflows") dialog.setWindowTitle(title) template = ( '<h3 style="font-size: 26px">\n' #'<img height="26" src="canvas_icons:Tutorials.svg">\n' "{0}\n" "</h3>" ) dialog.setHeading(template.format(title)) dialog.setModel(model) model.delayedScanUpdate() status = dialog.exec_() index = dialog.currentIndex() dialog.deleteLater() if status == QDialog.Accepted: if not self.pre_close_save(): return QDialog.Rejected selected = model.item(index) new_scheme = self.new_scheme_from(str(selected.path())) if new_scheme is not None: self.set_new_scheme(new_scheme) return status def welcome_dialog(self): """Show a modal welcome dialog for Orange Canvas. 
""" dialog = welcomedialog.WelcomeDialog(self) dialog.setWindowTitle(self.tr("Welcome to Orange Data Mining")) def new_scheme(): if self.new_scheme() == QDialog.Accepted: dialog.accept() def open_scheme(): if self.open_scheme() == QDialog.Accepted: dialog.accept() def open_recent(): if self.recent_scheme() == QDialog.Accepted: dialog.accept() def open_examples(): if self.tutorial_scheme() == QDialog.Accepted: dialog.accept() new_action = QAction( self.tr("New"), dialog, toolTip=self.tr("Open a new workflow."), triggered=new_scheme, shortcut=QKeySequence.New, icon=canvas_icons("New.svg"), ) open_action = QAction( self.tr("Open"), dialog, objectName="welcome-action-open", toolTip=self.tr("Open a workflow."), triggered=open_scheme, shortcut=QKeySequence.Open, icon=canvas_icons("Open.svg"), ) recent_action = QAction( self.tr("Recent"), dialog, objectName="welcome-recent-action", toolTip=self.tr("Browse and open a recent workflow."), triggered=open_recent, shortcut=QKeySequence(Qt.ControlModifier | (Qt.ShiftModifier | Qt.Key_R)), icon=canvas_icons("Recent.svg"), ) examples_action = QAction( self.examples_action.text(), dialog, icon=self.examples_action.icon(), toolTip=self.examples_action.toolTip(), whatsThis=self.examples_action.whatsThis(), triggered=open_examples, ) tutorials_action = QAction( self.tr("Tutorials"), self, objectName="tutorials-action", toolTip=self.tr("View YouTube tutorials."), triggered=self.tutorials, icon=canvas_icons("YouTube.svg"), ) bottom_row = [tutorials_action, examples_action, self.get_started_action] self.new_action.triggered.connect(dialog.accept) top_row = [new_action, open_action, recent_action] dialog.addRow(top_row, background="light-grass") dialog.addRow(bottom_row, background="light-orange") settings = QSettings() dialog.setShowAtStartup( settings.value("startup/show-welcome-screen", True, type=bool) ) status = dialog.exec_() settings.setValue("startup/show-welcome-screen", dialog.showAtStartup()) dialog.deleteLater() return status 
# NOTE(review): the following are methods of the canvas main-window class,
# whose header is outside this view; collapsed nesting reconstructed — verify
# the placement of the settings.setValue calls against upstream.
    def scheme_properties_dialog(self):
        """Return an empty `SchemeInfo` dialog instance.
        """
        settings = QSettings()
        value_key = "schemeinfo/show-at-new-scheme"

        dialog = SchemeInfoDialog(self)
        dialog.setWindowTitle(self.tr("Workflow Info"))
        dialog.setFixedSize(725, 450)
        # restore the "show at new scheme" checkbox state from settings
        dialog.setShowAtNewScheme(settings.value(value_key, True, type=bool))
        return dialog

    def show_scheme_properties(self):
        """Show current scheme properties.
        """
        settings = QSettings()
        value_key = "schemeinfo/show-at-new-scheme"

        current_doc = self.current_document()
        scheme = current_doc.scheme()
        dlg = self.scheme_properties_dialog()
        dlg.setAutoCommit(False)
        dlg.setScheme(scheme)
        status = dlg.exec_()

        if status == QDialog.Accepted:
            editor = dlg.editor
            stack = current_doc.undoStack()
            # group the title + description edits into a single undo step
            stack.beginMacro(self.tr("Change Info"))
            current_doc.setTitle(editor.title())
            current_doc.setDescription(editor.description())
            stack.endMacro()

            # Store the check state.
            settings.setValue(value_key, dlg.showAtNewScheme())
        return status

    def show_scheme_properties_for(self, scheme, window_title=None):
        """Show scheme properties for `scheme` with `window_title (if None
        a default 'Scheme Info' title will be used.
        """
        settings = QSettings()
        value_key = "schemeinfo/show-at-new-scheme"

        dialog = self.scheme_properties_dialog()
        if window_title is not None:
            dialog.setWindowTitle(window_title)
        dialog.setScheme(scheme)
        status = dialog.exec_()
        if status == QDialog.Accepted:
            # Store the check state.
            settings.setValue(value_key, dialog.showAtNewScheme())
        dialog.deleteLater()
        return status

    def set_signal_freeze(self, freeze):
        # pause or resume signal propagation in the current workflow
        scheme = self.current_document().scheme()
        manager = scheme.signal_manager
        if freeze:
            manager.pause()
        else:
            manager.resume()

    def remove_selected(self):
        """Remove current scheme selection.
        """
        self.current_document().removeSelected()

    def quit(self):
        """Quit the application.
""" if QApplication.activePopupWidget(): # On OSX the actions in the global menu bar are triggered # even if an popup widget is running it's own event loop # (in exec_) log.debug("Ignoring a quit shortcut during an active " "popup dialog.") else: self.close() def select_all(self): self.current_document().selectAll() def open_widget(self): """Open/raise selected widget's GUI. """ self.current_document().openSelected() def rename_widget(self): """Rename the current focused widget. """ doc = self.current_document() nodes = doc.selectedNodes() if len(nodes) == 1: doc.editNodeTitle(nodes[0]) def open_canvas_settings(self): """Open canvas settings/preferences dialog """ dlg = UserSettingsDialog(self) dlg.setWindowTitle(self.tr("Preferences")) dlg.show() status = dlg.exec_() if status == 0: self.__update_from_settings() def open_addons(self): from .addons import AddonManagerDialog, have_install_permissions if not have_install_permissions(): QMessageBox( QMessageBox.Warning, "Add-ons: insufficient permissions", "Insufficient permissions to install add-ons. Try starting Orange " "as a system administrator or install Orange in user folders.", parent=self, ).exec_() dlg = AddonManagerDialog(self, windowTitle=self.tr("Add-ons")) dlg.setAttribute(Qt.WA_DeleteOnClose) return dlg.exec_() def reset_widget_settings(self): res = message_question( "Clear all widget settings on next restart", title="Clear settings", informative_text=( "A restart of the application is necessary " + "for the changes to take effect" ), buttons=QMessageBox.Ok | QMessageBox.Cancel, default_button=QMessageBox.Ok, parent=self, ) if res == QMessageBox.Ok: # Touch a finely crafted file inside the settings directory. # The existence of this file is checked by the canvas main # function and is deleted there. 
fname = os.path.join(config.widget_settings_dir(), "DELETE_ON_START") os.makedirs(config.widget_settings_dir(), exist_ok=True) with open(fname, "a"): pass if not self.close(): message_information( "Settings will still be reset at next application start", parent=self, ) def show_report_view(self): doc = self.current_document() scheme = doc.scheme() scheme.show_report_view() def log_view(self): """Return the output text widget. """ return self.log_dock.widget() def open_about(self): """Open the about dialog. """ dlg = AboutDialog(self) dlg.setAttribute(Qt.WA_DeleteOnClose) dlg.exec_() def add_recent_scheme(self, title, path): """Add an entry (`title`, `path`) to the list of recent schemes. """ if not path: # No associated persistent path so we can't do anything. return if not title: title = os.path.basename(path) filename = os.path.abspath(os.path.realpath(path)) filename = os.path.normpath(filename) actions_by_filename = {} for action in self.recent_scheme_action_group.actions(): path = str(action.data()) actions_by_filename[path] = action if filename in actions_by_filename: # Remove the title/filename (so it can be reinserted) recent_index = index( self.recent_schemes, filename, key=operator.itemgetter(1) ) self.recent_schemes.pop(recent_index) action = actions_by_filename[filename] self.recent_menu.removeAction(action) self.recent_scheme_action_group.removeAction(action) action.setText(title or self.tr("untitled")) else: action = QAction(title or self.tr("untitled"), self, toolTip=filename) action.setData(filename) # Find the separator action in the menu (after 'Browse Recent') recent_actions = self.recent_menu.actions() begin_index = index(recent_actions, self.recent_menu_begin) action_before = recent_actions[begin_index + 1] self.recent_menu.insertAction(action_before, action) self.recent_scheme_action_group.addAction(action) self.recent_schemes.insert(0, (title, filename)) if len(self.recent_schemes) > max(self.num_recent_schemes, 1): title, filename = 
self.recent_schemes.pop(-1) action = actions_by_filename[filename] self.recent_menu.removeAction(action) self.recent_scheme_action_group.removeAction(action) config.save_recent_scheme_list(self.recent_schemes) def clear_recent_schemes(self): """Clear list of recent schemes """ actions = list(self.recent_menu.actions()) # Exclude permanent actions (Browse Recent, separators, Clear List) actions_to_remove = [action for action in actions if str(action.data())] for action in actions_to_remove: self.recent_menu.removeAction(action) self.recent_scheme_action_group.removeAction(action) self.recent_schemes = [] config.save_recent_scheme_list([]) def _on_recent_scheme_action(self, action): """A recent scheme action was triggered by the user """ if not self.pre_close_save(): return QDialog.Rejected filename = str(action.data()) self.load_scheme(filename) def _on_dock_location_changed(self, location): """Location of the dock_widget has changed, fix the margins if necessary. """ self.__update_scheme_margins() def set_tool_dock_expanded(self, expanded): """ Set the dock widget expanded state. """ self.dock_widget.setExpanded(expanded) def _on_tool_dock_expanded(self, expanded): """ 'dock_widget' widget was expanded/collapsed. """ if expanded != self.toggle_tool_dock_expand.isChecked(): self.toggle_tool_dock_expand.setChecked(expanded) def createPopupMenu(self): # Override the default context menu popup (we don't want the user to # be able to hide the tool dock widget). return None def closeEvent(self, event): """Close the main window. 
""" document = self.current_document() if ( document.isModifiedStrict() and self.ask_save_changes() == QDialog.Rejected or self.ask_save_report() == QDialog.Rejected ): event.ignore() return old_scheme = document.scheme() # Set an empty scheme to clear the document document.setScheme(widgetsscheme.WidgetsScheme()) QApplication.sendEvent(old_scheme, QEvent(QEvent.Close)) old_scheme.deleteLater() config.save_config() geometry = self.saveGeometry() state = self.saveState(version=self.SETTINGS_VERSION) settings = QSettings() settings.beginGroup("mainwindow") settings.setValue("geometry", geometry) settings.setValue("state", state) settings.setValue("canvasdock/expanded", self.dock_widget.expanded()) settings.setValue("scheme-margins-enabled", self.scheme_margins_enabled) settings.setValue("last-scheme-dir", self.last_scheme_dir) settings.setValue("widgettoolbox/state", self.widgets_tool_box.saveState()) settings.setValue( "quick-help/visible", self.canvas_tool_dock.quickHelpVisible() ) settings.endGroup() event.accept() # Close any windows left. application = QApplication.instance() QTimer.singleShot(0, application.closeAllWindows) def showEvent(self, event): if self.__first_show: settings = QSettings() settings.beginGroup("mainwindow") # Restore geometry and dock/toolbar state state = settings.value("state", QByteArray(), type=QByteArray) if state: self.restoreState(state, version=self.SETTINGS_VERSION) geom_data = settings.value("geometry", QByteArray(), type=QByteArray) if geom_data: self.restoreGeometry(geom_data) self.__first_show = False return QMainWindow.showEvent(self, event) def event(self, event): if event.type() == QEvent.StatusTip and isinstance(event, QuickHelpTipEvent): # Using singleShot to update the text browser. # If updating directly the application experiences strange random # segfaults (in ~StatusTipEvent in QTextLayout or event just normal # event loop), but only when the contents are larger then the # QTextBrowser's viewport. 
if event.priority() == QuickHelpTipEvent.Normal: QTimer.singleShot(0, partial(self.dock_help.showHelp, event.html())) elif event.priority() == QuickHelpTipEvent.Temporary: QTimer.singleShot( 0, partial(self.dock_help.showHelp, event.html(), event.timeout()) ) elif event.priority() == QuickHelpTipEvent.Permanent: QTimer.singleShot( 0, partial(self.dock_help.showPermanentHelp, event.html()) ) return True elif event.type() == QEvent.WhatsThisClicked: ref = event.href() url = QUrl(ref) if url.scheme() == "help" and url.authority() == "search": try: url = self.help.search(url) except KeyError: url = None log.info("No help topic found for %r", url) if url: self.show_help(url) else: message_information( self.tr("There is no documentation for this widget yet."), parent=self, ) return True return QMainWindow.event(self, event) def show_help(self, url): """ Show `url` in a help window. """ log.info("Setting help to url: %r", url) if self.open_in_external_browser: url = QUrl(url) if not QDesktopServices.openUrl(url): # Try fixing some common problems. url = QUrl.fromUserInput(url.toString()) # 'fromUserInput' includes possible fragment into the path # (which prevents it to open local files) so we reparse it # again. url =
category=entity.category, subcategory=entity.subcategory, length=entity.length, offset=entity.offset, confidence_score=entity.confidence_score, ) def __repr__(self): return ( "PiiEntity(text={}, category={}, subcategory={}, length={}, " "offset={}, confidence_score={})".format( self.text, self.category, self.subcategory, self.length, self.offset, self.confidence_score, )[:1024] ) class HealthcareEntity(DictMixin): """HealthcareEntity contains information about a Healthcare entity found in text. :ivar str text: Entity text as appears in the document. :ivar str normalized_text: Optional. Normalized version of the raw `text` we extract from the document. Not all `text` will have a normalized version. :ivar str category: Entity category, see the :class:`~azure.ai.textanalytics.HealthcareEntityCategory` type for possible healthcare entity categories. :ivar str subcategory: Entity subcategory. :ivar assertion: Contains various assertions about this entity. For example, if an entity is a diagnosis, is this diagnosis 'conditional' on a symptom? Are the doctors 'certain' about this diagnosis? Is this diagnosis 'associated' with another diagnosis? :vartype assertion: ~azure.ai.textanalytics.HealthcareEntityAssertion :ivar int length: The entity text length. This value depends on the value of the `string_index_type` parameter specified in the original request, which is UnicodeCodePoints by default. :ivar int offset: The entity text offset from the start of the document. This value depends on the value of the `string_index_type` parameter specified in the original request, which is UnicodeCodePoints by default. :ivar float confidence_score: Confidence score between 0 and 1 of the extracted entity. :ivar data_sources: A collection of entity references in known data sources. 
:vartype data_sources: list[~azure.ai.textanalytics.HealthcareEntityDataSource] """ def __init__(self, **kwargs): self.text = kwargs.get("text", None) self.normalized_text = kwargs.get("normalized_text", None) self.category = kwargs.get("category", None) self.subcategory = kwargs.get("subcategory", None) self.assertion = kwargs.get("assertion", None) self.length = kwargs.get("length", None) self.offset = kwargs.get("offset", None) self.confidence_score = kwargs.get("confidence_score", None) self.data_sources = kwargs.get("data_sources", []) @classmethod def _from_generated(cls, healthcare_entity): assertion = None try: if healthcare_entity.assertion: assertion = HealthcareEntityAssertion._from_generated( # pylint: disable=protected-access healthcare_entity.assertion ) except AttributeError: assertion = None return cls( text=healthcare_entity.text, normalized_text=healthcare_entity.name, category=healthcare_entity.category, subcategory=healthcare_entity.subcategory, assertion=assertion, length=healthcare_entity.length, offset=healthcare_entity.offset, confidence_score=healthcare_entity.confidence_score, data_sources=[ HealthcareEntityDataSource(entity_id=l.id, name=l.data_source) for l in healthcare_entity.links ] if healthcare_entity.links else None, ) def __hash__(self): return hash(repr(self)) def __repr__(self): return ( "HealthcareEntity(text={}, normalized_text={}, category={}, subcategory={}, assertion={}, length={}, " "offset={}, confidence_score={}, data_sources={})".format( self.text, self.normalized_text, self.category, self.subcategory, repr(self.assertion), self.length, self.offset, self.confidence_score, repr(self.data_sources), )[:1024] ) class HealthcareEntityAssertion(DictMixin): """Contains various assertions about a `HealthcareEntity`. For example, if an entity is a diagnosis, is this diagnosis 'conditional' on a symptom? Are the doctors 'certain' about this diagnosis? Is this diagnosis 'associated' with another diagnosis? 
    :ivar str conditionality: Describes whether the healthcare entity it's on is
        conditional on another entity. For example, "If the patient has a fever,
        he has pneumonia", the diagnosis of pneumonia is 'conditional' on whether
        the patient has a fever. Possible values are "hypothetical" and
        "conditional".
    :ivar str certainty: Describes how certain the healthcare entity it's on is.
        For example, in "The patient may have a fever", the fever entity is not
        100% certain, but is instead "positivePossible". Possible values are
        "positive", "positivePossible", "neutralPossible", "negativePossible",
        and "negative".
    :ivar str association: Describes whether the healthcare entity it's on is
        the subject of the document, or if this entity describes someone else in
        the document. For example, in "The subject's mother has a fever", the
        "fever" entity is not associated with the subject themselves, but with
        the subject's mother. Possible values are "subject" and "other".
    """

    def __init__(self, **kwargs):
        # Every attribute defaults to None so a partially populated service
        # payload never raises on a missing key.
        self.conditionality = kwargs.get("conditionality", None)
        self.certainty = kwargs.get("certainty", None)
        self.association = kwargs.get("association", None)

    @classmethod
    def _from_generated(cls, healthcare_assertion):
        # Internal: map the autogenerated (swagger) assertion model onto this
        # public model, field by field.
        return cls(
            conditionality=healthcare_assertion.conditionality,
            certainty=healthcare_assertion.certainty,
            association=healthcare_assertion.association,
        )

    def __repr__(self):
        return "HealthcareEntityAssertion(conditionality={}, certainty={}, association={})".format(
            self.conditionality, self.certainty, self.association
        )


class HealthcareEntityDataSource(DictMixin):
    """
    HealthcareEntityDataSource contains information representing an entity reference in
    a known data source.

    :ivar str entity_id: ID of the entity in the given source catalog.
    :ivar str name: The name of the entity catalog from where the entity was identified,
        such as UMLS, CHV, MSH, etc.
    """

    def __init__(self, **kwargs):
        self.entity_id = kwargs.get("entity_id", None)
        self.name = kwargs.get("name", None)

    def __repr__(self):
        # repr output is truncated to 1024 characters, matching the convention
        # used by the other models in this module.
        return "HealthcareEntityDataSource(entity_id={}, name={})".format(
            self.entity_id, self.name
        )[:1024]


class TextAnalyticsError(DictMixin):
    """TextAnalyticsError contains the error code, message, and other details that explain why
    the batch or individual document failed to be processed by the service.

    :ivar code: Error code. Possible values include:
        'invalidRequest', 'invalidArgument', 'internalServerError',
        'serviceUnavailable', 'invalidParameterValue', 'invalidRequestBodyFormat',
        'emptyRequest', 'missingInputRecords', 'invalidDocument', 'modelVersionIncorrect',
        'invalidDocumentBatch', 'unsupportedLanguageCode', 'invalidCountryHint'
    :vartype code: str
    :ivar message: Error message.
    :vartype message: str
    :ivar target: Error target.
    :vartype target: str
    """

    def __init__(self, **kwargs):
        self.code = kwargs.get("code", None)
        self.message = kwargs.get("message", None)
        self.target = kwargs.get("target", None)

    @classmethod
    def _from_generated(cls, err):
        # When the service nests an inner error, surface that instead: it
        # carries the more specific code/message/target for the failure.
        if err.innererror:
            return cls(
                code=err.innererror.code,
                message=err.innererror.message,
                target=err.innererror.target,
            )
        return cls(code=err.code, message=err.message, target=err.target)

    def __repr__(self):
        # Truncated to 1024 characters for parity with the other models.
        return "TextAnalyticsError(code={}, message={}, target={})".format(
            self.code, self.message, self.target
        )[:1024]


class TextAnalyticsWarning(DictMixin):
    """TextAnalyticsWarning contains the warning code and message that explains why
    the response has a warning.

    :ivar code: Warning code. Possible values include: 'LongWordsInDocument',
        'DocumentTruncated'.
    :vartype code: str
    :ivar message: Warning message.
    :vartype message: str
    """

    def __init__(self, **kwargs):
        self.code = kwargs.get("code", None)
        self.message = kwargs.get("message", None)

    @classmethod
    def _from_generated(cls, warning):
        # Internal: map the autogenerated (swagger) warning model onto this
        # public model.
        return cls(
            code=warning.code,
            message=warning.message,
        )

    def __repr__(self):
        # Truncated to 1024 characters for parity with the other models.
        return "TextAnalyticsWarning(code={}, message={})".format(
            self.code, self.message
        )[:1024]


class ExtractKeyPhrasesResult(DictMixin):
    """ExtractKeyPhrasesResult is a result object which contains
    the key phrases found in a particular document.

    :ivar id: Unique, non-empty document identifier that matches the
        document id that was passed in with the request. If not specified
        in the request, an id is assigned for the document.
    :vartype id: str
    :ivar key_phrases: A list of representative words or phrases.
        The number of key phrases returned is proportional to the number of words
        in the input document.
    :vartype key_phrases: list[str]
    :ivar warnings: Warnings encountered while processing document. Results will still be returned
        if there are warnings, but they may not be fully accurate.
    :vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
    :ivar statistics: If `show_stats=True` was specified in the request this
        field will contain information about the document payload.
    :vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
    :ivar bool is_error: Boolean check for error item when iterating over list of
        results. Always False for an instance of a ExtractKeyPhrasesResult.
    """

    def __init__(self, **kwargs):
        self.id = kwargs.get("id", None)
        self.key_phrases = kwargs.get("key_phrases", None)
        # warnings defaults to an empty list (a fresh list per call, so no
        # shared-mutable-default hazard here).
        self.warnings = kwargs.get("warnings", [])
        self.statistics = kwargs.get("statistics", None)
        # Hard-coded False: lets callers distinguish successes from
        # DocumentError items when iterating mixed result lists.
        self.is_error = False

    def __repr__(self):
        return "ExtractKeyPhrasesResult(id={}, key_phrases={}, warnings={}, statistics={}, is_error={})".format(
            self.id,
            self.key_phrases,
            repr(self.warnings),
            repr(self.statistics),
            self.is_error,
        )[:1024]


class RecognizeLinkedEntitiesResult(DictMixin):
    """RecognizeLinkedEntitiesResult is a result object which contains
    links to a well-known knowledge base, like for example, Wikipedia or Bing.

    :ivar id: Unique, non-empty document identifier that matches the
        document id that was passed in with the request. If not specified
        in the request, an id is assigned for the document.
    :vartype id: str
    :ivar entities: Recognized well-known entities in the document.
    :vartype entities: list[~azure.ai.textanalytics.LinkedEntity]
    :ivar warnings: Warnings encountered while processing document. Results will still be returned
        if there are warnings, but they may not be fully accurate.
    :vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
    :ivar statistics: If `show_stats=True` was specified in the request this
        field will contain information about the document payload.
    :vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
    :ivar bool is_error: Boolean check for error item when iterating over list of
        results. Always False for an instance of a RecognizeLinkedEntitiesResult.
    """

    def __init__(self, **kwargs):
        self.id = kwargs.get("id", None)
        self.entities = kwargs.get("entities", None)
        self.warnings = kwargs.get("warnings", [])
        self.statistics = kwargs.get("statistics", None)
        # Always False on success objects; see ExtractKeyPhrasesResult.
        self.is_error = False

    def __repr__(self):
        return "RecognizeLinkedEntitiesResult(id={}, entities={}, warnings={}, statistics={}, is_error={})".format(
            self.id,
            repr(self.entities),
            repr(self.warnings),
            repr(self.statistics),
            self.is_error,
        )[:1024]


class AnalyzeSentimentResult(DictMixin):
    """AnalyzeSentimentResult is a result object which contains
    the overall predicted sentiment and confidence scores for your document
    and a per-sentence sentiment prediction with scores.

    :ivar id: Unique, non-empty document identifier that matches the
        document id that was passed in with the request. If not specified
        in the request, an id is assigned for the document.
    :vartype id: str
    :ivar sentiment: Predicted sentiment for document (Negative,
        Neutral, Positive, or Mixed). Possible values include: 'positive',
        'neutral', 'negative', 'mixed'
    :vartype sentiment: str
    :ivar warnings: Warnings encountered while processing document. Results will still be returned
        if there are warnings, but they may not be fully accurate.
    :vartype warnings: list[~azure.ai.textanalytics.TextAnalyticsWarning]
    :ivar statistics: If `show_stats=True` was specified in the request this
        field will contain information about the document payload.
    :vartype statistics: ~azure.ai.textanalytics.TextDocumentStatistics
    :ivar confidence_scores: Document level sentiment confidence
        scores between 0 and 1 for each sentiment
<filename>library/service_element.py #!/usr/bin/python # Copyright (c) 2017-2019 Forcepoint ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: service_element short_description: Create, modify or delete service elements description: - Each service type currently supported in this module is documented as a suboption. Each service element type will have a minimum number of arguments that are required to create the element if it does not exist. Service elements supported by this module have their `create` constructors documented at U(http://smc-python.readthedocs.io/en/latest/pages/reference.html#elements). This module uses a 'update or create' logic, therefore it is not possible to create the same element twice. If the element exists and the attributes provided are different, the element will be updated before returned. It also means this module can be run multiple times with only slight modifications to the playbook. This is useful when an error is seen with a duplicate name, etc and you must re-adjust the playbook and re-run. For groups, members must be referenced by type and name. Members can be services that are also being created by the same playbook. If running in check_mode,' only fetches will be performed and the state attribute will indicate if an element is not found (i.e. would need to be created). version_added: '2.5' options: elements: description: - A list of the elements to create, modify or remove type: list required: true suboptions: tcp_service: description: - A TCP related service type: dict suboptions: name: description: - Name of this service element type: str required: true min_dst_port: description: - Starting port for this service. Required. type: str required: true max_dst_port: description: - Top level port for this service. Required for defining a port range. 
type: str udp_service: description: - A UDP related service type: dict suboptions: name: description: - Name of this service element type: str required: true min_dst_port: description: - Starting port for this service. Required. type: str required: true max_dst_port: description: - Top level port for this service. Required for defining a port range. type: str ip_service: description: - An IP based related service type: dict suboptions: name: description: - Name of this service element type: str required: true protocol_number: description: - IP protocol number for the service type: str required: true ethernet_service: description: - An Ethernet related service type: dict suboptions: name: description: - Name of this service element type: str required: true frame_type: description: - Frame type for this service type: str choices: - eth2 - llc - snap required: true value1: description: - The hex string code for protocol type: str required: true icmp_service: description: - An ICMP related service type: dict suboptions: name: description: - Name of this service element type: str required: true icmp_type: description: - ICMP type field type: str required: true icmp_code: description: - ICMP type code type: str required: true icmp_ipv6_service: description: - An ICMP related service type: dict suboptions: name: description: - Name of this service element type: str required: true icmp_type: description: - ICMPv6 type field type: str required: true tcp_service_group: description: - A group of TCP services type: dict suboptions: name: description: - Name of this group element type: str required: true members: description: - A list of members by service element, either the name field must be defined or the name and optional parts to create the element type: list append_lists: description: - Append defined members to the existing list of group members. 
Setting this to false will overwrite the existing group with the defined members type: bool default: false remove_members: description: - Set to true to reverse the group logic by specifying the defined members be deleted from the group. This setting is mutually exclusive with I(append_lists) type: bool default: false service_group: description: - A group of service elements of any service type type: dict suboptions: name: description: - Name of this group element type: str required: true members: description: - A list of members by service element, either the name field must be defined or the name and optional parts to create the element type: list append_lists: description: - Append defined members to the existing list of group members. Setting this to false will overwrite the existing group with the defined members type: bool default: false remove_members: description: - Set to true to reverse the group logic by specifying the defined members be deleted from the group. This setting is mutually exclusive with I(append_lists) type: bool default: false udp_service_group: description: - A group of service elements of UDP services type: dict suboptions: name: description: - Name of this group element type: str required: true members: description: - A list of members by service element, either the name field must be defined or the name and optional parts to create the element type: list append_lists: description: - Append defined members to the existing list of group members. Setting this to false will overwrite the existing group with the defined members type: bool default: false remove_members: description: - Set to true to reverse the group logic by specifying the defined members be deleted from the group. 
This setting is mutually exclusive with I(append_lists) type: bool default: false icmp_service_group: description: - A group of service elements of ICMP services type: dict suboptions: name: description: - Name of this group element type: str required: true members: description: - A list of members by service element, either the name field must be defined or the name and optional parts to create the element type: list append_lists: description: - Append defined members to the existing list of group members. Setting this to false will overwrite the existing group with the defined members type: bool default: false remove_members: description: - Set to true to reverse the group logic by specifying the defined members be deleted from the group. This setting is mutually exclusive with I(append_lists) type: bool default: false ip_service_group: description: - A group of service elements of IP services type: dict suboptions: name: description: - Name of this group element type: str required: true members: description: - A list of members by service element, either the name field must be defined or the name and optional parts to create the element type: list append_lists: description: - Append defined members to the existing list of group members. Setting this to false will overwrite the existing group with the defined members type: bool default: false remove_members: description: - Set to true to reverse the group logic by specifying the defined members be deleted from the group. This setting is mutually exclusive with I(append_lists) type: bool default: false ignore_err_if_not_found: description: - When deleting elements, whether to ignore an error if the element is not found. This is only used when I(state=absent). 
default: True state: description: - Create or delete flag required: false default: present choices: - present - absent extends_documentation_fragment: - management_center requirements: - smc-python author: - Forcepoint ''' EXAMPLES = ''' - name: Example service element creation register: result service_element: smc_logging: level: 10 path: ansible-smc.log elements: - tcp_service: name: myservice min_dst_port: 8080 max_dst_port: 8100 - tcp_service: name: newservice80 min_dst_port: 80 - udp_service: name: myudp min_dst_port: 8090 max_dst_port: 8091 comment: created by example - udp_service: name: udp2000 min_dst_port: 2000 - ip_service: name: new service protocol_number: 8 comment: custom EGP service - ethernet_service: name: 8021q frame frame_type: eth2 value1: "0x8100" - icmp_service: name: custom icmp icmp_type: 3 icmp_code: 7 comment: custom icmp services - icmp_ipv6_service: name: my v6 icmp icmp_type: 139 comment: Neighbor Advertisement Message - tcp_service_group: name: mygroup members: tcp_service: - newservice80 - service_group: name: mysvcgrp members: tcp_service: - newservice80 udp_service: - myudp - udp2000 icmp_service: - custom icmp - udp_service_group: name: myudpservices members: udp_service: - myudp - udp2000 - icmp_service_group: name: myicmp members: icmp_service: - custom icmp - icmp_service_group: name: myemptygroup members: - ip_service_group: name: myipservices members: ip_service: - new service - name: Delete all service elements register:
storage_name kwargs['dataset_group'] = 'scene' kwargs['dataset_meta'] = { 'authors': '<NAME>, <NAME>, and <NAME>', 'title': 'TUT Urban Acoustic Scenes 2018 Mobile, public leaderboard dataset', 'url': None, 'audio_source': 'Field recording', 'audio_type': 'Natural', 'audio_recording_device_model': 'Various', 'microphone_model': 'Various', 'licence': 'free non-commercial' } kwargs['reference_data_present'] = False kwargs['crossvalidation_folds'] = 1 kwargs['meta_filename'] = 'meta.csv' filename_base = 'TUT-urban-acoustic-scenes-2018-mobile-leaderboard' source_url = 'https://zenodo.org/record/1245184/files/' kwargs['package_list'] = [ { 'content_type': 'documentation', 'remote_file': source_url + filename_base + '.doc.zip', 'remote_bytes': 8032, 'remote_md5': '7d7017a1f69f1ee91fe3c55ad9752d48', 'filename': filename_base + '.doc.zip' }, { 'content_type': 'meta', 'remote_file': source_url + filename_base + '.meta.zip', 'remote_bytes': 5994, 'remote_md5': '36fee45acb480f75f9f9d0eb2bf58c08', 'filename': filename_base + '.meta.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.1.zip', 'remote_bytes': 1595184268, 'remote_md5': '5340cac647914b1dbac0058384306bdd', 'filename': filename_base + '.audio.1.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.2.zip', 'remote_bytes': 937889790, 'remote_md5': 'd9126d1920f1a4b59a5368f8cf1d04b5', 'filename': filename_base + '.audio.2.zip' } ] kwargs['audio_paths'] = [ 'audio' ] super(TUTUrbanAcousticScenes_2018_Mobile_LeaderboardSet, self).__init__(**kwargs) def process_meta_item(self, item, absolute_path=True, **kwargs): """Process single meta data item Parameters ---------- item : MetaDataItem Meta data item absolute_path : bool Convert file paths to be absolute Default value True """ if absolute_path: item.filename = self.relative_to_absolute_path(item.filename) else: item.filename = self.absolute_to_relative_path(item.filename) def prepare(self): """Prepare 
dataset for the usage. Returns ------- self """ if not self.meta_container.exists() and self.reference_data_present: meta_data = collections.OrderedDict() for fold in self.folds(): # Read train files in fold_data = MetaDataContainer( filename=self.evaluation_setup_filename( setup_part='train', fold=fold ) ).load() # Read eval files in fold_data += MetaDataContainer( filename=self.evaluation_setup_filename( setup_part='evaluate', fold=fold ) ).load() # Process, make sure each file is included only once. for item in fold_data: if item.filename not in meta_data: self.process_meta_item( item=item, absolute_path=False ) meta_data[item.filename] = item # Save meta MetaDataContainer(list(meta_data.values())).save( filename=self.meta_file ) # Load meta and cross validation self.load() return self class TUTUrbanAcousticScenes_2018_Mobile_EvaluationSet(AcousticSceneDataset): """TUT Urban Acoustic Scenes 2018 Mobile Evaluation dataset This dataset is used in DCASE2018 - Task 1, Acoustic scene classification / Subtask B / Evaluation """ def __init__(self, storage_name='TUT-urban-acoustic-scenes-2018-mobile-evaluation', data_path=None, included_content_types=None, **kwargs): """ Constructor Parameters ---------- storage_name : str Name to be used when storing dataset on disk Default value 'TUT-urban-acoustic-scenes-2018-mobile-evaluation' data_path : str Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets') is used. Default value None included_content_types : list of str or str Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code', 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string. 
Default value None """ kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['dataset_group'] = 'scene' kwargs['dataset_meta'] = { 'authors': '<NAME>, <NAME>, and <NAME>', 'title': 'TUT Urban Acoustic Scenes 2018 Mobile, evaluation dataset', 'url': None, 'audio_source': 'Field recording', 'audio_type': 'Natural', 'audio_recording_device_model': 'Various', 'microphone_model': 'Various', 'licence': 'free non-commercial' } kwargs['reference_data_present'] = False kwargs['crossvalidation_folds'] = 1 kwargs['meta_filename'] = 'meta.csv' filename_base = 'TUT-urban-acoustic-scenes-2018-mobile-evaluation' source_url = 'https://zenodo.org/record/1293901/files/' kwargs['package_list'] = [ { 'content_type': 'documentation', 'remote_file': source_url + filename_base + '.doc.zip', 'remote_bytes': 8153, 'remote_md5': '388c33165041f7f485f5d02f8c79e5cb', 'filename': filename_base + '.doc.zip' }, { 'content_type': 'meta', 'remote_file': source_url + filename_base + '.meta.zip', 'remote_bytes': 37135, 'remote_md5': 'ee32d053b658994f2836525884ca4752', 'filename': filename_base + '.meta.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.1.zip', 'remote_bytes': 1661884583, 'remote_md5': '1e3142533721c67397363f73cf9d02d6', 'filename': filename_base + '.audio.1.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.2.zip', 'remote_bytes': 1653193397, 'remote_md5': '042ee6d3769ddcf5660be5b1ccbf27c7', 'filename': filename_base + '.audio.2.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.3.zip', 'remote_bytes': 1649013685, 'remote_md5': '1b1a88f891e29cdac06ddb4c5f3c114c', 'filename': filename_base + '.audio.3.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.4.zip', 'remote_bytes': 1659605017, 'remote_md5': 'de97d70ba7dacf37ce0c0e94d38ae068', 'filename': filename_base + 
'.audio.4.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.5.zip', 'remote_bytes': 1662372447, 'remote_md5': 'd5a9d8c9da6f14e35e43723c31cc2d2f', 'filename': filename_base + '.audio.5.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.6.zip', 'remote_bytes': 1657254960, 'remote_md5': '168f0dbe69a2b314b846490914e8e3f1', 'filename': filename_base + '.audio.6.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.7.zip', 'remote_bytes': 1663811780, 'remote_md5': 'b77db16f4615ac0f8bab2a1cb45edf0c', 'filename': filename_base + '.audio.7.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.8.zip', 'remote_bytes': 1668115140, 'remote_md5': 'e7bf06ab5af19e535f0614359a0fea10', 'filename': filename_base + '.audio.8.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.9.zip', 'remote_bytes': 1657413208, 'remote_md5': 'f4f958f7112e2901660573df3f4ed649', 'filename': filename_base + '.audio.9.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.10.zip', 'remote_bytes': 1655476185, 'remote_md5': 'c1c1f61f015cf492e426c9feb98b4d11', 'filename': filename_base + '.audio.10.zip' }, { 'content_type': 'audio', 'remote_file': source_url + filename_base + '.audio.11.zip', 'remote_bytes': 11141229, 'remote_md5': '0a2d966628facf60ee875b1fbddfa11f', 'filename': filename_base + '.audio.11.zip' } ] kwargs['audio_paths'] = [ 'audio' ] super(TUTUrbanAcousticScenes_2018_Mobile_EvaluationSet, self).__init__(**kwargs) def process_meta_item(self, item, absolute_path=True, **kwargs): """Process single meta data item Parameters ---------- item : MetaDataItem Meta data item absolute_path : bool Convert file paths to be absolute Default value True """ if absolute_path: item.filename = self.relative_to_absolute_path(item.filename) else: item.filename = self.absolute_to_relative_path(item.filename) def 
prepare(self): """Prepare dataset for the usage. Returns ------- self """ if not self.meta_container.exists() and self.reference_data_present: meta_data = collections.OrderedDict() for fold in self.folds(): # Read train files in fold_data = MetaDataContainer( filename=self.evaluation_setup_filename( setup_part='train', fold=fold ) ).load() # Read eval files in fold_data += MetaDataContainer( filename=self.evaluation_setup_filename( setup_part='evaluate', fold=fold ) ).load() # Process, make sure each file is included only once. for item in fold_data: if item.filename not in meta_data: self.process_meta_item( item=item, absolute_path=False ) meta_data[item.filename] = item # Save meta MetaDataContainer(list(meta_data.values())).save( filename=self.meta_file ) # Load meta and cross validation self.load() return self # ===================================================== # DCASE 2017 # ===================================================== class TUTAcousticScenes_2017_DevelopmentSet(AcousticSceneDataset): """TUT Acoustic scenes 2017 development dataset This dataset is used in DCASE2017 - Task 1, Acoustic scene classification """ def __init__(self, storage_name='TUT-acoustic-scenes-2017-development', data_path=None, included_content_types=None, **kwargs): """ Constructor Parameters ---------- storage_name : str Name to be used when storing dataset on disk Default value 'TUT-acoustic-scenes-2017-development' data_path : str Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets') is used. Default value None included_content_types : list of str or str Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code', 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string. 
Default value None """ kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['dataset_group'] = 'scene' kwargs['dataset_meta'] = { 'authors': '<NAME>, <NAME>, and <NAME>', 'title': 'TUT Acoustic Scenes 2017, development dataset', 'url': None, 'audio_source': 'Field recording', 'audio_type': 'Natural', 'audio_recording_device_model': 'Roland Edirol R-09', 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone', 'licence': 'free non-commercial' } kwargs['crossvalidation_folds'] = 4 source_url = 'https://zenodo.org/record/400515/files/' kwargs['package_list'] = [ { 'content_type': 'documentation', 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.doc.zip', 'remote_bytes': 54796, 'remote_md5': '2065495aaf3f1103e795c9899e2af1df', 'filename': 'TUT-acoustic-scenes-2017-development.doc.zip' }, { 'content_type': 'meta', 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.meta.zip', 'remote_bytes': 104321, 'remote_md5': '9007fd4772d816590c5db5f5e9568f5d', 'filename': 'TUT-acoustic-scenes-2017-development.meta.zip' }, { 'content_type': 'meta', 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.error.zip', 'remote_bytes': 1432, 'remote_md5': '802c700b021769e52a2c1e3b9c117a1b', 'filename': 'TUT-acoustic-scenes-2017-development.error.zip' }, { 'content_type': 'audio', 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.1.zip', 'remote_bytes': 1071445248, 'remote_md5': '251325a9afaaad0326ad1c57f57d514a', 'filename': 'TUT-acoustic-scenes-2017-development.audio.1.zip' }, { 'content_type': 'audio', 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.2.zip', 'remote_bytes': 1073453613, 'remote_md5': 'c26861e05147dc319b4250eb103d9d99', 'filename': 'TUT-acoustic-scenes-2017-development.audio.2.zip' }, { 'content_type': 'audio', 'remote_file': source_url + 
'TUT-acoustic-scenes-2017-development.audio.3.zip', 'remote_bytes': 1073077819, 'remote_md5': 'a4815775f8a5e629179726ee4cd4f55a', 'filename': 'TUT-acoustic-scenes-2017-development.audio.3.zip' }, { 'content_type': 'audio', 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.4.zip', 'remote_bytes': 1072822038, 'remote_md5': '1732b03afe8c53ef8bba80ba14766e57', 'filename': 'TUT-acoustic-scenes-2017-development.audio.4.zip' }, { 'content_type': 'audio', 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.5.zip', 'remote_bytes': 1072644652, 'remote_md5': '611be754a0c951185c6ae4b7643c19a0', 'filename': 'TUT-acoustic-scenes-2017-development.audio.5.zip' }, { 'content_type': 'audio', 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.6.zip', 'remote_bytes': 1072667888, 'remote_md5': '165a201db800d3ea76fce5a9c2bd97d7', 'filename': 'TUT-acoustic-scenes-2017-development.audio.6.zip' }, { 'content_type': 'audio', 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.7.zip', 'remote_bytes': 1073417661, 'remote_md5': 'c7d79db84264401c0f8680dcc36013ad', 'filename': 'TUT-acoustic-scenes-2017-development.audio.7.zip' }, { 'content_type': 'audio', 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.8.zip', 'remote_bytes': 1072381222, 'remote_md5': '35043f25123439392338c790494c7a19', 'filename': 'TUT-acoustic-scenes-2017-development.audio.8.zip' }, { 'content_type': 'audio', 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.9.zip', 'remote_bytes': 1072087738, 'remote_md5': '0805dcf5d8e6871dc9610182b2efb93a', 'filename': 'TUT-acoustic-scenes-2017-development.audio.9.zip' }, { 'content_type': 'audio', 'remote_file': source_url + 'TUT-acoustic-scenes-2017-development.audio.10.zip', 'remote_bytes': 1046262120, 'remote_md5': '5df83a191295a04e290b125c634e13e7', 'filename': 'TUT-acoustic-scenes-2017-development.audio.10.zip' } ] kwargs['audio_paths'] = [ 'audio' ] 
super(TUTAcousticScenes_2017_DevelopmentSet, self).__init__(**kwargs) def process_meta_item(self, item, absolute_path=True, **kwargs): """Process single meta data item Parameters ---------- item : MetaDataItem Meta data item absolute_path : bool Convert file paths to be absolute Default value True """ if absolute_path: item.filename = self.relative_to_absolute_path(item.filename) else: item.filename = self.absolute_to_relative_path(item.filename) raw_path, raw_filename = os.path.split(item.filename) item.identifier = raw_filename.split('_')[0] def prepare(self): """Prepare dataset for the usage. Returns ------- self """ if not self.meta_container.exists(): meta_data = collections.OrderedDict() for fold in self.folds(): # Read train files in fold_data = MetaDataContainer( filename=self.evaluation_setup_filename(setup_part='train', fold=fold) ).load() # Read eval files in fold_data += MetaDataContainer( filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold) ).load() # Process, make sure each file is included only once. for item in fold_data: if item.filename not in meta_data: self.process_meta_item( item=item, absolute_path=False )
Checking if the direction of the move is correct full_rec_payable = full_rec_move.line_ids.filtered(lambda l: l.account_id == self.account_rsa) self.assertEqual(full_rec_payable.balance, 18.75) def test_unreconcile(self): # Use case: # 2 invoices paid with a single payment. Unreconcile the payment with one invoice, the # other invoice should remain reconciled. inv1 = self.create_invoice(invoice_amount=10, currency_id=self.currency_usd_id) inv2 = self.create_invoice(invoice_amount=20, currency_id=self.currency_usd_id) payment = self.env['account.payment'].create({ 'payment_type': 'inbound', 'payment_method_id': self.env.ref('account.account_payment_method_manual_in').id, 'partner_type': 'customer', 'partner_id': self.partner_agrolait_id, 'amount': 100, 'currency_id': self.currency_usd_id, 'journal_id': self.bank_journal_usd.id, }) payment.action_post() credit_aml = payment.line_ids.filtered('credit') # Check residual before assignation self.assertAlmostEqual(inv1.amount_residual, 10) self.assertAlmostEqual(inv2.amount_residual, 20) # Assign credit and residual inv1.js_assign_outstanding_line(credit_aml.id) inv2.js_assign_outstanding_line(credit_aml.id) self.assertAlmostEqual(inv1.amount_residual, 0) self.assertAlmostEqual(inv2.amount_residual, 0) # Unreconcile one invoice at a time and check residual credit_aml.remove_move_reconcile() self.assertAlmostEqual(inv1.amount_residual, 10) self.assertAlmostEqual(inv2.amount_residual, 20) def test_unreconcile_exchange(self): # Use case: # - Company currency in EUR # - Create 2 rates for USD: # 1.0 on 2018-01-01 # 0.5 on 2018-02-01 # - Create an invoice on 2018-01-02 of 111 USD # - Register a payment on 2018-02-02 of 111 USD # - Unreconcile the payment self.env['res.currency.rate'].create({ 'name': time.strftime('%Y') + '-07-01', 'rate': 1.0, 'currency_id': self.currency_usd_id, 'company_id': self.company.id }) self.env['res.currency.rate'].create({ 'name': time.strftime('%Y') + '-08-01', 'rate': 0.5, 'currency_id': 
self.currency_usd_id, 'company_id': self.company.id }) inv = self.create_invoice(invoice_amount=111, currency_id=self.currency_usd_id) payment = self.env['account.payment'].create({ 'payment_type': 'inbound', 'payment_method_id': self.env.ref('account.account_payment_method_manual_in').id, 'partner_type': 'customer', 'partner_id': self.partner_agrolait_id, 'amount': 111, 'currency_id': self.currency_usd_id, 'journal_id': self.bank_journal_usd.id, 'date': time.strftime('%Y') + '-08-01', }) payment.action_post() credit_aml = payment.line_ids.filtered('credit') # Check residual before assignation self.assertAlmostEqual(inv.amount_residual, 111) # Assign credit, check exchange move and residual inv.js_assign_outstanding_line(credit_aml.id) self.assertEqual(len(payment.line_ids.mapped('full_reconcile_id').exchange_move_id), 1) self.assertAlmostEqual(inv.amount_residual, 0) # Unreconcile invoice and check residual credit_aml.remove_move_reconcile() self.assertAlmostEqual(inv.amount_residual, 111) def test_revert_payment_and_reconcile(self): payment = self.env['account.payment'].create({ 'payment_method_id': self.inbound_payment_method.id, 'payment_type': 'inbound', 'partner_type': 'customer', 'partner_id': self.partner_agrolait_id, 'journal_id': self.bank_journal_usd.id, 'date': '2018-06-04', 'amount': 666, }) payment.action_post() self.assertEqual(len(payment.line_ids), 2) bank_line = payment.line_ids.filtered(lambda l: l.account_id.id == self.bank_journal_usd.payment_debit_account_id.id) customer_line = payment.line_ids - bank_line self.assertEqual(len(bank_line), 1) self.assertEqual(len(customer_line), 1) self.assertNotEqual(bank_line.id, customer_line.id) self.assertEqual(bank_line.move_id.id, customer_line.move_id.id) move = bank_line.move_id # Reversing the payment's move reversed_move = move._reverse_moves([{'date': '2018-06-04'}]) self.assertEqual(len(reversed_move), 1) self.assertEqual(len(reversed_move.line_ids), 2) # Testing the reconciliation matching between 
the move lines and their reversed counterparts reversed_bank_line = reversed_move.line_ids.filtered(lambda l: l.account_id.id == self.bank_journal_usd.payment_debit_account_id.id) reversed_customer_line = reversed_move.line_ids - reversed_bank_line self.assertEqual(len(reversed_bank_line), 1) self.assertEqual(len(reversed_customer_line), 1) self.assertNotEqual(reversed_bank_line.id, reversed_customer_line.id) self.assertEqual(reversed_bank_line.move_id.id, reversed_customer_line.move_id.id) self.assertEqual(reversed_bank_line.full_reconcile_id.id, bank_line.full_reconcile_id.id) self.assertEqual(reversed_customer_line.full_reconcile_id.id, customer_line.full_reconcile_id.id) def test_revert_payment_and_reconcile_exchange(self): # A reversal of a reconciled payment which created a currency exchange entry, should create reversal moves # which move lines should be reconciled two by two with the original move's lines def _determine_debit_credit_line(move): line_ids_reconciliable = move.line_ids.filtered(lambda l: l.account_id.reconcile or l.account_id.internal_type == 'liquidity') return line_ids_reconciliable.filtered(lambda l: l.debit), line_ids_reconciliable.filtered(lambda l: l.credit) def _move_revert_test_pair(move, revert): self.assertTrue(move.line_ids) self.assertTrue(revert.line_ids) move_lines = _determine_debit_credit_line(move) revert_lines = _determine_debit_credit_line(revert) # in the case of the exchange entry, only one pair of lines will be found if move_lines[0] and revert_lines[1]: self.assertTrue(move_lines[0].full_reconcile_id.exists()) self.assertEqual(move_lines[0].full_reconcile_id.id, revert_lines[1].full_reconcile_id.id) if move_lines[1] and revert_lines[0]: self.assertTrue(move_lines[1].full_reconcile_id.exists()) self.assertEqual(move_lines[1].full_reconcile_id.id, revert_lines[0].full_reconcile_id.id) self.env['res.currency.rate'].create({ 'name': time.strftime('%Y') + '-07-01', 'rate': 1.0, 'currency_id': self.currency_usd_id, 
'company_id': self.company.id }) self.env['res.currency.rate'].create({ 'name': time.strftime('%Y') + '-08-01', 'rate': 0.5, 'currency_id': self.currency_usd_id, 'company_id': self.company.id }) inv = self.create_invoice(invoice_amount=111, currency_id=self.currency_usd_id) payment = self.env['account.payment'].create({ 'payment_type': 'inbound', 'payment_method_id': self.env.ref('account.account_payment_method_manual_in').id, 'partner_type': 'customer', 'partner_id': self.partner_agrolait_id, 'amount': 111, 'currency_id': self.currency_usd_id, 'journal_id': self.bank_journal_usd.id, 'date': time.strftime('%Y') + '-08-01', }) payment.action_post() credit_aml = payment.line_ids.filtered('credit') inv.js_assign_outstanding_line(credit_aml.id) self.assertTrue(inv.payment_state in ('in_payment', 'paid'), "Invoice should be paid") exchange_reconcile = payment.line_ids.mapped('full_reconcile_id') exchange_move = exchange_reconcile.exchange_move_id payment_move = payment.line_ids[0].move_id reverted_payment_move = payment_move._reverse_moves([{'date': time.strftime('%Y') + '-08-01'}], cancel=True) # After reversal of payment, the invoice should be open self.assertTrue(inv.state == 'posted', 'The invoice should be open again') self.assertFalse(exchange_reconcile.exists()) reverted_exchange_move = self.env['account.move'].search([('journal_id', '=', exchange_move.journal_id.id), ('ref', 'ilike', exchange_move.name)], limit=1) _move_revert_test_pair(payment_move, reverted_payment_move) _move_revert_test_pair(exchange_move, reverted_exchange_move) def test_partial_reconcile_currencies_02(self): #### # Day 1: Invoice Cust/001 to customer (expressed in USD) # Market value of USD (day 1): 1 USD = 0.5 EUR # * Dr. 100 USD / 50 EUR - Accounts receivable # * Cr. 
100 USD / 50 EUR - Revenue #### dest_journal_id = self.env['account.journal'].create({ 'name': 'turlututu', 'type': 'bank', 'company_id': self.env.company.id, }) self.env['res.currency.rate'].create({ 'currency_id': self.currency_usd_id, 'name': time.strftime('%Y') + '-01-01', 'rate': 2, }) invoice_cust_1 = self.env['account.move'].with_context(default_move_type='out_invoice').create({ 'move_type': 'out_invoice', 'partner_id': self.partner_agrolait_id, 'invoice_date': '%s-01-01' % time.strftime('%Y'), 'date': '%s-01-01' % time.strftime('%Y'), 'currency_id': self.currency_usd_id, 'invoice_line_ids': [ (0, 0, {'quantity': 1, 'price_unit': 100.0, 'name': 'product that cost 100'}) ], }) invoice_cust_1.action_post() aml = invoice_cust_1.invoice_line_ids[0] self.assertEqual(aml.credit, 50.0) ##### # Day 2: Receive payment for half invoice Cust/1 (in USD) # ------------------------------------------------------- # Market value of USD (day 2): 1 USD = 1 EUR # Payment transaction: # * Dr. 50 USD / 50 EUR - EUR Bank (valued at market price # at the time of receiving the money) # * Cr. 50 USD / 50 EUR - Accounts Receivable ##### self.env['res.currency.rate'].create({ 'currency_id': self.currency_usd_id, 'name': time.strftime('%Y') + '-01-02', 'rate': 1, }) payment = self.env['account.payment.register']\ .with_context(active_model='account.move', active_ids=invoice_cust_1.ids)\ .create({ 'payment_date': time.strftime('%Y') + '-01-02', 'amount': 50, 'journal_id': dest_journal_id.id, 'currency_id': self.currency_usd_id, })\ ._create_payments() # We expect at this point that the invoice should still be open, in 'partial' state, # because they owe us still 50 CC. 
self.assertEqual(invoice_cust_1.payment_state, 'partial', 'Invoice is in status %s' % invoice_cust_1.state) def test_multiple_term_reconciliation_opw_1906665(self): '''Test that when registering a payment to an invoice with multiple payment term lines the reconciliation happens against the line with the earliest date_maturity ''' payment_term = self.env['account.payment.term'].create({ 'name': 'Pay in 2 installments', 'line_ids': [ # Pay 50% immediately (0, 0, { 'value': 'percent', 'value_amount': 50, }), # Pay the rest after 14 days (0, 0, { 'value': 'balance', 'days': 14, }) ], }) # can't use self.create_invoice because it validates and we need to set payment_term_id invoice = self.create_invoice_partner( partner_id=self.partner_agrolait_id, payment_term_id=payment_term.id, currency_id=self.currency_usd_id, ) payment = self.env['account.payment'].create({ 'date': time.strftime('%Y') + '-07-15', 'payment_type': 'inbound', 'payment_method_id': self.env.ref('account.account_payment_method_manual_in').id, 'partner_type': 'customer', 'partner_id': self.partner_agrolait_id, 'amount': 25, 'currency_id': self.currency_usd_id, 'journal_id': self.bank_journal_usd.id, }) payment.action_post() receivable_line = payment.line_ids.filtered('credit') invoice.js_assign_outstanding_line(receivable_line.id) self.assertTrue(receivable_line.matched_debit_ids) def test_reconciliation_with_currency(self): #reconciliation on an account having a foreign currency being #the same as the company one account_rcv = self.account_rcv account_rcv.currency_id = self.currency_euro_id aml_obj = self.env['account.move.line'].with_context( check_move_validity=False) general_move1 = self.env['account.move'].create({ 'name': 'general1', 'journal_id': self.general_journal.id, }) aml_obj.create({ 'name': 'debit1', 'account_id': account_rcv.id, 'debit': 11, 'move_id': general_move1.id, }) aml_obj.create({ 'name': 'credit1', 'account_id': self.account_rsa.id, 'credit': 11, 'move_id': general_move1.id, }) 
general_move1.action_post() general_move2 = self.env['account.move'].create({ 'name': 'general2', 'journal_id': self.general_journal.id, }) aml_obj.create({ 'name': 'credit2', 'account_id': account_rcv.id, 'credit': 10, 'move_id': general_move2.id, }) aml_obj.create({ 'name': 'debit2', 'account_id': self.account_rsa.id, 'debit': 10, 'move_id': general_move2.id, }) general_move2.action_post() general_move3 = self.env['account.move'].create({ 'name': 'general3', 'journal_id': self.general_journal.id, }) aml_obj.create({ 'name': 'credit3', 'account_id': account_rcv.id, 'credit': 1, 'move_id': general_move3.id, }) aml_obj.create({ 'name': 'debit3', 'account_id': self.account_rsa.id, 'debit': 1, 'move_id': general_move3.id, }) general_move3.action_post() to_reconcile = ((general_move1 + general_move2 + general_move3) .mapped('line_ids') .filtered(lambda l: l.account_id.id == account_rcv.id)) to_reconcile.reconcile() for aml in to_reconcile: self.assertEqual(aml.amount_residual, 0.0) def test_inv_refund_foreign_payment_writeoff_domestic2(self): company = self.company self.env['res.currency.rate'].create({ 'name': time.strftime('%Y') + '-07-01', 'rate': 1.0, 'currency_id': self.currency_euro_id, 'company_id': company.id }) self.env['res.currency.rate'].create({ 'name': time.strftime('%Y') + '-07-01', 'rate': 1.110600, # Don't change this ! 
'currency_id': self.currency_usd_id, 'company_id': self.company.id }) inv1 = self.create_invoice(invoice_amount=800, currency_id=self.currency_usd_id) inv2 = self.create_invoice(move_type="out_refund", invoice_amount=400, currency_id=self.currency_usd_id) payment = self.env['account.payment'].create({ 'date': time.strftime('%Y') + '-07-15', 'payment_method_id': self.inbound_payment_method.id, 'payment_type': 'inbound', 'partner_type': 'customer', 'partner_id': inv1.partner_id.id, 'amount': 200.00, 'journal_id': self.bank_journal_euro.id, 'company_id': company.id, }) payment.action_post() inv1_receivable = inv1.line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable') inv2_receivable = inv2.line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable') pay_receivable = payment.line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable') move_balance = self.env['account.move'].create({ 'partner_id': inv1.partner_id.id, 'date': time.strftime('%Y') + '-07-01', 'journal_id': self.bank_journal_euro.id, 'line_ids': [ (0, False, {'credit': 160.16, 'account_id': inv1_receivable.account_id.id, 'name': 'Balance WriteOff'}), (0, False, {'debit': 160.16, 'account_id': self.diff_expense_account.id, 'name': 'Balance WriteOff'}), ] }) move_balance.action_post() move_balance_receiv = move_balance.line_ids.filtered(lambda l: l.account_id.internal_type == 'receivable') (inv1_receivable + inv2_receivable + pay_receivable + move_balance_receiv).reconcile() self.assertTrue(inv1_receivable.full_reconcile_id.exists()) self.assertEqual(inv1_receivable.full_reconcile_id, inv2_receivable.full_reconcile_id) self.assertEqual(inv1_receivable.full_reconcile_id, pay_receivable.full_reconcile_id) self.assertEqual(inv1_receivable.full_reconcile_id, move_balance_receiv.full_reconcile_id) self.assertTrue(inv1.payment_state in ('in_payment', 'paid'), "Invoice should be paid") self.assertEqual(inv2.payment_state, 'paid') def 
test_inv_refund_foreign_payment_writeoff_domestic3(self): """ Receivable Domestic (Foreign) 592.47 (658.00) | INV 1 > Done in foreign | 202.59 (225.00) INV 2 > Done in foreign | 372.10 (413.25) PAYMENT > Done in domestic (the 413.25 is virtual, non stored) | 17.78 (19.75) WriteOff > Done in domestic (the 19.75 is virtual, non stored) Reconciliation should be full Invoices should be marked as paid """ company = self.company self.env['res.currency.rate'].create({ 'name': time.strftime('%Y') + '-07-01', 'rate': 1.0, 'currency_id': self.currency_euro_id, 'company_id': company.id }) self.env['res.currency.rate'].create({ 'name': time.strftime('%Y') + '-07-01', 'rate': 1.110600, # Don't change this ! 'currency_id': self.currency_usd_id, 'company_id': company.id }) inv1 = self.create_invoice(invoice_amount=658, currency_id=self.currency_usd_id) inv2 = self.create_invoice(move_type="out_refund", invoice_amount=225, currency_id=self.currency_usd_id) payment =
<filename>appdotnet/api.py from __future__ import print_function import os import sys import requests import json import endpoints from datetime import datetime import dateutil from exceptions import APIException, HTTPException from util import is_sequence USER_AGENT = 'appdotnet/0.1.3 (Python/%s)' % '.'.join([str(x) for x in sys.version_info]) def _successful(response): """ Returns whether a response was considered successful. If no body is available or the 'meta' dict in the response envelope doesn't contain a 'code' value, checks the HTTP response code instead. :param requests.Response response: a response object :returns: (boolean) True if successful """ code = response.status_code try: code = response.json()['meta']['code'] except Exception: pass return code in (200, 201, 202) def _params(param_dict=None, collapse=False): """ Given a dict of parameter key/value pairs, filter out the ones whose values are None, and comma-join any parameters with lists/tuples. :param dict param_dict: the full set of parameters :param boolean collapse: if True, collapses lists/tuples to comma-separated lists :returns: (dict) the refined set of params """ param_dict = param_dict or {} params = dict(filter(lambda (k, v): v is not None, param_dict.iteritems())) if collapse: for key in params: if is_sequence(params[key]): params[key] = ','.join(params[key]) return params def _raise_errors(response): """ Checks the requests.Response object body for any API-level errors, then checks whether we received a 4xx- or 5xx-level response code. Raises an exception for any undesirables. """ api_error = None http_error = 400 <= response.status_code try: body = response.json() api_error = (body.get('error', False) or body.get('meta', {}).get('error_message')) except Exception: pass if api_error: raise APIException(api_error) elif http_error: raise HTTPException("HTTP %d: %s" % (response.status_code, response.text)) class Client(object): """ An App.net client object. 
""" def __init__(self, client_id=None, client_secret=None, app_token=None, user_token=None): """ Initialize an API client with the provided credentials. :param client_id: (optional) the application's Client ID :param client_secret: (optional) the application's Client Secret :param app_token: (optional) an application-level token as generated by https://account.app.net/oauth/access_token :param user_token: (optional) a user-level token """ self.client_id = client_id self.client_secret = client_secret self.app_token = app_token self.user_token = user_token self._load_env_credentials() self.session = requests.Session() self._build_headers() def _load_env_credentials(self): """ Attempt to load client ID, secret, app token, and user token from the environment if not explicitly passed in to the class. """ if not self.client_id: self.client_id = os.environ.get('ADN_CLIENT_ID', None) if not self.client_secret: self.client_secret = os.environ.get('ADN_CLIENT_SECRET', None) if not self.app_token: self.app_token = os.environ.get('ADN_APP_TOKEN', None) if not self.user_token: self.user_token = os.environ.get('ADN_USER_TOKEN', None) def _build_headers(self): """ Builds the basic set of headers for the session. """ self.session.headers['User-Agent'] = USER_AGENT self.session.headers['Content-Type'] = 'application/json' self.session.headers['Accept'] = 'application/json' if self.app_token: self.session.headers['Authorization'] = 'BEARER ' + self.app_token def _build_request(self, key, uri_vars=None, clean=False): """ Locate the appropriate endpoint URI and HTTP VERB for the specified request key, and return a tuple containing the callable from our Session corresponding to that verb, and the full endpoint URL. 
:param key: the endpoint key corresponding to an API method :param dict vars: key/value variable pairs in the URI to be substituted :returns: (tuple) the HTTP verb and endpoint to use for the request """ source = requests if clean else self.session verb, endpoint = endpoints.find_method(key, uri_vars=uri_vars) verb = verb.lower() if not hasattr(source, verb): raise Exception('Cannot find verb method %s in requests!' % verb) return (getattr(source, verb), endpoint) def _request(self, key, uri_vars=None, params=None, body=None, clean=False, **kwargs): """ Issue a request for the endpoint key, substituting in the URL any variables provided in `vars`, including any query string or body parameters in `params`, and optionally with no authentication information if `clean` is True. :param key: the endpoint key corresponding to an API method :param dict uri_vars: (optional) key/value variable pairs to substitute in the URI :param dict params: (optional) key/value parameters to include in the query string on requests or on POSTs where no other body is specified :param body: (optional) a JSON-serializable value to use as the body of a POST or PUT request :param boolean clean: (optional) if True, use a clean request (e.g., no authentication headers) :param **kwargs: (optional) additional arguments to pass in to the requests method :returns: (dict|string) the deserialized body, or the original body as a str if it could not be deserialized """ body = json.dumps(body) if body is not None else body method, endpoint = self._build_request(key, uri_vars=uri_vars, clean=clean) response = method(endpoint, params=params, data=body, **kwargs) _raise_errors(response) body = response.text try: body = response.json() except Exception: pass return body def create_app_token(self): """ Creates an application access token. In order to request an application token, you must provide the client ID and client secret when you initialize the client. 
:returns: (string) the application access token """ if not self.client_id or not self.client_secret: raise Exception('client_id and client_secret must be provided to ' 'create an app access token.') data = {'client_id': self.client_id, 'client_secret': self.client_secret, 'grant_type': 'client_credentials'} body = self._request('oauth.token.create', clean=True, data=data) return body['access_token'] def stream(self, stream_url, decode=True): """ A generator used to iterate over events received via the streaming API. Automatic JSON decoding is the default behavior but can be disabled. Yields an Event instance for each line received via the API. If decoding is off, yields a string of the raw line received. :param stream_url: the stream endpoint URL :param boolean decode: whether to decode the JSON structure before yielding. :returns: (appdotnet.api.Event) yields an Event instance for each streaming API event received """ resp = requests.get(stream_url, stream=True) for line in resp.iter_lines(chunk_size=1): if not line: continue value = None if not decode: value = line else: try: value = Event(json.loads(line)) except Exception as ex: print('Cannot decode line: %s (%s)' % (ex, line)) yield value def stream_list(self): """ Get a list of streams for the application token. :returns: (list) a list of stream dicts, or an empty list if none """ return self._request('streams.list').get('data', []) def stream_find(self, key_or_id): """ Find a specific stream whose key or ID matches the key_or_id parameter. :param string key_or_id: the key or ID to search for. :returns: (dict|None) a dict representing the requested stream, or None if it could not be found """ key_or_id = str(key_or_id) for stream in self.stream_list(): if key_or_id in (stream.get('id', ''), stream.get('key', '')): return stream return None def stream_create(self, key=None, type_list=None, filter_id=None, stream_type='long_poll'): """ Creates a stream for the application token with the specified options. 
:param key: (optional) the key or name for this stream :param list type_list: any combination of object types; see http://developers.app.net/docs/resources/stream/ for a full list :param filter_id: (optional) an existing filter ID to apply to this stream :param stream_type: the stream type; currently long_poll is the only type :returns: (dict) the response as received from the API """ type_list = type_list or [] body = _params({'key': key, 'object_types': type_list, 'type': stream_type, 'filter_id': filter_id}) return self._request('streams.create', body=body) def stream_delete(self, stream_id): """ Deletes a stream with the specified ID. :param integer id: the stream ID to delete :returns: (boolean) True if the stream was successfully deleted """ self._request('streams.delete', uri_vars={'stream_id': stream_id}) return True class Event(object): """ Represents a streaming API event. """ def __init__(self, event): """ Accepts a dict called `event` and provides a positively terrific variety of convenience methods to interpret various things about it. :param dict event: the original JSON-decoded entity """ self._event = event self._meta = self._event.get('meta', {}) self._data = self._event.get('data', {}) self._type = self._meta.get('type', None) def __hasattr__(self, key): return hasattr(self._data, key) def __getattr__(self, key): return getattr(self._data, key) def __contains__(self, key): return key in self._data def __getitem__(self, key): if not key in self._data: raise IndexError return self._data[key] def type(self): """ Original meta type of this event. """ return self._type def meta(self, key, default=None): """ Returns a value from the meta dict or a default value if the key was not present. 
:param key: a dict key from the original meta dict :param default: (optional) a default value to return if the key does not exist """ return self._meta.get(key, default) def data(self, key, default=None): """ Returns a value from the data dict or a default value if the key was not present. :param key: a dict key from the original data dict :param default: (optional) a default value to return if the key does not
<gh_stars>0 # Copyright 2019-2022 Cambridge Quantum Computing # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import json from pytket.utils.spam import SpamCorrecter, compress_counts from pytket.circuit import Node, Circuit # type: ignore from pytket.routing import Architecture, route # type: ignore from pytket.passes import DelayMeasures # type: ignore from typing import List, Dict, Counter, Tuple from pytket.utils.outcomearray import OutcomeArray from math import ceil from pytket.backends.backendresult import BackendResult def test_spam_integration() -> None: SEED = 120 # test data, to avoid using backend calib_results: List[Dict[Tuple[int, ...], int]] = [ { (0, 0, 0): 742, (0, 0, 1): 84, (0, 1, 0): 76, (0, 1, 1): 8, (1, 0, 0): 72, (1, 0, 1): 10, (1, 1, 0): 7, (1, 1, 1): 1, }, { (0, 0, 0): 183, (0, 0, 1): 23, (0, 1, 0): 614, (0, 1, 1): 70, (1, 0, 0): 13, (1, 0, 1): 5, (1, 1, 0): 85, (1, 1, 1): 7, }, { (0, 0, 0): 53, (0, 0, 1): 161, (0, 1, 0): 6, (0, 1, 1): 20, (1, 0, 0): 166, (1, 0, 1): 517, (1, 1, 0): 18, (1, 1, 1): 59, }, { (0, 0, 0): 13, (0, 0, 1): 47, (0, 1, 0): 48, (0, 1, 1): 131, (1, 0, 0): 49, (1, 0, 1): 152, (1, 1, 0): 136, (1, 1, 1): 424, }, ] bellres_counts: Dict[Tuple[int, ...], int] = { (0, 0, 0): 406, (0, 0, 1): 111, (0, 1, 0): 38, (0, 1, 1): 13, (1, 0, 0): 136, (1, 0, 1): 251, (1, 1, 0): 14, (1, 1, 1): 31, } bellres_counter = Counter( { OutcomeArray.from_readouts([key]): ceil(val) for key, val in bellres_counts.items() } ) bellres = 
BackendResult(counts=bellres_counter) qbs = [Node("qx", i) for i in range(4)] arc = Architecture([[qbs[i], qbs[i + 1]] for i in range(3)]) subs = [[qbs[2], qbs[0]], [qbs[1]]] spam = SpamCorrecter(subs) calib_circs = spam.calibration_circuits() assert len(calib_circs) == 4 calib_counters = [ Counter({OutcomeArray.from_readouts([key]): val for key, val in r.items()}) for r in calib_results ] calib_brs = [BackendResult(counts=c) for c in calib_counters] spam.calculate_matrices(calib_brs) assert spam.characterisation_matrices[0].shape == (4, 4) assert spam.characterisation_matrices[1].shape == (2, 2) bellcc = Circuit(3, 3).H(0).CX(0, 2).measure_all() rbell = route(bellcc, arc) def check_correction( counts0: Dict[Tuple[int, ...], int], counts1: Dict[Tuple[int, ...], int] ) -> bool: if ( counts0[(0, 0, 0)] > counts1[(0, 0, 0)] and counts0[(1, 0, 1)] > counts1[(1, 0, 1)] ): return True return False rbell_parallel_measures = spam.get_parallel_measure(rbell) default_correct = spam.correct_counts(bellres, rbell_parallel_measures).get_counts() assert check_correction(default_correct, bellres_counts) bellcc_mid = Circuit() qbs = bellcc_mid.add_q_register("qx", 3) bits = bellcc_mid.add_c_register("c", 3) bellcc_mid.H(qbs[0]).CX(qbs[0], qbs[2]).Measure(qbs[0], bits[0]).SWAP( qbs[0], qbs[1] ).Measure(qbs[0], bits[1]).Measure(qbs[2], bits[2]) bell_cc_parallel_measures = spam.get_parallel_measure(bellcc_mid) default_correct_mid = spam.correct_counts( bellres, bell_cc_parallel_measures ).get_counts() assert check_correction(default_correct_mid, bellres_counts) with tempfile.TemporaryFile(mode="w+t") as tmpjson: json.dump(spam.to_dict(), tmpjson) tmpjson.seek(0) newspam = SpamCorrecter.from_dict(json.load(tmpjson)) new_default = newspam.correct_counts(bellres, rbell_parallel_measures).get_counts() assert default_correct == new_default assert check_correction( spam.correct_counts( bellres, rbell_parallel_measures, method="invert" ).get_counts(), bellres_counts, ) assert 
check_correction( spam.correct_counts( bellres, rbell_parallel_measures, method="bayesian" ).get_counts(), bellres_counts, ) assert check_correction( spam.correct_counts( bellres, rbell_parallel_measures, method="bayesian", options={"tol": 1e-8, "maxiter": 10}, ).get_counts(), bellres_counts, ) assert check_correction( spam.correct_counts( bellres, rbell_parallel_measures, method="bayesian", options={"tol": 1e-2} ).get_counts(), bellres_counts, ) assert check_correction( spam.correct_counts( bellres, bell_cc_parallel_measures, method="invert" ).get_counts(), bellres_counts, ) assert check_correction( spam.correct_counts( bellres, bell_cc_parallel_measures, method="bayesian" ).get_counts(), bellres_counts, ) assert check_correction( spam.correct_counts( bellres, bell_cc_parallel_measures, method="bayesian", options={"tol": 1e-8, "maxiter": 10}, ).get_counts(), bellres_counts, ) assert check_correction( spam.correct_counts( bellres, bell_cc_parallel_measures, method="bayesian", options={"tol": 1e-2} ).get_counts(), bellres_counts, ) def test_spam_routing() -> None: # test spam with a pre routed circuit, using final mapped qubits to perform # the calibration raw_res = { (0, 0, 0, 0): 352, (0, 0, 0, 1): 38, (0, 0, 1, 0): 37, (0, 0, 1, 1): 15, (0, 1, 0, 0): 36, (0, 1, 0, 1): 17, (0, 1, 1, 0): 19, (0, 1, 1, 1): 58, (1, 0, 0, 0): 43, (1, 0, 0, 1): 25, (1, 0, 1, 0): 25, (1, 0, 1, 1): 41, (1, 1, 0, 0): 22, (1, 1, 0, 1): 60, (1, 1, 1, 0): 45, (1, 1, 1, 1): 167, } calib_results: List[Dict[Tuple[int, ...], int]] = [ { (0, 0, 0, 0): 659, (0, 0, 0, 1): 65, (0, 0, 1, 0): 68, (0, 0, 1, 1): 10, (0, 1, 0, 0): 70, (0, 1, 0, 1): 12, (0, 1, 1, 0): 6, (1, 0, 0, 0): 86, (1, 0, 0, 1): 7, (1, 0, 1, 0): 7, (1, 0, 1, 1): 2, (1, 1, 0, 0): 6, (1, 1, 1, 0): 1, (1, 1, 1, 1): 1, }, { (0, 0, 0, 0): 170, (0, 0, 0, 1): 555, (0, 0, 1, 0): 19, (0, 0, 1, 1): 71, (0, 1, 0, 0): 18, (0, 1, 0, 1): 50, (0, 1, 1, 0): 2, (0, 1, 1, 1): 8, (1, 0, 0, 0): 20, (1, 0, 0, 1): 72, (1, 0, 1, 1): 6, (1, 1, 0, 0): 2, 
(1, 1, 0, 1): 6, (1, 1, 1, 1): 1, }, { (0, 0, 0, 0): 173, (0, 0, 0, 1): 17, (0, 0, 1, 0): 538, (0, 0, 1, 1): 75, (0, 1, 0, 0): 21, (0, 1, 0, 1): 3, (0, 1, 1, 0): 69, (0, 1, 1, 1): 12, (1, 0, 0, 0): 17, (1, 0, 0, 1): 4, (1, 0, 1, 0): 59, (1, 0, 1, 1): 7, (1, 1, 0, 0): 1, (1, 1, 1, 0): 4, }, { (0, 0, 0, 0): 42, (0, 0, 0, 1): 150, (0, 0, 1, 0): 145, (0, 0, 1, 1): 444, (0, 1, 0, 0): 10, (0, 1, 0, 1): 19, (0, 1, 1, 0): 16, (0, 1, 1, 1): 73, (1, 0, 0, 0): 4, (1, 0, 0, 1): 17, (1, 0, 1, 0): 12, (1, 0, 1, 1): 63, (1, 1, 0, 1): 1, (1, 1, 1, 1): 4, }, { (0, 0, 0, 0): 168, (0, 0, 0, 1): 26, (0, 0, 1, 0): 22, (0, 0, 1, 1): 6, (0, 1, 0, 0): 555, (0, 1, 0, 1): 65, (0, 1, 1, 0): 59, (0, 1, 1, 1): 4, (1, 0, 0, 0): 24, (1, 0, 0, 1): 2, (1, 0, 1, 0): 2, (1, 0, 1, 1): 1, (1, 1, 0, 0): 50, (1, 1, 0, 1): 11, (1, 1, 1, 0): 5, }, { (0, 0, 0, 0): 60, (0, 0, 0, 1): 164, (0, 0, 1, 0): 7, (0, 0, 1, 1): 15, (0, 1, 0, 0): 158, (0, 1, 0, 1): 442, (0, 1, 1, 0): 14, (0, 1, 1, 1): 39, (1, 0, 0, 0): 7, (1, 0, 0, 1): 19, (1, 0, 1, 1): 2, (1, 1, 0, 0): 18, (1, 1, 0, 1): 49, (1, 1, 1, 0): 1, (1, 1, 1, 1): 5, }, { (0, 0, 0, 0): 53, (0, 0, 0, 1): 3, (0, 0, 1, 0): 160, (0, 0, 1, 1): 17, (0, 1, 0, 0): 155, (0, 1, 0, 1): 16, (0, 1, 1, 0): 449, (0, 1, 1, 1): 55, (1, 0, 0, 0): 3, (1, 0, 1, 0): 11, (1, 0, 1, 1): 4, (1, 1, 0, 0): 17, (1, 1, 0, 1): 2, (1, 1, 1, 0): 50, (1, 1, 1, 1): 5, }, { (0, 0, 0, 0): 13, (0, 0, 0, 1): 38, (0, 0, 1, 0): 34, (0, 0, 1, 1): 146, (0, 1, 0, 0): 49, (0, 1, 0, 1): 140, (0, 1, 1, 0): 120, (0, 1, 1, 1): 365, (1, 0, 0, 0): 1, (1, 0, 0, 1): 1, (1, 0, 1, 0): 7, (1, 0, 1, 1): 17, (1,
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Serves the stub App Engine APIs (e.g. memcache, datastore) over HTTP. The Remote API protocol is used for communication. """ from __future__ import with_statement import BaseHTTPServer import httplib import logging import os.path import pickle import socket import SocketServer import subprocess import sys import tempfile import threading import time import traceback import urllib2 import urlparse import wsgiref.headers import google import yaml from google.appengine.api import mail_stub from google.appengine.api import request_info from google.appengine.api import urlfetch_stub from google.appengine.api import user_service_stub from google.appengine.api.app_identity import app_identity_stub from google.appengine.api.blobstore import blobstore_stub from google.appengine.api.blobstore import file_blob_storage from google.appengine.api.capabilities import capability_stub from google.appengine.api.channel import channel_service_stub from google.appengine.api.files import file_service_stub from google.appengine.api.logservice import logservice_stub from google.appengine.api.search import simple_search_stub from google.appengine.api.taskqueue import taskqueue_stub from google.appengine.api.prospective_search import prospective_search_stub from google.appengine.api.memcache import memcache_stub from google.appengine.api.system import system_stub from google.appengine.api.xmpp import 
xmpp_service_stub from google.appengine.api import datastore_file_stub from google.appengine.datastore import datastore_sqlite_stub from google.appengine.datastore import datastore_stub_util from google.appengine.datastore import datastore_v4_stub from google.appengine.api import apiproxy_stub_map from google.appengine.ext.remote_api import remote_api_pb from google.appengine.ext.remote_api import remote_api_services from google.appengine.runtime import apiproxy_errors QUIT_PATH = '/quit' GLOBAL_API_LOCK = threading.RLock() class Error(Exception): pass def _ClearDatastoreStorage(datastore_path): """Delete the datastore storage file at the given path.""" if os.path.lexists(datastore_path): try: os.remove(datastore_path) except OSError, e: logging.warning('Failed to remove datastore file %r: %s', datastore_path, e) def _ClearProspectiveSearchStorage(prospective_search_path): """Delete the perspective search storage file at the given path.""" if os.path.lexists(prospective_search_path): try: os.remove(prospective_search_path) except OSError, e: logging.warning('Failed to remove prospective search file %r: %s', prospective_search_path, e) THREAD_SAFE_SERVICES = frozenset(( 'app_identity_service', 'capability_service', 'channel', 'logservice', 'mail', 'memcache', 'remote_socket', 'urlfetch', 'user', 'xmpp', )) def _ExecuteRequest(request): """Executes an API method call and returns the response object. Args: request: A remote_api.Request object representing the API call e.g. a call to memcache.Get. Returns: A ProtocolBuffer.ProtocolMessage representing the API response e.g. a memcache_service_pb.MemcacheGetResponse. Raises: apiproxy_errors.CallNotFoundError: if the requested method doesn't exist. apiproxy_errors.ApplicationError: if the API method calls fails. 
""" service = request.service_name() method = request.method() service_methods = remote_api_services.SERVICE_PB_MAP.get(service, {}) request_class, response_class = service_methods.get(method, (None, None)) if not request_class: raise apiproxy_errors.CallNotFoundError('%s.%s does not exist' % (service, method)) request_data = request_class() request_data.ParseFromString(request.request()) response_data = response_class() def MakeRequest(): apiproxy_stub_map.MakeSyncCall(service, method, request_data, response_data) if service in THREAD_SAFE_SERVICES: MakeRequest() else: with GLOBAL_API_LOCK: MakeRequest() return response_data class APIRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): """Handler for all API server HTTP requests.""" def log_message(self, format, *args): logging.debug(format, *args) def do_GET(self): if self.path == QUIT_PATH: self._HandleShutdown() else: params = urlparse.parse_qs(urlparse.urlparse(self.path).query) rtok = params.get('rtok', ['0'])[0] self.send_response(httplib.OK) self.send_header('Content-Type', 'text/plain') self.end_headers() self.wfile.write(yaml.dump({ 'app_id': self.server.app_id, 'rtok': rtok, })) def _HandleShutdown(self): """Handles a request for the API Server to exit.""" self.send_response(httplib.OK) self.send_header('Content-Type', 'text/plain') self.end_headers() self.wfile.write('API Server Quitting') self.server.shutdown() def do_POST(self): """Handles a single API request e.g. 
memcache.Get().""" self.send_response(httplib.OK) self.send_header('Content-Type', 'application/octet-stream') self.end_headers() response = remote_api_pb.Response() try: request = remote_api_pb.Request() request.ParseFromString( self.rfile.read(int(self.headers['content-length']))) api_response = _ExecuteRequest(request).Encode() response.set_response(api_response) except Exception, e: logging.debug('Exception while handling %s\n%s', request, traceback.format_exc()) response.set_exception(pickle.dumps(e)) if isinstance(e, apiproxy_errors.ApplicationError): application_error = response.mutable_application_error() application_error.set_code(e.application_error) application_error.set_detail(e.error_detail) self.wfile.write(response.Encode()) class APIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer): """Serves API calls over HTTP.""" def __init__(self, server_address, app_id): BaseHTTPServer.HTTPServer.__init__(self, server_address, APIRequestHandler) self.app_id = app_id def _SetupStubs( app_id, application_root, appidentity_email_address, appidentity_private_key_path, trusted, blobstore_path, use_sqlite, auto_id_policy, high_replication, datastore_path, datastore_require_indexes, images_host_prefix, logs_path, mail_smtp_host, mail_smtp_port, mail_smtp_user, mail_smtp_password, mail_enable_sendmail, mail_show_mail_body, matcher_prospective_search_path, taskqueue_auto_run_tasks, taskqueue_task_retry_seconds, taskqueue_default_http_server, user_login_url, user_logout_url, default_gcs_bucket_name): """Configures the APIs hosted by this server. Args: app_id: The str application id e.g. "guestbook". application_root: The path to the directory containing the user's application e.g. "/home/bquinlan/myapp". trusted: A bool indicating if privileged APIs should be made available. blobstore_path: The path to the file that should be used for blobstore storage. use_sqlite: A bool indicating whether DatastoreSqliteStub or DatastoreFileStub should be used. 
auto_id_policy: One of datastore_stub_util.SEQUENTIAL or .SCATTERED, indicating whether the Datastore stub should assign IDs sequentially or scattered. high_replication: A bool indicating whether to use the high replication consistency model. datastore_path: The path to the file that should be used for datastore storage. datastore_require_indexes: A bool indicating if the same production datastore indexes requirements should be enforced i.e. if True then a google.appengine.ext.db.NeedIndexError will be be raised if a query is executed without the required indexes. images_host_prefix: The URL prefix (protocol://host:port) to preprend to image urls on calls to images.GetUrlBase. logs_path: Path to the file to store the logs data in. mail_smtp_host: The SMTP hostname that should be used when sending e-mails. If None then the mail_enable_sendmail argument is considered. mail_smtp_port: The SMTP port number that should be used when sending e-mails. If this value is None then mail_smtp_host must also be None. mail_smtp_user: The username to use when authenticating with the SMTP server. This value may be None if mail_smtp_host is also None or if the SMTP server does not require authentication. mail_smtp_password: The password to use when authenticating with the SMTP server. This value may be None if mail_smtp_host or mail_smtp_user is also None. mail_enable_sendmail: A bool indicating if sendmail should be used when sending e-mails. This argument is ignored if mail_smtp_host is not None. mail_show_mail_body: A bool indicating whether the body of sent e-mails should be written to the logs. matcher_prospective_search_path: The path to the file that should be used to save prospective search subscriptions. taskqueue_auto_run_tasks: A bool indicating whether taskqueue tasks should be run automatically or it the must be manually triggered. taskqueue_task_retry_seconds: An int representing the number of seconds to wait before a retrying a failed taskqueue task. 
taskqueue_default_http_server: A str containing the address of the http server that should be used to execute tasks. user_login_url: A str containing the url that should be used for user login. user_logout_url: A str containing the url that should be used for user logout. default_gcs_bucket_name: A str overriding the usual default bucket name. """ os.environ['APPLICATION_ID'] = app_id tmp_app_identity_stub = app_identity_stub.AppIdentityServiceStub.Create( email_address=appidentity_email_address, private_key_path=appidentity_private_key_path) if default_gcs_bucket_name is not None: tmp_app_identity_stub.SetDefaultGcsBucketName(default_gcs_bucket_name) apiproxy_stub_map.apiproxy.RegisterStub( 'app_identity_service', tmp_app_identity_stub) blob_storage = file_blob_storage.FileBlobStorage(blobstore_path, app_id) apiproxy_stub_map.apiproxy.RegisterStub( 'blobstore', blobstore_stub.BlobstoreServiceStub(blob_storage)) apiproxy_stub_map.apiproxy.RegisterStub( 'capability_service', capability_stub.CapabilityServiceStub()) apiproxy_stub_map.apiproxy.RegisterStub( 'channel', channel_service_stub.ChannelServiceStub()) if use_sqlite: datastore = datastore_sqlite_stub.DatastoreSqliteStub( app_id, datastore_path, datastore_require_indexes, trusted, root_path=application_root, auto_id_policy=auto_id_policy) else: datastore = datastore_file_stub.DatastoreFileStub( app_id, datastore_path, datastore_require_indexes, trusted, root_path=application_root, auto_id_policy=auto_id_policy) if high_replication: datastore.SetConsistencyPolicy( datastore_stub_util.TimeBasedHRConsistencyPolicy()) apiproxy_stub_map.apiproxy.RegisterStub( 'datastore_v3', datastore) apiproxy_stub_map.apiproxy.RegisterStub( 'datastore_v4', datastore_v4_stub.DatastoreV4Stub(app_id)) apiproxy_stub_map.apiproxy.RegisterStub( 'file', file_service_stub.FileServiceStub(blob_storage)) try: from google.appengine.api.images import images_stub except ImportError: logging.warning('Could not initialize images API; you are 
likely missing ' 'the Python "PIL" module.') from google.appengine.api.images import images_not_implemented_stub apiproxy_stub_map.apiproxy.RegisterStub( 'images', images_not_implemented_stub.ImagesNotImplementedServiceStub()) else: apiproxy_stub_map.apiproxy.RegisterStub( 'images', images_stub.ImagesServiceStub(host_prefix=images_host_prefix)) apiproxy_stub_map.apiproxy.RegisterStub( 'logservice', logservice_stub.LogServiceStub(logs_path=logs_path)) apiproxy_stub_map.apiproxy.RegisterStub( 'mail', mail_stub.MailServiceStub(mail_smtp_host, mail_smtp_port, mail_smtp_user, mail_smtp_password, enable_sendmail=mail_enable_sendmail, show_mail_body=mail_show_mail_body)) apiproxy_stub_map.apiproxy.RegisterStub( 'memcache', memcache_stub.MemcacheServiceStub()) apiproxy_stub_map.apiproxy.RegisterStub( 'search', simple_search_stub.SearchServiceStub()) apiproxy_stub_map.apiproxy.RegisterStub('system', system_stub.SystemServiceStub()) apiproxy_stub_map.apiproxy.RegisterStub( 'taskqueue', taskqueue_stub.TaskQueueServiceStub( root_path=application_root, auto_task_running=taskqueue_auto_run_tasks, task_retry_seconds=taskqueue_task_retry_seconds, default_http_server=taskqueue_default_http_server)) apiproxy_stub_map.apiproxy.GetStub('taskqueue').StartBackgroundExecution() apiproxy_stub_map.apiproxy.RegisterStub( 'urlfetch', urlfetch_stub.URLFetchServiceStub()) apiproxy_stub_map.apiproxy.RegisterStub( 'user', user_service_stub.UserServiceStub(login_url=user_login_url, logout_url=user_logout_url)) apiproxy_stub_map.apiproxy.RegisterStub( 'xmpp', xmpp_service_stub.XmppServiceStub()) apiproxy_stub_map.apiproxy.RegisterStub( 'matcher', prospective_search_stub.ProspectiveSearchStub( matcher_prospective_search_path, apiproxy_stub_map.apiproxy.GetStub('taskqueue'))) def _TearDownStubs(): """Clean up any stubs that need cleanup.""" logging.info('Applying all pending transactions and saving the datastore') datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3') 
datastore_stub.Write() def ParseCommandArguments(args): """Parses and the application's command line arguments. Args: args: A list of command line arguments *not* including the executable or script e.g. ['-A' 'myapp', '--api_port=8000']. Returns: An object containing the values passed in the commandline as attributes. Raises: SystemExit: if the argument parsing fails. """ import argparse from google.appengine.tools import boolean_action parser = argparse.ArgumentParser() parser.add_argument('-A', '--application', required=True) parser.add_argument('--api_host', default='') parser.add_argument('--api_port', default=8000, type=int) parser.add_argument('--trusted', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--appidentity_email_address', default=None) parser.add_argument('--appidentity_private_key_path', default=None) parser.add_argument('--application_root', default=None) parser.add_argument('--application_host', default='localhost') parser.add_argument('--application_port', default=None) parser.add_argument('--blobstore_path', default=None) parser.add_argument('--datastore_path', default=None) parser.add_argument('--auto_id_policy', default='scattered', type=lambda s: s.lower(), choices=(datastore_stub_util.SEQUENTIAL, datastore_stub_util.SCATTERED)) parser.add_argument('--use_sqlite', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--high_replication', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--require_indexes', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--clear_datastore', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--logs_path', default=None) parser.add_argument('--enable_sendmail', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--smtp_host', default='') parser.add_argument('--smtp_port', default=25, type=int) 
parser.add_argument('--smtp_user', default='') parser.add_argument('--smtp_password', default='') parser.add_argument('--show_mail_body', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--prospective_search_path', default=None) parser.add_argument('--clear_prospective_search', action=boolean_action.BooleanAction, const=True, default=False) parser.add_argument('--enable_task_running', action=boolean_action.BooleanAction, const=True, default=True) parser.add_argument('--task_retry_seconds', default=30, type=int) parser.add_argument('--user_login_url', default=None) parser.add_argument('--user_logout_url', default=None) return parser.parse_args(args) class APIServerProcess(object): """Manages an API Server running as a seperate process.""" def __init__(self, executable, host, port, app_id, script=None, appidentity_email_address=None, appidentity_private_key_path=None, application_host=None, application_port=None, application_root=None, auto_id_policy=None, blobstore_path=None, clear_datastore=None, clear_prospective_search=None, datastore_path=None, enable_sendmail=None, enable_task_running=None, high_replication=None, logs_path=None, prospective_search_path=None, require_indexes=None, show_mail_body=None, smtp_host=None, smtp_password=<PASSWORD>, smtp_port=None, smtp_user=None, task_retry_seconds=None, trusted=None, use_sqlite=None, default_gcs_bucket_name=None): """Configures the APIs hosted by this server. Args: executable: The path of the executable to use when running the API Server e.g. "/usr/bin/python".
    B : B's minOccurs=4, B's maxOccurs=4, R has 2 groups, each has one
    child with minOccurs as 2
    """
    assert_bindings(
        schema="msData/particles/particlesQ013.xsd",
        instance="msData/particles/particlesQ013.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_q011_particles_q011_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Sequence:Any -
    NSRecurseCheckCardinality) (Sequence) R derived by restriction from
    (any) B : B's minOccurs=0, B's maxOccurs=4, R has 2 groups, each has
    one child with maxOccurs as 2
    """
    assert_bindings(
        schema="msData/particles/particlesQ011.xsd",
        instance="msData/particles/particlesQ011.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_q007_particles_q007_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Sequence:Any -
    NSRecurseCheckCardinality) (Sequence) R derived by restriction from
    (any) B : B's minOccurs=0, B's maxOccurs=4, R has 2 elements, each
    with maxOccurs as 2
    """
    assert_bindings(
        schema="msData/particles/particlesQ007.xsd",
        instance="msData/particles/particlesQ007.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_q005_particles_q005_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Sequence:Any -
    NSRecurseCheckCardinality) (Sequence) R derived by restriction from
    (any) B : B's minOccurs=0, B's maxOccurs=unbounded, R's maxOccurs =
    1000, R has element with maxOccurs unbounded
    """
    assert_bindings(
        schema="msData/particles/particlesQ005.xsd",
        instance="msData/particles/particlesQ005.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_q004_particles_q004_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Sequence:Any -
    NSRecurseCheckCardinality) (Sequence) R derived by restriction from
    (any) B : B's minOccurs=0, B's maxOccurs=6, R has an element with
    minOccurs=1, maxOccurs=6
    """
    assert_bindings(
        schema="msData/particles/particlesQ004.xsd",
        instance="msData/particles/particlesQ004.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_q003_particles_q003_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Sequence:Any -
    NSRecurseCheckCardinality) (Sequence) R derived by restriction from
    (any) B : B's minOccurs=0, B's maxOccurs=6, R's minOccurs=1, R's
    maxOccurs=6
    """
    assert_bindings(
        schema="msData/particles/particlesQ003.xsd",
        instance="msData/particles/particlesQ003.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_q002_particles_q002_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Sequence:Any -
    NSRecurseCheckCardinality) (Sequence) R derived by restriction from
    (any) B : B's minOccurs=1, B's maxOccurs=1, R's minOccurs=1, R's
    maxOccurs=1
    """
    assert_bindings(
        schema="msData/particles/particlesQ002.xsd",
        instance="msData/particles/particlesQ002.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_q001_particles_q001_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Sequence:Any -
    NSRecurseCheckCardinality) (Sequence) R derived by restriction from
    (any) B : B's minOccurs=0, B's maxOccurs=1, R's minOccurs=1, R's
    maxOccurs=1
    """
    assert_bindings(
        schema="msData/particles/particlesQ001.xsd",
        instance="msData/particles/particlesQ001.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_t014_particles_t014_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Choice -
    RecurseLax) (Choice) R derived by restriction from (All) B : B's
    maxOccurs=unbounded, R's maxOccurs = 3 (a | b | c) all with maxOccurs
    ( 0 and 10 and 100 )
    """
    assert_bindings(
        schema="msData/particles/particlesT014.xsd",
        instance="msData/particles/particlesT014.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_t013_particles_t013_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Choice -
    RecurseLax) (Choice) R derived by restriction from (All) B : B's
    maxOccurs=unbounded, R's maxOccurs=unbounded
    """
    assert_bindings(
        schema="msData/particles/particlesT013.xsd",
        instance="msData/particles/particlesT013.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_t012_particles_t012_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Choice -
    RecurseLax) (Choice) R derived by restriction from (All) B : B's
    maxOccurs=unbounded, R's maxOccurs=1, but has (a | b | c) all with
    maxOccurs=unbounded
    """
    assert_bindings(
        schema="msData/particles/particlesT012.xsd",
        instance="msData/particles/particlesT012.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_t007_particles_t007_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Choice -
    RecurseLax) (Choice) R derived by restriction from (All) B : B has (a
    | b | c), R has (a | b | c)
    """
    assert_bindings(
        schema="msData/particles/particlesT007.xsd",
        instance="msData/particles/particlesT007.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_t006_particles_t006_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Choice -
    RecurseLax) (Choice) R derived by restriction from (All) B : B has (a
    | b | c), c is NOT emptiable, R has (a | b)
    """
    assert_bindings(
        schema="msData/particles/particlesT006.xsd",
        instance="msData/particles/particlesT006.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_t005_particles_t005_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Choice -
    RecurseLax) (Choice) R derived by restriction from (All) B : B has (a
    | b | c), b is but c is NOT emptiable, R has (a)
    """
    assert_bindings(
        schema="msData/particles/particlesT005.xsd",
        instance="msData/particles/particlesT005.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_t004_particles_t004_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Choice -
    RecurseLax) (Choice) R derived by restriction from (All) B : B has (a
    | b | c), c is emptiable, R has (a | b) c is emptiable
    """
    assert_bindings(
        schema="msData/particles/particlesT004.xsd",
        instance="msData/particles/particlesT004.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_t003_particles_t003_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Choice -
    RecurseLax) (Choice) R derived by restriction from (All) B : B has (a
    | b | c), b and c are emptiable, R has (a)
    """
    assert_bindings(
        schema="msData/particles/particlesT003.xsd",
        instance="msData/particles/particlesT003.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_t001_particles_t001_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Choice -
    RecurseLax) (Choice) R derived by restriction from (All) B : B has (a
    | b), R has (a | b)
    """
    assert_bindings(
        schema="msData/particles/particlesT001.xsd",
        instance="msData/particles/particlesT001.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_r030_particles_r030_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Any -
    NSRecurseCheckCardinality) (Choice) R derived by restriction from
    (Choice) B : B's namespace=foo, bar', R has an element from bar
    """
    assert_bindings(
        schema="msData/particles/particlesR030.xsd",
        instance="msData/particles/particlesR030.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_r029_particles_r029_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Any -
    NSRecurseCheckCardinality) (Choice) R derived by restriction from
    (Choice) B : B's namespace=foo, bar', R has an element from foo
    """
    assert_bindings(
        schema="msData/particles/particlesR029.xsd",
        instance="msData/particles/particlesR029.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_r024_particles_r024_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Any -
    NSRecurseCheckCardinality) (Choice) R derived by restriction from
    (Choice) B : B's namespace=##targetNamespace, R has an element
    targetNamespace
    """
    assert_bindings(
        schema="msData/particles/particlesR024.xsd",
        instance="msData/particles/particlesR024.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_r022_particles_r022_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Any -
    NSRecurseCheckCardinality) (Choice) R derived by restriction from
    (Choice) B : B's namespace=##local, R has an element from no
    namespace
    """
    assert_bindings(
        schema="msData/particles/particlesR022.xsd",
        instance="msData/particles/particlesR022.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_r020_particles_r020_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Any -
    NSRecurseCheckCardinality) (Choice) R derived by restriction from
    (Choice) B : B's namespace=##other, R has an element from foo
    """
    assert_bindings(
        schema="msData/particles/particlesR020.xsd",
        instance="msData/particles/particlesR020.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_r017_particles_r017_v(mode, save_output, output_format):
    """
    TEST :3.9.1 The Particle Schema Component [ check length of element
    information items ] : Particle Derivation OK (Choice:Any -
    NSRecurseCheckCardinality) (Choice) R derived by restriction from
    (Choice) B : B's namespace=##any, R has an element from foo
    """
    assert_bindings(
        schema="msData/particles/particlesR017.xsd",
        instance="msData/particles/particlesR017.xml",
        class_name="Doc",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )


def test_particles_r016_particles_r016_v(mode, save_output, output_format):
    """
args.IsSpecified('node_version') and args.enable_autoupgrade: log.warning(util.WARN_NODE_VERSION_WITH_AUTOUPGRADE_ENABLED) def AddMachineTypeFlag(parser): """Adds --machine-type flag to the parser. Args: parser: A given parser. """ help_text = """\ The type of machine to use for nodes. Defaults to e2-medium. The list of predefined machine types is available using the following command: $ gcloud compute machine-types list You can also specify custom machine types with the string "custom-CPUS-RAM" where "CPUS" is the number of virtual CPUs and "RAM" is the amount of RAM in MiB. For example, to create a node pool using custom machines with 2 vCPUs and 12 GB of RAM: $ {command} high-mem-pool --machine-type=custom-2-12288 """ parser.add_argument('--machine-type', '-m', help=help_text) def AddWorkloadIdentityFlags(parser, use_identity_provider=False): """Adds Workload Identity flags to the parser.""" parser.add_argument( '--workload-pool', default=None, help="""\ Enable Workload Identity on the cluster. When enabled, Kubernetes service accounts will be able to act as Cloud IAM Service Accounts, through the provided workload pool. Currently, the only accepted workload pool is the workload pool of the Cloud project containing the cluster, `PROJECT_ID.svc.id.goog`. For more information on Workload Identity, see https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity """, required=False, type=arg_parsers.RegexpValidator( # Don't document hub.id.goog in the error, but still pass it through # for now. r'^[a-z][-a-z0-9]{4,}[a-z0-9]\.(svc|hub)\.id\.goog$', "Must be in format of '[PROJECT_ID].svc.id.goog'"), ) if use_identity_provider: parser.add_argument( '--identity-provider', default=None, help="""\ Enable 3P identity provider on the cluster. 
""") def AddWorkloadIdentityUpdateFlags(parser): """Adds Workload Identity update flags to the parser.""" parser.add_argument( '--disable-workload-identity', default=False, action='store_true', help="""\ Disable Workload Identity on the cluster. For more information on Workload Identity, see https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity """) def AddWorkloadCertificatesFlags(parser): """Adds Workload Certificates flags to the parser.""" parser.add_argument( '--enable-workload-certificates', default=None, hidden=True, action='store_true', help="""\ Enable Workload Certificates. After the cluster is created, configure an issuing certificate authority using the Kubernetes API. To disable Workload Certificates in an existing cluster, explicitly set flag `--no-enable-workload-certificates`. """) def AddMeshCertificatesFlags(parser): """Adds Mesh Certificates flags to the parser.""" parser.add_argument( '--enable-mesh-certificates', default=None, hidden=True, action='store_true', help=textwrap.dedent("""\ Enable Mesh Certificates. After the cluster is created, configure an issuing certificate authority using the Kubernetes API. To disable Mesh Certificates in an existing cluster, explicitly set flag `--no-enable-mesh-certificates`. """)) def AddWorkloadAltsFlags(parser): """Adds Workload ALTS flags to the parser.""" parser.add_argument( '--enable-alts', hidden=True, action=arg_parsers.StoreTrueFalseAction, help="""\ Enable Workload ALTS. """) def AddGkeOidcFlag(parser): parser.add_argument( '--enable-gke-oidc', default=None, action=actions.DeprecationAction( '--enable-gke-oidc', warn='GKE OIDC is being replaced by Identity Service across Anthos ' 'and GKE. Thus, flag `--enable-gke-oidc` is also deprecated. Please ' 'use `--enable-identity-service` to enable the Identity Service ' 'component', action='store_true'), help="""\ Enable GKE OIDC authentication on the cluster. 
When enabled, users would be able to authenticate to Kubernetes cluster after properly setting OIDC config. GKE OIDC is by default disabled when creating a new cluster. To disable GKE OIDC in an existing cluster, explicitly set flag `--no-enable-gke-oidc`. """) def AddIdentityServiceFlag(parser): parser.add_argument( '--enable-identity-service', default=None, action='store_true', help="""\ Enable Identity Service component on the cluster. When enabled, users can authenticate to Kubernetes cluster with external identity providers. Identity Service is by default disabled when creating a new cluster. To disable Identity Service in an existing cluster, explicitly set flag `--no-enable-identity-service`. """) def AddResourceUsageExportFlags(parser, is_update=False, hidden=False): """Adds flags about exporting cluster resource usage to BigQuery.""" group = parser.add_group( "Exports cluster's usage of cloud resources", hidden=hidden) if is_update: group.is_mutex = True group.add_argument( '--clear-resource-usage-bigquery-dataset', action='store_true', hidden=hidden, default=None, help='Disables exporting cluster resource usage to BigQuery.') group = group.add_group() dataset_help_text = """\ The name of the BigQuery dataset to which the cluster's usage of cloud resources is exported. A table will be created in the specified dataset to store cluster resource usage. The resulting table can be joined with BigQuery Billing Export to produce a fine-grained cost breakdown. Examples: $ {command} example-cluster --resource-usage-bigquery-dataset=example_bigquery_dataset_name """ group.add_argument( '--resource-usage-bigquery-dataset', default=None, hidden=hidden, help=dataset_help_text) network_egress_help_text = """\ Enable network egress metering on this cluster. When enabled, a DaemonSet is deployed into the cluster. Each DaemonSet pod meters network egress traffic by collecting data from the conntrack table, and exports the metered metrics to the specified destination. 
Network egress metering is disabled if this flag is omitted, or when `--no-enable-network-egress-metering` is set. """ group.add_argument( '--enable-network-egress-metering', action='store_true', default=None, hidden=hidden, help=network_egress_help_text) resource_consumption_help_text = """\ Enable resource consumption metering on this cluster. When enabled, a table will be created in the specified BigQuery dataset to store resource consumption data. The resulting table can be joined with the resource usage table or with BigQuery billing export. Resource consumption metering is enabled unless `--no-enable-resource- consumption-metering` is set. """ if is_update: resource_consumption_help_text = """\ Enable resource consumption metering on this cluster. When enabled, a table will be created in the specified BigQuery dataset to store resource consumption data. The resulting table can be joined with the resource usage table or with BigQuery billing export. To disable resource consumption metering, set `--no-enable-resource-consumption- metering`. If this flag is omitted, then resource consumption metering will remain enabled or disabled depending on what is already configured for this cluster. """ group.add_argument( '--enable-resource-consumption-metering', action='store_true', default=None, hidden=hidden, help=resource_consumption_help_text) def AddEnablePrivateIpv6AccessFlag(parser, hidden=False): """Adds --enable-private-ipv6-access flag to the parser. When enabled, this allows gRPC clients on this cluster's pods a fast path to access Google hosted services (eg. Cloud Spanner, Cloud Dataflow, Cloud Bigtable) This is currently only available on Alpha clusters, and needs '--enable-kubernetes-alpha' to be specified also. Args: parser: A given parser. hidden: If true, suppress help text for added options. """ parser.add_argument( '--enable-private-ipv6-access', default=None, help="""\ Enables private access to Google services over IPv6. 
When enabled, this allows gRPC clients on this cluster's pods a fast path to access Google hosted services (eg. Cloud Spanner, Cloud Dataflow, Cloud Bigtable). This is currently only available on Alpha clusters, specified by using --enable-kubernetes-alpha. """, hidden=hidden, action='store_true') def AddPrivateIpv6GoogleAccessTypeFlag(api_version, parser, hidden=False): """Adds --private-ipv6-google-access-type={disabled|outbound-only|bidirectional} flag.""" messages = apis.GetMessagesModule('container', api_version) util.GetPrivateIpv6GoogleAccessTypeMapper( messages, hidden).choice_arg.AddToParser(parser) def AddEnableIntraNodeVisibilityFlag(parser, hidden=False): """Adds --enable-intra-node-visibility flag to the parser. When enabled, the intra-node traffic is visible to VPC network. Args: parser: A given parser. hidden: If true, suppress help text for added options. """ parser.add_argument( '--enable-intra-node-visibility', default=None, hidden=hidden, action='store_true', help="""\ Enable Intra-node visibility for this cluster. Enabling intra-node visibility makes your intra-node pod-to-pod traffic visible to the networking fabric. With this feature, you can use VPC flow logging or other VPC features for intra-node traffic. Enabling it on an existing cluster causes the cluster master and the cluster nodes to restart, which might cause a disruption. """) def AddVerticalPodAutoscalingFlags(parser, hidden=False, experimental=False): """Adds vertical pod autoscaling related flags to the parser. VerticalPodAutoscaling related flags are: --enable-vertical-pod-autoscaling --enable-experimental-vertical-pod-autoscaling Args: parser: A given parser. hidden: If true, suppress help text for added options. 
experimental: It true, add experimental vertical pod autoscaling flag """ group = parser.add_group( mutex=True, help='Flags for vertical pod autoscaling:') group.add_argument( '--enable-vertical-pod-autoscaling', default=None, help='Enable vertical pod autoscaling for a cluster.', hidden=hidden, action='store_true') if experimental: group.add_argument( '--enable-experimental-vertical-pod-autoscaling', default=None, help=('Enable experimental vertical pod autoscaling features' 'for a cluster.'), hidden=True, action='store_true') def AddVerticalPodAutoscalingFlagsExperimental(parser, hidden=False): return AddVerticalPodAutoscalingFlags(parser, hidden, experimental=True) def AddSandboxFlag(parser, hidden=False): """Adds a --sandbox flag to the given parser. Args: parser: A given parser. hidden: Whether or not to hide the help text. """ type_validator = arg_parsers.RegexpValidator(r'^gvisor$', 'Type must be "gvisor"') parser.add_argument( '--sandbox', type=arg_parsers.ArgDict( spec={'type': type_validator}, required_keys=['type'], max_length=1), metavar='type=TYPE', hidden=hidden, help="""\ Enables the requested sandbox on all nodes in the node pool. Examples: $ {command} node-pool-1 --cluster=example-cluster --sandbox="type=gvisor" The only supported type is 'gvisor'. """) def AddSecurityProfileForCreateFlags(parser, hidden=False): """Adds flags related to Security Profile to the parser for cluster creation. Args: parser: A given parser. hidden: Whether or not to hide the help text. """ group = parser.add_group(help='Flags for Security Profile:') group.add_argument( '--security-profile', hidden=hidden, help="""\ Name and version of the security profile to be applied to the cluster. Examples: $ {command} example-cluster --security-profile=default-1.0-gke.0 """) group.add_argument( '--security-profile-runtime-rules', default=True, action='store_true', hidden=hidden, help="""\ Apply runtime rules in the specified security profile to the cluster. 
When enabled (by default), a security profile controller and webhook are deployed on the cluster to enforce the runtime rules. If --no-security-profile-runtime-rules is specified to disable this feature, only bootstrapping rules are applied, and no security profile controller or webhook are installed. """) def AddSecurityProfileForUpdateFlag(parser, hidden=False): """Adds --security-profile to specify security profile for cluster update. Args: parser: A given parser. hidden: Whether or not to hide the help text. """ parser.add_argument( '--security-profile', hidden=hidden, help="""\ Name and version of the security profile to be applied to the cluster. If not specified, the current setting of security profile will be preserved. Examples: $ {command} example-cluster --security-profile=default-1.0-gke.1 """) def AddSecurityProfileForUpgradeFlags(parser, hidden=False): """Adds flags related to Security Profile to the parser for cluster upgrade. Args: parser: A given parser. hidden: Whether or not to hide the help text. """ group = parser.add_group(help='Flags for Security Profile:') group.add_argument( '--security-profile', hidden=hidden, help="""\ Name and version of the security profile to
only data field. _preload_content (bool, optional): Response is converted into objects. _request_timeout (int, optional): Total request timeout in seconds. Returns: ValidResponse: If the call was successful. ErrorResponse: If the call was not successful. Raises: PureError: If calling the API fails. ValueError: If a parameter is of an invalid type. TypeError: If invalid or missing parameters are used. """ kwargs = dict( alert_watcher=alert_watcher, ids=ids, names=names, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, ) kwargs = {k: v for k, v in kwargs.items() if v is not None} endpoint = self._alert_watchers_api.api20_alert_watchers_patch_with_http_info _process_references(references, ['ids', 'names'], kwargs) return self._call_api(endpoint, kwargs) def post_alert_watchers( self, references=None, # type: List[models.ReferenceType] names=None, # type: List[str] alert_watcher=None, # type: models.AlertWatcherPost async_req=False, # type: bool _return_http_data_only=False, # type: bool _preload_content=True, # type: bool _request_timeout=None, # type: Optional[int] ): # type: (...) -> models.AlertWatcherResponse """ Create an alert watcher to receive array alert messages. Args: references (list[FixedReference], optional): A list of references to query for. Overrides names keyword arguments. names (list[str], required): A list of resource names. async_req (bool, optional): Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. _return_http_data_only (bool, optional): Returns only data field. _preload_content (bool, optional): Response is converted into objects. _request_timeout (int, optional): Total request timeout in seconds. Returns: ValidResponse: If the call was successful. ErrorResponse: If the call was not successful. Raises: PureError: If calling the API fails. ValueError: If a parameter is of an invalid type. 
TypeError: If invalid or missing parameters are used. """ kwargs = dict( names=names, alert_watcher=alert_watcher, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, ) kwargs = {k: v for k, v in kwargs.items() if v is not None} endpoint = self._alert_watchers_api.api20_alert_watchers_post_with_http_info _process_references(references, ['names'], kwargs) return self._call_api(endpoint, kwargs) def get_alert_watchers_test( self, references=None, # type: List[models.ReferenceType] filter=None, # type: str ids=None, # type: List[str] names=None, # type: List[str] sort=None, # type: List[str] async_req=False, # type: bool _return_http_data_only=False, # type: bool _preload_content=True, # type: bool _request_timeout=None, # type: Optional[int] ): # type: (...) -> models.TestResultResponse """ Test an alert watcher's contact information to verify alerts can be sent and received. Args: references (list[FixedReference], optional): A list of references to query for. Overrides ids and names keyword arguments. filter (Filter, optional): A filter to include only resources that match the specified criteria. ids (list[str], optional): A list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters. names (list[str], optional): A list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned. sort (list[Property], optional): Sort the response by the specified Properties. Can also be a single element. async_req (bool, optional): Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. _return_http_data_only (bool, optional): Returns only data field. _preload_content (bool, optional): Response is converted into objects. 
_request_timeout (int, optional): Total request timeout in seconds. Returns: ValidResponse: If the call was successful. ErrorResponse: If the call was not successful. Raises: PureError: If calling the API fails. ValueError: If a parameter is of an invalid type. TypeError: If invalid or missing parameters are used. """ kwargs = dict( filter=filter, ids=ids, names=names, sort=sort, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, ) kwargs = {k: v for k, v in kwargs.items() if v is not None} endpoint = self._alert_watchers_api.api20_alert_watchers_test_get_with_http_info _process_references(references, ['ids', 'names'], kwargs) return self._call_api(endpoint, kwargs) def get_alerts( self, references=None, # type: List[models.ReferenceType] continuation_token=None, # type: str filter=None, # type: str ids=None, # type: List[str] limit=None, # type: int names=None, # type: List[str] offset=None, # type: int sort=None, # type: List[str] async_req=False, # type: bool _return_http_data_only=False, # type: bool _preload_content=True, # type: bool _request_timeout=None, # type: Optional[int] ): # type: (...) -> models.AlertGetResponse """ Returns a list of alerts which have been generated by the array. Args: references (list[FixedReference], optional): A list of references to query for. Overrides ids and names keyword arguments. continuation_token (str, optional): An opaque token to iterate over a collection of resources. filter (Filter, optional): A filter to include only resources that match the specified criteria. ids (list[str], optional): A list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters. limit (int, optional): Limit the number of resources in the response. If not specified, defaults to 1000. 
names (list[str], optional): A list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned. offset (int, optional): The offset of the first resource to return from a collection. sort (list[Property], optional): Sort the response by the specified Properties. Can also be a single element. async_req (bool, optional): Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. _return_http_data_only (bool, optional): Returns only data field. _preload_content (bool, optional): Response is converted into objects. _request_timeout (int, optional): Total request timeout in seconds. Returns: ValidResponse: If the call was successful. ErrorResponse: If the call was not successful. Raises: PureError: If calling the API fails. ValueError: If a parameter is of an invalid type. TypeError: If invalid or missing parameters are used. """ kwargs = dict( continuation_token=continuation_token, filter=filter, ids=ids, limit=limit, names=names, offset=offset, sort=sort, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, ) kwargs = {k: v for k, v in kwargs.items() if v is not None} endpoint = self._alerts_api.api20_alerts_get_with_http_info _process_references(references, ['ids', 'names'], kwargs) return self._call_api(endpoint, kwargs) def patch_alerts( self, references=None, # type: List[models.ReferenceType] alerts_settings=None, # type: models.Alert ids=None, # type: List[str] names=None, # type: List[str] async_req=False, # type: bool _return_http_data_only=False, # type: bool _preload_content=True, # type: bool _request_timeout=None, # type: Optional[int] ): # type: (...) -> models.AlertResponse """ Make changes to an alert. This is currently limited to the alert's `flagged` property. Args: references (list[FixedReference], optional): A list of references to query for. 
Overrides ids and names keyword arguments. alerts_settings (Alert, required): ids (list[str], optional): A list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters. names (list[str], optional): A list of resource names. If there is not at least one resource that matches each of the elements of `names`, then an error is returned. async_req (bool, optional): Request runs in separate thread and method returns multiprocessing.pool.ApplyResult. _return_http_data_only (bool, optional): Returns only data field. _preload_content (bool, optional): Response is converted into objects. _request_timeout (int, optional): Total request timeout in seconds. Returns: ValidResponse: If the call was successful. ErrorResponse: If the call was not successful. Raises: PureError: If calling the API fails. ValueError: If a parameter is of an invalid type. TypeError: If invalid or missing parameters are used. """ kwargs = dict( alerts_settings=alerts_settings, ids=ids, names=names, async_req=async_req, _return_http_data_only=_return_http_data_only, _preload_content=_preload_content, _request_timeout=_request_timeout, ) kwargs = {k: v for k, v in kwargs.items() if v is not None} endpoint = self._alerts_api.api20_alerts_patch_with_http_info _process_references(references, ['ids', 'names'], kwargs) return self._call_api(endpoint, kwargs) def delete_api_clients( self, references=None, # type: List[models.ReferenceType] ids=None, # type: List[str] names=None, # type: List[str] async_req=False, # type: bool _return_http_data_only=False, # type: bool _preload_content=True, # type: bool _request_timeout=None, # type: Optional[int] ): # type: (...) -> None """ Delete the API client. Args: references (list[FixedReference], optional): A list of references to query for. Overrides ids and names keyword arguments. 
ids (list[str], optional): A list of resource IDs. If after filtering, there is not at least one resource that matches each of the elements of `ids`, then an error is returned. This cannot be provided together with the `name` or `names` query parameters. names (list[str], optional): A list of resource names. If there is not at least one resource
import inspect from typing import List, Union, Set, Any import numpy as np from fruits.cache import Cache, CoquantileCache from fruits.scope import force_input_shape, FitTransform from fruits.core.callback import AbstractCallback from fruits.signature.iss import SignatureCalculator, CachePlan from fruits.words.word import Word from fruits.sieving.abstract import FeatureSieve from fruits.preparation.abstract import DataPreparateur class Fruit: """Feature Extractor using iterated sums. A Fruit consists of a number of :class:`~fruits.core.fruit.FruitBranch` objects. At the end of the pipeline, each branch returns their own features and they will be concatenated by this class. A simple example (using two branches): .. code-block:: python fruit = fruits.Fruit("My Fruit") # optional: add preparateurs for preprocessing fruit.add(fruits.preparation.INC) # add words for iterated sums calculation fruit.add(fruits.words.creation.simplewords_by_weight(4)) # choose sieves fruit.add(fruits.sieving.PPV(0.5)) fruit.add(fruits.sieving.END) # add a new branch without INC fruit.fork() fruit.add(fruits.words.creation.simplewords_by_weight(4)) fruit.add(fruits.sieving.PPV(0.5)) fruit.add(fruits.sieving.END) # configure the fruit fruit.configure(mode="extended") # fit the fruit on a time series dataset fruit.fit(X_train) # transform the dataset X_train_transformed = fruit.transform(X_train) X_test_tranformed = fruit.transform(X_test) # use the transformed results (features) in a classifier ... The ``fruit`` above will result in ``2*8*2=32`` features per time series. 
""" def __init__(self, name: str = ""): self.name: str = name # list of FruitBranches self._branches: List[FruitBranch] = [] # pointer for the current branch index self._cbi: int = 0 self._fitted: bool = False @property def name(self) -> str: """Simple identifier for the Fruit object.""" return self._name @name.setter def name(self, name: str): self._name = name def fork(self, branch: "FruitBranch" = None): """Adds a new branch to the pipeline. If none is given, an empty FruitBranch will be created and switched to. :type branch: FruitBranch, optional """ if branch is None: branch = FruitBranch() self._branches.append(branch) self._cbi = len(self._branches) - 1 self._fitted = False def branch(self, index: int = None): """Returns the currently selected branch or the branch with the given index. :rtype: FruitBranch """ if index is None: return self._branches[self._cbi] return self._branches[index] def branches(self) -> list: """Returns all branches of this Fruit object. :rtype: list """ return self._branches def switch_branch(self, index: int): """Switches to the branch with the given index. :param index: Integer in ``[0, 1, ..., len(self.branches())-1]`` :type index: int """ if not (0 <= index < len(self._branches)): raise IndexError("Index has to be in [0, len(self.branches()))") self._cbi = index def add(self, *objects: Union[FitTransform, Word, type]): """Adds one or multiple object(s) to the currently selected branch. :param objects: One or more objects of the following types: - :class:`~fruits.preparation.abstract.DataPreparateur` - :class:`~fruits.words.word.Word` - :class:`~fruits.sieving.abstract.FeatureSieve` :type objects: Union[FitTransform, Word] """ if len(self._branches) == 0: self.fork() self._branches[self._cbi].add(*objects) self._fitted = False def nfeatures(self) -> int: """Returns the total number of features of all branches combined. 
:rtype: int """ return sum([branch.nfeatures() for branch in self._branches]) def configure(self, **kwargs: Any): """Makes changes to the default configuration of a all branches if arguments differ from ``None``. :param kwargs: For possible options, have a look at :meth:`fruits.core.fruit.FruitBranch.configure`. :type kwargs: Any """ for branch in self._branches: branch.configure(**kwargs) def fit(self, X: np.ndarray): """Fits all branches to the given data. :param X: (Multidimensional) time series dataset as an array of three dimensions. Have a look at :meth:`~fruits.scope.force_input_shape`. :type X: np.ndarray """ for branch in self._branches: branch.fit(X) self._fitted = True def transform(self, X: np.ndarray, callbacks: List[AbstractCallback] = []) -> np.ndarray: """Returns a two dimensional array of all features from all branches this Fruit object contains. :param X: (Multidimensional) time series dataset as an array of three dimensions. Have a look at :meth:`~fruits.scope.force_input_shape`. :type X: np.ndarray :param callbacks: List of callbacks. To write your own callback, override the class :class:`~fruits.core.callback.AbstractCallback`., defaults to None :type callbacks: List[AbstractCallback], optional :rtype: np.ndarray :raises: RuntimeError if Fruit.fit wasn't called """ if not self._fitted: raise RuntimeError("Missing call of self.fit") result = np.zeros((X.shape[0], self.nfeatures())) index = 0 for branch in self._branches: for callback in callbacks: callback.on_next_branch() k = branch.nfeatures() result[:, index:index+k] = branch.transform(X, callbacks) index += k result = np.nan_to_num(result, copy=False, nan=0.0) return result def fit_transform(self, X: np.ndarray) -> np.ndarray: """Fits all branches to the given dataset and returns the transformed results of X from all branches. 
:param X: (Multidimensional) time series dataset :type X: np.ndarray :returns: Two dimensional feature array :rtype: np.ndarray """ self.fit(X) return self.transform(X) def summary(self) -> str: """Returns a summary of this object. The summary contains a summary for each FruitBranch in this Fruit object. :rtype: str """ summary = "{:=^80}".format(f"Summary of fruits.Fruit: '{self.name}'") summary += f"\nBranches: {len(self.branches())}" summary += f"\nFeatures: {self.nfeatures()}" for branch in self.branches(): summary += "\n\n" + branch.summary() summary += "\n{:=^80}".format(f"End of Summary") return summary def copy(self) -> "Fruit": """Creates a shallow copy of this Fruit object. This also creates shallow copies of all branches in this object. :rtype: Fruit """ copy_ = Fruit(self.name+" (Copy)") for branch in self._branches: copy_.fork(branch.copy()) return copy_ def deepcopy(self) -> "Fruit": """Creates a deep copy of this Fruit object. This also creates deep copies of all branches in this object. :rtype: Fruit """ copy_ = Fruit(self.name+" (Copy)") for branch in self._branches: copy_.fork(branch.deepcopy()) return copy_ class FruitBranch: """One branch of a Fruit object. A FruitBranch object extracts values from time series data that are somehow representative of the input data. The user can customize any of the following three steps. - Preparing data: Apply functions at the start of the extraction procedure. There are many so called :class:`~fruits.preparation.abstract.DataPreparateur` objects in fruits available for preprocessing. The preparateurs will be applied sequentially to the input data. - Calculating Iterated Sums: The preprocessed data is now used to calculate the iterated sums signature for different :class:`~fruits.words.word.Word` objects the user can specify. - Extracting Features: Each :class:`~fruits.sieving.abstract.FeatureSieve` added to the branch will be fitted on the iterated sums from the previous step. 
The branch then returns an array of numbers (the transformed results from those sieves), i.e. the features for each time series. """ def __init__(self): # lists of used classes for data processing self._preparateurs: list = [] self._words: list = [] self._sieves: list = [] # calculator options used in the ISS calculation self._calculator_options: dict = {"batch_size": 1, "mode": "single"} # list with inner lists containing sieves # all sieves in one list are trained on one specific output # of an ISS-result self._sieves_extended: list = [] # configurations for fitting self._fitted: bool = False self._fit_sample_size: Union[float, int] = 1 # cache that is calculated at fitting and also used in the # transformation process self._cache: Cache def configure(self, mode: str = None, batch_size: int = None, fit_sample_size: Union[float, int] = None): """Makes changes to the default configuration of a fruit branch if arguments differ from ``None``. :param mode: See :meth:`fruits.signature.iss.SignatureCalculator.transform`, defaults to None :type mode: str, optional :param batch_size: See :meth:`~ruits.signature.iss.SignatureCalculator.transform`, defaults to None :type batch_size: int, optional :param fit_sample_size: Size of the random time series sample that is used for fitting. This is represented as a float which will be multiplied by ``X.shape[0]`` or ``1`` for one random time series., defaults to 1 :type fit_sample_size: Union[float, int] """ if mode is not None: self._calculator_options["mode"] = mode if batch_size is not None: self._calculator_options["batch_size"] = batch_size if fit_sample_size is not None: self._fit_sample_size = fit_sample_size def add_preparateur(self, preparateur: DataPreparateur): """Adds a preparateur to the branch. 
:type preparateur: DataPreparateur """ if not isinstance(preparateur, DataPreparateur): raise TypeError self._preparateurs.append(preparateur) self._fitted = False def get_preparateurs(self) -> List[DataPreparateur]: """Returns a list of all preparateurs added to the branch. :rtype: List[DataPreparateur] """ return self._preparateurs def clear_preparateurs(self): """Removes all preparateurs that were added to this branch.""" self._preparateurs = [] self._fitted = False def add_word(self, word: Word): """Adds a word to the branch. :type word: Word """ if not isinstance(word, Word): raise TypeError self._words.append(word) self._fitted = False def get_words(self) -> List[Word]: """Returns a list of all words in the branch. :rtype: List[Word] """ return self._words def clear_words(self): """Removes all words that were added to this branch.""" self._words = [] self._sieves_extended = [] self._fitted = False def add_sieve(self, sieve: FeatureSieve): """Appends a new feature sieve to the FruitBranch. :type sieve: FeatureSieve """ if not isinstance(sieve, FeatureSieve): raise TypeError self._sieves.append(sieve) self._fitted = False def get_sieves(self)
' ').replace('\r', ' ') body = '<!doctype html>' + \ '<html lang="en">' + \ '<head>' + \ '<meta charset="utf-8">' + \ '<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">' + \ '<link rel="stylesheet"' + \ 'href="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/css/bootstrap.min.css"' + \ 'integrity="<KEY>"' + \ 'crossorigin="anonymous">' + \ '<title>Flexible budget variance</title>' + \ '</head>' + \ '<body>' + \ '<div class="container">' + \ '<div class="card text-center">' + \ '<div class="card-header text-center">Flexible budget variance</div>' + \ '<div class="card-body">' body += '<h6>Comapny name : ' + company_name + '</h6>' + \ '<h6>Share capital : ' + share_capital + '</h6>' + \ '<h6>Head office address : ' + head_office_address + '</h6>' + \ '<h6>Establishment number : ' + establishment_number + '</h6>' + \ '<h6>Register of Trade and Companies : ' + register_of_trade_and_companies + '</h6>' + \ '<h6>Main activities : ' + main_activities + '</h6>' + \ '<h6>Activity number : ' + activity_number + '</h6>' + \ '<h6>Intra-community VAT number : ' + intra_community_vat_number + '</h6>' + \ '<h6>President : ' + president + '</h6>' + \ '<h6>Registration date : ' + registration_date + '</h6>' + \ '<br>' body += '<br>' body += '<table class="table table-striped table-bordered">' + \ '<thead>' + \ '<tr>' + \ '<th scope="col">Details</th>' + \ '<th scope="col">Original budget</th>' + \ '<th scope="col">Variable cost per unit</th>' + \ '<th scope="col">Flexible budget</th>' + \ '<th scope="col">Actual cost</th>' + \ '<th scope="col">Variance</th>' + \ '<th scope="col">Rate</th>' + \ '</tr>' + \ '</thead>' + \ '<tbody>' + \ '<tr>' + \ '<td>Direct materials variable costs</td>' + \ '<td>' + r1c1 + '</td>' + \ '<td>' + r1c2 + '</td>' + \ '<td>' + r1c3 + '</td>' + \ '<td>' + r1c4 + '</td>' + \ '<td>' + r1c5 + '</td>' + \ '<td>' + r1c6 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Indirect materials variable costs</td>' + \ '<td>' + r2c1 + 
'</td>' + \ '<td>' + r2c2 + '</td>' + \ '<td>' + r2c3 + '</td>' + \ '<td>' + r2c4 + '</td>' + \ '<td>' + r2c5 + '</td>' + \ '<td>' + r2c6 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Piece rate labor variable costs</td>' + \ '<td>' + r3c1 + '</td>' + \ '<td>' + r3c2 + '</td>' + \ '<td>' + r3c3 + '</td>' + \ '<td>' + r3c4 + '</td>' + \ '<td>' + r3c5 + '</td>' + \ '<td>' + r3c6 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Shipping variable costs</td>' + \ '<td>' + r4c1 + '</td>' + \ '<td>' + r4c2 + '</td>' + \ '<td>' + r4c3 + '</td>' + \ '<td>' + r4c4 + '</td>' + \ '<td>' + r4c5 + '</td>' + \ '<td>' + r4c6 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Billable wages variable costs</td>' + \ '<td>' + r5c1 + '</td>' + \ '<td>' + r5c2 + '</td>' + \ '<td>' + r5c3 + '</td>' + \ '<td>' + r5c4 + '</td>' + \ '<td>' + r5c5 + '</td>' + \ '<td>' + r5c6 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Commissions variable costs</td>' + \ '<td>' + r6c1 + '</td>' + \ '<td>' + r6c2 + '</td>' + \ '<td>' + r6c3 + '</td>' + \ '<td>' + r6c4 + '</td>' + \ '<td>' + r6c5 + '</td>' + \ '<td>' + r6c6 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Utilities variable costs</td>' + \ '<td>' + r7c1 + '</td>' + \ '<td>' + r7c2 + '</td>' + \ '<td>' + r7c3 + '</td>' + \ '<td>' + r7c4 + '</td>' + \ '<td>' + r7c5 + '</td>' + \ '<td>' + r7c6 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Other variable costs</td>' + \ '<td>' + r8c1 + '</td>' + \ '<td>' + r8c2 + '</td>' + \ '<td>' + r8c3 + '</td>' + \ '<td>' + r8c4 + '</td>' + \ '<td>' + r8c5 + '</td>' + \ '<td>' + r8c6 + '</td>' + \ '</tr>' + \ '<tr>' + \ '<td>Total budget variable costs</td>' + \ '<td>' + r9c1 + '</td>' + \ '<td>' + r9c2 + '</td>' + \ '<td>' + r9c3 + '</td>' + \ '<td>' + r9c4 + '</td>' + \ '<td>' + r9c5 + '</td>' + \ '<td>' + r9c6 + '</td>' + \ '</tr>' + \ '</tbody>' + \ '</table>' body += '<br>' + \ '</div>' + \ '</div>' + \ '</div>' + \ '<br>' + \ '<script src="https://code.jquery.com/jquery-3.5.1.slim.min.js"' + \ 'integrity="<KEY>"' + \ 
'crossorigin="anonymous"></script>' + \ '<script src="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/js/bootstrap.bundle.min.js"' + \ 'integrity="<KEY>"' + \ 'crossorigin="anonymous"></script>' + \ '</body>' + \ '</html>' options = { 'page-size': 'A4', 'orientation': 'landscape', 'header-center': 'Flexible budget variance', 'footer-left': 'Company : ' + company_name + ' [' + establishment_number + ']', 'footer-right': '[page] sur [topage]', 'encoding': 'UTF-8', 'no-outline': None, 'custom-header': [ ('Accept-Encoding', 'pdf') ] } # path_wkthmltopdf = 'static/reporting/static/wkhtmltopdf.exe' # config = pdfkit.configuration(wkhtmltopdf=path_wkthmltopdf) # output = pdfkit.from_string(body, output_path=False, configuration=config, options=options) output = pdfkit.from_string(body, output_path=False, options=options) response = HttpResponse(output, content_type='application/pdf') response['Content-Disposition'] = 'attachment; filename="flexible_budget_variance.pdf"' return response def month_operating_budget_variance(request): return render(request, 'reporting/month_operating_budget_variance.html') def generate_html_to_pdf_month_operating_budget_variance(request): company_name = request.POST.get('company_name').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') share_capital = request.POST.get('share_capital').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') head_office_address = request.POST.get('head_office_address').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') establishment_number = request.POST.get('establishment_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') register_of_trade_and_companies = request.POST.get('register_of_trade_and_companies').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') main_activities = request.POST.get('main_activities').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') activity_number = request.POST.get('activity_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') 
intra_community_vat_number = request.POST.get('intra_community_vat_number').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') president = request.POST.get('president').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') registration_date = request.POST.get('registration_date').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c1 = request.POST.get('r1c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c2 = request.POST.get('r1c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c3 = request.POST.get('r1c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c4 = request.POST.get('r1c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c5 = request.POST.get('r1c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c6 = request.POST.get('r1c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c7 = request.POST.get('r1c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c8 = request.POST.get('r1c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c9 = request.POST.get('r1c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c10 = request.POST.get('r1c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c11 = request.POST.get('r1c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c12 = request.POST.get('r1c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c13 = request.POST.get('r1c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c14 = request.POST.get('r1c14').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c15 = request.POST.get('r1c15').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c16 = request.POST.get('r1c16').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c17 = request.POST.get('r1c17').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c18 = request.POST.get('r1c18').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c19 = 
request.POST.get('r1c19').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c20 = request.POST.get('r1c20').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c21 = request.POST.get('r1c21').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c22 = request.POST.get('r1c22').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c23 = request.POST.get('r1c23').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c24 = request.POST.get('r1c24').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c25 = request.POST.get('r1c25').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c26 = request.POST.get('r1c26').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r1c27 = request.POST.get('r1c27').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c1 = request.POST.get('r2c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c2 = request.POST.get('r2c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c3 = request.POST.get('r2c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c4 = request.POST.get('r2c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c5 = request.POST.get('r2c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c6 = request.POST.get('r2c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c7 = request.POST.get('r2c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c8 = request.POST.get('r2c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c9 = request.POST.get('r2c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c10 = request.POST.get('r2c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c11 = request.POST.get('r2c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c12 = request.POST.get('r2c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c13 = request.POST.get('r2c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c14 = 
request.POST.get('r2c14').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c15 = request.POST.get('r2c15').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c16 = request.POST.get('r2c16').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c17 = request.POST.get('r2c17').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c18 = request.POST.get('r2c18').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c19 = request.POST.get('r2c19').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c20 = request.POST.get('r2c20').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c21 = request.POST.get('r2c21').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c22 = request.POST.get('r2c22').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c23 = request.POST.get('r2c23').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c24 = request.POST.get('r2c24').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c25 = request.POST.get('r2c25').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c26 = request.POST.get('r2c26').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r2c27 = request.POST.get('r2c27').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c1 = request.POST.get('r3c1').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c2 = request.POST.get('r3c2').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c3 = request.POST.get('r3c3').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c4 = request.POST.get('r3c4').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c5 = request.POST.get('r3c5').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c6 = request.POST.get('r3c6').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c7 = request.POST.get('r3c7').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c8 = request.POST.get('r3c8').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c9 = 
request.POST.get('r3c9').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c10 = request.POST.get('r3c10').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') r3c11 = request.POST.get('r3c11').replace('\t', ' ').replace('\n', ' ').replace('\r',
from __future__ import print_function import bz2 from ftplib import FTP import gzip import hashlib import logging import os import pickle import re from subprocess import Popen, STDOUT, PIPE import sys import time import urllib import yaml class MirrorException(Exception): def __init__(self, val): self.val = val def __str__(self): return repr(self.val) class Mirror: # Setup class vars and logger def __init__(self, mirror_path, mirror_url, temp_indices=None, log_file=None, log_level=None, package_ttl=None, hash_function=None): if not temp_indices: self.temp_indices = '/tmp/dists-indices' if log_level is None: log_level = 'INFO' if package_ttl is None: package_ttl = 10800 if hash_function is None: self.hash_function = "SHA256" else: self.hash_function = hash_function.upper() self.package_ttl = package_ttl self.mirror_path = mirror_path self.mirror_url = mirror_url self.temp_indices = temp_indices self.indexed_packages = set() self.logger = logging.getLogger() if log_level.upper() == 'DEBUG': self.logger.setLevel(logging.DEBUG) elif log_level.upper() == 'INFO': self.logger.setLevel(logging.INFO) elif log_level.upper() == 'WARNING': self.logger.setLevel(logging.WARNING) elif log_level.upper() == 'ERROR': self.logger.setLevel(logging.ERROR) elif log_level.upper() == 'CRITICAL': self.logger.setLevel(logging.CRITICAL) else: print("Bad log level entered, defaulting to 'INFO'") self.logger.setLevel(logging.INFO) log_format = "%(asctime)s [%(levelname)-5.5s] %(message)s" logFormatter = logging.Formatter(log_format) console = logging.StreamHandler() console.setFormatter(logFormatter) fileHandler = logging.FileHandler(filename=log_file) fileHandler.setFormatter(logFormatter) self.logger.addHandler(fileHandler) self.logger.addHandler(console) # Sync the whole mirror def sync(self): self.lock_file = os.path.join(self.temp_indices, 'sync_in_progress') if os.path.exists(self.lock_file): self.logger.info("Sync already in progress") sys.exit(1) f = open(self.lock_file, 'w') 
f.close() try: self.logger.info("=======================================") self.logger.info("= Starting Sync of Mirror =") self.logger.info("=======================================") self.update_pool() self.get_dists_indices() self.get_zzz_dists() self.check_release_files() self.check_indices() self.update_mirrors() self.update_indices() self.clean() self.update_project_dir() self.gen_lslR() os.remove(self.lock_file) except: self.logger.info("Exception caught, removing lock file") os.remove(self.lock_file) raise # Update the pool directory of the mirror # NOTE: This does not delete old packages, so it is safe to run at any time def update_pool(self): rsync_command = "rsync --recursive --times --links --hard-links \ --contimeout=10 --timeout=10 --no-motd --stats \ --progress \ -vz rsync://{mirror_url}/pool {mirror_path}/" rsync_command = rsync_command.format( mirror_url=self.mirror_url, mirror_path=self.mirror_path ) self.logger.info("Downloading new packages") rsync_status = Popen(rsync_command, stdout=PIPE, stderr=PIPE, shell=True) for line in rsync_status.stdout: self.logger.debug(line) # Update the entire mirror, excluding package, source, and release indices def update_mirrors(self): rsync_command = "rsync --recursive --times --links --hard-links \ --exclude 'Packages*' --exclude 'Sources*' \ --exclude 'Release*' --exclude 'ls-lR.gz' --exclude 'pool' \ --contimeout=10 --timeout=10 --no-motd --delete --stats \ --delay-updates --progress \ -vz rsync://{mirror_url}/ {mirror_path}/" rsync_command = rsync_command.format( mirror_url=self.mirror_url, mirror_path=self.mirror_path ) self.logger.info("Downloading all new files except indices") rsync_status = Popen(rsync_command, stdout=PIPE, stderr=PIPE, shell=True) for line in rsync_status.stdout: self.logger.debug(line) # Download the 'dists' directory and place it in a # temporary place so it can be checked to make sure it is accurate def get_dists_indices(self): rsync_command = "rsync --recursive --times --links 
--hard-links \ --exclude 'installer*' --delete --no-motd --stats\ --progress \ -vz rsync://{mirror_url}/dists {temp_indices}/" rsync_command = rsync_command.format( mirror_url=self.mirror_url, temp_indices=self.temp_indices ) self.logger.info( ("Downloading dist indices and storing them " "in a temporary place") ) rsync_status = Popen(rsync_command, stdout=PIPE, stderr=PIPE, shell=True) for line in rsync_status.stdout: self.logger.debug(line) # Download the 'zzz-dists' directory and place it in a # temporary place so it can be checked to make sure it is accurate # NOTE: This is for Debian compatibility, this should do nothing in an # ubuntu mirror because they do not have the 'zzz-dists' dir, but # debian symlinks some things in the 'dists' dir to 'zzz-dists' def get_zzz_dists(self): rsync_command = "rsync --recursive --times --links --hard-links \ --exclude 'installer*' --delete --no-motd --stats\ --progress \ -vz rsync://{mirror_url}/zzz-dists {temp_indices}/" rsync_command = rsync_command.format( mirror_url=self.mirror_url, temp_indices=self.temp_indices ) self.logger.info( "Downloading zzz-dists and storing them in a temporary place" ) rsync_status = Popen(rsync_command, stdout=PIPE, stderr=PIPE, shell=True) for line in rsync_status.stdout: self.logger.debug(line) # Update the 'project' directory, delete the files that do not exist on the # mirror you are cloning from, then add an entry for our mirror in # project/trace def update_project_dir(self): rsync_command = "rsync --recursive --times --links --hard-links \ --progress --delete -vz --stats --no-motd \ rsync://{mirror_url}/project {mirror_path}/ && date -u \ > ${mirror_path}/project/trace/$(hostname -f)" rsync_command = rsync_command.format( mirror_url=self.mirror_url, mirror_path=self.mirror_path ) self.logger.info("Updating 'project' directory") rsync_status = Popen(rsync_command, stdout=PIPE, stderr=PIPE, shell=True) for line in rsync_status.stdout: self.logger.debug(line) # Check that each index is 
accurate (Packages.gz and Sources.gz files) def check_indices(self): dists_path = self.temp_indices self.logger.info("Gathering Indices") indices = self._get_indices(dists_path) dict_indices = {} for index in indices: split_path = os.path.split(index) dir_name = split_path[0] file_name = split_path[1] if dir_name not in dict_indices.keys(): dict_indices[dir_name] = [file_name] else: dict_indices[dir_name] = dict_indices[dir_name] + [file_name] for key in dict_indices.keys(): if "Sources" in dict_indices[key]: index = os.path.join(key, "Sources") self.check_index(index) elif "Sources.gz" in dict_indices[key]: index = os.path.join(key, "Sources.gz") self.check_index(index) elif "Sources.bz2" in dict_indices[key]: index = os.path.join(key, "Sources.bz2") self.check_index(index) if "Packages" in dict_indices[key]: index = os.path.join(key, "Packages") self.check_index(index) elif "Packages.gz" in dict_indices[key]: index = os.path.join(key, "Packages.gz") self.check_index(index) elif "Packages.bz2" in dict_indices[key]: index = os.path.join(key, "Packages.bz2") self.check_index(index) # Find all of the 'Packages.gz' files and 'Sources.gz' files in the 'dists' # directory so the check_index() function can check their integrity def _get_indices(self, dir): if not os.path.isfile(dir): indices = [] for item in os.listdir(dir): file_path = os.path.join(dir, item) indices = indices + self._get_indices(file_path) return indices else: if re.match(".*(Packages|Sources)(\.gz|\.bz2)?$", dir): return [dir] else: return [] # Check that the index is accurate and all the files it says exist in our # mirror actually exist (do not check the checksum of the file though as # that will take too much time) def check_index(self, file_name): if not re.match(".*(\.gz|\.bz2)$", file_name): with open(file_name, 'r') as f_stream: f_contents = f_stream.read() elif re.match(".*\.gz$", file_name): with gzip.open(file_name, 'r') as f_stream: f_contents = f_stream.read() elif re.match(".*\.bz2$", 
file_name): with bz2.BZ2File(file_name, 'r') as f_stream: f_contents = f_stream.read() self.logger.debug("Checking index " + file_name) if re.match(".*Packages(\.gz|\.bz2)?$", file_name): for line in f_contents.split('\n'): if line.startswith("Package:"): package = line.split()[1] if line.startswith("Filename:"): file_name = line.split(" ")[1] file_path = os.path.join(self.mirror_path, file_name) self.indexed_packages.add(file_name) if not os.path.isfile(file_path): self.logger.error("Missing file: " + file_path) raise MirrorException("Missing file: " + file_path) if re.match(".*Sources(\.gz|\.bz2)?$", file_name): lines_to_check = [] for line in f_contents.split('\n'): if line.startswith("Package:"): package = line.split()[1] elif line.startswith("Directory:"): dir_name = line.split()[1] elif line.startswith("Files:"): hash_type = "MD5Sum" elif line.startswith(" ") and hash_type == "MD5Sum": lines_to_check = lines_to_check + [line] elif line == "": for i in lines_to_check: line_contents = i.split() file_name = line_contents[2] self.indexed_packages.add( os.path.join(dir_name, file_name) ) md5Sum = line_contents[0] file_path = os.path.join(self.mirror_path, dir_name, file_name) if not os.path.isfile(file_path): self.logger.error("Missing file: " + file_path) raise MirrorException("Missing file: " + file_path) elif not line.startswith(" "): hash_type = None dir_name = None lines_to_check = [] # Check each release file to make sure it is accurate def check_release_files(self): self.logger.info("Gathering Release Files") release_files = self._get_release_files(self.temp_indices) for file in release_files: self.check_release_file(file) # Find all the 'Release' files in the 'dists' directory def _get_release_files(self, dir): if not os.path.isfile(dir): indices = [] for item in os.listdir(dir): file_path = os.path.join(dir, item) indices = indices + self._get_release_files(file_path) return indices else: if dir.endswith("/Release"): return [dir] else: return [] # Check 
that each index the release file says our mirror has actually # exists in our mirror and that the hash_values match. If they are # inconsistent it will lead to a broken mirror. def check_release_file(self, file_name): current_hash_type = None self.logger.debug("Checking release file " + file_name) with open(file_name) as f_stream: f_contents = f_stream.read() dir = os.path.split(file_name)[0] hash_type = None for line in f_contents.split('\n'): if line.startswith("MD5Sum"): current_hash_type = "MD5SUM" elif line.startswith("SHA1"): current_hash_type = "SHA1" elif line.startswith("SHA256"): current_hash_type = "SHA256" elif line.startswith(" ") and self.hash_function == current_hash_type: file_to_check = line.split()[2] hash_val = line.split()[0] file_path = os.path.join(dir, file_to_check) if os.path.isfile(file_path): with open(file_path, 'r') as f_stream: file_path_contents = f_stream.read() if self.hash_function == "MD5SUM": actual_md5sum = hashlib.md5(file_path_contents).hexdigest() if hash_val != actual_md5sum: self.logger.debug( actual_md5sum + ' does not match ' + hash_val + ' for file ' + file_path + ' (MD5Sum)' ) raise MirrorException( actual_md5sum + ' does not match ' + hash_val + ' for file ' + file_path + ' (MD5Sum)' ) elif self.hash_function == "SHA1": actual_sha1 = hashlib.sha1(file_path_contents).hexdigest() if hash_val != actual_sha1: self.logger.debug( actual_sha1 + ' does not match ' + hash_val + ' for file ' + file_path + ' (SHA1)' ) raise MirrorException( actual_sha1 + ' does not match ' + hash_val + ' for file ' + file_path + ' (SHA1)' ) elif self.hash_function == "SHA256": actual_sha256 = hashlib.sha256(file_path_contents).hexdigest() if hash_val != actual_sha256: self.logger.debug( actual_sha256 + ' does not match ' + hash_val + ' for file ' + file_path + ' (SHA256)' ) raise MirrorException( actual_sha256 + '
<reponame>lzkelley/bhem """ """ import logging # import warnings import numpy as np import scipy as sp from . import radiation, utils from . constants import MELC, MPRT, SPLC, K_BLTZ, H_PLNK class Mahadevan96: def __init__(self, adaf, freqs, log=30, backup_temp=None, quiet=True): """ """ if not isinstance(log, logging.Logger): log = utils.get_log(level=log) self.freqs = freqs self._log = log self._quiet = quiet # Mass in units of solar=masses self.msol = adaf.ms self.fedd = adaf.fedd # self._adaf = adaf self._alpha = adaf.alpha_visc self._beta = adaf.beta_gp self._eps_prime = adaf._eps_prime self._delta = MELC/MPRT # fraction of energy transfered to electrons from viscous heating self._c1 = adaf._c1 self._c3 = adaf._c3 self._rmin = adaf.rs[0] self._rmax = adaf.rs[-1] self._s1 = 1.42e9 * np.sqrt(1 - self._beta) * np.sqrt(self._c3 / self._c1 / self._alpha) # s2 = 1.19e-13 * xm_char self._s3 = 1.05e-24 self._pars = ["alpha", "beta", "eps_prime", "delta", "c1", "c3", "rmin", "rmax"] self._backup_temp = backup_temp # Find the electron temperature and calculate spectra self._solve() return def __str__(self): rv = "Mahadevan96(msol={:.4e}, fedd={:.4e}".format(self.msol, self.fedd) for pp in self._pars: vv = getattr(self, "_" + pp) rv += ", {}={:.3e}".format(pp, vv) rv += ")" return rv def _solve(self): log = self._log def _func(logt): tt = np.power(10.0, logt) qv, qs, qb, qc = self._heat_cool(tt) rv = qv - (qs + qb + qc) return rv start_temps = [1e11, 1e10, 1e12, 1e9, 1e8] success = False for ii, t0 in enumerate(start_temps): log.debug("Try {}, temp: {:.1e}".format(ii, t0)) try: logt = sp.optimize.newton(_func, np.log10(t0), tol=1e-4, maxiter=100) self.temp_e = np.power(10.0, logt) except (RuntimeError, FloatingPointError) as err: log.debug("WARNING: Trial '{}' (t={:.1e}) optimization failed: {}".format( ii, t0, str(err))) else: success = True break if success: log.debug("Success with `t0`={:.2e} ==> t={:.2e}".format(t0, self.temp_e)) else: lvl = log.DEBUG if self._quiet 
else log.ERROR log.log(lvl, "FAILED to find electron temperature!") log.log(lvl, str(self)) log.log(lvl, "m = {:.2e}, f = {:.2e}".format(self.msol, self.fedd)) err = ("Unable to find electron temperature!" "\nIf the eddington factor is larger than 1e-2, " "this may be expected!") if self._backup_temp is None: raise RuntimeError(err) self.temp_e = self._backup_temp log.log(lvl, "WARNING: setting temperature to '{}'!".format(self.temp_e)) qv, qs, qb, qc = self._heat_cool(self.temp_e) heat = qv cool = qs + qb + qc diff = np.fabs(heat - cool) / heat if diff > 1e-2: lvl = logging.DEBUG if not self._quiet: if diff > 1.0: lvl = logging.ERROR elif diff > 1e-1: lvl = logging.INFO err = "Electron temperature seems inconsistent (Te = {:.2e})!".format(self.temp_e) err += "\n\tm: {:.2e}, f: {:.2e}".format(self.msol, self.fedd) err += "\n\tHeating: {:.2e}, Cooling: {:.2e}, diff: {:.4e}".format(heat, cool, diff) err += "\n\tThis may mean there is an input error (e.g. mdot may be too large... or small?)." log.log(lvl, err) self.theta_e = radiation.dimensionless_temperature_theta(self.temp_e, MELC) # print("Electron effective temperature: {:.2e} K (theta = {:.2e})".format( # self.temp_e, self.theta_e)) self._xm_e = xm_from_te(self.temp_e, self.msol, self.fedd) self._s2 = self._const_s2(self._xm_e) freqs = self.freqs synch = self._calc_spectrum_synch(freqs) brems = self._calc_spectrum_brems(freqs) compt = self._calc_spectrum_compt(freqs) self.spectrum_synch = synch self.spectrum_brems = brems self.spectrum_compt = compt self.spectrum = synch + brems + compt return def _const_s2(self, xm): s2 = 1.19e-13 * xm return s2 def _heat_cool(self, temp): """Calculate heating and cooling rates for disk as a whole. 
""" alpha = self._alpha beta = self._beta eps_prime = self._eps_prime msol = self.msol fedd = self.fedd delta = self._delta c1 = self._c1 c3 = self._c3 rmin = self._rmin rmax = self._rmax theta = K_BLTZ * temp / (MELC * SPLC * SPLC) xm = xm_from_te(temp, msol, fedd) s1 = 1.42e9 * np.sqrt(1 - beta) * np.sqrt(c3 / c1 / alpha) s2 = self._const_s2(xm) s3 = 1.05e-24 alpha_crit, mean_amp_a, tau_es = self._compton_params(temp, fedd) # Viscous Heating # --------------- _ge = radiation._heat_func_g(theta) q1 = 1.2e38 * _ge * c3 * beta * msol * np.square(fedd) / np.square(alpha*c1) / rmin q2 = delta * 9.39e38 * eps_prime * c3 * msol * fedd / rmin heat_elc = q1 + q2 # Synchrotron # ----------- # Eq. 24 [Hz] f_p = self._freq_synch_peak(temp, msol, fedd, s2=s2) lum_synch_peak = np.power(s1 * s2, 3) * s3 * np.power(rmin, -1.75) * np.sqrt(msol) lum_synch_peak *= np.power(fedd, 1.5) * np.power(temp, 7) / f_p # Eq. 26 power_synch = 5.3e35 * np.power(xm/1000, 3) * np.power(alpha/0.3, -1.5) power_synch *= np.power((1 - beta)/0.5, 1.5) * np.power(c1/0.5, -1.5) # Bremsstrahlung # -------------- # Eq. 29 power_brems = 4.78e34 * np.log(rmax/rmin) / np.square(alpha * c1) power_brems *= radiation._brems_fit_func_f(theta) * fedd * msol # Compton # ------- power_compt = lum_synch_peak * f_p / (1 - alpha_crit) power_compt *= (np.power(6.2e7 * (temp/1e9) / (f_p/1e12), 1 - alpha_crit) - 1.0) return heat_elc, power_synch, power_brems, power_compt def _freq_synch_peak(self, temp, msol, fedd, s2=None): """Mahadevan 1996 Eq. 24 """ if s2 is None: xm = xm_from_te(temp, msol, fedd) s2 = self._const_s2(xm) nu_p = self._s1 * s2 * np.sqrt(fedd/msol) * np.square(temp) * np.power(self._rmin, -1.25) return nu_p def _compton_params(self, te, fedd): """Mahadevan Eqs. 31-34 """ theta_e = radiation.dimensionless_temperature_theta(te, MELC) # Eq. 31 tau_es = 23.87 * fedd * (0.3 / self._alpha) * (0.5 / self._c1) * np.sqrt(3/self._rmin) # Eq. 32 mean_amp_a = 1.0 + 4.0 * theta_e + 16*np.square(theta_e) # Eq. 
34 alpha_crit = - np.log(tau_es) / np.log(mean_amp_a) return alpha_crit, mean_amp_a, tau_es def _synch_peak(self, fedd, msol, temp, s2=None): if s2 is None: xm = xm_from_te(temp, msol, fedd) s2 = self._const_s2(xm) f_p = self._freq_synch_peak(temp, msol, fedd, s2=s2) l_p = np.power(self._s1 * s2, 3) * self._s3 * np.power(self._rmin, -1.75) * np.sqrt(msol) l_p *= np.power(fedd, 1.5) * np.power(temp, 7) / f_p return f_p, l_p def _calc_spectrum_synch(self, freqs): """Mahadevan 1996 - Eq. 25 Cutoff above peak frequency (i.e. ignore exponential portion). Ignore low-frequency transition to steeper (22/13 slope) from rmax. """ msol = self.msol fedd = self.fedd scalar = np.isscalar(freqs) freqs = np.atleast_1d(freqs) lnu = self._s3 * np.power(self._s1*self._s2, 1.6) lnu *= np.power(msol, 1.2) * np.power(fedd, 0.8) lnu *= np.power(self.temp_e, 4.2) * np.power(freqs, 0.4) nu_p = self._freq_synch_peak(self.temp_e, msol, fedd, s2=self._s2) lnu[freqs > nu_p] = 0.0 if scalar: lnu = np.squeeze(lnu) return lnu def _calc_spectrum_brems(self, freqs): """Mahadevan 1996 - Eq. 30 """ msol = self.msol fedd = self.fedd temp = self.temp_e const = 2.29e24 # erg/s/Hz scalar = np.isscalar(freqs) freqs = np.atleast_1d(freqs) t1 = np.log(self._rmax/self._rmin) / np.square(self._alpha * self._c1) t2 = np.exp(-H_PLNK*freqs / (K_BLTZ * temp)) * msol * np.square(fedd) / temp fe = radiation._brems_fit_func_f(temp) lbrems = const * t1 * fe * t2 if scalar: lbrems = np.squeeze(lbrems) return lbrems def _calc_spectrum_compt(self, freqs): """Compton Scattering spectrum from upscattering of Synchrotron photons. Mahadevan 1996 - Eq. 38 """ fedd = self.fedd temp = self.temp_e scalar = np.isscalar(freqs) freqs = np.atleast_1d(freqs) f_p, l_p = self._synch_peak(fedd, self.msol, temp) alpha_c, mean_amp_a, tau_es = self._compton_params(temp, fedd) lsp = np.power(freqs/f_p, -alpha_c) * l_p lsp[freqs < f_p] = 0.0 # See Eq. 
35 max_freq = 3*K_BLTZ*temp/H_PLNK lsp[freqs > max_freq] = 0.0 if scalar: lsp = np.squeeze(lsp) return lsp def left(yy): """ Mahadevan 1996 Eq.B12 """ return yy + 1.852 * np.log(yy) def left_prime(yy): return 1.0 + 1.852/yy def right(te, msol, fedd, alpha=0.3, c1=0.5, c3=0.3, beta=0.5): """ Mahadevan 1996 Eq.B12 """ def lo(the): f2 = 0.26*(2.5*np.log(the) - 1/the) + 0.05871 return f2 def hi(the): f2 = 0.26*(3*np.log(the) + np.log(sp.special.kn(2, 1/the))) return f2 theta_e = K_BLTZ * te / (MELC*SPLC*SPLC) APPROX_BELOW = 1.0/30 f1 = 10.36 + 0.26 * np.log(msol * fedd) f3 = 0.26 * np.log(alpha*c1*c3*(1-beta)) + 3.7942 if np.isscalar(te): if (theta_e < APPROX_BELOW): f2 = lo(theta_e) else: f2 = hi(theta_e) else: f2 = np.zeros_like(theta_e) # Use an approximation for small values of `theta_e` (i.e. large arguments to `kn()`) idx = (theta_e < APPROX_BELOW) f2[idx] = lo(theta_e[idx]) f2[~idx] = hi(theta_e[~idx]) return f1 - f2 - f3 def guess_yy(fedd): """ Mahadevan 1996 Eq.B13 """ yy = np.power(np.power(10.0, 3.6 + np.log(fedd)/4), 1/3) return yy ''' def freq_from_yy(yy, te, bf): """Mahadevan 1996 Eq. 18 """ theta_e = K_BLTZ*te / (MELC*SPLC*SPLC) xm = np.power(yy, 3) vb = QELC * bf / (2*np.pi*MELC*SPLC) freq = 3.0*xm*vb*np.square(theta_e)/2.0 return freq ''' def xm_from_te(te, mass_sol, fedd): # RHS Value we're trying to match rr = right(te, mass_sol, fedd) if np.isscalar(rr): yy = guess_yy(fedd) ll = left(yy) errs = np.fabs(rr - ll)/rr num = 0 while (errs > 1e-2): yy = yy - (ll - rr)/left_prime(yy) ll =
import math import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable from torch.nn.init import xavier_uniform, _calculate_correct_fan, \ calculate_gain, dirac class InitialConv(nn.Module): def __init__(self, in_channels, n_filters, filter_size, n_init_conv, subsample=(2, 2), bias=True): super(InitialConv, self).__init__() """ Creates an IDB (Initial Downsampling Block), the first block of the FC-DRN architecture. It is composed of: 1) Convolution + ReLU activation 2) Max pooling 3) N times (specified with 'n_init_conv-1'), Convolution + ReLU activation. Input: - in_channels: int. Input image channels. Example: 3 if it is RGB or 1 if it is grayscale. - n_filters: int. Number of channels for all convolutions. - filter_size: int. Size of convolution filters. - n_init_conv: int. Number of convolutions in IDB. After the first one there is always a pooling. - subsample: int or tuple. Specifies the stride of the pooling. - bias: bool. Use bias in convolutions. """ self.conv1 = nn.Conv2d(in_channels, n_filters, kernel_size=filter_size, padding=(filter_size - 1) // 2, bias=bias) self.pool = nn.MaxPool2d(kernel_size=2, stride=subsample, ceil_mode=True) self.additional_convs = self._make_layer(n_init_conv - 1, n_filters, filter_size, bias) self.n_init_conv = n_init_conv self.subsample = subsample # Initialize modules for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_uniform(m.weight) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, num_conv_blocks, n_filters, filter_size, bias): """ It builds 'num_conv_blocks' repetitions of: Convolution + ReLU activation. Last block does not have ReLU. 
""" layers = [] for n in range(num_conv_blocks): layers.append( nn.Conv2d(n_filters, n_filters, kernel_size=filter_size, padding=(filter_size - 1) // 2, bias=bias)) if n < num_conv_blocks - 1: layers.append(nn.ReLU(inplace=True)) return nn.Sequential(*layers) def forward(self, x): out = self.conv1(x) out = F.relu(out) if self.subsample[0] > 1 or self.subsample[1] > 1: out = self.pool(out) if self.n_init_conv > 1: out = self.additional_convs(out) return out class PreprocessBlockStandard(nn.Module): def __init__(self, nb_filter, dropout=0., dilation=1, bias=True, bn_momentum=0.1, ini='random'): super(PreprocessBlockStandard, self).__init__() """ It creates a dilated convolution block in the shape of: Batch normalization, Dropout, ReLU activation, dilated convolution. It is used as a transformation in the FC-DRN-D. It does not change number of input feature maps. Input: - nb_filter: int. Number of input feature maps. - dropout: float. Percentage of dropout. - dilation: int. Dilation rate for the dilated convolution. - bias: bool. Bias in convolution. - bn_momentum: float. Batch-norm momentum. - ini: string. Initialization for the dilated convolution weights. It can be 'random' or 'identity'. """ self.std_block = BnReluConv(nb_filter, nb_filter, 3, dropout, bias=bias, dilation=dilation, bn_momentum=bn_momentum, ini=ini) def forward(self, x, crop_size=None): return self.std_block(x) class PreprocessBlockBottleMg(nn.Module): def __init__(self, nb_filter, dropout=0., dilation=1, bias=True, bn_momentum=0.1, mg=[1, 2, 1], ini='random'): super(PreprocessBlockBottleMg, self).__init__() """ It creates a multi-grid dilated convolution block. If the dilation rate is 1 (traditional convolution), it only creates one block of: Batch normalization, Dropout, ReLU activation, convolution. If the dilation rate is bigger than 1, it creates 3 blocks of BN + Dropout + ReLu + Dilated convolution with dilation rates [mg[0]*dilation, mg[1]*dilation, mg[2]*dilation] respectively. 
It can be used as a transformation in the FC-DRN-D and for the finetunned architectures variants (FC-DRN-P-D and FC-DRN-S-D). It does not change number of input feature maps. Input: - nb_filter: int. Number of input feature maps. - dropout: float. Percentage of dropout. - dilation: int. Dilation rate for the dilated convolution. - bias: bool. Bias in convolution. - bn_momentum: float. Batch-norm momentum. - mg: int list. Each of the positions is a multiplier for the dilation rate used in each of the 3 convolutions. - ini: string. Initialization for the dilated convolution weights. It can be 'random' or 'identity'. """ self.dil_factor = dilation if self.dil_factor == 1: self.no_dil = BnReluConv(nb_filter, nb_filter, 3, dropout, bias=bias, dilation=1, bn_momentum=bn_momentum, ini=ini) else: self.std_block = BnReluConv(nb_filter, nb_filter, 3, dropout, bias=bias, dilation=mg[0] * self.dil_factor, bn_momentum=bn_momentum, ini=ini) self.std_block2 = BnReluConv(nb_filter, nb_filter, 3, dropout, bias=bias, dilation=mg[1] * self.dil_factor, bn_momentum=bn_momentum, ini=ini) self.std_block3 = BnReluConv(nb_filter, nb_filter, 3, dropout, bias=bias, dilation=mg[2] * self.dil_factor, bn_momentum=bn_momentum, ini=ini) def forward(self, x, crop_size=None): if self.dil_factor == 1: return self.no_dil(x) else: out = self.std_block(x) out = self.std_block2(out) out = self.std_block3(out) return out class BnReluConv(nn.Module): def __init__(self, input_channels, n_filters, filter_size, dropout, bias=True, dilation=1, stride=(1, 1), bn_momentum=0.1, ini='random'): super(BnReluConv, self).__init__() """ It builds a block with: Batch Norm, Dropout, ReLU, Convolution. Input: - input_channels: int. Number of input feature maps. - n_filters: int. Number of output feature maps. - filter_size: int. Convolution filter size. - dropout: float. Percentage of dropout. - bias: bool. Bias in convolution. - dilation: int. Dilation rate for dilated convolution. If 1, traditional convolution is used. 
- stride: int or tuple. Stride used in the convolution. - bn_momentum: float. Batch-norm momentum. - ini: string. Initialization for the dilated convolution weights. It can be 'random' or 'identity'. """ self.bn = nn.BatchNorm2d(input_channels, eps=0.001, momentum=bn_momentum) if dropout > 0: self.drop = nn.Dropout(dropout) if dilation == 1: self.conv = nn.Conv2d(input_channels, n_filters, kernel_size=filter_size, padding=(filter_size - 1) // 2, bias=bias, stride=stride) # Initialize modules for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_uniform(m.weight) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() # In the case where we want to use dilated convolutions # in the transformation blocks between ResNets else: self.conv = nn.Conv2d(input_channels, n_filters, kernel_size=filter_size, dilation=dilation, padding=((filter_size + (filter_size - 1) * ( dilation - 1)) - 1) // 2, bias=bias) # Initialize modules for m in self.modules(): if isinstance(m, nn.Conv2d): if ini == 'identity': dirac(m.weight) else: kaiming_uniform(m.weight) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() self.dropout = dropout def forward(self, x): out = F.relu(self.bn(x)) if self.dropout > 0: out = self.drop(out) out = self.conv(out) return out class Upsample_conv(nn.Module): def __init__(self, input_channels, n_filters, filter_size=3, bias=True): super(Upsample_conv, self).__init__() """ This class contains 3 operations: 1) Upsampling function with scale factor 2. 2) Crop layer to make sure that the size after the upsampling matches the desired size. 3) Convolution to smooth the upsampling Input: - input_channels: int. Number of input feature maps. - n_filters: int. Number of output feature maps. - filter_size: int. Filter size of the smoothing convolution. - bias: bool. Bias in the convolution. 
""" self.up_conv = nn.Conv2d(input_channels, n_filters, kernel_size=filter_size, padding=(filter_size - 1) // 2, bias=bias) # Initialize modules for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_uniform(m.weight) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def crop_layer(self, layer, target_size): ''' This layer crops the spatial size of the input feature maps. Input: layer: torch Variable. Input to be cropped. target_size: tuple or list with 2 positions. Size at the end of the crop. ''' dif = [(layer.shape[2] - target_size[0]) // 2, (layer.shape[3] - target_size[1]) // 2] cs = target_size return layer[:, :, dif[0]:dif[0] + cs[0], dif[1]:dif[1] + cs[1]] def forward(self, x, size): out = F.upsample(x, scale_factor=2) out = self.crop_layer(out, [size[0], size[1]]) out = self.up_conv(out) return out class Classifier(nn.Module): def __init__(self, input_channels, n_classes, bias=True, logsoftmax=True): super(Classifier, self).__init__() """ A classifier of 1x1 convolution and Softmax function. Input: - input_channels: int. Input feature maps. - n_classes: int. Number of classes as output feature maps. - bias: bool. Bias in the convolution. - logsoftmax: bool. If True, LogSoftmax function is used instead of Softmax. 
""" self.cl_conv = nn.Conv2d(input_channels, n_classes, kernel_size=1, bias=bias) self.logsoftmax = logsoftmax if logsoftmax: self.softmax = torch.nn.LogSoftmax(dim=1) else: self.softmax = torch.nn.Softmax(dim=1) # Initialize modules for m in self.modules(): if isinstance(m, nn.Conv2d): xavier_uniform(m.weight) m.bias.data.zero_() def forward(self, x): out = self.cl_conv(x) out = self.softmax(out) if not self.logsoftmax: out = out.clamp(min=1e-7) out = out.log_() return out class BasicBlock(nn.Module): def __init__(self, input_channels, n_filters, filter_size=3, dropout=0., dilation=1, bias=True, bn_momentum=0.1): super(BasicBlock, self).__init__() """ This is a ResNet basic block, composed of 2 sets of: batch normalization, dropout, ReLU, convolution. Output y is computed as: y = F(x) + x, where F() is the BasicBlock. If the channels of x and F(x) are not equal, a 1x1 convolution is used on x to match the output channels. Input: - input_channels: int. Number of input feature maps. - n_filters: int. Number of output feature maps. - filter_size: int. Filter size of the convolution. - dropout: float. Dropout probability. - dilation: int. Dilation factor for the dilated convolution. It is only applied to the first set of BN + Dropout + ReLU + conv. - bias: bool. Bias of the convolution. -
<gh_stars>10-100 import random import math import pygame from pygame.color import THECOLORS import pymunk from entities.agent import Agent from entities.edible import Edible from entities.obstacle import Obstacle from PIL import Image from maps.map import Dungeons import numpy as np class Env(object): def __init__(self, **kwargs): """ Instantiate a game with the given parameters :param horizon: int, time horizon of an episode :param done: bool, True if the episode is terminated :param mode: 'goal' or 'health': - if 'goal' : we use the field goal to create a goal and the simulation ends when the goal is reached or when we reach the horizon - if 'survival', the health measurements in initialized to 100 and the simulation ends when the health reaches 0 or when we reach the horizon :param shape: size 2 tuple with height and width of the environment :param goal: dict with the following fields, only useful if mode is 'goal' - size: float, size of the goal - position: size 2 tuple giving the position or 'random' :param walls: dict with the following fields: - number: int, number of walls in the environment - size: float, size of the walls - position: array of coordinates or 'random' :param poisons: dict with the following fields - number: int, number of poisons in the environment - size: float, size of the poisons - reap: bool, whether another poison object reappears when one is consumed :param fruits: dict with the following fields - number: int, number of fruits in the environment - size: float, size of the fruits - reap: bool, whether another fruit object reappears when one is consumed :param agent: the agent evolving in the environment :param display: bool, whether to display the task or not """ # Save the arguments for reset self.parameters = kwargs if not kwargs['map']: self.mapp_ = False self.done = False self.t = 0 self.horizon = kwargs['horizon'] self.width, self.height = kwargs['shape'] self.display = kwargs['display'] if self.display: self.screen = 
pygame.display.set_mode((self.width, self.height)) self.screen.set_alpha(None) else: self.screen = pygame.Surface((self.width, self.height)) self.screen.set_alpha(None) self.clock = pygame.time.Clock() self.npimage = np.zeros((self.width, self.height, 3)) # Set a surface to compute Sensors # Initialize pymunk space self.space = pymunk.Space() self.space.gravity = pymunk.Vec2d(0., 0.) self.space.collision_slop = 0 self.space.collision_persistence = 1 self.space.collision_bias = 0 self.handle_collisions() # Define the external walls texture_params = kwargs['walls_texture'] self.obstacles = [ Obstacle( shape='rectangle', position=(self.width/2, 5), angle=0, texture=texture_params, environment=self, length=self.width, width=10 ), Obstacle( shape='rectangle', position=(self.width / 2, self.height-5), angle=0, texture=texture_params, environment=self, length=self.width, width=10 ), Obstacle( shape='rectangle', position=(5, self.height / 2), angle=math.pi/2, texture=texture_params, environment=self, length=self.height, width=10 ), Obstacle( shape='rectangle', position=(self.width - 5, self.height / 2), angle=math.pi/2, texture=texture_params, environment=self, length=self.height, width=10 ) ] # Add obstacles if not kwargs['map']: for obstacle_params in kwargs['obstacles']: obstacle_params['environment'] = self obstacle = Obstacle(**obstacle_params) self.obstacles.append(obstacle) if kwargs['map']: #create map object and check for connectivity self.mapp_ = Dungeons(space_size=(self.width, self.height), n_rooms=kwargs['n_rooms']) while not self.mapp_.topology.geom_type == 'Polygon': self.mapp_ = Dungeons(space_size=(self.width, self.height), n_rooms=kwargs['n_rooms']) print(self.mapp_.topology.geom_type) #arrange walls for wall in self.mapp_.walls: self.obstacles.append( Obstacle( shape='rectangle', position=(wall.x, self.height - wall.y), angle=0, texture=wall.texture, environment=self, width=wall.height, length=wall.width )) # Define the episode mode self.mode = 
kwargs['mode'] # Create the goal in goal mode if self.mode == 'goal': self.goal_size = kwargs['goal']['size'] self.goal = self.create_goal(kwargs['goal']['position']) if 'goal' not in kwargs['agent']['measurements']: kwargs['agent']['measurements'].append('goal') # Make sure we have the right measurements in survival mode if self.mode == 'survival': if 'health' not in kwargs['agent']['measurements']: kwargs['agent']['measurements'].append('health') if 'dead' not in kwargs['agent']['measurements']: kwargs['agent']['measurements'].append('dead') # Create the poisons self.poison_params = kwargs['poisons'].copy() self.poisons = [] self.poison_params['environment'] = self self.poison_params['collision_type'] = 3 if self.poison_params['positions'] == 'random': positions = ['random'] * self.poison_params['number'] if kwargs['map']: positions = self.mapp_.generate_random_point(self.poison_params['number']) else: positions = self.poison_params['positions'] for position in positions: poison = Edible(position=position, **self.poison_params) self.poisons.append(poison) # Once the poisons have been created, we switch to random position for the new poisons self.poison_params['position'] = 'random' # Create the fruits self.fruit_params = kwargs['fruits'].copy() self.fruits = [] self.fruit_params['environment'] = self self.fruit_params['collision_type'] = 2 if self.fruit_params['positions'] == 'random': positions = ['random'] * self.fruit_params['number'] if kwargs['map']: positions = self.mapp_.generate_random_point(self.fruit_params['number']) else: positions = self.fruit_params['positions'] for position in positions: fruit = Edible(position=position, **self.fruit_params) self.fruits.append(fruit) # Once the fruits have been created, we switch to random position for the new fruits self.fruit_params['position'] = 'random' # Add the agent self.agent_param = kwargs['agent'].copy() if kwargs['map']: self.agent_param['position'] = self.mapp_.generate_random_point()[-1] 
#print(self.agent_param['position']) self.agent = Agent(environment=self, **self.agent_param ) # Set a surface to compute Sensors # TODO: change when multiple body parts self.sizeAroundAgent = max([ x.fovRange for x in self.agent.sensors]) + self.agent.radius self.agent.update_state() def create_goal(self, position): inertia = pymunk.moment_for_circle(1, 0, self.goal_size, (0, 0)) goal = pymunk.Body(1, inertia) c_shape = pymunk.Circle(goal, self.goal_size) c_shape.elasticity = 1.0 if position == 'random': position = ( random.randint(self.goal_size, self.width - self.goal_size), random.randint(self.goal_size, self.height - self.goal_size), ) goal.position = position c_shape.color = THECOLORS["green"] c_shape.collision_type = 4 self.space.add(goal, c_shape) return goal def reload_screen(self): # Fill the screen self.screen.fill(THECOLORS["black"]) # Do 10 mini-timesteps in pymunk for 1 timestep in our environment for _ in range(10): self.space.step(1. / 10) # Draw the entities self.draw_environment() # Get top view image of environment # TODO : Chhaaaaanggeeee meeee! Something faaasteeeer! 
data = pygame.image.tostring(self.screen, 'RGB') pil_image = Image.frombytes('RGB', (self.width, self.height), data) image = np.asarray(pil_image.convert('RGB')) self.npimage = image # Draw the agent self.agent.draw() data = pygame.image.tostring(self.screen, 'RGB') pil_image = Image.frombytes('RGB', (self.width, self.height), data) import os, os.path self.iter = len([name for name in os.listdir('images/')]) image = np.asarray(pil_image.convert('RGB'))[10:74,10:74,:] image.setflags(write=1) """im_final = [] for i in range(len(image)): if sum(image[i])!=0:""" pil_image = Image.fromarray(image) #print(image,'here') if np.all(image==0): print('yo') pass else: pil_image.save('images/'+str(self.iter+1)+'.png') # Update the display if self.display: pygame.display.flip() self.clock.tick() def handle_collisions(self): def begin_fruit_collision(arbiter, space, *args, **kwargs): # Remove the previous shape shapes = arbiter.shapes for shape in shapes: if shape.collision_type == 2: self.fruits.remove(shape.body.entity) space.remove((shape, shape.body)) # Update the measurements self.agent.update_meas('items', 1) self.agent.update_health(shape.body.entity.reward, self.mode) self.agent.update_meas('fruits', 1) self.agent.reward += shape.body.entity.reward if self.fruit_params['respawn']: if self.mapp_: self.fruit_params['position'] = self.mapp_.generate_random_point()[-1] self.fruits.append(Edible(**self.fruit_params)) return False def begin_poison_collision(arbiter, space, *args, **kwargs): # Remove the previous shape shapes = arbiter.shapes for shape in shapes: if shape.collision_type == 3: self.poisons.remove(shape.body.entity) space.remove((shape, shape.body)) # Update the measurements self.agent.update_meas('items', 1) self.agent.update_health(shape.body.entity.reward, self.mode) self.agent.update_meas('poisons', 1) self.agent.reward += shape.body.entity.reward if self.poison_params['respawn']: if self.mapp_: self.poison_params['position'] = 
self.mapp_.generate_random_point()[-1] self.poisons.append(Edible(**self.poison_params)) return False def begin_goal_collision(arbiter, space, *args, **kwargs): # This is the goal, we end the simulation and update the measurements self.agent.update_meas('goal', 1) self.agent.reward += 100 return False def begin_wall_collision(arbiter, space, *args, **kwargs): # This is the goal, we end the simulation and update the measurements #print('wall collision !') #print(self.agent.body.position) if self.agent.body.position[0]<=25: self.agent.body.position = (58,self.agent.body.position[1]) if self.agent.body.position[0]>=59: self.agent.body.position = (26,self.agent.body.position[1]) if self.agent.body.position[1]<=25: self.agent.body.position = (self.agent.body.position[0],58) if self.agent.body.position[1]>=59: self.agent.body.position = (self.agent.body.position[0],26) return False fruit_collision_handler = self.space.add_collision_handler( collision_type_a=0, collision_type_b=2 ) fruit_collision_handler.begin = begin_fruit_collision poison_collision_handler = self.space.add_collision_handler( collision_type_a=0, collision_type_b=3 ) poison_collision_handler.begin = begin_poison_collision goal_collision_handler = self.space.add_collision_handler( collision_type_a=0, collision_type_b=4 ) goal_collision_handler.begin = begin_goal_collision wall_collision_handler = self.space.add_collision_handler( 0, 1 ) wall_collision_handler.begin = begin_wall_collision def step(self, action): """ Method called to execute an action in the environment. 
:param action: string, the string code for the action to be executed by the agent :return: a tuple containing : - sensory_input : the sensory input at time t+1 - reward: the reward at time t - done: whether the episode is over - measurements : the measurements at time t+1 """ self.t += 1 # Default reward at time t self.agent.reward = -self.agent.living_penalty self.agent.update_health(-self.agent.living_penalty, self.mode) # Execute the action changes on the agent self.agent.apply_action(action) # Apply the step in the pymunk simulator self.reload_screen() # Get the agent's position and orientation x, y = self.agent.body.position theta = self.agent.body.angle self.agent.set_meas('x', x) self.agent.set_meas('y', y) self.agent.set_meas('theta', theta) # Get the agent's perception for sensor in self.agent.sensors: sensor.get_sensory_input(self) # Look for termination conditions if self.mode == 'goal' and self.agent.meas['goal'] == 1: self.done = True if self.mode == 'survival' and self.agent.meas['dead'] == 1: self.done = True if self.t >= self.horizon - 1: self.done = True return self.agent.state, self.agent.get_reward(), self.done, self.agent.get_meas() def reset(self): self.parameters['agent'].update(self.agent.get_new_averages()) for sensor in self.agent.sensors: sensor.reset() self.__init__(**self.parameters) def draw_environment(self): # TODO: Replace with entities # Draw the fruits for fruit in self.fruits: fruit.draw() # Draw the poisons for poison in self.poisons: poison.draw() for obstacle
<reponame>jwfromm/relax<filename>tests/python/unittest/test_tir_schedule_cache_read_write.py # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring import sys import pytest import tvm from tvm import tir from tvm.script import tir as T from tvm.tir.schedule.testing import verify_trace_roundtrip # pylint: disable=no-member,invalid-name,unused-variable ########## Function before schedule ########## @T.prim_func def elementwise(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def access_under_scope(b: T.handle, c: T.handle) -> None: A = T.alloc_buffer((128, 128)) B = T.match_buffer(b, (128, 128)) C = T.match_buffer(c, (128, 128)) for i0, j0 in T.grid(8, 8): with T.block("scope"): i, j = T.axis.remap("SS", [i0, j0]) for x, y in T.grid(16, 16): with T.block("A"): vi = T.axis.S(128, i * 16 + x) vj = T.axis.S(128, j * 16 + y) A[vi, vj] = 1.0 for x, y in T.grid(16, 16): with 
T.block("B"): vi = T.axis.S(128, i * 16 + x) vj = T.axis.S(128, j * 16 + y) B[vi, vj] = A[vi, vj] + 1.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = A[vi, vj] * 2.0 @T.prim_func def opaque_access(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None: A = T.match_buffer(a, (128, 128), dtype="float16") B = T.match_buffer(b, (128, 128), dtype="float16") C = T.match_buffer(c, (128, 128), dtype="float16") D = T.match_buffer(d, (128, 128), dtype="float16") for i, j in T.grid(128, 128): with T.block("load_store"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A[vi, vj]) T.writes(D[vi, vj]) D.data[vi * 128 + vj] = T.load("float16", A.data, vi * 128 + vj) for i, j in T.grid(8, 8): with T.block("opaque"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) T.evaluate( T.tvm_load_matrix_sync( B.data, 16, 16, 16, vi * 8 + vj, T.tvm_access_ptr( T.type_annotation(dtype="float16"), A.data, vi * 2048 + vj * 16, 128, 1, dtype="handle", ), 128, "row_major", dtype="handle", ) ) for i, j in T.grid(8, 8): with T.block("match_buffer"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) A0 = T.match_buffer( A[ vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16, ], (16, 16), "float16", strides=[128, 1], offset_factor=1, ) C0 = T.match_buffer( C[ vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16, ], (16, 16), "float16", strides=[128, 1], offset_factor=1, ) T.evaluate( T.tvm_load_matrix_sync( C0.data, 16, 16, 16, vi * 8 + vj, T.tvm_access_ptr( T.type_annotation(dtype="float16"), A0.data, A0.elem_offset, A0.strides[0], 1, dtype="handle", ), 128, "row_major", dtype="handle", ) ) @T.prim_func def func_multi_consumer() -> None: A = T.alloc_buffer((128)) B = T.alloc_buffer((128)) C = T.alloc_buffer((128)) for i in T.grid(8): for j in 
T.grid(16): with T.block("A"): vi = T.axis.S(128, i * 16 + j) A[vi] = 1.0 for j in T.grid(16): with T.block("B"): vi = T.axis.S(128, i * 16 + j) B[vi] = A[vi] + 1.0 for i in T.grid(128): with T.block("C"): vi = T.axis.S(128, i) C[vi] = A[vi] @T.prim_func def func_multi_producer() -> None: A = T.alloc_buffer((128)) B = T.alloc_buffer((128)) for i in range(128): with T.block("A0"): vi = T.axis.S(128, i) A[vi] = 1.0 for i in range(128): with T.block("A1"): vi = T.axis.S(128, i) A[vi] = 2.0 for i in range(128): with T.block("B"): vi = T.axis.S(128, i) B[vi] = A[vi] ########## Expected function after cache_read ########## @T.prim_func def cache_read_elementwise(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) B = T.alloc_buffer((128, 128)) A_global = T.alloc_buffer((128, 128)) B_local = T.alloc_buffer((128, 128), scope="local") for i, j in T.grid(128, 128): with T.block("A_global"): vi, vj = T.axis.remap("SS", [i, j]) A_global[vi, vj] = A[vi, vj] for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A_global[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("B_local"): vi, vj = T.axis.remap("SS", [i, j]) B_local[vi, vj] = B[vi, vj] for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B_local[vi, vj] + 1.0 @T.prim_func def cache_read_under_scope(b: T.handle, c: T.handle) -> None: A = T.alloc_buffer((128, 128)) B = T.match_buffer(b, (128, 128)) C = T.match_buffer(c, (128, 128)) A_global = T.alloc_buffer((128, 128)) for i0, j0 in T.grid(8, 8): with T.block("scope"): i, j = T.axis.remap("SS", [i0, j0]) A_local = T.alloc_buffer((128, 128), scope="local") for x, y in T.grid(16, 16): with T.block("A"): vi = T.axis.S(128, i * 16 + x) vj = T.axis.S(128, j * 16 + y) A[vi, vj] = 1.0 for x, y in T.grid(16, 16): with T.block("A_local"): vi = T.axis.S(128, i * 16 + x) vj = T.axis.S(128, j * 16 + y) A_local[vi, vj] = A[vi, vj] for x, y in 
T.grid(16, 16): with T.block("B"): vi = T.axis.S(128, i * 16 + x) vj = T.axis.S(128, j * 16 + y) B[vi, vj] = A_local[vi, vj] + 1.0 for i, j in T.grid(128, 128): with T.block("A_global"): vi, vj = T.axis.remap("SS", [i, j]) A_global[vi, vj] = A[vi, vj] for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = A_global[vi, vj] * 2.0 @T.prim_func def cache_read_opaque_access(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None: A = T.match_buffer(a, (128, 128), dtype="float16") B = T.match_buffer(b, (128, 128), dtype="float16") C = T.match_buffer(c, (128, 128), dtype="float16") D = T.match_buffer(d, (128, 128), dtype="float16") A_global = T.alloc_buffer((128, 128), dtype="float16") for i, j in T.grid(128, 128): with T.block("A_global"): vi, vj = T.axis.remap("SS", [i, j]) A_global[vi, vj] = A[vi, vj] for i, j in T.grid(128, 128): with T.block("load_store"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A_global[vi, vj]) T.writes(D[vi, vj]) D.data[vi * 128 + vj] = T.load("float16", A_global.data, vi * 128 + vj) for i, j in T.grid(8, 8): with T.block("opaque"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) T.evaluate( T.tvm_load_matrix_sync( B.data, 16, 16, 16, vi * 8 + vj, T.tvm_access_ptr( T.type_annotation(dtype="float16"), A_global.data, vi * 2048 + vj * 16, 128, 1, dtype="handle", ), 128, "row_major", dtype="handle", ) ) for i, j in T.grid(8, 8): with T.block("match_buffer"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) A0 = T.match_buffer( A_global[ vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16, ], (16, 16), "float16", strides=[128, 1], offset_factor=1, ) C0 = T.match_buffer( C[ vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16, ], (16, 16), "float16", strides=[128, 1], offset_factor=1, ) 
T.evaluate( T.tvm_load_matrix_sync( C0.data, 16, 16, 16, vi * 8 + vj, T.tvm_access_ptr( T.type_annotation(dtype="float16"),
#!/usr/bin/env python import os import glob import shutil import pytest import hypothesis.strategies as st from hypothesis import given, settings from radical.entk import Task from radical.entk import states import radical.entk.exceptions as ree # Hypothesis settings settings.register_profile("travis", max_examples=100, deadline=None) settings.load_profile("travis") # ------------------------------------------------------------------------------ # def test_task_initialization(): ''' **Purpose**: Test if the task attributes have, thus expect, the correct data types ''' t = Task() assert t._uid is None assert t.name is None assert t.state == states.INITIAL assert t.state_history == [states.INITIAL] assert t.executable is None assert t.arguments == list() assert t.pre_exec == list() assert t.post_exec == list() assert t.cpu_reqs['processes'] == 1 assert t.cpu_reqs['process_type'] is None assert t.cpu_reqs['threads_per_process'] == 1 assert t.cpu_reqs['thread_type'] is None assert t.gpu_reqs['processes'] == 0 assert t.gpu_reqs['process_type'] is None assert t.gpu_reqs['threads_per_process'] == 0 assert t.gpu_reqs['thread_type'] is None assert t.lfs_per_process == 0 assert t.upload_input_data == list() assert t.copy_input_data == list() assert t.link_input_data == list() assert t.move_input_data == list() assert t.copy_output_data == list() assert t.move_input_data == list() assert t.download_output_data == list() assert t.stdout is None assert t.stderr is None assert t.exit_code is None assert t.tag is None assert t.path is None assert t.parent_pipeline['uid'] is None assert t.parent_pipeline['name'] is None assert t.parent_stage['uid'] is None assert t.parent_stage['name'] is None # ------------------------------------------------------------------------------ # @given(s=st.text(), l=st.lists(st.text()), i=st.integers().filter(lambda x: type(x) == int), b=st.booleans()) def test_task_exceptions(s, l, i, b): ''' **Purpose**: Test if all attribute assignments raise 
exceptions for invalid values ''' t = Task() data_type = [s, l, i, b] for data in data_type: # special case due to backward compatibility if not isinstance(data, str) and \ not isinstance(data, list): with pytest.raises(ree.TypeError): t.executable = data if not isinstance(data, str): with pytest.raises(ree.TypeError): t.name = data with pytest.raises(ree.TypeError): t.path = data with pytest.raises(ree.TypeError): t.parent_stage = data with pytest.raises(ree.TypeError): t.parent_pipeline = data with pytest.raises(ree.TypeError): t.stdout = data with pytest.raises(ree.TypeError): t.stderr = data if not isinstance(data, list): with pytest.raises(ree.TypeError): t.pre_exec = data with pytest.raises(ree.TypeError): t.arguments = data with pytest.raises(ree.TypeError): t.post_exec = data with pytest.raises(ree.TypeError): t.upload_input_data = data with pytest.raises(ree.TypeError): t.copy_input_data = data with pytest.raises(ree.TypeError): t.link_input_data = data with pytest.raises(ree.TypeError): t.move_input_data = data with pytest.raises(ree.TypeError): t.copy_output_data = data with pytest.raises(ree.TypeError): t.download_output_data = data with pytest.raises(ree.TypeError): t.move_output_data = data if not isinstance(data, str) and \ not isinstance(data, str): with pytest.raises(ree.ValueError): t.cpu_reqs = {'processes' : 1, 'process_type' : data, 'threads_per_process': 1, 'thread_type' : None} t.cpu_reqs = {'processes' : 1, 'process_type' : None, 'threads_per_process': 1, 'thread_type' : data } t.gpu_reqs = {'processes' : 1, 'process_type' : data, 'threads_per_process': 1, 'thread_type' : None } t.gpu_reqs = {'processes' : 1, 'process_type' : None, 'threads_per_process': 1, 'thread_type' : data} if not isinstance(data, int): with pytest.raises(ree.TypeError): t.cpu_reqs = {'processes' : data, 'process_type' : None, 'threads_per_process' : 1, 'thread_type' : None} with pytest.raises(ree.TypeError): t.cpu_reqs = {'processes' : 1, 'process_type' : None, 
'threads_per_process' : data, 'thread_type' : None} with pytest.raises(ree.TypeError): t.gpu_reqs = {'processes' : data, 'process_type' : None, 'threads_per_process' : 1, 'thread_type' : None} with pytest.raises(ree.TypeError): t.gpu_reqs = {'processes' : 1, 'process_type' : None, 'threads_per_process' : data, 'thread_type' : None} # ------------------------------------------------------------------------------ # def test_dict_to_task(): # make sure the type checks kick in d = {'name' : 1} with pytest.raises(ree.TypeError): Task(from_dict=d) d = {'name' : 'foo', 'pre_exec' : ['bar'], 'executable': 'buz', 'arguments' : ['baz', 'fiz'], 'cpu_reqs' : {'processes' : 1, 'process_type' : None, 'threads_per_process': 1, 'thread_type' : None}, 'gpu_reqs' : {'processes' : 0, 'process_type' : None, 'threads_per_process': 0, 'thread_type' : None}} t = Task(from_dict=d) for k,v in d.items(): assert(t.__getattribute__(k) == v), '%s != %s' \ % (t.__getattribute__(k), v) # ------------------------------------------------------------------------------ # def test_task_to_dict(): ''' **Purpose**: Test if the 'to_dict' function of Task class converts all expected attributes of the Task into a dictionary ''' t = Task() d = t.to_dict() assert d == {'uid' : None, 'name' : None, 'state' : states.INITIAL, 'state_history' : [states.INITIAL], 'pre_exec' : [], 'executable' : None, 'arguments' : [], 'post_exec' : [], 'cpu_reqs' : {'processes' : 1, 'process_type' : None, 'threads_per_process' : 1, 'thread_type' : None}, 'gpu_reqs' : {'processes' : 0, 'process_type' : None, 'threads_per_process' : 0, 'thread_type' : None}, 'lfs_per_process' : 0, 'upload_input_data' : [], 'copy_input_data' : [], 'link_input_data' : [], 'move_input_data' : [], 'copy_output_data' : [], 'move_output_data' : [], 'download_output_data' : [], 'stdout' : None, 'stderr' : None, 'exit_code' : None, 'path' : None, 'tag' : None, 'parent_stage' : {'uid' : None, 'name' : None}, 'parent_pipeline' : {'uid' : None, 'name' : 
None}} t = Task() t.uid = 'test.0017' t.name = 'new' t.pre_exec = ['module load abc'] t.executable = ['sleep'] t.arguments = ['10'] t.cpu_reqs['processes'] = 10 t.cpu_reqs['threads_per_process'] = 2 t.gpu_reqs['processes'] = 5 t.gpu_reqs['threads_per_process'] = 3 t.lfs_per_process = 1024 t.upload_input_data = ['test1'] t.copy_input_data = ['test2'] t.link_input_data = ['test3'] t.move_input_data = ['test4'] t.copy_output_data = ['test5'] t.move_output_data = ['test6'] t.download_output_data = ['test7'] t.stdout = 'out' t.stderr = 'err' t.exit_code = 1 t.path = 'a/b/c' t.tag = 'task.0010' t.parent_stage = {'uid': 's1', 'name': 'stage1'} t.parent_pipeline = {'uid': 'p1', 'name': 'pipeline1'} d = t.to_dict() assert d == {'uid' : 'test.0017', 'name' : 'new', 'state' : states.INITIAL, 'state_history' : [states.INITIAL], 'pre_exec' : ['module load abc'], 'executable' : 'sleep', 'arguments' : ['10'], 'post_exec' : [], 'cpu_reqs' : {'processes' : 10, 'process_type' : None, 'threads_per_process' : 2, 'thread_type' : None}, 'gpu_reqs' : {'processes' : 5, 'process_type' : None, 'threads_per_process' : 3, 'thread_type' : None}, 'lfs_per_process' : 1024, 'upload_input_data' : ['test1'], 'copy_input_data' : ['test2'], 'link_input_data' : ['test3'], 'move_input_data' : ['test4'], 'copy_output_data' : ['test5'], 'move_output_data' : ['test6'], 'download_output_data' : ['test7'], 'stdout' : 'out', 'stderr' : 'err', 'exit_code' : 1, 'path' : 'a/b/c', 'tag' : 'task.0010', 'parent_stage' : {'uid': 's1', 'name' : 'stage1'}, 'parent_pipeline' : {'uid': 'p1', 'name' : 'pipeline1'}} t.executable = 'sleep' d = t.to_dict() assert d == {'uid' : 'test.0017', 'name' : 'new', 'state' : states.INITIAL, 'state_history' : [states.INITIAL], 'pre_exec' : ['module load abc'], 'executable' : 'sleep', 'arguments' : ['10'], 'post_exec' : [], 'cpu_reqs' : {'processes' : 10, 'process_type' : None, 'threads_per_process' : 2, 'thread_type' : None}, 'gpu_reqs' : {'processes' : 5, 'process_type' : None, 
'threads_per_process' : 3, 'thread_type' : None}, 'lfs_per_process' : 1024, 'upload_input_data' : ['test1'], 'copy_input_data' : ['test2'], 'link_input_data' : ['test3'], 'move_input_data' : ['test4'], 'copy_output_data' : ['test5'], 'move_output_data' : ['test6'], 'download_output_data' : ['test7'], 'stdout' : 'out', 'stderr' : 'err', 'exit_code' : 1, 'path' : 'a/b/c', 'tag' : 'task.0010', 'parent_stage' : {'uid': 's1', 'name' : 'stage1'}, 'parent_pipeline' : {'uid': 'p1', 'name' : 'pipeline1'}} # ------------------------------------------------------------------------------ # def test_task_from_dict(): ''' **Purpose**: Test if the 'from_dict' function of Task class converts a dictionary into a Task correctly with all the expected attributes ''' d = {'uid' : 're.Task.0000', 'name' : 't1', 'state' : states.DONE, 'state_history' : [states.INITIAL, states.DONE], 'pre_exec' : [], 'executable' : '', 'arguments' : [], 'post_exec' : [], 'cpu_reqs' : {'processes' : 1, 'process_type' : None, 'threads_per_process' : 1, 'thread_type' : None}, 'gpu_reqs' : {'processes' : 0, 'process_type' : None, 'threads_per_process' : 0, 'thread_type' : None}, 'lfs_per_process' : 1024, 'upload_input_data' : [], 'copy_input_data' : [], 'link_input_data' : [], 'move_input_data' : [], 'copy_output_data' : [], 'move_output_data' : [], 'download_output_data' : [], 'stdout' : 'out', 'stderr' : 'err', 'exit_code' : 555, 'path' : 'here/it/is', 'tag' : 'task.0010', 'parent_stage' : {'uid': 's1', 'name' : 'stage1'}, 'parent_pipeline' : {'uid': 'p1', 'name' : 'pipe1'}} t = Task() t.from_dict(d) assert t._uid == d['uid'] assert t.name == d['name'] assert t.state == d['state'] assert t.state_history == d['state_history'] assert t.pre_exec == d['pre_exec'] assert t.executable == d['executable'] assert t.arguments == d['arguments'] assert t.post_exec == d['post_exec'] assert t.cpu_reqs == d['cpu_reqs'] assert t.gpu_reqs == d['gpu_reqs'] assert t.lfs_per_process == d['lfs_per_process'] assert 
t.upload_input_data == d['upload_input_data'] assert t.copy_input_data == d['copy_input_data'] assert t.link_input_data == d['link_input_data'] assert t.move_input_data == d['move_input_data'] assert t.copy_output_data == d['copy_output_data'] assert t.move_output_data == d['move_output_data'] assert t.download_output_data == d['download_output_data'] assert t.stdout == d['stdout'] assert t.stderr == d['stderr'] assert t.exit_code == d['exit_code'] assert t.path == d['path'] assert t.tag == d['tag'] assert t.parent_stage == d['parent_stage'] assert t.parent_pipeline == d['parent_pipeline'] d['executable'] = 'sleep' t = Task() t.from_dict(d) assert t.executable == d['executable'] # ------------------------------------------------------------------------------ # def test_task_assign_uid(): try: home = os.environ.get('HOME', '/home') folder = glob.glob('%s/.radical/utils/test*' % home) for f in folder: shutil.rmtree(f) except: pass t = Task() assert t.uid == 'task.0000' # ------------------------------------------------------------------------------ # def test_task_validate(): t = Task() t._state = 'test' with pytest.raises(ree.ValueError): t._validate() t = Task() with pytest.raises(ree.MissingError): t._validate() # ------------------------------------------------------------------------------ # if __name__ == '__main__': test_task_initialization()
* _mass) else: h = min(100, cone_height) r = min(100., d / (d**2+7.) * 0.9 *_mass) # kegel_radius; er ist beschränkt if size < 0.6: r = 2 * r if not tube_radius: tr = None else: tr = d/50. * _mass # Ermittlung der Lage der Spitze aus länge(a(P2-P1)) = h a = h / 2. / ll kegel_pos = [x2-a*vv[0], y2-a*vv[1], z2-a*vv[2]] # die Spitze cone = visual.Cone(pos=kegel_pos, axis=vv, color=f, height=h, radius=r) # der Schaft x, y, z = [x1, x1+0.95*(x2-x1)], [y1, y1+0.95*(y2-y1)], [z1, z1+0.95*(z2-z1)] line = mlab.plot3d(x, y, z, line_width=d, color=f, tube_radius=tr) return cone, line # --------------- # Pfeil für vispy # --------------- def _arrow_vispy(view, x1, y1, z1, x2, y2, z2, # Pfeil von P1 nach P2 cols=32, rows=32, radius=0.1, cone_radius=0.15, cone_length=0.2, color=(0, 0, 0, 1)): x1, y1, z1 = float(x1), float(y1), float(z1) x2, y2, z2 = float(x2), float(y2), float(z2) p1, p2 = np.array([x1, y1, z1]), np.array([x2, y2, z2]) vv = p2 - p1 ll = np.sqrt(np.dot(vv, vv)) vv = vv / ll vz1 = np.array([0.0, 0.0, 1.0]) vz2 = np.array([0.0, 0.0, -1.0]) if np.sqrt(np.dot(vv-vz1, vv-vz1)) < 1e-6: alpha = 0.0 # Winkel mit der xy-Ebene dd = np.array([1.0, 0.0, 0.0]) # Drehachse elif np.sqrt(np.dot(vv-vz2, vv-vz2)) < 1e-6: alpha = np.pi dd = np.array([1.0, 0.0, 0.0]) else: alpha = np.arccos(vv[2]) dd = np.cross(vv, np.array([0.0, 0.0, 1.0])) arrow = create_arrow(cols, rows, radius=radius, length=ll, # entlang der positiven z-Achse cone_length=cone_length, cone_radius=cone_radius) mesh = scene.visuals.Mesh(meshdata=arrow, color=color) rot = AffineTransform() mat = _np_rot_matrix(alpha, dd) bild = np.dot(mat, [0, 0, 1]) if np.sqrt(np.dot(bild-vv, bild-vv)) < 1e-3: # Feststellen der Drehrichtung rot.rotate(alpha / np.pi * 180., dd) else: rot.rotate(-alpha / np.pi * 180., dd) trans = STTransform(translate=(x1, y1, z1)) mesh.transform = ChainTransform([trans, rot]) return mesh if UMG.grafik_3d == 'mayavi': _arrow = _arrow_mayavi else: _arrow = _arrow_vispy # -------------------- # Pfeil 
für matplotlib # -------------------- def _arrow2(x1, y1, x2, y2, # Pfeil von P1 nach P2 linewidth=0.5, color=(0, 0, 0), head_width=0.18, head_length=0.5): Vektor = importlib.import_module('agla.lib.objekte.vektor').Vektor _mass = UMG._mass() x1, y1 = float(x1), float(y1) x2, y2 = float(x2), float(y2) p1, p2 = Vektor(x1, y1), Vektor(x2, y2) vv = Vektor(p1, p2) ev = vv.einh_vekt if vv.betrag < _mass: head_length = 0.5 * vv.betrag if linewidth > 2.0: head_width = 0.2 head_width *= _mass head_length *= _mass pp = p2 - head_length * ev px, py = float(pp.x), float(pp.y) ex, ey = float(ev.x), float(ev.y) line = plt.Line2D([x1, px], [y1, py], color=color, linewidth=linewidth) head = patches.FancyArrow(px, py, 0.0001*ex, 0.0001*ey, facecolor=color, edgecolor=color, head_width=head_width, head_length=head_length, alpha=0.6) return line, head # ============================= # Implizites plotten mit Mayavi # ============================= # aus dem Internet angepaßt: # # http://indranilsinharoy.com/2014/03/02/plotting-algebraic-surfaces-using-mayavi/ def _implicit_plot_mayavi(expr, ext_grid, fig_handle=None, Nx=100, Ny=100, Nz=100, col_isurf=(50/255, 199/255, 152/255), col_osurf=(240/255,36/255,87/255), opa_val=0.8, opaque=True, ori_axis=False, **kwargs): """Funktion zum impliziten Plotten in Mayavi""" from numpy import (pi, sqrt, sin, cos, tan, exp, log, sinh, cosh, tanh, arcsin, arccos, arctan, arcsinh, arccosh, arctanh) if fig_handle==None: fig = mlab.figure(1, bgcolor=(0.97, 0.97, 0.97), fgcolor=(0, 0, 0), \ size=(800, 800)) else: fig = fig_handle xl, xr, yl, yr, zl, zr = ext_grid x, y, z = np.mgrid[xl:xr:eval('{}j'.format(Nx)), yl:yr:eval('{}j'.format(Ny)), zl:zr:eval('{}j'.format(Nz))] scalars = eval(expr) src = mlab.pipeline.scalar_field(x, y, z, scalars) if opaque: delta = 1.e-5 opa_val=1.0 else: delta = 0.0 cont1 = mlab.pipeline.iso_surface(src, color=col_isurf, contours=[0-delta], transparent=False, opacity=opa_val) cont1.compute_normals = False if opaque: cont2 = 
mlab.pipeline.iso_surface(src, color=col_osurf, contours=[0+delta], transparent=False, opacity=opa_val) cont2.compute_normals = False cont1.actor.property.backface_culling = True cont2.actor.property.frontface_culling = True cont2.actor.property.specular = 0.2 #0.4 #0.8 cont2.actor.property.specular_power = 55.0 #15.0 else: cont1.actor.property.specular = 0.2 #0.4 #0.8 cont1.actor.property.specular_power = 55.0 #15. return True if UMG.grafik_3d == 'mayavi': _implicit_plot = _implicit_plot_mayavi else: _implicit_plot = None # ================================= # Zeichnen vieler Linien mit Mayavi # ================================= # # Anpassung des 'many_lines' - Beispiels aus der Mayavi-Dokumentation # def _many_lines_mayavi(f, uw, N, n, # Fläche, u- oder w-Linien, Anzahl color=(0.7, 0.7, 0.7), # Punkte je Linie, Linienanzahl line_width=1, opacity=0.5): uber, wber = f.ber uu, uo = float(uber[0]), float(uber[1]) wu, wo = float(wber[0]), float(wber[1]) lu, lw = uo-uu, wo-wu u, w = symbols('u w') p = f.pkt(u, w) i = Symbol('i') if uw == Symbol('w'): ers = wu + i/n*lw sx = str(p.x.subs(Symbol('w'), ers)) sy = str(p.y.subs(Symbol('w'), ers)) sz = str(p.z.subs(Symbol('w'), ers)) else: ers = uu + i/n*lu sx = str(p.x.subs(Symbol('u'), ers)) sy = str(p.y.subs(Symbol('u'), ers)) sz = str(p.z.subs(Symbol('u'), ers)) u = np.linspace(uu, uo, N) w = np.linspace(wu, wo, N) x = list() y = list() z = list() s = list() connections = list() index = 0 abs=np.abs; pi=np.pi; sqrt=np.sqrt; exp=np.exp; log=np.log ln=np.log; sin=np.sin; sinh=np.sinh; Abs=np.abs arcsin=np.arcsin; arsinh=np.arcsinh; cos=np.cos; cosh=np.cosh arccos=np.arccos; arcosh=np.arccosh; tan=np.tan; tanh=np.tanh arctan=np.arctan; artanh=np.arctanh asin=np.arcsin; acos=np.arccos; atan=np.arctan asinh=np.arcsinh; acosh=np.arccosh; atanh=np.arctanh for i in range(n+1): if uw == Symbol('w'): if sx.find('u') >= 0: x.append(eval(sx.replace('Abs', 'np.abs'))) else: sx = str(float(p.x.subs(Symbol('w'), wu + i/n*lw))) 
x.append([eval(sx) for uu in u]) if sy.find('u') >= 0: y.append(eval(sy.replace('Abs', 'np.abs'))) else: sy = str(float(p.y.subs(Symbol('w'), wu + i/n*lw))) y.append([eval(sy) for uu in u]) if sz.find('u') >= 0: z.append(eval(sz.replace('Abs', 'np.abs'))) else: sz = str(float(p.z.subs(Symbol('w'), wu + i/n*lw))) z.append([eval(sz) for uu in u]) else: if sx.find('w') >= 0: x.append(eval(sx)) else: sx = str(float(p.x.subs(Symbol('u'), uu + i/n*lu))) x.append([eval(sx) for ww in w]) if sy.find('w') >= 0: y.append(eval(sy)) else: sy = str(float(p.y.subs(Symbol('u'), uu + i/n*lu))) y.append([eval(sy) for ww in w]) if sz.find('w') >= 0: z.append(eval(sz)) else: sz = str(float(p.z.subs(Symbol('u'), uu + i/n*lu))) z.append([eval(sz) for ww in w]) s.append(u) connections.append(np.vstack( [np.arange(index, index + N - 1.5), np.arange(index + 1, index + N - .5)] ).T) index += N x = np.hstack(x) y = np.hstack(y) z = np.hstack(z) s = np.hstack(s) connections = np.vstack(connections) src = mlab.pipeline.scalar_scatter(x, y, z, s) src.mlab_source.dataset.lines = connections src.update() lines = mlab.pipeline.stripper(src) mlab.pipeline.surface(lines, color=color, line_width=line_width, opacity=opacity) # ========================== # Funktionen für 2D - Grafik # ========================== # ---------------------- # Grafik mit matplotlib # ---------------------- def _Grafik_mit_matplotlib(*args, **kwargs): """Funktion zum Erzeugen von 2D-Grafiken mit matplotlib""" Vektor = importlib.import_module('agla.lib.objekte.vektor').Vektor plt.close('all') mlab.close(all=True) achsen = True if kwargs.get('achsen') is None else kwargs.get('achsen') gitter = False if kwargs.get('gitter') is None else kwargs.get('gitter') skalen = True if kwargs.get('skalen') is None else kwargs.get('skalen') x_skala = True if kwargs.get('x_skala') is None else kwargs.get('x_skala') y_skala = True if kwargs.get('y_skala') is None else kwargs.get('y_skala') groesse = kwargs.get('groesse') text = 
kwargs.get('text') bez = kwargs.get('bez') # string-Angaben in Eingabe sichten i = 1 for arg in args: if isinstance(arg, str): if arg.find('=') > 0: exec(arg) continue else: print("agla: %s. Eintrag: '=' ist nicht angegeben" % i) return if not skalen: x_skala = False y_skala = False if isinstance(achsen, bool): x_bez, y_bez = 'x', 'y' else: a = achsen if not isinstance(a, (tuple, Tuple, list)) or len(achsen) != 2: print('agla: Liste/Tupel mit zwei Bezeichnern für die Achsen angeben') return x_bez = str(a[0]) if isinstance(a[0], Symbol) else a[0] y_bez = str(a[1]) if isinstance(a[1], Symbol) else a[1] if not (isinstance(x_bez, str) and isinstance(y_bez, str)): print('agla: die Bezeichner als Symbole oder Zeichenketten angeben') return if x_skala: if not achsen and not gitter: x_skala = False if y_skala: if not achsen and not gitter: y_skala = False if groesse: if not iterable(groesse) and len(groesse) == 2: print('agla: Größenangaben sind als Tupel/Liste mit 2 Elementen zu schreiben') return typ = int, Integer, float, Float, Rational if not all([isinstance(el, typ) and isinstance(el, typ) and el > 0 for el in groesse]): print('agla: Größenangaben müssen die Form (breite, höhe) mit positiven Werten haben') return if bez: text = bez if text: meld = 'agla: Textangaben sind als Tupel/Liste mit Elementen der \nLänge 3 oder 4 zu schreiben' if not iterable(text): print(meld) return if not all([iterable(el) and len(el) in (2, 3) for el in text]): print(meld) return if not all([isinstance(el[0], Vektor) and isinstance(el[1], str) for el in text]): print('agla: Textelemente müssen die Form (Vektor/Punkt, \'text\'
<filename>demisto_sdk/tests/integration_tests/update_release_notes_integration_test.py<gh_stars>10-100 import os from os.path import join import pytest from click.testing import CliRunner import conftest # noqa: F401 from demisto_sdk.__main__ import main from demisto_sdk.commands.common.git_util import GitUtil from demisto_sdk.commands.common.legacy_git_tools import git_path from demisto_sdk.commands.update_release_notes.update_rn import UpdateRN from demisto_sdk.commands.update_release_notes.update_rn_manager import \ UpdateReleaseNotesManager from demisto_sdk.commands.validate.validate_manager import ValidateManager from TestSuite.test_tools import ChangeCWD UPDATE_RN_COMMAND = "update-release-notes" DEMISTO_SDK_PATH = join(git_path(), "demisto_sdk") TEST_FILES_PATH = join(git_path(), 'demisto_sdk', 'tests') AZURE_FEED_PACK_PATH = join(TEST_FILES_PATH, 'test_files', 'content_repo_example', 'Packs', 'FeedAzureValid') RN_FOLDER = join(git_path(), 'Packs', 'FeedAzureValid', 'ReleaseNotes') VMWARE_PACK_PATH = join(TEST_FILES_PATH, 'test_files', 'content_repo_example', 'Packs', 'VMware') VMWARE_RN_PACK_PATH = join(git_path(), 'Packs', 'VMware', 'ReleaseNotes') THINKCANARY_RN_FOLDER = join(git_path(), 'Packs', 'ThinkCanary', 'ReleaseNotes') @pytest.fixture def demisto_client(mocker): mocker.patch( "demisto_sdk.commands.download.downloader.demisto_client", return_valure="object" ) def test_update_release_notes_new_integration(demisto_client, mocker): """ Given - Azure feed pack path. When - Running demisto-sdk update-release-notes command. Then - Ensure release notes file created with no errors - Ensure message is printed when update release notes process finished. - Ensure the release motes content is valid and as expected. """ expected_rn = '\n' + '#### Integrations\n' + \ '##### New: Azure Feed\n' + \ '- Azure.CloudIPs Feed Integration. 
(Available from Cortex XSOAR 5.5.0).\n' added_files = {join(AZURE_FEED_PACK_PATH, 'Integrations', 'FeedAzureValid', 'FeedAzureValid.yml')} rn_path = join(RN_FOLDER, '1_0_1.md') runner = CliRunner(mix_stderr=True) mocker.patch('demisto_sdk.commands.update_release_notes.update_rn_manager.get_pack_name', return_value='FeedAzureValid') mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='FeedAzureValid') mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True) mocker.patch.object(ValidateManager, 'setup_git_params', return_value='') mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', return_value=(set(), added_files, set())) mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name") mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'}) mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0') if os.path.exists(rn_path): os.remove(rn_path) result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'FeedAzureValid')]) assert result.exit_code == 0 assert os.path.isfile(rn_path) assert not result.exception assert 'Changes were detected. Bumping FeedAzureValid to version: 1.0.1' in result.stdout assert 'Finished updating release notes for FeedAzureValid.' in result.stdout with open(rn_path, 'r') as f: rn = f.read() assert expected_rn == rn def test_update_release_notes_modified_integration(demisto_client, mocker): """ Given - Azure feed pack path. When - Running demisto-sdk update-release-notes command. Then - Ensure release notes file created with no errors - Ensure message is printed when update release notes process finished. - Ensure the release motes content is valid and as expected. 
""" expected_rn = '\n' + '#### Integrations\n' + \ '##### Azure Feed\n' + \ '- %%UPDATE_RN%%\n' modified_files = {join(AZURE_FEED_PACK_PATH, 'Integrations', 'FeedAzureValid', 'FeedAzureValid.yml')} rn_path = join(RN_FOLDER, '1_0_1.md') runner = CliRunner(mix_stderr=False) mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='FeedAzureValid') mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True) mocker.patch.object(ValidateManager, 'setup_git_params', return_value='') mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', return_value=(modified_files, set(), set())) mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name") mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'}) mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0') if os.path.exists(rn_path): os.remove(rn_path) result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'FeedAzureValid')]) assert result.exit_code == 0 assert os.path.isfile(rn_path) assert not result.exception assert 'Changes were detected. Bumping FeedAzureValid to version: 1.0.1' in result.stdout assert 'Finished updating release notes for FeedAzureValid.' in result.stdout with open(rn_path, 'r') as f: rn = f.read() assert expected_rn == rn def test_update_release_notes_incident_field(demisto_client, mocker): """ Given - Azure feed pack path. When - Running demisto-sdk update-release-notes command. Then - Ensure release notes file created with no errors - Ensure message is printed when update release notes process finished. - Ensure the release motes content is valid and as expected. 
""" expected_rn = '\n' + '#### Incident Fields\n' + \ '- **City**\n' runner = CliRunner(mix_stderr=False) modified_files = {join(AZURE_FEED_PACK_PATH, 'IncidentFields', 'incidentfield-city.json')} rn_path = join(RN_FOLDER, '1_0_1.md') mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True) mocker.patch.object(ValidateManager, 'setup_git_params', return_value='') mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', return_value=(modified_files, set(), set())) mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name") mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'}) mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='FeedAzureValid') mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0') if os.path.exists(rn_path): os.remove(rn_path) result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'FeedAzureValid')]) assert result.exit_code == 0 assert os.path.isfile(rn_path) assert not result.exception assert 'Changes were detected. Bumping FeedAzureValid to version: 1.0.1' in result.stdout assert 'Finished updating release notes for FeedAzureValid.' in result.stdout with open(rn_path, 'r') as f: rn = f.read() assert expected_rn == rn def test_update_release_notes_unified_yml_integration(demisto_client, mocker): """ Given - VMware pack path. When - Running demisto-sdk update-release-notes command. Then - Ensure release notes file created with no errors - Ensure message is printed when update release notes process finished. - Ensure the release motes content is valid and as expected. 
""" expected_rn = '\n' + '#### Integrations\n' + \ '##### VMware\n' + \ '- %%UPDATE_RN%%\n' runner = CliRunner(mix_stderr=False) old_files = {join(VMWARE_PACK_PATH, 'Integrations', 'integration-VMware.yml')} rn_path = join(VMWARE_RN_PACK_PATH, '1_0_1.md') mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True) mocker.patch.object(ValidateManager, 'setup_git_params', return_value='') mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name") mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', return_value=(set(), old_files, set())) mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'}) mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='VMware') mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0') if os.path.exists(rn_path): os.remove(rn_path) result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'VMware')]) assert result.exit_code == 0 assert not result.exception assert 'Changes were detected. Bumping VMware to version: 1.0.1' in result.stdout assert 'Finished updating release notes for VMware.' in result.stdout assert os.path.isfile(rn_path) with open(rn_path, 'r') as f: rn = f.read() assert expected_rn == rn def test_update_release_notes_non_content_path(demisto_client, mocker): """ Given - non content pack path. When - Running demisto-sdk update-release-notes command. 
Then - Ensure an error is raised """ runner = CliRunner(mix_stderr=False) mocker.patch.object(ValidateManager, 'setup_git_params', return_value='') mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name") mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', side_effect=FileNotFoundError) mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'}) mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='VMware') mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0') result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Users', 'MyPacks', 'VMware')]) assert result.exit_code == 1 assert result.exception assert "You are not running" in result.stdout # check error str is in stdout def test_update_release_notes_existing(demisto_client, mocker): """ Given - Azure feed pack path. When - Running demisto-sdk update-release-notes command. Then - Ensure release notes file updated with no errors - Ensure message is printed when update release notes process finished. - Ensure the release motes content is valid and as expected. 
""" expected_rn = '\n' + '#### Integrations\n' + \ '##### New: Azure Feed\n' + \ '- Azure.CloudIPs Feed Integration.\n' + \ '\n' + '#### Incident Fields\n' + \ '- **City**' input_rn = '\n' + '#### Integrations\n' + \ '##### New: Azure Feed\n' + \ '- Azure.CloudIPs Feed Integration.\n' rn_path = join(RN_FOLDER, '1_0_0.md') modified_files = {join(AZURE_FEED_PACK_PATH, 'IncidentFields', 'incidentfield-city.json')} with open(rn_path, 'w') as file_: file_.write(input_rn) runner = CliRunner(mix_stderr=False) mocker.patch.object(UpdateRN, 'is_bump_required', return_value=False) mocker.patch.object(ValidateManager, 'setup_git_params', return_value='') mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name") mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', return_value=(modified_files, set(), set())) mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'}) mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0') mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='FeedAzureValid') result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'FeedAzureValid')]) assert result.exit_code == 0 assert os.path.exists(rn_path) assert not result.exception assert 'Finished updating release notes for FeedAzureValid.' in result.stdout with open(rn_path, 'r') as f: rn = f.read() os.remove(rn_path) assert expected_rn == rn def test_update_release_notes_modified_apimodule(demisto_client, repo, mocker): """ Given - ApiModules_script.yml which is part of APIModules pack was changed. - FeedTAXII pack path exists and uses ApiModules_script - id_set.json indicates FeedTAXII uses APIModules When - Running demisto-sdk update-release-notes command. Then - Ensure release notes file created with no errors for APIModule and related pack FeedTAXII: - Ensure message is printed when update release notes process finished. 
""" repo.setup_one_pack("ApiModules") api_module_pack = repo.packs[0] api_module_script_path = join(api_module_pack.path, "Scripts/ApiModules_script/ApiModules_script.yml") repo.setup_one_pack("FeedTAXII") taxii_feed_pack = repo.packs[1] taxii_feed_integration_path = join(taxii_feed_pack.path, "Integrations/FeedTAXII_integration/FeedTAXII_integration.yml") repo.id_set.update({ "scripts": [ { "ApiModules_script": { "name": "ApiModules_script", "file_path": api_module_script_path, "pack": "ApiModules" } } ], "integrations": [ { "FeedTAXII_integration": { "name": "FeedTAXII_integration", "file_path": taxii_feed_integration_path, "pack": "FeedTAXII", "api_modules": "ApiModules_script" } } ] }) modified_files = {api_module_script_path} runner = CliRunner(mix_stderr=False) mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True) mocker.patch.object(ValidateManager, 'setup_git_params', return_value='') mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', return_value=(modified_files, set(), set())) mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name") mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'}) mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='ApiModules') mocker.patch.object(UpdateRN, 'get_master_version', return_value='1.0.0') result = runner.invoke(main, [UPDATE_RN_COMMAND, "-i", join('Packs', 'ApiModules'), "-idp", repo.id_set.path]) assert result.exit_code == 0 assert not result.exception assert 'Release notes are not required for the ApiModules pack since this pack is not versioned.' in result.stdout assert 'Changes were detected. Bumping FeedTAXII to version: 1.0.1' in result.stdout def test_update_release_on_matadata_change(demisto_client, mocker, repo): """ Given - change only in metadata When - Running demisto-sdk update-release-notes command. Then - Ensure not find changes which would belong in release notes . 
""" pack = repo.create_pack('FeedAzureValid') pack.pack_metadata.write_json(open('demisto_sdk/tests/test_files/1.pack_metadata.json').read()) validate_manager = ValidateManager(skip_pack_rn_validation=True, silence_init_prints=True, skip_conf_json=True, check_is_unskipped=False) validate_manager.git_util = "Not None" mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True) mocker.patch.object(ValidateManager, 'get_unfiltered_changed_files_from_git', return_value=({pack.pack_metadata.path}, set(), set())) mocker.patch.object(UpdateReleaseNotesManager, 'setup_validate_manager', return_value=validate_manager) mocker.patch.object(ValidateManager, 'setup_git_params', return_value='') mocker.patch.object(GitUtil, 'get_current_working_branch', return_value="branch_name") mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'}) mocker.patch('demisto_sdk.commands.common.tools.get_pack_name', return_value='FeedAzureValid') mocker.patch('demisto_sdk.commands.common.tools.get_pack_names_from_files', return_value={'FeedAzureValid'}) with ChangeCWD(repo.path): runner = CliRunner(mix_stderr=False) result = runner.invoke(main, [UPDATE_RN_COMMAND, "-g"]) assert result.exit_code == 0 assert 'No changes that require release notes were detected. If such changes were made, ' \ 'please commit the changes and rerun the command' in result.stdout def test_update_release_notes_master_ahead_of_current(demisto_client, mocker, repo): """ Given - Azure feed pack path. When - Running demisto-sdk update-release-notes command. Then - Ensure release notes file created with no errors - Ensure the new version is taken from master and not from local metadata file. 
""" modified_files = {join(AZURE_FEED_PACK_PATH, 'IncidentFields', 'incidentfield-city.json')} mocker.patch.object(UpdateRN, 'is_bump_required', return_value=True) mocker.patch.object(ValidateManager, 'setup_git_params', return_value='') mocker.patch.object(UpdateReleaseNotesManager, 'get_git_changed_files', return_value=(modified_files, {'1_1_0.md'}, set())) mocker.patch.object(UpdateRN, 'get_pack_metadata', return_value={'currentVersion': '1.0.0'})
myParameters["lExcludeTotalsFromCSV"] = lExcludeTotalsFromCSV myParameters["lIncludeFutureBalances_SG2020"] = lIncludeFutureBalances_SG2020 myParameters["lDontRoundPrice"] = lRoundPrice myParameters["lStripASCII"] = lStripASCII myParameters["csvDelimiter"] = csvDelimiter myParameters["_column_widths_SG2020"] = _column_widths_SG2020 myParameters["lWriteBOMToExportFile_SWSS"] = lWriteBOMToExportFile_SWSS if not lDisplayOnly and scriptpath != "" and os.path.isdir(scriptpath): myParameters["scriptpath"] = scriptpath myPrint("DB","variables dumped from memory back into myParameters{}.....") return get_StuWareSoftSystems_parameters_from_file() myPrint("DB", "DEBUG IS ON..") # END ALL CODE COPY HERE ############################################################################################### moneydance_ui.firstMainFrame.setStatus(">> StuWareSoftSystems - %s launching......." %(myScriptName),0) # Create fake JFrame() so that all popups have correct Moneydance Icons etc StockGlance2020_fake_frame_ = JFrame() if (not Platform.isMac()): moneydance_ui.getImages() StockGlance2020_fake_frame_.setIconImage(MDImages.getImage(moneydance_ui.getMain().getSourceInformation().getIconResource())) StockGlance2020_fake_frame_.setUndecorated(True) StockGlance2020_fake_frame_.setVisible(False) StockGlance2020_fake_frame_.setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE) class CloseAboutAction(AbstractAction): # noinspection PyMethodMayBeStatic # noinspection PyUnusedLocal def __init__(self, theFrame): self.theFrame = theFrame def actionPerformed(self, event): global debug myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()", "Event:", event) self.theFrame.dispose() def about_this_script(): global debug, scriptExit myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()") # noinspection PyUnresolvedReferences about_d = JDialog(StockGlance2020_frame_, "About", Dialog.ModalityType.MODELESS) shortcut = Toolkit.getDefaultToolkit().getMenuShortcutKeyMaskEx() 
about_d.getRootPane().getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(KeyStroke.getKeyStroke(KeyEvent.VK_W, shortcut), "close-window") about_d.getRootPane().getInputMap(JComponent.WHEN_ANCESTOR_OF_FOCUSED_COMPONENT).put(KeyStroke.getKeyStroke(KeyEvent.VK_F4, shortcut), "close-window") about_d.getRootPane().getInputMap(JComponent.WHEN_IN_FOCUSED_WINDOW).put(KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0), "close-window") about_d.getRootPane().getActionMap().put("close-window", CloseAboutAction(about_d)) about_d.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE) # The CloseAction() and WindowListener() will handle dispose() - else change back to DISPOSE_ON_CLOSE if (not Platform.isMac()): # moneydance_ui.getImages() about_d.setIconImage(MDImages.getImage(moneydance_ui.getMain().getSourceInformation().getIconResource())) aboutPanel=JPanel() aboutPanel.setLayout(FlowLayout(FlowLayout.LEFT)) aboutPanel.setPreferredSize(Dimension(1070, 400)) _label1 = JLabel(pad("Author: <NAME>", 800)) _label1.setForeground(Color.BLUE) aboutPanel.add(_label1) _label2 = JLabel(pad("StuWareSoftSystems (2020)", 800)) _label2.setForeground(Color.BLUE) aboutPanel.add(_label2) displayString=scriptExit displayJText = JTextArea(displayString) displayJText.setFont( getMonoFont() ) displayJText.setEditable(False) # displayJText.setCaretPosition(0) displayJText.setLineWrap(False) displayJText.setWrapStyleWord(False) displayJText.setMargin(Insets(8, 8, 8, 8)) # displayJText.setBackground((mdGUI.getColors()).defaultBackground) # displayJText.setForeground((mdGUI.getColors()).defaultTextForeground) aboutPanel.add(displayJText) about_d.add(aboutPanel) about_d.pack() about_d.setLocationRelativeTo(None) about_d.setVisible(True) return class DoTheMenu(AbstractAction): def __init__(self, menu, callingClass=None): self.menu = menu self.callingClass = callingClass def actionPerformed(self, event): # noqa global StockGlance2020_frame_, debug myPrint("D", "In ", 
inspect.currentframe().f_code.co_name, "()", "Event: ", event ) if event.getActionCommand() == "About": about_this_script() myPrint("D", "Exiting ", inspect.currentframe().f_code.co_name, "()") return def terminate_script(): global debug, StockGlance2020_frame_, i_am_an_extension_so_run_headless, scriptExit, csvfilename, lDisplayOnly, lGlobalErrorDetected myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()") try: save_StuWareSoftSystems_parameters_to_file() except: myPrint("B", "Error - failed to save parameters to pickle file...!") dump_sys_error_to_md_console_and_errorlog() if not lDisplayOnly and not lGlobalErrorDetected: try: helper = moneydance.getPlatformHelper() helper.openDirectory(File(csvfilename)) except: dump_sys_error_to_md_console_and_errorlog() moneydance_ui.firstMainFrame.setStatus(">> StuWareSoftSystems - thanks for using >> %s......." %(myScriptName),0) if not i_am_an_extension_so_run_headless: print(scriptExit) StockGlance2020_frame_.dispose() return csvfilename = None if decimalCharSep != "." and csvDelimiter == ",": csvDelimiter = ";" # Override for EU countries or where decimal point is actually a comma... myPrint("DB", "Decimal point:", decimalCharSep, "Grouping Separator", groupingCharSep, "CSV Delimiter set to:", csvDelimiter) # Stores the data table for export rawDataTable = None rawrawFooterTable = None sdf = SimpleDateFormat("dd/MM/yyyy") label1 = JLabel("Hide Hidden Securities?") user_hideHiddenSecurities = JCheckBox("", hideHiddenSecurities) label2 = JLabel("Hide Inactive Accounts?") user_hideInactiveAccounts = JCheckBox("", hideInactiveAccounts) label3 = JLabel("Hide Hidden Accounts?") user_hideHiddenAccounts = JCheckBox("", hideHiddenAccounts) label4 = JLabel("Filter for Currency containing text '...' 
or ALL:") user_selectCurrency = JTextField(5) user_selectCurrency.setDocument(JTextFieldLimitYN(5, True, "CURR")) if lAllCurrency: user_selectCurrency.setText("ALL") else: user_selectCurrency.setText(filterForCurrency) label5 = JLabel("Filter for Security/Ticker containing text '...' or ALL:") user_selectTicker = JTextField(12) user_selectTicker.setDocument(JTextFieldLimitYN(12, True, "CURR")) if lAllSecurity: user_selectTicker.setText("ALL") else: user_selectTicker.setText(filterForSecurity) label6 = JLabel("Filter for Accounts containing text '...' (or ALL):") user_selectAccounts = JTextField(12) user_selectAccounts.setDocument(JTextFieldLimitYN(20, True, "CURR")) if lAllAccounts: user_selectAccounts.setText("ALL") else: user_selectAccounts.setText(filterForAccounts) label7 = JLabel("Include Cash Balances for each account?") user_selectCashBalances = JCheckBox("", lIncludeCashBalances) label7b = JLabel("Split Security Qtys by Account?") user_splitSecurities = JCheckBox("", lSplitSecuritiesByAccount) labelFutureBalances = JLabel("Include Future Balances (rather than current)?") user_includeFutureBalances = JCheckBox("", lIncludeFutureBalances_SG2020) label7c = JLabel("Exclude Totals from CSV extract (helps pivots)?") user_excludeTotalsFromCSV = JCheckBox("", lExcludeTotalsFromCSV) label7d = JLabel("Round calculated price using security dpc setting (N=No Rounding)?") user_roundPrice = JCheckBox("", lRoundPrice) labelRC = JLabel("Reset Column Widths to Defaults?") user_selectResetColumns = JCheckBox("", False) label8 = JLabel("Strip non ASCII characters from CSV export?") user_selectStripASCII = JCheckBox("", lStripASCII) delimStrings = [";","|",","] label9 = JLabel("Change CSV Export Delimiter from default to: ';|,'") user_selectDELIMITER = JComboBox(delimStrings) user_selectDELIMITER.setSelectedItem(csvDelimiter) labelBOM = JLabel("Write BOM (Byte Order Mark) to file (helps Excel open files)?") user_selectBOM = JCheckBox("", lWriteBOMToExportFile_SWSS) label10 = 
JLabel("Turn DEBUG Verbose messages on?") user_selectDEBUG = JCheckBox("", debug) userFilters = JPanel(GridLayout(0, 2)) userFilters.add(label1) userFilters.add(user_hideHiddenSecurities) userFilters.add(label2) userFilters.add(user_hideInactiveAccounts) userFilters.add(label3) userFilters.add(user_hideHiddenAccounts) userFilters.add(label4) userFilters.add(user_selectCurrency) userFilters.add(label5) userFilters.add(user_selectTicker) userFilters.add(label6) userFilters.add(user_selectAccounts) userFilters.add(label7) userFilters.add(user_selectCashBalances) userFilters.add(label7b) userFilters.add(user_splitSecurities) userFilters.add(labelFutureBalances) userFilters.add(user_includeFutureBalances) userFilters.add(label7c) userFilters.add(user_excludeTotalsFromCSV) userFilters.add(label7d) userFilters.add(user_roundPrice) userFilters.add(labelRC) userFilters.add(user_selectResetColumns) userFilters.add(label8) userFilters.add(user_selectStripASCII) userFilters.add(label9) userFilters.add(user_selectDELIMITER) userFilters.add(labelBOM) userFilters.add(user_selectBOM) userFilters.add(label10) userFilters.add(user_selectDEBUG) lExit = False lDisplayOnly = False options = ["Abort", "Display & CSV Export", "Display Only"] userAction = (JOptionPane.showOptionDialog(StockGlance2020_fake_frame_, userFilters, "%s(build: %s) Set Script Parameters...." %(myScriptName,version_build), JOptionPane.OK_CANCEL_OPTION, JOptionPane.QUESTION_MESSAGE, moneydance_ui.getIcon("/com/moneydance/apps/md/view/gui/glyphs/appicon_64.png"), options, options[2])) if userAction == 1: # Display & Export myPrint("DB", "Display and export chosen") lDisplayOnly = False elif userAction == 2: # Display Only lDisplayOnly = True myPrint("DB", "Display only with no export chosen") else: # Abort myPrint("DB", "User Cancelled Parameter selection.. Will abort..") myPopupInformationBox(StockGlance2020_fake_frame_,"User Cancelled Parameter selection.. 
Will abort..","PARAMETERS") lDisplayOnly = False lExit = True if not lExit: if debug: myPrint("DB", "Parameters Captured", "Sec: ", user_hideHiddenSecurities.isSelected(), "InActAct:", user_hideInactiveAccounts.isSelected(), "HidAct:", user_hideHiddenAccounts.isSelected(), "Curr:", user_selectCurrency.getText(), "Ticker:", user_selectTicker.getText(), "Filter Accts:", user_selectAccounts.getText(), "Include Cash Balances:", user_selectCashBalances.isSelected(), "Split Securities:", user_splitSecurities.isSelected(), "Include Future Balances:", user_includeFutureBalances.isSelected(), "Exclude Totals from CSV:", user_excludeTotalsFromCSV.isSelected(), "Round Calc Price:", user_roundPrice.isSelected(), "Reset Columns:", user_selectResetColumns.isSelected(), "Strip ASCII:", user_selectStripASCII.isSelected(), "Write BOM to file:", user_selectBOM.isSelected(), "Verbose Debug Messages: ", user_selectDEBUG.isSelected(), "CSV File Delimiter:", user_selectDELIMITER.getSelectedItem()) # endif if user_selectResetColumns.isSelected(): myPrint("B","User asked to reset columns.... 
Resetting Now....") _column_widths_SG2020=[] # This will invalidate the hideHiddenSecurities = user_hideHiddenSecurities.isSelected() hideInactiveAccounts = user_hideInactiveAccounts.isSelected() hideHiddenAccounts = user_hideHiddenAccounts.isSelected() if user_selectCurrency.getText() == "ALL" or user_selectCurrency.getText().strip() == "": lAllCurrency = True filterForCurrency = "ALL" else: lAllCurrency = False filterForCurrency = user_selectCurrency.getText() if user_selectTicker.getText() == "ALL" or user_selectTicker.getText().strip() == "": lAllSecurity = True filterForSecurity = "ALL" else: lAllSecurity = False filterForSecurity = user_selectTicker.getText() if user_selectAccounts.getText() == "ALL" or user_selectAccounts.getText().strip() == "": lAllAccounts = True filterForAccounts = "ALL" else: lAllAccounts = False filterForAccounts = user_selectAccounts.getText() lIncludeCashBalances = user_selectCashBalances.isSelected() lSplitSecuritiesByAccount = user_splitSecurities.isSelected() lExcludeTotalsFromCSV = user_excludeTotalsFromCSV.isSelected() lIncludeFutureBalances_SG2020 = user_includeFutureBalances.isSelected() lRoundPrice = user_roundPrice.isSelected() lStripASCII = user_selectStripASCII.isSelected() csvDelimiter = user_selectDELIMITER.getSelectedItem() if csvDelimiter == "" or (not (csvDelimiter in ";|,")): myPrint("B", "Invalid Delimiter:", csvDelimiter, "selected. Overriding with:','") csvDelimiter = "," if decimalCharSep == csvDelimiter: myPrint("B", "WARNING: The CSV file delimiter:", csvDelimiter, "cannot be the same as your decimal point character:", decimalCharSep, " - Proceeding without file export!!") lDisplayOnly = True myPopupInformationBox(None, "ERROR - The CSV file delimiter: %s ""cannot be the same as your decimal point character: %s. " "Proceeding without file export (i.e. I will do nothing)!!" 
%(csvDelimiter, decimalCharSep), "INVALID FILE DELIMITER", theMessageType=JOptionPane.ERROR_MESSAGE) lWriteBOMToExportFile_SWSS = user_selectBOM.isSelected() debug = user_selectDEBUG.isSelected() myPrint("DB", "DEBUG turned on") myPrint("B", "User Parameters...") if hideHiddenSecurities: myPrint("B", "Hiding Hidden Securities...") else: myPrint("B", "Including Hidden Securities...") if hideInactiveAccounts: myPrint("B", "Hiding Inactive Accounts...") else: myPrint("B", "Including Inactive Accounts...") if hideHiddenAccounts: myPrint("B", "Hiding Hidden Accounts...") else: myPrint("B", "Including Hidden Accounts...") if lAllCurrency: myPrint("B", "Selecting ALL Currencies...") else: myPrint("B", "Filtering for Currency containing: ", filterForCurrency) if lAllSecurity: myPrint("B", "Selecting ALL Securities...") else: myPrint("B", "Filtering for Security/Ticker containing: ", filterForSecurity) if lAllAccounts: myPrint("B", "Selecting ALL Accounts...") else: myPrint("B", "Filtering for Accounts containing: ", filterForAccounts) if lIncludeCashBalances: myPrint("B", "Including Cash Balances - WARNING - this is per account!") else: myPrint("B", "Excluding Cash Balances") if lIncludeFutureBalances_SG2020: myPrint("B", "Including Future Balances...") else: myPrint("B", "Including Current Balances Only....") if lRoundPrice: myPrint("B", "Will round the calculated price to the security's decimal precision setting...") else: myPrint("B", "Will perform no rounding of calculated price...") if lSplitSecuritiesByAccount: myPrint("B", "Splitting Securities by account - WARNING, this will disable sorting....") else: myPrint("B", "No Splitting Securities by account will be performed....") # Now get the export filename csvfilename = None if not lDisplayOnly: # i.e. we have asked for a file export - so get the filename if lStripASCII: myPrint("B", "Will strip non-ASCII characters - e.g. 
Currency symbols from output file...", " Using Delimiter:", csvDelimiter) else: myPrint("B", "Non-ASCII characters will not be stripped from file: ", " Using Delimiter:", csvDelimiter) if lWriteBOMToExportFile_SWSS: myPrint("B", "Script will add a BOM (Byte Order Mark) to front of the extracted file...") else: myPrint("B", "No BOM (Byte Order Mark) will be added to the extracted file...") if lExcludeTotalsFromCSV: myPrint("B", "Will exclude Totals from CSV to assist Pivot tables") def grabTheFile(): global debug, lDisplayOnly, csvfilename, lIamAMac, scriptpath, myScriptName myPrint("D", "In ", inspect.currentframe().f_code.co_name, "()") if scriptpath == "" or scriptpath is None: # No parameter saved / loaded from disk scriptpath = myDir() myPrint("DB", "Default file export output path is....:", scriptpath) csvfilename = "" if lIamAMac: myPrint("DB", "MacOS X detected: Therefore I will run FileDialog with no extension filters to get filename....") # jFileChooser hangs on Mac when using file extension filters, also looks rubbish. So using Mac(ish)GUI System.setProperty("com.apple.macos.use-file-dialog-packages", "true") # In theory prevents access to app file structure (but doesnt seem to work) System.setProperty("apple.awt.fileDialogForDirectories", "false") filename = FileDialog(StockGlance2020_fake_frame_, "Select/Create CSV file for extract (CANCEL=NO EXPORT)") filename.setMultipleMode(False) filename.setMode(FileDialog.SAVE) filename.setFile(extract_filename) if (scriptpath is not None and scriptpath != ""): filename.setDirectory(scriptpath) # Copied from MD code... File filters only work on non Macs (or Macs below certain versions) if (not Platform.isOSX() or not Platform.isOSXVersionAtLeast("10.13")): extfilter = ExtFilenameFilter("csv") filename.setFilenameFilter(extfilter) # I'm not actually sure this works...? filename.setVisible(True) csvfilename = filename.getFile() if (csvfilename is None) or csvfilename
filename: slope_suffix = '{}.fits'.format(suffix) jump_file = filename.replace(slope_suffix, '_jump.fits') break if jump_file is None: raise ValueError("ERROR: Unrecognized slope filename suffix.") if not os.path.isfile(jump_file): raise FileNotFoundError("ERROR: Jump file {} not found.".format(jump_file)) print('Opening Jump File {}'.format(jump_file)) groupdq = dq_flags.get_groupdq(jump_file, refpix_additions) cr_map = dq_flags.flag_map(groupdq, 'JUMP_DET') # Get slope data corresponding to this file by extracting the # appropriate frames from the ``slopes`` stack slope = slopes[indexes[i]: indexes[i+1], :, :] # Read in the fitops file associated with the exposure and get # the pedestal array (y-intercept) if fitopt_filenames is not None: pedestal_file = fitopt_filenames[i] else: pedestal_file = filename.replace(slope_suffix, '_fitopt.fits') if not os.path.isfile(pedestal_file): raise FileNotFoundError("ERROR: Pedestal file {} not found.".format(pedestal_file)) print('Opening Pedestal File {}'.format(pedestal_file)) pedestal = read_pedestal_data(pedestal_file, refpix_additions) # for MIRI the zero point of the ramp drifts with time. Adjust the # pedestal to be a relative pedestal wrt to group 2 if instrument == 'MIRI': if uncal_filenames is not None: uncal_file = uncal_filenames[i] else: uncal_file = filename.replace(slope_suffix, '_uncal.fits') if not os.path.isfile(uncal_file): raise FileNotFoundError("ERROR: Uncal file {} not found.".format(uncal_file)) group2 = extract_group2(uncal_file, refpix_additions) pedestal_org = copy.deepcopy(pedestal) pedestal = np.fabs(group2 - pedestal) # Work one integration at a time for int_num in range(pedestal.shape[0]): # pull out the DQ of the first group. This will be use to remove # Low pedestal values that have a pedestal of 0 because they are # saturated on group 1. 
first_group = groupdq[int_num, 0, :, :] pedestal_int = pedestal[int_num, :, :] slope_int = slope[int_num, :, :] clipped_pedestal, cliplow, cliphigh = sigmaclip(pedestal_int, low=3., high=3.) mean_pedestal = np.mean(clipped_pedestal) std_pedestal = np.std(clipped_pedestal) rc_from_pedestal[counter, :, :] += pedestal_int > (mean_pedestal + std_pedestal * pedestal_sigma_threshold) # Pixels with abnormally low pedestal values pedestal_low = pedestal_int < (mean_pedestal - std_pedestal * pedestal_sigma_threshold) first_group_sat = np.bitwise_and(first_group, dqflags.pixel['SATURATED']) # do not allow pixels saturated on group 1 to be marked as low pedestal pedestal_results = np.logical_and(pedestal_low, (first_group_sat == 0)) low_pedestal[counter, :, :] += pedestal_results # Find pixels that are saturated in all groups. These will have # a pedestal value of 0 (according to the pipeline documentation). # These should end up flagged as HOT and DO_NOT_USE # Remove all the cases where ped = 0, but group 1 is not saturated # This can be dead pixels if instrument == 'MIRI': pedestal_int = pedestal_org[int_num, :, :] saturated[counter, :, :] += saturated_in_all_groups(pedestal_int, first_group_sat) # Find pixels that have an abnormally high number of jumps, as # well as those that have most of their jumps concentrated in the # early part of the integration. 
The latter are possibly RC or IRC # pixels many_jumps, rc_candidates, number_of_jumps =\ find_pix_with_many_jumps(cr_map[int_num, :, :, :], max_jump_limit=10, jump_ratio_threshold=5, early_cutoff_fraction=0.25) high_cr_rate[counter, :, :] += many_jumps rc_from_flags[counter, :, :] += rc_candidates # using the number_of_jumps (a per integration value) create a clean set of # pixel slopes with no cosmic rays clean_slopes, iclean_slopes = slopes_not_cr(slope_int, number_of_jumps) slope_stack.append(clean_slopes) islope_stack.append(iclean_slopes) total_ints += 1 counter += 1 # now find the mean and standard deviation of the "clean" pixel slopes clean_mean_slope, clean_std_slope, num_good = combine_clean_slopes(slope_stack, islope_stack) hdout = fits.PrimaryHDU(clean_mean_slope) hdout.writeto('average_of_slopes_nojumps.fits', overwrite=True) hdout = fits.PrimaryHDU(clean_std_slope) hdout.writeto('sigma_of_slopes_nojumps.fits', overwrite=True) num_good_slopes = num_good.astype(np.int16) hdout = fits.PrimaryHDU(num_good_slopes) hdout.writeto('number_of_slopes_nojumps.fits', overwrite=True) # Use sigma-cliping to remove large outliers to have clean stats to flag # noisy pixels. 
# removing nans from clean_std_slope because it causes warning messages to be print clean_std_slope_nonan = clean_std_slope[np.isfinite(clean_std_slope)] clipped_stdevs, cliplow, cliphigh = sigma_clip(clean_std_slope_nonan, sigma=clipping_sigma, maxiters=max_clipping_iters, masked=False, return_bounds=True) avg_of_std = np.mean(clipped_stdevs) std_of_std = np.std(clipped_stdevs) cut_limit = avg_of_std + std_of_std*noisy_threshold # assigning nans from clean_std_slope to very large values that will be cut # because it causes warning messages to be print values_nan = np.isnan(clean_std_slope) clean_std_slope[values_nan] = avg_of_std + std_of_std*50 noisy = clean_std_slope > cut_limit num_noisy = len(np.where(noisy)[0]) if plot: # plot the number of good slopes per pixel max_values = np.amax(num_good) plot_image(num_good, max_values, outdir, "Number of Good slopes/pixel ", "clean_pixel_number.png") # plot the standard deviation of pixels slope after eliminating # values having jumps detectect in ramp xhigh = avg_of_std + std_of_std plot_image(clean_std_slope, xhigh, outdir, "Clean Pixel Standard devations", "clean_pixel_std.png") # plot the histogram before the clipping nbins = 5000 titleplot = 'Histogram of Clean Pixel Slope STD Average ' + \ '{:6.4f}'.format(avg_of_std) + ' Std ' + '{:6.4f}'.format(std_of_std) plot_histogram_stats(clean_std_slope, cut_limit, nbins, outdir, titleplot, "histo_clean_std.png", xaxis_log=True) # Look through the stack of saturated pixels and keep those saturated # more than N% of the time fully_saturated = np.sum(saturated, axis=0) / total_ints fully_saturated[fully_saturated < max_saturated_fraction] = 0 fully_saturated = np.ceil(fully_saturated).astype(int) fully_saturated = apply_flags(fully_saturated, flag_values['hot']) num_saturated = len(np.where(fully_saturated != 0)[0]) print('\n\nFound {} fully saturated pixels.'.format(num_saturated)) # How do we want to combine these to identify RC pixels? 
rc_pedestal = np.sum(rc_from_pedestal, axis=0) / total_ints rc_flags = np.sum(rc_from_flags, axis=0) / total_ints rc_from_pedestal_only = (rc_pedestal > rc_fraction_threshold).astype(int) rc_from_jumps_only = (rc_flags > rc_fraction_threshold).astype(int) num_rc_ped = len(np.where(rc_from_pedestal_only != 0)[0]) num_rc_jump = len(np.where(rc_from_jumps_only != 0)[0]) print("Found {} RC pixels from pedestal search".format(num_rc_ped)) print("Found {} RC pixels from Jump search".format(num_rc_jump)) rc = ((rc_pedestal > rc_fraction_threshold) | (rc_flags > rc_fraction_threshold)) rc = apply_flags(rc.astype(int), flag_values['rc']) num_rc = len(np.where(rc != 0)[0]) print('Found {} RC pixels.'.format(num_rc)) # Low pedestal pixels low_pedestal_vals = np.sum(low_pedestal, axis=0) / total_ints low_ped = low_pedestal_vals > low_pedestal_fraction # Pixels that are saturated on the first group will have a PEDESTAL value # of 0. Pull these out of this set (these are hot pixels) low_ped = apply_flags(low_ped.astype(int), flag_values['low_pedestal']) num_low_ped = len(np.where(low_ped != 0)[0]) print('Found {} low pedestal pixels.'.format(num_low_ped)) # Pixels with lots of CR flags should be added to the list of noisy pixels? 
high_cr = np.sum(high_cr_rate, axis=0) / total_ints noisy_second_pass = high_cr > high_cr_fraction combined_noisy = np.bitwise_or(noisy, noisy_second_pass) combined_noisy = apply_flags(combined_noisy.astype(int), flag_values['high_cr']) num_high_cr = len(np.where(noisy_second_pass != 0)[0]) print('Found {} pixels with a high number of jumps.'.format(num_high_cr)) print('Found {} pixels with noise above the threshold.'.format(num_noisy)) num_combined_noisy = len(np.where(combined_noisy != 0)[0]) print('Combining noisy and high jump pixels, found {} noisy pixels.'.format(num_combined_noisy)) # Combine the various flavors of bad pixels into a final DQ map bad_pixels = combine_bad_pixel_types(fully_saturated, rc, low_ped, combined_noisy) # Add the reference pixels back into the bad pixel map bad_pixels = add_refpix(bad_pixels, refpix_additions) # Create DQ definitions to be saved with the output file dq_def = create_dqdef() # Save the bad pixel mask to a fits file # Eventually this routine will be called as part of the dark current reference file # generator, and the bad pixel mask will be saved in the DQ extension of the # reference file h0 = fits.PrimaryHDU(fully_saturated) h0.header['EXTNAME'] = 'SATURATED' h1a = fits.ImageHDU(rc_from_pedestal_only) h1a.header['EXTNAME'] = 'RC_FROM_PED' h1b = fits.ImageHDU(rc_from_jumps_only) h1b.header['EXTNAME'] = 'RC_FROM_JUMPS' h1 = fits.ImageHDU(rc) h1.header['EXTNAME'] = 'RC' h2 = fits.ImageHDU(low_ped) h2.header['EXTNAME'] = 'LOW_PEDESTAL' h3 = fits.ImageHDU(noisy.astype(int)) h3.header['EXTNAME'] = 'NOISY' h4 = fits.ImageHDU(noisy_second_pass.astype(int)) h4.header['EXTNAME'] = 'MANY_CRS' h5 = fits.ImageHDU(combined_noisy) h5.header['EXTNAME'] = 'NOISY_AND_CRS' hlist = fits.HDUList([h0, h1a, h1b, h1, h2, h3, h4, h5]) hlist.writeto(outfile, overwrite=True) print('Multi-extension file with individual types of bad pixels saved to:') print(outfile) return bad_pixels def add_refpix(array, to_add): """Place ``map`` within a larger 
array that contains the reference pixels. Parameters ---------- array : numpy.ndarray 2D array of bad pixels that does not contain reference pixels to_add : tup 4-element tuple containing the number of rows and columns to add around the outside of the science pixels. (left cols, right cols, bottom rows, top rows) Returns ------- array : numpy.ndarray 2D array with rows and columns added """ left_cols, right_cols, bottom_rows, top_rows = to_add y_array, x_array = array.shape xdim = x_array + left_cols + right_cols ydim = y_array + bottom_rows + top_rows full_array = np.zeros((ydim, xdim)) full_array[bottom_rows: bottom_rows+y_array, left_cols: left_cols+x_array] = array return full_array def apply_flags(pixmap, flag_list): """Beginning with a map indicating locations of a particular type of bad pixel, apply the bits specified in ``flag_list`` to come up with the ``jwst`` bad pixel value. Parameters ---------- pixmap : numpy.ndarray 2D array indicating bad pixels. 1 for a bad pixel and 0 for a good pixel flag_list : list List of bad pixel mnemonics to be applied. These mnemonics must be in
# ExportSQLite: SQLite export plugin for MySQL Workbench # # Copyright (C) 2015 <NAME> (Python version) # Copyright (C) 2009 <NAME> (Original Lua version) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import re import StringIO import grt import mforms from grt.modules import Workbench from wb import DefineModule, wbinputs from workbench.ui import WizardForm, WizardPage from mforms import newButton, newCodeEditor, FileChooser ModuleInfo = DefineModule(name='ExportSQLite', author='<NAME>', version='0.1.0') @ModuleInfo.plugin('wb.util.exportSQLite', caption='Export SQLite CREATE script', input=[wbinputs.currentCatalog()], groups=['Catalog/Utilities', 'Menu/Catalog']) @ModuleInfo.export(grt.INT, grt.classes.db_Catalog) def exportSQLite(cat): """Function to go through all schemata in catalog and rename all FKs of table-objects """ def validate_for_sqlite_export(cat): """Check uniqueness of schema, table and index names. Return 0 on success otherwise return 1 (the export process should abort) """ have_errors = False idt = {} for i, schema in enumerate(cat.schemata): if schema.name in idt: have_errors = True if Workbench.confirm('Name conflict', 'Schemas %d and %d have the same name "%s".' ' Please rename one of them.\n' 'Search for more such errors?' 
% ( idt[schema.name], i, schema.name)) == 0: return False else: idt[schema.name] = i # Do not continue looking for errors on schema name error if have_errors: return False for schema in cat.schemata: idt = {} for i, tbl in enumerate(schema.tables): if tbl.name == '': have_errors = True if Workbench.confirm('Name conflict', 'Table %d in schema "%s". has no name.' ' Please rename.\n' 'Search for more such errors?' % ( i, schema.name)) == 0: return False if tbl.name in idt: have_errors = True if Workbench.confirm('Name conflict', 'Tables %d and %d in schema "%s"' ' have the same name "%s".' ' Please rename one of them.\n' 'Search for more such errors?' % ( idt[tbl.name], i, schema.name, tbl.name)) == 0: return False else: idt[tbl.name] = i if have_errors: return False for schema in cat.schemata: for tbl in schema.tables: idt = {} for i, column in enumerate(tbl.columns): if column.name == '': have_errors = True if Workbench.confirm('Name conflict', 'Column %d in table "%s"."%s". has no name.' ' Please rename.\n' 'Search for more such errors?' % ( i, schema.name, tbl.name)) == 0: return False if column.name in idt: have_errors = True if Workbench.confirm('Name conflict', 'Columns %d and %d in table "%s"."%s"' ' have the same name "%s".' ' Please rename one of them.\n' 'Search for more such errors?' % ( idt[column.name], i, schema.name, tbl.name, column.name)) == 0: return False else: idt[column.name] = i # Now check indices (except primary/unique) idt = {} for i, index in enumerate(tbl.indices): if index.indexType == 'INDEX': if index.name == '': have_errors = True if Workbench.confirm('Name conflict', 'Index %d in table "%s"."%s". has no name.' ' Please rename.\n' 'Search for more such errors?' % ( i, schema.name, tbl.name)) == 0: return False if index.name in idt: have_errors = True if Workbench.confirm('Name conflict', 'Indices %d and %d in table "%s"."%s"' ' have the same name "%s".' ' Please rename one of them.\n' 'Search for more such errors?' 
% ( idt[index.name], i, schema.name, tbl.name, column.name)) == 0: return False else: idt[index.name] = i if have_errors: return False return True def is_deferred(fkey): # Hack: if comment starts with "Defer..." we make it a deferred FK could # use member 'deferability' (WB has it), but there is no GUI for it return fkey.comment.lstrip().lower()[0:5] == 'defer' def export_table(out, db_name, schema, tbl): if len(tbl.columns) == 0: return out.write('CREATE TABLE %s%s(\n%s' % ( db_name, dq(tbl.name), schema_comment_format(tbl.comment))) primary_key = [i for i in tbl.indices if i.isPrimary == 1] primary_key = primary_key[0] if len(primary_key) > 0 else None pk_column = None if primary_key and len(primary_key.columns) == 1: pk_column = primary_key.columns[0].referencedColumn col_comment = '' for i, column in enumerate(tbl.columns): check, sqlite_type, flags = '', None, None if column.simpleType: sqlite_type = column.simpleType.name flags = column.simpleType.flags else: sqlite_type = column.userType.name flags = column.flags length = column.length # For INTEGER PRIMARY KEY column to become an alias for the rowid # the type needs to be "INTEGER" not "INT" # we fix it for other columns as well if 'INT' in sqlite_type or sqlite_type == 'LONG': sqlite_type = 'INTEGER' length = -1 # Check flags for "unsigned" if 'UNSIGNED' in column.flags: check = dq(column.name) + '>=0' # We even implement ENUM (because we can) if sqlite_type == 'ENUM': sqlite_type = 'TEXT' if column.datatypeExplicitParams: check = (dq(column.name) + ' IN' + column.datatypeExplicitParams) if i > 0: out.write(',' + comment_format(col_comment) + '\n') out.write(' ' + dq(column.name)) # Type is optional in SQLite if sqlite_type != '': out.write(' ' + sqlite_type) # For [VAR]CHAR and such types specify length even though this is # not used in SQLite if length > 0: out.write('(%d)' % length) # Must specify single-column PKs as column-constraints for AI/rowid # behaviour if column == pk_column: out.write(' 
PRIMARY KEY') if primary_key.columns[0].descend == 1: out.write(' DESC') # Only PK columns can be AI in SQLite if column.autoIncrement == 1: out.write(' AUTOINCREMENT') # Check for NotNull if column.isNotNull == 1: out.write(' NOT NULL') if check != '': out.write(' CHECK(' + check + ')') if column.defaultValue != '': out.write(' DEFAULT ' + column.defaultValue) col_comment = column.comment # For multicolumn PKs if primary_key and not pk_column: out.write(',%s\n PRIMARY KEY(%s)' % ( comment_format(col_comment), print_index_columns(primary_key))) col_comment = '' # Put non-primary, UNIQUE Keys in CREATE TABLE as well (because we can) for index in tbl.indices: if index != primary_key and index.indexType == 'UNIQUE': out.write(',%s\n' % comment_format(col_comment)) col_comment = '' if index.name != '': out.write(' CONSTRAINT %s\n ' % dq(index.name)) out.write(' UNIQUE(%s)' % print_index_columns(index)) for fkey in tbl.foreignKeys: have_fkeys = 1 out.write(',%s\n' % comment_format(col_comment)) col_comment = '' if fkey.name != '': out.write(' CONSTRAINT %s\n ' % dq(fkey.name)) out.write(' FOREIGN KEY(%s)\n' % print_fk_columns(fkey.columns)) out.write(' REFERENCES %s(%s)' % ( dq(fkey.referencedTable.name), print_fk_columns(fkey.referencedColumns))) if fkey.deleteRule in ['RESTRICT', 'CASCADE', 'SET NULL']: out.write('\n ON DELETE ' + fkey.deleteRule) if fkey.updateRule in ['RESTRICT', 'CASCADE', 'SET NULL']: out.write('\n ON UPDATE ' + fkey.updateRule) if is_deferred(fkey): out.write(' DEFERRABLE INITIALLY DEFERRED') out.write(comment_format(col_comment) + '\n);\n') # CREATE INDEX statements for all non-primary, non-unique, non-foreign # indexes for i, index in enumerate(tbl.indices): if index.indexType == 'INDEX': index_name = tbl.name + '.' 
+ index.name if index.name == '': index_name = tbl.name + '.index' + i out.write('CREATE INDEX %s%s ON %s (%s);\n' % ( db_name, dq(index_name), dq(tbl.name), print_index_columns(index))) # Write the INSERTS (currently always) for insert in tbl.inserts().splitlines(): columns_values = '' insert_start = 'insert into `%s`.`%s` (' % (schema.name, tbl.name) if insert[0:len(insert_start)].lower() == insert_start.lower(): columns_values = insert[len(insert_start):] else: raise ExportSQLiteError( 'Error', 'Unrecognized command in insert') last_column = 0 for i, column in enumerate(tbl.columns): column_name = '`' + column.name + '`' if columns_values[0:len(column_name)] == column_name: columns_values = columns_values[len(column_name):] if columns_values[0:1] == ')': columns_values = columns_values[1:] last_column = i break else: if columns_values[0:2] == ', ': columns_values = columns_values[2:] else: raise ExportSQLiteError( 'Error', 'Unrecognized character in column list') else: raise ExportSQLiteError( 'Error', 'Unrecognized column in inserts') out.write('INSERT INTO %s(' % dq(tbl.name)) for i in range(last_column + 1): if i > 0: out.write(',') out.write(dq(tbl.columns[i].name)) if columns_values[0:9].lower() != ' values (': raise ExportSQLiteError( 'Error', 'Unrecognized SQL in insert') columns_values = columns_values[9:] out.write(') VALUES(') out.write(columns_values.replace("\\'", "''")) out.write('\n') def order_tables(out, db_name, schema, unordered, respect_deferredness): have_ordered = False while not have_ordered: if len(unordered) == 0: have_ordered = True for tbl in unordered.values(): has_forward_reference = False for fkey in tbl.foreignKeys: if (fkey.referencedTable.name in unordered and fkey.referencedTable.name != tbl.name and not ( respect_deferredness and is_deferred(fkey))): has_forward_reference = True break if not has_forward_reference: export_table(out, db_name, schema, tbl) del unordered[tbl.name] have_ordered = True def export_schema(out, schema, 
is_main_schema): if len(schema.tables) == 0: return out.write('\n-- Schema: %s\n'
transformations that occur *within* block-level # tags like paragraphs, headers, and list items. text = self._do_code_spans(text) text = self._escape_special_chars(text) # Process anchor and image tags. text = self._do_links(text) # Make links out of things like `<http://example.com/>` # Must come after _do_links(), because you can use < and > # delimiters in inline links like [this](<url>). text = self._do_auto_links(text) text = self._encode_amps_and_angles(text) text = self._do_italics_and_bold(text) # Do hard breaks: text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text) return text _html_tokenize_re = re.compile(r""" ( # tag </? (?:\w+) # tag name (?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes \s*/?> | <!--.*?--> # comment | <\?.*?\?> # processing instruction ) """, re.X) def _escape_special_chars(self, text): # Python markdown note: the HTML tokenization here differs from # that in Markdown.pl, hence the behaviour for subtle cases can # differ (I believe the tokenizer here does a better job because # it isn't susceptible to unmatched '<' and '>' in HTML tags). escaped = [] is_tag = False for token in self._html_tokenize_re.split(text): if is_tag: # Within tags, encode * and _ so they don't conflict # with their use in Markdown for italics and strong. # We're replacing each such character with its # corresponding MD5 checksum value; this is likely # overkill, but it should prevent us from colliding # with the escape values by accident. escaped.append(token.replace('*', g_escape_table['*']) .replace('_', g_escape_table['_'])) else: escaped.append(self._encode_backslash_escapes(token)) is_tag = not is_tag return ''.join(escaped) _tail_of_inline_link_re = re.compile(r''' # Match tail of: [text](/url/) or [text](/url/ "title") \( # literal paren [ \t]* <?(?P<url>.*?)>? # \1 [ \t]* ( # \2 (['"]) # quote char = \3 (?P<title>.*?) \3 # matching quote )? 
# title is optional \) ''', re.X | re.S) _tail_of_reference_link_re = re.compile(r''' # Match tail of: [text][id] [ ]? # one optional space (?:\n[ ]*)? # one optional newline followed by spaces \[ (?P<id>.*?) \] ''', re.X | re.S) def _do_links(self, text): """Turn Markdown link shortcuts into XHTML <a> and <img> tags. This is a combination of Markdown.pl's _DoAnchors() and _DoImages(). They are done together because that simplified the approach. It was necessary to use a different approach than Markdown.pl because of the lack of atomic matching support in Python's regex engine used in $g_nested_brackets. """ MAX_LINK_TEXT_SENTINEL = 300 curr_pos = 0 while True: # Handle the next link. # The next '[' is the start of: # - an inline anchor: [text](url "title") # - a reference anchor: [text][id] # - an inline img: ![text](url "title") # - a reference img: ![text][id] # - a link definition: [id]: url "title" # These have already been stripped in # _strip_link_definitions() so no need to watch for them. # - not markup: [...anything else... try: start_idx = text.index('[', curr_pos) except ValueError: break text_length = len(text) # Find the matching closing ']'. # Markdown.pl allows *matching* brackets in link text so we # will here too. Markdown.pl *doesn't* currently allow # matching brackets in img alt text -- we'll differ in that # regard. bracket_depth = 0 for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL, text_length-1)): ch = text[p] if ch == ']': bracket_depth -= 1 if bracket_depth < 0: break elif ch == '[': bracket_depth += 1 else: # Closing bracket not found within sentinel length. # This isn't markup. curr_pos = start_idx + 1 continue link_text = text[start_idx+1:p] # Now determine what this in by the remainder. p += 1 if p == text_length: return text # Inline anchor or img? if text[p] == '(': # attempt at perf improvement match = self._tail_of_inline_link_re.match(text, p) if match: # Handle an inline anchor or img. 
is_img = start_idx > 0 and text[start_idx-1] == "!" if is_img: start_idx -= 1 url, title = match.group("url"), match.group("title") # We've got to encode these to avoid conflicting # with italics/bold. url = url.replace('*', g_escape_table['*']) \ .replace('_', g_escape_table['_']) if title or is_img: if is_img and title is None: # Markdown.pl includes title='' on image # links. Not *sure* this is intended. title = "" title_str = ' title="%s"' \ % title.replace('*', g_escape_table['*']) \ .replace('_', g_escape_table['_']) \ .replace('"', '&quot;') else: title_str = '' if is_img: result = '<img src="%s" alt="%s"%s%s' \ % (url, link_text.replace('"', '&quot;'), title_str, self.empty_element_suffix) else: result = '<a href="%s"%s>%s</a>' \ % (url, title_str, link_text) text = text[:start_idx] + result + text[match.end():] curr_pos = start_idx + len(result) continue # Reference anchor or img? else: match = self._tail_of_reference_link_re.match(text, p) if match: # Handle a reference-style anchor or img. is_img = start_idx > 0 and text[start_idx-1] == "!" if is_img: start_idx -= 1 link_id = match.group("id").lower() if not link_id: link_id = link_text.lower() # for links like [this][] if link_id in self.urls: url = self.urls[link_id] # We've got to encode these to avoid conflicting # with italics/bold. url = url.replace('*', g_escape_table['*']) \ .replace('_', g_escape_table['_']) title = self.titles.get(link_id) if title: title = title.replace('*', g_escape_table['*']) \ .replace('_', g_escape_table['_']) title_str = ' title="%s"' % title else: title_str = '' if is_img: result = '<img src="%s" alt="%s"%s%s' \ % (url, link_text.replace('"', '&quot;'), title_str, self.empty_element_suffix) else: result = '<a href="%s"%s>%s</a>' \ % (url, title_str, link_text) text = text[:start_idx] + result + text[match.end():] curr_pos = start_idx + len(result) else: # This id isn't defined, leave the markup alone. curr_pos = match.end() continue # Otherwise, it isn't markup. 
                # (fragment: tail of _do_links(); the enclosing method and
                # loop begin in a previous chunk)
                curr_pos = start_idx + 1

        return text

    _setext_h_re = re.compile(r'^(.+)[ \t]*\n(=+|-+)[ \t]*\n+', re.M)

    def _setext_h_sub(self, match):
        # Underline character decides the level: '=' -> h1, '-' -> h2.
        n = {"=": 1, "-": 2}[match.group(2)[0]]
        return "<h%d>%s</h%d>\n\n" \
               % (n, self._run_span_gamut(match.group(1)), n)

    _atx_h_re = re.compile(r'''
        ^(\#{1,6})  # \1 = string of #'s
        [ \t]*
        (.+?)       # \2 = Header text
        [ \t]*
        \#*         # optional closing #'s (not counted)
        \n+
        ''', re.X | re.M)

    def _atx_h_sub(self, match):
        # Header level is simply the number of leading '#' characters.
        n = len(match.group(1))
        return "<h%d>%s</h%d>\n\n" \
               % (n, self._run_span_gamut(match.group(2)), n)

    def _do_headers(self, text):
        """Convert Setext- and atx-style Markdown headers to <hN> tags."""
        # Setext-style headers:
        #     Header 1
        #     ========
        #
        #     Header 2
        #     --------
        text = self._setext_h_re.sub(self._setext_h_sub, text)

        # atx-style headers:
        #   # Header 1
        #   ## Header 2
        #   ## Header 2 with closing hashes ##
        #   ...
        #   ###### Header 6
        text = self._atx_h_re.sub(self._atx_h_sub, text)

        return text

    _marker_ul_chars = '*+-'
    _marker_any = '(?:[%s]|\d+[.])' % _marker_ul_chars

    def _list_sub(self, match):
        lst = match.group(1)
        # and/or idiom: "ul" when the marker is a bullet char, else "ol"
        # (safe here because "ul" is truthy).
        lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol"
        result = self._process_list_items(lst)
        return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)

    def _do_lists(self, text):
        # Form HTML ordered (numbered) and unordered (bulleted) lists.

        # Re-usable pattern to match any entire ul or ol list:
        # NOTE(review): this pattern reads as a verbose (re.X) regex; the
        # compile site lies outside this chunk — confirm re.X is applied.
        less_than_tab = self.tab_width - 1
        whole_list = r'''
            (                   # \1 = whole list
              (                 # \2
                [ ]{0,%d}
                (%s)            # \3 = first list item marker
                [ \t]+
              )
              (?:.+?)
              (                 # \4
                  \Z
                |
                  \n{2,}
                  (?=\S)
                  (?!           # Negative lookahead for another list item marker
                    [ \t]*
                    %s[ \t]+
                  )
              )
            )
        ''' % (less_than_tab, self._marker_any, self._marker_any)

        # We use a different prefix before nested lists than top-level lists.
        # See extended comment in _process_list_items().
        #
        # Note: There's a bit of duplication here. My original implementation
        # created a scalar regex pattern as the conditional result of the test on
        # $g_list_level, and then only ran the $text =~ s{...}{...}egmx
        # substitution once, using the scalar as the pattern.
This worked, # everywhere except when running under MT on my hosting account at Pair # Networks. There, this caused all rebuilds to be killed by the reaper (or # perhaps they crashed, but that seems incredibly unlikely given that the # same script on the same server ran fine *except* under MT. I've spent # more time trying to figure out why this is happening than I'd like to # admit. My only guess, backed up by the fact that this workaround works, # is that Perl optimizes the substition when it can figure out that the # pattern will never change, and when this
<reponame>bencrabbe/npdependency import argparse import math import os.path import pathlib import random import shutil import sys import tempfile import warnings from typing import ( Any, BinaryIO, Callable, Dict, IO, Iterable, List, NamedTuple, Optional, Sequence, Tuple, TypeVar, Union, cast, overload, ) import numpy as np import torch import transformers import yaml from boltons import iterutils as itu from torch import nn from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence from npdependency import lexers from npdependency.utils import smart_open from npdependency import conll2018_eval as evaluator from npdependency import deptree from npdependency.deptree import ( DependencyBatch, DependencyDataset, DepGraph, gen_labels, gen_tags, ) from npdependency.lexers import ( BertBaseLexer, BertLexerBatch, CharRNNLexer, DefaultLexer, FastTextLexer, freeze_module, make_vocab, ) from npdependency.mst import chuliu_edmonds_one_root as chuliu_edmonds # Python 3.7 shim try: from typing import Literal, TypedDict except ImportError: from typing_extensions import Literal, TypedDict # type: ignore class MLP(nn.Module): def __init__(self, input_size, hidden_size, output_size, dropout=0.0): super(MLP, self).__init__() self.Wdown = nn.Linear(input_size, hidden_size) self.Wup = nn.Linear(hidden_size, output_size) self.g = nn.ReLU() self.dropout = nn.Dropout(p=dropout) def forward(self, input): return self.Wup(self.dropout(self.g(self.Wdown(input)))) # Note: This is the biaffine layer used in Qi et al. (2018) and Dozat and Manning (2017). 
class BiAffine(nn.Module):
    """Biaffine attention layer.

    Scores every (head, dependent) pair of token representations with the
    bilinear form ``h^T W d``, one weight slice per output channel.
    """

    def __init__(self, input_dim: int, output_dim: int, bias: bool):
        super(BiAffine, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.bias = bias
        # When biased, forward() appends a constant 1 feature to both inputs,
        # so the weight must act on (input_dim + 1)-sized vectors.
        weight_input = input_dim + 1 if bias else input_dim
        self.weight = nn.Parameter(
            torch.FloatTensor(output_dim, weight_input, weight_input)
        )
        nn.init.xavier_uniform_(self.weight)

    def forward(self, h: torch.Tensor, d: torch.Tensor) -> torch.Tensor:
        if self.bias:
            # Augment with a ones column so the bilinear form also captures
            # linear and constant terms.
            h = torch.cat((h, h.new_ones((*h.shape[:-1], 1))), dim=-1)
            d = torch.cat((d, d.new_ones((*d.shape[:-1], 1))), dim=-1)
        # b: batch, x/y: sequence positions, o: output channel, i/j: features.
        return torch.einsum("bxi,oij,byj->boxy", h, self.weight, d)


class Tagger(nn.Module):
    # A bare linear projection from encoder states to tag scores (no softmax).
    def __init__(self, input_dim, tagset_size):
        super(Tagger, self).__init__()
        self.W = nn.Linear(input_dim, tagset_size)

    def forward(self, input):
        return self.W(input)


class LRSchedule(TypedDict):
    # Learning-rate schedule configuration: curve shape plus warmup length.
    shape: Literal["exponential", "linear", "constant"]
    warmup_steps: int


class EncodedSentence(NamedTuple):
    # A single sentence after lexer encoding (not yet batched or padded).
    words: Sequence[str]
    encoded_words: Union[torch.Tensor, lexers.BertLexerSentence]
    subwords: torch.Tensor
    chars: torch.Tensor
    sent_len: int


_T_SentencesBatch = TypeVar("_T_SentencesBatch", bound="SentencesBatch")


class SentencesBatch(NamedTuple):
    """Batched and padded sentences.

    ## Attributes

    - `words` The word forms for every sentence in the batch
    - `encoded_words` The words of the sentences, encoded and batched by a
      lexer and meant to be consumed by it directly. The details stay opaque
      at this level, see the relevant lexer instead.
    - `subwords` Encoded FastText subwords as a sequence of `LongTensor`. As
      with `chars`, `subwords[i][j, k]` is the k-th subword of the i-th word
      of the j-th sentence in the batch.
    - `chars` Encoded chars as a sequence of `LongTensor`. `chars[i][j, k]`
      is the k-th character of the i-th word of the j-th sentence in the batch.
- `tags` The gold POS tags (if any) as a `LongTensor` with shape `(batch_size, max_sentence_length)` - `heads` The gold heads (if any) as a `LongTensor` with shape `(batch_size, max_sentence_length)` - `labels` The gold dependency labels (if any) as a `LongTensor` with shape `(batch_size, max_sentence_length)` - `sent_length` The lengths of the sentences in the batch as `LongTensor` with shape `(batch_size,)` - `content_mask` A `BoolTensor` mask of shape `(batch_size, max_sentence_length)` such that `content_mask[i, j]` is true iff the j-th word of the i-th sentence in the batch is neither padding not the root (i.e. iff `1 <= j < sent_length[i]`). """ words: Sequence[Sequence[str]] encoded_words: Union[torch.Tensor, BertLexerBatch] subwords: torch.Tensor chars: torch.Tensor sent_lengths: torch.Tensor content_mask: torch.Tensor def to( self: _T_SentencesBatch, device: Union[str, torch.device] ) -> _T_SentencesBatch: return type(self)( words=self.words, encoded_words=self.encoded_words.to(device), chars=self.chars.to(device), subwords=self.subwords.to(device), sent_lengths=self.sent_lengths, content_mask=self.content_mask.to(device), ) class BiAffineParser(nn.Module): """Biaffine Dependency Parser.""" def __init__( self, biased_biaffine: bool, chars_lexer: CharRNNLexer, default_batch_size: int, device: Union[str, torch.device], encoder_dropout: float, # lstm dropout ft_lexer: FastTextLexer, labels: Sequence[str], lexer: Union[DefaultLexer, BertBaseLexer], mlp_input: int, mlp_tag_hidden: int, mlp_arc_hidden: int, mlp_lab_hidden: int, mlp_dropout: float, tagset: Sequence[str], ): super(BiAffineParser, self).__init__() self.default_batch_size = default_batch_size self.device = torch.device(device) self.tagset = tagset self.labels = labels self.mlp_arc_hidden = mlp_arc_hidden self.mlp_input = mlp_input self.mlp_lab_hidden = mlp_lab_hidden self.lexer = lexer.to(self.device) self.dep_rnn = nn.LSTM( self.lexer.embedding_size + chars_lexer.embedding_size + 
ft_lexer.embedding_size, mlp_input, 3, batch_first=True, dropout=encoder_dropout, bidirectional=True, ).to(self.device) # POS tagger & char RNN self.pos_tagger = MLP(mlp_input * 2, mlp_tag_hidden, len(self.tagset)).to( self.device ) self.char_rnn = chars_lexer.to(self.device) self.ft_lexer = ft_lexer.to(self.device) # Arc MLPs self.arc_mlp_h = MLP(mlp_input * 2, mlp_arc_hidden, mlp_input, mlp_dropout).to( self.device ) self.arc_mlp_d = MLP(mlp_input * 2, mlp_arc_hidden, mlp_input, mlp_dropout).to( self.device ) # Label MLPs self.lab_mlp_h = MLP(mlp_input * 2, mlp_lab_hidden, mlp_input, mlp_dropout).to( self.device ) self.lab_mlp_d = MLP(mlp_input * 2, mlp_lab_hidden, mlp_input, mlp_dropout).to( self.device ) # BiAffine layers self.arc_biaffine = BiAffine(mlp_input, 1, bias=biased_biaffine).to(self.device) self.lab_biaffine = BiAffine( mlp_input, len(self.labels), bias=biased_biaffine ).to(self.device) def save_params(self, path: Union[str, pathlib.Path, BinaryIO]): torch.save(self.state_dict(), path) def load_params(self, path: Union[str, pathlib.Path, BinaryIO]): state_dict = torch.load(path, map_location=self.device) # Legacy models do not have BERT layer weights, so we inject them here they always use only # 4 layers so we don't have to guess the size of the weight vector if hasattr(self.lexer, "layers_gamma"): state_dict.setdefault( "lexer.layer_weights", torch.ones(4, dtype=torch.float) ) state_dict.setdefault( "lexer.layers_gamma", torch.ones(1, dtype=torch.float) ) self.load_state_dict(state_dict) def forward( self, words: Union[torch.Tensor, BertLexerBatch], chars: torch.Tensor, ft_subwords: torch.Tensor, sent_lengths: torch.Tensor, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """Predict POS, heads and deprel scores. 
## Outputs `tag_scores, arc_scores, lab_scores` with shapes - `tag_score`: $`batch_size×max_sent_length×num_pos_tags`$ - `arc_scores`: $`batch_size×max_sent_length×max_sent_length`$ - `label_scores`: $`batch_size×num_deprels×max_sent_length×max_sent_length`$ """ # Computes char embeddings char_embed = self.char_rnn(chars) # Computes fasttext embeddings ft_embed = self.ft_lexer(ft_subwords) # Computes word embeddings lex_emb = self.lexer(words) # Encodes input for tagging and parsing inpt = torch.cat((lex_emb, char_embed, ft_embed), dim=-1) packed_inpt = pack_padded_sequence( inpt, sent_lengths, batch_first=True, enforce_sorted=False ) packed_dep_embeddings, _ = self.dep_rnn(packed_inpt) dep_embeddings, _ = pad_packed_sequence(packed_dep_embeddings, batch_first=True) # Tagging tag_scores = self.pos_tagger(dep_embeddings) # Compute the score matrices for the arcs and labels. arc_h = self.arc_mlp_h(dep_embeddings) arc_d = self.arc_mlp_d(dep_embeddings) lab_h = self.lab_mlp_h(dep_embeddings) lab_d = self.lab_mlp_d(dep_embeddings) arc_scores = self.arc_biaffine(arc_h, arc_d).squeeze(1) lab_scores = self.lab_biaffine(lab_h, lab_d) return tag_scores, arc_scores, lab_scores def parser_loss( self, tagger_scores: torch.Tensor, arc_scores: torch.Tensor, lab_scores: torch.Tensor, batch: DependencyBatch, marginal_loss: Callable[[torch.Tensor, torch.Tensor], torch.Tensor], ) -> torch.Tensor: # ARC LOSS # [batch, sent_len, sent_len] arc_scoresL = arc_scores.transpose(-1, -2) # [batch*sent_len, sent_len] arc_scoresL = arc_scoresL.reshape(-1, arc_scoresL.size(-1)) # [batch*sent_len] arc_loss = marginal_loss(arc_scoresL, batch.heads.view(-1)) # TAGGER_LOSS tagger_scoresB = tagger_scores.view(-1, tagger_scores.size(-1)) tagger_loss = marginal_loss(tagger_scoresB, batch.tags.view(-1)) # LABEL LOSS # We will select the labels for the true heads, so we have to give a true head to # the padding tokens (even if they will be ignored in the crossentropy since the true # label for that head 
is set to -100) so we give them the root. positive_heads = batch.heads.masked_fill(batch.content_mask.logical_not(), 0) # [batch, 1, 1, sent_len] headsL = positive_heads.unsqueeze(1).unsqueeze(2) # [batch, n_labels, 1, sent_len] headsL = headsL.expand(-1, lab_scores.size(1), -1, -1) # [batch, n_labels, sent_len] lab_scoresL = torch.gather(lab_scores, 2, headsL).squeeze(2) # [batch, sent_len, n_labels] lab_scoresL = lab_scoresL.transpose(-1, -2) # [batch*sent_len, n_labels] lab_scoresL = lab_scoresL.reshape(-1, lab_scoresL.size(-1)) # [batch*sent_len] labelsL = batch.labels.view(-1) lab_loss = marginal_loss(lab_scoresL, labelsL) # TODO: see if other loss combination functions wouldn't help here, e.g. # <https://arxiv.org/abs/1805.06334> return tagger_loss + arc_loss + lab_loss def eval_model(self, dev_set: DependencyDataset, batch_size: Optional[int] = None): if batch_size is None: batch_size = self.default_batch_size loss_fnc = nn.CrossEntropyLoss( reduction="sum", ignore_index=dev_set.LABEL_PADDING ) self.eval() dev_batches = dev_set.make_batches( batch_size, shuffle_batches=False, shuffle_data=False ) # NOTE: the accuracy scoring is approximative and cannot be interpreted as an UAS/LAS score # NOTE: fun project: track the correlation between them tag_acc, arc_acc, lab_acc, gloss = 0, 0, 0, 0.0 overall_size = 0 with torch.no_grad(): for batch in dev_batches: overall_size += int(batch.content_mask.sum().item()) batch = batch.to(self.device) # preds tagger_scores, arc_scores, lab_scores = self( batch.encoded_words, batch.chars, batch.subwords, batch.sent_lengths ) gloss += self.parser_loss( tagger_scores, arc_scores, lab_scores, batch, loss_fnc ).item() # greedy arc accuracy (without parsing) arc_pred = arc_scores.argmax(dim=-2) arc_accuracy = ( arc_pred.eq(batch.heads).logical_and(batch.content_mask).sum() ) arc_acc += arc_accuracy.item() # tagger accuracy tag_pred = tagger_scores.argmax(dim=2) tag_accuracy = ( 
tag_pred.eq(batch.tags).logical_and(batch.content_mask).sum() ) tag_acc += tag_accuracy.item() # greedy label accuracy (without parsing) lab_pred = lab_scores.argmax(dim=1) lab_pred = torch.gather( lab_pred, 1, batch.heads.masked_fill( batch.content_mask.logical_not(), 0 ).unsqueeze(1), ).squeeze(1) lab_accuracy = ( lab_pred.eq(batch.labels).logical_and(batch.content_mask).sum() ) lab_acc += lab_accuracy.item() return ( gloss / overall_size, tag_acc / overall_size, arc_acc / overall_size, lab_acc / overall_size, ) def train_model( self, train_set: DependencyDataset, epochs: int, lr: float, lr_schedule: LRSchedule, model_path: Union[str, pathlib.Path], batch_size: Optional[int] = None, dev_set: Optional[DependencyDataset] = None, ): model_path = pathlib.Path(model_path) weights_file = model_path / "model.pt" if batch_size is None: batch_size = self.default_batch_size print(f"Start training on {self.device}") loss_fnc = nn.CrossEntropyLoss( reduction="sum", ignore_index=train_set.LABEL_PADDING ) # TODO: make these configurable? optimizer = torch.optim.Adam( self.parameters(), betas=(0.9, 0.9), lr=lr, eps=1e-09 ) if lr_schedule["shape"] == "exponential": scheduler = torch.optim.lr_scheduler.LambdaLR( optimizer, (lambda n: 0.95 ** (n // (math.ceil(len(train_set) / batch_size)))), ) elif lr_schedule["shape"]
#!/usr/bin/env python """ Based on http://groups.google.com/group/dropio-api/web/full-api-documentation """ __version__ = '0.1.1' import httplib import logging import mimetypes import mimetools import os.path import sys import urllib import urllib2 import uuid from optparse import OptionParser from urlparse import urlsplit try: import json except ImportError: import simplejson as json from dropio.resource import Asset, Drop, Link, Note _API_VERSION = '2.0' _API_FORMAT = 'json' _API_BASE_URL = 'http://api.drop.io/' _FILE_UPLOAD_URL = 'http://assets.drop.io/upload' _DROPS = 'drops/' _ASSETS = '/assets/' _COMMENTS = '/comments/' _SEND_TO = '/send_to' _DROPIO_TRUE = 'true' _DROPIO_FALSE = 'false' ######################################################################### # HTTP ERRORS: from http://dev.drop.io/rest-api-reference/response-codes/ # # TODO: consider having these inherit from urllib2.HTTPError ######################################################################### class Error(Exception): pass class BadRequestError(Error): """400 Bad Request Something is wrong with the request in general (i.e. missing parameters, bad data, etc). """ pass class InternalServerError(Error): """500 Internal Server Error Something that [drop.io] did not account for has gone wrong. """ pass class ForbiddenError(Error): """403 Forbidden You did not supply a valid API token or an authorization token. """ pass class ResourceNotFoundError(Error): """404 Not Found The resource requested is not found or not available. 
""" pass class ExpirationLengthEnum(object): ONE_DAY_FROM_NOW = '1_DAY_FROM_NOW' ONE_WEEK_FROM_NOW = '1_WEEK_FROM_NOW' ONE_MONTH_FROM_NOW = '1_MONTH_FROM_NOW' ONE_YEAR_FROM_NOW = '1_YEAR_FROM_NOW' ONE_DAY_FROM_LAST_VIEW = '1_DAY_FROM_LAST_VIEW' ONE_WEEK_FROM_LAST_VIEW = '1_WEEK_FROM_LAST_VIEW' ONE_MONTH_FROM_LAST_VIEW = '1_MONTH_FROM_LAST_VIEW' ONE_YEAR_FROM_LAST_VIEW = '1_YEAR_FROM_LAST_VIEW' valid_expiration_lengths = frozenset(( ONE_DAY_FROM_NOW, ONE_WEEK_FROM_NOW, ONE_MONTH_FROM_NOW, ONE_YEAR_FROM_NOW, ONE_DAY_FROM_LAST_VIEW, ONE_WEEK_FROM_LAST_VIEW, ONE_MONTH_FROM_LAST_VIEW, ONE_YEAR_FROM_LAST_VIEW)) class _NullHandler(logging.Handler): """default logger does nothing""" def emit(self, record): pass class DropIoClient(object): """Client for the Drop.io service.""" def __init__(self, api_key, logger=None): self.__base_params_dict = {} self.__base_params_dict['api_key'] = api_key self.__base_params_dict['version'] = _API_VERSION self.__base_params_dict['format'] = _API_FORMAT if logger: self.logger = logger else: handler = _NullHandler() self.logger = logging.getLogger() self.logger.addHandler(handler) def __get(self, base_url, params_dict): params = urllib.urlencode(params_dict) stream = urllib2.urlopen(base_url + '?' 
+ params) body_dict = json.load(stream) stream.close() return body_dict def __post(self, url, params_dict): params = urllib.urlencode(params_dict) stream = urllib2.urlopen(url, params) body_dict = json.load(stream) stream.close() return body_dict def __post_multipart(self, url, params_dict): def encode_multipart_formdata(params_dict): boundary = mimetools.choose_boundary() body = '' for key, value in params_dict.iteritems(): if isinstance(value, tuple): filename, value = value body += '--%s\r\n' % boundary body += 'Content-Disposition: form-data;' body += 'name="%s";' % str(key) body += 'filename="%s"\r\n' % str(filename) body += 'Content-Type: %s\r\n\r\n' % str(get_content_type(filename)) body += '%s\r\n' % str(value) else: body += '--%s\r\n' % boundary body += 'Content-Disposition: form-data; name="%s"\r\n\r\n' % str(key) body += '%s\r\n' % str(value) body += '--%s--\r\n' % boundary content_type = 'multipart/form-data; boundary=%s' % boundary return body, content_type def get_content_type(filename): return mimetypes.guess_type(filename)[0] or 'application/octet-stream' body, content_type = encode_multipart_formdata(params_dict) headers = {'content-type': content_type} url_parts = urlsplit(url) connection = httplib.HTTPConnection(url_parts.netloc) connection.request('POST', url_parts.path, body, headers) response = connection.getresponse() body_dict = json.load(response) connection.close() return body_dict def __put(self, url, params_dict): opener = urllib2.build_opener(urllib2.HTTPHandler) request = urllib2.Request(url, data=json.dumps(params_dict)) request.add_header('Content-Type', 'application/json') request.get_method = lambda: 'PUT' stream = opener.open(request) body_dict = json.load(stream) stream.close() opener.close() return body_dict def __delete(self, url, params_dict): opener = urllib2.build_opener(urllib2.HTTPHandler) request = urllib2.Request(url, data=json.dumps(params_dict)) request.add_header('Content-Type', 'application/json') request.get_method 
= lambda: 'DELETE' stream = opener.open(request) body_dict = json.load(stream) stream.close() opener.close() return body_dict def __asset_dict_to_asset(self, asset_dict): asset = None if 'contents' in asset_dict: asset = Note(asset_dict) elif 'url' in asset_dict: asset = Link(asset_dict) else: asset = Asset(asset_dict) return asset ################ # DROP RESOURCE ################ def create_drop(self, drop_name=None): """ Returns: dropio.resource.Drop """ params_dict = {} if drop_name: params_dict['name'] = drop_name params_dict.update(self.__base_params_dict) url = _API_BASE_URL + _DROPS drop_dict = self.__post(url, params_dict) drop = Drop(drop_dict) return drop def get_drop(self, drop_name, token=None): """ Returns: dropio.resource.Drop """ assert drop_name params_dict = {} if token: params_dict['token'] = token params_dict.update(self.__base_params_dict) url = _API_BASE_URL + _DROPS + drop_name try: drop_dict = self.__get(url, params_dict) except urllib2.HTTPError, error: # TODO: move this into reusable method if error.code == 400: raise BadRequestError() elif error.code == 403: raise ForbiddenError() if error.code == 404: raise ResourceNotFoundError() if error.code == 500: raise ResourceNotFoundError() else: raise error drop = Drop(drop_dict) return drop def update_drop(self, drop, token): """ Returns: dropio.resource.Drop """ assert drop assert token params_dict = {} params_dict['token'] = token if drop.guests_can_comment is not None: if drop.guests_can_comment: params_dict['guests_can_comment'] = _DROPIO_TRUE else: params_dict['guests_can_comment'] = _DROPIO_FALSE if drop.guests_can_add is not None: if drop.guests_can_add: params_dict['guests_can_add'] = _DROPIO_TRUE else: params_dict['guests_can_add'] = _DROPIO_FALSE if drop.guests_can_delete is not None: if drop.guests_can_delete: params_dict['guests_can_delete'] = _DROPIO_TRUE else: params_dict['guests_can_delete'] = _DROPIO_FALSE if drop.expiration_length: params_dict['expiration_length'] = 
drop.expiration_length if drop.password: params_dict['password'] = <PASSWORD> if drop.admin_password: params_dict['admin_password'] = <PASSWORD> params_dict.update(self.__base_params_dict) url = _API_BASE_URL + _DROPS + drop.name drop_dict = self.__put(url, params_dict) drop = Drop(drop_dict) return drop def delete_drop(self, drop_name, token): assert drop_name assert token params_dict = {} params_dict['token'] = token params_dict.update(self.__base_params_dict) url = _API_BASE_URL + _DROPS + drop_name self.__delete(url, params_dict) return ################# # ASSET RESOURCE ################# def create_link(self, drop_name, link_url, title=None, description=None, token=None): """ Returns: dropio.resource.Link """ assert drop_name assert link_url params_dict = {} params_dict['url'] = link_url if title: params_dict['title'] = title if description: params_dict['description'] = description if token: params_dict['token'] = token params_dict.update(self.__base_params_dict) url = _API_BASE_URL + _DROPS + drop_name + _ASSETS link_dict = self.__post(url, params_dict) link = Link(link_dict) return link def create_note(self, drop_name, contents, title=None, token=None): """ Returns: dropio.resource.Note """ assert drop_name assert contents params_dict = {} params_dict['contents'] = contents if title: params_dict['title'] = title if token: params_dict['token'] = token params_dict.update(self.__base_params_dict) url = _API_BASE_URL + _DROPS + drop_name + _ASSETS note_dict = self.__post(url, params_dict) note = Note(note_dict) return note def create_file_from_readable(self, drop_name, readable, file_name=None, token=None): """ Returns: dropio.resource.Asset """ assert drop_name assert hasattr(readable, 'read') file_name = file_name or str(uuid.uuid4()) params_dict = {} params_dict['drop_name'] = drop_name if token: params_dict['token'] = token params_dict['file'] = (file_name, readable.read()) params_dict.update(self.__base_params_dict) url = _FILE_UPLOAD_URL asset_dict = 
self.__post_multipart(url, params_dict) asset = Asset(asset_dict) return asset def create_file(self, drop_name, file_name, token=None): """ Returns: dropio.resource.Asset """ assert drop_name assert file_name assert os.path.isfile(file_name) stream = open(file_name, 'rb') asset = self.create_file_from_readable(drop_name, stream, file_name, token) stream.close() return asset def get_asset_list(self, drop_name, page=1, token=None): """ Returns: generator of dropio.resource.Asset """ assert drop_name params_dict = {} params_dict['page'] = page if token: params_dict['token'] = token params_dict.update(self.__base_params_dict) url = _API_BASE_URL + _DROPS + drop_name + _ASSETS response = self.__get(url, params_dict) for asset_dict in response['assets']: yield Asset(asset_dict) return def get_all_asset_list(self, drop_name, token=None): """ Returns: generator of dropio.resource.Asset """ assert drop_name page = 1 while True: assets = self.get_asset_list(drop_name, page, token) empty = True for asset in assets: yield asset empty = False if empty: break page += 1 return def get_asset(self, drop_name, asset_name, token=None): """ Returns: dropio.resource.Asset """ assert drop_name assert asset_name params_dict = {} if token: params_dict['token'] = token params_dict.update(self.__base_params_dict) url = _API_BASE_URL + _DROPS + drop_name + _ASSETS + asset_name asset_dict = self.__get(url, params_dict) asset = self.__asset_dict_to_asset(asset_dict) return asset def update_asset(self, drop_name, asset, token=None): """ Returns: dropio.resource.Asset """ assert drop_name assert asset params_dict = {} if token: params_dict['token'] = token if asset.title: params_dict['title'] = asset.title if asset.description: params_dict['description'] = asset.description if hasattr(asset, 'url') and asset.url: params_dict['url'] = asset.url if hasattr(asset, 'contents') and asset.contents: params_dict['contents'] = asset.contents params_dict.update(self.__base_params_dict) url = _API_BASE_URL 
+ _DROPS + drop_name + _ASSETS + asset.name asset_dict = self.__put(url, params_dict) asset = self.__asset_dict_to_asset(asset_dict) return asset def delete_asset(self, drop_name, asset_name, token=None): assert drop_name assert asset_name params_dict = {} if token: params_dict['token'] = token params_dict.update(self.__base_params_dict) url = _API_BASE_URL + _DROPS + drop_name + _ASSETS + asset_name self.__delete(url, params_dict) return def __send_asset(self, drop_name, asset_name, medium, params_dict, token=None): assert drop_name assert asset_name params_dict['medium'] = medium if token: params_dict['token'] = token params_dict.update(self.__base_params_dict) url = _API_BASE_URL + _DROPS + drop_name + _ASSETS + asset_name + _SEND_TO self.__post(url, params_dict) return def send_asset_to_fax(self, drop_name, asset_name, fax_number, token=None): assert fax_number params_dict = {} params_dict['fax_number'] = fax_number self.__send_asset(drop_name, asset_name, 'fax', params_dict, token) return def send_asset_to_drop(self, drop_name, asset_name, drop_name_dest, token=None): assert drop_name_dest params_dict = {} params_dict['drop_name'] = drop_name_dest self.__send_asset(drop_name, asset_name, 'drop', params_dict, token) return def send_asset_to_email(self, drop_name, asset_name, emails, message=None, token=None): assert emails params_dict = {} params_dict['emails'] = emails if message: params_dict['message'] = message self.__send_asset(drop_name, asset_name, 'email', params_dict, token)
from django.contrib.gis.db import models from django.db import connection, transaction from django.db.models import Max from django.utils.translation import pgettext_lazy from django.utils.translation import ugettext_lazy as _ from enumfields import EnumField from sequences import get_next_value from leasing.enums import ( DueDatesPosition, InfillDevelopmentCompensationState, LandUseAgreementLitigantContactType, LandUseContractType, ) from leasing.models.contact import Contact from leasing.models.decision import DecisionMaker from leasing.models.land_area import AbstractAddress from leasing.models.lease import District, Municipality from users.models import User from .mixins import NameModel, TimeStampedSafeDeleteModel class LandUseAgreementType(NameModel): """ In Finnish: Laji """ identifier = models.CharField( verbose_name=_("Identifier"), max_length=255, unique=True ) sap_material_code = models.CharField( verbose_name=_("SAP material code"), null=True, blank=True, max_length=255 ) sap_order_item_number = models.CharField( verbose_name=_("SAP order item number"), null=True, blank=True, max_length=255 ) due_dates_position = EnumField( DueDatesPosition, verbose_name=_("Due dates position"), default=DueDatesPosition.START_OF_MONTH, max_length=30, ) class LandUseAgreementStatus(NameModel): """ In Finnish: Olotila """ class LandUseAgreementDefinition(NameModel): """ In Finnish: Määritelmä """ class LandUseAgreementIdentifier(TimeStampedSafeDeleteModel): """ In Finnish: Vuokraustunnus """ # In Finnish: Laji type = models.ForeignKey( LandUseAgreementType, verbose_name=_("Land use agreement type"), related_name="+", on_delete=models.PROTECT, ) # In Finnish: Kaupunki municipality = models.ForeignKey( Municipality, verbose_name=_("Municipality"), related_name="+", on_delete=models.PROTECT, ) # In Finnish: Kaupunginosa district = models.ForeignKey( District, verbose_name=_("District"), related_name="+", on_delete=models.PROTECT ) # In Finnish: Juokseva numero sequence = 
models.PositiveIntegerField(verbose_name=_("Sequence number")) class Meta: verbose_name = pgettext_lazy("Model name", "Lease identifier") verbose_name_plural = pgettext_lazy("Model name", "Lease identifiers") unique_together = ("type", "municipality", "district", "sequence") def __str__(self): """Returns the lease identifier as a string The Land use agreement identifier is constructed out of type, municipality, district, and sequence, in that order. For example, the identifier for a residence (A1) in Helsinki (1), Vallila (22), and sequence number 1 would be A1122-1. """ return "{}{}{:02}-{}".format( self.type.identifier, self.municipality.identifier, int(self.district.identifier), self.sequence, ) class LandUseAgreementCompensations(NameModel): """ In Finnish: Maankäyttökorvaus """ cash_compensation = models.DecimalField( verbose_name=_("Cash compensation"), decimal_places=2, max_digits=12 ) land_compensation = models.DecimalField( verbose_name=_("Land compensation"), decimal_places=2, max_digits=12 ) other_compensation = models.DecimalField( verbose_name=_("Other compensation"), decimal_places=2, max_digits=12 ) first_installment_increase = models.DecimalField( verbose_name=_("First installment increase"), decimal_places=2, max_digits=12 ) street_acquisition_value = models.DecimalField( verbose_name=_("Street acquisition value"), decimal_places=2, max_digits=12 ) street_area = models.PositiveIntegerField() park_acquisition_value = models.DecimalField( verbose_name=_("Park acquisition value"), decimal_places=2, max_digits=12 ) park_area = models.PositiveIntegerField() other_acquisition_value = models.DecimalField( verbose_name=_("Other acquisition value"), decimal_places=2, max_digits=12 ) other_area = models.PositiveIntegerField() class LandUseAgreement(TimeStampedSafeDeleteModel): """ In Finnish: Maankäyttösopimus """ # In Finnish: Tunnus identifier = models.OneToOneField( LandUseAgreementIdentifier, verbose_name=_("Land use agreement identifier"), null=True, 
blank=True, on_delete=models.PROTECT, ) # In Finnish: Kaupunki municipality = models.ForeignKey( Municipality, verbose_name=_("Municipality"), related_name="+", on_delete=models.PROTECT, ) # In Finnish: Kaupunginosa district = models.ForeignKey( District, verbose_name=_("District"), related_name="+", on_delete=models.PROTECT ) # In Finnish: Määritelmä definition = models.ForeignKey( LandUseAgreementDefinition, verbose_name=_("Land use agreement definition"), related_name="+", on_delete=models.PROTECT, ) # In Finnish: Olotila status = models.ForeignKey( LandUseAgreementStatus, verbose_name=_("Land use agreement status"), related_name="+", on_delete=models.PROTECT, ) # In Finnish: Laji type = models.ForeignKey( LandUseAgreementType, verbose_name=_("Land use agreement type"), related_name="+", on_delete=models.PROTECT, ) # In Finnish: Valmistelija preparer = models.ForeignKey( User, verbose_name=_("Preparer"), related_name="+", null=True, blank=True, on_delete=models.PROTECT, ) # In Finnish: Sopimuksen tyyppi land_use_contract_type = EnumField( LandUseContractType, verbose_name=_("Contract type"), null=True, blank=True, max_length=30, ) # In Finnish: Arvioitu toteutumisvuosi estimated_completion_year = models.PositiveSmallIntegerField( verbose_name=_("Estimated completion year"), null=True, blank=True ) # In Finnish: Arvioitu esittelyvuosi estimated_introduction_year = models.PositiveSmallIntegerField( verbose_name=_("Estimated introduction year"), null=True, blank=True ) # In Finnish: Hankealue project_area = models.CharField( verbose_name=_("Project area"), null=True, blank=True, max_length=255 ) # In Finnish: Asemakaavan diaarinumero plan_reference_number = models.CharField( verbose_name=_("Plan reference number"), null=True, blank=True, max_length=255 ) # In Finnish: Asemakaavan nro. 
plan_number = models.CharField( verbose_name=_("Plan number"), max_length=255, null=True, blank=True ) # In Finnish: Päättäjä plan_acceptor = models.ForeignKey( DecisionMaker, verbose_name=_("Plan acceptor"), related_name="land_use_agreements", null=True, blank=True, on_delete=models.PROTECT, ) # In Finnish: Asemakaavan lainvoimaisuuspvm plan_lawfulness_date = models.DateField( verbose_name=_("Plan lawfulness date"), null=True, blank=True ) # In Finnish: Asemakaavan käsittelyvaihe state = EnumField( InfillDevelopmentCompensationState, verbose_name=_("State"), null=True, blank=True, max_length=30, ) class Meta: verbose_name = pgettext_lazy("Model name", "Land use agreement") verbose_name_plural = pgettext_lazy("Model name", "Land use agreements") def __str__(self): return "Land use agreement #{}".format(self.id) @transaction.atomic def create_identifier(self): if self.identifier_id: return if not self.type or not self.municipality or not self.district: return # lock LandUseAgreementIdentifier table to prevent a (theoretically) possible # race condition when increasing the sequence with connection.cursor() as cursor: cursor.execute("LOCK TABLE %s" % self._meta.db_table) max_sequence = LandUseAgreementIdentifier.objects.filter( type=self.type, municipality=self.municipality, district=self.district ).aggregate(Max("sequence"))["sequence__max"] if not max_sequence: max_sequence = 0 identifier = LandUseAgreementIdentifier.objects.create( type=self.type, municipality=self.municipality, district=self.district, sequence=max_sequence + 1, ) self.identifier = identifier def save(self, *args, **kwargs): self.create_identifier() super().save(*args, **kwargs) class LandUseAgreementEstate(NameModel): """ In Finnish: Kohde """ land_use_agreement = models.ForeignKey( LandUseAgreement, verbose_name=_("Land use agreement"), related_name="estate_ids", null=True, blank=True, on_delete=models.CASCADE, ) estate_id = models.CharField(verbose_name=_("Estate id"), max_length=50) class 
LandUseAgreementDecisionType(NameModel): """ In Finnish: Maankäyttösopimuspäätöksen tyyppi """ class Meta(NameModel.Meta): verbose_name = pgettext_lazy("Model name", "Land use agreement decision type") verbose_name_plural = pgettext_lazy( "Model name", "Land use agreement decision types" ) class LandUseAgreementDecision(TimeStampedSafeDeleteModel): """ In Finnish: Maankäyttösopimuspäätös """ land_use_agreement = models.ForeignKey( LandUseAgreement, verbose_name=_("Land use agreement"), related_name="decisions", on_delete=models.PROTECT, ) # In Finnish: Diaarinumero reference_number = models.CharField( verbose_name=_("Reference number"), null=True, blank=True, max_length=255 ) # In Finnish: Päättäjä decision_maker = models.ForeignKey( DecisionMaker, verbose_name=_("Decision maker"), related_name="+", null=True, blank=True, on_delete=models.PROTECT, ) # In Finnish: Päätöspäivämäärä decision_date = models.DateField( verbose_name=_("Decision date"), null=True, blank=True ) # In Finnish: Pykälä section = models.CharField( verbose_name=_("Section"), null=True, blank=True, max_length=255 ) # In Finnish: Maankäyttösopimuspäätöksen tyyppi type = models.ForeignKey( LandUseAgreementDecisionType, verbose_name=_("Type"), related_name="+", null=True, blank=True, on_delete=models.PROTECT, ) # In Finnish: Selite description = models.TextField(verbose_name=_("Description"), null=True, blank=True) class Meta: verbose_name = pgettext_lazy("Model name", "Land use agreement decision") verbose_name_plural = pgettext_lazy( "Model name", "Land use agreement decisions" ) class LandUseAgreementDecisionConditionType(NameModel): """ In Finnish: Maankäyttösopimuspäätöksen ehtotyyppi """ class Meta(NameModel.Meta): verbose_name = pgettext_lazy( "Model name", "Land use agreement decision condition type" ) verbose_name_plural = pgettext_lazy( "Model name", "Land use agreement decision condition types" ) class LandUseAgreementDecisionCondition(TimeStampedSafeDeleteModel): """ In Finnish: 
Maankäyttösopimuspäätöksen ehto """ # In Finnish: Päätös decision = models.ForeignKey( LandUseAgreementDecision, verbose_name=_("Decision"), related_name="conditions", on_delete=models.PROTECT, ) # In Finnish: Ehtotyyppi type = models.ForeignKey( LandUseAgreementDecisionConditionType, verbose_name=_("Type"), related_name="+", null=True, blank=True, on_delete=models.PROTECT, ) # In Finnish: Valvontapäivämäärä supervision_date = models.DateField( verbose_name=_("Supervision date"), null=True, blank=True ) # In Finnish: Valvottu päivämäärä supervised_date = models.DateField( verbose_name=_("Supervised date"), null=True, blank=True ) # In Finnish: Selite description = models.TextField(verbose_name=_("Description"), null=True, blank=True) recursive_get_related_skip_relations = ["decision"] class Meta: verbose_name = pgettext_lazy( "Model name", "Land use agreement decision condition" ) verbose_name_plural = pgettext_lazy( "Model name", "Land use agreement decision conditions" ) class LandUseAgreementAddress(AbstractAddress): land_use_agreement = models.ForeignKey( LandUseAgreement, verbose_name=_("Land use agreement"), related_name="addresses", on_delete=models.CASCADE, ) # In Finnish: Ensisijainen osoite is_primary = models.BooleanField(verbose_name=_("Is primary?"), default=False) class Meta: verbose_name = pgettext_lazy("Model name", "Land use agreement address") verbose_name_plural = pgettext_lazy( "Model name", "Land use agreement addresses" ) class LandUseAgreementConditionFormOfManagement(NameModel): """ In Finnish: Maankäyttösopimuksen ehdon hallintamuoto """ class LandUseAgreementCondition(TimeStampedSafeDeleteModel): """ In Finnish: Maankäyttösopimuksen ehto """ land_use_agreement = models.ForeignKey( LandUseAgreement, verbose_name=_("Land use agreement"), related_name="conditions", on_delete=models.PROTECT, ) # In Finnish: Maankäyttösopimuksen ehdon tyyppi form_of_management = models.ForeignKey( LandUseAgreementConditionFormOfManagement, verbose_name=_("Form 
of management"), related_name="+", on_delete=models.PROTECT, ) # In Finnish: Velvoite k-m2 obligated_area = models.PositiveIntegerField( verbose_name=_("Obligated area (f-m2)") ) # In Finnish: Toteutunut k-m2 actualized_area = models.PositiveIntegerField( verbose_name=_("Actualized area (f-m2)") ) # In Finnish: Subventio subvention_amount = models.PositiveIntegerField(verbose_name=_("Subvention amount")) # In Finnish: Korvaus % compensation_pc = models.PositiveSmallIntegerField( verbose_name=_("Compensation percent") ) # In Finnish: Valvottava pvm supervision_date = models.DateField(verbose_name=_("Supervision date")) # In Finnish: Valvottu pvm supervised_date = models.DateField(verbose_name=_("Supervised date")) class LandUseAgreementLitigant(TimeStampedSafeDeleteModel): """ In Finnish: Osapuoli """ land_use_agreement = models.ForeignKey( LandUseAgreement, verbose_name=_("Land use agreement"), related_name="litigants", on_delete=models.CASCADE, ) # In Finnish: Viite reference = models.CharField( verbose_name=_("Section"), null=True, blank=True, max_length=255 ) contacts = models.ManyToManyField( Contact, through="leasing.LandUseAgreementLitigantContact", related_name="litigants", ) recursive_get_related_skip_relations = ["land_use_agreement", "contacts"] class Meta: verbose_name = pgettext_lazy("Model name", "Land use agreement litigant") verbose_name_plural = pgettext_lazy( "Model name", "Land use agreement litigants" ) class LandUseAgreementLitigantContact(TimeStampedSafeDeleteModel): land_use_agreement_litigant = models.ForeignKey( LandUseAgreementLitigant, verbose_name=_("Land use agreement litigant"), on_delete=models.PROTECT, ) # In Finnish: Asiakas contact = models.ForeignKey( Contact, verbose_name=_("Contact"), on_delete=models.PROTECT, ) # In Finnish: Kontaktin tyyppi type = EnumField( LandUseAgreementLitigantContactType, verbose_name=_("Contact type"), max_length=30, ) # In Finnish: Alkupäivämäärä start_date = models.DateField(verbose_name=_("Start 
date"), null=True, blank=True) # In Finnish: Loppupäivämäärä end_date = models.DateField(verbose_name=_("End date"), null=True, blank=True) recursive_get_related_skip_relations = ["land_use_agreement_litigant"] class Meta: verbose_name = pgettext_lazy("Model name", "Land use agreement litigant") verbose_name_plural = pgettext_lazy( "Model name", "Land use agreement litigants" ) def __str__(self): return "LandUseAgreementLitigantContact id: {} contact: {} period: {} - {}".format( self.id, self.contact, self.start_date, self.end_date ) class LandUseAgreementInvoice(TimeStampedSafeDeleteModel): """ In Finnish: Lasku """ land_use_agreement = models.ForeignKey( LandUseAgreement, verbose_name=_("Land use agreement"), related_name="invoices", on_delete=models.PROTECT, ) # In Finnish: Laskun numero number = models.PositiveIntegerField( verbose_name=_("Number"), unique=True, null=True, blank=True ) # In Finnish: Laskunsaaja recipient = models.ForeignKey( Contact, verbose_name=_("Recipient"), related_name="+", on_delete=models.PROTECT ) # In Finnish: Korvauksen määrä € compensation_amount = models.DecimalField( verbose_name=_("Compensation
<reponame>yanyongyu/FlappyBird<filename>src/main.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ This is the main program of the game. @Author: yanyongyu """ __author__ = "yanyongyu" __all__ = ["Game"] import sys import time import random import traceback import pygame import pygame.locals as gloc import bird import land import pipe import score import share import setting from double_dqn import DoubleDQN from utils import pixelCollision class Game(): def __init__(self): pygame.init() self.bg_size = self.width, self.height = 288, 512 self.screen = pygame.display.set_mode(self.bg_size) pygame.display.set_caption("Flappy Bird") icon = pygame.image.load("assets/images/flappy.ico") pygame.display.set_icon(icon) pygame.mixer.init() pygame.mixer.set_num_channels(4) self.clock = pygame.time.Clock() self.init_sound() self.init_pics() self.init_vars() # 加载声音 def init_sound(self): self.sound = {} self.sound_default = {} # 背景音乐 self.bgm = pygame.mixer.music.load("assets/sound/bgm.ogg") pygame.mixer.music.set_volume(0.4) pygame.mixer.music.play(-1) # 死亡声音 self.sound['die_sound'] = pygame.mixer.Sound("assets/sound/die.ogg") self.sound_default['die_sound'] = 0.4 # 撞击声音 self.sound['hit_sound'] = pygame.mixer.Sound("assets/sound/hit.ogg") self.sound_default['hit_sound'] = 0.4 # 得分声音 self.sound['point_sound'] = pygame.mixer.Sound("assets/sound/point.ogg") self.sound_default['point_sound'] = 0.4 # 拍翅膀声音 self.sound['wing_sound'] = pygame.mixer.Sound("assets/sound/wing.ogg") self.sound_default['wing_sound'] = 0.8 # 加载图片 def init_pics(self): # 加载背景与地面 self.bg_black = pygame.image.load("assets/images/bg_black.png")\ .convert_alpha() self.background_list = [ pygame.image.load("assets/images/bg_day.png").convert(), pygame.image.load("assets/images/bg_night.png").convert() ] self.land = land.Land(self.bg_size) # 游戏开始画面 # 游戏标题 self.title = pygame.image.load("assets/images/start/title.png")\ .convert_alpha() self.title_rect = self.title.get_rect() self.title_rect.left = (self.width - 
self.title_rect.width) // 2 self.title_rect.top = 80 # 开始按钮 self.start_image = pygame.image.load("assets/images/start/start.png")\ .convert_alpha() self.start_image_rect = self.start_image.get_rect() self.start_image_rect.left = (self.width - self.start_image_rect.width) // 2 self.start_image_rect.top = 240 # 排行榜按钮 self.score_image = pygame.image.load("assets/images/start/score.png")\ .convert_alpha() self.score_image_rect = self.score_image.get_rect() self.score_image_rect.left = (self.width - self.score_image_rect.width) // 2 self.score_image_rect.top = 310 # 设置按钮 self.setting_image = pygame.image.load("assets/images/start/setting.png")\ .convert_alpha() self.setting_image_rect = self.setting_image.get_rect() self.setting_image_rect.left = (self.width - self.setting_image_rect.width - 10) self.setting_image_rect.top = 10 # 排行画面 # 奖杯 self.cups = [ pygame.image.load( "assets/images/rank/gold_cup.png").convert_alpha(), pygame.image.load( "assets/images/rank/silver_cup.png").convert_alpha(), pygame.image.load( "assets/images/rank/brooze_cup.png").convert_alpha() ] self.cup_rects = [(50, 120), (50, 200), (50, 280)] # 字体 self.rank_font = pygame.font.Font("assets/font/hanyihaiyun.ttf", 24) # 设置画面 # 设置面板 self.board_image = pygame.image.load("assets/images/board.png")\ .convert_alpha() self.board_rect = self.board_image.get_rect() self.board_rect.top = 20 self.board_rect.left = (self.width - self.board_rect.width) // 2 # 左右箭头 self.array_right = pygame.image.load("assets/images/start/array.png")\ .convert_alpha() self.array_left = pygame.transform.rotate(self.array_right, 180) # 设置字体 self.setting_font = pygame.font.Font("assets/font/hanyihaiyun.ttf", 16) # 小鸟设置 self.random_text = self.setting_font.render("随机", True, (0, 0, 0)) self.custom_text = self.setting_font.render("自定义", True, (0, 0, 0)) # 随机小鸟设置 self.random_bird = [ pygame.image.load( "assets/images/birds/random_0.png").convert_alpha(), pygame.image.load( "assets/images/birds/random_1.png").convert_alpha(), 
pygame.image.load( "assets/images/birds/random_2.png").convert_alpha() ] # 自定义小鸟设置 self.body_text = self.setting_font.render("身体", True, (0, 0, 0)) self.mouth_text = self.setting_font.render("嘴", True, (0, 0, 0)) self.R_text = self.setting_font.render("R", True, (0, 0, 0)) self.G_text = self.setting_font.render("G", True, (0, 0, 0)) self.B_text = self.setting_font.render("B", True, (0, 0, 0)) self.customize_bird = setting.Customize_bird() # 背景设置 self.bg_text = self.setting_font.render("背景:", True, (0, 0, 0)) self.bg_text_list = [ self.setting_font.render("白天", True, (0, 0, 0)), self.setting_font.render("夜晚", True, (0, 0, 0)), self.random_text ] # 音量设置 self.volume_text = self.setting_font.render("音量:", True, (0, 0, 0)) self.sound_text = self.setting_font.render("音效:", True, (0, 0, 0)) # 游戏画面 # 准备图片 self.ready_image = pygame.image.load("assets/images/game/ready.png")\ .convert_alpha() self.ready_rect = self.ready_image.get_rect() self.ready_rect.left = (self.width - self.ready_rect.width) // 2 self.ready_rect.top = self.height * 0.12 # 点击开始图片 self.press_start_image = pygame.image.load("assets/images/game/tutorial.png")\ .convert_alpha() self.press_start_rect = self.press_start_image.get_rect() self.press_start_rect.left = (self.width - self.press_start_rect.width) // 2 self.press_start_rect.top = self.height * 0.5 # 暂停按钮 self.pause_image = pygame.image.load("assets/images/game/pause.png")\ .convert_alpha() self.pause_image_rect = self.pause_image.get_rect() self.pause_image_rect.left = (self.width - self.pause_image_rect.width - 10) self.pause_image_rect.top = 10 # 继续按钮 self.resume_image = pygame.image.load("assets/images/game/resume.png")\ .convert_alpha() self.resume_image_rect = self.resume_image.get_rect() self.resume_image_rect.left = (self.width - self.resume_image_rect.width - 10) self.resume_image_rect.top = 10 # 分享画面 # 复制到剪贴板 self.copy_image = pygame.image.load("assets/images/share/copy.png")\ .convert_alpha() self.copy_rect = self.copy_image.get_rect() 
self.copy_rect.left = (self.width - self.copy_rect.width) // 2 self.copy_rect.top = 110 # 保存至本地 self.save_image = pygame.image.load("assets/images/share/save.png")\ .convert_alpha() self.save_rect = self.save_image.get_rect() self.save_rect.left = (self.width - self.save_rect.width) // 2 self.save_rect.top = 200 # 使用邮件分享 self.email_image = pygame.image.load("assets/images/share/email.png")\ .convert_alpha() self.email_rect = self.email_image.get_rect() self.email_rect.left = (self.width - self.email_rect.width) // 2 self.email_rect.top = 290 # 返回 self.back_image = pygame.image.load("assets/images/share/back.png")\ .convert_alpha() self.back_rect = self.back_image.get_rect() self.back_rect.left = (self.width - self.back_rect.width) // 2 self.back_rect.top = 380 # 游戏结束画面 # 游戏结束图片 self.gameover_image = pygame.image.load("assets/images/end/gameover.png")\ .convert_alpha() self.gameover_image_rect = self.gameover_image.get_rect() self.gameover_image_rect.left = (self.width - self.gameover_image_rect.width) // 2 self.gameover_image_rect.top = self.height * 0.12 # 得分面版 self.score_panel = pygame.image.load("assets/images/end/score_panel.png")\ .convert_alpha() self.score_panel_rect = self.score_panel.get_rect() self.score_panel_rect.left = (self.width - self.score_panel_rect.width) // 2 self.score_panel_rect.top = self.height * 0.24 # 奖牌图片 self.white_medal = pygame.image.load("assets/images/end/medal0.png")\ .convert_alpha() self.gold_medal = pygame.image.load("assets/images/end/medal1.png")\ .convert_alpha() self.silver_medal = pygame.image.load("assets/images/end/medal2.png")\ .convert_alpha() self.brooze_medal = pygame.image.load("assets/images/end/medal3.png")\ .convert_alpha() self.medal_rect = (57, 165) # 新纪录图片 self.new_image = pygame.image.load("assets/images/end/new.png")\ .convert_alpha() self.new_rect = self.new_image.get_rect() self.new_rect.left, self.new_rect.top = 150, 139 # 再来一次图片 self.retry_image = pygame.image.load("assets/images/end/retry.png")\ 
.convert_alpha() self.retry_rect = self.retry_image.get_rect() self.retry_rect.left = (self.width - self.retry_rect.width) // 2 self.retry_rect.top = self.height * 0.5 # 分享按钮 self.share_image = pygame.image.load("assets/images/end/share.png")\ .convert_alpha() self.share_rect = self.share_image.get_rect() self.share_rect.left = (self.width - self.share_rect.width) // 2 self.share_rect.top = self.retry_rect.top + 30 # 主菜单按钮 self.menu_image = pygame.image.load("assets/images/end/menu.png")\ .convert_alpha() self.menu_rect = self.menu_image.get_rect() self.menu_rect.left = (self.width - self.menu_rect.width) // 2 self.menu_rect.top = self.retry_rect.top + 60 # 初始化游戏数据 def init_vars(self, ai: bool = False): # 读取设置 (self.bird_color, self.background_index, self.volume, self.sound_volume) = setting.read_config() # 设置音量 pygame.mixer.music.set_volume(self.volume * 0.4 / 100) for i in self.sound.keys(): self.sound[i].set_volume(self.sound_volume * self.sound_default[i] / 100) # 游戏分数 self.score = 0 # 背景 if self.background_index == 2: pipe.PIPE_INDEX = random.choice([0, 1]) elif self.background_index in [0, 1]: pipe.PIPE_INDEX = self.background_index self.background = self.background_list[pipe.PIPE_INDEX] # 是否开挂 self.ai = ai # 游戏开始画面 self.start = True # 排行榜画面 self.ranking = False self.value = None # 设置画面 self.setting = False self.mouse_down = False self.R1_set = setting.Setting_line(self.screen, rect=(64, 199), lenth=40, point=0.5, color=(255, 0, 0), height=3) self.G1_set = setting.Setting_line(self.screen, rect=(125, 199), lenth=40, point=0.5, color=(0, 255, 0), height=3) self.B1_set = setting.Setting_line(self.screen, rect=(189, 199), lenth=40, point=0.5, color=(0, 0, 255), height=3) self.R2_set = setting.Setting_line(self.screen, rect=(64, 249), lenth=40, point=0.5, color=(255, 0, 0), height=3) self.G2_set = setting.Setting_line(self.screen, rect=(125, 249), lenth=40, point=0.5, color=(0, 255, 0), height=3) self.B2_set = setting.Setting_line(self.screen, rect=(189, 249), 
lenth=40, point=0.5, color=(0, 0, 255), height=3) self.volume_set = setting.Setting_line(self.screen, rect=(105, 358), lenth=110, point=self.volume / 100, color=(230, 100, 0)) self.sound_set = setting.Setting_line(self.screen, rect=(105, 408), lenth=110, point=self.sound_volume / 100, color=(230, 100, 0)) # 游戏画面 self.bird = bird.Bird(self.bg_size, self.land.rect.top, self.bird_color, ai=ai) self.delay = 0 self.paused = False self.pressed = False self.upperpipes = [] self.lowerpipes = [] self.pipe_group = pygame.sprite.Group() if not ai: upipe, dpipe = pipe.get_pipe(self.bg_size, self.land.rect.top, self.width + 200) else: upipe, dpipe = pipe.get_pipe(self.bg_size, self.land.rect.top, self.width) self.upperpipes.append(upipe) self.lowerpipes.append(dpipe) self.pipe_group.add(upipe, dpipe) if not ai: upipe, dpipe = pipe.get_pipe(self.bg_size, self.land.rect.top, 1.5 * self.width + 200) else: upipe, dpipe = pipe.get_pipe(self.bg_size, self.land.rect.top, 1.5 * self.width) self.upperpipes.append(upipe) self.lowerpipes.append(dpipe) self.pipe_group.add(upipe, dpipe) # 游戏结束画面 self.recorded = False # 分享画面 self.share = False # 检测碰撞 def checkCrash(self): # if player crashes into ground if self.bird.rect.top + self.bird.rect.height\ >= self.land.rect.top + 1: return True playerRect = self.bird.rect for uPipe, lPipe in zip(self.upperpipes, self.lowerpipes): # upper and lower pipe rects uPipeRect = uPipe.rect lPipeRect = lPipe.rect # player and upper/lower pipe hitmasks pHitMask = self.bird.mask uHitmask = uPipe.mask lHitmask = lPipe.mask # if bird collided with upipe or lpipe uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask) lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask) if uCollide or lCollide: return True return False # 开始游戏 def play(self): while True: for event in pygame.event.get(): # 退出事件 if event.type == gloc.QUIT: pygame.quit() sys.exit() # 键盘事件 elif event.type == gloc.KEYDOWN: # 空格/上键 if event.key == gloc.K_SPACE or event.key 
== gloc.K_UP: # 游戏界面,小鸟存活,未暂停 # ----> 游戏开始/小鸟拍翅膀 if (not self.start and not self.ranking and not self.setting and not self.paused and self.bird.alive): self.pressed = True # 限制小鸟高度 if self.bird.rect.top > -2 * self.bird.rect.height: self.bird.fly() self.sound['wing_sound'].play() # P键/Esc键 elif event.key == gloc.K_p or event.key == gloc.K_ESCAPE: # 游戏界面,小鸟存活,未暂停 # ----> 游戏暂停/开始 if (not self.start and not self.ranking and not self.setting and self.pressed and self.bird.alive): self.paused = not self.paused # G键 elif event.key == gloc.K_g: if self.start and not hasattr(self, "ai_model"): self.init_vars(ai=True) self.ai_model = DoubleDQN() # 鼠标移动事件 elif event.type == gloc.MOUSEMOTION: # 设置界面 if self.setting and self.mouse_down: pos = pygame.mouse.get_pos() # RGB设置 # 身体 if pygame.Rect(64, 195, 40, 11).collidepoint(pos): self.body_rgb[0] = (pos[0] - 64) * 255 / 40 self.R1_set.set_point(self.body_rgb[0] / 255) self.customize_bird.seperate( self.body_rgb, self.mouth_rgb) self.bird = bird.Bird(self.bg_size, self.land.rect.top, self.bird_color) elif pygame.Rect(125, 195, 40, 11).collidepoint(pos): self.body_rgb[1] = (pos[0] - 125) * 255 / 40 self.G1_set.set_point(self.body_rgb[1] / 255) self.customize_bird.seperate( self.body_rgb, self.mouth_rgb) self.bird = bird.Bird(self.bg_size, self.land.rect.top, self.bird_color) elif pygame.Rect(189, 195, 40, 11).collidepoint(pos): self.body_rgb[2] = (pos[0] - 189) * 255 / 40 self.B1_set.set_point(self.body_rgb[2] / 255) self.customize_bird.seperate( self.body_rgb, self.mouth_rgb) self.bird = bird.Bird(self.bg_size, self.land.rect.top, self.bird_color) # 嘴 elif pygame.Rect(64, 245, 40, 11).collidepoint(pos): self.mouth_rgb[0] = (pos[0] - 64) * 255 / 40 self.R2_set.set_point(self.mouth_rgb[0] / 255) self.customize_bird.seperate( self.body_rgb, self.mouth_rgb) self.bird = bird.Bird(self.bg_size, self.land.rect.top, self.bird_color) elif pygame.Rect(125, 245, 40, 11).collidepoint(pos): self.mouth_rgb[1] = (pos[0] - 125) * 255 / 40 
self.G2_set.set_point(self.mouth_rgb[1]
header[kw].strip("-SIP")
                    header[kw] = val
                else:
                    continue

        return header

    def to_header_string(self, relax=None):
        """
        Identical to `to_header`, but returns a string containing the
        header cards.
        """
        return str(self.to_header(relax))

    def footprint_to_file(self, filename='footprint.reg', color='green',
                          width=2, coordsys=None):
        """
        Writes out a `ds9`_ style regions file. It can be loaded
        directly by `ds9`_.

        Parameters
        ----------
        filename : str, optional
            Output file name - default is ``'footprint.reg'``

        color : str, optional
            Color to use when plotting the line.

        width : int, optional
            Width of the region line.

        coordsys : str, optional
            Coordinate system. If not specified (default), the ``radesys``
            value is used. For all possible values, see
            http://ds9.si.edu/doc/ref/region.html#RegionFileFormat
        """
        comments = ('# Region file format: DS9 version 4.0 \n'
                    '# global color=green font="helvetica 12 bold '
                    'select=1 highlite=1 edit=1 move=1 delete=1 '
                    'include=1 fixed=0 source\n')

        coordsys = coordsys or self.wcs.radesys

        if coordsys not in ('PHYSICAL', 'IMAGE', 'FK4', 'B1950', 'FK5',
                            'J2000', 'GALACTIC', 'ECLIPTIC', 'ICRS', 'LINEAR',
                            'AMPLIFIER', 'DETECTOR'):
            raise ValueError("Coordinate system '{}' is not supported. A valid"
                             " one can be given with the 'coordsys' argument."
                             .format(coordsys))

        with open(filename, mode='w') as f:
            f.write(comments)
            f.write('{}\n'.format(coordsys))
            f.write('polygon(')
            # calc_footprint() returns an array of corner sky coordinates;
            # write them as a flat comma-separated list inside polygon(...).
            self.calc_footprint().tofile(f, sep=',')
            f.write(') # color={0}, width={1:d} \n'.format(color, width))

    # Private accessors for the data array sizes; kept as properties so the
    # stored list self._naxis stays the single source of truth.
    @property
    def _naxis1(self):
        return self._naxis[0]

    @_naxis1.setter
    def _naxis1(self, value):
        self._naxis[0] = value

    @property
    def _naxis2(self):
        return self._naxis[1]

    @_naxis2.setter
    def _naxis2(self, value):
        self._naxis[1] = value

    def _get_naxis(self, header=None):
        """Populate self._naxis from consecutive NAXISn header keywords.

        Always leaves at least two entries (padded with 0) so that
        _naxis1/_naxis2 are safe to read.
        """
        _naxis = []
        # A str/bytes header cannot be indexed by keyword, so skip it.
        if (header is not None and
                not isinstance(header, (str, bytes))):
            for naxis in itertools.count(1):
                try:
                    _naxis.append(header['NAXIS{}'.format(naxis)])
                except KeyError:
                    break
        if len(_naxis) == 0:
            _naxis = [0, 0]
        elif len(_naxis) == 1:
            _naxis.append(0)
        self._naxis = _naxis

    def printwcs(self):
        print(repr(self))

    def __repr__(self):
        '''
        Return a short description. Simply porting the behavior from
        the `printwcs()` method.
        '''
        description = ["WCS Keywords\n",
                       "Number of WCS axes: {0!r}".format(self.naxis)]
        # sfmt expands to ' : {0!r} {1!r} ...' with one slot per axis.
        sfmt = ' : ' + "".join(["{"+"{0}".format(i)+"!r} " for i in range(self.naxis)])

        keywords = ['CTYPE', 'CRVAL', 'CRPIX']
        values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix]
        for keyword, value in zip(keywords, values):
            description.append(keyword+sfmt.format(*value))

        # Print the linear transformation matrix, preferring PC + CDELT over CD.
        if hasattr(self.wcs, 'pc'):
            for i in range(self.naxis):
                s = ''
                for j in range(self.naxis):
                    s += ''.join(['PC', str(i+1), '_', str(j+1), ' '])
                s += sfmt
                description.append(s.format(*self.wcs.pc[i]))
            s = 'CDELT' + sfmt
            description.append(s.format(*self.wcs.cdelt))
        elif hasattr(self.wcs, 'cd'):
            for i in range(self.naxis):
                s = ''
                for j in range(self.naxis):
                    s += "".join(['CD', str(i+1), '_', str(j+1), ' '])
                s += sfmt
                description.append(s.format(*self.wcs.cd[i]))

        description.append('NAXIS : {}'.format(' '.join(map(str, self._naxis))))
        return '\n'.join(description)

    def get_axis_types(self):
        """
        Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
        but provides the information in a more Python-friendly format.

        Returns
        -------
        result : list of dicts
            Returns a list of dictionaries, one for each axis, each
            containing attributes about the type of that axis.

            Each dictionary has the following keys:

            - 'coordinate_type':

              - None: Non-specific coordinate type.
              - 'stokes': Stokes coordinate.
              - 'celestial': Celestial coordinate (including ``CUBEFACE``).
              - 'spectral': Spectral coordinate.

            - 'scale':

              - 'linear': Linear axis.
              - 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
              - 'non-linear celestial': Non-linear celestial axis.
              - 'non-linear spectral': Non-linear spectral axis.
              - 'logarithmic': Logarithmic axis.
              - 'tabular': Tabular axis.

            - 'group'

              - Group number, e.g. lookup table number

            - 'number'

              - For celestial axes:

                - 0: Longitude coordinate.
                - 1: Latitude coordinate.
                - 2: ``CUBEFACE`` number.

              - For lookup tables: the axis number in a multidimensional table.

        ``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
        generate an error.
        """
        if self.wcs is None:
            raise AttributeError(
                "This WCS object does not have a wcsprm object.")

        coordinate_type_map = {
            0: None,
            1: 'stokes',
            2: 'celestial',
            3: 'spectral'}

        scale_map = {
            0: 'linear',
            1: 'quantized',
            2: 'non-linear celestial',
            3: 'non-linear spectral',
            4: 'logarithmic',
            5: 'tabular'}

        result = []
        for axis_type in self.wcs.axis_types:
            subresult = {}

            # axis_type packs four decimal digits: TSGN
            # (coordinate Type, Scale, Group, Number).
            coordinate_type = (axis_type // 1000) % 10
            subresult['coordinate_type'] = coordinate_type_map[coordinate_type]

            scale = (axis_type // 100) % 10
            subresult['scale'] = scale_map[scale]

            group = (axis_type // 10) % 10
            subresult['group'] = group

            number = axis_type % 10
            subresult['number'] = number

            result.append(subresult)

        return result

    def __reduce__(self):
        """
        Support pickling of WCS objects. This is done by serializing
        to an in-memory FITS file and dumping that as a string.
        """
        hdulist = self.to_fits(relax=True)

        buffer = io.BytesIO()
        hdulist.writeto(buffer)

        # __WCS_unpickle__ rebuilds the object from the class, its __dict__
        # and the serialized FITS bytes.
        return (__WCS_unpickle__,
                (self.__class__, self.__dict__, buffer.getvalue(),))

    def dropaxis(self, dropax):
        """
        Remove an axis from the WCS.

        Parameters
        ----------
        wcs : `~astropy.wcs.WCS`
            The WCS with naxis to be chopped to naxis-1
        dropax : int
            The index of the WCS to drop, counting from 0
            (i.e., python convention, not FITS convention)

        Returns
        -------
        A new `~astropy.wcs.WCS` instance with one axis fewer
        """
        inds = list(range(self.wcs.naxis))
        inds.pop(dropax)

        # axis 0 has special meaning to sub
        # if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want
        # wcs.sub([1,2]) to get 'RA','DEC' back
        return self.sub([i+1 for i in inds])

    def swapaxes(self, ax0, ax1):
        """
        Swap axes in a WCS.

        Parameters
        ----------
        wcs : `~astropy.wcs.WCS`
            The WCS to have its axes swapped
        ax0 : int
        ax1 : int
            The indices of the WCS to be swapped, counting from 0 (i.e.,
            python convention, not FITS convention)

        Returns
        -------
        A new `~astropy.wcs.WCS` instance with the same number of axes, but
        two swapped
        """
        inds = list(range(self.wcs.naxis))
        inds[ax0], inds[ax1] = inds[ax1], inds[ax0]

        return self.sub([i+1 for i in inds])

    def reorient_celestial_first(self):
        """
        Reorient the WCS such that the celestial axes are first, followed by
        the spectral axis, followed by any others. Assumes at least celestial
        axes are present.
        """
        return self.sub([WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES])

    def slice(self, view, numpy_order=True):
        """
        Slice a WCS instance using a Numpy slice. The order of the slice
        should be reversed (as for the data) compared to the natural WCS
        order.

        Parameters
        ----------
        view : tuple
            A tuple containing the same number of slices as the WCS system.
            The ``step`` method, the third argument to a slice, is not
            presently supported.
        numpy_order : bool
            Use numpy order, i.e. slice the WCS so that an identical slice
            applied to a numpy array will slice the array and WCS in the same
            way. If set to `False`, the WCS will be sliced in FITS order,
            meaning the first slice will be applied to the *last* numpy index
            but the *first* WCS axis.

        Returns
        -------
        wcs_new : `~astropy.wcs.WCS`
            A new resampled WCS axis
        """
        if hasattr(view, '__len__') and len(view) > self.wcs.naxis:
            raise ValueError("Must have # of slices <= # of WCS axes")
        elif not hasattr(view, '__len__'):  # view MUST be an iterable
            view = [view]

        if not all(isinstance(x, slice) for x in view):
            raise ValueError("Cannot downsample a WCS with indexing. Use "
                             "wcs.sub or wcs.dropaxis if you want to remove "
                             "axes.")

        wcs_new = self.deepcopy()

        # SIP distortion, if present, carries its own reference pixel which
        # must be shifted/scaled in step with wcs.crpix.
        if wcs_new.sip is not None:
            sip_crpix = wcs_new.sip.crpix.tolist()

        for i, iview in enumerate(view):
            if iview.step is not None and iview.step < 0:
                raise NotImplementedError("Reversing an axis is not "
                                          "implemented.")

            if numpy_order:
                wcs_index = self.wcs.naxis - 1 - i
            else:
                wcs_index = i

            if iview.step is not None and iview.start is None:
                # Slice from "None" is equivalent to slice from 0 (but one
                # might want to downsample, so allow slices with
                # None,None,step or None,stop,step)
                iview = slice(0, iview.stop, iview.step)

            if iview.start is not None:
                if iview.step not in (None, 1):
                    crpix = self.wcs.crpix[wcs_index]
                    cdelt = self.wcs.cdelt[wcs_index]
                    # equivalently (keep this comment so you can compare eqns):
                    # wcs_new.wcs.crpix[wcs_index] =
                    #     (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
                    crp = ((crpix - iview.start - 1.)/iview.step
                           + 0.5 + 1./iview.step/2.)
                    wcs_new.wcs.crpix[wcs_index] = crp
                    if wcs_new.sip is not None:
                        sip_crpix[wcs_index] = crp
                    wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
                else:
                    wcs_new.wcs.crpix[wcs_index] -= iview.start
                    if wcs_new.sip is not None:
                        sip_crpix[wcs_index] -= iview.start

            try:
                # range requires integers but the other attributes can also
                # handle arbitary values, so this needs to be in a try/except.
                nitems = len(builtins.range(self._naxis[wcs_index])[iview])
            except TypeError as exc:
                if 'indices must be integers' not in str(exc):
                    raise
                # NOTE(review): "indix ... is no integer" in this message looks
                # like a typo for "index ... is not an integer".
                warnings.warn("NAXIS{0} attribute is not updated because at "
                              "least one indix ('{1}') is no integer."
                              "".format(wcs_index, iview), AstropyUserWarning)
            else:
                wcs_new._naxis[wcs_index] = nitems

        if wcs_new.sip is not None:
            wcs_new.sip = Sip(self.sip.a, self.sip.b, self.sip.ap, self.sip.bp,
                              sip_crpix)

        return wcs_new

    def __getitem__(self, item):
        # "getitem" is
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT

import functools

import torch
import torch.nn as nn
import torch.nn.functional as F

from . import BigGAN_layers as layers
from networks.utils import init_weights, _len2mask, make_one_hot

# Architectures for G
# Attention is passed in in the format '32_64' to mean applying an attention
# block at both resolution 32x32 and 64x64. Just '64' will apply at 64x64.


def G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
    """Return the per-resolution architecture dict for the generator.

    Only the 32-pixel-high variant is defined here; `ksize` and `dilation`
    are accepted but not read in this function.
    """
    arch = {}
    arch[32] = {'in_channels': [ch * item for item in [4, 2, 1]],
                'out_channels': [ch * item for item in [2, 1, 1]],
                # (height, width) scale factors per GBlock; the first block
                # only upsamples vertically.
                'upsample': [(2,1), (2,2), (2,2)],
                'resolution': [8, 16, 16],
                # Map resolution -> bool: attach an attention block there?
                'attention': {2 ** i: (2 ** i in [int(item) for item in attention.split('_')])
                              for i in range(3, 6)},
                }

    return arch


class Generator(nn.Module):
    def __init__(self, G_ch=64, style_dim=128, bottom_width=4, bottom_height=4,
                 resolution=128, G_kernel_size=3, G_attn='64', n_class=1000,
                 num_G_SVs=1, num_G_SV_itrs=1,
                 G_shared=True, shared_dim=0, no_hier=False,
                 cross_replica=False, mybn=False,
                 G_activation=nn.ReLU(inplace=False),
                 BN_eps=1e-5, SN_eps=1e-12, G_fp16=False,
                 init='ortho', G_param='SN', norm_style='bn', bn_linear='embed',
                 input_nc=3, one_hot=False, first_layer=False, one_hot_k=1):
        """BigGAN-style generator.

        NOTE(review): G_activation's default nn.ReLU module is created once at
        function-definition time and shared between instances — harmless for a
        stateless ReLU, but worth knowing.
        """
        super(Generator, self).__init__()
        dim_z = style_dim
        self.name = 'G'
        # Use class only in first layer
        self.first_layer = first_layer
        # Use one hot vector representation for input class
        self.one_hot = one_hot
        # Use one hot k vector representation for input class if k is larger
        # than 0. If it's 0, simply use the class number and not a k-hot encoding.
        self.one_hot_k = one_hot_k
        # Channel width multiplier
        self.ch = G_ch
        # Dimensionality of the latent space
        self.dim_z = dim_z
        # The initial width dimensions
        self.bottom_width = bottom_width
        # The initial height dimension
        self.bottom_height = bottom_height
        # Resolution of the output
        self.resolution = resolution
        # Kernel size?
        self.kernel_size = G_kernel_size
        # Attention?
        self.attention = G_attn
        # number of classes, for use in categorical conditional generation
        self.n_classes = n_class
        # Use shared embeddings?
        self.G_shared = G_shared
        # Dimensionality of the shared embedding? Unused if not using G_shared
        self.shared_dim = shared_dim if shared_dim > 0 else dim_z
        # Hierarchical latent space?
        self.hier = not no_hier
        # Cross replica batchnorm?
        self.cross_replica = cross_replica
        # Use my batchnorm?
        self.mybn = mybn
        # nonlinearity for residual blocks
        self.activation = G_activation
        # Initialization style
        self.init = init
        # Parameterization style
        self.G_param = G_param
        # Normalization style
        self.norm_style = norm_style
        # Epsilon for BatchNorm?
        self.BN_eps = BN_eps
        # Epsilon for Spectral Norm?
        self.SN_eps = SN_eps
        # fp16?
        self.fp16 = G_fp16
        # Architecture dict
        self.arch = G_arch(self.ch, self.attention)[resolution]
        self.bn_linear = bn_linear

        # If using hierarchical latents, adjust z
        if self.hier:
            # Number of places z slots into
            self.num_slots = len(self.arch['in_channels']) + 1
            self.z_chunk_size = (self.dim_z // self.num_slots)
            # Recalculate latent dimensionality for even splitting into chunks
            self.dim_z = self.z_chunk_size * self.num_slots
        else:
            self.num_slots = 1
            self.z_chunk_size = 0

        # Which convs, batchnorms, and linear layers to use
        if self.G_param == 'SN':
            self.which_conv = functools.partial(layers.SNConv2d,
                                                kernel_size=3, padding=1,
                                                num_svs=num_G_SVs,
                                                num_itrs=num_G_SV_itrs,
                                                eps=self.SN_eps)
            self.which_linear = functools.partial(layers.SNLinear,
                                                  num_svs=num_G_SVs,
                                                  num_itrs=num_G_SV_itrs,
                                                  eps=self.SN_eps)
        else:
            self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
            self.which_linear = nn.Linear

        # We use a non-spectral-normed embedding here regardless;
        # For some reason applying SN to G's embedding seems to randomly cripple G
        if one_hot:
            self.which_embedding = functools.partial(layers.SNLinear,
                                                     num_svs=num_G_SVs,
                                                     num_itrs=num_G_SV_itrs,
                                                     eps=self.SN_eps)
        else:
            self.which_embedding = nn.Embedding
        bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared
                     else self.which_embedding)
        if self.bn_linear=='SN':
            bn_linear = functools.partial(self.which_linear, bias=False)
        if self.G_shared:
            input_size = self.shared_dim + self.z_chunk_size
            # NOTE(review): this branch leaves self.which_bn unset — presumably
            # G_shared=True is not used with this generator; confirm before use.
        elif self.hier:
            if self.first_layer:
                input_size = self.z_chunk_size
            else:
                input_size = self.n_classes + self.z_chunk_size
            # Class-conditional batchnorm, conditioned on `input_size` features.
            self.which_bn = functools.partial(layers.ccbn,
                                              which_linear=bn_linear,
                                              cross_replica=self.cross_replica,
                                              mybn=self.mybn,
                                              input_size=input_size,
                                              norm_style=self.norm_style,
                                              eps=self.BN_eps)
        else:
            input_size = self.n_classes
            self.which_bn = functools.partial(layers.bn,
                                              cross_replica=self.cross_replica,
                                              mybn=self.mybn,
                                              eps=self.BN_eps)

        # Prepare model
        # If not using shared embeddings, self.shared is just a passthrough
        self.shared = (self.which_embedding(self.n_classes, self.shared_dim) if G_shared
                       else layers.identity())
        # First linear layer
        # The parameters for the first linear layer depend on the different
        # input variations.
        if self.first_layer:
            # print('one_hot:{} one_hot_k:{}'.format(self.one_hot, self.one_hot_k) )
            if self.one_hot:
                self.linear = self.which_linear(self.dim_z // self.num_slots + self.n_classes,
                                                self.arch['in_channels'][0] * (self.bottom_width * self.bottom_height))
            else:
                self.linear = self.which_linear(self.dim_z // self.num_slots + 1,
                                                self.arch['in_channels'][0] * (self.bottom_width * self.bottom_height))
            # NOTE(review): the following tests are independent of `one_hot`
            # and overwrite self.linear chosen above — verify the intended
            # precedence between one_hot and one_hot_k before changing.
            if self.one_hot_k==1:
                self.linear = self.which_linear((self.dim_z // self.num_slots) * self.n_classes,
                                                self.arch['in_channels'][0] * (self.bottom_width * self.bottom_height))
            if self.one_hot_k>1:
                self.linear = self.which_linear(self.dim_z // self.num_slots + self.n_classes*self.one_hot_k,
                                                self.arch['in_channels'][0] * (self.bottom_width * self.bottom_height))
            if self.one_hot_k == 0:
                self.linear = self.which_linear(self.n_classes,
                                                self.arch['in_channels'][0] * (self.bottom_width * self.bottom_height))
        else:
            self.linear = self.which_linear(self.dim_z // self.num_slots,
                                            self.arch['in_channels'][0] * (self.bottom_width * self.bottom_height))

        # self.blocks is a doubly-nested list of modules, the outer loop intended
        # to be over blocks at a given resolution (resblocks and/or self-attention)
        # while the inner loop is over a given block
        self.blocks = []
        for index in range(len(self.arch['out_channels'])):
            if 'kernel1' in self.arch.keys():
                # Per-block kernel sizes; pad only when the kernel is wider than 1.
                padd1 = 1 if self.arch['kernel1'][index]>1 else 0
                padd2 = 1 if self.arch['kernel2'][index]>1 else 0
                conv1 = functools.partial(layers.SNConv2d,
                                          kernel_size=self.arch['kernel1'][index],
                                          padding=padd1,
                                          num_svs=num_G_SVs,
                                          num_itrs=num_G_SV_itrs,
                                          eps=self.SN_eps)
                conv2 = functools.partial(layers.SNConv2d,
                                          kernel_size=self.arch['kernel2'][index],
                                          padding=padd2,
                                          num_svs=num_G_SVs,
                                          num_itrs=num_G_SV_itrs,
                                          eps=self.SN_eps)
                self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index],
                                               out_channels=self.arch['out_channels'][index],
                                               which_conv1=conv1,
                                               which_conv2=conv2,
                                               which_bn=self.which_bn,
                                               activation=self.activation,
                                               upsample=(functools.partial(F.interpolate,
                                                                           scale_factor=self.arch['upsample'][index])
                                                         if index < len(self.arch['upsample']) else None))]]
            else:
                self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index],
                                               out_channels=self.arch['out_channels'][index],
                                               which_conv1=self.which_conv,
                                               which_conv2=self.which_conv,
                                               which_bn=self.which_bn,
                                               activation=self.activation,
                                               upsample=(functools.partial(F.interpolate,
                                                                           scale_factor=self.arch['upsample'][index])
                                                         if index < len(self.arch['upsample']) else None))]]

            # If attention on this block, attach it to the end
            # print('index ', index, self.arch['resolution'][index])
            if self.arch['attention'][self.arch['resolution'][index]]:
                print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])
                self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]

        # Turn self.blocks into a ModuleList so that it's all properly registered.
        self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])

        # output layer: batchnorm-relu-conv.
        # Consider using a non-spectral conv here
        self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],
                                                    cross_replica=self.cross_replica,
                                                    mybn=self.mybn),
                                          self.activation,
                                          self.which_conv(self.arch['out_channels'][-1], input_nc))

        # Initialize weights. Optionally skip init for testing.
        if self.init != 'none':
            self = init_weights(self, self.init)

    # Note on this forward function: we pass in a y vector which has
    # already been passed through G.shared to enable easy class-wise
    # interpolation later. If we passed in the one-hot and then ran it through
    # G.shared in this forward function, it would be harder to handle.
def forward(self, z, y, y_lens): # If hierarchical, concatenate zs and ys if self.hier: zs = torch.split(z, self.z_chunk_size, 1) z = zs[0] if len(y.shape)<2: y = y.unsqueeze(1) if self.first_layer: ys = zs[1:] else: ys = [torch.cat([y.type(torch.float32), item], 1) for item in zs[1:]] else: ys = [y] * len(self.blocks) # This is the change we made to the Big-GAN generator architecture. # The input goes into classes go into the first layer only. if self.first_layer: if self.one_hot: # y = F.one_hot(y, self.n_classes).float().to(y.device) y = make_one_hot(y, y_lens, self.n_classes).float().to(y.device) # Each characters filter is modulated by the noise vector if self.one_hot_k==1: z = z.unsqueeze(1).repeat(1, y.shape[1], y.shape[2]) * torch.repeat_interleave(y, z.shape[1], 2) # print('z.shape ', z.shape) # if self.training: # z = z.unsqueeze(1).repeat(1, y.shape[1], y.shape[2]) * torch.repeat_interleave(y, z.shape[1], 2) # else: # z = torch.randn(z.shape[0], y.shape[1], z.shape[1]).repeat(1, 1, y.shape[2]).to(z.device) * \ # torch.repeat_interleave(y, z.shape[1], 2) # Each character's filter is a one-hot k (for N char alphabet - # the entire vector is N*k long and the k values in the specific character location are equal to 1. # The filters are concatenated to the noise vector. elif self.one_hot_k>1: y = torch.repeat_interleave(y, self.one_hot_k, 2) z = torch.cat((z.unsqueeze(1).repeat(1, y.shape[1], 1), y), 2) elif self.one_hot_k == 0: z = y # only the noise vector is used as an input else: z = torch.cat((z.unsqueeze(1).repeat(1, y.shape[1], 1), y), 2) # First linear layer # print('self.linear', self.linear) # print('z', z.abs().mean([1, 2])) # print('z00', z[0, 0].cpu().numpy().tolist()) h = self.linear(z) # print('h.shape', h.shape) # print('h.shape', h.abs().mean([1, 2])) # Reshape - when y is not a single class value but rather an array of classes, the reshape is needed to create # a separate vertical patch for each input. 
if self.first_layer: # correct reshape h = h.view(h.size(0), h.shape[1] * self.bottom_width, self.bottom_height, -1) h = h.permute(0, 3, 2, 1) else: h = h.view(h.size(0), -1, self.bottom_width, self.bottom_height) # Loop over blocks for index, blocklist in enumerate(self.blocks): # Second inner loop in case block has multiple layers for block in blocklist: h = block(h, ys[index]) # Apply batchnorm-relu-conv-tanh at output output = torch.tanh(self.output_layer(h)) # Mask blanks if not self.training: out_lens = y_lens * output.size(-2) // 2 mask = _len2mask(out_lens.int(), output.size(-1), torch.float32).to(z.device).detach() mask = mask.unsqueeze(1).unsqueeze(1) output = output *
#!/usr/bin/python3
# This was:
#! /usr/bin/env python

# The labyrinthine abbey library in Python - October 3, 2013

import random
#import sys
import VersionSpecificUtilities


class RoomInfo:
    """A single room in the labyrinth, identified by (level, room) coordinates."""

    def __init__(self, level, room):
        self.levelNumber = level
        self.roomNumber = room

    # The key deliberately contains only the two immutable coordinates so the
    # object stays hashable; see
    # http://stackoverflow.com/questions/2909106/python-whats-a-correct-and-good-way-to-implement-hash
    def __key(self):
        return (self.levelNumber, self.roomNumber)

    def __eq__(self, other):
        return type(self) == type(other) and self.__key() == other.__key()

    def __hash__(self):
        return hash(self.__key())

    def ToString(self):
        """Render the room as "(level, room)"."""
        return "(" + str(self.levelNumber) + ", " + str(self.roomNumber) + ")"

    def GeneratePossibleNeighboursOnLevel(self, generator, newLevel):
        """List the rooms on newLevel that this room could be connected to."""
        lastRoom = generator.numberOfRoomsPerLevel - 1

        # The last room of a level may connect to every other room on the
        # adjacent level.
        if self.roomNumber == lastRoom:
            return [RoomInfo(newLevel, i) for i in range(0, lastRoom)]

        # Any other room may connect to its two "ring" neighbours (mod the
        # number of ordinary rooms) plus the special last room.
        return [RoomInfo(newLevel, (self.roomNumber + 1) % lastRoom),
                RoomInfo(newLevel, (self.roomNumber + generator.numberOfRoomsPerLevel - 2) % lastRoom),
                RoomInfo(newLevel, lastRoom)]

    def GeneratePossibleNeighbours(self, generator):
        """List candidate neighbours on the levels directly above and below."""
        below = (self.GeneratePossibleNeighboursOnLevel(generator, self.levelNumber - 1)
                 if self.levelNumber > 0 else [])
        above = (self.GeneratePossibleNeighboursOnLevel(generator, self.levelNumber + 1)
                 if self.levelNumber < generator.numberOfLevels - 1 else [])
        return below + above


class LabyrinthGenerator:
    """Builds a random labyrinth of numberOfLevels x numberOfRoomsPerLevel rooms."""

    def __init__(self, numberOfLevels, numberOfRoomsPerLevel):
        if numberOfLevels < 2 or numberOfRoomsPerLevel < 4:
            raise Exception('LabyrinthGenerator.__init__(): Invalid parameter(s).')

        self.numberOfLevels = numberOfLevels
        self.numberOfRoomsPerLevel = numberOfRoomsPerLevel
        self.numberOfExtraConnections = 0
        self.numberOfExtraConnectionsAdded = 0
        self.extraConnections = []   # list of (RoomInfo, RoomInfo) pairs
        self.rooms = []              # every RoomInfo in the labyrinth
        self.roomLabels = {}         # RoomInfo -> int "blob" label
        self.connections = {}        # RoomInfo -> list of connected RoomInfo
        self.openList = []           # rooms still eligible for new connections
        self.numberOfDifferentLabels = 0
        self.roomGoal = None         # set later by PrintLongestPath()
        self.booksInRooms = {}       # RoomInfo -> book title
        self.numberOfAttemptsToRefactor = 0
        self.maximumNumberOfAttemptsToRefactor = 100

    #def RoomListContainsRoom(self, roomList, room): # TODO: Deprecate and delete this.
# return any(room.levelNumber == room2.levelNumber and room.roomNumber == room2.roomNumber for room2 in roomList) def FindConflictingConnections(self, room1, room2): # Test 0: Room labels ("blob numbers"). #if (roomLabels[room1] == roomLabels[room2]) # return true; // There is a conflict. # Test 1: Room 3 must not be connected to room 4. # 4 2 # \/ # /\ # 1 3 room3 = RoomInfo(room2.levelNumber, room1.roomNumber) room4 = RoomInfo(room1.levelNumber, room2.roomNumber) #if self.RoomListContainsRoom(self.connections[room3], room4): if room4 in self.connections[room3]: return True # Test 2: Room 3 must not be connected to room 1. # 3 # \ # 1 # / # 2 room3 = RoomInfo(2 * room1.levelNumber - room2.levelNumber, room2.roomNumber) #if self.connections.has_key(room3) and self.RoomListContainsRoom(self.connections[room3], room1): #if self.connections.has_key(room3) and room1 in self.connections[room3]: if room3 in self.connections[room1]: return True # Test 3: Room 3 must not be connected to room 2. # 3 # \ # 2 # / # 1 room3 = RoomInfo(2 * room2.levelNumber - room1.levelNumber, room1.roomNumber) #if self.connections.has_key(room3) and self.RoomListContainsRoom(self.connections[room3], room2): #if self.connections.has_key(room3) and room2 in self.connections[room3]: if room3 in self.connections[room2]: return True return False # There is no conflict. 
def FindUnusedLabel(self): result = 0 labels = self.roomLabels.values() while result in labels: result += 1 return result def PropagateNewLabel(self, room, newLabel, addRoomsToOpenList): openListLocal = [] #new Stack<RoomInfo>(); closedList = [] #new HashSet<RoomInfo>(); openListLocal.append(room) while len(openListLocal) > 0: room = openListLocal.pop() self.roomLabels[room] = newLabel closedList.append(room) if addRoomsToOpenList and not (room in self.openList): self.openList.append(room) for room2 in self.connections[room]: #if (not self.RoomListContainsRoom(openListLocal, room2)) and (not self.RoomListContainsRoom(closedList, room2)): if (not room2 in openListLocal) and (not room2 in closedList): openListLocal.append(room2) def FindPossibleNeighboursWithDifferentLabels(self): #(out RoomInfo room1, out RoomInfo room2) openListLocal = list(room for room in self.rooms) #new List<RoomInfo>(rooms); # Clone the "rooms" list. while len(openListLocal) > 0: room1 = openListLocal[random.randint(0, len(openListLocal) - 1)] openListLocal.remove(room1) possibleNeighbours = room1.GeneratePossibleNeighbours(self) while len(possibleNeighbours) > 0: room2 = possibleNeighbours[random.randint(0, len(possibleNeighbours) - 1)] possibleNeighbours.remove(room2) if self.roomLabels[room1] != self.roomLabels[room2]: return (room1, room2) raise Exception("Unable to find possible neighbours with different labels.") def RemoveOneConnection(self, room1, room2): #self.connections[room1] = list(room for room in self.connections[room1] if room.levelNumber != room2.levelNumber or room.roomNumber != room2.roomNumber) self.connections[room1] = list(room for room in self.connections[room1] if room != room2) def RemoveBothConnection(self, room1, room2): self.RemoveOneConnection(room1, room2) self.RemoveOneConnection(room2, room1) def Refactor(self): # The print statement is replaced by the print() function in Python 3 #print "Refactoring..." 
# This worked in Python 2 print("Refactoring...") room1, room2 = self.FindPossibleNeighboursWithDifferentLabels() # Resolve the conflicts that are preventing a connection between room1 and room2. # Test 1: Room 3 must not be connected to room 4. # 4 2 # \/ # /\ # 1 3 room3 = RoomInfo(room2.levelNumber, room1.roomNumber) room4 = RoomInfo(room1.levelNumber, room2.roomNumber) #if self.RoomListContainsRoom(self.connections[room3], room4): if room4 in self.connections[room3]: print("Found a Type 1 conflict.") #self.connections[room3].remove(room4) #self.connections[room4].remove(room3) self.RemoveBothConnection(room3, room4) self.PropagateNewLabel(room3, self.FindUnusedLabel(), True) self.PropagateNewLabel(room4, self.FindUnusedLabel(), True) # Test 2: Room 3 must not be connected to room 1. # 3 # \ # 1 # / # 2 room3 = RoomInfo(2 * room1.levelNumber - room2.levelNumber, room2.roomNumber) #if self.connections.has_key(room3) and self.RoomListContainsRoom(self.connections[room3], room1): #if self.connections.has_key(room3) and room1 in self.connections[room3]: if room3 in self.connections[room1]: print("Found a Type 2 conflict.") #self.connections[room1].remove(room3) #self.connections[room3].remove(room1) self.RemoveBothConnection(room1, room3) self.PropagateNewLabel(room3, self.FindUnusedLabel(), True) # Test 3: Room 3 must not be connected to room 2. # 3 # \ # 2 # / # 1 room3 = RoomInfo(2 * room2.levelNumber - room1.levelNumber, room1.roomNumber) #if self.connections.has_key(room3) and self.RoomListContainsRoom(self.connections[room3], room2): #if self.connections.has_key(room3) and room2 in self.connections[room3]: if room3 in self.connections[room2]: print("Found a Type 3 conflict.") #self.connections[room2].remove(room3) #self.connections[room3].remove(room2) self.RemoveBothConnection(room2, room3) self.PropagateNewLabel(room3, self.FindUnusedLabel(), True) # Connect room1 and room2. 
self.PropagateNewLabel(room2, self.roomLabels[room1], False) self.connections[room1].append(room2) self.connections[room2].append(room1) self.numberOfDifferentLabels = len(set(self.roomLabels.values())) def FinalValidityCheck(self): self.PropagateNewLabel(RoomInfo(0, 0), self.FindUnusedLabel(), False) if len(set(self.roomLabels.values())) > 1: raise Exception("The labyrinth is in multiple blobs.") print("The labyrinth is a single blob.") #def AddExtraConnections(self): def Generate(self): label = 0 self.numberOfDifferentLabels = self.numberOfLevels * self.numberOfRoomsPerLevel for l in range(0, self.numberOfLevels): for r in range(0, self.numberOfRoomsPerLevel): room = RoomInfo(l, r) self.rooms.append(room) self.roomLabels[room] = label label += 1 self.connections[room] = [] #new List<RoomInfo>(); self.openList.append(room) while self.numberOfDifferentLabels > 1: if len(self.openList) == 0: if self.numberOfAttemptsToRefactor >= self.maximumNumberOfAttemptsToRefactor: raise Exception("Attempted to refactor " + self.numberOfAttemptsToRefactor + " times; all failed.") self.numberOfAttemptsToRefactor += 1 self.Refactor() room1 = self.openList[random.randint(0, len(self.openList) - 1)] possibleNeighbours = room1.GeneratePossibleNeighbours(self) room2 = None while room2 == None and len(possibleNeighbours) > 0: room2 = possibleNeighbours[random.randint(0, len(possibleNeighbours) - 1)] #print "room1:", room1.ToString(), "room2: ", room2.ToString() if self.roomLabels[room1] != self.roomLabels[room2] and not self.FindConflictingConnections(room1, room2): break possibleNeighbours.remove(room2) room2 = None if room2 == None: self.openList.remove(room1) continue # We have now chosen room1 and room2. self.connections[room1].append(room2) self.connections[room2].append(room1) # Join the two "blobs" to which the two rooms belong, by modifying room labels. 
label1 = self.roomLabels[room1] label2 = self.roomLabels[room2] minLabel = min(label1, label2) maxLabel = max(label1, label2) for room in self.rooms: if self.roomLabels[room] == maxLabel: self.roomLabels[room] = minLabel self.numberOfDifferentLabels -= 1 #if self.numberOfExtraConnections > 0: # self.AddExtraConnections() self.Report() self.PrintLongestPath() # This sets roomGoal. self.PlaceBooksInRooms() # This uses roomGoal. def Report(self): for room in self.rooms: for otherRoom in self.connections[room]: print(room.ToString(), "to", otherRoom.ToString()) #if (numberOfExtraConnections > 0) # foreach (var extraConnection in extraConnections) # Console.WriteLine("Extra connection added: {0} to {1}.", extraConnection.Key, extraConnection.Value); # Console.WriteLine("{0} extra connection(s) requested; {1} added.", numberOfExtraConnections, numberOfExtraConnectionsAdded); if self.numberOfAttemptsToRefactor > 0: print("The labyrinth was refactored", self.numberOfAttemptsToRefactor, "time(s).") self.FinalValidityCheck() def FindShortestPathBetweenRooms(self, room, roomGoalLocal): openListLocal = [room] #new Queue<RoomInfo>(); paths = {room: [room]} #new Dictionary<RoomInfo, List<RoomInfo>>(); #openListLocal.Enqueue(room); #paths[room] = new List<RoomInfo>() { room }; #if room.Equals(roomGoalLocal): if room == roomGoalLocal: return paths[room] while len(openListLocal) > 0: room = openListLocal.pop(0) for room2 in self.connections[room]: if not (room2 in paths.keys()): # paths.Keys is essentially the union of openListLocal and closedList. openListLocal.append(room2) paths[room2] = list(r for r in paths[room]) paths[room2].append(room2) #if room2.Equals(roomGoalLocal): if room2 == roomGoalLocal: return paths[room2] # Here, room is the last room to be dequeued (and thus the last room to be enqueued). 
return paths[room] def FindLongestPathFromRoom(self, room): return self.FindShortestPathBetweenRooms(room, None) def PrintLongestPath(self): path1 = self.FindLongestPathFromRoom(RoomInfo(self.numberOfLevels - 1, self.numberOfRoomsPerLevel - 1)) longestPath = self.FindLongestPathFromRoom(path1[len(path1) - 1]) print() #Console.WriteLine("The longest path contains {0} rooms:", longestPath.Count); #Console.WriteLine(string.Join(" to ", longestPath)); print("The longest path contains", len(longestPath), "rooms.") self.roomGoal = longestPath[len(longestPath) - 1] pathFromOriginToGoal = self.FindShortestPathBetweenRooms(RoomInfo(0, 0), self.roomGoal) print() #Console.WriteLine("Aristotle's Second Book of the Poetics is in Room {0}.", roomGoal); #Console.WriteLine(); #Console.WriteLine("The path from Room (0, 0) to Room {0} contains {1} rooms:", roomGoal, pathFromOriginToGoal.Count); #Console.WriteLine(string.Join(" to ", pathFromOriginToGoal)); print("The path from Room (0, 0) to the goal contains", len(pathFromOriginToGoal), "rooms.") def PlaceBooksInRooms(self): books = [ "The First Book of the Poetics of Aristotle", "The Iliad by Homer", "The Odyssey by Homer", "The Republic by Plato", "Categories by Aristotle", "Physics by Aristotle", "Nicomachean Ethics by Aristotle", "The Aeneid by Virgil", "The Old Testament in Hebrew", "The New Testament in Greek", "Strong's Hebrew Dictionary", "Strong's Greek Dictionary" ] openListLocal = list(room for room in self.rooms) numBooksPlaced = 1 self.booksInRooms[self.roomGoal] = "The Second Book of the Poetics of Aristotle" openListLocal.remove(self.roomGoal) while numBooksPlaced * 3 < len(self.rooms) and len(books) > 0: roomHashCode = openListLocal[random.randint(0, len(openListLocal) - 1)] book = books[random.randint(0, len(books) - 1)] openListLocal.remove(roomHashCode) books.remove(book) self.booksInRooms[roomHashCode] = book numBooksPlaced += 1 #print "The books have been placed." 
def ReportProximityToJorge(self, room, JorgesRoom): path = self.FindShortestPathBetweenRooms(room, JorgesRoom) distance = len(path) - 1 if distance == 0: print("* You and the Venerable Jorge are in the same room! *") print("'Good evening, Venerable Jorge.'") elif distance <= 2: print("The Venerable Jorge is very near.") elif distance <= 4: print("The Venerable Jorge is near.") def ConstructJorgesPath(self, JorgesRoom): #RoomInfo JorgesGoal; # ThAW 2013/10/04 : There appears to be no do...while loop in Python. while True: JorgesGoal = self.rooms[random.randint(0, len(self.rooms) - 1)] #if not JorgesGoal.Equals(JorgesRoom): if JorgesGoal != JorgesRoom: break return self.FindShortestPathBetweenRooms(JorgesRoom, JorgesGoal) def NavigateLabyrinth(self): roomsVisited = [] #new HashSet<RoomInfo>(); room = RoomInfo(0, 0) #Console.WriteLine("Selecting a room for Jorge out of {0} rooms.", rooms.Count); JorgesRoom = self.rooms[random.randint(0, len(self.rooms) - 1)] JorgesPath = self.ConstructJorgesPath(JorgesRoom) JorgesPathIndex = 0 while True: #roomsVisited.Add(room); #if not self.RoomListContainsRoom(roomsVisited, room): if not room in roomsVisited: roomsVisited.append(room) print() print("You are now in room " + room.ToString() + ".") #Console.WriteLine("The Venerable Jorge is now in room {0}.", JorgesRoom); #Console.WriteLine("Jorge's destination is room {0}", JorgesPath[JorgesPath.Count - 1]); self.ReportProximityToJorge(room, JorgesRoom) #if self.booksInRooms.has_key(room): # Python 2 if room in self.booksInRooms.keys(): # has_key() was removed from the dictionary class in Python 3 print("You have found the book '" + self.booksInRooms[room] + "'.") #if room.Equals(self.roomGoal): if room == self.roomGoal: print("**** Congratulations! You have reached the
#!/usr/bin/env python # -*- coding: utf-8 -*- """Functional tests for `copra.rest.Client` class. Without any additional user input, this module will test all of the unauthenticated methods of the copra.rest.Client. An API key for the Coinbase Pro sandbox is required to test the authenticated methods. The key information as well as the ids of a few test accounts are read in to this module as environment variables by the dotenv module from a file named .env. The .env file must reside in the same directory as this test module. An example .env file named .env.sample is provided. To test the authenticated methods, fill out the .env.sample file accordingly and rename it to .env. """ import os.path if os.path.isfile(os.path.join(os.path.dirname(__file__), '.env')): from dotenv import load_dotenv load_dotenv() else: print("\n** .env file not found. Authenticated methods will be skipped. **\n") import asyncio from datetime import datetime, timedelta import os import json import random import time from uuid import uuid4 from asynctest import TestCase, skipUnless, expectedFailure from dateutil import parser from copra.rest import APIRequestError, Client, SANDBOX_URL from copra.rest.client import USER_AGENT KEY = os.getenv('KEY') SECRET = os.getenv('SECRET') PASSPHRASE = os.getenv('PASSPHRASE') TEST_AUTH = True if (KEY and SECRET and PASSPHRASE) else False TEST_BTC_ACCOUNT = os.getenv('TEST_BTC_ACCOUNT') TEST_USD_ACCOUNT = os.getenv('TEST_USD_ACCOUNT') TEST_USD_PAYMENT_METHOD = os.getenv('TEST_USD_PAYMENT_METHOD') TEST_USD_COINBASE_ACCOUNT = os.getenv('TEST_USD_COINBASE_ACCOUNT') HTTPBIN = 'http://httpbin.org' class TestRest(TestCase): """Tests for copra.rest.Client""" def setUp(self): self.client = Client(self.loop) if TEST_AUTH: self.auth_client = Client(self.loop, SANDBOX_URL, auth=True, key=KEY, secret=SECRET, passphrase=<PASSWORD>) def tearDown(self): self.loop.create_task(self.client.close()) if TEST_AUTH: 
self.loop.run_until_complete(self.auth_client.cancel_all(stop=True)) self.loop.create_task(self.auth_client.close()) # try to avoid public rate limit, allow for aiohttp cleanup and # all outstanding Coinbase actions to complete self.loop.run_until_complete(asyncio.sleep(1)) async def test_user_agent(self): async with Client(self.loop, HTTPBIN) as client: headers, body = await client.get('/user-agent') self.assertEqual(body['user-agent'], USER_AGENT) async def test__handle_error(self): async with Client(self.loop, HTTPBIN) as client: with self.assertRaises(APIRequestError) as cm: headers, body = await client.get('/status/404') async def test_delete(self): async with Client(self.loop, HTTPBIN) as client: headers, body = await client.delete('/delete') self.assertEqual(body['args'], {}) self.assertEqual(body['headers']['User-Agent'], USER_AGENT) self.assertIsInstance(headers, dict) self.assertIn('Content-Type', headers) self.assertIn('Content-Length', headers) params = {'key1': 'item1', 'key2': 'item2'} headers, body = await client.delete('/delete', params=params) self.assertEqual(body['args'], params) async def test_get(self): async with Client(self.loop, HTTPBIN) as client: headers, body = await client.get('/get') body['args'].pop('no-cache', None) self.assertEqual(body['args'], {}) self.assertEqual(body['headers']['User-Agent'], USER_AGENT) self.assertIsInstance(headers, dict) self.assertIn('Content-Type', headers) self.assertIn('Content-Length', headers) params = {'key1': 'item1', 'key2': 'item2'} headers, body = await client.get('/get', params=params) self.assertEqual(body['args'], params) async def test_post(self): async with Client(self.loop, HTTPBIN) as client: headers, body = await client.post('/post') self.assertEqual(body['form'], {}) self.assertEqual(body['headers']['User-Agent'], USER_AGENT) self.assertIsInstance(headers, dict) self.assertIn('Content-Type', headers) self.assertIn('Content-Length', headers) data = {"key1": "item1", "key2": "item2"} headers, 
body = await client.post('/post', data=data) self.assertEqual(json.loads(body['data']), data) async def test_products(self): keys = {'id', 'base_currency', 'quote_currency', 'base_min_size', 'base_max_size', 'quote_increment', 'display_name', 'status', 'margin_enabled', 'status_message', 'min_market_funds', 'max_market_funds', 'post_only', 'limit_only', 'cancel_only'} # Sometimes returns 'accesible' as a key. ?? products = await self.client.products() self.assertIsInstance(products, list) self.assertGreater(len(products), 1) self.assertIsInstance(products[0], dict) self.assertGreaterEqual(len(products[0]), len(keys)) self.assertGreaterEqual(products[0].keys(), keys) async def test_order_book(self): keys = {'sequence', 'bids', 'asks'} ob1 = await self.client.order_book('BTC-USD', level=1) self.assertIsInstance(ob1, dict) self.assertEqual(ob1.keys(), keys) self.assertIsInstance(ob1['bids'], list) self.assertEqual(len(ob1['bids']), 1) self.assertEqual(len(ob1['bids'][0]), 3) self.assertIsInstance(ob1['asks'], list) self.assertEqual(len(ob1['asks']), 1) self.assertEqual(len(ob1['asks'][0]), 3) ob2 = await self.client.order_book('BTC-USD', level=2) self.assertIsInstance(ob2, dict) self.assertEqual(ob2.keys(), keys) self.assertIsInstance(ob2['bids'], list) self.assertEqual(len(ob2['bids']), 50) self.assertEqual(len(ob2['bids'][0]), 3) self.assertIsInstance(ob2['asks'], list) self.assertEqual(len(ob2['asks']), 50) self.assertEqual(len(ob2['asks'][0]), 3) ob3 = await self.client.order_book('BTC-USD', level=3) self.assertIsInstance(ob3, dict) self.assertEqual(ob3.keys(), keys) self.assertIsInstance(ob3['bids'], list) self.assertGreater(len(ob3['bids']), 50) self.assertEqual(len(ob3['bids'][0]), 3) self.assertIsInstance(ob3['asks'], list) self.assertGreater(len(ob3['asks']), 50) self.assertEqual(len(ob3['asks'][0]), 3) async def test_ticker(self): keys = {'trade_id', 'price', 'size', 'bid', 'ask', 'volume', 'time'} tick = await self.client.ticker('BTC-USD') 
self.assertIsInstance(tick, dict) self.assertEqual(tick.keys(), keys) async def test_trades(self): keys = {'time', 'trade_id', 'price', 'size', 'side'} trades, before, after = await self.client.trades('BTC-USD') self.assertIsInstance(trades, list) self.assertIsInstance(trades[0], dict) self.assertIsInstance(before, str) self.assertIsInstance(after, str) self.assertEqual(len(trades), 100) self.assertEqual(trades[0].keys(), keys) trades, before, after = await self.client.trades('BTC-USD', 5) self.assertEqual(len(trades), 5) trades_after, after_after, before_after = await self.client.trades('BTC-USD', 5, after=after) self.assertLess(trades_after[0]['trade_id'], trades[-1]['trade_id']) trades_before, after_before, before_before = await self.client.trades('BTC-USD', 5, before=before) if trades_before: self.assertGreater(trades_before[-1]['trade_id'], trades[0]['trade_id']) else: self.assertIsNone(after_before) self.assertIsInstance(after_after, str) await asyncio.sleep(20) trades_before, after_before, before_before = await self.client.trades('BTC-USD', 5, before=before) if (trades_before): self.assertGreater(trades_before[-1]['trade_id'], trades[0]['trade_id']) else: self.assertIsNone(after_before) self.assertIsInstance(after_after, str) async def test_historic_rates(self): rates = await self.client.historic_rates('BTC-USD', 900) self.assertIsInstance(rates, list) self.assertEqual(len(rates[0]), 6) self.assertEqual(rates[0][0] - rates[1][0], 900) end = datetime.utcnow() start = end - timedelta(days=1) rates = await self.client.historic_rates('LTC-USD', 3600, start.isoformat(), end.isoformat()) self.assertIsInstance(rates, list) self.assertEqual(len(rates), 24) self.assertEqual(len(rates[0]), 6) self.assertEqual(rates[0][0] - rates[1][0], 3600) async def test_get_24hour_stats(self): keys = {'open', 'high', 'low', 'volume', 'last', 'volume_30day'} stats = await self.client.get_24hour_stats('BTC-USD') self.assertIsInstance(stats, dict) self.assertEqual(stats.keys(), keys) 
async def test_currencies(self): keys = {'id', 'name', 'min_size', 'status', 'message', 'details'} currencies = await self.client.currencies() self.assertIsInstance(currencies, list) self.assertGreater(len(currencies), 1) self.assertIsInstance(currencies[0], dict) self.assertEqual(currencies[0].keys(), keys) async def test_server_time(self): time = await self.client.server_time() self.assertIsInstance(time, dict) self.assertIn('iso', time) self.assertIn('epoch', time) self.assertIsInstance(time['iso'], str) self.assertIsInstance(time['epoch'], float) @skipUnless(TEST_AUTH, "Authentication credentials not provided.") async def test_accounts(self): keys = {'id', 'currency', 'balance', 'available', 'hold', 'profile_id'} accounts = await self.auth_client.accounts() self.assertIsInstance(accounts, list) self.assertIsInstance(accounts[0], dict) self.assertGreaterEqual(accounts[0].keys(), keys) @skipUnless(TEST_AUTH and TEST_BTC_ACCOUNT, "Auth credentials and test BTC account ID required") async def test_account(self): keys = {'id', 'currency', 'balance', 'available', 'hold', 'profile_id'} account = await self.auth_client.account(TEST_BTC_ACCOUNT) self.assertIsInstance(account, dict) self.assertEqual(account.keys(), keys) self.assertEqual(account['id'], TEST_BTC_ACCOUNT) self.assertEqual(account['currency'], 'BTC') @skipUnless(TEST_AUTH and TEST_BTC_ACCOUNT, "Auth credentials and test BTC account ID required") async def test_account_history(self): # Assumes market_order works. 
orders = [] for i in range(1,6): size = 0.001 * i order = await self.auth_client.market_order('buy', 'BTC-USD', size) orders.append(order) await asyncio.sleep(0.25) history, before, after = await self.auth_client.account_history( TEST_BTC_ACCOUNT, limit=3) keys = {'amount', 'balance', 'created_at', 'details', 'id', 'type'} self.assertIsInstance(history, list) self.assertEqual(len(history), 3) self.assertEqual(history[0].keys(), keys) self.assertEqual(history[0]['type'], 'match') self.assertEqual(history[0]['details']['order_id'], orders[4]['id']) self.assertEqual(history[0]['details']['product_id'], 'BTC-USD') after_history, after_before, after_after = await self.auth_client.account_history(TEST_BTC_ACCOUNT, after=after) self.assertGreater(history[-1]['id'], after_history[0]['id']) original_history, _, _ = await self.auth_client.account_history(TEST_BTC_ACCOUNT, before=after_before) self.assertEqual(original_history, history) @skipUnless(TEST_AUTH and TEST_BTC_ACCOUNT, "Auth credentials and test BTC account ID required") async def test_holds(self): # Assumes cancel, cancel_all and limit_order work await self.auth_client.cancel_all(stop=True) holds, _, _ = await self.auth_client.holds(TEST_BTC_ACCOUNT) offset = len(holds) orders = [] for i in range(1, 8): size = .001 * i price = 10000 + i * 1000 order = await self.auth_client.limit_order('sell', 'BTC-USD', price, size) orders.append(order) await asyncio.sleep(.25) holds, _, _ = await self.auth_client.holds(TEST_BTC_ACCOUNT) keys = {'amount', 'created_at', 'id', 'ref', 'type'} self.assertEqual(len(holds), 7 + offset) self.assertEqual(holds[0].keys(), keys) self.assertEqual(float(holds[0]['amount']), .007) self.assertEqual(orders[6]['id'], holds[0]['ref']) holds, before, after = await self.auth_client.holds(TEST_BTC_ACCOUNT, limit=5) self.assertEqual(len(holds), 5) after_holds, after_before, after_after = await self.auth_client.holds( TEST_BTC_ACCOUNT, after=after) self.assertEqual(len(after_holds), 2 + offset) 
original_holds, _, _ = await self.auth_client.holds(TEST_BTC_ACCOUNT, before=after_before, limit=5) self.assertEqual(original_holds, holds) for order in orders[4:]: resp = await self.auth_client.cancel(order['id']) self.assertEqual(resp[0], order['id']) holds, _, _ = await self.auth_client.holds(TEST_BTC_ACCOUNT) total = 0 for hold in holds: if hold['type'] == 'order': total += float(hold['amount']) self.assertAlmostEqual(total, 0.01) @skipUnless(TEST_AUTH, "Auth credentials required") async def test_limit_order(self): # Assumes cancel works for side, base_price in (('buy', 1), ('sell', 50000)): # default time_in_force price = base_price + (random.randint(1, 9) / 10) size = random.randint(1, 10) / 1000 order = await self.auth_client.limit_order(side, 'BTC-USD', price=price, size=size) await self.auth_client.cancel(order['id']) keys = {'created_at', 'executed_value', 'fill_fees', 'filled_size', 'id', 'post_only', 'price', 'product_id', 'settled', 'side', 'size', 'status', 'stp', 'time_in_force', 'type'} self.assertEqual(order.keys(), keys) self.assertEqual(float(order['price']), price) self.assertEqual(float(order['size']), size) self.assertEqual(order['product_id'], 'BTC-USD') self.assertEqual(order['side'], side) self.assertEqual(order['stp'], 'dc') self.assertEqual(order['type'], 'limit') self.assertEqual(order['time_in_force'], 'GTC') # client_oid, explicit time_in_force price = base_price + (random.randint(1, 9) / 10) size = random.randint(1, 10) / 1000 client_oid = str(uuid4()) order = await self.auth_client.limit_order(side, 'BTC-USD', price=price, size=size, time_in_force='GTC', client_oid=client_oid) await self.auth_client.cancel(order['id']) self.assertEqual(order.keys(), keys) self.assertEqual(float(order['price']), price) self.assertEqual(float(order['size']), size) self.assertEqual(order['product_id'], 'BTC-USD') self.assertEqual(order['side'], side) self.assertEqual(order['stp'], 'dc') self.assertEqual(order['type'], 'limit') 
self.assertEqual(order['time_in_force'], 'GTC') # IOC time_in_force price = base_price + (random.randint(1, 9) / 10) size = random.randint(1, 10) / 1000 order = await self.auth_client.limit_order(side, 'BTC-USD', price=price, size=size, time_in_force='IOC') try: await self.auth_client.cancel(order['id']) except APIRequestError: pass self.assertEqual(order.keys(), keys) self.assertEqual(float(order['price']), price) self.assertEqual(float(order['size']), size) self.assertEqual(order['product_id'], 'BTC-USD') self.assertEqual(order['side'], side) self.assertEqual(order['stp'], 'dc') self.assertEqual(order['type'], 'limit') self.assertEqual(order['time_in_force'], 'IOC') # FOK time_in_force price = base_price + (random.randint(1, 9) / 10) size = random.randint(1, 10) / 1000 order = await self.auth_client.limit_order(side, 'BTC-USD', price=price, size=size, time_in_force='FOK') if 'reject_reason' in
from __future__ import absolute_import, division, print_function, unicode_literals import copy import json import logging import math import os import shutil import tarfile import tempfile import sys from io import open import torch from torch import nn from torch.nn import CrossEntropyLoss from torch.utils import checkpoint from torch.nn import Module from torch.nn.parameter import Parameter import torch.nn.functional as F import torch.nn.init as init ''' BertEmbeddings: --------------------------------------------------------------------------------------------------''' class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. """ def __init__(self, config): super(BertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids, token_type_ids): seq_length = input_ids.size(1) position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) words_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = words_embeddings + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings '''-----------------------------------------------------------------------------------------------''' ''' BertEncoder & BertLayer: --------------------------------------------------------------------------------------------------''' class BertEncoder(nn.Module): def __init__(self, config): 
super(BertEncoder, self).__init__() self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) def forward(self, hidden_states, attention_mask): for i,layer_module in enumerate(self.layer): hidden_states = layer_module(hidden_states, attention_mask) return hidden_states # last-layer hidden state class BertLayer(nn.Module): def __init__(self, config): super(BertLayer, self).__init__() self.attention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, hidden_states, attention_mask): attention_output = self.attention(hidden_states, attention_mask) intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output '''-----------------------------------------------------------------------------------------------''' ''' BertAttention & BertSelfAttention & BertSelfOutput --------------------------------------------------------------------------------------------------''' class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, attention_mask) attention_output = self.output(self_output, input_tensor) return attention_output class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query 
= nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = torch.reshape(x, new_x_shape) return x.permute(0, 2, 1, 3) def transpose_key_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = torch.reshape(x, new_x_shape) return x.permute(0, 2, 3, 1) def forward(self, hidden_states, attention_mask): mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_key_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer) attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = F.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = torch.reshape(context_layer, new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states '''-----------------------------------------------------------------------------------------------''' ''' BertIntermediate & BertOutput (2-layer FeedForward) --------------------------------------------------------------------------------------------------''' class BertIntermediate(nn.Module): def __init__(self, config): super(BertIntermediate, self).__init__() self.dense_act = LinearActivation(config.hidden_size, config.intermediate_size, act=config.hidden_act) def forward(self, hidden_states): hidden_states = self.dense_act(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super(BertOutput, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states '''-----------------------------------------------------------------------------------------------''' ''' 
BertPooler --------------------------------------------------------------------------------------------------''' class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act="tanh") def forward(self, hidden_states): first_token_tensor = hidden_states[:, 0] pooled_output = self.dense_act(first_token_tensor) return pooled_output '''-----------------------------------------------------------------------------------------------''' ''' Bert Downstream Heads --------------------------------------------------------------------------------------------------''' class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense_act = LinearActivation(config.hidden_size, config.hidden_size, act=config.hidden_act) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) def forward(self, hidden_states): hidden_states = self.dense_act(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = nn.Linear(bert_model_embedding_weights.size(1), bert_model_embedding_weights.size(0), bias=False) self.decoder.weight = bert_model_embedding_weights self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertOnlyMLMHead, self).__init__() self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super(BertOnlyNSPHead, self).__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertPreTrainingHeads, self).__init__() self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score '''-----------------------------------------------------------------------------------------------''' ''' BertModel: --------------------------------------------------------------------------------------------------''' class BertModel(nn.Module): """BERT model ("Bidirectional Embedding Representations from a Transformer"). 
Params: config: a BertConfig class instance with the configuration to build a new model Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. Outputs: Tuple of (encoded_layers, pooled_output) `encoded_layers`: controled by `output_all_encoded_layers` argument: - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding to the last attention block of shape [batch_size, sequence_length, hidden_size], `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a classifier pretrained on top of the hidden state associated to the first character of the input (`CLS`) to train on the Next-Sentence task (see BERT's paper). 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = modeling.BertModel(config=config) all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertModel, self).__init__() self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) #self.output_hidden_states = config.output_hidden_states def forward(self, input_ids, token_type_ids, attention_mask): # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = extended_attention_mask.to(dtype=self.embeddings.word_embeddings.weight.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 embedding_output = self.embeddings(input_ids, token_type_ids) sequence_output = self.encoder(embedding_output, extended_attention_mask) pooled_output = self.pooler(sequence_output) return sequence_output, pooled_output class BertForPreTraining(nn.Module): """BERT model with pre-training heads. This module comprises the BERT model followed by the two pre-training heads: - the masked language modeling head, and - the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
# NOTE(review): reconstructed and reformatted from a whitespace-mangled
# extract of a liac-arff-style codec.  The real ``ArffDecoder`` header and
# its ``__init__`` (which initialises self._current_line and
# self._conversors) live outside this fragment — the header re-declared
# here is a reconstruction artifact; confirm against the full file.
class ArffDecoder:
    '''An ARFF decoder (continuation — see note above).'''

    def _decode_relation(self, s):
        '''(INTERNAL) Decodes a relation line.

        The relation declaration is a line with the format
        ``@RELATION <relation-name>``, where ``relation-name`` is a string.

        :param s: a normalized string.
        :return: the relation name as a plain string.
        '''
        _, v = s.split(' ', 1)
        v = v.strip()
        # NOTE(review): only the last two statements of this method were
        # visible in the extract; the validation above them follows the
        # canonical liac-arff implementation — verify against the original.
        if not _RE_RELATION.match(v):
            raise BadRelationFormat()
        res = str(v.strip('"\''))
        return res

    def _decode_attribute(self, s):
        '''(INTERNAL) Decodes an attribute line.

        The attribute is the most complex declaration in an arff file. All
        attributes must follow the template::

             @attribute <attribute-name> <datatype>

        where ``attribute-name`` is a string, quoted if the name contains any
        whitespace, and ``datatype`` can be:

        - Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
        - Strings as ``STRING``.
        - Dates (NOT IMPLEMENTED).
        - Nominal attributes with format:

            {<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}

        The nominal names follow the rules for the attribute names, i.e.,
        they must be quoted if the name contains whitespaces.

        This method must receive a normalized string, i.e., a string without
        padding, including the "\r\n" characters.

        :param s: a normalized string.
        :return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES).
        '''
        _, v = s.split(' ', 1)
        v = v.strip()

        # Verify the general structure of declaration.
        m = _RE_ATTRIBUTE.match(v)
        if not m:
            raise BadAttributeFormat()

        # Extract the raw name and type.
        name, type_ = m.groups()

        # Extract the final name (strip surrounding quotes).
        name = str(name.strip('"\''))

        # Extract the final type.
        if type_[:1] == "{" and type_[-1:] == "}":
            # Nominal attribute: parse the value list inside the braces.
            try:
                type_ = _parse_values(type_.strip('{} '))
            except Exception as exc:
                # Chain the parse failure so the root cause is preserved.
                raise BadAttributeType() from exc
            if isinstance(type_, dict):
                # A dict here means the values parsed as sparse data, which
                # is not a valid nominal specification.
                raise BadAttributeType()
        else:
            # If not nominal, verify the type name.
            type_ = str(type_).upper()
            if type_ not in ['NUMERIC', 'REAL', 'INTEGER', 'STRING']:
                raise BadAttributeType()

        return (name, type_)

    def _decode(self, s, encode_nominal=False, matrix_type=DENSE):
        '''Do the job the ``encode``.'''
        # Make sure this method is idempotent.
        self._current_line = 0

        # If string, convert to a list of lines.
        if isinstance(s, str):
            s = s.strip('\r\n ').replace('\r\n', '\n').split('\n')

        # Create the return object.
        obj: ArffContainerType = {
            'description': '',
            'relation': '',
            'attributes': [],
            'data': []
        }
        attribute_names = {}

        # Create the data helper object for the requested matrix layout.
        data = _get_data_object_for_decoding(matrix_type)

        # Read the header lines until @DATA is found.
        STATE = _TK_DESCRIPTION
        s = iter(s)
        for row in s:
            self._current_line += 1
            # Ignore empty lines.
            row = row.strip(' \r\n')
            if not row:
                continue

            u_row = row.upper()

            # DESCRIPTION -------------------------------------------------
            if u_row.startswith(_TK_DESCRIPTION) and STATE == _TK_DESCRIPTION:
                obj['description'] += self._decode_comment(row) + '\n'
            # -------------------------------------------------------------

            # RELATION ----------------------------------------------------
            elif u_row.startswith(_TK_RELATION):
                if STATE != _TK_DESCRIPTION:
                    raise BadLayout()

                STATE = _TK_RELATION
                obj['relation'] = self._decode_relation(row)
            # -------------------------------------------------------------

            # ATTRIBUTE ---------------------------------------------------
            elif u_row.startswith(_TK_ATTRIBUTE):
                if STATE not in (_TK_RELATION, _TK_ATTRIBUTE):
                    raise BadLayout()

                STATE = _TK_ATTRIBUTE

                attr = self._decode_attribute(row)
                if attr[0] in attribute_names:
                    raise BadAttributeName(attr[0], attribute_names[attr[0]])
                else:
                    attribute_names[attr[0]] = self._current_line
                obj['attributes'].append(attr)

                # Build the value conversor for this column.
                if isinstance(attr[1], (list, tuple)):
                    if encode_nominal:
                        conversor = EncodedNominalConversor(attr[1])
                    else:
                        conversor = NominalConversor(attr[1])
                else:
                    CONVERSOR_MAP = {'STRING': str,
                                     'INTEGER': lambda x: int(float(x)),
                                     'NUMERIC': float,
                                     'REAL': float}
                    conversor = CONVERSOR_MAP[attr[1]]

                self._conversors.append(conversor)
            # -------------------------------------------------------------

            # DATA --------------------------------------------------------
            elif u_row.startswith(_TK_DATA):
                if STATE != _TK_ATTRIBUTE:
                    raise BadLayout()
                break
            # -------------------------------------------------------------

            # COMMENT -----------------------------------------------------
            elif u_row.startswith(_TK_COMMENT):
                pass
            # -------------------------------------------------------------
        else:
            # Never found @DATA (the loop exhausted without `break`).
            raise BadLayout()

        def stream():
            # Lazily yield the remaining (data) lines of the iterator.
            for row in s:
                self._current_line += 1
                row = row.strip()
                # Ignore empty lines and comment lines.
                if row and not row.startswith(_TK_COMMENT):
                    yield row

        # Alter the data object.
        obj['data'] = data.decode_rows(stream(), self._conversors)

        if obj['description'].endswith('\n'):
            obj['description'] = obj['description'][:-1]

        return obj

    def decode(self, s, encode_nominal=False, return_type=DENSE):
        '''Returns the Python representation of a given ARFF file.

        When a file object is passed as an argument, this method reads lines
        iteratively, avoiding to load unnecessary information to the memory.

        :param s: a string or file object with the ARFF file.
        :param encode_nominal: boolean, if True perform a label encoding
            while reading the .arff file.
        :param return_type: determines the data structure used to store the
            dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
            `arff.DENSE_GEN` or `arff.LOD_GEN`.
            Consult the sections on `working with sparse data`_ and
            `loading progressively`_.
        '''
        try:
            return self._decode(s, encode_nominal=encode_nominal,
                                matrix_type=return_type)
        except ArffException as e:
            # Annotate the failure with the line being processed.
            e.line = self._current_line
            raise e


class ArffEncoder:
    '''An ARFF encoder.'''

    def _encode_comment(self, s=''):
        '''(INTERNAL) Encodes a comment line.

        Comments are single line strings starting, obligatorily, with the
        ``%`` character, and can have any symbol, including whitespaces or
        special characters.

        If ``s`` is None, this method will simply return an empty comment.

        :param s: (OPTIONAL) string.
        :return: a string with the encoded comment line.
        '''
        if s:
            return '%s %s' % (_TK_COMMENT, s)
        else:
            return '%s' % _TK_COMMENT

    def _encode_relation(self, name):
        '''(INTERNAL) Decodes a relation line.

        The relation declaration is a line with the format
        ``@RELATION <relation-name>``, where ``relation-name`` is a string.

        :param name: a string.
        :return: a string with the encoded relation declaration.
        '''
        # Quote the name if it contains any character special to ARFF.
        for char in ' %{},':
            if char in name:
                name = '"%s"' % name
                break
        return '%s %s' % (_TK_RELATION, name)

    def _encode_attribute(self, name, type_):
        '''(INTERNAL) Encodes an attribute line.

        The attribute follow the template::

             @attribute <attribute-name> <datatype>

        where ``attribute-name`` is a string, and ``datatype`` can be:

        - Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
        - Strings as ``STRING``.
        - Dates (NOT IMPLEMENTED).
        - Nominal attributes with format:

            {<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}

        This method must receive a the name of the attribute and its type, if
        the attribute type is nominal, ``type`` must be a list of values.

        :param name: a string.
        :param type_: a string or a list of string.
        :return: a string with the encoded attribute declaration.
        '''
        for char in ' %{},':
            if char in name:
                name = '"%s"' % name
                break

        if isinstance(type_, (tuple, list)):
            # Nominal attribute: emit the quoted value list in braces.
            type_tmp = ['%s' % encode_string(type_k) for type_k in type_]
            type_ = '{%s}' % (', '.join(type_tmp))

        return '%s %s %s' % (_TK_ATTRIBUTE, name, type_)

    def encode(self, obj):
        '''Encodes a given object to an ARFF file.

        :param obj: the object containing the ARFF information.
        :return: the ARFF file as an string.
        '''
        # join() consumes the generator directly — no intermediate list.
        return '\n'.join(self.iter_encode(obj))

    def iter_encode(self, obj):
        '''The iterative version of `arff.ArffEncoder.encode`.

        This encodes iteratively a given object and return, one-by-one, the
        lines of the ARFF file.

        :param obj: the object containing the ARFF information.
        :return: (yields) the ARFF file as strings.
        '''
        # DESCRIPTION
        if obj.get('description', None):
            for row in obj['description'].split('\n'):
                yield self._encode_comment(row)

        # RELATION
        if not obj.get('relation'):
            raise BadObject('Relation name not found or with invalid value.')

        yield self._encode_relation(obj['relation'])
        yield ''

        # ATTRIBUTES
        if not obj.get('attributes'):
            raise BadObject('Attributes not found.')

        attribute_names = set()
        for attr in obj['attributes']:
            # Verify for bad object format.
            if not isinstance(attr, (tuple, list)) or \
                    len(attr) != 2 or \
                    not isinstance(attr[0], str):
                raise BadObject('Invalid attribute declaration "%s"' % str(attr))

            if isinstance(attr[1], str):
                # Verify for invalid types.
                if attr[1] not in _SIMPLE_TYPES:
                    raise BadObject('Invalid attribute type "%s"' % str(attr))

            # Verify for bad object format.
            elif not isinstance(attr[1], (tuple, list)):
                raise BadObject('Invalid attribute type "%s"' % str(attr))

            # Verify attribute name is not used twice.
            if attr[0] in attribute_names:
                raise BadObject('Trying to use attribute name "%s" for the '
                                'second time.' % str(attr[0]))
            else:
                attribute_names.add(attr[0])

            yield self._encode_attribute(attr[0], attr[1])
        yield ''
        attributes = obj['attributes']

        # DATA
        yield _TK_DATA
        if 'data' in obj:
            data = _get_data_object_for_encoding(obj.get('data'))
            yield from data.encode_data(obj.get('data'), attributes)

        yield ''


# =============================================================================
# BASIC INTERFACE =============================================================
def load(fp, encode_nominal=False, return_type=DENSE):
    '''Load a file-like object containing the ARFF document and convert it
    into a Python object.

    :param fp: a file-like object.
    :param encode_nominal: boolean, if True perform a label encoding while
        reading the .arff file.
    :param return_type: determines the data structure used to store the
        dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
        `arff.DENSE_GEN` or `arff.LOD_GEN`.
        Consult the sections on `working with sparse data`_ and `loading
        progressively`_.
    :return: a dictionary.
    '''
    decoder = ArffDecoder()
    return decoder.decode(fp, encode_nominal=encode_nominal,
                          return_type=return_type)


def loads(s, encode_nominal=False, return_type=DENSE):
    '''Convert a string instance containing the ARFF document into a Python
    object.

    :param s: a string object.
    :param encode_nominal: boolean, if True perform a label encoding while
        reading the .arff file.
    :param return_type: determines the data structure used to store the
        dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
        `arff.DENSE_GEN` or `arff.LOD_GEN`.
        Consult the sections on `working with sparse data`_ and `loading
        progressively`_.
    :return: a dictionary.
    '''
    decoder = ArffDecoder()
    return decoder.decode(s, encode_nominal=encode_nominal,
                          return_type=return_type)


def dump(obj, fp):
    '''Serialize an object representing the ARFF document to a given
    file-like object.

    :param obj: a dictionary.
    :param fp: a file-like object.
    '''
    encoder = ArffEncoder()
    generator = encoder.iter_encode(obj)

    last_row = next(generator)
    # NOTE(review): the extract was truncated after ``last_row = next(...)``;
    # the loop below is the canonical liac-arff tail (write each row followed
    # by a newline, the last row without one) — verify against the original.
    for row in generator:
        fp.write(last_row + '\n')
        last_row = row
    fp.write(last_row)

    return fp
"""Inventory views and DRF viewsets for the Objekt/Offer application.

NOTE(review): reconstructed and reformatted from a whitespace-mangled
extract; the stray ``<gh_stars>0`` dataset artifact before the imports was
dropped.  Repetitive dashboard views and API viewsets were deduplicated
without changing their names, URLs or behaviour.
"""
import csv
import datetime
import os
import uuid

from django.contrib.admin.models import ADDITION, CHANGE, LogEntry
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.functions import ExtractMonth, TruncMonth
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, render, reverse
from django.template.loader import get_template
from django.utils.timezone import now

from rest_framework import filters, viewsets
from rest_framework.authentication import BasicAuthentication, SessionAuthentication, TokenAuthentication
from rest_framework.permissions import IsAuthenticated

from storages.backends.s3boto3 import S3Boto3Storage

from inventory.forms import ObjektForm, OfferForm, PersonForm
from inventory.models import Comment, Image, Material, Objekt, Offer, OfferStatus, OfferStatusLog, Person, Status
from inventory.serializers import ObjektSerializer, OfferSerializer, PersonSerializer, \
    RestadoMaterialSerializer, RestadoObjektSerializer
from inventory.utils import convert_float


def _dashboard(template_name: str, tab_stub: str, menu_stub: str):
    """Build a login-protected dashboard view.

    All dashboard pages differ only in template and the tab/menu stubs, so
    they are generated from one factory instead of twelve copy-pasted views.
    """
    @login_required
    def view(request: HttpRequest) -> HttpResponse:
        return render(request=request,
                      template_name=template_name,
                      context={'tab_stub': tab_stub, 'menu_stub': menu_stub})
    return view


# Module-level names are part of the URLconf interface — keep them unchanged.
objekt_dashboard = _dashboard('dashboards/dashboard_all_objekts.html', 'all', 'offers')
people_dashboard = _dashboard('dashboards/dashboard_people.html', 'people', 'people')
objekt_dashboard_offers = _dashboard('dashboards/dashboard_all_offers.html', 'all', 'offers')
objekt_dashboard_pending = _dashboard('dashboards/dashboard_pending.html', 'pending', 'offers')
objekt_dashboard_accepted = _dashboard('dashboards/dashboard_accepted.html', 'accepted', 'offers')
objekt_dashboard_rejected = _dashboard('dashboards/dashboard_rejected.html', 'rejected', 'offers')
objekt_dashboard_materials = _dashboard('dashboards/dashboard_materials.html', 'all', 'materials')
objekt_dashboard_warehouse = _dashboard('dashboards/dashboard_warehouse.html', 'warehouse', 'materials')
objekt_dashboard_available = _dashboard('dashboards/dashboard_available.html', 'available', 'materials')
objekt_dashboard_sold = _dashboard('dashboards/dashboard_sold.html', 'sold', 'materials')
objekt_dashboard_archived = _dashboard('dashboards/dashboard_archived.html', 'archived', 'materials')
objekt_dashboard_deleted = _dashboard('dashboards/dashboard_deleted.html', 'deleted', 'materials')


OBJEKT_SEARCH_FIELDS = [
    'title',
    'status__text',
]


class _SessionAPIViewSet(viewsets.ModelViewSet):
    """Shared base for the browser-facing API: session/basic auth,
    authenticated-only access, and a search filter backend."""
    authentication_classes = [SessionAuthentication, BasicAuthentication]
    permission_classes = [IsAuthenticated]
    filter_backends = [filters.SearchFilter]


class PersonAPIViewSet(_SessionAPIViewSet):
    queryset = Person.objects.filter(deleted_at__isnull=True).order_by('id')
    serializer_class = PersonSerializer
    search_fields = ['email', 'vat']


class OfferAllAPIViewSet(_SessionAPIViewSet):
    queryset = Offer.objects.order_by('id')
    serializer_class = OfferSerializer
    search_fields = ['email']


class OfferPendingAPIViewSet(_SessionAPIViewSet):
    queryset = Offer.pending_objects.all().order_by('id')
    serializer_class = OfferSerializer
    search_fields = ['email']


class OfferAcceptedAPIViewSet(_SessionAPIViewSet):
    queryset = Offer.accepted_objects.all().order_by('id')
    serializer_class = OfferSerializer
    search_fields = ['email']


class OfferRejectedAPIViewSet(_SessionAPIViewSet):
    queryset = Offer.rejected_objects.all().order_by('id')
    serializer_class = OfferSerializer
    search_fields = ['email']


class ObjektAllAPIViewSet(_SessionAPIViewSet):
    queryset = Objekt.all_objects.all().order_by('id')
    serializer_class = ObjektSerializer
    search_fields = OBJEKT_SEARCH_FIELDS


class ObjektWarehouseAPIViewSet(_SessionAPIViewSet):
    queryset = Objekt.warehouse_objects.all().order_by('id')
    serializer_class = ObjektSerializer
    search_fields = OBJEKT_SEARCH_FIELDS


class ObjektAvailableAPIViewSet(_SessionAPIViewSet):
    queryset = Objekt.available_objects.all().order_by('id')
    serializer_class = ObjektSerializer
    search_fields = OBJEKT_SEARCH_FIELDS


class ObjektSoldAPIViewSet(_SessionAPIViewSet):
    queryset = Objekt.sold_objects.all().order_by('id')
    serializer_class = ObjektSerializer
    search_fields = OBJEKT_SEARCH_FIELDS


class ObjektArchivedAPIViewSet(_SessionAPIViewSet):
    queryset = Objekt.archived_objects.all().order_by('id')
    serializer_class = ObjektSerializer
    search_fields = OBJEKT_SEARCH_FIELDS


class ObjektDeletedAPIViewSet(_SessionAPIViewSet):
    queryset = Objekt.deleted_objects.all().order_by('id')
    serializer_class = ObjektSerializer
    search_fields = OBJEKT_SEARCH_FIELDS


class RestadoObjektAPIViewSet(viewsets.ModelViewSet):
    # Token-authenticated endpoint for the external Restado integration.
    queryset = Objekt.restado_objects.all().order_by('id')
    serializer_class = RestadoObjektSerializer
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]


class RestadoMaterialAPIViewSet(viewsets.ModelViewSet):
    queryset = Material.objects.all().order_by('id')
    serializer_class = RestadoMaterialSerializer
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]


def _assign_materials(objekt: Objekt, request: HttpRequest) -> None:
    """Replace ``objekt``'s materials with those posted as ``material[]``.

    Posted values are matched against ``str(Material)``.  A single pass over
    the Material table replaces the original nested rescan (one full query
    per posted value); the set of materials added is identical.
    """
    objekt.material.clear()
    wanted = {value for value in request.POST.getlist('material[]') if value}
    if not wanted:
        return
    for material in Material.objects.all():
        if str(material) in wanted:
            objekt.material.add(material)


@login_required  # NOTE(review): added — converts an offer into an inventory
# Objekt, an admin-only action; it was reachable anonymously before.
def offer_transform(request: HttpRequest, offer_pk: int) -> HttpResponse:
    """Create a new Objekt from an Offer and redirect to its admin page."""
    offer = get_object_or_404(Offer, pk=offer_pk)
    objekt = Objekt.objects.create()
    objekt.description = offer.message
    # Keep the original count()+slice lookup: .first() would add an ORDER BY
    # and could pick a different image on some databases.
    if offer.images.count():
        objekt.images.add(offer.images.all()[0])
    objekt.offer = offer
    objekt.save()
    return HttpResponseRedirect(redirect_to=reverse('inventory_objekt', args=(objekt.id,)))


@login_required  # NOTE(review): added — hard-deletes an Offer; it was
# reachable anonymously before.
def offer_delete(request: HttpRequest, offer_pk: int) -> HttpResponse:
    """Hard-delete an Offer and return to the offers dashboard."""
    offer = get_object_or_404(Offer, pk=offer_pk)
    offer.delete()
    return HttpResponseRedirect(redirect_to=reverse('inventory_objekt_dashboard_offers'))


def offer_create(request: HttpRequest) -> HttpResponse:
    """Public offer submission form (intentionally unauthenticated).

    On a valid POST the Offer is saved and the submitting Person is created
    or updated (name/phone are only filled in when previously unset).
    """
    if request.method == 'POST':
        offer_form = OfferForm(request.POST, request.FILES)
        if offer_form.is_valid():
            offer = offer_form.save()
            email = request.POST['email'].lower()
            phone_number = request.POST['phone_number']
            person_name = request.POST['person_name']
            person = Person.objects.filter(email=email).first()
            if not person:
                person = Person.objects.create(email=email)
            if person.phone_number is None:
                person.phone_number = phone_number
            if person.name is None:
                person.name = person_name
            person.save()
            return render(request=request,
                          template_name='objekt/pages/offer_created.html',
                          context={'offer': offer})
    else:
        offer_form = OfferForm()
    return render(request=request,
                  template_name='objekt/pages/offer_create.html',
                  context={'offer_form': offer_form})


@login_required
def objekt_create(request: HttpRequest) -> HttpResponse:
    """Create a new Objekt via the admin form."""
    if request.method == 'POST':
        objekt_form = ObjektForm(request.POST, request.FILES)
        if objekt_form.is_valid():
            objekt = objekt_form.save()
            # NOTE(review): magic status id — presumably a fixed OfferStatus
            # row; confirm against the OfferStatus fixtures.
            objekt.offer_status_id = 3
            objekt.save()
            _assign_materials(objekt, request)
            # NOTE(review): logged as CHANGE although this is a creation —
            # kept as-is (ADDITION may have been intended).
            LogEntry.objects.log_action(
                user_id=request.user.id,
                content_type_id=ContentType.objects.get_for_model(objekt).pk,
                object_id=objekt.pk,
                object_repr=str(objekt),
                change_message="Material wurde erstellt",
                action_flag=CHANGE)
            return HttpResponseRedirect(redirect_to=reverse('inventory_objekt', args=(objekt.id,)))
    else:
        objekt_form = ObjektForm()
    objekts = Objekt.objects.filter(knowledge_base=True).order_by('title')
    return render(request=request,
                  template_name='objekt/pages/objekt_create.html',
                  context={
                      'objekt_form': objekt_form,
                      'objekts': objekts,
                      'materials': Material.objects.all()
                  })


@login_required
def objekt(request: HttpRequest, objekt_pk: int) -> HttpResponse:
    """Objekt admin detail page; POST updates the Objekt and its materials."""
    objekt = get_object_or_404(Objekt, pk=objekt_pk)
    if request.method == 'POST':
        objekt_form = ObjektForm(request.POST, request.FILES, instance=objekt)
        if objekt_form.is_valid():
            objekt_form.save()
            _assign_materials(objekt, request)
            LogEntry.objects.log_action(
                user_id=request.user.id,
                content_type_id=ContentType.objects.get_for_model(objekt).pk,
                object_id=objekt_pk,
                object_repr=str(objekt),
                change_message="Material wurde geändert",
                action_flag=CHANGE)
        # NOTE(review): no redirect after a successful POST (PRG pattern is
        # not followed) — kept as-is to avoid changing navigation behaviour.
    else:
        objekt_form = ObjektForm(instance=objekt)
    return render(request=request,
                  template_name='objekt/pages/objekt_admin.html',
                  context={
                      'objekt': objekt,
                      'objekt_form': objekt_form,
                      'log_entries': LogEntry.objects.filter(
                          content_type_id=ContentType.objects.get_for_model(Objekt).pk,
                          object_id=objekt.id
                      ),
                      'materials': Material.objects.all()
                  })


@login_required
def offer(request: HttpRequest, offer_pk: int) -> HttpResponse:
    """Offer admin detail page; POST changes the status and logs it."""
    offer = get_object_or_404(Offer, pk=offer_pk)
    if request.method == 'POST':
        offer_status = OfferStatus.objects.get(id=request.POST['status'])
        offer.offer_status = offer_status
        offer.save()
        OfferStatusLog.objects.create(
            offer=offer,
            offer_status=offer.offer_status,
            created_by=request.user
        )
    return render(request=request,
                  template_name='objekt/pages/offer_admin.html',
                  context={
                      'offer': offer,
                      'offer_statuses': OfferStatus.objects.all(),
                      # NOTE(review): always empty — the template probably
                      # expects the offer's OfferStatusLog rows; confirm.
                      'status_logs': []
                  })


@login_required
def objekt_comment(request, objekt_pk: int) -> HttpResponseRedirect:
    """Add a comment to an Objekt via AJAX and return the rendered snippet.

    NOTE(review): a non-POST request implicitly returns None (HTTP 500 in
    Django) — kept as-is; callers only POST.
    """
    if request.method == 'POST':
        objekt = Objekt.objects.get(pk=objekt_pk)
        comment = Comment.objects.create(
            objekt_id=objekt_pk,
            created_by=request.user,
            text=request.POST['text']
        )
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=ContentType.objects.get_for_model(objekt).pk,
            object_id=objekt_pk,
            object_repr=str(objekt),
            change_message="Bemerkung wurde hinzugefügt",
            action_flag=CHANGE)
        template = get_template('objekt/components/objekt_comment.html')
        html = template.render(request=request, context={
            'objekt': objekt,
            'comment': comment
        })
        return JsonResponse({
            'success': True,
            'html': html
        })


@login_required  # NOTE(review): added — uploads images and writes a LogEntry
# with request.user.id, so this only works for authenticated staff anyway.
def objekt_images(request, objekt_pk: int) -> HttpResponseRedirect:
    """Upload posted files to S3, attach them to the Objekt as Images."""
    objekt = get_object_or_404(Objekt, pk=objekt_pk)
    for file_obj in request.FILES.getlist('file'):
        # Organize a path for the file in the bucket.
        file_directory_within_bucket = 'images/objekt/{objekt_id}/{guid}'.format(
            objekt_id=objekt.id,
            guid=str(uuid.uuid4())
        )
        # Synthesize a full file path; note that we included the filename.
        file_path_within_bucket = os.path.join(
            file_directory_within_bucket,
            file_obj.name
        )
        media_storage = S3Boto3Storage()
        # Avoid overwriting an existing file.
        if not media_storage.exists(file_path_within_bucket):
            media_storage.save(file_path_within_bucket, file_obj)
        # NOTE(review): hard-coded bucket name — should come from settings.
        file_url = "https://{}.s3.amazonaws.com/{}".format(
            'tuberlin-dev',
            file_path_within_bucket
        )
        image = Image.objects.create(filename=file_url)
        objekt.images.add(image)
        if objekt.thumbnail_image is None:
            objekt.thumbnail_image = image
            objekt.save()
    LogEntry.objects.log_action(
        user_id=request.user.id,
        content_type_id=ContentType.objects.get_for_model(objekt).pk,
        object_id=objekt_pk,
        object_repr=str(objekt),
        change_message="Material hatte neue Bilder hinzugefügt",
        action_flag=CHANGE)
    template = get_template('objekt/components/image_list.html')
    html = template.render(request=request, context={'objekt': objekt})
    return JsonResponse({
        'success': True,
        'html': html
    })


@login_required
def objekt_delete(request: HttpRequest, objekt_pk: int):
    """Soft-delete an Objekt (sets deleted_at) and log the action."""
    objekt = get_object_or_404(Objekt, pk=objekt_pk)
    objekt.deleted_at = now()
    objekt.save()
    LogEntry.objects.log_action(
        user_id=request.user.id,
        content_type_id=ContentType.objects.get_for_model(objekt).pk,
        object_id=objekt_pk,
        object_repr=str(objekt),
        change_message="Material wurde gelöscht",
        action_flag=CHANGE)
    # (A dead image_list template render whose result was never used has
    # been removed here.)
    return HttpResponseRedirect(redirect_to=reverse('inventory_objekt', args=(objekt_pk,)))


@login_required
def objekt_delete_forever_all(request: HttpRequest):
    """Hard-delete every soft-deleted Objekt."""
    for objekt in Objekt.deleted_objects.all():
        objekt.delete()
    return JsonResponse({'success': True})


@login_required
def objekt_undelete(request: HttpRequest, objekt_pk: int):
    """Undo a soft-delete and return to the referring page."""
    objekt = get_object_or_404(Objekt, pk=objekt_pk)
    objekt.deleted_at = None
    objekt.save()
    LogEntry.objects.log_action(
        user_id=request.user.id,
        content_type_id=ContentType.objects.get_for_model(objekt).pk,
        object_id=objekt_pk,
        object_repr=str(objekt),
        change_message="Materiallöschung wurde rückgängig gemacht",
        action_flag=CHANGE)
    # NOTE(review): KeyError (HTTP 500) if the Referer header is missing.
    return HttpResponseRedirect(redirect_to=request.META['HTTP_REFERER'])


@login_required
def objekt_archive(request: HttpRequest, objekt_pk: int):
    """Archive an Objekt (sets archived_at) and return to the referrer."""
    objekt = get_object_or_404(Objekt, pk=objekt_pk)
    objekt.archived_at = now()
    objekt.save()
    LogEntry.objects.log_action(
        user_id=request.user.id,
        content_type_id=ContentType.objects.get_for_model(objekt).pk,
        object_id=objekt_pk,
        object_repr=str(objekt),
        change_message="Material wurde archiviert",
        action_flag=CHANGE)
    return HttpResponseRedirect(request.META['HTTP_REFERER'])


@login_required
def objekt_unarchive(request: HttpRequest, objekt_pk: int):
    """Unarchive an Objekt and return to the referrer."""
    objekt = get_object_or_404(Objekt, pk=objekt_pk)
    objekt.archived_at = None
    objekt.save()
    LogEntry.objects.log_action(
        user_id=request.user.id,
        content_type_id=ContentType.objects.get_for_model(objekt).pk,
        object_id=objekt_pk,
        object_repr=str(objekt),
        change_message="Material wurde ausarchiviert",
        action_flag=CHANGE)
    return HttpResponseRedirect(request.META['HTTP_REFERER'])


@login_required
def objekt_comment_delete(request: HttpRequest, objekt_pk: int, comment_pk: int) -> HttpResponseRedirect:
    """Soft-delete a comment, but only if it belongs to the given Objekt."""
    comment = get_object_or_404(Comment, pk=comment_pk)
    if comment.objekt_id == objekt_pk:
        comment.deleted_at = now()
        comment.save()
        objekt = get_object_or_404(Objekt, pk=objekt_pk)
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=ContentType.objects.get_for_model(objekt).pk,
            object_id=objekt_pk,
            object_repr=str(objekt),
            change_message="Bemerkung wurde gelöscht",
            action_flag=CHANGE)
    return HttpResponseRedirect(redirect_to=reverse('inventory_objekt', args=(objekt_pk,)))


@login_required
def objekt_clone(request: HttpRequest) -> HttpResponseRedirect:
    """Clone an Objekt (scalar fields, materials and images) and log it."""
    objekt = Objekt.objects.get(pk=request.POST['objekt'])
    cloned_objekt = Objekt.objects.create(
        title=objekt.title + " (Klone)",
        count=objekt.count,
        unit=objekt.unit,
        width=objekt.width,
        height=objekt.height,
        depth=objekt.depth,
        condition=objekt.condition,
        mass=objekt.mass,
        treatment_notes=objekt.treatment_notes,
        description=objekt.description,
        price=objekt.price,
        cloned_from=objekt,
        offer=objekt.offer,
        reference_price=objekt.reference_price,
        eco_cost=objekt.eco_cost,
        created_by=request.user
    )
    for material in objekt.material.all():
        cloned_objekt.material.add(material)
    for image in objekt.images.all():
        cloned_objekt.images.add(image)
    LogEntry.objects.log_action(
        user_id=request.user.id,
        content_type_id=ContentType.objects.get_for_model(objekt).pk,
        object_id=objekt.pk,
        object_repr=str(objekt),
        change_message="Material wurde geklont",
        action_flag=CHANGE)
    LogEntry.objects.log_action(
        user_id=request.user.id,
        content_type_id=ContentType.objects.get_for_model(cloned_objekt).pk,
        object_id=cloned_objekt.pk,
        object_repr=str(cloned_objekt),
        change_message="Material wurde erstellt",
        action_flag=ADDITION)
    return JsonResponse({
        'success': True,
        'redirect_url': reverse('inventory_objekt', args=(cloned_objekt.id,))
    })


@login_required
def objekt_knowledge_base(request: HttpRequest, objekt_pk: int) -> HttpResponseRedirect:
    """Toggle an Objekt's knowledge-base flag based on the posted checkbox."""
    objekt = Objekt.objects.get(pk=objekt_pk)
    if 'knowledge_base' in request.POST:
        objekt.knowledge_base = True
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=ContentType.objects.get_for_model(objekt).pk,
            object_id=objekt_pk,
            object_repr=str(objekt),
            change_message="Material wurde zur KnowledgeBase hinzugefügt",
            action_flag=CHANGE)
    else:
        objekt.knowledge_base = False
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=ContentType.objects.get_for_model(objekt).pk,
            object_id=objekt_pk,
            object_repr=str(objekt),
            change_message="Material wurde aus der KnowledgeBase entfernt",
            action_flag=CHANGE)
    objekt.save()
    return JsonResponse({'success': True})


@login_required
def objekt_restado(request: HttpRequest, objekt_pk: int) -> HttpResponseRedirect:
    """Toggle an Objekt's availability on the external Restado platform."""
    objekt = Objekt.objects.get(pk=objekt_pk)
    if 'restado' in request.POST:
        objekt.available_on_restado = True
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=ContentType.objects.get_for_model(objekt).pk,
            object_id=objekt_pk,
            object_repr=str(objekt),
            change_message="Material wurde für Restado zur Verfügung gestellt",
            action_flag=CHANGE)
    else:
        objekt.available_on_restado = False
        LogEntry.objects.log_action(
            user_id=request.user.id,
            content_type_id=ContentType.objects.get_for_model(objekt).pk,
            object_id=objekt_pk,
            object_repr=str(objekt),
            change_message="Materialverfügbarkeit auf Restado wurde entfernt",
            action_flag=CHANGE)
    objekt.save()
    return JsonResponse({'success': True})
objekt = Objekt.objects.get(pk=objekt_pk) sold_count = int(request.POST['count']) price = convert_float(request.POST['price'])
"""Ratings (reviews) views for add-ons.

NOTE(review): reconstructed and reformatted from a whitespace-mangled
extract; the stray ``<gh_stars>0`` dataset artifact before the imports was
dropped.  Behaviour and all user-facing strings are unchanged.
"""
from django import http
from django.core.exceptions import PermissionDenied
from django.db.models import Prefetch, Q
from django.db.transaction import non_atomic_requests
from django.shortcuts import get_object_or_404, redirect
from django.utils.encoding import force_text
from django.utils.translation import ugettext

from rest_framework import serializers
from rest_framework.decorators import detail_route
from rest_framework.exceptions import ParseError
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.viewsets import ModelViewSet

import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.addons.decorators import addon_view_factory
from olympia.addons.models import Addon
from olympia.addons.views import AddonChildMixin
from olympia.amo.decorators import json_view, login_required, post_required
from olympia.amo.templatetags import jinja_helpers
from olympia.amo.utils import paginate, render
from olympia.api.pagination import OneOrZeroPageNumberPagination
from olympia.api.permissions import (
    AllowAddonAuthor, AllowIfPublic, AllowOwner,
    AllowRelatedObjectPermissions, AnyOf, ByHttpMethod, GroupPermission)
from olympia.api.throttling import GranularUserRateThrottle

from . import forms
from .models import GroupedRating, Rating, RatingFlag
from .permissions import CanDeleteRatingPermission
from .serializers import RatingSerializer, RatingSerializerReply
from .templatetags.jinja_helpers import user_can_delete_review

log = olympia.core.logger.getLogger('z.ratings')

addon_view = addon_view_factory(qs=Addon.objects.valid)


@addon_view
@non_atomic_requests
def review_list(request, addon, review_id=None, user_id=None):
    """List ratings for an add-on; also serves the detail and per-user views."""
    qs = Rating.without_replies.all().filter(
        addon=addon).order_by('-created')

    ctx = {'addon': addon,
           'grouped_ratings': GroupedRating.get(addon.id)}
    ctx['form'] = forms.RatingForm(None)
    is_admin = acl.action_allowed(request, amo.permissions.ADDONS_EDIT)

    if review_id is not None:
        ctx['page'] = 'detail'
        # If this is a dev reply, find the first msg for context.
        review = get_object_or_404(Rating.objects.all(), pk=review_id)
        if review.reply_to_id:
            review_id = review.reply_to_id
            ctx['reply'] = review
        qs = qs.filter(pk=review_id)
    elif user_id is not None:
        ctx['page'] = 'user'
        qs = qs.filter(user=user_id)
        if not qs:
            raise http.Http404()
    else:
        ctx['page'] = 'list'
        qs = qs.filter(is_latest=True)

    # Don't filter out empty reviews for admins.
    if not is_admin:
        # But otherwise, filter out everyone elses empty reviews.
        user_filter = (Q(user=request.user.pk)
                       if request.user.is_authenticated() else Q())
        qs = qs.filter(~Q(body=None) | user_filter)

    ctx['reviews'] = reviews = paginate(request, qs)
    ctx['replies'] = Rating.get_replies(reviews.object_list)
    if request.user.is_authenticated():
        ctx['review_perms'] = {
            'is_admin': is_admin,
            'is_reviewer': acl.action_allowed(
                request, amo.permissions.RATINGS_MODERATE),
            'is_author': acl.check_addon_ownership(request, addon, dev=True),
        }
        ctx['flags'] = get_flags(request, reviews.object_list)
    else:
        ctx['review_perms'] = {}
    return render(request, 'ratings/review_list.html', ctx)


def get_flags(request, reviews):
    """Map rating id -> the requesting user's RatingFlag for those ratings."""
    rating_ids = [review.id for review in reviews]
    flags = RatingFlag.objects.filter(rating__in=rating_ids,
                                      user=request.user.id)
    return {flag.rating_id: flag for flag in flags}


@addon_view
@post_required
@login_required(redirect=False)
@json_view
def flag(request, addon, review_id):
    """Flag somebody else's rating for moderator attention."""
    review = get_object_or_404(Rating.objects, pk=review_id, addon=addon)
    if review.user_id == request.user.id:
        raise PermissionDenied
    if not review.body:
        return {'msg': ugettext('This rating can\'t be flagged because it has '
                                'no review text.')}
    data = {'rating': review_id, 'user': request.user.id}
    try:
        instance = RatingFlag.objects.get(**data)
    except RatingFlag.DoesNotExist:
        instance = None
    data = dict(request.POST.items(), **data)
    form = forms.RatingFlagForm(data, instance=instance)
    if not form.is_valid():
        return json_view.error(form.errors)
    form.save()
    # Mark the rating so it shows up in the moderation queue.
    Rating.objects.filter(id=review_id).update(editorreview=True)
    return {'msg': ugettext('Thanks; this review has been flagged '
                            'for reviewer approval.')}


@addon_view
@post_required
@login_required(redirect=False)
def delete(request, addon, review_id):
    """Delete a rating, when the requesting user is allowed to."""
    review = get_object_or_404(Rating.objects, pk=review_id, addon=addon)
    if not user_can_delete_review(request, review):
        raise PermissionDenied
    review.delete(user_responsible=request.user)
    return http.HttpResponse()


def _review_details(request, addon, form, create=True):
    """Assemble the field dict used to create or update a Rating."""
    data = {
        # Always set deleted: False because when replying, you're actually
        # editing the previous reply if it existed, even if it had been
        # deleted.
        'deleted': False,
        # This field is not saved, but it helps the model know that the
        # action should be logged.
        'user_responsible': request.user,
    }
    if create:
        # These fields should be set at creation time.
        data['addon'] = addon
        data['user'] = request.user
        data['version'] = addon.current_version
        data['ip_address'] = request.META.get('REMOTE_ADDR', '')
    data.update(**form.cleaned_data)
    return data


@addon_view
@login_required
def reply(request, addon, review_id):
    """Create or edit the developer reply to a rating."""
    is_admin = acl.action_allowed(request, amo.permissions.ADDONS_EDIT)
    is_author = acl.check_addon_ownership(request, addon, dev=True)
    if not (is_admin or is_author):
        raise PermissionDenied

    rating = get_object_or_404(Rating.objects, pk=review_id, addon=addon)
    form = forms.RatingReplyForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        kwargs = {
            'reply_to': rating,
            'addon': addon,
            'defaults': _review_details(request, addon, form),
        }
        Rating.unfiltered.update_or_create(**kwargs)
        return redirect(jinja_helpers.url(
            'addons.ratings.detail', addon.slug, review_id))
    ctx = {'review': rating, 'form': form, 'addon': addon}
    return render(request, 'ratings/reply.html', ctx)


@addon_view
@login_required
def add(request, addon):
    """Add a rating to an add-on (authors may not rate their own)."""
    if addon.has_author(request.user):
        raise PermissionDenied
    form = forms.RatingForm(request.POST or None)
    if (request.method == 'POST' and form.is_valid() and
            not request.POST.get('detailed')):
        details = _review_details(request, addon, form)
        rating = Rating.objects.create(**details)
        if 'flag' in form.cleaned_data and form.cleaned_data['flag']:
            rf = RatingFlag(rating=rating,
                            user_id=request.user.id,
                            flag=RatingFlag.OTHER,
                            note='URLs')
            rf.save()
        return redirect(jinja_helpers.url('addons.ratings.list', addon.slug))
    return render(request, 'ratings/add.html', {'addon': addon, 'form': form})


@addon_view
@json_view
@login_required(redirect=False)
@post_required
def edit(request, addon, review_id):
    """Edit a rating (owner or admin only)."""
    rating = get_object_or_404(Rating.objects, pk=review_id, addon=addon)
    is_admin = acl.action_allowed(request, amo.permissions.ADDONS_EDIT)
    if not (request.user.id == rating.user.id or is_admin):
        raise PermissionDenied
    cls = forms.RatingReplyForm if rating.reply_to else forms.RatingForm
    form = cls(request.POST)
    if not form.is_valid():
        return json_view.error(form.errors)
    data = _review_details(request, addon, form, create=False)
    for field, value in data.items():
        setattr(rating, field, value)
    # Resist the temptation to use rating.update(): it'd be more direct but
    # doesn't work with extra fields that are not meant to be saved like
    # 'user_responsible'.
    rating.save()
    return {}


class RatingThrottle(GranularUserRateThrottle):
    rate = '1/minute'
    scope = 'user_rating'

    def allow_request(self, request, view):
        # Only POSTs (rating submissions) are throttled; reads pass through.
        if request.method.lower() != 'post':
            return True
        return super(RatingThrottle, self).allow_request(request, view)


class RatingReplyThrottle(RatingThrottle):
    rate = '1/5second'
}), ] reply_permission_classes = [AnyOf( GroupPermission(amo.permissions.ADDONS_EDIT), AllowRelatedObjectPermissions('addon', [AllowAddonAuthor]), )] reply_serializer_class = RatingSerializerReply throttle_classes = (RatingThrottle,) def set_addon_object_from_rating(self, rating): """Set addon object on the instance from a rating object.""" # At this point it's likely we didn't have an addon in the request, so # if we went through get_addon_object() before it's going to be set # to None already. We delete the addon_object property cache and set # addon_pk in kwargs to force get_addon_object() to reset # self.addon_object. del self.addon_object self.kwargs['addon_pk'] = str(rating.addon.pk) return self.get_addon_object() def get_addon_object(self): """Return addon object associated with the request, or None if not relevant. Will also fire permission checks on the addon object when it's loaded. """ if hasattr(self, 'addon_object'): return self.addon_object if 'addon_pk' not in self.kwargs: self.kwargs['addon_pk'] = ( self.request.data.get('addon') or self.request.GET.get('addon')) if not self.kwargs['addon_pk']: # If we don't have an addon object, set it as None on the instance # and return immediately, that's fine. self.addon_object = None return else: # AddonViewSet.get_lookup_field() expects a string. self.kwargs['addon_pk'] = force_text(self.kwargs['addon_pk']) # When loading the add-on, pass a specific permission class - the # default from AddonViewSet is too restrictive, we are not modifying # the add-on itself so we don't need all the permission checks it does. return super(RatingViewSet, self).get_addon_object( permission_classes=[AllowIfPublic]) def check_permissions(self, request): """Perform permission checks. 
The regular DRF permissions checks are made, but also, before that, if an addon was requested, verify that it exists, is public and listed, through AllowIfPublic permission, that get_addon_object() uses.""" self.get_addon_object() # Proceed with the regular permission checks. return super(RatingViewSet, self).check_permissions(request) def get_serializer(self, *args, **kwargs): if self.action in ('partial_update', 'update'): instance = args[0] if instance.reply_to is not None: self.rating_object = instance.reply_to self.serializer_class = self.reply_serializer_class return super(RatingViewSet, self).get_serializer(*args, **kwargs) def filter_queryset(self, qs): if self.action == 'list': addon_identifier = self.request.GET.get('addon') user_identifier = self.request.GET.get('user') version_identifier = self.request.GET.get('version') if addon_identifier: qs = qs.filter(addon=self.get_addon_object()) if user_identifier: try: user_identifier = int(user_identifier) except ValueError: raise ParseError('user parameter should be an integer.') qs = qs.filter(user=user_identifier) if version_identifier: try: version_identifier = int(version_identifier) except ValueError: raise ParseError('version parameter should be an integer.') qs = qs.filter(version=version_identifier) elif addon_identifier: # When filtering on addon but not on version, only return the # latest rating posted by each user. qs = qs.filter(is_latest=True) if not addon_identifier and not user_identifier: # Don't allow listing ratings without filtering by add-on or # user. raise ParseError('Need an addon or user parameter') if user_identifier and addon_identifier and version_identifier: # When user, addon and version identifiers are set, we are # effectively only looking for one or zero objects. Fake # pagination in that case, avoiding all count() calls and # therefore related cache-machine invalidation issues. 
Needed # because the frontend wants to call this before and after # having posted a new rating, and needs accurate results. self.pagination_class = OneOrZeroPageNumberPagination return super(RatingViewSet, self).filter_queryset(qs) def get_paginated_response(self, data): response = super(RatingViewSet, self).get_paginated_response(data) if 'show_grouped_ratings' in self.request.GET: try: show_grouped_ratings = ( serializers.BooleanField().to_internal_value( self.request.GET['show_grouped_ratings'])) except serializers.ValidationError: raise ParseError( 'show_grouped_ratings parameter should be a boolean') if show_grouped_ratings and self.get_addon_object(): response.data['grouped_ratings'] = dict(GroupedRating.get( self.addon_object.id)) return response def get_queryset(self): requested = self.request.GET.get('filter', '').split(',') has_addons_edit = acl.action_allowed(self.request, amo.permissions.ADDONS_EDIT) # Add this as a property of
import warnings

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, xavier_init
from mmcv.runner import auto_fp16, BaseModule
from timm.models.layers import DropPath, to_2tuple, trunc_normal_

from ..builder import NECKS


class GroupAttention(BaseModule):
    """Windowed (grouped) multi-head self-attention over non-overlapping
    ws x ws windows of a (B, N, C) token sequence laid out as an H x W map."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
                 attn_drop=0., proj_drop=0., ws=1, init_cfg=None):
        """
        ws 1 for stand attention
        """
        super(GroupAttention, self).__init__(init_cfg)
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Standard scaled-dot-product scale unless overridden.
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.ws = ws

    @auto_fp16()
    def forward(self, x, H, W):
        # x: (B, N, C) with N == H * W (established by the view() below).
        B, N, C = x.shape
        x = x.view(B, H, W, C)
        # Right/bottom pad so H and W become multiples of the window size.
        pad_l = pad_t = 0
        pad_r = (self.ws - W % self.ws) % self.ws
        pad_b = (self.ws - H % self.ws) % self.ws
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, Hp, Wp, _ = x.shape
        _h, _w = Hp // self.ws, Wp // self.ws
        # Partition into (_h * _w) windows of ws*ws tokens each.
        x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3)
        qkv = self.qkv(x).reshape(
            B, _h * _w, self.ws * self.ws, 3, self.num_heads,
            C // self.num_heads).permute(3, 0, 1, 4, 2, 5)
        q, k, v = qkv[0], qkv[1], qkv[2]
        # Attention is computed independently within each window.
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        attn = (attn @ v).transpose(2, 3).reshape(
            B, _h, _w, self.ws, self.ws, C)
        # Un-partition back to a (B, Hp, Wp, C) map.
        x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C)
        if pad_r > 0 or pad_b > 0:
            # Crop the padding back off.
            x = x[:, :H, :W, :].contiguous()
        x = x.reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Attention(BaseModule):
    """Global multi-head self-attention with optional spatial reduction of
    keys/values by a strided conv (sr_ratio > 1), as in PVT/Twins."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
                 attn_drop=0., proj_drop=0., sr_ratio=1, init_cfg=None):
        super().__init__(init_cfg)
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # Queries come from the full-resolution tokens; keys/values may be
        # spatially reduced below.
        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)

    @auto_fp16()
    def forward(self, x, H, W):
        B, N, C = x.shape
        q = self.q(x).reshape(B, N, self.num_heads,
                              C // self.num_heads).permute(0, 2, 1, 3)

        if self.sr_ratio > 1:
            # Downsample the key/value tokens by sr_ratio before attention.
            x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
            x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)  #conv, maybe it can be replaced by pooling
            x_ = self.norm(x_)
            kv = self.kv(x_).reshape(B, -1, 2, self.num_heads,
                                     C // self.num_heads).permute(2, 0, 3, 1, 4)
        else:
            kv = self.kv(x).reshape(B, -1, 2, self.num_heads,
                                    C // self.num_heads).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        out = (attn @ v).transpose(1, 2).reshape(B, N, C)
        out = self.proj(out)
        out = self.proj_drop(out)
        return out


class Cross_Attention(BaseModule):
    """Cross-attention: queries from x, keys/values from a second sequence y
    (optionally spatially reduced by a strided conv when sr_ratio > 1)."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
                 attn_drop=0., proj_drop=0., sr_ratio=1, init_cfg=None):
        super().__init__(init_cfg)
        assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."

        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.q = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)

    @auto_fp16()
    def forward(self, x, H, W, y, H1, W1):
        # x: query tokens (B, N, C) for an H x W map; y: key/value tokens
        # (B1, N1, C1) for an H1 x W1 map. NOTE(review): H and W are unused
        # here — only y's spatial size matters for the reduction.
        B, N, C = x.shape
        B1, N1, C1 = y.shape
        q = self.q(x).reshape(B, N, self.num_heads,
                              C // self.num_heads).permute(0, 2, 1, 3)

        if self.sr_ratio > 1:
            y_ = y.permute(0, 2, 1).reshape(B1, C1, H1, W1)
            y_ = self.sr(y_).reshape(B1, C1, -1).permute(0, 2, 1)
            y_ = self.norm(y_)
            kv = self.kv(y_).reshape(B1, -1, 2, self.num_heads,
                                     C1 // self.num_heads).permute(2, 0, 3, 1, 4)
        else:
            kv = self.kv(y).reshape(B1, -1, 2, self.num_heads,
                                    C1 // self.num_heads).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]

        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        out = (attn @ v).transpose(1, 2).reshape(B, N, C)
        out = self.proj(out)
        out = self.proj_drop(out)
        return out


class self_attn(BaseModule):
    """Local (windowed) then global self-attention applied to a (B, C, H, W)
    feature map, with pre-LayerNorm and residual connections.

    NOTE(review): class name is lower_snake_case (not PEP 8 PascalCase) —
    kept as-is since external code may reference it.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
                 drop_path=0., attn_drop=0., proj_drop=0., ws=1,
                 sr_ratio=1.0, init_cfg=None):
        super(self_attn, self).__init__(init_cfg)
        self.dim = dim
        self.num_heads = num_heads
        self.qkv_bias = qkv_bias
        self.qk_scale = qk_scale
        self.attn_drop = attn_drop
        self.proj_drop = proj_drop
        self.ws = ws
        self.sr_ratio = sr_ratio
        # Window attention followed by (spatially reduced) global attention.
        self.group_attn = GroupAttention(dim=self.dim,
                                         num_heads=self.num_heads,
                                         ws = self.ws)
        self.global_attn = Attention(dim=self.dim,
                                     num_heads=self.num_heads,
                                     sr_ratio=self.sr_ratio)  # self-attention
        self.layernorm1 = nn.LayerNorm(dim)
        self.layernorm2 = nn.LayerNorm(dim)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    @auto_fp16()
    def forward(self, x):
        B, C, H, W = x.size()
        # Flatten the map to a token sequence for the attention modules.
        x1 = x.reshape(B, C, -1).permute(0, 2, 1).contiguous()  # (B, H*W, C)
        x1 = x1 + self.drop_path(self.group_attn(self.layernorm1(x1), H, W))
        x1 = x1 + self.drop_path(self.global_attn(self.layernorm2(x1), H, W))
        x1 = x1.permute(0, 2, 1).reshape(B, C, H, W).contiguous()  # (B,C,H,W)
        return x1


class high2low_attn(BaseModule):
    """Enhance a low-level feature map with window self-attention plus
    cross-attention toward a high-level map. Returns the enhanced low map."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None,
                 attn_drop=0., proj_drop=0., ws=1, sr_ratio=1.0,
                 init_cfg=None):
        super(high2low_attn, self).__init__(init_cfg)
        self.dim = dim
        self.num_heads = num_heads
        self.qkv_bias = qkv_bias
        self.qk_scale = qk_scale
        self.attn_drop = attn_drop
        self.proj_drop = proj_drop
        self.ws = ws
        self.sr_ratio = sr_ratio
        # attention operation
        self.group_attn = GroupAttention(dim=self.dim,
                                         num_heads=self.num_heads,
                                         ws=self.ws)
        self.global_attn = Cross_Attention(dim=self.dim,
                                           num_heads=self.num_heads,
                                           sr_ratio=self.sr_ratio)  # cross_attention
        self.layernorm1 = nn.LayerNorm(dim)
        self.layernorm2 = nn.LayerNorm(dim)
        self.layernorm3 = nn.LayerNorm(dim)

    @auto_fp16()
    def forward(self, x_low, x_high):
        B, C, H, W = x_low.size()
        B1, C1, H1, W1 = x_high.size()
        x_low1 = x_low.reshape(B, C, -1).permute(0, 2, 1).contiguous()  # (B, H*W, C)
        x_high1 = x_high.reshape(B1, C1, -1).permute(0, 2, 1).contiguous()
        # Residual window self-attention, then residual cross-attention with
        # the high-level tokens as keys/values.
        x_low1 = x_low1 + self.group_attn(self.layernorm1(x_low1), H, W)
        x_low1 = x_low1 + self.global_attn(self.layernorm2(x_low1), H, W,
                                           self.layernorm3(x_high1), H1, W1)
        x_low1 = x_low1.permute(0, 2, 1).reshape(B, C, H, W).contiguous()  # (B,C,H,W)
        # NOTE(review): x_high1 is reshaped back but never returned — this
        # assignment is effectively dead; only x_low1 leaves the module.
        x_high1 = x_high1.permute(0, 2, 1).reshape(B1, C1, H1, W1).contiguous()  # (B,C,H,W)
        return x_low1


class low2high_attn(BaseModule):
    """Fuse a low-level map into a high-level one via cross-scale coordinate
    attention, a 1x1 channel-reduction conv, BN and ReLU."""

    def __init__(self, channels_high, channels_low, ratio, init_cfg=None):
        super(low2high_attn, self).__init__(init_cfg)
        self.conv1x1 = nn.Conv2d(channels_low, channels_high, kernel_size=1,
                                 stride=1, padding=0, bias=False)
        self.bn_reduction = nn.BatchNorm2d(channels_high)
        self.relu = nn.ReLU(inplace=True)
        self.coorattention = cross_scale_CoordAtt(channels_low, channels_low,
                                                  ratio)

    def forward(self, x_low, x_high):
        # NOTE(review): unlike the sibling attention modules this forward is
        # not wrapped in @auto_fp16() — confirm whether that is intentional.
        x_att = self.coorattention(x_low, x_high)
        out = self.relu(self.bn_reduction(self.conv1x1(x_high + x_att)))
        return out


# coordAttention !!!
class h_sigmoid(BaseModule):
    """Hard sigmoid: relu6(x + 3) / 6."""

    def __init__(self, inplace=True, init_cfg=None):
        super(h_sigmoid, self).__init__(init_cfg)
        self.relu = nn.ReLU6(inplace=inplace)

    def forward(self, x):
        return self.relu(x + 3) / 6


class h_swish(BaseModule):
    """Hard swish: x * hard_sigmoid(x)."""

    def __init__(self, inplace=True, init_cfg=None):
        super(h_swish, self).__init__(init_cfg)
        self.sigmoid = h_sigmoid(inplace=inplace)

    def forward(self, x):
        return x * self.sigmoid(x)


class cross_scale_CoordAtt(BaseModule):
    """Coordinate attention computed from a low-resolution map and applied to
    a high-resolution one.

    `ratio` selects how many stride-2 convs (1, 2 or 3) downsample the pooled
    descriptors.
    """

    def __init__(self, inp, oup, ratio, reduction=32, init_cfg=None):
        super(cross_scale_CoordAtt, self).__init__(init_cfg)
        # Directional pooling: per-row (h) and per-column (w) descriptors.
        self.pool_h = nn.AdaptiveAvgPool2d((None, 1))
        self.pool_w = nn.AdaptiveAvgPool2d((1, None))
        self.ratio = ratio
        mip = max(8, inp // reduction)
        if self.ratio == 2:
            self.conv1 = nn.Conv2d(inp, mip, kernel_size=3, stride=2, padding=1)  # change k=1,s=1
        elif self.ratio == 4:
            self.conv1 = nn.Sequential(
                nn.Conv2d(inp, mip, kernel_size=3, stride=2, padding=1, bias=False),
                nn.Conv2d(mip, mip, kernel_size=3, stride=2, padding=1, bias=False))
        else:
            self.conv1 = nn.Sequential(
                nn.Conv2d(inp, mip, kernel_size=3, stride=2, padding=1, bias=False),
                nn.Conv2d(mip, mip, kernel_size=3, stride=2, padding=1, bias=False),
                nn.Conv2d(mip, mip, kernel_size=3, stride=2, padding=1, bias=False))
        self.bn1 = nn.BatchNorm2d(mip)
        self.act = h_swish()
        self.conv_h = nn.Conv2d(mip, oup, kernel_size=1, stride=1, padding=0)
        self.conv_w = nn.Conv2d(mip, oup, kernel_size=1, stride=1, padding=0)

    def forward(self, x_low, x_high):
        identity = x_high
        n, c, h, w = x_low.size()
        n1, c1, h1, w1 = x_high.size()
        x_h = self.pool_h(x_low)
        # Transpose so both descriptors can be concatenated along dim 2.
        x_w = self.pool_w(x_low).permute(0, 1, 3, 2)

        y = torch.cat([x_h, x_w], dim=2)
        y = self.conv1(y)
        y = self.bn1(y)
        y = self.act(y)
        # NOTE(review): the split sizes assume conv1's downsampling maps
        # (h + w) exactly onto (h1 + w1) — i.e. that `ratio` matches the
        # scale gap between x_low and x_high. TODO confirm at call sites.
        x_h, x_w = torch.split(y, [h1, w1], dim=2)
        x_w = x_w.permute(0, 1, 3, 2)

        a_h = self.conv_h(x_h).sigmoid()
        a_w = self.conv_w(x_w).sigmoid()
        out = identity * a_w * a_h
        return out


def add_conv(in_ch, out_ch, ksize, stride, leaky=True):
    """
    Add a conv2d / batchnorm / leaky ReLU block.
    Args:
        in_ch (int): number of input channels of the convolution layer.
        out_ch (int): number of output channels of the convolution layer.
        ksize (int): kernel size of the convolution layer.
        stride (int): stride of the convolution layer.
    Returns:
        stage (Sequential) : Sequential layers composing a convolution block.
    """
    stage = nn.Sequential()
    # 'Same' padding for odd kernel sizes.
    pad = (ksize - 1) // 2
    stage.add_module('conv', nn.Conv2d(in_channels=in_ch,
                                       out_channels=out_ch,
                                       kernel_size=ksize,
                                       stride=stride,
                                       padding=pad,
                                       bias=False))
    stage.add_module('batch_norm', nn.BatchNorm2d(out_ch))
    if leaky:
        stage.add_module('leaky', nn.LeakyReLU(0.1))
    else:
        stage.add_module('relu6', nn.ReLU6(inplace=True))
    return stage


# adaptive scale feature fusion
class ASFF(BaseModule):
    # NOTE(review): this class is truncated by the chunk boundary below.
    def __init__(self, rfb=False, vis=False, init_cfg=None):
        super(ASFF, self).__init__(init_cfg)
        self.dim = 256
        self.inter_dim = self.dim
        compress_c = 8 if rfb else 16  #when adding rfb, we use half number of channels to save memory
<filename>threeML/utils/data_download/Fermi_LAT/download_LAT_data.py
from __future__ import print_function

import glob
import html.parser
import os
import re
import socket
import time
import urllib.error
import urllib.parse
import urllib.request
from builtins import str
from pathlib import Path

import astropy.io.fits as pyfits

from threeML.config.config import threeML_config
from threeML.exceptions.custom_exceptions import TimeTypeNotKnown
from threeML.io.download_from_http import ApacheDirectory
from threeML.io.file_utils import sanitize_filename
from threeML.io.logging import setup_logger
from threeML.utils.unique_deterministic_tag import get_unique_deterministic_tag

log = setup_logger(__name__)

# Set default timeout for operations
socket.setdefaulttimeout(120)


class DivParser(html.parser.HTMLParser):
    """
    Extract data from a <div></div> tag
    """

    def __init__(self, desiredDivName):
        html.parser.HTMLParser.__init__(self)

        # Nesting depth counter: > 0 while inside the target div.
        self.recording = 0
        # Text fragments collected while recording.
        self.data = []
        self.desiredDivName = desiredDivName

    def handle_starttag(self, tag, attributes):
        if tag != "div":
            return
        if self.recording:
            # Nested div inside the target: just track depth.
            self.recording += 1
            return
        # for/else: start recording only if this div has the desired id.
        for name, value in attributes:
            if name == "id" and value == self.desiredDivName:
                break
        else:
            return
        self.recording = 1

    def handle_endtag(self, tag):
        if tag == "div" and self.recording:
            self.recording -= 1

    def handle_data(self, data):
        if self.recording:
            self.data.append(data)


# Keyword name to store the unique ID for the download
_uid_fits_keyword = "QUERYUID"


def merge_LAT_data(ft1s,
                   destination_directory: str = ".",
                   outfile: str = 'ft1_merged.fits',
                   Emin: float = 30.0,
                   Emax: float = 1e6) -> Path:
    """Merge several FT1 event files into one with gtselect, applying the
    given energy cut. Returns the merged file path; reuses an existing one."""

    outfile: Path = Path(destination_directory) / outfile

    if outfile.exists():
        log.warning(
            f"Existing merged event file {outfile} correspond to the same selection. "
            "We assume you did not tamper with it, so we will return it instead of merging it again. "
            "If you want to redo the FT1 file again, remove it from the outdir"
        )
        return outfile

    if len(ft1s) == 1:
        # Nothing to merge: just copy the single file to the destination.
        log.warning('Only one FT1 file provided. Skipping the merge...')
        import shutil
        shutil.copyfile(ft1s[0], outfile)
        return outfile

    # gtselect reads the input list from a text file ('@' prefix below).
    _filelist = "_filelist.txt"
    infile: Path = Path(destination_directory) / _filelist
    # NOTE(review): the list file is opened without a `with` block; a `with`
    # would be safer but this is left untouched here.
    infile_list = infile.open('w')
    for ft1 in ft1s:
        infile_list.write(str(ft1) + '\n')
    infile_list.close()

    # Imported lazily: GtApp is part of the Fermi Science Tools, which may
    # not be installed when this module is merely imported.
    from GtApp import GtApp
    gtselect = GtApp('gtselect')
    gtselect['infile'] = '@' + str(infile)
    gtselect['outfile'] = str(outfile)
    # INDEF leaves the spatial/temporal selection untouched; only energy cuts
    # are applied here.
    gtselect['ra'] = 'INDEF'
    gtselect['dec'] = 'INDEF'
    gtselect['rad'] = 'INDEF'
    gtselect['tmin'] = 'INDEF'
    gtselect['tmax'] = 'INDEF'
    gtselect['emin'] = '%.3f' % Emin
    gtselect['emax'] = '%.3f' % Emax
    gtselect['zmax'] = 180
    gtselect.run()
    return outfile


def download_LAT_data(
        ra: float,
        dec: float,
        radius: float,
        tstart: float,
        tstop: float,
        time_type: str,
        data_type: str = "Photon",
        destination_directory: str = ".",
        Emin: float = 30.,
        Emax: float = 1000000.
) -> Path:
    """
    Download data from the public LAT data server (of course you need a working
    internet connection). Data are selected in a circular Region of Interest
    (cone) centered on the provided coordinates.

    Example:

    ```
    > download_LAT_data(195.6, -35.4, 12.0, '2008-09-16 01:00:23',
    '2008-09-18 01:00:23', time_type='Gregorian',
    destination_directory='my_new_data')
    ```

    :param ra: R.A. (J2000) of the center of the ROI
    :param dec: Dec. (J2000) of the center of the ROI
    :param radius: radius (in degree) of the center of the ROI (use a larger
        radius than what you will need in the analysis)
    :param tstart: start time for the data
    :param tstop: stop time for the data
    :param time_type: type of the time input (one of MET, Gregorian or MJD)
    :param data_type: type of data to download. Use Photon if you use Source
        or cleaner classes, Extended otherwise. Default is Photon.
    :param destination_directory: directory where you want to save the data
        (default: current directory)
    :param Emin: minimum photon energy (in MeV) to download (default: 30 MeV,
        must be between 30 and 1e6 MeV)
    :param Emax: maximum photon energy (in MeV) to download (default: 1e6 MeV,
        must be betwen 30 and 1e6 MeV )
    :return: the path to the downloaded FT1 and FT2 file
    """
    # --- Input validation -------------------------------------------------
    _known_time_types = ["MET", "Gregorian", "MJD"]

    if time_type not in _known_time_types:
        out = ",".join(_known_time_types)
        log.error(f"Time type must be one of {out}")
        raise TimeTypeNotKnown()

    valid_classes = ["Photon", "Extended"]
    if data_type not in valid_classes:
        out = ",".join(valid_classes)
        log.error(f"Data type must be one of {out}")
        raise TypeError()

    if radius <= 0:
        log.error("Radius of the Region of Interest must be > 0")
        raise ValueError()

    if not (0 <= ra <= 360.0):
        log.error("R.A. must be 0 <= ra <= 360")
        raise ValueError()

    if not -90 <= dec <= 90:
        log.error("Dec. must be -90 <= dec <= 90")
        raise ValueError()

    # Clamp the energy range to what the LAT server actually provides.
    fermiEmin = 30
    fermiEmax = 1e6

    if Emin < fermiEmin:
        log.warning(
            f"Setting Emin from {Emin} to 30 MeV (minimum available energy for Fermi-LAT data)"
        )
        Emin = fermiEmin

    if Emin > fermiEmax:
        log.warning(
            f"Setting Emin from {Emin} to 1 TeV (maximum available energy for Fermi-LAT data)"
        )
        Emin = fermiEmax

    if Emax < fermiEmin:
        log.warning(
            f"Setting Emax from {Emax} to 30 MeV (minimum available energy for Fermi-LAT data)"
        )
        Emax = fermiEmin

    if Emax > fermiEmax:
        log.warning(
            f"Setting Emax from {Emax} to 1 TeV (maximum available energy for Fermi-LAT data)"
        )
        Emax = fermiEmax

    if Emin >= Emax:
        log.error(
            f"Minimum energy ({Emin}) must be less than maximum energy ({Emax}) for download."
        )
        raise ValueError()

    # create output directory if it does not exists
    destination_directory = sanitize_filename(
        destination_directory, abspath=True)

    if not destination_directory.exists():
        destination_directory.mkdir()

    # This will complete automatically the form available at
    # http://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/LATDataQuery.cgi
    # After submitting the form, an html page will inform about
    # the identifier assigned to the query and the time which will be
    # needed to process it. After retrieving the query number,
    # this function will wait for the files to be completed on the server,
    # then it will download them

    url: str = threeML_config.LAT.query_form

    # Save parameters for the query in a dictionary
    query_parameters = {}
    query_parameters["coordfield"] = "%.4f,%.4f" % (ra, dec)
    query_parameters["coordsystem"] = "J2000"
    query_parameters["shapefield"] = "%s" % radius
    query_parameters["timefield"] = "%s,%s" % (tstart, tstop)
    query_parameters["timetype"] = "%s" % time_type
    query_parameters["energyfield"] = "%.3f,%.3f" % (Emin, Emax)
    query_parameters["photonOrExtendedOrNone"] = data_type
    query_parameters["destination"] = "query"
    query_parameters["spacecraft"] = "checked"

    # Print them out
    log.info("Query parameters:")
    for k, v in query_parameters.items():
        log.info("%30s = %s" % (k, v))

    # Compute a unique ID for this query
    query_unique_id = get_unique_deterministic_tag(str(query_parameters))
    log.info("Query ID: %s" % query_unique_id)

    # Look if there are FT1 and FT2 files in the output directory matching this
    # unique ID
    ft1s = [x for x in destination_directory.glob("*PH??.fits")]
    ft2s = [x for x in destination_directory.glob("*SC??.fits")]

    # Loop over all ft1s and see if there is any matching the uid
    prev_downloaded_ft1s = []
    prev_downloaded_ft2 = None

    for ft1 in ft1s:
        with pyfits.open(ft1) as f:
            this_query_uid = f[0].header.get(_uid_fits_keyword)
        if this_query_uid == query_unique_id:
            # Found one! Append to the list as there might be others
            prev_downloaded_ft1s.append(ft1)
            # break
            pass

    if len(prev_downloaded_ft1s) > 0:
        for ft2 in ft2s:
            with pyfits.open(ft2) as f:
                this_query_uid = f[0].header.get(_uid_fits_keyword)
            if this_query_uid == query_unique_id:
                # Found one! (FT2 is a single file)
                prev_downloaded_ft2 = ft2
                break
    else:
        # No need to look any further, if there is no FT1 file there shouldn't
        # be any FT2 file either
        pass

    # If we have both FT1 and FT2 matching the ID, we do not need to download
    # anymore
    if len(prev_downloaded_ft1s) > 0 and prev_downloaded_ft2 is not None:
        log.warning(
            f"Existing event file {prev_downloaded_ft1s} and Spacecraft file {prev_downloaded_ft2} correspond to the same selection. "
            "We assume you did not tamper with them, so we will return those instead of downloading them again. "
            "If you want to download them again, remove them from the outdir"
        )
        return (
            merge_LAT_data(
                prev_downloaded_ft1s,
                destination_directory,
                outfile="L%s_FT1.fits" % query_unique_id,
                Emin = Emin,
                Emax = Emax
            ),
            prev_downloaded_ft2,
        )

    # POST encoding
    postData = urllib.parse.urlencode(query_parameters).encode("utf-8")
    temporaryFileName = "__temp_query_result.html"

    # Remove temp file if present
    # NOTE(review): bare `except: pass` — deliberate best-effort cleanup, but
    # it would also swallow e.g. PermissionError; consider narrowing to
    # OSError in a behavior-changing pass.
    try:
        os.remove(temporaryFileName)
    except:
        pass

    # This is to avoid caching
    urllib.request.urlcleanup()

    # Get the form compiled
    try:
        urllib.request.urlretrieve(
            url, temporaryFileName, lambda x, y, z: 0, postData)
    except socket.timeout:
        log.error(
            "Time out when connecting to the server. Check your internet connection, or that the "
            f"form at {url} is accessible, then retry"
        )
        raise RuntimeError()
    except Exception as e:
        log.error(e)
        log.exception("Problems with the download. Check your internet connection, or that the "
                      f"form at {url} is accessible, then retry")
        raise RuntimeError()

    # Now open the file, parse it and get the query ID
    with open(temporaryFileName) as htmlFile:
        lines = []
        for line in htmlFile:
            # lines.append(line.encode('utf-8'))
            lines.append(line)
        # NOTE(review): this local `html` shadows the imported stdlib `html`
        # module for the rest of the function — harmless here since the module
        # is only needed for DivParser's base class, but worth renaming later.
        html = " ".join(lines).strip()

    os.remove(temporaryFileName)

    # Extract data from the response
    # NOTE(review): this function is truncated by the chunk boundary below.
    parser = DivParser("sec-wrapper")
import math
import operator
from functools import reduce

import numpy as np
import gym
from gym import error, spaces, utils

from .minigrid import OBJECT_TO_IDX, COLOR_TO_IDX, STATE_TO_IDX


class ReseedWrapper(gym.core.Wrapper):
    """
    Wrapper to always regenerate an environment with the same set of
    seeds. This can be used to force an environment to always keep the
    same configuration when reset.
    """

    def __init__(self, env, seeds=[0], seed_idx=0):
        # NOTE(review): mutable default `seeds=[0]` — safe here only because
        # it is copied via list() below and never mutated.
        self.seeds = list(seeds)
        self.seed_idx = seed_idx
        super().__init__(env)

    def reset(self, **kwargs):
        # Cycle through the seed list, one seed per reset.
        seed = self.seeds[self.seed_idx]
        self.seed_idx = (self.seed_idx + 1) % len(self.seeds)
        self.env.seed(seed)
        return self.env.reset(**kwargs)

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        return obs, reward, done, info


class ActionBonus(gym.core.Wrapper):
    """
    Wrapper which adds an exploration bonus.
    This is a reward to encourage exploration of less
    visited (state,action) pairs.
    """

    def __init__(self, env):
        super().__init__(env)
        # (agent_pos, agent_dir, action) -> visit count
        self.counts = {}

    def step(self, action):
        obs, reward, done, info = self.env.step(action)

        env = self.unwrapped
        tup = (tuple(env.agent_pos), env.agent_dir, action)

        # Get the count for this (s,a) pair
        pre_count = 0
        if tup in self.counts:
            pre_count = self.counts[tup]

        # Update the count for this (s,a) pair
        new_count = pre_count + 1
        self.counts[tup] = new_count

        # Bonus decays as 1/sqrt(visit count).
        bonus = 1 / math.sqrt(new_count)
        reward += bonus

        return obs, reward, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)


class StateBonus(gym.core.Wrapper):
    """
    Adds an exploration bonus based on which positions
    are visited on the grid.
    """

    def __init__(self, env):
        super().__init__(env)
        # position tuple -> visit count
        self.counts = {}

    def step(self, action):
        obs, reward, done, info = self.env.step(action)

        # Tuple based on which we index the counts
        # We use the position after an update
        env = self.unwrapped
        # NOTE(review): the outer parentheses do NOT make a nested tuple —
        # tup is simply the (x, y) position tuple.
        tup = (tuple(env.agent_pos))

        # Get the count for this key
        pre_count = 0
        if tup in self.counts:
            pre_count = self.counts[tup]

        # Update the count for this key
        new_count = pre_count + 1
        self.counts[tup] = new_count

        bonus = 1 / math.sqrt(new_count)
        reward += bonus

        return obs, reward, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)


class ConstantReward(gym.core.Wrapper):
    """
    Always give +1 reward for success, as opposed to default,
    which is reward = 1 - 0.9 * (steps / max_steps)
    """

    def __init__(self, env):
        super().__init__(env)

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        # Any positive reward (success) is flattened to exactly 1.0.
        if reward > 0:
            reward = 1.0
        return obs, reward, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)


class ImgObsWrapper(gym.core.ObservationWrapper):
    """
    Use the image as the only observation output, no language/mission.
    """

    def __init__(self, env):
        super().__init__(env)
        self.observation_space = env.observation_space.spaces['image']

    def observation(self, obs):
        return obs['image']


class OneHotPartialObsWrapper(gym.core.ObservationWrapper):
    """
    Wrapper to get a one-hot encoding of a partially observable
    agent view as observation.
    """

    def __init__(self, env, tile_size=8):
        super().__init__(env)

        self.tile_size = tile_size
        obs_shape = env.observation_space['image'].shape

        # Number of bits per cell
        num_bits = len(OBJECT_TO_IDX) + len(COLOR_TO_IDX) + len(STATE_TO_IDX)

        self.observation_space.spaces["image"] = spaces.Box(
            low=0,
            high=255,
            shape=(obs_shape[0], obs_shape[1], num_bits),
            dtype='uint8'
        )

    def observation(self, obs):
        img = obs['image']
        out = np.zeros(self.observation_space.spaces['image'].shape,
                       dtype='uint8')

        # One-hot the (object type, color, state) triple of every cell.
        for i in range(img.shape[0]):
            for j in range(img.shape[1]):
                # NOTE(review): `type` shadows the builtin; left as-is.
                type = img[i, j, 0]
                color = img[i, j, 1]
                state = img[i, j, 2]

                out[i, j, type] = 1
                out[i, j, len(OBJECT_TO_IDX) + color] = 1
                out[i, j, len(OBJECT_TO_IDX) + len(COLOR_TO_IDX) + state] = 1

        return {
            'mission': obs['mission'],
            'image': out
        }


class CategoricalObsWrapper(gym.core.ObservationWrapper):
    """
    Wrapper to get a [7, 7, 1] observation image with a single categorical
    variable per cell.
    """

    # Known (type, color, state) encodings, grouped by object family; the
    # categorical id of a cell is its row index in the concatenation below.
    POSSIBLE_OBJECTS = {
        'basic': np.array([
            [0, 0, 0],   # Hidden
            [1, 0, 0],   # Empty
            [2, 5, 0],   # Wall
            [8, 1, 0],   # Goal
        ]),
        'door': np.array([
            [4, 0, 0],   # Door(color, state)
            [4, 0, 1],
            [4, 1, 0],
            [4, 1, 1],
            [4, 2, 0],
            [4, 2, 1],
            [4, 3, 0],
            [4, 3, 1],
            [4, 4, 0],
            [4, 4, 1],
            [4, 5, 0],
            [4, 5, 1],
        ]),
        'key': np.array([
            [5, 0, 0],   # Key(color)
            [5, 1, 0],
            [5, 2, 0],
            [5, 3, 0],
            [5, 4, 0],
            [5, 5, 0],
        ]),
        'ball': np.array([
            [6, 0, 0],   # Ball(color)
            [6, 1, 0],
            [6, 2, 0],
            [6, 3, 0],
            [6, 4, 0],
            [6, 5, 0],
        ]),
        'box': np.array([
            [7, 0, 0],   # Box (color)
            [7, 1, 0],
            [7, 2, 0],
            [7, 3, 0],
            [7, 4, 0],
            [7, 5, 0],
        ]),
        'lava': np.array([
            [9, 0, 0],   # Lava
        ]),
        'agent': np.array([
            [10, 0, 0],  # Agent(direction)
            [10, 0, 1],
            [10, 0, 2],
            [10, 0, 3],
        ]),
        # Not used: Floor => [3, *, 0]
    }

    def __init__(self, env, no_mission=False, restrict_types=None):
        super().__init__(env)
        if not restrict_types:
            restrict_types = list(CategoricalObsWrapper.POSSIBLE_OBJECTS.keys())
        self.possible_objects = np.concatenate(
            [CategoricalObsWrapper.POSSIBLE_OBJECTS[key]
             for key in restrict_types]
        )
        self.n_categories = len(self.possible_objects)
        obs_shape = env.observation_space['image'].shape
        self.observation_space.spaces["image"] = spaces.Box(
            low=0,
            high=self.n_categories - 1,
            shape=(obs_shape[0], obs_shape[1], 1),
            dtype='uint8'
        )
        self.no_mission = no_mission

    def observation(self, obs):
        img = obs.pop('image')
        if self.no_mission and 'mission' in obs:
            del obs['mission']

        # Make one-hot
        n = self.n_categories
        out = np.zeros(img.shape[:-1] + (n,), dtype=bool)  # (7, 7, 35)
        for i in range(n):
            val = self.possible_objects[i]
            # A cell matches category i iff its whole triple equals val.
            out[..., i] = (img == val).all(axis=-1)

        # One-hot to categorical
        out = out.argmax(axis=-1).astype(np.uint8)  # (7, 7, 33) => (7, 7)
        out = np.expand_dims(out, -1)  # (7, 7) => (7, 7, 1)
        return dict(image=out, **obs)


class RGBImgObsWrapper(gym.core.ObservationWrapper):
    """
    Wrapper to use fully observable RGB image as the only observation output,
    no language/mission. This can be used to have the agent to solve the
    gridworld in pixel space.
    """

    def __init__(self, env, tile_size=8):
        super().__init__(env)

        self.tile_size = tile_size

        self.observation_space.spaces['image'] = spaces.Box(
            low=0,
            high=255,
            shape=(self.env.width * tile_size,
                   self.env.height * tile_size, 3),
            dtype='uint8'
        )

    def observation(self, obs):
        env = self.unwrapped

        # Full-grid render without the agent-view highlight overlay.
        rgb_img = env.render(
            mode='rgb_array',
            highlight=False,
            tile_size=self.tile_size
        )

        return {
            'mission': obs['mission'],
            'image': rgb_img
        }


class RGBImgPartialObsWrapper(gym.core.ObservationWrapper):
    """
    Wrapper to use partially observable RGB image as the only observation
    output. This can be used to have the agent to solve the gridworld in
    pixel space.
    """

    def __init__(self, env, tile_size=8):
        super().__init__(env)

        self.tile_size = tile_size

        obs_shape = env.observation_space.spaces['image'].shape
        self.observation_space.spaces['image'] = spaces.Box(
            low=0,
            high=255,
            shape=(obs_shape[0] * tile_size, obs_shape[1] * tile_size, 3),
            dtype='uint8'
        )

    def observation(self, obs):
        env = self.unwrapped

        # Render only the agent's egocentric view as pixels.
        rgb_img_partial = env.get_obs_render(
            obs['image'],
            tile_size=self.tile_size
        )

        return {
            'mission': obs['mission'],
            'image': rgb_img_partial
        }


class FullyObsWrapper(gym.core.ObservationWrapper):
    """
    Fully observable gridworld using a compact grid encoding
    """

    def __init__(self, env):
        super().__init__(env)

        self.observation_space.spaces["image"] = spaces.Box(
            low=0,
            high=255,
            shape=(self.env.width, self.env.height, 3),  # number of cells
            dtype='uint8'
        )

    def observation(self, obs):
        env = self.unwrapped
        full_grid = env.grid.encode()
        # Overwrite the agent's cell with an explicit agent encoding.
        full_grid[env.agent_pos[0]][env.agent_pos[1]] = np.array([
            OBJECT_TO_IDX['agent'],
            COLOR_TO_IDX['red'],
            env.agent_dir
        ])

        return {
            'mission': obs['mission'],
            'image': full_grid
        }


class FlatObsWrapper(gym.core.ObservationWrapper):
    """
    Encode mission strings using a one-hot scheme,
    and combine these with observed images into one flat array
    """

    def __init__(self, env, maxStrLen=96):
        super().__init__(env)

        self.maxStrLen = maxStrLen
        # 26 letters + space
        self.numCharCodes = 27

        imgSpace = env.observation_space.spaces['image']
        imgSize = reduce(operator.mul, imgSpace.shape, 1)

        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=(imgSize + self.numCharCodes * self.maxStrLen,),
            dtype='uint8'
        )

        # Cache of the last encoded mission string and its one-hot array.
        self.cachedStr = None
        self.cachedArray = None

    def observation(self, obs):
        image = obs['image']
        mission = obs['mission']

        # Cache the last-encoded mission string
        if mission != self.cachedStr:
            assert len(mission) <= self.maxStrLen, 'mission string too long ({} chars)'.format(len(mission))
            mission = mission.lower()

            strArray = np.zeros(shape=(self.maxStrLen, self.numCharCodes),
                                dtype='float32')

            for idx, ch in enumerate(mission):
                if ch >= 'a' and ch <= 'z':
                    chNo = ord(ch) - ord('a')
                elif ch == ' ':
                    chNo = ord('z') - ord('a') + 1
                # NOTE(review): any character outside [a-z ] leaves chNo
                # unassigned (NameError on the first char, or a stale value
                # afterwards) — the mission vocabulary is assumed to be
                # lowercase letters and spaces only; verify upstream.
                assert chNo < self.numCharCodes, '%s : %d' % (ch, chNo)
                strArray[idx, chNo] = 1

            self.cachedStr = mission
            self.cachedArray = strArray

        obs = np.concatenate((image.flatten(), self.cachedArray.flatten()))
        return obs


class ViewSizeWrapper(gym.core.Wrapper):
    """
    Wrapper to customize the agent field of view size.
    This cannot be used with fully observable wrappers.
    """

    def __init__(self, env, agent_view_size=7):
        super().__init__(env)

        # View must be odd-sized (agent-centered) and at least 3 cells.
        assert agent_view_size % 2 == 1
        assert agent_view_size >= 3

        # Override default view size
        env.unwrapped.agent_view_size = agent_view_size

        # Compute observation space with specified view size
        observation_space = gym.spaces.Box(
            low=0,
            high=255,
            shape=(agent_view_size, agent_view_size, 3),
            dtype='uint8'
        )

        # Override the environment's observation space
        self.observation_space = spaces.Dict({
            'image': observation_space
        })

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)

    def step(self, action):
        return self.env.step(action)


# NOTE(review): mid-file import, kept in place — Goal is only needed by the
# wrapper below.
from .minigrid import Goal


class DirectionObsWrapper(gym.core.ObservationWrapper):
    """
    Provides the slope/angular direction to the goal with the observations as
    modeled by (y2 - y2 )/( x2 - x1)
    type = {slope , angle}
    """

    def __init__(self, env, type='slope'):
        super().__init__(env)
        self.goal_position = None
        # NOTE(review): parameter `type` shadows the builtin; left as-is.
        self.type = type

    def reset(self):
        # NOTE(review): this method is truncated by the chunk boundary below.
        obs = self.env.reset()
        if not self.goal_position:
            self.goal_position = [x for x, y in enumerate(self.grid.grid) if
# DISABLE SELECT PYLINT TESTS # pylint: disable=bad-continuation, no-member, broad-except, no-name-in-module # pylint: disable=arguments-differ """ ╔════════════════════════════════════════════════════╗ ║ ╔═╗╦═╗╔═╗╔═╗╦ ╦╔═╗╔╗╔╔═╗ ╔╦╗╔═╗╔╦╗╔═╗╔╗╔╔═╗╔╦╗╔═╗ ║ ║ ║ ╦╠╦╝╠═╣╠═╝╠═╣║╣ ║║║║╣ ║║║║╣ ║ ╠═╣║║║║ ║ ║║║╣ ║ ║ ╚═╝╩╚═╩ ╩╩ ╩ ╩╚═╝╝╚╝╚═╝ ╩ ╩╚═╝ ╩ ╩ ╩╝╚╝╚═╝═╩╝╚═╝ ║ ║ DECENTRALIZED EXCHANGE HUMMINGBOT CONNECTOR ║ ╚════════════════════════════════════════════════════╝ ~ forked from binance_api_order_book_data_source v1.0.0 ~ """ # STANDARD MODULES import asyncio import logging import time from collections import defaultdict from decimal import Decimal from typing import Any, Dict, List, Mapping, Optional # THIRD PARY MODULES from bidict import bidict # METANODE MODULES from metanode.graphene_metanode_client import GrapheneTrustlessClient from hummingbot.connector.exchange.graphene.graphene_constants import \ GrapheneConstants # HUMMINGBOT MODULES from hummingbot.connector.exchange.graphene.graphene_order_book import \ GrapheneOrderBook from hummingbot.core.api_throttler.async_throttler import AsyncThrottler from hummingbot.core.data_type.order_book import OrderBook from hummingbot.core.data_type.order_book_message import (OrderBookMessage, OrderBookMessageType) from hummingbot.core.data_type.order_book_tracker_data_source import \ OrderBookTrackerDataSource from hummingbot.core.event.events import TradeType from hummingbot.logger import HummingbotLogger class GrapheneAPIOrderBookDataSource(OrderBookTrackerDataSource): """ connect to metanode to get bid, ask, and market history updates """ _logger: Optional[HummingbotLogger] = None _trading_pair_symbol_map: Dict[str, Mapping[str, str]] = {} _mapping_initialization_lock = asyncio.Lock() def __init__( self, domain: str, trading_pairs: List[str], **__, ): # ~ print("GrapheneAPIOrderBookDataSource") super().__init__(trading_pairs) # ~ self._order_book_create_function = lambda: OrderBook() self._order_book_create_function = 
OrderBook self._message_queue: Dict[str, asyncio.Queue] = defaultdict(asyncio.Queue) self.domain = domain self.constants = GrapheneConstants(domain) self.metanode = GrapheneTrustlessClient(self.constants) @classmethod def logger(cls) -> HummingbotLogger: """ a classmethod for logging """ if cls._logger is None: cls._logger = logging.getLogger(__name__) return cls._logger @staticmethod async def get_last_traded_prices( domain: str, *_, **__, ) -> Dict[str, float]: """ Return a dictionary the trading_pair as key and the current price as value for each trading pair passed as parameter :param trading_pairs: list of trading pairs to get the prices for :param domain: the name of the graphene blockchain :return: Dictionary of associations between token pair and its latest price """ constants = GrapheneConstants(domain) metanode = GrapheneTrustlessClient(constants) metanode_pairs = metanode.pairs # DISCRETE SQL QUERY return {k: v["last"] for k, v in metanode_pairs.items()} @staticmethod async def get_all_mid_prices(domain: str) -> Dict[str, Decimal]: """ Returns the mid price of all trading pairs, obtaining the information from the exchange. This functionality is required by the market price strategy. 
:param domain: the name of the graphene blockchain :return: Dictionary with the trading pair as key, and the mid price as value """ constants = GrapheneConstants(domain) metanode = GrapheneTrustlessClient(constants) metanode_pairs = metanode.pairs # DISCRETE SQL QUERY ret = [] for pair in metanode_pairs: ret[pair] = Decimal((pair["book"]["asks"][0] + pair["book"]["bids"][0]) / 2) return ret @classmethod def trading_pair_symbol_map_ready(cls, domain: str): """ Checks if the mapping from exchange to client trading pairs has been initialized :param domain: the name of the graphene blockchain :return: True if the mapping has been initialized, False otherwise """ return ( domain in cls._trading_pair_symbol_map and len(cls._trading_pair_symbol_map[domain]) > 0 ) @classmethod async def trading_pair_symbol_map( cls, domain: str, **__, ): """ internal map used to translate trading pairs from and to the exchange notation In general this should not be used, instead call the methods ~ `exchange_symbol_associated_to_pair` ~ `trading_pair_associated_to_exchange_symbol` :param domain: the name of the graphene blockchain :return: bidirectional mapping for pair exchange notation and client notation """ if not cls.trading_pair_symbol_map_ready(domain=domain): async with cls._mapping_initialization_lock: # Check condition again # (could have been initialized waiting for the lock to be released) if not cls.trading_pair_symbol_map_ready(domain=domain): await cls._init_trading_pair_symbols(domain) return cls._trading_pair_symbol_map[domain] @staticmethod async def exchange_symbol_associated_to_pair( trading_pair: str, domain: str, **__, ) -> str: """ 1:1 mapping BASE-QUOTE :param trading_pair: BASE-QUOTE :param domain: the name of the graphene blockchain :return: BASE-QUOTE """ symbol_map = await GrapheneAPIOrderBookDataSource.trading_pair_symbol_map( domain=domain ) return symbol_map.inverse[trading_pair] @staticmethod async def trading_pair_associated_to_exchange_symbol( symbol: str, 
domain: str, **__, ) -> str: """ Used to translate a trading pair from exchange to client notation :param symbol: trading pair in exchange notation :param domain: the name of the graphene blockchain :return: trading pair in client notation """ symbol_map = await GrapheneAPIOrderBookDataSource.trading_pair_symbol_map( domain=domain ) return symbol_map[symbol] @staticmethod async def fetch_trading_pairs(domain: str = "peerplays") -> List[str]: """ Returns a list of all known trading pairs enabled to operate with :param domain: the name of the graphene blockchain :return: list of trading pairs in client notation """ # probably overkill... self.constants.chain.PAIRS would suffice mapping = await GrapheneAPIOrderBookDataSource.trading_pair_symbol_map( domain=domain ) return list(mapping.values()) async def get_new_order_book(self, trading_pair: str) -> OrderBook: """ Creates a local instance of the exchange order book for one pair :param trading_pair: BASE-QUOTE :return: a local copy of the current order book in the exchange """ msg = await self.get_snapshot(trading_pair) snapshot: OrderBookMessage = GrapheneOrderBook.snapshot_message_from_exchange( msg=msg, timestamp=time.time(), metadata={ "trading_pair": trading_pair, "blocktime": self.metanode.timing["blocktime"], }, ) book = self.order_book_create_function() book.apply_snapshot(snapshot.bids, snapshot.asks, snapshot.update_id) return book async def listen_for_trades( self, ev_loop: asyncio.AbstractEventLoop, output: asyncio.Queue ): """ reads the trade events queue, for each event ~ creates a trade message instance ~ adds it to the output queue :param ev_loop: the event loop the method will run in :param output: a queue to add the created trade messages """ # wait for metanode to intialize while not 0 < time.time() - self.metanode.timing["blocktime"] < 60: await self._sleep(1) continue # SQL QUERY WHILE LOOP previous_history = {pair: [] for pair in self.constants.chain.PAIRS} while True: try: metanode_pairs = 
self.metanode.pairs for pair in self.constants.chain.PAIRS: if str(previous_history[pair]) != str( metanode_pairs[pair]["history"] ): new_trades = [ i for i in metanode_pairs[pair]["history"] if i not in previous_history[pair] ] for trade in new_trades: # [unix, price, amount, trade_type, sequence] trade_msg: OrderBookMessage = ( GrapheneOrderBook.trade_message_from_exchange( { "trading_pair": pair, "trade_type": trade[3], # trade_type "trade_id": trade[4], # sequence "update_id": trade[0], # unix "price": trade[1], # price "amount": trade[2], # amount } ) ) output.put_nowait(trade_msg) previous_history = { pair: metanode_pairs[pair]["history"] for pair in self.constants.chain.PAIRS } await self._sleep(3) except asyncio.CancelledError: msg = f"asyncio.CancelledError {__name__}" self.logger().exception(msg) raise except Exception: self.logger().exception( "Unexpected error when processing public trade updates from" " exchange" ) async def listen_for_order_book_diffs( self, *_, **__, ): """ N/A """ async def listen_for_order_book_snapshots( self, ev_loop: asyncio.AbstractEventLoop, output: asyncio.Queue, ): """ This method runs continuously and requests the full order book content from the exchange every 3 seconds via SQL query to the metanode database It then creates a snapshot messages that are added to the output queue :param ev_loop: the event loop the method will run in :param output: a queue to add the created snapshot messages """ while True: try: for trading_pair in self.constants.chain.PAIRS: try: snapshot: Dict[str, Any] = await self.get_snapshot( trading_pair=trading_pair ) snapshot_timestamp: float = time.time() snapshot_msg: OrderBookMessage = ( GrapheneOrderBook.snapshot_message_from_exchange( snapshot, snapshot_timestamp, metadata={ "trading_pair": trading_pair, "blocktime": self.metanode.timing["blocktime"], }, ) ) output.put_nowait(snapshot_msg) msg = f"Saved order book snapshot for {trading_pair}" self.logger().debug(msg) except 
asyncio.CancelledError: msg = f"asyncio.CancelledError {__name__}" self.logger().exception(msg) raise except Exception: msg = ( "Unexpected error fetching order book snapshot for" f" {trading_pair}." ) self.logger().error(msg, exc_info=True) await self._sleep(5.0) await self._sleep(3) except asyncio.CancelledError: msg = f"asyncio.CancelledError {__name__}" self.logger().exception(msg) raise except Exception: self.logger().error("Unexpected error.", exc_info=True) await self._sleep(5.0) async def listen_for_subscriptions(self): """ Graphene does not use this """ async def get_snapshot( self, trading_pair: str, **__, ) -> Dict[str, Any]: """ Retrieves a copy of the full order book from the exchange, for one pair. :param trading_pair: BASE-QUOTE :param limit: the depth of the order book to retrieve :return: the response from the exchange (JSON dictionary) """ metanode = GrapheneTrustlessClient(self.constants) return metanode.pairs[trading_pair]["book"] # Discrete SQL Query @classmethod def _get_throttler_instance(cls) -> AsyncThrottler: return AsyncThrottler([]) # self.constants.RATE_LIMITS) @classmethod def trade_message_from_exchange( cls, msg: Dict[str, any], metadata: Optional[Dict] = None ): """ Creates a trade message with info from each trade event sent by the exchange :param msg: the trade event details sent by the exchange :param metadata: a dictionary with extra information to add to trade message :return: a trade message with details of the trade as provided by the exchange """ if metadata: msg.update(metadata) return OrderBookMessage( OrderBookMessageType.TRADE, { "trading_pair": msg["trading_pair"], "trade_type": ( float(TradeType.SELL.value) if msg["trade_type"] == "SELL" else float(TradeType.BUY.value) ), "trade_id": msg["trade_id"], "update_id": msg["update_id"], "price": msg["price"], "amount": msg["amount"], }, timestamp=int(time.time() * 1e-3), ) @classmethod async def _get_last_traded_price( cls, trading_pair: str, domain: str, **__, ) -> float: 
""" Return a dictionary the trading_pair as key and the current price as value for each trading pair passed as parameter :param trading_pairs: list of trading pairs to get the prices for :param domain: the name of the graphene
range(order + 1): for j in range(i + 1): XL1[k1] = square[i - j, j] k1 += 1 return (XL0, XL1) def reorder(pcfName, verbose=False): '''Use pcf files ''' # order = 5 print ('\n =============================================%\n') print (pcfName) xForward = [] yForward = [] xBackward = [] yBackward = [] pcf = open(pcfName) # One of the pcf files text = pcf.readline() while text != '': text = pcf.readline() if '*DATE' in text: print (pcf.readline()) if '*FitOrder' in text: order = int(pcf.readline()) terms = (order + 1) * (order + 2) // 2 print ('Order', order, terms, ' terms') if '*xForward' in text: text = pcf.readline() f = text.split() # Array of terms text strings for k in range(terms): xForward.append(float(f[k])) xForward = np.array(xForward) if '*xBackward' in text: text = pcf.readline() f = text.split() # Array of terms text strings for k in range(terms): xBackward.append(float(f[k])) xBackward = np.array(xBackward) if '*yForward' in text: text = pcf.readline() f = text.split() # Array of terms text strings for k in range(terms): yForward.append(float(f[k])) yForward = np.array(yForward) if '*yBackward' in text: text = pcf.readline() f = text.split() # Array of terms text strings for k in range(terms): yBackward.append(float(f[k])) yBackward = np.array(yBackward) pcf.close() # Now reorder coefficients Aarray = np.zeros((order + 1, order + 1)) Barray = np.zeros((order + 1, order + 1)) Carray = np.zeros((order + 1, order + 1)) Darray = np.zeros((order + 1, order + 1)) terms = (order + 1) * (order + 2) // 2 A2 = np.zeros(terms) B2 = np.zeros(terms) C2 = np.zeros(terms) D2 = np.zeros(terms) k1 = 0 for i in range(order + 1): for j in range(order + 1 - i): Aarray[j, i] = xForward[k1] Barray[j, i] = yForward[k1] Carray[j, i] = xBackward[k1] Darray[j, i] = yBackward[k1] k1 += 1 k2 = 0 for i in range(order + 1): for j in range(i + 1): A2[k2] = Aarray[j, i - j] B2[k2] = Barray[j, i - j] C2[k2] = Carray[j, i - j] D2[k2] = Darray[j, i - j] k2 += 1 if verbose: print('\n', 
pcfName) print('A') polynomial.print_triangle(A2, order=5) print('\nB') polynomial.print_triangle(B2, order=5) print('\nC') polynomial.print_triangle(C2, order=5) print('\nD') polynomial.print_triangle(D2, order=5) # Convert V2V3 output polynomials to XAN,YAN type # print (year, pcfName) # print (pcfName) year='2017' if year == '2016' and 'GWA2OTE' in pcfName: B2 = -B2 B2[0] = B2[0] - 0.13 (C2, D2) = polynomial.TwoStep(C2, D2, [0.0, 1.0, 0.0], [-0.13, 0.0, -1.0], 5) print ('\nAdjusted Polynomials') print('A') polynomial.print_triangle(A2, order=5) print('\nB') polynomial.print_triangle(B2, order=5) print('\nC') polynomial.print_triangle(C2, order=5) print('\nD') polynomial.print_triangle(D2, order=5) return (A2, B2, C2, D2) def rows(pcfName, new_pcf_format=False): print('=============================================') xForward = [] yForward = [] xBackward = [] yBackward = [] pcf = open(pcfName) text = pcf.readline() print ('First Line\n', text) while text != '': text = pcf.readline() if 'Factor' in text: text = pcf.readline() [xfactor, yfactor] = text.split() xfactor = float(xfactor) yfactor = float(yfactor) print ('xfactor', xfactor) print ('yfactor', yfactor) if '*FitOrder' in text: text = pcf.readline() order = int(text.split()[0]) print ('order', order) if '*Rotation' in text: rotation = float(pcf.readline()) print ('rotation', rotation) if '*InputRotation' in text: text = pcf.readline() [xCenterIn, yCenterIn] = text.split() xCenterIn = float(xCenterIn) yCenterIn = float(yCenterIn) print ('CenterIn', xCenterIn, yCenterIn) if '*OutputRotation' in text: text = pcf.readline() [xCenterOut, yCenterOut] = text.split() xCenterOut = float(xCenterOut) yCenterOut = float(yCenterOut) print ('CenterOut', xCenterOut, yCenterOut) # if ('OTE' in pcfName) or ('Fore_' in pcfName) or (new_pcf_format is True): # Different layout if ('OTE' in pcfName) or (new_pcf_format is True): # Different layout if '*xForward' in text: text = pcf.readline() cfList = text.split() for cf in 
cfList: cff = float(cf) xForward.append(cff) # L0 set text = pcf.readline() cfList = text.split() for cf in cfList: cff = float(cf) xForward.append(cff) # L1 set xForward = np.array(xForward) if '*xBackward' in text: text = pcf.readline() cfList = text.split() for cf in cfList: cff = float(cf) xBackward.append(cff) # L0 set text = pcf.readline() cfList = text.split() for cf in cfList: cff = float(cf) xBackward.append(cff) # L1 set xBackward = np.array(xBackward) if '*yForward' in text: text = pcf.readline() cfList = text.split() for cf in cfList: cff = float(cf) yForward.append(cff) # L0 set text = pcf.readline() cfList = text.split() for cf in cfList: cff = float(cf) yForward.append(cff) # L1 set yForward = np.array(yForward) if '*yBackward' in text: text = pcf.readline() cfList = text.split() for cf in cfList: cff = float(cf) yBackward.append(cff) # L0 set text = pcf.readline() cfList = text.split() for cf in cfList: cff = float(cf) yBackward.append(cff) # L1 set yBackward = np.array(yBackward) else: # Other filter files if '*xForward' in text: for k in range(42): text = pcf.readline() f = float(text.split()[0]) xForward.append(f) xForward = np.array(xForward) # print ('xForward') # print (xForward) if '*xBackward' in text: for k in range(42): text = pcf.readline() f = float(text.split()[0]) xBackward.append(f) xBackward = np.array(xBackward) if '*yForward' in text: for k in range(42): text = pcf.readline() f = float(text.split()[0]) yForward.append(f) yForward = np.array(yForward) if '*yBackward' in text: for k in range(42): text = pcf.readline() f = float(text.split()[0]) yBackward.append(f) yBackward = np.array(yBackward) print ('Finished reading PCF file') # Now generate two SIAF rows First half of each coefficient set gets reorderd and goes in first L0 row # Second half goes in second L1 row # Design AperName using name of pcf file print ('pcfName', pcfName) if 'OTE' in pcfName: ApName1 = 'NRS_SKY_OTEIP' # ApName2 = 'DISCARD' else: i1 = pcfName.find('_') i2 
= pcfName.find('.') filter = (pcfName[i1 + 1:i2]) ApName1 = 'NRS_' + filter + '_OTEIP_MSA_L0' ApName2 = 'NRS_' + filter + '_OTEIP_MSA_L1' # print ('\nAL0, AL1') (AL0, AL1) = rearrange(xForward) # polynomial.print_triangle(AL0,5) # print () # polynomial.print_triangle(AL1,5) # print ('\nBL0, BL1') (BL0, BL1) = rearrange(yForward) # polynomial.print_triangle(BL0,5) # print () # polynomial.print_triangle(BL1,5) # print ('\nCL0, CL1') (CL0, CL1) = rearrange(xBackward) # polynomial.print_triangle(CL0,5) # print () # polynomial.print_triangle(CL1,5) # print ('\nDL0, DL1') (DL0, DL1) = rearrange(yBackward) # polynomial.print_triangle(DL0,5) # print () # polynomial.print_triangle(DL1,5) data = {} data['L0'] = {} data['L1'] = {} data['L0']['A'] = AL0 data['L1']['A'] = AL1 data['L0']['B'] = BL0 data['L1']['B'] = BL1 data['L0']['C'] = CL0 data['L1']['C'] = CL1 data['L0']['D'] = DL0 data['L1']['D'] = DL1 return data ############################# instrument = 'NIRSpec' test_dir = os.path.join(JWST_TEMPORARY_DATA_ROOT, instrument, 'generate_test') if not os.path.isdir(test_dir): os.makedirs(test_dir) # regenerate SIAF reference files if needed if 0: generate_reference_files.generate_siaf_detector_layout() generate_reference_files.generate_initial_siaf_aperture_definitions(instrument) generate_reference_files.generate_siaf_detector_reference_file(instrument) generate_reference_files.generate_siaf_ddc_mapping_reference_file(instrument) # DDC name mapping ddc_apername_mapping = iando.read.read_siaf_ddc_mapping_reference_file(instrument) # NIRSpec detected parameters, e.g. 
XDetSize siaf_detector_parameters = iando.read.read_siaf_detector_reference_file(instrument) # Fundamental aperture definitions: names, types, reference positions, dependencies siaf_aperture_definitions = iando.read.read_siaf_aperture_definitions(instrument) # definition of the master apertures, the 16 SCAs detector_layout = iando.read.read_siaf_detector_layout() master_aperture_names = detector_layout['AperName'].data # directory containing reference files delivered by IDT source_data_dir = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, 'delivery') print ('Loading reference files from directory: {}'.format(source_data_dir)) # XSciRef etc. data for some of the transform apertures, see Section 4.7.1 and Table 1 of JWST-STScI-005921 tiltx_gtp_file = os.path.join(source_data_dir, 'disperser_MIRROR_TiltX.gtp') tilty_gtp_file = os.path.join(source_data_dir, 'disperser_MIRROR_TiltY.gtp') disperser_mirror_tiltx = read_pcf_gtp(tiltx_gtp_file) disperser_mirror_tilty = read_pcf_gtp(tilty_gtp_file) # TA transforms: mapping of row names in the Calc worksheet to reference files pcf_file_mapping = {} pcf_file_mapping['491_GWA'] = 'delivery_SCA491toGWA.pcf' pcf_file_mapping['492_GWA'] = 'delivery_SCA492toGWA.pcf' pcf_file_mapping['CLEAR_GWA_OTE'] = 'delivery_CLEAR_GWA2XanYan.pcf' pcf_file_mapping['F110W_GWA_OTE'] = 'delivery_F110W_GWA2XanYan.pcf' pcf_file_mapping['F140X_GWA_OTE'] = 'delivery_F140X_GWA2XanYan.pcf' pcf_data = {} for field in pcf_file_mapping.keys(): pcf_data[field] = {} pcf_data[field]['A'], pcf_data[field]['B'], pcf_data[field]['C'], pcf_data[field]['D'] = reorder(os.path.join(source_data_dir, pcf_file_mapping[field]), verbose=True) # reference file delivered by IDT nirspec_slit_apertures_file = os.path.join(source_data_dir, 'positionsSIAFApertures.fits') nirspec_slit_apertures_data = Table.read(nirspec_slit_apertures_file) nirspec_slit_aperture_names = nirspec_slit_apertures_data['SIAF_NAME'].tolist() # dictionary that maps NIRSpec nomenclature to SIAF 
nomenclature nirspec_slit_apertures_data_mapping = {} nirspec_slit_apertures_data_mapping['V2Ref'] = 'RefXPOSKY' nirspec_slit_apertures_data_mapping['V3Ref'] = 'RefYPOSKY' nirspec_slit_apertures_data_mapping['V3IdlYAngle'] = 'AngleV3' # compute 'Idl Vertices' from the V2V3 vertices given in nirspec_slit_apertures_data, # see Section 5.2 of TR and see Calc worksheet in NIRSpec_SIAF.xlsx for index in [1,2,3,4]: nirspec_slit_apertures_data['XIdlVert{}'.format(index)] = -1 * ((nirspec_slit_apertures_data['C{}_XPOSSKY'.format(index)] - nirspec_slit_apertures_data['RefXPOSKY']) * np.cos(np.deg2rad(nirspec_slit_apertures_data['AngleV3'])) - (nirspec_slit_apertures_data['C{}_YPOSSKY'.format(index)] - nirspec_slit_apertures_data['RefYPOSKY']) * np.sin(np.deg2rad(nirspec_slit_apertures_data['AngleV3']))) nirspec_slit_apertures_data['YIdlVert{}'.format(index)] = +1 * ((nirspec_slit_apertures_data['C{}_XPOSSKY'.format(index)] - nirspec_slit_apertures_data['RefXPOSKY']) * np.sin(np.deg2rad(nirspec_slit_apertures_data['AngleV3'])) + (nirspec_slit_apertures_data['C{}_YPOSSKY'.format(index)] - nirspec_slit_apertures_data['RefYPOSKY']) * np.cos(np.deg2rad(nirspec_slit_apertures_data['AngleV3']))) # map aperture names to Fore_*.pcf file names fore_pcf_file_mapping = {} fore_pcf_file_mapping['NRS_SKY_OTEIP'] = 'OTE.pcf' fore_pcf_file_mapping['NRS_CLEAR_OTEIP_MSA_L0'] = 'Fore_CLEAR.pcf' fore_pcf_file_mapping['NRS_CLEAR_OTEIP_MSA_L1'] = 'Fore_CLEAR.pcf' fore_pcf_file_mapping['NRS_F070LP_OTEIP_MSA_L0'] = 'Fore_F070LP.pcf' fore_pcf_file_mapping['NRS_F070LP_OTEIP_MSA_L1'] = 'Fore_F070LP.pcf' fore_pcf_file_mapping['NRS_F100LP_OTEIP_MSA_L0'] = 'Fore_F100LP.pcf' fore_pcf_file_mapping['NRS_F100LP_OTEIP_MSA_L1'] = 'Fore_F100LP.pcf' fore_pcf_file_mapping['NRS_F170LP_OTEIP_MSA_L0'] = 'Fore_F170LP.pcf' fore_pcf_file_mapping['NRS_F170LP_OTEIP_MSA_L1'] = 'Fore_F170LP.pcf' fore_pcf_file_mapping['NRS_F290LP_OTEIP_MSA_L0'] = 'Fore_F290LP.pcf' fore_pcf_file_mapping['NRS_F290LP_OTEIP_MSA_L1'] = 
'Fore_F290LP.pcf' fore_pcf_file_mapping['NRS_F110W_OTEIP_MSA_L0'] = 'Fore_F110W.pcf' fore_pcf_file_mapping['NRS_F110W_OTEIP_MSA_L1'] = 'Fore_F110W.pcf' fore_pcf_file_mapping['NRS_F140X_OTEIP_MSA_L0'] = 'Fore_F140X.pcf' fore_pcf_file_mapping['NRS_F140X_OTEIP_MSA_L1'] = 'Fore_F140X.pcf' aperture_dict = OrderedDict() aperture_name_list = siaf_aperture_definitions['AperName'].tolist() for AperName in aperture_name_list: # new aperture to be constructed aperture = pysiaf.JwstAperture() aperture.AperName = AperName aperture.InstrName = siaf_detector_parameters['InstrName'][0].upper() # index in the aperture definition table aperture_definitions_index = siaf_aperture_definitions['AperName'].tolist().index(AperName) # Retrieve basic aperture parameters from definition
<reponame>tomar27/pipelines<gh_stars>1-10 """Module for input and output processing for Cloud AI Metrics.""" import copy import dataclasses import json import numbers from typing import Any, Dict, List, Optional, Tuple, Type, Union import tensorflow.compat.v2 as tf from lib import column_spec, constants, evaluation_column_specs as ecs ColumnSpec = column_spec.ColumnSpec EvaluationColumnSpecs = ecs.EvaluationColumnSpecs def _k_hot_from_label_ids(label_ids: List[int], n_classes: int) -> List[int]: """Converts a list of label ids into a k-hot encoding.""" _validate_label_ids(label_ids, n_classes) # Preallocate the encoding and set all bits to 0. k_hot = [0] * n_classes # Turn on the bits that correspond to label ids. for label_id in label_ids: k_hot[label_id] = 1 return k_hot def _k_hot_from_label_names(labels: List[str], symbols: List[str]) -> List[int]: """Converts text labels into symbol list index as k-hot.""" k_hot = [0] * len(symbols) for label in labels: try: k_hot[symbols.index(label)] = 1 except IndexError: raise ValueError( 'Label %s did not appear in the list of defined symbols %r' % (label, symbols)) return k_hot def _validate_no_repeats(values: List[Any], name: str) -> None: """Validates that all te elements in the list are unique.""" if not values: return n = len(values) n_unique = len(set(values)) if n != n_unique: raise ValueError('{}: all values must be unique.'.format(name)) def _validate_list(values: Union[List[int], List[float], List[str]], allowed_types: List[Type[Any]], name: str) -> None: """Validates that the list is non-empty and homogeneous.""" if not values: raise ValueError('{}: values list is empty.'.format(name)) if not isinstance(values, list): raise TypeError('{}: values are in a {} but expected a list.'.format( name, type(values))) value_type = type(values[0]) if value_type not in allowed_types: raise TypeError( '{}: values are expected to be one of {} but are {}.'.format( name, allowed_types, value_type)) if not all(isinstance(value, 
# NOTE(review): the fragment below is the truncated tail of a list-validation
# helper whose `def` line precedes this chunk; tokens preserved as-is.
value_type) for value in values):
    raise TypeError(
        '{}: all value types are expected to be {} but are not.'.format(
            name, value_type))


def _validate_binary_classification_labels(labels: List[str]) -> None:
  """Validates label specification for binary classification.

  Exactly one label is allowed and it must be a recognized binary label
  name (case-insensitive).
  """
  if not labels:
    raise ValueError('labels must not be empty.')
  n = len(labels)
  if n > 1:
    raise ValueError(
        'Binary classification requires exactly one label. Got {}'.format(n))
  if labels[0].lower() not in constants.Data.BINARY_CLASSIFICATION_LABELS:
    raise ValueError(
        'Labels for binary classification must be one of ' +
        '{} (not case sensitive). Got {}'.format(
            constants.Data.BINARY_CLASSIFICATION_LABELS, labels[0]))


def _validate_classification_labels(labels: List[str],
                                    class_list: List[str]) -> None:
  """Validates that the labels are specified correctly for classification."""
  _validate_list(values=labels, allowed_types=[str], name='labels')
  _validate_no_repeats(values=labels, name='labels')
  if not all((label in class_list) for label in labels):
    raise ValueError('labels: some labels are not recognized. Got labels: ' +
                     '{}. Allowed labels: {}'.format(labels, class_list))


def _validate_label_ids(label_ids: List[int], n_classes: int) -> None:
  """Validates that label ids are compatible with the list of labels.

  Ids must be unique ints in the range [0, n_classes).
  """
  _validate_list(values=label_ids, allowed_types=[int], name='label_ids')
  _validate_no_repeats(values=label_ids, name='label_ids')
  max_id = max(label_ids)
  if max_id >= n_classes:
    raise ValueError(
        'Label index {} is out of range. There are only {} labels'.format(
            max_id, n_classes))
  min_id = min(label_ids)
  if min_id < 0:
    raise ValueError(
        'Label indices must be non-negative. Got {}'.format(min_id))


def _validate_binary_label_ids(label_ids: List[int]) -> None:
  """Validates that binary label ids are specified correctly."""
  if not label_ids:
    raise ValueError('label_ids must be specified.')
  n = len(label_ids)
  if n > 1:
    raise ValueError(
        'Exactly one label must be provided for binary classification. Got {}'
        .format(n))
  if label_ids[0] not in constants.Data.BINARY_CLASSIFICATION_LABEL_IDS:
    raise ValueError(
        'Label id for binary classification must be one of {}. Got {}'.format(
            constants.Data.BINARY_CLASSIFICATION_LABEL_IDS, label_ids[0]))


# pyformat: disable
def _validate_classification_ground_truth(
    ground_truth: Union[List[str], List[int]],
    class_list: List[str]) -> None:
  # pyformat: enable
  """Validates that ground truth is specified correctly for classification."""
  if not ground_truth:
    raise ValueError('Ground truth must not be empty.')
  if isinstance(ground_truth[0], int):
    # Ground truth specified as label id.
    _validate_label_ids(label_ids=ground_truth, n_classes=len(class_list))
  else:
    # Ground truth specified as label names.
    _validate_classification_labels(labels=ground_truth, class_list=class_list)


# pyformat: disable
def _validate_binary_classification_ground_truth(
    ground_truth: Union[List[str], List[int]],
    class_list: List[str]) -> None:
  # pyformat: enable
  """Validates ground truth specification for binary classification."""
  if not ground_truth:
    raise ValueError('Ground truth must not be empty.')
  if isinstance(ground_truth[0], int):
    # Ground truth specified as label id.
    _validate_binary_label_ids(label_ids=ground_truth)
  else:
    # Ground truth specified as label names.
    _validate_binary_classification_labels(labels=ground_truth)


def _validate_binary_classification_predictions(predictions: List[Any],
                                                labels: List[Any],
                                                label_ids: List[Any]) -> None:
  """Validates predictions specification for binary classification."""
  _validate_list(
      values=predictions, allowed_types=[float], name='binary predictions')
  prediction_count = len(predictions) if predictions else 0
  if prediction_count != 1:
    raise ValueError(
        'binary predictions must contain exactly one value. Got {}'.format(
            prediction_count))
  if labels:
    _validate_binary_classification_labels(labels)
  if label_ids:
    _validate_binary_label_ids(label_ids)


def _validate_classification_predictions(predictions: List[Any],
                                         labels: List[Any],
                                         label_ids: List[Any],
                                         class_list: List[str]) -> None:
  """Validates that predictions are specified correctly for classification.

  When both labels and label_ids are given, they must agree element-wise
  with class_list.
  """
  _validate_list(values=predictions, allowed_types=[float], name='predictions')
  n_predictions = len(predictions) if predictions else 0
  if labels:
    _validate_classification_labels(labels, class_list)
    n_labels = len(labels)
    if n_labels != n_predictions:
      raise ValueError(
          'labels and predictions must be of the same size, but {} != {}'
          .format(n_labels, n_predictions))
  if label_ids:
    _validate_label_ids(label_ids, n_classes=len(class_list))
    n_label_ids = len(label_ids)
    if n_label_ids != n_predictions:
      raise ValueError(
          'label_ids and predictions must be of the same size, but {} != {}'
          .format(n_label_ids, n_predictions))
  if labels and label_ids:
    if not all((class_list[label_id] == label)
               for (label, label_id) in zip(labels, label_ids)):
      raise ValueError(
          'label_ids and labels are inconsistent. Labels indicated by ' +
          'label_ids are: {}. Classes indicated by labels are {}.'.format(
              [class_list[label_id] for label_id in label_ids], labels))


def _validate_regression_labels(labels: List[Any]) -> None:
  """Validates that the labels are specified correctly for regression."""
  if not labels:
    raise ValueError('labels: values list is empty.')
  if not isinstance(labels, list):
    raise TypeError('labels: values are in a {} but expected a list.'.format(
        type(labels)))


def _validate_regression_or_point_forecasting_predictions(
    prediction_values: List[Any]) -> None:
  """Validates that regression/point-forecast predictions are floats."""
  _validate_list(
      values=prediction_values, allowed_types=[float], name='predictions')


def _validate_quantile_forecasting_predictions(
    prediction_values: List[Any], quantile_list: List[float]) -> None:
  """Validates that predictions and quantiles are specified correctly."""
  _validate_list(
      values=prediction_values, allowed_types=[float], name='predictions')
  if len(quantile_list) != len(prediction_values):
    raise ValueError('predictions: values {} unmatch with quantiles {}.'.format(
        prediction_values, quantile_list))


def _is_binary_classification(class_list: List[str]) -> bool:
  """Returns True when exactly one class name is configured."""
  if not class_list:
    return False
  return len(class_list) == 1


def _ensure_list(value: Any) -> List[Any]:
  """If value is a scalar, converts it to a list of size 1."""
  if isinstance(value, list):
    return value
  if isinstance(value, str) or isinstance(value, numbers.Number):
    return [value]
  raise TypeError(
      f'Value must be a list, number or a string. Got {type(value)}')


def _pop_labels(input_dict: Dict[str, Any], label_column_spec: ColumnSpec,
                class_list: List[str]) -> Tuple[Any, Optional[List[int]]]:
  """Pops the labels off the input dict and formats accordingly.

  For regression (empty class_list) labels are cast to float; otherwise a
  k-hot encoding is produced via _classification_k_hot.
  """
  labels = _ensure_list(label_column_spec.pop_value_from_dict(input_dict))
  if not class_list:
    # REGRESSION PROBLEM
    _validate_regression_labels(labels)
    for i, value in enumerate(labels):
      try:
        labels[i] = float(value)
      except:  # pylint: disable=broad-except
        # NOTE(review): the bare except discards the original exception;
        # `raise ValueError(...) from err` would preserve the cause.
        raise ValueError(
            'label value: {} failed to cast to float.'.format(value))
    return (labels, None)
  # If input label is boolean, convert it into str, as the class are strings.
  if labels and isinstance(labels[0], bool):
    labels = ['true' if label else 'false' for label in labels]
  k_hot_label_values = _classification_k_hot(labels, class_list)
  return (labels, k_hot_label_values)


def _classification_k_hot(labels: List[int],
                          class_list: List[str]) -> List[int]:
  """Pops the labels off the input dict and return a k_hot label vector."""
  if _is_binary_classification(class_list):
    # BINARY CLASSIFICATION PROBLEM
    _validate_binary_classification_ground_truth(labels, class_list)
    if isinstance(labels[0], str):
      # Ground truth is represented as label names.
      return _k_hot_from_label_names(
          [labels[0].lower()], constants.Data.BINARY_CLASSIFICATION_LABELS)
    # Ground truth is represented as label ids.
    return _k_hot_from_label_ids(labels, 2)
  # NON_BINARY CLASSIFICATION PROBLEM
  _validate_classification_ground_truth(labels, class_list)
  if isinstance(labels[0], str):
    # Ground truth is represented as label names.
    return _k_hot_from_label_names(labels, class_list)
  # Ground truth is represented as label ids.
  return _k_hot_from_label_ids(labels, len(class_list))


def _k_hot_to_sparse(k_hot: List[int]) -> List[int]:
  """Converts k-hot embedding to sparse representation."""
  return [idx for idx, val in enumerate(k_hot) if val != 0]


def _pop_predictions(
    input_dict: Dict[str, Any],
    evaluation_column_specs: EvaluationColumnSpecs,
    class_list: Optional[List[str]] = None,
    quantile_list: Optional[List[float]] = None,
    quantile_index: Optional[int] = None
) -> Tuple[List[float], Optional[List[float]]]:
  """Pops the predictions off the input dict and formats accordingly."""
  score_spec = evaluation_column_specs.predicted_score_column_spec
  prediction_values = _ensure_list(score_spec.pop_value_from_dict(input_dict))
  if not class_list:
    if quantile_list:
      # QUANTILE FORECASTING PROBLEM
      _validate_quantile_forecasting_predictions(prediction_values,
                                                 quantile_list)
      if (quantile_index is not None) and quantile_index >= 0:
        return (prediction_values,
                prediction_values[quantile_index:quantile_index + 1])
      else:
        return (prediction_values, None)
    else:
      # REGRESSION/ POINT FORECASTING PROBLEM
      _validate_regression_or_point_forecasting_predictions(prediction_values)
      return (prediction_values, None)
  label_ids = None
  label_id_spec = evaluation_column_specs.predicted_label_id_column_spec
  if label_id_spec and label_id_spec.exists_in_dict(input_dict):
    # Model outputs label IDs, so we just use them directly.
    label_ids = label_id_spec.pop_value_from_dict(input_dict)
  labels = None
  label_spec = evaluation_column_specs.predicted_label_column_spec
  if label_spec and label_spec.exists_in_dict(input_dict):
    # Model outputs labels but not label ids.
    labels = label_spec.pop_value_from_dict(input_dict)
  if _is_binary_classification(class_list):
    # BINARY CLASSIFICATION PROBLEM
    _validate_binary_classification_predictions(prediction_values, labels,
                                                label_ids)
    # We are representing binary classification problem as a multiclass
    # problem.
    # For binary classification problem, the model outputs probability (p)
    # of the positive class only. We amend it with the probability of the
    # negative class, which is
    # NOTE(review): source chunk is truncated here; the remainder of this
    # function lies outside this view.
""" spark-submit task2.py <filter threshold> <support> <input_file_path> <output_file_path> spark-submit task2.py 70 50 "file:///Users/markduan/duan/USC_course/USC_APDS/INF553/homework/hw2/dataset/task2_data.csv" "task2_output.txt" """ import sys import time from pyspark import SparkConf, SparkContext # import os # os.environ['PYSPARK_PYTHON'] = '/usr/local/bin/python3.6' # os.environ['PYSPARK_DRIVER_PYTHON'] = '/usr/local/bin/python3.6' # caution: filter_threshold, support are str filter_threshold = int(sys.argv[1]) support = int(sys.argv[2]) input_file_path = sys.argv[3] output_file_path = sys.argv[4] def writeIntoFile(filepath, candidates, frequent_itemsets): """ either candidates or frequent_itemsets is a dict which is the output of function groupAndSort ex: {1:[a,b,c], 2:[(a,b), (c,d)], ...} """ with open(filepath, 'w', encoding='utf-8') as fp: def writing(title, content): fp.write(title + "\n") if content == {}: fp.write("\n") else: for key in sorted(content.keys()): if key == 1: words = "('" + ("'),('").join(content[key]) + "')\n\n" fp.write(words) else: words = (",").join([str(x) for x in content[key]]) + "\n\n" fp.write(words) writing("Candidates:", candidates) writing("Frequent Itemsets:", frequent_itemsets) class Bitmap(): def __init__(self, list_size): """ list_size - size of a list. 
the list consists of either 1 or 0 """ self.size = list_size // 31 + 1 self.array = [0] * self.size def getValue(self, n): """ n - position number, from 0 """ ai, bp = self.getPosition(n) return self.array[ai] >> bp & 1 def setValue(self, n, v): """ n - position number, from 0 v - value, 1 or 0 """ ai, bp = self.getPosition(n) if v == 1: # set the bit to one self.array[ai] = self.array[ai] | (1 << bp) elif v == 0: # set the bit to zero self.array[ai] = self.array[ai] & (~(1 << bp)) else: print("wrong v value.") def getPosition(self, n): """ n - position number, from 0 """ array_index = n // 31 bit_position = n % 31 return(array_index, bit_position) def initialize(self, l): for n, v in enumerate(l): self.setValue(n, v) class ProjectionTable(): """ key-value domains are one to one projection. """ def __init__(self, values): """ values - iterable iv - index-value pair """ self.iv = list(enumerate(values)) def getIndex(self, value): for i, v in self.iv: if v == value: return i return None def getValue(self, index): for i, v in self.iv: if i == index: return v return None def hashPCY(x1, x2, n): return x1 % n def generateSuperItemsets(base_itemsets): """ combine tuples in the base itemsets list to generate the immediate super itemsets list :param base_itemsets - [(a,b), (b,c), (a,c) ...] :return super_itemsets - [(a,b,c), ...] 
""" if base_itemsets == []: return [] # sort: make sure, in (a,b), a < b for n in range(len(base_itemsets)): base_itemsets[n] = sorted(base_itemsets[n]) num_base = len(base_itemsets[0]) num_super = num_base + 1 super_itemsets = [] len_itemsets = len(base_itemsets) for n_x in range(len_itemsets): x = base_itemsets[n_x] for n_y in range(n_x+1, len_itemsets): y = base_itemsets[n_y] if x[:-1] == y[:-1] and x[-1] < y[-1]: xy_list = x + y[-1:] count_ = 0 for i in range(len(xy_list)): if xy_list[:i]+xy_list[i+1:] in base_itemsets: count_ += 1 else: break if count_ == num_super: super_itemsets.append(tuple(xy_list)) return super_itemsets def PCY(baskets, support_threshold, n_buckets=1000): """ input: baskets - [[a,b,c], ...] a,b,c are integers support_threshold - support threshold n_buckets - number of buckets output: frequent_itemsets - a list of frequent itemsets (integers), [12, 5, 6, (12, 5), ...] """ # 1st pass frequent_itemsets = [] baskets = list(baskets) single_item_count = {} buckets_list = [0] * n_buckets for basket in baskets: basket.sort() n_items = len(basket) for i, item in enumerate(basket): # count single items if item not in single_item_count.keys(): single_item_count[item] = 1 else: single_item_count[item] += 1 # hash pairs if i < n_items-1: for j in range(i+1, n_items): bucket_idx = hashPCY(basket[i], basket[j], n_buckets) buckets_list[bucket_idx] += 1 # check frequent single item fs_list = [] # frequent single item list for item in single_item_count.keys(): count = single_item_count[item] if count >= support_threshold: fs_list.append(item) print("---frequent single item---:", fs_list[:10]) frequent_itemsets.extend(fs_list) # create frequent buckets bitmap for i in range(len(buckets_list)): if buckets_list[i] >= support_threshold: buckets_list[i] = 1 else: buckets_list[i] = 0 bitmap = Bitmap(len(buckets_list)) bitmap.initialize(buckets_list) # 2nd pass count_pairs = {} for basket in baskets: # notice that every basket in baskets are already sorted 
n_items = len(basket) for i, item_i in enumerate(basket): # hash pairs if item_i in fs_list and i < n_items-1: for j in range(i+1, n_items): item_j = basket[j] if item_j in fs_list: ij_bitmap_position = hashPCY(item_i, item_j, n_buckets) if bitmap.getValue(ij_bitmap_position) == 1: pair = (item_i, item_j) if pair in count_pairs.keys(): count_pairs[pair] += 1 else: count_pairs[pair] = 1 fp_list = [] # frequent pairs list for pair in count_pairs.keys(): count = count_pairs[pair] if count >= support_threshold: fp_list.append(pair) frequent_itemsets.extend(fp_list) # further passes base_itemsets = fp_list n_itemsets = 3 while(1): n_itemsets += 1 super_itemsets = generateSuperItemsets(base_itemsets) if super_itemsets == []: break else: count_further = {} for candidate in super_itemsets: count_further[candidate] = 0 for basket in baskets: for candidate in super_itemsets: count = 0 for x in basket: if x in candidate: count += 1 if count == len(candidate): count_further[candidate] += 1 fi_list = [itemset for itemset in count_further.keys() if count_further[itemset] >= support_threshold] frequent_itemsets.extend(fi_list) base_itemsets = fi_list return frequent_itemsets def Apriori(baskets, support_threshold): """ input: baskets - [[a,b,c], ...] a,b,c are integers support_threshold - support threshold output: frequent_itemsets - a list of frequent itemsets (integers), [12, 5, 6, (12, 5), ...] 
""" # 1st pass frequent_itemsets = [] baskets = list(baskets) single_item_count = {} for basket in baskets: n_items = len(basket) for i, item in enumerate(basket): # count single items if single_item_count.get(item) is None: single_item_count[item] = 1 else: single_item_count[item] += 1 # check frequent single item fs_list = [item for item in single_item_count.keys() if single_item_count[item] >= support_threshold] frequent_itemsets.extend(fs_list) new_baskets = [[item for item in row if item in fs_list] for row in baskets] # 2nd pass count_pairs = {} for basket in new_baskets: # notice that every basket in baskets are not sorted basket.sort() n_items = len(basket) for i in range(n_items-1): for j in range(i+1, n_items): pair = (basket[i], basket[j]) if count_pairs.get(pair) is None: count_pairs[pair] = 1 else: count_pairs[pair] += 1 fp_list = [pair for pair in count_pairs.keys() if count_pairs[pair] >= support_threshold] # frequent pairs list frequent_itemsets.extend(fp_list) # further passes base_itemsets = fp_list while(1): super_itemsets = generateSuperItemsets(base_itemsets) if super_itemsets == []: break else: count_further = {} for basket in new_baskets: basket_set = set(basket) for candidate in super_itemsets: candidate_set = set(candidate) if candidate_set.issubset(basket_set): if count_further.get(candidate) is None: count_further[candidate] = 1 else: count_further[candidate] += 1 fi_list = [item for item in count_further.keys() if count_further[item] >= support_threshold] frequent_itemsets.extend(fi_list) base_itemsets = fi_list return frequent_itemsets def secondMap(baskets, candidates): """ baskets - [[a,b,c], ...] candidates - a list of candidates generated by the first mapreduce. 
a candidate is either an integer or a tuple of integers """ candi_dict = {} for candidate in candidates: candi_dict[candidate] = 0 for basket in baskets: basket_set = set(basket) for candidate in candidates: if type(candidate) == int: # itemset candidate is an integer item if candidate in basket: candi_dict[candidate] += 1 else: candidate_set = set(candidate) if candidate_set.issubset(basket_set): candi_dict[candidate] += 1 candi_count_list = [(x, candi_dict[x])for x in candi_dict.keys()] return candi_count_list def getBackToValues(itemset, data_pt): if type(itemset) == int: return data_pt.getValue(itemset) else: return tuple(sorted([data_pt.getValue(x) for x in itemset])) def groupAndSort(result_list): """ sort result list: input: result_list - [102, '98', ('102', '98'), ('97', '99'), ('101', '99'), '101', '97', '99', ('97', '98'), ('98', '99')] output: group = {1: ['101', '102', '97', '98', '99'], 2: [('101', '99'), ('102', '98'), ('97', '98'), ('97', '99'), ('98', '99')]} for example, the key 1 means the corresponding value is single item list. """ group = {} for x in result_list: if type(x) == str: if group.get(1) == None: group[1] = [x] else: group[1].append(x) else: n = len(x) if group.get(n) == None: group[n] = [x] else: group[n].append(x) for key in group.keys(): # print("before sort", group[key]) group[key].sort() # print("after sort", group[key]) return group def findFrequentItemsets(data, data_pt): """ data - spark pipeline, caution: now, integers in a basket instead of string data_pt - a projection table, an single item (string) <-> an integer """ # get the number of partitions n_partitions = data.getNumPartitions() # decide the adjusted support threshold adjusted_support = int(support / n_partitions) if adjusted_support == 0: adjusted_support = 1 # first mapreduce candidates = data.mapPartitions(lambda x: Apriori(x, adjusted_support)) \ .map(lambda x: (x,1))
""" T_zero = {"p0": SE3.identity()} ang_lims_map = {} old_to_new_names = { "p0": "p0" } # Returned for user of the method (to map old joint names to new ones) ub, lb = spherical_angle_bounds_to_revolute(self.ub, self.lb) count = 1 joint_prev = "p0" for ( joint ) in self.d: # Assumes the dictionary is in chain order (perhaps enforce?) new_node1 = "p" + str(count) count += 1 # ub[new_node1] = self.ub[joint] # lb[new_node1] = self.lb[joint] ang_lims_map[joint] = new_node1 new_node2 = "p" + str(count) count += 1 old_to_new_names[joint] = new_node2 Ry = SE3(SO3(roty(np.pi / 2)), np.zeros(3)) T_zero[new_node1] = T_zero[joint_prev].dot(Ry) d = self.d[joint] Ry_back = SE3(SO3(roty(-np.pi / 2)), np.zeros(3)) T_zero[new_node2] = T_zero[new_node1].dot(Ry_back).dot(trans_axis(d, "z")) joint_prev = new_node2 # for key in T_zero: # if key not in ub.keys() and key is not 'p0': # ub[key] = np.pi # lb[key] = -np.pi params = {"T_zero": T_zero, "ub": ub, "lb": lb} return RobotRevolute(params), old_to_new_names, ang_lims_map class RobotRevolute(Robot): def __init__(self, params): self.axis_length = 1 self.dim = 3 if "T_base" in params: self.T_base = params["T_base"] else: self.T_base = SE3.identity() # Use frame poses at zero conf if provided, if not use DH if "T_zero" in params: self.T_zero = params["T_zero"] self.n = len(self.T_zero) - 1 # number of links else: if "modified_dh" in params: self.modified_dh = params["modified_dh"] else: self.modified_dh = False if all(k in params for k in ("a", "d", "alpha", "theta")): self.a = params["a"] self.d = params["d"] self.al = params["alpha"] self.th = params["theta"] self.n = len(self.al) # number of links else: raise Exception("Robot description not provided.") # Topological "map" of the robot if "parents" in params: self.parents = nx.DiGraph(params["parents"]) else: names = [f"p{idx}" for idx in range(self.n + 1)] self.parents = nx.path_graph(names, nx.DiGraph) self.kinematic_map = nx.shortest_path(self.parents) # joint limits TODO 
currently assuming symmetric around 0 if "lb" and "ub" in params: self.lb = params["lb"] self.ub = params["ub"] else: self.lb = list_to_variable_dict(self.n * [-pi]) self.ub = list_to_variable_dict(self.n * [pi]) self.structure = self.structure_graph() self.limit_edges = [] # edges enforcing joint limits self.limited_joints = [] # joint limits that can be enforced self.set_limits() super(RobotRevolute, self).__init__() @property def end_effectors(self) -> list: """ Returns a list of end effector node pairs, since it's the last two points that are defined for a full pose. """ S = self.parents return [[x, f"q{x[1:]}"] for x in S if S.out_degree(x) == 0] @property def T_zero(self) -> dict: if not hasattr(self, "_T_zero"): T = {"p0": self.T_base} kinematic_map = self.kinematic_map for ee in self.end_effectors: for node in kinematic_map["p0"][ee[0]][1:]: path_nodes = kinematic_map["p0"][node][1:] q = np.array([0 for node in path_nodes]) a = np.array([self.a[node] for node in path_nodes]) alpha = np.array([self.al[node] for node in path_nodes]) th = np.array([self.th[node] for node in path_nodes]) d = np.array([self.d[node] for node in path_nodes]) if not self.modified_dh: T[node] = fk_3d(a, alpha, d, q + th) else: T[node] = modified_fk_3d(a, alpha, d, q + th) self._T_zero = T return self._T_zero @T_zero.setter def T_zero(self, T_zero: dict): self._T_zero = T_zero @property def parents(self) -> nx.DiGraph: return self._parents @parents.setter def parents(self, parents: nx.DiGraph): self._parents = parents def get_pose(self, joint_angles: dict, query_node: str) -> SE3: """ Returns an SE3 element corresponding to the location of the query_node in the configuration determined by node_inputs. 
""" kinematic_map = self.kinematic_map parents = self.parents T_ref = self.T_zero T = T_ref["p0"] for node in kinematic_map["p0"][query_node][1:]: pred = [u for u in parents.predecessors(node)] T_rel = T_ref[pred[0]].inv().dot(T_ref[node]) T = T.dot(rot_axis(joint_angles[node], "z")).dot(T_rel) return T def structure_graph(self) -> nx.DiGraph: kinematic_map = self.kinematic_map axis_length = self.axis_length parents = self.parents T = self.T_zero S = nx.empty_graph(create_using=nx.DiGraph) for ee in self.end_effectors: for node in kinematic_map["p0"][ee[0]]: aux_node = f"q{node[1:]}" node_pos = T[node].trans aux_node_pos = T[node].dot(trans_axis(axis_length, "z")).trans # Generate nodes for joint S.add_nodes_from( [ (node, {POS: node_pos}), ( aux_node, {POS: aux_node_pos}, ), ] ) # Generate edges S.add_edge(node, aux_node) for pred in parents.predecessors(node): S.add_edges_from([(pred, node), (pred, aux_node)]) S.add_edges_from( [(f"q{pred[1:]}", node), (f"q{pred[1:]}", aux_node)] ) # Generate all edge weights for u, v in S.edges(): S[u][v][DIST] = norm(S.nodes[u][POS] - S.nodes[v][POS]) S[u][v][LOWER] = S[u][v][DIST] S[u][v][UPPER] = S[u][v][DIST] # Delete positions used for weights for u in S.nodes: del S.nodes[u][POS] return S def euclidean_cost_hessian(self, J: dict, K: dict, r: dict): """ Based on 'Solving Inverse Kinematics Using Exact Hessian Matrices', Erleben, 2019 :param J: dictionary of linear velocity kinematic Jacobians :param K: dictionary of tensors representing second order derivative information :param r: dictionary where each value for key ee is goal_ee - F_ee(theta) :return: """ H = 0 for e in J.keys(): J_e = J[e] N = J_e.shape[1] H += J_e.T @ J_e # TODO: Try with einsum for speed, maybe? 
for idx in range(N): for jdx in range(idx, N): dH = K[e][:, idx, jdx].T @ r[e] H[idx, jdx] -= dH if idx != jdx: H[jdx, idx] -= dH return H def max_min_distance(self, T0: SE3, T1: SE3, T2: SE3) -> (float, float, str): """ Given three frames, find the maximum and minimum distances between the frames T0 and T2. It is assumed that the two frames are connected by an unlimited revolute joint with its rotation axis being the z-axis of the frame T1. """ tol = 10e-10 # T_rel_01 = T0.inv().dot(T1) T_rel_12 = T1.inv().dot(T2) p0 = T0.as_matrix()[0:3, 3] z1 = T1.as_matrix()[0:3, 2] x1 = T1.as_matrix()[0:3, 0] p1 = T1.as_matrix()[0:3, 3] p2 = T2.as_matrix()[0:3, 3] p0_proj = p0 - (z1.dot(p0 - p1)) * z1 # p0 projected onto T1 plane p2_proj = p2 - (z1.dot(p2 - p1)) * z1 # p2 projected onto T1 plane if norm(p1 - p0_proj) < tol or norm(p2_proj - p1) < tol: d = norm(T2.trans - T0.trans) return d, d, False r = norm(p2_proj - p1) # radius of circle p2_proj is on delta_th = arctan2(cross(x1, p2_proj - p1).dot(z1), np.dot(x1, p2_proj - p1)) # closest and farthest point from p0_proj sol_1 = r * (p0_proj - p1) / norm(p0_proj - p1) + p1 sol_2 = -r * (p0_proj - p1) / norm(p0_proj - p1) + p1 sol_min = min(sol_1 - p0_proj, sol_2 - p0_proj, key=norm) + p0_proj sol_max = max(sol_1 - p0_proj, sol_2 - p0_proj, key=norm) + p0_proj th_max = arctan2(cross(x1, sol_max - p1).dot(z1), np.dot(x1, sol_max - p1)) th_min = arctan2(cross(x1, sol_min - p1).dot(z1), np.dot(x1, sol_min - p1)) rot_min = rot_axis(th_min - delta_th, "z") d_min = norm(T1.dot(rot_min).dot(T_rel_12).trans - T0.trans) rot_max = rot_axis(th_max - delta_th, "z") d_max = norm(T1.dot(rot_max).dot(T_rel_12).trans - T0.trans) if abs(th_max - delta_th) < tol and d_max > d_min: return d_max, d_min, "below" elif abs(th_min - delta_th) < tol and d_max > d_min: return d_max, d_min, "above" else: return d_max, d_min, False def set_limits(self): """ Sets known bounds on the distances between joints. 
This is induced by link length and joint limits. """ K = self.parents S = self.structure T = self.T_zero kinematic_map = self.kinematic_map T_axis = trans_axis(self.axis_length, "z") for u in K: for v in (des for des in K.successors(u) if des): S[u][v][LOWER] = S[u][v][DIST] S[u][v][UPPER] = S[u][v][DIST] for v in (des for des in level2_descendants(K, u) if des): names = [ (f"p{u[1:]}", f"p{v[1:]}"), (f"p{u[1:]}", f"q{v[1:]}"), (f"q{u[1:]}", f"p{v[1:]}"), (f"q{u[1:]}", f"q{v[1:]}"), ] for ids in names: path = kinematic_map[u][v] T0, T1, T2 = [T[path[0]], T[path[1]], T[path[2]]] if "q" in ids[0]: T0 = T0.dot(T_axis) if "q" in ids[1]: T2 = T2.dot(T_axis) d_max, d_min, limit = self.max_min_distance(T0, T1, T2) if limit: rot_limit = rot_axis(self.ub[v], "z") T_rel = T1.inv().dot(T2) d_limit = norm(T1.dot(rot_limit).dot(T_rel).trans - T0.trans) if limit == "above": d_max = d_limit else: d_min = d_limit self.limited_joints += [v] self.limit_edges += [[ids[0], ids[1]]] # TODO remove/fix S.add_edge(ids[0], ids[1]) if d_max == d_min: S[ids[0]][ids[1]][DIST] = d_max S[ids[0]][ids[1]][UPPER] = d_max S[ids[0]][ids[1]][LOWER] = d_min S[ids[0]][ids[1]][BOUNDED] = limit def joint_variables(self, G: nx.Graph, T_final: dict = None) -> np.ndarray: """ Calculate joint angles from a complete set of point positions. """ # TODO: make this more readable tol = 1e-10 q_zero = list_to_variable_dict(self.n * [0]) kinematic_map = self.kinematic_map parents = self.parents get_pose = self.get_pose T = {} T["p0"] = self.T_base theta = {} for ee in self.end_effectors: path = kinematic_map["p0"][ee[0]][1:] axis_length = self.axis_length for node
        segments from Hierarchy pickle.

    Arguments:
    """
    # NOTE(review): the lines above are the truncated tail of the docstring of
    # a connections-reading helper whose `def` line precedes this chunk;
    # tokens preserved as-is.
    # read threshold and connectivity
    pickled_obj = common.read_pickle(file_name=name)
    if isinstance(pickled_obj, pyto.segmentation.Labels):
        segments = pickled_obj
    elif isinstance(pickled_obj, pyto.scene.SegmentationAnalysis):
        segments = pickled_obj.labels
    else:
        # NOTE(review): message is missing a space between "of" and "pyto.".
        raise ValueError(
            "Pickle file " + name + " has to be instance of"
            + "pyto.segmentation.Labels or pyto.scene.SegmentationAnalysis")

    # expand contacts
    segments.contacts.expand()

    # convert to segments
    if isinstance(segments, pyto.segmentation.Hierarchy):
        contacts = segments.contacts
        segments = segments.toSegment()
        segments.contacts = contacts

    # clean segments (just in case)
    segment_ids = segments.contacts.findSegments(boundaryIds=bound_ids,
                                                 nBoundary=2)
    segments.keep(ids=segment_ids)

    # clean contacts (necessary)
    segments.contacts.keepBoundaries(ids=bound_ids)
    segments.contacts.keepSegments(ids=segment_ids)

    # use inset
    segments.clearFull()
    if inset is not None:
        segments.useInset(inset=inset, mode='absolute', expand=True)

    return segments


def read_boundaries(boundary_ids):
    """
    Reads boundaries file(s) and makes (Segment) boundaries.
    """
    # read
    if is_multi_boundaries():
        bound, multi_boundary_ids = read_multi_boundaries(
            boundary_ids=boundary_ids)
    else:
        bound = read_single_boundaries(boundary_ids=boundary_ids)
        multi_boundary_ids = [boundary_ids]

    # offset
    bound.offset = boundary_offset

    return bound, multi_boundary_ids


def is_multi_boundaries():
    """
    Returns True if multiple boundaries files are given.
    """
    if isinstance(boundary_file_name, basestring):
        return False
    elif isinstance(boundary_file_name, tuple) \
            or isinstance(boundary_file_name, list):
        return True
    else:
        raise ValueError("boundary_file_name has to be aither a string (one " \
                         + "boundary file) or a tuple (multiple boundary files).")


def read_single_boundaries(boundary_ids):
    """
    Reads and initializes boundaries from a single file.
    """
    # read boundaries file and make a Segment object
    bound = pyto.segmentation.Segment.read(
        file=boundary_file_name, ids=boundary_ids, clean=True,
        byteOrder=boundary_byte_order, dataType=boundary_data_type,
        arrayOrder=boundary_array_order, shape=boundary_shape)

    return bound


def read_multi_boundaries(boundary_ids):
    """
    Reads and initializes boundaries from multiple files, combining them
    into one Segment with ids shifted per file.
    """
    # read all boundaries files and combine them in a single Segment object
    bound = pyto.segmentation.Segment()
    curr_shift = 0
    shifted_boundary_ids = []
    for (l_name, b_ids) in zip(boundary_file_name, boundary_ids):
        curr_bound = pyto.segmentation.Segment.read(
            file=l_name, ids=b_ids, clean=True,
            byteOrder=boundary_byte_order, dataType=boundary_data_type,
            arrayOrder=boundary_array_order, shape=boundary_shape)
        bound.add(new=curr_bound, shift=curr_shift, dtype='int16')
        shifted_boundary_ids.append(numpy.array(b_ids) + curr_shift)
        if shift is None:
            curr_shift = None
        else:
            curr_shift += shift

    return bound, shifted_boundary_ids


def get_base(file_name):
    """
    Returns base and root of the given file name
    """
    (dir, base) = os.path.split(file_name)
    (root, ext) = os.path.splitext(base)
    return base, root


def get_clusters_file_name(base_file, cluster_directory, cluster_suffix):
    """
    Returns the clusters file name
    """
    # get base
    base, root = get_base(file_name=base_file)

    # cluster file name
    clust_base = clust_prefix + root + cluster_suffix
    clust_file_name = os.path.join(cluster_directory, clust_base)

    return clust_file_name


def get_file_name(base_file, directory, prefix, suffix):
    """
    Returns file name in the form: directory/prefix + root_of_base_file
    + suffix
    """
    # get base
    foo, root = get_base(file_name=base_file)

    # cluster file name
    base = prefix + root + suffix
    file_name = os.path.join(directory, base)

    return file_name


def find_distances(file_, read=True, segments=None, ids=None):
    """
    Read distances from a pickle file, or calculate them if the file
    does not exist or if arg read is False.
    If distances are calculated they're saved to a pickle file.

    Returns distances
    """
    # NOTE(review): neither file handle is closed, and the read handle is
    # opened in text mode although pickle.load generally needs binary mode —
    # consider `with open(file_, 'rb') as in_file:`.
    try:

        # read from pickle
        if not read:
            raise IOError
        in_file = open(file_)
        logging.info('Reading distance file')
        distances = pickle.load(in_file)

    except IOError:

        # calculate
        logging.info('Calculating distances')
        distances = segments.pairwiseDistance(ids=ids, mode='min')

        # save
        out_file = open(file_, 'wb')
        pickle.dump(distances, out_file, -1)

    return distances


def write_cluster_image(clusters, labels, base_file, cluster_directory,
                        cluster_suffix, clusters_data_type):
    """
    Writes clusters image
    """
    # don't do anything if no clustering
    if clusters is None:
        return

    # get clusters image name
    file_name = get_clusters_file_name(base_file, cluster_directory,
                                       cluster_suffix)

    # relabel segment ids according to clusters
    cluster_order = {}
    for clust_id in range(1, clusters.getNClusters()+1):
        for data_id in clusters.getCluster(clusterId=clust_id):
            cluster_order[data_id] = clust_id
    clust_data = labels.reorder(order=cluster_order, data=labels.data,
                                clean=True)

    # write
    clust_image = pyto.segmentation.Segment(data=clust_data)
    file_ = clust_image.write(file=file_name, dataType=clusters_data_type)

    return file_


def pickle_all_clusters(multi_clust, base_file, directory, suffix,
                        contacts=None):
    """
    Pickles multi cluster.
    """
    # Contacts are compacted before pickling and re-expanded afterwards.
    if contacts is not None:
        contacts.compactify()
        multi_clust.contacts = contacts

    # write pickle
    file_name = get_clusters_file_name(base_file, directory, suffix)
    pickle.dump(multi_clust, open(file_name, 'wb'), -1)

    if contacts is not None:
        contacts.expand()

    return file_name


def write_cluster_results(multi_cluster, multi_cluster_name, segments, bound,
                          multi_bound_ids, contacts, base_file,
                          cluster_directory, result_bound_suffix,
                          result_conn_suffix, cluster_files,
                          hi_bound_thr=None, hi_conn_thr=None,
                          distance_files={}):
    """
    Writes cluster results file

    NOTE(review): `distance_files={}` is a mutable default argument; safe
    only as long as callers never mutate it.
    """
    # open results file
    bound_res_file_name = get_clusters_file_name(base_file, cluster_directory,
                                                 result_bound_suffix)
    bound_res_file = open(bound_res_file_name, 'w')
    conn_res_file_name = get_clusters_file_name(base_file, cluster_directory,
                                                result_conn_suffix)
    conn_res_file = open(conn_res_file_name, 'w')
    conn_clust_res_file_name = get_clusters_file_name(
        base_file, cluster_directory, result_conn_clust_suffix)
    conn_clust_res_file = open(conn_clust_res_file_name, 'w')

    # machine info
    mach_name, mach_arch = common.machine_info()
    header = ["#",
              "# Machine: " + mach_name + " " + mach_arch,
              "# Date: " + time.asctime(time.localtime())]

    # script and working directory
    in_file_name = sys.modules[__name__].__file__
    in_time = time.asctime(time.localtime(os.path.getmtime(in_file_name)))
    header.extend([
        "#",
        "# Input script: " + in_file_name + " (" + in_time + ") " + __version__,
        "# Working directory: " + os.getcwd()])

    # file names and times
    in_seg_time = time.asctime(time.localtime(os.path.getmtime(base_file)))
    header.extend([
        "#",
        "# Connections: " + base_file + " (" + in_seg_time + ")"])

    # boundary file(s)
    if is_multi_boundaries():
        boundary_lines = [
            "# " + b_file + " ("
            + time.asctime(time.localtime(os.path.getmtime(b_file))) + ")"
            for b_file in boundary_file_name]
        boundary_lines.insert(0, "# Boundaries: ")
        boundary_ids_lines = [
            "# " + str(b_ids) for b_ids in multi_bound_ids]
        boundary_ids_lines.insert(
            0, "# Boundary ids (shift = " + str(shift) + "): ")
    else:
        boundary_time = time.asctime(
            time.localtime(os.path.getmtime(boundary_file_name)))
        boundary_lines = [
            "# Boundaries: ",
            "# " + boundary_file_name + " (" + boundary_time + ")"]
        boundary_ids_lines = [
            "# Boundary ids: ",
            "# " + str(bound.ids)]
    header.extend(boundary_lines)

    # multi cluster file name and time
    try:
        clus_time = time.asctime(
            time.localtime(os.path.getmtime(multi_cluster_name)))
    except OSError:
        clus_time = 'not written'
    cluster_lines = [
        "# Multi cluster pickle:",
        "# " + multi_cluster_name + " (" + clus_time + ")"]
    header.extend(cluster_lines)

    # cluster image file names and times
    header.append("# Output cluster images:")
    for file_ in cluster_files:
        file_name = cluster_files[file_].name
        try:
            file_time = time.asctime(
                time.localtime(os.path.getmtime(file_name)))
        except OSError:
            # NOTE(review): assigns con_bound_time but the append below uses
            # file_time, which may be unbound on this path — likely a bug.
            con_bound_time = 'not written'
        header.append("# " + file_name + " (" + file_time + ")")

    # distance file names and times
    header.append("# Distance files:")
    for file_name in list(distance_files.values()):
        try:
            file_time = time.asctime(
                time.localtime(os.path.getmtime(file_name)))
        except OSError:
            # NOTE(review): same file_time/con_bound_time mismatch as above.
            con_bound_time = 'does not exist'
        header.append("# " + file_name + " (" + file_time + ")")

    # results file names
    header.extend(common.format_file_info(
        name=[conn_clust_res_file_name],
        description="Connectivity clustering results"))
    header.extend(common.format_file_info(
        name=[bound_res_file_name],
        description="Hierarchical clustering of boundaries results"))
    header.extend(common.format_file_info(
        name=[conn_res_file_name],
        description="Hierarchical clustering of connectors results"))

    # write boundary ids
    # NOTE(review): extend("#") extends with the characters of the string;
    # it works for a 1-char string but append("#") was probably intended.
    header.extend("#")
    header.extend(boundary_ids_lines)

    #
    header.extend([
        "#",
        " Clustered items:",
        "# - number of boundaries: " + str(len(bound.ids)),
        "# - number of connections: " + str(len(segments.ids))])

    # hierarchical boundary clustering parameters
    # NOTE(review): source chunk is truncated inside the list literal below.
    if hi_cluster_boundaries:
        header.extend([
            "#",
            "# Hierarchical boundary clustering parameters:",
            "# - clustering
method: minimal euclidean distance", "# - linkage: " + hi_bound_linkage, "# - flat clusters criterion: " + hi_bound_criter, "# - flat clusters threshold: " + str(hi_bound_thresh)]) if hi_bound_criter == 'inconsistent': header.append( "# - flat clusters depth: " + str(hi_bound_depth)) header.extend([ "# - similarity method: " + hi_bound_similarity, "# - use single-item clusters for similarity: " \ + str(hi_bound_single)]) # hierarchical connection clustering parameters if hi_cluster_connections: header.extend([ "#", "# Hierarchical connection clustering parameters:", "# - clustering method: minimal euclidean distance", "# - linkage: " + hi_conn_linkage, "# - flat clusters criterion: " + hi_conn_criter, "# - flat clusters threshold: " + str(hi_conn_thresh)]) if hi_conn_criter == 'inconsistent': header.append( "# - flat clusters depth: " + str(hi_conn_depth)) header.extend([ "# - similarity method: " + hi_conn_similarity, "# - use single-item clusters for similarity: " + str(hi_conn_single)]) # connectivity clustering results if cluster_by_connectivity: header.extend([ "#", "# Connectivity clustering results:", "# - number of clusters: " + str(len(multi_cluster.connectivityBoundaries.clusters))]) # hierarchical boundary clustering results if hi_cluster_boundaries: header.extend([ "#", "# Hierarchical boundary clustering results:", "# - number of clusters: " + str(multi_cluster.hierarchyBoundaries.nClusters), "# - similarity index: " + ('%6.3f' % multi_cluster.hierarchyBoundaries.similarity), "# - threshold: " + str(hi_bound_thr)]) try: header.append( "# - rand similarity index: " + ('%6.3f' % multi_cluster.hierarchyBoundaries.rand)) except AttributeError: pass try: header.append( "# - b-flat similarity index: " + ('%6.3f' % multi_cluster.hierarchyBoundaries.bflat)) except AttributeError: pass try: header.append( "# - vi similarity index: " + ('%6.3f' % multi_cluster.hierarchyBoundaries.vi)) except AttributeError: pass # hierarchical connection clustering results if 
hi_cluster_connections: header.extend([ "#", "# Hierarchical connection clustering results:", "# - number of clusters (some clusters may contain no " + " boundaries): " + str(multi_cluster.hierarchyConnections.nClusters), "# - similarity index: " + ('%6.3f' % multi_cluster.hierarchyConnections.similarity), "# - threshold: " + str(hi_conn_thr)]) try: header.append( "# -
'rider_position', 'field_type': 'rider_position_type', 'ref_field_name': 'event', 'ref_field_value': { 'rider_position_change'}}, 'speed_high_alert': { 'example': 1.0, 'field_name': 'speed_high_alert', 'field_type': 'uint32', 'ref_field_name': 'event', 'ref_field_value': { 'speed_high_alert'}, 'scale': 1000.0, 'units': 'm/s'}, 'speed_low_alert': { 'example': 1.0, 'field_name': 'speed_low_alert', 'field_type': 'uint32', 'ref_field_name': 'event', 'ref_field_value': { 'speed_low_alert'}, 'scale': 1000.0, 'units': 'm/s'}, 'sport_point': { 'bits': '16,16', 'components': 'score,opponent_score', 'example': 1.0, 'field_name': 'sport_point', 'field_type': 'uint32', 'ref_field_name': 'event', 'ref_field_value': { 'sport_point'}, 'scale': '1,1'}, 'time_duration_alert': { 'example': 1.0, 'field_name': 'time_duration_alert', 'field_type': 'uint32', 'ref_field_name': 'event', 'ref_field_value': { 'time_duration_alert'}, 'scale': 1000.0, 'units': 's'}, 'timer_trigger': { 'example': 1.0, 'field_name': 'timer_trigger', 'field_type': 'timer_trigger', 'ref_field_name': 'event', 'ref_field_value': { 'timer'}}, 'virtual_partner_speed': { 'example': 1.0, 'field_name': 'virtual_partner_speed', 'field_type': 'uint16', 'ref_field_name': 'event', 'ref_field_value': { 'virtual_partner_pace'}, 'scale': 1000.0, 'units': 'm/s'}}}, 4.0: { 'example': 1.0, 'field_name': 'event_group', 'field_type': 'uint8'}, 7.0: { 'comment': 'Do not populate directly. Autogenerated ' 'by decoder for sport_point subfield ' 'components', 'example': 1.0, 'field_name': 'score', 'field_type': 'uint16'}, 8.0: { 'comment': 'Do not populate directly. Autogenerated ' 'by decoder for sport_point subfield ' 'components', 'example': 1.0, 'field_name': 'opponent_score', 'field_type': 'uint16'}, 9.0: { 'comment': 'Do not populate directly. Autogenerated ' 'by decoder for gear_change subfield ' 'components. Front gear number. 
1 is ' 'innermost.', 'example': 1.0, 'field_name': 'front_gear_num', 'field_type': 'uint8z'}, 10.0: { 'comment': 'Do not populate directly. ' 'Autogenerated by decoder for ' 'gear_change subfield components. ' 'Number of front teeth.', 'example': 1.0, 'field_name': 'front_gear', 'field_type': 'uint8z'}, 11.0: { 'comment': 'Do not populate directly. ' 'Autogenerated by decoder for ' 'gear_change subfield components. Rear ' 'gear number. 1 is innermost.', 'example': 1.0, 'field_name': 'rear_gear_num', 'field_type': 'uint8z'}, 12.0: { 'comment': 'Do not populate directly. ' 'Autogenerated by decoder for ' 'gear_change subfield components. ' 'Number of rear teeth.', 'example': 1.0, 'field_name': 'rear_gear', 'field_type': 'uint8z'}, 13.0: { 'field_name': 'device_index', 'field_type': 'device_index'}, 253.0: { 'example': 1.0, 'field_name': 'timestamp', 'field_type': 'date_time', 'units': 's'}}, 'exd_data_concept_configuration': { 0.0: { 'example': 1.0, 'field_name': 'screen_index', 'field_type': 'uint8'}, 1.0: { 'bits': '4,4', 'components': 'field_id,concept_index', 'example': 1.0, 'field_name': 'concept_field', 'field_type': 'byte'}, 2.0: { 'example': 1.0, 'field_name': 'field_id', 'field_type': 'uint8'}, 3.0: { 'example': 1.0, 'field_name': 'concept_index', 'field_type': 'uint8'}, 4.0: { 'example': 1.0, 'field_name': 'data_page', 'field_type': 'uint8'}, 5.0: { 'example': 1.0, 'field_name': 'concept_key', 'field_type': 'uint8'}, 6.0: { 'example': 1.0, 'field_name': 'scaling', 'field_type': 'uint8'}, 8.0: { 'example': 1.0, 'field_name': 'data_units', 'field_type': 'exd_data_units'}, 9.0: { 'example': 1.0, 'field_name': 'qualifier', 'field_type': 'exd_qualifiers'}, 10.0: { 'example': 1.0, 'field_name': 'descriptor', 'field_type': 'exd_descriptors'}, 11.0: { 'example': 1.0, 'field_name': 'is_signed', 'field_type': 'bool'}}, 'exd_data_field_configuration': { 0.0: { 'example': 1.0, 'field_name': 'screen_index', 'field_type': 'uint8'}, 1.0: { 'bits': '4,4', 'components': 
'field_id,concept_count', 'example': 1.0, 'field_name': 'concept_field', 'field_type': 'byte'}, 2.0: { 'example': 1.0, 'field_name': 'field_id', 'field_type': 'uint8'}, 3.0: { 'example': 1.0, 'field_name': 'concept_count', 'field_type': 'uint8'}, 4.0: { 'example': 1.0, 'field_name': 'display_type', 'field_type': 'exd_display_type'}, 5.0: { 'array': '[32]', 'example': 1.0, 'field_name': 'title', 'field_type': 'string'}}, 'exd_screen_configuration': { 0.0: { 'example': 1.0, 'field_name': 'screen_index', 'field_type': 'uint8'}, 1.0: { 'comment': 'number of fields in ' 'screen', 'example': 1.0, 'field_name': 'field_count', 'field_type': 'uint8'}, 2.0: { 'example': 1.0, 'field_name': 'layout', 'field_type': 'exd_layout'}, 3.0: { 'example': 1.0, 'field_name': 'screen_enabled', 'field_type': 'bool'}}, 'field_capabilities': { 0.0: { 'example': 1.0, 'field_name': 'file', 'field_type': 'file'}, 1.0: { 'example': 1.0, 'field_name': 'mesg_num', 'field_type': 'mesg_num'}, 2.0: { 'example': 1.0, 'field_name': 'field_num', 'field_type': 'uint8'}, 3.0: { 'example': 1.0, 'field_name': 'count', 'field_type': 'uint16'}, 254.0: { 'example': 1.0, 'field_name': 'message_index', 'field_type': 'message_index'}}, 'field_description': { 0.0: { 'example': 1.0, 'field_name': 'developer_data_index', 'field_type': 'uint8'}, 1.0: { 'example': 1.0, 'field_name': 'field_definition_number', 'field_type': 'uint8'}, 2.0: { 'example': 1.0, 'field_name': 'fit_base_type_id', 'field_type': 'fit_base_type'}, 3.0: { 'array': '[N]', 'example': 64.0, 'field_name': 'field_name', 'field_type': 'string'}, 4.0: { 'field_name': 'array', 'field_type': 'uint8'}, 5.0: { 'field_name': 'components', 'field_type': 'string'}, 6.0: { 'example': 1.0, 'field_name': 'scale', 'field_type': 'uint8'}, 7.0: { 'example': 1.0, 'field_name': 'offset', 'field_type': 'sint8'}, 8.0: { 'array': '[N]', 'example': 16.0, 'field_name': 'units', 'field_type': 'string'}, 9.0: { 'field_name': 'bits', 'field_type': 'string'}, 10.0: { 
'field_name': 'accumulate', 'field_type': 'string'}, 13.0: { 'example': 1.0, 'field_name': 'fit_base_unit_id', 'field_type': 'fit_base_unit'}}, 'file_capabilities': { 0.0: { 'example': 1.0, 'field_name': 'type', 'field_type': 'file'}, 1.0: { 'example': 1.0, 'field_name': 'flags', 'field_type': 'file_flags'}, 2.0: { 'example': 16.0, 'field_name': 'directory', 'field_type': 'string'}, 3.0: { 'example': 1.0, 'field_name': 'max_count', 'field_type': 'uint16'}, 4.0: { 'example': 1.0, 'field_name': 'max_size', 'field_type': 'uint32', 'units': 'bytes'}, 254.0: { 'example': 1.0, 'field_name': 'message_index', 'field_type': 'message_index'}}, 'file_creator': { 0.0: { 'example': 1.0, 'field_name': 'software_version', 'field_type': 'uint16'}, 1.0: { 'example': 1.0, 'field_name': 'hardware_version', 'field_type': 'uint8'}}, 'file_id': { 0.0: { 'example': 1.0, 'field_name': 'type', 'field_type': 'file'}, 1.0: { 'example': 1.0, 'field_name': 'manufacturer', 'field_type': 'manufacturer'}, 2.0: { 'example': 1.0, 'field_name': 'product', 'field_type': 'uint16', 'subfields': { 'garmin_product': { 'example': 1.0, 'field_name': 'garmin_product', 'field_type': 'garmin_product', 'ref_field_name': 'manufacturer', 'ref_field_value': { 'dynastream', 'dynastream_oem', 'garmin'}}}}, 3.0: { 'example': 1.0, 'field_name': 'serial_number', 'field_type': 'uint32z'}, 4.0: { 'comment': 'Only set for files that are can be ' 'created/erased.', 'example': 1.0, 'field_name': 'time_created', 'field_type': 'date_time'}, 5.0: { 'comment': 'Only set for files that are not ' 'created/erased.', 'example': 1.0, 'field_name': 'number', 'field_type': 'uint16'}, 8.0: { 'comment': 'Optional free form string to indicate ' 'the devices name or model', 'example': 20.0, 'field_name': 'product_name', 'field_type': 'string'}}, 'goal': { 0.0: { 'example': 1.0, 'field_name': 'sport', 'field_type': 'sport'}, 1.0: { 'example': 1.0, 'field_name': 'sub_sport', 'field_type': 'sub_sport'}, 2.0: { 'example': 1.0, 'field_name': 
'start_date', 'field_type': 'date_time'}, 3.0: { 'example': 1.0, 'field_name': 'end_date', 'field_type': 'date_time'}, 4.0: { 'example': 1.0, 'field_name': 'type', 'field_type': 'goal'}, 5.0: { 'example': 1.0, 'field_name': 'value', 'field_type': 'uint32'}, 6.0: { 'example': 1.0, 'field_name': 'repeat', 'field_type': 'bool'}, 7.0: { 'example': 1.0, 'field_name': 'target_value', 'field_type': 'uint32'}, 8.0: { 'example': 1.0, 'field_name': 'recurrence', 'field_type': 'goal_recurrence'}, 9.0: { 'example': 1.0, 'field_name': 'recurrence_value', 'field_type': 'uint16'}, 10.0: { 'example': 1.0, 'field_name': 'enabled', 'field_type': 'bool'}, 254.0: { 'example': 1.0, 'field_name': 'message_index', 'field_type': 'message_index'}}, 'gps_metadata': { 0.0: { 'comment': 'Millisecond part of the ' 'timestamp.', 'field_name': 'timestamp_ms', 'field_type': 'uint16', 'units': 'ms'}, 1.0: { 'field_name': 'position_lat', 'field_type': 'sint32', 'units': 'semicircles'}, 2.0: { 'field_name': 'position_long', 'field_type': 'sint32', 'units': 'semicircles'}, 3.0: { 'field_name': 'enhanced_altitude', 'field_type': 'uint32', 'offset': 500.0, 'scale': 5.0, 'units': 'm'}, 4.0: { 'field_name': 'enhanced_speed', 'field_type': 'uint32', 'scale': 1000.0, 'units': 'm/s'}, 5.0: { 'field_name': 'heading', 'field_type': 'uint16', 'scale': 100.0, 'units': 'degrees'}, 6.0: { 'comment': 'Used to correlate UTC to system ' 'time if the timestamp of the ' 'message is in system time. This ' 'UTC time is derived from the GPS ' 'data.', 'field_name': 'utc_timestamp', 'field_type': 'date_time', 'units': 's'}, 7.0: { 'array': '[3]', 'comment': 'velocity[0] is lon velocity. ' 'Velocity[1] is lat velocity. 
' 'Velocity[2] is altitude velocity.', 'field_name': 'velocity', 'field_type': 'sint16', 'scale': 100.0, 'units': 'm/s'}, 253.0: { 'comment': 'Whole second part of the ' 'timestamp.', 'field_name': 'timestamp', 'field_type': 'date_time', 'units': 's'}}, 'gyroscope_data': { 0.0: { 'comment': 'Millisecond part of the ' 'timestamp.', 'field_name': 'timestamp_ms', 'field_type': 'uint16', 'units': 'ms'}, 1.0: { 'array': '[N]', 'comment': 'Each time in the array ' 'describes the time at which the ' 'gyro sample with the ' 'corrosponding index was taken. ' 'Limited to 30 samples in each ' 'message. The samples may span ' 'across seconds. Array size must ' 'match the number of samples in ' 'gyro_x and gyro_y and gyro_z', 'field_name': 'sample_time_offset', 'field_type': 'uint16', 'units': 'ms'}, 2.0: { 'array': '[N]', 'comment': 'These are the raw ADC reading. ' 'Maximum number of samples is 30 ' 'in each message. The samples ' 'may span across seconds. A ' 'conversion will need to be done ' 'on this data once read.', 'field_name': 'gyro_x', 'field_type': 'uint16', 'units': 'counts'}, 3.0: { 'array': '[N]', 'comment': 'These are the raw ADC reading. ' 'Maximum number of samples is 30 ' 'in each message. The samples ' 'may span across seconds. A ' 'conversion will need to be done ' 'on this data once read.', 'field_name': 'gyro_y', 'field_type': 'uint16', 'units': 'counts'}, 4.0: { 'array': '[N]', 'comment': 'These are the raw ADC reading. ' 'Maximum number of samples is 30 ' 'in each message. The samples ' 'may span across seconds. A ' 'conversion will need to be done ' 'on this data once read.', 'field_name': 'gyro_z', 'field_type': 'uint16', 'units': 'counts'}, 5.0: { 'array': '[N]', 'comment': 'Calibrated gyro reading', 'field_name': 'calibrated_gyro_x', 'field_type': 'float32',
""" tasks service Provides a management API for tasks in the system. """ import enum from datetime import datetime import six from clearml.backend_api.session import ( Request, BatchRequest, Response, NonStrictDataModel, schema_property, StringEnum, ) from dateutil.parser import parse as parse_datetime class MultiFieldPatternData(NonStrictDataModel): """ :param pattern: Pattern string (regex) :type pattern: str :param fields: List of field names :type fields: Sequence[str] """ _schema = { "properties": { "fields": { "description": "List of field names", "items": {"type": "string"}, "type": ["array", "null"], }, "pattern": { "description": "Pattern string (regex)", "type": ["string", "null"], }, }, "type": "object", } def __init__(self, pattern=None, fields=None, **kwargs): super(MultiFieldPatternData, self).__init__(**kwargs) self.pattern = pattern self.fields = fields @schema_property("pattern") def pattern(self): return self._property_pattern @pattern.setter def pattern(self, value): if value is None: self._property_pattern = None return self.assert_isinstance(value, "pattern", six.string_types) self._property_pattern = value @schema_property("fields") def fields(self): return self._property_fields @fields.setter def fields(self, value): if value is None: self._property_fields = None return self.assert_isinstance(value, "fields", (list, tuple)) self.assert_isinstance(value, "fields", six.string_types, is_array=True) self._property_fields = value class ModelTypeEnum(StringEnum): input = "input" output = "output" class TaskModelItem(NonStrictDataModel): """ :param name: The task model name :type name: str :param model: The model ID :type model: str """ _schema = { "properties": { "model": {"description": "The model ID", "type": "string"}, "name": {"description": "The task model name", "type": "string"}, }, "required": ["name", "model"], "type": "object", } def __init__(self, name, model, **kwargs): super(TaskModelItem, self).__init__(**kwargs) self.name = name 
self.model = model @schema_property("name") def name(self): return self._property_name @name.setter def name(self, value): if value is None: self._property_name = None return self.assert_isinstance(value, "name", six.string_types) self._property_name = value @schema_property("model") def model(self): return self._property_model @model.setter def model(self, value): if value is None: self._property_model = None return self.assert_isinstance(value, "model", six.string_types) self._property_model = value class Script(NonStrictDataModel): """ :param binary: Binary to use when running the script :type binary: str :param repository: Name of the repository where the script is located :type repository: str :param tag: Repository tag :type tag: str :param branch: Repository branch id If not provided and tag not provided, default repository branch is used. :type branch: str :param version_num: Version (changeset) number. Optional (default is head version) Unused if tag is provided. :type version_num: str :param entry_point: Path to execute within the repository :type entry_point: str :param working_dir: Path to the folder from which to run the script Default - root folder of repository :type working_dir: str :param requirements: A JSON object containing requirements strings by key :type requirements: dict :param diff: Uncommitted changes found in the repository when task was run :type diff: str """ _schema = { "properties": { "binary": { "default": "python", "description": "Binary to use when running the script", "type": ["string", "null"], }, "branch": { "description": "Repository branch id If not provided and tag not provided, default repository branch is used.", "type": ["string", "null"], }, "diff": { "description": "Uncommitted changes found in the repository when task was run", "type": ["string", "null"], }, "entry_point": { "description": "Path to execute within the repository", "type": ["string", "null"], }, "repository": { "description": "Name of the repository 
where the script is located", "type": ["string", "null"], }, "requirements": { "description": "A JSON object containing requirements strings by key", "type": ["object", "null"], }, "tag": {"description": "Repository tag", "type": ["string", "null"]}, "version_num": { "description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.", "type": ["string", "null"], }, "working_dir": { "description": "Path to the folder from which to run the script Default - root folder of repository", "type": ["string", "null"], }, }, "type": "object", } def __init__( self, binary="python", repository=None, tag=None, branch=None, version_num=None, entry_point=None, working_dir=None, requirements=None, diff=None, **kwargs ): super(Script, self).__init__(**kwargs) self.binary = binary self.repository = repository self.tag = tag self.branch = branch self.version_num = version_num self.entry_point = entry_point self.working_dir = working_dir self.requirements = requirements self.diff = diff @schema_property("binary") def binary(self): return self._property_binary @binary.setter def binary(self, value): if value is None: self._property_binary = None return self.assert_isinstance(value, "binary", six.string_types) self._property_binary = value @schema_property("repository") def repository(self): return self._property_repository @repository.setter def repository(self, value): if value is None: self._property_repository = None return self.assert_isinstance(value, "repository", six.string_types) self._property_repository = value @schema_property("tag") def tag(self): return self._property_tag @tag.setter def tag(self, value): if value is None: self._property_tag = None return self.assert_isinstance(value, "tag", six.string_types) self._property_tag = value @schema_property("branch") def branch(self): return self._property_branch @branch.setter def branch(self, value): if value is None: self._property_branch = None return self.assert_isinstance(value, 
"branch", six.string_types) self._property_branch = value @schema_property("version_num") def version_num(self): return self._property_version_num @version_num.setter def version_num(self, value): if value is None: self._property_version_num = None return self.assert_isinstance(value, "version_num", six.string_types) self._property_version_num = value @schema_property("entry_point") def entry_point(self): return self._property_entry_point @entry_point.setter def entry_point(self, value): if value is None: self._property_entry_point = None return self.assert_isinstance(value, "entry_point", six.string_types) self._property_entry_point = value @schema_property("working_dir") def working_dir(self): return self._property_working_dir @working_dir.setter def working_dir(self, value): if value is None: self._property_working_dir = None return self.assert_isinstance(value, "working_dir", six.string_types) self._property_working_dir = value @schema_property("requirements") def requirements(self): return self._property_requirements @requirements.setter def requirements(self, value): if value is None: self._property_requirements = None return self.assert_isinstance(value, "requirements", (dict,)) self._property_requirements = value @schema_property("diff") def diff(self): return self._property_diff @diff.setter def diff(self, value): if value is None: self._property_diff = None return self.assert_isinstance(value, "diff", six.string_types) self._property_diff = value class Output(NonStrictDataModel): """ :param destination: Storage id. This is where output files will be stored. :type destination: str :param model: Model id. :type model: str :param result: Task result. Values: 'success', 'failure' :type result: str :param error: Last error text :type error: str """ _schema = { "properties": { "destination": { "description": "Storage id. 
This is where output files will be stored.", "type": ["string", "null"], }, "error": {"description": "Last error text", "type": ["string", "null"]}, "model": {"description": "Model id.", "type": ["string", "null"]}, "result": { "description": "Task result. Values: 'success', 'failure'", "type": ["string", "null"], }, }, "type": "object", } def __init__( self, destination=None, model=None, result=None, error=None, **kwargs ): super(Output, self).__init__(**kwargs) self.destination = destination self.model = model self.result = result self.error = error @schema_property("destination") def destination(self): return self._property_destination @destination.setter def destination(self, value): if value is None: self._property_destination = None return self.assert_isinstance(value, "destination", six.string_types) self._property_destination = value @schema_property("model") def model(self): return self._property_model @model.setter def model(self, value): if value is None: self._property_model = None return self.assert_isinstance(value, "model", six.string_types) self._property_model = value @schema_property("result") def result(self): return self._property_result @result.setter def result(self, value): if value is None: self._property_result = None return self.assert_isinstance(value, "result", six.string_types) self._property_result = value @schema_property("error") def error(self): return self._property_error @error.setter def error(self, value): if value is None: self._property_error = None return self.assert_isinstance(value, "error", six.string_types) self._property_error = value class ArtifactTypeData(NonStrictDataModel): """ :param preview: Description or textual data :type preview: str :param content_type: System defined raw data content type :type content_type: str :param data_hash: Hash of raw data, without any headers or descriptive parts :type data_hash: str """ _schema = { "properties": { "content_type": { "description": "System defined raw data content 
type", "type": ["string", "null"], }, "data_hash": { "description": "Hash of raw data, without any headers or descriptive parts", "type": ["string", "null"], }, "preview": { "description": "Description or textual data", "type": ["string", "null"], }, }, "type": "object", } def __init__(self, preview=None, content_type=None, data_hash=None, **kwargs): super(ArtifactTypeData, self).__init__(**kwargs) self.preview = preview self.content_type = content_type self.data_hash = data_hash @schema_property("preview") def preview(self): return self._property_preview @preview.setter def preview(self, value): if value is None: self._property_preview = None return self.assert_isinstance(value, "preview", six.string_types) self._property_preview = value @schema_property("content_type") def content_type(self): return self._property_content_type @content_type.setter def content_type(self, value): if value is None: self._property_content_type = None return self.assert_isinstance(value, "content_type", six.string_types) self._property_content_type = value @schema_property("data_hash") def data_hash(self): return self._property_data_hash @data_hash.setter def data_hash(self, value): if value is None: self._property_data_hash = None return self.assert_isinstance(value, "data_hash", six.string_types) self._property_data_hash = value class ArtifactModeEnum(StringEnum): input = "input" output = "output" class Artifact(NonStrictDataModel): """ :param key: Entry key :type key: str :param type: System defined type :type type: str :param mode: System defined input/output indication :type mode: ArtifactModeEnum :param uri: Raw data location :type uri: str :param content_size: Raw data length in bytes :type content_size: int :param hash: Hash of entire raw data :type hash: str :param timestamp: Epoch time when artifact was created :type timestamp: int :param type_data: Additional fields defined by the system :type type_data: ArtifactTypeData :param display_data: User-defined list of key/value 
pairs, sorted :type display_data: Sequence[Sequence[str]]
# -*- coding: utf-8 -*-

#    Copyright 2013 Mirantis, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import traceback

from nailgun.objects.serializers.network_configuration \
    import NeutronNetworkConfigurationSerializer
from nailgun.objects.serializers.network_configuration \
    import NovaNetworkConfigurationSerializer

from nailgun.consts import CLUSTER_STATUSES
from nailgun.consts import NODE_STATUSES
from nailgun.consts import TASK_NAMES
from nailgun.consts import TASK_STATUSES

from nailgun.db import db
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import Task
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun import objects
from nailgun.openstack.common import jsonutils
import nailgun.rpc as rpc
from nailgun.task import task as tasks
from nailgun.task.task import TaskHelper


class TaskManager(object):
    """Base manager that executes task logic against a cluster."""

    def __init__(self, cluster_id=None):
        # Cluster is only loaded when an id is given; subclasses that rely on
        # self.cluster assume it was provided.
        if cluster_id:
            self.cluster = db().query(Cluster).get(cluster_id)

    def _call_silently(self, task, instance, *args, **kwargs):
        """Run ``instance``'s method for ``task``, converting any exception
        into an 'error' status on the task instead of propagating it.

        ``method_name`` kwarg (default 'execute') selects the method to call.
        """
        # create action_log for task
        al = TaskHelper.create_action_log(task)

        method = getattr(instance, kwargs.pop('method_name', 'execute'))
        # Task already failed (e.g. by a previous step) — just log and bail.
        if task.status == TASK_STATUSES.error:
            TaskHelper.update_action_log(task, al)
            return
        try:
            to_return = method(task, *args, **kwargs)
            # update action_log instance for task
            # for asynchronous task it will be not final update
            # as they also are updated in rpc receiver
            TaskHelper.update_action_log(task, al)
            return to_return
        except Exception as exc:
            err = str(exc)
            # Log the traceback unless the exception explicitly opts out via
            # a falsy ``log_traceback`` attribute.
            if any([
                not hasattr(exc, "log_traceback"),
                hasattr(exc, "log_traceback") and exc.log_traceback
            ]):
                logger.error(traceback.format_exc())

            # update task entity with given data
            data = {'status': 'error',
                    'progress': 100,
                    'message': err}
            objects.Task.update(task, data)

            TaskHelper.update_action_log(task, al)

    def check_running_task(self, task_name):
        """Raise DumpRunning if a task named ``task_name`` is running;
        delete finished ('ready'/'error') instances of it."""
        current_tasks = db().query(Task).filter_by(
            name=task_name
        )
        for task in current_tasks:
            if task.status == "running":
                raise errors.DumpRunning()
            elif task.status in ("ready", "error"):
                db().delete(task)
                db().commit()

    def serialize_network_cfg(self, cluster):
        """Serialize the cluster network config with the serializer matching
        the cluster's network provider ('nova_network' or 'neutron')."""
        serializer = {'nova_network': NovaNetworkConfigurationSerializer,
                      'neutron': NeutronNetworkConfigurationSerializer}
        return serializer[cluster.net_provider].serialize_for_cluster(cluster)


class ApplyChangesTaskManager(TaskManager):
    """Manager for the 'deploy' supertask: deletion, provisioning and
    deployment of cluster nodes."""

    def _lock_required_tasks(self):
        # Lock deploy/stop/reset tasks of this cluster for the transaction.
        names = (
            TASK_NAMES.deploy,
            TASK_NAMES.stop_deployment,
            TASK_NAMES.reset_environment
        )
        return objects.TaskCollection.lock_cluster_tasks(
            cluster_id=self.cluster.id,
            names=names
        )

    def _remove_obsolete_tasks(self):
        """Delete finished deploy tasks and any stop/reset tasks; raise
        DeploymentAlreadyStarted if a deploy task is still running."""
        locked_tasks = self._lock_required_tasks()
        current_tasks = objects.TaskCollection.filter_by(
            locked_tasks,
            name=TASK_NAMES.deploy
        )
        for task in current_tasks:
            if task.status == TASK_STATUSES.running:
                db().commit()
                raise errors.DeploymentAlreadyStarted()
            elif task.status in (TASK_STATUSES.ready, TASK_STATUSES.error):
                db().delete(task)
                db().flush()

        obsolete_tasks = objects.TaskCollection.filter_by_list(
            locked_tasks,
            'name',
            (TASK_NAMES.stop_deployment, TASK_NAMES.reset_environment)
        )
        for task in obsolete_tasks:
            db().delete(task)
            db().flush()

    def execute(self):
        """Create the 'deploy' supertask and its node_deletion / provision /
        deployment subtasks, then cast the generated messages to 'naily'.

        Returns the supertask; raises WrongNodeStatus when there is nothing
        to deploy.
        """
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id
            )
        )
        network_info = self.serialize_network_cfg(self.cluster)
        logger.info(
            u"Network info:\n{0}".format(
                jsonutils.dumps(network_info, indent=4)
            )
        )

        self._remove_obsolete_tasks()

        supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
        db().add(supertask)

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
        nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)

        task_messages = []

        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

        # we should have task committed for processing in other threads
        db().commit()
        TaskHelper.create_action_log(supertask)

        # Run validation if user didn't redefine
        # provisioning and deployment information
        if (not objects.Cluster.get_provisioning_info(self.cluster) and
                not objects.Cluster.get_deployment_info(self.cluster)):
            try:
                self.check_before_deployment(supertask)
            except errors.CheckBeforeDeploymentError:
                db().commit()
                return supertask

        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # For more accurate progress calculation
            task_weight = 0.4
            task_deletion = supertask.create_subtask(TASK_NAMES.node_deletion,
                                                     weight=task_weight)
            logger.debug("Launching deletion task: %s", task_deletion.uuid)
            self._call_silently(task_deletion, tasks.DeletionTask)
            # we should have task committed for processing in other threads
            db().commit()

        if nodes_to_provision:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # updating nodes
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
            logger.debug("There are nodes to provision: %s",
                         " ".join([n.fqdn for n in nodes_to_provision]))

            # For more accurate progress calulation
            task_weight = 0.4
            task_provision = supertask.create_subtask(TASK_NAMES.provision,
                                                      weight=task_weight)

            # we should have task committed for processing in other threads
            db().commit()
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                nodes_to_provision,
                method_name='message'
            )

            task_provision = objects.Task.get_by_uid(
                task_provision.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_provision.status == TASK_STATUSES.error:
                return supertask

            task_provision.cache = provision_message
            db().commit()
            task_messages.append(provision_message)

        if nodes_to_deploy:
            objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
            # locking nodes before updating
            objects.NodeCollection.lock_nodes(nodes_to_deploy)
            # updating nodes
            objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
            logger.debug("There are nodes to deploy: %s",
                         " ".join([n.fqdn for n in nodes_to_deploy]))
            task_deployment = supertask.create_subtask(TASK_NAMES.deployment)
            # we should have task committed for processing in other threads
            db().commit()
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                nodes_to_deploy,
                method_name='message'
            )

            task_deployment = objects.Task.get_by_uid(
                task_deployment.id,
                fail_if_not_found=True,
                lock_for_update=True
            )
            # if failed to generate task message for orchestrator
            # then task is already set to error
            if task_deployment.status == TASK_STATUSES.error:
                return supertask

            task_deployment.cache = deployment_message
            db().commit()
            task_messages.append(deployment_message)

        if nodes_to_provision:
            nodes_to_provision = objects.NodeCollection.lock_nodes(
                nodes_to_provision
            )
            for node in nodes_to_provision:
                node.status = NODE_STATUSES.provisioning
            db().commit()

        objects.Cluster.get_by_uid(
            self.cluster.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        self.cluster.status = CLUSTER_STATUSES.deployment
        db().add(self.cluster)
        db().commit()

        if task_messages:
            rpc.cast('naily', task_messages)

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id,
                supertask.uuid
            )
        )

        return supertask

    def check_before_deployment(self, supertask):
        """Run network and prerequisite checks as subtasks of ``supertask``;
        raise CheckBeforeDeploymentError when a check fails."""
        # checking admin intersection with untagged
        network_info = self.serialize_network_cfg(self.cluster)
        # fuelweb_admin network is excluded from the check.
        network_info["networks"] = [
            n for n in network_info["networks"] if n["name"] != "fuelweb_admin"
        ]

        check_networks = supertask.create_subtask(TASK_NAMES.check_networks)

        self._call_silently(
            check_networks,
            tasks.CheckNetworksTask,
            data=network_info,
            check_admin_untagged=True
        )

        if check_networks.status == TASK_STATUSES.error:
            logger.warning(
                "Checking networks failed: %s", check_networks.message
            )
            raise errors.CheckBeforeDeploymentError(check_networks.message)
        TaskHelper.set_ready_if_not_finished(check_networks)
        db().delete(check_networks)
        db().refresh(supertask)
        db().flush()

        # checking prerequisites
        check_before = supertask.create_subtask(
            TASK_NAMES.check_before_deployment
        )
        logger.debug("Checking prerequisites task: %s", check_before.uuid)

        self._call_silently(
            check_before,
            tasks.CheckBeforeDeploymentTask
        )
        # if failed to check prerequisites
        # then task is already set to error
        if check_before.status == TASK_STATUSES.error:
            logger.warning(
                "Checking prerequisites failed: %s", check_before.message
            )
            raise errors.CheckBeforeDeploymentError(check_before.message)
        logger.debug(
            "Checking prerequisites is successful, starting deployment..."
            # NOTE(review): source chunk ends here — the closing paren of this
            # call is in the continuation outside this view.
) TaskHelper.set_ready_if_not_finished(check_before) db().delete(check_before) db().refresh(supertask) db().flush() class ProvisioningTaskManager(TaskManager): def execute(self, nodes_to_provision): """Run provisioning task on specified nodes """ # locking nodes nodes_ids = [node.id for node in nodes_to_provision] nodes = objects.NodeCollection.filter_by_list( None, 'id', nodes_ids, order_by='id' ) objects.NodeCollection.lock_for_update(nodes).all() objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision) logger.debug('Nodes to provision: {0}'.format( ' '.join([n.fqdn for n in nodes_to_provision]))) task_provision = Task(name='provision', cluster=self.cluster) db().add(task_provision) db().commit() provision_message = self._call_silently( task_provision, tasks.ProvisionTask, nodes_to_provision, method_name='message' ) task_provision = objects.Task.get_by_uid( task_provision.id, fail_if_not_found=True, lock_for_update=True ) task_provision.cache = provision_message objects.NodeCollection.lock_for_update(nodes).all() for node in nodes_to_provision: node.pending_addition = False node.status = NODE_STATUSES.provisioning node.progress = 0 db().commit() rpc.cast('naily', provision_message) return task_provision class DeploymentTaskManager(TaskManager): def execute(self, nodes_to_deployment): # locking nodes for update objects.NodeCollection.lock_nodes(nodes_to_deployment) objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deployment) logger.debug('Nodes to deploy: {0}'.format( ' '.join([n.fqdn for n in nodes_to_deployment]))) task_deployment = Task(name='deployment', cluster=self.cluster) db().add(task_deployment) deployment_message = self._call_silently( task_deployment, tasks.DeploymentTask, nodes_to_deployment, method_name='message') db().refresh(task_deployment) # locking task task_deployment = objects.Task.get_by_uid( task_deployment.id, fail_if_not_found=True, lock_for_update=True ) # locking nodes 
objects.NodeCollection.lock_nodes(nodes_to_deployment) task_deployment.cache = deployment_message for node in nodes_to_deployment: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_deployment class StopDeploymentTaskManager(TaskManager): def execute(self): # locking tasks for processing names = ( TASK_NAMES.stop_deployment, TASK_NAMES.deployment, TASK_NAMES.provision ) objects.TaskCollection.lock_cluster_tasks( self.cluster.id, names=names ) stop_running = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=TASK_NAMES.stop_deployment, ) stop_running = objects.TaskCollection.order_by( stop_running, 'id' ).first() if stop_running: if stop_running.status == TASK_STATUSES.running: raise errors.StopAlreadyRunning( "Stopping deployment task " "is already launched" ) else: db().delete(stop_running) db().flush() deployment_task = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=TASK_NAMES.deployment, ) deployment_task = objects.TaskCollection.order_by( deployment_task, 'id' ).first() provisioning_task = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=TASK_NAMES.provision, ) provisioning_task = objects.TaskCollection.order_by( provisioning_task, 'id' ).first() if not deployment_task and not provisioning_task: db().rollback() raise errors.DeploymentNotRunning( u"Nothing to stop - deployment is " u"not running on environment '{0}'".format( self.cluster.id ) ) task = Task( name="stop_deployment", cluster=self.cluster ) db().add(task) db().commit() self._call_silently( task, tasks.StopDeploymentTask, deploy_task=deployment_task, provision_task=provisioning_task ) return task class ResetEnvironmentTaskManager(TaskManager): def execute(self): deploy_running = db().query(Task).filter_by( cluster=self.cluster, name='deploy', status='running' ).first() if deploy_running: raise errors.DeploymentAlreadyStarted( u"Can't reset environment '{0}' when " 
u"deployment is running".format( self.cluster.id ) ) obsolete_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, ).filter( Task.name.in_([ 'deploy', 'deployment', 'stop_deployment' ]) ) for task in obsolete_tasks: db().delete(task) db().commit() task = Task( name="reset_environment", cluster=self.cluster ) db().add(task) db.commit() self._call_silently( task, tasks.ResetEnvironmentTask ) return task class UpdateEnvironmentTaskManager(TaskManager): def execute(self): if not self.cluster.pending_release_id: raise errors.InvalidReleaseId( u"Can't update environment '{0}' when " u"new release Id is invalid".format(self.cluster.name)) running_tasks = db().query(Task).filter_by( cluster_id=self.cluster.id, status='running' ).filter( Task.name.in_([ 'deploy', 'deployment', 'reset_environment', 'stop_deployment' ]) ) if running_tasks.first(): raise errors.TaskAlreadyRunning( u"Can't update environment '{0}' when " u"other task is running".format( self.cluster.id ) ) nodes_to_change = TaskHelper.nodes_to_upgrade(self.cluster) objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_change) logger.debug('Nodes to update: {0}'.format( ' '.join([n.fqdn for n in nodes_to_change]))) task_update = Task(name='update', cluster=self.cluster) db().add(task_update) self.cluster.status = 'update' db().flush() deployment_message = self._call_silently( task_update, tasks.UpdateTask, nodes_to_change, method_name='message') db().refresh(task_update) for node in nodes_to_change: node.status = 'deploying' node.progress = 0 db().commit() rpc.cast('naily', deployment_message) return task_update class CheckNetworksTaskManager(TaskManager): def execute(self, data, check_admin_untagged=False): locked_tasks = objects.TaskCollection.filter_by( None, cluster_id=self.cluster.id, name=TASK_NAMES.check_networks ) locked_tasks = objects.TaskCollection.order_by(locked_tasks, 'id') check_networks = objects.TaskCollection.lock_for_update( locked_tasks ).first() if check_networks: 
TaskHelper.set_ready_if_not_finished(check_networks) db().delete(check_networks) db().flush() task = Task( name=TASK_NAMES.check_networks, cluster=self.cluster ) db().add(task) db().commit() self._call_silently( task, tasks.CheckNetworksTask, data, check_admin_untagged ) task = objects.Task.get_by_uid( task.id, fail_if_not_found=True, lock_for_update=True ) if task.status == TASK_STATUSES.running: # update task status with given data
<filename>third_party/fonts.bzl # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external") load("@io_bazel_rules_closure//closure:defs.bzl", "web_library_external") def tensorboard_fonts_workspace(): """Downloads TensorBoard fonts.""" # bazel run //tensorboard/tools:import_google_fonts -- --url='https://fonts.googleapis.com/css?family=Roboto:400,300,300italic,400italic,500,500italic,700,700italic;https://fonts.googleapis.com/css?family=Roboto+Mono:400,700' filegroup_external( name = "com_google_fonts_roboto", licenses = ["notice"], # Apache 2.0 sha256_urls = { "c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4": [ "https://mirror.bazel.build/raw.githubusercontent.com/google/roboto/ba03b84b90b50afd99f9688059447bc545e5c0e1/LICENSE", "https://raw.githubusercontent.com/google/roboto/ba03b84b90b50afd99f9688059447bc545e5c0e1/LICENSE", ], # Roboto (cyrillic) "41720926981ffb6dc229f06fc0bbf0f43e45ba032d126726ebee481c2a6559e2": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/uYECMKoHcO9x1wdmbyHIm3-_kf6ByYO6CLYdB4HQE-Y.woff2", "https://fonts.gstatic.com/s/roboto/v18/uYECMKoHcO9x1wdmbyHIm3-_kf6ByYO6CLYdB4HQE-Y.woff2", ], # Roboto (cyrillic-ext) "90a0ad0b48861588a6e33a5905b17e1219ea87ab6f07ccc41e7c2cddf38967a8": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/sTdaA6j0Psb920Vjv-mrzH-_kf6ByYO6CLYdB4HQE-Y.woff2", 
"https://fonts.gstatic.com/s/roboto/v18/sTdaA6j0Psb920Vjv-mrzH-_kf6ByYO6CLYdB4HQE-Y.woff2", ], # Roboto (greek) "949e287846b0940817e4ea0f65accc4481a46b8733dc12aa0265293a4645c661": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/_VYFx-s824kXq_Ul2BHqYH-_kf6ByYO6CLYdB4HQE-Y.woff2", "https://fonts.gstatic.com/s/roboto/v18/_VYFx-s824kXq_Ul2BHqYH-_kf6ByYO6CLYdB4HQE-Y.woff2", ], # Roboto (greek-ext) "e5b2e29a16d8ef4c5a123b40786af72da589c4aad634eab40d90eef8bb4418aa": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/tnj4SB6DNbdaQnsM8CFqBX-_kf6ByYO6CLYdB4HQE-Y.woff2", "https://fonts.gstatic.com/s/roboto/v18/tnj4SB6DNbdaQnsM8CFqBX-_kf6ByYO6CLYdB4HQE-Y.woff2", ], # Roboto (latin) "4352380f92ce7f9a4a4a23306b992bed10055dbfffe90987cc72083e583fc280": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/oMMgfZMQthOryQo9n22dcuvvDin1pK8aKteLpeZ5c0A.woff2", "https://fonts.gstatic.com/s/roboto/v18/oMMgfZMQthOryQo9n22dcuvvDin1pK8aKteLpeZ5c0A.woff2", ], # Roboto (latin-ext) "80fa23b4804621ce7f16b5c56d524dd90ea09d792622eeac9adf0ee6317b9e3a": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/Ks_cVxiCiwUWVsFWFA3Bjn-_kf6ByYO6CLYdB4HQE-Y.woff2", "https://fonts.gstatic.com/s/roboto/v18/Ks_cVxiCiwUWVsFWFA3Bjn-_kf6ByYO6CLYdB4HQE-Y.woff2", ], # Roboto (vietnamese) "a0a893b2ff1c82d49ac0c09ace71cf8178c0830f6a988103c779b6fc12c0da78": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/NJ4vxlgWwWbEsv18dAhqnn-_kf6ByYO6CLYdB4HQE-Y.woff2", "https://fonts.gstatic.com/s/roboto/v18/NJ4vxlgWwWbEsv18dAhqnn-_kf6ByYO6CLYdB4HQE-Y.woff2", ], # Roboto Bold (cyrillic) "6082aa2f5aab855120cd58f560f58975579097c484d23cc7854977a529f91bc4": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/isZ-wbCXNKAbnjo6_TwHToX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/isZ-wbCXNKAbnjo6_TwHToX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Bold (cyrillic-ext) "616eb767627d16bef2b9be2218bb5f1bbbb97cfbd06c4e5241c8b532b56467aa": [ 
"https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/77FXFjRbGzN4aCrSFhlh3oX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/77FXFjRbGzN4aCrSFhlh3oX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Bold (greek) "28959a3f1fea0c7f7feca26f92465f5263f2e8fdec17030e0e7a9e6a8cb321af": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/jSN2CGVDbcVyCnfJfjSdfIX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/jSN2CGVDbcVyCnfJfjSdfIX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Bold (greek-ext) "e94a5635cb68464d332cd374fd57b95913fc5b549f1967fbb73829b2084efd98": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/UX6i4JxQDm3fVTc1CPuwqoX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/UX6i4JxQDm3fVTc1CPuwqoX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Bold (latin) "1be216dbc059d96e288b0c1f399a1a80ee8c65e4c1272dbc4574bd6d23cf45d9": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/d-6IYplOFocCacKzxwXSOJBw1xU1rKptJj_0jans920.woff2", "https://fonts.gstatic.com/s/roboto/v18/d-6IYplOFocCacKzxwXSOJBw1xU1rKptJj_0jans920.woff2", ], # Roboto Bold (latin-ext) "6c8be972381d4da037f47c33ef1e31b88f0130ded1432730d4d792331f983839": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/97uahxiqZRoncBaCEI3aW4X0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/97uahxiqZRoncBaCEI3aW4X0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Bold (vietnamese) "5f162f1ca2441cae368e97ed42b56332d7b68b1ffbbf9f7e4b648420667acee5": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/PwZc-YbIL414wB9rB1IAPYX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/PwZc-YbIL414wB9rB1IAPYX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Bold Italic (cyrillic) "bb4e478b0fe2ae7fbd6369c94d126060ffa697df189d7f3653f23f521f906cd8": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcC14sYYdJg5dU2qzJEVSuta0.woff2", 
"https://fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcC14sYYdJg5dU2qzJEVSuta0.woff2", ], # Roboto Bold Italic (cyrillic-ext) "9fc911647b05ecdbadfe6693d6ff306a0a34829999b2055ad2e474e3ad0b778d": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcC_ZraR2Tg8w2lzm7kLNL0-w.woff2", "https://fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcC_ZraR2Tg8w2lzm7kLNL0-w.woff2", ], # Roboto Bold Italic (greek) "62509e2b63168ae83848cb3f76d2c47177de8618ac918af119cc7ae90c71213b": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcCwt_Rm691LTebKfY2ZkKSmI.woff2", "https://fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcCwt_Rm691LTebKfY2ZkKSmI.woff2", ], # Roboto Bold Italic (greek-ext) "07a2e7b4a480176f0f0bc9f7ca757d8467bf41f86e3b1eed374be06ff1b51b56": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcC1BW26QxpSj-_ZKm_xT4hWw.woff2", "https://fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcC1BW26QxpSj-_ZKm_xT4hWw.woff2", ], # Roboto Bold Italic (latin) "556e09ad66d48078d2ea341eff36e93dafdb56fed15e9d92e052a7cb3910e2e9": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcC4gp9Q8gbYrhqGlRav_IXfk.woff2", "https://fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcC4gp9Q8gbYrhqGlRav_IXfk.woff2", ], # Roboto Bold Italic (latin-ext) "5f6115b8655a4e9e0bb6440956b2d7b7d52e90193c6be53731fcf97d1fc45ec3": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcC6E8kM4xWR1_1bYURRojRGc.woff2", "https://fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcC6E8kM4xWR1_1bYURRojRGc.woff2", ], # Roboto Bold Italic (vietnamese) "b75ce2f4333ea21c1d0aeb0061edcf81b7fffe022a732dae52834a8b62615c5f": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcC9DiNsR5a-9Oe_Ivpu8XWlY.woff2", "https://fonts.gstatic.com/s/roboto/v18/t6Nd4cfPRhZP44Q5QAjcC9DiNsR5a-9Oe_Ivpu8XWlY.woff2", ], # Roboto Italic (cyrillic) "<KEY>": [ 
"https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/OpXUqTo0UgQQhGj_SFdLWBkAz4rYn47Zy2rvigWQf6w.woff2", "https://fonts.gstatic.com/s/roboto/v18/OpXUqTo0UgQQhGj_SFdLWBkAz4rYn47Zy2rvigWQf6w.woff2", ], # Roboto Italic (cyrillic-ext) "d04ce842e235d3e6abfcd37d6598138007f56e391a035167d78edf9088d3035a": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/WxrXJa0C3KdtC7lMafG4dRkAz4rYn47Zy2rvigWQf6w.woff2", "https://fonts.gstatic.com/s/roboto/v18/WxrXJa0C3KdtC7lMafG4dRkAz4rYn47Zy2rvigWQf6w.woff2", ], # Roboto Italic (greek) "aa9a8db3e6de8124291c3f2fd0bbd0aca8c796f365204d78414536067115be07": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/cDKhRaXnQTOVbaoxwdOr9xkAz4rYn47Zy2rvigWQf6w.woff2", "https://fonts.gstatic.com/s/roboto/v18/cDKhRaXnQTOVbaoxwdOr9xkAz4rYn47Zy2rvigWQf6w.woff2", ], # Roboto Italic (greek-ext) "785896def5be5b35967d63f5589ce67fc8d3b452153a37323a4d9b886d828c60": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/1hZf02POANh32k2VkgEoUBkAz4rYn47Zy2rvigWQf6w.woff2", "https://fonts.gstatic.com/s/roboto/v18/1hZf02POANh32k2VkgEoUBkAz4rYn47Zy2rvigWQf6w.woff2", ], # Roboto Italic (latin) "64565561ddb338a11ffce5b84aa53fa6e8fd203c34208e61eb5602cd08bf527f": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/vPcynSL0qHq_6dX7lKVByXYhjbSpvc47ee6xR_80Hnw.woff2", "https://fonts.gstatic.com/s/roboto/v18/vPcynSL0qHq_6dX7lKVByXYhjbSpvc47ee6xR_80Hnw.woff2", ], # Roboto Italic (latin-ext) "d5b2d7e9efe90feef0c4507d90b2b4e464c6929efd05ad4294d3d5057db57b97": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/vSzulfKSK0LLjjfeaxcREhkAz4rYn47Zy2rvigWQf6w.woff2", "https://fonts.gstatic.com/s/roboto/v18/vSzulfKSK0LLjjfeaxcREhkAz4rYn47Zy2rvigWQf6w.woff2", ], # Roboto Italic (vietnamese) "5d875731e35140f94bc4cb23944d104688d3c6d372833ddae8d22d3aa802beb4": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/K23cxWVTrIFD6DJsEVi07RkAz4rYn47Zy2rvigWQf6w.woff2", 
"https://fonts.gstatic.com/s/roboto/v18/K23cxWVTrIFD6DJsEVi07RkAz4rYn47Zy2rvigWQf6w.woff2", ], # Roboto Light (cyrillic) "cb94537350a4c593515c0b9066a22f0d74284173b88521c50b894a3179402e46": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/Fl4y0QdOxyyTHEGMXX8kcYX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/Fl4y0QdOxyyTHEGMXX8kcYX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Light (cyrillic-ext) "66a095c96771a94d2772c7e19a32c6585d4bed3a989faa9e595bb270a2621608": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/0eC6fl06luXEYWpBSJvXCIX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/0eC6fl06luXEYWpBSJvXCIX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Light (greek) "f0e3a88ae70245bcac12d2640792e50a165ce618d3b5979b735913e582d204f7": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/I3S1wsgSg9YCurV6PUkTOYX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/I3S1wsgSg9YCurV6PUkTOYX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Light (greek-ext) "40a162d49fd25da223ea81454616f469270020fc186fe2f109534fb1f72e1bcb": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/-L14Jk06m6pUHB-5mXQQnYX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/-L14Jk06m6pUHB-5mXQQnYX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Light (latin) "f7c386915e39d8a925fe10d15744a9da95ac8f90423e12728e7fc3c5e34f4559": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/Hgo13k-tfSpn0qi1SFdUfZBw1xU1rKptJj_0jans920.woff2", "https://fonts.gstatic.com/s/roboto/v18/Hgo13k-tfSpn0qi1SFdUfZBw1xU1rKptJj_0jans920.woff2", ], # Roboto Light (latin-ext) "bbeeb150a0f72cbd898ba36ed908bb95ef2386d41158c943aa3af4e0c8430639": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/Pru33qjShpZSmG3z6VYwnYX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/Pru33qjShpZSmG3z6VYwnYX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Light (vietnamese) 
"5a4e99d1db8c9fd38f6b1c92582c2351cf27075f5ccef89404a8d673fa8e7b26": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/NYDWBdD4gIq26G5XYbHsFIX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/NYDWBdD4gIq26G5XYbHsFIX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Light Italic (cyrillic) "4160dc56c5afc7320243a73cdf025d1c64ea19e035b98bad9c170e37c98ee5e2": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0at14sYYdJg5dU2qzJEVSuta0.woff2", "https://fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0at14sYYdJg5dU2qzJEVSuta0.woff2", ], # Roboto Light Italic (cyrillic-ext) "d95d953cff5e309f22a680e48981070d2cbebf75daba25b078834bf0c4f143e4": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0at_ZraR2Tg8w2lzm7kLNL0-w.woff2", "https://fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0at_ZraR2Tg8w2lzm7kLNL0-w.woff2", ], # Roboto Light Italic (greek) "8d649207dfd9e6f53614ee7ee8e0865789e38b39244ab1546ee5117ab6f6ed2f": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0atwt_Rm691LTebKfY2ZkKSmI.woff2", "https://fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0atwt_Rm691LTebKfY2ZkKSmI.woff2", ], # Roboto Light Italic (greek-ext) "a1153c52da99d21ed2f036e5849c3b2a5d7d3d5913d63ceac983d388288420b4": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0at1BW26QxpSj-_ZKm_xT4hWw.woff2", "https://fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0at1BW26QxpSj-_ZKm_xT4hWw.woff2", ], # Roboto Light Italic (latin) "c4fc2fd6457f67718ccff3434f39a84a83be98defa8e23ac1942580ea53a925e": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0at4gp9Q8gbYrhqGlRav_IXfk.woff2", "https://fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0at4gp9Q8gbYrhqGlRav_IXfk.woff2", ], # Roboto Light Italic (latin-ext) "fd5b96eb1adc32b3fd7823f6a9e3c14122a060d5665091c33d9243f2541b016c": [ 
"https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0at6E8kM4xWR1_1bYURRojRGc.woff2", "https://fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0at6E8kM4xWR1_1bYURRojRGc.woff2", ], # Roboto Light Italic (vietnamese) "ced9470e7e60d5edeccf4d3a0ab2f57ef653ec9de3097e6950bc06c64157aa5a": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0at9DiNsR5a-9Oe_Ivpu8XWlY.woff2", "https://fonts.gstatic.com/s/roboto/v18/7m8l7TlFO-S3VkhHuR0at9DiNsR5a-9Oe_Ivpu8XWlY.woff2", ], # Roboto Medium (cyrillic) "74f08a5b16db96fd23eeca2c2e6c354d08a95b3360aa2bb6ea0890517bb10469": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/oHi30kwQWvpCWqAhzHcCSIX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/oHi30kwQWvpCWqAhzHcCSIX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Medium (cyrillic-ext) "2aa57d00d0cac3b30aef28a19e9cfea12b45daf9562b4fa623750c8145c0767b": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/ZLqKeelYbATG60EpZBSDy4X0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/ZLqKeelYbATG60EpZBSDy4X0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Medium (greek) "b95a36dd1483f97002a0c8aba87106f7fefbd67f22bc25a2bf21352bb4f316ef": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/mx9Uck6uB63VIKFYnEMXrYX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/mx9Uck6uB63VIKFYnEMXrYX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Medium (greek-ext) "399cdbc9a94414d94fb15b0386888c6bc8ce4d6140cc3a9a571406a76cf47bb5": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/rGvHdJnr2l75qb0YND9NyIX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/rGvHdJnr2l75qb0YND9NyIX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Medium (latin) "b79781efede37903be212fcdf63955e41c8649e678b6b83adf824459d240a188": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/RxZJdnzeo3R5zSexge8UUZBw1xU1rKptJj_0jans920.woff2", 
"https://fonts.gstatic.com/s/roboto/v18/RxZJdnzeo3R5zSexge8UUZBw1xU1rKptJj_0jans920.woff2", ], # Roboto Medium (latin-ext) "ba99e38768dd8358450dc363431400b1642c7cd7e5b47830e30aa8ec80fb4790": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/oOeFwZNlrTefzLYmlVV1UIX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/oOeFwZNlrTefzLYmlVV1UIX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Medium (vietnamese) "e785fcb2332a43e5f489c0e7457001a93800b459bdf5173cffbb880f350077eb": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/mbmhprMH69Zi6eEPBYVFhYX0hVgzZQUfRDuZrPvH3D8.woff2", "https://fonts.gstatic.com/s/roboto/v18/mbmhprMH69Zi6eEPBYVFhYX0hVgzZQUfRDuZrPvH3D8.woff2", ], # Roboto Medium Italic (cyrillic) "e9f24fd84cfbdad488d4f05d97ca2e009af8248044def329f0c78c04e12c32cb": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0V4sYYdJg5dU2qzJEVSuta0.woff2", "https://fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0V4sYYdJg5dU2qzJEVSuta0.woff2", ], # Roboto Medium Italic (cyrillic-ext) "ecfda0e4317641a395971d71435ad1a3dce0499bccc9bcdcaaebffb714588a4e": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0fZraR2Tg8w2lzm7kLNL0-w.woff2", "https://fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0fZraR2Tg8w2lzm7kLNL0-w.woff2", ], # Roboto Medium Italic (greek) "510c1001aa3c1ae574eba6eaa5a404414dd0f5d5cd8c213fe0fac404c1fbbd7c": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0Qt_Rm691LTebKfY2ZkKSmI.woff2", "https://fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0Qt_Rm691LTebKfY2ZkKSmI.woff2", ], # Roboto Medium Italic (greek-ext) "e5343e5d46125f688c2f266369983a1f92dbefa8d16f131b09768cdd4a5cebd4": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0VBW26QxpSj-_ZKm_xT4hWw.woff2", "https://fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0VBW26QxpSj-_ZKm_xT4hWw.woff2", ], # Roboto Medium Italic (latin) 
"76d779c16f21b55a95fb182bf7552447ee340d15556e53a99dd789383f6d8c32": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0Ygp9Q8gbYrhqGlRav_IXfk.woff2", "https://fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0Ygp9Q8gbYrhqGlRav_IXfk.woff2", ], # Roboto Medium Italic (latin-ext) "a69b0c33d809b7aac3e9648bfc995bc38cd5e426efeb006dc3b31523f4867f73": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0aE8kM4xWR1_1bYURRojRGc.woff2", "https://fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0aE8kM4xWR1_1bYURRojRGc.woff2", ], # Roboto Medium Italic (vietnamese) "2c94704be76a8ec87995f3427911e50987cfcaa13c5749c770419559fe836509": [ "https://mirror.bazel.build/fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0dDiNsR5a-9Oe_Ivpu8XWlY.woff2", "https://fonts.gstatic.com/s/roboto/v18/OLffGBTaF0XFOW1gnuHF0dDiNsR5a-9Oe_Ivpu8XWlY.woff2", ], # Roboto Mono (cyrillic) "2c9fae8205ea404d8400b9731423d5f8261788efcb26b651ad1031c70c895824": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpY14sYYdJg5dU2qzJEVSuta0.woff2", "https://fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpY14sYYdJg5dU2qzJEVSuta0.woff2", ], # Roboto Mono (cyrillic-ext) "671d1df350d3ccfd9a5ebbc9e92810a274d6215a648099f0f6e3e256b2bdae02": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpY_ZraR2Tg8w2lzm7kLNL0-w.woff2", "https://fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpY_ZraR2Tg8w2lzm7kLNL0-w.woff2", ], # Roboto Mono (greek) "eb84188b287e62e965be53c788b6562554cefcc0a3520f792ba91bb60d40e607": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpYwt_Rm691LTebKfY2ZkKSmI.woff2", "https://fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpYwt_Rm691LTebKfY2ZkKSmI.woff2", ], # Roboto Mono (greek-ext) "978a5db5af1654146da5ec93980c273df7010a2d045f1360ac3b9d85bd890299": [ 
"https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpY1BW26QxpSj-_ZKm_xT4hWw.woff2", "https://fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpY1BW26QxpSj-_ZKm_xT4hWw.woff2", ], # Roboto Mono (latin) "ecc28128233f90171df8f8915d60cdc59ff70b9194e1d93061816d3e3cd1f320": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpY4gp9Q8gbYrhqGlRav_IXfk.woff2", "https://fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpY4gp9Q8gbYrhqGlRav_IXfk.woff2", ], # Roboto Mono (latin-ext) "9a7b6e1f38e9a47867ad5c2f403ff4f4477a03bbec300d4e345bf67d5d0da262": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpY6E8kM4xWR1_1bYURRojRGc.woff2", "https://fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpY6E8kM4xWR1_1bYURRojRGc.woff2", ], # Roboto Mono (vietnamese) "b568a2d630d5924e40b73489cc4a8720fb9fb0249b8117d6d45cfc95d249c1da": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpY9DiNsR5a-9Oe_Ivpu8XWlY.woff2", "https://fonts.gstatic.com/s/robotomono/v5/hMqPNLsu_dywMa4C_DEpY9DiNsR5a-9Oe_Ivpu8XWlY.woff2", ], # Roboto Mono Bold (cyrillic) "656e4cb0b042d18f6b889948f3c9a1f87b70340bd20a38a0d738b0e32a7f00ee": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59Fz1x-M1I1w5OMiqnVF8xBLhU.woff2", "https://fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59Fz1x-M1I1w5OMiqnVF8xBLhU.woff2", ], # Roboto Mono Bold (cyrillic-ext) "0d5221a5f914d57a674049b718a37b8f09a0e79647af8b187273f35ab0d8376c": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59FzwXaAXup5mZlfK6xRLrhsco.woff2", "https://fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59FzwXaAXup5mZlfK6xRLrhsco.woff2", ], # Roboto Mono Bold (greek) "41553f58ea074adde7eaaefe9b220b49021128f6b68b8be384072c4db430603f": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59Fzwn6Wqxo-xwxilDXPU8chVU.woff2", 
"https://fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59Fzwn6Wqxo-xwxilDXPU8chVU.woff2", ], # Roboto Mono Bold (greek-ext) "27798d63b7fadca6c6a2d17ea7673855d44baf75e8172fa9749888898ce04125": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59Fz1T7aJLK6nKpn36IMwTcMMc.woff2", "https://fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59Fz1T7aJLK6nKpn36IMwTcMMc.woff2", ], # Roboto Mono Bold (latin) "8b827f046df0acf54d80954ae05f0b5e87fdf09bc4c1bf02e8edb0d928e259b7": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59Fz_79_ZuUxCigM2DespTnFaw.woff2", "https://fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59Fz_79_ZuUxCigM2DespTnFaw.woff2", ], # Roboto Mono Bold (latin-ext) "b38383e889863e1c25c2334087e6b00835cef283f8448c8b2a2d5d51489d202b": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59Fz4gd9OEPUCN3AdYW0e8tat4.woff2", "https://fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59Fz4gd9OEPUCN3AdYW0e8tat4.woff2", ], # Roboto Mono Bold (vietnamese) "03b9c55ee9bf53c57c9b9dcb739bc92ada5b97fc81deb5a57e4e8347c4eee8bb": [ "https://mirror.bazel.build/fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59Fz8bIQSYZnWLaWC9QNCpTK_U.woff2", "https://fonts.gstatic.com/s/robotomono/v5/N4duVc9C58uwPiY8_59Fz8bIQSYZnWLaWC9QNCpTK_U.woff2", ], }, generated_rule_name = "files", extra_build_file_content = "\n".join([ 'load("@io_bazel_rules_closure//closure:defs.bzl", "web_library")', '', 'licenses(["notice"]) # Apache 2.0', '', 'exports_files(["LICENSE"]) # Apache 2.0', '', 'web_library(', ' name = "com_google_fonts_roboto",', ' path = "/font-roboto",', ' srcs = [', ' "roboto.html",', ' ":files",', ' ],', ')', '', 'genrule(', ' name = "html",', ' outs = ["roboto.html"],', ' cmd = "\\n".join([', ' "cat <<\'EOF\' >$@",', " '<!--',", " '@license',", " 'Copyright 2017 Google Inc. 
All Rights Reserved.',", " '',", ' \'Licensed under the Apache License, Version 2.0 (the "License");\',', " 'you may not use this file except in compliance with the License.',", " 'You may obtain a copy of the License at',", " '',", " ' http://www.apache.org/licenses/LICENSE-2.0',", " '',", " 'Unless required by applicable law or agreed to in writing, software',", ' \'distributed under the License is distributed on an "AS IS" BASIS,\',', " 'WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.',", " 'See the License for the specific language governing permissions and',", " 'limitations under the License.',", " '-->',", " '',", " '<style>',", " '@font-face {',", ' " font-family: \'Roboto\';",', " ' font-style: normal;',", " ' font-weight: 400;',", ' " src: local(\'Roboto\'), local(\'Roboto-Regular\'), url(/font-roboto/uYECMKoHcO9x1wdmbyHIm3-_kf6ByYO6CLYdB4HQE-Y.woff2) format(\'woff2\');",', " ' unicode-range: U+0400-045F, U+0490-0491, U+04B0-04B1, U+2116;',", " '}',", " '@font-face {',", ' " font-family: \'Roboto\';",', " ' font-style: normal;',", " ' font-weight: 400;',", ' " src: local(\'Roboto\'), local(\'Roboto-Regular\'), url(/font-roboto/sTdaA6j0Psb920Vjv-mrzH-_kf6ByYO6CLYdB4HQE-Y.woff2) format(\'woff2\');",', " ' unicode-range: U+0460-052F, U+20B4, U+2DE0-2DFF, U+A640-A69F;',", " '}',", " '@font-face {',", ' " font-family: \'Roboto\';",', " ' font-style: normal;',", " ' font-weight: 400;',", ' " src: local(\'Roboto\'), local(\'Roboto-Regular\'), url(/font-roboto/_VYFx-s824kXq_Ul2BHqYH-_kf6ByYO6CLYdB4HQE-Y.woff2) format(\'woff2\');",', " ' unicode-range: U+0370-03FF;',", " '}',", " '@font-face {',", ' " font-family: \'Roboto\';",', " ' font-style: normal;',", " ' font-weight: 400;',", ' " src: local(\'Roboto\'), local(\'Roboto-Regular\'), url(/font-roboto/tnj4SB6DNbdaQnsM8CFqBX-_kf6ByYO6CLYdB4HQE-Y.woff2) format(\'woff2\');",', " ' unicode-range: U+1F00-1FFF;',", " '}',", " '@font-face {',", ' " font-family: \'Roboto\';",', " ' font-style: 
normal;',", " ' font-weight: 400;',", ' " src: local(\'Roboto\'), local(\'Roboto-Regular\'), url(/font-roboto/oMMgfZMQthOryQo9n22dcuvvDin1pK8aKteLpeZ5c0A.woff2) format(\'woff2\');",', " ' unicode-range: U+0000-00FF, U+0131, U+0152-0153, U+02C6, U+02DA, U+02DC, U+2000-206F, U+2074, U+20AC, U+2212, U+2215;',", " '}',", " '@font-face {',", ' " font-family: \'Roboto\';",', " ' font-style: normal;',", " ' font-weight: 400;',", ' " src: local(\'Roboto\'), local(\'Roboto-Regular\'), url(/font-roboto/Ks_cVxiCiwUWVsFWFA3Bjn-_kf6ByYO6CLYdB4HQE-Y.woff2) format(\'woff2\');",', " ' unicode-range: U+0100-024F, U+1E00-1EFF, U+20A0-20AB, U+20AD-20CF, U+2C60-2C7F, U+A720-A7FF;',", " '}',", " '@font-face {',", ' " font-family: \'Roboto\';",', " ' font-style: normal;',", " ' font-weight: 400;',", ' " src: local(\'Roboto\'), local(\'Roboto-Regular\'), url(/font-roboto/NJ4vxlgWwWbEsv18dAhqnn-_kf6ByYO6CLYdB4HQE-Y.woff2) format(\'woff2\');",', " ' unicode-range: U+0102-0103, U+1EA0-1EF9, U+20AB;',", " '}',", " '@font-face {',", ' " font-family: \'Roboto\';",', " ' font-style: normal;',", " ' font-weight: 700;',", ' " src: local(\'Roboto Bold\'), local(\'Roboto-Bold\'), url(/font-roboto/isZ-wbCXNKAbnjo6_TwHToX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",', " ' unicode-range: U+0400-045F, U+0490-0491, U+04B0-04B1, U+2116;',", " '}',", " '@font-face {',", ' " font-family: \'Roboto\';",', " ' font-style: normal;',", " ' font-weight: 700;',", ' " src: local(\'Roboto Bold\'), local(\'Roboto-Bold\'), url(/font-roboto/77FXFjRbGzN4aCrSFhlh3oX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",', " ' unicode-range: U+0460-052F, U+20B4, U+2DE0-2DFF, U+A640-A69F;',", " '}',", " '@font-face {',", ' " font-family: \'Roboto\';",', " ' font-style: normal;',", " ' font-weight: 700;',", ' " src: local(\'Roboto Bold\'), local(\'Roboto-Bold\'), url(/font-roboto/jSN2CGVDbcVyCnfJfjSdfIX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",', " ' unicode-range: U+0370-03FF;',", " '}',", " '@font-face {',", ' " 
font-family: \'Roboto\';",', " ' font-style: normal;',", " ' font-weight: 700;',", ' " src: local(\'Roboto Bold\'), local(\'Roboto-Bold\'), url(/font-roboto/UX6i4JxQDm3fVTc1CPuwqoX0hVgzZQUfRDuZrPvH3D8.woff2) format(\'woff2\');",', " ' unicode-range: U+1F00-1FFF;',", " '}',", "
= np.array([1, 0, 2]) idx = 3 y = 0 x = [-1, 0, 1] res = n_box - 3 while res > 0: y += 1 if res == 3: i_list = [0, 1, 2] else: i_list = [0, 2] material = [0, 1][int(np.random.rand() < 0.5 and res > 3)] for i in i_list: init_p[idx, :] = np.array([x[i], y, material]) idx += 1 res -= 1 elif shape_type == 1: # 1 shape init_p[0, :] = np.array([0, 0, 2]) for i in range(1, n_box): material = [0, 1][int(np.random.rand() < 0.5 and i < n_box - 1)] init_p[i, :] = np.array([0, i, material]) elif shape_type == 2: # I shape if n_box < 7: init_p[0, :] = np.array([0, 0, 2]) for i in range(1, n_box - 3): material = [0, 1][int(np.random.rand() < 0.5 and i < n_box - 1)] init_p[i, :] = np.array([0, i, material]) init_p[n_box - 1, :] = np.array([-1, n_box - 3, 0]) init_p[n_box - 2, :] = np.array([0, n_box - 3, 0]) init_p[n_box - 3, :] = np.array([1, n_box - 3, 0]) else: init_p[0, :] = np.array([-1, 0, 2]) init_p[1, :] = np.array([0, 0, 2]) init_p[2, :] = np.array([1, 0, 2]) for i in range(3, n_box - 3): material = [0, 1][int(np.random.rand() < 0.5 and i < n_box - 1)] init_p[i, :] = np.array([0, i - 2, material]) init_p[n_box - 1, :] = np.array([-1, n_box - 5, 0]) init_p[n_box - 2, :] = np.array([0, n_box - 5, 0]) init_p[n_box - 3, :] = np.array([1, n_box - 5, 0]) elif shape_type == 3: # T shape if n_box < 6: init_p[0, :] = np.array([-1, 0, 2]) init_p[1, :] = np.array([0, 0, 2]) init_p[2, :] = np.array([1, 0, 2]) for i in range(3, n_box): material = [0, 1][int(np.random.rand() < 0.5 and i < n_box - 1)] init_p[i, :] = np.array([0, i - 2, material]) else: init_p[0, :] = np.array([-2, 0, 2]) init_p[1, :] = np.array([-1, 0, 2]) init_p[2, :] = np.array([0, 0, 2]) init_p[3, :] = np.array([1, 0, 2]) init_p[4, :] = np.array([2, 0, 2]) for i in range(5, n_box): material = [0, 1][int(np.random.rand() < 0.5 and i < n_box - 1)] init_p[i, :] = np.array([0, i - 4, material]) elif shape_type == 4: # stronger T assert n_box == 10 init_p[0, :] = np.array([0, -4, 0]) init_p[1, :] = np.array([1, -4, 1]) 
init_p[2, :] = np.array([0, -3, 0]) init_p[3, :] = np.array([1, -3, 0]) init_p[4, :] = np.array([0, -2, 1]) init_p[5, :] = np.array([1, -2, 0]) init_p[6, :] = np.array([-1, -1, 2]) init_p[7, :] = np.array([0, -1, 2]) init_p[8, :] = np.array([1, -1, 2]) init_p[9, :] = np.array([2, -1, 2]) if aug: if np.random.rand() > 0.5: '''flip y''' init_p[:, 1] = -init_p[:, 1] if np.random.rand() > 0.5: '''swap x and y''' x, y = init_p[:, 0], init_p[:, 1] init_p[:, 0], init_p[:, 1] = y.copy(), x.copy() return init_p class SoftEngine(Engine): def __init__(self, dt, state_dim, action_dim, param_dim, num_box_range=[5, 10], k_range=[600, 1000.]): # state_dim = 4 # action_dim = 1 # param_dim = 4 - [n_box, k, damping, init_p] # init_p: n_box * 3 - [x, y, type] # type: 0 - soft & actuated, 1 - soft, 2 - rigid self.side_length = 1. self.num_box_range = num_box_range self.k_range = k_range self.radius = 0.01 self.mass = 1. super(SoftEngine, self).__init__(dt, state_dim, action_dim, param_dim) @property def num_obj(self): return self.n_box def inside_lim(self, x, y, lim): if x >= lim[0] and x < lim[1] and y >= lim[0] and y < lim[1]: return True return False def sample_init_p(self): n_box = self.n_box r_actuated = 0.5 r_soft = 0.25 r_rigid = 0.25 lim = -4, 4 mask = np.zeros((lim[1] - lim[0], lim[1] - lim[0])) init_p = np.zeros((n_box, 3)) buf = [] # add a fixed box x, y = 0, -4 init_p[0] = np.array([x, y, 3]) buf.append([x - 1, y]) buf.append([x, y + 1]) buf.append([x + 1, y]) mask[x, y] = mask[x - 1, y] = mask[x, y + 1] = mask[x + 1, y] = 1 for i in range(1, n_box): roll_type = np.random.rand() if roll_type < r_actuated: init_p[i, 2] = 0 elif roll_type < r_actuated + r_soft: init_p[i, 2] = 1 else: init_p[i, 2] = 2 if len(buf) > 0: idx = rand_int(0, len(buf)) x = buf[idx][0] y = buf[idx][1] del buf[idx] else: x = rand_int(lim[0], lim[1]) y = rand_int(lim[0], lim[1]) init_p[i, 0], init_p[i, 1] = x, y mask[x, y] = 1 if self.inside_lim(x + 1, y, lim) and mask[x + 1, y] == 0: buf.append([x + 
1, y]); mask[x + 1, y] = 1 if self.inside_lim(x - 1, y, lim) and mask[x - 1, y] == 0: buf.append([x - 1, y]); mask[x - 1, y] = 1 if self.inside_lim(x, y + 1, lim) and mask[x, y + 1] == 0: buf.append([x, y + 1]); mask[x, y + 1] = 1 if self.inside_lim(x, y - 1, lim) and mask[x, y - 1] == 0: buf.append([x, y - 1]); mask[x, y - 1] = 1 while (init_p[:, 2] == 0).sum() < 2: ''' less than 2 actuated''' ''' re-generate box type''' for i in range(1, n_box): roll_type = np.random.rand() if roll_type < r_actuated: init_p[i, 2] = 0 elif roll_type < r_actuated + r_soft: init_p[i, 2] = 1 else: init_p[i, 2] = 2 return init_p def init(self, param=None): if param is None: self.n_box, self.k, self.damping, self.init_p = [None] * 4 else: self.n_box, self.k, self.damping, self.init_p = param self.n_box = int(self.n_box) if self.n_box is None: self.n_box = rand_int(self.num_box_range[0], self.num_box_range[1]) if self.k is None: self.k = rand_float(self.k_range[0], self.k_range[1]) if self.damping is None: self.damping = self.k / 20. if self.init_p is None: self.init_p = self.sample_init_p() # self.init_p = sample_init_p_regular(self.n_box, shape_type=4) # print('Env Soft param: n_box=%d, k=%.4f, damping=%.4f' % (self.n_box, self.k, self.damping)) self.space = pymunk.Space() self.space.gravity = (0., 0.) self.add_masses() self.add_rels() self.state_prv = None def add_masses(self): inertia = pymunk.moment_for_circle(self.mass, 0, self.radius, (0, 0)) self.balls = [] for i in range(self.n_box): x, y, t = self.init_p[i] l = self.side_length / 2. 
for j in range(4): body = pymunk.Body(self.mass, inertia) if j == 0: body.position = Vec2d(x - l, y - l) elif j == 1: body.position = Vec2d(x - l, y + l) elif j == 2: body.position = Vec2d(x + l, y - l) else: body.position = Vec2d(x + l, y + l) # shape = pymunk.Circle(body, self.radius, (0, 0)) # self.space.add(body, shape) self.space.add(body) self.balls.append(body) def add_rels(self): ball = self.balls[0] c = pymunk.PinJoint(self.space.static_body, ball, (ball.position[0], ball.position[1]), (0, 0)) self.space.add(c) ball = self.balls[2] c = pymunk.PinJoint(self.space.static_body, ball, (ball.position[0], ball.position[1]), (0, 0)) self.space.add(c) c = pymunk.DampedSpring( self.balls[0], self.balls[1], (0, 0), (0, 0), rest_length=self.side_length, stiffness=self.k, damping=self.damping) self.space.add(c) c = pymunk.DampedSpring( self.balls[1], self.balls[3], (0, 0), (0, 0), rest_length=self.side_length, stiffness=self.k, damping=self.damping) self.space.add(c) c = pymunk.DampedSpring( self.balls[2], self.balls[3], (0, 0), (0, 0), rest_length=self.side_length, stiffness=self.k, damping=self.damping) self.space.add(c) c = pymunk.DampedSpring( self.balls[1], self.balls[2], (0, 0), (0, 0), rest_length=self.side_length * np.sqrt(2), stiffness=self.k, damping=self.damping) self.space.add(c) c = pymunk.DampedSpring( self.balls[0], self.balls[3], (0, 0), (0, 0), rest_length=self.side_length * np.sqrt(2), stiffness=self.k, damping=self.damping) self.space.add(c) for i in range(1, self.n_box): if self.init_p[i, 2] <= 1: # if the box is soft # side c = pymunk.DampedSpring( self.balls[i * 4], self.balls[i * 4 + 1], (0, 0), (0, 0), rest_length=self.side_length, stiffness=self.k, damping=self.damping) self.space.add(c) c = pymunk.DampedSpring( self.balls[i * 4], self.balls[i * 4 + 2], (0, 0), (0, 0), rest_length=self.side_length, stiffness=self.k, damping=self.damping) self.space.add(c) c = pymunk.DampedSpring( self.balls[i * 4 + 3], self.balls[i * 4 + 1], (0, 0), (0, 0), 
rest_length=self.side_length, stiffness=self.k, damping=self.damping) self.space.add(c) c = pymunk.DampedSpring( self.balls[i * 4 + 3], self.balls[i * 4 + 2], (0, 0), (0, 0), rest_length=self.side_length, stiffness=self.k, damping=self.damping) self.space.add(c) # cross c = pymunk.DampedSpring( self.balls[i * 4], self.balls[i * 4 + 3], (0, 0), (0, 0), rest_length=self.side_length * np.sqrt(2), stiffness=self.k, damping=self.damping) self.space.add(c) c = pymunk.DampedSpring( self.balls[i * 4 + 1], self.balls[i * 4 + 2], (0, 0), (0, 0), rest_length=self.side_length * np.sqrt(2), stiffness=self.k, damping=self.damping) self.space.add(c) else: # if the box is rigid # side
#------------------------------------------------------------------------------- # Copyright 2017 Cognizant Technology Solutions # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. #------------------------------------------------------------------------------- ''' Created on Dec 28, 2017 @author: 610962 ''' from dateutil import parser import datetime from BaseAgent import BaseAgent import time import calendar import random import string import os import json import logging.handlers class DummyDataAgent(BaseAgent): def process(self): self.printLog("DummyDataAgent processing started ",True) jiraSample = { "jiraStatus":"Completed", "jiraProjectName":"Knowledge Transfer", "jiraCreator":"393565", "inSightsTimeX":"2016-09-13T14:15:44Z", "jiraPriority":"Medium", "jiraUpdated":"2016-09-13T14:15:44.000+0530", "jiraIssueType":"Story", "toolName":"JIRA", "jiraKey":"KKT-3", "inSightsTime":1473777524, "sprint":"Sprint4", "fixVersions": "ACS17.0.4.3" } # GIT sample json gitSample = { "gitCommitId":1, "inSightsTimeX":"2016-03-16T10:47:22Z", "toolName":"GIT", "gitAuthorName":"Akshay", "gitReponame":"Insights", "inSightsTime":1458122182, "jiraKey":"IS-10", "gitCommiTime":"2016-03-16T15:47:22Z" } # Jenkins sample json jenkinsSample = { "environment": "PROD", "endTime": 1508351788, "gitCommitId":1, "jobName": "BillingApproved", "duration": 10178, "buildNumber": 1, "sprintID": "S52", "vector": "BUILD", "startTime": 1508341610, "projectName": "PaymentServices", "inSightsTimeX": "2017-10-18T15:46:50Z", 
"status": "Success", "toolName": "JENKINS", "projectID": "1002" } # Sonar Sample json sonarSample = { "id": 4, "k": "PaymentServices", "nm": "PaymentServices", "sc": "PRJ", "qu": "TRK" } sprintSample = { "sprintName":"Adoption", "sprintId":"ad1", "state":"closed" } # Jira variables jira_status = ['Open', 'Backlog', 'To Do', 'In Progress', 'Canceled', 'Done', 'Closed', 'Reopen'] jira_priority = ['Low', 'Medium', 'High'] jira_issuetype = ['Story', 'Task', 'Sub-task', 'Bug', 'Epic', 'User Story'] jira_creator = ['Akshay', 'Mayank', 'Vishwajit', 'Prajakta', 'Vishal'] jira_sprint = ['S51', 'S52', 'S53', 'S54', 'S55'] jira_project_name = ['PaymentServices', 'MobileServices', 'ClaimFinder', 'AgentLocator'] Story_Id = ['ST-10', 'ST-11', 'ST-12', 'ST-13', 'ST-14'] jira_version = ['ACS17.0.4.3', 'BDE17.0.4.3', 'ACS19.0.3.1'] state = ['start', 'closed', 'finish', 'deliver'] Priority = ['2', '3', '4', '5'] Author_Name = ['HAri', 'Dhrubaj', 'Akshay', 'Tommy'] resolution = ['Done', 'Completed', 'Reopen'] storyPoints = ['1', '2', '3', '5', '8', '13'] #alm_ID = ['a23', 'a33', 'a44', 'a55'] progressTimeSec = ['1232', '32342', '2323'] assigneeID = ['1231212', '2345253', '234234', '1342323'] assigneeEmail = ['<EMAIL>', '<EMAIL>', '<EMAIL>', '<EMAIL>'] # Sprint variables sprint_Name = ['Adoption', 'UIEnhance', 'Three', 'Testphase'] state = ['start', 'closed', 'finish', 'deliver'] issue_Type = ['Bug', 'Sprint_Bug', 'SIT_Bug', 'Performance_Bug', 'Regression_Bug'] # GIT variables repo = ['Insights', 'InsightsDemo', 'InsightsTest', 'InsightsTest'] author = ['Akshay', 'Mayank', 'Vishwajit', 'Prajakta'] # message = ['Adding debug lines for IS-10 / S51', 'Remvoing bug for IS-11 / S52', 'New feature added for IS-1 / S53', 'Rolling back changes IS-13 / S54'] Commit_Id = ['123', '456', '789', '111', '009', '008', '007', '990'] # Jenkins variables sprint = ['S51', 'S52', 'S53', 'S54', 'S55'] status = ['Success', 'Failure', 'Aborted'] project_name = ['PaymentServices', 'MobileServices', 
'ClaimFinder', 'AgentLocator'] job_name = ['BillingApproved', 'BillingInvoice', 'ClaimValidated', 'ClaimProcessed', 'deploy'] projectId = ['1001', '1002', '1003', '1004'] jen_env = ['PROD', 'DEV', 'INT', 'RELEASE'] buildUrl = ['productv4.1.devops.com', 'productv4.2.devops.com', 'productv4.3.devops.com', 'productv4.4.devops.com'] result = ['SUCCESS', 'FAILURE', 'ABORTED'] master = ['master1', 'master2'] # Sonar variables project = ['PaymentServices', 'MobileServices', 'ClaimFinder', 'AgentLocator'] sonar_key = ['payment1', 'Mobile1', 'Claim', 'agent'] project_id = ['1', '2', '3', '4'] resourceKey = ['09', '099', '89', '32'] sonar_quality_gate_Status = ['SUCCESS', 'FAILED'] sonar_coverage = ['35','50','70','85'] sonar_complexity = ['35','50','70','85','100','125'] sonar_duplicate = ['15','25','45','60'] sonar_techdepth = ['3','5','17','25','21'] rundeck_env=['PROD','DEV','INTG','SIT','UAT'] dataCount = self.config.get("dataCount") start_date_days = self.config.get("start_date_days") sleepTime= self.config.get("sleepTime") createSprintData= self.config.get("createSprintData", False) currentDate= datetime.datetime.now() - datetime.timedelta(days=start_date_days) self.printLog(currentDate,True) flag = 1 # To save the data count in tracking.json script_dir = os.path.dirname(__file__) #print(script_dir) file_path = os.path.join(script_dir, 'config.json') self.printLog(file_path, False) # Input your system path to tracking.json of DummyAgent with open(file_path, "r") as jsonFile: # Open the JSON file for reading data = json.load(jsonFile) # Read the JSON into the buffer #self.printLog('Starting Agent!') #currentDT = datetime.datetime.now() #print(currentDT) record_count = 0 total_record_count = 0 globle_sprintArr = [] sprint_data = [] self.printLog('Jira sprint Started .... 
50', False) # sprint json configurations sprintEndDate=currentDate sprintDay=7 numberOfSprint=150 try: for rangeNumber in range(0,numberOfSprint ) : sprint = 'ST-' + str(rangeNumber) #if sprint not in globle_sprintArr : sprintSample = {} sprintStartDate = sprintEndDate sprintEndDate=(sprintStartDate + datetime.timedelta(days=sprintDay)) if createSprintData: self.printLog(sprint +' '+str(sprintStartDate) +' '+str(sprintEndDate), False) sprintSample['sprintName'] = random.choice(sprint_Name) sprintSample['sprintId'] = sprint sprintSample['state'] = random.choice(state) sprintSample['issueType'] = random.choice(issue_Type) sprintSample['sprintStartDate'] =sprintStartDate.strftime("%Y-%m-%dT%H:%M:%SZ") sprintSample['sprintEndDate'] = sprintEndDate.strftime("%Y-%m-%dT%H:%M:%SZ") sprint_data.append(sprintSample) globle_sprintArr.append(sprint) #print(sprintSample) if createSprintData: metadata = {"labels" : ["Sprint"]} #self.printLog(len(sprint_data), False) total_record_count =total_record_count + len(sprint_data) self.publishToolsData(sprint_data, metadata) except Exception as ex: self.printLog(ex,True) while flag == 1 : jira_data = [] sprint_data = [] git_data = [] jenkins_data = [] sonar_data = [] rundeck_data = [] #print(jira_data) # Run-time calculated variables currentDT = datetime.datetime.now() self.printLog('currentDate '+str(currentDate),True) time_tuple = time.strptime(currentDate.strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S') #print(time_tuple) time_epoch = time.mktime(time_tuple) #print(time_epoch) randomStr = ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(32)]) #randonjirakey = 'LS-' + str(''.join([random.choice(string.digits) for n in xrange(1)])) #randonGitCommitId = 'CM-' + str(''.join([random.choice(string.digits) for n in xrange(1)])) time_start = (random.randint(100, 500)) time_end = (random.randint(501, 800)) publish_message_count_loop="" self.printLog('Jira Started .... 
', False) # jira_count =[] jira_count = 0 jira_keyArr = [] jira_sprintArr = [] while jira_count != 50 : try: randonjirakey = 'LS-' + str(''.join([random.choice(string.digits) for n in xrange(10)])) randonSprintStringId = 'ST-' + str(''.join([random.choice(string.digits) for n in xrange(3)])) #print(randonSprintStringId) #Jira json configurations time_offset_jira = (random.randint(01, 24)) time_offset = (random.randint(101, 800)) jira_date =(currentDate + datetime.timedelta(hours=time_offset_jira,seconds=time_offset)) sprintNumber =random.choice(globle_sprintArr) self.printLog('sprintNumber '+sprintNumber+' jira date '+str(jira_date), False) jiraSample ={} jiraSample['inSightsTimeX'] = jira_date.strftime("%Y-%m-%dT%H:%M:%SZ") jiraSample['jiraUpdated'] = (jira_date + datetime.timedelta(days=time_offset_jira)).strftime("%Y-%m-%dT%H:%M:%SZ") jiraSample['creationDate'] = jira_date.strftime("%Y-%m-%dT%H:%M:%SZ") jiraSample['inSightsTime'] = time_epoch jiraSample['jiraCreator'] = random.choice(jira_creator) jiraSample['jiraPriority'] = random.choice(jira_priority) jiraSample['jiraIssueType'] = random.choice(jira_issuetype) jiraSample['sprintId'] = sprintNumber jiraSample['jiraStatus'] = random.choice(jira_status) jiraSample['fixVersions'] = random.choice(jira_version) jiraSample['issueType'] = random.choice(issue_Type) jiraSample['jiraKey'] = randonjirakey jiraSample['storyId'] = random.choice(Story_Id) jiraSample['Priority'] = random.choice(Priority) jiraSample['projectName'] = random.choice(jira_project_name) jiraSample['resolution'] = random.choice(resolution) jiraSample['storyPoints'] = random.choice(storyPoints) jiraSample['progressTimeSec'] = random.choice(progressTimeSec) jiraSample['assigneeID'] = random.choice(assigneeID) jiraSample['assigneeEmail'] = random.choice(assigneeEmail) jiraSample['authorName'] = random.choice(Author_Name) jiraSample['toolName'] = "JIRA" jiraSample['categoryName'] = "ALM" jira_count += 1 jira_data.append(jiraSample) #print(jiraSample) 
jira_keyArr.append(jiraSample) #if randonSprintStringId not in jira_sprintArr: # jira_sprintArr.append(randonSprintStringId) except Exception as ex: self.printLog(ex,True) jiraMetadata = {"labels" : ["JIRA"]} total_record_count =total_record_count + len(jira_data) self.publishToolsData(jira_data, jiraMetadata) publish_message_count_loop=publish_message_count_loop+' Jira Data='+str(len(jira_data)) #print(jira_keyArr) #print(jira_sprintArr) self.printLog('GIT Started .... ', False) #print(jira_keyArr) #print(len(jira_keyArr)) git = 0 git_CommitArr = [] for rangeNumber in range(0, len(jira_keyArr)) : git_count = 0 #print(jirakey) jiraSampleData=jira_keyArr[rangeNumber] while git_count != 50: randonGitCommitId = 'CM-' + str(''.join([random.choice(string.digits) for n in xrange(10)])) time_offset = (random.randint(101, 800)) # GIT json configurations 10 2 #print("GIT 1") git_date = (datetime.datetime.strptime(jiraSampleData['inSightsTimeX'],"%Y-%m-%dT%H:%M:%SZ") + datetime.timedelta(seconds=time_offset)) git_datetime_epoch = int(time.mktime(git_date.timetuple())) #print(git_datetime_epoch) self.printLog(' jirakey '+ jiraSampleData['jiraKey'] +' jira Date '+jiraSampleData['inSightsTimeX'] +' GIT Date '+str(git_date), False) gitSample = {} gitSample['inSightsTimeX'] = git_date.strftime("%Y-%m-%dT%H:%M:%SZ") gitSample['gitCommiTime'] = git_date.strftime("%Y-%m-%dT%H:%M:%SZ") gitSample['inSightsTime'] = git_datetime_epoch gitSample['gitCommitId'] = randomStr if git_count < 2001 : gitSample['jiraKey'] = jiraSampleData['jiraKey'] gitSample['message'] = 'This commit is associated with jira-key : ' + str(jiraSampleData['jiraKey']) gitSample['gitReponame'] = random.choice(repo) gitSample['gitAuthorName'] = random.choice(author) gitSample['repoName'] = random.choice(repo) gitSample['commitId'] = randonGitCommitId gitSample['toolName'] = "GIT" gitSample['categoryName'] = "SCM" #gitSample['git_date']=str(git_date) git_count += 1 #print(gitSample) git_CommitArr.append(gitSample) 
git_data.append(gitSample) gitMetadata = {"labels" : ["GIT"]} #print(len(git_data)) total_record_count =total_record_count + len(git_data) self.publishToolsData(git_data, gitMetadata) publish_message_count_loop=publish_message_count_loop+' GIT Data='+str(len(git_data)) self.printLog('Jenkins Started ....', False) #print(git_CommitArr) #print(len(git_CommitArr)) jenkins_count = 0 jenkins_keyArr = [] for rangeNumber in range(0, len(git_CommitArr)) : try: gitSampleData = git_CommitArr[rangeNumber] #print(gitSampleData) + time_start #print(gitSampleData['commitId']) time_offset = (random.randint(101, 800)) randomJenkineBuildNumber = str(''.join([random.choice(string.digits) for n in xrange(10)])) #print('a jenkine key '+randomJenkineBuildNumber +' '+gitSampleData['inSightsTimeX']) #+' '+gitSample['git_date'] jenkins_date = (datetime.datetime.strptime(gitSampleData['inSightsTimeX'],"%Y-%m-%dT%H:%M:%SZ") + datetime.timedelta(seconds=120)) self.printLog('GIT Commit Id '+gitSampleData['commitId']+' GIT Date '+ gitSampleData['inSightsTimeX'] +' Jenkine Date '+str(jenkins_date), False) jenkins_startTime = (jenkins_date) jenkins_endTime = (jenkins_date + datetime.timedelta(seconds=time_offset)) jenkine_epochtime=int(time.mktime(jenkins_date.timetuple())) jenkine_status =random.choice(status) jenkinsSample = {} jenkinsSample['inSightsTimeX'] = (jenkins_date).strftime("%Y-%m-%dT%H:%M:%SZ") jenkinsSample['inSightsTime'] = jenkine_epochtime jenkinsSample['startTime'] = jenkins_startTime.strftime("%Y-%m-%dT%H:%M:%SZ") jenkinsSample['endTime'] = jenkins_endTime.strftime("%Y-%m-%dT%H:%M:%SZ") jenkinsSample['duration'] = (jenkins_endTime - jenkins_startTime).seconds jenkinsSample['status'] = jenkine_status #jenkinsSample['sprintID'] = random.choice(sprint) jenkinsSample['buildNumber'] = randomJenkineBuildNumber jenkinsSample['jobName'] = random.choice(job_name) jenkinsSample['projectName'] = random.choice(project_name) jenkinsSample['projectID'] = random.choice(projectId) 
jenkinsSample['environment'] = random.choice(jen_env) jenkinsSample['buildUrl'] = random.choice(buildUrl) jenkinsSample['result'] = random.choice(result) jenkinsSample['master'] = random.choice(master) jenkinsSample['jenkins_date']=str(jenkins_date) if rangeNumber < 2001 : jenkinsSample['scmcommitId'] = gitSampleData['commitId'] jenkinsSample['toolName'] = "JENKINS" jenkinsSample['categoryName'] = "CI" #print(jenkinsSample) if jenkine_status=="Success": jenkins_keyArr.append(jenkinsSample) jenkins_data.append(jenkinsSample) except Exception as ex: self.printLog(ex,True) jenkinsMetadata = {"labels" : ["JENKINS"]} #self.printLog(len(jenkins_data), False) total_record_count =total_record_count + len(jenkins_data) self.publishToolsData(jenkins_data, jenkinsMetadata) publish_message_count_loop=publish_message_count_loop+' Jenkins Data='+str(len(jenkins_data)) self.printLog('Sonar Started ....', False) #print(jenkins_keyArr) jenkine_success_build =len(jenkins_keyArr) self.printLog('Jenkine Array size for success build '+str(jenkine_success_build),True)
import torch import pretty_midi import os import numpy as np import matplotlib.pyplot as plt from attn_ecvae_mq import VAE from utils import * import glob def load_model(VAE, model_path): model = VAE(130, 2048, 3, 12, 128, 128, 32) model.eval() dic = torch.load(model_path) for name in list(dic.keys()): dic[name.replace('module.', '')] = dic.pop(name) model.load_state_dict(dic) return model def swap(model, melody1, chord1, melody2, chord2, mode=1): melody1 = torch.stack(melody1.split(32, 0)).float() melody2 = torch.stack(melody2.split(32, 0)).float() chord1 = torch.stack(chord1.split(32, 0)).float() chord2 = torch.stack(chord2.split(32, 0)).float() if torch.cuda.is_available(): model = model.cuda() melody1 = melody1.cuda() melody2 = melody2.cuda() chord1 = chord1.cuda() chord2 = chord2.cuda() with torch.no_grad(): o_p, o_r = model.encoder(melody1, chord1) z1p, z1r = (o_p.mean, o_r.mean) r_p, r_r = model.encoder(melody2, chord2) z2p, z2r = (r_p.mean, r_r.mean) if mode == 1: test = model.decoder(z1p, z2r, chord1).cpu() if mode == 2: test = model.decoder(z1p, z1r, chord2).cpu() return test def chord_to_ins(chord): chord = chord.cpu().numpy() ins = pretty_midi.Instrument(1) chords = [] for i, c in enumerate(chord): note_index = tuple(np.where(c == 1)[0]) if len(note_index) == 0: continue if len(chords) != 0 and chords[-1][0] == note_index: chords[-1][2] += 0.125 else: chords.append([note_index, i * 0.125 , 0.125]) notes = [] for c in chords: start = c[1] end = c[1] + c[2] appended = [pretty_midi.Note(80, p + 48, start, end) for p in c[0]] notes += appended ins.notes = notes return ins def add_chord(melody_path, chord, target_path): midi = pretty_midi.PrettyMIDI(melody_path) midi.instruments.append(chord_to_ins(chord)) midi.write(target_path) def run(VAE, model_path): if not os.path.exists('demo'): os.mkdir('demo') model_name = os.path.join('demo', model_path.split('\\')[-1][0: -3]) if not os.path.exists(model_name): os.mkdir(model_name) model = load_model(VAE, 
model_path) m1 = "original-melody/jigs222.mid" c1 = "original-chord/" + m1.split('/')[1] m2 = "original-melody/reelsa-c22.mid" c2 = "original-chord/" + m2.split('/')[1] m3 = "original-melody/hpps55.mid" c3 = "original-chord/" + m1.split('/')[1] m4 = "original-melody/jigs33.mid" c4 = "original-chord/" + m2.split('/')[1] m5 = "original-melody/ashover13.mid" c5 = "original-chord/" + m2.split('/')[1] random.seed(22) sec1 = random.randint(1, 100) sec2 = random.randint(1, 100) sec3 = random.randint(1, 100) sec4 = random.randint(1, 100) sec4 = 25 sec5 = random.randint(1, 100) start1 = int(sec1 * 8) start2 = int(sec2 * 8) start3 = int(sec3 * 8) start4 = int(sec4 * 8) start5 = int(sec5 * 8) length = 1 melody1 = melody_to_numpy(fpath=m1)[start1: start1 + 32 * length] chord1 = chord_to_numpy(fpath=c1)[start1: start1 + 32 * length] melody2 = melody_to_numpy(fpath=m2)[start2: start2 + 32 * length] chord2 = chord_to_numpy(fpath=c2)[start2: start2 + 32 * length] melody3 = melody_to_numpy(fpath=m3)[start3: start3 + 32 * length] chord3 = chord_to_numpy(fpath=c3)[start3: start3 + 32 * length] melody4 = melody_to_numpy(fpath=m4)[start4: start4 + 32 * length] chord4 = chord_to_numpy(fpath=c4)[start4: start4 + 32 * length] melody5 = melody_to_numpy(fpath=m5)[start5: start5 + 32 * length] chord5 = chord_to_numpy(fpath=c5)[start5: start5 + 32 * length] chord1[chord1>1] = 1 chord2[chord2>1] = 1 chord3[chord3>1] = 1 chord4[chord4>1] = 1 chord5[chord5>1] = 1 # # prepare 16th note C # melody16 = torch.zeros(32, 130) # melody16[:, 60] = 1. # # prepare empty chord # empty_chord = torch.zeros_like(chord1) # # prepare scale melody # scale = torch.zeros(32, 130) # scale[[0, 3, 4, 8, 11, 12, 16, 18, 20, # 21, 22, 23, 24, 25, 26, 27, 28], # [67, 71, 74, 71, 74, 79, 78, 76, # 74, 76, 74, 72, 71, 72, 71, 69, 67]] = 1. # scale[[1, 2, 5, 6, 7, 9, 10, 13, 14, 15, 17, 19, 29, 30, 31], 128] = 1. # # prepare scale chord # scale_chord = torch.zeros(32, 12) # scale_chord[0: 16, [2, 7, 11]] = 1. 
# scale_chord[16: 24, [2, 6, 9]] = 1. # scale_chord[24: 32, [2, 7, 11]] = 1. # prepare shift one chord, 5 chords and 7 chords down_chord1_1 = torch.clone(chord1) down_chord1_1 = down_chord1_1[:, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0]] down_chord1_5 = torch.clone(chord1) down_chord1_5 = down_chord1_5[:, [5, 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4]] down_chord1_7 = torch.clone(chord1) down_chord1_7 = down_chord1_7[:, [7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 6]] down_chord2_1 = torch.clone(chord2) down_chord2_1 = down_chord2_1[:, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0]] down_chord2_5 = torch.clone(chord2) down_chord2_5 = down_chord2_5[:, [5, 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4]] down_chord2_7 = torch.clone(chord2) down_chord2_7 = down_chord2_7[:, [7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 6]] down_chord3_1 = torch.clone(chord3) down_chord3_1 = down_chord3_1[:, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0]] down_chord3_5 = torch.clone(chord3) down_chord3_5 = down_chord3_5[:, [5, 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4]] down_chord3_7 = torch.clone(chord3) down_chord3_7 = down_chord3_7[:, [7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 6]] down_chord4_1 = torch.clone(chord4) down_chord4_1 = down_chord4_1[:, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0]] down_chord4_5 = torch.clone(chord4) down_chord4_5 = down_chord4_5[:, [5, 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4]] down_chord4_7 = torch.clone(chord4) down_chord4_7 = down_chord4_7[:, [7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 6]] down_chord5_1 = torch.clone(chord5) down_chord5_1 = down_chord5_1[:, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0]] down_chord5_5 = torch.clone(chord5) down_chord5_5 = down_chord5_5[:, [5, 6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4]] down_chord5_7 = torch.clone(chord5) down_chord5_7 = down_chord5_7[:, [7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 6]] # prepare minor chord minor_1 = torch.zeros(32, 12) minor_1[0:8, [2, 5, 9]] = 1. minor_1[8:16, :] = chord1[8:16, :] minor_1[16:24, [2, 5, 9]] = 1. 
minor_1[24:32, :] = chord1[24:32, :] minor_2 = torch.zeros(32, 12) minor_2[0:24, [2, 6, 11]] = 1. minor_2[24:32, :] = chord2[24:32, :] minor_3 = torch.zeros(32, 12) minor_3[0:24, [2, 5, 9]] = 1. minor_3[24:32, :] = chord3[24:32, :] minor_4 = torch.zeros(32, 12) minor_4[0:8, :] = chord4[0:8, :] minor_4[8:16, [2, 6, 11]] = 1. minor_4[16:24, :] = chord4[16:24, :] minor_4[24:32, [2, 6, 11]] = 1. minor_5 = torch.zeros(32, 12) minor_5[0:8, [2, 6, 11]] = 1. minor_5[8:16, :] = chord5[8:16, :] minor_5[16:24, [2, 6, 11]] = 1. minor_5[24:32, [0, 3, 7]] = 1. # save chords chord_to_midi(chord1, 'jigs222_chord.mid') chord_to_midi(down_chord1_1, 'mid/move_1/down_chord_1_jigs222.mid') chord_to_midi(down_chord1_5, 'mid/move_5/down_chord_5_jigs222.mid') chord_to_midi(down_chord1_7, 'mid/move_7/down_chord_7_jigs222.mid') chord_to_midi(chord2, 'reelsa-c22_chord.mid') chord_to_midi(down_chord2_1, 'mid/move_1/down_chord_1_reelsa-c22.mid') chord_to_midi(down_chord2_5, 'mid/move_5/down_chord_5_reelsa-c22.mid') chord_to_midi(down_chord2_7, 'mid/move_7/down_chord_7_reelsa-c22.mid') chord_to_midi(chord3, 'hpps55_chord.mid') chord_to_midi(down_chord3_1, 'mid/move_1/down_chord_1_hpps55.mid') chord_to_midi(down_chord3_5, 'mid/move_5/down_chord_5_hpps55.mid') chord_to_midi(down_chord3_7, 'mid/move_7/down_chord_7_hpps55.mid') chord_to_midi(chord4, 'jigs33_chord.mid') chord_to_midi(down_chord4_1, 'mid/move_1/down_chord_1_jigs33.mid') chord_to_midi(down_chord4_5, 'mid/move_5/down_chord_5_jigs33.mid') chord_to_midi(down_chord4_7, 'mid/move_7/down_chord_7_jigs33.mid') chord_to_midi(chord5, 'ashover13_chord.mid') chord_to_midi(down_chord5_1, 'mid/move_1/down_chord_1_ashover13.mid') chord_to_midi(down_chord5_5, 'mid/move_5/down_chord_5_ashover13.mid') chord_to_midi(down_chord5_7, 'mid/move_7/down_chord_7_ashover13.mid') chord_to_midi(minor_1, 'mid/major_to_minor/minor_jigs222.mid') chord_to_midi(minor_2, 'mid/major_to_minor/minor_reelsa-c22.mid') chord_to_midi(minor_3, 
'mid/major_to_minor/minor_hpps55.mid') chord_to_midi(minor_4, 'mid/major_to_minor/minor_jigs33.mid') chord_to_midi(minor_5, 'mid/major_to_minor/minor_ashover13.mid') numpy_to_midi(melody1, output='mid/jigs222_melody.mid') numpy_to_midi(melody2, output='mid/reelsa-c22_melody.mid') numpy_to_midi(melody3, output='mid/hpps55_melody.mid') numpy_to_midi(melody4, output='mid/jigs33_melody.mid') numpy_to_midi(melody5, output='mid/ashover13_melody.mid') # rhy1 = swap(model, melody1, chord1, melody16, empty_chord) # rhy2 = swap(model, melody1, chord1, melody2, chord2) # pit1 = swap(model, scale, scale_chord, melody1, chord1) # pit2 = swap(model, melody2, chord2, melody1, chord1) down1_1 = swap(model, melody1, chord1, melody1, down_chord1_1, mode=2) down1_5 = swap(model, melody1, chord1, melody1, down_chord1_5, mode=2) down1_7 = swap(model, melody1, chord1, melody1, down_chord1_7, mode=2) down2_1 = swap(model, melody2, chord2, melody2, down_chord2_1, mode=2) down2_5 = swap(model, melody2, chord2, melody2, down_chord2_5, mode=2) down2_7 = swap(model, melody2, chord2, melody2, down_chord2_7, mode=2) down3_1 = swap(model, melody3, chord3, melody3, down_chord3_1, mode=2) down3_5 = swap(model, melody3, chord3, melody3, down_chord3_5, mode=2) down3_7 = swap(model, melody3, chord3, melody3, down_chord3_7, mode=2) down4_1 = swap(model, melody4, chord4, melody4, down_chord4_1, mode=2) down4_5 = swap(model, melody4, chord4, melody4, down_chord4_5, mode=2) down4_7 = swap(model, melody4, chord4, melody4, down_chord4_7, mode=2) down5_1 = swap(model, melody5, chord5, melody5, down_chord5_1, mode=2) down5_5 = swap(model, melody5, chord5, melody5, down_chord5_5, mode=2) down5_7 = swap(model, melody5, chord5, melody5, down_chord5_7, mode=2) minor1 = swap(model, melody1, chord1, melody1, minor_1, mode=2) minor2 = swap(model, melody2, chord2, melody2, minor_2, mode=2) minor3 = swap(model, melody3, chord3, melody3, minor_3, mode=2) minor4 = swap(model, melody4, chord4, melody4, minor_4, mode=2) 
minor5 = swap(model, melody5, chord5, melody5, minor_5, mode=2) items = [down1_1, down1_5, down1_7, down2_1, down2_5, down2_7, down3_1, down3_5, down3_7, down4_1, down4_5, down4_7, down5_1, down5_5, down5_7, minor1, minor2, minor3, minor4, minor5] temp_names = ['mid/move_1/' + model_path.rstrip('params.pt') + '_down_chord_j222_1', 'mid/move_5/' + model_path.rstrip('params.pt') + '_down_chord_j222_5', 'mid/move_7/' + model_path.rstrip('params.pt') + '_down_chord_j222_7', 'mid/move_1/' + model_path.rstrip('params.pt') + '_down_chord_reelsa-c22_1', 'mid/move_5/' + model_path.rstrip('params.pt') + '_down_chord_reelsa-c22_5', 'mid/move_7/' + model_path.rstrip('params.pt') + '_down_chord_reelsa-c22_7', 'mid/move_1/' + model_path.rstrip('params.pt') + '_down_chord_hpps55_1', 'mid/move_5/' + model_path.rstrip('params.pt') + '_down_chord_hpps55_5', 'mid/move_7/' + model_path.rstrip('params.pt') + '_down_chord_hpps55_7', 'mid/move_1/' + model_path.rstrip('params.pt') + '_down_chord_jigs33_1',
# Actions self.actions_input = dict() for name, action in self.actions_spec.items(): self.actions_input[name] = tf.placeholder( dtype=util.tf_dtype(action['type']), shape=(None,) + tuple(action['shape']), name=name ) # Explorations self.explorations = dict() if self.explorations_spec is None: pass elif isinstance(self.explorations_spec, list): for name, state in self.actions_spec.items(): self.explorations[name] = Exploration.from_spec(spec=self.explorations_spec) # single spec for all components of our action space elif "type" in self.explorations_spec: for name, state in self.actions_spec.items(): self.explorations[name] = Exploration.from_spec(spec=self.explorations_spec) # different spec for different components of our action space else: for name, state in self.actions_spec.items(): if self.explorations_spec.get(name) is not None: self.explorations[name] = Exploration.from_spec(spec=self.explorations_spec[name]) # Terminal self.terminal_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(None,), name='terminal') # Reward preprocessing if self.reward_preprocessing_spec is None: self.reward_preprocessing = None else: self.reward_preprocessing = PreprocessorStack.from_spec(spec=self.reward_preprocessing_spec) if self.reward_preprocessing.processed_shape(shape=()) != (): raise TensorForceError("Invalid reward preprocessing!") # Reward self.reward_input = tf.placeholder(dtype=util.tf_dtype('float'), shape=(None,), name='reward') # Internal states self.internals_input = list() self.internals_init = list() # Deterministic action flag self.deterministic_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(), name='deterministic') # Update flag self.update_input = tf.placeholder(dtype=util.tf_dtype('bool'), shape=(), name='update') # TensorFlow functions self.fn_discounted_cumulative_reward = tf.make_template( name_=(self.scope + '/discounted-cumulative-reward'), func_=self.tf_discounted_cumulative_reward, custom_getter_=custom_getter ) 
self.fn_actions_and_internals = tf.make_template( name_=(self.scope + '/actions-and-internals'), func_=self.tf_actions_and_internals, custom_getter_=custom_getter ) self.fn_loss_per_instance = tf.make_template( name_=(self.scope + '/loss-per-instance'), func_=self.tf_loss_per_instance, custom_getter_=custom_getter ) self.fn_regularization_losses = tf.make_template( name_=(self.scope + '/regularization-losses'), func_=self.tf_regularization_losses, custom_getter_=custom_getter ) self.fn_loss = tf.make_template( name_=(self.scope + '/loss'), func_=self.tf_loss, custom_getter_=custom_getter ) self.fn_optimization = tf.make_template( name_=(self.scope + '/optimization'), func_=self.tf_optimization, custom_getter_=custom_getter ) self.fn_preprocess_states = tf.make_template( name_=(self.scope + '/preprocess-states'), func_=self.tf_preprocess_states, custom_getter_=custom_getter ) self.fn_action_exploration = tf.make_template( name_=(self.scope + '/action-exploration'), func_=self.tf_action_exploration, custom_getter_=custom_getter ) self.fn_preprocess_reward = tf.make_template( name_=(self.scope + '/preprocess-reward'), func_=self.tf_preprocess_reward, custom_getter_=custom_getter ) self.summary_configuration_op = None if self.summary_spec and 'meta_param_recorder_class' in self.summary_spec: self.summary_configuration_op = self.summary_spec['meta_param_recorder_class'].build_metagraph_list() # self.fn_summarization = tf.make_template( # name_='summarization', # func_=self.tf_summarization, # custom_getter_=custom_getter # ) def tf_preprocess_states(self, states): """ Applies optional preprocessing to the states. """ for name, state in states.items(): if name in self.states_preprocessing: states[name] = self.states_preprocessing[name].process(tensor=state) else: states[name] = tf.identity(input=state) return states def tf_action_exploration(self, action, exploration, action_spec): """ Applies optional exploration to the action (post-processor for action outputs). 
Args: action (tf.Tensor): The original output action tensor (to be post-processed). exploration (Exploration): The Exploration object to use. action_spec (dict): Dict specifying the action space. Returns: The post-processed action output tensor. """ action_shape = tf.shape(input=action) exploration_value = exploration.tf_explore( episode=self.episode, timestep=self.timestep, action_shape=action_shape ) if action_spec['type'] == 'bool': action = tf.where( condition=(tf.random_uniform(shape=action_shape[0]) < exploration_value), x=(tf.random_uniform(shape=action_shape) < 0.5), y=action ) elif action_spec['type'] == 'int': action = tf.where( condition=(tf.random_uniform(shape=action_shape) < exploration_value), x=tf.random_uniform(shape=action_shape, maxval=action_spec['num_actions'], dtype=util.tf_dtype('int')), y=action ) elif action_spec['type'] == 'float': action += tf.reshape(tensor=exploration_value, shape=tuple(1 for _ in range(action_shape.get_shape().as_list()[0]))) if 'min_value' in action_spec: action = tf.clip_by_value( t=action, clip_value_min=action_spec['min_value'], clip_value_max=action_spec['max_value'] ) return action def tf_preprocess_reward(self, states, internals, terminal, reward): """ Applies optional preprocessing to the reward. """ if self.reward_preprocessing is None: reward = tf.identity(input=reward) else: reward = self.reward_preprocessing.process(tensor=reward) return reward # TODO: this could be a utility helper function if we remove self.discount and only allow external discount-value input def tf_discounted_cumulative_reward(self, terminal, reward, discount=None, final_reward=0.0, horizon=0): """ Creates and returns the TensorFlow operations for calculating the sequence of discounted cumulative rewards for a given sequence of single rewards. 
Example: single rewards = 2.0 1.0 0.0 0.5 1.0 -1.0 terminal = False, False, False, False True False gamma = 0.95 final_reward = 100.0 (only matters for last episode (r=-1.0) as this episode has no terminal signal) horizon=3 output = 2.95 1.45 1.38 1.45 1.0 94.0 Args: terminal: Tensor (bool) holding the is-terminal sequence. This sequence may contain more than one True value. If its very last element is False (not terminating), the given `final_reward` value is assumed to follow the last value in the single rewards sequence (see below). reward: Tensor (float) holding the sequence of single rewards. If the last element of `terminal` is False, an assumed last reward of the value of `final_reward` will be used. discount (float): The discount factor (gamma). By default, take the Model's discount factor. final_reward (float): Reward value to use if last episode in sequence does not terminate (terminal sequence ends with False). This value will be ignored if horizon == 1 or discount == 0.0. horizon (int): The length of the horizon (e.g. for n-step cumulative rewards in continuous tasks without terminal signals). Use 0 (default) for an infinite horizon. Note that horizon=1 leads to the exact same results as a discount factor of 0.0. Returns: Discounted cumulative reward tensor with the same shape as `reward`. """ # By default -> take Model's gamma value if discount is None: discount = self.discount # Accumulates discounted (n-step) reward (start new if terminal) def cumulate(cumulative, reward_terminal_horizon_subtract): rew, is_terminal, is_over_horizon, sub = reward_terminal_horizon_subtract return tf.where( # If terminal, start new cumulation. condition=is_terminal, x=rew, y=tf.where( # If we are above the horizon length (H) -> subtract discounted value from H steps back. 
condition=is_over_horizon, x=(rew + cumulative * discount - sub), y=(rew + cumulative * discount) ) ) # Accumulates length of episodes (starts new if terminal) def len_(cumulative, term): return tf.where( condition=term, # Start counting from 1 after is-terminal signal x=tf.ones(shape=(), dtype=tf.int32), # Otherwise, increase length by 1 y=cumulative + 1 ) # Reverse, since reward cumulation is calculated right-to-left, but tf.scan only works left-to-right. reward = tf.reverse(tensor=reward, axis=(0,)) # e.g. -1.0 1.0 0.5 0.0 1.0 2.0 terminal = tf.reverse(tensor=terminal, axis=(0,)) # e.g. F T F F F F # Store the steps until end of the episode(s) determined by the input terminal signals (True starts new count). lengths = tf.scan(fn=len_, elems=terminal, initializer=0) # e.g. 1 1 2 3 4 5 off_horizon = tf.greater(lengths, tf.fill(dims=tf.shape(lengths), value=horizon)) # e.g. F F F F T T # Calculate the horizon-subtraction value for each step. if horizon > 0: horizon_subtractions = tf.map_fn(lambda x: (discount ** horizon) * x, reward, dtype=tf.float32) # Shift right by size of horizon (fill rest with 0.0). horizon_subtractions = tf.concat([np.zeros(shape=(horizon,)), horizon_subtractions], axis=0) horizon_subtractions = tf.slice(horizon_subtractions, begin=(0,), size=tf.shape(reward)) # e.g. 0.0, 0.0, 0.0, -1.0*g^3, 1.0*g^3, 0.5*g^3 # all 0.0 if infinite horizon (special case: horizon=0) else: horizon_subtractions = tf.zeros(shape=tf.shape(reward)) # Now do the scan, each time summing up the previous step (discounted by gamma) and # subtracting the respective `horizon_subtraction`. reward = tf.scan( fn=cumulate, elems=(reward, terminal, off_horizon, horizon_subtractions), initializer=final_reward if horizon != 1 else 0.0 ) # Re-reverse again to match input sequences. 
return tf.reverse(tensor=reward, axis=(0,)) def tf_actions_and_internals(self, states, internals, update, deterministic): """ Creates and returns the TensorFlow operations for retrieving the actions and - if applicable - the posterior internal state Tensors in reaction to the given input states (and prior internal states). Args: states (dict): Dict of state tensors (each key represents one state space component). internals: List of prior internal state tensors. update: Single boolean tensor indicating whether this call happens during an update. deterministic: Boolean Tensor indicating, whether we will not apply exploration when actions are calculated. Returns: tuple: 1) dict of output actions (with or without exploration applied (see `deterministic`)) 2) list of posterior internal state Tensors (empty for non-internal state models) """ raise NotImplementedError def tf_loss_per_instance(self, states, internals, actions, terminal, reward, update): """ Creates and returns the TensorFlow operations for calculating the loss per batch instance (sample) of the given input state(s) and action(s). Args: states (dict): Dict of state tensors (each key represents one state space component). internals: List of prior internal state tensors. actions (dict): Dict of action tensors (each key represents one action space component). terminal: Terminal boolean tensor (shape=(batch-size,)). reward: Reward float tensor (shape=(batch-size,)). update: Single boolean tensor indicating whether this call happens during an update. Returns: Loss tensor (first rank is the batch size -> one loss value per sample in the batch). """ raise NotImplementedError def tf_regularization_losses(self, states, internals, update): """ Creates and returns the TensorFlow operations for calculating the different regularization losses for the given batch of state/internal state inputs. Args: states (dict): Dict of state tensors (each key represents one state space component). 
internals: List of prior internal state tensors. update: Single boolean tensor indicating whether this call happens during an update. Returns: Dict of regularization loss
#!/usr/bin/env python3 # --------------------( LICENSE )-------------------- # Copyright (c) 2014-2021 Beartype authors. # See "LICENSE" for further details. ''' **Beartype decorator.** This private submodule implements the core :func:`beartype` decorator as well as ancillary functions called by that decorator. The :mod:`beartype.__init__` submodule then imports the former for importation as the public :mod:`beartype.beartype` decorator by downstream callers -- completing the virtuous cycle of code life. This private submodule is *not* intended for importation by downstream callers. ''' # ....................{ TODO }.................... # All "FIXME:" comments for this submodule reside in this package's "__init__" # submodule to improve maintainability and readability here. # ....................{ IMPORTS }.................... from beartype.roar import ( BeartypeDecorWrappeeException, BeartypeDecorWrapperException, ) from beartype._decor._code.codemain import generate_code from beartype._decor._data import BeartypeData from beartype._util.cache.pool.utilcachepoolobjecttyped import ( acquire_object_typed, release_object_typed) from beartype._util.func.utilfuncmake import make_func from typing import Any, Callable, TypeVar, TYPE_CHECKING # See the "beartype.cave" submodule for further commentary. __all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL'] # ....................{ GLOBALS }.................... T = TypeVar('T', bound=Callable[..., Any]) ''' :pep:`484`-compliant **generic callable type variable** (i.e., type hint matching any arbitrary function or method). This type variable notifies static analysis performed by both static type checkers (e.g., :mod:`mypy`) and type-aware IDEs (e.g., VSCode) that the :mod:`beartype` decorator preserves callable signatures by creating and returning callables with the same signatures as the passed callables. ''' # ....................{ DECORATORS }.................... 
def beartype(func: T) -> T: ''' Dynamically create and return a **constant-time runtime type-checker** (i.e., pure-Python function validating all parameters and returns of all calls to the passed pure-Python callable against all PEP-compliant type hints annotating those parameters and returns). The type-checker returned by this decorator is: * Optimized uniquely for the passed callable. * Guaranteed to run in O(1) constant-time with negligible constant factors. * Type-check effectively instantaneously. * Add effectively no runtime overhead to the passed callable. If optimizations are enabled by the active Python interpreter (e.g., due to option ``-O`` passed to this interpreter), this decorator silently reduces to a noop. Parameters ---------- func : Callable **Non-class callable** (i.e., callable object that is *not* a class) to be decorated by a dynamically generated new callable wrapping this original callable with pure-Python type-checking. Returns ---------- Callable Dynamically generated new callable wrapping this original callable with pure-Python type-checking. Raises ---------- BeartypeDecorHintException If any annotation on this callable is neither: * A **PEP-compliant type** (i.e., instance or class complying with a PEP supported by :mod:`beartype`), including: * `PEP 484`_ types (i.e., instance or class declared by the stdlib :mod:`typing` module). * A **PEP-noncompliant type** (i.e., instance or class complying with :mod:`beartype`-specific semantics rather than a PEP), including: * **Fully-qualified forward references** (i.e., strings specified as fully-qualified classnames). * **Tuple unions** (i.e., tuples containing one or more classes and/or forward references). 
BeartypeDecorHintPep563Exception If `PEP 563`_ is active for this callable and evaluating a **postponed annotation** (i.e., annotation whose value is a string) on this callable raises an exception (e.g., due to that annotation referring to local state no longer accessible from this deferred evaluation). BeartypeDecorParamNameException If the name of any parameter declared on this callable is prefixed by the reserved substring ``__beartype_``. BeartypeDecorWrappeeException If this callable is either: * Uncallable. * A class, which :mod:`beartype` currently fails to support. * A C-based callable (e.g., builtin, third-party C extension). BeartypeDecorWrapperException If this decorator erroneously generates a syntactically invalid wrapper function. This should *never* happen, but here we are, so this probably happened. Please submit an upstream issue with our issue tracker if you ever see this. (Thanks and abstruse apologies!) .. _PEP 484: https://www.python.org/dev/peps/pep-0484 .. _PEP 563: https://www.python.org/dev/peps/pep-0563 ''' # Validate the type of the decorated object *BEFORE* performing any work # assuming this object to define attributes (e.g., "func.__name__"). # # If this object is uncallable, raise an exception. if not callable(func): raise BeartypeDecorWrappeeException(f'{repr(func)} uncallable.') # Else if this object is a class, raise an exception. elif isinstance(func, type): raise BeartypeDecorWrappeeException( f'{repr(func)} unsupported, ' f'as classes currently unsupported by @beartype.' ) # Else, this object is a non-class callable. Let's do this, folks. # If either... if ( # This callable is unannotated *OR*... not func.__annotations__ or # This callable is decorated by the @typing.no_type_check decorator # defining this dunder instance variable on this callable *OR*... getattr(func, '__no_type_check__', False) is True or # This callable is a @beartype-specific wrapper previously generated by # this decorator... 
hasattr(func, '__beartype_wrapper') ): # Efficiently reduce to a noop (i.e., the identity decorator) by # returning this callable as is. return func # Previously cached callable metadata reinitialized from this callable. func_data = acquire_object_typed(BeartypeData) func_data.reinit(func) # Generate the raw string of Python statements implementing this wrapper. func_wrapper_code = generate_code(func_data) # If this callable requires *NO* type-checking, silently reduce to a noop # and thus the identity decorator by returning this callable as is. if not func_wrapper_code: return func #FIXME: Uncomment after uncommenting the corresponding logic below. # Fully-qualified name of this undecorated callable to be decorated. # func_name_qualified = get_object_name(func) #FIXME: Once this is working, use the commented code example starting with #"func_code_compiled = compile" given below to associate this filename with #this wrapper function. #FIXME: Unit test this to externally be the case for function wrappers #generated by @beartype, please. # Fake filename of the in-memory fake module file masquerading as declaring # this wrapper function. This filename guarantees the uniqueness of the # 3-tuple ``({func_filename}, {func_file_line_number}, {func_name})`` # containing this filenames commonly leveraged by profilers (e.g., # "cProfile") to identify arbitrary callables, where: # * `{func_filename}` is this filename (e.g., # `"</home/leycec/py/betse/betse/lib/libs.py:beartype({func_name})>"`). # * `{func_file_line_number}`, is *ALWAYS* 0 and thus *NEVER* unique. # * `{func_name}`, is identical to that of the decorated callable and also # thus *NEVER* unique. # # Ergo, uniquifying this filename is the *ONLY* means of uniquifying # metadata identifying this wrapper function via runtime inspection. # # Note this filename is intentionally *NOT* prefixed and suffixed by the # "<" and ">" delimiters. Why? 
Because the stdlib linecache.lazycache() # function called below explicitly ignores filenames matching that # syntactic format, presumably due to the standard fake module filename # "<string>" applied by default to Python code dynamically generated by # the eval() and exec() builtins. Since Python occasionally emits in-memory # fake filenames resembling "memory:0x7f2ea8589810", we adopt a similar # syntax here to generate beartype-specific fake module filenames. # func_wrapper_filename = f'beartype_wrapper:{func_name_qualified}' #FIXME: Actually, we absolutely *DO* want to leverage the example #documented below of leveraging the compile() builtin. We want to do so #explicitly to pass something other than "<string>" here -- ideally, #"func.__code__.co_filename", ensuring that this wrapper function shares #the same absolute filename as that of the original function. To do so: # #* Implement the # beartype._util.utilcallable.get_callable_filename_or_placeholder() # getter. #* Call that function here to obtain that filename. 
# #Note that a similar example (also leveraging the exec() builtin, which #frankly seems excessive) is also given by: # https://stackoverflow.com/a/42478041/2809027 # #Failure to do so reduces tracebacks induced by exceptions raised by #this wrapper to non-human-readability, which is less than ideal: e.g., # # ModuleNotFoundError: No module named 'betsee.util.widget.abc.guiwdgabc' # # Traceback (most recent call last): # File "/home/leycec/py/betsee/betsee/gui/simconf/stack/widget/mixin/guisimconfwdgeditscalar.py", line 313, in _set_alias_to_widget_value_if_sim_conf_open # widget=self, value_old=self._widget_value_last) # File "<string>", line 25, in func_beartyped # File "/home/leycec/py/betsee/betsee/gui/simconf/stack/widget/mixin/guisimconfwdgeditscalar.py", line 409, in __init__ # *args, widget=widget, synopsis=widget.undo_synopsis, **kwargs) # File "<string>", line 13, in func_beartyped # #Note the final traceback line, which is effectively useless. #FIXME: Note that the existing third-party "makefun" package replacing the #stdlib @functools.wraps() decorator is probably the optimal solution for #preserving metadata on the original callable into our wrapper callable. #While we absolutely should *NOT* depend on that or any other third-party #package, that package's implementation should lend us useful insight. #Indeed, see the _make() function of the "makefun.main" submodule: # https://github.com/smarie/python-makefun/blob/master/makefun/main.py # Function wrapping this callable with type-checking to be returned. # # For efficiency, this wrapper accesses *ONLY* local rather than global # attributes. The latter incur a minor performance penalty, since local # attributes take precedence over global attributes, implying all global # attributes are *ALWAYS*
0x05492141, 0xfdadf7ff, 0x28009806, 0x6006d000, 0xe7cb2000, 0x00000515, 0x460bb510, 0x7b1c3320, 0xd00c2c01, 0x600c6814, 0x604c6854, 0x608a6852, 0x73192101, 0xf7ff05c9, 0x2000fd94, 0x4801bd10, 0x0000bd10, 0x00000514, 0xb084b5f7, 0xd00d000f, 0x90012000, 0x97002404, 0x46399002, 0x98064361, 0xffe2f7fe, 0x0c2d0405, 0xe002d002, 0xff85f7fe, 0x46212501, 0x98064369, 0xffd6f7fe, 0x1c681bc6, 0x43414621, 0x98069003, 0xffcef7fe, 0x42b01a38, 0x4606d202, 0xb2859803, 0x42869800, 0x9600d802, 0x94019502, 0x2c201c64, 0x2164d9d7, 0xf7fe4638, 0x0041ffbb, 0x98001841, 0xd9254281, 0x69819804, 0x22036980, 0x43900492, 0x61909a04, 0x69009804, 0x1f129a01, 0xd2022a04, 0x04522201, 0x221f4310, 0x43900612, 0x1e529a01, 0x08d206d2, 0x0b504302, 0x03409a02, 0x0cd204d2, 0x98044302, 0x98046102, 0x20006181, 0xbdf0b007, 0xe7fb4800, 0x00000521, 0x2900b510, 0x2a00d00b, 0x61cbd008, 0x2200618a, 0x844a840a, 0x05492141, 0xfd1bf7ff, 0xf7febd10, 0xb510ff24, 0xd004000c, 0x5d09212d, 0xd0022902, 0xf7fee005, 0x2141ff1a, 0xf7ff0549, 0x2000fcf9, 0x61e061a0, 0x84608420, 0xe005bd10, 0x021b6943, 0x780bd5fc, 0x1c4961c3, 0xd2f71e52, 0x00004770, 0x0105b5f7, 0x98024c22, 0x444c092d, 0x2001d007, 0x42850340, 0x7820d132, 0xd0072800, 0x6800e02e, 0x4288491c, 0x2001d12a, 0xe0277020, 0x22004e1a, 0x4631444e, 0xf0014610, 0x2701fa31, 0x6060033f, 0x4631463a, 0xf0012000, 0x2200fa29, 0x46336060, 0x46104631, 0xfa91f001, 0x60601df3, 0x113a33f9, 0x20004631, 0xfa89f001, 0x60604b09, 0x3308444b, 0x4631463a, 0xf0012000, 0x6060fa80, 0x462a4906, 0x20004449, 0xf0019b02, 0x6060fa78, 0x0000bdfe, 0x00000004, 0x42464346, 0x0000005c, 0x4c35b5f8, 0x49356960, 0x01804f35, 0x69a0d51b, 0x03122203, 0xd0054010, 0x03122201, 0xd00d4290, 0xe00c2000, 0x07c068e0, 0x6938d008, 0xd0010780, 0xe0002016, 0x49292014, 0xe0004348, 0x69614608, 0x0f490089, 0x6b38e029, 0xd00107c0, 0xe0002516, 0x48222514, 0x48224345, 0x6a063040, 0xf0006900, 0x4632f8c5, 0xf7fe2300, 0x6939fe01, 0x07891940, 0x0f894a1a, 0x084a4351, 0x230369a1, 0x4019049b, 0x2301d010, 0x1ac9049b, 0x1ac8d016, 
0x4298d019, 0x6920d107, 0x48130741, 0x1c490f49, 0xfebef7fe, 0x2000e000, 0x04c96961, 0x1c490f49, 0xfeb6f7fe, 0x4449490d, 0xbdf86008, 0x6809490c, 0x0e890089, 0x480ae005, 0x6b003840, 0x0e810080, 0xf7fe4610, 0x2112fea5, 0xe7e54348, 0x400fc000, 0x016e3600, 0x400d8000, 0x1dcd6500, 0x00000058, 0x400d8100, 0x482db570, 0x074a8801, 0x2a002104, 0x8802da02, 0x8002438a, 0x88024829, 0xd5020752, 0x438a8802, 0x48288002, 0x60414926, 0x60814927, 0x22806801, 0x22204391, 0x60014311, 0x69014824, 0xd00307c9, 0x08496901, 0x61010049, 0x8f4ff3bf, 0x8f6ff3bf, 0x21004d1f, 0xf3bf6129, 0xf3bf8f4f, 0x4c1d8f6f, 0x22016960, 0x43100452, 0xf3bf6160, 0xf3bf8f4f, 0x48188f6f, 0x60413080, 0x8f4ff3bf, 0x01016800, 0x04c00c49, 0x05ca0d83, 0x0c964618, 0x43320782, 0x1e40622a, 0x1e49d2fa, 0xf3bfd2f5, 0x69608f4f, 0x04092101, 0x61604308, 0x8f4ff3bf, 0x8f6ff3bf, 0xf812f000, 0x0000bd70, 0x400b8000, 0x400d0000, 0xd928c520, 0x400bc000, 0x0000ffff, 0xe000e000, 0xe000ef40, 0xe000ed00, 0x20004770, 0xb5704770, 0x460d4613, 0x4604460a, 0x46184601, 0xfdd1f7fe, 0xd0012800, 0xbd702000, 0xbd701960, 0x0c04b5f8, 0x4626b28b, 0x435eb282, 0x46100c0d, 0x43580c37, 0x19800436, 0x41792100, 0x436e4616, 0x04360c37, 0x41791980, 0x436e4626, 0xbdf81989, 0x4674b430, 0x78251e64, 0x42ab1c64, 0x461dd200, 0x005b5d63, 0xbc3018e3, 0x00004718, 0x4959b5f8, 0x44494857, 0x49586008, 0x24016b08, 0x0f800240, 0x6a084084, 0x07404e55, 0x6b300fc5, 0xd57d03c0, 0x21016b30, 0x43880309, 0x6b306330, 0xdafc2800, 0x21016b30, 0x43080409, 0x4a4d6330, 0x4f4d6810, 0x40382303, 0x18c002db, 0x43184b4b, 0x20036010, 0x61300300, 0x28006930, 0x6930dafc, 0x04092101, 0x61304388, 0x38404842, 0x22236b01, 0x02124039, 0x4a421889, 0x63014311, 0xd0032d00, 0x2c080064, 0x2408d900, 0xd0062d00, 0x4621483d, 0xfda8f7fe, 0x90002701, 0x483be002, 0x1c7fe7f7, 0x98004639, 0xfd9ef7fe, 0x42884938, 0x4838d8f7, 0x4a386941, 0x40112307, 0x07521e7a, 0x43110b52, 0x029b02a2, 0x04d218d2, 0x43110cd2, 0x3aff023a, 0x05923a01, 0x43110d92, 0x6a416141, 0x01c909c9, 0x62411cc9, 0x04892103, 0x2d006982, 
0x438ad002, 0xe0046182, 0x2101438a, 0x185104c9, 0x69816181, 0x40114a25, 0x06922203, 0x61811889, 0x4a2369c1, 0x22034011, 0x18890752, 0x43114a21, 0xe00061c1, 0x69c1e014, 0x04122203, 0x61c14311, 0x22e76a41, 0x439102d2, 0x02d22221, 0x62411889, 0x20016b31, 0x43810400, 0x69316331, 0x61314381, 0xd0072d00, 0x4621480c, 0xfd46f7fe, 0x44494912, 0xbdf86008, 0xe7f64809, 0x016e3600, 0x00000050, 0x401f4440, 0x400d8000, 0x400d8100, 0xc0c0c0c0, 0x18131818, 0x0f1a2323, 0x1f78a400, 0x179a7b00, 0x08954400, 0x400fc000, 0xfff8e0ff, 0xe3ffffcf, 0x9c7fff80, 0x03800001, 0x00000058, 0x2800b510, 0xf7ffd101, 0xbd10ff27, 0xb5104770, 0x02402003, 0xf7fe2100, 0xbd10fe0b, 0xb5f74770, 0x4616b082, 0xd051000d, 0xd84f2e01, 0x34ff462c, 0x7a2034c1, 0xd0002800, 0x28002001, 0x6c28d046, 0x0fc70640, 0xf0029802, 0x2e00f94f, 0x7a20d02b, 0xd0092800, 0x5d412046, 0x9802463a, 0xf852f000, 0x98024629, 0xfa74f000, 0x28007ba0, 0x2100d003, 0xf0009802, 0x466afcdf, 0x98022102, 0xfc70f000, 0xd0022f00, 0x08409800, 0xaa019000, 0x98022100, 0xfc66f000, 0x98019900, 0xfcd0f7fe, 0x1c40210a, 0x08804348, 0x7a21e010, 0xd0072900, 0x9802463a, 0xf828f000, 0x98024629, 0xfa4af000, 0x28007ba0, 0x2101d0d9, 0xbf00e7d4, 0xd2fc1e40, 0xbdf0b005, 0xf000b510, 0x2800fc83, 0xf002d001, 0xbd10f8ca, 0xf000b510, 0x2800fc7b, 0x2100d00b, 0x07db2301, 0x1812008a, 0x68143280, 0x6014431c, 0x29041c49, 0xbd10d3f6, 0x460cb5f7, 0xb08a4927, 0x22144615, 0x46684479, 0xfb7af7fe, 0x22144923, 0x31084479, 0xf7fea805, 0x2d00fb73, 0x2d01d001, 0x4e1fd137, 0x09876b30, 0x01bf481e, 0x200769c2, 0x438205c0, 0xd02e2d00, 0x2c00a805, 0x2c09d029, 0x2401d900, 0x5c430061, 0x78401808, 0x05c02107, 0x069b05c9, 0x0e9b1840, 0x4d130184, 0x09a4433b, 0x43146828, 0x43382702, 0x980a6028, 0xf81ef000, 0x42986b30, 0x6333d000, 0x69c1480a, 0xd00042a1, 0x980a61c4, 0xf81ef000, 0x43b86828, 0xf3bf6028, 0xb00d8f6f, 0x4668bdf0, 0x0000e7cf, 0x00002c1c, 0x400d80c0, 0x400fc000, 0x402a8000, 0x68014804, 0x02922203, 0x60014391, 0x8f6ff3bf, 0x00004770, 0x400fc080, 0x68014804, 0x02922203, 0x60014311, 
0x8f6ff3bf, 0x00004770, 0x400fc080, 0x4605b5fe, 0x90002004, 0x4628460e, 0xfbf2f000, 0xd07d0004, 0xd0fc2e00, 0x28037830, 0x7c30d878, 0x27009001, 0xf0024620, 0x4628f875, 0xff64f7ff, 0x211e6960, 0x61604308, 0x35804625, 0x62286870, 0x28027830, 0x2803d002, 0xe003d002, 0xe0008b30, 0xb2878c30, 0x7a307b31, 0x07001e49, 0x0b000749, 0x43080949, 0x07c99901, 0x43384308, 0x78306268, 0xd14b2803, 0x6a3269f0, 0xd1490783, 0x211c2340, 0x63a9469e, 0x21016bab, 0x63ab430b, 0x07db6bab, 0x6b2bd1fc, 0x632b430b, 0x1de1e036, 0x467331f9, 0xd30f4572, 0x089b6967, 0xd40206bf, 0xc980e029, 0x1e5bc080, 0x4671d2fb, 0x69611a52, 0x43192320, 0xe01e6161, 0x061b6f2b, 0x42930d5b, 0xe018d203, 0x1f12c908, 0x2a04c008, 0x2a00d2fa, 0x6809d010, 0xa9019101, 0x4601468c, 0xe0072300, 0x783f4667, 0x4667700f, 0x1c491c7f, 0x1c5b46bc, 0xd3f54293, 0x69612200, 0xd4010709, 0xd1c62a00, 0xe0007830, 0x2802e058, 0x2801d001, 0x6971d13d, 0x078869b2, 0x2040d150, 0x201c4684, 0x6be863e8, 0x43182301, 0x6be863e8, 0xd1fc07c0, 0xe02a4686, 0x30ff4620, 0x30816963, 0xd521065b, 0x089b4663, 0xd2024562, 0xc980e00e, 0x1e5bc080, 0x4660d2fb, 0xe0091a12, 0xdd032a00, 0xc080c980, 0xe0011f12, 0xc0802700, 0xd2f51e5b, 0x28004670, 0x6b28d104, 0x43182301, 0x469e6328, 0x23406960, 0x61604318, 0x07006960, 0x2a00d401, 0x7830dcd2, 0xd1032800, 0x21016b28, 0x63284308, 0xf0014620, 0x6960ffb5, 0xd50b0700, 0x01006e68, 0x280e0f00, 0x4804d001, 0x4803e001, 0x90001e40, 0xbdfe9800, 0xe7fa2000, 0x00001771, 0xb5104602, 0x2a002004, 0x2900d021, 0x2044d01f, 0x28015c40, 0x68d0d10c, 0x005b0843, 0x43032040, 0xf0004608, 0x2800fcdd, 0x2001d001, 0x60d34303, 0x212068d0, 0x60d04308, 0x20004b05, 0x18890081, 0x401c6a0c, 0x1c40620c, 0xd3f72803, 0xbd102000, 0xfcf0ff00, 0xb087b5f3, 0x90002000, 0x2504460c, 0xf0009807, 0x9003fadf, 0xd0542800, 0xd0522c00, 0x30404620, 0x90054627, 0x26003750, 0x304130ff, 0x68389002, 0x0a8100b2, 0x18109803, 0x66019001, 0x1d3f7ba0, 0x7b6006c5, 0x06c00eed, 0x43050d80, 0x07007be0, 0x43050c40, 0xf0004620, 0x2800fca0, 0x2001d002, 0x43050280, 0x28006f60, 
0xaa04d00d, 0x98072102, 0xfa70f000, 0x6f612301, 0x9a044668, 0xfab2f000, 0x04009800, 0x98014305, 0x98056705, 0x280079c0, 0x9802d00e, 0x07417c00, 0x9902d00a, 0x7c492207, 0x07090340, 0x03520d09, 0xb2801880, 0xe0014308, 0x02002009, 0x31809901, 0x1c766008, 0xd3b62e04, 0x46282500, 0xbdf0b009, 0xb085b5f0, 0x460e4d1b, 0x95004607, 0xf0009501, 0x0004fa7b, 0x2e00d02b, 0xaa02d029, 0x46382102, 0xfa34f000, 0x2101aa03, 0xf0004638, 0x2601fa2f, 0x02b64f11, 0x46394633, 0x9a024668, 0xfa6ef000, 0x46394633, 0x9a03a801, 0xfa68f000, 0x42a89800, 0x9500d900, 0x42a89801, 0x9501d900, 0x99019800, 0xb2890400, 0x60604308, 0xb0052000, 0x2004bdf0, 0x0000e7fb, 0x0000ffff, 0x1dcd6500, 0xb085b5f3, 0x2404460e, 0xf0009805, 0x0005fa3f, 0x2e00d07e, 0x7b30d07c, 0xd8792803, 0xd00a2800, 0xd0082801, 0xd0062802, 0xd1042803, 0xf0004630, 0x2800fbf1, 0x20ffd002, 0xe00c3001, 0x2102aa03, 0xf0009805, 0x4938f9e5, 0x42889803, 0x6c30d306, 0xd4030600, 0x90002079, 0xe0219001, 0x00602400, 0x31601981, 0x7e0f7e48, 0xd10b2800, 0x43472064, 0x214bd01d, 0xf7fe4638, 0x214bfa39, 0x42b94341, 0x1c40d200, 0x283f213f, 0x4608d900, 0x0c400680, 0x30ff00a2, 0x46693001, 0x50881c64, 0xd3de2c02, 0x22026828, 0x28000780, 0x2000da1c, 0x4630e01e, 0xfbb7f000, 0x46024607, 0x9805a902, 0xf9daf000, 0x9902481a, 0xd0092f00, 0xfa10f7fe, 0x217d0880, 0x434800c9, 0xf7fe214b, 0xe7d2fa09, 0xfa06f7fe, 0xe7f40840, 0x20016829, 0x60294311, 0x29006d31, 0x6d71d102, 0xd0022900, 0x9b0021c0, 0x6db1514b, 0xd1022900, 0x29006df1, 0x21c4d004, 0xe0009b01, 0x514be006, 0xd0022800, 0x43906828, 0x24006028, 0xb0074620, 0x0000bdf0, 0x05f5e100, 0x3b9aca00, 0xb08ab5f7, 0x2604460d, 0xf000980a, 0x0007f9a7, 0x2d00d054, 0x2001d052, 0x70084669, 0x9001980c, 0x74082000, 0x24002004, 0x00a09006, 0x46101942, 0x78413020, 0xd03e2900, 0x025b2301, 0x059b59db, 0xd0030f9b, 0x7f5b192b, 0xd0342b02, 0x78009102, 0x92053230, 0x22009003, 0x9b0c4629, 0xf000980a, 0x2800f911, 0x9802d12b, 0x01002101, 0x32801942, 0x980a9b03, 0xfdc6f001, 0x90022001, 0x980a4669, 0xfd72f7ff, 0xd11a0006, 0x28008a68, 
0x1929d10e, 0x29027f49, 0x2903d00a, 0x2200d008, 0x9b0c4629, 0xf000980a, 0x0006f80f, 0xe004d109, 0x43482164, 0xf0012100, 0x1c64fd8c, 0xd3b62c03, 0xb00d4630, 0x0000bdf0, 0x2004b5ff, 0x460db091, 0x2900900e, 0x4628d079, 0x8f823040, 0x460c2101, 0x8fc24094, 0x92009302, 0x466b2203, 0x9104711a, 0xa90c9103, 0x99139108, 0xd0012900, 0xe0002108, 0x91092104, 0x75199913, 0x280079c0, 0x4628d009, 0x308130ff, 0x29007901, 0x7941d003, 0x79009103, 0x98039004, 0x01009b04, 0x32801942, 0x98119903, 0xfd6af001, 0x2d006f2d, 0x2001d001, 0x2000e000, 0x900f2600, 0x9811a901, 0xfd10f7ff, 0x2800900e, 0x9813d13b, 0xd0192800, 0xa80ba90a, 0x2208ab0c, 0x700f781f, 0x7007785f, 0x1c491c40, 0x1c9b1e92, 0xd1f52a00, 0x990b9800, 0x980a2800, 0x4008d004, 0x43814621, 0xe00ed10a, 0xe00a4308, 0x28009800, 0x990cd006, 0x43884620, 0xd0042800, 0xe0032701, 0x4020980c, 0x2700e7f8, 0x4207980f, 0x4628d00b, 0xd00d4330, 0x00c0207d, 0xf0012100, 0x2000fd0a, 0x1e6d43c0, 0x2f004146, 0x980ed1bc, 0xbdf0b015, 0x900e4801, 0x0000e7f9, 0x00001772, 0xb089b5f0, 0x460c4616, 0x25044607, 0xf8baf000, 0xd0482800, 0xd0462c00, 0x30ff30ff, 0x68003002, 0x0f800580, 0x7c60d002, 0xd0252802, 0x46692501, 0x9601700d, 0x90027d60, 0x90037d20, 0x74082000, 0x30184620, 0x20049005, 0x46339006, 0x46212200, 0xf0004638, 0x9802f82b, 0x01002101, 0x46381902, 0x9b033280, 0xfce2f001, 0x46384669, 0xf7ff9502, 0x0005fc8f, 0xe001d117, 0xe0142500, 0x29008a61, 0x7c60d10c, 0xd0092802, 0xd0072803, 0x22004633, 0x46384621, 0xff2af7ff, 0xe0044605, 0x43482064, 0xf0012100, 0x4628fca8, 0xbdf0b009, 0x4604b570, 0xb08a2004, 0xd0242900, 0x466e2000, 0x20037030, 0x90022501, 0x74329503, 0x93012047, 0x28005c40, 0x4608d009, 0x308130ff, 0x2a007a02, 0x7a42d003, 0x7a009202, 0x98029003, 0x01009b03, 0x21011842, 0x32804620, 0xfc9af001, 0x46204669, 0xf7ff9502, 0xb00afc47, 0x0000bd70, 0x4616b570, 0x20004a13, 0x6812444a, 0x29004604, 0x4d11d008, 0xd0072901, 0xd00a2902, 0x60302404, 0xbd704620, 0xe7fa4610, 0x05806968, 0x46100f81, 0x69e8e00c, 0x6b004809, 0x0e890681, 0xf7fe4808, 0x2112f84d, 
0x69e94348, 0x0f490189, 0xf7fe1c49, 0xe7e4f845, 0x00000058, 0x400fc000, 0x400d80c0, 0x1c9c3800, 0x20044602, 0xd1042a00, 0xd0022900, 0x60084801, 0x47702000, 0x07ed6b40, 0x20004601, 0xd1022900, 0x44784801, 0x47706800, 0x0000236e, 0x4607b5f8, 0x461c2004, 0x2f00460e, 0x2a00d016, 0x2c00d014, 0x4611d012, 0xf7fe4809, 0x4601f817, 0x43614605, 0xf7fe4630, 0xe000f811, 0x46011c40, 0x43614369, 0xd3f942b1, 0x20006038, 0x0000bdf8, 0x3b9aca00, 0x460db570, 0xffd0f7ff, 0xd00a0004, 0xfc5cf001, 0x02c02001, 0x2d006821, 0x4301d001, 0x4381e000, 0xbd706021, 0xb087b5f3, 0x2604460d, 0xf7ff9807, 0x0004ffbb, 0x2d00d07e, 0x7c28d0fc, 0xd1022800, 0x28007f28, 0x6c28d009, 0xd40406c0, 0xf0004628, 0x2800f976, 0x2001d001, 0x2000e000, 0x49619003, 0x42886828, 0x9807d166, 0xfb8cf7ff, 0x98074629, 0xf8bef000, 0x30404628, 0x90029903, 0xd0022900, 0x21012200, 0x4628e005, 0xf959f000, 0x98024602, 0x98077981, 0xfb1cf7ff, 0xf7ff9807, 0x6820fb7f, 0x43b02602, 0x46206020, 0xfbc9f001, 0x43306820, 0x68206020, 0x4008494c, 0x79499902, 0xd1012908, 0x43080289, 0x43084949, 0x07897b29, 0x43010e89, 0x46296021, 0xf7ff9807, 0x68a0fce3, 0x03c92101, 0x60a04388, 0xf0004628, 0x2800f931, 0x68a0d004, 0x04c92101, 0x60a04308, 0x46204629, 0xfc42f7ff, 0x98074629, 0xfc68f7ff, 0x98074629, 0xfd06f7ff, 0x21026820, 0x60204388, 0xf0014620, 0x4628fb90, 0x30507c29, 0x29009004, 0x9807d01c, 0x20049005, 0xe0002100, 0x9e04e052, 0x460f9100, 0x9101ce02, 0xd00a2900, 0x9a004629, 0xf7ff9805, 0x2800fe6b, 0x9a01d106, 0x18899900, 0x1c7f9100, 0xd3ed2f04, 0xd13b0006, 0x28007f28, 0x9807d01c, 0x20049005, 0xd0152d00, 0x9e042100, 0x460f9100, 0x9101ce02, 0xd00a2900, 0x9a004629, 0xf7ff9805, 0x2800fd5f, 0x9a01d106, 0x18899900, 0x1c7f9100, 0xd3ed2f04, 0xd11b0006, 0x28009803, 0x6820d017, 0x43302602, 0x46286020, 0xf8c3f000, 0x98024602, 0x98077981, 0xfa86f7ff, 0x98074629, 0xfc6af7ff, 0x98074629, 0xfca4f7ff, 0x43b06820, 0x26006020, 0xb0094630, 0x0000bdf0, 0x42464346, 0x0000df0f, 0xffff0000, 0x4d4bb5f8, 0x462e484b, 0x460c9000, 0x46084637, 0xf8a9f000, 0xd0042800, 
0x6ee06e26, 0x6e679000, 0x6d206ea5, 0x6d602800, 0x2800d025, 0x4942d004, 0x63c82006, 0x63064841, 0x6d204b3f, 0x28003380, 0x2001d004, 0x483d6058, 0x63863040, 0x20114a3a, 0x63503240, 0x30404839, 0x21016287, 0x62c56391, 0x63456019, 0x630563d1, 0x62456311, 0x5d002045, 0xd0032808, 0x2800e00f, 0xe025d1d9, 0x2001492e, 0x62083140, 0x32404a2d, 0x62886155, 0x624861d5, 0x61886195, 0x7b2060d5, 0xd0012803, 0xd1072801, 0x21114825, 0x62c13040, 0x98004924, 0x62083140, 0xf0004620, 0x2800f853, 0x481fd006, 0x30402101, 0x481e61c1, 0x61073040, 0x28006da0, 0xd02b6de0, 0xd0052800, 0x20064918, 0x60083140, 0x63464817, 0x28006da0, 0x4814d006, 0x30402104, 0x481360c1, 0x60063040, 0x21114810, 0x61c13040, 0x3140490f, 0x2201610f, 0x614d6202, 0x61cd6282, 0x618d6242, 0x60cd6182, 0x2a037b22, 0x2a01d001, 0x2214d103, 0x98006102, 0xbdf86048, 0xd1d32800, 0x0000bdf8, 0x000010f1, 0x000130f1, 0x401f8100, 0x401f8280, 0x07806c00, 0x2001d501, 0x20004770, 0x6c004770, 0xd5010640, 0x47702001, 0x47702000, 0x07c06c00, 0x2001d000, 0x6c004770, 0xd5010680, 0x47702001, 0x47702000, 0x07406c00, 0x2001d501, 0x20004770, 0x6c004770, 0xd5010700, 0x47702001, 0x47702000, 0xb08ab570, 0x20004605, 0x7030466e, 0x2401200f, 0x94039002, 0x460a7432, 0x930132ff, 0x46233271, 0x46284621, 0xfa54f001, 0x46284669, 0xf7ff9402, 0xb00afa01, 0xb5ffbd70, 0xb0812004, 0x2900460f, 0x463dd01e, 0x358135ff, 0x42496c69, 0x4014460c, 0x425218d2, 0x21ff400a, 0x5dc931ca, 0x29004256,
# Training utilities for recurrent Keras models: a stateful batch generator
# (DataGenerator), a scikit-learn-style regressor wrapper (RNNKerasRegressor),
# and Keras callbacks that (re)condition RNN hidden states between batches,
# epochs, and train/val/test phases.
import numpy as np
import pandas as pd
import math
import tensorflow as tf
from tensorflow.keras.utils import Sequence
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau

import aux.config as cfg


class DataGenerator(Sequence):
    """Keras Sequence that batches per-profile time-series data for TBPTT.

    Profiles (groups keyed by ``cfg.data_cfg['p_id_col']``) are padded to a
    common length, split into chunks of ``tbptt_len`` along the time axis,
    and served batch-wise with per-timestep sample weights that zero out the
    padding.

    When running in validation mode i.e. num profiles is less than
    batch_size, then the batch_size of an instance of this class should not
    be altered after creation.
    """

    def __init__(self, x, y=None, batch_size=32, tbptt_len=1,
                 increase_tbptt_factor=2, val_mode=False, ):
        # x: pandas DataFrame containing the profile-id column; y: optional
        # DataFrame of targets with the same profile-id column.
        self.reduce_weight_on_thinner_batches = \
            cfg.keras_cfg['rnn_params']['reduce_weight_on_thinner_batches']
        self.batch_size = batch_size
        self.tbptt_len = tbptt_len
        # NOTE(review): increase_tbptt_factor is stored but never read in
        # this file — presumably consumed elsewhere; confirm before removing.
        self.increase_tbptt_factor = increase_tbptt_factor
        self.epoch_counter = 0
        self.orig_x, self.orig_y = x, y
        self.num_profiles = x.groupby(cfg.data_cfg['p_id_col']).ngroups
        self.x, self.sample_weights = self._generate_batches(self.orig_x)
        if self.orig_y is not None:
            self.y, _ = self._generate_batches(self.orig_y)
        # validation/ test set condition
        self.validation_mode = self.num_profiles < self.batch_size or val_mode
        self.val_idx = None
        if self.validation_mode:
            # Tile the few available profiles until the batch is full so the
            # (stateful) model always sees a constant batch size.
            self.val_idx = np.tile(np.arange(self.num_profiles),
                                   self.batch_size)[:self.batch_size]
            # placeholder for speed-up
            self.val_x = np.zeros([self.batch_size] + list(self.x.shape[1:]))
            if y is not None:
                self.val_y = \
                    np.zeros([self.batch_size] + list(self.y.shape[1:]))
            else:
                self.val_y = None
            self.val_sample_weights = \
                np.zeros([self.batch_size] +
                         list(self.sample_weights.shape[1:]))

    def __getitem__(self, idx):
        """idx is the enumerated batch idx starting at 0.

        Returns the (x, y, sample_weights) triple for batch ``idx``. In
        validation mode the stride through the pre-batched arrays is
        ``num_profiles`` and the rows are tiled via ``val_idx``; otherwise
        the stride is ``batch_size``.
        """
        if self.validation_mode:
            # Reuse the preallocated buffers instead of reallocating.
            self.val_x[:] = \
                self.x[idx * self.num_profiles:
                       (idx + 1) * self.num_profiles][self.val_idx]
            if self.orig_y is not None:
                self.val_y[:] = \
                    self.y[idx * self.num_profiles:
                           (idx + 1) * self.num_profiles][self.val_idx]
            x, y = self.val_x, self.val_y
        else:
            x = self.x[idx * self.batch_size: (idx + 1) * self.batch_size]
            y = self.y[idx * self.batch_size: (idx + 1) * self.batch_size]
        # NOTE(review): _generate_batches always returns an ndarray, so the
        # None branch below looks unreachable from this class — confirm.
        if self.sample_weights is None:
            s = None
        else:
            if self.validation_mode:
                self.val_sample_weights[:] = self.sample_weights[
                    idx * self.num_profiles:
                    (idx + 1) * self.num_profiles][self.val_idx]
                s = self.val_sample_weights
            else:
                s = self.sample_weights[
                    idx * self.batch_size:(idx + 1) * self.batch_size]
        return x, y, s

    def __len__(self):
        # Number of batches per epoch; stride differs by mode (see
        # __getitem__).
        if self.validation_mode:
            return math.ceil(len(self.x) / self.num_profiles)
        else:
            return math.ceil(len(self.x) / self.batch_size)

    def _generate_batches(self, _df):
        """Pad profiles to equal length and split them into TBPTT chunks.

        Returns a tuple ``(arr, sample_weights)`` where ``arr`` has shape
        (num_chunks * num_profiles, tbptt_len, num_features) and
        ``sample_weights`` is 1 for real timesteps and 0 for padding.
        Raises ValueError if ``tbptt_len`` exceeds the longest profile.
        """
        p_id_col = cfg.data_cfg['p_id_col']
        grp = _df.groupby(p_id_col)
        profile_dfs_l = [df.drop(p_id_col, axis=1).reset_index(drop=True)
                         for p_id, df in grp]
        max_len = max(grp.groups.values(), key=lambda g: g.size).size
        # increase maxlen for having profiles multiples of tbptt_len.
        if max_len % self.tbptt_len > 0:
            max_len += (self.tbptt_len - (max_len % self.tbptt_len))
        max_len = int(max_len)
        # placeholder
        # Sentinel marking padded cells; converted to NaN below so the
        # padding can be masked out via sample weights.
        dummy_val = -999999
        arr = np.full((len(profile_dfs_l), max_len, _df.shape[1] - 1),
                      dummy_val, dtype=np.float32)
        # give all profiles equal length where we pad with zero
        for i, profile in enumerate(profile_dfs_l):
            arr[i, :len(profile), :] = profile.to_numpy()
        arr[arr == dummy_val] = np.nan
        # break sequences along axis 1 for tbptt length
        if max_len >= self.tbptt_len:
            if max_len != self.tbptt_len:
                arr = np.vstack([arr[:, n:n + self.tbptt_len, :] for n in
                                 range(0, arr.shape[1], self.tbptt_len)])
        else:
            raise ValueError('TBPTT Len > max profile length!')
        assert arr.shape[1] % self.tbptt_len == 0, 'ping!'
        # mask the padded zeros out by giving them a sample weight of
        # 0 during training
        nan_mask = np.isnan(arr[:, :, 0].reshape(arr.shape[:2]))
        sample_weights = np.ones_like(nan_mask, dtype=np.float64)
        if np.any(nan_mask):
            sample_weights[nan_mask] = 0
            if self.reduce_weight_on_thinner_batches:
                # Scale weights by the fraction of non-padded rows per
                # timestep so sparser chunks contribute less to the loss.
                sample_weights *= \
                    sample_weights.sum(axis=0) / sample_weights.shape[0]
        arr = np.nan_to_num(arr).astype(np.float32)
        return arr, sample_weights


class RNNKerasRegressor(KerasRegressor):
    """ScikitLearn wrapper for keras models which incorporates
    batch-generation on top. This Class wraps RNN topologies."""

    def __init__(self, *args, **kwargs):
        # Extra sklearn-style params carried alongside the Keras build args.
        self.score_params = kwargs.pop('score_params', None)
        self.predict_params = kwargs.pop('predict_params', None)
        super().__init__(*args, **kwargs)

    def __call__(self, *args, **kwargs):
        """Dummy function to satisfy keras BaseWrapper"""
        pass

    def get_params(self, **params):
        # Re-expose the popped extras so sklearn clone()/grid-search works.
        res = super().get_params(**params)
        res.update({'score_params': self.score_params,
                    'predict_params': self.predict_params})
        return res

    def set_params(self, **params):
        self.predict_params = params.pop('predict_params', None)
        self.score_params = params.pop('score_params', None)
        super().set_params(**params)

    def reset_states(self):
        # Clear RNN hidden states of the wrapped (stateful) model.
        self.model.reset_states()

    def fit(self, x, y, **kwargs):
        """Fit on DataFrames; wraps x/y (and validation_data) in
        DataGenerator instances before delegating to Keras fit."""
        assert isinstance(x, pd.DataFrame), \
            f'{self.__class__.__name__} needs pandas DataFrames as input'
        tbptt_len = kwargs.pop('tbptt_len', None)
        batch_size = kwargs.pop('batch_size', 32)
        # first conduct iteration
        batch_generation_cfg = {'batch_size': batch_size,
                                'tbptt_len': tbptt_len}
        # training set
        seq = DataGenerator(x, y, **batch_generation_cfg)
        # validation set
        x_val, y_val = kwargs.pop('validation_data', (None, None))
        if x_val is not None and y_val is not None:
            val_seq = DataGenerator(x_val, y_val, **batch_generation_cfg)
        else:
            val_seq = None
        kwargs['validation_data'] = val_seq
        if 'callbacks' not in kwargs:
            # Default early-stopping / LR schedule from project config.
            kwargs['callbacks'] = [
                EarlyStopping(monitor='val_loss', min_delta=1e-3,
                              patience=cfg.keras_cfg['early_stop_patience']),
                ReduceLROnPlateau(monitor='loss', factor=0.5,
                                  patience=cfg.keras_cfg[
                                      'early_stop_patience'] // 3),
            ]
        if hasattr(self, 'model'):
            # already initialized
            history = self.model.fit(seq, y=None, **kwargs)
        else:
            history = super().fit(seq, y=None, **kwargs)
        return history

    def predict(self, x, **kwargs):
        """Use this func to get a prediction for x.

        Predicts through a DataGenerator and then undoes the TBPTT chunking
        and padding, returning one 2-D array of stacked profile predictions.
        """
        assert isinstance(x, pd.DataFrame), \
            f'{self.__class__.__name__} needs pandas DataFrames as input'
        if self.predict_params is not None:
            kwargs.update(self.predict_params.copy())
        _ = kwargs.pop('downsample_rate', 1)
        val_mode = kwargs.pop('val_mode', False)
        tbptt_len = kwargs.pop('tbptt_len', None)
        batch_size = kwargs['batch_size']
        seq = DataGenerator(x, None, batch_size, tbptt_len,
                            val_mode=val_mode)
        kwargs = self.filter_sk_params(Sequential.predict, kwargs)
        _yhat = self.model.predict(seq, **kwargs)

        def revert_reshaping(yhat, sample_w):
            # Normalize yhat to 3-D (samples, timesteps, targets) first.
            if len(yhat.shape) < 3:
                if tbptt_len == 1:
                    third_dim = 1 if len(yhat.shape) == 1 else yhat.shape[1]
                    yhat = yhat.reshape(yhat.shape[0], 1, third_dim)
                elif len(yhat.shape) == 2:
                    # single target model
                    yhat = yhat.reshape(yhat.shape[0], yhat.shape[1], 1)
                else:
                    raise ValueError('Something wrong with _yhat shape!')
            stride = seq.num_profiles if seq.validation_mode else batch_size
            if sample_w is None:
                n_dummies = stride * [0]
            else:
                n_dummies = self.get_dummies_from_w_matrix(sample_w, stride)
            # return yhat as 2-dim matrix
            # 3d due to tbptt length -> 2d
            profiles = []
            # revert breakdown due to tbptt
            for idx_b, n_dummy in enumerate(n_dummies):
                profile = np.vstack([yhat[idx_b + n, :, :] for n in
                                     range(0, yhat.shape[0], batch_size)])
                if n_dummy != 0:
                    # Chop off predictions that correspond to padding.
                    profile = profile[:-n_dummy, :]
                profiles.append(profile)
            yhat = np.vstack(profiles)
            return yhat

        pred = revert_reshaping(np.squeeze(_yhat), seq.sample_weights)
        return pred

    @staticmethod
    def get_dummies_from_w_matrix(weights, stride):
        """Scan weight matrix for zeros which denote the padded zeros that
        need to be chopped off at the end. Return List of dummies for each
        profile which may be downsampled"""
        max_profile_len = weights.shape[0] * weights.shape[1] // stride
        # NOTE(review): np.sum over a generator is deprecated in NumPy
        # (use sum(...) or build an array first) — behavior kept as-is here.
        n_dummies_within_batch = \
            max_profile_len - np.sum(np.count_nonzero(
                weights[n:n + stride, :], axis=1) for n in
                range(0, weights.shape[0], stride))
        return n_dummies_within_batch.astype(int)


class StateResetter(tf.keras.callbacks.Callback):
    """This callback helps conditioning the output states of the recurrent
    layers in an RNN architecture.

    If ``layer`` is given, that layer's states are reset to first-row values
    of each profile (train/val/test splits taken from ``cfg.data_cfg``);
    otherwise the whole model's states are reset.
    """

    def __init__(self, datamanager=None, layer=None, noise=None):
        super().__init__()
        self.init_states_epoch_begin = None
        self.init_states_validation = None
        self.init_states_test = None
        if layer is not None:
            assert datamanager is not None, \
                'datamanager must be given if layer is specified'
            # First row of every profile = initial conditions per profile.
            init_vals = pd.concat([df.iloc[0:1, :] for p, df in
                                   datamanager.df
                                  .groupby(datamanager.PROFILE_ID_COL)],
                                  ignore_index=True) \
                .set_index(datamanager.PROFILE_ID_COL)
            # DataGenerator sorts profiles during GroupBy
            # thus, sort profile numbers here, too
            val_profiles = sorted([int(p) for p in
                                   cfg.data_cfg['valset']]) if \
                datamanager.has_hold_out else []
            train_profiles = sorted([p for p in
                                     datamanager.original_profiles
                                     if str(p) not in
                                     cfg.data_cfg['testset'] +
                                     [str(q) for q in val_profiles]])
            test_profiles = sorted([int(p) for p in
                                    cfg.data_cfg['testset']])
            self.init_states_epoch_begin = init_vals.loc[train_profiles, :]
            self.init_states_validation = init_vals.loc[val_profiles, :]
            self.init_states_test = init_vals.loc[test_profiles, :]
            self.init_vals = init_vals
            self.dm = datamanager
        # TF 2.1 reports batch count under 'samples', later versions under
        # 'steps' in Callback.params.
        self.num_batches = 'samples' if \
            tf.version.VERSION.startswith('2.1.') else 'steps'
        self.layer = layer  # whether to reset only a specific layer
        # if no layer is specified, init_states_.. is ignored.
        self.noise = noise

    def _reset(self, states):
        """Reset the target layer (or whole model) to the given states,
        tiling them up to the configured batch size when too few rows."""
        if self.layer is not None:
            if hasattr(self.init_states_epoch_begin, 'shape'):
                batch_size = self.init_states_epoch_begin.shape[0]
                if states.shape[0] < batch_size:
                    states = (np.tile(states.values,
                                      (batch_size, 1))[:batch_size, :],)
            else:
                # assume init states are list like
                batch_size = self.init_states_epoch_begin[0].shape[0]
                if states[0].shape[0] < batch_size:
                    for i, s in enumerate(states):
                        states[i] = np.tile(s.values,
                                            (batch_size, 1))[:batch_size, :]
            self.model.get_layer(self.layer).reset_states(states=states)
        else:
            self.model.reset_states()

    # NOTE(review): logs={} mutable-default on the callback hooks below is a
    # Python anti-pattern; harmless here only because logs is never mutated.
    def on_train_batch_end(self, batch, logs={}):
        # check whether we are in last batch and initialize for val set
        if batch == (self.params[self.num_batches] - 1):
            if len(self.init_states_validation) > 0:
                self._reset(self.init_states_validation)

    def on_epoch_begin(self, epoch, logs={}):
        reset_values = self.init_states_epoch_begin
        if self.noise is not None:
            # add noise with std = 0.01
            # NOTE(review): the DataFrame branch scales noise by self.noise,
            # the list branch hard-codes 0.01 — confirm this asymmetry.
            if hasattr(reset_values, 'shape'):
                reset_values += \
                    np.random.randn(*reset_values.shape) * self.noise
            else:
                # assume reset_values to be list like
                for i in range(len(reset_values)):
                    reset_values[i] += \
                        np.random.randn(*reset_values[i].shape) * 0.01
        self._reset(reset_values)

    def on_train_end(self, logs={}):
        # Condition states for the subsequent test-set evaluation.
        self._reset(self.init_states_test)


class IntegratorStateResetter(StateResetter):
    """Resets with initial values of the targets"""

    def __init__(self, datamanager=None, layer='rnn', noise=None):
        super().__init__(datamanager, layer, noise)
        if layer is not None:
            # Keep only the target columns as state initializers.
            state_cols = cfg.data_cfg['Target_param_names']
            self.init_states_epoch_begin = \
                self.init_states_epoch_begin.loc[:, state_cols]
            self.init_states_validation = \
                self.init_states_validation.loc[:, state_cols]
            self.init_states_test = \
                self.init_states_test.loc[:, state_cols]
        # Optional per-batch state overrides, set externally during training.
        self.intermediate_init_vals_train = None

    def on_train_batch_end(self, batch, logs={}):
        last_batch = self.params[self.num_batches] - 1
        if self.intermediate_init_vals_train is not None:
            if batch < last_batch:
                self._reset(self.intermediate_init_vals_train[batch])
        super().on_train_batch_end(batch, logs)


class LPTNStateResetter(StateResetter):
    """Resets with initial target values of full dataset"""

    def __init__(self, datamanager=None, layer=None,
                 only_these_profiles=None):
        if only_these_profiles is not None:
            assert isinstance(only_these_profiles, list), 'ping'
            # Restrict initial values to the requested profiles only.
            self.init_vals = pd.concat([df.iloc[0:1, :] for p, df in
                                        datamanager.df
                                       .groupby(datamanager.PROFILE_ID_COL)
                                        if p in only_these_profiles],
                                       ignore_index=True) \
                .set_index(datamanager.PROFILE_ID_COL)
        else:
            super().__init__(datamanager, layer)
        if layer is not None:
            target_cols = cfg.data_cfg['Target_param_names']
            init = self.init_vals.loc[:, target_cols]
            self.init_states_epoch_begin = init
            self.init_states_validation = init
<reponame>craigmaloney/eeweather
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
   Copyright 2018 Open Energy Efficiency, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
"""
import pandas as pd
import numpy as np
import pyproj

import eeweather.mockable
from .exceptions import ISDDataNotAvailableError
from .connections import metadata_db_connection_proxy
from .geo import get_lat_long_climate_zones
from .stations import ISDStation
from .utils import lazy_property
from .warnings import EEWeatherWarning

__all__ = ("rank_stations", "combine_ranked_stations", "select_station")


class CachedData(object):
    """Lazily loads and caches ISD/TMY3/CZ2010 station metadata from the
    metadata database as a DataFrame indexed by ``usaf_id``."""

    @lazy_property
    def all_station_metadata(self):
        # Loaded once per process (lazy_property caches the result).
        conn = metadata_db_connection_proxy.get_connection()
        cur = conn.cursor()

        cur.execute(
            """
          select
            isd.usaf_id
            , isd.latitude
            , isd.longitude
            , isd.iecc_climate_zone
            , isd.iecc_moisture_regime
            , isd.ba_climate_zone
            , isd.ca_climate_zone
            , isd.quality as rough_quality
            , isd.elevation
            , isd.state
            , tmy3.class as tmy3_class
            , tmy3.usaf_id is not null as is_tmy3
            , cz2010.usaf_id is not null as is_cz2010
          from
            isd_station_metadata as isd
            left join cz2010_station_metadata as cz2010 on
              isd.usaf_id = cz2010.usaf_id
            left join tmy3_station_metadata as tmy3 on
              isd.usaf_id = tmy3.usaf_id
          order by
            isd.usaf_id
        """
        )

        df = pd.DataFrame(
            [
                {col[0]: val for col, val in zip(cur.description, row)}
                for row in cur.fetchall()
            ],
            columns=[
                "usaf_id",
                "latitude",
                "longitude",
                "iecc_climate_zone",
                "iecc_moisture_regime",
                "ba_climate_zone",
                "ca_climate_zone",
                "rough_quality",
                "elevation",
                "state",
                "tmy3_class",
                "is_tmy3",
                "is_cz2010",
            ],
        ).set_index("usaf_id")
        # SQLite returns text/ints; coerce to the dtypes callers expect.
        df["latitude"] = df.latitude.astype(float)
        df["longitude"] = df.longitude.astype(float)
        df["elevation"] = df.elevation.astype(float)
        df["is_tmy3"] = df.is_tmy3.astype(bool)
        df["is_cz2010"] = df.is_cz2010.astype(bool)
        return df


cached_data = CachedData()


def _combine_filters(filters, index):
    """AND together a list of boolean Series over ``index`` into one mask."""
    combined_filters = pd.Series(True, index=index)
    for f in filters:
        combined_filters &= f
    return combined_filters


def rank_stations(
    site_latitude,
    site_longitude,
    site_state=None,
    site_elevation=None,
    match_iecc_climate_zone=False,
    match_iecc_moisture_regime=False,
    match_ba_climate_zone=False,
    match_ca_climate_zone=False,
    match_state=False,
    minimum_quality=None,
    minimum_tmy3_class=None,
    max_distance_meters=None,
    max_difference_elevation_meters=None,
    is_tmy3=None,
    is_cz2010=None,
):
    """ Get a ranked, filtered set of candidate weather stations and
    metadata for a particular site.

    Parameters
    ----------
    site_latitude : float
        Latitude of target site for which to find candidate weather stations.
    site_longitude : float
        Longitude of target site for which to find candidate weather stations.
    site_state : str, 2 letter abbreviation
        US state of target site, used optionally to filter potential
        candidate weather stations. Ignored unless ``match_state=True``.
    site_elevation : float
        Elevation of target site in meters, used optionally to filter
        potential candidate weather stations. Ignored unless
        ``max_difference_elevation_meters`` is set.
    match_iecc_climate_zone : bool
        If ``True``, filter candidate weather stations to those matching the
        IECC climate zone of the target site.
    match_iecc_moisture_regime : bool
        If ``True``, filter candidate weather stations to those matching the
        IECC moisture regime of the target site.
    match_ca_climate_zone : bool
        If ``True``, filter candidate weather stations to those matching the
        CA climate zone of the target site.
    match_ba_climate_zone : bool
        If ``True``, filter candidate weather stations to those matching the
        Building America climate zone of the target site.
    match_state : bool
        If ``True``, filter candidate weather stations to those matching the
        US state of the target site, as specified by ``site_state=True``.
    minimum_quality : str, ``'high'``, ``'medium'``, ``'low'``
        If given, filter candidate weather stations to those meeting or
        exceeding the given quality, as summarized by the frequency and
        availability of observations in the NOAA Integrated Surface Database.
    minimum_tmy3_class : str, ``'I'``, ``'II'``, ``'III'``
        If given, filter candidate weather stations to those meeting or
        exceeding the given class, as reported in the NREL TMY3 metadata.
    max_distance_meters : float
        If given, filter candidate weather stations to those within the
        ``max_distance_meters`` of the target site location.
    max_difference_elevation_meters : float
        If given, filter candidate weather stations to those with elevations
        within ``max_difference_elevation_meters`` of the target site
        elevation.
    is_tmy3 : bool
        If given, filter candidate weather stations to those for which TMY3
        normal year temperature data is available.
    is_cz2010 : bool
        If given, filter candidate weather stations to those for which CZ2010
        normal year temperature data is available.

    Returns
    -------
    ranked_filtered_candidates : :any:`pandas.DataFrame`
        Index is ``usaf_id``. Each row contains a potential weather station
        match and metadata. Contains the following columns:

        - ``rank``: Rank of weather station match for the target site.
        - ``distance_meters``: Distance from target site to weather station
          site.
        - ``latitude``: Latitude of weather station site.
        - ``longitude``: Longitude of weather station site.
        - ``iecc_climate_zone``: IECC Climate Zone ID (1-8)
        - ``iecc_moisture_regime``: IECC Moisture Regime ID (A-C)
        - ``ba_climate_zone``: Building America climate zone name
        - ``ca_climate_zone``: California climate zone number
        - ``rough_quality``: Approximate measure of frequency of ISD
          observations data at weather station.
        - ``elevation``: Elevation of weather station site, if available.
        - ``state``: US state of weather station site, if applicable.
        - ``tmy3_class``: Weather station class as reported by NREL TMY3, if
          available
        - ``is_tmy3``: Weather station has associated TMY3 data.
        - ``is_cz2010``: Weather station has associated CZ2010 data.
        - ``difference_elevation_meters``: Absolute difference in meters
          between target site elevation and weather station elevation, if
          available.
    """
    # NOTE(review): the column assignments below mutate the cached metadata
    # DataFrame shared across calls — confirm this is intentional.
    candidates = cached_data.all_station_metadata

    # compute distances
    candidates_defined_lat_long = candidates[
        candidates.latitude.notnull() & candidates.longitude.notnull()
    ]
    candidates_latitude = candidates_defined_lat_long.latitude
    candidates_longitude = candidates_defined_lat_long.longitude
    tiled_site_latitude = np.tile(site_latitude, candidates_latitude.shape)
    tiled_site_longitude = np.tile(site_longitude, candidates_longitude.shape)

    geod = pyproj.Geod(ellps="WGS84")
    # Geod.inv returns (fwd_azimuth, back_azimuth, distance); take distance.
    dists = geod.inv(
        tiled_site_longitude,
        tiled_site_latitude,
        candidates_longitude.values,
        candidates_latitude.values,
    )[2]
    # Reindex so stations without lat/long get NaN distance.
    distance_meters = pd.Series(
        dists, index=candidates_defined_lat_long.index
    ).reindex(candidates.index)
    candidates["distance_meters"] = distance_meters

    if site_elevation is not None:
        difference_elevation_meters = \
            (candidates.elevation - site_elevation).abs()
    else:
        difference_elevation_meters = None
    candidates["difference_elevation_meters"] = difference_elevation_meters

    site_climate_zones = get_lat_long_climate_zones(
        site_latitude, site_longitude
    )
    site_iecc_climate_zone = site_climate_zones["iecc_climate_zone"]
    site_iecc_moisture_regime = site_climate_zones["iecc_moisture_regime"]
    site_ca_climate_zone = site_climate_zones["ca_climate_zone"]
    site_ba_climate_zone = site_climate_zones["ba_climate_zone"]

    # create filters
    # Each filter is a boolean Series; a None site attribute matches only
    # stations where that attribute is also null.
    filters = []

    if match_iecc_climate_zone:
        if site_iecc_climate_zone is None:
            filters.append(candidates.iecc_climate_zone.isnull())
        else:
            filters.append(
                candidates.iecc_climate_zone == site_iecc_climate_zone
            )

    if match_iecc_moisture_regime:
        if site_iecc_moisture_regime is None:
            filters.append(candidates.iecc_moisture_regime.isnull())
        else:
            filters.append(
                candidates.iecc_moisture_regime == site_iecc_moisture_regime
            )

    if match_ba_climate_zone:
        if site_ba_climate_zone is None:
            filters.append(candidates.ba_climate_zone.isnull())
        else:
            filters.append(candidates.ba_climate_zone == site_ba_climate_zone)

    if match_ca_climate_zone:
        if site_ca_climate_zone is None:
            filters.append(candidates.ca_climate_zone.isnull())
        else:
            filters.append(candidates.ca_climate_zone == site_ca_climate_zone)

    if match_state:
        if site_state is None:
            filters.append(candidates.state.isnull())
        else:
            filters.append(candidates.state == site_state)

    if is_tmy3 is not None:
        filters.append(candidates.is_tmy3.isin([is_tmy3]))

    if is_cz2010 is not None:
        filters.append(candidates.is_cz2010.isin([is_cz2010]))

    # Quality and class thresholds are inclusive of better values.
    if minimum_quality == "low":
        filters.append(
            candidates.rough_quality.isin(["high", "medium", "low"])
        )
    elif minimum_quality == "medium":
        filters.append(candidates.rough_quality.isin(["high", "medium"]))
    elif minimum_quality == "high":
        filters.append(candidates.rough_quality.isin(["high"]))

    if minimum_tmy3_class == "III":
        filters.append(candidates.tmy3_class.isin(["I", "II", "III"]))
    elif minimum_tmy3_class == "II":
        filters.append(candidates.tmy3_class.isin(["I", "II"]))
    elif minimum_tmy3_class == "I":
        filters.append(candidates.tmy3_class.isin(["I"]))

    if max_distance_meters is not None:
        filters.append(candidates.distance_meters <= max_distance_meters)

    if max_difference_elevation_meters is not None and site_elevation is not None:
        filters.append(
            candidates.difference_elevation_meters <=
max_difference_elevation_meters ) combined_filters = _combine_filters(filters, candidates.index) filtered_candidates = candidates[combined_filters] ranked_filtered_candidates = filtered_candidates.sort_values(by=["distance_meters"]) # add rank column ranks = range(1, 1 + len(ranked_filtered_candidates)) ranked_filtered_candidates.insert(0, "rank", ranks) return ranked_filtered_candidates[ [ "rank", "distance_meters", "latitude", "longitude", "iecc_climate_zone", "iecc_moisture_regime", "ba_climate_zone", "ca_climate_zone", "rough_quality", "elevation", "state", "tmy3_class", "is_tmy3", "is_cz2010", "difference_elevation_meters", ] ] def combine_ranked_stations(rankings): """ Combine :any:`pandas.DataFrame` s of candidate weather stations to form a hybrid ranking dataframe. Parameters ---------- rankings : list of :any:`pandas.DataFrame` Dataframes of ranked weather station candidates and metadata. All ranking dataframes should have the same columns and must be sorted by rank. Returns ------- ranked_filtered_candidates : :any:`pandas.DataFrame` Dataframe has a rank column and the same columns given in the source dataframes. """ if len(rankings) == 0: raise ValueError("Requires at least one ranking.") combined_ranking = rankings[0] for ranking in rankings[1:]: filtered_ranking = ranking[~ranking.index.isin(combined_ranking.index)] combined_ranking = pd.concat([combined_ranking, filtered_ranking]) combined_ranking["rank"] = range(1, 1 + len(combined_ranking)) return combined_ranking @eeweather.mockable.mockable() def load_isd_hourly_temp_data( station, start_date, end_date, fetch_from_web ): # pragma: no cover return station.load_isd_hourly_temp_data( start_date, end_date, fetch_from_web=fetch_from_web ) def select_station( candidates, coverage_range=None, min_fraction_coverage=0.9, distance_warnings=(50000, 200000), rank=1, fetch_from_web=True, ): """ Select a station from a list of candidates that meets given data quality criteria. 
Parameters ---------- candidates : :any:`pandas.DataFrame` A dataframe of the form given by :any:`eeweather.rank_stations` or :any:`eeweather.combine_ranked_stations`, specifically having at least an index with ``usaf_id`` values and the column ``distance_meters``. Returns ------- isd_station, warnings : tuple of (:any:`eeweather.ISDStation`, list of str) A qualified weather station. ``None`` if no station meets criteria. """ def _test_station(station): if coverage_range is None: return True, [] else: start_date, end_date = coverage_range try: tempC, warnings = eeweather.mockable.load_isd_hourly_temp_data( station, start_date, end_date, fetch_from_web ) except ISDDataNotAvailableError: return False, [] # reject # TODO(philngo): also need to incorporate within-day limits if len(tempC) > 0: fraction_coverage = tempC.notnull().sum() / float(len(tempC)) return (fraction_coverage > min_fraction_coverage), warnings else: return False, [] # reject def _station_warnings(station, distance_meters): return