max_stars_repo_path
stringlengths
3
269
max_stars_repo_name
stringlengths
4
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.05M
score
float64
0.23
5.13
int_score
int64
0
5
dbd/cli/dbdcli.py
AlexRogalskiy/dbd
33
17900
import importlib.metadata import logging import os import shutil from typing import Dict, Any, List import click from sqlalchemy import text from dbd.log.dbd_exception import DbdException from dbd.config.dbd_profile import DbdProfile from dbd.config.dbd_project import DbdProject from dbd.executors.model_executor import ModelExecutor, InvalidModelException from dbd.log.dbd_logger import setup_logging log = logging.getLogger(__name__) this_script_dir = os.path.dirname(__file__) class Dbd(object): """ Top level CLI object """ def __init__(self, debug: bool = False, logfile: str = 'dbd.log', profile: str = 'dbd.profile', project: str = 'dbd.project'): """ Constructor :param bool debug: debug flag :param str logfile: log file :param str profile: profile file :param str project: project file """ self.__debug = debug self.__logfile = logfile self.__profile = profile self.__project = project def debug(self) -> bool: """ Debug flag getter :return: debug flag :rtype: bool """ return self.__debug def logfile(self) -> str: """ Logfile getter :return: logfile :rtype: str """ return self.__logfile def profile(self) -> str: """ Profile getter :return: profile :rtype: str """ return self.__profile def project(self) -> str: """ Project getter :return: project :rtype: str """ return self.__project def print_version(): """ Prints DBD version """ click.echo(f"You're using DBD version {importlib.metadata.version('dbd')}.") @click.group(invoke_without_command=True) @click.option('--debug/--no-debug', envvar='DBD_DEBUG', default=False, help='Sets debugging on/off') @click.option('--version', help="Print the DBD version and exit.", is_flag=True, is_eager=True) @click.option('--logfile', envvar='DBD_LOG_FILE', default='dbd.log', help='Log file location') @click.option('--profile', envvar='DBD_PROFILE', default='dbd.profile', help='Profile configuration file') @click.option('--project', envvar='DBD_PROJECT', default='dbd.project', help='Project configuration file') @click.pass_context def 
cli(ctx, debug, logfile, version, profile, project): if debug: click.echo(f"Logging DEBUG info to '{logfile}'") setup_logging(logging.DEBUG, logfile) if version: print_version() ctx.exit(0) ctx.obj = Dbd(debug, logfile, profile, project) # noinspection PyUnusedLocal @cli.command(help='Initializes a new DBD project.') @click.argument('dest', required=False, default='my_new_dbd_project') @click.pass_obj def init(dbd, dest): try: src = os.path.join(this_script_dir, '..', 'resources', 'template') if os.path.exists(dest): log.error(f"Can't overwrite directory '{dest}'") raise DbdException(f"Can't overwrite directory '{dest}'") shutil.copytree(src, dest) click.echo(f"New project {dest} generated. Do cd {dest}; dbd run .") except DbdException as d: click.echo(f"ERROR: '{d}'") @cli.command(help='Executes project.') @click.option('--only', envvar='DBD_ONLY', default=None, help='Comma separated list of fully qualified table names ' '(<schema>.<table-name-no suffix>) to execute.') @click.option('--deps/--no-deps', envvar='DBD_DEPS', default=True, help='Ignores dependencies for the --only list.') @click.argument('dest', required=False, default='.') @click.pass_obj def run(dbd, only, deps, dest): try: log.debug("Loading configuration.") prf = DbdProfile.load(os.path.join('.', dbd.profile())) prj = DbdProject.load(prf, os.path.join(dest, dbd.project())) log.debug("Creating model.") model = ModelExecutor(prj) log.debug("Connecting database.") engine = prj.alchemy_engine_from_project() # engine.execution_options(supports_statement_cache=False) log.debug("Executing model.") if not deps and only is None: log.error("You must specify --only list for --no-deps.") raise DbdException("You must specify --only list for --no-deps.") if only is not None: only_list = only.split(',') try: model.execute(engine, only_list, deps) except InvalidModelException as e: log.error(f"Can't run {only_list}: {e}") raise DbdException(f"Can't run {only_list}: {e}") else: model.execute(engine) 
log.debug("Finished.") click.echo("All tasks finished!") except DbdException as d: click.echo(f"ERROR: '{d}'") @cli.command(help='Validates project.') @click.argument('dest', required=False, default='.') @click.pass_obj def validate(dbd, dest): try: prf = DbdProfile.load(os.path.join('.', dbd.profile())) prj = DbdProject.load(prf, os.path.join(dest, dbd.project())) model = ModelExecutor(prj) engine = prj.alchemy_engine_from_project() # noinspection PyBroadException try: engine.execute(text("SELECT 1")) except Exception: click.echo( f"Can't connect to the target database. Check profile configuration in " f"'{os.path.normpath(os.path.join(dest, dbd.profile()))}'.") validation_result, validation_errors = model.validate() if validation_result: click.echo("No errors found. Model is valid.") else: click.echo("Model isn't valid. Please fix the following errors:") __echo_validation_errors(validation_errors) except DbdException as d: click.echo(f"ERROR: '{d}'") def __echo_validation_errors(validation_errors: Dict[str, Any]): """ Top level function for printing validation errors :param validation_errors: :return: """ __echo_validation_level(validation_errors) class InvalidValidationErrorStructure(DbdException): pass def __echo_validation_level(level_validation_errors: Dict[str, Any], indent: int = 0): """ Echo validation error line (called recursively on all Dict values) :param level_validation_errors: Dict with validation result :param indent: indentation level """ for (k, v) in level_validation_errors.items(): if isinstance(v, str): msg = f"{k}:{v}" click.echo(msg.rjust(indent * 2 + len(msg), ' ')) elif isinstance(v, Dict): msg = f"{k}:" click.echo(msg.rjust(indent * 2 + len(msg), ' ')) __echo_validation_level(v, indent + 1) elif isinstance(v, List): msg = f"{k}:{str(v)}" click.echo(msg.rjust(indent * 2 + len(msg), ' ')) else: raise InvalidValidationErrorStructure(f"Invalid validation result: '{v}' isn't supported type.")
2.28125
2
bestiary/serializers.py
Itori/swarfarm
66
17901
<filename>bestiary/serializers.py<gh_stars>10-100 from rest_framework import serializers from bestiary import models class GameItemSerializer(serializers.ModelSerializer): category = serializers.SerializerMethodField() class Meta: model = models.GameItem fields = [ 'id', 'com2us_id', 'url', 'name', 'category', 'icon', 'description', 'sell_value', ] extra_kwargs = { 'url': { 'view_name': 'bestiary/items-detail', }, } def get_category(self, instance): return instance.get_category_display() class SourceSerializer(serializers.ModelSerializer): class Meta: model = models.Source fields = ['id', 'url', 'name', 'description', 'farmable_source'] extra_kwargs = { 'url': { 'view_name': 'bestiary/monster-sources-detail', }, } class SkillUpgradeSerializer(serializers.ModelSerializer): effect = serializers.SerializerMethodField() class Meta: model = models.SkillUpgrade fields = ('effect', 'amount') def get_effect(self, instance): return instance.get_effect_display() class SkillEffectSerializer(serializers.ModelSerializer): type = serializers.CharField(source='get_type_display') class Meta: model = models.SkillEffect fields = ('id', 'url', 'name', 'is_buff', 'type', 'description', 'icon_filename') extra_kwargs = { 'url': { 'view_name': 'bestiary/skill-effects-detail', }, } class SkillEffectDetailSerializer(serializers.ModelSerializer): effect = SkillEffectSerializer() class Meta: model = models.SkillEffectDetail fields = [ 'effect', 'aoe', 'single_target', 'self_effect', 'chance', 'on_crit', 'on_death', 'random', 'quantity', 'all', 'self_hp', 'target_hp', 'damage', 'note', ] class SkillSerializer(serializers.HyperlinkedModelSerializer): level_progress_description = serializers.SerializerMethodField() upgrades = SkillUpgradeSerializer(many=True, read_only=True) effects = SkillEffectDetailSerializer(many=True, read_only=True, source='skilleffectdetail_set') scales_with = serializers.SerializerMethodField() used_on = serializers.PrimaryKeyRelatedField(source='monster_set', 
many=True, read_only=True) class Meta: model = models.Skill fields = ( 'id', 'com2us_id', 'name', 'description', 'slot', 'cooltime', 'hits', 'passive', 'aoe', 'max_level', 'upgrades', 'effects', 'multiplier_formula', 'multiplier_formula_raw', 'scales_with', 'icon_filename', 'used_on', 'level_progress_description', ) def get_level_progress_description(self, instance): if instance.level_progress_description: return instance.level_progress_description.rstrip().split('\n') else: return [] def get_scales_with(self, instance): # TODO: Fix N+1 query in API response caused by this return instance.scaling_stats.values_list('stat', flat=True) class LeaderSkillSerializer(serializers.ModelSerializer): attribute = serializers.SerializerMethodField('get_stat') area = serializers.SerializerMethodField() element = serializers.SerializerMethodField() class Meta: model = models.LeaderSkill fields = ('id', 'url', 'attribute', 'amount', 'area', 'element') extra_kwargs = { 'url': { 'view_name': 'bestiary/leader-skills-detail', }, } def get_stat(self, instance): return instance.get_attribute_display() def get_area(self, instance): return instance.get_area_display() def get_element(self, instance): return instance.get_element_display() class HomunculusSkillCraftCostSerializer(serializers.ModelSerializer): item = GameItemSerializer(read_only=True) class Meta: model = models.HomunculusSkillCraftCost fields = ['item', 'quantity'] class HomunculusSkillSerializer(serializers.ModelSerializer): craft_materials = HomunculusSkillCraftCostSerializer(source='homunculusskillcraftcost_set', many=True, read_only=True) used_on = serializers.PrimaryKeyRelatedField(source='monsters', many=True, read_only=True) class Meta: model = models.HomunculusSkill fields = ['id', 'url', 'skill', 'craft_materials', 'prerequisites', 'used_on'] extra_kwargs = { 'url': { 'view_name': 'bestiary/homunculus-skills-detail', }, } class MonsterCraftCostSerializer(serializers.ModelSerializer): item = 
GameItemSerializer(read_only=True) class Meta: model = models.MonsterCraftCost fields = ['item', 'quantity'] class AwakenCostSerializer(serializers.ModelSerializer): item = GameItemSerializer(read_only=True) class Meta: model = models.AwakenCost fields = ['item', 'quantity'] class MonsterSerializer(serializers.ModelSerializer): url = serializers.HyperlinkedIdentityField(view_name='bestiary/monsters-detail') element = serializers.SerializerMethodField() archetype = serializers.SerializerMethodField() source = SourceSerializer(many=True, read_only=True) leader_skill = LeaderSkillSerializer(read_only=True) awaken_cost = AwakenCostSerializer(source='awakencost_set', many=True, read_only=True) homunculus_skills = serializers.PrimaryKeyRelatedField(source='homunculusskill_set', read_only=True, many=True) craft_materials = MonsterCraftCostSerializer(many=True, source='monstercraftcost_set', read_only=True) class Meta: model = models.Monster fields = ( 'id', 'url', 'bestiary_slug', 'com2us_id', 'family_id', 'name', 'image_filename', 'element', 'archetype', 'base_stars', 'natural_stars', 'obtainable', 'can_awaken', 'awaken_level', 'awaken_bonus', 'skills', 'skill_ups_to_max', 'leader_skill', 'homunculus_skills', 'base_hp', 'base_attack', 'base_defense', 'speed', 'crit_rate', 'crit_damage', 'resistance', 'accuracy', 'raw_hp', 'raw_attack', 'raw_defense', 'max_lvl_hp', 'max_lvl_attack', 'max_lvl_defense', 'awakens_from', 'awakens_to', 'awaken_cost', 'source', 'fusion_food', 'homunculus', 'craft_cost', 'craft_materials', ) def get_element(self, instance): return instance.get_element_display() def get_archetype(self, instance): return instance.get_archetype_display() class FusionSerializer(serializers.ModelSerializer): class Meta: model = models.Fusion fields = ['id', 'url', 'product', 'cost', 'ingredients'] extra_kwargs = { 'url': { 'view_name': 'bestiary/fusions-detail', }, } class BuildingSerializer(serializers.ModelSerializer): url = 
serializers.HyperlinkedIdentityField(view_name='bestiary/buildings-detail') area = serializers.SerializerMethodField() affected_stat = serializers.SerializerMethodField() element = serializers.SerializerMethodField() class Meta: model = models.Building fields = [ 'id', 'url', 'area', 'affected_stat', 'element', 'com2us_id', 'name', 'max_level', 'stat_bonus', 'upgrade_cost', 'description', 'icon_filename', ] def get_area(self, instance): return instance.get_area_display() def get_affected_stat(self, instance): return instance.get_affected_stat_display() def get_element(self, instance): return instance.get_element_display() class DungeonSerializer(serializers.ModelSerializer): url = serializers.HyperlinkedIdentityField(view_name='bestiary/dungeons-detail') levels = serializers.PrimaryKeyRelatedField(source='level_set', read_only=True, many=True) category = serializers.SerializerMethodField() class Meta: model = models.Dungeon fields = [ 'id', 'url', 'enabled', 'name', 'slug', 'category', 'icon', 'levels', ] def get_category(self, instance): return instance.get_category_display() class EnemySerializer(serializers.ModelSerializer): class Meta: model = models.Enemy fields = [ 'id', 'monster', 'stars', 'level', 'hp', 'attack', 'defense', 'speed', 'resist', 'crit_bonus', 'crit_damage_reduction', 'accuracy_bonus', ] class WaveSerializer(serializers.ModelSerializer): enemies = EnemySerializer(source='enemy_set', many=True, read_only=True) class Meta: model = models.Wave fields = [ 'enemies', ] class LevelSerializer(serializers.ModelSerializer): url = serializers.HyperlinkedIdentityField(view_name='bestiary/levels-detail') difficulty = serializers.SerializerMethodField() waves = WaveSerializer(source='wave_set', many=True, read_only=True) class Meta: model = models.Level fields = [ 'id', 'url', 'dungeon', 'floor', 'difficulty', 'energy_cost', 'xp', 'frontline_slots', 'backline_slots', 'total_slots', 'waves', ] def get_difficulty(self, instance): return 
instance.get_difficulty_display()
2.234375
2
Code/ConvNetAbel.py
abel-gr/AbelNN
1
17902
<reponame>abel-gr/AbelNN # Copyright <NAME>. All Rights Reserved. # https://github.com/abel-gr/AbelNN import numpy as np import copy as copy import random import matplotlib import matplotlib.pyplot as plt import matplotlib.cm as cm from pylab import text import math class ConvNetAbel: version = 1.2 def __init__(self, hidden = [1], nEpochs = 1, learningRate=0.1, manualWeights=[], debugLevel=1, rangeRandomWeight=None, showLogs=False, softmax=False, activationFunction='leakyrelu', verbose = False, use='classification', batch_size=1, batch_gradient='average', batch_mult=1, dropout=0, pre_norm=False, shuffle=True, iterationDrop=0, convFilters = [32, 64, 128], convStride=2, convFilterSizes=3, learningRateConv=0.001, convEpochs=10, kernel_initializer='he_normal'): self.hiddenL = copy.deepcopy(hidden) self.hiddenL2 = copy.deepcopy(hidden) self.learningRate = learningRate self.numEpochs = nEpochs self.costs = [] # Costs list to check performance self.debugWeights = [] self.meanCostByEpoch = [] self.hiddenWeights = [] self.manualWeights = manualWeights self.debugMode = debugLevel self.rangeRandomWeight = rangeRandomWeight self.showLogs = showLogs self.softmax = softmax self.n_layer0 = -1 self.activationFunction = activationFunction self.verbose = verbose self.use = use self.batch_size = batch_size self.batch_gradient = batch_gradient self.batch_mult = batch_mult self.dropout = dropout self.pre_norm = pre_norm self.shuffle = shuffle self.iterationDrop = iterationDrop self.XavierInitialization = '1' self.lastLayerNeurons = -1 # ConvNet: self.convFilters = convFilters self.filtersValues = [None] * len(convFilters) self.convStride = convStride self.convFilterSizes = convFilterSizes self.learningRateConv = learningRateConv self.convEpochs = convEpochs self.kernel_initializer = kernel_initializer # Conv2 with only one kernel def conv2(self, x, kernel, stride=1): output = [] #np.zeros((kernel.shape), dtype=np.float32) kernel_l = kernel.shape[0] kernel_size = kernel.shape[0] * 
kernel.shape[1] c = int(kernel_l / 2) for i in range(c, x.shape[0] - c, stride): o = [] for j in range(c, x.shape[1] - c, stride): i0 = i - c j0 = j - c i1 = i + c + 1 j1 = j + c + 1 o.append(np.sum(x[i0:i1, j0:j1] * kernel)) output.append(o) output = np.asarray(output) return output # Convolution with multi-filters def conv_filters(self, x, filters, stride=1, relu=False, mode='same'): lex = len(x.shape) lef = len(filters.shape) if lex > lef: print('conv_filters: The input array cannot have more dimensions than the filter array.') return 0 output = [] kernel_l = filters.shape[0] kernel_size = filters.shape[0] * filters.shape[1] if lef == 2: num_filters = 1 else: num_filters = filters.shape[-1] c = int(kernel_l / 2) dim3 = False evenShapeKernel = (kernel_l % 2 == 0) if lex == 2: dim2 = True p0 = x.shape[0] p1 = x.shape[1] else: # x parameter was the output of this method previously called if lex == lef: num_new_filters = int(num_filters / x.shape[-1]) if (num_new_filters % 2 != 0) and (num_filters % 2 == 0): num_new_filters = num_new_filters - 1 if (num_new_filters == 0): num_new_filters = 1 else: # It is the first convolutional layer of a color image num_new_filters = num_filters dim3 = True dim2 = False p0 = x.shape[0] p1 = x.shape[1] if mode == 'full': fs0 = int(filters.shape[0] / 2) fs1 = int(filters.shape[1] / 2) max0 = p0 + fs0 max1 = p1 + fs1 ini0 = -1 * fs0 ini1 = -1 * fs1 elif mode == 'same': max0 = p0 max1 = p1 ini0 = 0 ini1 = 0 elif mode == 'valid': fs0 = int(filters.shape[0] / 2) fs1 = int(filters.shape[1] / 2) max0 = p0 - fs0 max1 = p1 - fs1 ini0 = fs0 ini1 = fs1 else: print('Mode must be same, valid or full') return 0 if evenShapeKernel and mode == 'valid': max0 = max0 + 1 max1 = max1 + 1 for i in range(ini0, max0, stride): o = [] for j in range(ini1, max1, stride): i0 = i - c j0 = j - c i1 = i + c + 1 j1 = j + c + 1 if evenShapeKernel: i0 = i0 + 1 j0 = j0 + 1 zero_padding_top = 0 zero_padding_bottom = 0 zero_padding_left = 0 zero_padding_right = 0 if 
i0 < 0: zero_padding_top = abs(i0) i0 = 0 if j0 < 0: zero_padding_left = abs(j0) j0 = 0 if i1 > p0: zero_padding_bottom = i1 - p0 i1 = p0 if j1 > p1: zero_padding_right = j1 - p1 j1 = p1 if dim2: m = x[i0:i1, j0:j1] #print('mshape:', m.shape, kernel_size, zero_padding_top, zero_padding_left) # Zero padding: m = np.pad(m, ((zero_padding_top,zero_padding_bottom),(zero_padding_left,zero_padding_right)), 'constant') if lef != 2: m = np.expand_dims(m, axis=-1) m = np.repeat(m, num_filters, axis=-1) else: xi = x[i0:i1, j0:j1, :] # Zero padding: xi = np.pad(xi, ((zero_padding_top,zero_padding_bottom),(zero_padding_left,zero_padding_right),(0,0)), 'constant') if dim3: xi = np.expand_dims(xi, axis=-1) m = np.repeat(xi, num_new_filters, axis=-1) #print('M,F\n', m[:,:,0], filters[:,:,0]) #print(m.shape, filters.shape) m = m * filters #print('m*f\n', m[:,:,0]) m = np.sum(m, axis=0) m = np.sum(m, axis=0) if dim3: m = np.sum(m, axis=0) o.append(m) output.append(o) output = np.asarray(output) if relu: output[output < 0] = 0 return output def kernelInitializer(self, i, ksize, inSize, outSize): if 'xavier' in self.kernel_initializer: if self.kernel_initializer == 'xavier_normal': if len(ksize) == 4: self.filtersValues[i] = np.random.randn(ksize[0],ksize[1],ksize[2],ksize[3]) * math.sqrt(2.0 / (inSize + outSize)) else: self.filtersValues[i] = np.random.randn(ksize[0],ksize[1],ksize[2]) * math.sqrt(2.0 / (inSize + outSize)) elif self.kernel_initializer == 'xavier_uniform': highVal = math.sqrt(6.0 / (inSize + outSize)) lowVal = -1 * highVal self.filtersValues[i] = np.random.uniform(low=lowVal, high=highVal, size=ksize) else: if self.kernel_initializer == 'he_normal': if len(ksize) == 4: self.filtersValues[i] = np.random.randn(ksize[0],ksize[1],ksize[2],ksize[3]) * math.sqrt(2.0 / inSize) else: self.filtersValues[i] = np.random.randn(ksize[0],ksize[1],ksize[2]) * math.sqrt(2.0 / inSize) elif self.kernel_initializer == 'he_uniform': highVal = math.sqrt(6.0 / inSize) lowVal = -1 * 
highVal self.filtersValues[i] = np.random.uniform(low=lowVal, high=highVal, size=ksize) def convLayersFeedForward(self, im): self.convInputs = [] len_m = len(im.shape) #print('len_m:', len_m) for i, cl in enumerate(self.convFilters): self.convInputs.append(im) if (self.filtersValues[i] is None): if (type(self.convFilterSizes) == list): ks = self.convFilterSizes[i] else: ks = self.convFilterSizes inSize = np.prod(im.shape) if 'xavier' in self.kernel_initializer: if self.batch_size == 1: imshape = np.asarray([im.shape[0], im.shape[1]]) else: imshape = np.asarray([im.shape[1], im.shape[2]]) extraShape = int((ks % 2) == 0) ks2 = int(ks / 2) * 2 outSize = np.prod((imshape - ks2 + extraShape)) * cl else: outSize = 0 if i == 0 and len_m == 3: if self.batch_size == 1: self.kernelInitializer(i, (ks,ks,im.shape[2],cl), inSize, outSize) else: self.kernelInitializer(i, (ks,ks,cl), inSize, outSize) else: self.kernelInitializer(i, (ks,ks,cl), inSize, outSize) k_filters = self.filtersValues[i] if (type(self.convStride) == list): stride_par = self.convStride[i] else: stride_par = self.convStride #print('Convolutional layer', i, '\n') #print('Layer input shape:', im.shape) #print('Layer filters array shape:', k_filters.shape) # Start of convolutions #im = self.conv_filters(im, k_filters, relu=True, stride=stride_par, mode='valid') filtersValues_shape01 = np.asarray([k_filters.shape[0], k_filters.shape[1]]) filtersValues_shape_d2 = (filtersValues_shape01 / 2).astype(int) extraShape = (filtersValues_shape01 % 2) == 0 eS0 = extraShape[0].astype(int) eS1 = extraShape[1].astype(int) posYf = eS0 posXf = eS1 filter_shape0 = k_filters.shape[0] filter_shape1 = k_filters.shape[1] if (len(k_filters.shape) >= 3): num_filters = k_filters.shape[-1] else: num_filters = 1 if self.batch_size == 1: xshape = np.asarray([im.shape[0], im.shape[1]]) else: xshape = np.asarray([im.shape[1], im.shape[2]]) output_shape = xshape - filtersValues_shape_d2*2 + eS0 if ((len(im.shape) < len(k_filters.shape)) or 
(len(im.shape) == 2 and num_filters == 1)): Xr = np.expand_dims(im, axis=-1) Xr = np.repeat(Xr, num_filters, axis=-1) else: if (len(im.shape) == len(k_filters.shape)): if self.batch_size == 1: new_filters = int(im.shape[-1] / num_filters) Xr = np.repeat(im, new_filters, axis=-1) else: Xr = np.expand_dims(im, axis=-1) Xr = np.repeat(Xr, num_filters, axis=-1) else: Xr = im if (len(Xr.shape) == 2): npad = ((0,eS0), (0,eS1)) out_s = [output_shape[0], output_shape[1], 1] elif (len(Xr.shape) == 3): npad = ((0,eS0), (0,eS1), (0,0)) out_s = [output_shape[0], output_shape[1], num_filters] elif (len(Xr.shape) == 4): if self.batch_size == 1: npad = ((0,eS0), (0,eS1), (0,0), (0,0)) out_s = [output_shape[0], output_shape[1], im.shape[2], num_filters] else: npad = ((0,0), (0,eS0), (0,eS1), (0,0)) out_s = [im.shape[0], output_shape[0], output_shape[1], num_filters] X_pad = np.pad(Xr, npad, 'constant') out_s[0 if self.batch_size == 1 else 1] = int(np.ceil(out_s[0 if self.batch_size == 1 else 1] / stride_par)) out_s[1 if self.batch_size == 1 else 2] = int(np.ceil(out_s[1 if self.batch_size == 1 else 2] / stride_par)) conv_output = np.zeros(out_s) if self.batch_size != 1: k_filters = np.expand_dims(k_filters, axis=0) k_filters = np.repeat(k_filters, im.shape[0], axis=0) #print(Xr.shape, X_pad.shape, k_filters.shape, conv_output.shape, output_shape) for posY in range(0, filter_shape0): for posX in range(0, filter_shape1): # valid convolution if self.batch_size == 1: conv_output += X_pad[posYf:posYf+output_shape[0]:stride_par, posXf:posXf+output_shape[1]:stride_par] * k_filters[posY, posX] else: conv_output += X_pad[:, posYf:posYf+output_shape[0]:stride_par, posXf:posXf+output_shape[1]:stride_par] * k_filters[:, posY, posX].reshape(k_filters.shape[0],1,1,k_filters.shape[3]) posXf = posXf + 1 posYf = posYf + 1 posXf = eS1 # End of convolutions if self.pre_norm: ax_f = tuple(range(0,len(conv_output.shape))) if self.batch_size == 1: ax_f = ax_f[0:-1] conv_output = (conv_output - 
np.mean(conv_output, axis=ax_f)) / (np.std(conv_output, axis=ax_f) + 1e-7) else: ax_f = ax_f[1:-1] conv_output = (conv_output - np.mean(conv_output, axis=ax_f).reshape(conv_output.shape[0],1,1,conv_output.shape[3])) / (np.std(conv_output, axis=ax_f).reshape(conv_output.shape[0],1,1,conv_output.shape[3]) + 1e-7) #conv_output = (conv_output - conv_output.mean()) / (conv_output.std() + 1e-7) im = self.ActivationFunction(conv_output, 'relu') #print('Layer output shape:', im.shape, '\n---------------------\n') return im def convLayersBackpropagation(self, last_layer_output, prev_cost): i = len(self.filtersValues) - 1 last_shape = list(last_layer_output.shape) if self.batch_size != 1: batch_el = last_shape[0] last_shape = last_shape[1:] + [batch_el] error_by_x = np.reshape(prev_cost, last_shape) """ if self.batch_size == 1: num_filters = last_layer_output.shape[2] else: num_filters = last_layer_output.shape[3] """ self.log('Start of convLayersBackpropagation:', '\n') #self.log('prev_cost:', prev_cost.shape, prev_cost, '\n') #self.log('last_layer_output:', last_layer_output.shape, last_layer_output, '\n') #self.log('error_by_x:', error_by_x.shape, error_by_x, '\n') #if self.batch_size != 1: #error_by_x = np.mean(error_by_x, axis=0) for k_filters in self.filtersValues[::-1]: X = self.convInputs[i] if self.batch_size != 1: X_batchshape = list(X.shape) X_batch_elements = X_batchshape[0] X_batchshape = X_batchshape[1:] + [X_batch_elements] X = np.reshape(X, X_batchshape) #X = np.mean(X, axis=0) # to dilate gradient if needed because of stride if (type(self.convStride) == list): stride_par = self.convStride[i] else: stride_par = self.convStride if stride_par != 1: #erShape = error_by_x.shape[0] * stride_par erShape = (X.shape[0]) if self.batch_size == 1: error_by_output = np.zeros((erShape, erShape, self.convFilters[i]), dtype=float) else: error_by_output = np.zeros((erShape, erShape, self.convFilters[i], batch_el), dtype=float) #print(error_by_output.shape, error_by_x.shape) 
posI = 0 posJ = 0 erx1 = (error_by_x.shape[0]) erx2 = (error_by_x.shape[1]) # Zero-interweave: for pe_i in range(0, erx1): for pe_j in range(0, erx2): error_by_output[posI, posJ] = error_by_x[pe_i, pe_j] if (posJ + 2) < erShape: posJ = posJ + 2 else: posJ = posJ + 1 if (posI + 2) < erShape: posI = posI + 2 else: posI = posI + 1 posJ = 0 else: # dE/dO error_by_output = error_by_x f_rotated = np.flip(self.filtersValues[i], 0) f_rotated = np.flip(f_rotated, 1) # dE/dF #error_by_filter = self.conv_filters(X, error_by_output, relu=False, stride=1, mode='valid') # dE/dX #error_by_x = self.conv_filters(f_rotated, error_by_output, relu=False, stride=1, mode='full') # Start of convolutions err_output_shape01 = np.asarray([error_by_output.shape[0], error_by_output.shape[1]]) err_out_shape_d2 = (err_output_shape01 / 2).astype(int) xshape = np.asarray([X.shape[0], X.shape[1]]) fshape = np.asarray([f_rotated.shape[0], f_rotated.shape[1]]) extraShape = (err_output_shape01 % 2) == 0 eS0 = extraShape[0].astype(int) eS1 = extraShape[1].astype(int) err_filt_shape = xshape - err_out_shape_d2*2 + eS0 err_x_shape = fshape + err_out_shape_d2*2 + eS0 num_filters = self.filtersValues[i].shape[-1] #print(error_by_output.shape, xshape, err_output_shape01, err_out_shape_d2*2, eS0, err_filt_shape) if self.batch_size == 1: error_by_filter = np.zeros((err_filt_shape[0], err_filt_shape[1], num_filters)) error_by_x = np.zeros((err_x_shape[0], err_x_shape[1], num_filters)) else: error_by_filter = np.zeros((err_filt_shape[0], err_filt_shape[1], num_filters, X_batch_elements)) error_by_x = np.zeros((err_x_shape[0], err_x_shape[1], num_filters, X_batch_elements)) err_out_shape0 = error_by_output.shape[0] err_out_shape1 = error_by_output.shape[1] fil_shape0 = error_by_filter.shape[0] fil_shape1 = error_by_filter.shape[1] ex_shape0 = self.filtersValues[i].shape[0] ex_shape1 = self.filtersValues[i].shape[1] posYf = eS0 posXf = eS1 if (len(X.shape) < 3): Xr = np.expand_dims(X, axis=-1) Xr = np.repeat(Xr, 
num_filters, axis=-1) else: Xr = X if (len(Xr.shape) == 3): X_pad = np.pad(Xr, ((0,eS0), (0,eS1), (0,0)), 'constant') elif (len(Xr.shape) == 4): X_pad = np.pad(Xr, ((0,eS0), (0,eS1), (0,0), (0,0)), 'constant') else: # color image with batch X_pad = np.pad(Xr, ((0,0), (0,eS0), (0,eS1), (0,0), (0,0)), 'constant') layer_filters = self.filtersValues[i] if self.batch_size != 1: layer_filters = np.expand_dims(layer_filters, axis=-1) layer_filters = np.repeat(layer_filters, X_batch_elements, axis=-1) #print(X_pad.shape, error_by_output.shape, error_by_filter.shape, self.filtersValues[i].shape, error_by_output.shape, error_by_x.shape) for posY in range(0, err_out_shape0): for posX in range(0, err_out_shape1): # valid convolution (dE/dF) error_by_filter += X_pad[posYf:posYf+fil_shape0, posXf:posXf+fil_shape1] * error_by_output[posY, posX] # full convolution (dE/dX) error_by_x[posYf:posYf+ex_shape0, posXf:posXf+ex_shape1] += layer_filters * error_by_output[posY, posX] posXf = posXf + 1 posYf = posYf + 1 posXf = eS1 error_by_x = np.flip(error_by_x, 0) error_by_x = np.flip(error_by_x, 1) # End of convolutions #print(X.shape, X_pad.shape, self.filtersValues[i].shape, error_by_filter.shape, error_by_x.shape, error_by_output.shape) #self.log('error_by_filter:', error_by_filter[:,:,0], '\n\n') #self.log('prev filtersValues[i]:', self.filtersValues[i][:,:,0], '\n\n') #self.log('error_by_x:', error_by_x[:,:,0], '\n\n') if self.batch_size != 1: error_by_filter = np.mean(error_by_filter, axis=-1) #if self.pre_norm: #ax_f = tuple(range(0,len(error_by_filter[i].shape)))[0:-1] #error_by_filter = (error_by_filter - np.mean(error_by_filter, axis=ax_f)) / (np.std(error_by_filter, axis=ax_f) + 1e-7) #error_by_filter = (error_by_filter - error_by_filter.mean()) / (error_by_filter.std() + 1e-7) # Filters update self.filtersValues[i] = self.filtersValues[i] - self.learningRateConv * error_by_filter if self.pre_norm: ax_f = tuple(range(0,len(self.filtersValues[i].shape)))[0:-1] 
self.filtersValues[i] = (self.filtersValues[i] - np.mean(self.filtersValues[i], axis=ax_f)) / (np.std(self.filtersValues[i], axis=ax_f) + 1e-7) #self.log('filtersValues[i] updated:', self.filtersValues[i][:,:,0], '\n\n') #self.log('\n-----------------------\n') i = i - 1 self.log('End of convLayersBackpropagation') def draw(self, showWeights=False, textSize=9, customRadius=0): plt.figure(figsize=(10,8)) fig = plt.gcf() ax = fig.gca() ax.set_xlim(xmin=0, xmax=1) ax.set_ylim(ymin=0, ymax=1) xmin, xmax, ymin, ymax = ax.axis() xdim = xmax - xmin ydim = ymax - ymin space_per_layer = xdim / (len(self.hiddenL) + 1) x0 = xmin x1 = xmin + space_per_layer medio_intervalo = space_per_layer / 2 if customRadius <= 0: radio = 1 / ((sum(self.hiddenL) + self.n_layer0) * 5) else: radio = customRadius lista_lineas_xy = [] lasth = self.n_layer0 for capa,h in enumerate([self.n_layer0] + self.hiddenL): space_per_neuron = ydim / h y0 = ymin y1 = ymin + space_per_neuron medio_intervalo_n = space_per_neuron / 2 lista_lineas_xy_pre = [] ne = (lasth * h) - 1 neY = h - 1 for j in range(0, h): ax.add_patch(plt.Circle(((medio_intervalo + x0), (medio_intervalo_n + y0)), radio, color='r')) neX = lasth - 1 for xy in lista_lineas_xy: if True: #j == 2: plt.plot([xy[0],(medio_intervalo + x0)],[xy[1], (medio_intervalo_n + y0)]) #print(capa, ne, self.hiddenWeights[capa-1][ne]) my = ((medio_intervalo_n + y0) - xy[1]) mx = ((medio_intervalo + x0) - xy[0]) pendiente = my / mx ordenada_origen = xy[1] - pendiente * xy[0] margen_ord = 0.015 if pendiente < 0: margen_ord = -0.045 # para compensar la rotacion del texto ordenada_origen = ordenada_origen + margen_ord # para evitar que el texto salga encima de la linea no sobre ella # aleatorio entre las x del segmento de la recta (menos un margen para que no salga demasiado cerca de la neurona) mx2 = random.uniform(xy[0] + 0.04, (medio_intervalo + x0) - 0.04) my2 = pendiente*mx2 + ordenada_origen alfa = math.degrees(math.atan(pendiente)) if showWeights: 
#print(h, capa-1, neX, neY) text(mx2, my2, round(self.hiddenWeights[capa-1][neX][neY],3), rotation = alfa, fontsize = textSize) ne = ne - 1 neX = neX - 1 lista_lineas_xy_pre.append([(medio_intervalo + x0), (medio_intervalo_n + y0)]) neY = neY - 1 y0 = y0 + space_per_neuron y1 = y1 + space_per_neuron lasth = h #print('\n') x0 = x0 + space_per_layer x1 = x1 + space_per_layer #print('-------------\n') lista_lineas_xy = lista_lineas_xy_pre plt.show() def importModel(self, path='', filename='ConvNetAbel_model'): self.hiddenWeights = np.load(path + filename + '_weights.npy', allow_pickle=True) mConfig = np.load(path + filename + '_config.npy', allow_pickle=True) self.n_layer0 = int(mConfig[0]) self.showLogs = bool(mConfig[1]) self.lastLayerNeurons = int(mConfig[2]) self.numEpochs = int(mConfig[3]) self.learningRate = float(mConfig[4]) self.debugMode = int(mConfig[5]) self.softmax = bool(mConfig[6]) self.activationFunction = str(mConfig[7]) self.verbose = bool(mConfig[8]) self.use = str(mConfig[9]) self.batch_size = int(mConfig[10]) self.batch_gradient = str(mConfig[11]) self.batch_mult = int(mConfig[12]) self.dropout = float(mConfig[13]) self.pre_norm = bool(mConfig[14]) self.shuffle = bool(mConfig[15]) self.iterationDrop = float(mConfig[16]) self.version_importedModel = mConfig[17] self.hiddenL2 = mConfig[18] self.hiddenL = mConfig[19] convConfig = np.load(path + filename + '_convConfig.npy', allow_pickle=True) self.convFilters = convConfig[0] self.convStride = convConfig[1] self.convFilterSizes = convConfig[2] self.kernel_initializer = str(convConfig[3]) self.convEpochs = int(convConfig[4]) self.learningRateConv = float(convConfig[5]) self.filtersValues = np.load(path + filename + '_filtersValues.npy', allow_pickle=True) if self.debugMode > 0: self.meanCostByEpoch = np.load(path + filename + '_meanCostByEpoch.npy', allow_pickle=True).tolist() if self.debugMode > 1: self.debugWeights = np.load(path + filename + '_debugWeights.npy', allow_pickle=True).tolist() def 
exportModel(self, path='', filename='ConvNetAbel_model'): np.save(path + filename + '_weights.npy', np.asarray(self.hiddenWeights, dtype=object)) mConfig = [] mConfig.append(self.n_layer0) mConfig.append(self.showLogs) mConfig.append(self.lastLayerNeurons) mConfig.append(self.numEpochs) mConfig.append(self.learningRate) mConfig.append(self.debugMode) mConfig.append(self.softmax) mConfig.append(self.activationFunction) mConfig.append(self.verbose) mConfig.append(self.use) mConfig.append(self.batch_size) mConfig.append(self.batch_gradient) mConfig.append(self.batch_mult) mConfig.append(self.dropout) mConfig.append(self.pre_norm) mConfig.append(self.shuffle) mConfig.append(self.iterationDrop) mConfig.append(self.version) mConfig.append(self.hiddenL2) mConfig.append(self.hiddenL) mConfig = np.asarray(mConfig, dtype=object) np.save(path + filename + '_config.npy', mConfig) convConfig = [] convConfig.append(self.convFilters) convConfig.append(self.convStride) convConfig.append(self.convFilterSizes) convConfig.append(self.kernel_initializer) convConfig.append(self.convEpochs) convConfig.append(self.learningRateConv) convConfig = np.asarray(convConfig, dtype=object) np.save(path + filename + '_convConfig.npy', convConfig) np.save(path + filename + '_filtersValues.npy', np.asarray(self.filtersValues, dtype=np.float32)) if self.debugMode > 0: np.save(path + filename + '_meanCostByEpoch.npy', self.meanCostByEpoch) if self.debugMode > 1: np.save(path + filename + '_debugWeights.npy', np.asarray(self.debugWeights, dtype=object)) def log(self, *m): if self.showLogs: print(*m) def printVerbose(self, *m): if self.verbose: print(*m) def initializeWeight(self, n, i, lastN): if len(self.manualWeights) == 0: numW = n * lastN if self.rangeRandomWeight is None: if self.activationFunction == 'sigmoid': if self.XavierInitialization == 'normalized': # Normalized Xavier initialization highVal = math.sqrt(6.0) / math.sqrt(lastN + n) lowVal = -1 * highVal mnar = np.random.uniform(low=lowVal, 
high=highVal, size=(numW,1)) else: # Xavier initialization mnar = np.random.randn(numW, 1) * math.sqrt(1.0 / lastN) else: mnar = np.random.randn(numW, 1) * math.sqrt(2.0 / lastN) # He initialization else: highVal = self.rangeRandomWeight[1] lowVal = self.rangeRandomWeight[0] mnar = np.random.uniform(low=lowVal, high=highVal, size=(numW,1)) else: mnar = np.asarray(self.manualWeights[i]) #mnar = mnar.reshape(mnar.shape[0], 1) #ns = int(mnar.shape[0] / lastN) #print('ns: ', ns) mnar = mnar.reshape(lastN, n, order='F') return mnar def ActivationFunction(self, x, activ_type='sigmoid'): if activ_type=='sigmoid': return 1.0/(1 + np.exp(-1*x)) elif activ_type=='relu': return np.where(x > 0, x, 0) elif activ_type=='softplus': return np.log(1 + np.exp(x)) elif activ_type=='leakyrelu': return np.where(x > 0, x, 0.01 * x) elif activ_type=='identity': return np.copy(x) else: x[x>0.5] = 1 x[x<=0.5] = 0 return x def functionDerivative(self, x, activ_type='sigmoid'): if activ_type=='sigmoid': return self.ActivationFunction(x,activ_type) * (1-self.ActivationFunction(x,activ_type)) elif activ_type=='relu': return np.where(x >= 0, 1, 0) elif activ_type=='softplus': return 1.0/(1 + np.exp(-1*x)) elif activ_type=='leakyrelu': return np.where(x >= 0, 1, 0.01) elif activ_type=='identity': return 1 else: return 1 def cost(self, y_true, y_pred): c = y_true - y_pred return c def softmaxF(self, x): if np.max(np.abs(x)) < 500: # prevent overflow expX = np.exp(x) return expX / np.sum(expX, axis=-1).reshape(-1, 1) else: return x / np.maximum(1, np.sum(x, axis=-1).reshape(-1, 1)) def pre_norm_forward_FC(self, v_layer): if self.batch_size == 1 or len(v_layer.shape) == 1: v_layer_norm = (v_layer - v_layer.mean()) / (v_layer.std() + 1e-7) else: v_layer_norm = ((v_layer.T - np.mean(v_layer, axis=1)) / (np.std(v_layer, axis=1) + 1e-7)).T return v_layer_norm def fit(self, x, y): n_layer0 = -1 self.hiddenL = copy.deepcopy(self.hiddenL2) hiddenW = [None] * (len(self.hiddenL) + 1) self.lastLayerNeurons = 
y.shape[1] self.hiddenL.append(y.shape[1]) self.convOutputs = [] self.printVerbose('Training started with', x.shape[0], 'samples') if self.batch_size == 1: numIterations = x.shape[0] else: numIterations = math.ceil(x.shape[0] / self.batch_size) numIterations = int(numIterations * (1 - self.iterationDrop)) for epochs in range(0, self.numEpochs): meanCostByEpochE = 0 batch_pos = 0 if epochs < self.convEpochs: xy_ind = np.arange(x.shape[0]) else: xy_ind = np.arange(len(self.convOutputs)) if self.shuffle: np.random.shuffle(xy_ind) for x_pos in range(0, numIterations): if epochs < self.convEpochs: if self.batch_size == 1: c_positions = xy_ind[x_pos] else: if (batch_pos + self.batch_size) < xy_ind.shape[0]: c_positions = xy_ind[batch_pos:batch_pos+self.batch_size] else: c_positions = xy_ind[batch_pos:] x_val = x[c_positions] x_val_batch_s = x_val.shape[0] last_layer_output = self.convLayersFeedForward(x_val) x_val = last_layer_output.flatten() if self.batch_size != 1: x_val = x_val.reshape(x_val_batch_s, int(x_val.shape[0] / x_val_batch_s)) if epochs == (self.convEpochs - 1): self.convOutputs.append([x_val, c_positions]) else: x_val, c_positions = self.convOutputs[xy_ind[x_pos]] #self.log('x_val:', x_val.shape, x_val) #print(x_val.shape) if n_layer0 == -1: if self.batch_size == 1: n_layer0 = x_val.shape[0] else: n_layer0 = x_val.shape[1] self.n_layer0 = n_layer0 v_layer = x_val lastN = n_layer0 layerValues = [] preActivateValues = [] f_vlayer = self.ActivationFunction(v_layer, 'identity') layerValues.append(f_vlayer) preActivateValues.append(v_layer) f_vlayer = v_layer dropout_values = [] for i, hiddenLayer in enumerate(self.hiddenL): entries = hiddenLayer * lastN if hiddenW[i] is None: hiddenW[i] = self.initializeWeight(hiddenLayer, i, lastN) # Initialize weights valuesForPerc = int(entries / hiddenLayer) firstPos = 0 lastPos = valuesForPerc self.log('x_j: ', f_vlayer) self.log('w_j: ', hiddenW[i]) v_layer = f_vlayer.dot(hiddenW[i]) if self.pre_norm and (i < 
(len(self.hiddenL) - 1)): v_layer = self.pre_norm_forward_FC(v_layer) if self.dropout != 0 and (i < (len(self.hiddenL) - 1)): dropout_v = np.random.binomial(1, 1-self.dropout, size=hiddenLayer) / (1-self.dropout) v_layer = v_layer * dropout_v dropout_values.append(dropout_v) self.log('net_j:', v_layer, '\n') if (i == (len(self.hiddenL) - 1)): if(self.softmax): f_vlayer = self.softmaxF(v_layer).reshape(-1) else: if self.use == 'classification': f_vlayer = self.ActivationFunction(v_layer, 'sigmoid') # use sigmoid on last layer if classification else: f_vlayer = self.ActivationFunction(v_layer, 'identity') # use identity on last layer if regression else: f_vlayer = self.ActivationFunction(v_layer, self.activationFunction)#.reshape(-1) layerValues.append(f_vlayer) preActivateValues.append(v_layer) v_layer = f_vlayer self.log('f(net_j):', f_vlayer, '\n') lastN = hiddenLayer coste_anterior = None i = len(self.hiddenL) - 1 #print(f_vlayer) """ if(self.softmax): f_vlayer = self.softmaxF(f_vlayer).reshape(-1) self.log('f_vlayer (Softmax output):', f_vlayer) #print(f_vlayer) """ #print(f_vlayer, '\n\n') self.log('-----------------\nBackPropagation: \n') # backpropagation: for hiddenLayer in ([n_layer0] + self.hiddenL)[::-1]: self.log('Neurons in this layer: ', hiddenLayer) #print('i: ', i, '\n') if coste_anterior is None: if(self.softmax): derivf_coste = self.functionDerivative(v_layer, self.activationFunction) else: if self.use == 'classification': derivf_coste = self.functionDerivative(v_layer, 'sigmoid') else: derivf_coste = self.functionDerivative(v_layer, 'identity') f_cost = self.cost(y[c_positions], f_vlayer) #if self.batch_size != 1: #f_cost = f_cost / v_layer.shape[0] coste = f_cost * derivf_coste if self.batch_size != 1: batch_pos = batch_pos + self.batch_size #coste = coste.reshape(-1) #coste = coste.reshape(coste.shape[0], 1) #if self.batch_size != 1: #coste = np.sum(coste, axis=0) #derivf_coste = np.sum(derivf_coste, axis=0) if self.debugMode > 0: 
meanCostByEpochE = meanCostByEpochE + (abs(coste) if self.batch_size == 1 else np.mean(np.absolute(coste), axis=0)) if self.debugMode > 2: self.costs.append(coste) self.log('derivf_coste: ', derivf_coste, 'cost: ', coste, '\n') else: entries = hiddenLayer * nextN valuesForPerc = int(entries / hiddenLayer) firstPos = 0 lastPos = valuesForPerc #coste = [] #coste = np.zeros(shape=(hiddenLayer)) self.log('prev_error: ', coste_anterior) pesos_salientes = hiddenW[i+1].T #print('hiddenW[i+1][j::hiddenLayer]: ', pesos_salientes) preActivateValueM = preActivateValues[i+1] preDeriv = self.functionDerivative(preActivateValueM, self.activationFunction) self.log('preDeriv: ', preDeriv) costeA = coste_anterior.dot(pesos_salientes) # coste por los pesos que salen de la neurona #costeA = np.asarray(costeA) self.log("preCostA: ", costeA) costeA = costeA * (preDeriv) #costeA = costeA.reshape(-1) #costeA = costeA.T if self.dropout != 0 and i > -1: # dropout is not done on input layer costeA = costeA * dropout_values[i] self.log('costA: ', costeA) layerValueM = layerValues[i+1] #print("coste_anterior.shape: ", coste_anterior.shape) self.log("layer values: ", layerValueM) if self.batch_gradient == 'sum': preT1 = coste_anterior.reshape((1 if self.batch_size==1 else coste_anterior.shape[0]), (coste_anterior.shape[0] if self.batch_size==1 else coste_anterior.shape[1])) preT2 = layerValueM.reshape((layerValueM.shape[0] if self.batch_size==1 else layerValueM.shape[1]), (1 if self.batch_size==1 else layerValueM.shape[0])) elif self.batch_size == 1: preT1 = coste_anterior.reshape(1, coste_anterior.shape[0]) preT2 = layerValueM.reshape(layerValueM.shape[0], 1) else: preT1 = np.mean(coste_anterior, axis=0) preT1 = preT1.reshape(1, preT1.shape[0]) preT2 = np.mean(layerValueM, axis=0) preT2 = preT2.reshape(preT2.shape[0], 1) pre = preT2.dot(preT1) #if self.batch_size != 1: #pre = pre * (1.0 / layerValueM.shape[0]) pre = pre * self.learningRate self.log('pre: ', pre, '\n') self.log('Old weight: ', 
hiddenW[i+1]) hiddenW[i+1] = (hiddenW[i+1] + pre) self.log('New weight: ', hiddenW[i+1], '\n\n') coste = costeA self.log('\n\n') #coste = coste.reshape(-1) #print(coste.shape) #if len(coste.shape) == 3: #coste = coste.reshape(coste.shape[0] * coste.shape[1], coste.shape[2]) #print('Coste: ' , coste, coste.shape) #print("\n\n") coste_anterior = coste nextN = hiddenLayer i = i - 1 #print('------------------') #print('\n\nNuevos pesos: ', hiddenW) #print('Coste anterior shape: ', coste_anterior.shape) if epochs < self.convEpochs: # because of resources limitations self.convLayersBackpropagation(last_layer_output, coste_anterior) self.printVerbose('\nEpoch', str(epochs+1) + '/' + str(self.numEpochs), 'completed') if self.debugMode > 0: self.meanCostByEpoch.append(meanCostByEpochE / numIterations) self.printVerbose('--- Epoch loss:', round(np.mean(self.meanCostByEpoch[-1]),4)) if self.debugMode > 1: self.debugWeights.append(copy.deepcopy(hiddenW)) self.batch_size = int(self.batch_size * self.batch_mult) self.hiddenWeights = hiddenW #print('\n\nNuevos pesos: ', hiddenW) self.printVerbose('\n\nTraining finished\n\n') return self def predict(self, x, noProba=1): n_layer0 = -1 layerValues = np.zeros(shape=(x.shape[0],self.lastLayerNeurons)) batch_pos = 0 if self.batch_size == 1: numIterations = x.shape[0] else: numIterations = math.ceil(x.shape[0] / self.batch_size) for x_pos in range(0, numIterations): if self.batch_size == 1: x_val = x[x_pos] else: if (batch_pos + self.batch_size) < x.shape[0]: x_val = x[batch_pos:batch_pos+self.batch_size] else: x_val = x[batch_pos:] x_val_batch_s = x_val.shape[0] #for x_pos, x_val in enumerate(x): x_val = self.convLayersFeedForward(x_val).flatten() if self.batch_size != 1: x_val = x_val.reshape(x_val_batch_s, int(x_val.shape[0] / x_val_batch_s)) if n_layer0 == -1: n_layer0 = x_val.shape[0] self.n_layer0 = n_layer0 v_layer = x_val lastN = n_layer0 f_vlayer = self.ActivationFunction(v_layer, 'identity') for i, hiddenLayer in 
enumerate(self.hiddenL): entries = hiddenLayer * lastN valuesForPerc = int(entries / hiddenLayer) firstPos = 0 lastPos = valuesForPerc v_layer = f_vlayer.dot(self.hiddenWeights[i]) if self.pre_norm and (i < (len(self.hiddenL) - 1)): v_layer = self.pre_norm_forward_FC(v_layer) if (i == (len(self.hiddenL) - 1)): if(self.softmax): f_vlayer = self.softmaxF(v_layer).reshape(-1) else: if self.use == 'classification': f_vlayer = self.ActivationFunction(v_layer, 'sigmoid') # use sigmoid on last layer if classification else: f_vlayer = self.ActivationFunction(v_layer, 'identity') # use identity on last layer if regression else: f_vlayer = self.ActivationFunction(v_layer, self.activationFunction)#.reshape(-1) v_layer = f_vlayer lastN = hiddenLayer if self.batch_size == 1: layerValues[x_pos] = f_vlayer else: if (batch_pos + self.batch_size) < x.shape[0]: layerValues[batch_pos:batch_pos+self.batch_size] = f_vlayer else: layerValues[batch_pos:] = f_vlayer batch_pos = batch_pos + self.batch_size """ if(self.softmax): layerValues = self.softmaxF(layerValues) """ if noProba==1: if self.use == 'classification': return self.ActivationFunction(layerValues, 2).astype(int) else: return layerValues else: return layerValues def predict_proba(self, x): return self.predict(x, 0) def plot_mean_error_last_layer(self, customLabels=[], byClass=False): if self.debugMode > 0: meancost = np.asarray(self.meanCostByEpoch) if len(meancost.shape) > 1 and not byClass: meancost = np.mean(meancost, axis=1) ptitle = 'Last layer mean error by epoch' fig, ax = plt.subplots(figsize=(8,6)) ax.plot(range(0, meancost.shape[0]), meancost) ax.set(xlabel='Epoch', ylabel='Mean error', title=ptitle) ax.grid() if len(meancost.shape) > 1: if meancost.shape[1] > 1: if len(customLabels) == 0: neur = [("Neuron " + str(i)) for i in range(0, meancost.shape[1])] else: neur = customLabels plt.legend(neur, loc="upper right") plt.show() else: print('ConvNet debug mode must be level 1 or higher') def 
plot_weights_by_epoch(self, max_weights=-1): if self.debugMode > 1: dw = self.debugWeights dwx = dw[0][len(dw[0]) - 1][:] fig, ax = plt.subplots(figsize=(8,6)) ygrafico = {} for jposH, posH in enumerate(range(0, len(dw))): # for each epoch dwF = dw[jposH][len(dw[0]) - 1][:] #print(dwF.shape) for posg, neu in enumerate(dwF): #print(neu.shape) if posg in ygrafico: ygrafico[posg].append(neu[0]) else: ygrafico[posg] = [neu[0]] if max_weights == -1: for ygrafico2 in ygrafico.values(): ax.plot(range(0, len(ygrafico2)), ygrafico2) else: if max_weights < 1: print('max_weights must be bigger than 0') elif max_weights > len(ygrafico.values()): print('max_weights must be lower than total weights of last layer') else: ygrafico3 = [] # Gets the weights that have changed the most from beginning to end. for yi, ygrafico2 in enumerate(ygrafico.values()): a = abs(ygrafico[yi][0] - ygrafico[yi][-1]) #print(ygrafico[yi][0], a) ygrafico3.append([ygrafico2, a]) for ygrafico4 in sorted(ygrafico3, key=lambda tupval: -1*tupval[1])[0:max_weights]: #print(ygrafico4) plt.plot(range(0, len(ygrafico4[0])), ygrafico4[0]) ax.set(xlabel='Epoch', ylabel='Weight', title='Last layer weights by epoch') ax.grid() plt.show() else: print('ConvNet debug mode must be level 2 or higher')
2.34375
2
trading_ig/config.py
schwankner/ig-markets-api-python-library
1
17903
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Configuration loading for trading_ig.

Tries, in order:
1. a local ``trading_ig_config.py`` module exposing ``config``;
2. environment variables named ``IG_SERVICE_<KEY>``.
"""
import os
import logging

ENV_VAR_ROOT = "IG_SERVICE"
CONFIG_FILE_NAME = "trading_ig_config.py"

logger = logging.getLogger(__name__)


class ConfigEnvVar(object):
    """Config object whose attributes are read from environment variables.

    Attribute ``key`` maps to the environment variable
    ``<ENV_VAR_BASE>_<KEY>`` (uppercased).
    """

    def __init__(self, env_var_base):
        # Prefix for every environment variable, e.g. "IG_SERVICE".
        self.ENV_VAR_BASE = env_var_base

    def _env_var(self, key):
        """Map an attribute name to its environment-variable name."""
        return self.ENV_VAR_BASE + "_" + key.upper()

    def get(self, key, default_value=None):
        """Like ``os.environ.get``, with the configured prefix applied."""
        env_var = self._env_var(key)
        return os.environ.get(env_var, default_value)

    def __getattr__(self, key):
        env_var = self._env_var(key)
        try:
            return os.environ[env_var]
        except KeyError:
            # Raise AttributeError (not a bare Exception) so that the
            # attribute protocol (hasattr / getattr with a default) works;
            # AttributeError is still caught by callers using
            # ``except Exception``.
            raise AttributeError(
                "Environment variable '%s' doesn't exist" % env_var)


try:
    from trading_ig_config import config
    logger.info("import config from %s" % CONFIG_FILE_NAME)
except Exception:
    logger.warning("can't import config from config file")
    try:
        config = ConfigEnvVar(ENV_VAR_ROOT)
        logger.info("import config from environment variables '%s_...'"
                    % ENV_VAR_ROOT)
    except Exception:
        logger.warning("can't import config from environment variables")
        # BUG FIX: the original did ``raise("...")`` which *calls* the string
        # and raises ``TypeError: 'str' object is not callable`` instead of
        # surfacing the intended message.
        raise ImportError(
            """Can't import config - you might create a '%s' filename or use environment variables such as '%s_...'"""
            % (CONFIG_FILE_NAME, ENV_VAR_ROOT))
2.8125
3
train.py
jmlipman/MedicDeepLabv3Plus
1
17904
# Example usage:
# python train.py --device cuda --epochs 10 --input /home/miguelv/data/in/train/ --output /home/miguelv/data/out/delete/test/25/
import argparse  # NOTE(review): argparse was used but never imported here; presumably re-exported by ``lib.utils`` -- explicit import is safer
import os, time, torch, json
import numpy as np
import nibabel as nib
from lib.utils import *
from lib.losses import Loss
from torch.utils.data import DataLoader
from datetime import datetime
from lib.models.MedicDeepLabv3Plus import MedicDeepLabv3Plus
from lib.data.DataWrapper import DataWrapper


def get_arguments():
    """Parse and validate the command-line arguments.

    Returns:
        argparse.Namespace with validated options. ``device`` is normalized
        to a torch device string ("cuda:N" or "cpu"), ``val_metrics`` to a
        list, and ``output`` to a fresh numbered sub-directory (created as a
        side effect).

    Raises:
        Exception: if the --input/--validation folders don't exist, --output
            points to an existing file, the requested GPU doesn't exist, or
            --val_metrics contains an unknown metric.
    """
    def str2bool(v):
        # Helper for boolean CLI flags (currently unused by any argument).
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        elif v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        else:
            raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser()
    # Data
    parser.add_argument("--input", type=str, required=True,
            help="Directory with the data for optimizing MedicDeepLabv3+")
    # Training
    parser.add_argument("--epochs", type=int, default=300,
            help="Epochs. If 0: only evaluate")
    parser.add_argument("--batch_size", type=int, default=1,
            help="Batch size")
    parser.add_argument("--lr", type=float, default=1e-4,
            help="Learning rate")
    parser.add_argument("--wd", type=float, default=0.0,
            help="Weight decay")
    parser.add_argument("--filters", type=int, default=32,
            help="Number of filters (fewer filters -> lower GPU requirements)")
    # Validation
    parser.add_argument("--validation", type=str, default="",
            help="Directory with the data for validation")
    parser.add_argument("--val_interval", type=int, default=1,
            help="After how many epochs data is validated")
    parser.add_argument("--val_metrics", type=str, default="dice",
            help="List of metrics to measure during validation")
    # Other
    parser.add_argument("--output", type=str, required=True,
            help="Output directory (if it doesn't exist, it will create it)")
    parser.add_argument("--gpu", type=int, default=0, dest="device",
            help="GPU Device. Write -1 if no GPU is available")
    parser.add_argument("--model_state", type=str, default="",
            help="File that contains the saved parameters of the model")
    parsed = parser.parse_args()

    # --input must be an existing directory.
    if not os.path.isdir(parsed.input):
        raise Exception("The input folder `" + parsed.input
                + "` does not exist")

    # --output: always run inside a fresh numbered sub-folder so successive
    # runs never overwrite each other.
    if os.path.exists(parsed.output):
        if os.path.isfile(parsed.output):
            raise Exception("The provided path for the --output `"
                    + parsed.output + "` corresponds to an existing file. "
                    "Provide a non-existing path or a folder.")
        elif os.path.isdir(parsed.output):
            files = [int(f) for f in os.listdir(parsed.output) if f.isdigit()]
            parsed.output = os.path.join(parsed.output, str(len(files)+1), "")
            os.makedirs(parsed.output)
        else:
            raise Exception("The provided path for the --output `"
                    + parsed.output + "` is invalid. "
                    "Provide a non-existing path or a folder.")
    else:
        parsed.output = os.path.join(parsed.output, "1", "")
        os.makedirs(parsed.output)

    # --validation is optional; if given it must exist.
    if parsed.validation != "" and not os.path.isdir(parsed.validation):
        raise Exception("The validation folder `" + parsed.validation
                + "` does not exist")
    if parsed.validation == "":
        print("> Note: No validation data was provided, so validation "
              "won't be done during MedicDeepLabv3+ optimization")

    # --gpu: index must reference an existing device (-1 means CPU).
    if parsed.device >= torch.cuda.device_count():
        if torch.cuda.device_count() == 0:
            print("> No available GPUs. Add --gpu -1 to not use GPU. "
                  "NOTE: This may take FOREVER to run.")
        else:
            print("> Available GPUs:")
            for i in range(torch.cuda.device_count()):
                print("   > GPU #"+str(i)
                        +" ("+torch.cuda.get_device_name(i)+")")
        raise Exception("The GPU #"+str(parsed.device)
                +" does not exist. Check available GPUs.")

    # Normalize the GPU index to a torch device string.
    if parsed.device > -1:
        parsed.device = "cuda:"+str(parsed.device)
    else:
        parsed.device = "cpu"

    # Metrics that can be measured during validation.
    allowed_metrics = ["dice", "HD", "compactness"]
    parsed.val_metrics = parsed.val_metrics.split(",")
    for m in parsed.val_metrics:
        if not m in allowed_metrics:
            raise Exception("Wrong --val_metrics: "+str(m)+". "
                    "Only allowed: "+str(allowed_metrics))

    return parsed


def main(args):
    """Build, optimize and (optionally) validate MedicDeepLabv3+."""
    log("Start training MedicDeepLabv3+", args.output)

    # Folder where the model checkpoints will be saved.
    os.makedirs(args.output + "model")

    # Instantiate and initialize the model (optionally from a saved state).
    model = MedicDeepLabv3Plus(modalities=1, n_classes=3,
                               first_filters=args.filters)
    model.initialize(device=args.device, output=args.output,
                     model_state=args.model_state)

    # Datasets (validation may be empty).
    tr_data = DataWrapper(args.input, "train")
    val_data = DataWrapper(args.validation, "val")

    if len(tr_data) > 0 and args.epochs > 0:
        # DataLoaders
        tr_loader = DataLoader(tr_data, batch_size=args.batch_size,
                shuffle=True, pin_memory=False, num_workers=6)
        if len(val_data) > 0:
            val_loader = DataLoader(val_data, batch_size=args.batch_size,
                    shuffle=False, pin_memory=False, num_workers=6)
        else:
            val_loader = []  # So that len(val_loader) = 0

        # Loss function (deep supervision over multiple outputs).
        loss = Loss("CrossEntropyDiceLoss_multiple")

        # Optimizer
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                weight_decay=args.wd)

        # Train the model
        model.fit(tr_loader=tr_loader, val_loader=val_loader,
                epochs=args.epochs, val_interval=args.val_interval,
                loss=loss, val_metrics=args.val_metrics, opt=optimizer)

    log("End", args.output)


if __name__ == "__main__":
    # Get command-line arguments
    args = get_arguments()
    # Train MedicDeepLabv3+
    main(args)
2.265625
2
src/graph_util.py
oonat/inverse-distance-weighted-trust-based-recommender
0
17905
import numpy as np
from toml_parser import Parser
from scipy.sparse.csgraph import dijkstra, csgraph_from_dense
from sklearn.metrics.pairwise import nan_euclidean_distances
from math import sqrt


class Graph(object):
    """Customer trust graph built from a customer-by-item transaction matrix.

    Trust between two customers is the reciprocal of their shortest-path
    distance in the (optionally weighted) co-purchase graph, with paths
    truncated at ``max_distance`` (read from ``config.toml``).
    """

    def __init__(self, transactions, weighted=True):
        # transactions: 2-D array, rows = customers, columns = items;
        # 0 is treated as "missing" (no transaction) -- TODO confirm with callers.
        config = Parser("config.toml").load()
        self._max_distance = \
            config["graph"]["max_distance"]
        self._transactions = transactions
        self._weighted = weighted
        self._create_customer_trust_matrix()

    def _create_adjacency_matrix(self):
        """Build the customer-to-customer adjacency matrix.

        Weighted mode: NaN-aware euclidean distance between customers
        (+1 on every finite entry so no connected pair gets weight 0);
        NaN means the pair shares no observed item.
        Unweighted mode: boolean matrix linking customers that bought at
        least one common item.
        """
        if self._weighted:
            self._adjacency_matrix = nan_euclidean_distances(
                self._transactions, self._transactions, missing_values=0)
            # Disabled normalization kept for reference (why sqrt is imported):
            # self._adjacency_matrix /= sqrt(self._transactions.shape[1])
            self._adjacency_matrix[~np.isnan(self._adjacency_matrix)] += 1
        else:
            # BUG FIX: ``np.bool`` was removed in NumPy 1.24; the builtin
            # ``bool`` is the documented, behavior-identical replacement.
            self._adjacency_matrix = np.zeros(
                (self._transactions.shape[0], self._transactions.shape[0]),
                dtype=bool,
            )
            # For every item (column), the customers that bought it.
            list_of_neighbour_customers = [
                np.nonzero(t)[0] for t in self._transactions.T
            ]
            # Connect every pair of co-purchasers, symmetrically.
            for neighbour_customers in list_of_neighbour_customers:
                for i in range(neighbour_customers.shape[0]):
                    self._adjacency_matrix[neighbour_customers[i], neighbour_customers[i+1:]] = \
                        self._adjacency_matrix[neighbour_customers[i+1:], neighbour_customers[i]] = True

    def _create_distance_matrix(self):
        """Shortest-path distances between customers, limited to max_distance."""
        self._create_adjacency_matrix()
        if self._weighted:
            # NaN entries mean "no direct edge" (null_value).
            adjacency_csgraph = csgraph_from_dense(self._adjacency_matrix,
                                                   null_value=np.nan)
            self._distance_matrix = \
                dijkstra(csgraph=adjacency_csgraph, directed=False,
                         limit=self._max_distance)
        else:
            self._distance_matrix = \
                dijkstra(csgraph=self._adjacency_matrix, directed=False,
                         unweighted=True, limit=self._max_distance)
        # Unreachable pairs (inf) get distance 0, i.e. zero trust below.
        self._distance_matrix[~np.isfinite(self._distance_matrix)] = 0

    def _create_customer_trust_matrix(self):
        """Trust = 1 / distance; pairs with distance 0 get trust 0."""
        self._create_distance_matrix()
        self._customer_trust_matrix = \
            np.reciprocal(self._distance_matrix,
                          out=np.zeros_like(self._distance_matrix),
                          where=self._distance_matrix != 0)
2.71875
3
qtapps/skrf_qtwidgets/analyzers/analyzer_rs_zva.py
mike0164/scikit-rf
379
17906
# <gh_stars>100-1000  (dataset extraction marker; not part of the original source)
from skrf.vi.vna import rs_zva


class Analyzer(rs_zva.ZVA):
    """Rohde & Schwarz ZVA vector network analyzer entry.

    Only class-level metadata is declared here; all instrument behaviour is
    inherited from ``rs_zva.ZVA``. Presumably these constants are consumed
    by the skrf_qtwidgets analyzer-selection UI -- confirm against callers.
    """
    DEFAULT_VISA_ADDRESS = "GPIB::16::INSTR"  # default VISA resource string
    NAME = "Rhode & Schwartz ZVA"  # display name (spelling kept as in source)
    NPORTS = 4  # number of instrument test ports
    NCHANNELS = 32  # number of measurement channels
    SCPI_VERSION_TESTED = ''  # SCPI/firmware version validated (none recorded)
1.296875
1
rules/taxonomic_classification/utils.py
dahak-metagenomics/taco-taxonomic-classification
0
17907
def container_image_is_external(biocontainers, app):
    """
    Return a boolean: is this container going to be run
    using an external URL (quay.io/biocontainers),
    or is it going to use a local, named Docker image?

    :param biocontainers: dict keyed on app name; each value is a dict of
        container parameters ('use_local', 'local', 'quayurl', 'version').
    :param app: name of the application to look up.
    :return: False when 'use_local' is explicitly True, otherwise True.
    """
    d = biocontainers[app]
    if ('use_local' in d) and (d['use_local'] is True):
        # This container does not use an external url
        return False
    else:
        # This container uses a quay.io url
        return True


def container_image_name(biocontainers, app):
    """
    Get the name of a container image for app,
    using params dictionary biocontainers.

    Verification:
    - Check that the user provides 'local' if 'use_local' is True
    - Check that the user provides both 'quayurl' and 'version'

    :return: the image reference string -- "<quayurl>:<version>" for
        external images, or the value of the 'local' key otherwise.
    :raises Exception: if the required keys are missing.
    """
    if container_image_is_external(biocontainers, app):
        try:
            # BUG FIX: the original referenced an undefined loop variable
            # "k" and an undefined list "quayurls" (NameError at runtime);
            # use "app" and return the single image reference directly.
            qurl = biocontainers[app]['quayurl']
            qvers = biocontainers[app]['version']
            return qurl + ":" + qvers
        except KeyError:
            err = "Error: quay.io URL for %s biocontainer " % (app)
            err += "could not be determined"
            raise Exception(err)
    else:
        try:
            return biocontainers[app]['local']
        except KeyError:
            err = "Error: the parameters provided specify a local "
            err += "container image should be used for %s, but none " % (app)
            err += "was specified using the 'local' key."
            raise Exception(err)
3.203125
3
Contents/Libraries/Shared/guessit/rules/properties/episodes.py
slvxstar/Kinopoisk.bundle
7
17908
#!/usr/bin/env python # -*- coding: utf-8 -*- """ episode, season, disc, episode_count, season_count and episode_details properties """ import copy from collections import defaultdict from rebulk import Rebulk, RemoveMatch, Rule, AppendMatch, RenameMatch from rebulk.match import Match from rebulk.remodule import re from rebulk.utils import is_iterable from .title import TitleFromPosition from ..common import dash, alt_dash, seps, seps_no_fs from ..common.formatters import strip from ..common.numeral import numeral, parse_numeral from ..common.pattern import is_disabled from ..common.validators import compose, seps_surround, seps_before, int_coercable from ...reutils import build_or_pattern def episodes(config): """ Builder for rebulk object. :param config: rule configuration :type config: dict :return: Created Rebulk object :rtype: Rebulk """ # pylint: disable=too-many-branches,too-many-statements,too-many-locals def is_season_episode_disabled(context): """Whether season and episode rules should be enabled.""" return is_disabled(context, 'episode') or is_disabled(context, 'season') rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True) rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator', 'episodeMarker', 'seasonMarker']) episode_max_range = config['episode_max_range'] season_max_range = config['season_max_range'] def episodes_season_chain_breaker(matches): """ Break chains if there's more than 100 offset between two neighbor values. 
:param matches: :type matches: :return: :rtype: """ eps = matches.named('episode') if len(eps) > 1 and abs(eps[-1].value - eps[-2].value) > episode_max_range: return True seasons = matches.named('season') if len(seasons) > 1 and abs(seasons[-1].value - seasons[-2].value) > season_max_range: return True return False rebulk.chain_defaults(chain_breaker=episodes_season_chain_breaker) def season_episode_conflict_solver(match, other): """ Conflict solver for episode/season patterns :param match: :param other: :return: """ if match.name != other.name: if match.name == 'episode' and other.name == 'year': return match if match.name in ('season', 'episode'): if other.name in ('video_codec', 'audio_codec', 'container', 'date'): return match if (other.name == 'audio_channels' and 'weak-audio_channels' not in other.tags and not match.initiator.children.named(match.name + 'Marker')) or ( other.name == 'screen_size' and not int_coercable(other.raw)): return match if other.name in ('season', 'episode') and match.initiator != other.initiator: if (match.initiator.name in ('weak_episode', 'weak_duplicate') and other.initiator.name in ('weak_episode', 'weak_duplicate')): return '__default__' for current in (match, other): if 'weak-episode' in current.tags or 'x' in current.initiator.raw.lower(): return current return '__default__' season_words = config['season_words'] episode_words = config['episode_words'] of_words = config['of_words'] all_words = config['all_words'] season_markers = config['season_markers'] season_ep_markers = config['season_ep_markers'] disc_markers = config['disc_markers'] episode_markers = config['episode_markers'] range_separators = config['range_separators'] weak_discrete_separators = list(sep for sep in seps_no_fs if sep not in range_separators) strong_discrete_separators = config['discrete_separators'] discrete_separators = strong_discrete_separators + weak_discrete_separators max_range_gap = config['max_range_gap'] def ordering_validator(match): """ 
Validator for season list. They should be in natural order to be validated. episode/season separated by a weak discrete separator should be consecutive, unless a strong discrete separator or a range separator is present in the chain (1.3&5 is valid, but 1.3-5 is not valid and 1.3.5 is not valid) """ values = match.children.to_dict() if 'season' in values and is_iterable(values['season']): # Season numbers must be in natural order to be validated. if not list(sorted(values['season'])) == values['season']: return False if 'episode' in values and is_iterable(values['episode']): # Season numbers must be in natural order to be validated. if not list(sorted(values['episode'])) == values['episode']: return False def is_consecutive(property_name): """ Check if the property season or episode has valid consecutive values. :param property_name: :type property_name: :return: :rtype: """ previous_match = None valid = True for current_match in match.children.named(property_name): if previous_match: match.children.previous(current_match, lambda m: m.name == property_name + 'Separator') separator = match.children.previous(current_match, lambda m: m.name == property_name + 'Separator', 0) if separator.raw not in range_separators and separator.raw in weak_discrete_separators: if not 0 < current_match.value - previous_match.value <= max_range_gap + 1: valid = False if separator.raw in strong_discrete_separators: valid = True break previous_match = current_match return valid return is_consecutive('episode') and is_consecutive('season') # S01E02, 01x02, S01S02S03 rebulk.chain(formatter={'season': int, 'episode': int}, tags=['SxxExx'], abbreviations=[alt_dash], children=True, private_parent=True, validate_all=True, validator={'__parent__': ordering_validator}, conflict_solver=season_episode_conflict_solver, disabled=is_season_episode_disabled) \ .regex(build_or_pattern(season_markers, name='seasonMarker') + r'(?P<season>\d+)@?' 
+ build_or_pattern(episode_markers + disc_markers, name='episodeMarker') + r'@?(?P<episode>\d+)', validate_all=True, validator={'__parent__': seps_before}).repeater('+') \ .regex(build_or_pattern(episode_markers + disc_markers + discrete_separators + range_separators, name='episodeSeparator', escape=True) + r'(?P<episode>\d+)').repeater('*') \ .chain() \ .regex(r'(?P<season>\d+)@?' + build_or_pattern(season_ep_markers, name='episodeMarker') + r'@?(?P<episode>\d+)', validate_all=True, validator={'__parent__': seps_before}) \ .chain() \ .regex(r'(?P<season>\d+)@?' + build_or_pattern(season_ep_markers, name='episodeMarker') + r'@?(?P<episode>\d+)', validate_all=True, validator={'__parent__': seps_before}) \ .regex(build_or_pattern(season_ep_markers + discrete_separators + range_separators, name='episodeSeparator', escape=True) + r'(?P<episode>\d+)').repeater('*') \ .chain() \ .regex(build_or_pattern(season_markers, name='seasonMarker') + r'(?P<season>\d+)', validate_all=True, validator={'__parent__': seps_before}) \ .regex(build_or_pattern(season_markers + discrete_separators + range_separators, name='seasonSeparator', escape=True) + r'(?P<season>\d+)').repeater('*') # episode_details property for episode_detail in ('Special', 'Pilot', 'Unaired', 'Final'): rebulk.string(episode_detail, value=episode_detail, name='episode_details', disabled=lambda context: is_disabled(context, 'episode_details')) def validate_roman(match): """ Validate a roman match if surrounded by separators :param match: :type match: :return: :rtype: """ if int_coercable(match.raw): return True return seps_surround(match) rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator', 'episodeMarker', 'seasonMarker'], validate_all=True, validator={'__parent__': seps_surround}, children=True, private_parent=True, conflict_solver=season_episode_conflict_solver) rebulk.chain(abbreviations=[alt_dash], formatter={'season': parse_numeral, 'count': parse_numeral}, validator={'__parent__': 
compose(seps_surround, ordering_validator), 'season': validate_roman, 'count': validate_roman}, disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'season')) \ .defaults(validator=None) \ .regex(build_or_pattern(season_words, name='seasonMarker') + '@?(?P<season>' + numeral + ')') \ .regex(r'' + build_or_pattern(of_words) + '@?(?P<count>' + numeral + ')').repeater('?') \ .regex(r'@?' + build_or_pattern(range_separators + discrete_separators + ['@'], name='seasonSeparator', escape=True) + r'@?(?P<season>\d+)').repeater('*') rebulk.regex(build_or_pattern(episode_words, name='episodeMarker') + r'-?(?P<episode>\d+)' + r'(?:v(?P<version>\d+))?' + r'(?:-?' + build_or_pattern(of_words) + r'-?(?P<count>\d+))?', # Episode 4 abbreviations=[dash], formatter={'episode': int, 'version': int, 'count': int}, disabled=lambda context: context.get('type') == 'episode' or is_disabled(context, 'episode')) rebulk.regex(build_or_pattern(episode_words, name='episodeMarker') + r'-?(?P<episode>' + numeral + ')' + r'(?:v(?P<version>\d+))?' + r'(?:-?' 
+ build_or_pattern(of_words) + r'-?(?P<count>\d+))?', # Episode 4 abbreviations=[dash], validator={'episode': validate_roman}, formatter={'episode': parse_numeral, 'version': int, 'count': int}, disabled=lambda context: context.get('type') != 'episode' or is_disabled(context, 'episode')) rebulk.regex(r'S?(?P<season>\d+)-?(?:xE|Ex|E|x)-?(?P<other>' + build_or_pattern(all_words) + ')', tags=['SxxExx'], abbreviations=[dash], validator=None, formatter={'season': int, 'other': lambda match: 'Complete'}, disabled=lambda context: is_disabled(context, 'season')) # 12, 13 rebulk.chain(tags=['weak-episode'], formatter={'episode': int, 'version': int}, disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'episode')) \ .defaults(validator=None) \ .regex(r'(?P<episode>\d{2})') \ .regex(r'v(?P<version>\d+)').repeater('?') \ .regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{2})').repeater('*') # 012, 013 rebulk.chain(tags=['weak-episode'], formatter={'episode': int, 'version': int}, disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'episode')) \ .defaults(validator=None) \ .regex(r'0(?P<episode>\d{1,2})') \ .regex(r'v(?P<version>\d+)').repeater('?') \ .regex(r'(?P<episodeSeparator>[x-])0(?P<episode>\d{1,2})').repeater('*') # 112, 113 rebulk.chain(tags=['weak-episode'], formatter={'episode': int, 'version': int}, name='weak_episode', disabled=lambda context: context.get('type') == 'movie' or is_disabled(context, 'episode')) \ .defaults(validator=None) \ .regex(r'(?P<episode>\d{3,4})') \ .regex(r'v(?P<version>\d+)').repeater('?') \ .regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{3,4})').repeater('*') # 1, 2, 3 rebulk.chain(tags=['weak-episode'], formatter={'episode': int, 'version': int}, disabled=lambda context: context.get('type') != 'episode' or is_disabled(context, 'episode')) \ .defaults(validator=None) \ .regex(r'(?P<episode>\d)') \ .regex(r'v(?P<version>\d+)').repeater('?') \ 
.regex(r'(?P<episodeSeparator>[x-])(?P<episode>\d{1,2})').repeater('*') # e112, e113, 1e18, 3e19 # TODO: Enhance rebulk for validator to be used globally (season_episode_validator) rebulk.chain(formatter={'season': int, 'episode': int, 'version': int}, disabled=lambda context: is_disabled(context, 'episode')) \ .defaults(validator=None) \ .regex(r'(?P<season>\d{1,2})?(?P<episodeMarker>e)(?P<episode>\d{1,4})') \ .regex(r'v(?P<version>\d+)').repeater('?') \ .regex(r'(?P<episodeSeparator>e|x|-)(?P<episode>\d{1,4})').repeater('*') # ep 112, ep113, ep112, ep113 rebulk.chain(abbreviations=[dash], formatter={'episode': int, 'version': int}, disabled=lambda context: is_disabled(context, 'episode')) \ .defaults(validator=None) \ .regex(r'ep-?(?P<episode>\d{1,4})') \ .regex(r'v(?P<version>\d+)').repeater('?') \ .regex(r'(?P<episodeSeparator>ep|e|x|-)(?P<episode>\d{1,4})').repeater('*') # cap 112, cap 112_114 rebulk.chain(abbreviations=[dash], tags=['see-pattern'], formatter={'season': int, 'episode': int}, disabled=is_season_episode_disabled) \ .defaults(validator=None) \ .regex(r'(?P<seasonMarker>cap)-?(?P<season>\d{1,2})(?P<episode>\d{2})') \ .regex(r'(?P<episodeSeparator>-)(?P<season>\d{1,2})(?P<episode>\d{2})').repeater('?') # 102, 0102 rebulk.chain(tags=['weak-episode', 'weak-duplicate'], formatter={'season': int, 'episode': int, 'version': int}, name='weak_duplicate', conflict_solver=season_episode_conflict_solver, disabled=lambda context: (context.get('episode_prefer_number', False) or context.get('type') == 'movie') or is_season_episode_disabled(context)) \ .defaults(validator=None) \ .regex(r'(?P<season>\d{1,2})(?P<episode>\d{2})') \ .regex(r'v(?P<version>\d+)').repeater('?') \ .regex(r'(?P<episodeSeparator>x|-)(?P<episode>\d{2})').repeater('*') rebulk.regex(r'v(?P<version>\d+)', children=True, private_parent=True, formatter=int, disabled=lambda context: is_disabled(context, 'version')) rebulk.defaults(private_names=['episodeSeparator', 'seasonSeparator']) # TODO: 
List of words # detached of X count (season/episode) rebulk.regex(r'(?P<episode>\d+)-?' + build_or_pattern(of_words) + r'-?(?P<count>\d+)-?' + build_or_pattern(episode_words) + '?', abbreviations=[dash], children=True, private_parent=True, formatter=int, disabled=lambda context: is_disabled(context, 'episode')) rebulk.regex(r'Minisodes?', name='episode_format', value="Minisode", disabled=lambda context: is_disabled(context, 'episode_format')) rebulk.rules(WeakConflictSolver, RemoveInvalidSeason, RemoveInvalidEpisode, SeePatternRange(range_separators + ['_']), EpisodeNumberSeparatorRange(range_separators), SeasonSeparatorRange(range_separators), RemoveWeakIfMovie, RemoveWeakIfSxxExx, RemoveWeakDuplicate, EpisodeDetailValidator, RemoveDetachedEpisodeNumber, VersionValidator, RemoveWeak, RenameToAbsoluteEpisode, CountValidator, EpisodeSingleDigitValidator, RenameToDiscMatch) return rebulk class WeakConflictSolver(Rule): """ Rule to decide whether weak-episode or weak-duplicate matches should be kept. If an anime is detected: - weak-duplicate matches should be removed - weak-episode matches should be tagged as anime Otherwise: - weak-episode matches are removed unless they're part of an episode range match. """ priority = 128 consequence = [RemoveMatch, AppendMatch] def enabled(self, context): return context.get('type') != 'movie' @classmethod def is_anime(cls, matches): """Return True if it seems to be an anime. 
Anime characteristics: - version, crc32 matches - screen_size inside brackets - release_group at start and inside brackets """ if matches.named('version') or matches.named('crc32'): return True for group in matches.markers.named('group'): if matches.range(group.start, group.end, predicate=lambda m: m.name == 'screen_size'): return True if matches.markers.starting(group.start, predicate=lambda m: m.name == 'path'): hole = matches.holes(group.start, group.end, index=0) if hole and hole.raw == group.raw: return True return False def when(self, matches, context): to_remove = [] to_append = [] anime_detected = self.is_anime(matches) for filepart in matches.markers.named('path'): weak_matches = matches.range(filepart.start, filepart.end, predicate=( lambda m: m.initiator.name == 'weak_episode')) weak_dup_matches = matches.range(filepart.start, filepart.end, predicate=( lambda m: m.initiator.name == 'weak_duplicate')) if anime_detected: if weak_matches: to_remove.extend(weak_dup_matches) for match in matches.range(filepart.start, filepart.end, predicate=( lambda m: m.name == 'episode' and m.initiator.name != 'weak_duplicate')): episode = copy.copy(match) episode.tags = episode.tags + ['anime'] to_append.append(episode) to_remove.append(match) elif weak_dup_matches: episodes_in_range = matches.range(filepart.start, filepart.end, predicate=( lambda m: m.name == 'episode' and m.initiator.name == 'weak_episode' and m.initiator.children.named('episodeSeparator') )) if not episodes_in_range and not matches.range(filepart.start, filepart.end, predicate=lambda m: 'SxxExx' in m.tags): to_remove.extend(weak_matches) else: for match in episodes_in_range: episode = copy.copy(match) episode.tags = [] to_append.append(episode) to_remove.append(match) if to_append: to_remove.extend(weak_dup_matches) return to_remove, to_append class CountValidator(Rule): """ Validate count property and rename it """ priority = 64 consequence = [RemoveMatch, RenameMatch('episode_count'), 
RenameMatch('season_count')] properties = {'episode_count': [None], 'season_count': [None]} def when(self, matches, context): to_remove = [] episode_count = [] season_count = [] for count in matches.named('count'): previous = matches.previous(count, lambda match: match.name in ['episode', 'season'], 0) if previous: if previous.name == 'episode': episode_count.append(count) elif previous.name == 'season': season_count.append(count) else: to_remove.append(count) return to_remove, episode_count, season_count class SeePatternRange(Rule): """ Create matches for episode range for SEE pattern. E.g.: Cap.102_104 """ priority = 128 consequence = [RemoveMatch, AppendMatch] def __init__(self, range_separators): super(SeePatternRange, self).__init__() self.range_separators = range_separators def when(self, matches, context): to_remove = [] to_append = [] for separator in matches.tagged('see-pattern', lambda m: m.name == 'episodeSeparator'): previous_match = matches.previous(separator, lambda m: m.name == 'episode' and 'see-pattern' in m.tags, 0) next_match = matches.next(separator, lambda m: m.name == 'season' and 'see-pattern' in m.tags, 0) if not next_match: continue next_match = matches.next(next_match, lambda m: m.name == 'episode' and 'see-pattern' in m.tags, 0) if previous_match and next_match and separator.value in self.range_separators: to_remove.append(next_match) for episode_number in range(previous_match.value + 1, next_match.value + 1): match = copy.copy(next_match) match.value = episode_number to_append.append(match) to_remove.append(separator) return to_remove, to_append class AbstractSeparatorRange(Rule): """ Remove separator matches and create matches for season range. 
""" priority = 128 consequence = [RemoveMatch, AppendMatch] def __init__(self, range_separators, property_name): super(AbstractSeparatorRange, self).__init__() self.range_separators = range_separators self.property_name = property_name def when(self, matches, context): to_remove = [] to_append = [] for separator in matches.named(self.property_name + 'Separator'): previous_match = matches.previous(separator, lambda m: m.name == self.property_name, 0) next_match = matches.next(separator, lambda m: m.name == self.property_name, 0) initiator = separator.initiator if previous_match and next_match and separator.value in self.range_separators: to_remove.append(next_match) for episode_number in range(previous_match.value + 1, next_match.value): match = copy.copy(next_match) match.value = episode_number initiator.children.append(match) to_append.append(match) to_append.append(next_match) to_remove.append(separator) previous_match = None for next_match in matches.named(self.property_name): if previous_match: separator = matches.input_string[previous_match.initiator.end:next_match.initiator.start] if separator not in self.range_separators: separator = strip(separator) if separator in self.range_separators: initiator = previous_match.initiator for episode_number in range(previous_match.value + 1, next_match.value): match = copy.copy(next_match) match.value = episode_number initiator.children.append(match) to_append.append(match) to_append.append(Match(previous_match.end, next_match.start - 1, name=self.property_name + 'Separator', private=True, input_string=matches.input_string)) to_remove.append(next_match) # Remove and append match to support proper ordering to_append.append(next_match) previous_match = next_match return to_remove, to_append class RenameToAbsoluteEpisode(Rule): """ Rename episode to absolute_episodes. Absolute episodes are only used if two groups of episodes are detected: S02E04-06 25-27 25-27 S02E04-06 2x04-06 25-27 28. 
Anime Name S02E05 The matches in the group with higher episode values are renamed to absolute_episode. """ consequence = RenameMatch('absolute_episode') def when(self, matches, context): # pylint:disable=inconsistent-return-statements initiators = {match.initiator for match in matches.named('episode') if len(match.initiator.children.named('episode')) > 1} if len(initiators) != 2: ret = [] for filepart in matches.markers.named('path'): if matches.range(filepart.start + 1, filepart.end, predicate=lambda m: m.name == 'episode'): ret.extend( matches.starting(filepart.start, predicate=lambda m: m.initiator.name == 'weak_episode')) return ret initiators = sorted(initiators, key=lambda item: item.end) if not matches.holes(initiators[0].end, initiators[1].start, predicate=lambda m: m.raw.strip(seps)): first_range = matches.named('episode', predicate=lambda m: m.initiator == initiators[0]) second_range = matches.named('episode', predicate=lambda m: m.initiator == initiators[1]) if len(first_range) == len(second_range): if second_range[0].value > first_range[0].value: return second_range if first_range[0].value > second_range[0].value: return first_range class EpisodeNumberSeparatorRange(AbstractSeparatorRange): """ Remove separator matches and create matches for episoderNumber range. """ def __init__(self, range_separators): super(EpisodeNumberSeparatorRange, self).__init__(range_separators, "episode") class SeasonSeparatorRange(AbstractSeparatorRange): """ Remove separator matches and create matches for season range. """ def __init__(self, range_separators): super(SeasonSeparatorRange, self).__init__(range_separators, "season") class RemoveWeakIfMovie(Rule): """ Remove weak-episode tagged matches if it seems to be a movie. 
""" priority = 64 consequence = RemoveMatch def enabled(self, context): return context.get('type') != 'episode' def when(self, matches, context): to_remove = [] to_ignore = set() remove = False for filepart in matches.markers.named('path'): year = matches.range(filepart.start, filepart.end, predicate=lambda m: m.name == 'year', index=0) if year: remove = True next_match = matches.range(year.end, filepart.end, predicate=lambda m: m.private, index=0) if (next_match and not matches.holes(year.end, next_match.start, predicate=lambda m: m.raw.strip(seps)) and not matches.at_match(next_match, predicate=lambda m: m.name == 'year')): to_ignore.add(next_match.initiator) to_ignore.update(matches.range(filepart.start, filepart.end, predicate=lambda m: len(m.children.named('episode')) > 1)) to_remove.extend(matches.conflicting(year)) if remove: to_remove.extend(matches.tagged('weak-episode', predicate=( lambda m: m.initiator not in to_ignore and 'anime' not in m.tags))) return to_remove class RemoveWeak(Rule): """ Remove weak-episode matches which appears after video, source, and audio matches. """ priority = 16 consequence = RemoveMatch def when(self, matches, context): to_remove = [] for filepart in matches.markers.named('path'): weaks = matches.range(filepart.start, filepart.end, predicate=lambda m: 'weak-episode' in m.tags) if weaks: previous = matches.previous(weaks[0], predicate=lambda m: m.name in ( 'audio_codec', 'screen_size', 'streaming_service', 'source', 'video_profile', 'audio_channels', 'audio_profile'), index=0) if previous and not matches.holes( previous.end, weaks[0].start, predicate=lambda m: m.raw.strip(seps)): to_remove.extend(weaks) return to_remove class RemoveWeakIfSxxExx(Rule): """ Remove weak-episode tagged matches if SxxExx pattern is matched. Weak episodes at beginning of filepart are kept. 
""" priority = 64 consequence = RemoveMatch def when(self, matches, context): to_remove = [] for filepart in matches.markers.named('path'): if matches.range(filepart.start, filepart.end, predicate=lambda m: not m.private and 'SxxExx' in m.tags): for match in matches.range(filepart.start, filepart.end, predicate=lambda m: 'weak-episode' in m.tags): if match.start != filepart.start or match.initiator.name != 'weak_episode': to_remove.append(match) return to_remove class RemoveInvalidSeason(Rule): """ Remove invalid season matches. """ priority = 64 consequence = RemoveMatch def when(self, matches, context): to_remove = [] for filepart in matches.markers.named('path'): strong_season = matches.range(filepart.start, filepart.end, index=0, predicate=lambda m: m.name == 'season' and not m.private and 'SxxExx' in m.tags) if strong_season: if strong_season.initiator.children.named('episode'): for season in matches.range(strong_season.end, filepart.end, predicate=lambda m: m.name == 'season' and not m.private): # remove weak season or seasons without episode matches if 'SxxExx' not in season.tags or not season.initiator.children.named('episode'): if season.initiator: to_remove.append(season.initiator) to_remove.extend(season.initiator.children) else: to_remove.append(season) return to_remove class RemoveInvalidEpisode(Rule): """ Remove invalid episode matches. 
""" priority = 64 consequence = RemoveMatch def when(self, matches, context): to_remove = [] for filepart in matches.markers.named('path'): strong_episode = matches.range(filepart.start, filepart.end, index=0, predicate=lambda m: m.name == 'episode' and not m.private and 'SxxExx' in m.tags) if strong_episode: strong_ep_marker = RemoveInvalidEpisode.get_episode_prefix(matches, strong_episode) for episode in matches.range(strong_episode.end, filepart.end, predicate=lambda m: m.name == 'episode' and not m.private): ep_marker = RemoveInvalidEpisode.get_episode_prefix(matches, episode) if strong_ep_marker and ep_marker and strong_ep_marker.value.lower() != ep_marker.value.lower(): if episode.initiator: to_remove.append(episode.initiator) to_remove.extend(episode.initiator.children) else: to_remove.append(ep_marker) to_remove.append(episode) return to_remove @staticmethod def get_episode_prefix(matches, episode): """ Return episode prefix: episodeMarker or episodeSeparator """ return matches.previous(episode, index=0, predicate=lambda m: m.name in ('episodeMarker', 'episodeSeparator')) class RemoveWeakDuplicate(Rule): """ Remove weak-duplicate tagged matches if duplicate patterns, for example The 100.109 """ priority = 64 consequence = RemoveMatch def when(self, matches, context): to_remove = [] for filepart in matches.markers.named('path'): patterns = defaultdict(list) for match in reversed(matches.range(filepart.start, filepart.end, predicate=lambda m: 'weak-duplicate' in m.tags)): if match.pattern in patterns[match.name]: to_remove.append(match) else: patterns[match.name].append(match.pattern) return to_remove class EpisodeDetailValidator(Rule): """ Validate episode_details if they are detached or next to season or episode. 
""" priority = 64 consequence = RemoveMatch def when(self, matches, context): ret = [] for detail in matches.named('episode_details'): if not seps_surround(detail) \ and not matches.previous(detail, lambda match: match.name in ['season', 'episode']) \ and not matches.next(detail, lambda match: match.name in ['season', 'episode']): ret.append(detail) return ret class RemoveDetachedEpisodeNumber(Rule): """ If multiple episode are found, remove those that are not detached from a range and less than 10. Fairy Tail 2 - 16-20, 2 should be removed. """ priority = 64 consequence = RemoveMatch dependency = [RemoveWeakIfSxxExx, RemoveWeakDuplicate] def when(self, matches, context): ret = [] episode_numbers = [] episode_values = set() for match in matches.named('episode', lambda m: not m.private and 'weak-episode' in m.tags): if match.value not in episode_values: episode_numbers.append(match) episode_values.add(match.value) episode_numbers = list(sorted(episode_numbers, key=lambda m: m.value)) if len(episode_numbers) > 1 and \ episode_numbers[0].value < 10 and \ episode_numbers[1].value - episode_numbers[0].value != 1: parent = episode_numbers[0] while parent: # TODO: Add a feature in rebulk to avoid this ... ret.append(parent) parent = parent.parent return ret class VersionValidator(Rule): """ Validate version if previous match is episode or if surrounded by separators. """ priority = 64 dependency = [RemoveWeakIfMovie, RemoveWeakIfSxxExx] consequence = RemoveMatch def when(self, matches, context): ret = [] for version in matches.named('version'): episode_number = matches.previous(version, lambda match: match.name == 'episode', 0) if not episode_number and not seps_surround(version.initiator): ret.append(version) return ret class EpisodeSingleDigitValidator(Rule): """ Remove single digit episode when inside a group that doesn't own title. 
""" dependency = [TitleFromPosition] consequence = RemoveMatch def when(self, matches, context): ret = [] for episode in matches.named('episode', lambda match: len(match.initiator) == 1): group = matches.markers.at_match(episode, lambda marker: marker.name == 'group', index=0) if group: if not matches.range(*group.span, predicate=lambda match: match.name == 'title'): ret.append(episode) return ret class RenameToDiscMatch(Rule): """ Rename episodes detected with `d` episodeMarkers to `disc`. """ consequence = [RenameMatch('disc'), RenameMatch('discMarker'), RemoveMatch] def when(self, matches, context): discs = [] markers = [] to_remove = [] disc_disabled = is_disabled(context, 'disc') for marker in matches.named('episodeMarker', predicate=lambda m: m.value.lower() == 'd'): if disc_disabled: to_remove.append(marker) to_remove.extend(marker.initiator.children) continue markers.append(marker) discs.extend(sorted(marker.initiator.children.named('episode'), key=lambda m: m.value)) return discs, markers, to_remove
2.484375
2
cwinpy/heterodyne/heterodyne.py
nigeltrc72/cwinpy
5
17909
<gh_stars>1-10 """ Run heterodyne pre-processing of gravitational-wave data. """ import ast import configparser import copy import os import shutil import signal import sys import tempfile from argparse import ArgumentParser import cwinpy import numpy as np from bilby_pipe.bilbyargparser import BilbyArgParser from bilby_pipe.job_creation.dag import Dag from bilby_pipe.utils import ( BilbyPipeError, check_directory_exists_and_if_not_mkdir, parse_args, ) from configargparse import ArgumentError from ..condor.hetnodes import HeterodyneInput, HeterodyneNode, MergeHeterodyneNode from ..data import HeterodynedData from ..info import ( ANALYSIS_SEGMENTS, CVMFS_GWOSC_DATA_SERVER, CVMFS_GWOSC_DATA_TYPES, CVMFS_GWOSC_FRAME_CHANNELS, HW_INJ, HW_INJ_RUNTIMES, HW_INJ_SEGMENTS, RUNTIMES, ) from ..parfile import PulsarParameters from ..utils import ( LAL_BINARY_MODELS, LAL_EPHEMERIS_TYPES, check_for_tempo2, initialise_ephemeris, sighandler, ) from .base import Heterodyne, generate_segments, remote_frame_cache def create_heterodyne_parser(): """ Create the argument parser. """ description = """\ A script to heterodyne raw gravitational-wave strain data based on the \ expected evolution of the gravitational-wave signal from a set of pulsars.""" parser = BilbyArgParser( prog=sys.argv[0], description=description, ignore_unknown_config_file_keys=False, allow_abbrev=False, ) parser.add("--config", type=str, is_config_file=True, help="Configuration ini file") parser.add( "--version", action="version", version="%(prog)s {version}".format(version=cwinpy.__version__), ) parser.add( "--periodic-restart-time", default=14400, type=int, help=( "Time after which the job will be self-evicted with code 130. " "After this, condor will restart the job. Default is 14400s. " "This is used to decrease the chance of HTCondor hard evictions." 
), ) parser.add( "--overwrite", action="store_true", default=False, help=( "Set this flag to make sure any previously generated heterodyned " 'files are overwritten. By default the analysis will "resume" ' "from where it left off (by checking whether output files, as set " 'using "--output" and "--label" arguments, already exist), such ' "as after forced Condor eviction for checkpointing purposes. " "Therefore, this flag is needs to be explicitly given (the " "default is False) if not wanting to use resume and overwrite " "existing files." ), ) dataparser = parser.add_argument_group("Data inputs") dataparser.add( "--starttime", required=True, type=int, help=("The start time of the data to be heterodyned in GPS seconds."), ) dataparser.add( "--endtime", required=True, type=int, help=("The end time of the data to be heterodyned in GPS seconds."), ) dataparser.add( "--stride", default=3600, type=int, help=( "The number of seconds to stride through the data (i.e., this " "number of seconds of data will be read in in one go), Defaults " "to 3600." ), ) dataparser.add( "--detector", required=True, type=str, help=("The name of the detectors for which the data is to be heterodyned."), ) dataparser.add( "--frametype", type=str, help=( 'The "frame type" name of the data to be heterodyned. If this ' "is not given the correct data set will be attempted to be found " "using the channel name." ), ) dataparser.add( "--channel", required=True, type=str, help=( 'The "channel" within the gravitational-wave data file(s) ' '(either a GW frame ".gwf", or HDF5 file) containing the strain ' "data to be heterodyned. The channel name should contain the " "detector name prefix as the first two characters followed by a " 'colon, e.g., "L1:GWOSC-4KHZ_R1_STRAIN"' ), ) dataparser.add( "--host", type=str, help=( "The server name for finding the gravitational-wave data files. " 'Use "datafind.ligo.org:443" for open data available via CVMFS. 
' "To use open data available from the GWOSC use " '"https://www.gw-openscience.org".' ), ) dataparser.add( "--outputframecache", type=str, help=( "If given this should give a file path to which a list of " "gravitational-wave data file paths, as found by the code, will " "be written. If not given then the file list will not be output." ), ) dataparser.add( "--appendframecache", action="store_true", default=False, help=( "If writing out the frame cache to a file, set this to True to " "append to the file rather than overwriting. Default is False." ), ) dataparser.add( "--framecache", help=( "Provide a pregenerated cache of gravitational-wave files, either " "as a single file, or a list of files. Alternatively, you can " "supply a directory containing the files (which will be " "searched recursively for gwf and then hdf5 files), which should " 'be used in conjunction with the "frametype" argument. If giving ' "a list, this should be in the form of a Python list, surrounded " "by quotation marks, e.g., \"['file1.lcf','file2.lcf']\"." ), ) dataparser.add( "--heterodyneddata", help=( "A string, or dictionary of strings, containing the full file " "path, or directory path, pointing the the location of " "pre-heterodyned data. For a single pulsar a file path can be " "given. For multiple pulsars a directory containing heterodyned " "files (in HDF5 or txt format) can be given provided that within " "it the file names contain the pulsar names as supplied in the " 'file input with "--pulsarfiles". Alternatively, a dictionary ' "can be supplied, keyed on the pulsar name, containing a single " "file path or a directory path as above. If supplying a " "directory, it can contain multiple heterodyned files for a each " "pulsar and all will be used. If giving a dictionary it should be " "surrounded by quotation marks." 
), ) segmentparser = parser.add_argument_group("Analysis segment inputs") segmentparser.add( "--segmentlist", help=( "Provide a list of data segment start and end times, as " "list/tuple pairs in the list, or an ASCII text file containing " "the segment start and end times in two columns. If a list, this " "should be in the form of a Python list, surrounded by quotation " 'marks, e.g., "[(900000000,900086400),(900100000,900186400)]".' ), ) segmentparser.add( "--includeflags", help=( "If not providing a segment list then give a string, or list of " "strings, giving the data DQ flags that will be used to generate " "a segment list. Lists should be surrounded by quotation marks, " "e.g., \"['L1:DMT-ANALYSIS_READY:1']\"." ), ) segmentparser.add( "--excludeflags", help=( "A string, or list of strings, giving the data DQ flags to " "when generating a segment list. Lists should be surrounded by " "quotation marks." ), ) segmentparser.add( "--outputsegmentlist", type=str, help=( "If generating a segment list it will be output to the file " "specified by this argument." ), ) segmentparser.add( "--appendsegmentlist", action="store_true", default=False, help=( "If generating a segment list set this to True to append to the " 'file specified by "--outputsegmentlist" rather than ' "overwriting. Default is False." ), ) segmentparser.add("--segmentserver", type=str, help=("The segment database URL.")) pulsarparser = parser.add_argument_group("Pulsar inputs") pulsarparser.add( "--pulsarfiles", action="append", help=( "This specifies the pulsars for which to heterodyne the data. 
It " "can be either i) a string giving the path to an individual " "pulsar Tempo(2)-style parameter file, ii) a string giving the " "path to a directory containing multiple Tempo(2)-style parameter " "files (the path will be recursively searched for any file with " 'the extension ".par"), iii) a list of paths to individual ' "pulsar parameter files, iv) a dictionary containing paths to " "individual pulsars parameter files keyed to their names. If " "instead, pulsar names are given rather than parameter files it " "will attempt to extract an ephemeris for those pulsars from the " "ATNF pulsar catalogue. If such ephemerides are available then " "they will be used (notification will be given when this is " "these cases). If providing a list or dictionary it should be " "surrounded by quotation marks." ), ) pulsarparser.add( "--pulsars", action="append", help=( "You can analyse only particular pulsars from those specified by " 'parameter files found through the "--pulsarfiles" argument by ' "passing a string, or list of strings, with particular pulsars " "names to use." ), ) outputparser = parser.add_argument_group("Data output inputs") outputparser.add( "--output", help=( "The base directory into which the heterodyned results will be " "output. To specify explicit directory paths for individual " "pulsars this can be a dictionary of directory paths keyed to the " 'pulsar name (in which case the "--label" argument will be used ' "to set the file name), or full file paths, which will be used in " 'place of the "--label" argument. If not given then the current' "working directory will be used." ), ) outputparser.add( "--label", help=( "The output format for the heterodyned data files. These can be " 'format strings containing the keywords "psr" for the pulsar ' 'name, "det" for the detector, "freqfactor" for the rotation ' 'frequency scale factor used, "gpsstart" for the GPS start ' 'time, and "gpsend" for the GPS end time. 
The extension should ' 'be given as ".hdf", ".h5", or ".hdf5". E.g., the default ' 'is "heterodyne_{psr}_{det}_{freqfactor}_{gpsstart}-{gpsend}.hdf".' ), ) heterodyneparser = parser.add_argument_group("Heterodyne inputs") heterodyneparser.add( "--filterknee", type=float, help=( "The knee frequency (Hz) of the low-pass filter applied after " "heterodyning the data. This should only be given when " "heterodying raw strain data and not if re-heterodyning processed " "data. Default is 0.5 Hz." ), ) heterodyneparser.add( "--resamplerate", type=float, required=True, help=( "The rate in Hz at which to resample the data (via averaging) " "after application of the heterodyne (and filter if applied)." ), ) heterodyneparser.add( "--freqfactor", type=float, help=( "The factor applied to the pulsars rotational parameters when " "defining the gravitational-wave phase evolution. For example, " "the default value of 2 multiplies the phase evolution by 2 under " "the assumption of a signal emitted from the l=m=2 quadrupole " "mode of a rigidly rotating triaxial neutron star." ), ) heterodyneparser.add( "--crop", type=int, help=( "The number of seconds to crop from the start and end of data " "segments to remove filter impulse effects and issues prior to " "lock-loss. Default is 60 seconds." ), ) heterodyneparser.add( "--includessb", action="store_true", default=False, help=( "Set this flag to include removing the modulation of the signal due to " "Solar System motion and relativistic effects (e.g., Roemer, " "Einstein, and Shapiro delay) during the heterodyne." ), ) heterodyneparser.add( "--includebsb", action="store_true", default=False, help=( "Set this flag to include removing the modulation of the signal " "due to binary system motion and relativistic effects during the " 'heterodyne. To use this "--includessb" must also be set.' 
), ) heterodyneparser.add( "--includeglitch", action="store_true", default=False, help=( "Set this flag to include removing the effects of the phase " "evolution of any modelled pulsar glitches during the heterodyne." ), ) heterodyneparser.add( "--includefitwaves", action="store_true", default=False, help=( "Set this to True to include removing the phase evolution of a " "series of sinusoids designed to model low-frequency timing noise " "in the pulsar signal during the heterodyne." ), ) heterodyneparser.add( "--usetempo2", action="store_true", default=False, help=( "Set this to True to use Tempo2 (via libstempo) to calculate the " "signal phase evolution. For this to be used v2.4.2 or greater of " "libstempo must be installed. When using Tempo2 the " '"--earthephemeris", "--sunephemeris" and "--timeephemeris" ' "arguments do not need to be supplied. This can only be used when " "running the full heterodyne in one stage, but not for " 're-heterodyning previous data, as such all the "--include..." ' "arguments will be assumed to be True." ), ) ephemerisparser = parser.add_argument_group("Solar system ephemeris inputs") ephemerisparser.add( "--earthephemeris", help=( 'A dictionary, keyed to ephemeris names, e.g., "DE405", pointing ' "to the location of a file containing that ephemeris for the " "Earth. The dictionary must be supplied within quotation marks, " "e.g., \"{'DE436':'earth_DE436.txt'}\". If a pulsar requires a " "specific ephemeris that is not provided in this dictionary, then " "the code will automatically attempt to find or download the " "required file if available." ), ) ephemerisparser.add( "--sunephemeris", help=( 'A dictionary, keyed to ephemeris names, e.g., "DE405", pointing ' "to the location of a file containing that ephemeris for the " "Sun. If a pulsar requires a specific ephemeris that is not " "provided in this dictionary, then the code will automatically " "attempt to find or download the required file if available." 
), ) ephemerisparser.add( "--timeephemeris", help=( "A dictionary, keyed to time system name, which can be either " '"TCB" or "TDB", pointing to the location of a file containing ' "that ephemeris for that time system. If a pulsar requires a " "specific ephemeris that is not provided in this dictionary, then " "the code will automatically attempt to find or download the " "required file if available." ), ) cfparser = parser.add_argument_group("Configuration inputs") cfparser.add( "--cwinpy-heterodyne-dag-config-file", help=( "A path to the cwinpy_heterodyne_dag configuration file can be " "supplied if this was has been used to setup the heterodyne job." ), ) return parser def heterodyne(**kwargs): """ Run heterodyne within Python. See the `class::~cwinpy.heterodyne.Heterodyne` class for the required arguments. Returns ------- het: `class::~cwinpy.heterodyne.Heterodyne` The heterodyning class object. """ if "cli" in kwargs or "config" in kwargs: if "cli" in kwargs: kwargs.pop("cli") # get command line arguments parser = create_heterodyne_parser() # parse config file or command line arguments if "config" in kwargs: cliargs = ["--config", kwargs["config"]] else: cliargs = sys.argv[1:] try: args, _ = parse_args(cliargs, parser) except BilbyPipeError as e: raise IOError("{}".format(e)) # convert args to a dictionary hetkwargs = vars(args) if "config" in kwargs: # update with other keyword arguments hetkwargs.update(kwargs) else: hetkwargs = kwargs # check non-standard arguments that could be Python objects nsattrs = [ "framecache", "heterodyneddata", "segmentlist", "includeflags", "excludeflags", "pulsarfiles", "pulsars", "output", "earthephemeris", "sunephemeris", "timeephemeris", ] for attr in nsattrs: value = hetkwargs.pop(attr, None) if isinstance(value, str): # check whether the value can be evaluated as a Python object try: value = ast.literal_eval(value) except (ValueError, SyntaxError): pass # if the value was a string within a string, e.g., '"[2.3]"', # 
evaluate again just in case it contains a Python object! if isinstance(value, str): try: value = ast.literal_eval(value) except (ValueError, SyntaxError): pass hetkwargs[attr] = value elif value is not None: hetkwargs[attr] = value # check if pulsarfiles is a single entry list containing a dictionary if isinstance(hetkwargs["pulsarfiles"], list): if len(hetkwargs["pulsarfiles"]) == 1: try: value = ast.literal_eval(hetkwargs["pulsarfiles"][0]) if isinstance(value, dict): # switch to passing the dictionary hetkwargs["pulsarfiles"] = value except SyntaxError: pass signal.signal(signal.SIGALRM, handler=sighandler) signal.alarm(hetkwargs.pop("periodic_restart_time", 14400)) # remove any None values for key in hetkwargs.copy(): if hetkwargs[key] is None: hetkwargs.pop(key) # convert "overwrite" to "resume" hetkwargs["resume"] = not hetkwargs.pop("overwrite", False) # remove "config" from hetkwargs if "config" in hetkwargs: hetkwargs.pop("config") # set up the run het = Heterodyne(**hetkwargs) # heterodyne the data het.heterodyne() return het def heterodyne_cli(**kwargs): # pragma: no cover """ Entry point to ``cwinpy_heterodyne`` script. This just calls :func:`cwinpy.heterodyne.heterodyne`, but does not return any objects. """ kwargs["cli"] = True # set to show use of CLI _ = heterodyne(**kwargs) def create_heterodyne_merge_parser(): """ Create the argument parser for merging script. """ description = "A script to merge multiple heterodyned data files." 
parser = BilbyArgParser( prog=sys.argv[0], description=description, ignore_unknown_config_file_keys=False, allow_abbrev=False, ) parser.add("--config", type=str, is_config_file=True, help="Configuration ini file") parser.add( "--version", action="version", version="%(prog)s {version}".format(version=cwinpy.__version__), ) parser.add( "--heterodynedfiles", action="append", type=str, help=("A path, or list of paths, to heterodyned data files to merge together."), ) parser.add( "--output", type=str, help=("The output file for the merged heterodyned data."), ) parser.add( "--overwrite", action="store_true", help=("Set if wanting to overwrite an existing merged file."), ) parser.add( "--remove", action="store_true", help=("Set if wanting to delete individual files being merged."), ) return parser def heterodyne_merge(**kwargs): """ Merge the output of multiple heterodynes for a specific pulsar. Parameters ---------- heterodynedfiles: str, list A string, or list of strings, giving the paths to heterodyned data files to be read in and merged output: str The output file name to write the data to. If not given then the data will not be output. overwrite: bool Set whether to overwrite an existing file. Defaults to False. remove: bool Set whether to remove the individual files that form the merged file. Defaults to False. Returns ------- het: `class::~cwinpy.heterodyne.Heterodyne` The merged heterodyning class object. 
""" if "cli" in kwargs: # get command line arguments parser = create_heterodyne_merge_parser() cliargs = sys.argv[1:] try: args, _ = parse_args(cliargs, parser) except BilbyPipeError as e: raise IOError("{}".format(e)) # convert args to a dictionary mergekwargs = vars(args) else: mergekwargs = kwargs if "heterodynedfiles" not in mergekwargs: raise ArgumentError("'heterodynedfiles' is a required argument") heterodynedfiles = mergekwargs["heterodynedfiles"] filelist = ( heterodynedfiles if isinstance(heterodynedfiles, list) else [heterodynedfiles] ) filelist = [hf for hf in filelist if os.path.isfile(hf)] if len(filelist) == 0: raise ValueError("None of the heterodyned files given exists!") # read in and merge all the files het = HeterodynedData.read(filelist) # write out the merged data file if "output" in mergekwargs: het.write(mergekwargs["output"], overwrite=mergekwargs.get("overwrite", False)) if mergekwargs.get("remove", False): # remove the inidividual files for hf in filelist: os.remove(hf) return het def heterodyne_merge_cli(**kwargs): # pragma: no cover """ Entry point to ``cwinpy_heterodyne_merge`` script. This just calls :func:`cwinpy.heterodyne.heterodyne_merge`, but does not return any objects. """ kwargs["cli"] = True # set to show use of CLI _ = heterodyne_merge(**kwargs) class HeterodyneDAGRunner(object): """ Set up and run the heterodyne DAG. Parameters ---------- config: :class:`configparser.ConfigParser` A :class:`configparser.ConfigParser` object with the analysis setup parameters. """ def __init__(self, config, **kwargs): # create and build the dag self.create_dag(config, **kwargs) def create_dag(self, config, **kwargs): """ Create the HTCondor DAG from the configuration parameters. Parameters ---------- config: :class:`configparser.ConfigParser` A :class:`configparser.ConfigParser` object with the analysis setup parameters. 
""" if not isinstance(config, configparser.ConfigParser): raise TypeError("'config' must be a ConfigParser object") inputs = HeterodyneInput(config) dagsection = "heterodyne_dag" if config.has_section("heterodyne_dag") else "dag" if "dag" in kwargs: # get a previously created DAG if given (for example for a full # analysis pipeline) self.dag = kwargs["dag"] # get whether to automatically submit the dag self.dag.inputs.submit = config.getboolean( dagsection, "submitdag", fallback=False ) else: self.dag = Dag(inputs) # get whether to build the dag self.build = config.getboolean(dagsection, "build", fallback=True) # get any additional submission options self.submit_options = config.get(dagsection, "submit_options", fallback=None) # get the base directory self.basedir = config.get("run", "basedir", fallback=os.getcwd()) # create configurations for each cwinpy_heterodyne job if not config.has_section("heterodyne"): raise IOError("Configuration file must have a [heterodyne] section.") # detectors to use detectors = self.eval(config.get("heterodyne", "detectors", fallback=None)) if isinstance(detectors, str): detectors = [detectors] # make into a list elif detectors is None: raise ValueError("At least one detector must be supplied") # get pulsar information pulsarfiles = self.eval(config.get("ephemerides", "pulsarfiles", fallback=None)) pulsars = self.eval(config.get("ephemerides", "pulsars", fallback=None)) if pulsarfiles is None: raise ValueError("A set of pulsar parameter files must be supplied") # output information outputdirs = self.eval(config.get("heterodyne", "outputdir", fallback=None)) if not isinstance(outputdirs, list): outputdirs = [outputdirs] for i, outputdir in enumerate(copy.deepcopy(outputdirs)): if isinstance(outputdir, str): outputdirs[i] = {det: outputdir for det in detectors} elif isinstance(outputdir, dict): if sorted(outputdir.keys()) != sorted(detectors): raise KeyError( "outputdirs dictionary must have same keys as the given " "detectors" ) for 
det in detectors: if not isinstance(outputdir[det], str): raise TypeError("outputdirs must be a string") else: raise TypeError("outputdirs must be a string or dictionary") label = self.eval(config.get("heterodyne", "label", fallback=None)) if label is not None: if isinstance(label, str): label = [label] elif not isinstance(label, list): raise TypeError("label must be a string or a list") freqfactors = self.eval( config.get("heterodyne", "freqfactors", fallback="[2.0]") ) if isinstance(freqfactors, (int, float)): freqfactors = [freqfactors] # make into a list # get times of data to analyse fullstarttimes = self.eval( config.get("heterodyne", "starttimes", fallback=None) ) if isinstance(fullstarttimes, dict): if sorted(detectors) != sorted(fullstarttimes.keys()): raise ValueError("Start times must be specified for all detectors") for key, value in fullstarttimes.copy().items(): if isinstance(value, int): fullstarttimes[key] = [value] # convert values to lists elif not isinstance(value, list): raise TypeError("Must have a list of start times for a detector") elif isinstance(fullstarttimes, int): fullstarttimes = { det: [fullstarttimes] for det in detectors } # convert to dict else: raise ValueError("Start times must be given") fullendtimes = self.eval(config.get("heterodyne", "endtimes", fallback=None)) if isinstance(fullendtimes, dict): if sorted(detectors) != sorted(fullendtimes.keys()): raise ValueError("End times must be specified for all detectors") for key, value in fullendtimes.copy().items(): if isinstance(value, int): fullendtimes[key] = [value] # convert values to lists elif not isinstance(value, list): raise TypeError("Must have a list of end times for a detector") elif isinstance(fullendtimes, int): fullendtimes = {det: [fullendtimes] for det in detectors} # convert to dict else: raise ValueError("End times must be given") for det in detectors: if len(fullendtimes[det]) != len(fullstarttimes[det]): raise ValueError("Inconsistent numbers of start and end 
times") stride = config.getint("heterodyne", "stride", fallback=None) joblength = config.getint("heterodyne", "joblength", fallback=86400) # get frame data information frametypes = self.eval(config.get("heterodyne", "frametypes", fallback=None)) if isinstance(frametypes, str) and len(detectors) == 1: frametypes = {det: frametypes for det in detectors} framecaches = self.eval(config.get("heterodyne", "framecaches", fallback=None)) if isinstance(framecaches, str) and len(detectors) == 1: framecaches = {det: framecaches for det in detectors} channels = self.eval(config.get("heterodyne", "channels", fallback=None)) if isinstance(channels, str) and len(detectors) == 1: channels = {det: channels for det in detectors} host = config.get("heterodyne", "host", fallback=None) heterodyneddata = self.eval( config.get("heterodyne", "heterodyneddata", fallback=None) ) framedata = {det: [] for det in detectors} if frametypes is None and framecaches is None and heterodyneddata is None: raise ValueError( "Frame types, frame cache files, or heterodyned data information must " "be supplied" ) if heterodyneddata is None: for fname, finfo in dict( frametypes=frametypes, framecaches=framecaches, channels=channels ).items(): if finfo is not None: # set frame types/caches if isinstance(finfo, dict): for key, value in finfo.copy().items(): if isinstance(value, str): finfo[key] = [value] * len(fullstarttimes[key]) elif isinstance(value, list): if len(value) != len(fullstarttimes[key]): raise ValueError( "{} lists must be consistent with the number of start and end times".format( fname ) ) else: raise TypeError("Must have a list of {}".format(fname)) else: raise TypeError("{} should be a dictionary".format(fname)) # get segment information segmentserver = config.get("heterodyne", "segmentserver", fallback=None) segmentlists = self.eval( config.get("heterodyne", "segmentlists", fallback=None) ) if isinstance(segmentlists, str) and len(detectors) == 1: segmentlists = {det: segmentlists for det 
in detectors} includeflags = self.eval( config.get("heterodyne", "includeflags", fallback=None) ) if isinstance(includeflags, str) and len(detectors) == 1: includeflags = {det: includeflags for det in detectors} excludeflags = self.eval( config.get("heterodyne", "excludeflags", fallback=None) ) if isinstance(excludeflags, str) and len(detectors) == 1: excludeflags = {det: excludeflags for det in detectors} segmentdata = {det: [] for det in detectors} if segmentlists is None and includeflags is None and heterodyneddata is None: raise ValueError( "Segment lists of segment data quality flags must be supplied" ) for sname, sinfo in dict( includeflags=includeflags, excludeflags=excludeflags, segmentlists=segmentlists, ).items(): if sinfo is not None: if isinstance(sinfo, dict): for key, value in sinfo.copy().items(): if isinstance(value, str): sinfo[key] = [value] * len(fullstarttimes[key]) elif isinstance(value, list): if len(value) != len(fullstarttimes[key]): raise ValueError( "{} lists must be consistent with the number of start and end times".format( sname ) ) else: raise TypeError("Must have a list of {}".format(sname)) else: raise TypeError("{} should be a dictionary".format(sname)) # get ephemeris information earthephemeris = self.eval(config.get("ephemerides", "earth", fallback=None)) sunephemeris = self.eval(config.get("ephemerides", "sun", fallback=None)) timeephemeris = self.eval(config.get("ephemerides", "time", fallback=None)) # get all the split segment times and frame caches if joblength == 0: starttimes = fullstarttimes endtimes = fullendtimes for det in detectors: for i in range(len(fullstarttimes[det])): frinfo = {} if frametypes is not None: # generate the frame caches now rather than relying on # each job doing it frcachedir = os.path.join(self.basedir, "cache") check_directory_exists_and_if_not_mkdir(frcachedir) frinfo["framecache"] = os.path.join( frcachedir, "frcache_{0:d}-{1:d}_{2}.txt".format( starttimes[det][i], endtimes[det][i], 
frametypes[det][i] ), ) _ = remote_frame_cache( starttimes[det][i], endtimes[det][i], channels[det][i], frametype=frametypes[det][i], host=config.get("heterodyne", "host", fallback=None), write=frinfo["framecache"], ) else: frinfo["framecache"] = framecaches[det][i] frinfo["channel"] = channels[det][i] framedata[det].append(frinfo.copy()) seginfo = {} if segmentlists is not None: seginfo["segmentlist"] = segmentlists[det][i] else: # GWOSC segments look like DET_DATA, DET_CW* or DET_*_CAT* usegwosc = False if ( "{}_DATA".format(det) == includeflags[det][i] or "{}_CW".format(self.detector) in self.includeflags[0] or "CBC_CAT" in includeflags[det][i] or "BURST_CAT" in includeflags[det][i] ): usegwosc = True inputs.require_gwosc = True # if segment list files are not provided create the lists # now rather than relying on each job doing it segdir = os.path.join(self.basedir, "segments") check_directory_exists_and_if_not_mkdir(segdir) seginfo["segmentlist"] = os.path.join( segdir, "segments_{0:d}-{1:d}_{2}.txt".format( starttimes[det][i], endtimes[det][i], includeflags[det][i].replace(":", "_"), ), ) _ = generate_segments( starttime=starttimes[det][i], endtime=endtimes[det][i], includeflags=includeflags[det][i], excludeflags=( None if excludeflags is None else excludeflags[det][i].split(",") ), writesegments=seginfo["segmentlist"], usegwosc=usegwosc, server=segmentserver, ) segmentdata[det].append(seginfo.copy()) elif joblength > 0: starttimes = {det: [] for det in detectors} endtimes = {det: [] for det in detectors} for det in detectors: idx = 0 for starttime, endtime in zip(fullstarttimes[det], fullendtimes[det]): # if segment list files are not provided create the lists # now rather than relying on each job doing it seginfo = {} if segmentlists is not None: seginfo["segmentlist"] = segmentlists[det][idx] segmentlist = generate_segments( starttime=starttime, endtime=endtime, segmentfile=seginfo["segmentlist"], ) else: # GWOSC segments look like DET_DATA or DET_*_CAT* 
usegwosc = False if ( "{}_DATA".format(det) == includeflags[det][idx] or "CBC_CAT" in includeflags[det][idx] or "BURST_CAT" in includeflags[det][idx] ): usegwosc = True inputs.require_gwosc = True # if segment list files are not provided create the lists # now rather than relying on each job doing it segdir = os.path.join(self.basedir, "segments") check_directory_exists_and_if_not_mkdir(segdir) seginfo["segmentlist"] = os.path.join( segdir, "segments_{0:d}-{1:d}_{2}.txt".format( starttime, endtime, includeflags[det][idx].replace(":", "_"), ), ) segmentlist = generate_segments( starttime=starttime, endtime=endtime, includeflags=includeflags[det][idx], excludeflags=( None if excludeflags is None else excludeflags[det][idx].split(",") ), writesegments=seginfo["segmentlist"], usegwosc=usegwosc, server=segmentserver, ) if len(segmentlist) == 0: raise ValueError( f"No science data segments exist for {det}" ) # make segment list a list of lists, so values are not immutable segmentlist = [list(seg) for seg in segmentlist] frinfo = {} if frametypes is not None: # generate the frame caches now rather than relying on # each job doing it frcachedir = os.path.join(self.basedir, "cache") check_directory_exists_and_if_not_mkdir(frcachedir) frinfo["framecache"] = os.path.join( frcachedir, "frcache_{0:d}-{1:d}_{2}.txt".format( starttime, endtime, frametypes[det][idx] ), ) _ = remote_frame_cache( starttime, endtime, channels[det][i], frametype=frametypes[det][idx], host=config.get("heterodyne", "host", fallback=None), write=frinfo["framecache"], ) else: frinfo["framecache"] = framecaches[det][idx] frinfo["channel"] = channels[det][idx] segidx = 0 while segidx < len(segmentlist): curstart = segmentlist[segidx][0] # get segments containing up to joblength of data sumseg = 0 while sumseg < joblength: sumseg += segmentlist[segidx][1] - segmentlist[segidx][0] segidx += 1 if segidx == len(segmentlist): break if segidx < len(segmentlist): overlap = sumseg - joblength segidx -= 1 curend = 
segmentlist[segidx][1] - overlap segmentlist[segidx][0] = curend else: # ignore final segment if it's less than 30 mins if sumseg < 30 * 60: break # use end value curend = segmentlist[-1][1] starttimes[det].append(int(curstart)) endtimes[det].append(int(curend)) # append frame data for jobs framedata[det].append(frinfo.copy()) segmentdata[det].append(seginfo.copy()) idx += 1 else: raise ValueError("Length of each job must be a positive integer") # create Heterodyne object to get pulsar parameter file information het = Heterodyne( pulsarfiles=pulsarfiles, pulsars=pulsars, heterodyneddata=heterodyneddata, ) # get number over which to split up pulsars npulsarjobs = config.getint("heterodyne", "npulsarjobs", fallback=1) pulsargroups = [] if npulsarjobs == 1 or len(het.pulsars) == 1: pulsargroups.append(het.pulsars) else: pstep = int(np.ceil(len(het.pulsars) / npulsarjobs)) for i in range(npulsarjobs): pulsargroups.append(het.pulsars[pstep * i : pstep * (i + 1)]) # set whether to perform the heterodyne in 1 or two stages stages = config.getint("heterodyne", "stages", fallback=1) if stages not in [1, 2]: raise ValueError("Stages must either be 1 or 2") # get the resample rate(s) if stages == 1: resamplerate = [ self.eval( config.get("heterodyne", "resamplerate", fallback="1.0 / 60.0") ) ] else: resamplerate = self.eval( config.get("heterodyne", "resamplerate", fallback="[1.0, 1.0 / 60.0]") ) # set the components of the signal modulation, i.e., solar system, # binary system, to include in the heterodyne stages. By default a # single stage heterodyne will include all components and a two stage # heterodyne will include no components in the first stage, but all # components in the second stage. 
If supplying different values for a # two stage process use lists if stages == 1: includessb = [config.getboolean("heterodyne", "includessb", fallback=True)] includebsb = [config.getboolean("heterodyne", "includebsb", fallback=True)] includeglitch = [ config.getboolean("heterodyne", "includeglitch", fallback=True) ] includefitwaves = [ config.getboolean("heterodyne", "includefitwaves", fallback=True) ] # filter knee frequency (default to 0.1 Hz for single stage heterodyne) filterknee = config.getfloat("heterodyne", "filterknee", fallback=0.1) else: includessb = self.eval( config.getboolean("heterodyne", "includessb", fallback="[False, True]") ) includebsb = self.eval( config.getboolean("heterodyne", "includebsb", fallback="[False, True]") ) includeglitch = self.eval( config.getboolean( "heterodyne", "includeglitch", fallback="[False, True]" ) ) includefitwaves = self.eval( config.getboolean( "heterodyne", "includefitwaves", fallback="[False, True]" ) ) # filter knee frequency (default to 0.5 Hz for two stage heterodyne) filterknee = config.getfloat("heterodyne", "filterknee", fallback=0.5) # get whether using Tempo2 or not and check it's availability usetempo2 = config.getboolean("heterodyne", "usetempo2", fallback=False) if usetempo2 and not check_for_tempo2(): raise ImportError( "libstempo is not installed so 'usetempo2' option cannot be used" ) # get the required solar system ephemeris types and binary model for # the given pulsars etypes = [] binarymodels = [] for pf in het.pulsarfiles: par = PulsarParameters(het.pulsarfiles[pf]) etypes.append(par["EPHEM"] if par["EPHEM"] is not None else "DE405") if par["BINARY"] is not None: binarymodels.append(par["BINARY"]) self.pulsar_files = het.pulsarfiles.copy() # remove duplicates etypes = set(etypes) binarymodels = set(binarymodels) # if ephemeris information is None download/extract information if earthephemeris is None or sunephemeris is None: earthephemeris = {} if earthephemeris is None else earthephemeris 
sunephemeris = {} if sunephemeris is None else sunephemeris for etype in LAL_EPHEMERIS_TYPES: if etype not in earthephemeris: edat = initialise_ephemeris(ephem=etype, ssonly=True) earthephemeris[etype] = edat.filenameE sunephemeris[etype] = edat.filenameS if timeephemeris is None: timeephemeris = {} if timeephemeris is None else timeephemeris for unit in ["TCB", "TDB"]: if unit not in timeephemeris: _, fnames = initialise_ephemeris( units=unit, timeonly=True, filenames=True ) timeephemeris[unit] = fnames[0] # create copy of each file to a unique name in case of identical filenames # from astropy cache, which causes problems if requiring files be # transferred if inputs.transfer_files or inputs.osg: for edat, ename in zip( [earthephemeris, sunephemeris, timeephemeris], ["earth", "sun", "time"] ): if ( len(set([os.path.basename(edat[etype]) for etype in edat])) == 1 and len(edat) > 1 ): for etype in edat: tmpephem = os.path.join( tempfile.gettempdir(), f"{ename}_{etype}" ) shutil.copy(edat[etype], tmpephem) edat[etype] = tmpephem # check that ephemeris files exist for all required types if not usetempo2: for etype in etypes: if etype not in earthephemeris or etype not in sunephemeris: raise ValueError( f"Pulsar(s) require ephemeris '{etype}' which has not been supplied" ) # check that binary models exist for all required types if not usetempo2: for bmodel in binarymodels: if bmodel not in LAL_BINARY_MODELS: raise ValueError( f"Pulsar(s) require binary model type '{bmodel}' " "which is not available in LALSuite. Try the " "usetempo2 option." 
) # check output directories and labels lists are correct length if stages == 1: if label is not None: if len(label) == 0: raise ValueError("A label must be supplied") if len(outputdirs) == 0: raise ValueError("An output directory must be supplied") else: if label is not None: if len(label) != 2: raise ValueError( "Two labels must be supplied, one for each heterodyne stage" ) if len(outputdirs) != 2: raise ValueError( "Two output directories must be supplied, one for each heterodyne stage" ) interpolationstep = config.get("heterodyne", "interpolationstep", fallback=60) crop = config.getint("heterodyne", "crop", fallback=60) overwrite = config.getboolean("heterodyne", "overwrite", fallback=False) merge = config.getboolean("merge", "merge", fallback=True) and joblength > 0 # create jobs self.hetnodes = [] # dictionary to contain all nodes for a given pulsar (for passing on to # cwinpy_pe if required) self.pulsar_nodes = {psr: {det: [] for det in detectors} for psr in het.pulsars} if merge: # dictionary containing child nodes for each merge job mergechildren = { det: {ff: {psr: [] for psr in het.pulsars} for ff in freqfactors} for det in detectors } # dictionary containing the output files for the merge results self.mergeoutputs = { det: {ff: {psr: None for psr in het.pulsars} for ff in freqfactors} for det in detectors } # dictionary to contain all the heterodyned data files for each pulsar self.heterodyned_files = { det: {ff: {psr: [] for psr in het.pulsars} for ff in freqfactors} for det in detectors } # loop over sets of pulsars for pgroup in pulsargroups: self.hetnodes.append([]) # loop over frequency factors for ff in freqfactors: # loop over each detector for det in detectors: # loop over times idx = 0 for starttime, endtime in zip(starttimes[det], endtimes[det]): configdict = {} configdict["starttime"] = starttime configdict["endtime"] = endtime configdict["detector"] = det configdict["freqfactor"] = ff configdict["resamplerate"] = resamplerate[0] 
configdict["filterknee"] = filterknee configdict["crop"] = crop configdict["overwrite"] = overwrite # set frame data/heterodyned data info configdict.update(framedata[det][idx]) configdict["host"] = host configdict["stride"] = stride configdict["heterodyneddata"] = ( heterodyneddata if heterodyneddata is None else {psr: het.heterodyneddata[psr] for psr in pgroup} ) # set segment data info configdict.update(segmentdata[det][idx]) configdict["pulsarfiles"] = { psr: het.pulsarfiles[psr] for psr in pgroup } configdict["pulsars"] = copy.deepcopy(pgroup) # set whether to include modulations configdict["includessb"] = includessb[0] configdict["includebsb"] = includebsb[0] configdict["includeglitch"] = includeglitch[0] configdict["includefitwaves"] = includefitwaves[0] configdict["interpolationstep"] = interpolationstep configdict["usetempo2"] = usetempo2 # include ephemeris files configdict["earthephemeris"] = earthephemeris configdict["sunephemeris"] = sunephemeris configdict["timeephemeris"] = timeephemeris # temporary Heterodyne object to get the output file names tmphet = Heterodyne( starttime=starttime, endtime=endtime, detector=det, freqfactor=ff, output=outputdirs[0][det], label=label[0] if label is not None else None, pulsars=copy.deepcopy(pgroup), pulsarfiles=pulsarfiles, ) # get lists of set of output heterodyned files for each pulsar/detector for psr in pgroup: self.heterodyned_files[det][ff][psr].append( copy.deepcopy(tmphet.outputfiles[psr]) ) # set the final merged output files for psr in pgroup: if merge and self.mergeoutputs[det][ff][psr] is None: # use full start and end times tmphet.starttime = starttimes[det][0] tmphet.endtime = endtimes[det][-1] self.mergeoutputs[det][ff][psr] = os.path.join( outputdirs[0][det], tmphet.outputfiles[psr], ) configdict["output"] = outputdirs[0][det] configdict["label"] = label[0] if label is not None else None self.hetnodes[-1].append( HeterodyneNode( inputs, { key: copy.deepcopy(value) for key, value in 
configdict.items() if value is not None }, self.dag, ) ) # put nodes into dictionary for each pulsar if stages == 1: for psr in pgroup: self.pulsar_nodes[psr][det].append( self.hetnodes[-1][-1] ) if merge: for psr in pgroup: mergechildren[det][ff][psr].append( self.hetnodes[-1][-1] ) idx += 1 # need to check whether doing fine heterodyne - in this case need to create new jobs on a per pulsar basis if stages == 2: for i, pgroup in enumerate(pulsargroups): for psr in pgroup: for ff in freqfactors: for det in detectors: configdict = {} configdict["starttime"] = starttimes[det][0] configdict["endtime"] = endtimes[det][-1] configdict["detector"] = det configdict["freqfactor"] = ff configdict["pulsars"] = psr configdict["pulsarfiles"] = pulsarfiles configdict["resamplerate"] = resamplerate[-1] # include all modulations configdict["includessb"] = includessb[-1] configdict["includebsb"] = includebsb[-1] configdict["includeglitch"] = includeglitch[-1] configdict["includefitwaves"] = includefitwaves[-1] # include ephemeris files configdict["earthephemeris"] = earthephemeris configdict["sunephemeris"] = sunephemeris configdict["timeephemeris"] = timeephemeris # input the data configdict["heterodyneddata"] = { psr: self.heterodyned_files[det][ff][psr] } # output structure configdict["output"] = outputdirs[1][det] configdict["label"] = ( label[1] if label is not None else None ) self.pulsar_nodes[psr][det].append( HeterodyneNode( inputs, { key: copy.deepcopy(value) for key, value in configdict.items() if value is not None }, self.dag, generation_node=self.hetnodes[i], ) ) elif merge: # set output merge jobs for i, pgroup in enumerate(pulsargroups): for psr in pgroup: for ff in freqfactors: for det in detectors: if len(self.heterodyned_files[det][ff][psr]) > 1: self.pulsar_nodes[psr][det].append( MergeHeterodyneNode( inputs, { "heterodynedfiles": copy.deepcopy( self.heterodyned_files[det][ff][psr] ), "freqfactor": ff, "detector": det, "pulsar": psr, "output": copy.deepcopy( 
self.mergeoutputs[det][ff][psr] ), }, self.dag, generation_node=mergechildren[det][ff][psr], ) ) if self.build: self.dag.build() def eval(self, arg): """ Try and evaluate a string using :func:`ast.literal_eval`. Parameters ---------- arg: str A string to be evaluated. Returns ------- object: The evaluated object, or original string, if not able to be evaluated. """ # copy of string newobj = str(arg) try: newobj = ast.literal_eval(newobj) except (ValueError, SyntaxError): # try evaluating expressions such as "1/60" or "[1., 1./60.]"", # which fail for recent versions of ast in Python 3.7+ # if expression contains a list strip the brackets to start objlist = newobj.strip("[").strip("]").split(",") issafe = False for obj in objlist: try: # check if value is just a number _ = float(obj) issafe = True except ValueError: issafe = False for op in ["/", "*", "+", "-"]: if op in obj: if len(obj.split(op)) == 2: try: _ = [float(val) for val in obj.split(op)] issafe = True except ValueError: break # object is "safe", use eval if issafe: newobj = eval(newobj) return newobj def heterodyne_dag(**kwargs): """ Run heterodyne_dag within Python. This will create a `HTCondor <https://htcondor.readthedocs.io/>`_ DAG for running multiple ``cwinpy_heterodyne`` instances on a computer cluster. Optional parameters that can be used instead of a configuration file (for "quick setup") are given in the "Other parameters" section. Parameters ---------- config: str A configuration file, or :class:`configparser:ConfigParser` object, for the analysis. Other parameters ---------------- run: str The name of an observing run for which open data exists, which will be heterodyned, e.g., "O1". detector: str, list The detector, or list of detectors, for which the data will be heterodyned. If not set then all detectors available for a given run will be used. hwinj: bool Set this to True to analyse the continuous hardware injections for a given run. No ``pulsar`` argument is required in this case. 
samplerate: str: Select the sample rate of the data to use. This can either be 4k or 16k for data sampled at 4096 or 16384 Hz, respectively. The default is 4k, except if running on hardware injections for O1 or later, for which 16k will be used due to being requred for the highest frequency source. For the S5 and S6 runs only 4k data is avaialble from GWOSC, so if 16k is chosen it will be ignored. pulsar: str, list The path to, or list of paths to, a Tempo(2)-style pulsar parameter file(s), or directory containing multiple parameter files, to heterodyne. If a pulsar name is given instead of a parameter file then an attempt will be made to find the pulsar's ephemeris from the ATNF pulsar catalogue, which will then be used. osg: bool Set this to True to run on the Open Science Grid rather than a local computer cluster. output: str, The location for outputting the heterodyned data. By default the current directory will be used. Within this directory, subdirectories for each detector will be created. joblength: int The length of data (in seconds) into which to split the individual analysis jobs. By default this is set to 86400, i.e., one day. If this is set to 0, then the whole dataset is treated as a single job. accounting_group_tag: str For LVK users this sets the computing accounting group tag. usetempo2: bool Set this flag to use Tempo2 (if installed) for calculating the signal phase evolution rather than the default LALSuite functions. Returns ------- dag: An object containing a pycondor :class:`pycondor.Dagman` object. """ if "config" in kwargs: configfile = kwargs.pop("config") else: # pragma: no cover parser = ArgumentParser( description=( "A script to create a HTCondor DAG to process GW strain data " "by heterodyning it based on the expected phase evolution for " "a selection of pulsars." 
) ) parser.add_argument( "config", nargs="?", help=("The configuration file for the analysis"), default=None, ) optional = parser.add_argument_group( "Quick setup arguments (this assumes CVMFS open data access)." ) optional.add_argument( "--run", help=( "Set an observing run name for which to heterodyne the data. " "This can be one of {} for which open data exists".format( list(RUNTIMES.keys()) ) ), ) optional.add_argument( "--detector", action="append", help=( "The detector for which the data will be heterodyned. This can " "be used multiple times to specify multiple detectors. If not " "set then all detectors available for a given run will be " "used." ), ) optional.add_argument( "--hwinj", action="store_true", help=( "Set this flag to analyse the continuous hardware injections " "for a given run. No '--pulsar' arguments are required in " "this case." ), ) optional.add_argument( "--samplerate", help=( "Select the sample rate of the data to use. This can either " "be 4k or 16k for data sampled at 4096 or 16384 Hz, " "respectively. The default is 4k, except if running on " "hardware injections for O1 or later, for which 16k will be " "used due to being requred for the highest frequency source. " "For the S5 and S6 runs only 4k data is avaialble from GWOSC, " "so if 16k is chosen it will be ignored." ), default="4k", ) optional.add_argument( "--pulsar", action="append", help=( "The path to a Tempo(2)-style pulsar parameter file, or " "directory containing multiple parameter files, to " "heterodyne. This can be used multiple times to specify " "multiple pulsar inputs. If a pulsar name is given instead " "of a parameter file then an attempt will be made to find the " "pulsar's ephemeris from the ATNF pulsar catalogue, which " "will then be used." ), ) optional.add_argument( "--osg", action="store_true", help=( "Set this flag to run on the Open Science Grid rather than a " "local computer cluster." 
), ) optional.add_argument( "--output", help=( "The location for outputting the heterodyned data. By default " "the current directory will be used. Within this directory, " "subdirectories for each detector will be created." ), default=os.getcwd(), ) optional.add_argument( "--joblength", type=int, help=( "The length of data (in seconds) into which to split the " "individual analysis jobs. By default this is set to 86400, " "i.e., one day. If this is set to 0, then the whole dataset " "is treated as a single job." ), ) optional.add_argument( "--accounting-group-tag", dest="accgroup", help=("For LVK users this sets the computing accounting group tag"), ) optional.add_argument( "--usetempo2", action="store_true", help=( "Set this flag to use Tempo2 (if installed) for calculating " "the signal phase evolution rather than the default LALSuite " "functions." ), ) args = parser.parse_args() if args.config is not None: configfile = args.config else: # use the "Quick setup" arguments configfile = configparser.ConfigParser() run = kwargs.get("run", args.run) if run not in RUNTIMES: raise ValueError(f"Requested run '{run}' is not available") pulsars = [] if kwargs.get("hwinj", args.hwinj): # use hardware injections for the run runtimes = HW_INJ_RUNTIMES segments = HW_INJ_SEGMENTS pulsars.extend(HW_INJ[run]["hw_inj_files"]) # set sample rate to 16k, expect for S runs srate = "16k" if run[0] == "O" else "4k" else: # use pulsars provided runtimes = RUNTIMES segments = ANALYSIS_SEGMENTS pulsar = kwargs.get("pulsar", args.pulsar) if pulsar is None: raise ValueError("No pulsar parameter files have be provided") pulsars.extend(pulsar if isinstance(list) else [pulsar]) # get sample rate srate = ( "16k" if (args.samplerate[0:2] == "16" and run[0] == "O") else "4k" ) detector = kwargs.get("detector", args.detector) if args.detector is None: detectors = list(runtimes[run].keys()) else: detector = detector if isinstance(detector, list) else [detector] detectors = [det for det in detector 
if det in runtimes[run]] if len(detectors) == 0: raise ValueError( f"Provided detectors '{detector}' are not valid for the given run" ) # create required settings configfile["run"] = {} configfile["run"]["basedir"] = kwargs.get("output", args.output) configfile["heterodyne_dag"] = {} configfile["heterodyne_dag"]["submitdag"] = "True" if kwargs.get("osg", args.osg): configfile["heterodyne_dag"]["osg"] = "True" configfile["heterodyne_job"] = {} configfile["heterodyne_job"]["getenv"] = "True" if args.accgroup is not None: configfile["heterodyne_job"]["accounting_group"] = kwargs.get( "accounting_group_tag", args.accgroup ) # add pulsars/pulsar ephemerides configfile["ephemerides"] = {} configfile["ephemerides"]["pulsarfiles"] = str(pulsars) # add heterodyne settings configfile["heterodyne"] = {} configfile["heterodyne"]["detectors"] = str(detectors) configfile["heterodyne"]["starttimes"] = str( {det: runtimes[run][det][0] for det in detectors} ) configfile["heterodyne"]["endtimes"] = str( {det: runtimes[run][det][1] for det in detectors} ) configfile["heterodyne"]["frametypes"] = str( {det: CVMFS_GWOSC_DATA_TYPES[run][srate][det] for det in detectors} ) configfile["heterodyne"]["channels"] = str( {det: CVMFS_GWOSC_FRAME_CHANNELS[run][srate][det] for det in detectors} ) configfile["heterodyne"]["host"] = CVMFS_GWOSC_DATA_SERVER if args.hwinj: configfile["heterodyne"]["includeflags"] = str( {det: segments[run][det]["includesegments"] for det in detectors} ) configfile["heterodyne"]["excludeflags"] = str( {det: segments[run][det]["excludesegments"] for det in detectors} ) else: configfile["heterodyne"]["includeflags"] = str( {det: segments[run][det] for det in detectors} ) configfile["heterodyne"]["outputdir"] = str( { det: os.path.join(kwargs.get("output", args.output), det) for det in detectors } ) configfile["heterodyne"]["overwrite"] = "False" # set whether to use Tempo2 for phase evolution if kwargs.get("usetempo2", args.usetempo2): 
configfile["heterodyne"]["usetempo2"] = "True" # split the analysis into on average day long chunks if kwargs.get("joblength", args.joblength) is None: configfile["heterodyne"]["joblength"] = "86400" else: configfile["heterodyne"]["joblength"] = str( kwargs.get("joblength", args.joblength) ) # merge the resulting files and remove individual files configfile["merge"] = {} configfile["merge"]["merge"] = "True" configfile["merge"]["remove"] = "True" configfile["merge"]["overwrite"] = "True" if isinstance(configfile, configparser.ConfigParser): config = configfile else: config = configparser.ConfigParser() try: config.read_file(open(configfile, "r")) except Exception as e: raise IOError(f"Problem reading configuration file '{configfile}'\n: {e}") return HeterodyneDAGRunner(config, **kwargs) def heterodyne_dag_cli(**kwargs): # pragma: no cover """ Entry point to the cwinpy_heterodyne_dag script. This just calls :func:`cwinpy.heterodyne.heterodyne_dag`, but does not return any objects. """ _ = heterodyne_dag(**kwargs)
2.171875
2
timpani/webserver/webhelpers.py
ollien/Timpani
3
17910
import flask
import functools
import bs4
import urllib.parse
from .. import auth
from .. import themes
from .. import settings

INVALID_PERMISSIONS_FLASH_MESSAGE = "Sorry, you don't have permission to view that page."


def checkForSession():
    """Return the validated session for the current request's "uid" cookie, or None."""
    if "uid" in flask.session:
        session = auth.validateSession(flask.session["uid"])
        if session is not None:
            return session
    return None


def redirectAndSave(path):
    """Redirect to ``path``, remembering the current page path in the session
    (under "donePage") so the user can be returned here afterwards."""
    flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
    return flask.redirect(path)


def canRecoverFromRedirect():
    """Return the page path stored by :func:`redirectAndSave`, or None if unset."""
    if "donePage" in flask.session:
        return flask.session["donePage"]
    return None


# Decorator which checks if a user is logged in and capable of using the specified permissions.
# If redirectPage is equal to None,
# the target function MUST have the arguments authed and authMessage defined.
def checkUserPermissions(redirectPage=None, saveRedirect=True, redirectMessage=INVALID_PERMISSIONS_FLASH_MESSAGE, requiredPermissions=None):
    """Decorator factory guarding a view behind a login and optional permissions.

    requiredPermissions may be a single permission name (str) or an iterable of
    names, all of which must be held by the logged-in user.
    """
    def decorator(function):
        def decorated(*args, **kwargs):
            session = checkForSession()
            if session is not None:
                username = session.user.username
                result = True
                # If we don't have any permissions necessary, a login is enough.
                # Otherwise, we're going to check to make sure that all necessary
                # permissions are in place.
                if requiredPermissions is not None:
                    # FIX: isinstance() instead of type() == str comparison.
                    if isinstance(requiredPermissions, str):
                        result = auth.userHasPermission(username, requiredPermissions)
                    else:
                        for permission in requiredPermissions:
                            if not auth.userHasPermission(username, permission):
                                result = False
                # If all permissions are valid, redirect as needed.
                if result:
                    if redirectPage is not None:
                        return function(*args, **kwargs)
                    else:
                        return function(authed=True, authMessage=redirectMessage, *args, **kwargs)
                else:
                    # We don't want to flash on things like ajax routes, so we use
                    # redirectPage is not None as the flash condition.
                    willFlash = redirectPage is not None
                    return _permissionRedirect(redirectPage, saveRedirect, redirectMessage, willFlash, function, *args, **kwargs)
            else:
                return _permissionRedirect(redirectPage, saveRedirect, redirectMessage, False, function, *args, **kwargs)
        return functools.update_wrapper(decorated, function)
    return decorator


def _permissionRedirect(redirectPage, saveRedirect, redirectMessage, flash, function, *args, **kwargs):
    """Handle an unauthorized request: optionally flash, then either redirect
    (saving the origin page when saveRedirect) or, when no redirect page is
    configured, call the view with authed=False so it can render accordingly."""
    if flash:
        flask.flash(redirectMessage)
    if redirectPage is not None:
        if not saveRedirect:
            return flask.redirect(redirectPage)
        else:
            return redirectAndSave(redirectPage)
    else:
        return function(authed=False, authMessage=redirectMessage, *args, **kwargs)


# Will return all information that is needed to render a post.
# Prevents fragmentation in various post display methods
def getPostsParameters():
    """Collect the blog-wide settings every post template needs."""
    title = settings.getSettingValue("title")
    subtitle = settings.getSettingValue("subtitle")
    displayName = settings.getSettingValue("display_name")
    return {
        "blogTitle": title,
        "blogSubtitle": subtitle,
        "displayName": displayName,
    }


# Renders the theme's template if the theme contains one.
# Otherwise, it renders the default template.
def renderPosts(defaultPath, pageTitle, pageNumber, pageCount, nextPageExists, basePageUrl="", *args, **kwargs):
    theme = themes.getCurrentTheme()
    template = theme["template"]
    postParams = getPostsParameters()
    # Merge postParams and kwargs.
    # Anything in kwargs will overwrite postParams (which is why we use these two lines).
    postParams.update(kwargs)
    kwargs = postParams
    if template is None:
        # FIX: context manager so the file is closed even if read() raises.
        with open(defaultPath, "r") as templateFile:
            template = templateFile.read()
    return flask.render_template_string(template, pageTitle=pageTitle, pageNumber=pageNumber, pageCount=pageCount,
                                        nextPageExists=nextPageExists, basePageUrl=basePageUrl, *args, **kwargs)


def xssFilter(postBody):
    """Escape every tag outside a small whitelist and strip non-whitelisted
    attributes; src/href are kept only when their URL scheme is neither
    ``data`` nor ``javascript``. Returns the filtered HTML as a string."""
    whitelistedTags = ["div", "span", "b", "i", "u", "a", "p", "img", "code", "ul", "li", "h1", "h2",
                       "h3", "h4", "h5", "h6", "pre", "br"]
    # src and href must be checked separately (scheme check below).
    whitelistedAttributes = ["id", "class", "style"]
    soupedBody = bs4.BeautifulSoup(postBody, "html.parser")
    blockedTags = soupedBody.findAll(lambda tag: tag.name not in whitelistedTags)
    # Check if element has any attributes that are not allowed, but only if
    # they are not already in blockedTags. Those will be escaped, anyway.
    blockedAttrs = soupedBody.findAll(lambda tag: len(set(tag.attrs.keys()) - set(whitelistedAttributes)) != 0
                                      and tag.name in whitelistedTags)
    for tag in blockedTags:
        # Beautiful Soup will escape HTML strings on replacement.
        tag.replace_with(str(tag))
    for tag in blockedAttrs:
        allowedAttrs = {}
        for attr in tag.attrs:
            if attr in whitelistedAttributes:
                allowedAttrs[attr] = tag.attrs[attr]
            elif attr == "src" or attr == "href":
                scheme = urllib.parse.urlparse(tag.attrs[attr]).scheme
                if scheme != "data" and scheme != "javascript":
                    allowedAttrs[attr] = tag.attrs[attr]
        tag.attrs = allowedAttrs
    return str(soupedBody)
2.796875
3
bot/plugins/keyboard/__init__.py
grahamtito/TelegramFiletoCloud
0
17911
#!/usr/bin/env python3
# This is bot coded by <NAME> and used for educational purposes only
# https://github.com/AbhijithNT
# Copyright <NAME>
# Thank you https://github.com/pyrogram/pyrogram
from pyrogram.types import (
    InlineKeyboardMarkup,
    InlineKeyboardButton
)


def server_select():
    """Build the inline keyboard for picking an upload destination.

    Each row is described as (button label, callback data) pairs and then
    expanded into pyrogram button objects.
    """
    layout = [
        [("transfer.sh", "transfersh"), ("File.io", "File.io")],
        [("gofile.io", "gofileio"), ("anonymfiles.com", "anonymfiles")],
        [("aparat", "aparat"), ("splus", "splus")],
    ]
    buttons = [
        [InlineKeyboardButton(label, callback_data=data) for label, data in row]
        for row in layout
    ]
    return InlineKeyboardMarkup(buttons)


def completedKeyboard(dl):
    """Keyboard shown after an upload completes: the download URL plus a
    link back to the bot's source repository."""
    download_row = [
        InlineKeyboardButton(
            "DOWNLOAD URL",
            url=f"{dl}"
        )
    ]
    source_row = [
        InlineKeyboardButton(
            "🗂 SOURCE",
            url="https://github.com/AbhijithNT/"
        )
    ]
    return InlineKeyboardMarkup([download_row, source_row])
2.21875
2
django_storymarket/forms.py
jacobian/django-storymarket
1
17912
import logging
import operator

import storymarket

from django import forms
from django.core.cache import cache
from django.conf import settings

from .models import SyncedObject

# Timeout for choices cached from Storymarket: 600 seconds (10 minutes).
# (The previous comment claimed 5 minutes, which contradicted the value.)
CHOICE_CACHE_TIMEOUT = 600

log = logging.getLogger('django_storymarket')

class StorymarketSyncForm(forms.ModelForm):
    """
    A form allowing the choice of sync options for a given model instance.
    """
    class Meta:
        model = SyncedObject
        fields = ['org', 'category', 'tags', 'pricing', 'rights']

    def __init__(self, *args, **kwargs):
        super(StorymarketSyncForm, self).__init__(*args, **kwargs)

        # Override some fields. Tags is left alone; the default is fine.
        self.fields['org'] = forms.TypedChoiceField(label='Org', choices=self._choices('orgs'), coerce=int)
        self.fields['category'] = forms.TypedChoiceField(label='Category', choices=self._choices('subcategories'), coerce=int)
        self.fields['pricing'] = forms.TypedChoiceField(label='Pricing', choices=self._choices('pricing'), coerce=int)
        self.fields['rights'] = forms.TypedChoiceField(label='Rights', choices=self._choices('rights'), coerce=int)

    def _choices(self, manager_name):
        """
        Generate a list of choices from a given storymarket manager type.

        These choices are cached to save API hits, sorted, and an empty
        choice is included. On API failure a single placeholder choice is
        returned (and nothing is cached, so the next request retries).
        """
        cache_key = 'storymarket_choice_cache:%s' % manager_name
        choices = cache.get(cache_key)
        if choices is None:
            manager = getattr(self._api, manager_name)
            try:
                objs = sorted(manager.all(), key=operator.attrgetter('name'))
            # FIX: "except X as e" -- the old "except X, e" form is a syntax
            # error on Python 3 (and "as" works on Python 2.6+ too).
            except storymarket.exceptions.StorymarketError as e:
                # Lazy %-style args instead of eager string interpolation.
                log.exception('Storymarket API call failed: %s', e)
                return [(u'', u'--- Storymarket Unavailable ---')]

            # If there's only a single object, just select it -- don't offer
            # an empty choice. Otherwise, offer an empty.
            if len(objs) == 1:
                empty_choice = []
            else:
                empty_choice = [(u'', u'---------')]

            choices = empty_choice + [(o.id, o.name) for o in objs]
            cache.set(cache_key, choices, CHOICE_CACHE_TIMEOUT)

        return choices

    @property
    def _api(self):
        # A fresh client per access; construction is cheap (no network call here).
        return storymarket.Storymarket(settings.STORYMARKET_API_KEY)

class StorymarketOptionalSyncForm(StorymarketSyncForm):
    """
    Like a StorymarketSyncForm, but with an extra boolean field indicating
    whether syncing should take place or not.
    """
    sync = forms.BooleanField(initial=False, required=False, label="Upload to Storymarket")

    def __init__(self, *args, **kwargs):
        super(StorymarketOptionalSyncForm, self).__init__(*args, **kwargs)

        # Make fields optional; we'll validate them in clean()
        for field in ('org', 'category', 'tags'):
            self.fields[field].required = False

    def clean(self):
        # Only when the user opted in to syncing do the sync fields become
        # required again; mimic Django's own "required" error for them.
        # NOTE(review): 'pricing' and 'rights' are not re-validated here --
        # confirm that is intentional.
        if self.cleaned_data['sync']:
            for field in ('org', 'category', 'tags'):
                if not self.cleaned_data.get(field, None):
                    message = self.fields[field].error_messages['required']
                    self._errors[field] = self.error_class([message])
                    del self.cleaned_data[field]
        return self.cleaned_data
2.1875
2
revisum/snippet.py
medariox/revisum
0
17913
import pickle

from collections import OrderedDict
from datetime import datetime

from .chunk import Chunk
from .review import Review
from .tokenizer import LineTokenizer
from .utils import norm_path
from .database.snippet import maybe_init, Snippet as DataSnippet


class Snippet(object):
    """A reviewed code snippet: an ordered group of diff chunks from one file
    of one pull request, persisted via the DataSnippet ORM model.

    Snippet IDs have the form "<hunk_no>-<file_no>-<pr_number>-<repo_id>"
    (see make_id / pr_number / repo_id).
    """

    def __init__(self, snippet_id, merged, chunks, source, target):
        # chunks must be non-empty and ordered: start/length are derived from
        # the first chunk's start and the last chunk's end.
        self.snippet_id = snippet_id
        self.merged = merged
        self._chunks = chunks
        self._chunk_ids = []  # lazily filled by the chunk_ids property
        self.start = chunks[0].start
        self.length = self.total_len(chunks[0].start, chunks[-1].end)
        self.source_file = norm_path(str(source))
        self.target_file = norm_path(str(target))
        # Caches for line/token views; populated elsewhere (unused in this class).
        self._target_lines = []
        self._source_lines = []
        self._target_tokens = []
        self._source_tokens = []

    def __str__(self):
        # Human-readable dump: chunks separated by a dashed rule.
        return '\n-------------------------\n'.join(self.to_text())

    def to_json(self):
        """Return an OrderedDict with the snippet id, its reviews (as JSON)
        and its chunk ids, suitable for serialization."""
        snippet = OrderedDict()
        snippet['snippet_id'] = self.snippet_id
        reviews = Review.load(self.pr_number(self.snippet_id),
                              self.repo_id(self.snippet_id))
        snippet['reviews'] = [review.to_json() for review in reviews]
        snippet['chunk_ids'] = self.chunk_ids
        return snippet

    @property
    def chunks(self):
        # The Chunk objects making up this snippet.
        return self._chunks

    @property
    def chunk_ids(self):
        # Lazily derive and cache the chunk id list from the chunks.
        if not self._chunk_ids:
            self._chunk_ids = [c.chunk_id for c in self._chunks]
        return self._chunk_ids

    @staticmethod
    def repo_id(snippet_id):
        """Extract the repo id (4th dash-separated field) from a snippet id."""
        return snippet_id.split('-')[3]

    @staticmethod
    def pr_number(snippet_id):
        """Extract the PR number (3rd dash-separated field) from a snippet id."""
        return snippet_id.split('-')[2]

    @classmethod
    def make_id(cls, hunk_no, file_no, pr_number, repo_id):
        """Build a snippet id of the form "hunk-file-pr-repo"."""
        return '-'.join([str(hunk_no), str(file_no), str(pr_number), str(repo_id)])

    @staticmethod
    def total_len(start, end):
        """Inclusive line count between start and end."""
        length = end - start + 1
        return length

    def to_tokens(self):
        """Return each chunk rendered as tokens (one list entry per chunk)."""
        chunks = []
        for chunk in self._chunks:
            chunks.append(chunk.as_tokens())
        return chunks

    def to_text(self):
        """Return each chunk rendered as pretty-printed text."""
        chunks = []
        for chunk in self._chunks:
            chunks.append(chunk.as_text(pretty=True))
        return chunks

    @classmethod
    def as_tokens(cls, code):
        """Tokenize a code string (or list of lines) into one flat token list."""
        if not isinstance(code, list):
            code = [code]
        tokens = LineTokenizer(code).tokens
        lines = []
        for line in tokens:
            lines += line
        return lines

    @classmethod
    def as_elements(cls, code):
        """Like as_tokens, but returns the tokenizer's element view instead."""
        if not isinstance(code, list):
            code = [code]
        tokens = LineTokenizer(code).elements
        lines = []
        for line in tokens:
            lines += line
        return lines

    @classmethod
    def load(cls, snippet_id, path=None):
        """Load one snippet (and its chunks) from the repo database.

        Returns None implicitly when the id is unknown.
        """
        repo_id = cls.repo_id(snippet_id)
        maybe_init(repo_id, path=path)

        db_snippet = DataSnippet.get_or_none(snippet_id=snippet_id)
        if db_snippet:
            chunks = []
            # chunk_ids is stored as a pickled list blob.
            chunk_ids = pickle.loads(db_snippet.chunk_ids)
            for chunk_id in chunk_ids:
                chunks.append(Chunk.load(chunk_id))

            merged = db_snippet.merged
            source = db_snippet.source
            target = db_snippet.target
            snippet = cls(snippet_id, merged, chunks, source, target)

            return snippet

    @classmethod
    def load_all(cls, repo_id, merged_only=False, path=None):
        """Yield every snippet of a repo, newest first (by last_mod).

        When merged_only, restrict to snippets whose PR was merged.
        """
        maybe_init(repo_id, path=path)

        query = DataSnippet.select(
            DataSnippet.snippet_id, DataSnippet.chunk_ids,
            DataSnippet.source, DataSnippet.target)

        if merged_only:
            query = query.where(DataSnippet.merged == 1)

        query = query.order_by(DataSnippet.last_mod.desc())

        for db_snippet in query:
            snippet_id = db_snippet.snippet_id
            chunks = []
            chunk_ids = pickle.loads(db_snippet.chunk_ids)
            for chunk_id in chunk_ids:
                chunks.append(Chunk.load(chunk_id))

            # NOTE(review): `merged` is read here but not included in the
            # select() column list above -- verify the ORM populates it.
            merged = db_snippet.merged
            source = db_snippet.source
            target = db_snippet.target
            snippet = cls(snippet_id, merged, chunks, source, target)
            print('Finished loading snippet with ID: {0}'.format(snippet_id))

            yield snippet

    def _serialize_ids(self):
        # Chunk ids are persisted as a pickled blob.
        return pickle.dumps(self.chunk_ids, pickle.HIGHEST_PROTOCOL)

    def exists(self):
        """True if a row with this snippet id is already in the database."""
        repo_id = self.repo_id(self.snippet_id)
        maybe_init(repo_id)

        snippet = DataSnippet.get_or_none(snippet_id=self.snippet_id)
        return bool(snippet)

    def save(self):
        """Insert or update this snippet's row, refreshing last_mod."""
        repo_id = self.repo_id(self.snippet_id)
        maybe_init(repo_id)

        snippet = DataSnippet.get_or_none(snippet_id=self.snippet_id)
        if snippet:
            (DataSnippet
             .update(snippet_id=self.snippet_id,
                     merged=self.merged,
                     last_mod=datetime.now(),
                     start=self.start,
                     length=self.length,
                     source=self.source_file,
                     target=self.target_file,
                     chunk_ids=self._serialize_ids())
             .where(DataSnippet.snippet_id == self.snippet_id)
             .execute())
        else:
            (DataSnippet
             .create(snippet_id=self.snippet_id,
                     merged=self.merged,
                     last_mod=datetime.now(),
                     start=self.start,
                     length=self.length,
                     source=self.source_file,
                     target=self.target_file,
                     chunk_ids=self._serialize_ids()))
2.4375
2
ainnovation_dcim/workflow/__init__.py
ltxwanzl/ainnovation_dcim
0
17914
<reponame>ltxwanzl/ainnovation_dcim # default_app_config = '.apps.WorkflowConfig'
1.046875
1
examples/api-samples/inc_samples/sample33.py
groupdocs-legacy-sdk/python
0
17915
<reponame>groupdocs-legacy-sdk/python
####<i>This sample will show how to convert several HTML documents to PDF and merge them to one document</i>
# NOTE(review): this file is Python 2 only ("except Exception, e" syntax throughout).
#Import of classes from libraries
import base64
import os
import shutil
import random
import time
from pyramid.renderers import render_to_response
from groupdocs.StorageApi import StorageApi
from groupdocs.AsyncApi import AsyncApi
from groupdocs.ApiClient import ApiClient
from groupdocs.GroupDocsRequestSigner import GroupDocsRequestSigner
from groupdocs.models.JobInfo import JobInfo

# Checking value on null
def IsNotNull(value):
    """True only for a non-None value with non-zero length (e.g. non-empty string)."""
    return value is not None and len(value) > 0

####Set variables and get POST data
def sample33(request):
    """Pyramid view: upload three URLs to GroupDocs, convert+combine them into
    one PDF via an async job, and render an embed iframe for the result.

    Any failure short-circuits to the same template with an 'error' key.
    """
    clientId = request.POST.get('client_id')
    privateKey = request.POST.get('private_key')
    firstUrl = request.POST.get('url1')
    secondUrl = request.POST.get('url2')
    thirdUrl = request.POST.get('url3')
    basePath = request.POST.get('server_type')
    message = ""
    iframe = ""

    # Checking clientId, privateKey and file_Id
    if IsNotNull(clientId) == False or IsNotNull(privateKey) == False:
        return render_to_response('__main__:templates/sample33.pt',
            { 'error' : 'You do not enter all parameters' })

    ####Create Signer, ApiClient and Storage Api objects
    #Create signer object
    signer = GroupDocsRequestSigner(privateKey)
    #Create apiClient object
    apiClient = ApiClient(signer)
    #Create Storage Api object
    storageApi = StorageApi(apiClient)
    #Create Async api object
    asyncApi = AsyncApi(apiClient)
    #Set base Path (default: production API endpoint)
    if basePath == "":
        basePath = "https://api.groupdocs.com/v2.0"
    storageApi.basePath = basePath
    asyncApi.basePath = basePath
    #Create list of URL's
    urlList = [firstUrl, secondUrl, thirdUrl]
    #Create empty list for uploaded files GUID's
    guidList = []
    for url in urlList:
        try:
            #Upload file from its URL; collect the resulting storage GUID.
            upload = storageApi.UploadWeb(clientId, url)
            if upload.status == "Ok":
                #Add GUID of uploaded file to list
                guidList.append(upload.result.guid)
            else:
                raise Exception(upload.error_message)
        except Exception, e:
            return render_to_response('__main__:templates/sample33.pt',
                { 'error' : str(e) })

    ####Make a request to Signature API using clientId
    try:
        #Create list of result document type
        convertType = []
        convertType.append("pdf")
        #Create JobInfo object and set attributes
        #(status "-1" = draft: documents can still be added before starting)
        jobInfo = JobInfo()
        jobInfo.actions = "convert, combine"
        jobInfo.out_formats = convertType
        jobInfo.status = "-1"
        jobInfo.email_results = True
        rand = random.randint(0, 500)
        jobInfo.name = "test" + str(rand)
        #Create job
        createJob = asyncApi.CreateJob(clientId, jobInfo)
        if createJob.status == "Ok":
            for guid in guidList:
                try:
                    #Add all uploaded files to created job
                    addJobDocument = asyncApi.AddJobDocument(clientId, createJob.result.job_id, guid, False)
                    if addJobDocument.status != "Ok":
                        raise Exception(addJobDocument.error_message)
                except Exception, e:
                    return render_to_response('__main__:templates/sample33.pt',
                        { 'error' : str(e) })
            #Change job status ("0" = start processing)
            jobInfo.status = "0"
            try:
                #Update job with new status
                updateJob = asyncApi.UpdateJob(clientId,createJob.result.job_id, jobInfo)
                if updateJob.status == "Ok":
                    # NOTE(review): fixed 5 s sleep assumes the job finishes in
                    # time; there is no polling of job completion here.
                    time.sleep(5)
                    try:
                        #Get result file from job by it's ID
                        getJobDocument = asyncApi.GetJobDocuments(clientId, createJob.result.job_id)
                        if getJobDocument.status == "Ok":
                            fileGuid = getJobDocument.result.outputs[0].guid
                            #Generation of iframe URL using $pageImage->result->guid
                            #iframe to prodaction server
                            if basePath == "https://api.groupdocs.com/v2.0":
                                iframe = 'https://apps.groupdocs.com/document-viewer/embed/' + fileGuid
                            #iframe to dev server
                            elif basePath == "https://dev-api.groupdocs.com/v2.0":
                                iframe = 'https://dev-apps.groupdocs.com/document-viewer/embed/' + fileGuid
                            #iframe to test server
                            elif basePath == "https://stage-api.groupdocs.com/v2.0":
                                iframe = 'https://stage-apps.groupdocs.com/document-viewer/embed/' + fileGuid
                            elif basePath == "http://realtime-api.groupdocs.com":
                                iframe = 'http://realtime-apps.groupdocs.com/document-viewer/embed/' + fileGuid
                            #Sign the viewer URL so it is publicly loadable.
                            iframe = signer.signUrl(iframe)
                        else:
                            raise Exception(getJobDocument.error_message)
                    except Exception, e:
                        return render_to_response('__main__:templates/sample33.pt',
                            { 'error' : str(e) })
                else:
                    raise Exception(updateJob.error_message)
            except Exception, e:
                return render_to_response('__main__:templates/sample33.pt',
                    { 'error' : str(e) })
        else:
            raise Exception(createJob.error_message)
    except Exception, e:
        return render_to_response('__main__:templates/sample33.pt',
            { 'error' : str(e) })

    #If request was successfull - set message variable for template
    return render_to_response('__main__:templates/sample33.pt',
        {
            'userId' : clientId,
            'privateKey' : privateKey,
            'url1' : firstUrl,
            'url2' : secondUrl,
            'url3' : thirdUrl,
            'iframe' : iframe,
            'message' : message
        },
        request=request)
2.59375
3
silver/api/pagination.py
DocTocToc/silver
222
17916
# Copyright (c) 2015 Presslabs SRL # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from rest_framework.pagination import PageNumberPagination from rest_framework.response import Response from rest_framework.settings import api_settings from rest_framework.utils.urls import replace_query_param, remove_query_param class LinkHeaderPagination(PageNumberPagination): page_size = api_settings.PAGE_SIZE or 30 page_size_query_param = 'page_size' max_page_size = 100 def get_last_link(self): url = self.request.build_absolute_uri() page_number = self.page.paginator.num_pages return replace_query_param(url, self.page_query_param, page_number) def get_first_link(self, display_page_query_param=True): url = self.request.build_absolute_uri() if display_page_query_param: page_number = self.page.paginator.validate_number(1) return replace_query_param(url, self.page_query_param, page_number) else: return remove_query_param(url, self.page_query_param) def get_paginated_response(self, data): next_url = self.get_next_link() previous_url = self.get_previous_link() first_url = self.get_first_link() last_url = self.get_last_link() if next_url is not None and previous_url is not None: link = '<{next_url}>; rel="next", <{previous_url}>; rel="prev"' elif next_url is not None: link = '<{next_url}>; rel="next"' elif previous_url is not None: link = '<{previous_url}>; rel="prev"' else: link = '' if link: link += ', ' link += '<{first_url}>; rel="first", <{last_url}> 
rel="last"' link = link.format(next_url=next_url, previous_url=previous_url, first_url=first_url, last_url=last_url) headers = {'Link': link} if link else {} return Response(data, headers=headers)
2.03125
2
process_filing_headers.py
jsfenfen/fec2file
1
17917
import os import fecfile import json import csv import sys from settings import RAW_ELECTRONIC_DIR, MASTER_HEADER_ROW, HEADER_DUMP_FILE START_YEAR = 2019 ERROR_HEADERS = ['path', 'error', ] def readfile(filepath, writer): filename = os.path.basename(filepath) filename = filename.replace(".fec", "") file_number = int(filename) file = open(filepath, encoding = "ISO-8859-1") #file = open(filepath) firstline = file.readline() secondline = file.readline() firstline = firstline.replace("\n", "") raw_results = fecfile.parse_header(firstline) results = raw_results[0] results["filing_number"] = file_number version = raw_results[1] lines = None if len(raw_results)==3: lines = raw_results[1] original_report = results.get('report_id', None) report_number = results.get('report_number', None) if original_report: original_report = original_report.replace("FEC-", "") original_report_number = int(original_report) results["amends"] = original_report_number #print("Found amended filing %s amends %s # %s" % (file_number, original_report_number, report_number)) secondlineparsed = fecfile.parse_line(secondline, version) #print(secondlineparsed) results["form_type"] = secondlineparsed.get('form_type', '') results["filer_committee_id_number"] = secondlineparsed.get('filer_committee_id_number', '') results["committee_name"] = secondlineparsed.get('committee_name', '') results["date_signed"] = secondlineparsed.get('date_signed', '') results["coverage_from_date"] = secondlineparsed.get('coverage_from_date', '') results["coverage_through_date"] = secondlineparsed.get('coverage_through_date', '') writer.writerow(results) if __name__ == '__main__': outfile = open(HEADER_DUMP_FILE, 'w') dw = csv.DictWriter(outfile, fieldnames=MASTER_HEADER_ROW, extrasaction='ignore') dw.writeheader() print("Writing output to %s" % HEADER_DUMP_FILE) errorfile = open("header_read_errors.csv", 'w') error_writer = csv.DictWriter(errorfile, fieldnames=ERROR_HEADERS, extrasaction='ignore') error_writer.writeheader() 
for dirName, subdirList, fileList in os.walk(RAW_ELECTRONIC_DIR, topdown=False): try: directory_year = int(dirName.split("/")[-1][0:4]) if directory_year < START_YEAR: print("Ignoring directory %s" % dirName) continue except ValueError: continue for fname in fileList: if fname.endswith(".fec"): full_path = os.path.join(dirName, fname) #readfile(full_path, dw) #print("Found file %s" % full_path) try: readfile(full_path, dw) except Exception as e: print("error reading %s: %s" % (full_path, e)) error_writer.writerow({ 'path':full_path, 'error':e })
2.859375
3
src/python/collector/urls.py
swqqn/django-collector
3
17918
# -*- coding: utf-8 -*- from django.conf.urls.defaults import patterns, url urlpatterns = patterns('collector.views', url(r'^blob404/$', 'blob404'), url(r'^deleted/$', 'deleted'), url(r'^$', 'create'), url(r'^(?P<uid>\w+)/$', 'delete'), ) # Local Variables: # indent-tabs-mode: nil # End: # vim: ai et sw=4 ts=4
1.5
2
cdisp/core.py
felippebarbosa/cdisp
0
17919
#-*- coding: utf-8 -*- """ Dispersion calculation functions """ import numpy # module for array manipulation import pandas # module for general data analysis import os # module for general OS manipulation import scipy # module for scientific manipulation and analysis ## def set_transverse_mode(data_frame, order_tag, neff_tag = 'neff', complex_neff = False): """ Function for classification of transverse modes For this function to work, the frequency and polarization must the the same. Also the input have to be a Pandas data frame; """ if type(x) <> 'pandas.core.frame.DataFrame': raise(ValueError("The object MUST be a Pandas data frame")) #### No = len(data_frame) # number of modes order_list = np.array(['%1d' % x for x in np.arange(1, No + 1)][::-1]) # list with the transversal order neffs = np.array(data_frame[neff_tag]) # neffs of the modes if complex_neff: neffs = np.abs(np.array([complex(s.replace('i' , 'j ')) for s in neffs])) # for complex neff inds = neffs.argsort(kind = 'mergesort') # neff sorting inds2 = np.array(inds).argsort(kind = 'mergesort') # index resorting (reverse sorting) order_list_sorted = order_list[inds2] # list with the right (sorted) transversal order data_frame[order_tag] = order_list_sorted return data_frame ####### def data_classification(data_frame, wavelength_tag = 'wlength', frequency_tag = 'freq', input_tags = ['eig', 'Ptm', 'Ppml', 'Pcore', 'Pbus'], class_tags = ['polarization', 'ring_bus', 'transverse_mode']): """ Function for filtering quality factor, losses and classification of polarization and transverse modes The input have to be a Pandas data frame; """ ## limits setting pml_thre = 0.5 # threshold for power in the PMLs bus_thre = 1.0 # threshold for power in the bus waveguide relative to the ring tm_thre = 1.0 # threshold for power in the TM mode ## tags for classification [eigenval_tag, TM_tag, pml_tag, ring_tag, bus_tag] = input_tags [pol_tag, ring_bus_tag, order_tag] = class_tags ## list of columns list_col = 
list(data_frame.columns) # columns names Neig = list_col.index(eigenval_tag) # index before list_par = list_col[:Neig] # list of parameters ## create wavelength or frequency colunm if frequency_tag not in list_col: data_frame[frequency_tag] = scipy.constants.c/data_frame[wavelength_tag] if wavelength_tag not in list_col: data_frame[wavelength_tag] = scipy.constants.c/data_frame[frequency_tag] ## setting frequency column as the standard for internal use if frequency_tag not in list_par: list_par.remove(wavelength_tag) list_par.append(frequency_tag) ## PML filtering data_frame = data_frame[data_frame[pml_tag] < pml_thre] # Filter the light that goes to the Pml ## TE and TM modes separation data_frame[pol_tag] = np.array(pandas.cut(np.array(data_frame[TM_tag]), [0, tm_thre, data_frame[TM_tag].max()], labels = ['TE', 'TM'])) list_tag = [pol_tag] ## waveguide and bus separation if bus_tag in list_col: data_frame[ring_bus_tag] = np.array(pandas.cut((np.array(data_frame[bus_tag])/np.array(data_frame[ring_tag]))**(1./4), [0, bus_thre, 1000000], labels = ['ring', 'bus'])) # data_frame[ring_bus_tag] = np.array(pandas.cut(np.array(data_frame[ring_tag]), [0, ring_thre, 100000], labels = ['','ring'])) list_tag = list_tag + [ring_bus_tag] ## transverse mode separation list_group = list_par + list_tag # list to filter the first time data_frame = data_frame.groupby(list_group, as_index = False).apply(set_transverse_mode, order_tag) # transverse order return data_frame, list_group + [order_tag] #### def find_idx_nearest_val(array, value): '''function to find the nearest index matching to the value given''' idx_sorted = np.argsort(array) sorted_array = np.array(array[idx_sorted]) idx = np.searchsorted(sorted_array, value, side="left") if idx >= len(array): idx_nearest = idx_sorted[len(array)-1] elif idx == 0: idx_nearest = idx_sorted[0] else: if abs(value - sorted_array[idx-1]) < abs(value - sorted_array[idx]): idx_nearest = idx_sorted[idx-1] else: idx_nearest = idx_sorted[idx] 
return idx_nearest ### def dispersion_calculation(data_frame, frequency_tag = 'freq', wavelength_tag = 'wlength', neff_tag = 'neff', wlength0 = None): """ functions for dispersion calculation """ ## initial definitions wlength = np.array(data_frame[wavelength_tag]) # wavelength omega = 2*np.pi*np.array(data_frame[frequency_tag]) # angular frequency beta = np.array(data_frame[neff_tag])*omega/scipy.constants.c # propagation constant ## dialing with circular waveguides if 'r0' in data_frame.columns: rad0 = np.array(data_frame['r0']) beta = beta/rad0 else: rad0 = 1.0 ## dispersion calculations beta1 = Df(beta*rad0, omega)/rad0 # beta 1 beta2 = Df(beta1*rad0, omega)/rad0 # beta 2 beta3 = Df(beta2*rad0, omega)/rad0 # beta 3 beta4 = Df(beta3*rad0, omega)/rad0 # beta 4 D = -2*np.pi*scipy.constants.c/wlength*beta2 # D parameter ## set up the wlength for phase matching wlength0 = 0.9e-6 if wlength0 == None: wlength0 = wlength[int(wlength.shape[0]/2)] elif wlength0 < min(wlength): wlength0 = min(wlength) elif wlength0 > max(wlength): wlength0 = max(wlength) omega0 = 2*np.pi*scipy.constants.c/wlength0 ## phase matching calculation idx0 = find_idx_nearest_val(omega, omega0) Dbeta = calculate_Dbeta(beta, idx0) # propagation constant in Dbeta2 = beta2[idx0]*(omega - omega[idx0])**2 + beta4[idx0]/12*(omega - omega[idx0])**4 norm_gain = calculate_gain(Dbeta, 1.0e4) ## outputs n_clad, n_core = 1.0, 3.5 output_tags = ['beta', 'beta1', 'beta2', 'beta3', 'beta4', 'D', 'Dbeta', 'Dbeta_approx', 'beta_norm', 'beta_clad', 'beta_core', 'n_clad', 'n_core', 'gain', 'ng', 'fsr'] outputs = [beta, beta1, beta2, beta3, beta4, D, Dbeta, Dbeta2, beta/scipy.constants.c, n_clad*omega/scipy.constants.c, n_core*omega/scipy.constants.c, n_clad, n_core, norm_gain, beta1*scipy.constants.c, 1/(2*np.pi*rad0*beta1)] for m, output in enumerate(outputs): data_frame[output_tags[m]] = output return data_frame ### def dispersion_analysis(data_frame, list0, frequency_tag = 'freq'): ## list of columns 
list0.remove(frequency_tag) ## remove short data_frames Lmin = 3 data_frame = data_frame.groupby(list0, as_index = False).filter(lambda x: len(x) >= Lmin) ## calculate dispersion data_frame = data_frame.groupby(list0, as_index = False).apply(dispersion_calculation) return data_frame ## def calculate_Dbeta(x, idx0): '''calculate Dbeta for a set of date with equally spaced frequencies''' d = x.shape[0] # array dimension Dx = np.full(d, np.nan) idxm = max(-idx0, idx0 - d + 1) # minimum index idxp = min(idx0 + 1, d - idx0) # maximum index for idx in range(idxm, idxp): xm, xp = np.roll(x, idx), np.roll(x, -idx) Dx[idx0 + idx] = xm[idx0] + xp[idx0] - 2*x[idx0] return Dx ## def calculate_gain(Dbeta, Pn): '''calculate the gain of the 4 wave mixing ** here Pn is normalized such as Pn = gamma*P0''' return np.sqrt(Pn**2 - (Dbeta/2 + Pn)**2)
3
3
appname/predict.py
Lambda-ds-31/build_week_spotify
0
17920
<gh_stars>0 import numpy as np from data_prep import data import spotipy from spotipy.oauth2 import SpotifyClientCredentials from os import getenv client_id = getenv('CLIENT_ID') client_id_secret = getenv('CLIENT_ID_SECRET') manager = SpotifyClientCredentials( client_id = client_id, client_secret= client_id_secret) sp = spotipy.Spotify(client_credentials_manager=manager) def find_knn(track_id, df, k=6): """ Takes in the user input song's track_id, and the prep-ed dataframe. Outputs a list of k-1 nearest neighbors based on audio features """ features = sp.audio_features(track_id)[0] df = data() user_track = np.array( [ features['acousticness'], features['danceability'], features['duration_ms'], features['energy'], features['instrumentalness'], features['liveness'], features['loudness'], features['speechiness'], features['tempo'], features['valence'] ] ) df['distances'] = np.linalg.norm(df - user_track, axis=1) nn_ids = df.sort_values(by='distances').index.to_list()[:k] if nn_ids[0] == track_id: nn_ids = nn_ids[1:] else: nn_ids = nn_ids[:-1] return nn_ids
3.140625
3
tests/cpydiff/modules_array_deletion.py
learnforpractice/micropython-cpp
692
17921
""" categories: Modules,array description: Array deletion not implemented cause: Unknown workaround: Unknown """ import array a = array.array('b', (1, 2, 3)) del a[1] print(a)
2.546875
3
jaysblog/models.py
cRiii/jaysblog
5
17922
# !/usr/bin/env python3 # -*- coding: utf-8 -*- """ @Time : 2019/9/17 15:07 @Author : <NAME> @FileName: models.py @GitHub : https://github.com/cRiii """ from datetime import datetime from werkzeug.security import generate_password_hash, check_password_hash from jaysblog.extensions import db from flask_login import UserMixin class BaseModel(object): # 模型基类 为所有模型添加创建和更新的时间 create_time = db.Column(db.DateTime, default=datetime.utcnow) update_time = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow) class User(BaseModel, db.Model, UserMixin): """ UserMixin表示通过认证的用户 is_authenticated 表示用户已通过认证 返回True 否则False is_active 表示允许用户登陆 返回True 否则False is_anonymous 表示如果当前未用户登陆(匿名用户) 返回True 否则False get_id() 以unicode形式返回用户唯一标识 """ __tablename__ = 'b_users' id = db.Column(db.Integer, primary_key=True) # 用户id nick_name = db.Column(db.String(32), nullable=False) # 用户名 password_hash = db.Column(db.String(128), nullable=False) # 用户密码 mobile = db.Column(db.String(11), unique=True) # 手机号码 email = db.Column(db.String(64), unique=True, nullable=True) # 邮箱 desc = db.Column(db.Text) # 个人简介 location = db.Column(db.String(128)) # 地址 avatar_url = db.Column(db.String(256)) # 用户头像路径 is_admin = db.Column(db.Boolean, default=False) # 是否为管理员 last_login_time = db.Column(db.DateTime, default=datetime.utcnow) # 最后一次登陆时间 is_delete = db.Column(db.Integer, default=1) # 用户是否被删除 1正常 0被删除 gender = db.Column( db.Enum( 'MAN', # 男 'WOMAN' # 女 ), default='MAN' ) @property def password(self): raise AttributeError(u'该属性不可读') @password.setter def password(self, value): """ generate_password_hash(password,method='pbkdf2:sha256',salt_length=8) method指定计算散列值的方法 salt_length 指定盐长度 """ self.password_hash = generate_password_hash(value) def check_password(self, password): """ 接收散列值 和 密码作比较 返回布尔类型 check_password_hash(<PASSWORD>,password) """ return check_password_hash(self.password_hash, password) def to_dict(self): res_dict = { "id": self.id, "nick_name": self.nick_name, "email": self.email, "desc": 
self.desc, "avatar_url": self.avatar_url, "gender": self.gender, "is_admin": self.is_admin, } return res_dict class Post(BaseModel, db.Model): __tablename__ = 'b_posts' id = db.Column(db.Integer, primary_key=True) # 文章编号 post_title = db.Column(db.String(256), nullable=False) # 文章标题 post_user_id = db.Column(db.Integer, nullable=False) # 创建文章用户 post_digest = db.Column(db.String(512), nullable=True) # 文章简介 post_content = db.Column(db.Text, nullable=False) # 文章内容 post_clicks = db.Column(db.Integer, default=0) # 点击量 post_like_num = db.Column(db.Integer, default=0) # 点赞数量 post_index_image_url = db.Column(db.String(256)) # 主页面列表图片地址 post_status = db.Column(db.Integer, default=1) # 文章状态 post_can_comment = db.Column(db.Integer, default=1) # 当前文章是否可以被评论 post_comments = db.relationship('Comment', backref='comment_post') # 当前文章的评论 post_category = db.relationship('Category', back_populates='cg_posts') post_category_id = db.Column(db.Integer, db.ForeignKey('b_category.id'), nullable=False) # 文章类型 def get_comment_length(self): comments = [] if self.post_comments is not []: for comment in self.post_comments: if comment.comment_status == 1: comments.append(comment) return len(comments) def to_dict(self): res_dict = { "id": self.id, "post_title": self.post_title, "post_user_id": self.post_user_id, "post_digest": self.post_digest if self.post_digest else "", "post_clicks": self.post_clicks, "post_like_num": self.post_like_num, "post_index_image_url": self.post_index_image_url if self.post_index_image_url else "", "post_category": self.post_category.to_dict() if self.post_category else None, "post_comments_count": self.get_comment_length(), "post_create_time": self.create_time, "post_update_time": self.update_time, } return res_dict def to_dict_details(self): res_dict = { "id": self.id, "post_title": self.post_title, "post_user_id": self.post_user_id, "post_content": self.post_content, "post_clicks": self.post_clicks, "post_like_num": self.post_like_num, "post_can_comment": 
self.post_can_comment, "post_create_time": self.create_time, "post_category": self.post_category.to_dict() if self.post_category else None, "post_comments_count": self.get_comment_length(), } return res_dict class Category(BaseModel, db.Model): __tablename__ = 'b_category' id = db.Column(db.Integer, primary_key=True) # 分类编号 cg_name = db.Column(db.String(64), nullable=False, unique=True) # 分类名称 cg_posts = db.relationship('Post', back_populates='post_category') # 分类下的文章 def to_dict(self): res_dict = { "id": self.id, "cg_name": self.cg_name, "cg_posts_count": len(self.cg_posts) if self.cg_posts else 0 } return res_dict class Comment(BaseModel, db.Model): __tablename__ = 'b_comments' id = db.Column(db.Integer, primary_key=True) # 评论编号 comment_user_id = db.Column(db.Integer, nullable=False) # 评论用户ID comment_content = db.Column(db.Text, nullable=False) # 评论内容 comment_from_admin = db.Column(db.Integer, default=0) # 是否为管理员评论 comment_status = db.Column(db.Integer, default=0) # 评论是否通过审核 -1不可用 0:审核中 1:审核通过 comment_post_id = db.Column(db.Integer, db.ForeignKey('b_posts.id'), nullable=False) # 当前评论属于的文章id comment_reply = db.relationship('Reply', backref='reply_comment') # 当前评论下的回复 def to_dict(self): comment_replies = [] if self.comment_reply is not []: for reply in self.comment_reply: if reply.reply_status == 1: comment_replies.append(reply.to_dict()) user = User.query.filter_by(id=self.comment_user_id).first() res_dict = { "id": self.id, "comment_user_name": user.nick_name, "comment_user_avatar_url": user.avatar_url, "comment_content": self.comment_content, "comment_from_admin": user.is_admin, "comment_post_id": self.comment_post_id, "comment_replies": comment_replies, "comment_create_time": self.create_time, "comment_update_time": self.update_time, } return res_dict class Reply(BaseModel, db.Model): __tablename__ = 'b_reply' id = db.Column(db.Integer, primary_key=True) # 回复的id reply_from_user = db.Column(db.String(32), nullable=False) # 谁回复的 reply_to_user = 
db.Column(db.String(32), nullable=False) # 回复给谁的 reply_content = db.Column(db.Text, nullable=False) # 回复的内容 reply_status = db.Column(db.Integer, default=0) # 回复是否通过审核 -1不可用 0:审核中 1:审核通过 reply_comment_id = db.Column(db.Integer, db.ForeignKey('b_comments.id'), nullable=False) # 当前回复属于的评论id def to_dict(self): user = User.query.filter_by(nick_name=self.reply_from_user).first() res_dict = { "id": self.id, "reply_from_user": self.reply_from_user, "reply_to_user": self.reply_to_user, "reply_content": self.reply_content, "reply_comment_id": self.reply_comment_id, "reply_create_time": self.create_time, "reply_update_time": self.update_time, "reply_user_is_admin": user.is_admin, "reply_user_avatar_url": user.avatar_url, } return res_dict class Journey(BaseModel, db.Model): __tablename__ = 'b_journey' id = db.Column(db.Integer, primary_key=True) # 历程id journey_title = db.Column(db.String(32), nullable=False) # 历程标题 journey_desc = db.Column(db.Text, nullable=False) # 历程详情 journey_time = db.Column(db.DateTime, default=datetime.utcnow) # 历程时间 def to_dict(self): res_dict = { "id": self.id, "journey_title": self.journey_title, "journey_desc": self.journey_desc, "journey_time": self.journey_time } return res_dict class MessageBoard(BaseModel, db.Model): __tablename__ = 'b_board' id = db.Column(db.Integer, primary_key=True) # 留言板id board_user = db.Column(db.String(32), nullable=False) # 留言用户 board_desc = db.Column(db.Text, nullable=False) # 留言内容 board_status = db.Column(db.Integer, nullable=False, default=0) # 留言状态 -1不可用 0:审核中 1:审核通过 board_email = db.Column(db.String(50), nullable=False) # 留言回复邮箱 def to_dict(self): res_dict = { "id": self.id, "board_user": self.board_user, "board_desc": self.board_desc, "board_status": self.board_status, "board_create_time": self.create_time, "board_update_time": self.update_time, "board_email": self.board_email, } return res_dict class UsersLikePosts(BaseModel, db.Model): __tablename__ = 'b_users_like_posts' id = db.Column(db.Integer, 
primary_key=True) # 主键 user_id = db.Column(db.Integer, nullable=False) user_like_post_id = db.Column(db.Integer, nullable=False) def to_dict(self): res_dict = { "id": self.id, "user_id": self.user_id, "user_like_post_id": self.user_like_post_id, } return res_dict
2.171875
2
112_Path Sum.py
Alvin1994/leetcode-python3-
0
17923
<filename>112_Path Sum.py # Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: def hasPathSum(self, root: 'TreeNode', sum: 'int') -> 'bool': if not root: return False def helper(node,val): if not node: return False val -= node.val if node.left is None and node.right is None: return val == 0 return helper(node.left, val) or helper(node.right, val) return helper(root,sum)
3.703125
4
telemanom/_globals.py
tonyzeng2019/telemanom
0
17924
#!/usr/bin/env python # coding: utf-8 import yaml import json import sys import os sys.path.append('../venv/lib/python3.5/site-packages') from elasticsearch import Elasticsearch sys.path.append('../telemanom') class Config: '''Loads parameters from config.yaml into global object''' def __init__(self, path_to_config): if os.path.isfile(path_to_config): pass else: path_to_config = '../%s' %path_to_config setattr(self, "path_to_config", path_to_config) dictionary = None with open(path_to_config, "r") as f: dictionary = yaml.load(f.read()) try: for k,v in dictionary.items(): setattr(self, k, v) except: for k,v in dictionary.iteritems(): setattr(self, k, v) def build_group_lookup(self, path_to_groupings): channel_group_lookup = {} with open(path_to_groupings, "r") as f: groupings = json.loads(f.read()) for subsystem in groupings.keys(): for subgroup in groupings[subsystem].keys(): for chan in groupings[subsystem][subgroup]: channel_group_lookup[chan["key"]] = {} channel_group_lookup[chan["key"]]["subsystem"] = subsystem channel_group_lookup[chan["key"]]["subgroup"] = subgroup return channel_group_lookup
2.3125
2
nanome/_internal/_network/_commands/_serialization/_open_url.py
rramji/nanome-lib
0
17925
<gh_stars>0 from nanome._internal._util._serializers import _StringSerializer from nanome._internal._util._serializers import _TypeSerializer class _OpenURL(_TypeSerializer): def __init__(self): self.string = _StringSerializer() def version(self): return 0 def name(self): return "OpenURL" def serialize(self, version, value, context): context.write_using_serializer(self.string, value) def deserialize(self, version, context): raise NotImplementedError
1.929688
2
questoes/questao1.py
raulbarcelos/Lista-de-Exercicios-PO
0
17926
<reponame>raulbarcelos/Lista-de-Exercicios-PO print("********************************") print("********** QUESTÃO 01 **********") print("********************************") print("******** <NAME> *********") print() print("Ol<NAME>")
1.960938
2
homeassistant/components/hue/v2/helpers.py
MrDelik/core
30,023
17927
"""Helper functions for Philips Hue v2.""" from __future__ import annotations def normalize_hue_brightness(brightness: float | None) -> float | None: """Return calculated brightness values.""" if brightness is not None: # Hue uses a range of [0, 100] to control brightness. brightness = float((brightness / 255) * 100) return brightness def normalize_hue_transition(transition: float | None) -> float | None: """Return rounded transition values.""" if transition is not None: # hue transition duration is in milliseconds and round them to 100ms transition = int(round(transition, 1) * 1000) return transition def normalize_hue_colortemp(colortemp: int | None) -> int | None: """Return color temperature within Hue's ranges.""" if colortemp is not None: # Hue only accepts a range between 153..500 colortemp = min(colortemp, 500) colortemp = max(colortemp, 153) return colortemp
3.03125
3
incapsula/__init__.py
zanachka/incapsula-cracker-py3
0
17928
from .errors import IncapBlocked, MaxRetriesExceeded, RecaptchaBlocked from .parsers import ResourceParser, WebsiteResourceParser, IframeResourceParser from .session import IncapSession
0.878906
1
sdk/python/pulumi_civo/get_network.py
dirien/pulumi-civo
3
17929
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities __all__ = [ 'GetNetworkResult', 'AwaitableGetNetworkResult', 'get_network', ] @pulumi.output_type class GetNetworkResult: """ A collection of values returned by getNetwork. """ def __init__(__self__, default=None, id=None, label=None, name=None, region=None): if default and not isinstance(default, bool): raise TypeError("Expected argument 'default' to be a bool") pulumi.set(__self__, "default", default) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if label and not isinstance(label, str): raise TypeError("Expected argument 'label' to be a str") pulumi.set(__self__, "label", label) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if region and not isinstance(region, str): raise TypeError("Expected argument 'region' to be a str") pulumi.set(__self__, "region", region) @property @pulumi.getter def default(self) -> bool: """ If is the default network. """ return pulumi.get(self, "default") @property @pulumi.getter def id(self) -> Optional[str]: """ A unique ID that can be used to identify and reference a Network. """ return pulumi.get(self, "id") @property @pulumi.getter def label(self) -> Optional[str]: """ The label used in the configuration. """ return pulumi.get(self, "label") @property @pulumi.getter def name(self) -> str: """ The name of the network. 
""" return pulumi.get(self, "name") @property @pulumi.getter def region(self) -> Optional[str]: return pulumi.get(self, "region") class AwaitableGetNetworkResult(GetNetworkResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetNetworkResult( default=self.default, id=self.id, label=self.label, name=self.name, region=self.region) def get_network(id: Optional[str] = None, label: Optional[str] = None, region: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkResult: """ Use this data source to access information about an existing resource. :param str id: The unique identifier of an existing Network. :param str label: The label of an existing Network. :param str region: The region of an existing Network. """ __args__ = dict() __args__['id'] = id __args__['label'] = label __args__['region'] = region if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('civo:index/getNetwork:getNetwork', __args__, opts=opts, typ=GetNetworkResult).value return AwaitableGetNetworkResult( default=__ret__.default, id=__ret__.id, label=__ret__.label, name=__ret__.name, region=__ret__.region)
1.976563
2
Hard/longest_valid_parentheses.py
BrynjarGeir/LeetCode
0
17930
<gh_stars>0 from collections import deque class Solution: def longestValidParentheses(self, s: str) -> int: if len(s) == 1 or s == '': return 0 opened = deque() for i,p in enumerate(s): if p == '(': opened.append(i) else: if opened: if s[opened[-1]] == '(': opened.pop() else: opened.append(i) else: opened.append(i) if not opened: return len(s) else: longest = 0 a, b = len(s), 0 while opened: b = opened.pop() longest = max(longest, a-b-1) a = b longest = max(longest, a) return longest
3.09375
3
IMFlask/config.py
iml1111/IMFlask
2
17931
''' Flask Application Config ''' import os from logging.config import dictConfig BASEDIR = os.path.abspath(os.path.dirname(__file__)) class Config: '''공통 Config''' JWT_SECRET_KEY = os.environ.get('FLASK_JWT_SECRET_KEY') # test only TEST_ACCESS_TOKEN = os.environ.get('FLASK_TEST_ACCESS_TOKEN') ADMIN_ID = os.environ.get('FLASK_ADMIN_ID', "iml") ADMIN_PW = os.environ.get('FLASK_ADMIN_PW', "iml") # DB_PROXY: basic, mysql, mongodb, redis, all DB_PROXY = os.environ.get('FLASK_DB_PROXY') if DB_PROXY in ['mysql', 'all']: MYSQL_URI = os.environ.get('FLASK_MYSQL_URI') if DB_PROXY in ['mongodb', 'all']: MONGO_URI = os.environ.get('FLASK_MONGO_URI') MONGO_DB_NAME = os.environ.get('FLASK_MONGO_DB_NAME') if DB_PROXY == ['reids', 'all']: REDIS_HOST = os.environ.get('FLASK_REDIS_HOST') REDIS_PORT = os.environ.get('FLASK_REDIS_PORT') REDIS_PW = os.environ.get('FLASK_REDIS_PW') ALLOWED_EXTENSION = {'txt', 'docs', 'md', 'hwp', 'ppt', 'pptx'} SLOW_API_TIME = 0.5 @staticmethod def init_app(app): '''전역 init_app 함수''' class TestingConfig(Config): '''Test 전용 Config''' DEBUG = True TESTING = True class DevelopmentConfig(Config): '''개발 환경 전용 Config''' DEBUG = True TESTING = False class ProductionConfig(Config): ''' 상용환경 전용 Config''' DEBUG = False TESTING = False @staticmethod def init_app(app): '''로거 등록 및 설정''' dictConfig({ 'version': 1, 'formatters': { 'default': { 'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s', } }, 'handlers': { 'file': { 'level': 'INFO', 'class': 'logging.handlers.RotatingFileHandler', 'filename': './server_error.log', 'maxBytes': 1024 * 1024 * 5, 'backupCount': 5, 'formatter': 'default', }, }, 'root': { 'level': 'INFO', 'handlers': ['file'] } }) config = { 'development':DevelopmentConfig, 'production':ProductionConfig, 'testing':TestingConfig, 'default':DevelopmentConfig, }
1.953125
2
cnn/conv_average_pooling.py
nforesperance/Tensorflow-Keras
1
17932
# example of average pooling
from numpy import asarray
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import AveragePooling2D

# define input data: an 8x8 single-channel image whose middle two
# columns are 1s (a vertical line), everything else 0
data = [[0, 0, 0, 1, 1, 0, 0, 0],
        [0, 0, 0, 1, 1, 0, 0, 0],
        [0, 0, 0, 1, 1, 0, 0, 0],
        [0, 0, 0, 1, 1, 0, 0, 0],
        [0, 0, 0, 1, 1, 0, 0, 0],
        [0, 0, 0, 1, 1, 0, 0, 0],
        [0, 0, 0, 1, 1, 0, 0, 0],
        [0, 0, 0, 1, 1, 0, 0, 0]]
data = asarray(data)
# reshape to the (samples, rows, cols, channels) layout Conv2D expects
data = data.reshape(1, 8, 8, 1)
# create model: one 3x3 conv filter followed by default 2x2 average pooling
model = Sequential()
model.add(Conv2D(1, (3,3), activation='relu', input_shape=(8, 8, 1)))
model.add(AveragePooling2D())
# summarize model
model.summary()
# define a vertical line detector: a 3x3 kernel with 1s down the middle
# column, written in the (rows, cols, in_channels, filters) weight layout
detector = [[[[0]],[[1]],[[0]]],
            [[[0]],[[1]],[[0]]],
            [[[0]],[[1]],[[0]]]]
# weights are [kernel, bias]; bias is zero
weights = [asarray(detector), asarray([0.0])]
# store the weights in the model (replaces the random initialization)
model.set_weights(weights)
# apply filter to input data
yhat = model.predict(data)
# enumerate rows of the pooled feature map
for r in range(yhat.shape[1]):
    # print each column in the row
    print([yhat[0,r,c,0] for c in range(yhat.shape[2])])
3.59375
4
acquisitions/models.py
18F/acqstackdb
2
17933
from django.db import models
from django.core.validators import RegexValidator, ValidationError
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User

from smart_selects.db_fields import ChainedForeignKey, ChainedManyToManyField
from ordered_model.models import OrderedModel


# Create your models here.
class Agency(models.Model):
    '''A federal agency, with its various government identifier codes.'''
    name = models.CharField(max_length=100, blank=False)
    abbreviation = models.CharField(max_length=10, null=True, blank=True)
    department = models.CharField(max_length=100, null=True, blank=True)
    omb_agency_code = models.IntegerField(null=True, blank=True)
    omb_bureau_code = models.IntegerField(null=True, blank=True)
    treasury_agency_code = models.IntegerField(null=True, blank=True)
    cgac_agency_code = models.IntegerField(null=True, blank=True)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name_plural = "Agencies"
        ordering = ('name',)


class Subagency(models.Model):
    '''A subagency belonging to an Agency.'''
    name = models.CharField(max_length=100, blank=False)
    abbreviation = models.CharField(max_length=10, null=True, blank=True)
    agency = models.ForeignKey(Agency)

    def __str__(self):
        return "%s - %s" % (self.agency, self.name)

    class Meta:
        ordering = ('name',)
        verbose_name_plural = "Subagencies"


class ContractingOffice(models.Model):
    '''An office responsible for contracting actions.'''
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = "Contracting Office"
        verbose_name_plural = "Contracting Offices"


class ContractingOfficer(models.Model):
    '''A contracting officer attached to a ContractingOffice.'''
    name = models.CharField(max_length=100)
    contracting_office = models.ForeignKey(ContractingOffice)

    def __str__(self):
        return "%s - %s" % (self.name, self.contracting_office)

    class Meta:
        ordering = ('name',)
        verbose_name = "Contracting Officer"
        verbose_name_plural = "Contracting Officers"


class COR(models.Model):
    '''A Contracting Officer Representative.'''
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name',)
        verbose_name = "Contracting Officer Representative"
        verbose_name_plural = "Contracting Officer Representatives"


# Is the acquisition internal or external?
class Track(models.Model):
    name = models.CharField(max_length=50)

    def __str__(self):
        return "%s" % (self.name)


class Stage(OrderedModel):
    '''An ordered stage of the acquisition workflow.'''
    name = models.CharField(max_length=50)
    wip_limit = models.IntegerField(default=0, verbose_name="WIP Limit")

    def __str__(self):
        return "%s" % (self.name)

    class Meta(OrderedModel.Meta):
        pass


class Actor(models.Model):
    '''A person or role that performs workflow steps.'''
    name = models.CharField(max_length=200, blank=False)

    def __str__(self):
        return "%s" % (self.name)


class Step(models.Model):
    '''One actor's step within a stage, ordered per-track via the
    StepTrackThroughModel join table.'''
    actor = models.ForeignKey(
        Actor,
        blank=False
    )
    track = models.ManyToManyField(
        Track,
        blank=False,
        through="StepTrackThroughModel"
    )
    stage = models.ForeignKey(
        Stage,
        blank=False
    )

    def __str__(self):
        return "%s - %s" % (self.stage, self.actor,)

    class Meta:
        ordering = ('steptrackthroughmodel__order',)


class StepTrackThroughModel(OrderedModel):
    '''Join table giving each (track, step) pair an order and WIP limit.'''
    track = models.ForeignKey(Track)
    step = models.ForeignKey(Step)
    wip_limit = models.IntegerField(default=0, verbose_name="WIP Limit")
    order_with_respect_to = 'track'

    class Meta(OrderedModel.Meta):
        unique_together = ('track', 'step')
        ordering = ('track', 'order')


class Vendor(models.Model):
    '''A vendor identified by a 9-digit DUNS number.'''
    name = models.CharField(max_length=200, blank=False)
    email = models.EmailField(blank=False)
    # Raw string for the regex (was a plain string containing '\d').
    duns = models.CharField(max_length=9, blank=False, validators=[
        RegexValidator(regex=r'^\d{9}$',
                       message="DUNS number must be 9 digits")
    ])

    def __str__(self):
        return self.name


class Role(models.Model):
    '''A lead role (product/acquisition/technical) held by a user.'''
    description = models.CharField(max_length=100, choices=(
        ('P', 'Product Lead'),
        ('A', 'Acquisition Lead'),
        ('T', 'Technical Lead')
    ), null=True, blank=True)
    teammate = models.ForeignKey(User, blank=True, null=True)

    def __str__(self):
        return "%s - %s" % (self.get_description_display(), self.teammate)


class Acquisition(models.Model):
    '''An acquisition task tracked through a Track of workflow Steps.'''

    SET_ASIDE_CHOICES = (
        ("AbilityOne", "AbilityOne"),
        ("HUBZone Small Business", "HUBZone Small Business"),
        ("Multiple Small Business Categories",
         "Multiple Small Business Categories"),
        ("Other Than Small", "Other Than Small"),
        ("Service Disabled Veteran-owned Small Business",
         "Service Disabled Veteran-owned Small Business"),
        ("Small Business", "Small Business"),
        ("Small Disadvantaged Business (includes Section 8a)",
         "Small Disadvantaged Business (includes Section 8a)"),
        ("To Be Determined-BPA", "To Be Determined-BPA"),
        ("To Be Determined-IDIQ", "To Be Determined-IDIQ"),
        ("Veteran-Owned Small Business", "Veteran-Owned Small Business"),
        ("Woman-Owned Small Business", "Woman-Owned Small Business"),
    )

    CONTRACT_TYPE_CHOICES = (
        ("Cost No Fee", "Cost No Fee"),
        ("Cost Plus Award Fee", "Cost Plus Award Fee"),
        ("Cost Plus Fixed Fee", "Cost Plus Fixed Fee"),
        ("Cost Plus Incentive Fee", "Cost Plus Incentive Fee"),
        ("Cost Sharing", "Cost Sharing"),
        ("Fixed Price Award Fee", "Fixed Price Award Fee"),
        ("Fixed Price Incentive", "Fixed Price Incentive"),
        ("Fixed Price Labor Hours", "Fixed Price Labor Hours"),
        ("Fixed Price Level of Effort", "Fixed Price Level of Effort"),
        ("Fixed Price Time and Materials", "Fixed Price Time and Materials"),
        ("Fixed Price with Economic Price Adjustment",
         "Fixed Price with Economic Price Adjustment"),
        ("Fixed Price", "Fixed Price"),
        ("Interagency Agreement", "Interagency Agreement"),
        ("Labor Hours and Time and Materials",
         "Labor Hours and Time and Materials"),
        ("Labor Hours", "Labor Hours"),
        ("Order Dependent", "Order Dependent"),
        ("Time and Materials", "Time and Materials"),
    )

    COMPETITION_STRATEGY_CHOICES = (
        ("A/E Procedures", "A/E Procedures"),
        ("Competed under SAP", "Competed under SAP"),
        ("Competitive Delivery Order Fair Opportunity Provided",
         "Competitive Delivery Order Fair Opportunity Provided"),
        ("Competitive Schedule Buy", "Competitive Schedule Buy"),
        ("Fair Opportunity", "Fair Opportunity"),
        ("Follow On to Competed Action (FAR 6.302-1)",
         "Follow On to Competed Action (FAR 6.302-1)"),
        ("Follow On to Competed Action", "Follow On to Competed Action"),
        ("Full and Open after exclusion of sources (competitive small business set-asides, competitive 8a)",
         "Full and Open after exclusion of sources (competitive small business set-asides, competitive 8a)"),
        ("Full and Open Competition Unrestricted",
         "Full and Open Competition Unrestricted"),
        ("Full and Open Competition", "Full and Open Competition"),
        ("Limited Sources FSS Order", "Limited Sources FSS Order"),
        ("Limited Sources", "Limited Sources"),
        ("Non-Competitive Delivery Order", "Non-Competitive Delivery Order"),
        ("Not Available for Competition (e.g., 8a sole source, HUBZone & SDVOSB sole source, Ability One, all > SAT)",
         "Not Available for Competition (e.g., 8a sole source, HUBZone & SDVOSB sole source, Ability One, all > SAT)"),
        ("Not Competed (e.g., sole source, urgency, etc., all > SAT)",
         "Not Competed (e.g., sole source, urgency, etc., all > SAT)"),
        ("Not Competed under SAP (e.g., Urgent, Sole source, Logical Follow-On, 8a, HUBZone & SDVOSB sole source, all < SAT)",
         "Not Competed under SAP (e.g., Urgent, Sole source, Logical Follow-On, 8a, HUBZone & SDVOSB sole source, all < SAT)"),
        ("Partial Small Business Set-Aside",
         "Partial Small Business Set-Aside"),
        ("Set-Aside", "Set-Aside"),
        ("Sole Source", "Sole Source"),
    )

    PROCUREMENT_METHOD_CHOICES = (
        ("Ability One", "Ability One"),
        ("Basic Ordering Agreement", "Basic Ordering Agreement"),
        ("Blanket Purchase Agreement-BPA", "Blanket Purchase Agreement-BPA"),
        ("BPA Call", "BPA Call"),
        ("Call Order under GSA Schedules BPA",
         "Call Order under GSA Schedules BPA"),
        ("Commercial Item Contract", "Commercial Item Contract"),
        ("Contract modification", "Contract modification"),
        ("Contract", "Contract"),
        ("Definitive Contract other than IDV",
         "Definitive Contract other than IDV"),
        ("Definitive Contract", "Definitive Contract"),
        ("Government-wide Agency Contract-GWAC",
         "Government-wide Agency Contract-GWAC"),
        ("GSA Schedule Contract", "GSA Schedule Contract"),
        ("GSA Schedule", "GSA Schedule"),
        ("GSA Schedules Program BPA", "GSA Schedules Program BPA"),
        ("Indefinite Delivery Indefinite Quantity-IDIQ",
         "Indefinite Delivery Indefinite Quantity-IDIQ"),
        ("Indefinite Delivery Vehicle (IDV)",
         "Indefinite Delivery Vehicle (IDV)"),
        ("Indefinite Delivery Vehicle Base Contract",
         "Indefinite Delivery Vehicle Base Contract"),
        ("Multi-Agency Contract", "Multi-Agency Contract"),
        ("Negotiated", "Negotiated"),
        ("Order under GSA Federal Supply Schedules Program",
         "Order under GSA Federal Supply Schedules Program"),
        ("Order under GSA Schedules Program BPA",
         "Order under GSA Schedules Program BPA"),
        ("Order under GSA Schedules Program",
         "Order under GSA Schedules Program"),
        ("Order under IDV", "Order under IDV"),
        ("Purchase Order", "Purchase Order"),
        ("Sealed Bid", "Sealed Bid"),
    )

    subagency = models.ForeignKey(Subagency)
    task = models.CharField(max_length=100, blank=False)
    description = models.TextField(max_length=500, null=True, blank=True)
    track = models.ForeignKey(
        Track,
        blank=False,
        related_name="%(class)s_track"
    )
    # step choices are narrowed to those belonging to the selected track
    step = ChainedForeignKey(
        Step,
        chained_field="track",
        chained_model_field="track",
        blank=False
    )
    dollars = models.DecimalField(decimal_places=2, max_digits=14,
                                  null=True, blank=True)
    period_of_performance = models.DateField(null=True, blank=True)
    product_owner = models.CharField(max_length=50, null=True, blank=True)
    roles = models.ManyToManyField(Role, blank=True)
    contracting_officer = models.ForeignKey(ContractingOfficer,
                                            null=True, blank=True)
    contracting_officer_representative = models.ForeignKey(COR,
                                                           null=True,
                                                           blank=True)
    contracting_office = models.ForeignKey(ContractingOffice,
                                           null=True, blank=True)
    vendor = models.ForeignKey(Vendor, null=True, blank=True)
    rfq_id = models.IntegerField(null=True, blank=True,
                                 verbose_name="RFQ ID")
    naics = models.IntegerField(
        null=True,
        blank=True,
        verbose_name="NAICS Code"
    )
    set_aside_status = models.CharField(max_length=100, null=True,
                                        blank=True,
                                        choices=SET_ASIDE_CHOICES)
    amount_of_competition = models.IntegerField(null=True, blank=True)
    contract_type = models.CharField(max_length=100, null=True,
                                     blank=True,
                                     choices=CONTRACT_TYPE_CHOICES)
    competition_strategy = models.CharField(
        max_length=100,
        null=True,
        blank=True,
        choices=COMPETITION_STRATEGY_CHOICES)
    procurement_method = models.CharField(
        max_length=100,
        null=True,
        blank=True,
        choices=PROCUREMENT_METHOD_CHOICES)
    award_date = models.DateField(null=True, blank=True)
    delivery_date = models.DateField(null=True, blank=True)

    def clean(self):
        '''Validate that the chosen step actually belongs to the chosen
        track. (Leftover debug print() calls removed.)'''
        if self.track not in self.step.track.all():
            raise ValidationError(_('Tracks are not equal.'))

    def __str__(self):
        return "%s (%s)" % (self.task, self.subagency)


class Evaluator(models.Model):
    '''A person evaluating one or more acquisitions.'''
    name = models.CharField(max_length=100)
    acquisition = models.ManyToManyField(Acquisition)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name',)


class Release(models.Model):
    '''A release associated with an acquisition.'''
    acquisition = models.ForeignKey(Acquisition)

    def __str__(self):
        # BUG FIX: was `return self.id`, which returns an int (or None for
        # unsaved rows) — __str__ must return a str.
        return str(self.id)

    class Meta:
        ordering = ('id',)
2.125
2
app/kobo/forms.py
wri/django_kobo
1
17934
from django import forms
from .models import Connection, KoboUser, KoboData
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.db.models import Q


class ConnectionForm(forms.ModelForm):
    '''ModelForm over Connection; the auth password renders as a masked
    input instead of plain text.'''
    class Meta:
        model = Connection
        exclude = []
        widgets = {
            'auth_pass': forms.PasswordInput(),
        }


class KoboUserForm(forms.ModelForm):
    '''ModelForm over KoboUser with a survey multi-select.'''
    class Meta:
        model = KoboUser
        exclude = []

    # Surveys are limited to KoboData tagged 'bns' or 'nrgt' and picked
    # via the admin's dual-list (filter_horizontal-style) widget.
    surveys = forms.ModelMultipleChoiceField(
        queryset=KoboData.objects.filter(Q(tags__contains=['bns']) | Q(tags__contains=['nrgt'])),
        widget=FilteredSelectMultiple(
            'Surveys',
            is_stacked=False),
        label='')
2.078125
2
old/dea/aws/__init__.py
robbibt/odc-tools
0
17935
# Compatibility shim: re-export the AWS/S3 helpers from ``odc.aws``
# under the legacy ``dea.aws`` namespace.
from odc.aws import (
    ec2_metadata,
    ec2_current_region,
    botocore_default_region,
    auto_find_region,
    make_s3_client,
    s3_url_parse,
    s3_fmt_range,
    s3_ls,
    s3_ls_dir,
    s3_find,
    get_boto_session,
    get_creds_with_retry,
    s3_fetch,
)
from odc.aws._find import (
    s3_file_info,
    norm_predicate,
    parse_query,
)

# Explicit public API of this module (mirrors the imports above).
__all__ = (
    "ec2_metadata",
    "ec2_current_region",
    "botocore_default_region",
    "auto_find_region",
    "make_s3_client",
    "s3_url_parse",
    "s3_fmt_range",
    "s3_ls",
    "s3_ls_dir",
    "s3_find",
    "get_boto_session",
    "get_creds_with_retry",
    "s3_fetch",
    "s3_file_info",
    "norm_predicate",
    "parse_query",
)
1.828125
2
systems/stage.py
will-nickson/starter_system
0
17936
# BUG FIX: without lazy annotations, `system: System` below is evaluated at
# definition time and raises NameError, since `System` is never imported in
# this module — importing it crashed. PEP 563 string annotations fix that.
from __future__ import annotations

from log.logger import logger


class SystemStage(object):
    """
    Default stage object: creates a SystemStage for doing something
    """

    @property
    def name(self):
        # Subclasses are expected to override this with a real stage name.
        return "Need to replace name when inheriting"

    def __repr__(self):
        return "SystemStage '%s' Try %s.methods()" % (
            self.name,
            self.name,
        )

    def methods(self):
        # NOTE(review): `get_methods` is not imported anywhere in this
        # module — confirm which module it should come from.
        return get_methods(self)

    def system_init(self, system: System):
        """Attach the parent system and a stage-scoped log.

        Called once the enclosing System exists.
        """
        # method called once we have a system
        self._parent = system
        # and a log
        log = system.log.setup(stage=self.name)
        self._log = log

    @property
    def log(self) -> logger:
        # NOTE(review): `logtoscreen` is not imported here, so reading this
        # property before system_init() would raise NameError — confirm the
        # intended fallback import.
        log = getattr(self, "_log", logtoscreen(""))
        return log

    @property
    def parent(self) -> System:
        # None until system_init() has been called.
        parent = getattr(self, "_parent", None)
        return parent
2.9375
3
pybind/slxos/v17r_2_00/mpls_state/rsvp/igp_sync/link/lsp/__init__.py
extremenetworks/pybind
0
17937
from operator import attrgetter import pyangbind.lib.xpathhelper as xpathhelper from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType from pyangbind.lib.base import PybindBase from decimal import Decimal from bitarray import bitarray import __builtin__ import hops class lsp(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module brocade-mpls-operational - based on the path /mpls-state/rsvp/igp-sync/link/lsp. Each member element of the container is represented as a class variable - with a specific YANG type. """ __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_name','__lsp_instance_id','__path_name','__cspf_enabled','__rro_enabled','__frr_enabled','__nbr_down_enabled','__link_count','__nbr_down_inprogress','__cspf_hop_count','__rro_hop_count','__hops',) _yang_name = 'lsp' _rest_name = 'lsp' _pybind_generated_by = 'container' def __init__(self, *args, **kwargs): path_helper_ = kwargs.pop("path_helper", None) if path_helper_ is False: self._path_helper = False elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper): self._path_helper = path_helper_ elif hasattr(self, "_parent"): path_helper_ = getattr(self._parent, "_path_helper", False) self._path_helper = path_helper_ else: self._path_helper = False extmethods = kwargs.pop("extmethods", None) if extmethods is False: self._extmethods = False elif extmethods is not None and isinstance(extmethods, dict): self._extmethods = extmethods elif hasattr(self, "_parent"): extmethods = getattr(self._parent, "_extmethods", None) self._extmethods = extmethods else: self._extmethods = False self.__path_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="path-name", rest_name="path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, 
register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False) self.__cspf_hop_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cspf-hop-count", rest_name="cspf-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) self.__hops = YANGDynClass(base=YANGListType("index hop_type",hops.hops, yang_name="hops", rest_name="hops", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index hop-type', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="hops", rest_name="hops", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False) self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False) self.__nbr_down_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-enabled", rest_name="nbr-down-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', 
defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) self.__rro_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="rro-enabled", rest_name="rro-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) self.__cspf_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="cspf-enabled", rest_name="cspf-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) self.__nbr_down_inprogress = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-inprogress", rest_name="nbr-down-inprogress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) self.__lsp_instance_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-instance-id", rest_name="lsp-instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) self.__rro_hop_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rro-hop-count", rest_name="rro-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', 
defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) self.__frr_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="frr-enabled", rest_name="frr-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) self.__link_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="link-count", rest_name="link-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return [u'mpls-state', u'rsvp', u'igp-sync', u'link', u'lsp'] def _rest_path(self): if hasattr(self, "_parent"): if self._rest_name: return self._parent._rest_path()+[self._rest_name] else: return self._parent._rest_path() else: return [u'mpls-state', u'rsvp', u'igp-sync', u'link', u'lsp'] def _get_lsp_name(self): """ Getter method for lsp_name, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/lsp_name (string) YANG Description: LSP name """ return self.__lsp_name def 
_set_lsp_name(self, v, load=False): """ Setter method for lsp_name, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/lsp_name (string) If this variable is read-only (config: false) in the source YANG file, then _set_lsp_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_lsp_name() directly. YANG Description: LSP name """ parent = getattr(self, "_parent", None) if parent is not None and load is False: raise AttributeError("Cannot set keys directly when" + " within an instantiated list") if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """lsp_name must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""", }) self.__lsp_name = t if hasattr(self, '_set'): self._set() def _unset_lsp_name(self): self.__lsp_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-name", rest_name="lsp-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False) def _get_lsp_instance_id(self): """ Getter method for lsp_instance_id, mapped from YANG 
variable /mpls_state/rsvp/igp_sync/link/lsp/lsp_instance_id (uint32) YANG Description: Instance id of the lsp instance """ return self.__lsp_instance_id def _set_lsp_instance_id(self, v, load=False): """ Setter method for lsp_instance_id, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/lsp_instance_id (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_lsp_instance_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_lsp_instance_id() directly. YANG Description: Instance id of the lsp instance """ parent = getattr(self, "_parent", None) if parent is not None and load is False: raise AttributeError("Cannot set keys directly when" + " within an instantiated list") if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-instance-id", rest_name="lsp-instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """lsp_instance_id must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-instance-id", rest_name="lsp-instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__lsp_instance_id = t if hasattr(self, '_set'): self._set() def _unset_lsp_instance_id(self): 
self.__lsp_instance_id = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-instance-id", rest_name="lsp-instance-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) def _get_path_name(self): """ Getter method for path_name, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/path_name (string) YANG Description: LSP Path name """ return self.__path_name def _set_path_name(self, v, load=False): """ Setter method for path_name, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/path_name (string) If this variable is read-only (config: false) in the source YANG file, then _set_path_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_path_name() directly. 
YANG Description: LSP Path name """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="path-name", rest_name="path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """path_name must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="path-name", rest_name="path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False)""", }) self.__path_name = t if hasattr(self, '_set'): self._set() def _unset_path_name(self): self.__path_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="path-name", rest_name="path-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='string', is_config=False) def _get_cspf_enabled(self): """ Getter method for cspf_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/cspf_enabled (boolean) YANG Description: CSPF enabled for LSP """ return self.__cspf_enabled def _set_cspf_enabled(self, v, load=False): """ Setter method for cspf_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/cspf_enabled (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_cspf_enabled is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cspf_enabled() directly. 
YANG Description: CSPF enabled for LSP """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="cspf-enabled", rest_name="cspf-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """cspf_enabled must be of a type compatible with boolean""", 'defined-type': "boolean", 'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="cspf-enabled", rest_name="cspf-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""", }) self.__cspf_enabled = t if hasattr(self, '_set'): self._set() def _unset_cspf_enabled(self): self.__cspf_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="cspf-enabled", rest_name="cspf-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) def _get_rro_enabled(self): """ Getter method for rro_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/rro_enabled (boolean) YANG Description: RRO enabled for LSP """ return self.__rro_enabled def _set_rro_enabled(self, v, load=False): """ Setter method for rro_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/rro_enabled (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_rro_enabled is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rro_enabled() directly. 
YANG Description: RRO enabled for LSP """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="rro-enabled", rest_name="rro-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """rro_enabled must be of a type compatible with boolean""", 'defined-type': "boolean", 'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="rro-enabled", rest_name="rro-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""", }) self.__rro_enabled = t if hasattr(self, '_set'): self._set() def _unset_rro_enabled(self): self.__rro_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="rro-enabled", rest_name="rro-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) def _get_frr_enabled(self): """ Getter method for frr_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/frr_enabled (boolean) YANG Description: FRR enabled for LSP """ return self.__frr_enabled def _set_frr_enabled(self, v, load=False): """ Setter method for frr_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/frr_enabled (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_frr_enabled is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_frr_enabled() directly. 
YANG Description: FRR enabled for LSP """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="frr-enabled", rest_name="frr-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """frr_enabled must be of a type compatible with boolean""", 'defined-type': "boolean", 'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="frr-enabled", rest_name="frr-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""", }) self.__frr_enabled = t if hasattr(self, '_set'): self._set() def _unset_frr_enabled(self): self.__frr_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="frr-enabled", rest_name="frr-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) def _get_nbr_down_enabled(self): """ Getter method for nbr_down_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/nbr_down_enabled (boolean) YANG Description: LSP Neighbour down is enabled """ return self.__nbr_down_enabled def _set_nbr_down_enabled(self, v, load=False): """ Setter method for nbr_down_enabled, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/nbr_down_enabled (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_nbr_down_enabled is considered as a private method. 
Backends looking to populate this variable should do so via calling thisObj._set_nbr_down_enabled() directly. YANG Description: LSP Neighbour down is enabled """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="nbr-down-enabled", rest_name="nbr-down-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """nbr_down_enabled must be of a type compatible with boolean""", 'defined-type': "boolean", 'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-enabled", rest_name="nbr-down-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""", }) self.__nbr_down_enabled = t if hasattr(self, '_set'): self._set() def _unset_nbr_down_enabled(self): self.__nbr_down_enabled = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-enabled", rest_name="nbr-down-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) def _get_link_count(self): """ Getter method for link_count, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/link_count (uint32) YANG Description: Total links used by the LSP """ return self.__link_count def _set_link_count(self, v, load=False): """ Setter method for link_count, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/link_count (uint32) If this variable is read-only (config: false) in the source YANG file, then 
_set_link_count is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_link_count() directly. YANG Description: Total links used by the LSP """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="link-count", rest_name="link-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """link_count must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="link-count", rest_name="link-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__link_count = t if hasattr(self, '_set'): self._set() def _unset_link_count(self): self.__link_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="link-count", rest_name="link-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) def _get_nbr_down_inprogress(self): """ Getter method for nbr_down_inprogress, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/nbr_down_inprogress (boolean) YANG Description: Neighbor down processing is in 
progress """ return self.__nbr_down_inprogress def _set_nbr_down_inprogress(self, v, load=False): """ Setter method for nbr_down_inprogress, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/nbr_down_inprogress (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_nbr_down_inprogress is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_nbr_down_inprogress() directly. YANG Description: Neighbor down processing is in progress """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="nbr-down-inprogress", rest_name="nbr-down-inprogress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """nbr_down_inprogress must be of a type compatible with boolean""", 'defined-type': "boolean", 'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-inprogress", rest_name="nbr-down-inprogress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False)""", }) self.__nbr_down_inprogress = t if hasattr(self, '_set'): self._set() def _unset_nbr_down_inprogress(self): self.__nbr_down_inprogress = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="nbr-down-inprogress", rest_name="nbr-down-inprogress", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='boolean', is_config=False) def _get_cspf_hop_count(self): """ Getter method 
for cspf_hop_count, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/cspf_hop_count (uint32) YANG Description: CSPF hop count """ return self.__cspf_hop_count def _set_cspf_hop_count(self, v, load=False): """ Setter method for cspf_hop_count, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/cspf_hop_count (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_cspf_hop_count is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cspf_hop_count() directly. YANG Description: CSPF hop count """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cspf-hop-count", rest_name="cspf-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """cspf_hop_count must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cspf-hop-count", rest_name="cspf-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__cspf_hop_count = t if hasattr(self, '_set'): self._set() def _unset_cspf_hop_count(self): self.__cspf_hop_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="cspf-hop-count", rest_name="cspf-hop-count", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) def _get_rro_hop_count(self): """ Getter method for rro_hop_count, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/rro_hop_count (uint32) YANG Description: RRO hop rout """ return self.__rro_hop_count def _set_rro_hop_count(self, v, load=False): """ Setter method for rro_hop_count, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/rro_hop_count (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_rro_hop_count is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rro_hop_count() directly. YANG Description: RRO hop rout """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rro-hop-count", rest_name="rro-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """rro_hop_count must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rro-hop-count", rest_name="rro-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""", }) self.__rro_hop_count = t if hasattr(self, 
'_set'): self._set() def _unset_rro_hop_count(self): self.__rro_hop_count = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="rro-hop-count", rest_name="rro-hop-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False) def _get_hops(self): """ Getter method for hops, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/hops (list) YANG Description: MPLS Rsvp IGP Synchronization Hop information """ return self.__hops def _set_hops(self, v, load=False): """ Setter method for hops, mapped from YANG variable /mpls_state/rsvp/igp_sync/link/lsp/hops (list) If this variable is read-only (config: false) in the source YANG file, then _set_hops is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_hops() directly. 
YANG Description: MPLS Rsvp IGP Synchronization Hop information """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("index hop_type",hops.hops, yang_name="hops", rest_name="hops", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index hop-type', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="hops", rest_name="hops", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """hops must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("index hop_type",hops.hops, yang_name="hops", rest_name="hops", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='index hop-type', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="hops", rest_name="hops", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""", }) self.__hops = t if hasattr(self, '_set'): self._set() def _unset_hops(self): self.__hops = YANGDynClass(base=YANGListType("index hop_type",hops.hops, yang_name="hops", rest_name="hops", parent=self, is_container='list', user_ordered=False, 
path_helper=self._path_helper, yang_keys='index hop-type', extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}), is_container='list', yang_name="hops", rest_name="hops", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp-igp-sync-hop-data', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False) lsp_name = __builtin__.property(_get_lsp_name) lsp_instance_id = __builtin__.property(_get_lsp_instance_id) path_name = __builtin__.property(_get_path_name) cspf_enabled = __builtin__.property(_get_cspf_enabled) rro_enabled = __builtin__.property(_get_rro_enabled) frr_enabled = __builtin__.property(_get_frr_enabled) nbr_down_enabled = __builtin__.property(_get_nbr_down_enabled) link_count = __builtin__.property(_get_link_count) nbr_down_inprogress = __builtin__.property(_get_nbr_down_inprogress) cspf_hop_count = __builtin__.property(_get_cspf_hop_count) rro_hop_count = __builtin__.property(_get_rro_hop_count) hops = __builtin__.property(_get_hops) _pyangbind_elements = {'lsp_name': lsp_name, 'lsp_instance_id': lsp_instance_id, 'path_name': path_name, 'cspf_enabled': cspf_enabled, 'rro_enabled': rro_enabled, 'frr_enabled': frr_enabled, 'nbr_down_enabled': nbr_down_enabled, 'link_count': link_count, 'nbr_down_inprogress': nbr_down_inprogress, 'cspf_hop_count': cspf_hop_count, 'rro_hop_count': rro_hop_count, 'hops': hops, }
1.898438
2
vega/trainer/callbacks/horovod.py
zjzh/vega
0
17938
# -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Data parallel callback.""" import logging import vega from vega.common import ClassFactory, ClassType from .callback import Callback logger = logging.getLogger(__name__) @ClassFactory.register(ClassType.CALLBACK) class Horovod(Callback): """Callback that saves the evaluated Performance.""" def __init__(self): """Initialize ModelCheckpoint callback.""" super(Horovod, self).__init__() self.priority = 260 def before_train(self, logs=None): """Be called before the training process.""" if not self.trainer.horovod: return if vega.is_torch_backend(): self._init_torch() def _init_torch(self): import torch import horovod.torch as hvd hvd.broadcast_parameters(self.trainer.model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(self.trainer.optimizer, root_rank=0) self.trainer._average_metrics = self._average_metrics def _average_metrics(self, metrics_results): import torch import horovod.torch as hvd for key, value in metrics_results.items(): tensor = torch.tensor(value) avg_tensor = hvd.allreduce(tensor, name=key) metrics_results[key] = avg_tensor.item() return metrics_results
1.859375
2
parse_doc.py
nprapps/idp-georgia
1
17939
# _*_ coding:utf-8 _*_ import logging import re import app_config from bs4 import BeautifulSoup from shortcode import process_shortcode logging.basicConfig(format=app_config.LOG_FORMAT) logger = logging.getLogger(__name__) logger.setLevel(app_config.LOG_LEVEL) end_doc_regex = re.compile(ur'^\s*[Ee][Nn][Dd]\s*$', re.UNICODE) new_section_marker_regex = re.compile(ur'^\s*\+{50,}\s*$', re.UNICODE) section_end_marker_regex = re.compile(ur'^\s*-{50,}\s*$', re.UNICODE) frontmatter_marker_regex = re.compile(ur'^\s*-{3}\s*$', re.UNICODE) extract_metadata_regex = re.compile(ur'^(.*?):(.*)$', re.UNICODE) shortcode_regex = re.compile(ur'^\s*\[%\s*.*\s*%\]\s*$', re.UNICODE) def is_section_marker(tag): """ Checks for the beginning of a new section """ text = tag.get_text() m = new_section_marker_regex.match(text) if m: return True else: return False def is_section_end_marker(tag): """ Checks for the beginning of a new section """ text = tag.get_text() m = section_end_marker_regex.match(text) if m: return True else: return False def process_headline(contents): logger.debug('--process_headline start--') headline = None for tag in contents: if tag.name == "h2": headline = tag.get_text() else: logger.warning('unexpected tag found: Ignore %s' % tag.get_text()) if not headline: logger.error('Did not find headline on post. Contents: %s' % contents) return headline def process_metadata(contents): logger.debug('--process_metadata start--') metadata = {} for tag in contents: text = tag.get_text() m = extract_metadata_regex.match(text) if m: key = m.group(1).strip().lower() value = m.group(2).strip().lower() metadata[key] = value else: logger.error('Could not parse metadata. 
Text: %s' % text) logger.debug("metadata: %s" % metadata) return metadata def process_section_contents(contents): """ Process episode copy content In particular parse and generate HTML from shortcodes """ logger.debug('--process_post_contents start--') parsed = [] for tag in contents: text = tag.get_text() m = shortcode_regex.match(text) if m: parsed.append(process_shortcode(tag)) else: parsed.append(unicode(tag)) episode_contents = ''.join(parsed) return episode_contents def parse_raw_sections(raw_sections): """ parse raw episodes into an array of section objects """ # Divide each episode into its subparts # - Headline # - FrontMatter # - Contents sections = [] for raw_section in raw_sections: section = {} marker_counter = 0 section_raw_headline = [] section_raw_metadata = [] section_raw_contents = [] for tag in raw_section: text = tag.get_text() m = frontmatter_marker_regex.match(text) if m: marker_counter += 1 else: if (marker_counter == 0): section_raw_headline.append(tag) elif (marker_counter == 1): section_raw_metadata.append(tag) else: section_raw_contents.append(tag) section[u'headline'] = process_headline(section_raw_headline) metadata = process_metadata(section_raw_metadata) for k, v in metadata.iteritems(): section[k] = v section[u'contents'] = process_section_contents(section_raw_contents) sections.append(section) return sections def split_sections(doc): """ split the raw document into an array of raw sections """ logger.debug('--split_sections start--') raw_sections = [] raw_episode_contents = [] ignore_orphan_text = True body = doc.soup.body for child in body.children: if is_section_marker(child): # Detected first post stop ignoring orphan text if ignore_orphan_text: ignore_orphan_text = False else: if ignore_orphan_text: continue elif is_section_end_marker(child): ignore_orphan_text = True raw_sections.append(raw_episode_contents) raw_episode_contents = [] else: raw_episode_contents.append(child) return raw_sections def find_section_id(sections, id): 
""" Find the section with a given id """ for idx, section in enumerate(sections): try: if section['id'] == id: return idx except KeyError: continue return None def process_extracted_contents(inline_intro): """ Remove html markup """ return inline_intro['contents'] def parse(doc): """ parse google doc files and extract markup """ try: parsed_document = {} logger.info('-------------start------------') raw_sections = split_sections(doc) sections = parse_raw_sections(raw_sections) logger.info('Number of sections: %s' % len(sections)) parsed_document['sections'] = sections finally: logger.info('-------------end------------') return parsed_document
2.625
3
01_irc_bot/bot.py
pymug/ARJ_SpoonfeedingSockets_APR2021
0
17940
""" <NAME> Skeleton of https://github.com/pyhoneybot/honeybot/ """ import time import os import socket directory = "irc" if not os.path.exists(directory): os.makedirs(directory) target = open(os.path.join(directory, "log.txt"), "w") def message_checker(msgLine): sendvar = "" global mute mute = False completeLine = str(msgLine[1:]).replace("'b", "").split(":", 1) info = completeLine[0].split() message = (completeLine[1].split("\\r")[0]).replace("'b", "") sender = info[0][2:].split("!", 1)[0] refinedmsg = str(message.lower()) refinedmsgl = len(refinedmsg) print("Complete Line-->" + str(completeLine)) print("Info-->" + str(info)) print("Message-->" + str(message)) print("Sender-->" + str(sender) + "\n") def ping_checker(pingLine): if pingLine.find(bytes("PING", "utf8")) != -1: pingLine = pingLine.rstrip().split() if pingLine[0] == bytes("PING", "utf8"): irc.send(bytes("PONG ", "utf8") + pingLine[1] + bytes("\r\n", "utf8")) BOT_IRC_SERVER = "chat.freenode.net" BOT_IRC_CHANNEL = "##bottestingmu" # BOT_IRC_CHANNEL = "#python" BOT_IRC_PORT = 6667 BOT_NICKNAME = "appinventormuBot" # BOT_PASSWORD = '' irc = socket.socket() irc.connect((BOT_IRC_SERVER, BOT_IRC_PORT)) irc.recv(4096) irc.send(bytes("NICK " + BOT_NICKNAME + "\r\n", "utf8")) ping_checker(irc.recv(4096)) irc.send( bytes( "USER appinventormuBot appinventormuBot appinventormuBot : appinventormuBot IRC\r\n", "utf8", ) ) ping_checker(irc.recv(4096)) # irc.send(bytes('msg NickServ identify ' + BOT_PASSWORD + " \r\n" ,'utf8') ) # ping_checker(irc.recv(4096)) # irc.send(bytes('NICKSERV identify ' + BOT_NICKNAME+' '+BOT_PASSWORD+ '\r\n','utf8' ) ) # ping_checker(irc.recv(4096)) time.sleep(3) irc.send(bytes("JOIN " + BOT_IRC_CHANNEL + "\r\n", "utf8")) while 1: pass line = irc.recv(4096) print(line) ping_checker(line) if ( line.find(bytes("PRIVMSG", "utf8")) != -1 or line.find(bytes("NOTICE", "utf8")) != -1 ): message_checker(line) target.write(str(line)) target.flush()
2.40625
2
xenia_python_client_library/models/attachments_list.py
DutchAnalytics/xenia-python-client-library
0
17941
# coding: utf-8 """ Xenia Python Client Library Python Client Library to interact with the Xenia API. # noqa: E501 The version of the OpenAPI document: v2.1 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from xenia_python_client_library.configuration import Configuration class AttachmentsList(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'id': 'str', 'source_name': 'str', 'destination_name': 'str', 'mapping': 'list[AttachmentFieldsList]' } attribute_map = { 'id': 'id', 'source_name': 'source_name', 'destination_name': 'destination_name', 'mapping': 'mapping' } def __init__(self, id=None, source_name=None, destination_name=None, mapping=None, local_vars_configuration=None): # noqa: E501 """AttachmentsList - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._id = None self._source_name = None self._destination_name = None self._mapping = None self.discriminator = None if id is not None: self.id = id if source_name is not None: self.source_name = source_name if destination_name is not None: self.destination_name = destination_name if mapping is not None: self.mapping = mapping @property def id(self): """Gets the id of this AttachmentsList. # noqa: E501 :return: The id of this AttachmentsList. # noqa: E501 :rtype: str """ return self._id @id.setter def id(self, id): """Sets the id of this AttachmentsList. :param id: The id of this AttachmentsList. # noqa: E501 :type: str """ self._id = id @property def source_name(self): """Gets the source_name of this AttachmentsList. 
# noqa: E501 :return: The source_name of this AttachmentsList. # noqa: E501 :rtype: str """ return self._source_name @source_name.setter def source_name(self, source_name): """Sets the source_name of this AttachmentsList. :param source_name: The source_name of this AttachmentsList. # noqa: E501 :type: str """ self._source_name = source_name @property def destination_name(self): """Gets the destination_name of this AttachmentsList. # noqa: E501 :return: The destination_name of this AttachmentsList. # noqa: E501 :rtype: str """ return self._destination_name @destination_name.setter def destination_name(self, destination_name): """Sets the destination_name of this AttachmentsList. :param destination_name: The destination_name of this AttachmentsList. # noqa: E501 :type: str """ self._destination_name = destination_name @property def mapping(self): """Gets the mapping of this AttachmentsList. # noqa: E501 :return: The mapping of this AttachmentsList. # noqa: E501 :rtype: list[AttachmentFieldsList] """ return self._mapping @mapping.setter def mapping(self, mapping): """Sets the mapping of this AttachmentsList. :param mapping: The mapping of this AttachmentsList. 
# noqa: E501 :type: list[AttachmentFieldsList] """ self._mapping = mapping def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, AttachmentsList): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, AttachmentsList): return True return self.to_dict() != other.to_dict()
1.867188
2
src/AuShadha/registry/icd10/aushadha.py
GosthMan/AuShadha
46
17942
################################################################################ # Create a Registration with the UI for a Role. # Each module's aushadha.py is screened for this # # Each Class is registered for a Role in UI # These can be used to generate Role based UI elements later. # # As of now string base role assignement is done. # This can be later extended to class based role ################################################################################ from .models import Chapter, Section,Diagnosis from AuShadha.apps.ui.ui import ui as UI UI.register('RegistryApp',Chapter ) UI.register('DiseaseCodes',Chapter) UI.register('ReferenceApp',Chapter)
2.25
2
Etap 2/Logia03/Zad1.py
aszokalski/Logia
0
17943
<filename>Etap 2/Logia03/Zad1.py from turtle import * def rysuj(s): a = 720 / len(s) up = "bdfhklt" down = "gjpqy" numb = "0123456789" samogloski = "aeiouy" pu(); bk(360); pd() for elem in s: if elem in numb: prost(a, "green") elif elem in up: prost(a, "yellow") elif elem in down: col = "yellow" if elem in samogloski: col = "red" pu(); rt(90); fd(a); lt(90); pd() prost(a, col) pu(); lt(90); fd(a); rt(90); pd() else: col = "yellow" if elem in samogloski: col = "red" kwad(a, col) def prost(a, col): fillcolor(col) begin_fill() for i in range(2): fd(a) lt(90) fd(2 * a) lt(90) fd(a) end_fill() def kwad(a, col): fillcolor(col) begin_fill() for i in range(4): fd(a) lt(90) fd(a) end_fill()
2.953125
3
server/src/models/movie.py
Rubilmax/netflux
2
17944
""" Define the Movie model """ from . import db from .abc import BaseModel, MetaBaseModel class Movie(db.Model, BaseModel, metaclass=MetaBaseModel): """ The Movie model """ __tablename__ = "movies" movie_id = db.Column(db.String(300), primary_key=True) title = db.Column(db.String(300)) author = db.Column(db.String(300)) release_year = db.Column(db.Integer) def __init__(self, movie_id, title, author, release_year): """ Create a new movie """ self.movie_id = movie_id self.title = title self.author = author self.release_year = release_year
3.640625
4
wmt-shared-task/segment-level/segment_level_prism.py
chryssa-zrv/UA_COMET
0
17945
<filename>wmt-shared-task/segment-level/segment_level_prism.py f""" Shell script tho reproduce results for BERTScores in data from WMT18/19 Metrics Shared task. """ import argparse import hashlib import logging import os import sys from typing import Any, Dict, Iterator, List import numpy as np import pandas as pd import sentencepiece as spm import torch from tqdm import tqdm from fairseq import utils from fairseq import checkpoint_utils from fairseq.data import LanguagePairDataset #!/usr/bin/env python3 logger = logging.getLogger('prism') logger.setLevel(logging.INFO) MODELS = { '8412b2044da4b9b2c0a8ce87b305d0d1': { 'name': 'm39v1', 'path': 'todo', 'date': '2020-04-30', 'description': 'model released with arXiv paper April 2020', 'langs': ['ar', 'bg', 'bn', 'ca', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'eo', 'fi', 'fr', 'he', 'hr', 'hu', 'id', 'it', 'ja', 'kk', 'lt', 'lv', 'mk', 'nl', 'no', 'pl', 'pt', 'ro', 'ru', 'sk', 'sl', 'sq', 'sr', 'sv', 'tr', 'uk', 'vi', 'zh'], } } def hash_model(model_dir): md5 = hashlib.md5() block_size = 2 ** 20 for fname in ('checkpoint.pt', 'spm.model', 'dict.src.txt', 'dict.tgt.txt'): with open(os.path.join(model_dir, fname), "rb") as f: while True: data = f.read(block_size) if not data: break md5.update(data) md5.digest() return md5.hexdigest() """ Copy of https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_scorer.py with softmax temperature control added """ class SequenceScorer(object): """Scores the target for a given source sentence.""" def __init__(self, tgt_dict, softmax_batch=None, temperature=1.0): self.pad = tgt_dict.pad() self.eos = tgt_dict.eos() self.softmax_batch = softmax_batch or sys.maxsize self.temperature = temperature assert self.softmax_batch > 0 @torch.no_grad() def generate(self, models, sample, **kwargs): """Score a batch of translations.""" net_input = sample['net_input'] def batch_for_softmax(dec_out, target): # assumes decoder_out[0] is the only thing needed (may not be correct for future 
models!) first, rest = dec_out[0], dec_out[1:] bsz, tsz, dim = first.shape if bsz * tsz < self.softmax_batch: yield dec_out, target, True else: flat = first.contiguous().view(1, -1, dim) flat_tgt = target.contiguous().view(flat.shape[:-1]) s = 0 while s < flat.size(1): e = s + self.softmax_batch yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False s = e def gather_target_probs(probs, target): probs = probs.gather( dim=2, index=target.unsqueeze(-1), ) return probs orig_target = sample['target'] # compute scores for each model in the ensemble avg_probs = None avg_attn = None for model in models: model.eval() decoder_out = model.forward(**net_input) attn = decoder_out[1] if type(attn) is dict: attn = attn.get('attn', None) batched = batch_for_softmax(decoder_out, orig_target) probs, idx = None, 0 for bd, tgt, is_single in batched: sample['target'] = tgt # divide the logits by temperature prior to softmax # for example, see https://github.com/pytorch/fairseq/blob/master/fairseq/sequence_generator.py: # decoder_out[0][:, -1:, :].div_(temperature) bd[0].div_(self.temperature) curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data if is_single: probs = gather_target_probs(curr_prob, orig_target) else: if probs is None: probs = curr_prob.new(orig_target.numel()) step = curr_prob.size(0) * curr_prob.size(1) end = step + idx tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt) probs[idx:end] = tgt_probs.view(-1) idx = end sample['target'] = orig_target probs = probs.view(sample['target'].shape) if avg_probs is None: avg_probs = probs else: avg_probs.add_(probs) if attn is not None and torch.is_tensor(attn): attn = attn.data if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(models) > 1: avg_probs.div_(len(models)) avg_probs.log_() if avg_attn is not None: avg_attn.div_(len(models)) bsz = avg_probs.size(0) hypos = [] start_idxs = sample['start_indices'] if 'start_indices' in sample else 
[0] * bsz for i in range(bsz): # remove padding from ref ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \ if sample['target'] is not None else None tgt_len = ref.numel() avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len] score_i = avg_probs_i.sum() / tgt_len if avg_attn is not None: avg_attn_i = avg_attn[i] alignment = utils.extract_hard_alignment(avg_attn_i, sample['net_input']['src_tokens'][i], sample['target'][i], self.pad, self.eos) else: avg_attn_i = alignment = None hypos.append([{ 'tokens': ref, 'score': score_i, 'attention': avg_attn_i, 'alignment': alignment, 'positional_scores': avg_probs_i, }]) return hypos class Prism: def __init__(self, model_dir, lang, temperature): ''' model_dir should contain: 1) checkpoint.pt: the fairseq model 2) spm.model: the sentencepiece model 3) dict.src.txt: the fairseq source dictionary 4) dict.tgt.txt: the fairseq target dictionary (likely a copy of the source) lang: ISO 639-1 Code (e.g. "en"). Must be a language compatable with the model. 
''' self.sp = spm.SentencePieceProcessor() self.sp.Load(model_dir + '/spm.model') self.lang = lang self.temperature = temperature # this prints things and I can't figure out how to disable it sys.stdout = open(os.devnull, 'w') self.models, self.args, self.task = checkpoint_utils.load_model_ensemble_and_task( [model_dir + '/checkpoint.pt', ], arg_overrides=dict(data=model_dir + '/'), ) sys.stdout = sys.__stdout__ self.use_cuda = torch.cuda.is_available() self.generator = SequenceScorer(self.task.target_dictionary, temperature=temperature) for model in self.models: if self.use_cuda: model.cuda() model.make_generation_fast_( beamable_mm_beam_size=None, need_attn=False, ) # if model.args.fp16: # model.half() # hash model self.model_hash = hash_model(model_dir) if self.model_hash in MODELS: model_langs = MODELS[self.model_hash]['langs'] if lang not in model_langs: model_name = MODELS[self.model_hash]['name'] logger.warning(f'Language "{lang}" is unsupported for model "{model_name}"') logger.warning(f'Supported languages for {model_name}: {", ".join(model_langs)}') else: logger.warning('unrecognized model, so cannot check language') def identifier(self): if self.model_hash in MODELS: model_name = MODELS[self.model_hash]['name'] else: logger.warning('unrecognized model, using hash to identify') model_name = self.model_hash return dict(version='0.1', model=model_name, seg_scores='avg_log_prob', sys_scores='avg_log_prob', log_base=2, temperature=self.temperature) def _binarize(self, sentence: str) -> torch.LongTensor: return self.task.source_dictionary.encode_line(sentence, add_if_not_exist=False).long() def _encode(self, sent, prepend=True): sent = ' '.join(self.sp.EncodeAsPieces(sent)) if prepend: sent = f'<{self.lang}> ' + sent return self._binarize(sent) def _build_batches(self, source_tokens: List[List[int]], target_tokens: List[List[int]], skip_invalid_size_inputs: bool) -> Iterator[Dict[str, Any]]: source_lengths = torch.LongTensor([t.numel() for t in source_tokens]) 
target_lengths = torch.LongTensor([t.numel() for t in target_tokens]) batch_iterator = self.task.get_batch_iterator( dataset=LanguagePairDataset(source_tokens, source_lengths, self.task.source_dictionary, tgt=target_tokens, tgt_sizes=target_lengths, tgt_dict=self.task.target_dictionary), max_tokens=self.args.max_tokens, max_sentences=self.args.max_sentences, max_positions=(2000, 2000), # ??? ignore_invalid_inputs=skip_invalid_size_inputs, ).next_epoch_itr(shuffle=False) return batch_iterator def _score_forward(self, tok_sents_in, tok_sents_out): assert len(tok_sents_in) == len(tok_sents_out) tok_level_scores = [None, ] * len(tok_sents_in) # for debug results = [None, ] * len(tok_sents_in) for batch in self._build_batches(tok_sents_in, tok_sents_out, skip_invalid_size_inputs=False): if self.use_cuda: # must be a better way batch['id'] = batch['id'].cuda() batch['net_input']['src_tokens'] = batch['net_input']['src_tokens'].cuda() batch['net_input']['src_lengths'] = batch['net_input']['src_lengths'].cuda() batch['net_input']['prev_output_tokens'] = batch['net_input']['prev_output_tokens'].cuda() batch['target'] = batch['target'].cuda() translations = self.task.inference_step(self.generator, self.models, batch) ids = batch['id'].cpu().numpy() tok_scores = [x[0]['positional_scores'].cpu().numpy() for x in translations] # [1:] to skip language tag log prob sent_scores = [np.mean(x[1:]) for x in tok_scores] for _id, sent_score, _tok_score in zip(ids, sent_scores, tok_scores): results[_id] = sent_score tok_level_scores[_id] = _tok_score if logger.level == logging.DEBUG: for ii, (sent_in, scores_out, sent_out) in enumerate(zip(tok_sents_in, tok_level_scores, tok_sents_out)): sent_in_str = ' '.join([self.task.source_dictionary[x] for x in sent_in]) logger.debug(f'Input[{ii}] = ' + sent_in_str) sent_out_tok = [self.task.source_dictionary[x] for x in sent_out] logger.debug(f'Output[{ii}] = ' + \ f' '.join([f'{a}[{b:.02f}]' for a, b in zip(sent_out_tok, scores_out)])) if None 
in results: raise Exception('Missing one or more sentence scores') return np.array(results) def score(self, cand, ref=None, src=None, segment_scores=False): if not (ref is None) ^ (src is None): raise Exception('Must provide exactly one of "ref" or "src"') tokenized_cand = [self._encode(sentence, prepend=False) for sentence in cand] tokenized_cand_prep = [self._encode(sentence, prepend=True) for sentence in cand] if src is not None: # Prism-src: score candidate given on source if len(cand) != len(src): raise Exception(f'Length of cand ({len(cand)}) does not match length of src ({len(src)})') tokenized_src = [self._encode(sentence, prepend=False) for sentence in src] scores = self._score_forward(tokenized_src, tokenized_cand_prep) else: # Prism-ref: average candidate given reference and reference given candidate if len(cand) != len(ref): raise Exception(f'Length of cand ({len(cand)}) does not match length of ref ({len(ref)})') tokenized_ref = [self._encode(sentence, prepend=False) for sentence in ref] tokenized_ref_prep = [self._encode(sentence, prepend=True) for sentence in ref] forward_scores = self._score_forward(tok_sents_in=tokenized_ref, tok_sents_out=tokenized_cand_prep) reverse_scores = self._score_forward(tok_sents_in=tokenized_cand, tok_sents_out=tokenized_ref_prep) scores = 0.5 * forward_scores + 0.5 * reverse_scores if not segment_scores: scores = np.mean(scores) return scores def compute_kendall( hyp1_scores: list, hyp2_scores: list, dataframe: pd.DataFrame ) -> (int, list): """ Computes the official WMT19 shared task Kendall correlation score. 
""" assert len(hyp1_scores) == len(hyp2_scores) == len(data) conc, disc = 0, 0 for i, row in tqdm(data.iterrows(), total=len(data), desc="Kendall eval..."): if hyp1_scores[i] > hyp2_scores[i]: conc += 1 else: disc += 1 return (conc - disc) / (conc + disc) def run_prism(mt: list, ref: list, language=False, temperature=1.0) -> list: prism = Prism(model_dir="m39v1", lang=language, temperature=temperature) scores = prism.score(cand=mt, ref=ref, segment_scores=True) return list(scores) if __name__ == "__main__": parser = argparse.ArgumentParser( description="Evaluates BERTScores against relative preferences." ) parser.add_argument( "--test_path", default="wmt-metrics/wmt19/de-en/relative-ranks.csv", help="Path to the test dataframe with relative preferences.", type=str, ) parser.add_argument( "--language", default="en", help="Target language of the testset.", type=str, ) parser.add_argument( '--temperature', type=float, default=1.0, help='Softmax temperature: values >1.0 produce more uniform samples and values <1.0 produce sharper samples') parser.add_argument( "--run_wmt18", default=False, help="Runs entire WMT18 evaluation.", action="store_true", ) parser.add_argument( "--run_wmt19", default=False, help="Runs entire WMT19 evaluation.", action="store_true", ) args = parser.parse_args() if args.run_wmt18: lps = [ "en-cs", "en-de", "en-et", "en-fi", "en-ru", "en-tr", "en-zh", "cs-en", "de-en", "et-en", "fi-en", "ru-en", "tr-en", "zh-en", ] kendall_scores = {} for lp in lps: data = pd.read_csv(f"wmt-metrics/wmt18/{lp}/relative-ranks.csv") hyp1_scores = run_prism([str(s) for s in data.hyp1], list(data.ref), language=lp.split('-')[1], temperature=args.temperature) hyp2_scores = run_prism([str(s) for s in data.hyp2], list(data.ref), language=lp.split('-')[1], temperature=args.temperature) #hyp1_scores = run_prism([str(s) for s in data.hyp1], list(data.ref), list(data.src), language=lp.split('-')[1]) #hyp2_scores = run_prism([str(s) for s in data.hyp2], list(data.ref), 
list(data.src), language=lp.split('-')[1]) kendall = compute_kendall(hyp1_scores, hyp2_scores, data) print("Results for {}: {}".format(lp, kendall)) kendall_scores[lp] = kendall print(kendall_scores) elif args.run_wmt19: lps = [ "en-cs", "en-de", "en-fi", "en-gu", "en-kk", "en-lt", "en-ru", "en-zh", "de-en", "fi-en", "gu-en", "kk-en", "lt-en", "ru-en", "zh-en", "de-cs", "de-fr", "fr-de", ] kendall_scores = {} for lp in lps: data = pd.read_csv(f"wmt-metrics/wmt19/{lp}/relative-ranks.csv") hyp1_scores = run_prism([str(s) for s in data.hyp1], list(data.ref), language=lp.split('-')[1], temperature=args.temperature) hyp2_scores = run_prism([str(s) for s in data.hyp2], list(data.ref), language=lp.split('-')[1], temperature=args.temperature) kendall = compute_kendall(hyp1_scores, hyp2_scores, data) print("Results for {}: {}".format(lp, kendall)) kendall_scores[lp] = kendall print(kendall_scores) else: data = pd.read_csv(args.test_path) kendall_scores = {} hyp1_scores = run_prism([str(s) for s in data.hyp1], list(data.ref), language=lp.split('-')[1], temperature=args.temperature) hyp2_scores = run_prism([str(s) for s in data.hyp2], list(data.ref), language=lp.split('-')[1], temperature=args.temperature) kendall = compute_kendall(hyp1_scores, hyp2_scores, data) print("Results for {}: {}".format(args.test_path, kendall)) kendall_scores[lp] = kendall print(kendall_scores)
1.796875
2
source_code/3-2-download.py
VickyMin1994/easy-scraping-tutorial
708
17946
<gh_stars>100-1000 import os os.makedirs('./img/', exist_ok=True) IMAGE_URL = "https://mofanpy.com/static/img/description/learning_step_flowchart.png" def urllib_download(): from urllib.request import urlretrieve urlretrieve(IMAGE_URL, './img/image1.png') # whole document def request_download(): import requests r = requests.get(IMAGE_URL) with open('./img/image2.png', 'wb') as f: f.write(r.content) # whole document def chunk_download(): import requests r = requests.get(IMAGE_URL, stream=True) # stream loading with open('./img/image3.png', 'wb') as f: for chunk in r.iter_content(chunk_size=32): f.write(chunk) urllib_download() print('download image1') request_download() print('download image2') chunk_download() print('download image3')
3.21875
3
flare_classifier/cnn.py
Wingham1/hessidf
0
17947
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, Flatten, Dense, Dropout
import tensorflow.keras as keras
import os
import cv2
import numpy as np
from sklearn.model_selection import train_test_split


def data_prep(path, img_rows, img_cols, color):
    """Load and preprocess an image dataset for a CNN.

    Images are resized, normalised to pixel values in [0, 1], optionally
    converted to greyscale, and stacked into a numpy array. Class labels
    are one-hot encoded in the same order as the images. The data is
    shuffled so each training batch is representative of the whole set.

    Requires the images for each class to live in a separate directory
    under ``path``.

    Args:
        path: directory containing one sub-directory per class.
        img_rows: number of rows each image is resized to.
        img_cols: number of columns each image is resized to.
        color: True for RGB, False for greyscale.

    Returns:
        images: numpy array of shape
            (num images, img_rows, img_cols, channels), values in [0, 1].
        labels: numpy array of one-hot labels, same order as ``images``.
    """
    # sorted() => deterministic class->label mapping across platforms/runs
    # (os.listdir order is arbitrary); also hoisted out of the inner loop.
    class_names = sorted(os.listdir(path))
    images = []
    labels = []
    for class_index, image_class in enumerate(class_names):
        print('image_class =', image_class)
        path_to_class_directory = os.path.join(path, image_class)
        for img_name in os.listdir(path_to_class_directory):
            true_path = os.path.join(path_to_class_directory, img_name)
            if color:
                images.append(cv2.imread(true_path, 1) / 255.0)
            else:
                images.append(cv2.imread(true_path, 0) / 255.0)  # greyscale
            labels.append(class_index)

    data = list(zip(images, labels))
    np.random.shuffle(data)
    images, labels = zip(*data)

    # BUG FIX: cv2.resize's third positional parameter is `dst`, not the
    # interpolation flag, so INTER_AREA must be passed by keyword.
    images = [cv2.resize(img, (img_rows, img_cols),
                         interpolation=cv2.INTER_AREA)
              for img in images]  # resize images to all be the same

    if color:
        images = np.array(images).reshape(len(images), img_rows, img_cols, 3)
    else:
        images = np.array(images).reshape(len(images), img_rows, img_cols, 1)
    labels = keras.utils.to_categorical(labels, num_classes=len(class_names))
    return images, labels


def build_CNN(img_rows, img_cols, color=False, n_classes=None):
    """Build and compile a small convolutional classifier.

    Args:
        img_rows: input image height.
        img_cols: input image width.
        color: True for 3-channel input, False for 1-channel.
        n_classes: number of output classes. Defaults to the module-level
            ``num_classes`` the original implementation silently relied on,
            so existing callers keep working.

    Returns:
        A compiled ``Sequential`` model.
    """
    if n_classes is None:
        # Backwards-compatible fallback to the global set in __main__.
        n_classes = num_classes
    model = Sequential()
    if color:
        model.add(Conv2D(20, kernel_size=(3, 3), strides=1,
                         activation='relu',
                         input_shape=(img_rows, img_cols, 3)))
    else:
        model.add(Conv2D(20, kernel_size=(3, 3), strides=1,
                         activation='relu',
                         input_shape=(img_rows, img_cols, 1)))
    model.add(Conv2D(20, kernel_size=(3, 3), strides=1, activation='relu'))
    model.add(Flatten())
    #model.add(Dropout(0.25))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(n_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer='adam',
                  metrics=['accuracy'])
    return model


def decode_labels(coded, class_names):
    """Map one-hot (or probability) rows back to class names.

    Args:
        coded: numpy array of coded labels, one row per example.
        class_names: class names in the same order they were encoded.

    Returns:
        numpy array of class names, one per row of ``coded``.
    """
    # argmax over axis 1 replaces the per-row Python loop.
    return np.array(class_names)[np.argmax(coded, axis=1)]


def calc_accuracy(pred, real):
    """Return the fraction of predictions that match the real classes.

    Args:
        pred: numpy array of predicted classes.
        real: numpy array of true classes.

    Returns:
        Accuracy as a decimal in [0, 1].
    """
    return sum(pred == real) / len(pred)


if __name__ == '__main__':
    path = 'data'
    img_rows = 150
    img_cols = 150
    is_color = True
    model_filename = 'flare_cnn'

    print('\nloading training data\n')
    # Single sorted listing reused everywhere so the label mapping used for
    # training matches the one used for decoding below.
    class_names = sorted(os.listdir(path))
    num_classes = len(class_names)
    x, y = data_prep(path, img_rows, img_cols, color=is_color)
    x_train, x_test, y_train, y_test = train_test_split(x, y)

    print('\nbuilding model\n')
    cnn = build_CNN(img_rows, img_cols, color=is_color,
                    n_classes=num_classes)

    print('\ntraining model\n')
    cnn.fit(x_train, y_train, batch_size=50, epochs=1, validation_split=0.2)

    print('\nsaving model\n')
    if is_color:
        model_filename = model_filename + '_RGB' + '.h5'
    else:
        model_filename = model_filename + '_grey' + '.h5'
    cnn.save(model_filename)
    print('\nsaved model to file {}\n'.format(model_filename))

    print('\nloading model\n')
    loaded_cnn = keras.models.load_model(model_filename)

    print('\ngenerating predictions\n')
    predictions = loaded_cnn.predict(x_test)
    dec_preds = decode_labels(predictions, class_names)
    dec_ytest = decode_labels(y_test, class_names)
    # F1 score would probably be a better metric due to skew of training
    # examples (num B > num C)
    print('\naccuracy =', calc_accuracy(dec_preds, dec_ytest))
3.546875
4
survol/sources_types/oracle/library/__init__.py
AugustinMascarelli/survol
0
17948
""" Oracle library """ import lib_common from lib_properties import pc def Graphic_colorbg(): return "#CC99FF" def EntityOntology(): return ( ["Db", "Schema", "Library"], ) # Ambiguity with tables, oracle or normal users. def MakeUri(dbName,schemaName,libraryName): return lib_common.gUriGen.UriMakeFromDict("oracle/library", { "Db" : dbName, "Schema" : schemaName, "Library" : libraryName } ) def AddInfo(grph,node,entity_ids_arr): # TODO: SPECIAL. Imported here to avoid circular inclusions, see oracle/package_body/__init__.py from sources_types.oracle import schema as oracle_schema argDb = entity_ids_arr[0] argSchema = entity_ids_arr[1] node_oraschema = oracle_schema.MakeUri( argDb, argSchema ) grph.add( ( node_oraschema, pc.property_oracle_library, node ) ) def EntityName(entity_ids_arr): return entity_ids_arr[0] + "." + entity_ids_arr[1] + "." + entity_ids_arr[2]
2.296875
2
src/train.py
rnagumo/dgm_vae
5
17949
<gh_stars>1-10 """Training method""" import argparse import json import os import pathlib from typing import Union import numpy as np import torch from torch.backends import cudnn import pytorch_lightning as pl import dgmvae.models as dvm from experiment import VAEUpdater def main(): # ------------------------------------------------------------------------- # 1. Settings # ------------------------------------------------------------------------- # Kwargs args = init_args() # Configs condig_path = os.getenv("CONFIG_PATH", "./src/config_ch1.json") with pathlib.Path(condig_path).open() as f: config = json.load(f) # Path root = pathlib.Path(os.getenv("DATA_ROOT", "./data/mnist/")) save_path = pathlib.Path(os.getenv("SAVE_PATH", "./logs/"), os.getenv("EVALUATION_NAME", "dev")) model_path = save_path / "representation" dataset = os.getenv("DATASET_NAME", "mnist") # Cuda setting use_cuda = torch.cuda.is_available() and args.cuda != "null" gpus = args.cuda if use_cuda else None # Random seed torch.manual_seed(args.seed) np.random.seed(args.seed) cudnn.deterministic = True cudnn.benchmark = False # ------------------------------------------------------------------------- # 2. 
Training # ------------------------------------------------------------------------- # VAE model model_dict = { "beta": dvm.BetaVAE, "factor": dvm.FactorVAE, "dipi": dvm.DIPVAE, "dipii": dvm.DIPVAE, "joint": dvm.JointVAE, "tcvae": dvm.TCVAE, "aae": dvm.AAE, "avb": dvm.AVB, } model = model_dict[args.model](**config[f"{args.model}_params"]) # Updater updater = VAEUpdater(model, args, dataset, root, args.batch_size) # Trainer params = { "default_save_path": save_path, "gpus": gpus, "early_stop_callback": None, "max_steps": args.steps, "log_save_interval": args.log_save_interval, } trainer = pl.Trainer(**params) # Run trainer.fit(updater) # Export model model_path.mkdir() ch_num = config[f"{args.model}_params"]["channel_num"] export_model(updater.model, str(model_path / "pytorch_model.pt"), input_shape=(1, ch_num, 64, 64)) def export_model(model: Union[torch.nn.Module, torch.jit.ScriptModule], path: Union[str, pathlib.Path], input_shape: tuple = (1, 3, 64, 64), use_script_module: bool = True ) -> Union[str, pathlib.Path]: """Exports model. Args: model (torch.nn.Module or torch.jit.ScriptModule): Saved model. path (str or pathlib.Path): Path to file. input_shape (tuple, optional): Tuple of input data shape. use_script_module (bool, optional): Boolean flag for using script module. Returns: path (str or pathlib.Path): Path to saved file. """ model = model.cpu().eval() if isinstance(model, torch.jit.ScriptModule): assert use_script_module, \ "Provided model is a ScriptModule, set use_script_module to True." 
if use_script_module: if not isinstance(model, torch.jit.ScriptModule): assert input_shape is not None traced_model = torch.jit.trace(model, torch.zeros(*input_shape)) else: traced_model = model torch.jit.save(traced_model, path) else: torch.save(model, path) # saves model as a nn.Module return path def init_args(): parser = argparse.ArgumentParser(description="VAE training") parser.add_argument("--model", type=str, default="beta") parser.add_argument("--cuda", type=str, default="0") parser.add_argument("--seed", type=int, default=0) parser.add_argument("--steps", type=int, default=100) parser.add_argument("--batch-size", type=int, default=64) parser.add_argument("--log-save-interval", type=int, default=100) return parser.parse_args() if __name__ == "__main__": main()
2.171875
2
kea/axi_lite_registers/_registers.py
SmartAcoustics/Kea
3
17950
from myhdl import Signal, intbv, block, always_comb, ConcatSignal
import myhdl
from collections import OrderedDict
import keyword


def _is_valid_name(ident: str) -> bool:
    '''Determine if ident is a valid register or bitfield name.

    A valid name is a string that is a python identifier and not a
    python keyword.
    '''
    if not isinstance(ident, str):
        raise TypeError("expected str, but got {!r}".format(type(ident)))
    if not ident.isidentifier():
        return False
    if keyword.iskeyword(ident):
        return False
    return True


@block
def assign_bitfield_from_register(reg, bitfield, offset):
    # MyHDL block that continuously drives `bitfield` from the slice of
    # `reg` starting at bit `offset`.  Boolean bitfields copy a single
    # bit; intbv bitfields copy reg[offset + len(bitfield):offset].
    if isinstance(bitfield.val, bool):
        @always_comb
        def assignment():
            bitfield.next = reg[offset]
    else:
        start = offset
        stop = offset + len(bitfield)

        @always_comb
        def assignment():
            bitfield.next = reg[stop:start]

    return assignment


class Bitfields:
    # MyHDL interface packing several named bitfield signals onto one
    # register-wide signal (`self.register`).
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable in Python 3 — presumably intended; confirm.

    def __eq__(self, other):
        # Two Bitfields are equal when both their configuration and their
        # current signal values match.
        if not ((self._bitfields_config == other._bitfields_config) and
                (self._initial_values == other._initial_values) and
                (self._register_width == other._register_width) and
                (self._reg_type == other._reg_type)):
            return False
        else:
            # The values also need to be the same
            for bf_name in self._bitfields_config:
                if getattr(self, bf_name) != getattr(other, bf_name):
                    return False
            if self.register != other.register:
                return False
        return True

    def __init__(
            self, register_width, register_type, bitfields_config,
            initial_values=None):
        '''Create a MyHDL interface representing a series of bitfields
        packed onto a single register.

        `register_width` is the width in bits of the register the
        bitfields sit on top of.

        `register_type` is one of `axi_read_write`, `axi_read_only` or
        `axi_write_only`.

        `bitfields_config` maps each bitfield name to a configuration
        dict with a `type` key of `uint`, `bool`, `const-uint` or
        `const-bool`, plus type-dependent keys:

        - `uint`: `length` (bits) and `offset`
        - `bool`: `offset`
        - `const-uint`: `length`, `offset` and `const-value`
        - `const-bool`: `offset` and `const-value`

        Extra keys are ignored.  Constraints enforced here (violations
        raise ValueError): all bitfields must fit within the register
        width; `const-uint`/`const-bool` require a read-only register;
        bitfields must not overlap; no bitfield may be named `register`
        (reserved for the full-register signal).

        `initial_values` is an optional per-bitfield lookup of initial
        values; it is only allowed when `register_type` is
        `axi_read_write` (otherwise ValueError is raised).

        An example bitfields_config might look like:

            {'foo': {'type': 'uint', 'length': 6, 'offset': 0},
             'bar': {'type': 'bool', 'offset': 6},
             'baz': {'type': 'const-uint', 'length': 5, 'offset': 7,
                     'const-value': 15}}
        '''
        if len(bitfields_config) == 0:
            raise ValueError('bitfields_config cannot be empty')

        if register_type not in (
                'axi_read_write', 'axi_read_only', 'axi_write_only'):
            raise ValueError(
                'The register type must be one of `axi_read_write`, '
                '`axi_read_only` or `axi_write_only`')

        # NOTE(review): `!= None` would normally be `is not None`.
        if initial_values != None and register_type != 'axi_read_write':
            raise ValueError(
                '`initial_values` must be `None` if the register type '
                'is not `axi_read_write`')

        if initial_values is None:
            initial_values = {}

        # We always create a register attribute; its reset value is the
        # OR of every bitfield's initial value shifted to its offset.
        register_initial_val = 0
        for bitfield in bitfields_config:
            offset = bitfields_config[bitfield]['offset']
            try:
                init_val = initial_values[bitfield]
            except KeyError:
                init_val = 0

            register_initial_val += init_val << offset

        self._reg_type = register_type
        self._register_width = register_width
        self._bitfields_config = bitfields_config
        self._initial_values = initial_values

        # Per-bitfield bookkeeping: bit mask on the register, start bit
        # (keyed by offset -> name) and one-past-the-end stop bit.
        bitfield_masks = {}
        bitfield_starts = {}
        bitfield_stops = {}
        self._constant_vals = {}

        for bitfield in bitfields_config:

            if not _is_valid_name(bitfield):
                raise ValueError(
                    'Bitfield names must be valid python identifiers: '
                    '{}'.format(bitfield))

            if bitfield[0] == '_':
                raise ValueError(
                    'Bitfield names cannot begin with an underscore: '
                    '{}'.format(bitfield))

            if bitfield == 'register':
                raise ValueError('Bitfields cannot be named `register`.')

            if bitfields_config[bitfield]['type'] == 'uint':
                length = bitfields_config[bitfield]['length']
                offset = bitfields_config[bitfield]['offset']

                bf_signal = Signal(intbv(0)[length:])
                mask = (2**length - 1) << offset

                bitfield_starts[offset] = bitfield
                bitfield_stops[bitfield] = offset + length

            elif bitfields_config[bitfield]['type'] == 'bool':
                offset = bitfields_config[bitfield]['offset']

                bf_signal = Signal(False)
                mask = 1 << offset

                bitfield_starts[offset] = bitfield
                bitfield_stops[bitfield] = offset + 1

            elif bitfields_config[bitfield]['type'] == 'const-uint':
                if register_type != 'axi_read_only':
                    raise ValueError(
                        'The bitfield `{}` is of type `const-uint` which '
                        'requires the register is read-only, but the register '
                        'has been configured to be `{}`'.format(
                            bitfield, register_type))

                length = bitfields_config[bitfield]['length']
                offset = bitfields_config[bitfield]['offset']
                const_val = int(bitfields_config[bitfield]['const-value'])

                if (const_val >= 2**length or const_val < 0):
                    raise ValueError(
                        'The bitfield const value, {}, is invalid for '
                        'bitfield {}'.format(const_val, bitfield))

                # Constants are bare intbv values, not Signals.
                bf_signal = intbv(const_val)[length:]
                self._constant_vals[bitfield] = const_val

                # We also set the initial value for constants
                register_initial_val += const_val << offset

                mask = (2**length - 1) << offset

                bitfield_starts[offset] = bitfield
                bitfield_stops[bitfield] = offset + length

            elif bitfields_config[bitfield]['type'] == 'const-bool':
                if register_type != 'axi_read_only':
                    raise ValueError(
                        'The bitfield `{}` is of type `const-bool` which '
                        'requires the register is read-only, but the register '
                        'has been configured to be `{}`'.format(
                            bitfield, register_type))

                offset = bitfields_config[bitfield]['offset']
                const_val = bitfields_config[bitfield]['const-value']

                if not isinstance(const_val, bool):
                    raise ValueError(
                        'The bitfield const value, {}, is invalid for '
                        'bitfield {}'.format(const_val, bitfield))

                bf_signal = const_val
                self._constant_vals[bitfield] = const_val

                # We also set the initial value for constants
                register_initial_val += const_val << offset

                mask = 1 << offset

                bitfield_starts[offset] = bitfield
                bitfield_stops[bitfield] = offset + 1

            else:
                raise ValueError('A bitfield type must be one of `uint`, '
                                 '`bool`, `const-uint` or `const-bool`: '
                                 '{}'.format(bitfield))

            if mask >= 2**register_width:
                raise ValueError(
                    'The bitfield `{}` is out of range for a register of '
                    'width {}'.format(bitfield, register_width))

            # Check the bitfield doesn't overlap with any others
            for other_bf in bitfield_masks:
                if (bitfield_masks[other_bf] & mask) != 0:
                    raise ValueError(
                        'Bitfield `{}` overlaps with bitfield `{}`'.format(
                            bitfield, other_bf))

            bitfield_masks[bitfield] = mask

            # Each bitfield becomes an attribute on this interface.
            setattr(self, bitfield, bf_signal)

        # We now need to construct the packed version of the bitfields,
        # including padding.  The list is built LSB-first and reversed at
        # the end because ConcatSignal takes MSB-first arguments.
        rev_concat_list = []

        bitfield_starts_list = list(bitfield_starts.keys())
        bitfield_starts_list.sort()

        if bitfield_starts_list[0] != 0:
            padding = intbv(0)[bitfield_starts_list[0]:]
            rev_concat_list.append(padding)

        for i, start in enumerate(bitfield_starts_list):
            bitfield = bitfield_starts[start]
            rev_concat_list.append(getattr(self, bitfield))

            try:
                next_start = bitfield_starts_list[i + 1]
                # The higher up checks make sure padding_len should never be
                # negative.
                padding_len = next_start - bitfield_stops[bitfield]
                if padding_len > 0:
                    padding = intbv(0)[padding_len:]
                    rev_concat_list.append(padding)

            except IndexError:
                # Last bitfield: pad up to the full register width.
                if bitfield_stops[bitfield] < register_width:
                    padding = intbv(0)[
                        register_width - bitfield_stops[bitfield]:]
                    rev_concat_list.append(padding)

        self.register = Signal(intbv(register_initial_val)[register_width:])

        self._concat_list = rev_concat_list[::-1]
        self._bitfield_starts = bitfield_starts
        self._bitfield_masks = bitfield_masks

    @block
    def bitfield_connector(self):
        # Connects the bitfield signals and the full-register signal.
        # Writable registers drive bitfields *from* the register; read-only
        # registers drive the register *from* the (concatenated) bitfields.
        if self._reg_type in ('axi_read_write', 'axi_write_only'):
            instances = []
            for bitfield_start in self._bitfield_starts:
                bitfield = getattr(self, self._bitfield_starts[bitfield_start])
                instances.append(
                    assign_bitfield_from_register(
                        self.register, bitfield, bitfield_start))

            return instances

        # NOTE(review): `('axi_read_only')` is a plain string, so `in` is
        # a substring test here; it works for the exact value, but
        # `== 'axi_read_only'` would be clearer.
        elif self._reg_type in ('axi_read_only'):

            if len(self._concat_list) == 1:
                # This is a hack to allow a concat signal to work in
                # all cases. An alternative would be to special case single
                # signals, but that doesn't work well with constants, which
                # themselves would require a special case, and some hackery to
                # have the constant read (and requiring initial_values to be
                # turned on).
                keep = Signal(True)
                keep.driven = True
                reg_signal = ConcatSignal(keep, self._concat_list[0])
            else:
                reg_signal = ConcatSignal(*self._concat_list)

            @always_comb
            def assign_register():
                # Slice off the extra `keep` bit (single-signal case) and
                # anything above the register width.
                self.register.next = reg_signal[self._register_width:]

            return assign_register


class RegisterSet(object):
    # NOTE(review): empty placeholder; not used elsewhere in this file.
    pass


class Registers(object):
    ''' A general purpose register definition.
    '''

    @property
    def register_types(self):
        # Ordered mapping of register name -> register type string.
        return self._register_types

    def __eq__(self, other):
        # NOTE(review): compares configuration only, not current signal
        # values (unlike Bitfields.__eq__) — presumably intended; confirm.
        return (self._bitfields == other._bitfields and
                self._register_types == other._register_types and
                self._register_width == other._register_width)

    def __init__(
            self, register_list, register_types=None, register_width=32,
            initial_values=None, bitfields=None):
        '''Construct a MyHDL interface that encapsulates each register
        name given in `register_list`.  The order of the registers in the
        list is kept.

        `register_types` optionally maps a register name to one of
        `axi_read_write`, `axi_read_only` or `axi_write_only`; registers
        missing from it default to `axi_read_write`.  Every key must name
        a register in `register_list`.

        `register_width` gives the width in bits of each register
        created, defaulting to 32.

        `initial_values` optionally sets the initial value of read-write
        registers (ValueError for any other type).  If a register has
        bitfields, its entry should itself map bitfield names to initial
        values.

        `bitfields` optionally maps a register name to a
        bitfields_config; such registers are populated as `Bitfields`
        interfaces rather than plain Signals (see `Bitfields` for the
        expected config format).
        '''
        for name in register_list:
            if not _is_valid_name(name):
                raise ValueError('Invalid register name: {}'.format(name))

        if register_types is None:
            # Create a register types dictionary so that the system can handle
            # an empty register types argument.
            register_types = {}

        self._register_width = register_width

        # Create an ordered dictionary
        self._register_types = OrderedDict()

        for each in register_types:
            if each not in register_list:
                # Check that the register types have a corresponding register
                # in the register list. If not error.
                raise ValueError(
                    'Invalid register in register_types: %s' % each)

        if initial_values is None:
            initial_values = {}

        if bitfields is None:
            bitfields = {}

        for initial_val_key in initial_values:
            if (register_types.get(initial_val_key, 'axi_read_write') !=
                    'axi_read_write'):
                raise ValueError(
                    'Only read-write registers can take initial values: %s'
                    % initial_val_key + ': '
                    + str(register_types[initial_val_key]))

        for name in register_list:
            register_type = register_types.get(name, 'axi_read_write')
            if name in bitfields:
                initial_vals = initial_values.get(name, None)
                setattr(
                    self, name,
                    Bitfields(register_width, register_type,
                              bitfields[name],
                              initial_values=initial_vals))

            else:
                # Create the registers
                setattr(self, name, Signal(
                    intbv(initial_values.get(name, 0))[register_width:]))

            # Populate the ordered dictionary with the appropriate
            # register types, defaulting to 'axi_read_write'
            self._register_types[name] = (
                register_types.get(name, 'axi_read_write'))

        self._bitfields = bitfields
2.671875
3
tools/clear_from_n.py
ubercomrade/MultiDeNA
0
17951
# <filename>tools/clear_from_n.py
"""Remove runs of `N` bases from FASTA sequences.

An isolated `N` is replaced with a random base; a run of `N`s splits the
sequence and only the longest fragment is kept; a trailing `N` is
trimmed.
"""
import random


def read_fasta(path_in, path_out):
    """Read a FASTA file and write a cleaned copy.

    Sequence lines (anything not starting with '>') are upper-cased and
    passed through `clear_n`; sequences that end up empty are dropped.
    Each surviving sequence gets a fresh numeric header derived from its
    input line index.
    """
    # Context managers guarantee the files are closed even on error
    # (the original left both handles open if anything raised).
    with open(path_in, 'r') as fasta_in, open(path_out, 'w') as fasta_out:
        for index, line in enumerate(fasta_in):
            if line.startswith('>'):
                continue
            cleaned = clear_n(line.strip().upper())
            if cleaned != '':
                fasta_out.write('>{0}\n'.format(index // 2))
                fasta_out.write(cleaned + '\n')


def longest(ss):
    """Return the longer of the two strings in `ss` (the second on a tie)."""
    if len(ss[0]) > len(ss[1]):
        return ss[0]
    else:
        return ss[1]


def clear_n(string):
    """Return `string` with every `N` character removed.

    Rules, applied repeatedly until no `N` remains:
    - a trailing `N` is trimmed;
    - an isolated `N` is replaced with a random base from ACGT;
    - a run of `N`s splits the string and only the longest part is kept.
    """
    while 1:
        position = string.find('N')
        if position == -1:
            break
        elif position == len(string) - 1:
            # BUG FIX: was `string[:position - 1]`, which also dropped
            # the base immediately before the trailing N.
            string = string[:position]
            break
        elif string[position + 1] != 'N':
            # Isolated N: substitute a random base.
            string = (string[:position] + random.choice('ACGT')
                      + string[position + 1:])
        else:
            # Run of Ns: scan to its end, then keep the longest fragment
            # on either side of the run.
            for index, n in enumerate(string[position:], position):
                if n != 'N':
                    string = longest([string[:position], string[index:]])
                    break
                elif index == len(string) - 1:
                    # The run reaches the end of the string.
                    string = string[:position]
                    break
    return string


def clear_from_n(fasta_in, fasta_out):
    """Clean `fasta_in` into `fasta_out`; returns 0 for compatibility."""
    read_fasta(fasta_in, fasta_out)
    return 0
3.296875
3
tensorbank/tf/slices.py
pshved/tensorbank
1
17952
# <filename>tensorbank/tf/slices.py
"""Advanced Tensor slicing
==========================

Utilities for advanced tensor slicing and batching operations.

Reference
---------
"""
import tensorflow as tf


def slice_within_stride(x, stride, si=0, ei=None, keepdims=True):
    """Select ``x[..., (i * stride + si):(i * stride + ei)]`` for each i.

    The tensor returned will have the last dimension shrunk by a factor
    of ``(ei-si)/stride``.

    As a natural special case, ``tb.slice_within_stride(x, N)`` is
    equivalent to adding a dimension of ``N`` at the end, as in
    ``tf.expand_dims(x, (..., -1, N))``.

    Example:

        When predicting anchor positions in SSD, ``num_classes +
        num_offsets`` are predicted for each anchor. To get only the
        class confidence, this would be used::

            logits = model(input)
            # BUG FIX (docs): arguments reordered to match the
            # signature — stride comes second, then si, then ei.
            class_logits = tb.slice_within_stride(
                logits, num_classes + num_offsets, 0, num_classes)
            loss = softmax_cross_entropy_with_logits(
                class_preds, class_logits)

    Args:
        x (tf.Tensor): value to modify
        stride (int): stride for the last dimension
        si (int): starting index within stride.  Negative indices are
            supported.  Defaults to 0.
        ei (int): end index (1 element after the last) within stride.
            Negative indices are supported.  Defaults to ``None``, which
            means "until the last element".
        keepdims (bool): if False, adds another dimension that iterates
            over each stride.  This dimension will be of size ``ei-si``.
            Defaults to True.

    Returns:
        tf.Tensor: modified ``x`` with the last dimension sliced.
    """
    strided = tf.reshape(x, (-1, stride))
    sliced = strided[..., si:ei]

    # NOTE(review): assumes x has a fully-defined static shape; a None
    # (dynamic) dimension alongside the -1 below would break tf.reshape
    # — TODO confirm intended usage.
    new_shape = list(x.shape)
    new_shape[-1] = -1

    if not keepdims:
        if ei is None:
            ei = stride
        # range supports len() of a slice directly; this is O(stride),
        # which is small, and handles negative si/ei correctly.
        last_dim_len = len(range(stride)[si:ei])
        new_shape.append(last_dim_len)
    # (removed stray debug print of new_shape)
    return tf.reshape(sliced, new_shape)
3.21875
3
testfixtures/tests/test_roundcomparison.py
Alexhuszagh/XLDiscoverer
0
17953
# Copyright (c) 2014 Simplistix Ltd
# See license.txt for license details.
from decimal import Decimal

from testfixtures import RoundComparison as R, compare, ShouldRaise
from unittest import TestCase

from ..compat import PY2, PY3


class Tests(TestCase):
    # Tests for RoundComparison: R(value, digits) compares equal to any
    # number that rounds to the same value at `digits` decimal places.
    # Each behaviour is checked with the comparison object on both the
    # right-hand side (_rhs) and the left-hand side (_lhs).

    # -- float vs float equality / inequality --

    def test_equal_yes_rhs(self):
        self.assertTrue(0.123457 == R(0.123456, 5))

    def test_equal_yes_lhs(self):
        self.assertTrue(R(0.123456, 5) == 0.123457)

    def test_equal_no_rhs(self):
        self.assertFalse(0.123453 == R(0.123456, 5))

    def test_equal_no_lhs(self):
        self.assertFalse(R(0.123456, 5) == 0.123453)

    def test_not_equal_yes_rhs(self):
        self.assertFalse(0.123457 != R(0.123456, 5))

    def test_not_equal_yes_lhs(self):
        self.assertFalse(R(0.123456, 5) != 0.123457)

    def test_not_equal_no_rhs(self):
        self.assertTrue(0.123453 != R(0.123456, 5))

    def test_not_equal_no_lhs(self):
        self.assertTrue(R(0.123456, 5) != 0.123453)

    # -- comparison objects nested inside sequences --

    def test_equal_in_sequence_rhs(self):
        self.assertEqual((1, 2, 0.123457),
                         (1, 2, R(0.123456, 5)))

    def test_equal_in_sequence_lhs(self):
        self.assertEqual((1, 2, R(0.123456, 5)),
                         (1, 2, 0.123457))

    def test_not_equal_in_sequence_rhs(self):
        self.assertNotEqual((1, 2, 0.1236),
                            (1, 2, R(0.123456, 5)))

    def test_not_equal_in_sequence_lhs(self):
        self.assertNotEqual((1, 2, R(0.123456, 5)),
                            (1, 2, 0.1236))

    # -- non-numeric operands raise TypeError --

    def test_not_numeric_rhs(self):
        with ShouldRaise(TypeError):
            'abc' == R(0.123456, 5)

    def test_not_numeric_lhs(self):
        with ShouldRaise(TypeError):
            R(0.123456, 5) == 'abc'

    # -- textual representation --

    def test_repr(self):
        compare('<R:0.12346 to 5 digits>',
                repr(R(0.123456, 5)))

    def test_str(self):
        # NOTE(review): str() falls back to __repr__, hence repr() here
        # as well — confirm this is the intent of the "str" test.
        compare('<R:0.12346 to 5 digits>',
                repr(R(0.123456, 5)))

    def test_str_negative(self):
        # round(int, negative) returns int on PY3 but float on PY2.
        if PY3:
            expected = '<R:123500 to -2 digits>'
        else:
            expected = '<R:123500.0 to -2 digits>'
        compare(expected, repr(R(123456, -2)))

    # -- mixing Decimal and float raises TypeError (PY3 only) --

    TYPE_ERROR_DECIMAL = TypeError(
        "Cannot compare <R:0.12346 to 5 digits> with <class 'decimal.Decimal'>"
    )

    def test_equal_yes_decimal_to_float_rhs(self):
        with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2):
            self.assertTrue(Decimal("0.123457") == R(0.123456, 5))

    def test_equal_yes_decimal_to_float_lhs(self):
        with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2):
            self.assertTrue(R(0.123456, 5) == Decimal("0.123457"))

    def test_equal_no_decimal_to_float_rhs(self):
        with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2):
            self.assertFalse(Decimal("0.123453") == R(0.123456, 5))

    def test_equal_no_decimal_to_float_lhs(self):
        with ShouldRaise(self.TYPE_ERROR_DECIMAL, unless=PY2):
            self.assertFalse(R(0.123456, 5) == Decimal("0.123453"))

    TYPE_ERROR_FLOAT = TypeError(
        "Cannot compare <R:0.12346 to 5 digits> with <class 'float'>"
    )

    def test_equal_yes_float_to_decimal_rhs(self):
        with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2):
            self.assertTrue(0.123457 == R(Decimal("0.123456"), 5))

    def test_equal_yes_float_to_decimal_lhs(self):
        with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2):
            self.assertTrue(R(Decimal("0.123456"), 5) == 0.123457)

    def test_equal_no_float_to_decimal_rhs(self):
        with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2):
            self.assertFalse(0.123453 == R(Decimal("0.123456"), 5))

    def test_equal_no_float_to_decimal_lhs(self):
        with ShouldRaise(self.TYPE_ERROR_FLOAT, unless=PY2):
            self.assertFalse(R(Decimal("0.123456"), 5) == 0.123453)

    def test_integer_float(self):
        with ShouldRaise(TypeError, unless=PY2):
            1 == R(1.000001, 5)

    def test_float_integer(self):
        with ShouldRaise(TypeError, unless=PY2):
            R(1.000001, 5) == 1

    # -- integer comparisons, including negative precision --

    def test_equal_yes_integer_other_rhs(self):
        self.assertTrue(10 == R(11, -1))

    def test_equal_yes_integer_lhs(self):
        self.assertTrue(R(11, -1) == 10)

    def test_equal_no_integer_rhs(self):
        self.assertFalse(10 == R(16, -1))

    def test_equal_no_integer_lhs(self):
        self.assertFalse(R(16, -1) == 10)

    def test_equal_integer_zero_precision(self):
        self.assertTrue(1 == R(1, 0))

    def test_equal_yes_negative_precision(self):
        self.assertTrue(149.123 == R(101.123, -2))

    def test_equal_no_negative_precision(self):
        self.assertFalse(149.123 == R(150.001, -2))

    # -- Decimal vs Decimal comparisons work directly --

    def test_decimal_yes_rhs(self):
        self.assertTrue(Decimal('0.123457') == R(Decimal('0.123456'), 5))

    def test_decimal_yes_lhs(self):
        self.assertTrue(R(Decimal('0.123456'), 5) == Decimal('0.123457'))

    def test_decimal_no_rhs(self):
        self.assertFalse(Decimal('0.123453') == R(Decimal('0.123456'), 5))

    def test_decimal_no_lhs(self):
        self.assertFalse(R(Decimal('0.123456'), 5) == Decimal('0.123453'))
3
3
config/usb_device_cdc.py
newbs/usb
0
17954
"""***************************************************************************** * Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries. * * Subject to your compliance with these terms, you may use Microchip software * and any derivatives exclusively with Microchip products. It is your * responsibility to comply with third party license terms applicable to your * use of third party software (including open source software) that may * accompany Microchip software. * * THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER * EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A * PARTICULAR PURPOSE. * * IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE, * INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND * WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS * BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE * FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN * ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY, * THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE. 
*****************************************************************************"""

# ---------------------------------------------------------------------------
# Module state for the MPLAB Harmony CDC function-driver configuration script.
# ---------------------------------------------------------------------------
# Cached queue-size selections; used to compute deltas when the user edits a
# queue size (see usbDeviceCdcBufferQueueSize further down in this file).
currentQSizeRead = 1
currentQSizeWrite = 1
currentQSizeSerialStateNotification = 1
# One CDC ACM function occupies two interfaces (Communication + Data).
cdcInterfacesNumber = 2
# Bytes contributed by one CDC function to the configuration descriptor
# (without the optional 8-byte IAD).
cdcDescriptorSize = 2 * 29 if False else 58  # NOTE(review): constant 58 kept verbatim below
cdcDescriptorSize = 58
# Endpoints consumed per CDC instance: parts that pair Bulk IN/OUT on one
# endpoint number need 2, other SAM parts need 3.
cdcEndpointsPic32 = 2
cdcEndpointsSAM = 3

# Symbol handles created in instantiateComponent() and updated by the
# attachment callbacks below.
indexFunction = None
configValue = None
startInterfaceNumber = None
numberOfInterfaces = None
useIad = None
epNumberInterrupt = None
epNumberBulkOut = None
epNumberBulkIn = None
cdcEndpointNumber = None


def handleMessage(messageID, args):
    """Inter-component message handler; only UPDATE_CDC_IAD_ENABLE is acted on."""
    global useIad
    if (messageID == "UPDATE_CDC_IAD_ENABLE"):
        # Another component asked this CDC instance to toggle its IAD symbol.
        useIad.setValue(args["iadEnable"])
    return args


def onAttachmentConnected(source, target):
    """Called when this CDC instance is attached to the USB Device Layer.

    Pushes updated function/interface/endpoint/descriptor-size counts to the
    "usb_device" component and records the interface/endpoint numbers
    assigned to this instance.
    """
    global cdcInterfacesNumber
    global cdcDescriptorSize
    global configValue
    global startInterfaceNumber
    global numberOfInterfaces
    global useIad
    global epNumberInterrupt
    global epNumberBulkOut
    global epNumberBulkIn
    global cdcEndpointsPic32
    global cdcEndpointsSAM
    global currentQSizeRead
    global currentQSizeWrite
    global currentQSizeSerialStateNotification

    print ("CDC Function Driver: Attached")

    remoteComponent = target["component"]
    remoteComponentId = remoteComponent.getID()

    if (remoteComponentId == "usb_device"):
        dependencyID = source["id"]
        ownerComponent = source["component"]

        # Read number of functions from USB Device Layer
        nFunctions = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_FUNCTIONS_NUMBER")
        if nFunctions != None:
            #Log.writeDebugMessage ("USB Device CDC Function Driver: Attachment connected")

            # Update Number of Functions in USB Device, Increment the value by One.
            args = {"nFunction":nFunctions + 1}
            res = Database.sendMessage("usb_device", "UPDATE_FUNCTIONS_NUMBER", args)

            # If we have CDC function driver plus any function driver (no matter what Class), we enable IAD.
            if nFunctions > 0:
                args = {"nFunction":True}
                res = Database.sendMessage("usb_device", "UPDATE_IAD_ENABLE", args)
                iadEnableSymbol = ownerComponent.getSymbolByID("CONFIG_USB_DEVICE_FUNCTION_USE_IAD")
                iadEnableSymbol.clearValue()
                iadEnableSymbol.setValue(True, 1)
                # Also enable IAD on the first CDC instance if it was created
                # before a second function driver existed.
                isIadEnabled = Database.getSymbolValue("usb_device_cdc_0", "CONFIG_USB_DEVICE_FUNCTION_USE_IAD")
                if isIadEnabled == False:
                    args = {"iadEnable":True}
                    res = Database.sendMessage("usb_device_cdc_0", "UPDATE_CDC_IAD_ENABLE", args)
                    nCDCInstances = Database.getSymbolValue("usb_device_cdc", "CONFIG_USB_DEVICE_CDC_INSTANCES")
                    if nCDCInstances == 2:
                        # Second instance: account for the first instance's
                        # newly added 8-byte IAD in the config descriptor size.
                        configDescriptorSize = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_CONFIG_DESCRPTR_SIZE")
                        if configDescriptorSize != None:
                            args = {"nFunction": configDescriptorSize + 8}
                            res = Database.sendMessage("usb_device", "UPDATE_CONFIG_DESCRPTR_SIZE", args)

        # Add this instance's contribution to the configuration descriptor size.
        configDescriptorSize = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_CONFIG_DESCRPTR_SIZE")
        if configDescriptorSize != None:
            iadEnableSymbol = ownerComponent.getSymbolByID("CONFIG_USB_DEVICE_FUNCTION_USE_IAD")
            if iadEnableSymbol.getValue() == True:
                descriptorSize = cdcDescriptorSize + 8
            else:
                descriptorSize = cdcDescriptorSize
            args = {"nFunction": configDescriptorSize + descriptorSize}
            res = Database.sendMessage("usb_device", "UPDATE_CONFIG_DESCRPTR_SIZE", args)

        # Claim the next two interface numbers for this instance.
        nInterfaces = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_INTERFACES_NUMBER")
        if nInterfaces != None:
            args = {"nFunction": nInterfaces + cdcInterfacesNumber}
            res = Database.sendMessage("usb_device", "UPDATE_INTERFACES_NUMBER", args)
            startInterfaceNumber.setValue(nInterfaces, 1)

        # Claim endpoint numbers; parts sharing IN/OUT endpoint numbers need
        # two endpoints, other SAM parts need three.
        nEndpoints = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_ENDPOINTS_NUMBER")
        if nEndpoints != None:
            epNumberInterrupt.setValue(nEndpoints + 1, 1)
            epNumberBulkOut.setValue(nEndpoints + 2, 1)
            if any(x in Variables.get("__PROCESSOR") for x in ["PIC32MZ", "PIC32MX", "PIC32MK", "SAMD21", "SAMDA1","SAMD51", "SAME51", "SAME53", "SAME54", "SAML21", "SAML22", "SAMD11"]):
                # Bulk IN shares the Bulk OUT endpoint number on these parts.
                epNumberBulkIn.setValue(nEndpoints + 2, 1)
                args = {"nFunction": nEndpoints + cdcEndpointsPic32}
                res = Database.sendMessage("usb_device", "UPDATE_ENDPOINTS_NUMBER", args)
            else:
                epNumberBulkIn.setValue(nEndpoints + 3, 1)
                args = {"nFunction": nEndpoints + cdcEndpointsSAM}
                res = Database.sendMessage("usb_device", "UPDATE_ENDPOINTS_NUMBER", args)


def onAttachmentDisconnected(source, target):
    """Called when this CDC instance is detached from the USB Device Layer.

    Reverses the bookkeeping done in onAttachmentConnected: decrements
    function/interface/endpoint counts and subtracts this instance's
    descriptor-size contribution.
    """
    print ("CDC Function Driver: Detached")

    global cdcInterfacesNumber
    global cdcDescriptorSize
    global configValue
    global startInterfaceNumber
    global numberOfInterfaces
    global useIad
    global epNumberInterrupt
    global epNumberBulkOut
    global epNumberBulkIn
    global cdcEndpointsPic32
    global cdcEndpointsSAM
    global cdcInstancesCount  # NOTE(review): never assigned/read in this file — verify
    global currentQSizeRead
    global currentQSizeWrite
    global currentQSizeSerialStateNotification

    dependencyID = source["id"]
    ownerComponent = source["component"]

    remoteComponent = target["component"]
    remoteComponentId = remoteComponent.getID()

    if (remoteComponentId == "usb_device"):
        nFunctions = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_FUNCTIONS_NUMBER")
        if nFunctions != None:
            nFunctions = nFunctions - 1
            args = {"nFunction":nFunctions}
            res = Database.sendMessage("usb_device", "UPDATE_FUNCTIONS_NUMBER", args)

        endpointNumber = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_ENDPOINTS_NUMBER")
        if endpointNumber != None:
            # NOTE(review): only PIC32MZ is checked here, whereas the connect
            # path checks a wider processor list — confirm this asymmetry.
            if any(x in Variables.get("__PROCESSOR") for x in ["PIC32MZ"]):
                args = {"nFunction":endpointNumber - cdcEndpointsPic32 }
                res = Database.sendMessage("usb_device", "UPDATE_ENDPOINTS_NUMBER", args)
            else:
                args = {"nFunction":endpointNumber - cdcEndpointsSAM }
                res = Database.sendMessage("usb_device", "UPDATE_ENDPOINTS_NUMBER", args)

        interfaceNumber = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_INTERFACES_NUMBER")
        if interfaceNumber != None:
            # Release the two interfaces this CDC instance occupied.
            args = {"nFunction": interfaceNumber - 2}
            res = Database.sendMessage("usb_device", "UPDATE_INTERFACES_NUMBER", args)

        nCDCInstances = Database.getSymbolValue("usb_device_cdc", "CONFIG_USB_DEVICE_CDC_INSTANCES")
        if nCDCInstances != None:
            nCDCInstances = nCDCInstances - 1
            args = {"cdcInstanceCount": nCDCInstances}
            res = Database.sendMessage("usb_device_cdc", "UPDATE_CDC_INSTANCES", args)
            # If only one CDC instance and one function remain, IAD is no
            # longer needed; disable it and drop its 8 descriptor bytes.
            if nCDCInstances == 1 and nFunctions != None and nFunctions == 1:
                args = {"iadEnable":False}
                res = Database.sendMessage("usb_device_cdc_0", "UPDATE_CDC_IAD_ENABLE", args)
                args = {"nFunction":False}
                res = Database.sendMessage("usb_device", "UPDATE_IAD_ENABLE", args)
                configDescriptorSize = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_CONFIG_DESCRPTR_SIZE")
                if configDescriptorSize != None:
                    args = {"nFunction": configDescriptorSize - 8}
                    res = Database.sendMessage("usb_device", "UPDATE_CONFIG_DESCRPTR_SIZE", args)

        # Subtract this instance's contribution to the config descriptor size.
        configDescriptorSize = Database.getSymbolValue("usb_device", "CONFIG_USB_DEVICE_CONFIG_DESCRPTR_SIZE")
        if configDescriptorSize != None:
            if useIad.getValue() == True:
                descriptorSize = cdcDescriptorSize + 8
            else:
                descriptorSize = cdcDescriptorSize
            args = {"nFunction": configDescriptorSize - descriptorSize}
            res = Database.sendMessage("usb_device", "UPDATE_CONFIG_DESCRPTR_SIZE", args)


def destroyComponent(component):
    """Called by MPLAB Harmony when the component is removed from the project."""
    print ("CDC Function Driver: Destroyed")


# This function is called when user modifies the CDC Queue Size.
def usbDeviceCdcBufferQueueSize(usbSymbolSource, event):
    """Dependency callback: recompute the combined CDC queue depth.

    Fired whenever one of the three per-instance queue-size symbols changes;
    applies the delta between the new value and the cached old value to the
    shared CONFIG_USB_DEVICE_CDC_QUEUE_DEPTH_COMBINED symbol.
    """
    global currentQSizeRead
    global currentQSizeWrite
    global currentQSizeSerialStateNotification

    queueDepthCombined = Database.getSymbolValue("usb_device_cdc", "CONFIG_USB_DEVICE_CDC_QUEUE_DEPTH_COMBINED")
    if (event["id"] == "CONFIG_USB_DEVICE_FUNCTION_READ_Q_SIZE"):
        queueDepthCombined = queueDepthCombined - currentQSizeRead + event["value"]
        currentQSizeRead = event["value"]
    if (event["id"] == "CONFIG_USB_DEVICE_FUNCTION_WRITE_Q_SIZE"):
        queueDepthCombined = queueDepthCombined - currentQSizeWrite + event["value"]
        currentQSizeWrite = event["value"]
    if (event["id"] == "CONFIG_USB_DEVICE_FUNCTION_SERIAL_NOTIFIACATION_Q_SIZE"):
        queueDepthCombined = queueDepthCombined - currentQSizeSerialStateNotification + event["value"]
        currentQSizeSerialStateNotification = event["value"]

    # We have updated queueDepthCombined variable with current combined queue length.
    # Now send a message to USB_DEVICE_CDC_COMMON.PY to modify the Combined queue length.
    args = {"cdcQueueDepth": queueDepthCombined}
    res = Database.sendMessage("usb_device_cdc", "UPDATE_CDC_QUEUE_DEPTH_COMBINED", args)


def instantiateComponent(usbDeviceCdcComponent, index):
    """Create all configuration symbols and template-file symbols for one
    CDC function-driver instance.

    :param usbDeviceCdcComponent: the Harmony component being instantiated
    :param index: zero-based instance index of this CDC function
    """
    global cdcDescriptorSize
    global cdcInterfacesNumber
    global cdcDescriptorSize
    global configValue
    global startInterfaceNumber
    global numberOfInterfaces
    global useIad
    global currentQSizeRead
    global currentQSizeWrite
    global currentQSizeSerialStateNotification
    global epNumberInterrupt
    global epNumberBulkOut
    global epNumberBulkIn

    res = Database.activateComponents(["usb_device"])

    # Per-family endpoint limits and the default Bulk IN endpoint number.
    if any(x in Variables.get("__PROCESSOR") for x in ["PIC32MZ"]):
        MaxEpNumber = 7
        BulkInDefaultEpNumber = 2
    elif any(x in Variables.get("__PROCESSOR") for x in ["PIC32MX", "PIC32MK"]):
        MaxEpNumber = 15
        BulkInDefaultEpNumber = 2
    elif any(x in Variables.get("__PROCESSOR") for x in ["SAMD21", "SAMDA1", "SAMD51", "SAME51", "SAME53", "SAME54", "SAML21", "SAML22", "SAMD11"]):
        MaxEpNumber = 7
        BulkInDefaultEpNumber = 2
    elif any(x in Variables.get("__PROCESSOR") for x in ["SAMA5D2", "SAM9X60"]):
        MaxEpNumber = 15
        BulkInDefaultEpNumber = 3
    elif any(x in Variables.get("__PROCESSOR") for x in ["SAME70", "SAMS70", "SAMV70", "SAMV71"]):
        MaxEpNumber = 9
        BulkInDefaultEpNumber = 3
    elif any(x in Variables.get("__PROCESSOR") for x in ["SAMG55"]):
        MaxEpNumber = 5
        BulkInDefaultEpNumber = 3

    # Index of this function
    indexFunction = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_INDEX", None)
    indexFunction.setVisible(False)
    indexFunction.setMin(0)
    indexFunction.setMax(16)
    indexFunction.setDefaultValue(index)

    # Config name: Configuration number
    configValue = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_CONFIG_VALUE", None)
    configValue.setLabel("Configuration Value")
    configValue.setVisible(False)
    configValue.setMin(1)
    configValue.setMax(16)
    configValue.setDefaultValue(1)
    configValue.setReadOnly(True)

    # Adding Start Interface number
    startInterfaceNumber = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_INTERFACE_NUMBER", None)
    startInterfaceNumber.setLabel("Start Interface Number")
    helpText = '''Indicates the Interface Number of the first interfaces in the Communication Device Interface Group.
    This is provided here for indication purposes only and is automatically updated based on the function driver
    selection.'''
    startInterfaceNumber.setDescription(helpText)
    startInterfaceNumber.setVisible(True)
    startInterfaceNumber.setMin(0)
    startInterfaceNumber.setDefaultValue(0)
    startInterfaceNumber.setReadOnly(True)

    # Adding Number of Interfaces
    numberOfInterfaces = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_NUMBER_OF_INTERFACES", None)
    numberOfInterfaces.setLabel("Number of Interfaces")
    helpText = '''Indicates the interfaces in the Communication Device Interface Group.
    This is provided here for indication purposes only.'''
    numberOfInterfaces.setDescription(helpText)
    numberOfInterfaces.setVisible(True)
    numberOfInterfaces.setMin(1)
    numberOfInterfaces.setMax(16)
    numberOfInterfaces.setDefaultValue(2)
    numberOfInterfaces.setReadOnly(True)

    # Use IAD
    useIad = usbDeviceCdcComponent.createBooleanSymbol("CONFIG_USB_DEVICE_FUNCTION_USE_IAD", None)
    useIad.setLabel("Use Interface Association Descriptor")
    helpText = '''Enable this option to generate a Interface Association Descriptor (IAD). This option should be
    enabled in case multiple CDC interfaces are included in the Device. Enabling the option will update the Class,
    Sublass fields in the Device Descriptor to indicate that that device uses IAD.'''
    useIad.setDescription(helpText)
    useIad.setVisible(True)
    useIad.setDefaultValue(False)
    useIad.setUseSingleDynamicValue(True)

    # CDC Function driver Read Queue Size
    queueSizeRead = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_READ_Q_SIZE", None)
    queueSizeRead.setLabel("CDC Read Queue Size")
    helpText = '''Configure the size of the Read Queue. This configures the maximum number of Read Requests that can be
    queued before the Function Driver returns a queue full response. Using a queue increases memory consumption but also
    increases throughput. The driver will queue requests if the transfer request is currently being processed.'''
    queueSizeRead.setDescription(helpText)
    queueSizeRead.setVisible(True)
    queueSizeRead.setMin(1)
    queueSizeRead.setMax(32767)
    queueSizeRead.setDefaultValue(1)
    # Cache the initial value so usbDeviceCdcBufferQueueSize can compute deltas.
    currentQSizeRead = queueSizeRead.getValue()

    # CDC Function driver Write Queue Size
    queueSizeWrite = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_WRITE_Q_SIZE", None)
    helpText = '''Configure the size of the Write Queue. This configures the maximum number of Write Requests that can
    be queued before the Function Driver returns a queue full response. Using a queue increases memory consumption but
    also increases throughput. The driver will queue requests if the transfer request is currently being processed.'''
    queueSizeWrite.setDescription(helpText)
    queueSizeWrite.setLabel("CDC Write Queue Size")
    queueSizeWrite.setVisible(True)
    queueSizeWrite.setMin(1)
    queueSizeWrite.setMax(32767)
    queueSizeWrite.setDefaultValue(1)
    currentQSizeWrite = queueSizeWrite.getValue()

    # CDC Function driver Serial state notification Queue Size
    # NOTE(review): the symbol ID spells "NOTIFIACATION"; kept verbatim because
    # external templates and callbacks reference this exact ID.
    queueSizeSerialStateNotification = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_SERIAL_NOTIFIACATION_Q_SIZE", None)
    queueSizeSerialStateNotification.setLabel("CDC Serial Notification Queue Size")
    helpText = '''Configure the size of the Serial State Notification Queue. This configures the maximum number of
    Serial State Notification Requests that can be queued before the Function Driver returns a queue full response.
    Using a queue increases memory consumption but also increases throughput. The driver will queue requests if the
    transfer request is currently being processed.'''
    queueSizeSerialStateNotification.setDescription(helpText)
    queueSizeSerialStateNotification.setVisible(True)
    queueSizeSerialStateNotification.setMin(1)
    queueSizeSerialStateNotification.setMax(32767)
    queueSizeSerialStateNotification.setDefaultValue(1)
    currentQSizeSerialStateNotification = queueSizeSerialStateNotification.getValue()

    # CDC Function driver Notification Endpoint Number
    epNumberInterrupt = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_INT_ENDPOINT_NUMBER", None)
    helpText = '''Specify the endpoint number of Interrupt IN Endpoint to be used for this instance of the CDC
    Interface. Refer to Device Datasheet for details on available endpoints and limitations.'''
    epNumberInterrupt.setDescription(helpText)
    epNumberInterrupt.setLabel("Interrupt Endpoint Number")
    epNumberInterrupt.setVisible(True)
    epNumberInterrupt.setMin(1)
    epNumberInterrupt.setDefaultValue(1)
    epNumberInterrupt.setMax(MaxEpNumber)

    # CDC Function driver Data OUT Endpoint Number
    epNumberBulkOut = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_BULK_OUT_ENDPOINT_NUMBER", None)
    helpText = '''Specify the endpoint number of Bulk Out Endpoint to be used for this instance of the CDC Interface.
    Refer to Device Datasheet for details on available endpoints and limitations.'''
    epNumberBulkOut.setDescription(helpText)
    epNumberBulkOut.setLabel("Bulk OUT Endpoint Number")
    epNumberBulkOut.setVisible(True)
    epNumberBulkOut.setMin(1)
    epNumberBulkOut.setDefaultValue(2)
    epNumberBulkOut.setMax(MaxEpNumber)

    # CDC Function driver Data IN Endpoint Number
    epNumberBulkIn = usbDeviceCdcComponent.createIntegerSymbol("CONFIG_USB_DEVICE_FUNCTION_BULK_IN_ENDPOINT_NUMBER", None)
    helpText = '''Specify the endpoint number of Bulk IN Endpoint to be used for this instance of the CDC Interface.
    Refer to Device Datasheet for details on available endpoints and limitations.'''
    epNumberBulkIn.setDescription(helpText)
    epNumberBulkIn.setLabel("Bulk IN Endpoint Number")
    epNumberBulkIn.setVisible(True)
    epNumberBulkIn.setMin(1)
    epNumberBulkIn.setMax(MaxEpNumber)
    epNumberBulkIn.setDefaultValue(BulkInDefaultEpNumber)

    # Hidden symbol whose only purpose is to attach the queue-size callback.
    usbDeviceCdcBufPool = usbDeviceCdcComponent.createBooleanSymbol("CONFIG_USB_DEVICE_CDC_BUFFER_POOL", None)
    usbDeviceCdcBufPool.setLabel("**** Buffer Pool Update ****")
    usbDeviceCdcBufPool.setDependencies(usbDeviceCdcBufferQueueSize, ["CONFIG_USB_DEVICE_FUNCTION_READ_Q_SIZE", "CONFIG_USB_DEVICE_FUNCTION_WRITE_Q_SIZE", "CONFIG_USB_DEVICE_FUNCTION_SERIAL_NOTIFIACATION_Q_SIZE"])
    usbDeviceCdcBufPool.setVisible(False)

    ############################################################################
    #### Dependency ####
    ############################################################################

    # USB DEVICE CDC Common Dependency

    Log.writeDebugMessage ("Dependency Started")

    # Register this instance with the common CDC component.
    numInstances  = Database.getSymbolValue("usb_device_cdc", "CONFIG_USB_DEVICE_CDC_INSTANCES")
    if (numInstances == None):
        numInstances = 0
    args = {"cdcInstanceCount": index+1}
    res = Database.sendMessage("usb_device_cdc", "UPDATE_CDC_INSTANCES", args)

    #############################################################
    # Function Init Entry for CDC
    #############################################################
    usbDeviceCdcFunInitFile = usbDeviceCdcComponent.createFileSymbol(None, None)
    usbDeviceCdcFunInitFile.setType("STRING")
    usbDeviceCdcFunInitFile.setOutputName("usb_device.LIST_USB_DEVICE_FUNCTION_INIT_ENTRY")
    usbDeviceCdcFunInitFile.setSourcePath("templates/device/cdc/system_init_c_device_data_cdc_function_init.ftl")
    usbDeviceCdcFunInitFile.setMarkup(True)

    #############################################################
    # Function Registration table for CDC
    #############################################################
    usbDeviceCdcFunRegTableFile = usbDeviceCdcComponent.createFileSymbol(None, None)
    usbDeviceCdcFunRegTableFile.setType("STRING")
    usbDeviceCdcFunRegTableFile.setOutputName("usb_device.LIST_USB_DEVICE_FUNCTION_ENTRY")
    usbDeviceCdcFunRegTableFile.setSourcePath("templates/device/cdc/system_init_c_device_data_cdc_function.ftl")
    usbDeviceCdcFunRegTableFile.setMarkup(True)

    #############################################################
    # HS Descriptors for CDC Function
    #############################################################
    usbDeviceCdcDescriptorHsFile = usbDeviceCdcComponent.createFileSymbol(None, None)
    usbDeviceCdcDescriptorHsFile.setType("STRING")
    usbDeviceCdcDescriptorHsFile.setOutputName("usb_device.LIST_USB_DEVICE_FUNCTION_DESCRIPTOR_HS_ENTRY")
    usbDeviceCdcDescriptorHsFile.setSourcePath("templates/device/cdc/system_init_c_device_data_cdc_function_descrptr_hs.ftl")
    usbDeviceCdcDescriptorHsFile.setMarkup(True)

    #############################################################
    # FS Descriptors for CDC Function
    #############################################################
    usbDeviceCdcDescriptorFsFile = usbDeviceCdcComponent.createFileSymbol(None, None)
    usbDeviceCdcDescriptorFsFile.setType("STRING")
    usbDeviceCdcDescriptorFsFile.setOutputName("usb_device.LIST_USB_DEVICE_FUNCTION_DESCRIPTOR_FS_ENTRY")
    usbDeviceCdcDescriptorFsFile.setSourcePath("templates/device/cdc/system_init_c_device_data_cdc_function_descrptr_fs.ftl")
    usbDeviceCdcDescriptorFsFile.setMarkup(True)

    #############################################################
    # Class code Entry for CDC Function
    #############################################################
    usbDeviceCdcDescriptorClassCodeFile = usbDeviceCdcComponent.createFileSymbol(None, None)
    usbDeviceCdcDescriptorClassCodeFile.setType("STRING")
    usbDeviceCdcDescriptorClassCodeFile.setOutputName("usb_device.LIST_USB_DEVICE_DESCRIPTOR_CLASS_CODE_ENTRY")
    usbDeviceCdcDescriptorClassCodeFile.setSourcePath("templates/device/cdc/system_init_c_device_data_cdc_function_class_codes.ftl")
    usbDeviceCdcDescriptorClassCodeFile.setMarkup(True)

    ################################################
    # USB CDC Function driver Files
    ################################################
    usbDeviceCdcHeaderFile = usbDeviceCdcComponent.createFileSymbol(None, None)
    addFileName('usb_device_cdc.h', usbDeviceCdcComponent, usbDeviceCdcHeaderFile, "middleware/", "/usb/", True, None)

    usbCdcHeaderFile = usbDeviceCdcComponent.createFileSymbol(None, None)
    addFileName('usb_cdc.h', usbDeviceCdcComponent, usbCdcHeaderFile, "middleware/", "/usb/", True, None)

    usbDeviceCdcSourceFile = usbDeviceCdcComponent.createFileSymbol(None, None)
    addFileName('usb_device_cdc.c', usbDeviceCdcComponent, usbDeviceCdcSourceFile, "middleware/src/", "/usb/src", True, None)

    usbDeviceCdcAcmSourceFile = usbDeviceCdcComponent.createFileSymbol(None, None)
    addFileName('usb_device_cdc_acm.c', usbDeviceCdcComponent, usbDeviceCdcAcmSourceFile, "middleware/src/", "/usb/src", True, None)

    usbDeviceCdcLocalHeaderFile = usbDeviceCdcComponent.createFileSymbol(None, None)
    addFileName('usb_device_cdc_local.h', usbDeviceCdcComponent, usbDeviceCdcLocalHeaderFile, "middleware/src/", "/usb/src", True, None)


# all files go into src/
def addFileName(fileName, component, symbol, srcPath, destPath, enabled, callback):
    """Configure a Harmony file symbol: source path, project destination and
    type (HEADER for *.h, SOURCE otherwise)."""
    configName1 = Variables.get("__CONFIGURATION_NAME")
    #filename = component.createFileSymbol(None, None)
    symbol.setProjectPath("config/" + configName1 + destPath)
    symbol.setSourcePath(srcPath + fileName)
    symbol.setOutputName(fileName)
    symbol.setDestPath(destPath)
    if fileName[-2:] == '.h':
        symbol.setType("HEADER")
    else:
        symbol.setType("SOURCE")
    symbol.setEnabled(enabled)
0.960938
1
quant_test/__init__.py
rgkimball/quant_test
0
17955
""" quant_test ~~~~~~ The quant_test package - a Python package template project that is intended to be used as a cookie-cutter for developing new Python packages. """
0.902344
1
django_sql_dashboard/extensions/ExtendedParameter.py
ipamo/django-sql-dashboard
0
17956
import re

from django.utils.html import escape
from django.utils.safestring import mark_safe

from ..utils import Parameter


class ExtendedParameter(Parameter):
    """Dashboard query parameter with a typecode suffix.

    Supported placeholder forms in SQL: ``%(name)s`` (string, passed to
    psycopg2 as a named parameter), ``%(name)d`` / ``%(name)0.Nd`` (number,
    substituted inline after validation) and ``%(name)b`` (boolean). An
    optional default may follow the name: ``%(name:default)s``.
    """

    # Finds every typed placeholder: groups are (name, default, typecode, decimals).
    extract_re = re.compile(r"\%\(([\w\-]+)(?:\:([\w\-]+))?\)(s|(?:0?\.(\d+))?d|b)")
    # Builds a regex matching all placeholders for one specific parameter name.
    extract_name_re = lambda name: re.compile(rf"\%\({name}(?:\:[\w\-]+)?\)(?:s|(?:0?\.(\d+))?d|b)")
    # Loose numeric shape check for values substituted directly into SQL text.
    number_re = re.compile(r"^\d+(?:\.\d+)?")

    def __init__(self, name, default_value, typecode, decimals):
        """:param decimals: decimals string from the placeholder ('' if absent);
        a non-empty value forces the 'd' (number) typecode."""
        if decimals:
            typecode = "d"
        self.typecode = typecode
        # Adapt default value depending on the type
        if default_value == "":
            if self.typecode == "b":
                default_value = "false"
            if self.typecode == "d":
                default_value = "0"
        self.decimals = int(decimals) if len(decimals) >= 1 else 0
        super().__init__(name, default_value)

    def ensure_consistency(self, previous):
        """Raise ValueError if this occurrence of the parameter disagrees with
        a previously registered occurrence (typecode or decimals mismatch)."""
        super().ensure_consistency(previous)
        if self.typecode != previous.typecode:
            raise ValueError("Invalid typecode specification '%s' for parameter '%s': previously registered with typecode '%s'" % (self.typecode, self.name, previous.typecode))
        # BUGFIX: compare against previous.decimals (was previous.typecode,
        # which made the check nonsensical — the message already reports
        # previous.decimals).
        if self.decimals != 0 and self.decimals != previous.decimals:
            raise ValueError("Invalid decimals specification '%d' for parameter '%s': previously registered with %d decimals" % (self.decimals, self.name, previous.decimals))

    def get_sanitized(self, value, for_default=False):
        """Validate *value* for this parameter's typecode.

        Strings pass through unchanged (psycopg2 parameterization makes them
        safe); booleans must be 'true'/'false'; numbers must match number_re.
        Raises ValueError on anything else.
        """
        value = super().get_sanitized(value, for_default=for_default)
        if value is None:
            return None
        if self.typecode == "s":
            # String parameter: no need to check sanity because we use psycopg2 parameter-passing feature
            return value
        # Need to check sanity
        if self.typecode == "b":
            value = value.lower()
            if not value in ["true", "false"]:
                raise ValueError("Invalid %svalue for bool parameter '%s': '%s'" % ("default " if for_default else "", self.name, value))
            return value
        elif self.typecode == "d":
            if not ExtendedParameter.number_re.match(value):
                raise ValueError("Invalid %svalue for number parameter '%s': '%s'" % ("default " if for_default else "", self.name, value))
            return value
        else:
            raise ValueError("Unsupported typecode '%s' for parameter '%s'" % (self.typecode, self.name))

    @property
    def step(self):
        """ Determine "step" attribute for number inputs """
        return pow(10, -1*self.decimals)

    def form_control(self):
        """Render the HTML label + input widget for this parameter."""
        label = f"""<label for="qp_{escape(self.name)}">{escape(self.name)}</label>"""
        if self.typecode == 'd':
            control = f"""<input type="number" step="{str(self.step)}" id="qp_{escape(self.name)}" name="{escape(self.name)}" value="{escape(self.value) if self.value is not None else ""}">"""
        elif self.typecode == 'b':
            if self.default_value:
                # Hidden false + checkbox so an unchecked box still submits a value.
                control = f"""<input type="hidden" name="{escape(self.name)}" value="false">
                <input type="checkbox" id="qp_{escape(self.name)}" name="{escape(self.name)}" value="true" {"checked" if self.value == "true" else ""}>"""
            else:
                # No default: tri-state radio group (null / true / false).
                control = f"""<div>
                <input type="radio" id="qp_{escape(self.name)}_null" name="{escape(self.name)}" value="" {"checked" if not self.value else ""}>
                <label for="qp_{escape(self.name)}_null">null</label>
                <input type="radio" id="qp_{escape(self.name)}_true" name="{escape(self.name)}" value="true" {"checked" if self.value == "true" else ""}>
                <label for="qp_{escape(self.name)}_true">true</label>
                <input type="radio" id="qp_{escape(self.name)}_false" name="{escape(self.name)}" value="false" {"checked" if self.value == "false" else ""}>
                <label for="qp_{escape(self.name)}_false">false</label>
                </div>"""
        else:
            control = f"""<input type="text" id="qp_{escape(self.name)}" name="{escape(self.name)}" value="{escape(self.value) if self.value is not None else ""}">"""
        return mark_safe(label + '\n' + control)

    @classmethod
    def execute(cls, cursor, sql: str, parameters: list=[]):
        """Execute *sql* on *cursor* after resolving the typed placeholders.

        String parameters are passed via psycopg2's named-parameter mechanism;
        non-string (pre-validated) parameters are substituted into the SQL
        text, with None rendered as SQL null.
        NOTE: the mutable default [] is never mutated here, so it is safe.
        """
        string_values = {}
        for parameter in parameters:
            if parameter.typecode == 's':
                # For strings, we will use psycopg2 name parameter passing
                string_values[parameter.name] = parameter.value
                # If a default value has been specified, this needs to be removed from the SQL
                if parameter.default_value != "":
                    sql = ExtendedParameter.extract_name_re(parameter.name).sub(f"%({parameter.name})s", sql)
            else:
                # For non-strings, we cannot use psycopg2 name parameter passing (not supported)
                value = parameter.value
                sql = ExtendedParameter.extract_name_re(parameter.name).sub(value if value is not None else "null", sql)
        cursor.execute(sql, string_values)
2.34375
2
jiotc/models/bilstm_model.py
JHP4911/JioTC
4
17957
# -*- coding=utf-8 -*-
# author: dongrixinyu
# contact: <EMAIL>
# blog: https://github.com/dongrixinyu/
# file: bare_embedding.py
# time: 2020-06-12 11:27

import os
import pdb
import logging
from typing import Union, Optional, Dict, Any, Tuple

import torch
import torch.nn as nn
# BUGFIX: F is used in forward() (F.dropout2d) but was never imported,
# which raised NameError on the first forward pass.
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

from jiotc.embeddings.base_embedding import BaseEmbedding

from .base_model import BaseModel


# Bidirectional LSTM neural network (many-to-one)
class BiLSTMModel(BaseModel):
    """Many-to-one BiLSTM classifier: embed -> BiLSTM -> mean-pool -> linear."""

    @classmethod
    def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
        """Default hyper-parameters for this model."""
        return {
            'layer_bi_lstm': {
                'hidden_size': 128,
                'num_layers': 1,
                'dropout': 0.2,  # 当 num_layers == 1 时失效 (ineffective when num_layers == 1)
                'bidirectional': True
            },
            'layer_dense': {
                'activation': 'softmax'
            }
        }

    def __init__(self,
                 embed_model: Optional[BaseEmbedding] = None,
                 device: Union['cuda', 'cpu'] = None,
                 hyper_parameters: Optional[Dict[str, Dict[str, Any]]] = None):
        '''
        self.device
        self.embedding_layer
        self.embedding
        self.embedding_size
        self.num_classes
        参数已知,可以直接使用 (these attributes are provided by BaseModel)
        '''
        super(BiLSTMModel, self).__init__(embed_model, device=device)

        self.hidden_size = hyper_parameters['layer_bi_lstm']['hidden_size']
        self.num_layers = hyper_parameters['layer_bi_lstm']['num_layers']
        self.dropout = hyper_parameters['layer_bi_lstm']['dropout']

        # NOTE(review): bidirectional is hard-coded True here (the
        # 'bidirectional' hyper-parameter is ignored); the *2 in the dense
        # layer below depends on it.
        self.lstm = nn.LSTM(
            self.embedding_size,
            self.hidden_size,
            self.num_layers,
            batch_first=True,
            bidirectional=True)
        self.fc = nn.Linear(self.hidden_size * 2, self.num_classes)  # 2 for bidirection

    def forward(self, samples):
        """Forward pass.

        :param samples: LongTensor of token ids, padded with 0
            (assumed shape (batch, seq_len) — TODO confirm against caller).
        :return: logits of shape (batch, num_classes).
        """
        # Padding id is 0, so the mask marks real tokens.
        masks = samples.gt(0)
        embeds = self.embedding_layer(samples)  #.to(self.device)

        # Sort samples by descending length, as required by pack_padded_sequence.
        seq_length = masks.sum(1)
        sorted_seq_length, perm_idx = seq_length.sort(descending=True)
        embeds = embeds[perm_idx, :]  # reorder batch to match sorted lengths

        pack_sequence = pack_padded_sequence(
            embeds, lengths=sorted_seq_length, batch_first=True)

        # Initial (h0, c0) default to zeros when omitted.
        packed_output, _ = self.lstm(pack_sequence)

        # out: tensor of shape (batch_size, seq_length, hidden_size * 2)
        lstm_out, _ = pad_packed_sequence(packed_output, batch_first=True)

        # Restore the original batch order.
        _, unperm_idx = perm_idx.sort()
        lstm_out = lstm_out[unperm_idx, :]

        # Spatial-style dropout across the feature dimension; dropout2d is a
        # no-op unless self.training is True.
        lstm_out = lstm_out.permute(1, 0, 2)  # [batch, seq, 2h] => [seq, batch, 2h]
        lstm_out = F.dropout2d(lstm_out, p=self.dropout, training=self.training)
        lstm_out = lstm_out.permute(1, 0, 2)  # [seq, batch, 2h] => [batch, seq, 2h]

        # Mean-pool over the time dimension, then classify.
        lstm_out_sum = torch.mean(lstm_out, dim=1)
        output = self.fc(lstm_out_sum)

        return output
2.28125
2
tests/learning/test_prediction_error_delta_function.py
mihaic/psyneulink
0
17958
<filename>tests/learning/test_prediction_error_delta_function.py import numpy as np from psyneulink import PredictionErrorDeltaFunction np.set_printoptions(suppress=True) def test_prediction_error_delta_first_run(): learning_rate = 0.3 stimulus_onset = 41 sample = np.zeros(60) sample[stimulus_onset:] = 1 reward_onset = 54 target = np.zeros(60) target[reward_onset] = 1 delta_function = PredictionErrorDeltaFunction() delta_vals = np.zeros((60, 60)) weights = np.zeros(60) for t in range(60): print("Timestep {}".format(t)) new_sample = sample * weights # print("sample = {}".format(new_sample)) delta_vals[t] = delta_function.function(variable=[new_sample, target]) print("delta: {}".format(delta_vals[t])) for i in range(59): weights[i] = weights[i] + learning_rate * sample[i] * \ delta_vals[t][i + 1] validation_array = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3, 0.7, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09, 0.42000000000000004, 0.49, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.027, 0.189, 0.44100000000000006, 0.34299999999999997, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0081, 0.0756, 0.2646, 0.4116, 0.24009999999999998, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00243, 0.02835, 0.1323, 0.3087, 0.3601500000000001, 0.16806999999999994, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0007289999999999999, 0.010206, 0.05953499999999999, 0.18522, 0.32413500000000006, 0.30252599999999996, 0.117649, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00021869999999999998, 0.0035721, 0.025004699999999998, 0.09724049999999998, 0.2268945, 0.31765230000000005, 0.24706289999999997, 0.08235429999999999, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.560999999999999e-05, 0.0012247199999999999, 0.01000188, 0.04667544, 0.1361367, 0.25412184, 0.29647548, 0.19765032000000005, 0.05764800999999997, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.9682999999999998e-05, 0.000413343, 0.003857868, 0.021003947999999998, 0.073513818, 0.171532242, 0.26682793199999993, 0.2668279320000001, 0.15564962699999996, 0.040353607000000014, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.904899999999999e-06, 0.000137781, 0.0014467005, 0.009001692, 0.036756909000000004, 0.1029193452, 0.200120949, 0.26682793199999993, 0.2334744405000001, 0.12106082099999993, 0.028247524900000043, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.7714699999999997e-06, 4.5467729999999994e-05, 0.00053045685, 0.0037131979500000002, 0.0173282571, 0.05660563986, 0.13207982633999998, 0.2201330439, 0.25682188454999993, 0.19975035465000013, 0.09321683216999987, 0.019773267430000074, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.314409999999999e-07, 1.4880347999999997e-05, 0.00019096446599999996, 0.00148527918, 0.0077977156950000005, 0.029111471928000003, 0.07924789580399999, 0.15849579160799998, 0.23113969609499996, 0.23970042558000004, 0.16779029790600009, 0.07118376274799987, 0.013841287201000085, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.5943229999999994e-07, 4.8361131e-06, 6.770558339999998e-05, 0.0005792588802, 0.0033790101345, 0.014191842564900003, 0.044152399090799994, 0.1030222645452, 0.18028896295409996, 0.23370791494049992, 0.21812738727780012, 0.1388083373586, 0.05398102008389993, 0.009688901040700082, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.61026623e-06, 2.3696954189999994e-05, 0.00022117157244, 0.00141918425649, 0.006622859863620001, 0.023180009522670002, 0.06181335872711999, 0.12620227406787, 0.19631464855001995, 0.22903375664168996, 0.19433167230204007, 0.11336014217619006, 0.040693384370939945, 0.006782230728490046, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 8.719352486999997e-06, 8.2939339665e-05, 0.000580575377655, 0.002980286938629, 0.011590004761335003, 0.034770014284004995, 0.08113003332934499, 0.14723598641251498, 0.20613038097752096, 0.218623131339795, 0.1700402132642851, 0.09156011483461501, 0.03052003827820493, 0.0047475615099430435, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.3601154386499996e-05, 0.00023223015106199995, 0.0013004888459472001, 0.0055632022854408, 0.018544007618136, 0.048678019997607, 0.100961819254296, 0.16490430478201676, 0.20987820608620322, 0.20404825591714193, 0.1464961837353841, 0.07324809186769199, 0.02278829524772641, 0.0033232930569601082, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00010327019970509998, 0.0005527077595275599, 0.0025793028777952804, 0.00945744388524936, 0.0275842113319773, 0.0643631597746137, 0.12014456491261222, 0.17839647517327273, 0.2081292210354848, 0.18678263426261454, 0.12452175617507655, 0.05811015288170229, 0.016948794590496474, 0.002326305139872087, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00026908252756336796, 0.0011606862950078762, 0.004642745180031503, 0.014895474119267742, 0.038617895864768215, 0.08109758131601326, 0.13762013799081035, 0.18731629893193635, 0.2017252450036237, 0.1681043708363532, 0.10459827518706422, 0.045761745394340525, 0.012562047755309225, 0.0016284135979104386, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0006172884160657308, 0.002205303960514964, 0.007718563861802375, 0.022012200642917885, 0.05136180150014173, 0.09805434831845238, 0.15252898627314818, 0.1916389827534426, 0.1916389827534425, 0.1490525421415665, 0.08694731624924712, 0.03580183610263121, 0.009281957508089578, 0.0011398895185372737, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00127887960422022, 0.0038592819309011877, 0.012006654896137028, 0.030817080900085034, 0.06536956554563493, 0.11439673970486111, 0.1642619852172365, 0.1916389827534426, 0.1788630505698796, 0.13042097437387068, 0.07160367220526243, 0.027845872524268733, 
0.006839337111223864, 0.0007979226629760694, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0024366641834905763, 0.006303493820471939, 0.01764978269732143, 0.04118282629375, 0.08007771779340278, 0.12935631335857373, 0.17247508447809834, 0.1878062030983737, 0.16433042771107698, 0.11277578372328811, 0.058476332300964384, 0.021543911900355206, 0.005026912776749604, 0.0005585458640832153, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0043277123296321576, 0.009707380483526788, 0.024709695776250002, 0.05285129374364584, 0.09486129646295406, 0.1422919446944311, 0.17707442006418095, 0.18076347048218466, 0.1488640345147404, 0.09648594829659096, 0.047396606180781564, 0.01658881216327357, 0.0036864027029497315, 0.0003909821048582174, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.007239926474690194, 0.014208075071343751, 0.03315217516646875, 0.06545429455943831, 0.10909049093239716, 0.15272668730535605, 0.17818113518958212, 0.17119363969195134, 0.13315060864929562, 0.08175914566184805, 0.03815426797552923, 0.012718089325176374, 0.0026977765235223217, 0.00027368747340072996, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.011502348996093318, 0.01989130509988125, 0.04284281098435962, 0.07854515347132598, 0.1221813498442848, 0.16036302167062388, 
0.17608488654029292, 0.15978073037915452, 0.11773316975306136, 0.0686776823559524, 0.030523414380423386, 0.009711995484680158, 0.0019705498084858775, 0.00019158123138052208, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.017469740526057695, 0.02677675686522476, 0.053553513730449524, 0.09163601238321362, 0.13363585139218653, 0.1650795811315246, 0.17119363969195145, 0.14716646219132656, 0.10301652353392865, 0.05723140196329368, 0.02427998871170045, 0.0073895617818218184, 0.0014368592353543042, 0.00013410686196635435, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.02550276758562512, 0.03480978392479219, 0.06497826332627876, 0.10423596408590546, 0.14306897031398796, 0.16691379869965267, 0.16398548644176403, 0.13392148059410713, 0.08928098706273813, 0.0473459779878157, 0.01921286063273686, 0.005603751017881575, 0.0010460335233379858, 9.387480337641474e-05, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.035945702763062776, 0.04386032774523817, 0.07675557355416678, 0.1158858659543302, 0.1502224188296874, 0.16603530502228608, 0.15496628468746698, 0.12052933253469633, 0.07670048434026144, 0.03890604278129206, 0.015130127748280264, 0.004236435769518487, 0.0007603859073495034, 6.571236236352362e-05, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.04910380108663422, 0.05372890148791675, 0.0884946612742158, 0.12618683181693738, 0.15496628468746698, 0.16271459892184037, 0.1446351990416358, 0.10738067807636587, 0.06536215187257055, 0.0317732682713886, 0.011862020154651653, 0.0031936208108678255, 0.0005519838438536873, 4.5998653654510946e-05, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06522247153300925, 0.06415862942380646, 0.09980231243703228, 0.13482066767809628, 0.157290778957779, 0.157290778957779, 0.13345884275205488, 0.09477512021522716, 0.05528548679221601, 0.025799893836367493, 0.009261500351516516, 0.002401129720763562, 0.000400188286794001, 3.219905755813546e-05, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.08447006036015119, 0.07485173432777421, 0.1103078190093515, 0.14156170106200106, 0.157290778957779, 0.15014119809606175, 0.12185372599100663, 0.08292823018832374, 0.04643980890546151, 0.02083837579091219, 0.007203389162290574, 0.0018008472905727269, 0.0002897915180232191, 2.2539340290728127e-05, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.10692558065848345, 0.0854885597322474, 0.11968398362514637, 0.1462804244307344, 0.15514590469926381, 0.14165495646454518, 0.11017607725020184, 0.07198170380346502, 0.038759378971096714, 0.016747879802325727, 0.005582626600775242, 0.0013475305588078745, 0.00020961586470347182, 1.5777538203476382e-05, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13257214857815766, 0.0957471869001171, 0.12766291586682277, 0.14894006851129327, 0.15109862022884823, 0.13221129270024212, 0.09871776521618081, 0.062015006353754565, 0.032155929220465396, 0.013398303841860582, 0.0043120977881849765, 0.0010061561505766425, 0.00015146436675339547, 1.1044276742477876e-05, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.16129630464819278, 0.10532190559012883, 0.13404606166016392, 0.14958763402655972, 0.1454324219702664, 0.12216323445502375, 0.08770693755745296, 0.05305728321376779, 0.02652864160688395, 0.0106724420257579, 0.003320315296902465, 0.0007497486154296462, 0.0001093383397501313, 7.730993719756718e-06, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.19289287632523142, 0.11393915241113936, 0.13870853337008265, 0.1483410704096717, 0.13845166571569367, 0.11182634538575253, 0.07731204125434732, 0.045098690731702695, 0.021771781732546125, 0.008466804007101203, 0.0025491452924606417, 0.0005576255327258695, 7.885613594094121e-05, 5.411695603863009e-06, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.22707462204857323, 0.12136996669882236, 0.14159829448195937, 0.14537424900147833, 0.1304640696167113, 0.10147205414633098, 0.06764803609755388, 0.038100618031955746, 0.017780288414912637, 0.00669150639270899, 0.0019516893645402655, 0.0004139947136904132, 
5.682280383978444e-05, 3.7881869227041065e-06, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.26348561205821996, 0.12743846503376344, 0.14273108083781505, 0.14090119518604827, 0.12176646497559718, 0.09132484873169788, 0.058783810677874415, 0.03200451914684277, 0.014453653808251588, 0.005269561284258373, 0.001490380969285332, 0.00030684314073514685, 4.091241876469365e-05, 2.6517308459039768e-06, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.301717151568349, 0.1320262497749789, 0.14218211514228507, 0.13516077612291288, 0.11263398010242742, 0.08156253731555085, 0.05075002321856492, 0.026739259545265348, 0.011698426051053645, 0.004135807189766472, 0.001135319620720332, 0.00022706392414395538, 2.9434212389101155e-05, 1.856211592099477e-06, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3413250265008427, 0.13507300938517075, 0.14007571343647335, 0.12840273731676732, 0.10331254726636441, 0.07231878308645512, 0.04354679411657503, 0.02222700949700185, 0.009429640392667471, 0.0032356609190525853, 0.0008628429117474301, 0.000167775010617488, 2.116081215008947e-05, 1.2993481144363273e-06, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3818469293163939, 0.1365738206005615, 0.13657382060056156, 0.12087568030164642, 0.09401441801239163, 
0.06368718639549109, 0.03715085873070312, 0.018387798765701513, 0.00757144655058295, 0.0025238155168610943, 0.0006543225414084031, 0.00012379075107726845, 1.5202372939393527e-05, 9.09543680149838e-07, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4228190754965624, 0.13657382060056145, 0.13186437851088706, 0.11281730161487002, 0.08491624852732138, 0.05572628809605473, 0.031521940741202625, 0.01514289310116601, 0.006057157240466293, 0.0019629676242253202, 0.0004951630043090738, 9.121423763591707e-05, 1.0914524161576011e-05, 6.366805761492955e-07, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4637912216767308, 0.13516098797365916, 0.12615025544208192, 0.10444698568860544, 0.07615926039794141, 0.04846498388959908, 0.026608226449191696, 0.012417172342956029, 0.0048289003555940235, 0.0015226262382503908, 0.0003739783743071934, 6.712432359357035e-05, 7.831171085936894e-06, 4.4567640333781355e-07, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5043395180688286, 0.13245776821418598, 0.11963927451603895, 0.09596066810140624, 0.06785097744543866, 0.04190795665747693, 0.02235091021732094, 0.010140690746747505, 0.003837018120390834, 0.0011780318790675093, 0.00028192215909306206, 4.933637784132472e-05, 5.6155226810794545e-06, 3.119734823808784e-07, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5440768485330844, 0.12861222010474194, 0.11253569259164908, 0.087527760904616, 0.06006807120905011, 0.03604084272543018, 0.0186878443761489, 0.008249588958840537, 0.0030393222479937476, 0.0009091989630751751, 0.00021214642471756306, 3.6220121293228935e-05, 4.024457921469882e-06, 2.183814377110238e-07, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.582660514564507, 0.12378926185081407, 0.1050333130855392, 0.07928985399594612, 0.05285990266396423, 0.030834943220645727, 0.015556367750956368, 0.006686508945586533, 0.0024002852625182314, 0.0007000832015678915, 0.00015936853369025172, 2.6561422281634606e-05, 2.8826349763866332e-06, 1.5286700638661443e-07, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6197972931197512, 0.11816247722123163, 0.09731027535866121, 0.0713608685963516, 0.04625241483096865, 0.02625137057973892, 0.012895410109345473, 0.005400641840666021, 0.0018902246442331627, 0.0005378688012045441, 0.00011952640026768879, 1.9457786090026907e-05, 2.0637045854421388e-06, 1.0700690444842564e-07, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6552460362861207, 0.1119068166624605, 0.08952545332996831, 0.0638283324667368, 0.04025210155559966, 0.02224458243862093, 0.010646979628741615, 0.004347516681736163, 0.0014845178913245327, 0.000412366080923543, 8.950581601441243e-05, 1.4239561638595966e-05, 1.4766952811662293e-06, 7.490483311389795e-08, 0.0, 0.0, 0.0, 0.0, 
0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6888180812848588, 0.10519240766271287, 0.0818163170709989, 0.05675546319339564, 0.03484984582050599, 0.018765301595657147, 0.008757140744639957, 0.003488617044612674, 0.0011628723482043357, 0.0003155080014507483, 6.69259397017008e-05, 1.0410701731355942e-05, 1.0561581467172232e-06, 5.2433383190830796e-08, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.7203758035836726, 0.0981795804851987, 0.07429806090771796, 0.05018377798152873, 0.030024482553051346, 0.01576285334035199, 0.007176583634631806, 0.0027908936356900726, 0.0009086630441783594, 0.00024093338292596744, 4.997136831064175e-05, 7.604338655986531e-06, 7.550407176148966e-07, 3.670336823358156e-08, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.7498296777292321, 0.09101512461195449, 0.06706377602986124, 0.04413598935298546, 0.025745993789241584, 0.013186972428635979, 0.005860876634949275, 0.002226224458236503, 0.0007083441458026751, 0.00018364477854138084, 3.726125941427849e-05, 5.549549274452836e-06, 5.395395127338887e-07, 2.569235779681378e-08, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.7771342151128184, 0.08382972003732658, 0.06018544002679849, 0.03861899068386232, 0.02197828738105989, 0.010989143690529946, 0.004770480981935443, 
0.0017708603645063548, 0.0005509343356242535, 0.00013972972280329454, 2.7747746372375204e-05, 4.046546345892743e-06, 3.853853662860729e-07, 1.7984650435565186e-08, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8022831311240164, 0.07673643603416813, 0.05371550522391766, 0.033626779693021636, 0.018681544273900896, 0.009123544877951528, 0.0038705947967068166, 0.0014048825558417022, 0.00042757295177797694, 0.00010613512987400764, 2.0637386364374954e-05, 2.9481980520218443e-06, 2.7516515155312504e-07, 1.258925530489563e-08, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8253040619342669, 0.06983015679109295, 0.04768888756464884, 0.029143209067285403, 0.015814144455116086, 0.0075476598535781925, 0.00313088112444726, 0.0011116896746226068, 0.00033114160520675284, 8.048580682107342e-05, 1.5330629870691226e-05, 2.146288181847922e-06, 1.9639238268975845e-07, 8.812478746733632e-09, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8462531089715948, 0.06318777602315973, 0.04212518401543974, 0.025144489683634585, 0.013334199074654718, 0.006222626234838935, 0.002525123689499864, 0.0008775252537979172, 0.0002559448656910268, 6.0939253735958765e-05, 1.1375327364060439e-05, 1.5613194420671661e-06, 1.401184115401577e-07, 6.168735078304621e-09, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8652094417785428, 0.0568689984208437, 0.037030975715898196, 0.02160140250094056, 0.011200727222710039, 0.005113375471237247, 0.002030844158789291, 0.0006910511373657835, 0.000197443182104462, 4.607007582446698e-05, 8.431124987495764e-06, 1.1349591328979614e-06, 9.993350857939731e-08, 4.3181145326087744e-09, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8822701413047959, 0.05091759160936005, 0.03240210375141095, 0.018481199917471325, 0.009374521697268268, 0.004188616077502871, 0.0016289062523622277, 0.0005429687507872982, 0.0001520312502205634, 3.4778390573309004e-05, 6.242275231160832e-06, 8.244514455579832e-07, 7.124889034315629e-08, 3.022680217235063e-09, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8975454187876039, 0.04536294525197537, 0.028225832601229017, 0.015749196451410374, 0.007818750011338693, 0.0034207031299606783, 0.0013031250018897822, 0.00042568750061722227, 0.00011685539232642039, 2.621755597065345e-05, 4.616928095502182e-06, 5.984906790157396e-07, 5.078102727207323e-08, 2.115876140962314e-09, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9111543023631965, 0.04022181145675141, 0.024482841756283458, 0.013370062519388881, 0.006499335946925311, 0.0027854296915393872, 0.0010398937515080364, 0.0003330378681299928, 8.96640414196348e-05, 1.9737367608074763e-05, 3.411396870545147e-06, 4.341777835037419e-07, 3.6181481921637726e-08, 1.4811133430825407e-09, 0.0, 
0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9232208458002219, 0.035500120546611, 0.02114900798521513, 0.011308844547649799, 0.005385164070309534, 0.002261768909530004, 0.0008278369864945789, 0.0002600257201169631, 6.868603927612238e-05, 1.4839576386815878e-05, 2.5182311443883165e-06, 3.1477889306241735e-07, 2.577137137027563e-08, 1.0367793290555483e-09, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9338708819642052, 0.031194786778192207, 0.018196958953945574, 0.009531740404447708, 0.0044481455220757304, 0.0018315893326192878, 0.0006574936065812942, 0.0002026238158647775, 5.253210040934153e-05, 1.1143172814032098e-05, 1.8571954689683423e-06, 2.280766365769793e-07, 1.8350993724602915e-08, 7.257455747478048e-10, 0.0, 0.0, 0.0, 0.0, 0.0], ]) for i in range(len(delta_vals)): deltas = delta_vals[i] validation_deltas = validation_array[i] np.testing.assert_allclose(deltas, validation_deltas, atol=1e-08, err_msg="mismatch on timestep {}".format(i))
2.96875
3
jts/backend/jobapps/views.py
goupaz/babylon
1
17959
<gh_stars>1-10 from datetime import datetime as dt from django.utils import timezone import uuid from django.contrib.auth import get_user_model from django.http import JsonResponse from django.views.decorators.csrf import csrf_exempt from rest_framework.decorators import api_view from company.utils import get_or_create_company from position.utils import get_or_insert_position from utils import utils from utils.error_codes import ResponseCodes from utils.generic_json_creator import create_response from .models import JobApplication, Contact, ApplicationStatus, StatusHistory from .models import JobApplicationNote, JobApplicationFile from .models import Source from alumni.serializers import AlumniSerializer from .serializers import ApplicationStatusSerializer from .serializers import JobApplicationNoteSerializer, JobApplicationFileSerializer from .serializers import JobApplicationSerializer, ContactSerializer from .serializers import SourceSerializer from .serializers import StatusHistorySerializer User = get_user_model() @csrf_exempt @api_view(["GET", "POST", "PUT", "PATCH", "DELETE"]) def job_applications(request): body = request.data if 'recaptcha_token' in body and utils.verify_recaptcha(None, body['recaptcha_token'], 'add_job') == ResponseCodes.verify_recaptcha_failed: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.verify_recaptcha_failed), safe=False) if request.method == "GET": timestamp = request.GET.get('timestamp') if timestamp is not None: timestamp = int(timestamp) / 1000 if timestamp is None: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters)) profile = request.user time = dt.fromtimestamp(int(timestamp)) user_job_apps = JobApplication.objects.filter(created__gte=time) job_application_list = JobApplicationSerializer(instance=user_job_apps, many=True, context={ 'user': request.user}).data response = {'data': job_application_list, 'synching': profile.synching} 
return JsonResponse(create_response(data=response), safe=False) status_id = request.GET.get('status_id') if status_id is not None: user_job_apps = JobApplication.objects.filter( application_status__id=status_id, user__id=request.user.id, is_deleted=False).order_by('-apply_date') else: user_job_apps = JobApplication.objects.filter( user_id=request.user.id, is_deleted=False).order_by('-apply_date') job_applications_list = JobApplicationSerializer(instance=user_job_apps, many=True, context={ 'user': request.user}).data return JsonResponse(create_response(data=job_applications_list), safe=False) elif request.method == "POST": job_title = body['job_title'] company = body['company'] application_date = body['application_date'] status = int(body['status_id']) source = body['source'] jt = get_or_insert_position(job_title) jc = get_or_create_company(company) if Source.objects.filter(value__iexact=source).count() == 0: source = Source.objects.create(value=source) else: source = Source.objects.get(value__iexact=source) job_application = JobApplication(position=jt, company_object=jc, apply_date=application_date, msg_id='', app_source=source, user=request.user) job_application.application_status = ApplicationStatus.objects.get(pk=status) job_application.save() return JsonResponse( create_response( data=JobApplicationSerializer(instance=job_application, many=False, context={'user': request.user}).data), safe=False) elif request.method == "PUT": status_id = body.get('status_id') rejected = body.get('rejected') job_application_ids = [] if 'jobapp_ids' in body: job_application_ids = body['jobapp_ids'] if 'jobapp_id' in body: job_application_ids.append(body['jobapp_id']) if len(job_application_ids) == 0: return JsonResponse(create_response(success=False, error_code=ResponseCodes.record_not_found), safe=False) elif rejected is None and status_id is None: return JsonResponse(create_response(success=False, error_code=ResponseCodes.record_not_found), safe=False) else: user_job_apps = 
JobApplication.objects.filter(pk__in=job_application_ids) if user_job_apps.count() == 0: return JsonResponse(create_response(success=False, error_code=ResponseCodes.record_not_found), safe=False) else: for user_job_app in user_job_apps: if user_job_app.user == request.user: if status_id is None: user_job_app.is_rejected = rejected else: new_status = ApplicationStatus.objects.filter(pk=status_id) if new_status.count() == 0: return JsonResponse( create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) else: if rejected is None: user_job_app.application_status = new_status[0] else: user_job_app.application_status = new_status[0] user_job_app.is_rejected = rejected status_history = StatusHistory( job_post=user_job_app, application_status=new_status[0]) status_history.save() if rejected is not None: user_job_app.rejected_date = timezone.now() user_job_app.updated_date = timezone.now() user_job_app.save() return JsonResponse(create_response(data=None), safe=False) elif request.method == "PATCH": job_app_id = body.get('jobapp_id') if job_app_id is None: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False) user_job_app = JobApplication.objects.get(pk=job_app_id) if user_job_app.user != request.user: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False) if user_job_app.msg_id is not None and user_job_app.msg_id != '': return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False) job_title = body.get('job_title') company = body.get('company') application_date = body.get('application_date') source = body.get('source') if application_date is not None: user_job_app.apply_date = application_date if job_title is not None: user_job_app.position = get_or_insert_position(job_title) if company is not None: user_job_app.company_object = 
get_or_create_company(company) if source is not None: if Source.objects.filter(value__iexact=source).count() == 0: source = Source.objects.create(value=source) else: source = Source.objects.get(value__iexact=source) user_job_app.app_source = source user_job_app.updated_date = timezone.now() user_job_app.save() return JsonResponse(create_response( data=JobApplicationSerializer(instance=user_job_app, many=False, context={'user': request.user}).data), safe=False) elif request.method == "DELETE": job_application_ids = [] if 'jobapp_ids' in body: job_application_ids = body['jobapp_ids'] if 'jobapp_id' in body: job_application_ids.append(body['jobapp_id']) if len(job_application_ids) == 0 or JobApplication.objects.filter(pk__in=job_application_ids).count() == 0: return JsonResponse(create_response(success=False, error_code=ResponseCodes.record_not_found), safe=False) else: user_job_apps = JobApplication.objects.filter(pk__in=job_application_ids) for user_job_app in user_job_apps: if user_job_app.user == request.user: user_job_app.deleted_date = timezone.now() user_job_app.is_deleted = True user_job_app.save() return JsonResponse(create_response(data=None), safe=False) @csrf_exempt @api_view(["GET"]) def statuses(request): statuses_list = ApplicationStatus.objects.all() statuses_list = ApplicationStatusSerializer(instance=statuses_list, many=True).data return JsonResponse(create_response(data=statuses_list), safe=False) @csrf_exempt @api_view(["GET"]) def sources(request): source_list = SourceSerializer(instance=Source.objects.all(), many=True).data return JsonResponse(create_response(data=source_list), safe=False) @csrf_exempt @api_view(["GET"]) def status_history(request, job_app_pk): if job_app_pk is None: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) else: statuses_list = StatusHistory.objects.filter(job_post__pk=job_app_pk) statuses_list = StatusHistorySerializer(instance=statuses_list, 
many=True).data return JsonResponse(create_response(data=statuses_list), safe=False) @csrf_exempt @api_view(["GET", "POST", "PUT", "DELETE"]) def contacts(request, job_app_pk): body = request.data if request.method == "GET": data = {} if job_app_pk is None: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) else: contacts_list = Contact.objects.filter(job_post__pk=job_app_pk) contacts_list = ContactSerializer(instance=contacts_list, many=True).data data['contacts'] = contacts_list user_profile = request.user if not user_profile.user_type.alumni_listing_enabled: alumni = [] else: jobapp = JobApplication.objects.get(pk=job_app_pk) alumni_list = User.objects.filter(college=user_profile.college, company=jobapp.company_object, user_type__name__iexact='Alumni', is_demo=False) alumni = AlumniSerializer( instance=alumni_list, many=True, context={'user': request.user}).data data['alumni'] = alumni return JsonResponse(create_response(data=data, success=True, error_code=ResponseCodes.success), safe=False) elif request.method == "POST": first_name = body.get('first_name') last_name = body.get('last_name') if job_app_pk is None or first_name is None or last_name is None: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) user_job_app = JobApplication.objects.get(pk=job_app_pk) if user_job_app.user == request.user: phone_number = body.get('phone_number') linkedin_url = body.get('linkedin_url') description = body.get('description') email = body.get('email') job_title = body.get('job_title') jt = None jc = None if job_title is not None: jt = get_or_insert_position(job_title) company = body.get('company') if company is not None: jc = get_or_create_company(company) contact = Contact( job_post=user_job_app, first_name=first_name, last_name=last_name, phone_number=phone_number, linkedin_url=linkedin_url, description=description, email=email, 
position=jt, company=jc) contact.save() data = ContactSerializer( instance=contact, many=False).data return JsonResponse(create_response(data=data), safe=False) else: return JsonResponse( create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False) elif request.method == "PUT": contact_id = body.get('contact_id') if contact_id is None: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) contact = Contact.objects.get(pk=contact_id) if contact.job_post.user == request.user: first_name = body.get('first_name') if first_name is not None: contact.first_name = first_name last_name = body.get('last_name') if last_name is not None: contact.last_name = last_name email = body.get('email') if email is not None: contact.email = email phone_number = body.get('phone_number') if phone_number is not None: contact.phone_number = phone_number linkedin_url = body.get('linkedin_url') if linkedin_url is not None: contact.linkedin_url = linkedin_url description = body.get('description') if description is not None: contact.description = description job_title = body.get('job_title') if job_title is not None: contact.position = get_or_insert_position(job_title) company = body.get('company') if company is not None: contact.company = get_or_create_company(company) contact.update_date = timezone.now() contact.save() data = ContactSerializer( instance=contact, many=False).data return JsonResponse(create_response(data=data, success=True, error_code=ResponseCodes.success), safe=False) else: return JsonResponse( create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False) elif request.method == "DELETE": contact_id = body.get('contact_id') if contact_id is None: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) user_job_app_contact = Contact.objects.filter( pk=contact_id) if 
user_job_app_contact.count() == 0: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False) user_job_app_contact = user_job_app_contact[0] if user_job_app_contact.job_post.user == request.user: user_job_app_contact.delete() return JsonResponse(create_response(data=None, success=True, error_code=ResponseCodes.success), safe=False) else: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False) @csrf_exempt @api_view(["GET", "POST", "PUT", "DELETE"]) def notes(request, job_app_pk): body = request.data if 'recaptcha_token' in body and utils.verify_recaptcha(None, body['recaptcha_token'], 'jobapp_note') == ResponseCodes.verify_recaptcha_failed: return JsonResponse( create_response(data=None, success=False, error_code=ResponseCodes.verify_recaptcha_failed), safe=False) if request.method == "GET": if job_app_pk is None: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) else: notes_list = JobApplicationNote.objects.filter( job_post__pk=job_app_pk).order_by('-update_date', '-created_date') notes_list = JobApplicationNoteSerializer( instance=notes_list, many=True).data return JsonResponse(create_response(data=notes_list, success=True, error_code=ResponseCodes.success), safe=False) elif request.method == "POST": description = body['description'] if job_app_pk is None or description is None: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) else: user_job_app = JobApplication.objects.get(pk=job_app_pk) if user_job_app.user == request.user: note = JobApplicationNote( job_post=user_job_app, description=description) note.save() data = JobApplicationNoteSerializer( instance=note, many=False).data return JsonResponse(create_response(data=data, success=True, error_code=ResponseCodes.success), safe=False) else: return 
JsonResponse( create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False) elif request.method == "PUT": jobapp_note_id = body['jobapp_note_id'] description = body['description'] if jobapp_note_id is None: return JsonResponse( create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) else: note = JobApplicationNote.objects.get(pk=jobapp_note_id) if note.job_post.user == request.user: note.description = description note.update_date = timezone.now() note.save() data = JobApplicationNoteSerializer( instance=note, many=False).data return JsonResponse(create_response(data=data, success=True, error_code=ResponseCodes.success), safe=False) else: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False) elif request.method == "DELETE": jobapp_note_id = body['jobapp_note_id'] if jobapp_note_id is None: return JsonResponse( create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) else: user_job_app_note = JobApplicationNote.objects.get( pk=jobapp_note_id) if user_job_app_note.job_post.user == request.user: user_job_app_note.delete() return JsonResponse(create_response(data=None, success=True, error_code=ResponseCodes.success), safe=False) else: return JsonResponse( create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False) @csrf_exempt @api_view(["GET", "POST", "DELETE"]) def files(request, job_app_pk): body = request.data if request.method == "GET": if job_app_pk is None: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) else: files_list = JobApplicationFile.objects.filter( job_post__pk=job_app_pk).order_by('-update_date', '-created_date') files_list = JobApplicationFileSerializer( instance=files_list, many=True).data return JsonResponse(create_response(data=files_list, success=True, 
error_code=ResponseCodes.success), safe=False) elif request.method == "POST": file = body['file'] if job_app_pk is None or file is None: return JsonResponse(create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) else: ext = file.name.split('.')[-1] filename = "%s.%s" % (uuid.uuid4(), ext) name = file.name.replace(('.' + ext), '') filename = name + '_' + filename user_job_app = JobApplication.objects.get(pk=job_app_pk) if user_job_app.user == request.user: jobapp_file = JobApplicationFile( job_post=user_job_app, name=name) jobapp_file.save() jobapp_file.file.save(filename, file, save=True) data = JobApplicationFileSerializer( instance=jobapp_file, many=False).data return JsonResponse(create_response(data=data, success=True, error_code=ResponseCodes.success), safe=False) else: return JsonResponse( create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False) elif request.method == "DELETE": jobapp_file_id = body['jobapp_file_id'] if jobapp_file_id is None: return JsonResponse( create_response(data=None, success=False, error_code=ResponseCodes.invalid_parameters), safe=False) else: user_job_app_file = JobApplicationFile.objects.get( pk=jobapp_file_id) if user_job_app_file.job_post.user == request.user: user_job_app_file.delete() return JsonResponse(create_response(data=None, success=True, error_code=ResponseCodes.success), safe=False) else: return JsonResponse( create_response(data=None, success=False, error_code=ResponseCodes.record_not_found), safe=False)
2.09375
2
reassign.py
Ca2Patton/PythonStuff
0
17960
<gh_stars>0 #!/Library/Frameworks/Python.framework/Versions/2.7/bin/python x=5 print x def reassign(b): x=6 print x reassign(x) print x
2.0625
2
python_modules/dagster/dagster/core/meta/config_types.py
Ramshackle-Jamathon/dagster
0
17961
from collections import namedtuple from dagster import check from dagster.config.config_type import ConfigType, ConfigTypeKind from dagster.config.field import Field from dagster.core.serdes import whitelist_for_serdes @whitelist_for_serdes class NonGenericTypeRefMeta(namedtuple('_NonGenericTypeRefMeta', 'key')): def __new__(cls, key): return super(NonGenericTypeRefMeta, cls).__new__(cls, check.str_param(key, 'key')) @whitelist_for_serdes class ConfigTypeMeta( namedtuple( '_ConfigTypeMeta', 'kind key given_name description ' 'type_param_refs ' # only valid for closed generics (Set, Tuple, List, Optional) 'enum_values ' # only valid for enums 'fields', # only valid for dicts and selectors ) ): def __new__( cls, kind, key, given_name, type_param_refs, enum_values, fields, description, ): return super(ConfigTypeMeta, cls).__new__( cls, kind=check.inst_param(kind, 'kind', ConfigTypeKind), key=check.str_param(key, 'key'), given_name=check.opt_str_param(given_name, 'given_name'), type_param_refs=None if type_param_refs is None else check.list_param(type_param_refs, 'type_param_refs', of_type=TypeRef), enum_values=None if enum_values is None else check.list_param(enum_values, 'enum_values', of_type=ConfigEnumValueMeta), fields=None if fields is None else check.list_param(fields, 'field', of_type=ConfigFieldMeta), description=check.opt_str_param(description, 'description'), ) @property def inner_type_refs(self): ''' This recurses through the type references with non-generic types as leaves. ''' def _doit(): next_level_refs = _get_next_level_refs(self) if next_level_refs: for next_level in next_level_refs: for inner_ref in _recurse_through_generics(next_level): yield inner_ref # there might be duplicate keys (esp for scalars) refs_by_key = {} for ref in _doit(): if ref.key not in refs_by_key: refs_by_key[ref.key] = ref return list(refs_by_key.values()) # This function is used by the recursive descent # through all the inner types. 
This does *not* # recursively descend through the type parameters # of generic types. It just gets the next level of # types. Either the direct type parameters of a # generic type. Or the type refs of all the fields # if it is a type with fields. def _get_next_level_refs(ref): # if a generic type, get type params # if a type with fields, get refs of the fields if ConfigTypeKind.is_closed_generic(ref.kind): return ref.type_param_refs elif ( ConfigTypeKind.has_fields(ref.kind) and ref.fields ): # still check fields because permissive return [field_meta.type_ref for field_meta in ref.fields] def _recurse_through_generics(ref): yield ref if isinstance(ref, ConfigTypeMeta) and ConfigTypeKind.is_closed_generic(ref.kind): for type_param_ref in ref.type_param_refs: for inner_ref in _recurse_through_generics(type_param_ref): yield inner_ref # A type reference in these serializable data structures are one of two things # 1) A closed generic type (e.g. List[Int] of Optional[Set[str]]) # 2) Or a reference to a non-generic type, such as Dict, Selector, or a Scalar. 
# Upon deserialization and when hydrated back to the graphql query, it will # be the responsibility of that module to maintain a dictionary of the # non-generic types and then do lookups into the dictionary in order to # to explode the entire type hierarchy requested by the client TypeRef = (ConfigTypeMeta, NonGenericTypeRefMeta) @whitelist_for_serdes class ConfigEnumValueMeta(namedtuple('_ConfigEnumValueMeta', 'value description')): def __new__(cls, value, description): return super(ConfigEnumValueMeta, cls).__new__( cls, value=check.str_param(value, 'value'), description=check.opt_str_param(description, 'description'), ) @whitelist_for_serdes class ConfigFieldMeta( namedtuple( '_ConfigFieldMeta', 'name type_ref is_required default_provided default_value_as_str description', ) ): def __new__( cls, name, type_ref, is_required, default_provided, default_value_as_str, description ): return super(ConfigFieldMeta, cls).__new__( cls, name=check.opt_str_param(name, 'name'), type_ref=check.inst_param(type_ref, 'type_ref', TypeRef), is_required=check.bool_param(is_required, 'is_required'), default_provided=check.bool_param(default_provided, 'default_provided'), default_value_as_str=check.opt_str_param(default_value_as_str, 'default_value_as_str'), description=check.opt_str_param(description, 'description'), ) def meta_from_field(name, field): check.str_param(name, 'name') check.inst_param(field, 'field', Field) return ConfigFieldMeta( name=name, type_ref=type_ref_of(field.config_type), is_required=field.is_required, default_provided=field.default_provided, default_value_as_str=field.default_value_as_str if field.default_provided else None, description=field.description, ) def type_ref_of(config_type): check.inst_param(config_type, 'config_type', ConfigType) if ConfigTypeKind.is_closed_generic(config_type.kind): return meta_from_config_type(config_type) else: return NonGenericTypeRefMeta(key=config_type.key) def type_refs_of(type_list): return list(map(type_ref_of, 
type_list)) if type_list is not None else None def meta_from_config_type(config_type): check.inst_param(config_type, 'config_type', ConfigType) return ConfigTypeMeta( key=config_type.key, given_name=config_type.given_name, kind=config_type.kind, description=config_type.description, type_param_refs=type_refs_of(config_type.type_params), enum_values=[ ConfigEnumValueMeta(ev.config_value, ev.description) for ev in config_type.enum_values ] if config_type.kind == ConfigTypeKind.ENUM else None, fields=[meta_from_field(name, field) for name, field in config_type.fields.items()] if ConfigTypeKind.has_fields(config_type.kind) else None, )
2.1875
2
database/migrations/2017_06_14_205530_create_users_table.py
emirbek/cope
2
17962
from orator.migrations import Migration class CreateUsersTable(Migration): def up(self): """ Run the migrations. """ with self.schema.create('users') as table: table.integer('id') table.string('name') table.string('gender', 1) table.tiny_integer('status').default(0) table.integer('chat_id').unique() table.string('lang', 2).default('ru') table.timestamps() table.primary('id') def down(self): """ Revert the migrations. """ self.schema.drop('users')
2.671875
3
make_json.py
jfalcou/infra
135
17963
<reponame>jfalcou/infra<gh_stars>100-1000 from configparser import ConfigParser import os import json obj = {} config = ConfigParser() config.read(os.path.join(os.getenv("HOME"), ".aws", "credentials")) obj["MY_ACCESS_KEY"] = config.get("default", "aws_access_key_id", fallback="") obj["MY_SECRET_KEY"] = config.get("default", "aws_secret_access_key", fallback="") with open("config.json", "w") as out: json.dump(obj, out)
2.15625
2
ztest-type1.py
tochiji/ztest-type1
0
17964
######################################################### # 母比率の差の検定/タイプ1 ######################################################### import sys import math def error_usage(): sys.stderr.write("usage: " + sys.argv[0] + "\n") sys.stderr.write("\tこのプログラムは、4つの引数が必要です。\n\n") sys.stderr.write( "\t1.属性1のn数 2.属性1における比率p 3.属性2のn数 4.属性2における比率p\n") sys.stderr.write("\t例: 200 0.6 100 0.48\n\n") sys.stderr.write("\tただし、それぞれn数は30以上かつ比率pは[0<=p<=1]を満たすこと\n") sys.exit(1) # 引数がちょうど4つあるか? if len(sys.argv[1:]) != 4: error_usage() n1,p1,n2,p2 = map(float, sys.argv[1:]) p = ((n1*p1) + (n2*p2))/(n1+n2) # n数が30以上か? if (n1 < 30) or (n2 < 30): error_usage() # 比率は0から1の間か? if not (0 <= p1 <= 1) or not (0 <= p2 <= 1): error_usage() T = math.fabs(p1 - p2) / math.sqrt((p * (1-p)) * ((1/n1) + (1/n2))) if T >= 2.58: print("1%有意 (検定統計量:" + str(T) + ")") elif T >= 1.96: print("5%有意 (検定統計量:" + str(T) + ")") elif T >= 1.65: print("10%有意 (検定統計量:" + str(T) + ")") else: print("有意差なし (検定統計量:" + str(T) + ")")
2.9375
3
flask_demo/main.py
yzj2019/database_learning
0
17965
# coding=utf-8 import functools from flask import Flask, session from flask import redirect from flask import request, make_response from flask import render_template from flask import url_for from flask_bootstrap import Bootstrap # 数据库处理 from db import * # json import json # 生成一个app app = Flask(__name__, instance_relative_config=True) bootstrap=Bootstrap(app) app.secret_key = 'lab3' # 对app执行请求页面地址到函数的绑定 @app.route("/", methods=("GET", "POST")) @app.route("/login", methods=("GET", "POST")) def login(): """Log in a registered user by adding the user id to the session.""" if request.method == "POST": # 客户端在login页面发起的POST请求 username = request.form["username"] password = request.form["password"] ipaddr = request.form["ipaddr"] database = request.form["database"] db = MyDefSQL(username, password, ipaddr, database) err = db.login() if err != '0': return render_template("login_fail.html", err=err) else: #print(err) session['username'] = username session['password'] = password session['ipaddr'] = ipaddr session['database'] = database return redirect(url_for('home')) else : # 客户端GET 请求login页面时 return render_template("login.html") # 主页面 @app.route("/home", methods=(["GET", "POST"])) def home(): return render_template("home.html") # 请求url为host/table的页面返回结果 @app.route("/table", methods=(["GET", "POST"])) def table(): # 出于简单考虑,每次请求都需要连接数据库,可以尝试使用其它context保存数据库连接 if 'username' in session: db = MyDefSQL(session['username'], session['password'], session['ipaddr'], session['database']) err = db.login() else: return redirect(url_for('login')) tabs = db.showtablecnt() if request.method == "POST": if 'clear' in request.form: return render_template("table.html", rows = '', dbname=session['database']) elif 'search' in request.form: return render_template("table.html", rows = tabs, dbname=session['database']) else: return render_template("table.html", rows = tabs, dbname=session['database']) # 客户管理页面 @app.route("/customer", methods=(["GET", "POST"])) def customer(): if 'username' in 
session: db = MyDefSQL(session['username'], session['password'], session['ipaddr'], session['database']) err = db.login() else: return redirect(url_for('login')) tabs = db.showcustomer() if tabs==None: tabs="" if request.method == "POST": if 'search' in request.form: # 是由search表单提交的post请求 searchinfo = {} # print(len(request.form[u"客户身份证号"])) for key,value in request.form.items(): # 注意这里key和value仍然是unicode编码,统一在db.py中处理! if len(value) != 0 and key!='search': # 做第一层过滤,使得可以表单中某块信息不填 searchinfo[key] = value tabs = db.customer_search(searchinfo) return render_template("customer.html", rows = tabs, dbname=session['database']) # 其它删改查需求,是由Ajax提交的post datas = json.loads(request.get_data(as_text=True)) function = datas["function"] datas = datas["inputdata"] # print(function) # print(datas[0][u"客户身份证号"]) if function == "delete": res = {'info':'删除成功!', 'errs':[]} for data in datas: err = db.customer_del(data) if err != '0': res['errs'].append([data[u"客户身份证号"],err]) if len(res['errs']) != 0: res['info'] = "删除失败!" return json.dumps(res) elif function == "insert": res = {'info':'插入成功!', 'errs':[]} for data in datas: err = db.customer_insert(data) if err != '0': res['errs'].append([data[u"客户身份证号"],err]) if len(res['errs']) != 0: res['info'] = "插入失败!" return json.dumps(res) elif function == "update": res = {'info':'修改成功!', 'errs':[]} for data in datas: err = db.customer_update(data) if err != '0': res['errs'].append([data[u"客户身份证号"],err]) if len(res['errs']) != 0: res['info'] = "修改失败!" 
return json.dumps(res) else: return render_template("customer.html", rows = tabs, dbname=session['database']) # 账户管理页面 # 储蓄账户 @app.route("/account/saving", methods=(["GET", "POST"])) def saving(): if 'username' in session: db = MyDefSQL(session['username'], session['password'], session['ipaddr'], session['database']) err = db.login() else: return redirect(url_for('login')) tabs = db.showaccount(True) if tabs==None: tabs="" if request.method == "POST": if 'search' in request.form: # 是由search表单提交的post请求 searchinfo = {} for key,value in request.form.items(): # 注意这里key和value仍然是unicode编码,统一在db.py中处理! if len(value) != 0 and key!='search': # 做第一层过滤,使得可以表单中某块信息不填 searchinfo[key] = value tabs = db.account_search(searchinfo, True) return render_template("account_saving.html", rows = tabs, dbname=session['database']) # 其它删改查需求,是由Ajax提交的post datas = json.loads(request.get_data(as_text=True)) function = datas["function"] datas = datas["inputdata"] # print(function) if function == "delete": res = {'info':'删除成功!', 'errs':[]} for data in datas: err = db.account_del(data, True) if err != '0': res['errs'].append([data[u"账户.账户号"],err]) if len(res['errs']) != 0: res['info'] = "删除失败!" return json.dumps(res) elif function == "insert": res = {'info':'插入成功!', 'errs':[]} for data in datas: err = db.account_insert(data, True) if err != '0': res['errs'].append([data[u"账户.账户号"],err]) if len(res['errs']) != 0: res['info'] = "插入失败!" return json.dumps(res) elif function == "update": res = {'info':'修改成功!', 'errs':[]} for data in datas: err = db.account_update(data, True) if err != '0': res['errs'].append([data[u"账户.账户号"],err]) if len(res['errs']) != 0: res['info'] = "修改失败!" 
return json.dumps(res) else: return render_template("account_saving.html", rows = tabs, dbname=session['database']) # 支票账户 @app.route("/account/checking", methods=(["GET", "POST"])) def checking(): if 'username' in session: db = MyDefSQL(session['username'], session['password'], session['ipaddr'], session['database']) err = db.login() else: return redirect(url_for('login')) tabs = db.showaccount(False) if tabs==None: tabs="" if request.method == "POST": if 'search' in request.form: # 是由search表单提交的post请求 searchinfo = {} for key,value in request.form.items(): # 注意这里key和value仍然是unicode编码,统一在db.py中处理! if len(value) != 0 and key!='search': # 做第一层过滤,使得可以表单中某块信息不填 searchinfo[key] = value tabs = db.account_search(searchinfo, False) return render_template("account_checking.html", rows = tabs, dbname=session['database']) # 其它删改查需求,是由Ajax提交的post datas = json.loads(request.get_data(as_text=True)) function = datas["function"] datas = datas["inputdata"] # print(function) if function == "delete": res = {'info':'删除成功!', 'errs':[]} for data in datas: err = db.account_del(data, False) if err != '0': res['errs'].append([data[u"账户.账户号"],err]) if len(res['errs']) != 0: res['info'] = "删除失败!" return json.dumps(res) elif function == "insert": res = {'info':'插入成功!', 'errs':[]} for data in datas: err = db.account_insert(data, False) if err != '0': res['errs'].append([data[u"账户.账户号"],err]) if len(res['errs']) != 0: res['info'] = "插入失败!" return json.dumps(res) elif function == "update": res = {'info':'修改成功!', 'errs':[]} for data in datas: err = db.account_update(data, False) if err != '0': res['errs'].append([data[u"账户.账户号"],err]) if len(res['errs']) != 0: res['info'] = "修改失败!" 
return json.dumps(res) else: return render_template("account_checking.html", rows = tabs, dbname=session['database']) # 贷款管理页面 @app.route("/loan", methods=(["GET", "POST"])) def loan(): if 'username' in session: db = MyDefSQL(session['username'], session['password'], session['ipaddr'], session['database']) err = db.login() else: return redirect(url_for('login')) tabs = db.showloan() if tabs==None: tabs="" if request.method == "POST": if 'search' in request.form: # 是由search表单提交的post请求 searchinfo = {} for key,value in request.form.items(): # 注意这里key和value仍然是unicode编码,统一在db.py中处理! if len(value) != 0 and key!='search': # 做第一层过滤,使得可以表单中某块信息不填 searchinfo[key] = value tabs = db.loan_search(searchinfo) return render_template("loan.html", rows = tabs, dbname=session['database']) # 其它删改查需求,是由Ajax提交的post datas = json.loads(request.get_data(as_text=True)) function = datas["function"] datas = datas["inputdata"] # print(function) if function == "delete": res = {'info':'删除成功!', 'errs':[]} for data in datas: err = db.loan_del(data) if err != '0': res['errs'].append([data[u"贷款号"],err]) if len(res['errs']) != 0: res['info'] = "删除失败!" return json.dumps(res) elif function == "insert": res = {'info':'插入成功!', 'errs':[]} for data in datas: err = db.loan_insert(data) if err != '0': res['errs'].append([data[u"贷款号"],err]) if len(res['errs']) != 0: res['info'] = "插入失败!" return json.dumps(res) elif function == "release": res = {'info':'贷款发放成功!', 'errs':[]} for data in datas: err = db.loan_release(data) if err != '0': res['errs'].append([data[u"贷款号"],err]) if len(res['errs']) != 0: res['info'] = "贷款发放失败!" 
return json.dumps(res) else: return render_template("loan.html", rows = tabs, dbname=session['database']) # 业务统计 # 按月 @app.route("/statistic/month") def month(): if 'username' in session: db = MyDefSQL(session['username'], session['password'], session['ipaddr'], session['database']) err = db.login() else: return redirect(url_for('login')) tabs = db.statistic_month() return render_template("statistic.html", how = u'月份', rows = tabs, dbname=session['database']) # 按季度 @app.route("/statistic/quarter") def quarter(): if 'username' in session: db = MyDefSQL(session['username'], session['password'], session['ipaddr'], session['database']) err = db.login() else: return redirect(url_for('login')) tabs = db.statistic_quarter() return render_template("statistic.html", how = u'季度', rows = tabs, dbname=session['database']) # 按年 @app.route("/statistic/year") def year(): if 'username' in session: db = MyDefSQL(session['username'], session['password'], session['ipaddr'], session['database']) err = db.login() else: return redirect(url_for('login')) tabs = db.statistic_year() return render_template("statistic.html", how = u'年份', rows = tabs, dbname=session['database']) # 测试新html页面 @app.route("/test") def test(): if 'username' in session: db = MyDefSQL(session['username'], session['password'], session['ipaddr'], session['database']) err = db.login() else: return redirect(url_for('login')) tabs = db.showtablecnt() return render_template("test.html", rows = tabs) # 测试URL下返回html page @app.route("/hello") def hello(): return "hello world!" #返回不存在页面的处理 @app.errorhandler(404) def not_found(e): return render_template("404.html") if __name__ == "__main__": app.run(host = "0.0.0.0", debug=True)
3.1875
3
tests/test_env.py
dmitrvk/mymusichere-app
0
17966
<gh_stars>0 # Licensed under the MIT License from mymusichere import env class TestEnv: def test_get_config_from_env(self, monkeypatch): monkeypatch.setenv('CONFIG', 'value') assert env.get_str_config('config') == 'value' def test_get_secret_from_env(self, monkeypatch): monkeypatch.setenv('SECRET', 'value') assert env.get_secret('secret') == 'value' def test_get_config_from_file(self, fs): fs.create_file('/config', contents='value') assert env.get_str_config('config') == 'value' def test_get_secret_from_file(self, fs): fs.create_file('/run/secrets/secret', contents='value') assert env.get_secret('secret') == 'value' def test_config_default(self): assert env.get_str_config('config', 'default') == 'default' def test_secret_default(self): assert env.get_secret('secret', 'default') == 'default' def test_bool_config(self, monkeypatch): monkeypatch.setenv('CONFIG_TRUE', '1') monkeypatch.setenv('CONFIG_FALSE', '0') assert env.get_bool_config('config_true') is True assert env.get_bool_config('config_false') is False assert env.get_bool_config('config_default', default=True) is True assert env.get_bool_config('config_default', default=False) is False def test_str_config(self, monkeypatch): monkeypatch.setenv('CONFIG', 'config') assert env.get_str_config('config') == 'config'
2.28125
2
tests/factorys.py
2h4dl/pymilvus
0
17967
# STL imports import random import logging import string import time import datetime import random import struct import sys from functools import wraps # Third party imports import numpy as np import faker from faker.providers import BaseProvider logging.getLogger('faker').setLevel(logging.ERROR) sys.path.append('.') # grpc from milvus.grpc_gen import milvus_pb2 def gen_vectors(num, dim): return [[random.random() for _ in range(dim)] for _ in range(num)] def gen_single_vector(dim): return [[random.random() for _ in range(dim)]] def gen_vector(nb, d, seed=np.random.RandomState(1234)): xb = seed.rand(nb, d).astype("float32") return xb.tolist() def gen_unique_str(str=None): prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8)) return prefix if str is None else str + "_" + prefix def get_current_day(): return time.strftime('%Y-%m-%d', time.localtime()) def get_last_day(day): tmp = datetime.datetime.now() - datetime.timedelta(days=day) return tmp.strftime('%Y-%m-%d') def get_next_day(day): tmp = datetime.datetime.now() + datetime.timedelta(days=day) return tmp.strftime('%Y-%m-%d') def gen_long_str(num): string = '' for _ in range(num): char = random.choice('tomorrow') string += char def gen_one_binary(topk): ids = [random.randrange(10000000, 99999999) for _ in range(topk)] distances = [random.random() for _ in range(topk)] return milvus_pb2.TopKQueryResult(struct.pack(str(topk) + 'l', *ids), struct.pack(str(topk) + 'd', *distances)) def gen_nq_binaries(nq, topk): return [gen_one_binary(topk) for _ in range(nq)] def fake_query_bin_result(nq, topk): return gen_nq_binaries(nq, topk) class FakerProvider(BaseProvider): def collection_name(self): return 'collection_names' + str(random.randint(1000, 9999)) def name(self): return 'name' + str(random.randint(1000, 9999)) def dim(self): return random.randint(0, 999) fake = faker.Faker() fake.add_provider(FakerProvider) def collection_name_factory(): return fake.collection_name() def 
records_factory(dimension, nq): return [[random.random() for _ in range(dimension)] for _ in range(nq)] def binary_records_factory(dimension, nq): def binary_record(bsize): s_m = "abcdefghijklmnopqrstuvwxyz" s_list = [s_m[random.randint(0, 25)] for _ in range(bsize)] s = "".join(s_list) return bytes(s, encoding="ASCII") bs = dimension // 8 return [binary_record(bs) for _ in range(nq)] def integer_factory(nq): return [random.randint(0, 128) for _ in range(nq)] def time_it(func): @wraps(func) def inner(*args, **kwrgs): pref = time.perf_counter() result = func(*args, **kwrgs) delt = time.perf_counter() - pref print(f"[{func.__name__}][{delt:.4}s]") return result return inner
2.15625
2
HACKERRANK_Regrex&Parsing/Matrix_Script.py
StefaniaSferragatta/ADM2020-HW1
0
17968
import math import os import random import re import sys first_multiple_input = input().rstrip().split() n = int(first_multiple_input[0]) m = int(first_multiple_input[1]) matrix = [] if (n>0 and m>0 and n<100 and m< 100): for _ in range(n): matrix_item = input() matrix.append(matrix_item) for _ in range(m): string = "" for cols in range (m): for rows in range (n): string += matrix[rows][cols] output = re.sub(r"\b[!@#$%& ]+\b"," ", string) print(output)
3.0625
3
smartsheet/models/filter.py
Funtimes-Smarts/Python-import-Smart
0
17969
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101 # Smartsheet Python SDK. # # Copyright 2016 Smartsheet.com, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"): you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import from .criteria import Criteria from ..types import TypedList from ..util import prep from datetime import datetime import json import logging import six class Filter(object): """Smartsheet Filter data model.""" def __init__(self, props=None, base_obj=None): """Initialize the Filter model.""" self._base = None if base_obj is not None: self._base = base_obj self._pre_request_filter = None self.allowed_values = { '_type': [ 'LIST', 'CUSTOM']} self._criteria = TypedList(Criteria) self._exclude_selected = None self.__type = None self._values = TypedList(str) if props: # account for alternate variable names from raw API response if 'criteria' in props: self.criteria = props['criteria'] if 'excludeSelected' in props: self.exclude_selected = props['excludeSelected'] if 'exclude_selected' in props: self.exclude_selected = props[ 'exclude_selected'] if 'type' in props: self._type = props['type'] if '_type' in props: self._type = props['_type'] if 'values' in props: self.values = props['values'] self.__initialized = True def __getattr__(self, key): if key == 'type': return self._type else: raise AttributeError(key) @property def criteria(self): return self._criteria @criteria.setter def criteria(self, value): if isinstance(value, list): self._criteria.purge() self._criteria.extend([ 
(Criteria(x, self._base) if not isinstance(x, Criteria) else x) for x in value ]) elif isinstance(value, TypedList): self._criteria.purge() self._criteria = value.to_list() elif isinstance(value, Criteria): self._criteria.purge() self._criteria.append(value) @property def exclude_selected(self): return self._exclude_selected @exclude_selected.setter def exclude_selected(self, value): if isinstance(value, bool): self._exclude_selected = value @property def _type(self): return self.__type @_type.setter def _type(self, value): if isinstance(value, six.string_types): if value not in self.allowed_values['_type']: raise ValueError( ("`{0}` is an invalid value for Filter`_type`," " must be one of {1}").format( value, self.allowed_values['_type'])) self.__type = value @property def values(self): return self._values @values.setter def values(self, value): if isinstance(value, list): self._values.purge() self._values.extend([ (str(x) if not isinstance(x, str) else x) for x in value ]) elif isinstance(value, TypedList): self._values.purge() self._values = value.to_list() elif isinstance(value, str): self._values.purge() self._values.append(value) def to_dict(self, op_id=None, method=None): obj = { 'criteria': prep(self._criteria), 'excludeSelected': prep(self._exclude_selected), 'type': prep(self.__type), 'values': prep(self._values)} return obj def to_json(self): return json.dumps(self.to_dict(), indent=2) def __str__(self): return json.dumps(self.to_dict())
1.960938
2
utils/train.py
danilonumeroso/MEG
6
17970
import torch import torch.nn.functional as F import os.path as osp import json from torch_geometric.utils import precision, recall from torch_geometric.utils import f1_score, accuracy from torch.utils.tensorboard import SummaryWriter def train_epoch_classifier(model, train_loader, len_train, optimizer, device): model.train() loss_all = 0 for data in train_loader: data = data.to(device) optimizer.zero_grad() output, _ = model(data.x, data.edge_index, batch=data.batch) loss = F.nll_loss(F.log_softmax(output, dim=-1), data.y) loss.backward() loss_all += data.num_graphs * loss.item() optimizer.step() return loss_all / len_train def test_classifier(model, loader, device): model.eval() y = torch.tensor([]).long().to(device) yp = torch.tensor([]).long().to(device) loss_all = 0 for data in loader: data = data.to(device) pred, _ = model(data.x, data.edge_index, batch=data.batch) loss = F.nll_loss(F.log_softmax(pred, dim=-1), data.y) pred = pred.max(dim=1)[1] y = torch.cat([y, data.y]) yp = torch.cat([yp, pred]) loss_all += data.num_graphs * loss.item() return ( accuracy(y, yp), precision(y, yp, model.num_output).mean().item(), recall(y, yp, model.num_output).mean().item(), f1_score(y, yp, model.num_output).mean().item(), loss_all ) def train_cycle_classifier(task, train_loader, val_loader, test_loader, len_train, len_val, len_test, model, optimizer, device, base_path, epochs): best_acc = (0, 0) writer = SummaryWriter(base_path + '/plots') for epoch in range(epochs): loss = train_epoch_classifier(model, train_loader, len_train, optimizer, device) writer.add_scalar('Loss/train', loss, epoch) train_acc, train_prec, train_rec, train_f1, _ = test_classifier(model, train_loader, device) val_acc, val_prec, val_rec, val_f1, l = test_classifier(model, val_loader, device) writer.add_scalar('Accuracy/train', train_acc, epoch) writer.add_scalar('Accuracy/val', val_acc, epoch) writer.add_scalar('Loss/val', l / len_val, epoch) print(f'Epoch: {epoch}, Loss: {loss:.5f}') print(f'Train -> 
Acc: {train_acc:.5f} Rec: {train_rec:.5f} \ Prec: {train_prec:.5f} F1: {train_f1:.5f}') print(f'Val -> Acc: {val_acc:.5f} Rec: {val_rec:.5f} \ Prec: {val_prec:.5f} F1: {val_f1:.5f}') if best_acc[1] < val_acc: best_acc = train_acc, val_acc torch.save( model.state_dict(), osp.join(base_path + '/ckpt/', model.__class__.__name__ + ".pth") ) print("New best model saved!") with open(base_path + '/best_result.json', 'w') as outfile: json.dump({'train_acc': train_acc, 'val_acc': val_acc, 'train_rec': train_rec, 'val_rec': val_rec, 'train_f1': train_f1, 'val_f1': val_f1, 'train_prec': train_prec, 'val_prec': val_prec}, outfile) def train_epoch_regressor(model, train_loader, len_train, optimizer, device): model.train() loss_all = 0 for data in train_loader: data = data.to(device) optimizer.zero_grad() output, _ = model(data.x.float(), data.edge_index, batch=data.batch) loss = F.mse_loss(output, data.y) loss.backward() loss_all += data.num_graphs * loss.item() optimizer.step() return loss_all / len_train def test_regressor(model, loader, len_loader, device): model.eval() loss_all = 0 for data in loader: data = data.to(device) pred, _ = model(data.x.float(), data.edge_index, batch=data.batch) loss = F.mse_loss(pred, data.y).detach() loss_all += data.num_graphs * loss.item() return loss_all / len_loader def train_cycle_regressor(task, train_loader, val_loader, test_loader, len_train, len_val, len_test, model, optimizer, device, base_path, epochs): best_acc = (0, 0) writer = SummaryWriter(base_path + '/plots') best_error = (+10000, +10000) for epoch in range(epochs): loss = train_epoch_regressor(model, train_loader, len_train, optimizer, device) writer.add_scalar('Loss/train', loss, epoch) train_error = test_regressor(model, train_loader, len_train, device) val_error = test_regressor(model, val_loader, len_val, device) writer.add_scalar('MSE/train', train_error, epoch) writer.add_scalar('MSE/test', val_error, epoch) print(f'Epoch: {epoch}, Loss: {loss:.5f}') print(f'Training 
Error: {train_error:.5f}') print(f'Val Error: {val_error:.5f}') if best_error[1] > val_error: best_error = train_error, val_error torch.save( model.state_dict(), osp.join(base_path + '/ckpt/', model.__class__.__name__ + ".pth") ) print("New best model saved!") with open(base_path + '/best_result.json', 'w') as outfile: json.dump({'train_error': train_error, 'val_error': val_error}, outfile)
2.359375
2
Algorithms/LCP/29/math1.py
M-Quadra/LeetCode-problems
0
17971
<reponame>M-Quadra/LeetCode-problems class Solution: def orchestraLayout(self, num: int, xPos: int, yPos: int) -> int: a, b = (min(xPos, num-1-yPos), 1) if yPos >= xPos else (min(yPos, num-1-xPos), -1) return (4*num*a - 4*a*a - 2*a + b*(xPos+yPos) + (b>>1&1)*4*(num-a-1))%9 + 1
2.78125
3
python-sdk/nuimages/scripts/render_images.py
bjajoh/nuscenes-devkit
1,284
17972
# nuScenes dev-kit. # Code written by <NAME>, 2020. import argparse import gc import os import random from typing import List from collections import defaultdict import cv2 import tqdm from nuimages.nuimages import NuImages def render_images(nuim: NuImages, mode: str = 'all', cam_name: str = None, log_name: str = None, sample_limit: int = 50, filter_categories: List[str] = None, out_type: str = 'image', out_dir: str = '~/Downloads/nuImages', cleanup: bool = True) -> None: """ Render a random selection of images and save them to disk. Note: The images rendered here are keyframes only. :param nuim: NuImages instance. :param mode: What to render: "image" for the image without annotations, "annotated" for the image with annotations, "trajectory" for a rendering of the trajectory of the vehice, "all" to render all of the above separately. :param cam_name: Only render images from a particular camera, e.g. "CAM_BACK'. :param log_name: Only render images from a particular log, e.g. "n013-2018-09-04-13-30-50+0800". :param sample_limit: Maximum number of samples (images) to render. Note that the mini split only includes 50 images. :param filter_categories: Specify a list of object_ann category names. Every sample that is rendered must contain annotations of any of those categories. :param out_type: The output type as one of the following: 'image': Renders a single image for the image keyframe of each sample. 'video': Renders a video for all images/pcls in the clip associated with each sample. :param out_dir: Folder to render the images to. :param cleanup: Whether to delete images after rendering the video. Not relevant for out_type == 'image'. """ # Check and convert inputs. assert out_type in ['image', 'video'], ' Error: Unknown out_type %s!' % out_type all_modes = ['image', 'annotated', 'trajectory'] assert mode in all_modes + ['all'], 'Error: Unknown mode %s!' % mode assert not (out_type == 'video' and mode == 'trajectory'), 'Error: Cannot render "trajectory" for videos!' 
if mode == 'all': if out_type == 'image': modes = all_modes elif out_type == 'video': modes = [m for m in all_modes if m not in ['annotated', 'trajectory']] else: raise Exception('Error" Unknown mode %s!' % mode) else: modes = [mode] if filter_categories is not None: category_names = [c['name'] for c in nuim.category] for category_name in filter_categories: assert category_name in category_names, 'Error: Invalid object_ann category %s!' % category_name # Create output folder. out_dir = os.path.expanduser(out_dir) if not os.path.isdir(out_dir): os.makedirs(out_dir) # Filter by camera. sample_tokens = [s['token'] for s in nuim.sample] if cam_name is not None: sample_tokens_cam = [] for sample_token in sample_tokens: sample = nuim.get('sample', sample_token) key_camera_token = sample['key_camera_token'] sensor = nuim.shortcut('sample_data', 'sensor', key_camera_token) if sensor['channel'] == cam_name: sample_tokens_cam.append(sample_token) sample_tokens = sample_tokens_cam # Filter by log. if log_name is not None: sample_tokens_cleaned = [] for sample_token in sample_tokens: sample = nuim.get('sample', sample_token) log = nuim.get('log', sample['log_token']) if log['logfile'] == log_name: sample_tokens_cleaned.append(sample_token) sample_tokens = sample_tokens_cleaned # Filter samples by category. if filter_categories is not None: # Get categories in each sample. sd_to_object_cat_names = defaultdict(lambda: set()) for object_ann in nuim.object_ann: category = nuim.get('category', object_ann['category_token']) sd_to_object_cat_names[object_ann['sample_data_token']].add(category['name']) # Filter samples. 
sample_tokens_cleaned = [] for sample_token in sample_tokens: sample = nuim.get('sample', sample_token) key_camera_token = sample['key_camera_token'] category_names = sd_to_object_cat_names[key_camera_token] if any([c in category_names for c in filter_categories]): sample_tokens_cleaned.append(sample_token) sample_tokens = sample_tokens_cleaned # Get a random selection of samples. random.shuffle(sample_tokens) # Limit number of samples. sample_tokens = sample_tokens[:sample_limit] print('Rendering %s for mode %s to folder %s...' % (out_type, mode, out_dir)) for sample_token in tqdm.tqdm(sample_tokens): sample = nuim.get('sample', sample_token) log = nuim.get('log', sample['log_token']) log_name = log['logfile'] key_camera_token = sample['key_camera_token'] sensor = nuim.shortcut('sample_data', 'sensor', key_camera_token) sample_cam_name = sensor['channel'] sd_tokens = nuim.get_sample_content(sample_token) # We cannot render a video if there are missing camera sample_datas. if len(sd_tokens) < 13 and out_type == 'video': print('Warning: Skipping video for sample token %s, as not all 13 frames exist!' % sample_token) continue for mode in modes: out_path_prefix = os.path.join(out_dir, '%s_%s_%s_%s' % (log_name, sample_token, sample_cam_name, mode)) if out_type == 'image': write_image(nuim, key_camera_token, mode, '%s.jpg' % out_path_prefix) elif out_type == 'video': write_video(nuim, sd_tokens, mode, out_path_prefix, cleanup=cleanup) def write_video(nuim: NuImages, sd_tokens: List[str], mode: str, out_path_prefix: str, cleanup: bool = True) -> None: """ Render a video by combining all the images of type mode for each sample_data. :param nuim: NuImages instance. :param sd_tokens: All sample_data tokens in chronological order. :param mode: The mode - see render_images(). :param out_path_prefix: The file prefix used for the images and video. :param cleanup: Whether to delete images after rendering the video. """ # Loop through each frame to create the video. 
out_paths = [] for i, sd_token in enumerate(sd_tokens): out_path = '%s_%d.jpg' % (out_path_prefix, i) out_paths.append(out_path) write_image(nuim, sd_token, mode, out_path) # Create video. first_im = cv2.imread(out_paths[0]) freq = 2 # Display frequency (Hz). fourcc = cv2.VideoWriter_fourcc(*'MJPG') video_path = '%s.avi' % out_path_prefix out = cv2.VideoWriter(video_path, fourcc, freq, first_im.shape[1::-1]) # Load each image and add to the video. for out_path in out_paths: im = cv2.imread(out_path) out.write(im) # Delete temporary image if requested. if cleanup: os.remove(out_path) # Finalize video. out.release() def write_image(nuim: NuImages, sd_token: str, mode: str, out_path: str) -> None: """ Render a single image of type mode for the given sample_data. :param nuim: NuImages instance. :param sd_token: The sample_data token. :param mode: The mode - see render_images(). :param out_path: The file to write the image to. """ if mode == 'annotated': nuim.render_image(sd_token, annotation_type='all', out_path=out_path) elif mode == 'image': nuim.render_image(sd_token, annotation_type='none', out_path=out_path) elif mode == 'trajectory': sample_data = nuim.get('sample_data', sd_token) nuim.render_trajectory(sample_data['sample_token'], out_path=out_path) else: raise Exception('Error: Unknown mode %s!' % mode) # Trigger garbage collection to avoid memory overflow from the render functions. gc.collect() if __name__ == '__main__': parser = argparse.ArgumentParser(description='Render a random selection of images and save them to disk.') parser.add_argument('--seed', type=int, default=42) # Set to 0 to disable. 
parser.add_argument('--version', type=str, default='v1.0-mini') parser.add_argument('--dataroot', type=str, default='/data/sets/nuimages') parser.add_argument('--verbose', type=int, default=1) parser.add_argument('--mode', type=str, default='all') parser.add_argument('--cam_name', type=str, default=None) parser.add_argument('--log_name', type=str, default=None) parser.add_argument('--sample_limit', type=int, default=50) parser.add_argument('--filter_categories', action='append') parser.add_argument('--out_type', type=str, default='image') parser.add_argument('--out_dir', type=str, default='~/Downloads/nuImages') args = parser.parse_args() # Set random seed for reproducible image selection. if args.seed != 0: random.seed(args.seed) # Initialize NuImages class. nuim_ = NuImages(version=args.version, dataroot=args.dataroot, verbose=bool(args.verbose), lazy=False) # Render images. render_images(nuim_, mode=args.mode, cam_name=args.cam_name, log_name=args.log_name, sample_limit=args.sample_limit, filter_categories=args.filter_categories, out_type=args.out_type, out_dir=args.out_dir)
2.46875
2
version_info.py
sairam4123/GodotReleaseScriptPython
0
17973
<filename>version_info.py import re from configparser import ConfigParser from constants import PROJECT_FOLDER, RELEASE_LEVEL_DICT from release_type import ReleaseLevel, ReleaseType, value_from_key class VersionInfo: def __init__( self, major: int = 0, minor: int = 0, bugfix: int = 0, hotfix: int = 0, release_level: ReleaseLevel = ReleaseLevel.public, serial: int = None, release_type: ReleaseType = None, short_version: bool = False, ): self.major = 0 if major is None else int(major) self.minor = 0 if minor is None else int(minor) self.bugfix = 0 if bugfix is None else int(bugfix) self.hotfix = 0 if hotfix is None else int(hotfix) self.short_version = short_version self.release_type = release_type if self.release_type is None: if self.hotfix == 0: if self.bugfix == 0: if self.minor == 0: if self.major != 0: self.release_type = ReleaseType.major else: self.release_type = ReleaseType.minor else: self.release_type = ReleaseType.bugfix else: self.release_type = ReleaseType.hotfix self.serial = (serial and int(serial)) or 0 self.release_level = value_from_key(RELEASE_LEVEL_DICT, release_level) or release_level or ReleaseLevel.public def __str__(self): version: str = f'v{self.major}.{self.minor}.{self.bugfix}' if self.release_type == ReleaseType.hotfix: version = f'{version}.{self.hotfix}' elif self.release_level != ReleaseLevel.public: version = f'{version}{RELEASE_LEVEL_DICT[self.release_level]}{self.serial}' return version def increment(self, release_level: ReleaseLevel, release_type: ReleaseType = None): sequel: bool = False if release_type == self.release_type and self.release_level == ReleaseLevel.public: sequel = True if self.release_type != release_type or sequel: if release_type == ReleaseType.hotfix: self.hotfix += 1 else: self.hotfix = 0 if release_type == ReleaseType.bugfix: self.bugfix += 1 else: self.bugfix = 0 if release_type == ReleaseType.minor: self.minor += 1 else: self.minor = 0 if release_type == ReleaseType.major: self.major += 1 self.serial = None 
self.release_type = release_type if release_level != ReleaseLevel.public: self.increase_serial(release_level) elif release_level == ReleaseLevel.public: self.serial = 0 self.release_level = release_level self.release_type = release_type def increase_serial(self, release_level: ReleaseLevel): if self.serial is not None and self.release_level != release_level: self.serial = 0 else: if self.serial is not None: self.serial += 1 else: self.serial = 0 self.release_level = release_level def convert_to_godot_format(self): return repr(str(self).lstrip("v")).replace("'", '"') @classmethod def start_version(cls): return cls(0, 1, 0) @classmethod def load_version(cls, version: str): pattern: re.Pattern = re.compile(r"(\d)\.(\d)\.?(\d)?\.?(\d)?\.?([a-z]{1,2})?(\d{1,3})?") match: re.Match = pattern.match(version.replace('"', '')) if match: return cls(*match.groups()) else: return cls.start_version() @classmethod def check_version(cls, version: str): pattern: re.Pattern = re.compile(r"(\d)\.(\d)\.?(\d)?\.?(\d)?\.?([a-z]{1,2})?(\d{1,3})?") match: re.Match = pattern.match(version.replace('"', '')) return bool(match) def set_version(new_version: VersionInfo) -> None: config = ConfigParser() with open(list(PROJECT_FOLDER.glob("export_presets.cfg"))[0], 'r') as exports_config: config.read_file(exports_config) for section_name, section in config.items(): for key, value in section.items(): if key.endswith('version'): config.set(section_name, key, new_version.convert_to_godot_format()) config_file = open(list(PROJECT_FOLDER.glob("export_presets.cfg"))[0], "w") config.write(config_file) config_file.close() with open(list(PROJECT_FOLDER.glob("version.txt"))[0], 'w') as version_file: version_file.write(str(new_version)) def get_version() -> VersionInfo: try: version_file = open(list(PROJECT_FOLDER.glob("version.txt"))[0], 'r') except IndexError: version_file = open(PROJECT_FOLDER/"version.txt", "w+") else: if not VersionInfo.check_version(version_file.read()): print("Falling back to export 
presets") config = ConfigParser() with open(list(PROJECT_FOLDER.glob("export_presets.cfg"))[0], 'r') as exports_config: config.read_file(exports_config) version: VersionInfo = VersionInfo.start_version() for section_name, section in config.items(): for key, value in section.items(): if key.endswith('version'): version = VersionInfo.load_version(value) return version else: return VersionInfo.load_version(version_file.read()) if __name__ == '__main__': # Test Script index = 0 version_info = VersionInfo(1, 0, 0, 0, ReleaseLevel.public, None, ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.public, release_type=ReleaseType.minor) print(index, 
version_info) index += 1 version_info.increment(ReleaseLevel.public, release_type=ReleaseType.bugfix) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.public, release_type=ReleaseType.bugfix) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.public, release_type=ReleaseType.hotfix) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.public, release_type=ReleaseType.minor) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.beta, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.major) print(index, version_info) index += 1 
version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.release_candidate, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.public, release_type=ReleaseType.major) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.public, release_type=ReleaseType.hotfix) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.public, release_type=ReleaseType.hotfix) print(index, version_info) index += 1 version_info.increment(ReleaseLevel.alpha, release_type=ReleaseType.minor) print(index, version_info) index += 1 _version = version_info.convert_to_godot_format() print(_version) _pattern: re.Pattern = re.compile(r"(\d)\.(\d)\.?(\d)?\.?(\d)?\.?([a-z]{1,2})?(\d{1,3})?") _match: re.Match = _pattern.match(_version.replace('"', '')) print(index, VersionInfo(*_match.groups()))
2.65625
3
test/test_create_json_items_from_embark_xml.py
ndlib/mellon-search
0
17974
<reponame>ndlib/mellon-search # test_create_json_items_from_embark_xml.py 2/18/19 sm """ test create_json_items_from_embark_xml.py """ import sys import json import unittest import csv from xml.etree.ElementTree import ElementTree, tostring # add parent directory to path import os import inspect CURRENTDIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) PARENTDIR = os.path.dirname(CURRENTDIR) sys.path.insert(0, PARENTDIR) import create_json_items_from_embark_xml class Test(unittest.TestCase): """ Class for test fixtures """ def test_write_json_output(self): """ test writing json output """ json_data = '{"sample" : "test"}' create_json_items_from_embark_xml.write_json_output('.', 'test_write_json_output.json', json_data) with open('./test_write_json_output.json', 'r') as input_source: data = json.load(input_source) input_source.close() self.assertTrue(json_data == data) def test_everything(self): """ run test on whole process, verifying expected results """ create_json_items_from_embark_xml.create_json_items_from_embark_xml('./objects 01_18_19.xml', 'temp/pnx', csv_output_root_directory='temp') # verify one csv with open('temp/1976.057/main.csv', 'r') as read_actual: reader = csv.reader(read_actual) actual_csv = list(reader) with open('./expected_results/test_everything.csv', 'r') as read_expected: reader = csv.reader(read_expected) expected_csv = list(reader) self.assertTrue(actual_csv == expected_csv) # verify one pnx actual_results_file_name = 'temp/pnx/1976.057.xml' expected_results_file_name = 'expected_results/test_everything.xml' actual_results = ElementTree(file=actual_results_file_name) expected_results = ElementTree(file=expected_results_file_name) # print(ElementTree.tostring(xml_tree.getroot())) self.assertTrue(tostring(actual_results.getroot()) == tostring(expected_results.getroot())) def suite(): """ define test suite """ return unittest.TestLoader().loadTestsFromTestCase(Test) if __name__ == '__main__': suite() 
unittest.main()
2.578125
3
plot/laikago/plot_task.py
MaxxWilson/ASE389Project
17
17975
import os import sys cwd = os.getcwd() sys.path.append(cwd) import pickle import numpy as np import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt from plot.helper import plot_task, plot_weights, plot_rf_z_max, plot_rf_quad, plot_vector_traj tasks = [ 'com_pos', 'com_vel', 'chassis_quat', 'chassis_ang_vel', 'toeFL_pos', 'toeFL_vel', 'toeFR_pos', 'toeFR_vel', 'toeRR_pos', 'toeRR_vel', 'toeRL_pos', 'toeRL_vel' ] weights = [ 'w_com', 'w_chassis_ori', 'w_toeFL', 'w_toeFR', 'w_toeRR', 'w_toeRL' ] rf_z = ['rf_z_max_toeFL', 'rf_z_max_toeFR', 'rf_z_max_toeRR', 'rf_z_max_toeRL'] time = [] phase = [] rf_cmd = [] des, act = dict(), dict() for topic in tasks: des[topic] = [] act[topic] = [] w = dict() for topic in weights: w[topic] = [] rf_z_max = dict() for topic in rf_z: rf_z_max[topic] = [] with open('data/pnc.pkl', 'rb') as file: while True: try: d = pickle.load(file) time.append(d['time']) phase.append(d['phase']) for topic in tasks: des[topic].append(d[topic + '_des']) act[topic].append(d[topic]) for topic in weights: w[topic].append(d[topic]) for topic in rf_z: rf_z_max[topic].append(d[topic]) rf_cmd.append(d['rf_cmd']) except EOFError: break for k, v in des.items(): des[k] = np.stack(v, axis=0) for k, v in act.items(): act[k] = np.stack(v, axis=0) rf_cmd = np.stack(rf_cmd, axis=0) phase = np.stack(phase, axis=0) ## ============================================================================= ## Plot Task ## ============================================================================= plot_task(time, des['com_pos'], act['com_pos'], des['com_vel'], act['com_vel'], phase, 'com lin') plot_task(time, des['chassis_quat'], act['chassis_quat'], des['chassis_ang_vel'], act['chassis_ang_vel'], phase, 'pelvis ori') plot_task(time, des['toeFL_pos'], act['toeFL_pos'], des['toeFL_vel'], act['toeFL_vel'], phase, 'left foot lin') plot_task(time, des['toeFR_pos'], act['toeFR_pos'], des['toeFR_vel'], act['toeFR_vel'], phase, 'left foot ori') plot_task(time, 
des['toeRR_pos'], act['toeRR_pos'], des['toeRR_vel'], act['toeRR_vel'], phase, 'right foot lin') plot_task(time, des['toeRL_pos'], act['toeRL_pos'], des['toeRL_vel'], act['toeRL_vel'], phase, 'right foot ori') ## ============================================================================= ## Plot WBC Solutions ## ============================================================================= plot_rf_quad(time, rf_cmd, phase) ## ============================================================================= ## Plot Weights and Max Reaction Force Z ## ============================================================================= plot_weights(time, w, phase) plot_rf_z_max(time, rf_z_max, phase) plt.show()
2.046875
2
h/views/api/users.py
bibliotechie/h
0
17976
from pyramid.httpexceptions import HTTPConflict

from h.auth.util import client_authority
from h.presenters import TrustedUserJSONPresenter
from h.schemas import ValidationError
from h.schemas.api.user import CreateUserAPISchema, UpdateUserAPISchema
from h.services.user_unique import DuplicateUserError
from h.views.api.config import api_config
from h.views.api.exceptions import PayloadError


@api_config(
    versions=["v1", "v2"],
    route_name="api.user_read",
    request_method="GET",
    link_name="user.read",
    description="Fetch a user",
    permission="read",
)
def read(context, _request):
    """
    Fetch a user.

    This API endpoint allows authorized clients (those able to provide a
    valid Client ID and Client Secret) to read users in their authority.
    """
    # `context.user` is resolved by the route's traversal context.
    return TrustedUserJSONPresenter(context.user).asdict()


@api_config(
    versions=["v1", "v2"],
    route_name="api.users",
    request_method="POST",
    link_name="user.create",
    description="Create a new user",
    permission="create",
)
def create(request):
    """
    Create a user.

    This API endpoint allows authorised clients (those able to provide a
    valid Client ID and Client Secret) to create users in their authority.
    These users are created pre-activated, and are unable to log in to the
    web service directly.

    Note: the authority-enforcement logic herein is, by necessity, strange.
    The API accepts an ``authority`` parameter but the only valid value for
    the param is the client's verified authority. If the param does not
    match the client's authority, ``ValidationError`` is raised.

    :raises ValidationError: if ``authority`` param does not match client
                             authority
    :raises HTTPConflict:    if user already exists
    """
    client_authority_ = client_authority(request)
    schema = CreateUserAPISchema()
    appstruct = schema.validate(_json_payload(request))

    # Enforce authority match
    if appstruct["authority"] != client_authority_:
        raise ValidationError(
            "authority '{auth_param}' does not match client authority".format(
                auth_param=appstruct["authority"]
            )
        )

    # Reject the payload before signup if username/email/identity collide
    # with an existing user in this authority.
    user_unique_service = request.find_service(name="user_unique")

    try:
        user_unique_service.ensure_unique(appstruct, authority=client_authority_)
    except DuplicateUserError as err:
        raise HTTPConflict(str(err)) from err

    user_signup_service = request.find_service(name="user_signup")
    # Pre-activated: no activation email / login flow for these users.
    user = user_signup_service.signup(require_activation=False, **appstruct)
    presenter = TrustedUserJSONPresenter(user)
    return presenter.asdict()


@api_config(
    versions=["v1", "v2"],
    route_name="api.user",
    request_method="PATCH",
    link_name="user.update",
    description="Update a user",
    permission="update",
)
def update(user, request):
    """
    Update a user.

    This API endpoint allows authorised clients (those able to provide a
    valid Client ID and Client Secret) to update users in their authority.
    """
    schema = UpdateUserAPISchema()
    appstruct = schema.validate(_json_payload(request))

    user_update_service = request.find_service(name="user_update")
    user = user_update_service.update(user, **appstruct)

    presenter = TrustedUserJSONPresenter(user)
    return presenter.asdict()


def _json_payload(request):
    # Normalise malformed JSON bodies into the API's PayloadError.
    try:
        return request.json_body
    except ValueError as err:
        raise PayloadError() from err
2.421875
2
controllers/social_auth/kivyauth/__init__.py
richierh/SalesKivyMD
126
17977
<filename>controllers/social_auth/kivyauth/__init__.py from kivy.logger import Logger from kivy.utils import platform __version__ = "2.3.2" _log_message = "KivyAuth:" + f" {__version__}" + f' (installed at "{__file__}")' __all__ = ("login_providers", "auto_login") Logger.info(_log_message)
1.929688
2
RDS/circle3_central_services/research_manager/src/api/User/__init__.py
Sciebo-RDS/Sciebo-RDS
10
17978
from .user import *
1.171875
1
cesium_app/app_server.py
yaowenxi/cesium
41
17979
import tornado.web

import os
import sys
import pathlib

from baselayer.app.config import Config
from . import models
from baselayer.app import model_util

# This provides `login`, `complete`, and `disconnect` endpoints
from social_tornado.routes import SOCIAL_AUTH_ROUTES

from .handlers import (
    ProjectHandler, DatasetHandler, FeatureHandler, PrecomputedFeaturesHandler,
    ModelHandler, PredictionHandler, FeatureListHandler, SklearnModelsHandler,
    PlotFeaturesHandler, PredictRawDataHandler
)


def make_app(cfg, baselayer_handlers, baselayer_settings):
    """Create and return a `tornado.web.Application` object with specified
    handlers and settings.

    Parameters
    ----------
    cfg : Config
        Loaded configuration.  Can be specified with '--config'
        (multiple uses allowed).
    baselayer_handlers : list
        Tornado handlers needed for baselayer to function.
    baselayer_settings : cfg
        Settings needed for baselayer to function.

    Returns
    -------
    tornado.web.Application
        Application with routes registered and database initialised.
    """
    # '<KEY>' is the placeholder shipped in the sample config; warn loudly
    # if it was never replaced.
    if baselayer_settings['cookie_secret'] == '<KEY>':
        print('!' * 80)
        print(' Your server is insecure. Please update the secret string ')
        print(' in the configuration file!')
        print('!' * 80)

    # Ensure every configured data directory exists; failures are only
    # reported, not fatal (e.g. a race with another process creating it).
    for path_name, path in cfg['paths'].items():
        if not os.path.exists(path):
            print("Creating %s" % path)
            try:
                os.makedirs(path)
            except Exception as e:
                print(e)

    handlers = baselayer_handlers + [
        (r'/project(/.*)?', ProjectHandler),
        (r'/dataset(/.*)?', DatasetHandler),
        (r'/features(/[0-9]+)?', FeatureHandler),
        (r'/features/([0-9]+)/(download)', FeatureHandler),
        (r'/precomputed_features(/.*)?', PrecomputedFeaturesHandler),
        (r'/models(/[0-9]+)?', ModelHandler),
        (r'/models/([0-9]+)/(download)', ModelHandler),
        (r'/predictions(/[0-9]+)?', PredictionHandler),
        (r'/predictions/([0-9]+)/(download)', PredictionHandler),
        (r'/predict_raw_data', PredictRawDataHandler),
        (r'/features_list', FeatureListHandler),
        (r'/sklearn_models', SklearnModelsHandler),
        (r'/plot_features/(.*)', PlotFeaturesHandler)
    ]

    settings = baselayer_settings
    # settings.update({})  # Specify additional settings here

    app = tornado.web.Application(handlers, **settings)
    # Side effects: connect to the configured database and create any
    # missing tables before the app starts serving.
    models.init_db(**cfg['database'])
    model_util.create_tables()
    return app
2.25
2
tests/test_building.py
sietekk/elevator
0
17980
<reponame>sietekk/elevator # # Copyright (c) 2016 <NAME> # from elevator.building import ( Building, Floor, DEFAULT_FLOOR_QTY, DEFAULT_ELEVATOR_QTY, ) from elevator.elevator import Elevator def test_building(): b1 = Building() assert len(b1.floors) == DEFAULT_FLOOR_QTY, \ "Incorrect default number of floors" assert len(b1.elevators) == DEFAULT_ELEVATOR_QTY, \ "Incorrect default number of elevators" b2 = Building(20, 5) assert len(b2.floors) == 20, \ "Initialize to wrong number of floors" assert len(b2.elevators) == 5, \ "Initialized to wrong number of elevators" for elevator in b2: assert isinstance(elevator, Elevator), \ "Elevator object not instantiated with Elevator class" for floor in b2.floors: assert isinstance(floor, Floor), \ "Floor object not instantiated with Floor class"
3.421875
3
Modulo_3/semana 2/miercoles/main.py
AutodidactaMx/cocid_python
0
17981
import tkinter as tk from presentacion.formulario import FormularioPersona def centrar_ventana(ventana): aplicacion_ancho = 550 aplicacion_largo = 650 pantall_ancho = ventana.winfo_screenwidth() pantall_largo = ventana.winfo_screenheight() x = int((pantall_ancho/2) - (aplicacion_ancho/2)) y = int((pantall_largo/2) - (aplicacion_largo/2)) return ventana.geometry(f"{aplicacion_ancho}x{aplicacion_largo}+{x}+{y}") try: ventana=tk.Tk() centrar_ventana(ventana) ventana.title("Formulario") form = FormularioPersona(ventana) ventana.mainloop() except Exception as e: print("Existe un error : ", e)
3.15625
3
Voting/urls.py
Poornartha/Odonata
0
17982
from django.urls import path from .views import teams_all, team_vote urlpatterns = [ path('teams/all', teams_all, name="teams_all"), path('teams/<int:pk>', team_vote, name="team_vote"), ]
1.617188
2
models/3-Whats goin on/train_code/resnext50/train.py
cns-iu/HuBMAP---Hacking-the-Kidney
0
17983
from Dataset import * from Network import * from Functions import * import os from fastai.distributed import * import argparse import torch try: #from apex.parallel import DistributedDataParallel as DDP from apex.fp16_utils import * from apex import amp, optimizers from apex.multi_tensor_apply import multi_tensor_applier except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.") from tqdm import tqdm def get_args(): parser = argparse.ArgumentParser() parser.add_argument('--gpu_id', type=str, default='0,1', help='which gpu to use') parser.add_argument('--path', type=str, default='/N/u/soodn/Carbonate/hubmap-kidney-segmentation', help='path of csv file with DNA sequences and labels') parser.add_argument('--epochs', type=int, default=32, help='number of epochs to train') parser.add_argument('--batch_size', type=int, default=64, help='size of each batch during training') parser.add_argument('--weight_decay', type=float, default=1e-5, help='weight dacay used in optimizer') parser.add_argument('--save_freq', type=int, default=1, help='saving checkpoints per save_freq epochs') parser.add_argument('--dropout', type=float, default=.1, help='transformer dropout') parser.add_argument('--lr', type=float, default=1e-3, help='learning rate') parser.add_argument('--nfolds', type=int, default=4, help='number of cross validation folds') parser.add_argument('--fold', type=int, default=0, help='which fold to train') parser.add_argument('--val_freq', type=int, default=1, help='which fold to train') parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader') parser.add_argument('--expansion', type=int, default=64, help='number of expansion pixels') parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='gradient_accumulation_steps') parser.add_argument('--transfer', default=1, help='transfer learning activated') opts = parser.parse_args() return opts opts=get_args() 
#set up gpu
os.environ["CUDA_VISIBLE_DEVICES"] = opts.gpu_id
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Output directories (no-ops if they already exist; mkdir prints a warning).
os.system('mkdir models')
os.system('mkdir logs')

#dice = Dice_th_pred(np.arange(0.2,0.7,0.01))

#datasets and dataloaders
dataset = HuBMAPDataset(path=opts.path, fold=opts.fold, nfolds=opts.nfolds, train=True, tfms=get_aug())
val_dataset = HuBMAPDataset(path=opts.path, fold=opts.fold, nfolds=opts.nfolds, train=False)
dataloader = DataLoader(dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.workers, drop_last=True)
val_dataloader = DataLoader(val_dataset, batch_size=opts.batch_size, shuffle=False, num_workers=opts.workers, drop_last=True)

#model and optimizer
model = UneXt50().cuda()
#optimizer = Ranger(model.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)
optimizer = torch.optim.Adam(model.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)
# scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.1, div_factor=1e3,
#                                                 max_lr=1e-3, epochs=opts.epochs, steps_per_epoch=len(dataloader))
criterion=nn.BCEWithLogitsLoss()
# Mixed precision (apex O1) then multi-GPU data parallelism.
opt_level = 'O1'
model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)
model = nn.DataParallel(model)

####### Transfer learning #######
# NOTE(review): opts.transfer defaults to the int 1 but has no type=int, so
# an explicit "--transfer 1" arrives as the string '1' and this branch is
# skipped -- confirm intended behaviour.
if opts.transfer == 1:
    best_model_path = "models_scratch/fold4.pth"
    state_dict = torch.load(best_model_path)
    model.load_state_dict(state_dict)

#some more things
logger=CSVLogger(['epoch','train_loss','val_loss','dice_coef'],f"logs/log_fold{opts.fold}.csv")
metric=Dice_soft()
best_metric=0

#training
scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer, pct_start=0.2, div_factor=1e2,
                                              max_lr=1e-4, epochs=opts.epochs, steps_per_epoch=len(dataloader))
for epoch in range(opts.epochs):
    train_loss=0
    model.train(True)
    for data in tqdm(dataloader):
        img=data['img'].to(device)
        mask=data['mask'].to(device)
        # Cutout augmentation applied on-device per batch.
        img=cutout(img)
        output=model(img)
        loss=criterion(output,mask)
        # apex loss scaling for O1 mixed precision.
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
        #if step%opts.gradient_accumulation_steps==0:
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
        train_loss+=loss.item()
        #break
    train_loss/=len(dataloader)
    print(f"### validating for epoch {epoch} ###")
    val_loss=0
    model.eval()
    metric.reset()
    with torch.no_grad():
        for data in tqdm(val_dataloader):
            # NOTE(review): this odd-batch truncation inspects `img` from the
            # PREVIOUS iteration (or the last training batch) and its result
            # is discarded by the reassignment two lines below -- it has no
            # effect on the current batch.  Confirm whether it should run
            # after `img`/`mask` are loaded.
            if img.shape[0]%2!=0:
                img=img[:-1]
                mask=mask[:-1]
            img=data['img'].to(device)
            mask=data['mask'].to(device)
            shape=img.shape
            # Crop the expansion border before scoring; NaN outputs
            # (output != output) are zeroed.
            output=model(img)[:,:,opts.expansion//2:-opts.expansion//2,opts.expansion//2:-opts.expansion//2]
            output[output != output] = 0
            mask=mask[:,:,opts.expansion//2:-opts.expansion//2,opts.expansion//2:-opts.expansion//2]
            metric.accumulate(output.detach(), mask)
            loss=criterion(output,mask)
            val_loss+=loss.item()
    val_loss/=len(val_dataloader)
    metric_this_epoch=metric.value
    # metric_this_epoch=val_loss
    logger.log([epoch+1,train_loss,val_loss,metric_this_epoch])
    # Keep only the best checkpoint by soft-Dice.
    if metric_this_epoch>best_metric:
        torch.save(model.state_dict(),f'models/fold{opts.fold}.pth')
        best_metric=metric_this_epoch
2.140625
2
models/node/node.py
InfoCoV/Multi-Cro-CoV-cseBERT
0
17984
"""
NODE model definition and experiment setup.

Neural Oblivious Decision Ensembles for Deep Learning on Tabular Data
https://arxiv.org/abs/1909.06312
Model details: https://pytorch-tabular.readthedocs.io/en/latest/models/#nodemodel
"""
import logging
import os.path
import shutil

from sklearn.metrics import classification_report
from omegaconf import OmegaConf
import optuna
from optuna.samplers import TPESampler
from pytorch_tabular import TabularModel
from pytorch_tabular.models import NodeConfig
from pytorch_tabular.config import (
    DataConfig, OptimizerConfig, TrainerConfig, ExperimentConfig)
from pytorch_tabular.utils import get_class_weighted_cross_entropy

from optuna_utils import OptunaExperiments, run_experiments

LOGGER = logging.getLogger(__name__)

LABEL_COL = "retweet_label"

# updated by train.py before running
# NOTE(review): several keys read below (study_seed, default_trials,
# n_trials, experiment_root) are not in this literal and are presumably
# injected by train.py -- confirm before running standalone.
config = OmegaConf.create(
    {"max_epochs": 50,
     "lr_exp_min": -4,
     "lr_exp_max": -3,
     "alpha_exp_min": -4,
     "alpha_exp_max": -3,
     "batch_exp_min": 7,
     "batch_exp_max": 8,
     "num_trees_min": 512,
     "num_trees_max": 2560,
     "num_trees_step": 512,
     "depth_min": 4,
     "depth_max": 6,
     "categorical_cols": [
         "entities.urls", "entities.media", "user_in_net",
         "has_covid_keyword", "user.followers_isna", "users_mention_isna",
         "following_users_isna", "users_reply_isna"],
     "exp_log_freq": 100,
     "seed": 1,
     "num_workers": 24,
     "embed_categorical": True}
)


class Experiments(OptunaExperiments):
    """Optuna hyper-parameter search over pytorch-tabular NODE models."""

    def __init__(
            self, train_data, val_data, train_labels, val_labels,
            experiment_root, config):
        # Join features and labels into single frames, as pytorch-tabular
        # expects the target column inside the dataframe.
        self.train_data_joined = train_data.copy()
        self.train_data_joined[LABEL_COL] = train_labels
        self.val_data_joined = val_data.copy()
        self.val_data_joined[LABEL_COL] = val_labels
        self.experiment_root = experiment_root
        self.config = config
        self.study = self.create_study()
        self.best_score = None  # best macro-F1 seen so far
        self.cat_col_names = config.categorical_cols
        # Everything not declared categorical is treated as continuous.
        self.num_col_names = [
            c for c in train_data.columns
            if c not in config.categorical_cols]
        self.data_config = DataConfig(
            target=[LABEL_COL],
            continuous_cols=self.num_col_names,
            categorical_cols=self.cat_col_names,
            normalize_continuous_features=False,
            num_workers=config.num_workers)
        # Class-weighted BCE to counter label imbalance.
        self.weighted_loss = get_class_weighted_cross_entropy(
            train_labels.values.ravel(), mu=0.1)

    def create_study(self):
        """Build a TPE study seeded from config, pre-queuing default trials."""
        sampler = TPESampler(seed=self.config.study_seed)
        study = optuna.create_study(sampler=sampler, direction="maximize")
        for trial_dict in self.config.default_trials:
            study.enqueue_trial(trial_dict)
        return study

    def optimize(self):
        """Run the configured number of trials, persisting the study at the end."""
        self.study.optimize(self.objective, n_trials=self.config.n_trials)
        self.store_study()

    def objective(self, trial):
        """Optuna objective: train one NODE model, return validation macro-F1."""
        # Sample log-scale learning rate / weight decay and power-of-two
        # batch size via integer exponents.
        lr_exp = trial.suggest_int(
            "lr_exp", self.config.lr_exp_min, self.config.lr_exp_max)
        lr = 10 ** lr_exp
        alpha_exp = trial.suggest_int(
            "alpha_exp", self.config.alpha_exp_min, self.config.alpha_exp_max)
        alpha = 10 ** alpha_exp
        batch_exp = trial.suggest_int(
            "batch_exp", self.config.batch_exp_min, self.config.batch_exp_max)
        batch_size = 2 ** batch_exp
        num_trees = trial.suggest_int(
            "num_trees", self.config.num_trees_min,
            self.config.num_trees_max, self.config.num_trees_step
        )
        depth = trial.suggest_int(
            "depth", self.config.depth_min, self.config.depth_max)

        # NOTE(review): reads config.experiment_root rather than the
        # experiment_root passed to __init__ -- confirm they always agree.
        experiment_path = self.config.experiment_root
        checkpoints_path = os.path.join(experiment_path, "checkpoints")
        tb_logs = os.path.join(experiment_path, "tb_logs")
        run_name = "category_embedding"
        # store all just for the current optuna run
        if os.path.exists(checkpoints_path):
            shutil.rmtree(checkpoints_path)
        if os.path.exists(tb_logs):
            shutil.rmtree(tb_logs)

        trainer_config = TrainerConfig(
            auto_lr_find=False,
            gpus=1,
            deterministic=True,
            batch_size=batch_size,
            max_epochs=self.config.max_epochs,
            checkpoints_path=checkpoints_path,
        )
        optimizer_config = OptimizerConfig(
            optimizer="AdamW",
            optimizer_params={"weight_decay": alpha}
        )
        model_config = NodeConfig(
            task="classification",
            learning_rate=lr,
            loss=self.weighted_loss,
            num_trees=num_trees,
            depth=depth,
            embed_categorical=self.config.embed_categorical,
        )
        experiment_config = ExperimentConfig(
            project_name=tb_logs,
            run_name=run_name,
            exp_log_freq=self.config.exp_log_freq
        )
        tabular_model = TabularModel(
            data_config=self.data_config,
            model_config=model_config,
            optimizer_config=optimizer_config,
            trainer_config=trainer_config,
            experiment_config=experiment_config
        )
        tabular_model.fit(
            train=self.train_data_joined,
            validation=self.val_data_joined,
            seed=self.config.seed,
            loss=self.weighted_loss)
        result = tabular_model.evaluate(self.val_data_joined)
        LOGGER.info(result)
        pred_df = tabular_model.predict(self.val_data_joined)
        val_predictions = pred_df.prediction.values
        out = classification_report(
            self.val_data_joined[LABEL_COL].values,
            val_predictions, digits=3, output_dict=True)
        LOGGER.info(out)
        f1 = out["macro avg"]["f1-score"]
        # Persist model/report only when this trial improves on the best.
        if self.best_score is None or f1 > self.best_score:
            self.best_score = f1
            self.store_results(tabular_model, out)
            self.store_study()
        return f1


def run(config):
    """Entry point used by train.py: run the Optuna experiment suite."""
    run_experiments(
        config=config,
        experiments_class=Experiments)
2.640625
3
dtf/packages/models.py
WebPowerLabs/django-trainings
0
17985
from django.db import models
from django.core.urlresolvers import reverse
from djnfusion import server, key
from django.conf import settings
from jsonfield import JSONField

# TODO: change to this. Currently doesnt work. may have something to do with
# the server not in __init__
# from packages.providers.infusionsoft import server, key
from .managers import InfusionsoftTagManager, PackagePurchaseManager
from packages.managers import PackageManager


def remove_unused(_dict):
    """Return a copy of _dict with all falsy values dropped.

    NOTE(review): ``iteritems`` is Python-2-only; this module will not run
    under Python 3 as written.
    """
    return_dict = {}
    for _key, _value in _dict.iteritems():
        if _value:
            return_dict[_key] = _value
    return return_dict


def setdictattrs(obj, _dict):
    """Set each truthy key/value of _dict as an attribute on obj."""
    _dict = remove_unused(_dict)
    for _key, _value in _dict.iteritems():
        setattr(obj, _key, _value)


class Package(models.Model):
    """ Base for package classes """
    name = models.CharField(max_length=255)
    courses = models.ManyToManyField("courses.Course", null=True, blank=True)
    lessons = models.ManyToManyField("lessons.Lesson", null=True, blank=True)
    groups = models.ManyToManyField("facebook_groups.FacebookGroup",
                                    null=True, blank=True)
    journals = models.ManyToManyField("journals.JournalQuestion",
                                      null=True, blank=True)

    objects = PackageManager()

    def __unicode__(self):
        return u'{}'.format(self.name if self.name else 'Package')

    def get_absolute_url(self):
        return reverse('packages:detail', kwargs={'pk': self.pk})


class PackagePurchase(models.Model):
    """
    User's purchased packages.
    """
    # Purchase lifecycle states.
    INACTIVE = 0
    ACTIVE = 1
    EXPIRED = 2
    STATUS_CHOICES = [
        [INACTIVE, 'Inactive'],
        [ACTIVE, 'Active'],
        [EXPIRED, 'Expired'],
    ]
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    package = models.ForeignKey('Package')
    status = models.IntegerField(choices=STATUS_CHOICES, default=INACTIVE)
    data = JSONField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    objects = PackagePurchaseManager()

    def __unicode__(self):
        return u'{0} => {1}'.format(self.user, self.package)

    def set_status(self, status):
        """Persist a new lifecycle status immediately."""
        self.status = status
        self.save()


class InfusionsoftPackage(Package):
    """ Package with infusionsoft api hooks """
    subscription_id = models.TextField(blank=True, null=True)
    product_id = models.TextField(blank=True, null=True)
    cycle = models.TextField(blank=True, null=True)
    frequency = models.TextField(blank=True, null=True)
    pre_authorize_amount = models.TextField(blank=True, null=True)
    prorate = models.TextField(blank=True, null=True)
    active = models.TextField(blank=True, null=True)
    plan_price = models.TextField(blank=True, null=True)
    product_price = models.TextField(blank=True, null=True)
    description = models.TextField(blank=True, null=True)
    status = models.TextField(blank=True, null=True)
    action_set_id = models.TextField(blank=True, null=True)
    tag = models.OneToOneField("InfusionsoftTag", blank=True, null=True)
    purchase_url = models.URLField(blank=True, null=True)

    def save(self, *args, **kwargs):
        # Refresh local fields from the Infusionsoft API on every save,
        # when a product_id is known.
        sync_data = self._get_sync_data(product_id=self.product_id) if self.product_id else None
        if sync_data:
            setdictattrs(self, sync_data)
        return super(InfusionsoftPackage, self).save(*args, **kwargs)

    def sync(self):
        """Pull fresh API data onto this instance and persist it."""
        sync_data = self._get_sync_data()
        if sync_data:
            setdictattrs(self, sync_data)
            self.save()

    def _get_sync_data(self, product_id=None):
        """Combine subscription and product API records into one dict."""
        subscription_data = self._get_subscription_data(product_id)
        product_data = self._get_product_data(product_id)
        if subscription_data and product_data:
            package_data = dict({
                "id": self.id,
                "pk": self.pk,
                "action_set_id": self.action_set_id,
                "name": product_data.get("ProductName"),
                "subscription_id": subscription_data.get("Id"),
                "product_id": subscription_data.get("ProductId"),
                "cycle": subscription_data.get("Cycle"),
                "frequency": subscription_data.get("Frequency"),
                "prorate": subscription_data.get("Prorate"),
                "active": subscription_data.get("Active"),
                "plan_price": subscription_data.get("PlanPrice"),
                "product_price": product_data.get("ProductPrice"),
                "description": product_data.get("Description"),
                "status": product_data.get("Status"),
            })
        elif product_data:
            # product but not subscription
            package_data = dict({
                "id": self.id,
                "pk": self.pk,
                "action_set_id": self.action_set_id,
                "name": product_data.get("ProductName"),
                "product_id": product_data.get("Id"),
                "product_price": product_data.get("ProductPrice"),
                "description": product_data.get("Description"),
                "status": product_data.get("Status"),
            })
        # NOTE(review): if both API lookups fail, `package_data` is never
        # bound and the next line raises UnboundLocalError -- confirm this
        # path is unreachable or add a guard.
        return package_data if package_data else None

    def _get_subscription_data(self, product_id=None):
        # Returns the first SubscriptionPlan row for the product, else None
        # (implicitly None when no product_id is available).
        product_id = product_id if product_id else self.product_id
        if product_id:
            results = server.DataService.findByField(
                key, "SubscriptionPlan", 10, 0, "productid", product_id,
                ["Id", "ProductId", "Cycle", "Frequency",
                 "PreAuthorizeAmount", "Prorate", "Active", "PlanPrice"]);
            return results[0] if len(results) else None

    def _get_product_data(self, product_id=None):
        # Returns the first Product row for the product id, else None.
        product_id = product_id if product_id else self.product_id
        if product_id:
            results = server.DataService.findByField(
                key, "Product", 10, 0, "id", product_id,
                ["Id", "ProductName", "ProductPrice", "Description",
                 "Status", "IsPackage"]);
            return results[0] if len(results) else None

    def cancel_subscription(self, contactId, actionSetId):
        """Run the Infusionsoft cancellation action sequence for a contact."""
        results = server.ContactService.runActionSequence(key, contactId, actionSetId)
        return results

    @property
    def price(self):
        # Subscription plan price wins over the one-off product price.
        return self.plan_price if self.plan_price else self.product_price


class InfusionsoftTag(models.Model):
    ''' Infusionsoft Tag (ContactGroup) '''
    remote_id = models.TextField()
    group_category_id = models.TextField(blank=True, null=True)
    group_name = models.TextField(blank=True, null=True)
    group_description = models.TextField(blank=True, null=True)

    objects = InfusionsoftTagManager()

    def __unicode__(self):
        return u'{}'.format(self.group_name if self.group_name else u'InfusionsoftTag Object')

    def save(self, *args, **kwargs):
        remote_id = kwargs.get('remote_id') if kwargs.get('remote_id') else self.remote_id
        sync_data = self._get_sync_data(remote_id=remote_id) if remote_id else None
        if sync_data:
            # NOTE(review): this saves a *new* instance built from the sync
            # data, not `self`; the caller's object stays unsaved -- confirm
            # this is intentional.
            obj = InfusionsoftTag(**sync_data)
            return super(InfusionsoftTag, obj).save(*args, **kwargs)
        else:
            return super(InfusionsoftTag, self).save(*args, **kwargs)

    def sync(self):
        sync_data = self._get_sync_data()
        if sync_data:
            # NOTE(review): rebinding the local name `self` does not update
            # the object held by the caller.
            self = InfusionsoftTag(**sync_data)
            self.save()

    def _get_sync_data(self, remote_id=None):
        # Map a ContactGroup API record into model-field keys (None when
        # the lookup fails).
        provider_data = self._get_provider_data(remote_id)
        if provider_data:
            tag_data = dict({
                "id": self.id,
                "pk": self.pk,
                "remote_id": provider_data.get("Id"),
                "group_category_id": provider_data.get("GroupCategoryId"),
                "group_name": provider_data.get("GroupName"),
                "group_description": provider_data.get("GroupDescription"),
            })
            return tag_data

    def _get_provider_data(self, remote_id=None):
        # Returns the first ContactGroup row for the remote id, else None.
        remote_id = remote_id if remote_id else self.remote_id
        if remote_id:
            results = server.DataService.findByField(
                key, "ContactGroup", 10, 0, "id", remote_id,
                ["Id", "GroupCategoryId", "GroupName", "GroupDescription"]);
            return results[0] if len(results) else None
1.921875
2
chap 2/2_1.py
hmhuy2000/Reinforcement-Learning-SuttonBartoI
0
17986
import numpy as np import matplotlib.pyplot as plt from tqdm import trange class CFG: n = 10 mean = 0.0 variance = 1.0 t = 1000 esp = [0, 0.01, 0.05, 0.1, 0.15, 0.2] n_try = 2000 class bandit(): def __init__(self, m, v): self.m = m self.v = v self.mean = 0.0 self.cnt = 0 def reset(self): self.mean = 0.0 self.cnt = 0 def get_reward(self): reward = self.v * np.random.randn() + self.m return reward def update(self, reward): self.cnt += 1 self.mean = self.mean + 1/self.cnt * (reward - self.mean) def get_result(e): bandits = [bandit(np.random.randn(),CFG.variance) for i in range(CFG.n)] res = [] global cnt for _ in range(CFG.t): if (np.random.random()<e): choose = np.random.choice(CFG.n) else: choose = np.argmax([ban.mean for ban in bandits]) val = bandits[choose].get_reward() res.append(val) bandits[choose].update(val) # print(res) return res plt.figure(figsize=(20, 10)) for e in CFG.esp: res = np.zeros(CFG.t) for tr in trange(CFG.n_try): res += get_result(e) print(res.shape) res /= CFG.n_try # print(res) plt.plot(res, label = e) print(f'done {e}') plt.xlabel('step') plt.ylabel('average reward') plt.legend() plt.savefig('figure_2_1.png') plt.show()
2.9375
3
jetavator_databricks_client/setup.py
jetavator/jetavator_databricks
0
17987
<filename>jetavator_databricks_client/setup.py # -*- coding: utf-8 -*- import io import os from setuptools import setup, find_packages # Package metadata # ---------------- SHORT_NAME = 'databricks_client' NAME = 'jetavator_databricks_client' DESCRIPTION = ( 'Databricks support for the Jetavator engine ' 'to be installed on the client system' ) URL = 'https://github.com/jetavator/jetavator' EMAIL = '<EMAIL>' AUTHOR = '<NAME>' REQUIRES_PYTHON = '>=3.7.0' VERSION = None # What packages are required for this module to be executed? REQUIRED = [ 'jetavator>=0.1.5', 'lazy-property>=0.0.1,<1', 'databricks-cli>=0.14.1,<0.15', 'nbformat>=5.0.8>,<6', 'azure-storage-queue>=12.1.5,<13', 'azure-storage-blob>=12.7.1,<13' ] # What packages are optional? EXTRAS = { # 'some-feature': ['requirement'], } # Package setup # ------------- # Import the README and use it as the long description here = os.path.abspath(os.path.dirname(__file__)) try: with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f: long_description = '\n' + f.read() except FileNotFoundError: long_description = DESCRIPTION # Import the LICENSE with open(os.path.join(here, 'LICENSE')) as f: license_text = f.read() # Load the package's __version__.py module as a dictionary about = {} if not VERSION: with open(os.path.join(here, NAME, '__version__.py')) as f: exec(f.read(), about) else: about['__version__'] = VERSION setup( name=NAME, version=about['__version__'], description=DESCRIPTION, long_description=long_description, author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, packages=find_packages(exclude=('tests', 'docs')), install_requires=REQUIRED, extras_require=EXTRAS, include_package_data=True, license=license_text, classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.7' ], entry_points={'jetavator.plugins': f'{SHORT_NAME} = {NAME}'} )
2.140625
2
web-interface/app/application/misc/pages/misc_options/purposes_sampling.py
horvathi94/seqmeta
0
17988
from dataclasses import dataclass from .base import _MiscOptionBase from application.src.misc.sampling import PurposesOfSampling @dataclass class Editor(_MiscOptionBase): name = "Purpose of sampling" id = "purpose_of_sampling" link = "misc_bp.submit_purpose_of_sampling" description = "The reason the sample was collected " \ "<em>e.g. diagnostic testing</em>" @classmethod def get_values(cls) -> list: return PurposesOfSampling.fetch_list() @classmethod def save(cls, data: list) -> None: PurposesOfSampling.save_by_procedure(data)
2.546875
3
src/python3/sdp/scripts/FWR_Postprocess/nstx_singlechannel_analysis.py
LeiShi/Synthetic-Diagnostics-Platform
5
17989
<gh_stars>1-10 import sdp.scripts.load_nstx_exp_ref as nstx_exp #import sdp.scripts.FWR2D_NSTX_139047_Postprocess as fwrpp import pickle import numpy as np with open('/p/gkp/lshi/XGC1_NSTX_Case/FullF_XGC_ti191_output/ref_pos.pck','r') as f: ref_pos = pickle.load(f) channel = 9 nt = 50 llim = 1e-7 ulim = 1e-4 time_array = np.linspace(llim,ulim,nt) cs_mean = np.zeros((nt)) cs_median = np.zeros((nt)) cs_std = np.zeros((nt)) def cs_scan(cha = channel): global cs_mean,cs_std,time_array,cs_median time_array = np.linspace(llim,ulim,nt) cs_median = np.zeros((nt)) cs_mean = np.zeros((nt)) cs_std = np.zeros((nt)) for t in range(nt): cs_exp = nstx_exp.analyser.Coherent_over_time(0.632,0.640,1e-6,time_array[t],loader_num = cha) cs_mean[t] = np.mean(np.abs(cs_exp)) cs_median[t] = np.median(np.abs(cs_exp)) cs_std[t] = np.std(np.abs(cs_exp)) return cs_mean,cs_median,cs_std def get_coh_time(cha = channel): mean,median,std = cs_scan(cha) t_idx = np.argmax(std) print('optimal window for channel {1}= {0:.4}'.format(time_array[t_idx],cha)) return time_array[t_idx] def get_coh_median_std(cha = channel, window = None): if(window is None): window = get_coh_time(cha) cs_exp = nstx_exp.analyser.Coherent_over_time(0.632,0.640,1e-6,window,loader_num = cha) cs_ab = np.abs(cs_exp) #median = np.median(cs_ab) #print 'divider set to be {0:.4}'.format(median) # cs_tophalf = cs_ab[np.nonzero(cs_ab>median)] return np.median(cs_ab),np.std(cs_ab),cs_ab def all_channel_coh_sigs(window = None): m = np.zeros((16)) std = np.zeros((16)) cs_sig = [] for i in range(16): m[i],std[i],cs_tmp = get_coh_median_std(i,window = window) cs_sig.append(cs_tmp) return m,std,cs_sig
1.773438
2
tests/integration/suites/default/reboot.py
bularcasergiu/Anjay
0
17990
# -*- coding: utf-8 -*- # # Copyright 2017-2020 AVSystem <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from framework.lwm2m_test import * class RebootSendsResponseTest(test_suite.Lwm2mSingleServerTest): def _get_valgrind_args(self): # Reboot cannot be performed when demo is run under valgrind return [] def runTest(self): self.serv.set_timeout(timeout_s=1) # should send a response before rebooting req = Lwm2mExecute(ResPath.Device.Reboot) self.serv.send(req) self.assertMsgEqual(Lwm2mChanged.matching(req)(), self.serv.recv()) # should register after rebooting self.serv.reset() self.assertDemoRegisters(self.serv)
2.09375
2
astrodet/scarlet.py
lyf1436/astrodet
0
17991
import sys, os
import numpy as np
import scarlet
import sep
from astropy.io import ascii
import astropy.io.fits as fits
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from astropy.wcs import WCS


def write_scarlet_results(datas, observation, starlet_sources, model_frame,
                          catalog_deblended, segmentation_masks, dirpath,
                          filters, s):
    """
    Saves images in each channel, with headers for each source in image,
    such that the number of headers = number of sources detected in image.

    Parameters
    ----------
    datas: array
        array of Data objects
    observation: scarlet function
        Scarlet observation objects
    starlet_sources: list
        List of ScarletSource objects
    model_frame: scarlet function
        Image frame of source model
    catalog_deblended: list
        Deblended source detection catalog
    segmentation_masks: list
        List of segmentation mask of each object in image
    dirpath : str
        Path to HSC image file directory
    filters : list
        A list of filters for your images. Default is ['g', 'r', 'i'].
    s : str
        File basename string

    Returns
    -------
    filename : dict
        dictionary of all paths to the saved scarlet files for the
        particular dataset. Saved image and model files for each filter,
        and one total segmentation mask file for all filters.
    """

    def _make_hdr(starlet_source, cat):
        """
        Helper function to make FITS header and insert metadata.

        Parameters
        ----------
        starlet_source: starlet_source
            starlet_source object for source k
        cat: dict
            catalog data for source k

        Returns
        -------
        model_hdr : Astropy fits.Header
            FITS header for source k with catalog metadata
        """
        # For each header, assign descriptive data about each source
        # (x0, y0, w, h) in absolute floating pixel coordinates
        bbox_h = starlet_source.bbox.shape[1]
        bbox_w = starlet_source.bbox.shape[2]
        # NOTE(review): bbox_y adds floor(bbox_w/2), not floor(bbox_h/2);
        # for non-square boxes this looks like a height/width mix-up --
        # confirm intended behaviour.
        bbox_y = starlet_source.bbox.origin[1] + int(np.floor(bbox_w/2)) # y-coord of the source's center
        bbox_x = starlet_source.bbox.origin[2] + int(np.floor(bbox_w/2)) # x-coord of the source's center

        # Ellipse parameters (a, b, theta) from deblend catalog
        e_a, e_b, e_theta = cat['a'], cat['b'], cat['theta']
        ell_parm = np.concatenate((cat['a'], cat['b'], cat['theta']))

        # Add info to header
        model_hdr = fits.Header()
        model_hdr['bbox'] = ','.join(map(str, [bbox_x, bbox_y, bbox_w, bbox_h]))
        model_hdr['area'] = bbox_w * bbox_h
        model_hdr['ell_parm'] = ','.join(map(str, list(ell_parm)))
        model_hdr['cat_id'] = 1 # Category ID
        #TODO: set categor_id based on if the source is extended or not

        return model_hdr

    # Create dict for all saved filenames
    segmask_hdul = []
    model_hdul = []
    filenames = {}

    # Filter loop
    for i, f in enumerate(filters):
        # datas is HSC data array with dimensions [filters, N, N]
        f = f.upper()

        # Primary HDU is full image
        img_hdu = fits.PrimaryHDU(data=datas[i])

        # Create header entry for each scarlet source
        for k, (src, cat) in enumerate(zip(starlet_sources, catalog_deblended)):
            # Get each model, make into image
            model = starlet_sources[k].get_model(frame=model_frame)
            model = observation.render(model)
            model = src.bbox.extract_from(model)

            model_hdr = _make_hdr(starlet_sources[k], cat)

            model_hdu = fits.ImageHDU(data=model[i], header=model_hdr)
            model_primary = fits.PrimaryHDU()

            model_hdul.append(model_hdu)

        # NOTE(review): model_hdul is never cleared between filters, so each
        # later band's model file also contains every earlier band's HDUs --
        # confirm whether this accumulation is intended.
        # Write final fits file to specified location
        # Save full image and then headers per source w/ descriptive info
        save_img_hdul = fits.HDUList([img_hdu])
        save_model_hdul = fits.HDUList([model_primary, *model_hdul])

        # Save list of filenames in dict for each band
        filenames[f'img_{f}'] = os.path.join(dirpath, f'{f}-{s}_scarlet_img.fits')
        save_img_hdul.writeto(filenames[f'img_{f}'], overwrite=True)

        filenames[f'model_{f}'] = os.path.join(dirpath, f'{f}-{s}_scarlet_model.fits')
        save_model_hdul.writeto(filenames[f'model_{f}'], overwrite=True)

    # If we have segmentation mask data, save them as a separate fits file
    if segmentation_masks is not None:
        # Create header entry for each scarlet source
        for k, (src, cat) in enumerate(zip(starlet_sources, catalog_deblended)):
            segmask_hdr = _make_hdr(starlet_sources[k], cat)

            # Save each model source k in the image
            segmask_hdu = fits.ImageHDU(data=segmentation_masks[k], header=segmask_hdr)
            segmask_primary = fits.PrimaryHDU()

            segmask_hdul.append(segmask_hdu)

        save_segmask_hdul = fits.HDUList([segmask_primary, *segmask_hdul])

        # Save list of filenames in dict for each band
        # (filename uses `f` left over from the filter loop, i.e. the last band)
        filenames['segmask'] = os.path.join(dirpath, f'{f}-{s}_scarlet_segmask.fits')
        save_segmask_hdul.writeto(filenames['segmask'], overwrite=True)

    return filenames


def plot_stretch_Q(datas, stretches=[0.01,0.1,0.5,1], Qs=[1,10,5,100]):
    """
    Plots different normalizations of your image using the stretch, Q parameters.

    Parameters
    ----------
    stretches : array
        List of stretch params you want to permutate through to find optimal
        image normalization. Default is [0.01, 0.1, 0.5, 1]
    Qs : array
        List of Q params you want to permutate through to find optimal
        image normalization.
Default is [1, 10, 5, 100] Code adapted from: https://pmelchior.github.io/scarlet/tutorials/display.html Returns ------- fig : Figure object """ fig, ax = plt.subplots(len(stretches), len(Qs), figsize=(9,9)) for i, stretch in enumerate(stretches): for j, Q in enumerate(Qs): asinh = scarlet.display.AsinhMapping(minimum=0, stretch=stretch, Q=Q) # Scale the RGB channels for the image img_rgb = scarlet.display.img_to_rgb(datas, norm=asinh) ax[i][j].imshow(img_rgb) ax[i][j].set_title("Stretch {}, Q {}".format(stretch, Q)) ax[i][j].axis('off') return fig def make_catalog(datas, lvl=4, wave=True, segmentation_map=False, maskthresh=10.0, object_limit=100000): """ Creates a detection catalog by combining low and high resolution data Parameters ---------- datas: array array of Data objects lvl: int detection lvl wave: Bool set to True to use wavelet decomposition of images before combination subtract_background : Bool if you want to subtract the background and retrieve an estimate, change to True. But default is False because HSC images are already background subtracted. 
segmentation_map : Bool Whether to run sep segmentation map maskthresh : float Mask threshold for sep segmentation object_limit : int Limit on number of objects to detect in image Code adapted from https://pmelchior.github.io/scarlet/tutorials/wavelet_model.html Returns ------- catalog: sextractor catalog catalog of detected sources (use 'catalog.dtype.names' for info) bg_rms: array background level for each data set (set to None if subtract_background is False) """ if type(datas) is np.ndarray: hr_images = datas / np.sum(datas, axis=(1, 2))[:, None, None] # Detection image as the sum over all images detect_image = np.sum(hr_images, axis=0) else: data_lr, data_hr = datas # Create observations for each image # Interpolate low resolution to high resolution interp = interpolate(data_lr, data_hr) # Normalization of the interpolate low res images interp = interp / np.sum(interp, axis=(1, 2))[:, None, None] # Normalisation of the high res data hr_images = data_hr.images / np.sum(data_hr.images, axis=(1, 2))[:, None, None] # Detection image as the sum over all images detect_image = np.sum(interp, axis=0) + np.sum(hr_images, axis=0) detect_image *= np.sum(data_hr.images) if np.size(detect_image.shape) == 4: if wave: # Wavelet detection in the first three levels wave_detect = scarlet.Starlet(detect_image.mean(axis=0), lvl=5).coefficients wave_detect[:, -1, :, :] = 0 detect = scarlet.Starlet(coefficients=wave_detect).image else: # Direct detection detect = detect_image.mean(axis=0) else: if wave: wave_detect = scarlet.Starlet(detect_image).coefficients detect = wave_detect[0][0] + wave_detect[0][1] + wave_detect[0][2] else: detect = detect_image bkg = sep.Background(detect) # Set the limit on the number of sub-objects when deblending. sep.set_sub_object_limit(object_limit) # Extract detection catalog with segmentation maps! 
# Can use this to retrieve ellipse params catalog = sep.extract(detect, lvl, err=bkg.globalrms, segmentation_map=segmentation_map, maskthresh=maskthresh) # Estimate background if type(datas) is np.ndarray: bkg_rms = scarlet.wavelet.mad_wavelet(datas) else: bkg_rms = [] for data in datas: bkg_rms.append(scarlet.wavelet.mad_wavelet(data.images)) return catalog, bkg_rms def fit_scarlet_blend(starlet_sources, observation, max_iters=15, e_rel=1e-4, plot_likelihood=True): """ Creates a detection catalog by combining low and high resolution data Parameters ---------- datas: array array of Data objects Will end early if likelihood and constraints converge Returns ------- """ # Create and fit Blend model. Go for 200 iterations, # but will end early if likelihood and constraints converge print(f"Fitting Blend model.") try: starlet_blend = scarlet.Blend(starlet_sources, observation) it, logL = starlet_blend.fit(max_iters, e_rel=e_rel) print(f"Scarlet ran for {it} iterations to logL = {logL}") # Catch any exceptions like no detections except AssertionError as e1: print(f"Length of detection catalog is {len(catalog)}.") if plot_likelihood == True: scarlet.display.show_likelihood(starlet_blend) plt.show() return starlet_blend, logL def _plot_wavelet(datas): """ Helper function to plot wavelet transformation diagnostic figures with scarlet Parameters ---------- datas: array array of Data objects Returns ------- """ # Declare a starlet object (and performs the transform) Sw = scarlet.Starlet(datas, lvl=5, direct=True) # This is the starlet transform as an array w = Sw.coefficients # The inverse starlet transform of w (new object otherwise, the tranform is not used) iw = Sw.image # TODO: Clean this code up using plt.subplots() # The wavelet transform of the first slice of images in pictures lvl = w.shape[1] plt.figure(figsize=(lvl*5+5,5)) plt.suptitle('Wavelet coefficients') for i in range(lvl): plt.subplot(1, lvl, i+1) plt.title('scale' + str(i+1)) plt.imshow(w[0,i], 
cmap='inferno') plt.colorbar() plt.show() # Making sure we recover the original image plt.figure(figsize=(30,10)) plt.subplot(131) plt.title('Original image', fontsize=20) plt.imshow(datas[0], cmap='inferno') plt.colorbar() plt.subplot(132) plt.title('Starlet-reconstructed image', fontsize=20) plt.imshow(iw[0], cmap='inferno') plt.colorbar() plt.subplot(133) plt.title('Absolute difference', fontsize=20) plt.imshow((np.abs(iw[0]-datas[0])), cmap='inferno') plt.colorbar() plt.show() return def _plot_scene(starlet_sources, observation, norm, catalog, show_model=True, show_rendered=True, show_observed=True, show_residual=True, add_labels=True, add_boxes=True, add_ellipses=True): """ Helper function to plot scene with scarlet Parameters ---------- starlet_sources: List List of ScarletSource objects observation: Scarlet observation objects norm: Scarlet normalization for plotting catalog: list Source detection catalog show_model: bool Whether to show model show_rendered: bool Whether to show rendered model show_observed: bool Whether to show observed show_residual: bool Whether to show residual add_labels: bool Whether to add labels add_boxes: bool Whether to add bounding boxes to each panel add_ellipses: bool Whether to add ellipses to each panel Returns ------- fig : matplotlib Figure Figure object """ fig = scarlet.display.show_scene(starlet_sources, observation=observation, norm=norm, show_model=show_model, show_rendered=show_rendered, show_observed=show_observed, show_residual=show_residual, add_labels=add_labels, add_boxes=add_boxes) for ax in fig.axes: # Plot sep ellipse around all sources from the detection catalog if add_ellipses == True: for k, src in enumerate(catalog): # See https://sextractor.readthedocs.io/en/latest/Position.html e = Ellipse(xy=(src['x'], src['y']), width=6*src['a'], height=6*src['b'], angle=np.rad2deg(src['theta'])) e.set_facecolor('none') e.set_edgecolor('white') ax.add_artist(e) ax.axis('off') fig.subplots_adjust(wspace=0.01) plt.show() 
return fig def run_scarlet(datas, filters, stretch=0.1, Q=5, sigma_model=1, sigma_obs=5, subtract_background=False, max_chi2=5000, max_iters=15, morph_thresh=0.1, starlet_thresh=0.1, lvl=5, lvl_segmask=2, maskthresh=0.025, segmentation_map=True, plot_wavelet=False, plot_likelihood=True, plot_scene=False, plot_sources=False, add_ellipses=True, add_labels=False, add_boxes=False): """ Run <NAME>'s scarlet (https://github.com/pmelchior/scarlet) implementation for source separation. This function will create diagnostic plots, a source detection catalog, and fit a model for all sources in the observation scene (image). Parameters ---------- subtract_background : boolean Whether or not to estimate and subtract the background (often background is already subtracted) Detault is False plot_wavelet_transform : boolean Plot starlet wavelet transform and inverse transform at different scales. NOTE: Not really useful at large image sizes (> ~few hundred pixels length/height) Default is False plot_detections : boolean Plot detection catalog results. Default is False plot_likelihood : boolean Plot likelihood as function of iterations from Blend fit function. Default is True plot_full_scene : boolean Plot full scene with the model, rendered model, observation, and residual. Default is False. plot_all_sources : boolean Plot the model, rendered model, observation, and spectrum across channels for each object. WARNING: dumb to do this with a large image with many sources! Default is False plot_first_isolated_comp : boolean Plot the subtracted and isolated first (or any) starlet component. Recommended for finding a bright component. Default is False. Return ------- FITS file with... 
TODO: fill this out once I get the exact fits file output generated to Colin's liking """ norm = scarlet.display.AsinhMapping(minimum=0, stretch=stretch, Q=Q) # Generate source catalog using wavelets catalog, bg_rms_hsc = make_catalog(datas, lvl, wave=True) # If image is already background subtracted, weights are set to 1 if subtract_background: weights = np.ones_like(datas) / (bg_rms_hsc**2)[:, None, None] else: weights = np.ones_like(datas) print("Source catalog found ", len(catalog), "objects") # Plot wavelet transform at different scales if plot_wavelet == True: _plot_wavelet(datas) # Define model frame and observations: model_psf = scarlet.GaussianPSF(sigma=sigma_model) #, boxsize=100) model_frame = scarlet.Frame(datas.shape, psf=model_psf, channels=filters) observation_psf = scarlet.GaussianPSF(sigma=sigma_obs) observation = scarlet.Observation(datas, psf=observation_psf, weights=weights, channels=filters).match(model_frame) # Initialize starlet sources to be fit. Assume extended sources for all because # we are not looking at all detections in each image # TODO: Plot chi2 vs. binned size and mag. Implement conidition if chi2 > xxx then # add another component until larger sources are modeled well print("Initializing starlet sources to be fit.") # Compute radii and spread of sources Rs = np.sqrt(catalog['a']**2 + catalog['b']**2) spread = Rs/sigma_obs # Array of chi^2 residuals computed after fit on each model chi2s = np.zeros(len(catalog)) # Loop through detections in catalog starlet_sources = [] for k, src in enumerate(catalog): # Is the source compact relative to the PSF? 
if spread[k] < 1: compact = True else: compact = False # Try modeling each source as a single ExtendedSource first new_source = scarlet.ExtendedSource(model_frame, (src['y'], src['x']), observation, K=1, thresh=morph_thresh, compact=compact) starlet_sources.append(new_source) # Fit scarlet blend starlet_blend, logL = fit_scarlet_blend(starlet_sources, observation, max_iters=max_iters, plot_likelihood=plot_likelihood) print("Computing residuals.") # Compute reduced chi^2 for each rendered sources for k, src in enumerate(starlet_sources): model = src.get_model(frame=model_frame) model = observation.render(model) res = datas - model # Compute in bbox only res = src.bbox.extract_from(res) chi2s[k] = np.sum(res**2) # Replace models with poor fits with StarletSource models if chi2s[k] > max_chi2: starlet_sources[k] = scarlet.StarletSource(model_frame, (catalog["y"][k], catalog["x"][k]), observation, thresh=morph_thresh, starlet_thresh=starlet_thresh, full=False) # If any chi2 residuals are flagged, re-fit the blend with a more complex model if np.any(chi2s > max_chi2): print("Re-fitting with Starlet models for poorly-fit sources.") starlet_blend, logL = fit_scarlet_blend(starlet_sources, observation, max_iters=max_iters, plot_likelihood=plot_likelihood) # Extract the deblended catalog and update the chi2 residuals print('Extracting deblended catalog.') catalog_deblended = [] segmentation_masks = [] for k, src in enumerate(starlet_sources): model = src.get_model(frame=model_frame) model = observation.render(model) # Compute in bbox only model = src.bbox.extract_from(model) # Run sep try: cat, _ = make_catalog(model, lvl_segmask, wave=False, segmentation_map=False, maskthresh=maskthresh) except: print(f'Exception with source {k}') cat = [] #if segmentation_map == True: # cat, mask = cat # If more than 1 source is detected for some reason (e.g. 
artifacts) if len(cat) > 1: # keep the brightest idx = np.argmax([c['cflux'] for c in cat]) cat = cat[idx] # if segmentation_map == True: # mask = mask[idx] # If failed to detect model source if len(cat) == 0: # Fill with nan cat = [np.full(catalog[0].shape, np.nan, dtype=catalog.dtype)] # Append to full catalog if segmentation_map == True: # For some reason sep doesn't like these images, so do the segmask ourselves for now model_det = np.array(model[0,:,:]) mask = np.zeros_like(model_det) mask[model_det>maskthresh] = 1 segmentation_masks.append(mask) #plt.imshow(mask) #plt.show() catalog_deblended.append(cat) # Combine catalog named array catalog_deblended = np.vstack(catalog_deblended) # Plot scene: rendered model, observations, and residuals if plot_scene == True: _plot_scene(starlet_sources, observation, norm, catalog, show_model=False, show_rendered=True, show_observed=True, show_residual=True, add_labels=add_labels, add_boxes=add_boxes, add_ellipses=add_ellipses) # Plot each for each source if plot_sources == True: scarlet.display.show_sources(starlet_sources, observation, norm=norm, show_rendered=True, show_observed=True, add_boxes=add_boxes) plt.show() return observation, starlet_sources, model_frame, catalog, catalog_deblended, segmentation_masks
2.53125
3
serve.py
haiyoumeiyou/cherrybrigde
0
17992
<reponame>haiyoumeiyou/cherrybrigde<gh_stars>0 from application import bootstrap bootstrap() if __name__=='__main__': import cherrypy cherrypy.engine.signals.subscribe() cherrypy.engine.start() cherrypy.engine.block()
1.28125
1
ooobuild/lo/smarttags/x_range_based_smart_tag_recognizer.py
Amourspirit/ooo_uno_tmpl
0
17993
# coding: utf-8 # # Copyright 2022 :Barry-Thomas-Paul: Moss # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http: // www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Interface Class # this is a auto generated file generated by Cheetah # Libre Office Version: 7.3 # Namespace: com.sun.star.smarttags import typing from abc import abstractmethod from ..lang.x_initialization import XInitialization as XInitialization_d46c0cca if typing.TYPE_CHECKING: from ..frame.x_controller import XController as XController_b00e0b8f from .smart_tag_recognizer_mode import SmartTagRecognizerMode as SmartTagRecognizerMode_9179119e from ..text.x_text_markup import XTextMarkup as XTextMarkup_a5d60b3a from ..text.x_text_range import XTextRange as XTextRange_9a910ab7 class XRangeBasedSmartTagRecognizer(XInitialization_d46c0cca): """ provides access to a range based smart tag recognizer. See Also: `API XRangeBasedSmartTagRecognizer <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1smarttags_1_1XRangeBasedSmartTagRecognizer.html>`_ """ __ooo_ns__: str = 'com.sun.star.smarttags' __ooo_full_ns__: str = 'com.sun.star.smarttags.XRangeBasedSmartTagRecognizer' __ooo_type_name__: str = 'interface' __pyunointerface__: str = 'com.sun.star.smarttags.XRangeBasedSmartTagRecognizer' @abstractmethod def recognizeTextRange(self, xRange: 'XTextRange_9a910ab7', eDataType: 'SmartTagRecognizerMode_9179119e', xTextMarkup: 'XTextMarkup_a5d60b3a', aApplicationName: str, xController: 'XController_b00e0b8f') -> None: """ recognizes smart tags. 
""" __all__ = ['XRangeBasedSmartTagRecognizer']
1.835938
2
apprest/plugins/icat/views/ICAT.py
acampsm/calipsoplus-backend
4
17994
from rest_framework import status from rest_framework.authentication import SessionAuthentication, BasicAuthentication from rest_framework.permissions import IsAuthenticated from rest_framework.views import APIView from apprest.plugins.icat.helpers.complex_encoder import JsonResponse from apprest.plugins.icat.services.ICAT import ICATService class GetInvestigationUsers(APIView): """ get: Return: Users involved in an investigation """ authentication_classes = (SessionAuthentication, BasicAuthentication) permission_classes = (IsAuthenticated,) pagination_class = None def get(self, request, *args, **kwargs): service = ICATService() investigation_id = self.kwargs.get('investigation_id') investigation_users = service.get_users_involved_in_investigation(investigation_id, request) return JsonResponse(investigation_users, status=status.HTTP_200_OK)
2.171875
2
python/phevaluator/table_tests/test_hashtable8.py
StTronn/PokerHandEvaluator
1
17995
import unittest from table_tests.utils import BaseTestNoFlushTable from evaluator.hashtable8 import NO_FLUSH_8 class TestNoFlush8Table(BaseTestNoFlushTable): TOCOMPARE = NO_FLUSH_8 TABLE = [0] * len(TOCOMPARE) VISIT = [0] * len(TOCOMPARE) NUM_CARDS = 8 def test_noflush8_table(self): self.assertListEqual(self.TABLE, self.TOCOMPARE) if __name__ == "__main__": unittest.main()
3.09375
3
Marcelina_Skoczylas_praca_domowa_3.py
marcelinaskoczylas/python_wprowadzenie_warsztaty_2021
1
17996
<gh_stars>1-10 #zadanie 1 i=1 j=1 k=1 ciag=[1,1] while len(ciag)<50: k=i+j j=i i=k ciag.append(k) print(ciag) #zadanie 2 wpisane=str(input("Proszę wpisać dowolne słowa po przecinku ")) zmienne=wpisane.split(",") def funkcja(*args): '''Funkcja sprawdza długość słów i usuwa te, które są za krótkie''' lista=[] lista2=[] wartosc = int(input("Proszę wpisać jakąś wartość ")) for arg in args: lista.append(arg) dlugosc=len(arg) if len(arg)>wartosc: lista2.append(arg) procenty=(len(lista2)/len(lista))*100 return procenty,lista,lista2 print(funkcja(zmienne)) #zadanie 3 liczby=list(input("Proszę wpisać liczby po przecinku: ")) unikalna_lista=[] n=1 a=liczby[n] unikalna_lista.append(liczby[0]) while n<len(liczby): if liczby[n]!=unikalna_lista[n-1]: unikalna_lista.append(a) n+=1
3.171875
3
rh_pathfinding/src/rh_pathfinding/engine/geometry/obstacle/lineFinder/__init__.py
RhinohawkUAV/rh_ros
4
17997
<reponame>RhinohawkUAV/rh_ros from linePathSegment import LinePathSegment from lineSegmentFinder import LineSegmentFinder
1.085938
1
days/day01/part2.py
jaredbancroft/aoc2021
0
17998
<filename>days/day01/part2.py<gh_stars>0 from helpers import inputs def solution(day): depths = inputs.read_to_list(f"inputs/{day}.txt") part2_total = 0 for index, depth in enumerate(depths): if index - 3 >= 0: current_window = ( int(depth) + int(depths[index - 1]) + int(depths[index - 2]) ) previous_window = ( int(depths[index - 1]) + int(depths[index - 2]) + int(depths[index - 3]) ) diff = current_window - previous_window if diff > 0: part2_total += 1 return f"Day 01 Part 2 Total Depth Increase: {part2_total}"
3.015625
3
src/unicon/plugins/nxos/n5k/service_statements.py
TestingBytes/unicon.plugins
18
17999
<gh_stars>10-100 from unicon.eal.dialogs import Statement from .service_patterns import NxosN5kReloadPatterns from unicon.plugins.nxos.service_statements import (login_stmt, password_stmt, enable_vdc, admin_password) from unicon.plugins.generic.service_statements import (save_env, auto_provision, auto_install_dialog, setup_dialog, confirm_reset, press_enter, confirm_config, module_reload, save_module_cfg, secure_passwd_std, ) # for nxos n5k single rp reload pat = NxosN5kReloadPatterns() reload_confirm_nxos = Statement(pattern=pat.reload_confirm_nxos, action='sendline(y)', loop_continue=True, continue_timer=False) # reload statement list for nxos n5k single-rp nxos_reload_statement_list = [save_env, confirm_reset, reload_confirm_nxos, press_enter, login_stmt, password_stmt, confirm_config, setup_dialog, auto_install_dialog, module_reload, save_module_cfg, secure_passwd_std, admin_password, auto_provision, enable_vdc]
1.765625
2