id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
1645413
|
from os import listdir
from pathlib import Path
from typing import Optional, List
import hydra
import wandb
from omegaconf import DictConfig
from pytorch_lightning import LightningDataModule, LightningModule
from pytorch_lightning.loggers import LightningLoggerBase, WandbLogger
from tqdm import tqdm
import numpy as np
from src.callbacks.wandb_callbacks import get_wandb_logger
from src.evaluation.separate import separate_with_onnx, separate_with_ckpt
from src.utils import utils
from src.utils.utils import load_wav, sdr
log = utils.get_logger(__name__)
def evaluation(config: DictConfig):
    """Evaluate a separation-model checkpoint on every track of a dataset split.

    For each track directory under ``<data_dir>/<split>`` this loads the
    mixture and the target stem, separates the mixture with the checkpointed
    model, logs the per-track SDR to every configured logger, and finally
    logs the mean SDR over all tracks.

    Args:
        config: Hydra config providing ``split``, ``data_dir``, ``model``,
            ``ckpt_dir``, ``ckpt_path``, ``batch_size``, ``device``, an
            optional ``logger`` section and ``wandb_api_key``.
    """
    assert config.split in ['train', 'valid', 'test']
    data_dir = Path(config.get('data_dir')).joinpath(config['split'])
    assert data_dir.exists()

    # Init Lightning loggers
    loggers: List[LightningLoggerBase] = []
    if "logger" in config:
        for _, lg_conf in config.logger.items():
            if "_target_" in lg_conf:
                log.info(f"Instantiating logger <{lg_conf._target_}>")
                loggers.append(hydra.utils.instantiate(lg_conf))
    if any(isinstance(logger, WandbLogger) for logger in loggers):
        utils.wandb_login(key=config.wandb_api_key)

    model = hydra.utils.instantiate(config.model)
    target_name = model.target_name
    ckpt_path = Path(config.ckpt_dir).joinpath(config.ckpt_path)

    scores = []
    track_names = sorted(listdir(data_dir))
    num_tracks = len(track_names)
    for i, track_name in tqdm(enumerate(track_names), total=num_tracks):
        track = data_dir.joinpath(track_name)
        mixture = load_wav(track.joinpath('mixture.wav'))
        target = load_wav(track.joinpath(target_name + '.wav'))
        target_hat = separate_with_ckpt(config.batch_size, model, ckpt_path, mixture, config.device)
        score = sdr(target_hat, target)
        scores.append(score)
        for logger in loggers:
            logger.log_metrics({'sdr': score}, i)
        for wandb_logger in [logger for logger in loggers if isinstance(logger, WandbLogger)]:
            # Log a 6-second excerpt (3 s either side of the middle, 44.1 kHz)
            # of the estimate; use a fresh name so the track path above is not
            # clobbered (the original reused `track` here).
            mid = mixture.shape[-1] // 2
            excerpt = target_hat[:, mid - 44100 * 3:mid + 44100 * 3]
            wandb_logger.experiment.log(
                {f'track={i}_target={target_name}': [wandb.Audio(excerpt.T, sample_rate=44100)]})

    for logger in loggers:
        if num_tracks > 0:  # guard: an empty split would divide by zero
            logger.log_metrics({'mean_sdr_' + target_name: sum(scores) / num_tracks})
        logger.close()
    if any(isinstance(logger, WandbLogger) for logger in loggers):
        wandb.finish()
|
1645445
|
import logging
from enum import Enum
from struct import Struct
# DIR entries of most games share a 20-byte layout: 12-byte name + two u32s.
_default_struct = Struct('<12sII')


class G(Enum):
    """Known Argonaut-engine games, each member carrying its archive metadata.

    Every member's value is a tuple of: display title, release year, DAT
    archive file name, DIR index file name and the ``Struct`` layout of one
    DIR entry (the last two are ``None`` for games without a DIR file).
    """

    def __init__(self, title: str, release_year: int, dat_filename: str = None, dir_filename: str = None,
                 dir_struct: Struct = None):
        # Enum invokes __init__ once per member, with the value tuple unpacked.
        self.title = title
        self.release_year = release_year
        self.dat_filename = dat_filename
        self.dir_filename = dir_filename
        self.dir_struct = dir_struct

    CROC_1_PS1 = ("Croc 1 PS1", 1997, 'CROCFILE.1', 'CROCFILE.DIR', Struct('<12sII4x'))
    CROC_2_PS1 = ("Croc 2 PS1", 1999, 'CROCII.DAT', 'CROCII.DIR', _default_struct)
    CROC_2_DEMO_PS1 = ("Croc 2 Demo PS1", 1999, 'CROCII.DAT', 'CROCII.DIR', _default_struct)
    CROC_2_DEMO_PS1_DUMMY = ("Croc 2 Demo PS1 (Dummy)", 1999, 'DUMMY.DAT', None, None)
    HARRY_POTTER_1_PS1 = ("Harry Potter 1 PS1", 2001, 'POTTER.DAT', 'POTTER.DIR', _default_struct)
    HARRY_POTTER_2_PS1 = ("Harry Potter 2 PS1", 2002, 'POTTER.DAT', 'POTTER.DIR', _default_struct)
class Configuration:
    """Run-wide settings: target game, warning policy and verbosity."""

    def __init__(self, game: G, ignore_warnings=False, debug=False):
        self.game = game
        # If False, warnings stop program execution
        self.ignore_warnings = ignore_warnings
        # Constructing a Configuration configures the root logger as a side effect.
        log_level = logging.DEBUG if debug else logging.WARNING
        logging.basicConfig(format='%(message)s', level=log_level)
        self.debug = debug
# Games whose archives the tools recognise at all.
SUPPORTED_GAMES = (G.CROC_1_PS1, G.CROC_2_PS1, G.CROC_2_DEMO_PS1, G.CROC_2_DEMO_PS1_DUMMY, G.HARRY_POTTER_1_PS1,
                   G.HARRY_POTTER_2_PS1)
# Croc 1 parsing is not supported, but it can be sliced
PARSABLE_GAMES = (G.CROC_2_PS1, G.CROC_2_DEMO_PS1, G.CROC_2_DEMO_PS1_DUMMY, G.HARRY_POTTER_1_PS1, G.HARRY_POTTER_2_PS1)
SLICEABLE_GAMES = SUPPORTED_GAMES
# Banner lines embedded in generated Wavefront OBJ (text) / WAV (bytes) output files.
wavefront_header = "# Generated by ps1_argonaut reverse tools: https://github.com/OverSurge/PS1-Argonaut-Reverse\n"
wav_header = b"Generated by ps1_argonaut reverse tools: https://github.com/OverSurge/PS1-Argonaut-Reverse"
|
1645470
|
from aetherling.modules.reduce import DefineReduceSequential, DefineReduceParallelWithIdentity, renameCircuitForReduce
from aetherling.modules.register_any_type import DefineRegisterAnyType
from aetherling.modules.term_any_type import TermAnyType
from aetherling.modules.noop import DefineNoop
from magma.backend.coreir_ import CoreIRBackend
from magma.bitutils import *
from coreir.context import *
from magma.simulator.coreir_simulator import CoreIRSimulator
import coreir
from magma.scope import Scope
from mantle.coreir.arith import *
from mantle.coreir.logic import *
from mantle.coreir.compare import *
from mantle.coreir import DefineCoreirConst
from mantle.coreir.LUT import *
from aetherling.modules.upsample import *
from aetherling.modules.downsample import *
from aetherling.modules.reduce import *
from aetherling.modules.native_linebuffer.two_dimensional_native_linebuffer import DefineTwoDimensionalLineBuffer
# Flattened dataflow graph (auto-generated style) implementing a 256x256 ->
# 32x32 image downsample at 16 input pixels per clock. Three cascaded stages
# each perform a 2x2 box filter: a 2D line buffer windows the stream into 2x2
# neighbourhoods, each pixel is multiplied by a constant weight, the four
# products are reduce-added, and the sum is divided by 4. Parallelism shrinks
# 16 -> 4 -> 1 px/clk across the stages as the image shrinks.
# Circuit interface: 16 8-bit pixel inputs, 1 8-bit pixel output, plus a
# valid/ready handshake in each direction and a clock-enable.
args = ['I0', Array[8, In(Bit)], 'I1', Array[8, In(Bit)], 'I2', Array[8, In(Bit)], 'I3', Array[8, In(Bit)], 'I4', Array[8, In(Bit)], 'I5', Array[8, In(Bit)], 'I6', Array[8, In(Bit)], 'I7', Array[8, In(Bit)], 'I8', Array[8, In(Bit)], 'I9', Array[8, In(Bit)], 'I10', Array[8, In(Bit)], 'I11', Array[8, In(Bit)], 'I12', Array[8, In(Bit)], 'I13', Array[8, In(Bit)], 'I14', Array[8, In(Bit)], 'I15', Array[8, In(Bit)], 'O0', Array[8, Out(Bit)], 'valid_data_in', In(Bit), 'ready_data_in', Out(Bit), 'valid_data_out', Out(Bit), 'ready_data_out', In(Bit), ] + ClockInterface(has_ce=True)
downsample_256x256_to_32x32_16px_in_per_clk = DefineCircuit('downsample_256x256_to_32x32_16px_in_per_clk_Circuit', *args)
# --- Stage 1: 256x256 line buffer producing four 2x2 windows per clock ------
magmaInstance0 = DefineTwoDimensionalLineBuffer(Array[8, In(Bit)], 16, 1, 2, 2, 256, 256, 2, 2, 0, 0)()
magmaInstance1 = DefineNoop(DefineTwoDimensionalLineBuffer(Array[8, In(Bit)], 16, 1, 2, 2, 256, 256, 2, 2, 0, 0))()
# Constant weights 1..4, one copy per parallel window (presumably a weighted
# 2x2 kernel from the generator -- TODO confirm against the generator source).
magmaInstance2 = DefineCoreirConst(8, 1)()
magmaInstance3 = DefineCoreirConst(8, 1)()
magmaInstance4 = DefineCoreirConst(8, 1)()
magmaInstance5 = DefineCoreirConst(8, 1)()
magmaInstance6 = DefineCoreirConst(8, 2)()
magmaInstance7 = DefineCoreirConst(8, 2)()
magmaInstance8 = DefineCoreirConst(8, 2)()
magmaInstance9 = DefineCoreirConst(8, 2)()
magmaInstance13 = DefineCoreirConst(8, 3)()
magmaInstance14 = DefineCoreirConst(8, 3)()
magmaInstance15 = DefineCoreirConst(8, 3)()
magmaInstance16 = DefineCoreirConst(8, 3)()
magmaInstance17 = DefineCoreirConst(8, 4)()
magmaInstance18 = DefineCoreirConst(8, 4)()
magmaInstance19 = DefineCoreirConst(8, 4)()
magmaInstance20 = DefineCoreirConst(8, 4)()
# Pass the four 2x2 windows through a no-op (keeps generated graph structure).
wire(magmaInstance0.O[0][0][0], magmaInstance1.in_O[0][0][0])
wire(magmaInstance0.O[0][0][1], magmaInstance1.in_O[0][0][1])
wire(magmaInstance0.O[0][1][0], magmaInstance1.in_O[0][1][0])
wire(magmaInstance0.O[0][1][1], magmaInstance1.in_O[0][1][1])
wire(magmaInstance0.O[1][0][0], magmaInstance1.in_O[1][0][0])
wire(magmaInstance0.O[1][0][1], magmaInstance1.in_O[1][0][1])
wire(magmaInstance0.O[1][1][0], magmaInstance1.in_O[1][1][0])
wire(magmaInstance0.O[1][1][1], magmaInstance1.in_O[1][1][1])
wire(magmaInstance0.O[2][0][0], magmaInstance1.in_O[2][0][0])
wire(magmaInstance0.O[2][0][1], magmaInstance1.in_O[2][0][1])
wire(magmaInstance0.O[2][1][0], magmaInstance1.in_O[2][1][0])
wire(magmaInstance0.O[2][1][1], magmaInstance1.in_O[2][1][1])
wire(magmaInstance0.O[3][0][0], magmaInstance1.in_O[3][0][0])
wire(magmaInstance0.O[3][0][1], magmaInstance1.in_O[3][0][1])
wire(magmaInstance0.O[3][1][0], magmaInstance1.in_O[3][1][0])
wire(magmaInstance0.O[3][1][1], magmaInstance1.in_O[3][1][1])
# 16 multipliers: each of the 4 windows' 4 pixels times its constant weight.
magmaInstance30 = DefineCoreirMul(8)()
magmaInstance31 = DefineCoreirMul(8)()
magmaInstance32 = DefineCoreirMul(8)()
magmaInstance33 = DefineCoreirMul(8)()
magmaInstance34 = DefineCoreirMul(8)()
magmaInstance35 = DefineCoreirMul(8)()
magmaInstance36 = DefineCoreirMul(8)()
magmaInstance37 = DefineCoreirMul(8)()
magmaInstance38 = DefineCoreirMul(8)()
magmaInstance39 = DefineCoreirMul(8)()
magmaInstance40 = DefineCoreirMul(8)()
magmaInstance41 = DefineCoreirMul(8)()
magmaInstance42 = DefineCoreirMul(8)()
magmaInstance43 = DefineCoreirMul(8)()
magmaInstance44 = DefineCoreirMul(8)()
magmaInstance45 = DefineCoreirMul(8)()
wire(magmaInstance1.O[0][0][0], magmaInstance30.I0)
wire(magmaInstance2.O, magmaInstance30.I1)
wire(magmaInstance1.O[0][0][1], magmaInstance31.I0)
wire(magmaInstance6.O, magmaInstance31.I1)
wire(magmaInstance1.O[0][1][0], magmaInstance32.I0)
wire(magmaInstance13.O, magmaInstance32.I1)
wire(magmaInstance1.O[0][1][1], magmaInstance33.I0)
wire(magmaInstance17.O, magmaInstance33.I1)
wire(magmaInstance1.O[1][0][0], magmaInstance34.I0)
wire(magmaInstance3.O, magmaInstance34.I1)
wire(magmaInstance1.O[1][0][1], magmaInstance35.I0)
wire(magmaInstance7.O, magmaInstance35.I1)
wire(magmaInstance1.O[1][1][0], magmaInstance36.I0)
wire(magmaInstance14.O, magmaInstance36.I1)
wire(magmaInstance1.O[1][1][1], magmaInstance37.I0)
wire(magmaInstance18.O, magmaInstance37.I1)
wire(magmaInstance1.O[2][0][0], magmaInstance38.I0)
wire(magmaInstance4.O, magmaInstance38.I1)
wire(magmaInstance1.O[2][0][1], magmaInstance39.I0)
wire(magmaInstance8.O, magmaInstance39.I1)
wire(magmaInstance1.O[2][1][0], magmaInstance40.I0)
wire(magmaInstance15.O, magmaInstance40.I1)
wire(magmaInstance1.O[2][1][1], magmaInstance41.I0)
wire(magmaInstance19.O, magmaInstance41.I1)
wire(magmaInstance1.O[3][0][0], magmaInstance42.I0)
wire(magmaInstance5.O, magmaInstance42.I1)
wire(magmaInstance1.O[3][0][1], magmaInstance43.I0)
wire(magmaInstance9.O, magmaInstance43.I1)
wire(magmaInstance1.O[3][1][0], magmaInstance44.I0)
wire(magmaInstance16.O, magmaInstance44.I1)
wire(magmaInstance1.O[3][1][1], magmaInstance45.I0)
wire(magmaInstance20.O, magmaInstance45.I1)
# Four 4-input reduce-add trees (identity element 0), one per window.
magmaInstance46 = DefineReduceParallelWithIdentity(4, renameCircuitForReduce(DefineAdd(8)))()
magmaInstance47 = DefineReduceParallelWithIdentity(4, renameCircuitForReduce(DefineAdd(8)))()
magmaInstance48 = DefineReduceParallelWithIdentity(4, renameCircuitForReduce(DefineAdd(8)))()
magmaInstance49 = DefineReduceParallelWithIdentity(4, renameCircuitForReduce(DefineAdd(8)))()
magmaInstance46_identityGen = DefineCoreirConst(8, 0)()
wire(magmaInstance46_identityGen.O, magmaInstance46.I.identity)
wire(magmaInstance46_identityGen.O, magmaInstance47.I.identity)
wire(magmaInstance46_identityGen.O, magmaInstance48.I.identity)
wire(magmaInstance46_identityGen.O, magmaInstance49.I.identity)
wire(magmaInstance30.O, magmaInstance46.I.data[0])
wire(magmaInstance31.O, magmaInstance46.I.data[1])
wire(magmaInstance32.O, magmaInstance46.I.data[2])
wire(magmaInstance33.O, magmaInstance46.I.data[3])
wire(magmaInstance34.O, magmaInstance47.I.data[0])
wire(magmaInstance35.O, magmaInstance47.I.data[1])
wire(magmaInstance36.O, magmaInstance47.I.data[2])
wire(magmaInstance37.O, magmaInstance47.I.data[3])
wire(magmaInstance38.O, magmaInstance48.I.data[0])
wire(magmaInstance39.O, magmaInstance48.I.data[1])
wire(magmaInstance40.O, magmaInstance48.I.data[2])
wire(magmaInstance41.O, magmaInstance48.I.data[3])
wire(magmaInstance42.O, magmaInstance49.I.data[0])
wire(magmaInstance43.O, magmaInstance49.I.data[1])
wire(magmaInstance44.O, magmaInstance49.I.data[2])
wire(magmaInstance45.O, magmaInstance49.I.data[3])
# Divide each window sum by 4 to complete the 2x2 average.
magmaInstance50 = DefineNoop(DefineCoreirConst(8, 0))()
magmaInstance51 = DefineNoop(DefineCoreirConst(8, 0))()
magmaInstance52 = DefineNoop(DefineCoreirConst(8, 0))()
magmaInstance53 = DefineNoop(DefineCoreirConst(8, 0))()
magmaInstance54 = DefineCoreirConst(8, 4)()
magmaInstance55 = DefineCoreirConst(8, 4)()
magmaInstance56 = DefineCoreirConst(8, 4)()
magmaInstance57 = DefineCoreirConst(8, 4)()
wire(magmaInstance46.out, magmaInstance50.in_O)
wire(magmaInstance47.out, magmaInstance51.in_O)
wire(magmaInstance48.out, magmaInstance52.in_O)
wire(magmaInstance49.out, magmaInstance53.in_O)
magmaInstance61 = DefineCoreirUDiv(8)()
magmaInstance62 = DefineCoreirUDiv(8)()
magmaInstance63 = DefineCoreirUDiv(8)()
magmaInstance64 = DefineCoreirUDiv(8)()
wire(magmaInstance50.O, magmaInstance61.I0)
wire(magmaInstance54.O, magmaInstance61.I1)
wire(magmaInstance51.O, magmaInstance62.I0)
wire(magmaInstance55.O, magmaInstance62.I1)
wire(magmaInstance52.O, magmaInstance63.I0)
wire(magmaInstance56.O, magmaInstance63.I1)
wire(magmaInstance53.O, magmaInstance64.I0)
wire(magmaInstance57.O, magmaInstance64.I1)
# --- Stage 2: 128x128 line buffer, 4 px/clk, same mul/reduce/div pipeline ---
magmaInstance65 = DefineTwoDimensionalLineBuffer(Array[8, In(Bit)], 4, 1, 2, 2, 128, 128, 2, 2, 0, 0)()
wire(magmaInstance61.O, magmaInstance65.I[0][0])
wire(magmaInstance62.O, magmaInstance65.I[0][1])
wire(magmaInstance63.O, magmaInstance65.I[0][2])
wire(magmaInstance64.O, magmaInstance65.I[0][3])
magmaInstance66 = DefineNoop(DefineTwoDimensionalLineBuffer(Array[8, In(Bit)], 4, 1, 2, 2, 128, 128, 2, 2, 0, 0))()
magmaInstance67 = DefineCoreirConst(8, 1)()
magmaInstance68 = DefineCoreirConst(8, 2)()
magmaInstance69 = DefineCoreirConst(8, 3)()
magmaInstance70 = DefineCoreirConst(8, 4)()
wire(magmaInstance65.O[0][0][0], magmaInstance66.in_O[0][0][0])
wire(magmaInstance65.O[0][0][1], magmaInstance66.in_O[0][0][1])
wire(magmaInstance65.O[0][1][0], magmaInstance66.in_O[0][1][0])
wire(magmaInstance65.O[0][1][1], magmaInstance66.in_O[0][1][1])
magmaInstance71 = DefineCoreirMul(8)()
magmaInstance72 = DefineCoreirMul(8)()
magmaInstance73 = DefineCoreirMul(8)()
magmaInstance74 = DefineCoreirMul(8)()
wire(magmaInstance66.O[0][0][0], magmaInstance71.I0)
wire(magmaInstance67.O, magmaInstance71.I1)
wire(magmaInstance66.O[0][0][1], magmaInstance72.I0)
wire(magmaInstance68.O, magmaInstance72.I1)
wire(magmaInstance66.O[0][1][0], magmaInstance73.I0)
wire(magmaInstance69.O, magmaInstance73.I1)
wire(magmaInstance66.O[0][1][1], magmaInstance74.I0)
wire(magmaInstance70.O, magmaInstance74.I1)
magmaInstance75 = DefineReduceParallelWithIdentity(4, renameCircuitForReduce(DefineAdd(8)))()
magmaInstance75_identityGen = DefineCoreirConst(8, 0)()
wire(magmaInstance75_identityGen.O, magmaInstance75.I.identity)
wire(magmaInstance71.O, magmaInstance75.I.data[0])
wire(magmaInstance72.O, magmaInstance75.I.data[1])
wire(magmaInstance73.O, magmaInstance75.I.data[2])
wire(magmaInstance74.O, magmaInstance75.I.data[3])
magmaInstance76 = DefineNoop(DefineCoreirConst(8, 0))()
magmaInstance77 = DefineCoreirConst(8, 4)()
wire(magmaInstance75.out, magmaInstance76.in_O)
magmaInstance78 = DefineCoreirUDiv(8)()
wire(magmaInstance76.O, magmaInstance78.I0)
wire(magmaInstance77.O, magmaInstance78.I1)
# --- Stage 3: 64x64 line buffer, 1 px/clk, final mul/reduce/div -------------
magmaInstance79 = DefineTwoDimensionalLineBuffer(Array[8, In(Bit)], 1, 1, 2, 2, 64, 64, 2, 2, 0, 0)()
wire(magmaInstance78.O, magmaInstance79.I[0][0])
magmaInstance80 = DefineNoop(DefineTwoDimensionalLineBuffer(Array[8, In(Bit)], 1, 1, 2, 2, 64, 64, 2, 2, 0, 0))()
magmaInstance81 = DefineCoreirConst(8, 1)()
magmaInstance82 = DefineCoreirConst(8, 2)()
magmaInstance83 = DefineCoreirConst(8, 3)()
magmaInstance84 = DefineCoreirConst(8, 4)()
wire(magmaInstance79.O[0][0][0], magmaInstance80.in_O[0][0][0])
wire(magmaInstance79.O[0][0][1], magmaInstance80.in_O[0][0][1])
wire(magmaInstance79.O[0][1][0], magmaInstance80.in_O[0][1][0])
wire(magmaInstance79.O[0][1][1], magmaInstance80.in_O[0][1][1])
magmaInstance85 = DefineCoreirMul(8)()
magmaInstance86 = DefineCoreirMul(8)()
magmaInstance87 = DefineCoreirMul(8)()
magmaInstance88 = DefineCoreirMul(8)()
wire(magmaInstance80.O[0][0][0], magmaInstance85.I0)
wire(magmaInstance81.O, magmaInstance85.I1)
wire(magmaInstance80.O[0][0][1], magmaInstance86.I0)
wire(magmaInstance82.O, magmaInstance86.I1)
wire(magmaInstance80.O[0][1][0], magmaInstance87.I0)
wire(magmaInstance83.O, magmaInstance87.I1)
wire(magmaInstance80.O[0][1][1], magmaInstance88.I0)
wire(magmaInstance84.O, magmaInstance88.I1)
magmaInstance89 = DefineReduceParallelWithIdentity(4, renameCircuitForReduce(DefineAdd(8)))()
magmaInstance89_identityGen = DefineCoreirConst(8, 0)()
wire(magmaInstance89_identityGen.O, magmaInstance89.I.identity)
wire(magmaInstance85.O, magmaInstance89.I.data[0])
wire(magmaInstance86.O, magmaInstance89.I.data[1])
wire(magmaInstance87.O, magmaInstance89.I.data[2])
wire(magmaInstance88.O, magmaInstance89.I.data[3])
magmaInstance90 = DefineNoop(DefineCoreirConst(8, 0))()
magmaInstance91 = DefineCoreirConst(8, 4)()
wire(magmaInstance89.out, magmaInstance90.in_O)
magmaInstance92 = DefineCoreirUDiv(8)()
wire(magmaInstance90.O, magmaInstance92.I0)
wire(magmaInstance91.O, magmaInstance92.I1)
# --- Top-level I/O: 16 input pixels into stage 1, final quotient to O0 ------
wire(downsample_256x256_to_32x32_16px_in_per_clk.I0, magmaInstance0.I[0][0])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I1, magmaInstance0.I[0][1])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I2, magmaInstance0.I[0][2])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I3, magmaInstance0.I[0][3])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I4, magmaInstance0.I[0][4])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I5, magmaInstance0.I[0][5])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I6, magmaInstance0.I[0][6])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I7, magmaInstance0.I[0][7])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I8, magmaInstance0.I[0][8])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I9, magmaInstance0.I[0][9])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I10, magmaInstance0.I[0][10])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I11, magmaInstance0.I[0][11])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I12, magmaInstance0.I[0][12])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I13, magmaInstance0.I[0][13])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I14, magmaInstance0.I[0][14])
wire(downsample_256x256_to_32x32_16px_in_per_clk.I15, magmaInstance0.I[0][15])
wire(downsample_256x256_to_32x32_16px_in_per_clk.O0, magmaInstance92.O)
# Handshake: upstream ready mirrors stage 1; downstream valid mirrors stage 3.
wire(magmaInstance0.ready, downsample_256x256_to_32x32_16px_in_per_clk.ready_data_in)
wire(magmaInstance79.valid, downsample_256x256_to_32x32_16px_in_per_clk.valid_data_out)
# Each line buffer is clock-enabled only when its producer has valid data,
# its consumer is ready, and the global CE is asserted.
wire(downsample_256x256_to_32x32_16px_in_per_clk.valid_data_in & magmaInstance65.ready & bit(downsample_256x256_to_32x32_16px_in_per_clk.CE), magmaInstance0.CE)
wire(magmaInstance0.valid & magmaInstance79.ready & bit(downsample_256x256_to_32x32_16px_in_per_clk.CE), magmaInstance65.CE)
wire(magmaInstance65.valid & downsample_256x256_to_32x32_16px_in_per_clk.ready_data_out & bit(downsample_256x256_to_32x32_16px_in_per_clk.CE), magmaInstance79.CE)
# Terminate the otherwise-unused CE port so magma does not flag it unwired.
ceTerm = TermAnyType(Enable)
wire(ceTerm.I, downsample_256x256_to_32x32_16px_in_per_clk.CE)
EndCircuit()
|
1645482
|
from __future__ import annotations
import math
import random
from collections import deque
from typing import List, Optional, Tuple
from tqdm import tqdm
from utttpy.game.action import Action
from utttpy.game.ultimate_tic_tac_toe import UltimateTicTacToe
class MonteCarloTreeSearch:
    """MCTS driver for Ultimate Tic-Tac-Toe.

    Owns a search ``Tree`` and runs selection / expansion / playout /
    backpropagation simulations from its root, then exposes the accumulated
    statistics for action selection.
    """

    def __init__(
        self,
        uttt: UltimateTicTacToe,
        num_simulations: int,
        exploration_strength: float,
    ):
        # The root node wraps the current game state; statistics accumulate
        # across run() calls and across synchronize()d moves.
        self.tree = Tree(root=Node(uttt=uttt))
        self.num_simulations = num_simulations  # total visit budget for the root
        self.exploration_strength = exploration_strength  # UCT exploration constant

    def run(self, progress_bar: bool = False) -> None:
        """Run simulations until the root has ``num_simulations`` visits.

        Visits already accumulated (e.g. a re-rooted subtree after
        synchronize()) count against the budget, so run() is incremental.
        """
        num_run_simulations = self.num_simulations - self.tree.root.num_visits
        for i in tqdm(range(num_run_simulations), disable=not progress_bar):
            simulate(
                node=self.tree.root,
                exploration_strength=self.exploration_strength,
            )

    def get_evaluated_state(self) -> dict:
        """Return visit/win/draw/loss statistics for the root state."""
        return self.tree.root.get_evaluated_state()

    def get_evaluated_actions(self) -> List[dict]:
        """Return statistics for every action available at the root."""
        return self.tree.root.get_evaluated_actions()

    def select_action(
        self, evaluated_actions: List[dict], selection_method: str
    ) -> Action:
        """Pick one action from *evaluated_actions*.

        selection_method:
            "argmax" -- a most-visited action (ties broken uniformly at random)
            "sample" -- sampled proportionally to visit counts
            "random" -- uniform random choice

        Raises:
            ValueError: if *selection_method* is not one of the above.
        """
        if selection_method == "argmax":
            max_num_visits = max(
                evaluated_action["num_visits"] for evaluated_action in evaluated_actions
            )
            top_evaluated_actions = [
                evaluated_action
                for evaluated_action in evaluated_actions
                if evaluated_action["num_visits"] >= max_num_visits
            ]
            selected_evaluated_action = random.choice(top_evaluated_actions)
        elif selection_method == "sample":
            num_visits_list = [
                evaluated_action["num_visits"] for evaluated_action in evaluated_actions
            ]
            total_num_visits = sum(num_visits_list)
            weights = [num_visits / total_num_visits for num_visits in num_visits_list]
            selected_evaluated_action = random.choices(evaluated_actions, weights=weights, k=1)[0]
        elif selection_method == "random":
            selected_evaluated_action = random.choice(evaluated_actions)
        else:
            raise ValueError(f"unknown selection_method={repr(selection_method)}")
        return Action(
            symbol=selected_evaluated_action["symbol"],
            index=selected_evaluated_action["index"],
        )

    def synchronize(self, uttt: UltimateTicTacToe) -> None:
        """Re-root the tree at the child that matches *uttt* (see Tree.synchronize)."""
        self.tree.synchronize(uttt=uttt)

    def __str__(self):
        # Multi-line debug representation; nested objects are indented by
        # replacing their newlines.
        output = (
            '{cls}(\n'
            ' tree: {tree}\n'
            ' num_simulations: {num_simulations}\n'
            ' exploration_strength: {exploration_strength}\n)'
        )
        output = output.format(
            cls=self.__class__.__name__,
            tree=str(self.tree).replace('\n', '\n '),
            num_simulations=self.num_simulations,
            exploration_strength=self.exploration_strength,
        )
        return output
class Tree:
    """Search tree wrapper holding the current root node."""

    def __init__(self, root: Node):
        self.root = root

    @property
    def size(self) -> int:
        """Total number of nodes; recomputed by BFS on every access."""
        return bfs_count_nodes(node=self.root)

    @property
    def height(self) -> int:
        """Maximum depth below the root; recomputed by DFS on every access."""
        return dfs_max_depth(node=self.root, depth=0)

    def synchronize(self, uttt: UltimateTicTacToe) -> None:
        """Re-root the tree at the child whose state equals *uttt*.

        Keeps that child's accumulated statistics. If no child matches,
        the whole tree is discarded and a fresh root is created.
        """
        for child_node in self.root.child_nodes:
            if uttt.is_equal_to(child_node.uttt):
                self.root = child_node
                return
        self.root = Node(uttt=uttt.clone())

    def __str__(self):
        # Multi-line debug representation mirroring MonteCarloTreeSearch.__str__.
        output = (
            '{cls}(\n'
            ' root: {root}\n'
            ' size: {size}\n'
            ' height: {height}\n)'
        )
        output = output.format(
            cls=self.__class__.__name__,
            root=str(self.root).replace('\n', '\n '),
            size=self.size,
            height=self.height,
        )
        return output
class Node:
    """A single search-tree node: a game state plus accumulated playout statistics."""

    def __init__(self, uttt: UltimateTicTacToe, action: Optional[Action] = None):
        self.uttt = uttt  # game state at this node
        self.action = action  # action that led here (None for the initial root)
        self.child_nodes = []  # populated by expand()
        # Statistics accumulated by backprop():
        self.num_visits = 0
        self.num_X_wins = 0
        self.num_O_wins = 0
        self.num_draws = 0

    def is_leaf(self) -> bool:
        """True while the node has not been expanded (has no children)."""
        return len(self.child_nodes) == 0

    def is_terminal(self) -> bool:
        """True when the game is over in this node's state."""
        return self.uttt.is_terminated()

    def expand(self) -> None:
        """Create one child per legal action; no-op if already expanded or terminal.

        Raises:
            MonteCarloTreeSearchError: if a non-terminal state has no legal actions.
        """
        if not self.is_leaf():
            return
        if self.is_terminal():
            return
        legal_actions = self.uttt.get_legal_actions()
        if len(legal_actions) == 0:
            raise MonteCarloTreeSearchError("expanding node with no legal actions")
        for legal_action in legal_actions:
            uttt = self.uttt.clone()
            uttt.execute(action=legal_action, verify=False)
            child_node = Node(uttt=uttt, action=legal_action)
            self.child_nodes.append(child_node)

    def get_evaluated_state(self) -> dict:
        """Return this node's statistics from the perspective of the player to move."""
        if self.uttt.is_next_symbol_X():
            num_wins = self.num_X_wins
            num_losses = self.num_O_wins
        else:
            num_wins = self.num_O_wins
            num_losses = self.num_X_wins
        return {
            "state": self.uttt.state.copy(),
            "num_visits": self.num_visits,
            "num_wins": num_wins,
            "num_draws": self.num_draws,
            "num_losses": num_losses,
        }

    def get_evaluated_actions(self) -> List[dict]:
        """Return per-child statistics, each from the perspective of the child's actor.

        Raises:
            MonteCarloTreeSearchError: if the node is unexpanded or unvisited.
        """
        if self.is_leaf():
            raise MonteCarloTreeSearchError("node is a leaf")
        if self.num_visits == 0:
            raise MonteCarloTreeSearchError("node was not visited")
        evaluated_actions = []
        for child_node in self.child_nodes:
            if child_node.action.is_symbol_X():
                num_wins = child_node.num_X_wins
                num_losses = child_node.num_O_wins
            else:
                num_wins = child_node.num_O_wins
                num_losses = child_node.num_X_wins
            evaluated_action = {
                "symbol": child_node.action.symbol,
                "index": child_node.action.index,
                "num_visits": child_node.num_visits,
                "num_wins": num_wins,
                "num_draws": child_node.num_draws,
                "num_losses": num_losses,
            }
            evaluated_actions.append(evaluated_action)
        return evaluated_actions

    def __str__(self):
        # Multi-line debug representation; nested state is indented.
        output = (
            '{cls}(\n'
            ' action: {action}\n'
            ' uttt: {uttt}\n'
            ' num_children: {num_children}\n'
            ' num_visits: {num_visits}\n'
            ' num_X_wins: {num_X_wins}\n'
            ' num_O_wins: {num_O_wins}\n'
            ' num_draws: {num_draws}\n)'
        )
        output = output.format(
            cls=self.__class__.__name__,
            action=self.action,
            uttt=str(self.uttt).replace('\n', '\n '),
            num_children=len(self.child_nodes),
            num_visits=self.num_visits,
            num_X_wins=self.num_X_wins,
            num_O_wins=self.num_O_wins,
            num_draws=self.num_draws,
        )
        return output
def simulate(node: Node, exploration_strength: float) -> None:
    """Run one MCTS iteration from *node*: select a leaf, expand it, play one
    random game from it, and backpropagate the outcome along the path."""
    path = select_leaf_node(node=node, exploration_strength=exploration_strength)
    if not path:
        raise MonteCarloTreeSearchError("selected path is empty")
    leaf = path[-1]
    leaf.expand()
    backprop(selected_path=path, stats=playout(node=leaf))
def select_leaf_node(node: Node, exploration_strength: float) -> List[Node]:
    """Descend from *node* to a leaf, at each level following a child with the
    maximum UCT score (ties broken uniformly at random).

    Returns the visited nodes in order, starting at *node* and ending at the leaf.
    """
    current = node
    path = [current]
    while not current.is_leaf():
        best_score = float("-inf")
        best_children = []
        for child in current.child_nodes:
            child_score = UCT(
                node=child,
                parent_num_visits=current.num_visits,
                exploration_strength=exploration_strength,
            )
            if child_score > best_score:
                best_score = child_score
                best_children = [child]
            elif child_score >= best_score:
                best_children.append(child)
        current = random.choice(best_children)
        path.append(current)
    return path
def UCT(node: Node, parent_num_visits: int, exploration_strength: float) -> float:
    """Upper Confidence bound for Trees: exploitation value plus an exploration
    bonus that shrinks as *node* gets visited more often.

    Unvisited nodes score +inf so every child is tried at least once.
    """
    if not node.num_visits:
        return math.inf
    bonus = exploration_strength * math.sqrt(
        math.log(parent_num_visits) / node.num_visits
    )
    return value_function(node) + bonus
def value_function(node: Node) -> float:
    """Average (win - loss) outcome of *node*, from the perspective of the
    player who made ``node.action``.

    Returns:
        (num_wins - num_losses) / num_visits, in [-1.0, 1.0].

    Raises:
        MonteCarloTreeSearchError: if the action symbol is neither X nor O.
            (The original fell through both branches and crashed with an
            UnboundLocalError in that case.)
    """
    if node.action.is_symbol_X():
        num_wins = node.num_X_wins
        num_losses = node.num_O_wins
    elif node.action.is_symbol_O():
        num_wins = node.num_O_wins
        num_losses = node.num_X_wins
    else:
        raise MonteCarloTreeSearchError("node action has an unknown symbol")
    return (num_wins - num_losses) / node.num_visits
def playout(node: Node) -> Tuple[int, int, int]:
    """Play one uniformly random game from *node*'s state until termination.

    Returns:
        A one-hot ``(num_X_wins, num_O_wins, num_draws)`` tuple describing
        the final result.
    """
    state = node.uttt.clone()
    while not state.is_terminated():
        move = random.choice(state.get_legal_actions())
        state.execute(move, verify=False)
    if state.is_result_X():
        return 1, 0, 0
    if state.is_result_O():
        return 0, 1, 0
    if state.is_result_draw():
        return 0, 0, 1
    return 0, 0, 0
def backprop(selected_path: List[Node], stats: Tuple[int, int, int]) -> None:
    """Add the playout outcome *stats* to every node along *selected_path*."""
    x_wins, o_wins, draws = stats
    for visited in selected_path:
        visited.num_visits += 1
        visited.num_X_wins += x_wins
        visited.num_O_wins += o_wins
        visited.num_draws += draws
def bfs_count_nodes(node: Node) -> int:
    """Count the nodes of the subtree rooted at *node* (breadth-first)."""
    total = 0
    queue = deque((node,))
    while queue:
        current = queue.popleft()
        total += 1
        queue.extend(current.child_nodes)
    return total
def dfs_max_depth(node: Node, depth: int) -> int:
    """Return the deepest level of the subtree rooted at *node*, where *node*
    itself sits at *depth* (a childless node contributes *depth*)."""
    if not node.child_nodes:
        return depth
    return max(
        dfs_max_depth(node=child, depth=depth + 1) for child in node.child_nodes
    )
def serialize_evaluated_state(evaluated_state: dict) -> str:
    """Encode an evaluated state as
    ``evaluatedState{<state digits> <visits> <wins> <draws> <losses>}``."""
    state_digits = "".join(str(cell) for cell in evaluated_state["state"])
    counters = " ".join(
        str(evaluated_state[key])
        for key in ("num_visits", "num_wins", "num_draws", "num_losses")
    )
    return f"evaluatedState{{{state_digits} {counters}}}"
def serialize_evaluated_actions(evaluated_actions: List[dict]) -> str:
    """Encode actions as ``evaluatedActions{entry,entry,...}`` where each entry
    is ``<symbol> <index> <visits> <wins> <draws> <losses>``."""
    entries = [
        " ".join(
            str(action[key])
            for key in ("symbol", "index", "num_visits", "num_wins", "num_draws", "num_losses")
        )
        for action in evaluated_actions
    ]
    return "evaluatedActions{" + ",".join(entries) + "}"
class MonteCarloTreeSearchError(Exception):
    """Raised for invalid MCTS operations (e.g. expanding a node with no legal
    actions, or reading statistics from an unvisited/unexpanded node)."""
    pass
|
1645518
|
from covid_model_seiir_pipeline.pipeline.forecasting.task.beta_residual_scaling import (
beta_residual_scaling,
)
from covid_model_seiir_pipeline.pipeline.forecasting.task.beta_forecast import (
beta_forecast,
)
|
1645532
|
from django.conf.urls import url
from .views.views import csv_daily_report, csv_report, election_day, election_day_center, \
reports, center_csv_report, phone_csv_report, \
election_day_center_n, election_day_office_n, election_day_preliminary, national, offices, \
offices_detail, redirect_to_national, regions, sms, subconstituencies, weekly, election_day_hq
from .views.phone_tool import matching_phones, phone_history, phone_message_tool, whitelist_phone
# URL namespace for reversing, e.g. 'vr_dashboard:election-day'.
app_name = 'vr_dashboard'
urlpatterns = (
    # Landing page redirects to the national overview.
    url(r'^$', redirect_to_national),
    # Election-day dashboards: overall, HQ, preliminary, per-center and per-office views.
    url(r'^election_day/$', election_day, name='election-day'),
    url(r'^election_day/hq/$', election_day_hq, name='election-day-hq'),
    url(r'^election_day/preliminary/$', election_day_preliminary, name='election-day-preliminary'),
    url(r'^election_day/center/$', election_day_center, name='election-day-center'),
    url(r'^election_day/center/(?P<center_id>[\d]+)/$', election_day_center_n,
        name='election-day-center-n'),
    url(r'^election_day/office/(?P<office_id>[\d]+)/$', election_day_office_n,
        name='election-day-office-n'),
    # General reporting views.
    url(r'^national/$', national, name='national'),
    url(r'^offices/$', offices, name='offices'),
    url(r'^offices_detail/$', offices_detail, name='offices-detail'),
    url(r'^regions/$', regions, name='regions'),
    url(r'^sms/$', sms, name='sms'),
    url(r'^reports/$', reports, name='reports'),
    url(r'^subconstituencies/$', subconstituencies, name='subconstituencies'),
    url(r'^weekly/$', weekly, name='weekly'),
    # CSV exports; the dated variant takes DD/MM/YYYY from/to components.
    url(r'^csv/$', csv_report, name='csv'),
    url(r'^daily_csv/$', csv_daily_report, name='daily-csv'),
    url(r'^daily_csv/(?P<from_date>\d{2}/\d{2}/\d{4})/(?P<to_date>\d{2}/\d{2}/\d{4})/$',
        csv_daily_report, name='daily-csv-with-dates'),
    url(r'^center_csv/$', center_csv_report, name='center-csv'),
    url(r'^phone_csv/$', phone_csv_report, name='phone-csv'),
    # Phone lookup / whitelisting tools.
    url(r'^phone_tool/$', phone_message_tool, name='phone-message-tool'),
    url(r'^phone_tool/matching_phones/$', matching_phones, name='search-phones'),
    url(r'^phone_tool/phone_history/$', phone_history, name='phone-history'),
    url(r'^phone_tool/whitelist_phone/$', whitelist_phone, name='whitelist-phone')
)
|
1645571
|
import sys
import numpy as np

# Convert plain-text vertex and triangle-index tables into an OFF mesh file.
# Usage: script.py <vertices.txt> <triangles.txt> <output.off>
# ndmin=2 keeps a single-row input 2-D so shape[0] is the row count, not the
# column count (np.loadtxt otherwise squeezes one row to a 1-D array).
vert = np.loadtxt(sys.argv[1], ndmin=2)
tri = np.loadtxt(sys.argv[2], ndmin=2)
# One text handle for header and data; the original reopened the file in 'ab'.
with open(sys.argv[3], 'w') as f:
    f.write('OFF\n')
    # OFF header: <num vertices> <num faces> <num edges (0 = unspecified)>
    f.write('{} {} {}\n'.format(int(vert.shape[0]), int(tri.shape[0]), 0))
    np.savetxt(f, vert, fmt='%.6f')
    # Each OFF face row is "<vertex count> i j k"; all faces here are triangles.
    np.savetxt(f, np.hstack([np.ones((tri.shape[0], 1)) * 3, tri]), fmt='%d')
|
1645572
|
from securityheaders.checkers import InfoCollector, FindingType, Finding, FindingSeverity
from securityheaders.models import ModelFactory
class InfoDirectiveCollector(InfoCollector):
    """Collector emitting one informational finding per parsed directive of
    every known security header present in the response."""

    def check(self, headers, opt_options=None):
        """Collect INFO_DIRECTIVE findings for all known headers in *headers*.

        Args:
            headers: mapping of response header names to raw values.
            opt_options: optional settings dict; unused, kept for interface
                compatibility (was a mutable ``dict()`` default).

        Returns:
            List of Finding objects, one per directive encountered.
        """
        if opt_options is None:
            opt_options = {}
        headernames = ModelFactory().getheadernames()
        findings = []
        for header in headernames:
            hdr = ModelFactory().getheader(header)
            try:
                obj = self.extractheader(headers, hdr)
                if obj and obj.parsedstring:
                    findings.extend(self.mycheck(obj))
            except Exception:
                # Best-effort: a header that fails to parse yields no findings
                # (was a bare except; narrowed so SystemExit etc. propagate).
                pass
        return findings

    def mycheck(self, data):
        """Build one NONE-severity finding per directive present in *data*.

        Each finding's value is the directive's values joined with leading
        spaces ('' when the directive has no value).
        """
        findings = []
        if not data:
            return findings
        for mydirective in data.keys():
            if data.directive.isDirective(mydirective):
                value = data[mydirective]
                if value:
                    # Single join instead of repeated string concatenation;
                    # output format (leading space per value) is unchanged.
                    valstr = ''.join(' ' + str(val) for val in value)
                else:
                    valstr = ""
                findings.append(Finding(data.headerkey, FindingType.INFO_DIRECTIVE, valstr, FindingSeverity.NONE, mydirective))
        return findings
|
1645599
|
from collections import namedtuple

# Demo of collections.namedtuple: field names may be supplied as a tuple,
# a list, or a whitespace-separated string.
Point2D = namedtuple('Point2D', ('x', 'y'))
pt1 = Point2D(10, 20)
Circle = namedtuple('Circle', ['center_x', 'center_y', 'radius'])
circle_1 = Circle(0, 0, 10)
Stock = namedtuple('Stock', '''symbol
year month day
open high low close''')
djia = Stock('DJIA', 2018, 1, 25, 26_313, 26_458, 26_260, 26_393)
# Fields are readable by attribute name...
print(pt1.x)
print(circle_1.radius)
# ...and instances iterate like plain tuples.
for item in djia:
    print(item)
# Ordinary tuple unpacking works.
x, y = pt1
print(x, y)
# Extended unpacking: the middle fields land in the throwaway list `_`.
symbol, *_, close = djia
print(symbol, close)
print(_)
|
1645609
|
import sys
from PySide import QtGui
import model_win_test
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the test window,
    # then hand control to the event loop until the window is closed.
    qt_app = QtGui.QApplication(sys.argv)
    main_window = model_win_test.TestModelWin()
    main_window.show()
    qt_app.exec_()
    sys.exit()
|
1645620
|
from bitmovin.utils import Serializable
class StartManifest(Serializable):
    """Request payload identifying which manifest to start generating."""

    def __init__(self, manifest_id):
        super().__init__()
        # Camel-case attribute mirrors the REST API field name 'manifestId'.
        self.manifestId = manifest_id

    def serialize(self):
        """Serialize via the Serializable base class (no extra fields here)."""
        return super().serialize()

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build an instance from a decoded JSON dict.

        Fix: construct via *cls* rather than the hard-coded class name so
        subclasses parse to their own type.
        """
        return cls(manifest_id=json_object.get('manifestId'))
|
1645640
|
import json
import pytest
from botocore.exceptions import ClientError
from app.callback.sqs_client import SQSClient
from botocore.stub import Stubber
@pytest.fixture(scope='function')
def sqs_client(notify_api, mocker):
    """Function-scoped SQSClient initialised with mocked logger/statsd."""
    with notify_api.app_context():
        client = SQSClient()
        client.init_app(
            aws_region='some-aws-region',
            logger=mocker.Mock(),
            statsd_client=mocker.Mock()
        )
    return client
@pytest.fixture()
def sqs_stub(sqs_client):
    # Stub the underlying botocore client; after the test runs, fail if any
    # registered responses were left unconsumed.
    with Stubber(sqs_client._client) as stubber:
        yield stubber
        stubber.assert_no_pending_responses()
@pytest.mark.parametrize(['message_attributes', 'expected_attributes'],
                         [({"CallbackType": {"DataType": "String", "StringValue": "foo"}},
                           {"CallbackType": {"DataType": "String", "StringValue": "foo"},
                            "ContentType": {"StringValue": "application/json", "DataType": "String"}}),
                          (None, {"ContentType": {"StringValue": "application/json", "DataType": "String"}})])
def test_send_message_successful_returns_response_body(sqs_stub, sqs_client, message_attributes, expected_attributes):
    """send_message should add a ContentType attribute and return the SQS response body."""
    url = 'http://some_url'
    body = {"message": "hello"}
    message_id = "some-id"
    # Fix: removed the redundant self-assignment 'message_attributes = message_attributes'.
    sqs_stub.add_response(
        'send_message',
        expected_params={
            'QueueUrl': url,
            'MessageBody': json.dumps(body),
            'MessageAttributes': expected_attributes
        },
        service_response={'MessageId': message_id}
    )
    response = sqs_client.send_message(url, body, message_attributes)
    assert response['MessageId'] == message_id
def test_send_message_raises_client_error_on_client_exception(sqs_stub, sqs_client):
    """A botocore error from SQS must propagate to the caller as ClientError."""
    queue_url = 'http://some_url'
    payload = {"message": "hello"}
    sqs_stub.add_client_error(
        'send_message',
        expected_params={
            'QueueUrl': queue_url,
            'MessageBody': json.dumps(payload),
            'MessageAttributes': {"ContentType": {"StringValue": "application/json", "DataType": "String"}}
        }
    )
    with pytest.raises(ClientError):
        sqs_client.send_message(queue_url, payload, {})
|
1645642
|
import pandas as pd
from scipy.stats import ttest_ind
import numpy as np
from statsmodels.stats.multitest import multipletests as multit
from warnings import warn
def count_reps(inseries):
    """Return a per-element replicate index for *inseries*.

    For each position, the result holds how many times that value has
    already appeared earlier in the series (0 for a first occurrence).

    inseries -- pandas Series (anything exposing .tolist())
    Returns a plain list of ints, same length as the input.

    Fix: the original pre-filled the output with a meaningless 878
    placeholder and built a zeroed counts dict in a separate pass;
    a single pass with dict.get is equivalent and clearer.
    """
    counts = {}
    out = []
    for item in inseries.tolist():
        seen = counts.get(item, 0)
        out.append(seen)
        counts[item] = seen + 1
    return out
from scipy.stats import mstats_basic
def interpret(ld, condition_column, strain_column, values_column, control_condition, out_prefix, circularity=None, set_missing_na=False):
    '''
    Interpret experimental data report produced by pyphe-analyse.

    Validates the long-format report, applies optional QC filters,
    pivots replicates into wide format, computes per-condition summary
    statistics and effect sizes relative to the control condition, and
    runs Welch's t-tests with Benjamini-Hochberg correction.

    Parameters:
        ld -- pandas DataFrame with the long-format data report; must
              contain condition_column, strain_column, values_column
              and a 'Plate' column. NOTE: modified in place by the QC
              filters below.
        condition_column -- column holding the experimental condition (axis).
        strain_column -- column holding the strain/grouping identifier.
        values_column -- column holding the fitness values to analyse.
        control_condition -- value of condition_column used as reference.
        out_prefix -- prefix for the two CSVs written:
                      <prefix>_reps.csv and <prefix>_summaryStats.csv.
        circularity -- if set, drop data points with Colony_circularity
                       below this threshold.
        set_missing_na -- if True, treat fitness values of exactly 0 as NA.

    Returns:
        Wide pandas DataFrame of summary statistics (also saved to disk),
        with a (condition, statistic) column MultiIndex.
    '''
    ###Check if essential columns exist
    print('Checking input table')
    print('Checking if axis_column exists')
    if condition_column not in list(ld):
        raise NameError('Axis_column not found in table.')
    print('....OK')
    print('Checking if grouping_column exists')
    if strain_column not in list(ld):
        raise NameError('grouping_column not found in table.')
    print('....OK')
    print('Checking if values_column exists')
    if values_column not in list(ld):
        raise NameError('values_column not found in table.')
    print('....OK')
    print('Checking if control exists in axis_column')
    if control_condition not in ld[condition_column].unique():
        raise NameError('control not found in axis_column.')
    print('....OK')
    if circularity:
        print('Circularity filter is set. Checking if Colony_circularity column exists')
        if 'Colony_circularity' not in list(ld):
            raise NameError('Input data has no column named Colony_circularity. Cannot apply circularity filter.')
    ###Report some simple numbers
    print('Data report loaded successfully')
    initial_conditions = ld[condition_column].unique()
    print('Number of unique elements in axis column: %i'%len(initial_conditions))
    initial_strains = ld[strain_column].unique()
    print('Number of unique elements in grouping column: %i'%len(initial_strains))
    print('Number of plates: %i'%len(ld['Plate'].unique()))
    print('Number of non-NA data points: %i'%len(ld.loc[~pd.isnull(ld[values_column])].index))
    ###Simple QC filters
    # Filtered points are set to NaN (not dropped) so row identity is kept.
    n_datapoints = (~ld[values_column].isnull()).sum()
    if circularity:
        ld.loc[ld['Colony_circularity']<circularity, values_column] = np.nan
        nn_datapoints = (~ld[values_column].isnull()).sum()
        print('Removed %i entries with circularity < %f'%(n_datapoints-nn_datapoints, circularity))
        n_datapoints = nn_datapoints
    if set_missing_na:
        ld.loc[ld[values_column]==0, values_column] = np.nan
        nn_datapoints = (~ld[values_column].isnull()).sum()
        print('Removed %i entries with fitness 0'%(n_datapoints-nn_datapoints))
        n_datapoints = nn_datapoints
    ###Group by replicates
    ld_stats = ld.copy()
    #drop any NA
    ld_stats = ld_stats.loc[~ld_stats[values_column].isnull()]
    #Recompute number of axis and grouping elements
    conditions = ld_stats[condition_column].unique()
    print('Number of unique elements in axis column after filtering: %i'%len(conditions))
    strains = ld_stats[strain_column].unique()
    print('Number of unique elements in grouping column: %i'%len(strains))
    # Assign each (condition, strain) occurrence a running replicate index
    # so replicates become distinct columns after pivoting.
    ld_stats['condition---strain'] = ld_stats[condition_column] + '---' + ld_stats[strain_column]
    ld_stats['rep'] = count_reps(ld_stats['condition---strain'])
    #Pivot this into wide format
    ld_stats_piv = ld_stats.pivot_table(index=strain_column, columns=[condition_column,'rep'], values=values_column)
    #assert that there are no duplicates, i.e. that count_reps() worked as expected
    assert (ld_stats.pivot_table(index=strain_column, columns=[condition_column,'rep'], values=values_column, aggfunc=len).unstack().dropna()==1.0).all()
    #Save this table:
    ld_stats_piv.to_csv(out_prefix+'_reps.csv')
    ###Compute summary stats
    # level=0 aggregates across replicates within each condition.
    mean_fitness = ld_stats_piv.mean(axis=1, level=0)
    median_fitness = ld_stats_piv.median(axis=1, level=0)
    fitness_stdev = ld_stats_piv.std(axis=1, level=0)
    obs_count = ld_stats_piv.count(axis=1, level=0)
    #Compute effect sizes
    # Effect size = per-strain fitness relative to the control condition.
    median_effect_size = median_fitness.div(median_fitness[control_condition], axis=0)
    mean_effect_size = mean_fitness.div(mean_fitness[control_condition], axis=0)
    ###run Welch's t-test
    # Masked arrays let ttest_ind ignore missing replicates per strain.
    print('Running t-tests')
    p_Welch = {}
    b = ld_stats_piv.xs(control_condition,axis=1, level=0).values
    b = np.ma.masked_invalid(b)
    for co in conditions:
        a = ld_stats_piv.xs(co, axis=1, level=0).values
        a = np.ma.masked_invalid(a)
        pvals_temp = mstats_basic.ttest_ind(a, b, axis=1, equal_var=False)[1].filled(np.nan)
        p_Welch[co] = pd.Series(pvals_temp, index=ld_stats_piv.index)
    p_Welch = pd.concat(p_Welch, axis=1)
    #multiple testing correction by BH
    # Applied per condition, over the non-NA p-values only.
    p_Welch_BH = p_Welch.copy()
    for c in p_Welch_BH:
        if p_Welch_BH[c].isnull().all():
            warn('No p-values obtained for %s (probably not enaough replicates)'%c)
        else:
            p_Welch_BH.loc[~p_Welch_BH[c].isnull(), c] = multit(p_Welch_BH.loc[~p_Welch_BH[c].isnull(), c], method='fdr_bh')[1]
    #aggregate data in table and save
    #And join together in one big data frame
    combined_data = pd.concat({'mean_fitness' : mean_fitness,
                               'mean_fitness_log2' : mean_fitness.applymap(np.log2),
                               'median_fitness' : median_fitness,
                               'median_fitness_log2' : median_fitness.applymap(np.log2),
                               'mean_effect_size' : mean_effect_size,
                               'mean_effect_size_log2' : mean_effect_size.applymap(np.log2),
                               'median_effect_size' : median_effect_size,
                               'median_effect_size_log2' : median_effect_size.applymap(np.log2),
                               'observation_count' : obs_count,
                               'stdev_fitness' : fitness_stdev,
                               'p_Welch' : p_Welch,
                               'p_Welch_BH' : p_Welch_BH,
                               'p_Welch_BH_-log10' : -p_Welch_BH.applymap(np.log10)}, axis=1)
    combined_data = combined_data.swaplevel(axis=1).sort_index(axis=1)
    combined_data.to_csv(out_prefix+'_summaryStats.csv')
    print('Interpretation completed and results saved.')
    return combined_data
|
1645657
|
import io
import yaml
class Hyperparameters:
    """Loads model/training hyperparameters from a YAML config file and
    exposes them as plain attributes."""

    def __init__(self, path_to_config_file):
        """Read the YAML file at *path_to_config_file* and populate attributes.

        Raises KeyError if an expected section or key is missing.
        """
        with io.open(path_to_config_file) as file:
            # Fix: yaml.load() without an explicit Loader is deprecated and
            # can execute arbitrary constructors; safe_load restricts the
            # input to plain data types.
            config = yaml.safe_load(file)
            self.learning_rate = config['hyperparameters']['learning_rate']
            self.input_size = config['hyperparameters']['input_size']
            self.hidden_sizes = config['hyperparameters']['hidden_sizes']
            self.output_size = config['hyperparameters']['output_size']
            self.num_features = config['hyperparameters']['num_features']
            self.activation_function = config['hyperparameters']['activation_function']
            # TODO add force feature selection
            # TODO gradient descent with momoentum
            # TODO decorators for plots
            # Hidden layers plus the output layer.
            self.num_layers = len(self.hidden_sizes) + 1
            self.selection_methods = config['selection_methods']
            self.num_epochs = config['training']['num_epochs']
            self.batch_size = config['training']['batch_size']  # online learning when batch_size=1
            self.cross_validation_folds = config['training']['cross_validation_folds']  # TODO when === num of observations then leave-one-out is applied.
            # Hard-coded defaults not read from the config file.
            self.lambda_reg = 0.8
            self.norm_data = True
            # TODO plot cost with and without reguralization
            # dirs:
            self.data_file = 'data/data.tsv'
|
1645672
|
from Components.Converter.Converter import Converter
from Components.Element import cached
class TPMChallenge(Converter):
    """Converter exposing one field of the source's TPM challenge result.

    The converter argument ("Level2Cert", "Level3Cert", "Value", "Result"
    or "Text") selects which element of source.tpm_result to render.
    """
    # Indices into source.tpm_result.
    L2C = 0
    L3C = 1
    VALUE = 2
    RESULT = 3
    TEXT = 4

    def __init__(self, type):
        Converter.__init__(self, type)
        # Unknown converter arguments raise KeyError here, as before.
        self.type = {"Level2Cert": self.L2C,
                     "Level3Cert": self.L3C,
                     "Value": self.VALUE,
                     "Result": self.RESULT,
                     "Text": self.TEXT
                     }[type]

    @cached
    def getText(self):
        """Return the selected tpm_result element as a string, or "N/A"."""
        res = self.source.tpm_result
        # Fix: the original compared ints with 'is', which only works thanks
        # to CPython's small-int caching; the if/elif chain simply indexed
        # res with self.type, so index directly.
        if self.L2C <= self.type <= self.TEXT:
            return str(res[self.type])
        return "N/A"

    text = property(getText)
1645693
|
import os
import shutil
from functools import partial
import neptune
import numpy as np
import pandas as pd
from attrdict import AttrDict
from steppy.adapter import Adapter, E
from steppy.base import IdentityOperation, Step
from common_blocks import augmentation as aug
from common_blocks import metrics
from common_blocks import models
from common_blocks import pipelines
from common_blocks import postprocessing
from common_blocks.utils import io, misc
CTX = neptune.Context()
LOGGER = misc.init_logger()
# ______ ______ .__ __. _______ __ _______ _______.
# / | / __ \ | \ | | | ____|| | / _____| / |
# | ,----'| | | | | \| | | |__ | | | | __ | (----`
# | | | | | | | . ` | | __| | | | | |_ | \ \
# | `----.| `--' | | |\ | | | | | | |__| | .----) |
# \______| \______/ |__| \__| |__| |__| \______| |_______/
#
# Run-level switches for this experiment script.
EXPERIMENT_DIR = '/output/experiment'
CLONE_EXPERIMENT_DIR_FROM = '' # When running eval in the cloud specify this as for example /input/SHIP-14/output/experiment
OVERWRITE_EXPERIMENT_DIR = False
DEV_MODE = False  # when True, work on small samples of the data
USE_TTA = True  # test-time augmentation during inference
INFERENCE_WITH_SHIP_NO_SHIP = True  # run the binary classifier before segmentation
# Optionally wipe and/or clone the experiment directory before starting.
if OVERWRITE_EXPERIMENT_DIR and os.path.isdir(EXPERIMENT_DIR):
    shutil.rmtree(EXPERIMENT_DIR)
if CLONE_EXPERIMENT_DIR_FROM != '':
    if os.path.exists(EXPERIMENT_DIR):
        shutil.rmtree(EXPERIMENT_DIR)
    shutil.copytree(CLONE_EXPERIMENT_DIR_FROM, EXPERIMENT_DIR)
# Parameters come from Neptune when running online, from a local YAML otherwise.
if CTX.params.__class__.__name__ == 'OfflineContextParams':
    PARAMS = misc.read_yaml().parameters
else:
    PARAMS = CTX.params
# ImageNet normalisation statistics.
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
# Rows per inference chunk, to bound memory use.
CHUNK_SIZE_SHIP_NO_SHIP = 2500
CHUNK_SIZE_SEGMENTATION = 2500
SEED = 1234
# Column names used throughout the metadata frames.
ID_COLUMN = 'id'
ID_BIG_IMAGE = 'BigImageId'
IS_NOT_EMPTY_COLUMN = 'is_not_empty'
X_COLUMN = 'file_path_image'
Y_COLUMN = 'file_path_mask'
# Single nested configuration object consumed by every pipeline builder
# below. Values come from the Neptune/YAML parameters (PARAMS) plus the
# module-level constants above.
CONFIG = AttrDict({
    'execution': {'experiment_dir': EXPERIMENT_DIR,
                  'num_workers': PARAMS.num_workers,
                  'num_threads': PARAMS.num_threads
                  },
    'general': {'img_H-W': (PARAMS.image_h, PARAMS.image_w),
                'loader_mode': PARAMS.loader_mode,
                'num_classes': 2,
                'original_size': (768, 768),
                },
    'meta_reader': {
        'segmentation_network': {'x_columns': [X_COLUMN],
                                 'y_columns': [Y_COLUMN, IS_NOT_EMPTY_COLUMN],
                                 },
        'ship_no_ship_network': {'x_columns': [X_COLUMN],
                                 'y_columns': [IS_NOT_EMPTY_COLUMN],
                                 },
    },
    'loaders': {'resize': {'dataset_params': {'h': PARAMS.image_h,
                                              'w': PARAMS.image_w,
                                              'sns_h': PARAMS.sns_image_h,
                                              'sns_w': PARAMS.sns_image_w,
                                              'image_source': PARAMS.image_source,
                                              'target_format': PARAMS.target_format,
                                              'empty_fraction': PARAMS.training_sampler_empty_fraction,
                                              'sample_size': PARAMS.training_sampler_size,
                                              'sns_empty_fraction': PARAMS.sns_training_sampler_empty_fracion,
                                              'MEAN': MEAN,
                                              'STD': STD
                                              },
                           'loader_params': {'training': {'batch_size': PARAMS.batch_size_train,
                                                          'shuffle': False,
                                                          'num_workers': PARAMS.num_workers,
                                                          'pin_memory': PARAMS.pin_memory
                                                          },
                                             'inference': {'batch_size': PARAMS.batch_size_inference,
                                                           'shuffle': False,
                                                           'num_workers': PARAMS.num_workers,
                                                           'pin_memory': PARAMS.pin_memory
                                                           },
                                             },
                           'augmentation_params': {'image_augment_train': aug.intensity_seq,
                                                   'image_augment_with_target_train': aug.resize_seq(
                                                       resize_target_size=PARAMS.resize_target_size),
                                                   'image_augment_inference': aug.resize_to_fit_net(
                                                       resize_target_size=PARAMS.resize_target_size),
                                                   'image_augment_with_target_inference': aug.resize_to_fit_net(
                                                       resize_target_size=PARAMS.resize_target_size)
                                                   },
                           },
                # Variant of the resize loader used for test-time augmentation.
                'resize_tta': {'dataset_params': {'h': PARAMS.image_h,
                                                  'w': PARAMS.image_w,
                                                  'image_source': PARAMS.image_source,
                                                  'target_format': PARAMS.target_format,
                                                  'empty_fraction': PARAMS.training_sampler_empty_fraction,
                                                  'sample_size': PARAMS.training_sampler_size,
                                                  'MEAN': MEAN,
                                                  'STD': STD
                                                  },
                               'loader_params': {'training': {'batch_size': PARAMS.batch_size_train,
                                                              'shuffle': False,
                                                              'num_workers': PARAMS.num_workers,
                                                              'pin_memory': PARAMS.pin_memory
                                                              },
                                                 'inference': {'batch_size': PARAMS.batch_size_inference,
                                                               'shuffle': False,
                                                               'num_workers': PARAMS.num_workers,
                                                               'pin_memory': PARAMS.pin_memory
                                                               },
                                                 },
                               'augmentation_params': {
                                   'image_augment_inference': aug.resize_to_fit_net(
                                       resize_target_size=PARAMS.resize_target_size),
                                   'image_augment_with_target_inference': aug.resize_to_fit_net(
                                       resize_target_size=PARAMS.resize_target_size),
                                   'tta_transform': aug.test_time_augmentation_transform
                               },
                               },
                },
    'model': {
        'segmentation_network': {
            'architecture_config': {'model_params': {'in_channels': PARAMS.image_channels,
                                                     'out_channels': PARAMS.network_output_channels,
                                                     'architecture': PARAMS.architecture,
                                                     'encoder': PARAMS.encoder,
                                                     'activation': PARAMS.network_activation,
                                                     },
                                    'optimizer_params': {'lr': PARAMS.lr,
                                                         },
                                    'regularizer_params': {'regularize': True,
                                                           'weight_decay_conv2d': PARAMS.l2_reg_conv,
                                                           },
                                    'weights_init': {'function': 'xavier',
                                                     },
                                    },
            'training_config': {'epochs': PARAMS.epochs_nr,
                                'shuffle': True,
                                'batch_size': PARAMS.batch_size_train,
                                'fine_tuning': PARAMS.fine_tuning,
                                },
            'callbacks_config': {'model_checkpoint': {
                'filepath': os.path.join(EXPERIMENT_DIR, 'checkpoints', 'segmentation_network', 'best.torch'),
                'epoch_every': 1,
                'metric_name': PARAMS.validation_metric_name,
                'minimize': PARAMS.minimize_validation_metric},
                "one_cycle_scheduler": {
                    "enabled": PARAMS.use_one_cycle,
                    "number_of_batches_per_full_cycle": PARAMS.one_cycle_number_of_batches_per_full_cycle,
                    "max_lr": PARAMS.one_cycle_max_lr,
                    "momentum_range": (0.95, 0.8),
                    "prcnt_annihilate": 10,
                    "div": 10
                },
                'exponential_lr_scheduler': {'gamma': PARAMS.gamma,
                                             'epoch_every': 1},
                'reduce_lr_on_plateau_scheduler': {'metric_name': PARAMS.validation_metric_name,
                                                   'minimize': PARAMS.minimize_validation_metric,
                                                   'reduce_factor': PARAMS.reduce_factor,
                                                   'reduce_patience': PARAMS.reduce_patience,
                                                   'min_lr': PARAMS.min_lr},
                'training_monitor': {'batch_every': 1,
                                     'epoch_every': 1},
                'experiment_timing': {'batch_every': 10,
                                      'epoch_every': 1},
                'validation_monitor': {'epoch_every': 1,
                                       'data_dir': PARAMS.train_images_dir,
                                       'loader_mode': PARAMS.loader_mode},
                'neptune_monitor': {'model_name': 'network',
                                    'image_nr': 16,
                                    'image_resize': 1.0,
                                    'image_every': 1},
                'early_stopping': {'patience': PARAMS.patience,
                                   'metric_name': PARAMS.validation_metric_name,
                                   'minimize': PARAMS.minimize_validation_metric},
            }
        },
        'ship_no_ship_network': {
            'architecture_config': {'model_params': {'architecture': PARAMS.sns_architecture,
                                                     'activation': 'sigmoid'},
                                    'optimizer_params': {'lr': PARAMS.sns_lr,
                                                         },
                                    'regularizer_params': {'regularize': True,
                                                           'weight_decay_conv2d': PARAMS.sns_l2_reg_conv,
                                                           },
                                    'weights_init': {'function': 'xavier',
                                                     },
                                    },
            'training_config': {'epochs': PARAMS.sns_epochs_nr,
                                'shuffle': True,
                                'batch_size': PARAMS.sns_batch_size_train,
                                'fine_tuning': PARAMS.fine_tuning,
                                },
            'callbacks_config': {'model_checkpoint': {
                'filepath': os.path.join(EXPERIMENT_DIR, 'checkpoints', 'ship_no_ship_network', 'best.torch'),
                'epoch_every': 1,
                'metric_name': PARAMS.sns_validation_metric_name,
                'minimize': PARAMS.sns_minimize_validation_metric
            },
                "one_cycle_scheduler": {
                    "enabled": PARAMS.sns_use_one_cycle,
                    "number_of_batches_per_full_cycle": PARAMS.sns_one_cycle_number_of_batches_per_full_cycle,
                    "max_lr": PARAMS.sns_one_cycle_max_lr,
                    "momentum_range": (0.95, 0.7),
                    "prcnt_annihilate": 10,
                    "div": 10
                },
                'exponential_lr_scheduler': {'gamma': PARAMS.gamma,
                                             'epoch_every': 1},
                'reduce_lr_on_plateau_scheduler': {'metric_name': PARAMS.sns_validation_metric_name,
                                                   'minimize': PARAMS.sns_minimize_validation_metric,
                                                   'reduce_factor': PARAMS.reduce_factor,
                                                   'reduce_patience': PARAMS.reduce_patience,
                                                   'min_lr': PARAMS.min_lr},
                'training_monitor': {'batch_every': 10,
                                     'epoch_every': 1},
                'experiment_timing': {'batch_every': 10,
                                      'epoch_every': 1},
                'validation_monitor': {'epoch_every': 1,
                                       'data_dir': PARAMS.train_images_dir,
                                       'loader_mode': PARAMS.loader_mode},
                'neptune_monitor': {'model_name': 'network',
                                    'image_nr': 16,
                                    'image_resize': 1.0,
                                    'image_every': 1},
                'early_stopping': {'patience': PARAMS.patience,
                                   'metric_name': PARAMS.validation_metric_name,
                                   'minimize': PARAMS.minimize_validation_metric},
            }
        },
    },
    'tta_generator': {'flip_ud': True,
                      'flip_lr': True,
                      'rotation': True,
                      'color_shift_runs': False},
    'tta_aggregator': {'tta_inverse_transform': aug.test_time_augmentation_inverse_transform,
                       'method': PARAMS.tta_aggregation_method,
                       'nthreads': PARAMS.num_threads
                       },
    'thresholder': {'threshold_masks': PARAMS.threshold_masks,
                    'threshold_ship_no_ship': PARAMS.sns_threshold,
                    },
})
# .______ __ .______ _______ __ __ .__ __. _______ _______.
# | _ \ | | | _ \ | ____|| | | | | \ | | | ____| / |
# | |_) | | | | |_) | | |__ | | | | | \| | | |__ | (----`
# | ___/ | | | ___/ | __| | | | | | . ` | | __| \ \
# | | | | | | | |____ | `----.| | | |\ | | |____.----) |
# | _| |__| | _| |_______||_______||__| |__| \__| |_______|_______/
#
def ship_no_ship_pipeline(config, suffix='_ship_no_ship', train_mode=True):
    """Build the binary ship/no-ship classification pipeline.

    In train mode the fittable network step itself is returned; in
    inference mode a thresholding step is appended that converts the
    network's probability output into hard 0/1 classes.
    """
    if train_mode:
        preprocessing = pipelines.preprocessing_binary_train(config, model_name='ship_no_ship_network', suffix=suffix)
    else:
        preprocessing = pipelines.preprocessing_binary_inference(config, model_name='ship_no_ship_network',
                                                                 suffix=suffix)
    preprocessing.set_parameters_upstream({'is_fittable': False})
    sns_network = misc.FineTuneStep(name='ship_no_ship_network',
                                    transformer=models.BinaryModel(**config.model['ship_no_ship_network']),
                                    input_steps=[preprocessing],
                                    )
    sns_network.set_mode_train()
    sns_network.set_parameters_upstream({'experiment_directory': config.execution.experiment_dir,
                                         })
    sns_network.force_fitting = False
    # NOTE(review): fine_tuning is read from the segmentation network's config,
    # not the ship_no_ship one — looks intentional (shared flag), but confirm.
    sns_network.fine_tuning = config.model.segmentation_network.training_config.fine_tuning
    if train_mode:
        return sns_network
    else:
        # Inference: threshold the network's probability into a hard class.
        class_prediction = Step(name='class_prediction',
                                transformer=misc.make_apply_transformer(
                                    partial(postprocessing.get_class,
                                            threshold=config.thresholder.threshold_ship_no_ship),
                                    output_name='classes',
                                    apply_on=['predictions']),
                                input_steps=[sns_network],
                                adapter=Adapter({'predictions': E(sns_network.name, 'ship_no_ship_prediction'),
                                                 }),
                                is_fittable=False
                                )
        return class_prediction
def train_segmentation_pipeline(config):
    """Build the trainable segmentation pipeline (preprocessing + network).

    The validation metadata is fed through the 'callback_input' data slot
    so validation monitors can access it during fitting.
    """
    preprocessing = pipelines.preprocessing_train(config, model_name='segmentation_network')
    segmentation_network = misc.FineTuneStep(name='segmentation_network',
                                             transformer=models.SegmentationModel(
                                                 **config.model['segmentation_network']),
                                             input_data=['callback_input'],
                                             input_steps=[preprocessing],
                                             adapter=Adapter({'datagen': E(preprocessing.name, 'datagen'),
                                                              'validation_datagen': E(preprocessing.name,
                                                                                      'validation_datagen'),
                                                              'meta_valid': E('callback_input', 'meta_valid'),
                                                              }))
    segmentation_network.set_mode_train()
    segmentation_network.set_parameters_upstream({'experiment_directory': config.execution.experiment_dir,
                                                  })
    segmentation_network.force_fitting = False
    segmentation_network.fine_tuning = config.model.segmentation_network.training_config.fine_tuning
    return segmentation_network
def inference_segmentation_pipeline(config):
    """Build the segmentation inference pipeline.

    Network (optionally with test-time augmentation, see the module-level
    USE_TTA flag) -> resize masks back to original size -> binarize ->
    connected-component labeling -> mask postprocessing. Returns the
    final postprocessing step.
    """
    # Choose how predicted masks are mapped back to the original 768x768 size.
    if config.general.loader_mode == 'resize_and_pad':
        size_adjustment_function = partial(postprocessing.crop_image, target_size=config.general.original_size)
    elif config.general.loader_mode == 'resize' or config.general.loader_mode == 'stacking':
        size_adjustment_function = partial(postprocessing.resize_image, target_size=config.general.original_size)
    else:
        raise NotImplementedError
    if USE_TTA:
        # TTA branch: predict on augmented variants, invert the augmentations
        # and aggregate the predictions before resizing.
        preprocessing, tta_generator = pipelines.preprocessing_inference_tta(config, model_name='segmentation_network')
        segmentation_network = Step(name='segmentation_network',
                                    transformer=models.SegmentationModel(**config.model['segmentation_network']),
                                    input_steps=[preprocessing])
        tta_aggregator = pipelines.aggregator('tta_aggregator', segmentation_network,
                                              tta_generator=tta_generator,
                                              config=config.tta_aggregator)
        prediction_renamed = Step(name='prediction_renamed',
                                  transformer=IdentityOperation(),
                                  input_steps=[tta_aggregator],
                                  adapter=Adapter({'mask_prediction': E(tta_aggregator.name, 'aggregated_prediction')
                                                   }))
        mask_resize = Step(name='mask_resize',
                           transformer=misc.make_apply_transformer(size_adjustment_function,
                                                                   output_name='resized_images',
                                                                   apply_on=['images'],
                                                                   n_threads=config.execution.num_threads,
                                                                   ),
                           input_steps=[prediction_renamed],
                           adapter=Adapter({'images': E(prediction_renamed.name, 'mask_prediction'),
                                            }))
    else:
        preprocessing = pipelines.preprocessing_inference(config, model_name='segmentation_network')
        segmentation_network = misc.FineTuneStep(name='segmentation_network',
                                                 transformer=models.SegmentationModel(
                                                     **config.model['segmentation_network']),
                                                 input_steps=[preprocessing],
                                                 )
        mask_resize = Step(name='mask_resize',
                           transformer=misc.make_apply_transformer(size_adjustment_function,
                                                                   output_name='resized_images',
                                                                   apply_on=['images'],
                                                                   n_threads=config.execution.num_threads,
                                                                   ),
                           input_steps=[segmentation_network],
                           adapter=Adapter({'images': E(segmentation_network.name, 'mask_prediction'),
                                            }),
                           )
    binarizer = Step(name='binarizer',
                     transformer=misc.make_apply_transformer(
                         partial(postprocessing.binarize, threshold=config.thresholder.threshold_masks),
                         output_name='binarized_images',
                         apply_on=['images'],
                         n_threads=config.execution.num_threads
                     ),
                     input_steps=[mask_resize],
                     adapter=Adapter({'images': E(mask_resize.name, 'resized_images'),
                                      }))
    labeler = Step(name='labeler',
                   transformer=misc.make_apply_transformer(postprocessing.label,
                                                           output_name='labeled_images',
                                                           apply_on=['images'],
                                                           n_threads=config.execution.num_threads,
                                                           ),
                   input_steps=[binarizer],
                   adapter=Adapter({'images': E(binarizer.name, 'binarized_images'),
                                    }))
    mask_postprocessing = Step(name='mask_postprocessing',
                               transformer=misc.make_apply_transformer(postprocessing.mask_postprocessing,
                                                                       output_name='labeled_images',
                                                                       apply_on=['images'],
                                                                       n_threads=config.execution.num_threads,
                                                                       ),
                               input_steps=[labeler],
                               adapter=Adapter({'images': E(labeler.name, 'labeled_images'),
                                                }))
    mask_postprocessing.set_mode_inference()
    mask_postprocessing.set_parameters_upstream({'experiment_directory': config.execution.experiment_dir,
                                                 'is_fittable': False
                                                 })
    # The network step itself must stay fittable (so its checkpoint loads)
    # even though all other steps were just marked non-fittable upstream.
    segmentation_network.is_fittable = True
    return mask_postprocessing
# __________ ___ _______ ______ __ __ .___________. __ ______ .__ __.
# | ____\ \ / / | ____| / || | | | | || | / __ \ | \ | |
# | |__ \ V / | |__ | ,----'| | | | `---| |----`| | | | | | | \| |
# | __| > < | __| | | | | | | | | | | | | | | | . ` |
# | |____ / . \ | |____ | `----.| `--' | | | | | | `--' | | |\ |
# |_______/__/ \__\ |_______| \______| \______/ |__| |__| \______/ |__| \__|
#
def train_ship_no_ship():
    """Train the binary ship/no-ship classifier on the training split.

    Splits train metadata into train/valid by big-image groups (so crops
    of the same big image never straddle the split), then fits the
    ship/no-ship pipeline.
    """
    meta = pd.read_csv(PARAMS.metadata_filepath)
    meta_train = meta[meta['is_train'] == 1]
    meta_train = add_big_image_id(meta_train)
    meta_train_split, meta_valid_split = misc.train_test_split_with_empty_fraction_with_groups(meta_train,
                                                                                               groups=meta_train[
                                                                                                   ID_BIG_IMAGE],
                                                                                               empty_fraction=PARAMS.evaluation_empty_fraction,
                                                                                               test_size=PARAMS.evaluation_size,
                                                                                               shuffle=True,
                                                                                               random_state=SEED)
    # Shuffle rows deterministically.
    meta_train_split = meta_train_split.sample(frac=1, random_state=SEED)
    meta_valid_split = meta_valid_split.sample(frac=1, random_state=SEED)
    if DEV_MODE:
        meta_train_split = meta_train_split.sample(PARAMS.dev_mode_size, random_state=SEED)
        meta_valid_split = meta_valid_split.sample(int(PARAMS.dev_mode_size / 2), random_state=SEED)
    data = {'input': {'meta': meta_train_split
                      },
            'callback_input': {'meta_valid': meta_valid_split
                               }
            }
    sns_pipe = ship_no_ship_pipeline(config=CONFIG, train_mode=True)
    sns_pipe.fit_transform(data)
def train():
    """Train the segmentation network on the training split.

    Same group-aware split as train_ship_no_ship(); validation is
    restricted to non-empty images, sampled to a fixed size.
    """
    meta = pd.read_csv(PARAMS.metadata_filepath)
    meta_train = meta[meta['is_train'] == 1]
    meta_train = add_big_image_id(meta_train)
    meta_train_split, meta_valid_split = misc.train_test_split_with_empty_fraction_with_groups(meta_train,
                                                                                               groups=meta_train[
                                                                                                   ID_BIG_IMAGE],
                                                                                               empty_fraction=PARAMS.evaluation_empty_fraction,
                                                                                               test_size=PARAMS.evaluation_size,
                                                                                               shuffle=True,
                                                                                               random_state=SEED)
    # Validate only on images that actually contain ships.
    meta_valid_split = meta_valid_split[meta_valid_split[IS_NOT_EMPTY_COLUMN] == 1].sample(
        PARAMS.in_train_evaluation_size, random_state=SEED)
    if DEV_MODE:
        meta_train_split = meta_train_split.sample(PARAMS.dev_mode_size, random_state=SEED)
        meta_valid_split = meta_valid_split.sample(int(PARAMS.dev_mode_size / 2), random_state=SEED)
    data = {'input': {'meta': meta_train_split
                      },
            'callback_input': {'meta_valid': meta_valid_split
                               }
            }
    pipeline = train_segmentation_pipeline(config=CONFIG)
    pipeline.fit_transform(data)
def evaluate():
    """Evaluate the full inference pipeline on the held-out validation split.

    Optionally gates segmentation behind the ship/no-ship classifier
    (INFERENCE_WITH_SHIP_NO_SHIP), computes the F2 metric against ground
    truth, reports it to Neptune and saves per-image results to CSV.
    """
    meta = pd.read_csv(PARAMS.metadata_filepath)
    meta_train = meta[meta['is_train'] == 1]
    meta_train = add_big_image_id(meta_train)
    # Reproduce the same validation split used during training (same SEED).
    _, meta_valid_split = misc.train_test_split_with_empty_fraction_with_groups(meta_train,
                                                                                groups=meta_train[ID_BIG_IMAGE],
                                                                                empty_fraction=PARAMS.evaluation_empty_fraction,
                                                                                test_size=PARAMS.evaluation_size,
                                                                                shuffle=True, random_state=SEED)
    if DEV_MODE:
        _, meta_valid_split = misc.train_test_split_with_empty_fraction_with_groups(meta_valid_split,
                                                                                    groups=meta_valid_split[
                                                                                        ID_BIG_IMAGE],
                                                                                    empty_fraction=PARAMS.evaluation_empty_fraction,
                                                                                    test_size=PARAMS.evaluation_size,
                                                                                    shuffle=True, random_state=SEED)
    segm_pipe = inference_segmentation_pipeline(config=CONFIG)
    valid_ids = meta_valid_split[ID_COLUMN] + '.jpg'
    if INFERENCE_WITH_SHIP_NO_SHIP:
        # Two-stage inference: run expensive segmentation only on images the
        # classifier flags as containing ships.
        sns_pipe = ship_no_ship_pipeline(config=CONFIG, train_mode=False)
        ids_ship, ids_no_ship = predict_ship_no_ship(meta_valid_split, sns_pipe, CHUNK_SIZE_SHIP_NO_SHIP)
        meta_valid_ship = meta_valid_split[valid_ids.isin(ids_ship)]
        prediction_ship = generate_submission(meta_valid_ship, segm_pipe, CHUNK_SIZE_SEGMENTATION)
        prediction = misc.combine_two_stage_predictions(ids_no_ship, prediction_ship, valid_ids)
    else:
        prediction = generate_submission(meta_valid_split, segm_pipe, CHUNK_SIZE_SEGMENTATION)
    gt = io.read_gt_subset(PARAMS.annotation_file, valid_ids)
    f2_per_image, image_ids = metrics.f_beta_metric(gt, prediction, beta=2, apply_mean=False)
    f2 = np.mean(f2_per_image)
    LOGGER.info('f2 {}'.format(f2))
    CTX.channel_send('f2', 0, f2)
    LOGGER.info('preparing results')
    results = misc.prepare_results(gt, prediction, meta_valid_split, f2_per_image, image_ids)
    results_filepath = os.path.join(EXPERIMENT_DIR, 'validation_results.csv')
    results.to_csv(results_filepath, index=None)
def predict():
    """Run inference on the test split and write submission.csv.

    Uses the same optional two-stage (ship/no-ship then segmentation)
    scheme as evaluate().
    """
    meta = pd.read_csv(PARAMS.metadata_filepath)
    meta_test = meta[meta['is_train'] == 0]
    if DEV_MODE:
        meta_test = meta_test.sample(PARAMS.dev_mode_size, random_state=SEED)
    segm_pipe = inference_segmentation_pipeline(config=CONFIG)
    test_ids = meta_test[ID_COLUMN] + '.jpg'
    if INFERENCE_WITH_SHIP_NO_SHIP:
        sns_pipe = ship_no_ship_pipeline(config=CONFIG, train_mode=False)
        ids_ship, ids_no_ship = predict_ship_no_ship(meta_test, sns_pipe, CHUNK_SIZE_SHIP_NO_SHIP)
        meta_test_ship = meta_test[test_ids.isin(ids_ship)]
        prediction_ship = generate_submission(meta_test_ship, segm_pipe, CHUNK_SIZE_SEGMENTATION)
        submission = misc.combine_two_stage_predictions(ids_no_ship, prediction_ship, test_ids)
    else:
        submission = generate_submission(meta_test, segm_pipe, CHUNK_SIZE_SEGMENTATION)
    submission_filepath = os.path.join(EXPERIMENT_DIR, 'submission.csv')
    submission.to_csv(submission_filepath, index=None, encoding='utf-8')
    LOGGER.info('submission saved to {}'.format(submission_filepath))
    LOGGER.info('submission head \n\n{}'.format(submission.head()))
# __ __ .___________. __ __ _______.
# | | | | | || | | | / |
# | | | | `---| |----`| | | | | (----`
# | | | | | | | | | | \ \
# | `--' | | | | | | `----.----) |
# \______/ |__| |__| |_______|_______/
#
def add_big_image_id(meta):
    """Join the BigImageId lookup onto *meta* (adds an ImageId column in place)."""
    big_image_lookup = pd.read_csv('big-images-ids_v2.csv')
    meta['ImageId'] = meta[ID_COLUMN] + '.jpg'
    return pd.merge(meta, big_image_lookup, on='ImageId')
def generate_submission(meta_data, pipeline, chunk_size):
    """Run segmentation inference over *meta_data*; chunked when chunk_size is set."""
    if chunk_size is None:
        return _generate_submission(meta_data, pipeline)
    return _generate_submission_in_chunks(meta_data, pipeline, chunk_size)
def _generate_submission(meta_data, pipeline):
    """Predict on all rows at once and wrap the result as a submission frame."""
    labeled_masks = _generate_prediction(meta_data, pipeline)
    return misc.create_submission(meta_data[ID_COLUMN] + '.jpg', labeled_masks)
def _generate_submission_in_chunks(meta_data, pipeline, chunk_size):
    """Predict chunk-by-chunk to bound memory use, then concatenate submissions."""
    partial_submissions = [
        misc.create_submission(chunk[ID_COLUMN] + '.jpg',
                               _generate_prediction(chunk, pipeline))
        for chunk in misc.generate_data_frame_chunks(meta_data, chunk_size)
    ]
    return pd.concat(partial_submissions)
def _generate_prediction(meta_data, pipeline):
data = {'input': {'meta': meta_data,
},
'callback_input': {'meta_valid': None
}
}
output = pipeline.transform(data)
y_pred = output['labeled_images']
return y_pred
def predict_ship_no_ship(meta_data, pipeline, chunk_size):
    """Classify images as ship/no-ship; chunked when chunk_size is set."""
    if chunk_size is None:
        return _predict_ship_no_ship(meta_data, pipeline)
    return _predict_ship_no_ship_in_chunks(meta_data, pipeline, chunk_size)
def _predict_ship_no_ship(meta_data, pipeline):
    """Classify all rows at once; return (ship ids, no-ship ids)."""
    predicted_classes = _generate_prediction_ship_no_ship(meta_data, pipeline)
    return misc.get_ship_no_ship_ids(meta_data[ID_COLUMN] + '.jpg', predicted_classes)
def _predict_ship_no_ship_in_chunks(meta_data, pipeline, chunk_size):
    """Chunked variant of _predict_ship_no_ship; accumulates ids over chunks."""
    ids_ship, ids_no_ship = [], []
    for chunk in misc.generate_data_frame_chunks(meta_data, chunk_size):
        chunk_classes = _generate_prediction_ship_no_ship(chunk, pipeline)
        chunk_ship, chunk_no_ship = misc.get_ship_no_ship_ids(chunk[ID_COLUMN] + '.jpg', chunk_classes)
        ids_ship.extend(chunk_ship)
        ids_no_ship.extend(chunk_no_ship)
    return ids_ship, ids_no_ship
def _generate_prediction_ship_no_ship(meta_data, pipeline):
data = {'input': {'meta': meta_data,
},
'callback_input': {'meta_valid': None
}
}
output = pipeline.transform(data)
y_pred = output['classes']
return y_pred
# .___ ___. ___ __ .__ __.
# | \/ | / \ | | | \ | |
# | \ / | / ^ \ | | | \| |
# | |\/| | / /_\ \ | | | . ` |
# | | | | / _____ \ | | | |\ |
# |__| |__| /__/ \__\ |__| |__| \__|
#
if __name__ == '__main__':
    # Full two-stage workflow: train the ship/no-ship classifier, train the
    # segmentation network, evaluate on the validation split, then produce
    # the test-set submission.
    train_ship_no_ship()
    train()
    evaluate()
    predict()
|
1645705
|
import torch.optim as optim
from torch.utils import data
from e2efold.models import ContactNetwork, ContactNetwork_test, ContactNetwork_fc
from e2efold.models import ContactAttention, ContactAttention_simple_fix_PE
from e2efold.models import ContactAttention_simple
from e2efold.common.utils import *
from e2efold.common.config import process_config
from e2efold.postprocess import postprocess
# --- Run configuration (stage 1 of e2efold training) -------------------
args = get_args()
config_file = args.config
config = process_config(config_file)
print("#####Stage 1#####")
print('Here is the configuration of this run: ')
print(config)
os.environ["CUDA_VISIBLE_DEVICES"]= config.gpu
# Hyper-parameters pulled from the config file.
d = config.u_net_d
BATCH_SIZE = config.batch_size_stage_1
OUT_STEP = config.OUT_STEP
LOAD_MODEL = config.LOAD_MODEL
pp_steps = config.pp_steps
data_type = config.data_type
model_type = config.model_type
# Checkpoint path encodes model type, dataset and depth d.
model_path = '../models_ckpt/supervised_{}_{}_d{}_l3.pt'.format(model_type, data_type,d)
epoches_first = config.epoches_first
evaluate_epi = config.evaluate_epi_stage_1
# Global step counter used for periodic logging in the training loop.
steps_done = 0
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
seed_torch()
# for loading data
# loading the rna ss data, the data has been preprocessed
# 5s data is just a demo data, which do not have pseudoknot, will generate another data having that
from e2efold.data_generator import RNASSDataGenerator, Dataset
import collections
RNA_SS_data = collections.namedtuple('RNA_SS_data',
    'seq ss_label length name pairs')
train_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'train')
val_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'val')
test_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'test_no_redundant')
# test_data = RNASSDataGenerator('../data/rnastralign_all/', 'test_no_redundant_600')
# Padded sequence length is fixed by the label tensors' second-to-last axis.
seq_len = train_data.data_y.shape[-2]
print('Max seq length ', seq_len)
# using the pytorch interface to parallel the data generation and model training
params = {'batch_size': BATCH_SIZE,
          'shuffle': True,
          'num_workers': 6,
          'drop_last': True}
train_set = Dataset(train_data)
train_generator = data.DataLoader(train_set, **params)
val_set = Dataset(val_data)
val_generator = data.DataLoader(val_set, **params)
test_set = Dataset(test_data)
test_generator = data.DataLoader(test_set, **params)
# seq_len =500
# store the intermediate activation
# Module-level storage for activations captured by forward hooks.
activation = {}


def get_activation(name):
    """Return a forward hook that stores the detached layer output in ``activation[name]``."""
    def _hook(module, inputs, outputs):
        activation[name] = outputs.detach()
    return _hook
# Select the network architecture requested by the config.
if model_type =='test_lc':
    contact_net = ContactNetwork_test(d=d, L=seq_len).to(device)
if model_type == 'att6':
    contact_net = ContactAttention(d=d, L=seq_len).to(device)
if model_type == 'att_simple':
    contact_net = ContactAttention_simple(d=d, L=seq_len).to(device)
if model_type == 'att_simple_fix':
    contact_net = ContactAttention_simple_fix_PE(d=d, L=seq_len,
        device=device).to(device)
if model_type == 'fc':
    contact_net = ContactNetwork_fc(d=d, L=seq_len).to(device)
if model_type == 'conv2d_fc':
    contact_net = ContactNetwork(d=d, L=seq_len).to(device)
# contact_net.conv1d2.register_forward_hook(get_activation('conv1d2'))
# Resume from an existing checkpoint when requested and present.
if LOAD_MODEL and os.path.isfile(model_path):
    print('Loading u net model...')
    contact_net.load_state_dict(torch.load(model_path))
u_optimizer = optim.Adam(contact_net.parameters())
# Positive class is heavily up-weighted because contact maps are sparse.
# for 5s
# pos_weight = torch.Tensor([100]).to(device)
# for length as 600
pos_weight = torch.Tensor([300]).to(device)
criterion_bce_weighted = torch.nn.BCEWithLogitsLoss(
    pos_weight = pos_weight)
# evaluate one batch drawn from the validation set
def model_eval():
    """Post-process one validation batch and print its average F1 score.

    Reads module-level globals: contact_net, val_generator, seq_len, device.
    No learned refinement is applied — only the convex post-processing step.
    """
    contact_net.eval()
    contacts, seq_embeddings, matrix_reps, seq_lens = next(iter(val_generator))
    contacts_batch = torch.Tensor(contacts.float()).to(device)
    seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)
    matrix_reps_batch = torch.unsqueeze(
        torch.Tensor(matrix_reps.float()).to(device), -1)
    # padding the states for supervised training with all 0s
    state_pad = torch.zeros([matrix_reps_batch.shape[0],
        seq_len, seq_len]).to(device)
    PE_batch = get_pe(seq_lens, seq_len).float().to(device)
    with torch.no_grad():
        pred_contacts = contact_net(PE_batch,
            seq_embedding_batch, state_pad)
    # Pure post-processing of the raw network output.
    u_no_train = postprocess(pred_contacts,
        seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)
    # Binarize the relaxed contact map at 0.5.
    map_no_train = (u_no_train > 0.5).float()
    f1_no_train_tmp = list(map(lambda i: F1_low_tri(map_no_train.cpu()[i],
        contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
    print('Average val F1 score with pure post-processing: ', np.average(f1_no_train_tmp))
def model_eval_all_test():
    """Evaluate over the whole test set and print P/R/F1 (exact and shifted).

    Reads module-level globals: contact_net, test_generator, seq_len, device.
    Only post-processing is applied to the network output (no refinement).
    """
    contact_net.eval()
    result_no_train = list()
    result_no_train_shift = list()
    batch_n = 0
    for contacts, seq_embeddings, matrix_reps, seq_lens in test_generator:
        if batch_n%10==0:
            print('Batch number: ', batch_n)
        batch_n += 1
        contacts_batch = torch.Tensor(contacts.float()).to(device)
        seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)
        matrix_reps_batch = torch.unsqueeze(
            torch.Tensor(matrix_reps.float()).to(device), -1)
        # Supervised stage uses all-zero state padding.
        state_pad = torch.zeros([matrix_reps_batch.shape[0],
            seq_len, seq_len]).to(device)
        PE_batch = get_pe(seq_lens, seq_len).float().to(device)
        with torch.no_grad():
            pred_contacts = contact_net(PE_batch,
                seq_embedding_batch, state_pad)
        # only post-processing without learning
        u_no_train = postprocess(pred_contacts,
            seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)
        map_no_train = (u_no_train > 0.5).float()
        # Exact-match metrics per sample in the batch.
        result_no_train_tmp = list(map(lambda i: evaluate_exact(map_no_train.cpu()[i],
            contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
        result_no_train += result_no_train_tmp
        # Metrics allowing one-position shifts in predicted pairs.
        result_no_train_tmp_shift = list(map(lambda i: evaluate_shifted(map_no_train.cpu()[i],
            contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
        result_no_train_shift += result_no_train_tmp_shift
    # Each result is a (precision, recall, f1) triple.
    nt_exact_p,nt_exact_r,nt_exact_f1 = zip(*result_no_train)
    nt_shift_p,nt_shift_r,nt_shift_f1 = zip(*result_no_train_shift)
    print('Average testing F1 score with pure post-processing: ', np.average(nt_exact_f1))
    print('Average testing F1 score with pure post-processing allow shift: ', np.average(nt_shift_f1))
    print('Average testing precision with pure post-processing: ', np.average(nt_exact_p))
    print('Average testing precision with pure post-processing allow shift: ', np.average(nt_shift_p))
    print('Average testing recall with pure post-processing: ', np.average(nt_exact_r))
    print('Average testing recall with pure post-processing allow shift: ', np.average(nt_shift_r))
# There are three steps of training
# step one: train the u net (supervised, weighted BCE on masked contact maps)
for epoch in range(epoches_first):
    contact_net.train()
    # num_batches = int(np.ceil(train_data.len / BATCH_SIZE))
    # for i in range(num_batches):
    for contacts, seq_embeddings, matrix_reps, seq_lens in train_generator:
        # contacts, seq_embeddings, matrix_reps, seq_lens = next(iter(train_generator))
        contacts_batch = torch.Tensor(contacts.float()).to(device)
        seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)
        matrix_reps_batch = torch.unsqueeze(
            torch.Tensor(matrix_reps.float()).to(device), -1)
        # padding the states for supervised training with all 0s
        state_pad = torch.zeros([matrix_reps_batch.shape[0],
            seq_len, seq_len]).to(device)
        PE_batch = get_pe(seq_lens, seq_len).float().to(device)
        # Mask restricts the loss to valid (non-padded) positions.
        contact_masks = torch.Tensor(contact_map_masks(seq_lens, seq_len)).to(device)
        pred_contacts = contact_net(PE_batch,
            seq_embedding_batch, state_pad)
        # Compute loss
        loss_u = criterion_bce_weighted(pred_contacts*contact_masks, contacts_batch)
        # print(steps_done)
        if steps_done % OUT_STEP ==0:
            print('Stage 1, epoch: {},step: {}, loss: {}'.format(
                epoch, steps_done, loss_u))
        # Optimize the model
        u_optimizer.zero_grad()
        loss_u.backward()
        u_optimizer.step()
        steps_done=steps_done+1
    # Periodically evaluate on validation data and checkpoint the weights.
    if epoch%evaluate_epi==0:
        model_eval()
        torch.save(contact_net.state_dict(), model_path)
# Final full evaluation on the test set.
model_eval_all_test()
# sys.exit()
|
1645707
|
import discord
from utils.globals import gc
from utils.settings import settings
import ui.text_manipulation as tm
# inherits from discord.py's Client
class Client(discord.Client):
# NOTE: These are strings!
__current_server = ""
__current_channel = ""
__prompt = ""
# discord.Status object
__status = ""
# discord.Game object
__game = ""
# Note: setting only allows for string types
def set_prompt(self, string):
self.__prompt = string.lower()
def set_current_server(self, string):
self.__current_server = string.lower()
def set_current_channel(self, string):
self.__current_channel = string.lower()
self.set_prompt(string)
def get_prompt(self): return self.__prompt
def get_current_server_name(self): return self.__current_server
def get_current_channel_name(self): return self.__current_channel
def get_current_server(self):
for server in self.servers:
if server.name.lower() == self.__current_server:
return server
def get_current_server_log(self):
for slog in gc.server_log_tree:
if slog.get_server() == self.get_current_server():
return slog
def get_current_channel(self):
for server in self.servers:
if server.name.lower() == self.__current_server.lower():
for channel in server.channels:
if channel.type is discord.ChannelType.text:
if channel.name.lower() == self.__current_channel.lower():
if channel.permissions_for(server.me).read_messages:
return channel
async def populate_current_channel_log(self):
slog = self.get_current_server_log()
for idx, clog in enumerate(slog.get_logs()):
if clog.get_channel().type is discord.ChannelType.text:
if clog.get_channel().name.lower() == self.__current_channel.lower():
if clog.get_channel().permissions_for(slog.get_server().me).read_messages:
async for msg in self.logs_from(clog.get_channel(), limit=settings["max_log_entries"]):
clog.insert(0, await tm.calc_mutations(msg))
def get_current_channel_log(self):
slog = self.get_current_server_log()
for idx, clog in enumerate(slog.get_logs()):
if clog.get_channel().type is discord.ChannelType.text:
if clog.get_channel().name.lower() == self.__current_channel.lower():
if clog.get_channel().permissions_for(slog.get_server().me).read_messages:
return clog
# returns online members in current server
async def get_online(self):
online_count = 0
if not self.get_current_server() == None:
for member in self.get_current_server().members:
if member is None: continue # happens if a member left the server
if member.status is not discord.Status.offline:
online_count +=1
return online_count
# because the built-in .say is really buggy, just overriding it with my own
async def say(self, string):
await self.send_message(self.get_current_channel(), string)
async def set_game(self, string):
self.__game = discord.Game(name=string,type=0)
self.__status = discord.Status.online
# Note: the 'afk' kwarg handles how the client receives messages, (rates, etc)
# This is meant to be a "nice" feature, but for us it causes more headache
# than its worth.
if self.__game is not None and self.__game != "":
if self.__status is not None and self.__status != "":
try: await self.change_presence(game=self.__game, status=self.__status, afk=False)
except: pass
else:
try: await self.change_presence(game=self.__game, status=discord.Status.online, afk=False)
except: pass
async def get_game(self):
return self.__game
async def set_status(self, string):
if string == "online":
self.__status = discord.Status.online
elif string == "offline":
self.__status = discord.Status.offline
elif string == "idle":
self.__status = discord.Status.idle
elif string == "dnd":
self.__status = discord.Status.dnd
if self.__game is not None and self.__game != "":
try: await self.change_presence(game=self.__game, status=self.__status, afk=False)
except: pass
else:
try: await self.change_presence(status=self.__status, afk=False)
except: pass
async def get_status(self):
return self.__status
|
1645752
|
from tqdm import tqdm
import tensorflow as tf
import config
import os
def augment(image):
    """Apply random left/right and up/down flips plus a 50% chance of rot90."""
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_flip_up_down(image)
    rotate = tf.random.uniform([], minval=0, maxval=1) < 0.5
    if rotate:
        image = tf.image.rot90(image)
    return image
def preprocess_image(image, label, train=True):
    """Resize, optionally augment (train only), cast to uint8 and JPEG-encode."""
    resized = tf.image.resize(image, (config.IMAGE_SIZE, config.IMAGE_SIZE))
    if train:
        resized = augment(resized)
    encoded = tf.image.encode_jpeg(
        tf.cast(resized, tf.uint8), optimize_size=True, chroma_downsampling=False)
    return encoded, label
def prepare_dataset_tfr(dataset: tf.data.Dataset, train=True):
    """Map JPEG preprocessing over *dataset* and batch it into shard-sized groups."""
    mapped = dataset.map(
        lambda img, lbl: preprocess_image(img, lbl, train),
        num_parallel_calls=config.AUTO,
    )
    return mapped.batch(config.SHARD_SIZE)
def bytestring_feature(list_of_bytestrings):
    """Wrap raw byte strings in a tf.train.Feature for TFRecord serialization."""
    bytes_list = tf.train.BytesList(value=list_of_bytestrings)
    return tf.train.Feature(bytes_list=bytes_list)
def int_feature(list_of_ints):
    """Wrap integers in a tf.train.Feature for TFRecord serialization."""
    int64_list = tf.train.Int64List(value=list_of_ints)
    return tf.train.Feature(int64_list=int64_list)
def to_tfrecord(img_bytes, label):
    """Build a tf.train.Example holding one encoded image and its class label."""
    features = tf.train.Features(feature={
        "image": bytestring_feature([img_bytes]),
        "class": int_feature([label]),
    })
    return tf.train.Example(features=features)
def write_tfrecords(dataset: tf.data.Dataset, output_dir, print_every=5):
    """Write each batch of *dataset* as one TFRecord shard under *output_dir*.

    Args:
        dataset: batched dataset yielding (encoded_image, label) tensor pairs.
        output_dir: target directory; created (parents included) if missing.
        print_every: log progress once every N shards.
    """
    # makedirs(exist_ok=True) is race-free and also creates missing parent
    # directories, unlike the original exists()+mkdir() pair.
    os.makedirs(output_dir, exist_ok=True)
    for shard, (image, label) in enumerate(tqdm(dataset)):
        # Materialize the tensors once per shard instead of once per record.
        image_np, label_np = image.numpy(), label.numpy()
        shard_size = image_np.shape[0]
        filename = (
            output_dir + "/catsdogs-" + "{:02d}-{}.tfrec".format(shard, shard_size)
        )
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(shard_size):
                example = to_tfrecord(image_np[i], label_np[i])
                out_file.write(example.SerializeToString())
            if shard % print_every == 0:
                print(
                    "Wrote file {} containing {} records".format(filename, shard_size)
                )
|
1645823
|
from django.conf.urls import url
from document.views import TreeView
# Route /<policy_id>[/<identifier>] to the document tree view.
# NOTE(review): the pattern has no trailing `$`, so it prefix-matches any
# longer path — confirm that is intentional.
urlpatterns = [
    url(r'^(?P<policy_id>[^/]+)(/(?P<identifier>[a-zA-Z0-9_-]+))?',
        TreeView.as_view(), name='document'),
]
|
1645864
|
import operator
from bson import ObjectId
from django.conf.urls import url
from django.core.urlresolvers import reverse
from tastypie import http
from tastypie import fields
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.utils import trailing_slash
from api.auth import DocumentsAuthorization
from api.resources import MongoDBResource
from comments.resources import CommentResource
from documents import get_collection
from documents.models import Document
from documents.signals import assignment_done
class DocumentResource(MongoDBResource):
    """Tastypie REST resource for MongoDB-backed Document objects.

    Exposes list GET/POST and detail GET/PUT, plus a nested
    /documents/<pk>/comments endpoint dispatched to CommentResource.
    """
    id = fields.CharField(attribute="_id")
    title = fields.CharField(attribute="title", null=True)
    entities = fields.ListField(attribute="entities", null=True)
    user_id = fields.IntegerField(attribute="user_id", readonly=True, null=True)
    is_public = fields.BooleanField(attribute="is_public", null=True)
    assignees = fields.ListField(attribute="assignees", null=True)

    class Meta:
        resource_name = "documents"
        list_allowed_methods = ["get", "post"]
        detail_allowed_methods = ["get", "put"]
        authorization = DocumentsAuthorization()
        object_class = Document

    def get_collection(self):
        """Return the raw collection backing this resource."""
        return get_collection("documents")

    def obj_get(self, request=None, **kwargs):
        """
        Returns mongodb document from provided id.

        Responds 401 when the requesting user may not view the document.
        """
        document = Document.objects.get(_id=ObjectId(kwargs.get("pk")))
        if request is not None and not document.is_visible(user_id=request.user.id):
            raise ImmediateHttpResponse(response=http.HttpUnauthorized())
        return document

    def obj_create(self, bundle, request=None, **kwargs):
        """
        Populates the id of user to create document.
        """
        return super(DocumentResource, self).obj_create(
            bundle, user_id=request.user.pk)

    def obj_update(self, bundle, request=None, **kwargs):
        """
        - Checks the permissions of user, and updates the document
        - Fires assignmend_done signals for assigned users
        """
        document = self.obj_get(request=request, pk=kwargs.get("pk"))
        if not document.is_editable(user_id=request.user.id):
            raise ImmediateHttpResponse(response=http.HttpUnauthorized())
        bundle = super(DocumentResource, self).obj_update(
            bundle, request, **kwargs)
        # Re-fetch to compare assignees before/after the update.
        updated_document = self.obj_get(request=request, pk=kwargs.get("pk"))
        if document.assignees != updated_document.assignees:
            # NOTE(review): on Python 3, map() yields one-shot iterators;
            # each is consumed exactly once below, so this is safe as written.
            original = map(operator.itemgetter("id"), document.assignees)
            updated = map(operator.itemgetter("id"), updated_document.assignees)
            # Notify only users that are newly assigned by this update.
            for user_id in set(updated).difference(original):
                assignment_done.send(
                    sender=self,
                    user_id=user_id,
                    instance=updated_document)
        return bundle

    def dehydrate(self, bundle):
        """
        Inserts the comments uri to the document bundle
        """
        bundle.data["comments_uri"] = reverse("api_get_comments", kwargs={
            "resource_name": "documents",
            "pk": bundle.data.get("id")
        })
        return bundle

    def override_urls(self):
        """
        Adds the urls of nested resources
        """
        return [
            url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/comments%s$" %
                (self._meta.resource_name, trailing_slash()),
                self.wrap_view('dispatch_comments'), name="api_get_comments"),
        ]

    def dispatch_comments(self, request, **kwargs):
        """Delegate /documents/<pk>/comments to the nested CommentResource."""
        document = Document(self.cached_obj_get(
            request=request, **self.remove_api_resource_names(kwargs)))
        child_resource = CommentResource()
        return child_resource.dispatch_list(request, document_id=document.pk)
|
1645901
|
from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker
from controller.utils import checker, revs, utils
from id_definition.error_codes import CTLResponseCode
from proto import backend_pb2
class SamplingInvoker(BaseMirControllerInvoker):
    """Invoker that runs the `mir sampling` command for a dataset."""

    def pre_invoke(self) -> backend_pb2.GeneralResp:
        """Validate that the request carries everything sampling needs."""
        prerequisites = [
            checker.Prerequisites.CHECK_USER_ID,
            checker.Prerequisites.CHECK_REPO_ID,
            checker.Prerequisites.CHECK_REPO_ROOT_EXIST,
            checker.Prerequisites.CHECK_DST_DATASET_ID,
            checker.Prerequisites.CHECK_TASK_ID,
        ]
        return checker.check_request(request=self._request,
                                     prerequisites=prerequisites,
                                     mir_root=self._repo_root)

    def invoke(self) -> backend_pb2.GeneralResp:
        """Build and run the sampling command; count takes priority over rate."""
        expected_type = backend_pb2.RequestType.CMD_SAMPLING
        if self._request.req_type != expected_type:
            return utils.make_general_response(
                CTLResponseCode.MIS_MATCHED_INVOKER_TYPE,
                f"expected: {expected_type} vs actual: {self._request.req_type}")
        dst_rev = revs.join_tvt_branch_tid(branch_id=self._request.dst_dataset_id,
                                           tid=self._request.task_id)
        src_revs = revs.join_tvt_branch_tid(branch_id=self._request.in_dataset_ids[0],
                                           tid=self._request.his_task_id)
        command = [utils.mir_executable(), 'sampling',
                   '--root', self._repo_root,
                   '--dst-rev', dst_rev,
                   '--src-revs', src_revs,
                   '-w', self._work_dir]
        if self._request.sampling_count:
            command += ['--count', str(self._request.sampling_count)]
        elif self._request.sampling_rate:
            command += ['--rate', str(self._request.sampling_rate)]
        return utils.run_command(command)
|
1645903
|
import joblib
import pytest
from pydefect.analyzer.grids import Grids
from pymatgen.core import Lattice, Structure
import numpy as np
from pymatgen.io.vasp import Chgcar
from vise.tests.helpers.assertion import assert_dataclass_almost_equal
@pytest.fixture
def grids():
    """A 1x1x5 grid on a 10 Å cubic lattice with known point distances."""
    distances = np.array([[[0.0, 2.0, 4.0, 4.0, 2.0]]])
    return Grids(lattice=Lattice.cubic(10),
                 dim=(1, 1, 5),
                 distance_data=distances)
@pytest.fixture
def chgcar():
    """A minimal single-atom CHGCAR with linear total and diff densities."""
    structure = Structure(lattice=Lattice.cubic(10), species=["H"], coords=[[0] * 3])
    densities = {"total": np.array([[[0.0, 2.0, 4.0, 6.0, 8.0]]]),
                 "diff": np.array([[[0.0, 1.0, 2.0, 3.0, 4.0]]])}
    return Chgcar(structure, data=densities)
def test_grids_joblib_roundtrip(tmpdir, grids):
    """Grids survives a joblib dump/load round trip."""
    tmpdir.chdir()
    print(tmpdir)
    with open("tmp.joblib", mode="wb") as sink:
        joblib.dump(grids, sink, compress=3)
    with open("tmp.joblib", mode="rb") as source:
        restored = joblib.load(source)
    assert_dataclass_almost_equal(restored, grids)
def test_grids_np_save_load_roundtrip(tmpdir, grids):
    """Grids survives its own dump()/from_file() round trip."""
    tmpdir.chdir()
    print(tmpdir)
    grids.dump()
    restored = grids.from_file()
    assert_dataclass_almost_equal(restored, grids)
def test_grids_from_chgcar(grids, chgcar):
    """Grids built from a Chgcar matches the reference fixture."""
    assert_dataclass_almost_equal(Grids.from_chgcar(chgcar), grids)
def test_shift_distance_data(grids):
    """Shifting the center by one z-voxel rolls the distance grid."""
    shifted = grids.shifted_distance_data(center=[0, 0, 1])
    np.testing.assert_array_almost_equal(
        shifted, np.array([[[2.0, 0.0, 2.0, 4.0, 4.0]]]))
def test_shift_distance_data2():
    """Same shift check on a 2x2x2 grid with the center at the far corner."""
    grid = Grids(lattice=Lattice.cubic(10),
                 dim=(2, 2, 2),
                 distance_data=np.array([[[0.0, 5.0], [5.0, 7.07]],
                                         [[5.0, 7.07], [7.07, 8.66]]]))
    shifted = grid.shifted_distance_data(center=[1, 1, 1])
    np.testing.assert_array_almost_equal(
        shifted,
        np.array([[[8.66, 7.07], [7.07, 5.0]],
                  [[7.07, 5.0], [5.0, 0.0]]]))
def test_spherical_dist(grids):
    """Spherically-binned data average around the shifted center."""
    # distance_data=np.array([[[0.0, 2.0, 4.0, 4.0, 2.0]]]))
    result = grids.spherical_dist(data=np.array([[[0.0, 0.0, 0.0, 0.0, 1.0]]]),
                                  center=[0, 0, 1],
                                  distance_bins=np.array([0.0, 2.5, 5.0]))
    # Divide by 2 since there are 2 points at 4.0 distance. volume=1000.
    assert result == [0.0, 1.0 / 2 / 1000]
"""
TODO
- Check how to revolve numpy array.
- Add distances_data
- Add _calc_histogram(chgcar, distances_data, center) method
DONE
- Add defect_center_idxs
- Add defect_center_coords property
"""
|
1645919
|
from django.conf import settings # import the settings file
import plistlib
import os
from server.utils import get_server_version
# Captured once at import time; a process restart is needed to refresh it.
SAL_VERSION = get_server_version()
def display_name(request):
    """Context processor exposing settings.DISPLAY_NAME to templates."""
    return dict(DISPLAY_NAME=settings.DISPLAY_NAME)
def config_installed(request):
    """Context processor: True when the optional 'config' app is installed.

    The original `True if ... else False` wrapped an expression that is
    already a bool; the membership test alone is equivalent and clearer.
    """
    return {'CONFIG_INSTALLED': 'config' in settings.INSTALLED_APPS}
def sal_version(request):
    """Context processor exposing the server version captured at import time."""
    return dict(SAL_VERSION=SAL_VERSION)
|
1645923
|
from os import path
import unittest
from prudentia.utils import io
class TestIO(unittest.TestCase):
    """Tests for the prudentia.utils.io prompt/input helpers."""

    def test_xstr(self):
        # None renders as the empty string.
        self.assertEqual(io.xstr(None), '')

    def test_yes(self):
        self.assertTrue(io.input_yes_no('test topic', prompt_fn=lambda _: 'y'))
        self.assertTrue(io.input_yes_no('test topic', prompt_fn=lambda _: 'yes'))

    def test_no(self):
        # Anything that is not an affirmative answer counts as "no".
        self.assertFalse(io.input_yes_no('test topic', prompt_fn=lambda _: 'whatever'))
        self.assertFalse(io.input_yes_no('test topic', prompt_fn=lambda _: 'no'))

    def test_yes_no_default(self):
        # Empty input falls back to the default, which is "no".
        self.assertFalse(io.input_yes_no('test topic', prompt_fn=lambda _: ''))

    def test_mandatory_input(self):
        self.assertRaises(ValueError, io.input_value, 'mandatory topic', prompt_fn=lambda _: '')

    def test_int_input(self):
        # The default value's type drives coercion of the entered text.
        self.assertEqual(io.input_value('int topic', default_value=1, prompt_fn=lambda _: '123'), 123)
        self.assertRaises(ValueError, io.input_value, 'int topic', default_value=1, prompt_fn=lambda _: 'aaa')

    def test_value_hidden(self):
        secret = '<PASSWORD>'
        self.assertEqual(io.input_value('pwd', hidden=True, hidden_prompt_fn=lambda _: secret), secret)

    def test_path_file(self):
        target_file = "./uname.yml"
        self.assertNotEqual(io.input_path('cwd file', prompt_fn=lambda _: target_file), None)

    def test_prudentia_dir(self):
        expected_path = path.join(path.dirname(path.realpath('.')), 'prudentia')
        self.assertEqual(io.prudentia_python_dir(), expected_path)

    def test_invalid_path_file(self):
        self.assertRaises(ValueError, io.input_path, 'cwd file', prompt_fn=lambda _: 'foo')
        self.assertRaises(ValueError, io.input_path, 'cwd file', prompt_fn=lambda _: '.')
        self.assertRaises(ValueError, io.input_path, 'cwd file', is_file=False, prompt_fn=lambda _: './uname.yml')

    def test_sanity_choices(self):
        self.assertRaises(ValueError, io.input_choice, 'choice topic', choices=None)
        self.assertRaises(ValueError, io.input_choice, 'choice topic', choices=[])
        # A default that is not among the choices is rejected.
        self.assertRaises(ValueError, io.input_choice, 'choice topic', default='d', choices=['a', 'b', 'c'])

    def test_choice(self):
        options = ['well', 'Iam', 'gonna', 'be', 'chosen']
        self.assertEqual(io.input_choice('choice topic', choices=options, prompt_fn=lambda _: 'be'), 'be')
        self.assertEqual(io.input_choice('choice topic', default='Iam', choices=options, prompt_fn=lambda _: ''), 'Iam')

    def test_invalid_retry_choice(self):
        self.assertRaises(ValueError, io.input_choice, 'choice topic', choices=['choice'], prompt_fn=lambda _: 'bla')
|
1645933
|
import torch
import random
import numpy as np
import torch.nn.functional as F
from .min_norm_solvers import MinNormSolver, gradient_normalizers
from torch.autograd import Variable
class backprop_scheduler(object):
def __init__(self, model, mode=None):
self.model = model
self.mode = mode
self.num_worker = len(self.model.regression_workers) + len(self.model.classification_workers)
self.Q = torch.zeros(self.num_worker).detach()
self.last_loss = torch.zeros(self.num_worker).detach()
self.pi = torch.ones(self.num_worker).detach()
    def __call__(self, preds, label, cls_optim, regr_optim, frontend_optim, device, h=None, dropout_rate=None, delta=None, temperture=None, alpha=None, batch=None):
        """Dispatch one optimisation step to the scheduler named by self.mode.

        Returns a (losses dict, weights) pair; `weights` is 1 for schedulers
        that do not re-weight workers.
        """
        if self.mode == "base":
            return self._base_scheduler(preds, label, cls_optim, regr_optim, frontend_optim, device)
        elif self.mode == "adversarial":
            # NOTE(review): no _adversarial method is visible in this chunk —
            # confirm it is defined elsewhere in the class.
            return self._adversarial(preds, label, cls_optim, regr_optim, frontend_optim, device)
        elif self.mode == "select_one":
            return self._select_one(preds, label, cls_optim, regr_optim, frontend_optim, device)
        elif self.mode == "select_half":
            return self._select_half(preds, label, cls_optim, regr_optim, frontend_optim, device)
        elif self.mode == "dropout":
            return self._drop_out(preds, label, cls_optim, regr_optim, frontend_optim, device=device, dropout_rate=dropout_rate)
        elif self.mode == "hyper_volume":
            return self._hyper_volume(preds, label, cls_optim, regr_optim, frontend_optim, device=device, delta=delta)
        elif self.mode == "softmax":
            return self._softmax(preds, label, cls_optim, regr_optim, frontend_optim, temperture=temperture, device=device)
        elif self.mode == "adaptive":
            return self._online_adaptive(preds, label, cls_optim, regr_optim, frontend_optim, temperture=temperture, alpha=alpha, device=device)
        elif self.mode == "MGD":
            return self._MGDA(preds, label, cls_optim, regr_optim, frontend_optim, batch=batch, device=device)
        else:
            raise NotImplementedError
    def _base_scheduler(self, preds, label, cls_optim, regr_optim, frontend_optim, device):
        """Sum every worker's weighted loss and take one step on all optimizers.

        Returns (losses dict including "total", 1).
        """
        frontend_optim.zero_grad()
        tot_loss = 0
        losses = {}
        for worker in self.model.classification_workers:
            cls_optim[worker.name].zero_grad()
            loss = worker.loss_weight * worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            tot_loss += loss
        for worker in self.model.regression_workers:
            regr_optim[worker.name].zero_grad()
            loss = worker.loss_weight * worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            tot_loss += loss
        # Regularizer workers contribute to the loss but have no own optimizer.
        for worker in self.model.regularizer_workers:
            loss = worker.loss_weight * worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            tot_loss += loss
        tot_loss.backward()
        for _, optim in cls_optim.items():
            optim.step()
        for _, optim in regr_optim.items():
            optim.step()
        frontend_optim.step()
        losses["total"] = tot_loss
        return losses, 1
def _select_one(self, preds, label, cls_optim, regr_optim, frontend_optim, device):
self.count += 1
loss_lst = []
num_worker = len(self.model.regression_workers) + len(self.model.classification_workers)
frontend_optim.zero_grad()
losses = {}
selected = self.count % num_worker
# select one
if selected > 3:
worker = self.model.classification_workers[selected - 4]
loss = worker.loss(preds[worker.name], label[worker.name])
else:
worker = self.model.classification_workers[selected]
loss = worker.loss(preds[worker.name], label[worker.name])
tot_loss = loss
tot_loss.backward()
for _, optim in cls_optim.items():
optim.step()
for _, optim in regr_optim.items():
optim.step()
frontend_optim.step()
losses["total"] = tot_loss
return losses, 1
    def _select_half(self, preds, label, cls_optim, regr_optim, frontend_optim, device):
        """Back-prop a random subset of the worker losses each step.

        Returns (losses dict including "total", 1).
        """
        num_worker = len(self.model.regression_workers) + len(self.model.classification_workers)
        loss_tmp = torch.zeros(num_worker).to(device)
        idx = 0
        frontend_optim.zero_grad()
        losses = {}
        for worker in self.model.classification_workers:
            cls_optim[worker.name].zero_grad()
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            loss_tmp[idx] = loss
            idx += 1
        for worker in self.model.regression_workers:
            regr_optim[worker.name].zero_grad()
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            loss_tmp[idx] = loss
            idx += 1
        # generate mask
        # NOTE(review): the 3..4 kept-workers bounds assume ~7 workers total.
        mask = np.random.randint(2, size=num_worker)
        while np.sum(mask) > 4 or np.sum(mask) < 3:
            mask = np.random.randint(2, size=num_worker)
        mask = torch.from_numpy(mask).type(torch.FloatTensor).to(device)
        #sum up losses (masked-out workers contribute zero gradient)
        tot_loss = torch.sum(mask * loss_tmp, dim=0)
        tot_loss.backward()
        for _, optim in cls_optim.items():
            optim.step()
        for _, optim in regr_optim.items():
            optim.step()
        frontend_optim.step()
        losses["total"] = tot_loss
        return losses, 1
def _drop_out(self, preds, label, cls_optim, regr_optim, frontend_optim, dropout_rate, device):
loss_tmp = torch.zeros(7, requires_grad=True).to(device)
idx = 0
assert dropout_rate is not None
re_mask = np.random.binomial(1, dropout_rate, size=len(self.model.regression_workers))
cls_mask = np.random.binomial(1, dropout_rate, size=len(self.model.classification_workers))
frontend_optim.zero_grad()
losses = {}
for i, worker in enumerate(self.model.classification_workers):
cls_optim[worker.name].zero_grad()
if cls_mask[i] == 1:
loss = worker.loss(preds[worker.name], label[worker.name])
else:
loss = 0
losses[worker.name] = loss
loss_tmp[idx] = loss
idx += 1
for worker in self.model.regression_workers:
regr_optim[worker.name].zero_grad()
if re_mask[i] == 1:
loss = worker.loss(preds[worker.name], label[worker.name])
else:
loss = 0
losses[worker.name] = loss
loss_tmp[idx] = loss
idx += 1
#sum up losses
tot_loss = torch.sum(loss_tmp, dim=0)
tot_loss.backward()
for _, optim in cls_optim.items():
optim.step()
for _, optim in regr_optim.items():
optim.step()
frontend_optim.step()
losses["total"] = tot_loss
return losses, 1
    def _hyper_volume(self, preds, label, cls_optim, regr_optim, frontend_optim, delta ,device):
        """Hyper-volume style step: the loss is the plain sum, but the returned
        per-worker weights are 1 / (eta - loss), eta = delta * max(loss).

        Returns (losses dict including "total", alpha weights tensor).
        """
        assert delta > 1
        num_worker = len(self.model.regression_workers) + len(self.model.classification_workers)
        loss_tmp = torch.zeros(num_worker).to(device)
        idx = 0
        frontend_optim.zero_grad()
        losses = {}
        for worker in self.model.classification_workers:
            cls_optim[worker.name].zero_grad()
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            loss_tmp[idx] = loss
            idx += 1
        for worker in self.model.regression_workers:
            regr_optim[worker.name].zero_grad()
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            loss_tmp[idx] = loss
            idx += 1
        #sum up losses
        # eta is the reference point: delta times the current worst loss.
        eta = delta * torch.max(loss_tmp.detach()).item()
        hyper_votolume = torch.sum(loss_tmp)
        # NOTE(review): alpha is computed from the non-detached losses, so it
        # carries the autograd graph — confirm callers expect that.
        alpha = 1 / (eta - loss_tmp + 1e-6)
        hyper_votolume.backward()
        for _, optim in cls_optim.items():
            optim.step()
        for _, optim in regr_optim.items():
            optim.step()
        frontend_optim.step()
        losses["total"] = hyper_votolume
        return losses, alpha
    def _softmax(self, preds, label, cls_optim, regr_optim, frontend_optim, temperture, device):
        """Softmax weighting over losses; currently back-props the UNWEIGHTED
        sum (the weighted lines are commented out) and only returns alpha.

        Returns (losses dict including "total", softmax weights).
        """
        assert temperture > 0
        num_worker = len(self.model.regression_workers) + len(self.model.classification_workers)
        loss_tmp = []
        idx = 0
        frontend_optim.zero_grad()
        losses = {}
        for worker in self.model.classification_workers:
            cls_optim[worker.name].zero_grad()
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            loss_tmp.append(loss.item() * temperture)
            # idx += 1
        for worker in self.model.regression_workers:
            regr_optim[worker.name].zero_grad()
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            loss_tmp.append(loss.item() * temperture)
            # idx += 1
        # Weights over detached loss values (no gradient through alpha).
        alpha = self._stable_softmax(loss_tmp)
        tot_loss = 0
        for worker in self.model.classification_workers:
            # tot_loss += alpha[idx] * losses[worker.name]
            tot_loss += losses[worker.name]
            idx += 1
        for worker in self.model.regression_workers:
            # tot_loss += alpha[idx] * losses[worker.name]
            tot_loss += losses[worker.name]
            idx += 1
        # tot_loss = torch.sum(alpha.detach() * loss_vec)
        tot_loss.backward()
        for _, optim in cls_optim.items():
            optim.step()
        for _, optim in regr_optim.items():
            optim.step()
        frontend_optim.step()
        losses["total"] = tot_loss
        return losses, alpha
    def _online_adaptive(self, preds, label, cls_optim, regr_optim, frontend_optim, temperture, alpha, device):
        """Adaptive weighting: keeps an EMA (Q) of per-worker loss improvement
        and exposes softmax(temperture * Q) as self.pi. The back-propagated
        loss itself is the unweighted sum.

        Returns (losses dict including "total", self.pi).
        """
        assert temperture > 0 and alpha > 0
        # device = preds['chunk'].device
        num_worker = len(self.model.regression_workers) + len(self.model.classification_workers)
        loss_tmp = torch.zeros(num_worker).to(device)
        idx = 0
        frontend_optim.zero_grad()
        losses = {}
        for worker in self.model.classification_workers:
            cls_optim[worker.name].zero_grad()
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            loss_tmp[idx] = loss
            idx += 1
        for worker in self.model.regression_workers:
            regr_optim[worker.name].zero_grad()
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            loss_tmp[idx] = loss
            idx += 1
        # Reward: how much each worker's loss improved since the last step.
        R_t = self.last_loss.to(device) - loss_tmp
        with torch.no_grad():
            # Exponential moving average of the rewards.
            Q_t = alpha * R_t.detach() + (1 - alpha) * self.Q.to(device)
            self.pi = F.softmax(temperture * Q_t, dim=0)
        tot_loss = torch.sum(loss_tmp)
        tot_loss.backward()
        self.last_loss = loss_tmp.detach()
        self.Q = Q_t.detach()
        for _, optim in cls_optim.items():
            optim.step()
        for _, optim in regr_optim.items():
            optim.step()
        frontend_optim.step()
        losses["total"] = tot_loss
        return losses, self.pi
    def _MGDA(self, preds, label, cls_optim, regr_optim, frontend_optim, batch, device):
        """One optimization step in the style of MGDA (multiple-gradient descent).

        Runs one forward/backward pass per worker to collect its normalized
        frontend gradient, solves for the min-norm convex combination of those
        gradients, then re-runs the model once and steps all optimizers on the
        plain (unweighted) loss sum.

        :param preds: dict of predictions. NOTE(review): immediately overwritten
            by the per-worker ``self.model.forward`` calls below, so the passed
            value is effectively unused — confirm whether the parameter can go.
        :param label: dict of targets keyed by worker name
        :param cls_optim: dict of optimizers for classification workers
        :param regr_optim: dict of optimizers for regression workers
        :param frontend_optim: optimizer for the shared frontend
        :param batch: raw batch, re-fed through the model for each worker
        :param device: torch device
        :return: (losses dict including "total", solver solution ``alpha``)

        NOTE(review): as the commented-out lines suggest, the solver weights
        ``sol`` are computed but NOT applied — every task contributes with
        weight 1 to ``tot_loss``.
        """
        frontend_optim.zero_grad()
        losses = {}
        grads = {}
        # One forward/backward per classification worker to get its frontend gradient.
        for worker in self.model.classification_workers:
            self.model.zero_grad()
            h, chunk, preds, labels = self.model.forward(batch, 1, device)
            # print(worker.name)
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            grads[worker.name] = self._get_gen_grads(loss)
        # Same for regression workers.
        for worker in self.model.regression_workers:
            self.model.zero_grad()
            h, chunk, preds, labels = self.model.forward(batch, 1, device)
            # print(worker.name)
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            grads[worker.name] = self._get_gen_grads(loss)
        # Min-norm element in the convex hull of the per-task gradients.
        sol, min_norm = MinNormSolver.find_min_norm_element([grads[worker].unsqueeze(0) for worker, _ in grads.items()])
        alpha = sol
        tot_loss = 0
        # idx = 0
        # Fresh forward pass for the actual update step.
        self.model.zero_grad()
        h, chunk, preds, labels = self.model.forward(batch, 1, device)
        for worker in self.model.classification_workers:
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            tot_loss += loss
            # tot_loss += sol[idx] * loss
        for worker in self.model.regression_workers:
            loss = worker.loss(preds[worker.name], label[worker.name])
            losses[worker.name] = loss
            tot_loss += loss
            # tot_loss += sol[idx] * loss
        tot_loss.backward()
        for _, optim in cls_optim.items():
            optim.step()
        for _, optim in regr_optim.items():
            optim.step()
        frontend_optim.step()
        losses["total"] = tot_loss
        return losses, alpha
def _get_gen_grads(self, loss_):
# grads = torch.autograd.grad(outputs=loss_, inputs=self.model.frontend.parameters())
self.model.frontend.zero_grad()
loss_.backward()
# grads = self.model.frontend.grad()
for params in self.model.frontend.parameters():
try:
grads_ = torch.cat([grads_, params.grad.view(-1)], 0)
except:
grads_ = params.grad.view(-1)
return grads_ / grads_.norm()
def _stable_softmax(self, x):
z = np.asarray(x, np.float) - np.max(x)
numerator = np.exp(z)
denominator = np.sum(numerator)
softmax = numerator / denominator
return softmax
|
1646023
|
from flask import render_template
from . import api_search
from .forms import ApiSearchForm
from ..tools.api_tools import ApiGetter
@api_search.route('/api_search', methods=['GET', 'POST'])
def index():
    """Render the API-search page; on a valid submit, show Twitch streams
    matching the query entered in the search bar."""
    form = ApiSearchForm()
    getter = ApiGetter()
    if not form.validate_on_submit():
        # GET (or invalid POST): just show the empty search form.
        return render_template('api_search/api_search.html', form=form)
    api_response = getter.get_twitch(form.search_bar.data)
    return render_template('api_search/api_search.html',
                           form=form,
                           search_results=api_response['streams'])
|
1646033
|
import os
import json
import re
import numpy as np
from shutil import copyfile
from keras.optimizers import SGD
import keras.backend as K
from AlphaGo.ai import ProbabilisticPolicyPlayer
import AlphaGo.go as go
from AlphaGo.models.policy import CNNPolicy
from AlphaGo.util import flatten_idx
def _make_training_pair(st, mv, preprocessor):
    """Return (state tensor, one-hot move tensor) for a single (state, move) pair."""
    state_tensor = preprocessor.state_to_tensor(st)
    # One-hot encode the move over the flattened board positions.
    move_tensor = np.zeros((1, st.size * st.size))
    move_tensor[0, flatten_idx(mv, st.size)] = 1
    return (state_tensor, move_tensor)
def run_n_games(optimizer, learner, opponent, num_games, mock_states=None):
    '''Run num_games games to completion, keeping track of each position and move of the learner.
    (Note: learning cannot happen until all games have completed)

    optimizer:   Keras optimizer whose lr sign is flipped to "unlearn" lost games.
    learner:     player whose policy network is trained in place.
    opponent:    fixed player supplying the other side's moves.
    num_games:   number of games to play in lockstep.
    mock_states: test hook; when truthy, replaces the freshly created GameStates.
                 (FIX: default changed from the mutable `[]` to None — same truthiness
                 behaviour, avoids the shared-mutable-default pitfall.)

    Returns the fraction of games won by the learner.
    '''
    board_size = learner.policy.model.input_shape[-1]
    states = [go.GameState(size=board_size) for _ in range(num_games)]
    learner_net = learner.policy.model
    # Allowing injection of a mock state object for testing purposes
    if mock_states:
        states = mock_states
    # Create one list of features (aka state tensors) and one of moves for each game being played.
    state_tensors = [[] for _ in range(num_games)]
    move_tensors = [[] for _ in range(num_games)]
    # List of booleans indicating whether the 'learner' player won.
    learner_won = [None] * num_games
    # Start all odd games with moves by 'opponent'. Even games will have 'learner' black.
    learner_color = [go.BLACK if i % 2 == 0 else go.WHITE for i in range(num_games)]
    odd_states = states[1::2]
    moves = opponent.get_moves(odd_states)
    for st, mv in zip(odd_states, moves):
        st.do_move(mv)
    current = learner
    other = opponent
    idxs_to_unfinished_states = {i: states[i] for i in range(num_games)}
    while len(idxs_to_unfinished_states) > 0:
        # Get next moves by current player for all unfinished states.
        moves = current.get_moves(list(idxs_to_unfinished_states.values()))
        just_finished = []
        # Do each move to each state in order.
        # FIX: .items() instead of the Python-2-only .iteritems() so this also runs on Python 3.
        for (idx, state), mv in zip(idxs_to_unfinished_states.items(), moves):
            # Order is important here. We must get the training pair on the unmodified state before
            # updating it with do_move.
            is_learnable = current is learner and mv is not go.PASS_MOVE
            if is_learnable:
                (st_tensor, mv_tensor) = _make_training_pair(state, mv, learner.policy.preprocessor)
                state_tensors[idx].append(st_tensor)
                move_tensors[idx].append(mv_tensor)
            state.do_move(mv)
            if state.is_end_of_game:
                learner_won[idx] = state.get_winner() == learner_color[idx]
                just_finished.append(idx)
        # Remove games that have finished from dict.
        for idx in just_finished:
            del idxs_to_unfinished_states[idx]
        # Swap 'current' and 'other' for next turn.
        current, other = other, current
    # Train on each game's results, setting the learning rate negative to 'unlearn' positions from
    # games where the learner lost.
    for (st_tensor, mv_tensor, won) in zip(state_tensors, move_tensors, learner_won):
        # FIX: a game in which the learner never made a learnable move (e.g. only
        # passes) would make np.concatenate raise on an empty list — skip it.
        if not st_tensor:
            continue
        optimizer.lr = K.abs(optimizer.lr) * (+1 if won else -1)
        learner_net.train_on_batch(np.concatenate(st_tensor, axis=0),
                                   np.concatenate(mv_tensor, axis=0))
    # Return the win ratio.
    wins = sum(state.get_winner() == pc for (state, pc) in zip(states, learner_color))
    return float(wins) / num_games
def log_loss(y_true, y_pred):
    '''Keras 'loss' function for the REINFORCE algorithm, where y_true is the action that was
    taken, and updates with the negative gradient will make that action more likely. We use the
    negative gradient because keras expects training data to minimize a loss function.
    '''
    # Clip predictions away from 0/1 so the log stays finite.
    clipped = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
    return -y_true * K.log(clipped)
def run_training(cmd_line_args=None):
    """Run REINFORCE self-play training (second phase of the pipeline).

    cmd_line_args: optional list of CLI tokens; when None, sys.argv is parsed.
    Side effects: creates/updates weight files and metadata.json inside
    out_directory; loops for --iterations batches of self-play games.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Perform reinforcement learning to improve given policy network. Second phase of pipeline.') # noqa: E501
    parser.add_argument("model_json", help="Path to policy model JSON.")
    parser.add_argument("initial_weights", help="Path to HDF5 file with inital weights (i.e. result of supervised training).") # noqa: E501
    parser.add_argument("out_directory", help="Path to folder where the model params and metadata will be saved after each epoch.") # noqa: E501
    parser.add_argument("--learning-rate", help="Keras learning rate (Default: 0.001)", type=float, default=0.001) # noqa: E501
    parser.add_argument("--policy-temp", help="Distribution temperature of players using policies (Default: 0.67)", type=float, default=0.67) # noqa: E501
    parser.add_argument("--save-every", help="Save policy as a new opponent every n batches (Default: 500)", type=int, default=500) # noqa: E501
    parser.add_argument("--record-every", help="Save learner's weights every n batches (Default: 1)", type=int, default=1) # noqa: E501
    parser.add_argument("--game-batch", help="Number of games per mini-batch (Default: 20)", type=int, default=20) # noqa: E501
    parser.add_argument("--move-limit", help="Maximum number of moves per game", type=int, default=500) # noqa: E501
    parser.add_argument("--iterations", help="Number of training batches/iterations (Default: 10000)", type=int, default=10000) # noqa: E501
    parser.add_argument("--resume", help="Load latest weights in out_directory and resume", default=False, action="store_true") # noqa: E501
    parser.add_argument("--verbose", "-v", help="Turn on verbose mode", default=False, action="store_true") # noqa: E501
    # Baseline function (TODO) default lambda state: 0 (receives either file
    # paths to JSON and weights or None, in which case it uses default baseline 0)
    if cmd_line_args is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(cmd_line_args)
    ZEROTH_FILE = "weights.00000.hdf5"
    if args.resume:
        if not os.path.exists(os.path.join(args.out_directory, "metadata.json")):
            raise ValueError("Cannot resume without existing output directory")
    if not os.path.exists(args.out_directory):
        if args.verbose:
            print("creating output directory {}".format(args.out_directory))
        os.makedirs(args.out_directory)
    if not args.resume:
        # make a copy of weights file, "weights.00000.hdf5" in the output directory
        copyfile(args.initial_weights, os.path.join(args.out_directory, ZEROTH_FILE))
        if args.verbose:
            print("copied {} to {}".format(args.initial_weights,
                  os.path.join(args.out_directory, ZEROTH_FILE)))
        player_weights = ZEROTH_FILE
        iter_start = 1
    else:
        # if resuming, we expect initial_weights to be just a
        # "weights.#####.hdf5" file, not a full path
        if not re.match(r"weights\.\d{5}\.hdf5", args.initial_weights):
            raise ValueError("Expected to resume from weights file with name 'weights.#####.hdf5'")
        args.initial_weights = os.path.join(args.out_directory,
                                            os.path.basename(args.initial_weights))
        if not os.path.exists(args.initial_weights):
            raise ValueError("Cannot resume; weights {} do not exist".format(args.initial_weights))
        elif args.verbose:
            print("Resuming with weights {}".format(args.initial_weights))
        player_weights = os.path.basename(args.initial_weights)
        # "weights.#####.hdf5" -> resume counting from ##### + 1
        iter_start = 1 + int(player_weights[8:13])
    # Set initial conditions
    policy = CNNPolicy.load_model(args.model_json)
    policy.model.load_weights(args.initial_weights)
    player = ProbabilisticPolicyPlayer(policy, temperature=args.policy_temp,
                                       move_limit=args.move_limit)
    # different opponents come from simply changing the weights of 'opponent.policy.model'. That
    # is, only 'opp_policy' needs to be changed, and 'opponent' will change.
    opp_policy = CNNPolicy.load_model(args.model_json)
    opponent = ProbabilisticPolicyPlayer(opp_policy, temperature=args.policy_temp,
                                         move_limit=args.move_limit)
    if args.verbose:
        print("created player and opponent with temperature {}".format(args.policy_temp))
    if not args.resume:
        metadata = {
            "model_file": args.model_json,
            "init_weights": args.initial_weights,
            "learning_rate": args.learning_rate,
            "temperature": args.policy_temp,
            "game_batch": args.game_batch,
            "opponents": [ZEROTH_FILE], # which weights from which to sample an opponent each batch
            "win_ratio": {} # map from player to tuple of (opponent, win ratio) Useful for
                            # validating in lieu of 'accuracy/loss'
        }
    else:
        with open(os.path.join(args.out_directory, "metadata.json"), "r") as f:
            metadata = json.load(f)
    # Append args of current run to history of full command args.
    metadata["cmd_line_args"] = metadata.get("cmd_line_args", [])
    metadata["cmd_line_args"].append(vars(args))
    def save_metadata():
        # Dump current metadata (opponent pool, win ratios, CLI history) to disk.
        with open(os.path.join(args.out_directory, "metadata.json"), "w") as f:
            json.dump(metadata, f, sort_keys=True, indent=2)
    optimizer = SGD(lr=args.learning_rate)
    player.policy.model.compile(loss=log_loss, optimizer=optimizer)
    for i_iter in range(iter_start, args.iterations + 1):
        # Note that player_weights will only be saved as a file every args.record_every iterations.
        # Regardless, player_weights enters into the metadata to keep track of the win ratio over
        # time.
        player_weights = "weights.%05d.hdf5" % i_iter
        # Randomly choose opponent from pool (possibly self), and playing
        # game_batch games against them.
        opp_weights = np.random.choice(metadata["opponents"])
        opp_path = os.path.join(args.out_directory, opp_weights)
        # Load new weights into opponent's network, but keep the same opponent object.
        opponent.policy.model.load_weights(opp_path)
        if args.verbose:
            print("Batch {}\tsampled opponent is {}".format(i_iter, opp_weights))
        # Run games (and learn from results). Keep track of the win ratio vs each opponent over
        # time.
        win_ratio = run_n_games(optimizer, player, opponent, args.game_batch)
        metadata["win_ratio"][player_weights] = (opp_weights, win_ratio)
        # Save intermediate models.
        if i_iter % args.record_every == 0:
            player.policy.model.save_weights(os.path.join(args.out_directory, player_weights))
        # Add player to batch of oppenents once in a while.
        if i_iter % args.save_every == 0:
            metadata["opponents"].append(player_weights)
        save_metadata()
if __name__ == '__main__':
    # Entry point: run the REINFORCE training loop with CLI-provided args.
    run_training()
|
1646049
|
import discord
from discord.ext import commands
import asyncio
import json
from datetime import datetime
from random import choice
from aux.misc import round_down
from aux.stats import Stats
class InvalidNumberPlayers(Exception):
    """Raised by Battle when fewer than two non-bot players are available."""
    pass
class Warrior():
    """Wraps one guild member taking part in a battle and tracks their kills."""

    def __init__(self, member):
        self.member = member
        self.kills = 0

    def add_Kill(self):
        """Credit this warrior with one more kill."""
        self.kills = self.kills + 1

    def get_kills(self):
        """Number of kills credited so far."""
        return self.kills

    def get_name(self):
        """Display name of the wrapped member."""
        return self.member.display_name

    def get_id(self):
        """Discord id of the wrapped member."""
        return self.member.id
class Battle():
    """State machine for one battle-royale game: tracks alive/dead warriors,
    simulates day-by-day events and renders them as Discord embeds."""

    def __init__(self, listReactions, embed_color, members):
        """listReactions: event templates (dicts with "action"/"time"/"description").
        embed_color: color used for every embed produced by this battle.
        members: candidate discord members; bots are excluded.

        Raises InvalidNumberPlayers when fewer than two humans remain.
        """
        self.listReactions = listReactions
        self.embed_color = embed_color
        self.alive = set()
        self.dead = set()
        for member in members:
            if not member.bot:
                self.alive.add(Warrior(member))
        if len(self.alive) < 2:
            raise InvalidNumberPlayers()
        self.day = 1
        # First day is shorter: only the hours left until midnight, rounded
        # down in half-hour steps.
        now = datetime.now()
        self.time = 24 - now.hour
        if now.minute > 30:
            self.time -= 1
        elif now.minute > 0:
            self.time -= 0.5

    def initialReport(self):
        """Embed announcing the battle and the participant count."""
        embed = discord.Embed(
            title='Battle Royale no DI',
            description=f'Result of the battle\nParticipants: {len(self.alive)}',
            color=self.embed_color)
        embed.set_thumbnail(
            url="https://mbtskoudsalg.com/images/pubg-lvl-3-helmet-png-7.png")
        return embed

    def get_winner(self):
        """Return a surviving warrior without removing them from the alive set
        (the winner once only one remains)."""
        winner = self.alive.pop()
        self.alive.add(winner)
        return winner

    def victoryEmbed(self):
        """Embed announcing the winner and their kill count."""
        winner = self.get_winner()
        embed = discord.Embed(
            title='Winner',
            description=winner.get_name(),
            color=self.embed_color)
        embed.set_footer(text=f"Kills: {winner.get_kills()}")
        return embed

    def allReports(self):
        """Yield the announcement embed, one embed per simulated day, and
        finally the victory embed."""
        yield self.initialReport()
        while len(self.alive) > 1:
            yield self.dailyReportEmbed()
            self.time = 24
            self.day += 1
        yield self.victoryEmbed()

    def dailyReportEmbed(self):
        """Embed describing one day's events."""
        result = self.dailyReport()
        if not result:
            # BUG FIX: this fallback must stay a *list* — the original assigned
            # a bare string, and '\n'.join(str) below put every character of
            # "Today nothing happened" on its own line.
            result = ["Today nothing happened"]
        embed = discord.Embed(
            title=f'DAY {self.day}',
            description='\n'.join(result),
            color=self.embed_color)
        return embed

    def dailyReport(self):
        """Simulate events until the day's hours run out or one warrior is left.

        Returns a list of '**HH:MMh** description' strings.
        """
        fight_trailer = []
        while len(self.alive) > 1 and self.time > 0:
            match = choice(self.listReactions)
            if self.time - match["time"] <= 0:
                break
            elif match["action"] == 0:  # kill: p1 dies, p2 gets the credit
                p1 = choice(tuple(self.alive))
                self.alive.remove(p1)
                self.dead.add(p1)
                p2 = choice(tuple(self.alive))
                p2.add_Kill()
                fight_result = match["description"].format(p1.get_name(), p2.get_name())
            elif match["action"] == 1:  # die: p1 dies with no killer
                p1 = choice(tuple(self.alive))
                self.alive.remove(p1)
                self.dead.add(p1)
                fight_result = match["description"].format(p1.get_name())
            elif match["action"] == 2:  # event: something happens to p1
                p1 = choice(tuple(self.alive))
                fight_result = match["description"].format(p1.get_name())
            elif match["action"] == 3:  # meet: p1 and p2 cross paths
                p1 = choice(tuple(self.alive))
                p2 = choice(tuple(self.alive))
                fight_result = match["description"].format(p1.get_name(), p2.get_name())
            self.time -= match["time"]
            fight_trailer.append("**" + self.displayTime() + "** " + fight_result)
        return fight_trailer

    def displayTime(self):
        """Convert hours-remaining-until-midnight into an 'HH:MMh' time of day."""
        time = 24 - self.time
        minutes = time * 60
        hours, minutes = divmod(minutes, 60)
        hours = int(hours)
        minutes = int(minutes)
        if hours < 10:
            hours = "0{}".format(hours)
        if minutes < 10:
            minutes = "0{}".format(minutes)
        return "{0}:{1}h".format(hours, minutes)

    def updateStats(self, stats):
        """Record a death (plus kills) for the fallen, a win for the survivors,
        then persist the stats."""
        for warrior in self.dead:
            stats.update_kills(warrior.get_id(), 1, warrior.get_kills(), 0)
        for warrior in self.alive:
            stats.update_kills(warrior.get_id(), 0, warrior.get_kills(), 1)
        stats.save_stats()
class BattleRoyale(commands.Cog):
    """BattleRoyale in the server"""

    def __init__(self, bot):
        self.bot = bot
        # Index of an action in this list == the numeric "action" code in the JSON.
        self.listAction = ["kill", "die", "event", "meet"]
        # FIX: close the reactions file; the original json.load(open(...)) leaked
        # the file handle.
        with open(bot.BATTLEROYALE_PATH, 'r') as reactions_file:
            self.listReactions = json.load(reactions_file)

    # NOTE: command callbacks deliberately use '#' comments, not docstrings —
    # discord.py would surface a docstring as the command's help text.

    @commands.command(name='battleroyaleFull',
                      description="create server wide battle royale [ADMIN ONLY]\n\nWinner gets 100 coins.",
                      brief="server wide battle royale",
                      aliases=['brF'])
    @commands.has_permissions(administrator=True)
    async def battleroyaleFull(self, ctx):
        # Battle with every member of the guild; winner is paid 100 coins.
        await ctx.message.delete()
        await sendChallenge(self, ctx)
        br = Battle(self.listReactions, self.bot.embed_color, ctx.message.guild.members)
        for embed in br.allReports():
            await ctx.send(embed=embed)
        self.bot.stats.give_cash(br.get_winner().get_id(), 100)
        br.updateStats(self.bot.stats)

    @commands.command(name='battleroyaleOnline',
                      description="create battle royale with online users[ADMIN ONLY]\n\nWinner gets 100 coins.",
                      brief="online battle royale",
                      aliases=['brO'])
    @commands.has_permissions(administrator=True)
    async def battleroyaleOnline(self, ctx):
        # Battle with the members listed in the guild widget (i.e. online users).
        await ctx.message.delete()
        await sendChallenge(self, ctx)
        wid = await ctx.message.guild.widget()
        br = Battle(
            self.listReactions,
            self.bot.embed_color,
            wid.members)
        for embed in br.allReports():
            await ctx.send(embed=embed)
        # NOTE(review): the command description promises 100 coins but only 10
        # are awarded here — confirm which is intended before changing.
        self.bot.stats.give_cash(br.get_winner().get_id(), 10)
        br.updateStats(self.bot.stats)

    @commands.command(name='battleroyale',
                      description="create server battle royale",
                      brief="server battle royale",
                      aliases=['br'])
    @commands.is_nsfw()
    async def battleroyale(self, ctx):
        # Open challenge: collect everyone who reacted within ~30 s, then battle.
        await ctx.message.delete()
        msg = await sendChallenge(self, ctx)
        async with ctx.message.channel.typing():
            await asyncio.sleep(20)
        # Re-fetch so msg.reactions reflects reactions added during the wait.
        msg = await msg.channel.fetch_message(msg.id)
        members = set()
        for reaction in msg.reactions:
            members.update(await reaction.users().flatten())
        try:
            br = Battle(self.listReactions, self.bot.embed_color, members)
        except InvalidNumberPlayers:
            await ctx.send("Not enough players for a Battle Royale")
            return
        for embed in br.allReports():
            await ctx.send(embed=embed)
        br.updateStats(self.bot.stats)

    @commands.command(name='battleroyaleKDR',
                      description="battleroyale Kill/Death Ratio",
                      brief="battleroyale Kill/Death Ratio",
                      aliases=['brKDR', 'KDR'])
    async def battleroyaleKDR(self, ctx, member : discord.Member = None):
        # One member given: show just their KDR.
        if member:
            k, d = self.bot.stats.get_kdr(member.id)
            embed = discord.Embed(
                title='Battleroyale no DI',
                description="KDR Leaderboard",
                color=self.bot.embed_color)
            embed.add_field(
                name=member.display_name,
                value="KDR: {0}/{1}".format(k, d),
                inline=False)
            await ctx.send(embed=embed)
            return
        # No member: build the top-3 leaderboard over all recorded users.
        arrayKDR = []
        for user_id in self.bot.stats.get_all_users():
            # BUG FIX: get_kdr lives on bot.stats (see the single-member branch
            # above); the original called self.bot.get_kdr and would raise
            # AttributeError.
            k, d = self.bot.stats.get_kdr(user_id)
            kdr = {
                "id": user_id,
                "kills": k,
                "death": d}
            arrayKDR.append(kdr)
        def compare(kdr):
            # Treat 0 deaths as ratio 0 to avoid ZeroDivisionError.
            if kdr["death"] == 0:
                return 0
            return kdr["kills"] / kdr["death"]
        arrayKDR.sort(key=compare, reverse=True)
        embed = discord.Embed(
            title='Battleroyale no DI',
            description="KDR Leaderboard",
            color=self.bot.embed_color)
        for i in range(3):
            win = arrayKDR[i]
            member = ctx.message.guild.get_member(win["id"])
            embed.add_field(
                name=f"{i+1}. {member.display_name}",
                value=f"KDR: {win['kills']}/{win['death']}",
                inline=False)
        embed.set_thumbnail(
            url="https://mbtskoudsalg.com/images/pubg-lvl-3-helmet-png-7.png")
        await ctx.send(embed=embed)

    @commands.command(name='addBattleroyale',
                      brief="add a Battleroyale event",
                      aliases=['addBr'])
    @commands.is_owner()
    async def addBattleroyale(self, ctx, action, time : float,*, description = None):
        """add a Battleroyale event to the json [OWNER ONLY]
        __**action**__:
        * **kill** A was killed by B
        * **die** A died
        * **event** something happened to A
        * **meet"** A met B
        __**time**__:
        * t <= 12"""
        action = action.lower()
        # Round the duration down to the nearest half hour (0.5 h granularity).
        time = round(time, 1)
        time = round_down(time * 10, 5)
        time = time / 10
        if not description:
            await ctx.send('Invalid description')
            return
        if action not in self.listAction:
            await ctx.send('Invalid action')
            return
        if time <= 0 or time > 12:
            await ctx.send('Invalid time')
            return
        event = {
            "action":self.listAction.index(action),
            "time":time,
            "description":description}
        self.listReactions.append(event)
        updateListActions(self)
        await ctx.send("**action:**`{0}`\n**time:**`{1}`h\n**description:**`{2}`".format(action, time, description))

    @commands.command(name='deleteBattleroyale',
                      description="delete the last Battleroyale event on the json [OWNER ONLY]",
                      brief="remove last Battleroyale event",
                      aliases=['removeBattleroyale', 'removeBr', 'deleteBr'])
    @commands.is_owner()
    async def deleteBattleroyale(self, ctx):
        # Pop the most recently added event and persist the change.
        deleted = self.listReactions.pop()
        updateListActions(self)
        await ctx.send(
            "__**DELETED**__\n**action:**`{0}`\n**time:**`{1}`h\n**description:**`{2}`"
            .format(
                deleted["action"],
                deleted["time"],
                deleted["description"]))
async def sendChallenge(self, ctx):
    """Post the battle-royale challenge embed and seed it with the gun reaction.

    Module-level helper invoked as ``sendChallenge(self, ctx)`` from the cog's
    commands, so ``self`` is the BattleRoyale cog (used for bot.embed_color).
    Returns the message re-fetched from the channel.
    """
    embed = discord.Embed(
        title = 'Battle Royale no DI',
        description='{} started a battle royale'.format(ctx.message.author.mention),
        color=self.bot.embed_color)
    embed.set_thumbnail(
        url="https://mbtskoudsalg.com/images/pubg-lvl-3-helmet-png-7.png")
    embed.add_field(
        name='Pick your weapon below if you wish to participate',
        value='(you have approximately 30 seconds)')
    msg = await ctx.send(embed=embed)
    # Seed the reaction people click to join.
    await msg.add_reaction('🔫')
    return await ctx.fetch_message(msg.id)
def updateListActions(self):
    """Persist the current reaction/event list to the bot's battle-royale JSON file."""
    target_path = self.bot.BATTLEROYALE_PATH
    with open(target_path, 'w') as out_file:
        json.dump(self.listReactions, out_file, indent=4)
def setup(bot):
    # discord.py extension entry point: register the BattleRoyale cog.
    bot.add_cog(BattleRoyale(bot))
|
1646075
|
import cv2
import numpy as np
import random

# Webcam smile detector: draws boxes around faces and smiles; 's' saves a
# snapshot under a random filename, 'q' quits.
# Haar cascade XML files must be present in the working directory.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
smile_cascade = cv2.CascadeClassifier('smile.xml')

cap = cv2.VideoCapture(0)
run = True
while run:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # Search for smiles only inside the detected face region.
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        smiles = smile_cascade.detectMultiScale(roi_gray, 1.05, 5)
        for (sx, sy, sw, sh) in smiles:
            cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (255, 255, 0), 2)
        if cv2.waitKey(1) & 0xff == ord('s'):
            # BUG FIX: random.randint requires two bounds; the original
            # random.randint(10000) raised TypeError on every save attempt.
            cv2.imwrite(f"{random.randint(0, 10000)}.png", img)
            break
    cv2.imshow('img_smile', img)
    if cv2.waitKey(2) & 0xff == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
|
1646093
|
import pytest
import unittest
from lenstronomy.PointSource.Types.base_ps import PSBase
from lenstronomy.LensModel.lens_model import LensModel
class TestPSBase(object):
    """Unit tests for lenstronomy's PSBase point-source type class."""
    def setup(self):
        # Fresh PSBase wired to an empty lens model, adaptive flags off.
        self.base = PSBase(lens_model=LensModel(lens_model_list=[]), fixed_magnification=False, additional_image=False)
        # Smoke-check the flags-on constructor path.
        PSBase(fixed_magnification=True, additional_image=True)
    def test_update_lens_model(self):
        # Clearing the lens model drops the solver...
        self.base.update_lens_model(lens_model_class=None)
        assert self.base._solver is None
        # ...and installing a real model creates one.
        base = PSBase()
        base.update_lens_model(lens_model_class=LensModel(lens_model_list=['SIS']))
        assert base._solver is not None
        PSBase(fixed_magnification=True, additional_image=True)
class TestUtil(object):
    """Tests for module-level helpers in base_ps."""
    def setup(self):
        pass
    def test_expand_to_array(self):
        from lenstronomy.PointSource.Types.base_ps import _expand_to_array
        # Scalar input -> repeated into an array of length num.
        array = 1
        num = 3
        array_out = _expand_to_array(array, num)
        assert len(array_out) == num
        # Shorter list -> padded up to num (padding value asserted to be 0).
        array = [1]
        num = 3
        array_out = _expand_to_array(array, num)
        assert len(array_out) == num
        assert array_out[1] == 0
        # Full-length list -> values preserved.
        array = [1, 1, 1]
        num = 3
        array_out = _expand_to_array(array, num)
        assert len(array_out) == num
        assert array_out[1] == 1
class TestRaise(unittest.TestCase):
    """Check that PSBase's accessors raise ValueError when not implemented."""
    def test_raise(self):
        base = PSBase()
        with self.assertRaises(ValueError):
            base.image_position(kwargs_ps=None)
        with self.assertRaises(ValueError):
            base.source_position(kwargs_ps=None)
        with self.assertRaises(ValueError):
            base.image_amplitude(kwargs_ps=None)
        with self.assertRaises(ValueError):
            base.source_amplitude(kwargs_ps=None)
if __name__ == '__main__':
    # Allow running this test module directly.
    pytest.main()
|
1646096
|
import json
import unittest
from services.service import Service
class TestService(unittest.TestCase):
    """Tests for Service.can_handle path matching against proxy listen_path."""
    # Plain prefix listen_path.
    CONFIG1 = """
    {
        "id": "sspr",
        "name": "SSPR service wrapper",
        "description": "",
        "version_data": {
            "versions": {
                "Default": {
                }
            }
        },
        "proxy": {
            "listen_path": "/test/",
            "target_url": "http://httpbin.org/"
        }
    }"""
    # listen_path containing a regex (digits only after /test/).
    CONFIG2 = """
    {
        "id": "sspr",
        "name": "SSPR service wrapper",
        "description": "",
        "version_data": {
            "versions": {
                "Default": {
                }
            }
        },
        "proxy": {
            "listen_path": "/test/[\\\\d]+",
            "target_url": "http://httpbin.org/"
        }
    }"""
    def test_can_handle(self):
        # Prefix path matches any suffix.
        service = Service(json.loads(self.CONFIG1))
        self.assertTrue(service.can_handle('/test/my'))
    def test_can_handle2(self):
        # Regex path matches digits but not letters.
        service = Service(json.loads(self.CONFIG2))
        self.assertFalse(service.can_handle('/test/as'))
        self.assertTrue(service.can_handle('/test/12'))
|
1646169
|
import komand
from .schema import GetScanConfigsInput, GetScanConfigsOutput
# Custom imports below
class GetScanConfigs(komand.Action):
    """Retrieve every scan configuration (profile) known to the OpenVAS server."""

    def __init__(self):
        super(GetScanConfigs, self).__init__(
            name="get_scan_configs",
            description="Get a list of all scan configurations in the OpenVAS server",
            input=GetScanConfigsInput(),
            output=GetScanConfigsOutput(),
        )

    def run(self, params={}):
        """Return the server's scan profiles as a list of single-entry dicts.

        `params` is unused; it is part of the komand Action interface.
        """
        # NOTE(review): get_profiles is accessed without parentheses — presumably
        # a property returning a dict (it is iterated as one below); confirm
        # against the OpenVAS client library.
        scan_profiles = self.connection.scanner.get_profiles
        # FIX: dict.iteritems() is Python-2-only; items() works on both 2 and 3.
        profile_list = [{key: value} for key, value in scan_profiles.items()]
        return {
            "list_scans": profile_list,
            "success": True,
            "message": "Successfully obtained list of scan configurations",
        }

    def test(self):
        # TODO: Implement test function
        return {}
|
1646237
|
from Stephanie.Modules.base_module import BaseModule
from Stephanie.local_libs.football_manager import FootballManager
class FootballModule(BaseModule):
modules = (
("FooballModule@GetAllCompetitions", ("all", "competitions")),
("FooballModule@GetEnglishLeague", ("english", "league")),
("FooballModule@GetEnglishSecondLeague", ("english", "second", "league")),
("FooballModule@GetGermanLeague", ("german", "league")),
("FooballModule@GetGermanSecondLeague", ("german", "second", "league")),
("FooballModule@GetFrenchLeague", ("french", "league")),
("FooballModule@GetFrenchSecondLeague", ("french", "second", "league")),
("FooballModule@GetSpanishLeague", ("spanish", "league")),
("FooballModule@GetSpanishSecondLeague", ("spanish", "second", "league")),
("FooballModule@GetGermanCup", ("german", "cup")),
("FooballModule@GetChampionsLeague", ("champions", "league")),
("FooballModule@GetNetherlandsLeague", ("netherlands", "league")),
("FooballModule@GetPortugueseLeague", ("portuguese", "league")),
("FooballModule@GetItalianLeague", ("italian", "league")),
("FooballModule@TeamHandle", ("team", "information")),
("FooballModule@GetNews", ("latest", "news")),
)
def __init__(self, *args):
super(FootballModule, self).__init__(*args)
self.API_KEY = self.get_configuration("api.football.org.key")
self.fm = FootballManager(self.API_KEY)
self.team_id = self.get_configuration("favorite_football_team_id")
self.team_name = self.get_configuration("favorite_football_team_name")
self.competition_name = self.get_configuration("favorite_football_competition_name")
def handle(self):
self.assistant.say("which competition would you like to know about? or maybe your team information? or perhaps some news?")
text = self.assistant.listen().decipher()
module_func = self.assistant.understand(self.modules, text)
getattr(self, module_func)()
def get_all_competitions(self):
return self.fm.get_all_competitions()
def get_english_league(self):
self.get_general_league(426)
def get_english_second_league(self):
self.get_general_league(427)
def get_german_league(self):
self.get_general_league(430)
def get_german_second_league(self):
self.get_general_league(431)
def get_spanish_league(self):
self.get_general_league(439)
def get_spanish_second_league(self):
self.get_general_league(437)
def get_french_league(self):
self.get_general_league(434)
def get_french_second_league(self):
self.get_general_league(435)
def get_netherlands_league(self):
self.get_general_league(433)
def get_portuguese_league(self):
self.get_general_league(436)
def get_italian_league(self):
self.get_general_league(438)
def get_champions_league(self):
self.get_general_league(440)
def get_general_league(self, competition_id):
active = False
modules = (
("FootballModule@LeagueSpecificNews", ("get", "news")),
("FootballModule@LeagueSpecificTable", ("get", "league", "table")),
("FootballModule@LeagueSpecificNext_fixtures", ("get", "next", "fixtures")),
("FootballModule@LeagueSpecificPrevious_fixtures", ("get", "previous", "fixtures")),
)
while not active:
response = self.fm.get_specific_competition(competition_id)
self.assistant.say("%s, would you like to know about it's latest news, league table or "
" maybe fixtures?" % response)
text = self.assistant.listen().decipher()
module_func = self.assistant.understand(modules, text)
active = getattr(self, module_func)()
return active
def league_specific_table(self):
response = self.fm.get_league_table()
self.assistant.say(response)
self.assistant.say("Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def league_specific_next_fixtures(self):
response = self.fm.get_fixtures()
self.assistant.say(response)
self.assistant.say("Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def league_specific_previous_fixtures(self):
response = self.fm.get_fixtures(prev=True)
self.assistant.say(response)
self.assistant.say("Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def team_handle(self):
active = False
modules = (
("FooballModule@TeamNews", ("get", "news")),
("FooballModule@TeamInjuryNews", ("get", "injury", "news")),
("FooballModule@TeamTransferTalk", ("get", "transfer", "talk")),
("FooballModule@TeamPlayers", ("get", "players")),
("FooballModule@TeamNextFixtures", ("get", "next", "fixtures")),
("FooballModule@TeamPreviousFixtures", ("get", "previous", "fixtures")),
)
while not active:
response = self.fm.get_team(self.team_id)
self.assistant.say("%s, would you like to know about it's latest news, transfer talks or "
" maybe fixtures?" % response)
text = self.assistant.listen().decipher()
module_func = self.assistant.understand(modules, text)
active = getattr(self, module_func)()
return active
def team_next_fixtures(self):
response = self.fm.get_team_fixtures()
self.assistant.say(response)
self.assistant.say("Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def team_previous_fixtures(self):
response = self.fm.get_team_fixtures(prev=True)
self.assistant.say(response)
self.assistant.say("Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def league_specific_news(self):
response = self.fm.get_competition_news(self.competition_name)
self.assistant.say(response)
self.assistant.say("For more information, check the sportsmole.co.uk, Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def team_specific_news(self):
response = self.fm.get_competition_news(self.competition_name)
self.assistant.say(response)
self.assistant.say("For more information, check the sportsmole.co.uk, Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def team_news(self):
response = self.fm.get_team_news(self.team_name)
self.assistant.say(response)
self.assistant.say("For more information, check the sportsmole.co.uk, Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
def team_injury_news(self):
response = self.fm.get_team_injury_news(self.team_name)
self.assistant.say(response)
self.assistant.say("For more information, check the sportsmole.co.uk, Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
    def team_transfer_talk(self):
        """Read out news, then ask whether the user wants anything else."""
        # NOTE(review): this calls the same get_team_news API as team_news,
        # not a transfer-specific endpoint. Presumably a dedicated
        # transfer-talk method was intended — confirm against the fm object.
        response = self.fm.get_team_news(self.team_name)
        self.assistant.say(response)
        self.assistant.say("For more information, check the sportsmole.co.uk, Any other information, you would like to know about? If yes then what would "
                           "it be?")
        text = self.assistant.listen().decipher()
        if text.upper() in self.NEGATIVE:
            self.assistant.say("Alright then blimey.")
            return "Alright then blimey."
        return False
def get_news(self):
response = self.fm.get_news()
self.assistant.say(response)
self.assistant.say("For more information, check the sportsmole.co.uk, Any other information, you would like to know about? If yes then what would "
"it be?")
text = self.assistant.listen().decipher()
if text.upper() in self.NEGATIVE:
self.assistant.say("Alright then blimey.")
return "Alright then blimey."
return False
|
1646271
|
import g
def init_scene():
    """Configure the opening shot: arched-bridge dataset, front camera,
    principal-stress major/minor lines with per-direction transfer functions.

    NOTE(review): set_duration(0) before a settings batch presumably makes
    it apply instantly, and set_duration(6) then holds/animates for 6s —
    confirm against the `g` replay-script API.
    """
    g.set_duration(0)
    g.set_dataset('Arched Bridge 3D (TVCG01, Fig 6, Fig 9)')
    #g.set_dataset('KittenHex (Vis2021, results)')
    g.set_camera_checkpoint('Front')
    g.set_rendering_algorithm_settings({
        'line_width': 0.0022,
        'band_width': 0.016,
        'depth_cue_strength': 0.8
    })
    g.set_dataset_settings({
        'attribute': "Principal Stress",
        'major_on': True,
        'medium_on': False,
        'minor_on': True,
        'major_lod': 1.0,
        'medium_lod': 1.0,
        'minor_lod': 1.0,
        'major_use_bands': False,
        'medium_use_bands': False,
        'minor_use_bands': False,
        'thick_bands': True,
        'smoothed_bands': True,
        'use_principal_stress_direction_index': True,
    })
    # One transfer function per principal-stress direction (major/medium/minor).
    g.set_transfer_functions(['qualitative-ocher.xml', 'qualitative-emerald.xml', 'qualitative-pale-lilac.xml'])
    g.set_duration(6)
def mode0():
    """Thin out the major-stress lines (LOD 1.0 -> 0.4), then hold 6s."""
    g.set_duration(0)
    g.set_dataset_settings({
        'major_lod': 0.4
    })
    g.set_duration(6)
def mode1():
    """Hide the major-stress lines entirely, then hold 6s."""
    g.set_duration(0)
    g.set_dataset_settings({
        'major_on': False
    })
    g.set_duration(6)
def mode2():
    """Thin out the minor-stress lines (LOD 1.0 -> 0.4), then hold 6s."""
    g.set_duration(0)
    g.set_dataset_settings({
        'minor_lod': 0.4
    })
    g.set_duration(6)
def mode3():
    """Bring the major-stress lines back at full LOD, then hold 6s."""
    g.set_duration(0)
    g.set_dataset_settings({
        'major_on': True,
        'major_lod': 1.0,
    })
    g.set_duration(6)
def mode4():
    """Switch the minor-stress lines to band rendering, then hold 6s."""
    g.set_duration(0)
    g.set_dataset_settings({
        'minor_use_bands': True
    })
    g.set_duration(6)
def replay():
    """Entry point for the replay system: run the whole scene sequence."""
    init_scene()
    mode0()
    mode1()
    mode2()
    mode3()
    mode4()
|
1646392
|
import sqlite3
from sqlite3 import Error
def storeMessage(time, distance, messageType):
    """Persist one telemetry sample to telemetry.db.

    Creates the `telemetry` table on first use. `time` is the primary key,
    so a duplicate timestamp raises an IntegrityError which — like every
    other sqlite3 error — is printed rather than propagated, preserving the
    original best-effort contract.
    """
    conn = sqlite3.connect('telemetry.db', check_same_thread=False)
    try:
        c = conn.cursor()
        c.execute('''CREATE TABLE IF NOT EXISTS telemetry
                        (time double PRIMARY KEY,
                        distance double NOT NULL,
                        messageType integer NOT NULL)''')
        # Parameterized insert — values are bound, never interpolated.
        c.execute('''INSERT INTO telemetry (time, distance, messageType) VALUES (?, ?, ?)''',
                  (time, distance, messageType))
        conn.commit()
    except Error as e:
        print(e)
    finally:
        # Fix: the original never closed the connection, leaking one handle
        # per call.
        conn.close()
|
1646432
|
class TextEditor:
    """Undo-able text editor: every operation pushes a full snapshot."""

    def __init__(self):
        # Stack of document snapshots; the top is the current text.
        self.text = []

    def append(self, string_to_append):
        """Append a string to the text, recording a new snapshot."""
        self.text.append(self.peek() + string_to_append)

    def delete(self, num_chars_to_delete):
        """Delete the last `num_chars_to_delete` characters.

        Fix: the original used peek()[:-n] unconditionally; for n == 0 that
        is s[:-0] == '' and wiped the entire text instead of deleting nothing.
        """
        current = self.peek()
        if num_chars_to_delete > 0:
            current = current[:-num_chars_to_delete]
        self.text.append(current)

    def char_at_position(self, k):
        """Return the k-th character of the current text (1-indexed)."""
        return self.peek()[k - 1]

    def undo(self):
        """Drop the latest snapshot, restoring the previous state."""
        self.text.pop()

    def peek(self):
        """Return the current text, or '' before any operation."""
        return self.text[-1] if self.text else ''
# Command loop: n lines of "<op> [arg]" —
#   1 s  append s, 2 k  delete k chars, 3 k  print k-th char, 4  undo.
n = int(input().strip())
text_editor = TextEditor()
for _ in range(n):
    op, *args = input().strip().split(' ')
    if op == '1':
        text_editor.append(args[0])
    elif op == '2':
        text_editor.delete(int(args[0]))
    elif op == '3':
        print(text_editor.char_at_position(int(args[0])))
    elif op == '4':
        text_editor.undo()
|
1646471
|
from .data import DiscoverMatrix, row_stack
from .grouptest import groupwise_discover_test
from .pairwise import pairwise_discover_test
__version__ = "0.9.4"
|
1646482
|
import logging
import re
import utils.data_format_keys as dfk
from evaluation.evaluation_utils import doi_normalize
from random import random
from utils.cr_utils import search, generate_unstructured
from time import sleep
class Matcher:
    """Matches reference strings (or structured references) to DOIs via
    Crossref search, skipping excluded DOIs and rejecting low-score hits."""

    def __init__(self, min_score, excluded_dois=None, journal_file=None):
        """min_score: minimum Crossref relevance score to accept a match.
        excluded_dois: DOIs never to match (normalized internally).
        journal_file: optional TSV mapping normalized journal abbreviation
        to full journal title, used for a second matching attempt.
        """
        # Fix: the original default `excluded_dois=[]` was a mutable default
        # argument shared across instances; use None as the sentinel.
        if excluded_dois is None:
            excluded_dois = []
        self.excluded_dois = [doi_normalize(d) for d in excluded_dois]
        self.min_score = min_score
        self.journal_abbrev = {}
        if journal_file is not None:
            with open(journal_file) as f:
                content = f.readlines()
                content = [x.strip().split('\t') for x in content]
                self.journal_abbrev = {l[0]: l[1] for l in content}

    def description(self):
        """Human-readable description of this matcher's configuration."""
        return 'Crossref search matcher with DOI exclusion ' + \
               'and minimum score {}'.format(self.min_score)

    def match(self, reference):
        """Match a reference (plain string or structured dict) to (DOI, score)."""
        if isinstance(reference, str):
            return self.match_string(reference)
        return self.match_structured(reference)

    def match_structured(self, reference):
        """Match a structured reference; when its journal title is a known
        abbreviation, retry with the expanded title and keep the better score.

        NOTE(review): mutates reference['journal-title'] in place on retry.
        """
        candidate, score = self.match_string(generate_unstructured(reference))
        journal_norm = re.sub('[^a-z]', '',
                              reference.get('journal-title', '').lower())
        if 'journal-title' in reference and \
                journal_norm in self.journal_abbrev:
            reference['journal-title'] = self.journal_abbrev[journal_norm]
            candidate_j, score_j = \
                self.match_string(generate_unstructured(reference))
            if score_j > score:
                return candidate_j, score_j
        return candidate, score

    def match_string(self, ref_string):
        """Search Crossref for ref_string and return (doi, score) of the
        first non-excluded hit, or (None, None) when nothing qualifies."""
        logging.debug('Matching string {}'.format(ref_string))
        sleep(random())  # crude jittered rate-limiting between API calls
        if ref_string is None:
            return None, None
        results = search(ref_string)
        if results is None or not results:
            logging.debug('Searching for string {} got empty results'
                          .format(ref_string))
            return None, None
        for result in results:
            # Skip excluded DOIs and keep scanning lower-ranked hits.
            if doi_normalize(result.get(dfk.CR_ITEM_DOI)) \
                    in self.excluded_dois:
                logging.debug('String {} NOT matched to excluded DOI {}'
                              .format(ref_string, result.get(dfk.CR_ITEM_DOI)))
                continue
            # Results are relevance-ordered: once one is below the threshold
            # nothing further can qualify.
            if result.get(dfk.CR_ITEM_SCORE) < self.min_score:
                logging.debug('Top hit for string {} has too low score {}'
                              .format(ref_string,
                                      result.get(dfk.CR_ITEM_SCORE)))
                return None, None
            logging.debug('String {} matched to DOI {}'
                          .format(ref_string, result.get(dfk.CR_ITEM_DOI)))
            return result.get(dfk.CR_ITEM_DOI), result.get(dfk.CR_ITEM_SCORE)
        return None, None
|
1646486
|
import re
from django import template
from django.core.urlresolvers import NoReverseMatch
from django.core.urlresolvers import reverse
register = template.Library()
@register.simple_tag(takes_context=True)
def active(context, name):
    """Render 'active' when the current request path matches the URL
    reversed from *name*; render '' otherwise (including unknown names)."""
    try:
        pattern = reverse(name)
    except NoReverseMatch:
        return ''
    return 'active' if re.match(pattern, context['request'].path) else ''
|
1646488
|
from functools import lru_cache
import os
import shutil
import struct
import numpy as np
import torch
import re
from fairseq.data.datautils import utf8_to_uxxxx, uxxxx_to_utf8
import cv2
from fairseq.data import FairseqDataset
import json
import lmdb
import logging
LOG = logging.getLogger(__name__)
class OcrLmdbDataset(FairseqDataset):
    """OCR line-image dataset backed by LMDB.

    Reads ``<data_dir>/desc.json`` for per-split entries (id, width, height,
    uxxxx transcription) and ``<data_dir>/line-images.lmdb`` for the encoded
    images keyed by entry id. Transcriptions are binarized against
    ``dictionary``; samples are bucketed by normalized image width."""
    def __init__(
        self, split, data_dir, dictionary, transforms, image_height, max_allowed_width,
    ):
        LOG.info("...OcrLmdbDataset %s", data_dir)
        self.data_dir = data_dir
        self.split = split
        self.dictionary = dictionary
        self.preprocess = transforms
        self.image_height = image_height
        self.max_allowed_width = max_allowed_width
        with open(os.path.join(self.data_dir, "desc.json"), "r") as fh:
            self.data_desc = json.load(fh)
        # Per-sample length in transcription tokens (used for batching).
        self.sizes = []
        for entry in self.data_desc[self.split]:
            self.sizes.append(len(entry["trans"].split()))
        self.sizes = np.array(self.sizes)
        # NOTE(review): map_size=1e6 (~1 MB) looks far too small for image
        # data; presumably it is ignored because the env is opened
        # readonly — confirm against the lmdb docs.
        self.lmdb_env = lmdb.Environment(
            os.path.join(self.data_dir, "line-images.lmdb"),
            map_size=1e6,
            readonly=True,
            lock=False,
        )
        self.lmdb_txn = self.lmdb_env.begin(buffers=True)
        # Width buckets (upper limits, after scaling height to image_height).
        # Entries wider than max_allowed_width match no bucket and are dropped.
        self.size_group_limits = [150, 200, 300, 350, 450, 600, np.inf]
        self.size_group_keys = self.size_group_limits
        self.size_groups = dict()
        self.size_groups_dict = dict()
        for cur_limit in self.size_group_limits:
            self.size_groups[cur_limit] = []
            self.size_groups_dict[cur_limit] = dict()
        for idx, entry in enumerate(self.data_desc[self.split]):
            width_orig, height_orig = entry["width"], entry["height"]
            normalized_width = width_orig * (self.image_height / height_orig)
            for cur_limit in self.size_group_limits:
                if (
                    normalized_width < cur_limit
                    and normalized_width < self.max_allowed_width
                ):
                    self.size_groups[cur_limit].append(idx)
                    self.size_groups_dict[cur_limit][idx] = 1
                    break
        # Now get final size (might have dropped large entries!)
        self.nentries = 0
        self.max_index = 0
        for cur_limit in self.size_group_limits:
            self.nentries += len(self.size_groups[cur_limit])
            if len(self.size_groups[cur_limit]) > 0:
                cur_max = max(self.size_groups[cur_limit])
                if cur_max > self.max_index:
                    self.max_index = cur_max
        print("...finished loading, size {}".format(self.nentries))
        print("count by group")
        total_group_cnt = 0
        for cur_limit in self.size_group_limits:
            print("group", cur_limit, len(self.size_groups[cur_limit]))
            total_group_cnt += len(self.size_groups[cur_limit])
        print("TOTAL...", total_group_cnt)
    def __getitem__(self, index):
        """Return a sample dict with the preprocessed image tensor, its
        binarized transcription, and batching metadata."""
        entry = self.data_desc[self.split][index]
        # The sample's width bucket doubles as its batching group id.
        max_width = 0
        for cur_limit in self.size_group_limits:
            if index in self.size_groups_dict[cur_limit]:
                max_width = cur_limit
                break
        group_id = max_width
        image_name = entry["id"]
        img_bytes = np.asarray(
            self.lmdb_txn.get(entry["id"].encode("ascii")), dtype=np.uint8
        )
        line_image = cv2.imdecode(img_bytes, cv2.IMREAD_COLOR)  # -1)
        # Do a check for RGBA images; if found get rid of alpha channel
        if len(line_image.shape) == 3 and line_image.shape[2] == 4:
            line_image = cv2.cvtColor(line_image, cv2.COLOR_BGRA2BGR)
        line_image = self.preprocess(line_image)
        # Sanity check: make sure width@30px lh is long enough not to crash our model; we pad to at least 15px wide
        # Need to do this and change the "real" image size so that pack_padded doens't complain
        if line_image.size(2) < 15:
            line_image_ = torch.ones(
                line_image.size(0), line_image.size(1), 15)
            line_image_[:, :, : line_image.size(2)] = line_image
            line_image = line_image_
        # Add padding up to max-width, so that we have consistent size for cudnn.benchmark to work with
        original_width = line_image.size(2)
        original_height = line_image.size(1)
        # Binarize the uxxxx-token transcription against the dictionary.
        transcription = []
        for char in entry["trans"].split():
            transcription.append(self.dictionary.index(char))
        src_metadata = {
            "target": transcription,
            # "target_len": len(transcription),
            "uxxx_trans": entry["trans"],
            "utf8_trans": uxxxx_to_utf8(entry["trans"]),
            "width": original_width,
            "height": original_height,
            "group": group_id,
            "image_name": image_name,
            "image": line_image,
            "id": index,
        }
        return src_metadata
    def __len__(self):
        # Entries wider than max_allowed_width were dropped in __init__.
        return self.nentries
|
1646532
|
from .logger import Logger
# TODO: better overwritting
def print(*msg):
    """Log *msg* through the singleton Logger instead of stdout.

    Intentionally shadows the builtin ``print`` so importing modules can
    swap logging in transparently. stack_displacement=2 presumably makes the
    Logger attribute the message to this function's caller — confirm against
    Logger.log_message.
    """
    Logger().log_message(*msg, stack_displacement=2)
|
1646563
|
import os
from colorama import Fore
import time
import sys
def Banner():
    """Clear the terminal and print the tool's ASCII-art banner."""
    os.system("clear")
    print(Fore.LIGHTRED_EX+"""\n
 [ V 1.0 ]
██████╗ ███╗ ██╗ █████╗
██╔══██╗████╗ ██║██╔══██╗
██║ ██║██╔██╗ ██║███████║
██║ ██║██║╚██╗██║██╔══██║
██████╔╝██║ ╚████║██║ ██║
╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═╝
 ______ _____ _____ _____ _ _
| ___ \ ___/ __ \ _ | \ | |
| |_/ / |__ | / \/ | | | \| |
| /| __|| | | | | | . ` |
| |\ \| |___| \__/\ \_/ / |\ |
\_| \_\____/ \____/\___/\_| \_/
 ================================
 ** Developer:~ <NAME> **
 ================================
 """)
def infolist1():
    """Print the top-level menu (the short sleeps animate the output)."""
    time.sleep(0.1)
    print(Fore.RED+"["+Fore.WHITE+"卐"+Fore.RED+"]"+Fore.CYAN+" Choose one of the options below \n")
    time.sleep(0.1)
    print(Fore.LIGHTYELLOW_EX+" [1] Information Gathering\n")
    time.sleep(0.1)
    # print(Fore.RED+" [2] CMS Detection\n")
    # time.sleep(0.1)
    print(Fore.YELLOW+" [2] Creator \n")
    time.sleep(0.1)
    print(Fore.WHITE+" [3] Exit\n")
def infolist2():
    """Print the information-gathering sub-menu (sleeps animate the output)."""
    time.sleep(0.1)
    print(Fore.GREEN+" [1]"+Fore.BLUE+" - Whois")
    time.sleep(0.2)
    print(Fore.GREEN+" [2]"+Fore.BLUE+" - Reverse IP")
    time.sleep(0.1)
    print(Fore.GREEN+" [3]"+Fore.BLUE+" - Port Scan")
    time.sleep(0.1)
    # Fix: user-facing typo "Trace Toute" -> "Trace Route".
    print(Fore.GREEN+" [4]"+Fore.BLUE+" - Trace Route")
    time.sleep(0.1)
    print(Fore.GREEN+" [5]"+Fore.BLUE+" - IP location Finder")
    time.sleep(0.1)
    print(Fore.GREEN+" [6]"+Fore.BLUE+" - Show HTTP Header")
    time.sleep(0.1)
    print(Fore.GREEN+" [7]"+Fore.BLUE+" - Find Shared DNS")
    time.sleep(0.1)
    print(Fore.GREEN+" [8]"+Fore.BLUE+" - DNS Lookup")
    time.sleep(0.1)
    print(Fore.GREEN+" [9]"+Fore.BLUE+" - Cms Detect")
    time.sleep(0.1)
    print(Fore.GREEN+" [10]"+Fore.BLUE+"- Robots Scanner")
    time.sleep(0.1)
    print(Fore.GREEN+" [11]"+Fore.BLUE+"- Admin Page Finder")
    time.sleep(0.1)
    # print(Fore.GREEN+" [12]"+Fore.BLUE+"- Bypass Cloud Flare")
    # time.sleep(0.2)
    print(Fore.GREEN+" [12]"+Fore.BLUE+"- Back To Menu")
    time.sleep(0.1)
    print(Fore.GREEN+" [13]"+Fore.WHITE+"- Exit \n")
def infolist3():
    """Show the "Creator" credits screen, wait for Enter, then return.

    The bare except around input() swallows Ctrl-C/EOF so the screen exits
    cleanly instead of tracebacking; sys.exit() then ends the program.
    """
    Banner()
    time.sleep(0.1)
    print (Fore.GREEN+" [*]"+Fore.BLUE+" Creator : <NAME> \n")
    time.sleep(0.1)
    print (Fore.GREEN+" [*]"+Fore.RED+" Website : https://bhattnikunj.com \n")
    time.sleep(0.1)
    print (Fore.GREEN+" [*]"+Fore.CYAN+" Telegram ID : @CYBERNIKUNJ \n")
    time.sleep(0.1)
    try:
        input(Fore.LIGHTRED_EX+" [*] Back To Menu (Press Enter...) ")
    except:
        print("")
        print("\n")
        sys.exit()
def infolist4():
    """Print the CMS-detection sub-menu (only WordPress is implemented)."""
    Banner()
    print(Fore.GREEN+" [1]"+Fore.BLUE+"- WordPress ")
    time.sleep(0.1)
    print(Fore.GREEN+" [2]"+Fore.BLUE+" - Drupal"+Fore.RED+" Coming Soon . . .")
    time.sleep(0.1)
    print(Fore.GREEN+" [3]"+Fore.BLUE+" - Joomla "+Fore.RED+" Coming Soon . . . ")
    time.sleep(0.1)
    print(Fore.GREEN+" [4]"+Fore.BLUE+" - Back To Menu")
    print(Fore.CYAN+" **********************\n")
    time.sleep(0.1)
def infowp():
    """Print the WordPress-specific sub-menu."""
    Banner()
    print(Fore.GREEN+" [1]"+Fore.BLUE+" - Get Plugins ")
    time.sleep(0.1)
    print(Fore.GREEN+" [2]"+Fore.BLUE+" - Get Username ")
    time.sleep(0.1)
    print(Fore.GREEN+" [3]"+Fore.BLUE+" - Back To Menu ")
    time.sleep(0.1)
|
1646572
|
from flask import Flask, request
import tensorflow as tf
from correct_text import create_model, DefaultMovieDialogConfig, decode
from text_corrector_data_readers import MovieDialogReader
# Paths baked in for the container layout this service runs in.
data_path = '/input/data/movie_dialog_train.txt'
model_path = '/input/model'
# One global TF session + model, loaded once at startup and shared by all
# requests.
tfs = tf.Session()
config = DefaultMovieDialogConfig()
print('Loading model from path: %s' % model_path)
model = create_model(tfs, True, model_path, config=config)
print('Using data from path: %s' % data_path)
data_reader = MovieDialogReader(config, data_path)
app = Flask(__name__)
@app.route('/', methods=['POST'])
def correct_handler():
    """Run the POSTed text through the correction model and return the
    decoded (corrected) string.

    NOTE(review): corrective tokens are re-read from the full training file
    on every request — looks hoistable to startup; confirm read_tokens is
    stateless before moving it.
    """
    corrective_tokens = data_reader.read_tokens(data_path)
    request.get_data()  # force-read the body so request.data is populated
    decodings = decode(tfs, model=model, data_reader=data_reader,
                       data_to_decode=[request.data.split()],
                       corrective_tokens=corrective_tokens)
    # decode() yields per-input token lists; join the first (only) one.
    return ' '.join(next(decodings))
# Listen on all interfaces (Flask's default port).
if __name__ == '__main__':
    app.run(host='0.0.0.0')
|
1646599
|
import unittest
from bump_version import bump_version
class TestBumpVersion(unittest.TestCase):
    """Semver bumping of 'vX.Y.Z' strings via bump_version(version, part)."""
    def test_bump_patch(self):
        self.assertEqual(bump_version('v1.2.3', 'patch'), 'v1.2.4')
    def test_bump_patch_does_not_carry_over(self):
        # Components are numeric, not single digits: 9 -> 10.
        self.assertEqual(bump_version('v1.2.9', 'patch'), 'v1.2.10')
    def test_bump_minor_resets_patch(self):
        self.assertEqual(bump_version('v1.2.3', 'minor'), 'v1.3.0')
    def test_bump_minor_does_not_carry_over(self):
        self.assertEqual(bump_version('v1.9.3', 'minor'), 'v1.10.0')
    def test_bump_major_resets_minor_and_patch(self):
        self.assertEqual(bump_version('v1.2.3', 'major'), 'v2.0.0')
|
1646617
|
import os
import json
from indra.sources import biofactoid
here = os.path.dirname(os.path.abspath(__file__))
def test_process_document():
    """Process the bundled biofactoid document fixture and check the two
    extracted statements (Inhibition + Phosphorylation of FOXO3 by AKT1)
    and their shared evidence/text-ref metadata."""
    doc_json = os.path.join(here, 'biofactoid_doc.json')
    with open(doc_json, 'r') as fh:
        doc = json.load(fh)
    bp = biofactoid.process_json([doc])
    assert len(bp.statements) == 2
    assert {s.__class__.__name__ for s in bp.statements} == \
        {'Inhibition', 'Phosphorylation'}
    # Both statements share the same agents and evidence provenance.
    for stmt in bp.statements:
        agents = stmt.agent_list()
        assert agents[0].name == 'AKT1'
        assert agents[0].db_refs == \
            {'EGID': '207', 'HGNC': '391', 'ENSEMBL': 'ENSG00000142208'}
        assert agents[1].name == 'FOXO3', agents
        assert agents[1].db_refs == \
            {'EGID': '2309', 'HGNC': '3821', 'ENSEMBL': 'ENSG00000118689'}
        ev = stmt.evidence[0]
        assert ev.pmid == '29886111'
        assert ev.text == 'AKT1 inhibits FOXO3 via phosphorylation.'
        # NOTE(review): 'lsatEditedDate' looks like a typo for
        # 'lastEditedDate' but presumably mirrors the key as produced by the
        # processor/fixture — confirm before "fixing" either side.
        assert ev.annotations == \
            {"biofactoid_document": "3d2a77ba-55c1-463b-aff3-9acaa0307b62",
             "created_date": "2020-09-07T16:38:08.837Z",
             "lsatEditedDate": "2020-09-10T01:13:41.528Z"}
        assert ev.text_refs == \
            {'PMID': '29886111',
             'DOI': '10.1016/j.cels.2018.05.004',
             'PII': 'S2405-4712(18)30192-3',
             'PMCID': 'PMC6322215'}
        assert ev.source_api == 'biofactoid'
|
1646635
|
import requests
def download_model(model, shaves, cmx_slices, nces, output_file):
    """Request a compiled blob for a model-zoo network from the Luxonis
    compile server and write it to output_file.

    Returns 0 on success, 1 when the HTTP request fails, 2 when the server
    reports a compilation error.
    """
    platform = "VPU_MYRIAD_2450" if nces == 0 else "VPU_MYRIAD_2480"
    url = "http://luxonis.com:8080/"
    payload = {
        'compile_type': 'zoo',
        'model_name': model,
        'model_downloader_params': '--precisions FP16 --num_attempts 5',
        'intermediate_compiler_params': '--data_type=FP16 --mean_values [127.5,127.5,127.5] --scale_values [255,255,255]',
        'compiler_params': '-ip U8 -VPU_MYRIAD_PLATFORM ' + platform + ' -VPU_NUMBER_OF_SHAVES ' + str(shaves) + ' -VPU_NUMBER_OF_CMX_SLICES ' + str(cmx_slices)
    }
    try:
        response = requests.request("POST", url, data=payload)
    except requests.exceptions.RequestException:
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit and mislabelled every failure as a timeout.
        print("Connection timed out!")
        return 1
    if response.status_code != 200:
        print("Model compilation failed with error code: " + str(response.status_code))
        print(str(response.text.encode('utf8')))
        return 2
    # Fix: write via a context manager so the handle is closed even when the
    # write raises.
    with open(output_file, 'wb') as blob_file:
        blob_file.write(response.content)
    return 0
|
1646685
|
import argparse
import os
import pandas as pd
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# Fixed seed for reproducible weight init and training.
torch.manual_seed(1234)
# command line arguments
parser = argparse.ArgumentParser(description='Train a model with PyTorch.')
parser.add_argument('inxfile', type=str, help='Input file containing the x training data')
parser.add_argument('inyfile', type=str, help='Input file containing the y training data')
parser.add_argument('outdir', type=str, help='Output directory for the trained model')
args = parser.parse_args()
# read in the pre-processed X, y data
# cols[0:-3] -> feature columns f1..f4; cols[-3:] -> label columns l1..l3
# (presumably one-hot, given the 3-output BCEWithLogitsLoss setup below).
cols = ['f1', 'f2', 'f3', 'f4', 'l1', 'l2', 'l3']
X = pd.read_csv(args.inxfile, names=cols[0:-3])
y = pd.read_csv(args.inyfile, names=cols[-3:])
# model parameters
input_size = 4
num_classes = 3
hidden_size = 5
learning_rate = 0.1
num_epoch = 10000
# define model
class Net(nn.Module):
    """Single-hidden-layer MLP: input -> hidden (ReLU) -> class logits."""

    def __init__(self, input_size, hidden_size, num_classes):
        super(Net, self).__init__()
        # Attribute names (fc1/relu/fc2) kept for state_dict compatibility.
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Return unnormalized logits of shape (batch, num_classes)."""
        return self.fc2(self.relu(self.fc1(x)))
net = Net(input_size, hidden_size, num_classes)
# choose optimizer and loss function
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)
# X and y variables
# Fix: DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
# .values is the long-supported equivalent and returns the same ndarray.
X_tensor = Variable(torch.from_numpy(X.values).float())
Y_tensor = Variable(torch.from_numpy(y.values).float())
# train the model (full-batch gradient descent)
for epoch in range(num_epoch):
    # feedforward - backprop
    optimizer.zero_grad()
    out = net(X_tensor)
    loss = criterion(out, Y_tensor)
    loss.backward()
    optimizer.step()
# export the trained weights
torch.save(net.state_dict(), os.path.join(args.outdir, 'model.pt'))
|
1646697
|
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from functools import partial
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
from tensorflow.contrib.framework import arg_scope
from tensorflow.contrib import layers
import numpy as np
def get_net(model_name):
    """Build a zoo backbone and replace its classifier with a 4-class head.

    model_name: 'efficientnet-b2'/'b4'/'b5'/'b6', 'mixnet_xl', 'mixnet_s'
    or 'mixnet_s_fromscratch'. Raises KeyError for unknown names.
    """
    from efficientnet_pytorch import EfficientNet
    from torch import nn
    import timm
    # (classifier attribute name, backbone feature width, backbone factory).
    # The replacement nn.Linear is built lazily below; the original eagerly
    # allocated a Linear for every zoo entry on each call.
    zoo_params = {
        'efficientnet-b2': ('_fc', 1408,
                            partial(EfficientNet.from_pretrained, 'efficientnet-b2')),
        'efficientnet-b4': ('_fc', 1792,
                            partial(EfficientNet.from_pretrained, 'efficientnet-b4')),
        'efficientnet-b5': ('_fc', 2048,
                            partial(EfficientNet.from_pretrained, 'efficientnet-b5')),
        'efficientnet-b6': ('_fc', 2304,
                            partial(EfficientNet.from_pretrained, 'efficientnet-b6')),
        'mixnet_xl': ('classifier', 1536,
                      partial(timm.create_model, 'mixnet_xl', pretrained=True)),
        'mixnet_s': ('classifier', 1536,
                     partial(timm.create_model, 'mixnet_s', pretrained=True)),
        'mixnet_s_fromscratch': ('classifier', 1536,
                                 partial(timm.create_model, 'mixnet_s', pretrained=False)),
    }
    fc_name, in_features, init_op = zoo_params[model_name]
    net = init_op()
    setattr(net, fc_name, nn.Linear(in_features=in_features, out_features=4, bias=True))
    return net
def SR_net_model_eff(features, mode, n_class):
    """SRNet-style CNN built with the TF1 contrib.layers API.

    features: NHWC image batch (transposed to NCHW internally).
    mode: tf.estimator mode key; batch norm trains iff mode == TRAIN.
    Returns unnormalized logits of shape [batch, n_class].

    NOTE(review): the '256*256' comments and the fixed 32x32 final avg-pool
    assume a 256x256 input — confirm.
    """
    _inputs = tf.transpose(features, [0, 3, 1, 2])
    data_format = 'NCHW'
    is_training = bool(mode == tf.estimator.ModeKeys.TRAIN)
    # Shared defaults for the whole graph: 3x3 stride-1 convs (16 channels
    # unless overridden), L2 weight decay 2e-4, fused batch norm.
    with arg_scope([layers.conv2d], num_outputs=16,
                   kernel_size=3, stride=1, padding='SAME',
                   data_format=data_format,
                   activation_fn=None,
                   weights_initializer=layers.variance_scaling_initializer(),
                   weights_regularizer=layers.l2_regularizer(2e-4),
                   biases_initializer=tf.constant_initializer(0.2),
                   biases_regularizer=None),\
        arg_scope([layers.batch_norm],
                  decay=0.9, center=True, scale=True,
                  updates_collections=tf.GraphKeys.UPDATE_OPS, is_training=is_training,
                  fused=True, data_format=data_format),\
        arg_scope([layers.avg_pool2d],
                  kernel_size=[3,3], stride=[2,2], padding='SAME',
                  data_format=data_format):
        # Layers 1-2: plain conv + BN + ReLU stem.
        with tf.variable_scope('Layer1'): # 256*256
            conv=layers.conv2d(_inputs, num_outputs=64, kernel_size=3)
            actv=tf.nn.relu(layers.batch_norm(conv))
        with tf.variable_scope('Layer2'): # 256*256
            conv=layers.conv2d(actv)
            actv=tf.nn.relu(layers.batch_norm(conv))
        # Layers 3-7: identity residual blocks, spatial size unchanged.
        with tf.variable_scope('Layer3'): # 256*256
            conv1=layers.conv2d(actv)
            actv1=tf.nn.relu(layers.batch_norm(conv1))
            conv2=layers.conv2d(actv1)
            bn2=layers.batch_norm(conv2)
            res= tf.add(actv, bn2)
        with tf.variable_scope('Layer4'): # 256*256
            conv1=layers.conv2d(res)
            actv1=tf.nn.relu(layers.batch_norm(conv1))
            conv2=layers.conv2d(actv1)
            bn2=layers.batch_norm(conv2)
            res= tf.add(res, bn2)
        with tf.variable_scope('Layer5'): # 256*256
            conv1=layers.conv2d(res)
            actv1=tf.nn.relu(layers.batch_norm(conv1))
            conv2=layers.conv2d(actv1)
            bn=layers.batch_norm(conv2)
            res= tf.add(res, bn)
        with tf.variable_scope('Layer6'): # 256*256
            conv1=layers.conv2d(res)
            actv1=tf.nn.relu(layers.batch_norm(conv1))
            conv2=layers.conv2d(actv1)
            bn=layers.batch_norm(conv2)
            res= tf.add(res, bn)
        with tf.variable_scope('Layer7'): # 256*256
            conv1=layers.conv2d(res)
            actv1=tf.nn.relu(layers.batch_norm(conv1))
            conv2=layers.conv2d(actv1)
            bn=layers.batch_norm(conv2)
            res= tf.add(res, bn)
        # Layers 8-11: downsampling residual blocks; the shortcut is a
        # strided 1x1 conv + BN, the main path ends in a stride-2 avg-pool.
        with tf.variable_scope('Layer8'): # 256*256
            convs = layers.conv2d(res, kernel_size=1, stride=2)
            convs = layers.batch_norm(convs)
            conv1=layers.conv2d(res)
            actv1=tf.nn.relu(layers.batch_norm(conv1))
            conv2=layers.conv2d(actv1)
            bn=layers.batch_norm(conv2)
            pool = layers.avg_pool2d(bn)
            res= tf.add(convs, pool)
        with tf.variable_scope('Layer9'): # 128*128
            convs = layers.conv2d(res, num_outputs=64, kernel_size=1, stride=2)
            convs = layers.batch_norm(convs)
            conv1=layers.conv2d(res, num_outputs=64)
            actv1=tf.nn.relu(layers.batch_norm(conv1))
            conv2=layers.conv2d(actv1, num_outputs=64)
            bn=layers.batch_norm(conv2)
            pool = layers.avg_pool2d(bn)
            res= tf.add(convs, pool)
        with tf.variable_scope('Layer10'): # 64*64
            convs = layers.conv2d(res, num_outputs=128, kernel_size=1, stride=2)
            convs = layers.batch_norm(convs)
            conv1=layers.conv2d(res, num_outputs=128)
            actv1=tf.nn.relu(layers.batch_norm(conv1))
            conv2=layers.conv2d(actv1, num_outputs=128)
            bn=layers.batch_norm(conv2)
            pool = layers.avg_pool2d(bn)
            res= tf.add(convs, pool)
        with tf.variable_scope('Layer11'): # 32*32
            convs = layers.conv2d(res, num_outputs=256, kernel_size=1, stride=2)
            convs = layers.batch_norm(convs)
            conv1=layers.conv2d(res, num_outputs=256)
            actv1=tf.nn.relu(layers.batch_norm(conv1))
            conv2=layers.conv2d(actv1, num_outputs=256)
            bn=layers.batch_norm(conv2)
            pool = layers.avg_pool2d(bn)
            res= tf.add(convs, pool)
        # Layer 12: final conv block, global 32x32 avg-pool, linear head.
        with tf.variable_scope('Layer12'): # 16*16
            conv1=layers.conv2d(res, num_outputs=512)
            actv1=tf.nn.relu(layers.batch_norm(conv1))
            conv2=layers.conv2d(actv1, num_outputs=512)
            bn=layers.batch_norm(conv2)
            avgp = layers.avg_pool2d(bn, kernel_size=[32,32], stride=[32,32]) ########
    avgp = layers.flatten(avgp)
    ip=layers.fully_connected(avgp, num_outputs=n_class,
                activation_fn=None, normalizer_fn=None,
                weights_initializer=tf.random_normal_initializer(mean=0., stddev=0.01),
                biases_initializer=tf.constant_initializer(0.), scope='ip')
    return ip
|
1646709
|
from procfs.core import ProcessFile, Dict
# /proc/net/rpc/nfsd documentation:
# <kernel src>/fs/nfsd/stats.c : nfsd_proc_show
# /net/sunrpc/stats.c : svc_seq_show
# /fs/nfsd/nfsproc.c : nfsd_procedures2
# /fs/nfsd/nfs3proc.c : nfsd_procedures3
# /fs/nfsd/nfs4proc.c : nfsd_procedures4 nfsd4_ops
# http://marc.info/?l=linux-nfs&m=119308862812388&w=1
# http://article.gmane.org/gmane.linux.nfs/16594
class _BaseNfs(ProcessFile):
    """Base class for parsing /proc/net/rpc/nfs and /proc/net/rpc/nfsd
    """
    def _parse(self, data):
        # Each line is "<type> <v1> <v2> ...". Values containing '.' parse as
        # float, otherwise int. When a subclass defines _parse_<type>, the
        # positional values are re-shaped into a named Dict through it;
        # unknown types keep the raw value list.
        lines = data.splitlines()
        result = Dict()
        for line in lines:
            str_values = line.split()
            type_ = str_values.pop(0)
            values = []
            for value in str_values:
                if '.' in value:
                    parser = float
                else:
                    parser = int
                values.append(parser(value))
            parser_name = '_parse_%s' % type_
            if hasattr(self, parser_name):
                parser = getattr(self, parser_name)
                values = parser(*values)
            result[type_] = values
        return result
    def _parse_net(self, netcnt, netudpcnt, nettcpcnt, nettcpconn):
        # 'net' line: packet/connection counters.
        return Dict(netcnt=netcnt, netudpcnt=netudpcnt,
                    nettcpcnt=nettcpcnt, nettcpconn=nettcpconn)
    def _parse_proc2(self, cnt, null, getattr, setattr, root, lookup,
                     readlink, read, writecache, write, create, remove,
                     rename, link, symlink, mkdir, rmdir, readdir, statfs):
        # NFSv2 per-procedure call counts; `cnt` (number of fields) is
        # intentionally dropped. Parameter names shadow builtins
        # getattr/setattr by design — they mirror the NFS procedure names.
        return Dict(null=null, getattr=getattr, setattr=setattr,
                    root=root, lookup=lookup, readlink=readlink,
                    read=read, writecache=writecache, write=write,
                    create=create, remove=remove, rename=rename,
                    link=link, symlink=symlink, mkdir=mkdir,
                    rmdir=rmdir, readdir=readdir, statfs=statfs)
    def _parse_proc3(self, cnt, null, getattr, setattr, lookup, access,
                     readlink, read, write, create, mkdir, symlink, mknod,
                     remove, rmdir, rename, link, readdir, readdirplus,
                     fsstat, fsinfo, pathconf, commit):
        # NFSv3 per-procedure call counts; `cnt` dropped as above.
        return Dict(null=null, getattr=getattr, setattr=setattr,
                    lookup=lookup, access=access, readlink=readlink,
                    read=read, write=write, create=create, mkdir=mkdir,
                    symlink=symlink, mknod=mknod, remove=remove,
                    rmdir=rmdir, rename=rename, link=link,
                    readdir=readdir, readdirplus=readdirplus,
                    fsstat=fsstat, fsinfo=fsinfo, pathconf=pathconf,
                    commit=commit)
class nfsd(_BaseNfs):
    """/proc/net/rpc/nfsd
    """
    def _parse_rc(self, hits, misses, nocache):
        # Reply cache statistics.
        return Dict(hits=hits, misses=misses, nocache=nocache)
    def _parse_fh(self, stale, total_lookups, anonlookups, dir_not_in_cache,
                  nondir_not_in_cache):
        # File handle statistics.
        return Dict(stale=stale, total_lookups=total_lookups,
                    anonlookups=anonlookups,
                    dir_not_in_cache=dir_not_in_cache,
                    nondir_not_in_cache=nondir_not_in_cache)
    def _parse_io(self, read, written):
        # Bytes read from / written to exported filesystems.
        return Dict(read=read, written=written)
    def _parse_th(self, threads, fullcnt, *busy_times):
        # Thread utilization histogram; assumes exactly 9 decile buckets
        # (10-100%) follow the two scalar fields.
        busy = {'10-20': busy_times[0],
                '20-30': busy_times[1],
                '30-40': busy_times[2],
                '40-50': busy_times[3],
                '50-60': busy_times[4],
                '60-70': busy_times[5],
                '70-80': busy_times[6],
                '80-90': busy_times[7],
                '90-100': busy_times[8]}
        return Dict(threads=threads, fullcnt=fullcnt, busy=busy)
    def _parse_rpc(self, cnt, badcnt, badfmt, badauth, badclnt):
        """See <linux-src>/net/sunrpc/stats.c : svc_seq_show
        """
        return Dict(cnt=cnt, badcnt=badcnt,
                    badfmt=badfmt, badauth=badauth,
                    badclnt=badclnt)
    def _parse_ra(self, cache_size, *depths):
        # Read-ahead cache depth histogram; the trailing value is the
        # not-found counter, preceded by 10 depth buckets.
        not_found = depths[-1]
        depth = {10: depths[0], 20: depths[1], 30: depths[2], 40: depths[3],
                 50: depths[4], 60: depths[5], 70: depths[6], 80: depths[7],
                 90: depths[8], 100: depths[9]}
        return Dict(cache_size=cache_size, depth=depth, not_found=not_found)
    def _parse_proc4(self, cnt, null, compound):
        # NFSv4 has only two top-level procedures: NULL and COMPOUND.
        return Dict(null=null, compound=compound)
    # def _parse_proc4ops(self, cnt, access, close, commit, create, delegreturn,
    #                     getattr, getfh, link, lock, lockt, locku, lookup,
    #                     lookupp, nverify, open, open_confirm, open_downgrade,
    #                     putfh, putpubfh, putrootfh, read, readdir, readlink,
    #                     remove, rename, renew, restorefh, savefh, secinfo,
    #                     setattr, setclientid, setclientid_confirm, verify,
    #                     write, release_lockowner, exchange_id, create_session,
    #                     destroy_session, sequence):
    #     return Dict(access=access, close=close, commit=commit,
    #                 create=create, delegreturn=delegreturn,
    #                 getattr=getattr, getfh=getfh, link=link, lock=lock,
    #                 lockt=lockt, locku=locku, lookup=lookup,
    #                 lookupp=lookupp, nverify=nverify, open=open,
    #                 open_confirm=open_confirm,
    #                 open_downgrade=open_downgrade, putfh=putfh,
    #                 putpubfh=putpubfh, putrootfh=putrootfh, read=read,
    #                 readdir=readdir, readlink=readlink, remove=remove,
    #                 rename=rename, renew=renew, restorefh=restorefh,
    #                 savefh=savefh, secinfo=secinfo, setattr=setattr,
    #                 setclientid=setclientid,
    #                 setclientid_confirm=setclientid_confirm,
    #                 verify=verify, write=write,
    #                 release_lockowner=release_lockowner,
    #                 exchange_id=exchange_id,
    #                 create_session=create_session,
    #                 destroy_session=destroy_session, sequence=sequence)
class nfs(_BaseNfs):
    """/proc/net/rpc/nfs
    """
    def _parse_rpc(self, cnt, retrans, authrefresh):
        """See <linux-src>/net/sunrpc/stats.c : rpc_proc_show
        """
        # Client-side RPC counters: total calls, retransmissions, auth refreshes.
        return Dict(cnt=cnt, retrans=retrans, authrefresh=authrefresh)
|
1646728
|
import sys
import click
import time
import calendar
import datetime
from anchore.cli.common import anchore_print, anchore_print_err
from anchore import anchore_auth, anchore_feeds
from anchore.anchore_utils import contexts
config = {}
@click.group(name='feeds', short_help='Manage syncing of and subscriptions to Anchore data feeds.')
@click.pass_obj
def feeds(anchore_config):
    # Click group entry point: stash the CLI config globally and make sure
    # feed metadata exists, syncing it from the service on first use.
    # (Deliberately no docstring: click would surface it as help text and
    # change the CLI output.)
    global config
    config = anchore_config

    emsg = ""
    success = True
    try:
        rc, msg = anchore_feeds.check()
        if not rc:
            anchore_print("initializing feed metadata: ...")
            rc, ret = anchore_feeds.sync_feedmeta()
            if not rc:
                emsg = "could not sync feed metadata from service: " + ret['text']
                success = False
    except Exception as err:
        # Broad catch is the CLI boundary convention in this module.
        anchore_print_err('operation failed')
        sys.exit(1)

    # Fix: removed the unused local `ecode = 0` the original assigned.
    if not success:
        anchore_print_err(emsg)
        sys.exit(1)
@feeds.command(name='show', short_help='Show detailed info on a specific feed')
@click.argument('feed')
def show(feed):
    """
    Show detailed feed information
    """
    ecode = 0
    try:
        feedmeta = anchore_feeds.load_anchore_feedmeta()
        if feed in feedmeta:
            result = {}
            groups = feedmeta[feed].get('groups', {}).values()
            result['name'] = feed
            result['access_tier'] = int(feedmeta[feed].get('access_tier'))
            result['description'] = feedmeta[feed].get('description')
            result['groups'] = {}
            # Metadata synced before subscription support may lack the key.
            if 'subscribed' not in feedmeta[feed]:
                result['subscribed'] = False
            else:
                result['subscribed'] = feedmeta[feed]['subscribed']
            for g in groups:
                result['groups'][g['name']] = {
                    'access_tier': int(g.get('access_tier')),
                    'description': g.get('description'),
                    'last_sync': datetime.datetime.fromtimestamp(g.get('last_update')).isoformat() if 'last_update' in g else 'None'
                }
            anchore_print(result, do_formatting=True)
        else:
            # Fix: user-facing typo "withe" -> "with".
            anchore_print_err('Unknown feed name. Valid feeds can be seen with the "list" command')
            ecode = 1
    except Exception as err:
        anchore_print_err('operation failed')
        ecode = 1
    sys.exit(ecode)
@feeds.command(name='list', short_help="List all feeds.")
@click.option('--showgroups', help='Along with the feed, show all groups within the feed.', is_flag=True)
def list(showgroups):
    """
    Show list of Anchore data feeds.
    """
    # NOTE: the function name shadows the builtin `list` in this module; it
    # is kept unchanged because it is part of the module's public surface.
    ecode = 0
    try:
        result = {}
        subscribed = {}
        available = {}
        unavailable = {}
        current_user_data = contexts['anchore_auth']['user_info']
        feedmeta = anchore_feeds.load_anchore_feedmeta()
        for feed in feedmeta.keys():
            # BUGFIX: older metadata may lack the 'subscribed' key; treat it
            # as False instead of raising KeyError (same guard `show` uses).
            if feedmeta[feed].get('subscribed', False):
                subscribed[feed] = {}
                subscribed[feed]['description'] = feedmeta[feed]['description']
                if showgroups:
                    subscribed[feed]['groups'] = feedmeta[feed]['groups'].keys()
            else:
                # Anonymous/unauthenticated users default to access tier 0.
                if current_user_data:
                    tier = int(current_user_data['tier'])
                else:
                    tier = 0
                # Feeds above the user's tier are listed but not syncable.
                if int(feedmeta[feed]['access_tier']) > tier:
                    collection = unavailable
                else:
                    collection = available
                collection[feed] = {}
                collection[feed]['description'] = feedmeta[feed]['description']
                if showgroups and collection == available:
                    collection[feed]['groups'] = feedmeta[feed]['groups'].keys()
        if available:
            result['Available'] = available
        if subscribed:
            result['Subscribed'] = subscribed
        if unavailable:
            result['Unavailable/Insufficient Access Tier'] = unavailable
        anchore_print(result, do_formatting=True)
    except Exception as err:
        anchore_print_err('operation failed')
        ecode = 1
    sys.exit(ecode)
@feeds.command(name='sub', short_help="Subscribe to specified feed(s).")
@click.argument('feednames', nargs=-1, metavar='<feedname> <feedname> ...')
def sub(feednames):
    """
    Subscribe to the specified feed(s).
    """
    ecode = 0
    # Resolve the caller's access tier; anonymous users count as tier 0.
    current_user_data = contexts.get('anchore_auth', {}).get('user_info', None)
    if current_user_data:
        current_user_tier = int(current_user_data['tier'])
    else:
        current_user_tier = 0
    try:
        for feedname in feednames:
            rc, msg = anchore_feeds.subscribe_anchore_feed(feedname, current_user_tier)
            if rc:
                anchore_print(msg)
            else:
                anchore_print_err(msg)
                ecode = 1
    except Exception as err:
        anchore_print_err('operation failed')
        ecode = 1
    sys.exit(ecode)
@feeds.command(name='unsub', short_help="Unsubscribe from specified feed(s).")
@click.argument('feednames', nargs=-1, metavar='<feedname> <feedname> ...')
@click.option('--delete', help='Delete all feed data after unsubscribing', is_flag=True)
@click.option('--dontask', help='Used with --delete, will not prompt before deleting all feed data', is_flag=True)
def unsub(feednames, delete, dontask):
    """
    Unsubscribe from the specified feed(s).
    """
    ecode = 0
    # BUGFIX (py2/py3 compatibility): raw_input() was renamed to input() in
    # Python 3.  Previously the bare `except` around raw_input() swallowed
    # the NameError on Python 3, so the confirmation always defaulted to "n"
    # and --delete silently never deleted anything when prompting.
    try:
        prompt_input = raw_input
    except NameError:
        prompt_input = input
    try:
        for feed in feednames:
            rc, msg = anchore_feeds.unsubscribe_anchore_feed(feed)
            if not rc:
                ecode = 1
                anchore_print_err(msg)
            else:
                anchore_print(msg)
            if delete:
                dodelete = False
                if dontask:
                    dodelete = True
                else:
                    try:
                        # Fixed malformed prompt punctuation ("(...'?" -> "'...'?").
                        answer = prompt_input("Really delete feed data '" + str(feed) + "'? (y/N)")
                    except Exception:
                        # On EOF or a read error, fail safe: do not delete.
                        answer = "n"
                    if 'y' == answer.lower():
                        dodelete = True
                    else:
                        anchore_print(str(feed) + ": skipping delete.")
                if dodelete:
                    anchore_print(str(feed) + ": deleting feed.")
                    rc = anchore_feeds.delete_anchore_feed(feed)
    except Exception as err:
        anchore_print_err('operation failed')
        ecode = 1
    sys.exit(ecode)
@feeds.command(name='sync', short_help="Sync (download) latest data for all subscribed feeds from the Anchore service.")
@click.option('--since', help='Force a feed sync from the given timestamp to today.', metavar='<unix timestamp>')
@click.option('--do-compact', help='After syncing, process feed data to eliminate duplicate entries and store only latest data records', is_flag=True)
def sync(since, do_compact):
    """
    Sync (download) latest data for all subscribed feeds from the Anchore service.
    """
    ecode = 0
    try:
        # Refresh feed metadata first; pull feed data only if that succeeds.
        rc, ret = anchore_feeds.sync_feedmeta()
        if rc:
            rc, ret = anchore_feeds.sync_feeds(force_since=since, do_combine=do_compact)
        if not rc:
            anchore_print_err(ret['text'])
            ecode = 1
    except Exception as err:
        anchore_print_err('operation failed')
        ecode = 1
    sys.exit(ecode)
|
1646736
|
import inspect
import logging
from datetime import datetime
from typing import Optional, Set, Callable
from celery.utils import uuid
from server.queue.celery.task_metadata import TaskMetadata
from server.queue.celery.task_status import task_status
from server.queue.framework import TaskQueue, BaseObserver
from server.queue.model import Task, TaskStatus, TaskError
from server.queue.task_utils import task_status_filter
from task_queue.events import TASK_METADATA, RUNTIME_METADATA_ATTR
from task_queue.metadata import TaskRuntimeMetadata
# Default module logger
logger = logging.getLogger(__name__)
def _task_name(task):
    """Return the qualified name of the given task callable."""
    return getattr(task, "__qualname__")
class CeleryTaskQueue(TaskQueue):
    """Celery-backed implementation of the TaskQueue interface.

    Celery carries task execution and events, while a separate metadata
    backend persists per-task data (creation time, original request,
    progress), keyed by task id.
    """

    # Event type for task deletion
    TASK_DELETED_EVENT = "task-deleted"

    def __init__(self, app, backend, request_transformer, requests):
        """
        Args:
            app: Celery application (must not be None).
            backend: task-metadata storage backend.
            request_transformer: (de)serializer for request objects.
            requests: mapping {request type: celery task handling it}.
        """
        if app is None:
            raise ValueError("Celery app cannot be None")
        self.app = app
        self._celery_backend = backend
        self._celery_tasks = {}
        self._req_transformer = request_transformer
        for request_type, task in requests.items():
            self._celery_tasks[request_type] = task
        self._observers: Set[BaseObserver] = set()

    def dispatch(self, request):
        """Persist metadata for the request, enqueue it and return a Task."""
        # Resolve actual celery task to be invoked
        celery_task = self._get_celery_task(request)
        # Make sure the backend contains required task metadata
        task_id = uuid()
        meta = TaskMetadata(id=task_id, created=datetime.utcnow(), request=request)
        self._celery_backend.store_task_meta(task_id, meta.asdict())
        # Invoke celery task
        celery_task.apply_async(task_id=task_id, kwargs=request.kwargs())
        # Create a new task instance and return to the caller
        return Task(
            id=task_id, created=meta.created, status_updated=meta.created, request=request, status=TaskStatus.PENDING
        )

    def _get_celery_task(self, request):
        """Resolve the Celery task registered for this request's type."""
        if type(request) not in self._celery_tasks:
            raise ValueError(f"Unsupported request type: {type(request)}")
        return self._celery_tasks[type(request)]

    def terminate(self, task_id):
        """Revoke (and terminate) the task if it exists."""
        if self.exists(task_id):
            async_result = self.app.AsyncResult(task_id)
            async_result.revoke(terminate=True, wait=False)

    def delete(self, task_id):
        """Terminate the task, drop its metadata/result and notify observers."""
        self.terminate(task_id)
        if self.exists(task_id):
            self._celery_backend.delete_task_meta(task_id)
            async_result = self.app.AsyncResult(task_id)
            async_result.forget()
            self._notify_deleted(task_id)

    def get_task(self, task_id):
        """Return the Task with the given id, or None if unknown."""
        return self._construct_task(task_id, {})

    def _construct_task(self, task_id, active_task_meta):
        """Build a Task from stored metadata plus current Celery state.

        Args:
            active_task_meta: index of currently-running task metadata (see
                _active_tasks_meta); used to detect the RUNNING status.
        """
        winnow_meta = self._get_task_meta(task_id)
        async_result = self.app.AsyncResult(task_id)
        if winnow_meta is None:
            return None
        status = task_status(async_result.status)
        status_updated = winnow_meta.created
        if task_id in active_task_meta:
            status = TaskStatus.RUNNING
            status_updated = datetime.utcfromtimestamp(active_task_meta[task_id]["time_start"])
        # For finished tasks, Celery records the completion time.
        if status != TaskStatus.PENDING and status != TaskStatus.RUNNING:
            status_updated = async_result.date_done
        error = None
        if status == TaskStatus.FAILURE:
            error = self._construct_error(async_result)
        return Task(
            id=winnow_meta.id,
            created=winnow_meta.created,
            status_updated=status_updated,
            request=winnow_meta.request,
            status=status,
            error=error,
            progress=winnow_meta.progress,
            result=async_result.result,
        )

    def _get_task_meta(self, task_id, transaction=None) -> Optional[TaskMetadata]:
        """Load task metadata from the backend; None when missing."""
        raw_meta = self._celery_backend.get_task_meta(task_id, transaction=transaction)
        if raw_meta is None:
            return None
        return TaskMetadata.fromdict(raw_meta, self._req_transformer)

    def _construct_error(self, async_result):
        """Build a TaskError describing a failed Celery result."""
        exc_type_name = None
        exc_module_name = None
        exc_message = None
        result = async_result.result
        if isinstance(result, Exception):
            exc_type = type(result)
            exc_type_name = getattr(exc_type, "__name__", None)
            exc_module = inspect.getmodule(exc_type)
            if exc_module is not None:
                exc_module_name = getattr(exc_module, "__name__", None)
            exc_message = str(result)
        return TaskError(
            exc_type=exc_type_name,
            exc_message=exc_message,
            exc_module=exc_module_name,
            traceback=async_result.traceback,
        )

    def _active_tasks_meta(self):
        """Index metadata of currently executing tasks by task id."""
        metadata_index = {}
        celery_inspector = self.app.control.inspect()
        # BUGFIX: inspect().active() returns None when no workers respond;
        # treat that as "no active tasks" instead of raising AttributeError.
        active_tasks = celery_inspector.active() or {}
        for metadata_entries in active_tasks.values():
            for task_metadata in metadata_entries:
                metadata_index[task_metadata["id"]] = task_metadata
        return metadata_index

    def list_tasks(self, status=None, offset=0, limit=None):
        """List tasks matching the status filter.

        Args:
            status: optional status filter.
            offset: number of matching tasks to skip.
            limit: maximal number of tasks to return; None means unlimited.

        Returns:
            Tuple of (page of tasks, total count of matching tasks).
        """
        satisfies = task_status_filter(status)
        result = []
        filtered_count = 0
        for task_id in self._celery_backend.task_ids():
            task = self._construct_task(task_id, {})
            task_satisfies = satisfies(task)
            # BUGFIX: previously `offset + limit` was evaluated even with the
            # default limit=None, raising TypeError; None now means no limit.
            in_window = offset <= filtered_count and (limit is None or filtered_count < offset + limit)
            if task_satisfies and in_window:
                result.append(task)
            filtered_count += int(task_satisfies)
        return result, filtered_count

    def exists(self, task_id):
        """True when the backend holds metadata for this task id."""
        return self._celery_backend.exists(task_id=task_id)

    def _make_event_handler(self, state, task_handler: Callable[[Task], None]):
        """Create Celery event-receiver callback, which will accept Celery
        events, create a task and pass the task to the actual handler.
        """

        def event_handler(event):
            """Receive event and pass the corresponding task to the handler."""
            state.event(event)
            self._update_meta_from_event(event)
            task = self.get_task(event["uuid"])
            if task is not None:
                task_handler(task)

        return event_handler

    def _notify(self, state, task_handler: Callable[[Task, BaseObserver], None]):
        """Create task handler that loops over the existing observers and apply the provided operation."""

        def notifier(task):
            """Notify each observer with the given task"""
            for observer in self._observers:
                try:
                    task_handler(task, observer)
                except Exception:
                    logger.exception("Error handling task update")

        return self._make_event_handler(state, notifier)

    def _update_meta_from_event(self, event):
        """Try to read TaskRuntimeMetadata from event and save it to the backend."""
        if RUNTIME_METADATA_ATTR not in event:
            return
        task_id = event["uuid"]
        try:
            with self._celery_backend.transaction(task_id) as txn:
                task_metadata = self._get_task_meta(task_id, transaction=txn)
                if task_metadata is None:
                    return
                runtime_metadata = TaskRuntimeMetadata.fromdict(event[RUNTIME_METADATA_ATTR])
                task_metadata.progress = runtime_metadata.progress
                self._celery_backend.begin_write_section(transaction=txn)  # Necessary for redis transactions
                self._celery_backend.store_task_meta(task_id, task_metadata.asdict(), transaction=txn)
        except Exception:
            logger.exception("Cannot update task metadata")

    def observe(self, observer: BaseObserver):
        """Add observer to the queue notification list."""
        self._observers.add(observer)

    def stop_observing(self, observer: BaseObserver):
        """Remove observer from the queue notification list."""
        self._observers.remove(observer)

    def _notify_deleted(self, task_id):
        """Send task-deleted event via the Celery message bus."""
        retry_policy = self.app.conf.task_publish_retry_policy
        with self.app.events.default_dispatcher() as dispatcher:
            dispatcher.send(type=self.TASK_DELETED_EVENT, uuid=task_id, retry=True, retry_policy=retry_policy)

    def listen(self):
        """Listen for queue events and notify observers.
        This is a blocking method, it should be executed in a background thread.
        """

        def handle_started(task):
            """Do handle task-started event."""
            # This is safe to force the RUNNING state
            # because "state-failed" and "state-succeeded"
            # events will be handled after that.
            task.status = TaskStatus.RUNNING
            for observer in self._observers:
                try:
                    observer.on_task_started(task)
                except Exception:
                    logger.exception("Error handling 'task-started' event")

        def announce_task_deleted(event):
            """Do handle task-deleted event."""
            task_id = event["uuid"]
            for observer in self._observers:
                try:
                    observer.on_task_deleted(task_id)
                except Exception:
                    logger.exception(f"Error handling '{self.TASK_DELETED_EVENT}' event")

        state = self.app.events.State()
        announce_task_sent = self._notify(state, lambda task, observer: observer.on_task_sent(task))
        announce_task_started = self._make_event_handler(state, handle_started)
        announce_succeeded_tasks = self._notify(state, lambda task, observer: observer.on_task_succeeded(task))
        announce_failed_tasks = self._notify(state, lambda task, observer: observer.on_task_failed(task))
        announce_revoked_tasks = self._notify(state, lambda task, observer: observer.on_task_revoked(task))
        announce_metadata_update = self._notify(state, lambda task, observer: observer.on_task_meta_updated(task))
        with self.app.connection() as connection:
            receiver = self.app.events.Receiver(
                connection,
                handlers={
                    "task-sent": announce_task_sent,
                    "task-started": announce_task_started,
                    "task-succeeded": announce_succeeded_tasks,
                    "task-failed": announce_failed_tasks,
                    "task-revoked": announce_revoked_tasks,
                    TASK_METADATA: announce_metadata_update,
                    self.TASK_DELETED_EVENT: announce_task_deleted,
                },
            )
            receiver.capture(limit=None, timeout=None, wakeup=True)
|
1646737
|
from django.utils.translation import gettext as _
from collections import OrderedDict
# Fallback entry used when no other affiliation applies: plain students map
# to the eduPersonAffiliation values "student" and "member".
DEFAULT_AFFILIATION = {_("student [student, member]") : ["student", "member"]}
# Fine-grained mapping from translated institutional role labels to lists of
# eduPersonAffiliation values.
# NOTE(review): this extended map is NOT merged into IDEM_AFFILIATION_MAP
# below -- presumably it is imported elsewhere; confirm before removing.
idem_affiliation_map_extended = {
_("assistente universitario [staff, member]"): ["staff", "member"],
_("associato (ad es. CNR) [member]"): ["member"],
_("cessato"): [],
_("collaboratore coordinato continuativo [staff, member]"): ["staff", "member"],
_("collaboratore linguistico [staff, member]"): ["staff", "member"],
_("consorziato (membro del consorzio a cui l'ente appartiene) [member]"): ["member"],
_("convenzionato (cliente delle convenzioni) [affiliate]"): ["affiliate"],
_("cultore della materia [staff, member]"): ["staff", "member"],
_("dipendente [staff, member]"): ["staff", "member"],
_("dipendente altra università [member]"): ["member"],
_("dipendente altro ente di ricerca [member]"): ["member"],
_("dipendente azienda ospedaliera/policlinico [member]"): ["member"],
_("dipendente di altra azienda sanitaria [member]"): ["member"],
_("direttore amministrativo [staff, member]"): ["staff", "member"],
_("dirigente [staff, member]"): ["staff", "member"],
_("dirigente a contratto [staff, member]"): ["staff", "member"],
_("dirigente di ricerca [staff, member]"): ["staff", "member"],
_("dirigente tecnologo [staff, member]"): ["staff", "member"],
_("docente a contratto [staff, member]"): ["staff", "member"],
_("dottorando [staff, member, student]"): ["staff", "member", "student"],
_("dottorando di altra università (consorziata) [member]"): ["member"],
_("esperto linguistico [staff, member]"): ["staff", "member"],
_("fornitore (dipendente o titolare delle ditte fornitrici) [affiliate]"): ["affiliate"],
_("interinale [staff, member]"): ["staff", "member"],
_("ispettore generale [affiliate]"): ["affiliate"],
_("laureato frequentatore/collaboratore di ricerca (a titolo gratuito) [member]"): ["member"],
_("lavoratore occasionale (con contratto personale senza partita iva) [staff, member]"): ["staff","member"],
_("lettore di scambio [member]"): ["member"],
_("libero professionista (con contratto personale con partita iva) [staff, member]"): ["staff","member"],
_("ospite / visitatore [affiliate]"): ["affiliate"],
_("personale tecnico-amministrativo [staff, member]"): ["staff", "member"],
_("personale tecnico-amministrativo a tempo determinato [staff, member]"): ["staff","member"],
_("primo ricercatore [staff, member]"): ["staff", "member"],
_("primo tecnologo [staff, member]"): ["staff", "member"],
_("professore associato [staff, member]"): ["staff", "member"],
_("professore emerito [member]"): ["member"],
_("professore incaricato esterno [staff, member]"): ["staff", "member"],
_("professore incaricato interno [staff, member]"): ["staff", "member"],
_("professore ordinario [staff, member]"): ["staff", "member"],
_("ricercatore [staff, member]"): ["staff", "member"],
_("specializzando [staff, member, student]"): ["staff", "member", "student"],
_("studente [student, member]"): ["student", "member"],
_("studente erasmus in ingresso [student]"): ["student"],
_("studente fuori sede (tesista, tirocinante, ...) [student, member]"): ["student","member"],
_("studente laurea specialistica [student, member]"): ["student", "member"],
_("studente master [student, member]"): ["student", "member"],
_("studente siss [student, member]"): ["student", "member"],
_("supervisore siss [staff, member]"): ["staff", "member"],
_("supplente docente [staff, member]"): ["staff", "member"],
_("tecnologo [staff, member]"): ["staff", "member"],
_("titolare di assegno di ricerca [staff, member]"): ["staff", "member"],
_("titolare di borsa di studio [member]"): ["member"],
_("tutor [staff, member]"): ["staff", "member"],
_("volontario servizio civile nazionale [member]"): ["member"]
}
# Coarse-grained mapping: several roles are grouped into a single translated
# label per entry (the keys use backslash line continuations, so the exact
# whitespace inside each label is part of the msgid).
idem_affiliation_map = {
_("dipendente, \
professore, ricercatore, \
titolare di assegno di ricerca, \
tutor, \
assistente universitario, \
collaboratore coordinato continuativo, \
collaboratore linguistico, \
cultore della materia [staff, member]"): ["staff", "member"],
_("associato (ad es. CNR), \
consorziato (membro del consorzio a cui l\"ente appartiene), \
dipendente altra università o ente di \
ricerca o azienda sanitaria/ospedaliera/policlinico, \
dottorando di altra università (consorziata), \
laureato frequentatore/collaboratore di ricerca (a titolo gratuito) \
[member]"): ["member"],
_("cessato"): [],
_("convenzionato (cliente delle convenzioni), \
fornitore (dipendente o titolare delle ditte fornitrici), \
ispettore, ospite / visitatore [affiliate]"): ["affiliate"],
_("lettore di scambio, \
titolare di borsa di studio, \
volontario servizio civile nazionale [member]"): ["member"],
_("studente erasmus in ingresso [student]"): ["student"],
_("dottorando, specializzando [staff, member, student]"): ["staff", "member", "student"],
_("studente, \
studente fuori sede (tesista, tirocinante, ...), \
studente laurea specialistica, \
studente master, \
studente siss [member, student]"): ["student", "member"],
}
# Final exported mapping: default entry first, then the grouped entries
# sorted alphabetically by their (translated) label.
IDEM_AFFILIATION_MAP = OrderedDict(DEFAULT_AFFILIATION)
IDEM_AFFILIATION_MAP.update(OrderedDict(sorted(idem_affiliation_map.items(), key=lambda t: t[0])))
|
1646809
|
import functools
from .runtime_helper import PI, LARGE, empty
# Test fixture: decorator that deliberately omits functools.wraps, so the
# returned wrapper does NOT carry the wrapped function's metadata.
# (Comments only -- no docstrings, to avoid changing introspectable state.)
def broken_decorator(f):
    def wrapped(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapped
# Test fixture: a well-behaved decorator that forwards all arguments and
# preserves the wrapped function's metadata via functools.wraps.
def simple_decorator(f):
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapped
# Test fixture class: static methods wrapped by the two decorators above,
# plus a factory returning a locally defined function.
class Foo(object):
    @staticmethod
    @broken_decorator
    def f_broken():
        # Wrapped without functools.wraps (metadata not preserved).
        pass
    @staticmethod
    @simple_decorator
    def f_simple():
        # Wrapped with functools.wraps (metadata preserved).
        pass
    @staticmethod
    def generate_id():
        # Returns the identity function; `id` shadows the builtin, but only
        # within this local scope.
        def id(x):
            return x
        return id
# Test fixture class: exposes one property and one plain method, both with
# empty bodies (they return None).
class Bar(object):
    @property
    def prop(self):
        pass
    def method(self):
        pass
# NOTE(review): `Foo` has no attribute `Bar` above, so evaluating this class
# statement raises AttributeError at import time.  Presumably either this is
# a deliberate broken fixture or the base should be `Bar`; confirm intent.
class Baz(Foo.Bar):
    pass
|
1646828
|
import matplotlib.pyplot as plt
import numpy as np
from brancher.variables import RootVariable, RandomVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, DeterministicVariable, MultivariateNormalVariable
from brancher import inference
import brancher.functions as BF
# Inference hyperparameters.
N_itr = 250    # number of optimization iterations
N_smpl = 1000  # number of Monte Carlo samples per iteration
optimizer = "SGD"
lr = 0.001 #0.0001
# Probabilistic model #
T = 30               # number of time steps
dt = 0.1             # time increment per step
driving_noise = 0.1  # std of the latent innovation
measure_noise = 0.15 # std of the observation noise
x0 = NormalVariable(0., driving_noise, 'x0')
y0 = NormalVariable(x0, measure_noise, 'y0')
x = [x0]
y = [y0]
x_names = ["x0"]
y_names = ["y0"]
# Time points that get an observation.  `t < 0` is never true, so beyond y0
# only t = 29 is observed.
y_range = [t for t in range(T) if (t < 0 or t > 28)]
for t in range(1, T):
    x_names.append("x{}".format(t))
    # Latent random walk: x_t ~ N(x_{t-1}, sqrt(dt) * driving_noise).
    x.append(NormalVariable(x[t - 1], np.sqrt(dt)*driving_noise, x_names[t]))
    if t in y_range:
        y_name = "y{}".format(t)
        y_names.append(y_name)
        y.append(NormalVariable(x[t], measure_noise, y_name))
AR_model = ProbabilisticModel(x + y)
# Generate data #
data = AR_model._get_sample(number_samples=1)
time_series = [float(data[yt].data) for yt in y]
ground_truth = [float(data[xt].data) for xt in x]
#true_b = data[b].data
#print("The true coefficient is: {}".format(float(true_b)))
# Observe data #
# Condition the model on the sampled observations.
[yt.observe(data[yt][:, 0, :]) for yt in y]
# get time series
#plt.plot([data[xt][:, 0, :] for xt in x])
#plt.scatter(y_range, time_series, c="k")
#plt.show()
# Structured variational distribution #
# Each posterior x_t interpolates, via a learnable sigmoid gate lambda_t,
# between following the previous latent and a free learnable mean.
Qx = [NormalVariable(0., 1., 'x0', learnable=True)]
Qx_mean = [RootVariable(0., 'x0_mean', learnable=True)]
Qlambda = [RootVariable(-1., 'x0_lambda', learnable=True)]
for t in range(1, T):
    # Initialise the gate towards the free mean (l = 0) at observed time
    # points and towards the autoregressive term (l = 1) elsewhere.
    if t in y_range:
        l = 0.
    else:
        l = 1.
    Qx_mean.append(RootVariable(0, x_names[t] + "_mean", learnable=True))
    Qlambda.append(RootVariable(l, x_names[t] + "_lambda", learnable=True))
    Qx.append(NormalVariable(BF.sigmoid(Qlambda[t])*Qx[t - 1] + (1 - BF.sigmoid(Qlambda[t]))*Qx_mean[t],
                             np.sqrt(dt)*driving_noise, x_names[t], learnable=True))
variational_posterior = ProbabilisticModel(Qx)
AR_model.set_posterior_model(variational_posterior)
# Inference #
inference.perform_inference(AR_model,
                            number_iterations=N_itr,
                            number_samples=N_smpl,
                            optimizer=optimizer,
                            lr=lr)
loss_list1 = AR_model.diagnostics["loss curve"]
samples_PE = AR_model.posterior_model.get_sample(1000)
#
# # Plot posterior
# #from brancher.visualizations import plot_density
# #plot_density(AR_model.posterior_model, variables=["x0", "x1"])
#
# # ELBO
# N_ELBO_smpl = 5000
# ELBO1 = AR_model.estimate_log_model_evidence(N_ELBO_smpl)
# print("PE: {}".format(ELBO1))
#
# # Statistics
# posterior_samples1 = AR_model._get_posterior_sample(2000)
# #b_posterior_samples1 = posterior_samples1[b].detach().numpy().flatten()
# #b_mean1 = np.mean(b_posterior_samples1)
# #b_sd1 = np.sqrt(np.var(b_posterior_samples1))
#
# x_mean1 = []
# lower_bound1 = []
# upper_bound1 = []
# for xt in x:
# x_posterior_samples1 = posterior_samples1[xt].detach().numpy().flatten()
# mean1 = np.mean(x_posterior_samples1)
# sd1 = np.sqrt(np.var(x_posterior_samples1))
# x_mean1.append(mean1)
# lower_bound1.append(mean1 - sd1)
# upper_bound1.append(mean1 + sd1)
# #print("The estimated coefficient is: {} +- {}".format(b_mean1, b_sd1))
# # Two subplots, unpack the axes array immediately
# f, (ax1, ax2, ax3) = plt.subplots(1, 3)
# ax1.plot(range(T), x_mean1, color="b", label="PE")
# ax1.scatter(y_range, time_series, color="k")
# ax1.plot(range(T), ground_truth, color="k", ls ="--", lw=1.5)
# ax1.fill_between(range(T), lower_bound1, upper_bound1, color="b", alpha=0.25)
# ax1.set_title("Time series")
# ax2.plot(np.array(loss_list1), color="b")
# ax2.set_title("Convergence")
# ax2.set_xlabel("Iteration")
# ax3.hist(b_posterior_samples1, 25, color="b", alpha=0.25)
# ax3.set_title("Posterior samples (b)")
# ax3.set_xlim(0, 1)
# plt.show()
# Mean-field variational distribution #
# Fully factorized posterior: each x_t gets an independent learnable Normal.
#Qb = BetaVariable(8., 1., "b", learnable=True)
Qx = [NormalVariable(0., 1., 'x0', learnable=True)]
for t in range(1, T):
    Qx.append(NormalVariable(0, 2., x_names[t], learnable=True))
variational_posterior = ProbabilisticModel(Qx)
AR_model.set_posterior_model(variational_posterior)
# Inference #
inference.perform_inference(AR_model,
                            number_iterations=N_itr,
                            number_samples=N_smpl,
                            optimizer=optimizer,
                            lr=lr)
loss_list2 = AR_model.diagnostics["loss curve"]
#Plot posterior
from brancher.visualizations import plot_density
plot_density(AR_model.posterior_model, variables=["x0", "x1"])
# ELBO
# BUGFIX: N_ELBO_smpl was only defined inside commented-out code above, so
# the ELBO line raised NameError at runtime.  Define it here (value taken
# from the commented-out block).
N_ELBO_smpl = 5000
ELBO2 = AR_model.estimate_log_model_evidence(N_ELBO_smpl)
print("MF: {}".format(ELBO2))
#
# samples_MF = AR_model.posterior_model.get_sample(1000)
#
# # Statistics
# posterior_samples2 = AR_model._get_posterior_sample(2000)
# #b_posterior_samples2 = posterior_samples2[b].detach().numpy().flatten()
# #b_mean2 = np.mean(b_posterior_samples2)
# #b_sd2 = np.sqrt(np.var(b_posterior_samples2))
#
# x_mean2 = []
# lower_bound2 = []
# upper_bound2 = []
# for xt in x:
# x_posterior_samples2 = posterior_samples2[xt].detach().numpy().flatten()
# mean2 = np.mean(x_posterior_samples2)
# sd2 = np.sqrt(np.var(x_posterior_samples2))
# x_mean2.append(mean2)
# lower_bound2.append(mean2 - sd2)
# upper_bound2.append(mean2 + sd2)
#print("The estimated coefficient is: {} +- {}".format(b_mean2, b_sd2))
# # Multivariate normal variational distribution #
# QV = MultivariateNormalVariable(loc=np.zeros((T,)),
# covariance_matrix=2*np.identity(T),
# learnable=True)
# #Qb = BetaVariable(8., 1., "b", learnable=True)
# Qx = [NormalVariable(QV[0], 0.1, 'x0', learnable=True)]
#
# for t in range(1, T):
# Qx.append(NormalVariable(QV[t], 0.1, x_names[t], learnable=True))
# variational_posterior = ProbabilisticModel(Qx)
# AR_model.set_posterior_model(variational_posterior)
#
# # Inference #
# inference.perform_inference(AR_model,
# number_iterations=N_itr,
# number_samples=N_smpl,
# optimizer=optimizer,
# lr=lr)
#
# loss_list3 = AR_model.diagnostics["loss curve"]
#
# # Plot posterior
# #plot_density(AR_model.posterior_model, variables=["x0", "x1"])
#
# # ELBO
# ELBO3 = AR_model.estimate_log_model_evidence(N_ELBO_smpl)
# print("MN: {}".format(ELBO3))
#
# samples_MN = AR_model.posterior_model.get_sample(1000)
#
# # Statistics
# posterior_samples3 = AR_model._get_posterior_sample(2000)
# #b_posterior_samples3 = posterior_samples3[b].detach().numpy().flatten()
# #b_mean3 = np.mean(b_posterior_samples3)
# #b_sd3 = np.sqrt(np.var(b_posterior_samples3))
#
# x_mean3 = []
# lower_bound3 = []
# upper_bound3 = []
# for xt in x:
# x_posterior_samples3 = posterior_samples3[xt].detach().numpy().flatten()
# mean3 = np.mean(x_posterior_samples3)
# sd3 = np.sqrt(np.var(x_posterior_samples3))
# x_mean3.append(mean3)
# lower_bound3.append(mean3 - sd3)
# upper_bound3.append(mean3 + sd3)
# #print("The estimated coefficient is: {} +- {}".format(b_mean3, b_sd3))
#
# # Structured NN distribution #
# latent_size = 10
# hidden_size = 10
# #Qb = BetaVariable(8., 1., "b", learnable=True)
# Qepsilon = NormalVariable(np.zeros((10,1)), np.ones((10,)), 'epsilon', learnable=True)
# W1 = RootVariable(np.random.normal(0, 0.1, (hidden_size, latent_size)), "W1", learnable=True)
# W2 = RootVariable(np.random.normal(0, 0.1, (T, hidden_size)), "W2", learnable=True)
# pre_x = BF.matmul(W2, BF.sigmoid(BF.matmul(W1, Qepsilon)))
# Qx = []
# for t in range(0, T):
# pre_x_t = DeterministicVariable(pre_x[t], "x{}_mean".format(t), learnable=True)
# Qx.append(NormalVariable(pre_x_t, 1., x_names[t], learnable=True))
# variational_posterior = ProbabilisticModel(Qx)
# AR_model.set_posterior_model(variational_posterior)
#
# # Inference #
# inference.perform_inference(AR_model,
# number_iterations=N_itr,
# number_samples=N_smpl,
# optimizer=optimizer,
# lr=lr)
#
# loss_list4 = AR_model.diagnostics["loss curve"]
#plot_density(AR_model.posterior_model, variables=["x0", "x1"])
#plt.show()
## ELBO
#ELBO4 = AR_model.estimate_log_model_evidence(N_ELBO_smpl)
#print("NN: {}".format(ELBO4))
#
# samples_NN = AR_model.posterior_model.get_sample(1000)
#
# # Statistics
# posterior_samples4 = AR_model._get_posterior_sample(2000)
# #b_posterior_samples4 = posterior_samples4[b].detach().numpy().flatten()
# #b_mean4 = np.mean(b_posterior_samples4)
# #b_sd4 = np.sqrt(np.var(b_posterior_samples2))
#
# x_mean4 = []
# lower_bound4 = []
# upper_bound4 = []
# for xt in x:
# x_posterior_samples4 = posterior_samples4[xt].detach().numpy().flatten()
# mean4 = np.mean(x_posterior_samples4)
# sd4 = np.sqrt(np.var(x_posterior_samples4))
# x_mean4.append(mean4)
# lower_bound4.append(mean4 - sd4)
# upper_bound4.append(mean4 + sd4)
# #print("The estimated coefficient is: {} +- {}".format(b_mean4, b_sd4))
#
# # Densities
# from brancher.visualizations import plot_multiple_samples
# #plot_multiple_samples([samples_PE, samples_MF, samples_NN], variables=["x0", "x1"], labels=["PE","MF", "NN"])
# #plot_multiple_samples([samples_PE, samples_MF, samples_MN, samples_NN], variables=["x0", "x1"], labels=["PE","MF", "MN", "NN"])
# plot_multiple_samples([samples_PE, samples_NN], variables=["x0", "x1"], labels=["PE", "NN"])
# plt.show()
#
# # Two subplots, unpack the axes array immediately
# f, (ax1, ax2) = plt.subplots(1, 2)
# ax1.plot(range(T), x_mean1, color="b", label="PE")
# ax1.plot(range(T), x_mean2, color="r", label="MF")
# ax1.plot(range(T), x_mean3, color="g", label="MV")
# ax1.plot(range(T), x_mean4, color="m", label="NN")
# #ax1.scatter(y_range, time_series, color="k")
# ax1.plot(range(T), ground_truth, color="k", ls ="--", lw=1.5)
# ax1.fill_between(range(T), lower_bound1, upper_bound1, color="b", alpha=0.25)
# ax1.fill_between(range(T), lower_bound2, upper_bound2, color="r", alpha=0.25)
# ax1.fill_between(range(T), lower_bound3, upper_bound3, color="g", alpha=0.25)
# ax1.fill_between(range(T), lower_bound4, upper_bound4, color="m", alpha=0.25)
# ax1.set_title("Time series")
# ax2.plot(np.array(loss_list1), color="b")
# ax2.plot(np.array(loss_list2), color="r")
# ax2.plot(np.array(loss_list3), color="g")
# ax2.plot(np.array(loss_list4), color="m")
# ax2.set_title("Convergence")
# ax2.set_xlabel("Iteration")
# plt.show()
|
1646837
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 22})
import math
plt.rcParams.update({'font.size': 22})  # NOTE(review): duplicate of the update above
# Load measurements from "Pl.csv": columns l and P (axis labels below
# suggest l in mm and P in units of 1e-3 Pa).
name = "Pl"
data = pd.read_csv(name + ".csv", names=["l", "P"])
K = 0.2
# NOTE(review): P1 (= g * K * P) is computed but never plotted; confirm
# whether Y below should use "P1" instead of the raw "P" column.
data["P1"] = 9.80665 * K * data["P"]
X = data["l"].values
sigma_X = 0.4  # x error bar half-width
Y = data["P"].values
sigma_Y = 0.7  # y error bar half-width
# Least-squares linear fit y = k*x + b, skipping the first data point.
A = np.vstack([X[1:], np.ones(len(X[1:]))]).T
k, b = np.linalg.lstsq(A, Y[1:], rcond=None)[0]
fig = plt.figure(figsize=(12, 7))
ax = fig.gca()
plt.scatter(X, Y, marker=".")
plt.errorbar(X, Y, xerr=sigma_X, yerr=sigma_Y, linestyle="None")
# Pad the axes by half an average point spacing on each side.
delta_x = (X.max() - X.min()) / len(X)
delta_y = (Y.max() - Y.min()) / len(Y)
ax.set_xlim(X.min() - delta_x/2, X.max() + delta_x/2)
ax.set_ylim((Y.min() - delta_y/2), Y.max() + delta_y/2)
plt.xlabel("$l, мм$")
plt.ylabel("$P, 10^{-3} Па$")
plt.plot(X, (k*X + b), 'r', label='Fitted line')
plt.grid(True)
plt.savefig("./" + name + ".png")
|
1646844
|
from varifier import truth_variant_finding, utils
def run(options):
    """CLI entry point: build a "truth" VCF from a truth assembly.

    Loads the optional mask BED file, then forwards all parsed CLI options
    to truth_variant_finding.make_truth_vcf.  The global-alignment
    coordinates are decremented by one (presumably converting 1-based CLI
    input to 0-based internal coordinates -- confirm against the parser).
    """
    mask = None if options.truth_mask is None else utils.load_mask_bed_file(options.truth_mask)
    truth_variant_finding.make_truth_vcf(
        options.ref_fasta,
        options.truth_fasta,
        options.outdir,
        options.flank_length,
        debug=options.debug,
        truth_mask=mask,
        max_ref_len=options.max_recall_ref_len,
        split_ref=options.split_ref,
        threads=options.cpus,
        maxmatch=not options.no_maxmatch,
        use_global_align=options.global_align,
        global_align_min_coord=options.global_align_min_coord - 1,
        global_align_max_coord=options.global_align_max_coord - 1,
    )
|
1646855
|
import numpy as np
from pandas import read_csv
# an example of asia bayesian net:
# https://www.eecis.udel.edu/~shatkay/Course/papers/Lauritzen1988.pdf
class BayesianNet(object):
    """Bayesian network over binary variables.

    Each node stores a conditional probability table (CPT) with one entry
    per configuration of its parents, holding P(node=1 | parent config).
    """
    def __init__(self, names, edges, tables=None):
        """
        names: node names; their order fixes the node indices.
        edges: (parent, child) pairs, each a two-element sequence of names.
        tables: optional CPTs (one list per node); defaults to all zeros.
        """
        self.n_nodes = len(names)
        if tables is None:
            tables = [[0]] * self.n_nodes
        self.nodes = [{'name': name, 'table': np.array(
            table)} for name, table in zip(names, tables)]
        self.name2idx = {k: v for v, k in enumerate(names)}
        # Adjacency matrix with graph[child, parent] == 1.
        self.graph = np.zeros((self.n_nodes, self.n_nodes))
        for edge in edges:
            self.graph[self.name2idx[edge[1]], self.name2idx[edge[0]]] = 1
        # binary[i] = 2**(n_nodes-1-i); the last k entries are the weights
        # [2**(k-1), ..., 1] used to turn k parent values into a CPT index.
        self.binary = np.array(
            [1 << self.n_nodes - 1 - i for i in range(self.n_nodes)])
    def fit(self, data):
        """Estimate every node's CPT from a 0/1 data matrix.

        data: array of shape (n_samples, n_nodes), columns ordered as names.
        """
        data_size = len(data)  # NOTE(review): unused
        for i, node in enumerate(self.nodes):
            table = []
            parents = self.graph[i] == 1
            marginal = data[:, parents]
            index = np.zeros(data.shape[0])
            if marginal.shape[1] > 0:
                # Encode each sample's parent configuration as an integer.
                index = (
                    marginal * self.binary[-marginal.shape[1]:]).sum(axis=1)
            # One CPT entry per parent configuration: empirical P(node=1 | config).
            for j in range(2**parents.sum()):
                table.append(data[(index == j), i].sum() / (index == j).sum())
            node['table'] = np.array(table)
    def joint_p(self, values):
        """Joint probability of a complete 0/1 assignment `values`."""
        p = 1
        for i in range(self.n_nodes):
            index = 0
            parents = self.graph[i] == 1
            if parents.sum() > 0:
                index = np.dot(values[parents], self.binary[-parents.sum():])
            # Equals table[index] when values[i] == 1, else 1 - table[index].
            p *= (1 - values[i]) + (2 * values[i] - 1) * \
                self.nodes[i]['table'][int(index)]
        return p
    def marginal_p(self, condition):
        """Probability of `condition`, summing out all unmentioned nodes.

        condition: list of strings like '+A' (A=1) or '~A' (A=0).
        """
        p = 0
        # -1 marks nodes whose value is not fixed by the condition.
        values = -np.ones(self.n_nodes)
        for v in condition:
            values[self.name2idx[v[1]]] = int(v[0] != '~')
        mask = np.arange(self.n_nodes)[(values == -1)]
        n_unkowns = self.n_nodes - len(condition)
        # Enumerate every assignment of the unknown nodes via its bit pattern.
        for i in range(2**n_unkowns):
            values[mask] = np.array(
                [int(x) for x in '{:0{size}b}'.format(i, size=n_unkowns)])
            p += self.joint_p(values)
        return p
    def query(self, v, condition):
        """Return [P(v=0 | condition), P(v=1 | condition)]."""
        p_pos = self.marginal_p([f'+{v}'] + condition) / self.marginal_p(condition)
        return [1 - p_pos, p_pos]
def get_asia_data(url):
    """Download the ASIA CSV and binarise 'yes'/'no' answers to 1/0."""
    return read_csv(url).apply(lambda x: x == 'yes').astype(int).values
def main():
    """Fit the classic 'Asia' network from downloaded data and print queries."""
    names = 'ATSLBEXD'
    edges = ['AT', 'SL', 'SB', 'TE', 'LE', 'BD', 'EX', 'ED']
    #tables = [[0.01], [0.01, 0.05], [0.5], [0.01, 0.1], [0.3, 0.6], [0, 1, 1, 1], [0.05, 0.98], [0.1, 0.7, 0.8, 0.9]]
    # also can use predefined conditional tables
    bn = BayesianNet(list(names), edges)
    asia_url = 'http://www.ccd.pitt.edu/wiki/images/ASIA10k.csv'
    bn.fit(get_asia_data(asia_url))
    print(bn.nodes)
    # Query a few variables under increasing amounts of evidence.
    for condition in [[], ['+A', '~S'], ['+A', '~S', '~D', '+X']]:
        for c in ['T', 'L', 'B', 'E']:
            print('p({}|{})={}'.format(c, ','.join(
                condition), bn.query(c, condition)))
if __name__ == "__main__":
main()
|
1646880
|
import unittest
from n0test.tdt_generator.ga import Generation
class TestGeneration(unittest.TestCase):
    """Unit tests for the GA ``Generation`` container and its mutation helpers."""
    def test___init__(self):
        # NOTE(review): a previous generation whose only entry scored -1 yields
        # an empty ``_previous`` — negative scorers appear to be dropped; confirm in ga.py
        prev = Generation(seed={"foo": "bar"} , _scoring_seed=False)
        prev._score = [-1]
        gen = Generation(previous_generation=prev)
        self.assertEqual(len(gen._previous), 0)
    def test__next_value(self):
        seed = [{
            "foo": "bar",
        }]
        gen = Generation(seed=seed, _scoring_seed=False)
        # ints step by the given delta; strings step by trailing code point;
        # dicts recurse into values; lists grow/shrink or mutate elements
        self.assertEqual(gen._next_value(1, 1), 2)
        self.assertEqual(gen._next_value(1, -1), 0)
        self.assertEqual(gen._next_value("aaa", 1), "aab")
        self.assertEqual(gen._next_value("aab", -1), "aaa")
        self.assertEqual(gen._next_value({"key": "aaa"}, 1), {"key": "aab"})
        self.assertEqual(gen._next_value({"key": "aab"}, -1), {"key": "aaa"})
        self.assertEqual(gen._next_value(["aaa"], 1), ["aaa", "aaa"])
        self.assertEqual(gen._next_value(["aaa"], 2), ["aab"])
        self.assertEqual(gen._next_value(["aaa", "aaa"], -1), ["aaa"])
    def test_cross(self):
        seed = {
            "foo": "bar",
            "hoge": "hoge",
        }
        gen = Generation(seed=seed, _scoring_seed=False)
        gen._persistence = [
            {
                "foo": "bar",
                "hoge": "hoge",
            },
            {
                "foo": "baa",
                "hoge": "hage",
            },
        ]
        # crossover is random, so this can spuriously fail (see msg)
        self.assertNotEqual(gen.cross()[0], gen._persistence[0], msg="This test fails probability, try a few times")
    def test_mutation(self):
        seed = {
            "foo": "bar",
        }
        result = {
            "foo": "bas",
        }
        gen = Generation(seed=seed, _scoring_seed=False)
        self.assertEqual(gen.mutate(1), result)
    def test__str_to_dec(self):
        seed = [{
            "foo": "bar",
        }]
        gen = Generation(seed=seed, _scoring_seed=False)
        # NOTE(review): encoding looks positional in base 100 over printable
        # chars ('0' -> 1, '\f' -> 100) — confirm against _dec_to_str below
        self.assertEqual(gen._str_to_dec(""), 0)
        self.assertEqual(gen._str_to_dec("0"), 1)
        self.assertEqual(gen._str_to_dec("00"), 101)
        self.assertEqual(gen._str_to_dec("\f"), 100)
    def test__dec_to_str(self):
        seed = [{
            "foo": "bar",
        }]
        gen = Generation(seed=seed, _scoring_seed=False)
        # inverse of _str_to_dec
        self.assertEqual(gen._dec_to_str(0), "")
        self.assertEqual(gen._dec_to_str(1), "0")
        self.assertEqual(gen._dec_to_str(101), "00")
        self.assertEqual(gen._dec_to_str(100), "\f")
|
1646882
|
from __future__ import print_function, absolute_import
import unittest, sklearn, sklearn.dummy, numpy as np
from SplitClassifier import SplitClassifier
class T(unittest.TestCase):
    """Tests for SplitClassifier: routes rows to sub-classifiers by an index function."""
    def test_split_classifier_with_single_classifier(self):
        # NOTE(review): mutating sc.classifiers[1] after fit changes predictions
        # for index-1 rows only — suggests per-index copies of the single
        # classifier; confirm in SplitClassifier
        c = sklearn.dummy.DummyClassifier('constant', constant=0)
        sc = SplitClassifier(c, lambda X: np.arange(len(X)) % 2)
        X = np.ones(shape=(100, 3))
        y = np.zeros(100)
        X_test = np.ones(shape=(100, 3))
        sc.fit(X, y)
        sc.classifiers[1].constant = 1
        predictions = sc.predict(X_test)
        self.assertEqual((100,), predictions.shape)
        np.testing.assert_array_equal(np.arange(100) % 2, predictions)
    def test_split_classifier_with_multiple_classifier(self):
        # one classifier per index value; each predicts its constant
        c0 = sklearn.dummy.DummyClassifier('constant', constant=0)
        c1 = sklearn.dummy.DummyClassifier('constant', constant=1)
        sc = SplitClassifier((c0, c1), lambda X: np.arange(len(X)) % 2)
        X = np.ones(shape=(100, 3))
        y = np.arange(100) % 2
        X_test = np.ones(shape=(100, 3))
        sc.fit(X, y)
        predictions = sc.predict(X_test)
        self.assertEqual((100,), predictions.shape)
        np.testing.assert_array_equal(np.arange(100) % 2, predictions)
    def test_split_classifier_with_3_indexes(self):
        c0 = sklearn.dummy.DummyClassifier('constant', constant=0)
        c1 = sklearn.dummy.DummyClassifier('constant', constant=1)
        c2 = sklearn.dummy.DummyClassifier('constant', constant=2)
        sc = SplitClassifier((c0, c1, c2), lambda X: np.arange(len(X)) % 3)
        X = np.ones(shape=(100, 3))
        y = np.arange(100) % 3
        X_test = np.ones(shape=(100, 3))
        sc.fit(X, y)
        predictions = sc.predict(X_test)
        self.assertEqual((100,), predictions.shape)
        np.testing.assert_array_equal(np.arange(100) % 3, predictions)
    def test_split_classifier_with_fallback_classifier(self):
        # rows whose index is -1 are handled by the fallback classifier
        c0 = sklearn.dummy.DummyClassifier('constant', constant=0)
        c1 = sklearn.dummy.DummyClassifier('constant', constant=1)
        fallback = sklearn.dummy.DummyClassifier('constant', constant=999)
        def indexer(X):
            indexes = np.arange(len(X)) % 3
            indexes[indexes==2] = -1
            return indexes
        sc = SplitClassifier((c0, c1), indexer, fallback_classifier=fallback)
        X = np.ones(shape=(100, 3))
        y = np.arange(100) % 3
        X_test = np.ones(shape=(100, 3))
        fallback.fit(X, np.array([999] * 100))
        sc.fit(X, y)
        predictions = sc.predict(X_test)
        self.assertEqual((100,), predictions.shape)
        expected = np.arange(100) % 3
        expected[expected==2] = 999
        np.testing.assert_array_equal(expected, predictions)
|
1646891
|
import pytest
import smbl
# todo: ensure that cmake is installed
# install CMake up front; other plugins' build steps presumably depend on it
smbl.prog.CMake.install_all_steps()
@pytest.mark.parametrize("plugin,plugin_name",
		[
			(x,x.get_plugin_name())
			for x in smbl.prog.plugins.get_registered_plugins() if x.is_platform_supported() and x is not smbl.prog.CMake
		]
	)
def test_plugins(plugin,plugin_name):
	"""Run every registered, platform-supported plugin's full install pipeline.

	Finishing install_all_steps() without raising is the pass condition;
	the trailing assert is a trivial sentinel.
	"""
	plugin.install_all_steps()
	assert 1==1
|
1646939
|
import time
import unittest
from typing import Union
import torch
import numpy as np
import random
from functools import lru_cache
from einops import rearrange, repeat
import torch.nn.functional as F
from torch import nn, einsum
@lru_cache()
def get_2dmask(seq_len, nx, ny, w, d):
    """Dense (seq_len, seq_len) boolean mask for 2-D local windowed attention.

    Positions are pixels of an image with ``ny`` columns (row = idx // ny,
    col = idx % ny).  Entry [i, j] is True (blocked) unless i and j differ by
    at most ``w`` on each axis and both offsets are multiples of ``d``.
    Cached, since the mask depends only on the arguments.
    """
    def blocked(i, j):
        row_off = i // ny - j // ny
        col_off = i % ny - j % ny
        return (abs(row_off) > w or abs(col_off) > w
                or bool(row_off % d) or bool(col_off % d))
    rows = [[blocked(i, j) for j in range(seq_len)] for i in range(seq_len)]
    return torch.tensor(rows, dtype=torch.bool, device='cpu')
def naive2d_matmul_qk(q, k, nx, ny, w, d, padding=0.0):
    """Dense reference attention scores q @ k^T with out-of-window entries
    overwritten (in place) by ``padding``.

    q, k: (batch, heads, seq_len, head_dim).
    """
    _, _, seq_len, _ = q.size()
    scores = q @ k.transpose(-2, -1)
    # broadcast the (seq_len, seq_len) window mask over batch and heads
    window_mask = get_2dmask(seq_len, nx, ny, w, d).to(q.device)
    scores.masked_fill_(window_mask[None, None, :, :], padding)
    return scores
def _get_invalid_locations_mask_fixed_dilation(seq_len: int, nx: int, ny: int, w: int, d: int):
c1d = 2 * w + 1
c = 2 * w * (w + 1)
return torch.BoolTensor([
[
i // ny + d * (j // c1d - w) < 0 or i % ny + d * (j % c1d - w) < 0 or i % ny + d * (j % c1d - w) >= ny
for j in range(c)
]
for i in range(seq_len)
], device='cpu')
@lru_cache()
def _get_invalid_locations_mask(seq_len: int, nx: int, ny: int, w: int, d: Union[torch.Tensor,int], autoregressive: bool, device: str):
    """Build (and cache) out-of-image masks for the local attention window.

    Returns (beginning_mask, ending_mask, num_invalid, end_num_invalid);
    the ending variants are None when ``autoregressive``.
    NOTE(review): lru_cache needs ``d`` to be hashable; a torch.Tensor hashes
    by identity, so per-head dilations only cache per tensor object — confirm
    this is intended.
    """
    if isinstance(d, int):
        # one dilation shared by every head: mask shape (1, 1, seq_len, c)
        mask = _get_invalid_locations_mask_fixed_dilation(seq_len, nx, ny, w, d)
        mask = mask[None, None, :, :]
        num_invalid = mask.sum()
    else:
        # per-head dilations: one mask per head -> shape (1, H, seq_len, c)
        head_masks = []
        head_invalids = []
        d_list = d.cpu().numpy().tolist()
        for d in d_list:  # NOTE(review): loop variable shadows the ``d`` argument
            one_head_mask = _get_invalid_locations_mask_fixed_dilation(seq_len, nx, ny, w, d)
            head_masks.append(one_head_mask)
            head_invalids.append(one_head_mask.sum())
        mask = torch.stack(head_masks, dim=0)
        num_invalid = torch.stack(head_invalids, dim=0)
        mask = mask[None, :, :, :]
    # the trailing half of the window is the beginning mask flipped on both axes
    ending_mask = None if autoregressive else mask.flip(dims=(2, 3)).to(device)
    end_num_invalid = None if autoregressive else num_invalid.to(device)
    return mask.to(device), ending_mask, num_invalid.to(device), end_num_invalid
def mask_invalid_locations(input_tensor: torch.Tensor, nx: int, ny: int, w: int, d: Union[torch.Tensor, int], autoregressive: bool) -> torch.Tensor:
    """Fill out-of-image window positions of ``input_tensor`` with -inf, in place.

    input_tensor: (B, H, seq_len, window) local attention scores.
    Returns the count of invalidated positions (a per-head tensor when ``d``
    is a tensor of per-head dilations).
    """
    seq_len = input_tensor.size(2)
    beginning_mask, ending_mask, num_invalid, end_num_invalid = \
        _get_invalid_locations_mask(seq_len, nx, ny, w, d, autoregressive,
                                    input_tensor.device)
    # the first c window columns are the leading half of the (2w+1)^2 window
    c = 2 * w * (w + 1)
    beginning_input = input_tensor[:, :, :, :c]
    beginning_mask = beginning_mask.expand(beginning_input.size())
    beginning_input.masked_fill_(beginning_mask, -float('inf'))
    if not autoregressive:
        # bidirectional: mirror the mask onto the trailing half of the window
        ending_input = input_tensor[:, :, :, -c:]
        ending_mask = ending_mask.expand(ending_input.size())
        ending_input.masked_fill_(ending_mask, -float('inf'))
        num_invalid = num_invalid + end_num_invalid
    return num_invalid
@lru_cache()
def _get_invalid_locations_mask_offical(nx: int, ny: int, w: int, d: int, autoregressive: bool, device: str):
    """Mask construction matching the einops/F.unfold ("official") path.

    For every query pixel, F.unfold gathers the (2w+1)^2 neighbourhood of key
    indices; padded neighbours carry the sentinel index nx*ny and are masked.
    Returns (mask, num_invalid) on ``device``.
    """
    img_seq = torch.arange(nx * ny)
    k_img_indices = rearrange(img_seq.float(), '(h w) -> () () h w', h=nx)
    k_img_indices = F.pad(k_img_indices, (w * d,) * 4,
                          value=nx * ny)  # padding set to be max, so it is never attended to
    k_img_indices = F.unfold(k_img_indices, 2 * w + 1, dilation=d)
    k_img_indices = rearrange(k_img_indices, 'b j i -> b i j')
    if autoregressive:
        # causal: also block keys at/after the query in raster order
        q_img_indices = rearrange(img_seq, 'i -> () i ()')
        mask = q_img_indices >= k_img_indices
    else:
        # bidirectional: only the padded (sentinel) neighbours are blocked
        mask = k_img_indices >= nx * ny
    num_invalid = mask.sum()
    return mask.to(device), num_invalid.to(device)
def mask_invalid_locations_offical(input_tensor: torch.Tensor, nx: int, ny: int, w: int, d: int, autoregressive: bool) -> torch.Tensor:
    """Mask ``input_tensor`` in place using the official unfold-based mask.

    Disallowed positions are set to -inf (so they vanish under softmax);
    returns the number of masked positions.
    """
    invalid_mask, invalid_count = _get_invalid_locations_mask_offical(
        nx, ny, w, d, autoregressive, input_tensor.device
    )
    input_tensor.masked_fill_(invalid_mask, float('-inf'))
    return invalid_count
def same_storage(x, y):
    """Return True when *x* and *y* are views over one underlying storage buffer
    (used to verify memory optimizations didn't copy)."""
    ptr_x = x.storage().data_ptr()
    ptr_y = y.storage().data_ptr()
    return ptr_x == ptr_y
class TestSlidingChunksMM(unittest.TestCase):
    """Equivalence check: unfold-based 2-D local attention vs. a dense masked
    reference implementation — forward values and input gradients — with rough
    wall-clock timing of both paths.

    NOTE(review): hard-coded to device='cuda'; fails without a GPU.
    """
    def test_tvm_equal_naiven2(self):
        # seed every RNG source so the 100 randomized trials are reproducible
        np.random.seed(300)
        random.seed(300)
        torch.manual_seed(300)
        torch.cuda.manual_seed(300)
        torch.cuda.manual_seed_all(300)
        torch.set_printoptions(sci_mode=False)
        nx = 30
        ny = 26
        N = nx * ny # * 16
        M = 64 # hidden size
        W = 8 # one sided. Actual window size = (2w+1)**2
        nlocal = (2 * W + 1) ** 2
        B = 2
        D = 1 # no dilation
        padding = W * D
        H = 12 # number of heads
        autoregressive = False # not autoregressive
        device = 'cuda'
        dtype = torch.float32
        failed_tests = 0
        time1 = time2 = 0
        for i in range(100):
            if i < 5:
                time1 = time2 = 0 # don't include the first few iterations because of high variance
            query = torch.randn(B * H * N * M, requires_grad=True, device=device, dtype=dtype).view(B, H, N, M)
            query.retain_grad()
            key = torch.randn(B * H * N * M, requires_grad=True, device=device, dtype=dtype).flip(dims=(0,)).view(B, H, N, M)
            key.retain_grad()
            value = torch.randn(B * H * N * M, requires_grad=True, device=device, dtype=dtype).view(B, H, N, M)
            value.retain_grad()
            # TVM MM — path 1: gather local key/value windows via F.unfold
            torch.cuda.synchronize()
            start = time.time()
            (q_img, k_img, v_img) = map(lambda t: t.view(B * H, N, M), (query, key, value))
            k_img, v_img = map(lambda t: rearrange(t, 'b (h w) c -> b c h w', h=nx), (k_img, v_img))
            # start use torch.nn.F
            k_img, v_img = map(lambda t: F.unfold(t, 2*W+1, padding=padding, dilation=D), (k_img, v_img))
            k_img, v_img = map(lambda t: rearrange(t, 'b (d j) i -> b i j d', j=nlocal), (k_img, v_img))
            # end use torch.nn.F
            # start use tensor.unfold
            # (k_img, v_img) = map(
            #     lambda t: F.pad(t, (padding,)*4), (k_img, v_img)
            # )
            # (k_img, v_img) = map(
            #     lambda t: t.unfold(2, 2*W+1, 1).unfold(3, 2*W+1, 1), (k_img, v_img) # bh * c * nx * ny * 2w1 * 2w1
            # )
            # k_img, v_img = map(
            #     lambda t: rearrange(t, 'b c h w x y -> b (h w) (x y) c'),
            #     (k_img, v_img))
            # end use tensor.unfold
            dots_image = einsum('b i d, b i j d -> b i j', q_img, k_img)
            mask_invalid_locations_offical(dots_image, nx, ny, W, D, autoregressive)
            attention_probs1 = torch.nn.functional.softmax(dots_image, dim=-1)
            context1 = einsum('b i j, b i j d -> b i d', attention_probs1, v_img).view(B, H, N, M)
            context1.sum().backward()
            torch.cuda.synchronize()
            end = time.time()
            time1 += end - start
            # snapshot gradients (1.0* forces a copy), then clear for path 2
            query_grad1 = 1.0*query.grad
            query.grad.zero_()
            key_grad1 = 1.0*key.grad
            key.grad.zero_()
            value_grad1 = 1.0*value.grad
            value.grad.zero_()
            torch.cuda.empty_cache()
            # path 2 (dense reference) only supports dilation 1, bidirectional
            assert D == 1
            assert not autoregressive
            torch.cuda.synchronize()
            start = time.time()
            attention2 = naive2d_matmul_qk(query, key, nx, ny, W, D, float('-inf'))
            attention_probs2 = torch.nn.functional.softmax(attention2, dim=-1) # (bsz, num_heads, seq_len, seq_len)
            context2 = attention_probs2 @ value # (bsz, num_heads, seq_len, head_dim)
            context2.sum().backward()
            torch.cuda.synchronize()
            end = time.time()
            time2 += end - start
            query_grad2 = 1.0*query.grad
            query.grad.zero_()
            key_grad2 = 1.0*key.grad
            key.grad.zero_()
            value_grad2 = 1.0*value.grad
            value.grad.zero_()
            torch.cuda.empty_cache()
            try:
                # assert torch.allclose(attention1, attention2.float(), atol=1e-4, rtol=1e-5)
                assert torch.allclose(context1, context2.float(), atol=1e-4, rtol=1e-5), "context1"
                assert torch.allclose(query_grad1, query_grad2.float(), atol=1e-4, rtol=1e-3), "query_grad1"
                assert torch.allclose(key_grad1, key_grad2.float(), atol=1e-4, rtol=1e-3), "key_grad1"
                assert torch.allclose(value_grad1, value_grad2.float(), atol=1e-4, rtol=1e-3), "value_grad1"
            except AssertionError:
                # count mismatches instead of failing fast; asserted at the end
                failed_tests += 1
        print('Time unfold total: {0:.5f} s'.format(time1))
        print('Time pytorch naive implementation: {0:.5f} s'.format(time2))
        print('Unfold vs. Naive speedup: {0:.5f}x'.format(time1/time2))
        print(f'Failed tests: {failed_tests}/{i+1}')
        assert failed_tests == 0
if __name__ == '__main__':
    # run the (CUDA-dependent) equivalence suite when executed directly
    unittest.main()
|
1646947
|
class Solution:
    def simplifyPath(self, path):
        """
        Reduce an absolute Unix-style path to its canonical form.

        :type path: str
        :rtype: str
        """
        parts = []
        for segment in path.split("/"):
            if segment in ("", "."):
                continue  # empty segments (from '//') and '.' are no-ops
            if segment == "..":
                if parts:
                    parts.pop()  # go up one level, but never above the root
            else:
                parts.append(segment)
        return "/" + "/".join(parts)
|
1646984
|
import time
from functools import wraps
def super_func(c, d):
    '''
    Decorator factory: offsets the wrapped function's return value by (c - d).
    '''
    def decorate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            return result + c - d
        return wrapper
    return decorate
@super_func(c=1,d=2)
def add(x, y):
    # decorated result is (x + y) + c - d = x + y - 1
    return x + y
print(add(1,3))  # prints 3
|
1646999
|
import random
from compas_rhino.artists import NetworkArtist
from compas.datastructures import Network
network = Network()
last_node = None
# build a 4x3 grid of nodes, each with a random integer 'weight' attribute,
# chained together by edges, then draw it in Rhino with weight labels
for i in range(12):
    node = network.add_node(x=i // 3, y=i % 3, z=0)
    network.node_attribute(node, 'weight', random.choice(range(20)))
    if last_node:
        # NOTE(review): connects to key i-1 instead of last_node; presumably
        # add_node returns sequential int keys so they coincide — confirm.
        # Also `if last_node` is falsy for node key 0, not only for None.
        network.add_edge(node, i-1)
    last_node = node
print(network.summary())
print(network.to_data())
# label each node with its weight attribute
text = {node: network.node_attribute(node, 'weight') for node in network.nodes()}
artist = NetworkArtist(network, layer='network')
artist.clear_layer()
artist.draw_nodelabels(text)
artist.draw()
artist.redraw()
|
1647010
|
from tests.example_apps.music.tables import Band
from ..base import DBTestCase
class TestCount(DBTestCase):
    """Integration test for ``Band.count()`` combined with a ``where`` clause."""

    def test_exists(self):
        """A count filtered to one known row returns exactly 1."""
        self.insert_rows()
        response = Band.count().where(Band.name == "Pythonistas").run_sync()
        # assertEqual reports expected vs. actual on failure, unlike the
        # original assertTrue(response == 1) which only says "False is not true"
        self.assertEqual(response, 1)
|
1647011
|
import datetime
import unittest
from pyramid import testing
from pyramid_mailer import get_mailer
from pyramid import httpexceptions
from ccvpn.models import User, Order, Profile, PasswordResetToken
from ccvpn import views, setup_routes
from ccvpn.tests import BaseTest, DummyRequest
class TestPublicViews(BaseTest):
    """Views reachable without authentication: static pages, home, redirects."""
    def test_page(self):
        req = DummyRequest()
        req.matchdict['page'] = 'help'
        resp = views.page(req)
        self.assertIsInstance(resp, dict)
        self.assertIn('Installation guides:', resp['content'])
    def test_page_fail(self):
        # an unknown static page name must produce a 404
        req = DummyRequest()
        req.matchdict['page'] = 'does-not-exists'
        resp = views.page(req)
        self.assertIsInstance(resp, httpexceptions.HTTPNotFound)
    def test_home(self):
        req = DummyRequest()
        resp = views.home(req)
        self.assertIsInstance(resp, dict)
    def test_account_redirect(self):
        # /account permanently redirects to /account/
        req = DummyRequest()
        resp = views.account.account_redirect(req)
        self.assertIsInstance(resp, httpexceptions.HTTPMovedPermanently)
        self.assertTrue(resp.location.endswith('/account/'))
class TestLoginView(BaseTest):
    """Login/logout flow and the authenticated account page.

    NOTE(review): '<PASSWORD>' placeholders are credential redactions; the
    original literals must match between setUp and the login posts.
    """
    def setUp(self):
        super().setUp()
        # one known user to authenticate against
        self.testuser = User(username='test', password='<PASSWORD>')
        self.session.add(self.testuser)
        self.session.flush()
    def test_login_form(self):
        req = DummyRequest()
        resp = views.account.login(req)
        self.assertIsInstance(resp, dict)
    def test_login(self):
        req = DummyRequest(post={
            'username': 'test',
            'password': '<PASSWORD>',
        })
        resp = views.account.login(req)
        self.assertIsInstance(resp, httpexceptions.HTTPSeeOther)
        self.assertTrue(resp.location.endswith('/account/'))
        # a successful login stores the user id in the session
        self.assertEqual(req.session['uid'], self.testuser.id)
    def test_login_invalid_password(self):
        req = DummyRequest(post={
            'username': 'test',
            'password': '<PASSWORD>',
        })
        resp = views.account.login(req)
        self.assertIsInstance(resp, dict)
        self.assertNotEqual(req.session.get('uid'), self.testuser.id)
    def test_login_invalid_req(self):
        # missing credentials: re-render the form, no session uid
        req = DummyRequest(post={})
        resp = views.account.login(req)
        self.assertIsInstance(resp, dict)
        self.assertNotEqual(req.session.get('uid'), self.testuser.id)
    def test_logout(self):
        req = DummyRequest()
        req.session['uid'] = self.testuser.id
        resp = views.account.logout(req)
        self.assertIsInstance(resp, httpexceptions.HTTPSeeOther)
        self.assertNotEqual(req.session.get('uid'), self.testuser.id)
    def test_account_page(self):
        req = DummyRequest()
        req.session['uid'] = self.testuser.id
        resp = views.account.account(req)
        self.assertIsInstance(resp, dict)
        self.assertEqual(req.response.status_code, 200)
class TestSignupView(BaseTest):
    """Signup form validation, referral crediting, and duplicate detection.

    NOTE(review): '<PASSWORD>' placeholders are credential redactions;
    test_invalid_password ('a' * <PASSWORD>) is not syntactically valid until
    the original length literal is restored.
    """
    def user_exists(self, name):
        # helper: True when a user row with this username exists
        u = self.session.query(User.id).filter_by(username=name).first()
        return u is not None
    def test_form(self):
        req = DummyRequest()
        resp = views.account.signup(req)
        self.assertIsInstance(resp, dict)
    def test_valid(self):
        req = DummyRequest(post={
            'username': 'newtest',
            'password': '<PASSWORD>',
            'password2': '<PASSWORD>',
            'email': 'email@host'
        })
        resp = views.account.signup(req)
        self.assertIsInstance(resp, httpexceptions.HTTPSeeOther)
        self.assertTrue(resp.location.endswith('/account/'))
    def test_valid_referral(self):
        _referrer = User(username='test', password='<PASSWORD>')
        self.session.add(_referrer)
        referrer = self.session.query(User).filter_by(username='test').first()
        self.assertFalse(referrer.is_paid)
        # sign up a new user carrying the referrer's id in ?ref=
        req = DummyRequest(post={
            'username': 'newtest',
            'password': '<PASSWORD>',
            'password2': '<PASSWORD>',
            'email': 'email@host'
        }, params={
            'ref': str(referrer.id),
        })
        resp = views.account.signup(req)
        newuser = self.session.query(User).filter_by(username='newtest') \
            .first()
        self.assertIsInstance(resp, httpexceptions.HTTPSeeOther)
        self.assertTrue(resp.location.endswith('/account/'))
        self.assertEqual(newuser.referrer_id, referrer.id)
        self.assertFalse(referrer.is_paid)
        # closing a paid order for the referred user credits the referrer
        testorder = Order(user=newuser, amount=1,
                          method=Order.METHOD.BITCOIN,
                          time=datetime.timedelta(days=30))
        self.session.add(testorder)
        self.session.add(referrer)
        self.session.flush()
        testorder.close(force=True)
        self.session.flush()
        self.session.refresh(referrer)
        self.assertTrue(referrer.is_paid)
    def test_invalid_username(self):
        req = DummyRequest(post={
            'username': 'newtest!',
            'password': '<PASSWORD>',
            'password2': '<PASSWORD>',
            'email': 'email@host'
        })
        resp = views.account.signup(req)
        self.assertIsInstance(resp, dict)
        self.assertEqual(req.response.status_code, 400)
        self.assertFalse(self.user_exists('newtest'))
    def test_invalid_password(self):
        req = DummyRequest(post={
            'username': 'newtest',
            'password': 'a' * <PASSWORD>,
            'password2': 'a' * <PASSWORD>,
            'email': 'email@host'
        })
        resp = views.account.signup(req)
        self.assertIsInstance(resp, dict)
        self.assertEqual(req.response.status_code, 400)
        self.assertFalse(self.user_exists('newtest'))
    def test_invalid_password2(self):
        # mismatched password confirmation
        req = DummyRequest(post={
            'username': 'newtest',
            'password': '<PASSWORD>',
            'password2': '<PASSWORD>',
            'email': 'email@host'
        })
        resp = views.account.signup(req)
        self.assertIsInstance(resp, dict)
        self.assertEqual(req.response.status_code, 400)
        self.assertFalse(self.user_exists('newtest'))
    def test_invalid_email(self):
        req = DummyRequest(post={
            'username': 'newtest',
            'password': '<PASSWORD>',
            'password2': '<PASSWORD>',
            'email': 'email host'
        })
        resp = views.account.signup(req)
        self.assertIsInstance(resp, dict)
        self.assertEqual(req.response.status_code, 400)
        self.assertFalse(self.user_exists('newtest'))
    def test_existing_username(self):
        u = User(username='newtest', email='user@host', password='<PASSWORD>')
        self.session.add(u)
        req = DummyRequest(post={
            'username': 'newtest',
            'password': '<PASSWORD>',
            'password2': '<PASSWORD>',
            'email': 'user2@host'
        })
        resp = views.account.signup(req)
        self.assertIsInstance(resp, dict)
        self.assertEqual(req.response.status_code, 400)
    def test_existing_email(self):
        u = User(username='newtest', email='user@host', password='<PASSWORD>')
        self.session.add(u)
        req = DummyRequest(post={
            'username': 'newtest2',
            'password': '<PASSWORD>',
            'password2': '<PASSWORD>',
            'email': 'user@host'
        })
        resp = views.account.signup(req)
        self.assertIsInstance(resp, dict)
        self.assertEqual(req.response.status_code, 400)
class TestForgotView(BaseTest):
    """Password-forgot form: sends a reset mail only for users with an email."""
    def setUp(self):
        super().setUp()
        testuser = User(username='test', password='<PASSWORD>',
                        email='user@host')
        self.session.add(testuser)
        # user without an email address cannot receive a reset mail
        testuserw = User(username='testWOemail', password='<PASSWORD>')
        self.session.add(testuserw)
    def test_form(self):
        req = DummyRequest()
        resp = views.account.forgot(req)
        self.assertEqual(req.response.status_code, 200)
        self.assertIsInstance(resp, dict)
        req = DummyRequest(post={})
        resp = views.account.forgot(req)
        self.assertEqual(req.response.status_code, 200)
        self.assertIsInstance(resp, dict)
    def test_valid(self):
        req = DummyRequest(post={
            'username': 'test',
        })
        req.remote_addr = '127.0.0.1'
        resp = views.account.forgot(req)
        self.assertEqual(req.response.status_code, 200)
        self.assertIsInstance(resp, dict)
        # exactly one reset mail must be queued
        registry = self.config.registry
        mailer = get_mailer(registry)
        self.assertEqual(len(mailer.outbox), 1)
    def test_invalid_username(self):
        req = DummyRequest(post={
            'username': 'NOTtest',
        })
        req.remote_addr = '127.0.0.1'
        resp = views.account.forgot(req)
        self.assertEqual(req.response.status_code, 400)
        self.assertIsInstance(resp, dict)
    def test_invalid_email(self):
        req = DummyRequest(post={
            'username': 'testWOemail',
        })
        req.remote_addr = '127.0.0.1'
        resp = views.account.forgot(req)
        self.assertEqual(req.response.status_code, 400)
        self.assertIsInstance(resp, dict)
class TestResetView(BaseTest):
    """Password reset via an emailed token; a token is single-use."""
    def setUp(self):
        super().setUp()
        testuser = User(username='test', password='<PASSWORD>',
                        email='user@host')
        self.session.add(testuser)
        self.session.flush()
        # a valid, unused reset token for the test user
        self.token = PasswordResetToken(uid=testuser.id)
        self.session.add(self.token)
        self.session.flush()
    def test_invalid_token(self):
        req = DummyRequest()
        req.matchdict['token'] = 'invalidtoken'
        req.remote_addr = '127.0.0.1'
        resp = views.account.reset(req)
        self.assertIsInstance(resp, httpexceptions.HTTPMovedPermanently)
    def test_form(self):
        req = DummyRequest()
        req.matchdict['token'] = self.token.token
        req.remote_addr = '127.0.0.1'
        resp = views.account.reset(req)
        self.assertEqual(req.response.status_code, 200)
        self.assertIsInstance(resp, dict)
        req = DummyRequest(post={})
        req.matchdict['token'] = self.token.token
        req.remote_addr = '127.0.0.1'
        resp = views.account.reset(req)
        self.assertEqual(req.response.status_code, 200)
        self.assertIsInstance(resp, dict)
    def test_invalid_password(self):
        req = DummyRequest(post={
            'password': 'pw',
            'password2': '<PASSWORD>'
        })
        req.matchdict['token'] = self.token.token
        req.remote_addr = '127.0.0.1'
        resp = views.account.reset(req)
        self.assertEqual(req.response.status_code, 400)
        self.assertIsInstance(resp, dict)
    def test_valid(self):
        req = DummyRequest(post={
            'password': '<PASSWORD>',
            'password2': '<PASSWORD>',
        })
        req.matchdict['token'] = self.token.token
        req.remote_addr = '127.0.0.1'
        resp = views.account.reset(req)
        self.assertIsInstance(resp, httpexceptions.HTTPMovedPermanently)
        self.assertTrue(resp.location.endswith('/account/login'))
        registry = self.config.registry
        mailer = get_mailer(registry)
        self.assertEqual(len(mailer.outbox), 1)
        # Should not be able to use a token > 1 time
        req = DummyRequest(post={
            'password': '<PASSWORD>',
            'password2': '<PASSWORD>',
        })
        req.matchdict['token'] = self.token.token
        req.remote_addr = '127.0.0.1'
        resp = views.account.reset(req)
        self.assertIsInstance(resp, httpexceptions.HTTPMovedPermanently)
        self.assertTrue(resp.location.endswith('/account/forgot'))
        # no second mail may be sent for the reused token
        registry = self.config.registry
        mailer = get_mailer(registry)
        self.assertEqual(len(mailer.outbox), 1)
class TestConfigView(BaseTest):
    """OpenVPN config download: embeds gateway + CA, honours profile selection."""
    def setUp(self):
        super().setUp()
        self.testuser = User(username='test', password='<PASSWORD>')
        self.session.add(self.testuser)
        self.session.flush()
        profile = Profile(uid=self.testuser.id, name='testprofile')
        self.session.add(profile)
        self.session.flush()
    def test_get(self):
        # the generated config must contain the gateway and CA blocks verbatim
        gw = bytes(views.account.openvpn_gateway, 'ascii')
        ca = bytes(views.account.openvpn_ca, 'ascii')
        req = DummyRequest()
        req.session['uid'] = self.testuser.id
        resp = views.account.config(req)
        self.assertEqual(resp.status_code, 200)
        self.assertIn(gw, resp.body)
        self.assertIn(ca, resp.body)
    def test_profile(self):
        gw = bytes(views.account.openvpn_gateway, 'ascii')
        ca = bytes(views.account.openvpn_ca, 'ascii')
        req = DummyRequest()
        req.session['uid'] = self.testuser.id
        req.matchdict['profile'] = 'testprofile'
        resp = views.account.config(req)
        self.assertEqual(resp.status_code, 200)
        self.assertIn(gw, resp.body)
        self.assertIn(ca, resp.body)
    def test_unknown_profile(self):
        # NOTE(review): passes the profile via req.GET here but via matchdict
        # above — confirm which the view actually reads for the 404 path
        req = DummyRequest()
        req.session['uid'] = self.testuser.id
        req.GET['profile'] = 'nottestprofile'
        resp = views.account.config(req)
        self.assertEqual(resp.status_code, 404)
|
1647038
|
import click
from textkit.utils import output
from unidecode import unidecode
import chardet
@click.command()
@click.argument('file', type=click.File('r'), default=click.open_file('-'))
def transliterate(file):
    '''Convert international text to ascii.

    Reads the whole input (file argument or stdin), decodes byte strings on
    Python 2 using chardet's guess, and emits an ASCII transliteration.
    '''
    # read() is equivalent to joining readlines() without the intermediate list
    content = file.read()
    try:
        # Python 2 path: detect the byte string's encoding before decoding
        content = content.decode(chardet.detect(content)['encoding'])
    except AttributeError:
        # Strings do not have a decode method in python 3.
        pass
    # bug fix: the call was wrapped in a single-element list literal that was
    # built and immediately discarded — call output() directly
    output(unidecode(content).encode('ascii', 'ignore'))
|
1647043
|
import pyb
# The pyboard has 4 LEDs that can be controlled
# these LEDs have IDs 1 - 4
led = pyb.LED(4) # 4 is the blue LED
# toggle LED state every second using on() and off() methods
while True:
    led.on()
    pyb.delay(1000)  # pyb.delay() takes milliseconds
    led.off()
    pyb.delay(1000)
|
1647068
|
# Prefer pysqlite3 (a standalone build of the sqlite3 bindings, typically
# newer than the stdlib's) when installed; otherwise fall back to stdlib sqlite3.
using_pysqlite3 = False
try:
    import pysqlite3 as sqlite3
    using_pysqlite3 = True
except ImportError:
    import sqlite3
# Surface tracebacks raised inside user-defined SQL functions instead of
# having sqlite3 swallow them silently.
if hasattr(sqlite3, "enable_callback_tracebacks"):
    sqlite3.enable_callback_tracebacks(True)
# module-level memo for sqlite_version(); populated on first call
_cached_sqlite_version = None
def sqlite_version():
    """Return the SQLite library version as a tuple of ints, computed once."""
    global _cached_sqlite_version
    if _cached_sqlite_version is not None:
        return _cached_sqlite_version
    _cached_sqlite_version = _sqlite_version()
    return _cached_sqlite_version
def _sqlite_version():
return tuple(
map(
int,
sqlite3.connect(":memory:")
.execute("select sqlite_version()")
.fetchone()[0]
.split("."),
)
)
def supports_table_xinfo():
    # "PRAGMA table_xinfo" (hidden/generated column info) arrived in SQLite 3.26.0
    return sqlite_version() >= (3, 26, 0)
def supports_generated_columns():
    # generated (computed) columns arrived in SQLite 3.31.0
    return sqlite_version() >= (3, 31, 0)
|
1647112
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .lsa_attention import LocationSensitiveAttention
from .basic_layers import Linear, Conv1d
from .vc_utils import get_mask_from_lengths
class DecoderPrenet(nn.Module):
    """Stack of bias-free linear+ReLU layers applied to previous mel frames.

    Dropout is deliberately kept active (training=True) even at inference —
    the standard Tacotron-style prenet regularization.
    """
    def __init__(self, in_dim, sizes):
        super().__init__()
        dims = [in_dim] + sizes[:-1]
        self.layers = nn.ModuleList(
            Linear(d_in, d_out, bias=False) for d_in, d_out in zip(dims, sizes)
        )
    def forward(self, x):
        for layer in self.layers:
            x = F.dropout(F.relu(layer(x)), p=0.5, training=True)
        return x
class Decoder(nn.Module):
"""Mixture of Logistic (MoL) attention-based RNN Decoder."""
    def __init__(
        self,
        enc_dim,
        num_mels,
        frames_per_step,
        attention_rnn_dim,
        attention_dim,
        attention_location_n_filters,
        attention_location_kernel_size,
        decoder_rnn_dim,
        prenet_dims,
        num_decoder_rnn_layer=1,
        use_stop_tokens=True,
        concat_context_to_last=True,
    ):
        """Build the autoregressive decoder.

        Args:
            enc_dim: encoder output (memory) feature size.
            num_mels: number of mel channels per frame.
            frames_per_step: mel frames emitted per decoder step (reduction factor r).
            attention_rnn_dim: hidden size of the attention LSTMCell.
            attention_dim: projection size inside the attention module.
            attention_location_n_filters: conv filters for location features.
            attention_location_kernel_size: kernel size for location features.
            decoder_rnn_dim: hidden size of each decoder LSTMCell.
            prenet_dims: layer sizes of the mel prenet.
            num_decoder_rnn_layer: number of stacked decoder LSTMCells.
            use_stop_tokens: if True, also predict a stop token per step.
            concat_context_to_last: if True, concatenate the attention context
                to the final decoder state before the output/stop projections.
        """
        super().__init__()
        self.enc_dim = enc_dim
        self.num_mels = num_mels
        self.frames_per_step = frames_per_step
        self.attention_rnn_dim = attention_rnn_dim
        self.decoder_rnn_dim = decoder_rnn_dim
        self.prenet_dims = prenet_dims
        self.use_stop_tokens = use_stop_tokens
        self.num_decoder_rnn_layer = num_decoder_rnn_layer
        self.concat_context_to_last = concat_context_to_last
        # Mel prenet
        self.prenet = DecoderPrenet(num_mels, prenet_dims)
        # Attention RNN: consumes [prenet output ; previous context]
        self.attention_rnn = nn.LSTMCell(
            prenet_dims[-1] + enc_dim,
            attention_rnn_dim
        )
        # Attention
        self.attention_layer = LocationSensitiveAttention(
            attention_rnn_dim=attention_rnn_dim,
            embedding_dim=enc_dim,
            attention_dim=attention_dim,
            attention_location_n_filters=attention_location_n_filters,
            attention_location_kernel_size=attention_location_kernel_size,
        )
        # Decoder RNN: first layer takes [context ; attention hidden],
        # deeper layers take the layer below's hidden state
        self.decoder_rnn_layers = nn.ModuleList()
        for i in range(num_decoder_rnn_layer):
            if i == 0:
                self.decoder_rnn_layers.append(
                    nn.LSTMCell(
                        enc_dim + attention_rnn_dim,
                        decoder_rnn_dim))
            else:
                self.decoder_rnn_layers.append(
                    nn.LSTMCell(
                        decoder_rnn_dim,
                        decoder_rnn_dim))
        # self.decoder_rnn = nn.LSTMCell(
        #     2 * enc_dim + attention_rnn_dim,
        #     decoder_rnn_dim
        # )
        # Output projection to r mel frames per step
        if concat_context_to_last:
            self.linear_projection = Linear(
                enc_dim + decoder_rnn_dim,
                num_mels * frames_per_step
            )
        else:
            self.linear_projection = Linear(
                decoder_rnn_dim,
                num_mels * frames_per_step
            )
        # Stop-token layer
        if self.use_stop_tokens:
            if concat_context_to_last:
                self.stop_layer = Linear(
                    enc_dim + decoder_rnn_dim, 1, bias=True, w_init_gain="sigmoid"
                )
            else:
                self.stop_layer = Linear(
                    decoder_rnn_dim, 1, bias=True, w_init_gain="sigmoid"
                )
def get_go_frame(self, memory):
B = memory.size(0)
go_frame = torch.zeros((B, self.num_mels), dtype=torch.float,
device=memory.device)
return go_frame
    def initialize_decoder_states(self, memory, mask):
        """Allocate zeroed recurrent and attention state for a new batch.

        Args:
            memory: (B, T_enc, enc_dim) encoder outputs.
            mask: (B, T_enc) boolean attention mask, True at padded positions
                (callers pass ``~get_mask_from_lengths(...)``), or None.
        """
        device = next(self.parameters()).device
        B = memory.size(0)
        T_enc = memory.size(1)
        # attention rnn states
        self.attention_hidden = torch.zeros(
            (B, self.attention_rnn_dim), device=device)
        self.attention_cell = torch.zeros(
            (B, self.attention_rnn_dim), device=device)
        # decoder rnn states, one (h, c) pair per stacked layer
        self.decoder_hiddens = []
        self.decoder_cells = []
        for i in range(self.num_decoder_rnn_layer):
            self.decoder_hiddens.append(
                torch.zeros((B, self.decoder_rnn_dim),
                            device=device)
            )
            self.decoder_cells.append(
                torch.zeros((B, self.decoder_rnn_dim),
                            device=device)
            )
        # current and cumulative attention weights, plus last context vector
        self.attention_weights = torch.zeros(
            (B, T_enc), device=device)
        self.attention_weights_cum = torch.zeros(
            (B, T_enc), device=device)
        self.attention_context = torch.zeros(
            (B, self.enc_dim), device=device)
        self.memory = memory
        # project the memory once up front for attention score computation
        self.processed_memory = self.attention_layer.memory_layer(memory)
        self.mask = mask
def parse_decoder_inputs(self, decoder_inputs):
"""Prepare decoder inputs, i.e. gt mel
Args:
decoder_inputs:(B, T_out, n_mel_channels) inputs used for teacher-forced training.
"""
decoder_inputs = decoder_inputs.reshape(
decoder_inputs.size(0),
int(decoder_inputs.size(1)/self.frames_per_step), -1)
# (B, T_out//r, r*num_mels) -> (T_out//r, B, r*num_mels)
decoder_inputs = decoder_inputs.transpose(0, 1)
# (T_out//r, B, num_mels)
decoder_inputs = decoder_inputs[:,:,-self.num_mels:]
return decoder_inputs
    def parse_decoder_outputs(self, mel_outputs, alignments, stop_outputs):
        """ Prepares decoder outputs for output
        Args:
            mel_outputs: list of (B, num_mels*r) frames, one per decoder step
            alignments: list of (B, T_enc) attention weights, one per step
            stop_outputs: list of per-step stop-token values, or None
        Returns:
            mel_outputs (B, T_out, num_mels), alignments (B, T_out//r, T_enc),
            stop_outputs (B, T_out//r) or None.
        """
        # (T_out//r, B, T_enc) -> (B, T_out//r, T_enc)
        alignments = torch.stack(alignments).transpose(0, 1)
        # (T_out//r, B) -> (B, T_out//r)
        if stop_outputs is not None:
            if alignments.size(0) == 1:
                # batch size 1: squeezed stop values need an explicit batch dim
                stop_outputs = torch.stack(stop_outputs).unsqueeze(0)
            else:
                stop_outputs = torch.stack(stop_outputs).transpose(0, 1)
            stop_outputs = stop_outputs.contiguous()
        # (T_out//r, B, num_mels*r) -> (B, T_out//r, num_mels*r)
        mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
        # decouple frames per step
        # (B, T_out, num_mels)
        mel_outputs = mel_outputs.view(
            mel_outputs.size(0), -1, self.num_mels)
        return mel_outputs, alignments, stop_outputs
    def attend(self, decoder_input):
        """Run one attention-RNN step and update the attention state in place.

        Args:
            decoder_input: (B, prenet_dim) prenet output for this step.
        Returns:
            (decoder_rnn_input, attention_context, attention_weights).
        """
        cell_input = torch.cat((decoder_input, self.attention_context), -1)
        self.attention_hidden, self.attention_cell = self.attention_rnn(
            cell_input, (self.attention_hidden, self.attention_cell))
        # location features: current and cumulative attention weights stacked
        attention_weights_cat = torch.cat(
            (self.attention_weights.unsqueeze(1),
             self.attention_weights_cum.unsqueeze(1)), dim=1)
        self.attention_context, self.attention_weights = self.attention_layer(
            self.attention_hidden, self.memory, self.processed_memory,
            attention_weights_cat, self.mask)
        self.attention_weights_cum += self.attention_weights
        # decoder RNN consumes [attention hidden ; context]
        decoder_rnn_input = torch.cat(
            (self.attention_hidden, self.attention_context), -1)
        return decoder_rnn_input, self.attention_context, self.attention_weights
def decode(self, decoder_input):
for i in range(self.num_decoder_rnn_layer):
if i == 0:
self.decoder_hiddens[i], self.decoder_cells[i] = self.decoder_rnn_layers[i](
decoder_input, (self.decoder_hiddens[i], self.decoder_cells[i]))
else:
self.decoder_hiddens[i], self.decoder_cells[i] = self.decoder_rnn_layers[i](
self.decoder_hiddens[i-1], (self.decoder_hiddens[i], self.decoder_cells[i]))
return self.decoder_hiddens[-1]
    def forward(self, memory, mel_inputs, memory_lengths):
        """ Decoder forward pass for training (teacher forcing).
        Args:
            memory: (B, T_enc, enc_dim) Encoder outputs
            mel_inputs: (B, T, num_mels) Decoder inputs for teacher forcing.
            memory_lengths: (B, ) Encoder output lengths for attention masking.
        Returns:
            mel_outputs: (B, T, num_mels) mel outputs from the decoder
            stop_outputs: (B, T//r) stop-token outputs; only returned when
                self.use_stop_tokens is set.
            alignments: (B, T//r, T_enc) attention weights.
        """
        # [1, B, num_mels]
        go_frame = self.get_go_frame(memory).unsqueeze(0)
        # [T//r, B, num_mels]
        mel_inputs = self.parse_decoder_inputs(mel_inputs)
        # Prepend the go frame so step t is conditioned on frame t-1.
        # [T//r + 1, B, num_mels]
        mel_inputs = torch.cat((go_frame, mel_inputs), dim=0)
        # [T//r + 1, B, prenet_dim]
        decoder_inputs = self.prenet(mel_inputs)
        # decoder_inputs_pitch = self.prenet_pitch(decoder_inputs__)
        self.initialize_decoder_states(
            memory, mask=~get_mask_from_lengths(memory_lengths),
        )
        # self.attention_layer.init_states(memory)
        # self.attention_layer_pitch.init_states(memory_pitch)
        mel_outputs, alignments = [], []
        if self.use_stop_tokens:
            stop_outputs = []
        else:
            stop_outputs = None
        # Teacher forcing: feed ground-truth prenet outputs step by step;
        # the last input position is never consumed (hence the -1).
        while len(mel_outputs) < decoder_inputs.size(0) - 1:
            decoder_input = decoder_inputs[len(mel_outputs)]
            # decoder_input_pitch = decoder_inputs_pitch[len(mel_outputs)]
            decoder_rnn_input, context, attention_weights = self.attend(decoder_input)
            decoder_rnn_output = self.decode(decoder_rnn_input)
            if self.concat_context_to_last:
                decoder_rnn_output = torch.cat(
                    (decoder_rnn_output, context), dim=1)
            mel_output = self.linear_projection(decoder_rnn_output)
            if self.use_stop_tokens:
                stop_output = self.stop_layer(decoder_rnn_output)
                stop_outputs += [stop_output.squeeze()]
            mel_outputs += [mel_output.squeeze(1)] #? perhaps don't need squeeze
            alignments += [attention_weights]
            # alignments_pitch += [attention_weights_pitch]
        mel_outputs, alignments, stop_outputs = self.parse_decoder_outputs(
            mel_outputs, alignments, stop_outputs)
        if stop_outputs is None:
            return mel_outputs, alignments
        else:
            return mel_outputs, stop_outputs, alignments
    def inference(self, memory, stop_threshold=0.5):
        """ Decoder inference (autoregressive decoding).
        Args:
            memory: (1, T_enc, D_enc) Encoder outputs
            stop_threshold: sigmoid(stop output) above which decoding may stop.
        Returns:
            mel_outputs: mel outputs from the decoder
            alignments: sequence of attention weights from the decoder
        """
        # [1, num_mels]
        decoder_input = self.get_go_frame(memory)
        self.initialize_decoder_states(memory, mask=None)
        # self.attention_layer.init_states(memory)
        mel_outputs, alignments = [], []
        # Bound decoding length relative to the encoder length.
        max_decoder_step = memory.size(1) * 8  # NOTE(sx): heuristic
        min_decoder_step = memory.size(1) * 2
        while True:
            decoder_input = self.prenet(decoder_input)
            decoder_input_final, context, alignment = self.attend(decoder_input)
            #mel_output, stop_output, alignment = self.decode(decoder_input)
            decoder_rnn_output = self.decode(decoder_input_final)
            if self.concat_context_to_last:
                decoder_rnn_output = torch.cat(
                    (decoder_rnn_output, context), dim=1)
            mel_output = self.linear_projection(decoder_rnn_output)
            stop_output = self.stop_layer(decoder_rnn_output)
            mel_outputs += [mel_output.squeeze(1)]
            alignments += [alignment]
            # Stop once the stop token fires, but never before min_decoder_step.
            if torch.sigmoid(stop_output.data) > stop_threshold and len(mel_outputs) >= min_decoder_step:
                break
            if len(mel_outputs) == max_decoder_step:
                print("Warning! Decoding steps reaches max decoder steps.")
                break
            # Feed the last generated frame back as the next step's input.
            decoder_input = mel_output[:,-self.num_mels:]
        mel_outputs, alignments, _ = self.parse_decoder_outputs(
            mel_outputs, alignments, None)
        return mel_outputs, alignments
|
1647130
|
import errno
import logging
import os
from unittest import mock
from pytest import raises
from snakeoil.osutils.mount import MS_BIND, MS_REC, MS_REMOUNT, MS_RDONLY
from pychroot.utils import dictbool, getlogger, bind
from pychroot.exceptions import ChrootMountError
def test_dictbool():
    """dictbool() is truthy only for keys that exist with truthy values."""
    present_true = {'a': True}
    assert dictbool(present_true, 'a')
    assert not dictbool(present_true, 'b')   # missing key
    assert not dictbool({'a': False}, 'a')   # falsy value
def test_getlogger():
    """getlogger() returns an instance of exactly the requested logger class."""
    logger = getlogger(logging.Logger, __name__)
    assert type(logger) == logging.Logger
def test_bind(tmp_path):
    """Exercise pychroot.utils.bind() error paths and mount-call shapes."""
    # Nonexistent source / destination paths must raise.
    with raises(ChrootMountError):
        bind('/nonexistent/src/path', '/randomdir', '/chroot/path')
    with raises(ChrootMountError):
        bind('tmpfs', '/nonexistent/dest/path', '/chroot/path')
    # mount() failures are wrapped in ChrootMountError.
    with mock.patch('pychroot.utils.mount', side_effect=OSError):
        with raises(ChrootMountError):
            bind('proc', '/root', '/chroot/path')
    # create
    with mock.patch('pychroot.utils.mount') as mount, \
            mock.patch('os.path.isdir') as isdir, \
            mock.patch('os.makedirs') as makedirs:
        src = tmp_path / 'pychroot-src'
        src.mkdir()
        src = str(src)
        chroot = tmp_path / 'pychroot-chroot'
        chroot.mkdir()
        chroot = str(chroot)
        dest = os.path.join(chroot, 'dest')
        destfile = os.path.join(chroot, 'destfile')
        os.mkdir(dest)
        isdir.return_value = True
        bind(src, dest, chroot, create=True)
        makedirs.assert_called_once_with(dest)
        makedirs.reset_mock()
        ## mounting on top of a symlink
        # symlink points to an existing path
        os.symlink('/dest', os.path.join(chroot, 'existing'))
        bind(src, os.path.join(chroot, 'existing'), chroot, create=False)
        assert not makedirs.called
        makedirs.reset_mock()
        # broken symlink
        # usually this would raise ChrootMountError but we're catching the makedirs call
        with raises(ChrootMountError):
            os.symlink('/nonexistent', os.path.join(chroot, 'broken'))
            bind(src, os.path.join(chroot, 'broken'), chroot, create=False)
        makedirs.assert_called_once_with(os.path.join(chroot, 'nonexistent'))
        makedirs.reset_mock()
        # makedirs errors other than "already exists" propagate to the caller.
        e = OSError('fake exception')
        e.errno = errno.EIO
        makedirs.side_effect = e
        with raises(OSError):
            bind(src, dest, chroot, create=True)
        makedirs.reset_mock()
        makedirs.side_effect = None
        # bind an individual file
        isdir.return_value = False
        bind(src, destfile, chroot, create=True)
        makedirs.assert_called_once_with(chroot)
        mount.reset_mock()
        # recursive mount
        isdir.return_value = True
        bind(src, dest, chroot, create=True, recursive=True)
        mount.assert_called_once_with(
            source=src, target=dest, fstype=None,
            flags=(MS_BIND | MS_REC), data='')
        mount.reset_mock()
        # readonly mount: a bind followed by a read-only remount
        isdir.return_value = True
        bind(src, dest, chroot, create=True, readonly=True)
        call1 = mock.call(
            source=src, target=dest, fstype=None, flags=(MS_BIND), data='')
        call2 = mock.call(
            source=src, target=dest, fstype=None,
            flags=(MS_BIND | MS_REMOUNT | MS_RDONLY), data='')
        mount.assert_has_calls([call1, call2])
        #with raises(ChrootMountError):
        #bind('/', '/root', readonly=True)
|
1647170
|
import pytest
# Note:
# Definitions for `cut1`, `cut2` and `cut_set` parameters are standard Pytest fixtures located in test/cut/conftest.py
# ########################################
# ############### PADDING ################
# ########################################
# pad() mints a fresh id unless preserve_id=True is passed; the same contract
# holds for both plain cuts and mixed (appended) cuts.
@pytest.mark.parametrize("direction", ["right", "left", "both"])
def test_pad_cut_preserve_id_false(cut1, direction: str):
    padded = cut1.pad(duration=300, direction=direction)
    assert padded.id != cut1.id
@pytest.mark.parametrize("direction", ["right", "left", "both"])
def test_pad_cut_preserve_id_true(cut1, direction: str):
    padded = cut1.pad(duration=300, direction=direction, preserve_id=True)
    assert padded.id == cut1.id
@pytest.mark.parametrize("direction", ["right", "left", "both"])
def test_pad_mixed_cut_preserve_id_false(cut1, direction: str):
    mixed = cut1.append(cut1)
    padded = mixed.pad(duration=300, direction=direction)
    assert padded.id != mixed.id
@pytest.mark.parametrize("direction", ["right", "left", "both"])
def test_pad_mixed_cut_preserve_id_true(cut1, direction: str):
    mixed = cut1.append(cut1)
    padded = mixed.pad(duration=300, direction=direction, preserve_id=True)
    assert padded.id == mixed.id
# ########################################
# ############## APPENDING ###############
# ########################################
# append() mints a new id unless preserve_id selects "left" (the receiver)
# or "right" (the argument); same for already-mixed receivers.
def test_append_cut_preserve_id_none(cut1, cut2):
    appended = cut1.append(cut2)
    assert appended.id != cut1.id
    assert appended.id != cut2.id
def test_append_cut_preserve_id_left(cut1, cut2):
    appended = cut1.append(cut2, preserve_id="left")
    assert appended.id == cut1.id
    assert appended.id != cut2.id
def test_append_cut_preserve_id_right(cut1, cut2):
    appended = cut1.append(cut2, preserve_id="right")
    assert appended.id != cut1.id
    assert appended.id == cut2.id
def test_append_mixed_cut_preserve_id_none(cut1, cut2):
    premixed = cut1.append(cut1)
    appended = premixed.append(cut2)
    assert appended.id != premixed.id
    assert appended.id != cut2.id
def test_append_mixed_cut_preserve_id_left(cut1, cut2):
    premixed = cut1.append(cut1)
    appended = premixed.append(cut2, preserve_id="left")
    assert appended.id == premixed.id
    assert appended.id != cut2.id
def test_append_mixed_cut_preserve_id_right(cut1, cut2):
    premixed = cut1.append(cut1)
    appended = premixed.append(cut2, preserve_id="right")
    assert appended.id != premixed.id
    assert appended.id == cut2.id
# ########################################
# ############### MIXING #################
# ########################################
# mix() follows the same preserve_id contract as append(): fresh id by
# default, "left"/"right" keep the respective operand's id.
def test_mix_cut_preserve_id_none(cut1, cut2):
    mixed = cut1.mix(cut2)
    assert mixed.id != cut1.id
    assert mixed.id != cut2.id
def test_mix_cut_preserve_id_left(cut1, cut2):
    mixed = cut1.mix(cut2, preserve_id="left")
    assert mixed.id == cut1.id
    assert mixed.id != cut2.id
def test_mix_cut_preserve_id_right(cut1, cut2):
    mixed = cut1.mix(cut2, preserve_id="right")
    assert mixed.id != cut1.id
    assert mixed.id == cut2.id
def test_mix_mixed_cut_preserve_id_none(cut1, cut2):
    premixed = cut1.append(cut1)
    mixed = premixed.mix(cut2)
    assert mixed.id != premixed.id
    assert mixed.id != cut2.id
def test_mix_mixed_cut_preserve_id_left(cut1, cut2):
    premixed = cut1.append(cut1)
    mixed = premixed.mix(cut2, preserve_id="left")
    assert mixed.id == premixed.id
    assert mixed.id != cut2.id
def test_mix_mixed_cut_preserve_id_right(cut1, cut2):
    premixed = cut1.append(cut1)
    mixed = premixed.mix(cut2, preserve_id="right")
    assert mixed.id != premixed.id
    assert mixed.id == cut2.id
# ########################################
# ############ PERTURB SPEED #############
# ########################################
# perturb_speed()/perturb_tempo() affix a marker to the id by default
# (affix_id=True); affix_id=False keeps the original id unchanged.
def test_cut_perturb_speed_affix_id_true(cut1):
    cut_sp = cut1.perturb_speed(1.1)
    assert cut_sp.id != cut1.id
def test_cut_perturb_speed_affix_id_false(cut1):
    cut_sp = cut1.perturb_speed(1.1, affix_id=False)
    assert cut_sp.id == cut1.id
def test_mixed_cut_perturb_speed_affix_id_true(cut1):
    premixed = cut1.append(cut1)
    cut_sp = premixed.perturb_speed(1.1)
    assert cut_sp.id != premixed.id
def test_mixed_cut_perturb_speed_affix_id_false(cut1):
    premixed = cut1.append(cut1)
    cut_sp = premixed.perturb_speed(1.1, affix_id=False)
    assert cut_sp.id == premixed.id
# ########################################
# ############ PERTURB TEMPO #############
# ########################################
def test_cut_perturb_tempo_affix_id_true(cut1):
    cut_tp = cut1.perturb_tempo(1.1)
    assert cut_tp.id != cut1.id
def test_cut_perturb_tempo_affix_id_false(cut1):
    cut_tp = cut1.perturb_tempo(1.1, affix_id=False)
    assert cut_tp.id == cut1.id
def test_mixed_cut_perturb_tempo_affix_id_true(cut1):
    premixed = cut1.append(cut1)
    cut_tp = premixed.perturb_tempo(1.1)
    assert cut_tp.id != premixed.id
def test_mixed_cut_perturb_tempo_affix_id_false(cut1):
    premixed = cut1.append(cut1)
    cut_tp = premixed.perturb_tempo(1.1, affix_id=False)
    assert cut_tp.id == premixed.id
# ########################################
# ########### PERTURB VOLUME #############
# ########################################
# perturb_volume() affixes the id by default, like speed/tempo above.
def test_cut_perturb_volume_affix_id_true(cut1):
    cut_vp = cut1.perturb_volume(1.1)
    assert cut_vp.id != cut1.id
def test_cut_perturb_volume_affix_id_false(cut1):
    cut_vp = cut1.perturb_volume(1.1, affix_id=False)
    assert cut_vp.id == cut1.id
def test_mixed_cut_perturb_volume_affix_id_true(cut1):
    premixed = cut1.append(cut1)
    cut_vp = premixed.perturb_volume(1.1)
    assert cut_vp.id != premixed.id
def test_mixed_cut_perturb_volume_affix_id_false(cut1):
    premixed = cut1.append(cut1)
    cut_vp = premixed.perturb_volume(1.1, affix_id=False)
    assert cut_vp.id == premixed.id
# ########################################
# ############## RESAMPLE ################
# ########################################
# NOTE: unlike the perturb_* methods, resample() defaults to affix_id=False,
# so the plain call keeps the original id.
def test_cut_resample_affix_id_true(cut1):
    cut_rs = cut1.resample(44100, affix_id=True)
    assert cut_rs.id != cut1.id
def test_cut_resample_affix_id_false(cut1):
    cut_rs = cut1.resample(44100)
    assert cut_rs.id == cut1.id
def test_mixed_cut_resample_affix_id_true(cut1):
    premixed = cut1.append(cut1)
    cut_rs = premixed.resample(44100, affix_id=True)
    assert cut_rs.id != premixed.id
def test_mixed_cut_resample_affix_id_false(cut1):
    premixed = cut1.append(cut1)
    cut_rs = premixed.resample(44100)
    assert cut_rs.id == premixed.id
|
1647172
|
from tweet import TweetClient
import config as cfg
from db import LightningDB
from lndrpc import LndWrapper
from lightningrpc import LightningWrapper
from sys import argv
import os
import logging
def main():
    """Configure logging, select a Lightning backend from the CLI flag, and
    start the tweet watcher.

    With ``--clightning`` a c-lightning RPC socket is used; otherwise an LND
    connection is built from the configured TLS certificate.
    """
    logging.basicConfig(level=logging.INFO)
    if len(argv) > 1 and argv[1] == '--clightning':
        # Fall back to the default c-lightning data dir when not configured.
        ln_path = cfg.LN_PATH or os.path.join(os.getenv('HOME'), '.lightning')
        rpc_path = os.path.join(ln_path, 'lightning-rpc')
        logging.debug(rpc_path)
        ln = LightningWrapper(rpc_path, cfg)
    else:
        # Close the certificate file promptly instead of leaking the handle.
        with open(os.path.expanduser(cfg.LND_CERT_PATH)) as cert_file:
            cert = cert_file.read()
        ln = LndWrapper(cert, cfg)
    db = LightningDB(cfg.DB_PATH)
    tweet = TweetClient(cfg.twitter, db, cfg.twitter_owner, ln)
    tweet.watch()


if __name__ == "__main__":
    main()
|
1647189
|
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from spells import Spells
class myHandler(BaseHTTPRequestHandler):
    """Maps GET request paths to gesture spells cast via the global `spells`."""

    # Trailing path segments that name a castable spell.
    SPELL_NAMES = ("circle", "square", "zee", "eight", "triangle", "tee",
                   "left", "center")

    # Handler for the GET requests
    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        spell = self.path.lstrip("/")
        if spell in self.SPELL_NAMES:
            # BUG FIX: "/triangle" previously called spells("triangle") --
            # invoking the object itself -- instead of spells.cast("triangle").
            spells.cast(spell)
        # NOTE(review): this body is not strictly valid JSON (single quotes);
        # kept byte-for-byte in case existing clients parse it loosely.
        self.wfile.write(bytes("{'done':true}", "utf-8"))
        return
def runServer():
    """Start a blocking HTTP server dispatching requests to myHandler.

    Runs until interrupted; the listening socket is always closed on exit.
    """
    PORT = 8000
    # Construct outside the try block so `server` is always bound before the
    # handler that references it (the original could hit an unbound name).
    server = HTTPServer(('', PORT), myHandler)
    print('Started httpserver on port ', PORT)
    try:
        # Wait forever for incoming http requests
        server.serve_forever()
    except KeyboardInterrupt:
        print('^C received, shutting down the web server')
    finally:
        server.socket.close()
|
1647195
|
import datetime
from django.utils import timezone
from django.utils.dateparse import parse_datetime
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework.generics import ListAPIView
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.views import APIView
from .gas_station import GasStationProvider
from .models import GasPrice
from .serializers import GasPriceSerializer
class DefaultPagination(LimitOffsetPagination):
    """Limit/offset pagination capped at 500 rows per page."""
    max_limit = 500
    default_limit = 500
class GasStationView(APIView):
    """Read-only endpoint exposing the current gas price estimation."""
    @swagger_auto_schema(responses={200: GasPriceSerializer()})
    def get(self, request, format=None):
        """
        Gets current gas prices for the ethereum network (using last 200 blocks)
        `Lowest` and `fastest` are the lower and the higher gas prices found in those blocks
        The rest are percentiles on all the gas prices in the last blocks.
        `safe_low=percentile 30`, `standard=percentile 50` and `fast=percentile 75`
        """
        gas_station = GasStationProvider()
        gas_prices = gas_station.get_gas_prices()
        serializer = GasPriceSerializer(gas_prices)
        # Allow clients/proxies to cache the response for 4 minutes.
        return Response(serializer.data, headers={"Cache-Control": f"max-age={60 * 4}"})
class GasStationHistoryView(ListAPIView):
    """Paginated history of stored gas prices within a date range."""
    serializer_class = GasPriceSerializer
    pagination_class = DefaultPagination
    def get_queryset(self):
        """Filter GasPrice rows by the fromDate/toDate query params.

        Defaults to the last 30 days up to now.
        NOTE(review): parse_datetime returns None for malformed input, which
        would yield a None bound inside created__range -- confirm intended.
        """
        from_date = self.request.query_params.get("fromDate")
        to_date = self.request.query_params.get("toDate")
        from_date = (
            parse_datetime(from_date)
            if from_date
            else timezone.now() - datetime.timedelta(days=30)
        )
        to_date = parse_datetime(to_date) if to_date else timezone.now()
        return GasPrice.objects.filter(created__range=[from_date, to_date]).order_by(
            "created"
        )
    @swagger_auto_schema(
        manual_parameters=[
            openapi.Parameter(
                "fromDate",
                openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                format="date-time",
                description="ISO 8601 date to filter stats from. If not set, 1 month before now",
            ),
            openapi.Parameter(
                "toDate",
                openapi.IN_QUERY,
                type=openapi.TYPE_STRING,
                format="date-time",
                description="ISO 8601 date to filter stats to. If not set, now",
            ),
        ]
    )
    def get(self, request, *args, **kwargs):
        # Override only to attach the swagger parameter documentation above.
        return super().get(request, *args, **kwargs)
|
1647201
|
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Iterable, List, Union
import sentencepiece as spm
from typeguard import check_argument_types
class AbsTokenizer(ABC):
    """Abstract interface converting between text, token strings, and token ids."""
    @abstractmethod
    def text2tokens(self, line: str) -> List[str]:
        """Split one line of text into a token sequence."""
        raise NotImplementedError
    @abstractmethod
    def tokens2text(self, tokens: Iterable[str]) -> str:
        """Join a token sequence back into text."""
        raise NotImplementedError
    @abstractmethod
    def tokens2ids(self, tokens: Iterable[str]) -> List[int]:
        """Map tokens to integer ids."""
        raise NotImplementedError
    @abstractmethod
    def ids2tokens(self, ids: Iterable[int]) -> List[str]:
        """Map integer ids back to tokens."""
        raise NotImplementedError
    def text2ids(self, line: str) -> List[int]:
        """Convenience: text -> tokens -> ids."""
        return self.tokens2ids(self.text2tokens(line))
    def ids2text(self, ids: Iterable[int]) -> str:
        """Convenience: ids -> tokens -> text."""
        return self.tokens2text(self.ids2tokens(ids))
    @abstractmethod
    def __len__(self):
        """Vocabulary size."""
        raise NotImplementedError
class SentencepieceTokenizer(AbsTokenizer):
    """Tokenizer backed by a trained SentencePiece model.

    An optional external ``token_list`` (vocab file path or iterable of
    tokens) overrides the model's own piece inventory for the id mapping.
    """
    def __init__(self, model: Union[Path, str],
                 token_list: Union[Path, str, Iterable[str]] = None):
        assert check_argument_types()
        self.model = str(model)
        self.sp = spm.SentencePieceProcessor()
        self.sp.load(self.model)
        if isinstance(token_list, (Path, str)):
            # Vocabulary file: one token per line.
            char_list = Path(token_list)
            with char_list.open("r", encoding="utf-8") as f:
                token_list = [line.rstrip() for line in f]
        elif token_list is None:
            # Fall back to the model's own piece inventory.
            token_list = [self.sp.IdToPiece(i)
                          for i in range(self.sp.get_piece_size())]
        self.idx2tok = {i: tok for i, tok in enumerate(token_list)}
        self.tok2idx = {tok: i for i, tok in enumerate(token_list)}
    def __repr__(self):
        return f'{self.__class__.__name__}(model="{self.model}")'
    def __getstate__(self):
        # The SentencePieceProcessor handle is dropped for pickling and
        # reloaded from the model path on unpickle (see __setstate__).
        state = self.__dict__.copy()
        state["sp"] = None
        return state
    def __setstate__(self, state):
        self.__dict__ = state
        self.sp = spm.SentencePieceProcessor()
        self.sp.load(self.model)
    def text2tokens(self, line: str) -> List[str]:
        return self.sp.EncodeAsPieces(line)
    def tokens2text(self, tokens: Iterable[str]) -> str:
        return self.sp.DecodePieces(list(tokens))
    def tokens2ids(self, tokens: Iterable[str]) -> List[int]:
        # Unknown tokens fall back to the "<unk>" id; assumes "<unk>" is in
        # the vocabulary -- TODO confirm, else this raises KeyError.
        return [self.tok2idx.get(tok, self.tok2idx["<unk>"]) for tok in tokens]
    def ids2tokens(self, ids: Iterable[int]) -> List[str]:
        return [self.idx2tok[idx] for idx in ids]
    def __len__(self):
        # NOTE(review): idx2tok is always a dict after __init__, so the None
        # branch looks unreachable -- confirm before removing.
        if self.idx2tok is None:
            return self.sp.get_piece_size()
        else:
            return len(self.idx2tok)
|
1647204
|
import datetime
import matplotlib.pyplot as plt
import numpy as np
import geospacelab.visualization.mpl.geomap.geodashboards as geomap
def test_ampere():
    """Plot a SuperDARN potential map on a polar MLT grid and save it.

    NOTE(review): despite the name, this loads SuperDARN 'potmap' data, not
    AMPERE data -- the function appears adapted from an AMPERE example.
    """
    dt_fr = datetime.datetime(2016, 3, 15, 0)
    dt_to = datetime.datetime(2016, 3, 15, 23, 59)
    time1 = datetime.datetime(2016, 3, 15, 1, 10)
    pole = 'N'
    load_mode = 'assigned'
    # specify the file full path
    data_file_paths = ['/home/lei/afys-data/SuperDARN/PotentialMap/2016/test.dat']
    # data_file_paths = ['/Users/lcai/Geospacelab/Data/SuperDARN/POTMAP/2016/SuperDARM_POTMAP_20160314_10min_test.txt']
    viewer = geomap.GeoDashboard(dt_fr=dt_fr, dt_to=dt_to, figure_config={'figsize': (8, 8)})
    viewer.dock(datasource_contents=['superdarn', 'potmap'], load_mode=load_mode, data_file_paths=data_file_paths)
    viewer.set_layout(1, 1)
    dataset_superdarn = viewer.datasets[1]
    phi = viewer.assign_variable('GRID_phi', dataset_index=1)
    dts = viewer.assign_variable('DATETIME', dataset_index=1).value.flatten()
    mlat = viewer.assign_variable('GRID_MLAT', dataset_index=1)
    mlon = viewer.assign_variable('GRID_MLON', dataset_index=1)
    mlt = viewer.assign_variable(('GRID_MLT'), dataset_index=1)
    # Select the record closest to time1.
    ind_t = dataset_superdarn.get_time_ind(ut=time1)
    # initialize the polar map
    pid = viewer.add_polar_map(row_ind=0, col_ind=0, style='mlt-fixed', cs='AACGM', mlt_c=0., pole=pole, ut=time1, boundary_lat=50, mirror_south=True)
    panel1 = viewer.panels[pid]
    panel1.add_coastlines()
    phi_ = phi.value[ind_t]
    mlat_ = mlat.value[ind_t]
    mlt_ = mlt.value[ind_t]
    mlon_ = mlon.value[ind_t]
    # grid_mlat, grid_mlt, grid_phi = dataset_superdarn.grid_phi(mlat_, mlt_, phi_, interp_method='cubic')
    grid_mlat, grid_mlt, grid_phi = dataset_superdarn.postprocess_roll(mlat_, mlt_, phi_)
    # re-grid the original data with higher spatial resolution, default mlt_res = 0.05, mlat_res = 0.5. used for plotting.
    # grid_mlat, grid_mlt, grid_fac = dataset_ampere.grid_fac(phi_, mlt_res=0.05, mlat_res=0.05, interp_method='linear')
    # Contour levels in volts; note the asymmetry (no -3e3 level) -- TODO confirm intended.
    levels = np.array([-21e3, -18e3, -15e3, -12e3, -9e3, -6e3, 3e3, 6e3, 9e3, 12e3, 15e3, 18e3, 21e3])
    # ipc = panel1.add_pcolor(fac_, coords={'lat': mlat[ind_t, ::], 'lon': None, 'mlt': mlt[ind_t, ::], 'height': 250.}, cs='AACGM', **pcolormesh_config)
    ict = panel1.add_contour(grid_phi, coords={'lat': grid_mlat, 'lon': None, 'mlt': grid_mlt}, cs='AACGM', colors='b', levels=levels)
    # panel1.major_ax.clabel(ict, inline=True, fontsize=10)
    panel1.add_gridlines(lat_res=5, lon_label_separator=5)
    polestr = 'North' if pole == 'N' else 'South'
    # panel1.add_title('DMSP/SSUSI, ' + band + ', ' + sat_id.upper() + ', ' + polestr + ', ' + time1.strftime('%Y-%m-%d %H%M UT'), pad=20)
    plt.savefig('superdarn_example', dpi=300)
    plt.show()


if __name__ == "__main__":
    test_ampere()
|
1647248
|
from __future__ import unicode_literals
import json
import datetime
from django.core.urlresolvers import reverse
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.db.models.signals import post_save
from django.dispatch import receiver
from jsonfield import JSONField
from onadata.apps.fieldsight.models import Organization, Project, Site
from onadata.apps.users.models import UserProfile
from django.http import JsonResponse
from celery.result import AsyncResult
from django.contrib.contenttypes.fields import GenericRelation
# user_type = ContentType.objects.get(app_label="users", model="userprofile")
from channels import Group as ChannelGroup
class FieldSightLog(models.Model):
    """Activity/notification log entry for a user action on an organization,
    project, site, form, or response.

    ``content_object`` points at the primary object the event concerns;
    ``extra_object`` optionally points at a secondary one (e.g. the affected
    user).
    """
    ACTION_TYPES = (
        (1, 'User was added as the Organization Admin of Organization Name by Invitor Full Name.'),
        (2, 'User was added as the Project Manager of Project Name by Invitor Full Name.'),
        (3, 'User was added as Reviewer of Site Name by Invitor Full Name.'),
        (4, 'User was added as Site Supervisor of Site Name by Invitor Full Name.'),
        (5, 'User was assigned as an Organization Admin in Organization Name.'),
        (6, 'User was assigned as a Project Manager in Project Name.'),
        (7, 'User was assigned as a Reviewer in Site Name.'),
        (8, 'User was assigned as a Site Supervisor in Site Name.'),
        (9, 'User created a new organization named Organization Name'),
        (10, 'User created a new project named Project Name.'),
        (11, 'User created a new site named Site Name in Project Name.'),
        (12, 'User created number + sites in Project Name.'),
        (13, 'User changed the details of Organization Name.'),
        (14, 'User changed the details of Project Name.'),
        (15, 'User changed the details of Site Name.'),
        (16, 'User submitted a response for Form Type Form Name in Site Name.'),
        (17, 'User reviewed a response for Form Type Form Name in Site Name.'),
        (18, 'User assigned a new Form Type Form Name in Project Name.'),
        (19, 'User assigned a new Form Type Form Name to Site Name.'),
        (20, 'User edited Form Name form.'),
        (21, 'User assign successful in organization.'),
        (22, 'User assign sucessfull in project.'),
        (23, 'Users were already assigned.'),
        (24, 'User was added as unassigned.'),
        (25, 'User was added as donor in project.'),
        (26, 'User was added as the Project Manager in count project of org by Invitor Full Name.'),
        (27, 'User was added as Reviewer in count site of project by Invitor Full Name.'),
        (28, 'User was added as Site Supervisor in count site of project by Invitor Full Name.'),
        (29, 'Project SIte Import From Project Name Completed SuccessFully'),
        (30, 'Project SIte Import From number of region in Project Name Completed SuccessFully'),
        (31, 'User edited a response for Form Type Form Name in Site Name.'),
        (32, 'Report generated sucessfull.'),
        (33, 'Response Delete sucessfull.'),
        (34, 'Delete form sucessful.'),
        (35, 'Remove roles.'),
        (36, 'Delete project/site/org/ .. etc.'),
        (412, 'Bulk upload of number + sites in Project Name failed.'),
        (421, 'User assign unsuccessful in organization.'),
        (422, 'User assign unsucessfull in project.'),
        (429, 'Project SIte Import From Project Name Completed SuccessFully'),
        (430, 'Project SIte Import From number of region in Project Name Completed SuccessFully'),
        (432, 'Report generation failed.'),
    )
    type = models.IntegerField(default=0, choices=ACTION_TYPES)
    title = models.CharField(max_length=255)
    description = models.CharField(max_length=255, blank=True, null=True)
    date = models.DateTimeField(auto_now_add=True)
    is_seen = models.BooleanField(default=False)
    seen_by = models.ManyToManyField(User)
    source = models.ForeignKey(User, related_name='log', null=True)
    organization = models.ForeignKey(Organization, related_name="logs", null=True)
    project = models.ForeignKey(Project, related_name="logs", null=True)
    site = models.ForeignKey(Site, related_name="logs", null=True)
    extra_message = models.TextField(blank=True, null=True)
    extra_json = JSONField(blank=True, null=True, default=None)
    recipient = models.ForeignKey(User, related_name='recipent_log', null=True)
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey()
    extra_content_type = models.ForeignKey(ContentType, related_name='notify_object', blank=True, null=True)
    extra_object_id = models.CharField(max_length=255, blank=True, null=True)
    extra_object = GenericForeignKey('extra_content_type', 'extra_object_id')
    class Meta:
        get_latest_by = "-date"
        ordering = ["-date"]
    def get_absolute_url(self):
        """URL of this notification's detail page."""
        return reverse('eventlog:notification-detail', kwargs={'pk': self.pk})
    def get_event_url(self):
        """URL of the primary related object, or None if unresolvable."""
        try:
            return self.content_object.get_absolute_url()
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        except Exception:
            return None
    def get_event_name(self):
        """Display name of the primary related object, or None."""
        try:
            return self.content_object.getname()
        except Exception:
            return None
    def get_extraobj_url(self):
        """URL of the secondary related object ("#" for users without a profile)."""
        if self.extra_object is None:
            return None
        if self.extra_content_type.model == "user":
            if self.extra_object.user_profile:
                return self.extra_object.user_profile.get_absolute_url()
            return "#"
        return self.extra_object.get_absolute_url()
    def get_extraobj_name(self):
        """Display name of the secondary related object (email fallback for users)."""
        if self.extra_object is None:
            return None
        if self.extra_content_type.model == "user":
            if self.extra_object.user_profile:
                return self.extra_object.user_profile.getname()
            return self.extra_object.email
        return self.extra_object.getname()
    def get_source_url(self):
        """Profile URL of the acting user, or None without a profile."""
        try:
            profile = self.source.user_profile
        except UserProfile.DoesNotExist:
            return None
        else:
            return profile.get_absolute_url()
    def get_source_name(self):
        """Full name of the acting user."""
        return self.source.first_name + ' ' + self.source.last_name
    def get_org_url(self):
        if self.organization is None:
            return None
        return self.organization.get_absolute_url()
    def get_project_url(self):
        if self.project is None:
            return None
        return self.project.get_absolute_url()
    def get_site_url(self):
        if self.site is None:
            return None
        return self.site.get_absolute_url()
    def get_extra_json_string(self):
        """extra_json rendered as a JSON string (for templates)."""
        return json.dumps(self.extra_json)
    def __str__(self):
        return str(self.get_type_display())
class FieldSightMessage(models.Model):
    """Simple direct message between two users, newest first."""
    sender = models.ForeignKey(User, related_name="sender")
    receiver = models.ForeignKey(User, related_name="receiver")
    msg_content = models.CharField(max_length=255)
    date = models.DateTimeField(auto_now_add=True)
    is_seen = models.BooleanField(default=False)
    class Meta:
        ordering = ["-date"]
    @classmethod
    def inbox(cls, user):
        """Unread messages addressed to *user*."""
        return FieldSightMessage.objects.filter(receiver=user, is_seen=False)
    @classmethod
    def outbox(cls, user):
        """All messages *user* has sent."""
        return FieldSightMessage.objects.filter(sender=user)
    @classmethod
    def user_messages(cls, user):
        """All messages sent or received by *user*."""
        return FieldSightMessage.objects.filter(Q(sender=user) | Q(receiver=user))
class CeleryTaskProgress(models.Model):
    """Tracks the status and output file of a background Celery task."""
    Task_Status = (
        (0, 'Pending'),
        (1, 'In Progress'),
        (2, 'Completed'),
        (3, 'Failed'),
    )
    Task_Type = (
        (0, 'Bulk Site Update'),
        (1, 'User Assign to Project'),
        (2, 'User Assign to Site'),
        (3, 'Site Response Xls Report'),
        (4, 'Site Import'),
        (6, 'Zip Site Images'),
        (7, 'Remove Roles'),
        (8, 'Site Data Export'),
        (9, 'Response Pdf Report'),
        (10, 'Site Progress Xls Report'),
    )
    task_id = models.CharField(max_length=255, blank=True, null=True)
    date_added = models.DateTimeField(auto_now_add=True)
    date_updateded = models.DateTimeField(auto_now=True, blank=True, null=True)
    user = models.ForeignKey(User, related_name="task_owner")
    file = models.FileField(
        upload_to="celeryFiles", max_length=755, blank=True, null=True)
    status = models.IntegerField(default=0, choices=Task_Status)
    description = models.CharField(max_length=755, blank=True)
    task_type = models.IntegerField(default=0, choices=Task_Type)
    content_type = models.ForeignKey(ContentType, related_name='task_object', blank=True, null=True)
    object_id = models.IntegerField(blank=True, null=True)
    content_object = GenericForeignKey('content_type', 'object_id')
    logs = GenericRelation('eventlog.FieldSightLog')
    def getname(self):
        """Name of the produced file (used as the task's display name)."""
        return self.file.name
    def get_absolute_url(self):
        """URL of the produced file, or empty string when there is none."""
        if self.file:
            return self.file.url
        else:
            return ""
    def get_source_url(self):
        """Profile URL of the owning user, or None without a profile."""
        try:
            profile = self.user.user_profile
        except UserProfile.DoesNotExist:
            return None
        else:
            return profile.get_absolute_url()
    def get_source_name(self):
        """Full name of the owning user."""
        return self.user.first_name + ' ' + self.user.last_name
    def get_event_url(self):
        """URL of the related object, or None if unresolvable."""
        try:
            return self.content_object.get_absolute_url()
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        except Exception:
            return None
    def get_event_name(self):
        """Display name of the related object, or None."""
        try:
            return self.content_object.getname()
        except Exception:
            return None
    def get_progress(self):
        """JSON progress payload from Celery for in-progress tasks, else None."""
        # Flattened from nested ifs: only report when running AND a task id exists.
        if self.status == 1 and self.task_id:
            task = AsyncResult(self.task_id)
            data = task.result or task.state
            return json.dumps(data)
        return None
    def __str__(self):
        return str(self.pk) + " (" + str(self.task_type) + ") " + "-->" + str(self.status) + "--->" + str(self.user) + " | Date_last_updated =" + str(self.date_updateded) + " | Added_On ="+str(self.date_added)
@receiver(post_save, sender=FieldSightLog)
def handle_notification(sender, instance, **kwargs):
    """Fan a freshly saved FieldSightLog out to the relevant channel groups."""
    # Imported here to avoid a circular import with the serializers module.
    from onadata.apps.eventlog.serializers.LogSerializer import NotificationSerializer
    # Serialize once and reuse the payload for every group send (the original
    # re-ran json.dumps per group).
    payload = {"text": json.dumps(NotificationSerializer(instance).data)}
    if instance.project:
        ChannelGroup("project-notify-{}".format(instance.project.id)).send(payload)
    if instance.organization:
        ChannelGroup("org-notify-{}".format(instance.organization.id)).send(payload)
    ChannelGroup("user-notify-{}".format(1)).send(payload)
|
1647260
|
import unittest
from drgpy.msdrg import DRGEngine
class TestMCD00(unittest.TestCase):
    """Pre-MDC DRG assignment checks (DRGs 001-016, 304, 983)."""

    def test_mdcs00(self):
        de = DRGEngine()
        # (diagnosis codes, procedure codes, DRG, expected membership)
        cases = [
            (["I10", "E0800"], ["02YA0Z0"], "001", True),
            (["I10"], ["02YA0Z0"], "002", True),
            ([], ["02HA0RS"], "002", False),
            ([], ["02HA0RS", "02PA0RZ"], "002", True),
            ([], ["5A1522F"], "003", True),
            ([], ["0B110F4", "5A1955Z"], "003", True),
            (["E0800"], ["0B110F4"], "003", True),
            (["A360"], ["0B110F4"], "004", False),
            ([], ["0FY00Z0", "0DY80Z0"], "005", True),
            (["I10", "E0800"], ["0FY00Z0"], "005", True),
            ([], ["0DY80Z0"], "005", True),
            ([], ["0FY00Z0"], "006", True),
            ([], ["30230G2"], "014", True),
            ([], ["0BYC0Z0"], "007", True),
            (["I120", "E0800"], ["0TY00Z0", "0FYG0Z0"], "008", True),
            (["I120"], ["0TY00Z0", "0FYG0Z0"], "008", False),
            ([], ["XW033C3"], "016", True),
            (["I10", "E0800"], ["30230AZ"], "304", True),
            # in v37, this is 983, used to be 017
            ([], ["30230AZ"], "983", True),
            (["E0800"], ["0FYG0Z0"], "010", True),
            (["A360", "E0800"], ["0B110F4"], "011", True),
            (["A360", "A000"], ["0B110F4"], "012", True),
            (["A360"], ["0B110F4"], "013", True),
        ]
        for dx_lst, pr_lst, drg, should_contain in cases:
            drg_lst = de.get_drg_all(dx_lst, pr_lst)
            if should_contain:
                self.assertTrue(drg in drg_lst)
            else:
                self.assertTrue(drg not in drg_lst)
# Allow running this test module directly.
if __name__=="__main__":
    unittest.main()
|
1647284
|
import ast
import shutil
import os
from pathlib import Path
import unittest
import tests.plugins
from obscurepy.obfuscator import Obfuscator
from obscurepy.handlers.classdef_handler import ClassDefHandler
class ObfuscatorTest(unittest.TestCase):
    """End-to-end tests for Obfuscator: construction, handler-chain setup,
    output-directory management, and single-file / project obfuscation.

    Most tests write into tests/obscurepy_out on the real filesystem and
    clean it up in tearDown or a per-test finally block.
    """

    def setUp(self):
        # Single-file fixture; filepath is reset so each test configures it.
        self.fixture = Obfuscator('tests/my_module.py')
        self.fixture.filepath = None
        self.dynamic_fixture = None
        self.log = False
    def tearDown(self):
        # Safety net: remove the output dir if a test left it behind.
        if os.path.exists('tests/obscurepy_out'):
            shutil.rmtree('tests/obscurepy_out')
    def test_constructor_multi_correct(self):
        self.dynamic_fixture = Obfuscator(
            project_directory="tests/test_project")
    def test_constructor_single_correct(self):
        self.dynamic_fixture = Obfuscator('tests/my_module.py')
    def test_constructor_incorrect(self):
        # Supplying both a filepath and a project directory must fail.
        with self.assertRaises(Exception):
            self.dynamic_fixture = Obfuscator(
                filepath='tests/my_module.py', project_directory='tests/test_project')
    def test_build_chain(self):
        self.fixture.chain = None
        self.fixture.build_chain()
        # Default chain starts with the built-in ClassDefHandler.
        self.assertEqual(type(self.fixture.chain), ClassDefHandler)
    def test_build_chain_custom_handlers(self):
        self.fixture.chain = None
        self.fixture.plugin_directory = 'tests/plugins'
        self.fixture.plugins = True
        self.fixture.build_chain()
        # With plugins enabled the chain head comes from the plugin package.
        self.assertEqual(type(self.fixture.chain),
                         tests.plugins.classdef_handler.ClassDefHandler)
    def test_get_project_filepaths(self):
        self.fixture.project_directory = 'tests'
        self.fixture.get_project_filepaths()
        self.assertTrue('tests/test_obfuscator.py' in self.fixture.filepaths)
    def test_set_tree(self):
        self.fixture.tree = None
        self.fixture.set_tree('tests/my_module.py')
        self.assertEqual(type(self.fixture.tree), ast.Module)
    def test_build_output_directories_multi_file(self):
        try:
            self.fixture.project_directory = 'tests/test_data'
            self.fixture.output_directory = 'tests'
            self.fixture.build_output_directories()
            # The source tree layout is mirrored under obscurepy_out.
            self.assertTrue(os.path.exists('tests/obscurepy_out/test_data'))
            self.assertTrue(os.path.exists(
                'tests/obscurepy_out/test_data/test_files'))
        finally:
            shutil.rmtree('tests/obscurepy_out')
    def test_build_output_directories_single_file(self):
        try:
            self.fixture.filepath = 'tests/my_module.py'
            self.fixture.output_directory = 'tests'
            self.fixture.build_output_directories()
            self.assertTrue(os.path.exists('tests/obscurepy_out'))
        finally:
            # Single-file mode creates only the top directory, so rmdir works.
            os.rmdir('tests/obscurepy_out')
    def test_build_output_directories_exists(self):
        # Pre-existing output content must be wiped.
        os.mkdir('tests/obscurepy_out')
        with open('tests/obscurepy_out/test_file.py', 'w') as file:
            pass
        self.fixture.project_directory = 'tests/test_data'
        self.fixture.output_directory = 'tests'
        self.fixture.build_output_directories()
        self.assertFalse(os.path.exists('tests/obscurepy_out/test_file.py'))
    def test_write_tree_to_file(self):
        try:
            self.fixture.set_tree('tests/my_module.py')
            self.fixture.write_tree_to_file('tests/obfuscated.py')
            self.assertTrue(Path('tests/obfuscated.py').exists())
        finally:
            os.remove('tests/obfuscated.py')
    def test_obscure_multi_file(self):
        # obscure() dispatches to project mode when is_project is set.
        try:
            self.fixture.is_project = True
            self.fixture.project_directory = 'tests/test_project'
            self.fixture.output_directory = 'tests'
            self.fixture.obscure()
            self.assertTrue(os.path.exists('tests/obscurepy_out/test_project'))
            self.assertTrue(os.path.exists(
                'tests/obscurepy_out/test_project/my_module.py'))
            self.assertTrue(os.path.exists(
                'tests/obscurepy_out/test_project/another_module.py'))
        finally:
            shutil.rmtree('tests/obscurepy_out')
    def test_obscure_single_file(self):
        try:
            self.fixture.filepath = 'tests/my_module.py'
            self.fixture.output_directory = 'tests'
            self.fixture.obscure()
            self.assertTrue(os.path.exists('tests/obscurepy_out/my_module.py'))
        finally:
            shutil.rmtree('tests/obscurepy_out')
    def test_obscure_file(self):
        # Direct call to the single-file entry point.
        try:
            self.fixture.filepath = 'tests/my_module.py'
            self.fixture.output_directory = 'tests'
            self.fixture.obscure_file()
            self.assertTrue(os.path.exists('tests/obscurepy_out/my_module.py'))
        finally:
            shutil.rmtree('tests/obscurepy_out')
    def test_obscure_project(self):
        # Direct call to the project entry point.
        try:
            self.fixture.is_project = True
            self.fixture.project_directory = 'tests/test_project'
            self.fixture.output_directory = 'tests'
            self.fixture.obscure_project()
            self.assertTrue(os.path.exists('tests/obscurepy_out/test_project'))
            self.assertTrue(os.path.exists(
                'tests/obscurepy_out/test_project/my_module.py'))
            self.assertTrue(os.path.exists(
                'tests/obscurepy_out/test_project/another_module.py'))
        finally:
            shutil.rmtree('tests/obscurepy_out')
    def test_is_multi_file_correct(self):
        self.fixture.is_project = True
        self.fixture.project_directory = 'tests/test_data'
        self.assertTrue(self.fixture.is_multi_file())
    def test_is_multi_file_incorrect(self):
        self.assertFalse(self.fixture.is_multi_file())
    def test_is_single_file_correct(self):
        self.fixture.filepath = 'tests/test_obfuscator.py'
        self.assertTrue(self.fixture.is_single_file())
    def test_is_single_file_incorrect(self):
        self.fixture.is_project = True
        self.assertFalse(self.fixture.is_single_file())
|
1647331
|
import numpy as np
import torch
def objective_function(
    config,
    model_objective,
    model_cost,
    task_feature_objective,
    task_feature_cost,
    x_mean_objective,
    x_std_objective,
    x_mean_cost,
    x_std_cost,
    y_mean_objective=None,
    y_std_objective=None,
    y_mean_cost=None,
    y_std_cost=None,
    log_objective=False,
    with_noise=True,
):
    """Evaluate the surrogate objective and cost networks on a batch of
    configurations and return (objective values, costs), each (N, 1).

    Each network outputs a (mean, log-variance) pair per row; optional
    y_* statistics de-normalize those outputs, and `with_noise` adds one
    shared Gaussian noise draw per network.
    """

    def _predict(model, task_feature, x_mean, x_std):
        # Append the task descriptor to every config row, normalize the
        # inputs, and return the network's (mean, log-variance) columns.
        task_cols = np.repeat(task_feature[None, :], config.shape[0], axis=0)
        inputs = np.concatenate((config, task_cols), axis=1)
        normalized = torch.from_numpy((inputs - x_mean) / x_std).float()
        prediction = model.forward(normalized).data.numpy()
        return prediction[:, 0], prediction[:, 1]

    # --- objective network -------------------------------------------------
    mean, log_variance = _predict(model_objective, task_feature_objective,
                                  x_mean_objective, x_std_objective)
    if y_mean_objective is not None or y_std_objective is not None:
        mean = mean * y_std_objective + y_mean_objective
        log_variance = log_variance * y_std_objective ** 2
    feval = mean
    if with_noise:
        # Single scalar draw, scaled per-row by the predicted std-dev.
        feval = feval + np.random.randn() * np.sqrt(np.exp(log_variance))
    if log_objective:
        feval = np.exp(feval)

    # --- cost network (predicts log-cost) ----------------------------------
    log_mean, log_log_variance = _predict(model_cost, task_feature_cost,
                                          x_mean_cost, x_std_cost)
    if y_mean_cost is not None or y_std_cost is not None:
        log_mean = log_mean * y_std_cost + y_mean_cost
        log_log_variance = log_log_variance * y_std_cost ** 2
    log_cost = log_mean
    if with_noise:
        log_cost = log_cost + np.random.randn() * np.sqrt(np.exp(log_log_variance))
    return feval[:, None], np.exp(log_cost)[:, None]
|
1647336
|
class Solution:
    # @param gas, a list of integers
    # @param cost, a list of integers
    # @return an integer
    def canCompleteCircuit(self, gas, cost):
        """Greedy O(n) solution to the gas-station circuit problem.

        Returns the index of the unique feasible starting station, or -1
        when the total gas is less than the total cost.
        """
        total_surplus = 0  # feasibility check: sum of gas[i] - cost[i]
        tank = 0           # running surplus since the current candidate start
        start = 0
        for i in range(len(gas)):
            surplus = gas[i] - cost[i]
            tank += surplus
            total_surplus += surplus
            if tank < 0:
                # No station in [start, i] can be the answer; restart after i.
                start = i + 1
                tank = 0
        if total_surplus < 0:
            return -1
        return start

    def canCompleteCircuit2(self, gas, cost):
        """Brute-force O(n^2) reference implementation.

        Bug fix: the original never accumulated the fuel carried between
        stations (`carry` stayed at its initial value), so it accepted
        starts whose cumulative fuel dips below zero mid-circuit
        (e.g. gas=[4,0,0,2], cost=[1,2,2,1] returned 0 instead of 3).
        """
        n = len(gas)
        for start in range(n):
            fuel = gas[start] - cost[start]
            if fuel < 0:
                continue
            pos = (start + 1) % n
            # Simulate the full lap, carrying the accumulated fuel forward.
            while pos != start and fuel >= 0:
                fuel += gas[pos] - cost[pos]
                pos = (pos + 1) % n
            if pos == start and fuel >= 0:
                return start
        return -1
s = Solution()
# Python 3 compatible print (the old `print expr` statement is a
# SyntaxError under Python 3; the call form also works on Python 2).
print(s.canCompleteCircuit2([2, 4], [3, 4]))
|
1647342
|
from django.test import TestCase, RequestFactory, Client
import django
if django.VERSION >= (2, 0, 0):
from django.urls import reverse
else:
from django.core.urlresolvers import reverse
from django.db import models
from django.views.generic import ListView, CreateView , DetailView, UpdateView, DeleteView
from generic_scaffold import CrudManager, get_url_names
from generic_scaffold.templatetags.generic_scaffold_tags import set_urls_for_scaffold
class TestModel(models.Model):
    # Minimal model scaffolded by TestCrudManager (prefix 'test').
    test = models.CharField(max_length=16)
class TestModel2(models.Model):
    # Minimal model scaffolded by TestOverrideViewsCrudManager.
    test = models.CharField(max_length=16)
class TestEmptyModel(models.Model):
    # Minimal model scaffolded by TestEmptyPrefixCrudManager (no prefix).
    test = models.CharField(max_length=16)
class TestModelImplicit(models.Model):
    # Minimal model scaffolded by TestImplicitCrudManager.
    test = models.CharField(max_length=16)
class TestModelExplicit(models.Model):
    # Minimal model scaffolded by TestExplicitCrudManager.
    test = models.CharField(max_length=16)
class TestCrudManager(CrudManager):
    # CRUD manager with an explicit URL prefix; templates fall back to the
    # package defaults.
    model = TestModel
    prefix = 'test'
class TestEmptyPrefixCrudManager(CrudManager):
    # CRUD manager registered without a prefix.
    model = TestEmptyModel
class TestImplicitCrudManager(CrudManager):
    # No template names declared; templates are resolved implicitly from
    # the model name (see TemplateOrderingTest below).
    model = TestModelImplicit
    prefix = 'test_implicit'
class TestExplicitCrudManager(CrudManager):
    # Explicit template names override the per-model implicit lookup.
    model = TestModelExplicit
    prefix = 'test_explicit'
    list_template_name = 'generic_scaffold/list.html'
    form_template_name = 'generic_scaffold/form.html'
    detail_template_name = 'generic_scaffold/detail.html'
    delete_template_name = 'generic_scaffold/confirm_delete.html'
class TestOverrideViewsCrudManager(CrudManager):
    # Overrides every *_view_class with a dynamically created subclass of
    # the corresponding Django generic view (checked by TestOverrideViews).
    model = TestModel2
    prefix = 'test_override_views'
    list_view_class = type('OverridenListView', (ListView, ), {} )
    create_view_class = type('OverridenCreateView', (CreateView, ), {} )
    detail_view_class = type('OverridenDetailView', (DetailView, ), {} )
    update_view_class = type('OverridenUpdateView', (UpdateView, ), {} )
    delete_view_class = type('OverridenDeleteView', (DeleteView, ), {} )
    list_template_name = 'generic_scaffold/list.html'
    form_template_name = 'generic_scaffold/form.html'
    detail_template_name = 'generic_scaffold/detail.html'
    delete_template_name = 'generic_scaffold/confirm_delete.html'
# Instantiate every manager and collect the URL patterns it generates.
# NOTE(review): presumably this test module doubles as the URLconf for the
# test settings (module-level `urlpatterns`) — confirm against settings.
test_crud = TestCrudManager()
urlpatterns = test_crud.get_url_patterns()
test_empty_prefix_crud = TestEmptyPrefixCrudManager()
urlpatterns += test_empty_prefix_crud.get_url_patterns()
test_implicit_crud = TestImplicitCrudManager()
urlpatterns += test_implicit_crud.get_url_patterns()
test_explicit_crud = TestExplicitCrudManager()
urlpatterns += test_explicit_crud.get_url_patterns()
test_override_crud = TestOverrideViewsCrudManager()
urlpatterns += test_override_crud.get_url_patterns()
class DuplicatesTest(TestCase):
    """Registering a second CrudManager with an already-used prefix or
    model must raise ImproperlyConfigured."""

    def test_duplicate_prefix(self):
        # 'test' is already taken by TestCrudManager above.
        with self.assertRaises(django.core.exceptions.ImproperlyConfigured):
            type("Thrower", (CrudManager,), {'prefix': 'test'})

    def test_duplicate_model(self):
        # TestModel is already handled by TestCrudManager above.
        with self.assertRaises(django.core.exceptions.ImproperlyConfigured):
            type("Thrower", (CrudManager,), {'prefix': 'foo', 'model': TestModel})
class EmptyPrefixTest(TestCase):
    """CRUD manager registered without a prefix: URL names have no prefix
    part and all generated views work end-to-end."""

    ACTIONS = ('list', 'create', 'update', 'delete', 'detail')

    def setUp(self):
        self.crud = test_empty_prefix_crud
        for action in self.ACTIONS:
            view = getattr(self.crud, 'get_{0}_class_view'.format(action))()
            setattr(self, action + '_view', view)
        TestEmptyModel.objects.create(test='test')

    def test_urls_have_correct_name(self):
        for action in self.ACTIONS:
            self.assertEquals(getattr(self.crud, action + '_url_name'),
                              "generic_scaffold_testemptymodel_{0}".format(action))

    def test_views_have_correct_parent_class(self):
        parents = ("ListView", "CreateView", "UpdateView", "DeleteView", "DetailView")
        for action, parent in zip(self.ACTIONS, parents):
            view = getattr(self, action + '_view')
            self.assertEquals(view.__bases__[-1].__name__, parent)

    def test_view_have_correct_model(self):
        for action in self.ACTIONS:
            self.assertEquals(getattr(self, action + '_view').model.__name__,
                              "TestEmptyModel")

    def test_with_client(self):
        c = Client()
        names = get_url_names(None)
        # (action, reverse args, token expected in the response body)
        checks = (
            ('list', None, b'TestEmptyModel object'),
            ('create', None, b'id_test'),
            ('update', [1], b'id_test'),
            ('detail', [1], b'TestEmptyModel object'),
            ('delete', [1], b'TestEmptyModel object'),
        )
        for action, args, token in checks:
            resp = c.get(reverse(names[action], args=args))
            self.assertEquals(resp.status_code, 200)
            self.assertTrue(token in resp.content)
class SimpleParameterTest(TestCase):
    """CRUD manager registered with an explicit prefix ('test'): URL names
    carry the prefix and all generated views work end-to-end."""

    ACTIONS = ('list', 'create', 'update', 'delete', 'detail')

    def setUp(self):
        self.crud = test_crud
        for action in self.ACTIONS:
            view = getattr(self.crud, 'get_{0}_class_view'.format(action))()
            setattr(self, action + '_view', view)
        TestModel.objects.create(test='test')

    def test_urls_have_correct_name(self):
        for action in self.ACTIONS:
            self.assertEquals(
                getattr(self.crud, action + '_url_name'),
                "{0}_generic_scaffold_testmodel_{1}".format(TestCrudManager.prefix,
                                                            action))

    def test_views_have_correct_parent_class(self):
        parents = ("ListView", "CreateView", "UpdateView", "DeleteView", "DetailView")
        for action, parent in zip(self.ACTIONS, parents):
            view = getattr(self, action + '_view')
            self.assertEquals(view.__bases__[-1].__name__, parent)

    def test_view_have_correct_model(self):
        for action in self.ACTIONS:
            self.assertEquals(getattr(self, action + '_view').model.__name__,
                              "TestModel")

    def test_with_client(self):
        c = Client()
        names = get_url_names(prefix='test')
        # (action, reverse args, token expected in the response body)
        checks = (
            ('list', None, b'TestModel object'),
            ('create', None, b'id_test'),
            ('update', [1], b'id_test'),
            ('detail', [1], b'TestModel object'),
            ('delete', [1], b'TestModel object'),
        )
        for action, args, token in checks:
            resp = c.get(reverse(names[action], args=args))
            self.assertEquals(resp.status_code, 200)
            self.assertTrue(token in resp.content)
class TemplateOrderingTest(TestCase):
    """Template resolution order: explicit names beat implicit per-model
    names, which beat the package fallbacks."""

    def setUp(self):
        self.client = Client()
        TestModel.objects.create(test='test')
        TestModelImplicit.objects.create(test='test')
        TestModelExplicit.objects.create(test='test')

    def _check_templates(self, prefix, expected):
        # expected: iterable of (action, reverse args, template name).
        names = get_url_names(prefix=prefix)
        for action, args, template in expected:
            resp = self.client.get(reverse(names[action], args=args))
            self.assertTemplateUsed(resp, template)

    def test_fallback_templates(self):
        # No template names anywhere -> package-level fallbacks.
        self._check_templates('test', (
            ('list', None, 'generic_scaffold/list.html'),
            ('create', None, 'generic_scaffold/form.html'),
            ('update', [1], 'generic_scaffold/form.html'),
            ('detail', [1], 'generic_scaffold/detail.html'),
            ('delete', [1], 'generic_scaffold/confirm_delete.html'),
        ))

    def test_implicit_templates(self):
        # Templates named after the model are picked up automatically.
        self._check_templates('test_implicit', (
            ('list', None, 'generic_scaffold/testmodelimplicit_list.html'),
            ('create', None, 'generic_scaffold/testmodelimplicit_form.html'),
            ('update', [1], 'generic_scaffold/testmodelimplicit_form.html'),
            ('detail', [1], 'generic_scaffold/testmodelimplicit_detail.html'),
            ('delete', [1], 'generic_scaffold/testmodelimplicit_confirm_delete.html'),
        ))

    def test_explicit_templates(self):
        # Explicit *_template_name attributes win.
        self._check_templates('test_explicit', (
            ('list', None, 'generic_scaffold/list.html'),
            ('create', None, 'generic_scaffold/form.html'),
            ('update', [1], 'generic_scaffold/form.html'),
            ('detail', [1], 'generic_scaffold/detail.html'),
            ('delete', [1], 'generic_scaffold/confirm_delete.html'),
        ))
class TestUrlNames(TestCase):
    """get_url_names resolves identical names via prefix or via app/model."""

    EXPECTED = "{0}_generic_scaffold_testmodel_{1}"

    def setUp(self):
        pass

    def test_get_url_names_with_prefix(self):
        names = get_url_names(prefix='test')
        for action in ('list', 'create', 'update', 'delete', 'detail'):
            self.assertEquals(names[action],
                              self.EXPECTED.format(TestCrudManager.prefix, action))

    def test_get_url_names_with_model(self):
        names = get_url_names(app='generic_scaffold', model='testmodel')
        for action in ('list', 'create', 'update', 'delete', 'detail'):
            self.assertEquals(names[action],
                              self.EXPECTED.format(TestCrudManager.prefix, action))
class TestTempalteTags(TestCase):  # (sic) class name kept for compatibility
    """The set_urls_for_scaffold template tag mirrors get_url_names."""

    EXPECTED = "{0}_generic_scaffold_testmodel_{1}"

    def test_template_tags_with_prefix(self):
        names = set_urls_for_scaffold(prefix='test')
        for action in ('list', 'create', 'update', 'delete', 'detail'):
            self.assertEquals(names[action],
                              self.EXPECTED.format(TestCrudManager.prefix, action))

    def test_get_url_names_with_model(self):
        names = set_urls_for_scaffold(app='generic_scaffold', model='testmodel')
        for action in ('list', 'create', 'update', 'delete', 'detail'):
            self.assertEquals(names[action],
                              self.EXPECTED.format(TestCrudManager.prefix, action))
class TestOverrideViews(TestCase):
    """Custom *_view_class attributes become the generated view's parent."""

    def setUp(self):
        self.crud = test_override_crud
        for action in ('list', 'create', 'update', 'delete', 'detail'):
            view = getattr(self.crud, 'get_{0}_class_view'.format(action))()
            setattr(self, action + '_view', view)

    def test_views_have_correct_parent_classes(self):
        for action in ('list', 'create', 'update', 'delete', 'detail'):
            view = getattr(self, action + '_view')
            expected = "Overriden{0}View".format(action.capitalize())
            self.assertEquals(view.__bases__[-1].__name__, expected)
|
1647346
|
from unittest import TestCase
from similarityPy.algorithms.find_nearest import FindNearest
from similarityPy.measure.boolean_data.matching_dissimilarity import MatchingDissimilarity
from tests import test_logger
__author__ = 'cenk'
class FindNearestTest(TestCase):
    """Nearest-neighbour search with the matching-dissimilarity measure."""

    def setUp(self):
        pass

    def _nearest(self, candidates, query, k=None):
        # Run FindNearest and return its result (single point, or list
        # of k points when k is given).
        if k is None:
            finder = FindNearest(candidates, query, MatchingDissimilarity)
        else:
            finder = FindNearest(candidates, query, MatchingDissimilarity, k=k)
        finder.process()
        return finder.get_result()

    def test_matching_dissimilarity(self):
        test_logger.debug("FindNearestTest - test_matching_dissimilarity Starts")
        candidates = [(1, 0, 1, 0, 1, 0), (1, 0, 1, 0, 1, 1), (1, 0, 1, 1, 1, 0)]
        self.assertEquals((1, 0, 1, 0, 1, 0),
                          self._nearest(candidates, (1, 0, 0, 0, 0, 0)))
        candidates = [(1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1),
                      (1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1),
                      (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1)]
        query = (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1)
        self.assertEquals((0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1),
                          self._nearest(candidates, query))
        test_logger.debug("FindNearestTest - test_matching_dissimilarity Ends")

    def test_matching_dissimilarity_multiple(self):
        test_logger.debug("FindNearestTest - test_matching_dissimilarity_multiple Starts")
        candidates = [(1, 0, 1, 0, 1, 0), (1, 0, 1, 0, 1, 1), (1, 0, 1, 1, 1, 0)]
        self.assertEquals([(1, 0, 1, 0, 1, 0), (1, 0, 1, 0, 1, 1)],
                          self._nearest(candidates, (1, 0, 0, 0, 0, 0), k=2))
        candidates = [(1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1),
                      (1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1),
                      (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1)]
        query = (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1)
        self.assertEquals([(0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1),
                           (1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1)],
                          self._nearest(candidates, query, k=2))
        test_logger.debug("FindNearestTest - test_matching_dissimilarity_multiple Ends")
|
1647350
|
from kaldi_io import read_vec_flt, write_vec_flt, open_or_fd, write_mat
import sys
import numpy as np
from collections import defaultdict
# Speakers with special handling below — presumably the dev/test split of
# the corpus (TODO confirm against the data prep recipe).
dev_test_spk = ['p311', 'p226', 'p303', 'p234', 'p302', 'p237', 'p294', 'p225']
# argv[1]: scp-style file, one "key rxfile" pair per line, where each
# rxfile points at a per-utterance embedding vector.
with open(sys.argv[1], 'r') as f:
    content = f.readlines()
content = [x.strip() for x in content]
spk2mat = defaultdict(list)
for line in content:
    (key,rxfile) = line.split()
    # Keys look like "<speaker>-...-<segment>" (speaker in field 0,
    # integer segment index in field 2).
    spk = key.split('-')[0]
    if spk in dev_test_spk:
        # For held-out speakers, only segments >= 25 contribute.
        seg = int(key.split('-')[2])
        if seg < 25:
            continue
    spk2mat[spk].append(read_vec_flt(rxfile))
# argv[2]: output basename; writes <out>.ark / <out>.scp through a
# copy-feats pipe.
out_file = sys.argv[2]
ark_scp_output = 'ark:| copy-feats --compress=true ark:- ark,scp:' + out_file + '.ark,' + out_file + '.scp'
with open_or_fd(ark_scp_output, 'wb') as f:
    for spk,mat in spk2mat.items():
        # Mean embedding over all kept utterances, as a column vector.
        spk_emb = np.mean(mat, axis=0).reshape(-1, 1)
        #print(spk)
        #print(spk_emb.shape)
        write_mat(f, spk_emb, key=spk)
|
1647366
|
import pandas
import numpy as np
import matplotlib.pyplot as plt
# Matplotlib color-cycle entries for the bar plot (indexed by strategy).
COLOR = ['C2', 'C1', 'C0']
# Directory holding benchmark CSV inputs and figure outputs.
SAVE_DIR = "benchmarks_results"
def plot_scaling_1d_benchmark(strategies, list_n_times):
    """Plot CD-strategy benchmark results.

    Produces one runtime-vs-workers scaling figure per signal length in
    `list_n_times`, plus a grouped bar chart comparing single-worker
    runtimes, all saved under SAVE_DIR.

    strategies: list of (strategy key, display label, line style) tuples.
    list_n_times: signal lengths T (in multiples of L) to plot.

    Fix: the paths previously hard-coded "benchmarks_results" although the
    SAVE_DIR constant exists for exactly that; they now use SAVE_DIR.
    """
    # compute the width of the bars
    n_group = len(list_n_times)
    n_bar = len(strategies)
    width = 1 / ((n_bar + 1) * n_group - 1)
    fig = plt.figure('comparison CD', figsize=(6, 3.5))
    fig.patch.set_alpha(0)
    ax_bar = fig.subplots()
    xticks, labels = [], []
    for i, n_times in enumerate(list_n_times):
        # One scaling figure per signal length.
        fig_scaling = plt.figure(f'Scaling T={n_times}', figsize=(6, 3))
        fig_scaling.patch.set_alpha(0)
        ax_scaling = fig_scaling.subplots()
        handles = []
        xticks.append(((i + .5) * (n_bar + 1)) * width)
        labels.append(f"$T = {n_times}L$")
        for j, (strategy, name, style) in enumerate(strategies):
            col_name = ['pb', 'n_jobs', 'runtime', 'runtime1']
            csv_name = (f"{SAVE_DIR}/runtimes_n_jobs_"
                        f"{n_times}_{strategy}.csv")
            try:
                df = pandas.read_csv(csv_name, names=col_name)
            except FileNotFoundError:
                print(f"Not found {csv_name}")
                continue
            # Bar chart: single-worker runtimes (mean bar + raw ticks).
            runtimes_1 = df[df['n_jobs'] == 1]['runtime'].values
            position = (i * (n_bar + 1) + j + 1) * width
            handles.append(ax_bar.bar(position, height=np.mean(runtimes_1),
                                      width=width, color=COLOR[j], label=name,
                                      hatch='//' if strategy == 'lgcd' else '')
                           )
            ax_bar.plot(
                np.ones_like(runtimes_1) * position,
                runtimes_1, '_', color='k')
            # Scaling curve: mean runtime per worker count.
            n_jobs = df['n_jobs'].unique()
            n_jobs.sort()
            runtimes_scale = []
            runtimes_scale_mean = []
            for n in n_jobs:
                runtimes_scale.append(df[df['n_jobs'] == n]['runtime'].values)
                runtimes_scale_mean.append(np.mean(runtimes_scale[-1]))
            runtimes_scale_mean = np.array(runtimes_scale_mean)
            if strategy != 'random':
                t = np.logspace(0, np.log2(2 * n_jobs.max()), 3, base=2)
                R0 = runtimes_scale_mean.max()
                # Linear and quadratic lines
                p = 1 if strategy == 'lgcd' else 2
                ax_scaling.plot(t, R0 / t ** p, 'k--', linewidth=1)
                tt = 2
                bbox = None  # dict(facecolor="white", edgecolor="white")
                if strategy == 'lgcd':
                    ax_scaling.text(tt, 1.4 * R0 / tt, "linear", rotation=-14,
                                    bbox=bbox, fontsize=12)
                    name_ = "DiCoDiLe-$Z$"
                else:
                    ax_scaling.text(tt, 1.4 * R0 / tt**2, "quadratic",
                                    rotation=-25, bbox=bbox, fontsize=12)
                    name_ = "DICOD"
                ax_scaling.plot(n_jobs, runtimes_scale_mean, style,
                                label=name_, zorder=10, markersize=8)
            # for i, n in enumerate(n_jobs):
            #     x = np.array(runtimes_scale[i])
            #     ax_scaling.plot(np.ones(value.shape) * n, value, 'k_')
        # Axis cosmetics; y-limits depend on the problem size.
        if n_times == 150:
            y_lim = (.5, 1e3)
        else:
            y_lim = (2, 2e4)
        ax_scaling.vlines(n_times / 4, *y_lim, 'g', '-.')
        ax_scaling.set_ylim(y_lim)
        ax_scaling.set_xscale('log')
        ax_scaling.set_yscale('log')
        ax_scaling.set_xlim((1, 75))
        ax_scaling.grid(True, which='both', axis='x', alpha=.5)
        ax_scaling.grid(True, which='major', axis='y', alpha=.5)
        # ax_scaling.set_xticks(n_jobs)
        # ax_scaling.set_xticklabels(n_jobs, fontsize=12)
        ax_scaling.set_ylabel("Runtime [sec]", fontsize=12)
        ax_scaling.set_xlabel("# workers $W$", fontsize=12)
        ax_scaling.legend(fontsize=14)
        fig_scaling.tight_layout()
        fig_scaling.savefig(f"{SAVE_DIR}/scaling_T{n_times}.pdf",
                            dpi=300, bbox_inches='tight', pad_inches=0)
    ax_bar.set_ylabel("Runtime [sec]", fontsize=12)
    ax_bar.set_yscale('log')
    ax_bar.set_xticks(xticks)
    ax_bar.set_xticklabels(labels, fontsize=12)
    ax_bar.set_ylim(1, 2e4)
    ax_bar.legend(bbox_to_anchor=(-.02, 1.02, 1., .3), loc="lower left",
                  handles=handles, ncol=3, fontsize=14, borderaxespad=0.)
    fig.tight_layout()
    fig.savefig(f"{SAVE_DIR}/CD_strategies_comparison.png", dpi=300,
                bbox_inches='tight', pad_inches=0)
    plt.show()
if __name__ == "__main__":
    list_n_times = [150, 750]
    # (strategy key, display label, matplotlib line style)
    strategies = [
        ('greedy', 'Greedy', 's-'),
        ('random', 'Random', "h-"),
        ('lgcd', "LGCD", 'o-')
    ]
    plot_scaling_1d_benchmark(strategies, list_n_times)
|
1647372
|
r"""
Version for trame 1.x - https://github.com/Kitware/trame/blob/release-v1/examples/VTK/SimpleCone/RemoteRendering.py
Delta v1..v2 - https://github.com/Kitware/trame/commit/674f72774228bbcab5689417c1c5642230b1eab8
"""
from trame.app import get_server
from trame.widgets import vuetify, vtk
from trame.ui.vuetify import SinglePageLayout
from vtkmodules.vtkFiltersSources import vtkConeSource
from vtkmodules.vtkRenderingCore import (
vtkRenderer,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkPolyDataMapper,
vtkActor,
)
# VTK factory initialization
from vtkmodules.vtkInteractionStyle import vtkInteractorStyleSwitch # noqa
import vtkmodules.vtkRenderingOpenGL2 # noqa
# -----------------------------------------------------------------------------
# Trame initialization
# -----------------------------------------------------------------------------
# Grab the global trame server and convenient handles on its shared state
# and controller, then set the browser tab title.
server = get_server()
state, ctrl = server.state, server.controller
state.trame__title = "VTK Remote rendering"
# -----------------------------------------------------------------------------
# Custom / Advanced event handling
# -----------------------------------------------------------------------------
# All interactor event names the remote view can forward (used by the
# commented-out advanced event-handling wiring in the GUI section below).
VTK_VIEW_EVENTS = [
    "StartAnimation",
    "Animation",
    "EndAnimation",
    "MouseEnter",
    "MouseLeave",
    "StartMouseMove",
    "MouseMove",
    "EndMouseMove",
    "LeftButtonPress",
    "LeftButtonRelease",
    "MiddleButtonPress",
    "MiddleButtonRelease",
    "RightButtonPress",
    "RightButtonRelease",
    "KeyPress",
    "KeyDown",
    "KeyUp",
    "StartMouseWheel",
    "MouseWheel",
    "EndMouseWheel",
    "StartPinch",
    "Pinch",
    "EndPinch",
    "StartPan",
    "Pan",
    "EndPan",
    "StartRotate",
    "Rotate",
    "EndRotate",
    "Button3D",
    "Move3D",
    "StartPointerLock",
    "EndPointerLock",
    "StartInteraction",
    "Interaction",
    "EndInteraction",
]
# Initial cone tessellation; also the slider's reset value.
DEFAULT_RESOLUTION = 6
def on_event(*event_args, **event_kwargs):
    """Debug listener: print every forwarded view event with its payload."""
    print("event", event_args, event_kwargs)
def event_listeners(events):
    """Map each event name to the shared debug listener plus the client-side
    expression that extracts the event payload."""
    return {event: (on_event, "[utils.vtk.event($event)]") for event in events}
# -----------------------------------------------------------------------------
# VTK code
# -----------------------------------------------------------------------------
# Classic VTK pipeline: renderer + off-screen window (frames are streamed
# to the browser instead of a local window).
renderer = vtkRenderer()
renderWindow = vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.OffScreenRenderingOn()
renderWindowInteractor = vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera()
# Cone source -> mapper -> actor.
cone_source = vtkConeSource()
mapper = vtkPolyDataMapper()
actor = vtkActor()
mapper.SetInputConnection(cone_source.GetOutputPort())
actor.SetMapper(mapper)
renderer.AddActor(actor)
renderer.ResetCamera()
renderWindow.Render()
@state.change("resolution")
def update_cone(resolution=DEFAULT_RESOLUTION, **kwargs):
    # State-change callback: re-tessellate the cone and push a fresh frame.
    cone_source.SetResolution(resolution)
    ctrl.view_update()
def update_reset_resolution():
    # Toolbar button callback: restoring the slider value triggers the
    # @state.change("resolution") handler above.
    state.resolution = DEFAULT_RESOLUTION
# -----------------------------------------------------------------------------
# GUI
# -----------------------------------------------------------------------------
# Single-page layout: toolbar (resolution slider + reset button) above a
# full-height remote-rendering view.
with SinglePageLayout(server) as layout:
    layout.icon.click = ctrl.view_reset_camera
    layout.title.set_text("Cone Application")
    with layout.toolbar:
        vuetify.VSpacer()
        # Slider bound to the shared "resolution" state variable.
        vuetify.VSlider(
            v_model=("resolution", DEFAULT_RESOLUTION),
            min=3,
            max=60,
            step=1,
            hide_details=True,
            dense=True,
            style="max-width: 300px",
        )
        vuetify.VDivider(vertical=True, classes="mx-2")
        with vuetify.VBtn(icon=True, click=update_reset_resolution):
            vuetify.VIcon("mdi-undo-variant")
    with layout.content:
        with vuetify.VContainer(
            fluid=True,
            classes="pa-0 fill-height",
        ):
            view = vtk.VtkRemoteView(
                renderWindow,
                ref="view",
                # For Custom / Advanced event handling
                # interactor_events=("event_types", VTK_VIEW_EVENTS),
                # **event_listeners(VTK_VIEW_EVENTS),
            )
            # Wire the view's update/reset hooks into the controller.
            ctrl.view_update = view.update
            ctrl.view_reset_camera = view.reset_camera
    # Uncomment following line to hide footer
    # layout.footer.hide()
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    # Start the trame web server (blocking).
    server.start()
|
1647376
|
from pppr import aabb
import numpy as np
from pak.datasets.MOT import MOT16
from pak import utils
from pppr import aabb
from time import time
from cselect import color as cs
# ===========================================
# Helper functions
# ===========================================
def remove_negative_pairs(Dt, W, H, is_gt_trajectory=False):
    """Drop rows whose bounding box is not fully inside a W x H image.

    Dt: detection rows; layout depends on the flags:
        ground-truth trajectory: (frame, pid, x, y, w, h)
        7-column detections:     (frame, pid, x, y, w, h, score)
        6-column detections:     (frame, x, y, w, h, score)
    is_gt_trajectory: {boolean} if true than the
        structure of the data is slightly different

    Returns a numpy array of the surviving rows (same column layout).
    """
    def _inside(x, y, w, h):
        # top-left corner must be non-negative and the box must end
        # strictly inside the image
        return x >= 0 and y >= 0 and x + w < W and y + h < H

    kept = []
    if is_gt_trajectory:
        for frame, pid, x, y, w, h in Dt:
            if _inside(x, y, w, h):
                kept.append((frame, pid, x, y, w, h))
    elif Dt.shape[1] == 7:
        for frame, pid, x, y, w, h, score in Dt:
            if _inside(x, y, w, h):
                kept.append((frame, pid, x, y, w, h, score))
    else:
        for frame, x, y, w, h, score in Dt:
            if _inside(x, y, w, h):
                kept.append((frame, x, y, w, h, score))
    return np.array(kept)
def get_visible_pedestrains(Y_gt, frame):
    """Return the ground-truth rows of ``Y_gt`` belonging to ``frame``.

    Column 0 of ``Y_gt`` holds the frame number.
    """
    Y_gt_frame1 = utils.extract_eq(Y_gt, col=0, value=frame)
    # Earlier revisions also filtered on columns 7/8 (presumably
    # visibility/class flags -- TODO confirm); kept for reference.
    #Y_gt_frame1 = utils.extract_eq(Y_gt_frame1, col=7, value=1)
    #Y_gt_frame1 = utils.extract_eq(Y_gt_frame1, col=8, value=1)
    return Y_gt_frame1
def get_visible_pedestrains_det(Y_det, frame):
    """Return the detection rows of ``Y_det`` for the given frame number."""
    Y_det_frame1 = utils.extract_eq(Y_det, col=0, value=frame)
    return Y_det_frame1
def get_center(d):
    """Return the center point (cx, cy) of a detection bounding box.

    full detection has 7 parameters:
        full_detection: (frame, pid, x, y, w, h, score)
    Only columns 2..5 (x, y, w, h) are read, so shorter rows also work.
    """
    x, y, w, h = d[2:6]
    return x + w / 2, y + h / 2
# ===========================================
# Experiments implementation
# ===========================================
# Module-level switch: when True, dataset loading and timing info is printed.
verbose = False
class MOT16_Experiments:
    """Loads the MOT16-02 and MOT16-11 sequences and derives the detection
    sets (all / true / ground-truth) used throughout the experiments."""

    def __init__(self, folder):
        """ For the experiments we need MOT16-02 and
        MOT16-11 for the analysis
        The detections will have the following structure:
        0: frame_nbr
        1: person id
        2: detection top-left x position
        3: detection top-left y position
        4: detection bb width
        5: detection bb height
        6: detection output score
        """
        global verbose
        mot16 = MOT16(folder, verbose=verbose)
        # memmapped=True keeps the large frame arrays on disk instead of RAM
        mot16_02 = mot16.get_train("MOT16-02", memmapped=True)
        mot16_11 = mot16.get_train("MOT16-11", memmapped=True)
        # index 0 of the (X, Y_det, Y_gt) triple holds the video frames
        self.mot16_02_X = mot16_02[0]
        self.mot16_11_X = mot16_11[0]
        # per-video accumulators, filled by the loop below
        detections_per_video = []
        gt_per_video = []
        true_detections_per_video = []
        true_detections_per_video_no_pid = []
        color_lookups_per_video = []
        for X, Y_det, Y_gt in [mot16_02, mot16_11]:
            # --- run for each video ---
            # this is not the most efficient way but not important atm..
            _, H, W, _ = X.shape
            Y_gt = MOT16.simplify_gt(Y_gt)
            gt_bbs = []
            all_detections = []
            detections_per_video.append(all_detections)
            true_detections = []
            true_detections_per_video.append(true_detections)
            true_detections_no_pid = []
            true_detections_per_video_no_pid.append(true_detections_no_pid)
            gt_per_video.append(gt_bbs)
            frames = X.shape[0]
            TIMING_start = time()
            # MOT16 frame numbers are 1-based
            for frame in range(1, frames+1):
                y_gt = get_visible_pedestrains(Y_gt, frame)
                y_det = get_visible_pedestrains_det(Y_det, frame)
                # collect all ground-truth boxes of this frame
                for ped_ in y_gt:
                    j, pid, l_gt, t_gt, w_gt, h_gt = ped_
                    gt_bbs.append((j, pid, l_gt, t_gt, w_gt, h_gt))
                for ped in y_det:
                    i, _,l, t, w, h, score, _, _,_ = ped
                    # keep only detections fully inside the image
                    # NOTE(review): indentation was ambiguous in the source
                    # paste; the GT-matching loop is nested under the border
                    # check (true detections are a subset of all_detections)
                    # -- confirm against the original repository.
                    if l >= 0 and t >= 0 and l + w < W and \
                            t + h < H:
                        all_detections.append(
                            np.array([i, l, t, w, h, score])
                        )
                        # a detection is "true" when it overlaps a GT box of
                        # the same frame with IoU > 0.5; it inherits that
                        # box's person id
                        for ped_ in y_gt:
                            j, pid, l_gt, t_gt, w_gt, h_gt = ped_
                            assert(i == j)
                            if aabb.IoU((l,t,w,h), (l_gt,t_gt,w_gt,h_gt)) > 0.5:
                                true_detections.append(
                                    np.array([i, pid, l, t, w, h, score]))
                                true_detections_no_pid.append(
                                    np.array([i, l, t, w, h, score]))
            TIMING_end = time()
            if verbose:
                print("Handling " + str(frames) + " frames in " + \
                    str(TIMING_end - TIMING_start) + " seconds")
            # --- figure out coloring ---
            # assign each person id a stable random color for plotting
            Y = np.array(true_detections)
            U = np.unique(Y[:,1])
            Color_lookup = {}
            Colors = cs.lincolor(len(U), random_sat=True, random_val=True)
            #Colors = cs.poisson_disc_sampling_Lab(len(U))
            Colors = np.array(Colors, 'float32') / 255
            for u,c in zip(U, Colors):
                Color_lookup[u] = c
            color_lookups_per_video.append(Color_lookup)
        # unpack the per-video accumulators into named attributes
        self.mot16_02_gt_bbs = np.array(gt_per_video[0])
        self.mot16_11_gt_bbs = np.array(gt_per_video[1])
        self.mot16_02_detections = np.array(detections_per_video[0])
        self.mot16_11_detections = np.array(detections_per_video[1])
        self.mot16_02_true_detections = np.array(true_detections_per_video[0])
        self.mot16_11_true_detections = np.array(true_detections_per_video[1])
        self.mot16_02_true_detections_no_pid = \
            np.array(true_detections_per_video_no_pid[0])
        self.mot16_11_true_detections_no_pid = \
            np.array(true_detections_per_video_no_pid[1])
        self.mot16_02_color_lookup = color_lookups_per_video[0]
        self.mot16_11_color_lookup = color_lookups_per_video[1]

    def get_MOT16_02_gt_trajectories(self, as_point=False):
        """Ground-truth trajectories of MOT16-02 (boxes or center points)."""
        return self.get_detections_as_trajectories(
            self.mot16_02_gt_bbs, as_point)

    def get_MOT16_02_trajectories(self, as_point=False):
        """True-detection trajectories of MOT16-02."""
        return self.get_detections_as_trajectories(
            self.mot16_02_true_detections, as_point)

    def get_MOT16_11_gt_trajectories(self, as_point=False):
        """Ground-truth trajectories of MOT16-11."""
        return self.get_detections_as_trajectories(
            self.mot16_11_gt_bbs, as_point)

    def get_MOT16_11_trajectories(self, as_point=False):
        """True-detection trajectories of MOT16-11."""
        return self.get_detections_as_trajectories(
            self.mot16_11_true_detections, as_point)

    def get_detections_as_trajectories(self, true_detections, as_point=False):
        """Convert rows (frame, pid, x, y, w, h[, score]) into trajectories.

        Returns (frame, pid, cx, cy) rows when ``as_point`` is True,
        otherwise (frame, pid, x, y, w, h) rows, as a numpy array.
        """
        trajectories = []
        for d in true_detections:
            frame = d[0]
            pid = d[1]
            if as_point:
                x,y = get_center(d)
                trajectories.append((frame, pid, x, y))
            else:
                x, y, w, h = d[2], d[3], d[4], d[5]
                trajectories.append((frame, pid, x, y, w, h))
        return np.array(trajectories)

    def plot_frame_MOT16_02(self, ax, frame, with_gt=False):
        """Draw frame ``frame`` of MOT16-02 with its true detections."""
        self.plot_frame(ax,
            self.mot16_02_X,
            self.mot16_02_true_detections,
            self.mot16_02_color_lookup,
            frame, with_gt, self.mot16_02_gt_bbs)

    def plot_frame_MOT16_11(self, ax, frame, with_gt=False):
        """Draw frame ``frame`` of MOT16-11 with its true detections."""
        self.plot_frame(ax,
            self.mot16_11_X,
            self.mot16_11_true_detections,
            self.mot16_11_color_lookup,
            frame, with_gt, self.mot16_11_gt_bbs)

    def plot_frame(self, ax, X, true_detections, id_colors, frame,
            with_gt, gt_bbs):
        """ plots the frame with its true detections

        ax: matplotlib axes to draw onto
        X: video frame array, indexed by frame number
        true_detections: rows (frame, pid, x, y, w, h, score)
        id_colors: person-id -> RGB color lookup
        with_gt: when True, ground-truth boxes are overlaid as dashed green
        """
        Y = utils.extract_eq(true_detections, col=0, value=frame)
        X = X[frame]
        ax.imshow(X)
        for _, pid, x, y, w, h, score in Y:
            # label the box with the person id
            ax.text(x, y, str(int(pid)), color='white', fontsize=17,
                bbox={'facecolor': 'red', 'alpha': 0.5})
            bbX, bbY = utils.bb_to_plt_plot(x, y, w, h)
            ax.plot(bbX, bbY, linewidth=2, color=id_colors[pid])
        if with_gt:
            Y = utils.extract_eq(gt_bbs, col=0, value=frame)
            for _, pid, x, y, w, h in Y:
                bbX, bbY = utils.bb_to_plt_plot(x, y, w, h)
                ax.plot(bbX, bbY, 'g--', linewidth=4)
# -------------
|
1647400
|
from torch.nn.modules.module import Module
from ..functions.riroi_align import RiRoIAlignFunction
class RiRoIAlign(Module):
    """Rotation-invariant RoI Align layer.

    Thin ``Module`` wrapper that forwards to the autograd
    :class:`RiRoIAlignFunction` with fixed pooling parameters.
    """

    def __init__(self, out_size, spatial_scale, sample_num=0, nOrientation=8):
        # out_size: spatial size of the pooled output feature map.
        # spatial_scale: factor mapping RoI coordinates onto the feature map.
        # sample_num: sampling points per bin (0 presumably means adaptive
        #             sampling -- TODO confirm against the underlying op).
        # nOrientation: number of orientation channels in the features.
        super(RiRoIAlign, self).__init__()
        self.out_size = out_size
        self.spatial_scale = float(spatial_scale)
        self.sample_num = int(sample_num)
        self.nOrientation = int(nOrientation)

    def forward(self, features, rois):
        """Pool ``features`` at each RoI in ``rois``; see RiRoIAlignFunction."""
        return RiRoIAlignFunction.apply(features, rois, self.out_size,
            self.spatial_scale, self.sample_num, self.nOrientation)
|
1647407
|
import unittest
class TestSum(unittest.TestCase):
    """Sanity checks for the built-in ``sum`` over different sequence types."""

    def test_sum(self):
        """sum over a list of ints."""
        self.assertEqual(sum([1, 2, 3]), 6, "Should be 6")

    def test_sum_tuple(self):
        """sum over a tuple of ints.

        Fixed: the tuple was previously ``(1, 2, 2)``, which sums to 5 and
        made the assertion fail; the stated intent ("Should be 6") requires
        ``(1, 2, 3)``.
        """
        self.assertEqual(sum((1, 2, 3)), 6, "Should be 6")
if __name__ == '__main__':
    # Discover and run the TestCase methods defined in this module.
    unittest.main()
Method Equivalent to
.assertEqual(a, b) a == b
.assertTrue(x) bool(x) is True
.assertFalse(x) bool(x) is False
.assertIs(a, b) a is b
.assertIsNone(x) x is None
.assertIn(a, b) a in b
.assertIsInstance(a, b) isinstance(a, b)
.assertIs(), .assertIsNone(), .assertIn(), and .assertIsInstance() all have opposite methods, named .assertIsNot(), and so forth.
An integration test checks that components in your application operate with each other.
A unit test checks a small component in your application.
Before you dive into writing tests, you’ll want to first make a couple of decisions:
What do you want to test?
Are you writing a unit test or an integration test?
Then the structure of a test should loosely follow this workflow:
Create your inputs
Execute the code being tested, capturing the output
Compare the output with an expected result
# Note: What if your application is a single script?
# You can import any attributes of the script, such as classes, functions, and variables by using the built-in __import__() function. Instead of from my_sum import sum, you can write the following:
# target = __import__("my_sum")
# sum = target.sum
# The benefit of using __import__() is that you don’t have to turn your project folder into a package. Note that __import__() takes a module name (without the .py extension), not a file name. This is also useful if your module’s name collides with a standard library package: for example, a local math.py would collide with the math module.
|
1647475
|
import socket
def check_infected(ip, port=6969):
    """Probe ``ip`` for the backdoor's ping/pong handshake.

    Connects to ``port`` (default 6969, preserving the original hard-coded
    value), sends ``ping`` and expects ``pong`` back.

    Args:
        ip: host address to probe.
        port: TCP port to connect to; parameterized so callers/tests can
            target a different endpoint.

    Returns:
        True if the host answers "pong", False on any connection error,
        timeout, or unexpected reply.
    """
    try:
        # Context manager guarantees the socket is closed even on error
        # (the original leaked the socket whenever connect/recv raised).
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            # Set the timeout *before* connecting so a dead host cannot
            # stall connect() indefinitely (the original set it afterwards).
            sock.settimeout(5.0)
            sock.connect((ip, port))
            sock.send("ping".encode())
            data = sock.recv(1024)
        msg = data.decode('utf-8').strip('\r\n')
        return msg == "pong"
    except (OSError, UnicodeDecodeError):
        # Narrowed from a bare ``except``: socket.timeout and all socket
        # errors are OSError subclasses; a garbled reply also counts as
        # "not infected" rather than crashing.
        return False
|
1647488
|
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
def plot_time_series(x: np.ndarray, title=None) -> None:
    """Plot a 1-D time series with a marker at every sample.

    Args:
        x: sequence of observations; the sample index is used as the
           time axis.
        title: optional figure title.
    """
    # Styling: larger fonts on a plain white background.
    sns.set(font_scale=1.5)
    sns.set_style("white")
    time_axis = np.arange(x.shape[0])
    plt.plot(time_axis, x, linestyle='-', marker='o')
    plt.title(title)
    plt.xlabel(r'$t$')
    plt.ylabel(r'$x_t$')
    plt.show()
|
1647548
|
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import ShinyUserHash
from secrets import token_hex
@receiver(post_save, sender=User)
def create_hash(sender, instance, created, **kwargs):
    """On first save of a User, attach a random ShinyUserHash.

    Only runs when the User row was just created; ``token_hex(16)`` yields
    a 32-character hex string.
    """
    if created:
        # Renamed from ``hash``, which shadowed the builtin of the same name.
        user_hash = token_hex(16)
        ShinyUserHash.objects.create(user=instance, user_hash=user_hash)
@receiver(post_save, sender=User)
def save_hash(sender, instance, created, **kwargs):
    """Persist the user's related ShinyUserHash on every User save.

    NOTE(review): assumes ``instance.shinyuserhash`` already exists; on
    creation this relies on ``create_hash`` having run first (receiver
    registration order) -- TODO confirm.
    """
    instance.shinyuserhash.save()
|
1647562
|
import asyncio
from typing import Optional
import aioreactive as rx
import pytest
from aioreactive.notification import OnCompleted, OnError, OnNext
from aioreactive.testing import AsyncTestObserver, VirtualTimeEventLoop
from expression.system.disposable import AsyncDisposable
@pytest.fixture()  # type:ignore
def event_loop():
    """Provide a virtual-time event loop so time-based tests run instantly.

    ``pytest.yield_fixture`` was deprecated since pytest 3.0 and removed in
    pytest 6.2; a plain ``pytest.fixture`` has supported yield fixtures for
    the same span, so this is behaviorally identical.
    """
    loop = VirtualTimeEventLoop()
    yield loop
    loop.close()
@pytest.mark.asyncio
async def test_from_iterable_happy():
    """All items of the iterable are emitted in order, then OnCompleted."""
    xs = rx.from_iterable([1, 2, 3])
    obv: AsyncTestObserver[int] = AsyncTestObserver()
    await rx.run(xs, obv)
    # Virtual time: every notification is recorded at t=0.
    assert obv.values == [
        (0, OnNext(1)),
        (0, OnNext(2)),
        (0, OnNext(3)),
        (0, OnCompleted),
    ]
@pytest.mark.asyncio
async def test_from_iterable_observer_throws():
    """An exception raised in the observer's asend surfaces as OnError."""
    xs = rx.from_iterable([1, 2, 3])
    error = Exception("error")

    async def asend(value: int) -> None:
        # Fail on the very first item.
        raise error

    obv: AsyncTestObserver[int] = AsyncTestObserver(asend)
    await xs.subscribe_async(obv)
    # Awaiting the observer re-raises the propagated error.
    with pytest.raises(Exception):
        await obv
    assert obv.values == [(0, OnNext(1)), (0, OnError(error))]
# @pytest.mark.asyncio
# async def test_from_iterable_close():
# xs = rx.from_iterable(range(10))
# sub: Optional[AsyncDisposable] = None
# async def asend(value: int) -> None:
# assert sub is not None
# await sub.dispose_async()
# await asyncio.sleep(0.1)
# async def athrow(err: Exception) -> None:
# print("Exception: ", err)
# obv: AsyncTestObserver[int] = AsyncTestObserver(asend, athrow)
# sub = await xs.subscribe_async(obv)
# # with pytest.raises(asyncio.CancelledError):
# await obv
# assert obv.values == [(0, OnNext(0)), (0, OnCompleted)]
|
1647574
|
import PySimpleGUI as sg
from cblaster.gui.parts import TextLabel, TEXT_WIDTH
# The theme must be set before any widgets are created.
sg.theme("Lightgrey1")

# Form for the "extract" subcommand: pulls sequences out of a saved
# cblaster session, with optional query/organism/scaffold filters.
extract_frame = sg.Frame(
    "Extract",
    layout=[
        [sg.Text(
            "This module will allow you to extract sequences from saved cblaster "
            "session files. You can filter sequences by the queries they hit, "
            "or by the organisms and scaffolds they belong to. This module is "
            "designed to answer questions like: 'how can I get all of the "
            "methyltransferase sequences from homologous gene clusters in "
            # Fixed: the quoted example question was never closed.
            "Aspergillus genomes?'",
            size=(TEXT_WIDTH, 6)
        )],
        # NOTE(review): the InputText and FileBrowse below share the key
        # "extract_session" (likewise "extract_output" further down);
        # PySimpleGUI warns on duplicate keys -- confirm which element the
        # event loop actually reads.
        [TextLabel("Session file"),
         sg.InputText(size=(34, 1), key="extract_session"),
         sg.FileBrowse(key="extract_session")],
        [sg.Text(
            "A session file (.json) generated by a cblaster search.",
            size=(TEXT_WIDTH, 1)
        )],
        [TextLabel("Output file"),
         sg.InputText(key="extract_output", size=(34, 1)),
         sg.FileSaveAs(key="extract_output")],
        [sg.Text(
            "File path to save the extracted sequences to."
            " If not provided, they will be printed to the terminal.",
            size=(TEXT_WIDTH, 2)
        )],
        [TextLabel("Query sequences"), sg.InputText(key="queries")],
        [sg.Text(
            "The names of query sequences which extracted sequences match."
            " You can provide multiple names here.",
            size=(TEXT_WIDTH, 2)
        )],
        [TextLabel("Organisms"), sg.InputText(key="organisms")],
        [sg.Text(
            "Organisms that extracted sequences must be from. These take the "
            "form of regular expression patterns and are therefore quite "
            "flexible. You can provide more than one pattern. For example, to "
            "extract sequences only from Aspergillus and Penicillium genomes, "
            # Fixed: the second example pattern was missing its closing quote.
            "you might specify: 'Aspergillus.*' 'Penicillium.*'. See the user "
            "guide for more examples.",
            size=(TEXT_WIDTH, 5)
        )],
        [TextLabel("Scaffolds"), sg.InputText(key="scaffolds")],
        [sg.Text(
            "Scaffolds that extracted sequences must be on. These can be "
            "scaffold names or names AND coordinate ranges. For example, you "
            "could specify ‘scaffold_1’, which would retrieve ALL clusters on "
            "scaffold_1, or scaffold_1:10000-50000, which would retrieve only "
            "those from position 10000 to 50000. This can be used to extract "
            "sequences from specific clusters.",
            size=(TEXT_WIDTH, 5)
        )],
        [TextLabel("Delimiter"), sg.InputText(key="delimiter")],
        [sg.Text(
            "Generate delimited output instead of human readable.",
            size=(TEXT_WIDTH, 1)
        )],
        [TextLabel("Download"), sg.Checkbox("", key="download", default=False)],
        [sg.Text(
            "Download extracted sequences from NCBI and convert them to FASTA format.",
            size=(TEXT_WIDTH, 1)
        )],
        [TextLabel("Name only"), sg.Checkbox("", key="name_only", default=False)],
        [sg.Text(
            "Don't include source information in extracted sequence headers.",
            size=(TEXT_WIDTH, 1)
        )],
    ],
    title_color="blue",
    font="Arial 10 bold",
    relief="flat",
)

layout = [[extract_frame]]
|
1647643
|
import hypothesis.strategies as st
import torch
from hypothesis import assume
from hypothesis import given
from myrtlespeech.builders.fully_connected import build
from myrtlespeech.model.fully_connected import FullyConnected
from myrtlespeech.protos import fully_connected_pb2
from tests.builders.test_activation import activation_match_cfg
from tests.protos.test_fully_connected import fully_connecteds
# Utilities -------------------------------------------------------------------
def fully_connected_module_match_cfg(
    fully_connected: FullyConnected,
    fully_connected_cfg: fully_connected_pb2.FullyConnected,
    input_features: int,
    output_features: int,
) -> None:
    """Ensures ``FullyConnected`` module matches protobuf configuration.

    Args:
        fully_connected: the built module under test.
        fully_connected_cfg: the protobuf config it was built from.
        input_features: expected input size of the first linear layer.
        output_features: expected output size of the last linear layer.

    Raises:
        AssertionError: when the module structure does not match the config.
    """
    fully_connected = fully_connected.fully_connected  # get torch module
    # if no hidden layers then test that the module is Linear with correct
    # sizes, ignore activation
    if fully_connected_cfg.num_hidden_layers == 0:
        assert isinstance(fully_connected, torch.nn.Linear)
        assert fully_connected.in_features == input_features
        assert fully_connected.out_features == output_features
        # Bug fix: ``HasField`` is a protobuf message method. The local
        # ``fully_connected`` was rebound to a torch module above, so the
        # original call raised AttributeError on torch.nn.Linear; the intent
        # is to check that the *config* requested no dropout.
        assert not fully_connected_cfg.HasField("dropout")
        return
    # otherwise it will be a Sequential of layers
    assert isinstance(fully_connected, torch.nn.Sequential)
    # expected configuration of each layer in Sequential depends on whether
    # both/either of {activation, dropout} are present.
    act_fn_is_none = fully_connected_cfg.activation.HasField("identity")
    dropout_is_none = not fully_connected_cfg.HasField("dropout")
    dropout_is_none = dropout_is_none or fully_connected_cfg.dropout.value == 0
    # each hidden layer contributes linear [+ activation] [+ dropout];
    # the final output linear layer adds one more module
    if act_fn_is_none:
        expected_len = fully_connected_cfg.num_hidden_layers + 1
    else:
        expected_len = 2 * fully_connected_cfg.num_hidden_layers + 1
    if not dropout_is_none:
        expected_len += fully_connected_cfg.num_hidden_layers
    assert len(fully_connected) == expected_len
    # Now check that the linear/activation_fn/dropout layers appear in the
    # expected order. We set the ``module_idx`` and then check for the
    # following condition:
    #     if module_idx % total_types == <module_type>_idx:
    #         assert isinstance(module, <module_type>)
    linear_idx = 0  # in all cases
    activation_idx = -1  # infeasible value as default
    dropout_idx = -1
    if act_fn_is_none and dropout_is_none:
        total_types = 1  # (linear layers only)
    elif not act_fn_is_none and dropout_is_none:
        total_types = 2  # (linear and activation)
        activation_idx = 1
    elif act_fn_is_none and not dropout_is_none:
        total_types = 2
        dropout_idx = 1
    elif not act_fn_is_none and not dropout_is_none:
        total_types = 3
        activation_idx = 1
        dropout_idx = 2
    for module_idx, module in enumerate(fully_connected):
        if module_idx % total_types == linear_idx:
            assert isinstance(module, torch.nn.Linear)
            assert module.in_features == input_features
            if module_idx == len(fully_connected) - 1:
                assert module.out_features == output_features
            else:
                assert module.out_features == fully_connected_cfg.hidden_size
                # subsequent layers consume the hidden representation
                input_features = fully_connected_cfg.hidden_size
        elif module_idx % total_types == activation_idx:
            activation_match_cfg(module, fully_connected_cfg.activation)
        elif module_idx % total_types == dropout_idx:
            assert isinstance(module, torch.nn.Dropout)
            assert abs(module.p - fully_connected_cfg.dropout.value) < 1e-8
        else:
            raise ValueError(
                "Check module_idx and total_types assignment. It "
                "**should not** be possible to hit this branch!"
            )
# Tests -----------------------------------------------------------------------
@given(
    fully_connected_cfg=fully_connecteds(),
    input_features=st.integers(min_value=1, max_value=32),
    output_features=st.integers(min_value=1, max_value=32),
)
def test_build_fully_connected_returns_correct_module_structure(
    fully_connected_cfg: fully_connected_pb2.FullyConnected,
    input_features: int,
    output_features: int,
) -> None:
    """Ensures Module returned has correct structure."""
    if fully_connected_cfg.num_hidden_layers == 0:
        # NOTE(review): protobuf scalar fields are never ``None``, so these
        # assumes presumably never filter anything -- confirm intent.
        assume(fully_connected_cfg.hidden_size is None)
        assume(fully_connected_cfg.activation is None)
    actual = build(fully_connected_cfg, input_features, output_features)
    # Delegate structural verification to the shared matcher.
    fully_connected_module_match_cfg(
        actual, fully_connected_cfg, input_features, output_features
    )
|
1647663
|
import argparse
import os
from tensorflow import keras
import numpy as np
from utils import generator, model, utils
# Command-line options for the training run. Help strings are in Chinese;
# English equivalents are noted in the comments alongside each argument.
parser = argparse.ArgumentParser()
parser.add_argument('--num_epoch', default=56, type=int, help='训练的轮数')  # number of training epochs
parser.add_argument('--lr', default=0.001, type=float, help='初始学习率的大小')  # initial learning rate
parser.add_argument('--batch_size', default=16, type=int, help='训练的批量大小')  # training batch size
parser.add_argument('--num_classes', default=3242, type=int, help='分类的类别数量')  # number of output classes
parser.add_argument('--train_list', default='dataset/train_list.txt', type=str, help='训练数据的数据列表路径')  # path to the training data list
parser.add_argument('--val_list', default='dataset/test_list.txt', type=str, help='测试数据的数据列表路径')  # path to the validation data list
parser.add_argument('--resume', default=None, type=str, help='预训练模型的路径,当为None则不使用预训练模型')  # pretrained checkpoint path; None disables resuming
parser.add_argument('--model_path', default='models', type=str, help='模型保存的路径')  # directory where checkpoints are saved
args = parser.parse_args()
utils.print_arguments(args)
def main(args):
    """Train the speaker-classification network with the parsed arguments."""
    # Datasets
    trnlist, trnlb = utils.get_data_list(path=args.train_list)
    vallist, vallb = utils.get_data_list(path=args.val_list)
    # Generators
    trn_gen = generator.DataGenerator(list_IDs=trnlist.flatten(),
                                      labels=trnlb.flatten(),
                                      n_classes=args.num_classes,
                                      batch_size=args.batch_size)
    val_gen = generator.DataGenerator(list_IDs=vallist.flatten(),
                                      labels=vallb.flatten(),
                                      n_classes=args.num_classes,
                                      batch_size=args.batch_size)
    image_len = len(trnlist.flatten())
    # Build the model
    network = model.vggvox_resnet2d_icassp(num_classes=args.num_classes, mode='train')
    # Load pretrained weights, if a checkpoint was given
    initial_epoch = 0
    if args.resume:
        network.load_weights(os.path.join(args.resume))
        # checkpoints are named 'resnet34-<epoch>.h5'; resume counting from
        # the epoch encoded in the filename
        initial_epoch = int(os.path.basename(args.resume)[:-3].split('-')[1])
        print('==> successfully loading model {}.'.format(args.resume))
    print(network.summary())
    print('==> training {} audios, classes: {} '.format(image_len, args.num_classes))
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)
    # step_decay is defined later in the module; that is fine because main()
    # is only invoked at the bottom of the file.
    normal_lr = keras.callbacks.LearningRateScheduler(step_decay)
    callbacks = [keras.callbacks.ModelCheckpoint(os.path.join(args.model_path, 'resnet34-{epoch:02d}.h5'),
                                                 monitor='loss',
                                                 mode='min',
                                                 save_best_only=True), normal_lr]
    # NOTE(review): fit_generator is deprecated in recent Keras/TF releases
    # in favor of fit(); kept as-is for the pinned version -- TODO confirm.
    network.fit_generator(generator=trn_gen,
                          steps_per_epoch=int(image_len // args.batch_size),
                          epochs=args.num_epoch,
                          initial_epoch=initial_epoch,
                          max_queue_size=10,
                          callbacks=callbacks,
                          use_multiprocessing=True,
                          validation_data=val_gen,
                          workers=6,
                          verbose=1)
# Learning-rate decay schedule
def step_decay(epoch):
    """Piecewise-constant learning-rate schedule with two decay cycles.

    Training is split into six stages; within each cycle the learning rate
    is ``args.lr`` scaled by 1.0 -> 0.1 -> 0.01, and the pattern repeats in
    the second half of training.

    Args:
        epoch: zero-based epoch index supplied by Keras.

    Returns:
        The learning rate for this epoch as a plain ``float``.
    """
    half_epoch = args.num_epoch // 2
    stage1, stage2, stage3 = int(half_epoch * 0.5), int(half_epoch * 0.8), half_epoch
    stage4 = stage3 + stage1
    stage5 = stage4 + (stage2 - stage1)
    stage6 = args.num_epoch
    milestone = [stage1, stage2, stage3, stage4, stage5, stage6]
    gamma = [1.0, 0.1, 0.01, 1.0, 0.1, 0.01]
    lr = 0.005  # fallback when epoch is past every milestone
    init_lr = args.lr
    for boundary, factor in zip(milestone, gamma):
        if epoch < boundary:
            lr = init_lr * factor
            break
    print('Learning rate for epoch {} is {}.'.format(epoch + 1, lr))
    # Bug fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    # it was only an alias of the builtin, so return a plain float.
    return float(lr)
if __name__ == "__main__":
    # Kick off training with the module-level parsed arguments.
    main(args)
|
1647675
|
import pytest
from ocdeployer.images import ImageImporter, import_images
@pytest.fixture
def mock_oc(mocker):
    """Patch ``ocdeployer.images.oc`` and stub ``get_json``.

    ``get_json`` returning ``{}`` means no imagestream exists yet, so every
    configured image should be imported. Yields the mocked ``oc`` callable
    for call inspection.
    """
    _mock_oc = mocker.patch("ocdeployer.images.oc")
    mocker.patch("ocdeployer.images.get_json", return_value={})
    yield _mock_oc
def _check_oc_calls(mocker, mock_oc):
    """Assert that image1 and image2 were each imported via ``oc import-image``."""
    assert mock_oc.call_count == 2
    expected = []
    for name in ("image1", "image2"):
        expected.append(
            mocker.call(
                "import-image",
                f"{name}:tag",
                f"--from=docker.url/{name}:sometag",
                "--confirm",
                "--scheduled=True",
                _reraise=True,
            )
        )
    mock_oc.assert_has_calls(expected)
def test_images_short_style_syntax(mocker, mock_oc):
    """Short-style entries ({istag: from}) are imported correctly."""
    entries = [
        {"image1:tag": "docker.url/image1:sometag"},
        {"image2:tag": "docker.url/image2:sometag"},
    ]
    ImageImporter.imported_istags = []  # reset class-level state between tests
    import_images({"images": entries}, [])
    _check_oc_calls(mocker, mock_oc)
def test_images_long_style_syntax(mocker, mock_oc):
    """Long-style entries ({"istag": ..., "from": ...}) are imported correctly."""
    entries = [
        {"istag": "image1:tag", "from": "docker.url/image1:sometag"},
        {"istag": "image2:tag", "from": "docker.url/image2:sometag"},
    ]
    ImageImporter.imported_istags = []  # reset class-level state between tests
    import_images({"images": entries}, [])
    _check_oc_calls(mocker, mock_oc)
def test_images_old_style_syntax(mocker, mock_oc):
    """Legacy dict-style 'images' config ({istag: from, ...}) still works."""
    mapping = {
        "image1:tag": "docker.url/image1:sometag",
        "image2:tag": "docker.url/image2:sometag",
    }
    ImageImporter.imported_istags = []  # reset class-level state between tests
    import_images({"images": mapping}, [])
    _check_oc_calls(mocker, mock_oc)
def test_images_mixed_style_syntax(mocker, mock_oc):
    """Short- and long-style entries can be mixed within one list."""
    entries = [
        {"image1:tag": "docker.url/image1:sometag"},
        {"istag": "image2:tag", "from": "docker.url/image2:sometag"},
    ]
    ImageImporter.imported_istags = []  # reset class-level state between tests
    import_images({"images": entries}, [])
    _check_oc_calls(mocker, mock_oc)
def test_images_conditional_images(mocker, mock_oc):
    """Env-restricted images are imported when a matching env is active."""
    entries = [
        {"istag": "image1:tag", "from": "docker.url/image1:sometag", "envs": ["qa", "prod"]},
        {"istag": "image2:tag", "from": "docker.url/image2:sometag"},
    ]
    ImageImporter.imported_istags = []  # reset class-level state between tests
    import_images({"images": entries}, ["prod"])
    _check_oc_calls(mocker, mock_oc)
def test_images_conditional_ignore_image(mocker, mock_oc):
    """Images restricted to inactive envs are skipped entirely."""
    entries = [
        {"istag": "image1:tag", "from": "docker.url/image1:sometag", "envs": ["qa", "prod"]},
        {"istag": "image2:tag", "from": "docker.url/image2:sometag"},
    ]
    ImageImporter.imported_istags = []  # reset class-level state between tests
    import_images({"images": entries}, ["foo"])
    # Only the unrestricted image may have been imported.
    assert mock_oc.call_count == 1
    mock_oc.assert_has_calls([
        mocker.call(
            "import-image",
            "image2:tag",
            "--from=docker.url/image2:sometag",
            "--confirm",
            "--scheduled=True",
            _reraise=True,
        )
    ])
|
1647683
|
import stripe
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def get_stripe_settings():
    """Return the public Stripe settings of the current tenant's provider.

    Raises:
        ImproperlyConfigured: when no ``StripePaymentProvider`` row exists.
    """
    # Imported lazily to avoid a circular import at module load time.
    from bluebottle.funding_stripe.models import StripePaymentProvider

    provider = StripePaymentProvider.objects.first()
    if provider is None:
        raise ImproperlyConfigured('Stripe not enabled for this tenant')
    return provider.public_settings
# Global Stripe client configuration, taken from Django settings.
stripe.api_key = settings.STRIPE['api_key']
# Pin the API version so request/webhook payloads keep a stable schema.
stripe.api_version = '2019-09-09'
# Webhook signing secrets for the three endpoint types; these are custom
# attributes stashed on the ``stripe`` module for later signature checks.
stripe.webhook_secret_sources = settings.STRIPE['webhook_secret_sources']
stripe.webhook_secret_intents = settings.STRIPE['webhook_secret_intents']
stripe.webhook_secret_connect = settings.STRIPE['webhook_secret_connect']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.