from typing import Deque

from ..grammar import Grammar
from ..tokenizer import Token


class FinalToken:
    def __init__(self):
        self.name = '$'
        self.lexeme = '$'


class Parser:
    def __init__(self, grammar: Grammar, action, go_to):
        self.grammar = grammar
        self.action = action
        self.go_to = go_to
        self.final = FinalToken()

    def parse(self, sequence: Deque[Token]):
        sequence.append(self.final)
        tokens_stack = []
        states_stack = [0]
        nodes = []
        while len(sequence) > 0:
            token = sequence[0]
            state_action = self.action[states_stack[-1]]
            if token.name not in state_action:
                raise Exception(
                    f'Unexpected token {token.name} with value {token.lexeme}')
            do = state_action[token.name]
            if do[0] == 'OK':
                return nodes[0]
            if do[0] == 'S':
                # Shift: push the new state and consume the token.
                states_stack.append(do[1])
                tokens_stack.append(token.lexeme)
                sequence.popleft()
            else:
                # Reduce: run the AST action, then pop one state/token per
                # symbol in the production body and follow the GOTO table.
                prod = self.grammar.P[do[1]]
                if prod.func_ast is not None:
                    prod.func_ast(tokens_stack, nodes)
                for _ in range(len(prod)):
                    tokens_stack.pop()
                    states_stack.pop()
                state_go_to = self.go_to[states_stack[-1]]
                if prod.head.name not in state_go_to:
                    raise Exception(
                        f"Invalid sequence of tokens {prod.head.name}")
                tokens_stack.append(prod.head.name)
                states_stack.append(state_go_to[prod.head.name])

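# A minimal usage sketch for the LR parser above. This is hypothetical: the
# Grammar, action and go_to tables are assumed to come from an LR table
# generator elsewhere in the package (build_tables() and tokenize() are
# illustrative names, not real functions from this code).
#
#     from collections import deque
#
#     grammar, action, go_to = build_tables()   # assumed table construction
#     tokens = deque(tokenize(source))          # Deque[Token] from the tokenizer
#     ast_root = Parser(grammar, action, go_to).parse(tokens)
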
def processChange(job):
    service = job.service
    args = job.model.args
    if args.pop('changeCategory') != 'dataschema':
        return
    if 'url' in args:
        service.model.data.url = args['url']
    if 'eventtypes' in args:
        service.model.data.eventtypes = args['eventtypes']
    service.saveAll()

"""Fire animation, by Uck!""" import dcfurs import random # Colors, from top to bottom of the fire. colors = [0, 0x1f0f0f, 0x3f0000, 0xff0000, 0xff7f00, 0xffff00, 0x1f007f, 0x0000ff, 0xffffff] # Bitmask values copied from emote.boop(). boop_mask = [ 0x0e48e, 0x12b52, 0x12b52, 0x0eb4e, 0x02492, 0x02012, 0x0200e ] class fire(object): """A simple fire animation, inspired by the classic demo fire effect.""" def __init__(self): # Allocate our internal buffer. Values in this buffer range from 0 # to len(colors) - 1. As the fire values move upward on the screen, # these values fall toward 0 (which is black, in the colors array). # # There's an extra row at the bottom that's filled with random values. # That row isn't displayed on the LEDs. self.buffer = [[0] * dcfurs.ncols for y in range(dcfurs.nrows + 1)] self.interval = 25 self.boop_remaining = 0 def draw(self): self.update_fire() self.update_boop() if self.boop_remaining: # Render at 1/4 brightness unless it's the Boop text. for y, row_mask in enumerate(boop_mask): for x in range(dcfurs.ncols): color = colors[self.buffer[y][x]] if (1 << x) & row_mask == 0: # Non-boop pixel. color = (color >> 2) & 0x3f3f3f dcfurs.set_pix_rgb(x, y, color) else: for y in range(dcfurs.nrows): for x in range(dcfurs.ncols): dcfurs.set_pix_rgb(x, y, colors[self.buffer[y][x]]) def update_fire(self): """Update our internal fire buffer, moving the flames upward.""" # Fill the bottom (invisible) row with random values. for x in range(dcfurs.ncols): self.buffer[dcfurs.nrows][x] = random.randint(0, len(colors) - 1) # Propagate the fire colors upward, averaging from the pixels below # and decreasing the value toward 0. for y in range(dcfurs.nrows): for x in range(1, dcfurs.ncols - 1): value = (self.buffer[y + 1][x - 1] + self.buffer[y + 1][x] + self.buffer[y + 1][x + 1]) // 3 if random.randint(0, 2) == 0: value -= 1 self.buffer[y][x] = min(len(colors) - 1, max(0, value)) def boop(self): """Nose Boop start, reset our internal Boop timer.""" self.boop_remaining = 500 / self.interval def update_boop(self): """Check if we need to add Boop to the flames.""" if self.boop_remaining: self.add_boop() self.boop_remaining -= 1 def add_boop(self): """Add Boop to the fire, so it interacts with the flames.""" for y, row_mask in enumerate(boop_mask): for x in range(dcfurs.ncols): if (1 << x) & row_mask: self.buffer[y][x] = len(colors) - 1
from agents.common import PLAYER1, PLAYER2, initialize_game_state, apply_player_action, \
    evaluate_rows, is_player_blocking_opponent, is_player_winning


def test_evaluate_rows_True_Player1_is_player_blocking_opponent():
    game = initialize_game_state()
    num_rows = game.shape[0]
    num_cols = game.shape[1]
    for row in range(0, num_rows):
        for col in range(0, num_cols):
            if col == 0 and row > 0:
                game[row - 1][num_cols - 1] = PLAYER2
                apply_player_action(game, 0, PLAYER2)
            elif col < 3:
                apply_player_action(game, col, PLAYER2)
            else:
                game[row][col - 1] = PLAYER2
                apply_player_action(game, col, PLAYER1)
            if col > 2:
                assert evaluate_rows(game, PLAYER1, is_player_blocking_opponent) == True
                assert evaluate_rows(game, PLAYER2, is_player_blocking_opponent) == False


def test_evaluate_rows_True_Player2_is_player_blocking_opponent():
    game = initialize_game_state()
    num_rows = game.shape[0]
    num_cols = game.shape[1]
    for row in range(0, num_rows):
        for col in range(0, num_cols):
            if col == 0 and row > 0:
                game[row - 1][num_cols - 1] = PLAYER1
                apply_player_action(game, 0, PLAYER1)
            elif col < 3:
                apply_player_action(game, col, PLAYER1)
            else:
                game[row][col - 1] = PLAYER1
                apply_player_action(game, col, PLAYER2)
            if col > 2:
                assert evaluate_rows(game, PLAYER1, is_player_blocking_opponent) == False
                assert evaluate_rows(game, PLAYER2, is_player_blocking_opponent) == True


def test_evaluate_rows_False_is_player_blocking_opponent():
    game = initialize_game_state()
    num_rows = game.shape[0]
    num_cols = game.shape[1]
    for row in range(0, num_rows):
        for col in range(0, num_cols):
            apply_player_action(game, col, PLAYER1)
            if col > 2:
                assert evaluate_rows(game, PLAYER1, is_player_blocking_opponent) == False
                assert evaluate_rows(game, PLAYER2, is_player_blocking_opponent) == False


def test_evaluate_rows_True_Player1_is_player_winning():
    game = initialize_game_state()
    num_rows = game.shape[0]
    num_cols = game.shape[1]
    for row in range(0, num_rows):
        for col in range(0, num_cols):
            if col == 0 and row > 0:
                game[row - 1][num_cols - 1] = PLAYER2
                game[row - 1][num_cols - 2] = PLAYER2
                game[row - 1][num_cols - 3] = PLAYER2
                apply_player_action(game, 0, PLAYER1)
            elif col < 4:
                apply_player_action(game, col, PLAYER1)
            else:
                game[row][col - 4] = PLAYER2
                apply_player_action(game, col, PLAYER1)
            if col > 2:
                assert evaluate_rows(game, PLAYER1, is_player_winning) == True
                assert evaluate_rows(game, PLAYER2, is_player_winning) == False


def test_evaluate_rows_True_Player2_is_player_winning():
    game = initialize_game_state()
    num_rows = game.shape[0]
    num_cols = game.shape[1]
    for row in range(0, num_rows):
        for col in range(0, num_cols):
            if col == 0 and row > 0:
                game[row - 1][num_cols - 1] = PLAYER1
                game[row - 1][num_cols - 2] = PLAYER1
                game[row - 1][num_cols - 3] = PLAYER1
                apply_player_action(game, 0, PLAYER2)
            elif col < 4:
                apply_player_action(game, col, PLAYER2)
            else:
                game[row][col - 4] = PLAYER1
                apply_player_action(game, col, PLAYER2)
            if col > 2:
                assert evaluate_rows(game, PLAYER1, is_player_winning) == False
                assert evaluate_rows(game, PLAYER2, is_player_winning) == True


def test_evaluate_rows_False_is_player_winning():
    game = initialize_game_state()
    num_rows = game.shape[0]
    num_cols = game.shape[1]
    for row in range(0, num_rows):
        for col in range(0, num_cols):
            if col % 2 == 0:
                apply_player_action(game, col, PLAYER2)
            else:
                apply_player_action(game, col, PLAYER1)
            if col > 2:
                assert evaluate_rows(game, PLAYER1, is_player_winning) == False
                assert evaluate_rows(game, PLAYER2, is_player_winning) == False

# project/server/tests/base.py

from flask_testing import TestCase

from price_picker import db, create_app
from price_picker.common.create_sample_data import create_sample_data

app = create_app()


class BaseTestCase(TestCase):
    def create_app(self):
        app.config.from_object("config.TestingConfig")
        return app

    def setUp(self):
        db.create_all()
        create_sample_data()

    def tearDown(self):
        db.session.remove()
        db.drop_all()

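# A usage sketch for the base class above, assuming the price_picker app
# serves a page at '/' (the route and class name here are illustrative;
# self.client is provided by flask_testing.TestCase).
#
#     class TestHomePage(BaseTestCase):
#         def test_index_returns_200(self):
#             response = self.client.get('/')
#             self.assertEqual(response.status_code, 200)
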
from resotolib.baseresources import BaseResource
import resotolib.logger
import socket
import multiprocessing
import resotolib.proc
from concurrent import futures
from resotolib.baseplugin import BaseCollectorPlugin
from argparse import Namespace
from resotolib.args import ArgumentParser
from resotolib.config import Config, RunningConfig
from .resources import OnpremLocation, OnpremRegion, OnpremNetwork
from .ssh import instance_from_ssh
from .config import OnpremConfig
from paramiko import ssh_exception
from typing import Dict

log = resotolib.logger.getLogger("resoto." + __name__)


class OnpremCollectorPlugin(BaseCollectorPlugin):
    cloud = "onprem"

    def collect(self) -> None:
        log.debug("plugin: collecting on-prem resources")
        if len(Config.onprem.server) == 0:
            log.debug("No On-Prem servers specified")
            return

        default_location = OnpremLocation(Config.onprem.location)
        self.graph.add_resource(self.graph.root, default_location)

        default_region = OnpremRegion(Config.onprem.region)
        self.graph.add_resource(default_location, default_region)

        servers = []
        for server in Config.onprem.server:
            location = region = network = None
            srv = {}
            if "%" in server:
                server_location, server = server.split("%", 1)
                location = self.graph.search_first_all(
                    {"id": server_location, "kind": "onprem_location"}
                )
                if location is None:
                    location = OnpremLocation(server_location, {})
                    self.graph.add_resource(self.graph.root, location)
                srv.update({"location": location})
                log.debug(f"Location for {server} is {location.rtdname}")
            if "%" in server:
                server_region, server = server.split("%", 1)
                region = self.graph.search_first_all(
                    {"id": server_region, "kind": "onprem_region"}
                )
                if region is None:
                    region = OnpremRegion(server_region, {})
                    self.graph.add_resource(location, region)
                srv.update({"region": region})
                log.debug(f"Region for {server} is {region.rtdname}")
            if "%" in server:
                server_network, server = server.split("%", 1)
                network = self.graph.search_first_all(
                    {"id": server_network, "kind": "onprem_network"}
                )
                if network is None:
                    network = OnpremNetwork(server_network, {})
                    self.graph.add_resource(region, network)
                srv.update({"network": network})
                log.debug(f"Network for {server} is {network.rtdname}")
            srv.update({"hostname": server})
            servers.append(srv)

        max_workers = (
            len(servers)
            if len(servers) < Config.onprem.pool_size
            else Config.onprem.pool_size
        )
        pool_args = {"max_workers": max_workers}
        if Config.onprem.fork_process:
            pool_args["mp_context"] = multiprocessing.get_context("spawn")
            pool_args["initializer"] = resotolib.proc.initializer
            pool_executor = futures.ProcessPoolExecutor
            collect_args = {
                "args": ArgumentParser.args,
                "running_config": Config.running_config,
            }
        else:
            pool_executor = futures.ThreadPoolExecutor
            collect_args = {}

        with pool_executor(**pool_args) as executor:
            wait_for = [
                executor.submit(
                    collect_server,
                    srv,
                    **collect_args,
                )
                for srv in servers
            ]
            for future in futures.as_completed(wait_for):
                (src, s) = future.result()
                if src is None:
                    src = default_region
                if not isinstance(src, BaseResource) or not isinstance(s, BaseResource):
                    log.error(f"Skipping invalid server {type(s)}")
                    continue
                self.graph.add_resource(src, s)

    @staticmethod
    def add_config(config: Config) -> None:
        config.add_config(OnpremConfig)


def collect_server(
    srv: Dict, args: Namespace = None, running_config: RunningConfig = None
) -> Dict:
    if args is not None:
        ArgumentParser.args = args
    if running_config is not None:
        Config.running_config.apply(running_config)

    hostname: str = srv.get("hostname")
    username = None
    port = 22
    if "@" in hostname:
        username, hostname = hostname.split("@", 1)
    if ":" in hostname:
        hostname, port = hostname.split(":", 1)

    collector_name = f"onprem_{hostname}"
    resotolib.proc.set_thread_name(collector_name)

    try:
        s = instance_from_ssh(
            hostname,
            username=username,
            port=port,
            key_filename=Config.onprem.ssh_key,
            passphrase=Config.onprem.ssh_key_pass,
        )
        src = srv.get("network", srv.get("region", srv.get("location", None)))
    except (socket.timeout, ssh_exception.PasswordRequiredException):
        log.exception(f"Failed to collect {hostname}")
    else:
        log.debug(f"onprem: collected {s.rtdname}")
        return (src, s)

from enum import Enum

__NAMESPACE__ = "http://www.opengis.net/gml"


class KnotTypesType(Enum):
    """
    This enumeration type specifies values for the knots' type
    (see ISO 19107:2003, 6.4.25).
    """
    UNIFORM = "uniform"
    QUASI_UNIFORM = "quasiUniform"
    PIECEWISE_BEZIER = "piecewiseBezier"

#!/usr/bin/env python3
"""Bumps the detect-secrets version, in both `detect_secrets/__init__.py`
and `README.md`. Then commits.
"""
import argparse
import pathlib
import subprocess
import sys


PROJECT_ROOT = pathlib.Path(__file__).absolute().parent.parent
INIT_FILE_PATH = PROJECT_ROOT.joinpath('detect_secrets/__init__.py')
README_FILE_PATH = PROJECT_ROOT.joinpath('README.md')


def _argparse_bump_type(value):
    VALID_BUMP_TYPES = ('major', 'minor', 'patch')
    if value in VALID_BUMP_TYPES:
        return value

    raise argparse.ArgumentTypeError(
        f"Argument {value} must be one of 'major', 'minor', 'patch'.",
    )


def parse_args(argv):
    parser = argparse.ArgumentParser(
        description=__doc__,
        prog='bumpity',
    )
    parser.add_argument(
        '--bump',
        help='the bump type, specified as one of {major, minor, patch}',
        metavar='{major,minor,patch}',
        type=_argparse_bump_type,
    )

    return parser.parse_args(argv)


def get_current_version():
    with open(INIT_FILE_PATH) as init_file:
        first_line = init_file.read().splitlines()[0]
        # e.g. VERSION = '0.13.0'
        _, semver = first_line.replace(' ', '').split('=')

    return map(
        int,
        # e.g. '0.13.0'
        semver.strip("'").split('.'),
    )


def update_init_file(new_version):
    with open(INIT_FILE_PATH, 'w') as init_file:
        init_file.write(f"VERSION = '{new_version}'\n")


def update_readme(old_version, new_version):
    with open(README_FILE_PATH, 'r') as readme:
        original_text = readme.read()

    with open(README_FILE_PATH, 'w') as readme:
        readme.write(
            original_text.replace(old_version, new_version),
        )


def stage_and_commit(new_version):
    # Stage files
    subprocess.check_output(
        (
            'git', 'add',
            INIT_FILE_PATH,
            README_FILE_PATH,
        ),
    )

    # Check they are the only ones staged
    staged_files = subprocess.check_output(
        (
            'git', 'diff', '--staged', '--name-only',
        ),
    ).splitlines()
    if len(staged_files) != 2:
        raise RuntimeWarning('More files staged than __init__.py and README.md')

    # Make the commit
    subprocess.check_output(
        (
            'git', 'commit',
            '--message', f':fist: Bumping version to {new_version}',
            INIT_FILE_PATH,
            README_FILE_PATH,
        ),
    )


def main(argv=sys.argv[1:]):
    if not argv:
        argv.append('--help')
    args = parse_args(argv)

    major, minor, patch = get_current_version()
    old_version = f'{major}.{minor}.{patch}'

    if args.bump == 'major':
        major += 1
        minor = 0
        patch = 0
    elif args.bump == 'minor':
        minor += 1
        patch = 0
    else:
        patch += 1
    new_version = f'{major}.{minor}.{patch}'

    update_init_file(new_version)
    update_readme(old_version, new_version)
    stage_and_commit(new_version)

    print("Don't forget to update CHANGELOG.md too!")


if __name__ == '__main__':
    sys.exit(main())

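# Usage sketch for the script above (the scripts/ path is assumed from
# PROJECT_ROOT being two levels above this file; adjust to where the script
# actually lives):
#
#     python scripts/bumpity.py --bump patch   # e.g. 0.13.0 -> 0.13.1
#     python scripts/bumpity.py --bump minor   # e.g. 0.13.0 -> 0.14.0
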
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init

from models.archs.dcn.deform_conv import ModulatedDeformConvPack as DCN_sep


class PCD_Align(nn.Module):
    ''' Alignment module using Pyramid, Cascading and Deformable convolution
    with 3 pyramid levels.
    '''

    def __init__(self, nf=64, groups=8):
        super(PCD_Align, self).__init__()
        # fea1
        # L3: level 3, 1/4 spatial size
        self.L3_offset_conv1_1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)  # concat for diff
        self.L3_offset_conv2_1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.L3_dcnpack_1 = DCN_sep(nf, nf, 3, stride=1, padding=1, dilation=1,
                                    deformable_groups=groups, extra_offset_mask=True)
        # L2: level 2, 1/2 spatial size
        self.L2_offset_conv1_1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)  # concat for diff
        self.L2_offset_conv2_1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)  # concat for offset
        self.L2_offset_conv3_1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.L2_dcnpack_1 = DCN_sep(nf, nf, 3, stride=1, padding=1, dilation=1,
                                    deformable_groups=groups, extra_offset_mask=True)
        self.L2_fea_conv_1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)  # concat for fea
        # L1: level 1, original spatial size
        self.L1_offset_conv1_1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)  # concat for diff
        self.L1_offset_conv2_1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)  # concat for offset
        self.L1_offset_conv3_1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.L1_dcnpack_1 = DCN_sep(nf, nf, 3, stride=1, padding=1, dilation=1,
                                    deformable_groups=groups, extra_offset_mask=True)
        self.L1_fea_conv_1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)  # concat for fea

        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, fea1, fea2):
        '''align other neighboring frames to the reference frame in the feature level
        fea1, fea2: [L1, L2, L3], each with [B,C,H,W] features
        estimate offset bidirectionally
        '''
        y = []
        # param. of fea1
        # L3
        L3_offset = torch.cat([fea1[2], fea2[2]], dim=1)
        L3_offset = self.lrelu(self.L3_offset_conv1_1(L3_offset))
        L3_offset = self.lrelu(self.L3_offset_conv2_1(L3_offset))
        L3_fea = self.lrelu(self.L3_dcnpack_1([fea1[2], L3_offset]))
        # L2
        L2_offset = torch.cat([fea1[1], fea2[1]], dim=1)
        L2_offset = self.lrelu(self.L2_offset_conv1_1(L2_offset))
        L3_offset = F.interpolate(L3_offset, scale_factor=2, mode='bilinear',
                                  align_corners=False)
        L2_offset = self.lrelu(self.L2_offset_conv2_1(
            torch.cat([L2_offset, L3_offset * 2], dim=1)))
        L2_offset = self.lrelu(self.L2_offset_conv3_1(L2_offset))
        L2_fea = self.L2_dcnpack_1([fea1[1], L2_offset])
        L3_fea = F.interpolate(L3_fea, scale_factor=2, mode='bilinear',
                               align_corners=False)
        L2_fea = self.lrelu(self.L2_fea_conv_1(torch.cat([L2_fea, L3_fea], dim=1)))
        # L1
        L1_offset = torch.cat([fea1[0], fea2[0]], dim=1)
        L1_offset = self.lrelu(self.L1_offset_conv1_1(L1_offset))
        L2_offset = F.interpolate(L2_offset, scale_factor=2, mode='bilinear',
                                  align_corners=False)
        L1_offset = self.lrelu(self.L1_offset_conv2_1(
            torch.cat([L1_offset, L2_offset * 2], dim=1)))
        L1_offset = self.lrelu(self.L1_offset_conv3_1(L1_offset))
        L1_fea = self.L1_dcnpack_1([fea1[0], L1_offset])
        L2_fea = F.interpolate(L2_fea, scale_factor=2, mode='bilinear',
                               align_corners=False)
        L1_fea = self.L1_fea_conv_1(torch.cat([L1_fea, L2_fea], dim=1))
        y = L1_fea
        # y.append(L1_fea)
        return y


class Easy_PCD(nn.Module):
    def __init__(self, nf=64, groups=8):
        super(Easy_PCD, self).__init__()
        self.fea_L2_conv1 = nn.Conv2d(nf, nf, 3, 2, 1, bias=True)
        self.fea_L2_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.fea_L3_conv1 = nn.Conv2d(nf, nf, 3, 2, 1, bias=True)
        self.fea_L3_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.pcd_align = PCD_Align(nf=nf, groups=groups)
        # self.fusion = nn.Conv2d(2 * nf, nf, 1, 1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, f1, f2):
        # input: extracted features
        # feature size: f1 = f2 = [B, N, C, H, W]
        # print(f1.size())
        L1_fea = torch.stack([f1, f2], dim=1)
        B, N, C, H, W = L1_fea.size()
        L1_fea = L1_fea.view(-1, C, H, W)
        # L2
        L2_fea = self.lrelu(self.fea_L2_conv1(L1_fea))
        L2_fea = self.lrelu(self.fea_L2_conv2(L2_fea))
        # L3
        L3_fea = self.lrelu(self.fea_L3_conv1(L2_fea))
        L3_fea = self.lrelu(self.fea_L3_conv2(L3_fea))

        L1_fea = L1_fea.view(B, N, -1, H, W)
        L2_fea = L2_fea.view(B, N, -1, H // 2, W // 2)
        L3_fea = L3_fea.view(B, N, -1, H // 4, W // 4)

        fea1 = [L1_fea[:, 0, :, :, :].clone(), L2_fea[:, 0, :, :, :].clone(),
                L3_fea[:, 0, :, :, :].clone()]
        fea2 = [L1_fea[:, 1, :, :, :].clone(), L2_fea[:, 1, :, :, :].clone(),
                L3_fea[:, 1, :, :, :].clone()]
        aligned_fea = self.pcd_align(fea1, fea2)
        # fusion_fea = self.fusion(aligned_fea)  # [B, N, C, H, W]
        return aligned_fea


class Self_Easy_PCD(nn.Module):
    def __init__(self, nf=64, groups=8):
        super(Self_Easy_PCD, self).__init__()
        self.fea_L2_conv1 = nn.Conv2d(nf, nf, 3, 2, 1, bias=True)
        self.fea_L2_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.fea_L3_conv1 = nn.Conv2d(nf, nf, 3, 2, 1, bias=True)
        self.fea_L3_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.pcd_align = PCD_Align(nf=nf, groups=groups)
        # self.fusion = nn.Conv2d(2 * nf, nf, 1, 1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, f1):
        # input: extracted features
        # feature size: f1 = f2 = [B, N, C, H, W]
        # print(f1.size())
        # L1_fea = torch.stack([f1, f2], dim=1)
        B, N, C, H, W = f1.size()
        L1_fea = f1.view(-1, C, H, W)
        # L2
        L2_fea = self.lrelu(self.fea_L2_conv1(L1_fea))
        L2_fea = self.lrelu(self.fea_L2_conv2(L2_fea))
        # L3
        L3_fea = self.lrelu(self.fea_L3_conv1(L2_fea))
        L3_fea = self.lrelu(self.fea_L3_conv2(L3_fea))

        L1_fea = L1_fea.view(B, N, -1, H, W)
        L2_fea = L2_fea.view(B, N, -1, H // 2, W // 2)
        L3_fea = L3_fea.view(B, N, -1, H // 4, W // 4)

        ## reference feature
        ref_fea_l = [
            L1_fea[:, N//2, ...].clone(),
            L2_fea[:, N//2, ...].clone(),
            L3_fea[:, N//2, ...].clone()
        ]
        aligned_fea = []
        for i in range(N):
            nbr_fea_l = [
                L1_fea[:, i, ...].clone(),
                L2_fea[:, i, ...].clone(),
                L3_fea[:, i, ...].clone()
            ]
            a_fea = self.pcd_align(nbr_fea_l, ref_fea_l)
            aligned_fea.append(a_fea)
        aligned_fea = torch.stack(aligned_fea, dim=1)  # [B, N, C, H, W]
        return aligned_fea

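# A shape-level usage sketch for the alignment modules above (illustrative
# only: it assumes the DCNv2 extension behind DCN_sep is compiled, and it
# skips any training setup). Easy_PCD builds the 3-level pyramid itself from
# two [B, C, H, W] feature maps and returns aligned [B, C, H, W] features.
#
#     import torch
#
#     pcd = Easy_PCD(nf=64, groups=8)
#     f1 = torch.randn(2, 64, 64, 64)   # [B, C, H, W]
#     f2 = torch.randn(2, 64, 64, 64)
#     aligned = pcd(f1, f2)             # -> torch.Size([2, 64, 64, 64])
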
import os
os.chdir("../..")
print(os.getcwd())
import sys
sys.path.append('')

from envs.aslaug_v1_cont import AslaugEnv

env = None


def setup():
    global env, obs
    env = AslaugEnv(gui=True)
    os.chdir("baselines/mpc-acado")
    obs = env.reset()


def get_obs():
    global obs
    return obs.tolist()


def step(inp):
    global env
    obs, r, d, _ = env.step(inp)
    return obs.tolist()


def close():
    env.close()

import re
import sqlite3

import util
import db


def main():
    con = db.connect_db()
    tbl = "art_of_worldly_wisdom"
    db.purge_table(con, tbl)
    db.init_table(con, tbl)
    cur = con.cursor()
    sql = f"INSERT INTO {tbl} (_id, _body) VALUES (?, ?)"
    body_lines = []
    is_last_line_page_break = False
    with open("aww.txt", "r") as aww:
        for line in aww:
            line = line.rstrip()
            # Skip over "[p. nnn]" lines
            if re.search(r'^\[p\..*]', line):
                is_last_line_page_break = True
                continue
            # Skip the line after the "[p. nnn]" line
            if is_last_line_page_break and line.strip() == "":
                is_last_line_page_break = False
                continue
            # Title: "iii Keep Matters for a Time in Suspense."
            if re.search(r'^[ivxlcdm]+\s+\w+', line):
                body = "\n".join(body_lines).strip()
                print(body)
                print("---")
                cur.execute(sql, [util.gen_id(), body])
                body_lines = []
            line = line.replace("[paragraph continues] ", "")
            body_lines.append(line)
    if len(body_lines) > 0:
        body = "\n".join(body_lines).strip()
        print(body)
        print("\n---\n")
        cur.execute(sql, [util.gen_id(), body])
    con.commit()


if __name__ == "__main__":
    main()

# 5_pennyBoard.py
# A program that assigns each square on a checkerboard a number of pennies
# that grows exponentially
# Date: 9/22/2020
# Name: Ben Goldstone

square = 1
numberOfPennies = 0.01

# Constants
ONEPENNYTOGRAMS = 2.5
ONEPOUNDTOGRAMS = 453.6
ONEPOUNDOFCOPPERTODOLLARS = 3.15

# Counters (start at zero; the loop below accumulates every square,
# including the first)
totalAmountOfMoney = 0.0
totalWeight = 0.0

print("Square     Number of Pennies")
print("------     -----------------")
for number in range(1, 65):
    print(f"{square:<10} {int(numberOfPennies*100):,}")
    # adds amount of pennies up
    totalAmountOfMoney += numberOfPennies
    # calculates total weight of pennies
    totalWeight += numberOfPennies * ONEPENNYTOGRAMS
    # adds one to move onto the next square
    square += 1
    # doubles number of pennies
    numberOfPennies *= 2

# converts dollars to pennies and then converts # of pennies to weight in grams
totalWeightInLbs = totalWeight*100/ONEPOUNDTOGRAMS
print(f"Total amount of money on checkerboard ${totalAmountOfMoney:,.2f}")
print(f"Total amount of weight in pennies {totalWeightInLbs:,.2f} lbs")
print(f"Cost of copper to produce pennies ${totalWeightInLbs * ONEPOUNDOFCOPPERTODOLLARS:,.2f}")

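# Sanity check for the totals above: the pennies double each square, so the
# 64-square total is a geometric series with closed form 0.01 * (2**64 - 1)
# dollars; the weight follows by converting that penny count to grams.
expected_total = 0.01 * (2 ** 64 - 1)   # ~1.84e17 dollars
print(f"Closed-form check: ${expected_total:,.2f}")
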
"""attendanceManagement URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url,include from django.contrib import admin from attendanceManagement import views as attendance_view urlpatterns = [ url(r'^register/', attendance_view.register), url(r'^markattendance/', attendance_view.markAttendance), url(r'^checkIfAttendanceMarked/', attendance_view.checkIfAttendanceMarked), url(r'^classDetails/', attendance_view.getClassDetails), url(r'^index/', attendance_view.index), url(r'^admin/', admin.site.urls), url(r'^app/', include('app.urls')), url(r'^leaderElection/', include('leaderElection.urls')), ]
"""Tests for the to_cnf transformation.""" import unittest from tt.errors import InvalidArgumentTypeError from tt.expressions import BooleanExpression from tt.transformations import to_cnf class TestExpressionToCnf(unittest.TestCase): def assert_to_cnf_transformation(self, original, expected): """Helper for asserting correct to_cnf transformation.""" bexpr = to_cnf(original) self.assertTrue(bexpr.is_cnf) self.assertEqual(expected, str(bexpr)) def test_invalid_expr_type(self): """Test passing an invalid type as the argument.""" with self.assertRaises(InvalidArgumentTypeError): to_cnf(None) def test_from_boolean_expression_object(self): """Test transformation when passing an expr object as the argument.""" self.assert_to_cnf_transformation( BooleanExpression('A or B'), 'A or B') def test_single_operand_expression(self): """Test expressions of single operands.""" self.assert_to_cnf_transformation('A', 'A') self.assert_to_cnf_transformation('0', '0') self.assert_to_cnf_transformation('1', '1') def test_only_unary_operand_expression(self): """Test expressions with only unary operators.""" self.assert_to_cnf_transformation('not A', 'not A') self.assert_to_cnf_transformation('~A', '~A') self.assert_to_cnf_transformation('~~A', 'A') self.assert_to_cnf_transformation('~~~A', '~A') self.assert_to_cnf_transformation('~~~~~~~~~~~~~~~~~~~~~~~~A', 'A') self.assert_to_cnf_transformation( '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~A', '~A') def test_simple_xor(self): """Test simple xor expression.""" self.assert_to_cnf_transformation( 'A xor B', '(not B or not A) and (A or B)') def test_negated_xor(self): """Test negated xor expression.""" self.assert_to_cnf_transformation( 'not (A xor B)', '(not A or B) and (A or not B)') def test_simple_xnor(self): """Test simple xnor expression.""" self.assert_to_cnf_transformation( 'A xnor B', '(B or not A) and (A or not B)') def test_negated_xnor(self): """Test negated xnor expression.""" self.assert_to_cnf_transformation( '~(A xnor B)', r'(~A \/ ~B) /\ (A \/ B)') def test_simple_impl(self): """Test simple implies expression.""" self.assert_to_cnf_transformation( 'A -> B', r'~A \/ B') def test_negated_impl(self): """Test negated implies expression.""" self.assert_to_cnf_transformation( '~(A -> B)', r'A /\ ~B') def test_simple_and(self): """Test simple and expression.""" self.assert_to_cnf_transformation( 'A and B', 'A and B') def test_negated_and(self): """Test negated and expression.""" self.assert_to_cnf_transformation( 'not (A and B)', 'not A or not B') def test_simple_nand(self): """Test simple nand expression.""" self.assert_to_cnf_transformation( 'A nand B', 'not A or not B') def test_negated_nand(self): """Test negated nand expression.""" self.assert_to_cnf_transformation( 'not (A nand B)', 'A and B') def test_simple_or(self): """Test simple or expression.""" self.assert_to_cnf_transformation( 'A or B', 'A or B') def test_negated_or(self): """Test negated or expression.""" self.assert_to_cnf_transformation( '~(A || B)', r'~A /\ ~B') def test_simple_nor(self): """Test simple nor expression.""" self.assert_to_cnf_transformation( 'A nor B', 'not A and not B') def test_negated_nor(self): """Test negated nor expression.""" self.assert_to_cnf_transformation( '~(A nor B)', r'A \/ B') def test_already_cnf_exprs(self): """Test expressions that are already in CNF.""" self.assert_to_cnf_transformation( '(A or B) and (C or D) and E', '(A or B) and (C or D) and E') self.assert_to_cnf_transformation( 'A or B or C or D or E', 'A or B or C or D or E') 
self.assert_to_cnf_transformation( 'A and 1 and B', 'A and 1 and B') self.assert_to_cnf_transformation( '(A or B or C or D or E) and (A or B) and 0 and (A or E)', '(A or B or C or D or E) and (A or B) and 0 and (A or E)') def test_from_dnf(self): """Test transforming expressions in DNF.""" self.assert_to_cnf_transformation( '(A and B and C) or (D and E) or (F and G and H)', '(A or D or F) and (A or E or F) and (A or D or G) and ' '(A or E or G) and (A or D or H) and (A or E or H) and ' '(B or D or F) and (C or D or F) and (B or E or F) and ' '(C or E or F) and (B or D or G) and (C or D or G) and ' '(B or E or G) and (C or E or G) and (B or D or H) and ' '(C or D or H) and (B or E or H) and (C or E or H)') def test_mix_of_non_primitive_operators(self): """Test expressions combining different non-primitive operators.""" self.assert_to_cnf_transformation( 'A xor (B -> C -> D) nand (E iff F)', '(not A or ~B or ~C or D or not E or not F) and ' '(A or B or not E or not F) and ' '(A or C or not E or not F) and ' '(A or not D or not E or not F) and ' '(not A or ~B or ~C or D or E or F) and ' '(A or B or E or F) and ' '(A or C or E or F) and ' '(A or not D or E or F)') self.assert_to_cnf_transformation( '(A nand B) -> (C nor D) -> (E iff F)', r'(A \/ C \/ D \/ F or not E) /\ (A \/ C \/ D \/ E or not F) /\ ' r'(B \/ C \/ D \/ F or not E) /\ (B \/ C \/ D \/ E or not F)') def test_mix_of_primitive_operators(self): """Test expressions with mixed primitive operators.""" self.assert_to_cnf_transformation( 'A and (B or C and D) and not (C or not D and not E)', 'A and (B or C) and (B or D) and not C and (D or E)') self.assert_to_cnf_transformation( '(A and B and C) or not (A and D) or (A and (B or C) or ' '(D and (E or F)))', '(C or not A or not D or B or E or F) and ' '(B or not A or not D or C or E or F)') def test_deeply_nested_mixed_operators(self): """Test expressions with deeply nested operators.""" self.assert_to_cnf_transformation( '(A nand (B impl (D or E or F))) iff ~~~(A nor B nor C)', '(A or not B) and (A or not C) and ' '(A or not B or D or E or F) and ' r'(A \/ not C or not B or D or E or F) and ' '(not A or B) and (not A or not D) and (not A or not E) and ' '(not A or not F) and (not A or B or C) and ' '(not A or not D or B or C) and (not A or not E or B or C) and ' '(not A or not F or B or C)') self.assert_to_cnf_transformation( '(A nand ((B or C) iff (D nor E) iff (F or G or H)) nand C) nor D', 'A and (not B or D or E or not F or not C) and ' '(not C or D or E or not F) and ' '(not B or D or E or not G or not C) and ' '(not C or D or E or not G) and ' '(not B or D or E or not H or not C) and ' '(not C or D or E or not H) and ' '(not B or not D or F or G or H or not C) and ' '(not C or not D or F or G or H) and ' '(not B or not E or F or G or H or not C) and ' '(not C or not E or F or G or H) and not D') def test_deeply_nested_primitive_operators(self): """Test expressions with deeply nested primitive operators.""" self.assert_to_cnf_transformation( '(A or (B and (C or (D and (E or (F and (G or (H and I))))))))', '(A or B) and (A or C or D) and (A or C or E or F) and ' '(A or C or E or G or H) and (A or C or E or G or I)') self.assert_to_cnf_transformation( '(((((((((A or B) and C) or D) and E) or F) and G) or H) and I) ' 'or J)', '((((A or B or D or F or H or J) and (C or D or F or H or J)) and ' '(E or F or H or J)) and (G or H or J)) and (I or J)') self.assert_to_cnf_transformation( '((A and (B or not (C and D)) and E) or (F and G)) and ((A or B) ' 'and (C or (D and E)))', '(A or F) and 
(B or not C or not D or F) and (E or F) and ' '(A or G) and (B or not C or not D or G) and (E or G) and ' '(A or B) and (C or D) and (C or E)')
initialized = True


class TestFrozenUtf8_1:
    """\u00b6"""


class TestFrozenUtf8_2:
    """\u03c0"""


class TestFrozenUtf8_4:
    """\U0001f600"""


def main():
    print("Hello world!")


if __name__ == '__main__':
    main()

from __future__ import print_function, division, absolute_import

from datetime import timedelta
import errno
import logging
import socket
import struct
import sys

from tornado import gen
from tornado.iostream import IOStream, StreamClosedError
from tornado.tcpclient import TCPClient
from tornado.tcpserver import TCPServer

from .. import config
from ..compatibility import finalize
from ..utils import ensure_bytes
from .core import (connectors, listeners, Comm, Listener, CommClosedError,
                   parse_host_port, unparse_host_port)
from .utils import (to_frames, from_frames, get_tcp_server_address,
                    ensure_concrete_host)


logger = logging.getLogger(__name__)


def get_total_physical_memory():
    try:
        import psutil
        return psutil.virtual_memory().total / 2
    except ImportError:
        return 2e9


MAX_BUFFER_SIZE = get_total_physical_memory()


def set_tcp_timeout(stream):
    """
    Set kernel-level TCP timeout on the stream.
    """
    if stream.closed():
        return

    timeout = int(config.get('tcp-timeout', 30))

    sock = stream.socket

    # Default (unsettable) value on Windows
    # https://msdn.microsoft.com/en-us/library/windows/desktop/dd877220(v=vs.85).aspx
    nprobes = 10
    assert timeout >= nprobes + 1, "Timeout too low"

    idle = max(2, timeout // 4)
    interval = max(1, (timeout - idle) // nprobes)
    idle = timeout - interval * nprobes
    assert idle > 0

    try:
        if sys.platform.startswith("win"):
            logger.debug("Setting TCP keepalive: idle=%d, interval=%d",
                         idle, interval)
            sock.ioctl(socket.SIO_KEEPALIVE_VALS,
                       (1, idle * 1000, interval * 1000))
        else:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            try:
                TCP_KEEPIDLE = socket.TCP_KEEPIDLE
                TCP_KEEPINTVL = socket.TCP_KEEPINTVL
                TCP_KEEPCNT = socket.TCP_KEEPCNT
            except AttributeError:
                if sys.platform == "darwin":
                    TCP_KEEPIDLE = 0x10  # (named "TCP_KEEPALIVE" in C)
                    TCP_KEEPINTVL = 0x101
                    TCP_KEEPCNT = 0x102
                else:
                    TCP_KEEPIDLE = None

            if TCP_KEEPIDLE is not None:
                logger.debug("Setting TCP keepalive: nprobes=%d, idle=%d, interval=%d",
                             nprobes, idle, interval)
                sock.setsockopt(socket.SOL_TCP, TCP_KEEPCNT, nprobes)
                sock.setsockopt(socket.SOL_TCP, TCP_KEEPIDLE, idle)
                sock.setsockopt(socket.SOL_TCP, TCP_KEEPINTVL, interval)

        if sys.platform.startswith("linux"):
            logger.debug("Setting TCP user timeout: %d ms", timeout * 1000)
            TCP_USER_TIMEOUT = 18  # since Linux 2.6.37
            sock.setsockopt(socket.SOL_TCP, TCP_USER_TIMEOUT, timeout * 1000)
    except EnvironmentError as e:
        logger.warn("Could not set timeout on TCP stream: %s", e)


def convert_stream_closed_error(exc):
    """
    Re-raise StreamClosedError as CommClosedError.
    """
    if exc.real_error is not None:
        # The stream was closed because of an underlying OS error
        exc = exc.real_error
        raise CommClosedError("%s: %s" % (exc.__class__.__name__, exc))
    else:
        raise CommClosedError(str(exc))


class TCP(Comm):
    """
    An established communication based on an underlying Tornado IOStream.
    """

    def __init__(self, stream, peer_addr, deserialize=True):
        self._peer_addr = peer_addr
        self.stream = stream
        self.deserialize = deserialize
        self._finalizer = finalize(self, self._get_finalizer())
        self._finalizer.atexit = False

        stream.set_nodelay(True)
        set_tcp_timeout(stream)

    def _get_finalizer(self):
        def finalize(stream=self.stream, r=repr(self)):
            if not stream.closed():
                logger.warn("Closing dangling stream in %s" % (r,))
                stream.close()

        return finalize

    def __repr__(self):
        return "<TCP %r>" % (self._peer_addr,)

    @property
    def peer_address(self):
        return self._peer_addr

    @gen.coroutine
    def read(self, deserialize=None):
        stream = self.stream
        if stream is None:
            raise CommClosedError
        if deserialize is None:
            deserialize = self.deserialize

        try:
            n_frames = yield stream.read_bytes(8)
            n_frames = struct.unpack('Q', n_frames)[0]
            lengths = yield stream.read_bytes(8 * n_frames)
            lengths = struct.unpack('Q' * n_frames, lengths)
            frames = []
            for length in lengths:
                if length:
                    frame = yield stream.read_bytes(length)
                else:
                    frame = b''
                frames.append(frame)
        except StreamClosedError as e:
            self.stream = None
            convert_stream_closed_error(e)

        msg = from_frames(frames, deserialize=deserialize)
        raise gen.Return(msg)

    @gen.coroutine
    def write(self, msg):
        stream = self.stream
        if stream is None:
            raise CommClosedError

        # IOStream.write() only takes bytes objects, not memoryviews
        frames = [ensure_bytes(f) for f in to_frames(msg)]

        try:
            lengths = ([struct.pack('Q', len(frames))] +
                       [struct.pack('Q', len(frame)) for frame in frames])
            stream.write(b''.join(lengths))

            for frame in frames:
                # Can't wait for the write() Future as it may be lost
                # ("If write is called again before that Future has resolved,
                #   the previous future will be orphaned and will never resolve")
                stream.write(frame)
        except StreamClosedError as e:
            stream = None
            convert_stream_closed_error(e)

        raise gen.Return(sum(map(len, frames)))

    @gen.coroutine
    def close(self):
        stream, self.stream = self.stream, None
        if stream is not None and not stream.closed():
            try:
                # Flush the stream's write buffer by waiting for a last write.
                if stream.writing():
                    yield stream.write(b'')
                stream.socket.shutdown(socket.SHUT_RDWR)
            except EnvironmentError:
                pass
            finally:
                self._finalizer.detach()
                stream.close()

    def abort(self):
        stream, self.stream = self.stream, None
        if stream is not None and not stream.closed():
            self._finalizer.detach()
            stream.close()

    def closed(self):
        return self.stream is None or self.stream.closed()


class TCPConnector(object):

    @gen.coroutine
    def connect(self, address, deserialize=True):
        ip, port = parse_host_port(address)

        client = TCPClient()
        try:
            stream = yield client.connect(ip, port,
                                          max_buffer_size=MAX_BUFFER_SIZE)
        except StreamClosedError as e:
            # The socket connect() call failed
            convert_stream_closed_error(e)

        raise gen.Return(TCP(stream, 'tcp://' + address, deserialize))


class TCPListener(Listener):

    def __init__(self, address, comm_handler, deserialize=True, default_port=0):
        self.ip, self.port = parse_host_port(address, default_port)
        self.comm_handler = comm_handler
        self.deserialize = deserialize
        self.tcp_server = None
        self.bound_address = None

    def start(self):
        self.tcp_server = TCPServer(max_buffer_size=MAX_BUFFER_SIZE)
        self.tcp_server.handle_stream = self.handle_stream
        for i in range(5):
            try:
                self.tcp_server.listen(self.port, self.ip)
            except EnvironmentError as e:
                # EADDRINUSE can happen sporadically when trying to bind
                # to an ephemeral port
                if self.port != 0 or e.errno != errno.EADDRINUSE:
                    raise
                exc = e
            else:
                break
        else:
            raise exc

    def stop(self):
        tcp_server, self.tcp_server = self.tcp_server, None
        if tcp_server is not None:
            tcp_server.stop()

    def _check_started(self):
        if self.tcp_server is None:
            raise ValueError("invalid operation on non-started TCPListener")

    def get_host_port(self):
        """
        The listening address as a (host, port) tuple.
        """
        self._check_started()

        if self.bound_address is None:
            self.bound_address = get_tcp_server_address(self.tcp_server)
        # IPv6 getsockname() can return a 4-len tuple
        return self.bound_address[:2]

    @property
    def listen_address(self):
        """
        The listening address as a string.
        """
        return 'tcp://' + unparse_host_port(*self.get_host_port())

    @property
    def contact_address(self):
        """
        The contact address as a string.
        """
        host, port = self.get_host_port()
        host = ensure_concrete_host(host)
        return 'tcp://' + unparse_host_port(host, port)

    def handle_stream(self, stream, address):
        address = 'tcp://' + unparse_host_port(*address[:2])
        comm = TCP(stream, address, self.deserialize)
        self.comm_handler(comm)


connectors['tcp'] = TCPConnector()
listeners['tcp'] = TCPListener

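# A stand-alone sketch of the wire format used by TCP.read()/TCP.write()
# above: one little-endian uint64 frame count, then one uint64 length per
# frame, then the raw frame bytes (only standard-library struct is used).
#
#     import struct
#
#     frames = [b'header', b'payload']
#     header = struct.pack('Q', len(frames)) + b''.join(
#         struct.pack('Q', len(f)) for f in frames)
#     wire = header + b''.join(frames)
#
#     # Decoding mirrors TCP.read():
#     n = struct.unpack('Q', wire[:8])[0]
#     lengths = struct.unpack('Q' * n, wire[8:8 + 8 * n])
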
from output.models.nist_data.atomic.integer.schema_instance.nistschema_sv_iv_atomic_integer_max_inclusive_3_xsd.nistschema_sv_iv_atomic_integer_max_inclusive_3 import NistschemaSvIvAtomicIntegerMaxInclusive3

__all__ = [
    "NistschemaSvIvAtomicIntegerMaxInclusive3",
]

import pygame
import copy

from vector import Vec2, Vec4
from pixel import Pixel
from colors import *
from mymath import clamp, get_line_pixels


class Canvas:
    def __init__(self, x, y, width, height, zoom=1.00):
        self.pos = Vec2(x, y)
        self.size = Vec2(width, height)
        self.zoom = zoom
        self.scaled_size = Vec2(self.size.x * self.zoom, self.size.y * self.zoom)
        self.origin = Vec2(self.size.x / 2, self.size.y / 2)
        self.scaled_origin = Vec2(self.origin.x * self.zoom, self.origin.y * self.zoom)

        self.surface = pygame.Surface((self.size.x, self.size.y))
        self.current_surface = self.surface

        self.pixels = []
        self.changed_pixels = []
        self.buffer = []
        self.buffers = []

        self.pressed_count = 0
        # Start at 2: a value of 1 means just released and 0 means pressed
        self.released_count = 2

        self.start_pos = Vec2(-1, -1)
        self.end_pos = Vec2(-1, -1)

        self.selected_color = BLACK.copy()

        self.init_pixels()

    def init_pixels(self, surface=None):
        self.pixels.clear()
        if surface is None:
            # Allocate memory for the pixels
            for y in range(self.size.y):
                self.pixels.append([])
                for x in range(self.size.x):
                    self.pixels[y].append(None)
            self.clear(WHITE)
        else:
            # Load image (for example .png)
            self.surface = surface.copy()
            self.size = Vec2(self.surface.get_width(), self.surface.get_height())
            self.scaled_size = Vec2(self.size.x * self.zoom, self.size.y * self.zoom)
            self.origin = Vec2(self.size.x / 2, self.size.y / 2)
            self.scaled_origin = Vec2(self.origin.x * self.zoom, self.origin.y * self.zoom)
            # Rows run over the height, columns over the width
            for y in range(self.size.y):
                self.pixels.append([])
                for x in range(self.size.x):
                    color = surface.get_at((x, y))
                    self.pixels[y].append(Pixel(
                        Vec2(x, y),
                        color.copy()
                    ))
            self.update_pixels()

    def set_zoom(self, zoom):
        self.zoom = zoom
        self.scaled_origin.x = self.origin.x * self.zoom
        self.scaled_origin.y = self.origin.y * self.zoom
        self.scaled_size.x = self.size.x * self.zoom
        self.scaled_size.y = self.size.y * self.zoom

    def scale_zoom(self, percent):
        self.zoom *= percent
        self.scaled_origin.x = self.origin.x * self.zoom
        self.scaled_origin.y = self.origin.y * self.zoom
        self.scaled_size.x = self.size.x * self.zoom
        self.scaled_size.y = self.size.y * self.zoom

    def is_mouse_inside(self, x, y):
        if (x >= self.pos.x - self.scaled_origin.x and
                x <= self.pos.x - self.scaled_origin.x + self.scaled_size.x) and \
           (y >= self.pos.y - self.scaled_origin.y and
                y <= self.pos.y - self.scaled_origin.y + self.scaled_size.y):
            return True
        return False

    def fill_pixel(self, pixel):
        self.pixels[pixel.pos.y][pixel.pos.x] = pixel.copy()
        self.changed_pixels.append(pixel.copy())

    def fill_line(self, start_pos: Vec2, end_pos: Vec2, color: Vec4):
        line_pixels = get_line_pixels(start_pos, end_pos)
        for vec in line_pixels:
            self.fill_pixel(Pixel(
                Vec2(vec.x, vec.y),
                color.copy()
            ))

    def get_inside_mouse_pos(self, x, y):
        relative_pos = Vec2(
            x - (self.pos.x - self.scaled_origin.x),
            y - (self.pos.y - self.scaled_origin.y)
        )
        converted_pos = Vec2(
            clamp(int(relative_pos.x / self.zoom), 0, self.size.x - 1),
            clamp(int(relative_pos.y / self.zoom), 0, self.size.y - 1)
        )
        return converted_pos

    def clear(self, color: Vec4):
        for y in range(self.size.y):
            for x in range(self.size.x):
                self.pixels[y][x] = Pixel(Vec2(x, y), color.copy())
        self.surface.fill(color.as_tuple())
        self.changed_pixels.clear()

    def update_pixels(self):
        for y in range(self.size.y):
            for x in range(self.size.x):
                pixel = self.pixels[y][x]
                pygame.draw.rect(self.surface, pixel.as_tuple(), (x, y, 1, 1))
        self.changed_pixels.clear()

    def update_changes(self):
        for pixel in self.changed_pixels:
            pygame.draw.rect(self.surface, pixel.color.as_tuple(),
                             (pixel.pos.x, pixel.pos.y, 1, 1))
        self.changed_pixels.clear()

    def undo(self):
        # TODO: Get the correct color
        self.clear(WHITE)
        self.changed_pixels.clear()
        for buffer in self.buffers[:-1]:
            for sub_buffer in buffer:
                for pixel in sub_buffer:
                    self.fill_pixel(pixel)
        if len(self.buffers) >= 1:
            self.buffers.pop(-1)
        self.update_changes()

    def update(self):
        mouse_buttons = pygame.mouse.get_pressed()
        mouse_pos = Vec2(*pygame.mouse.get_pos())

        if mouse_buttons[0]:
            self.released_count = 0
            if self.is_mouse_inside(mouse_pos.x, mouse_pos.y):
                self.released_count = 0
                if self.pressed_count == 0:
                    self.start_pos = self.get_inside_mouse_pos(mouse_pos.x, mouse_pos.y)
                    self.end_pos = self.get_inside_mouse_pos(mouse_pos.x, mouse_pos.y)
                else:
                    # TODO: avoid adding duplicate pixels to the same buffer
                    self.start_pos = self.end_pos.copy()
                    self.end_pos = self.get_inside_mouse_pos(mouse_pos.x, mouse_pos.y)
                self.fill_line(self.start_pos, self.end_pos, self.selected_color)
                self.buffer.append(copy.deepcopy(self.changed_pixels))
                self.update_changes()
            self.pressed_count += 1
        else:
            self.pressed_count = 0
            if self.released_count == 1:
                self.buffers.append(copy.deepcopy(self.buffer))
                # Show that repeated pixels are being added to the buffer
                print(len(self.buffer))
                self.buffer.clear()
            self.released_count += 1

    def draw(self, win):
        # If zoom has not been changed there is no need to resize
        if self.scaled_size.x != self.size.x or self.scaled_size.y != self.size.y:
            self.current_surface = pygame.transform.scale(
                self.surface, (round(self.scaled_size.x), round(self.scaled_size.y)))
        win.blit(self.current_surface,
                 (self.pos.x - self.scaled_origin.x, self.pos.y - self.scaled_origin.y))

from __future__ import annotations

from threading import Thread
from typing import Callable, Optional

from .interfaces import ITimeoutSendService
from ..clock import IClock
from ..service import IService, IServiceManager
from ..send import ISendService
from ..util.Atomic import Atomic
from ..util.InterruptableSleep import InterruptableSleep


class TimeoutSendService(IService, ITimeoutSendService):
    def __init__(
        self,
        clock: IClock,
        send_service: ISendService,
        service_manager: IServiceManager,
        timeout_seconds: float,
        message_callback: Callable[[], Optional[bytes]] = lambda: None,
    ) -> None:
        self.interruptable_sleep = InterruptableSleep(clock)
        self.timeout_seconds = timeout_seconds
        self.message_callback = Atomic(message_callback)
        self.send_count = 0
        self.send_service = send_service
        self.should_run = True
        self.thread = Thread(target=self.run)
        service_manager.add_service(self)

    def get_send_count(self) -> int:
        return self.send_count

    def get_service_name(self) -> str:
        return __name__

    def join_service(self, timeout_seconds: Optional[float] = None) -> bool:
        self.thread.join(timeout_seconds)
        return self.thread.is_alive()

    def run(self) -> None:
        should_send = True
        while self.should_run:
            if should_send:
                with self.message_callback as (message_callback, _):
                    self._send(message_callback())
            should_send = self.interruptable_sleep.sleep(self.timeout_seconds)

    def set_and_send_immediately(self, message_callback: Callable[[], Optional[bytes]]) -> Optional[bytes]:
        with self.message_callback as (_, set_message_callback):
            message = message_callback()
            self._send(message)
            set_message_callback(message_callback)
        self.interruptable_sleep.interrupt()
        return message

    def _send(self, message: Optional[bytes]) -> None:
        if message is not None:
            self.send_count += 1
            self.send_service.send(message)

    def start_service(self) -> None:
        self.thread.start()

    def stop_service(self) -> None:
        self.should_run = False
        self.interruptable_sleep.interrupt()

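# Usage sketch (hypothetical: clock, send_service and service_manager stand
# for concrete implementations of the IClock, ISendService and
# IServiceManager interfaces imported above; the payloads are illustrative).
#
#     service = TimeoutSendService(
#         clock, send_service, service_manager,
#         timeout_seconds=5.0,
#         message_callback=lambda: b"heartbeat",
#     )
#     service.start_service()
#     service.set_and_send_immediately(lambda: b"state-changed")
#     ...
#     service.stop_service()
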
""" IOEcho device, receive GPIO, send them thought TCP to target """ __all__ = ['IOEcho'] __version__ = '0.1' from .deviceBase import DeviceBase from time import sleep from lib.common import PrintColor import requests import json from socket import * try: import RPi.GPIO as GPIO is_running_on_pi = True except RuntimeError: print("Starting without GPIO") is_running_on_pi = False pass class IOEcho(DeviceBase): pin_and_label_matrix = '' def __init__(self, name, pin_and_label_matrix, target_address='', target_port=9100): DeviceBase.__init__(self, name) DeviceBase.type = "IOEcho" if is_running_on_pi == True: print("Starting IOEcho device...") self.target_address = target_address self.target_port = target_port """ Set pin numbering mode """ GPIO.setmode(GPIO.BOARD) """ TODO : Add dynamic configuration, or stroe pin map in a file """ self.pin_and_label_matrix = [ {'pin': 3, 'label': 'S011', 'value': 1}, {'pin': 5, 'label': 'S012', 'value': 1}, {'pin': 7, 'label': 'S013', 'value': 1}, {'pin': 11, 'label': 'S021', 'value': 1}, {'pin': 13, 'label': 'S022', 'value': 1}, {'pin': 15, 'label': 'S023', 'value': 1}, {'pin': 19, 'label': 'S031', 'value': 1}, {'pin': 21, 'label': 'S032', 'value': 1}, {'pin': 23, 'label': 'S033', 'value': 1} ] for pin_and_label in self.pin_and_label_matrix: """ Should add a physical pull up """ GPIO.setup(pin_and_label['pin'], GPIO.IN) """ Set falling edge detection, callback and debounce time to 300 ms """ GPIO.add_event_detect(pin_and_label['pin'], GPIO.FALLING, callback=self._on_data_received, bouncetime=300) print("Pin " + str(pin_and_label['pin']) + " initialized as input.") self.pre_start_diagnose() #Overrided from DeviceBase def main_loop(self): """ Starts RFID reading loop """ try: print("Starting controller...") if is_running_on_pi == True: while self.must_stop == False : if self.is_zone_enabled == True: self.is_running = True """ Controller is enable, start reading """ #Prevent over-header sleep(1) else: """ Controller is disable, wait for a valid configuration """ break finally: print("Reading loop stopped") def pre_start_diagnose(self): for pin_and_label in self.pin_and_label_matrix: if pin_and_label['value'] != GPIO.input(pin_and_label['pin']): print(str(PrintColor.WARNING) + "[W] Pin " + str(pin_and_label['pin']) + " is not set to initialization value.") #Overrided from DeviceBase def get_status(self): for pin_and_label in self.pin_and_label_matrix: pin_and_label['value'] = GPIO.input(pin_and_label['pin']) return str(self.pin_and_label_matrix) def _on_data_received(self, gpio): if is_running_on_pi == True: try: """ Send GPIO signal to open the door """ for pin_and_label in self.pin_and_label_matrix: if pin_and_label['pin'] == gpio: self.echo_signal_to_target(pin_and_label['label']) break except RuntimeError: pass def echo_signal_to_target(self, signal): print("Sending " + str(signal) + " signal to " + str(self.target_address) + ":" + str(self.target_port)) client_socket = socket(AF_INET, SOCK_DGRAM) client_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) client_socket.sendto(bytes(str(signal).encode('utf-8')), (self.target_address, self.target_port)) #Overrided from DeviceBase def stop_loop(self): if is_running_on_pi == True: GPIO.cleanup() self.must_stop = True
import os import sys import numpy as np import csv import matplotlib.pyplot as plt from src import readFiles as rf from os import listdir import re def getDirectoriesAtPath(path): return [name for name in os.listdir(path) if os.path.isdir(os.path.join(path, name))] # Function to read the execution time an application at a given path def readExecTimeKernels(data_path, bench, gpu): files = listdir('%s/%s/' % (data_path, bench)) files.sort() total_execution_all_kernels_aux = [] #cycles accross the files at the given path for file in files: #only interested in nvprof output files if file.startswith("output_nvprof_"): #not interested in other metrics than the execution time if not file.startswith("output_nvprof_metrics_"): total_time = [] time_per_kernel = {} num_calls = [] avg_time_call = [] max_time_call = [] with open('%s/%s/%s' % (data_path, bench, file)) as csvfile: spamreader = csv.reader(csvfile, delimiter=',') for row_id, row in enumerate(spamreader): if row_id > 4: # header of the file # only accounts kernels (in CUDA 10.0 only the kernels don't have the CUDA word in the output line) if not 'CUDA' in row[7]: total_time.append(float(row[2])) time_per_kernel[row[7]] = float(row[2]) num_calls.append(float(row[3])) avg_time_call.append(float(row[4])) max_time_call.append(float(row[6])) num_calls = np.asarray(num_calls, dtype=np.int32) total_time = np.asarray(total_time, dtype=np.float32) avg_time_call = np.asarray(avg_time_call, dtype=np.float32) max_time_call = np.asarray(max_time_call, dtype=np.float32) total_execution_all_kernels_aux.append(np.sum(total_time)) if len(total_execution_all_kernels_aux) == 0: print('Missing execution times for %s benchmark' % (bench)) sys.exit() total_execution_all_kernels = np.asarray(total_execution_all_kernels_aux) return np.mean(total_execution_all_kernels), time_per_kernel def checkEqual1(iterator): iterator = iter(iterator) try: first = next(iterator) except StopIteration: return True return all(first == rest for rest in iterator) # Function to read the performance metrics files of an application at a given path def readNvprofFile(data_path, bench, gpu, list_event_names, time_per_kernel): files = listdir('%s/%s/' % (data_path, bench)) files.sort() list_events = {} total_execution_all_kernels_aux = [] for file in files: if file.startswith("output_nvprof_metrics_"): with open('%s/%s/%s' % (data_path, bench, file)) as csvfile: spamreader = csv.reader(csvfile, delimiter=',') for row_id, row in enumerate(spamreader): if row_id > 5: if 'overflow' in row[0]: pass else: event = row[3] if event in list_event_names: kernel = row[1] if event not in list_events: num_kernels = 1 list_events[event] = {} else: num_kernels += 1 if 'utilization' in event: list_events[event][kernel] = int( row[7].split()[1][1:-1]) else: # print(row[7]) aux_value = float( (re.findall('\d+\.\d+', row[7]))[0]) if 'GB/s' in row[7]: aux_value = aux_value * 1000000 elif 'MB/s' in row[7]: aux_value = aux_value * 1000 elif 'KB/s' in row[7]: aux_value = aux_value elif 'B/s' in row[7]: aux_value = aux_value / 1000.0 list_events[event][kernel] = aux_value #confirms if all values were Measured if checkEqual1([list_events[key].keys() for key in list_events]) == False: print('Missing values (possible overflow) for benchmarks: %s' % bench) #currently the program ends if there are missing values for event in list_event_names: if event not in list_events.keys(): print(list_events) print('Missing values for event \'%s\' (possible overflow) for benchmarks: %s' % ( event, bench)) sys.exit() 
aggregated_list = {} for event_id, event in enumerate(list_event_names): aggregated_list[event] = 0.0 total_time = 0 for kernel_name in time_per_kernel.keys(): aggregated_list[event] += time_per_kernel[kernel_name] * \ list_events[event][kernel_name] total_time += time_per_kernel[kernel_name] aggregated_list[event] = aggregated_list[event] / total_time return aggregated_list # Function that cycles across all benchmarks at a given path # and reads their profiling data # # OUTPUTS: lists_data dictionary with keys: # lists_data["time"]: list of the execution times of the diferent kernels # lists_data["pow"]: list of the power consumptions of the diferent kernels # lists_data["energy"]: list of the energies of the diferent kernels def readListsData(benchs, clocks, benchs_data_path, gpu_name, idle_powers): mem_clocks = clocks['mem_clocks'] core_clocks = clocks['core_clocks'] num_mem_clocks = clocks['num_mem_clocks'] num_core_clocks = clocks['num_core_clocks'] max_num_core_clocks = np.max(num_core_clocks) num_benchs = len(benchs) list_pow = [None]*num_benchs list_time = [None]*num_benchs list_energy = [None]*num_benchs for bench_id, bench in enumerate(benchs): list_pow[bench_id] = np.zeros( (num_mem_clocks, max_num_core_clocks), dtype=np.float32) list_time[bench_id] = np.zeros( (num_mem_clocks, max_num_core_clocks), dtype=np.float32) list_energy[bench_id] = np.zeros( (num_mem_clocks, max_num_core_clocks), dtype=np.float32) for clock_mem_id, clock_mem in enumerate(mem_clocks): for clock_core_id, clock_core in enumerate(core_clocks[clock_mem_id]): avg_pow_bench = rf.readPowerBench(benchs_data_path, '%s/%d/%d' % ( bench, clock_mem, clock_core), idle_powers[clock_mem_id][clock_core_id]) time_bench = rf.readExecTime( benchs_data_path, '%s/%d/%d' % (bench, clock_mem, clock_core), gpu_name) list_pow[bench_id][clock_mem_id, clock_core_id] = avg_pow_bench list_time[bench_id][clock_mem_id, clock_core_id] = (time_bench/1000.0) list_energy[bench_id][clock_mem_id, clock_core_id] = avg_pow_bench*(time_bench/1000.0) lists_data = {'time': list_time, 'pow': list_pow, 'energy': list_energy} return lists_data # Creates and completes the output file aggregating the dataset of all considered kernels # Format: # line 1: benchmark_name_0 # line 2: clock_mem_0, clock_core_0, time_sample, power_sample, energy_sample # line 3: clock_mem_0, clock_core_1, time_sample, power_sample, energy_sample # ... 
def writeOutputFile(benchs_data_path, lists, benchs, clocks, gpu_name): mem_clocks = clocks['mem_clocks'] core_clocks = clocks['core_clocks'] list_time = lists['time'] list_pow = lists['pow'] list_energy = lists['energy'] out = open("%s/aggregated_dataset_%s.csv" % (benchs_data_path, gpu_name), "w") # output file for bench_id, bench in enumerate(benchs): out.write("%s\n" % (bench)) for clock_mem_id, clock_mem in enumerate(mem_clocks): for clock_core_id, clock_core in enumerate(core_clocks[clock_mem_id]): out.write("%d,%d,%f,%f,%f\n" % (clock_mem, clock_core, list_time[bench_id][clock_mem_id, clock_core_id], list_pow[bench_id][clock_mem_id, clock_core_id], list_energy[bench_id][clock_mem_id, clock_core_id])) out.close() # Function to print to output display the lists information def printListsData(benchs, clocks, lists, energy_mode): mem_clocks = clocks['mem_clocks'] core_clocks = clocks['core_clocks'] list_time = lists['time'] list_pow = lists['pow'] list_energy = lists['energy'] maxwidth = len(max(benchs, key=len)) for bench_id, bench in enumerate(benchs): if bench_id == 0: header_line = '{message: >{width}}'.format( message='Clock Mem|', width=maxwidth+21) for clock_core_id, clock_core in enumerate(core_clocks[0]): if clock_core_id > 0: header_line += '|' if energy_mode == True: header_line += '{clock: >{width}}'.format( clock='%d MHz' % clock_core, width=11) else: header_line += '{clock: >{width}}'.format( clock='%d MHz' % clock_core, width=13) print(header_line) bench_line = '{message: >{width}}: '.format( message=bench, width=maxwidth+2) for clock_mem_id, clock_mem in enumerate(mem_clocks): if clock_mem_id > 0: bench_line += '\n{message: >{width}}'.format( message="%s MHz| " % clock_mem, width=maxwidth+22) else: bench_line += '%4d MHz| ' % clock_mem for clock_core_id, clock_core in enumerate(core_clocks[clock_mem_id]): if clock_core_id > 0: bench_line += '| ' if energy_mode == True: bench_line += '{energy:8.1f} J'.format( energy=list_energy[bench_id][clock_mem_id, clock_core_id]) else: bench_line += '{time:6.1f},{power:5.1f}'.format( time=list_time[bench_id][clock_mem_id, clock_core_id], power=list_pow[bench_id][clock_mem_id, clock_core_id]) print(bench_line) # Function to create 3 output plots with the time, power and energy over different frequencies # (core and memory) across all the considered benchmarks # vertical axis represents the considered metric (time, power or energy depending on the plot) # horizontal axis displays the core frequency values, and different subplots correspond to # different memory frequencies. Each line in a subplot corresponds to a different benchmark. 
def plotValues(name, lists, clocks, benchmarks, normalized_t, normalized_p, normalized_e, type):
    mem_clocks = clocks['mem_clocks']
    core_clocks = clocks['core_clocks']

    list_data_time = lists['time']
    list_data_pow = lists['pow']
    list_data_energy = lists['energy']

    fig_t = plt.figure(1)
    axes_t = fig_t.subplots(clocks['num_mem_clocks'], 1, sharex=True)
    fig_p = plt.figure(2)
    axes_p = fig_p.subplots(clocks['num_mem_clocks'], 1, sharex=True)
    fig_e = plt.figure(3)
    axes_e = fig_e.subplots(clocks['num_mem_clocks'], 1, sharex=True)

    count_bad = 0
    # plot the time, power and energy lines
    for bench_id, bench in enumerate(benchmarks):
        good_bench = True
        for clock_mem_id, clock_mem in enumerate(mem_clocks):
            if clocks['num_mem_clocks'] > 1:
                axis_t = axes_t[clock_mem_id]
                axis_p = axes_p[clock_mem_id]
                axis_e = axes_e[clock_mem_id]
            else:
                axis_t = axes_t
                axis_p = axes_p
                axis_e = axes_e

            # this cycle checks whether the gathered samples display a consistent behaviour,
            # i.e. whether the time and power curves of an application are monotonic when
            # the core frequency decreases
            for clock_core_id, clock_core in enumerate(core_clocks[clock_mem_id]):
                if clock_core_id + 1 < clocks['num_core_clocks'][clock_mem_id] and list_data_time[bench_id][clock_mem_id, clock_core_id] < list_data_time[bench_id][clock_mem_id, clock_core_id+1]:
                    good_bench = False
                    count_bad += 1
                    break
                if clock_core_id + 1 < clocks['num_core_clocks'][clock_mem_id] and list_data_pow[bench_id][clock_mem_id, clock_core_id] > list_data_pow[bench_id][clock_mem_id, clock_core_id+1]:
                    good_bench = False
                    count_bad += 1
                    break

            # type determines the benchmarks to be plotted (type=0 plots all benchmarks;
            # type=1 plots only good benchmarks; and type=2 plots only bad benchmarks)
            if (type == 0) or (type == 1 and good_bench == True) or (type == 2 and good_bench == False):
                if normalized_t == True:
                    axis_t.plot(core_clocks[clock_mem_id], list_data_time[bench_id][clock_mem_id, :]/list_data_time[bench_id][-1, -1], linestyle='--', label=bench)
                else:
                    axis_t.plot(core_clocks[clock_mem_id], list_data_time[bench_id]
                                [clock_mem_id, :], linestyle='--', label=bench)
                if normalized_p == True:
                    axis_p.plot(core_clocks[clock_mem_id], list_data_pow[bench_id][clock_mem_id, :]/list_data_pow[bench_id][-1, -1], linestyle='--', label=bench)
                else:
                    axis_p.plot(core_clocks[clock_mem_id], list_data_pow[bench_id]
                                [clock_mem_id, :], linestyle='--', label=bench)
                if normalized_e == True:
                    axis_e.plot(core_clocks[clock_mem_id], list_data_energy[bench_id][clock_mem_id, :]/list_data_energy[bench_id][-1, -1], linestyle='--', label=bench)
                else:
                    axis_e.plot(core_clocks[clock_mem_id], list_data_energy[bench_id]
                                [clock_mem_id, :], linestyle='--', label=bench)

    for clock_mem_id, clock_mem in enumerate(mem_clocks):
        # secondary y-axis for the energy subplot (created but not used further)
        if clocks['num_mem_clocks'] > 1:
            ax2_aux = axes_e[clock_mem_id].twinx()
        else:
            ax2_aux = axes_e.twinx()

    if type == 2:
        name = 'bad_' + name
    elif type == 1:
        name = 'good_' + name
    else:
        name = 'all_' + name

    if clocks['num_mem_clocks'] > 1:
        axes_t[0].set_title('time_%s' % name)
        axes_p[0].set_title('power_%s' % name)
        axes_e[0].set_title('energy_%s' % name)
    else:
        axes_t.set_title('time_%s' % name)
        axes_p.set_title('power_%s' % name)
        axes_e.set_title('energy_%s' % name)

    print('bad benchmarks %s: %d' % (name, count_bad))

    fig_t.savefig('time_%s.pdf' % (name))
    fig_p.savefig('pow_%s.pdf' % (name))
    fig_e.savefig('energy_%s.pdf' % (name))
    plt.close("all")


def main():
    """Main function."""
    import argparse
    import sys

    from src import globalStuff as gls
    from src.globalStuff import printing, output_dir_train, list_event_names
    from src.readFiles import readIdlePowers, getBenchmarksAvailable

    use_test = False
    gls.init()

    parser = argparse.ArgumentParser()
    # path to the microbenchmarks dataset
    parser.add_argument('benchs_data_path', type=str)
    parser.add_argument('gpu', type=str)  # gpu name
    # path to the standard benchmarks dataset
    parser.add_argument('--test_data_path', type=str, default='')
    # file with the microbenchmark names
    parser.add_argument('--benchs_file', type=str, default='all')
    # file with the standard benchmarks names
    parser.add_argument('--benchs_test_file', type=str, default='all')
    parser.add_argument('--tdp', type=int, default=250)  # TDP
    parser.add_argument('--v', action='store_const', const=True, default=False)  # verbose mode
    # calculates energy values from time and power samples
    parser.add_argument('--e', action='store_const', const=False, default=True)
    # plot/print only bad benchmarks (default is ALL benchmarks)
    parser.add_argument('--bad', action='store_const', const=True, default=False)
    # plot/print only good benchmarks (default is ALL benchmarks)
    parser.add_argument('--good', action='store_const', const=True, default=False)
    # create output file (file aggregated_dataset_<gpu_name>.csv)
    parser.add_argument('--o', action='store_const', const=True, default=False)
    # also reads performance counters samples
    parser.add_argument('--pc', action='store_const', const=True, default=False)

    args = vars(parser.parse_args())
    print(args)

    benchs_data_path = args['benchs_data_path']
    gpu_name = args['gpu']
    test_data_path = args['test_data_path']
    benchs_file = args['benchs_file']
    benchs_test_file = args['benchs_test_file']
    tdp = args['tdp']
    verbose = args['v']
    energy_mode = args['e']
    bad_values_mode = args['bad']
    good_values_mode = args['good']
    create_output_file = args['o']

    ubenchmarks = getBenchmarksAvailable(
        gls.benchmarks_info_folder, benchs_file, benchs_data_path)
    ubenchmarks.sort()
    num_ubenchmarks = len(ubenchmarks)

    print("\n=============================Reading Data=============================\n")
    print('Number of microbenchmarks: %d' % (num_ubenchmarks))
    print('Benchs file: %s' % benchs_file)

    clocks = rf.getClocksGPU(gpu_name)

    idle_powers = readIdlePowers(clocks, gpu_name)

    lists_data_ubench = readListsData(
        ubenchmarks, clocks, benchs_data_path, gpu_name, idle_powers)

    if test_data_path != '':
        use_test = True
        test_benchmarks = getBenchmarksAvailable(
            gls.benchmarks_info_folder, benchs_test_file, test_data_path)
        test_benchmarks.sort()
        num_test_benchmarks = len(test_benchmarks)
        print('\nNumber of testing benchmarks: %d' % (num_test_benchmarks))
        print('Test Benchs file: %s' % benchs_test_file)
        lists_data_testbench = readListsData(
            test_benchmarks, clocks, test_data_path, gpu_name, idle_powers)

    # print read values
    if verbose == True:
        # if core clocks for all memory levels are the same
        if clocks['core_clocks'].count(clocks['core_clocks'][0]) == len(clocks['core_clocks']):
            print(
                "\n=============================Microbenchmarks=============================\n")
            printListsData(ubenchmarks, clocks, lists_data_ubench, energy_mode)
            if test_data_path != '':
                print(
                    "\n\n=============================Test Benchmarks=============================\n")
                printListsData(test_benchmarks, clocks, lists_data_testbench, energy_mode)
        else:
            print("Cannot print list of values")

    print("\n=============================The End=============================\n")

    if bad_values_mode == True:
        type = 2
    elif good_values_mode:
        type = 1
    else:
        type = 0

    # choose if the output plots have the values normalized or not
    normalized_t = True
    normalized_p = False
    normalized_e = True

    plotValues('micro_%s' % gpu_name, lists_data_ubench, clocks,
               ubenchmarks, normalized_t, normalized_p, normalized_e, type)
    # only plot/write the test benchmarks if a test dataset was actually given,
    # otherwise lists_data_testbench is undefined
    if use_test:
        plotValues('test_%s' % gpu_name, lists_data_testbench, clocks,
                   test_benchmarks, normalized_t, normalized_p, normalized_e, type)

    if create_output_file == True:
        writeOutputFile(benchs_data_path, lists_data_ubench,
                        ubenchmarks, clocks, gpu_name)
        if use_test:
            writeOutputFile(test_data_path, lists_data_testbench,
                            test_benchmarks, clocks, gpu_name)


if __name__ == "__main__":
    main()
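# Example invocation of this script (the script filename, dataset paths and GPU
# name below are illustrative, not taken from the repository):
#
#   python aggregate_dataset.py /data/microbenchmarks gtx1080 \
#       --test_data_path /data/benchmarks --v --o
#
# This reads the microbenchmark and test-benchmark datasets, prints the
# time/power tables (--v) and writes aggregated_dataset_gtx1080.csv (--o).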
nilq/baby-python
python
class NoSuchOmekaClassicItemException(Exception): pass
nilq/baby-python
python
'''
1) Add an item to your cart
2) Proceed to checkout
3) Quit

$ 1
Enter item description: Red Bull
Enter item quantity: 48
Price per unit: $2.00
$ 2
48 - Red Bull @ $2.00 ea
Subtotal: $96.00
Tax (4.712%): $4.52
Total: $100.52

age = raw_input("How old is you? ")
height = raw_input("How tall is u? ")
weight = raw_input("How much mass is u? ")

print 'so youz %r old, %r tall, and %r heavy.'\
 %(age, height, weight)
'''

menu = ['Checkout', 'Hershey\'s Chocolate Bar',
        'Red Bull Energy Drink', 'Monster Energy Drink']
prices = [' ', 1.00, 2.00, 2.00]
cart = []


def Menu():
    print("Below is the menu, enter the number corresponding to "
          "the item you would like, 0 is to checkout")
    print("0. Checkout")
    print("1. Hershey's Chocolate Bar  $1.00")
    print("2. Red Bull Energy Drink    $2.00")
    print("3. Monster Energy Drink     $2.00")
    item = input("> ")
    try:
        if int(item) == 0:
            Checkout()
        elif 1 <= int(item) <= 3:
            cart.append(int(item))
            print("Please specify a quantity of the item you would like ")
            qty = input("> ")
            cart.append(int(qty))
            Menu()
        else:
            print("Please specify a number that is in our menu.")
            Menu()
    except ValueError:
        print("Please enter an integer.")
        Menu()


def Checkout():
    total = 0.0
    # cart stores alternating (item number, quantity) pairs
    for i in range(0, len(cart), 2):
        item, qty = cart[i], cart[i + 1]
        print("You have ordered %s %s" % (qty, menu[item]))
        total += prices[item] * qty
    print("Your total is $%.2f" % total)


if __name__ == '__main__':
    Menu()
nilq/baby-python
python
from django.template.defaulttags import register @register.filter def index(sequence, position): return sequence[position]
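# Hypothetical usage sketch (the tag library name and context variables below
# are illustrative; only the filter itself comes from this module):
#
#   {% load template_filters %}
#   {{ scores|index:forloop.counter0 }}
#
# The filter can also be called directly in Python:
#
#   index(['a', 'b', 'c'], 1)  # -> 'b'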
nilq/baby-python
python
# Noysim -- Noise simulation tools for Aimsun. # Copyright (c) 2010-2011 by Bert De Coensel, Ghent University & Griffith University. # # Run the viewer as a windows program import noysim.viewer app = noysim.viewer.wx.PySimpleApp() app.frame = noysim.viewer.ViewerFrame() app.frame.Show() app.MainLoop()
nilq/baby-python
python
__all__ = ('reduce',) from asyncio import create_task from ._create_channel import create_channel async def _reduce(out, fn, ch, init): """Reduce items from channel.""" acc = init async for x in ch: acc = fn(acc, x) await out.put(acc) out.close() def reduce(fn, ch, init=None, *, _create_channel=create_channel, _create_task=create_task): """Reduce items taken from channel. Returns a new channel which will receive the result, or init if channel closes without yielding an item. fn will receive two arguments: init and the first item taken from channel, then that result and the second item taken from channel, and so on until the channel closes. The final result will be put on the returned channel. """ out = _create_channel() _create_task(_reduce(out, fn, ch, init)) return out
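# A minimal usage sketch, not part of the original module. It assumes only the
# channel behaviour already relied on by _reduce above: an awaitable put(), a
# close() method, and support for ``async for``. The demo sums four numbers
# and reads the single final result from the returned channel.
#
#   import asyncio
#   import operator
#
#   async def demo():
#       ch = create_channel()
#       for n in (1, 2, 3, 4):
#           await ch.put(n)
#       ch.close()
#       result_ch = reduce(operator.add, ch, init=0)
#       async for total in result_ch:
#           print(total)  # 10
#
#   asyncio.run(demo())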
nilq/baby-python
python
class WizardPlayer:

    def __init__(self, player_id, np_random):
        ''' Initialize a player.

        Args:
            player_id (int): The id of the player
        '''
        self.np_random = np_random
        self.player_id = player_id
        self.hand = []
        self.stack = []  # might need to be changed.
        self.tricks_predicted = None

    def get_player_id(self):
        ''' Return the id of the player
        '''
        return self.player_id
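# Minimal instantiation sketch (the seeded RandomState is illustrative; in the
# real environment np_random would be supplied by the game framework):
#
#   import numpy as np
#   player = WizardPlayer(player_id=0, np_random=np.random.RandomState(42))
#   player.hand.append('card')          # hand and stack start empty
#   assert player.get_player_id() == 0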
nilq/baby-python
python
""" На региональном этапе Всероссийской олимпиады школьников по информатике в 2009 году предлагалась следующая задача. Всем известно, что со временем клавиатура изнашивается, и клавиши на ней начинают залипать. Конечно, некоторое время такую клавиатуру еще можно использовать, но для нажатий клавиш приходиться использовать большую силу. При изготовлении клавиатуры изначально для каждой клавиши задается количество нажатий, которое она должна выдерживать. Если знать эти величины для используемой клавиатуры, то для определенной последовательности нажатых клавиш можно определить, какие клавиши в процессе их использования сломаются, а какие — нет. Требуется написать программу, определяющую, какие клавиши сломаются в процессе заданного варианта эксплуатации клавиатуры. Формат ввода Первая строка входных данных содержит целое число n (1≤n≤1000) — количество клавиш на клавиатуре. Вторая строка содержит n целых чисел —с₁, с₂, … , сn, где сᵢ (1≤cᵢ≤100000) — количество нажатий,выдерживаемых i-ой клавишей. Третья строка содержит целое число k (1≤k≤100000) — общее количество нажатий клавиш, и последняя строка содержит k целых чисел pj (1≤pj≤n) — последовательность нажатых клавиш. Формат вывода Программа должна вывести n строк, содержащих информацию об исправности клавиш. Если i-я клавиша сломалась, то i-ая строка должна содержать слово YES, если же клавиша работоспособна — слово NO. """ n = int(input()) list1 = list(map(int, input().split())) k = int(input()) list2 = list(map(int, input().split())) scope = max(list2) + 1 c = [0] * scope for x in list2: c[x] += 1 for i in range(1, len(c)): print('YES') if list1[i-1] < c[i] else print('NO')
nilq/baby-python
python
import logging

import docker
from docker.models.containers import Container

from factioncli.processing.cli import log
from factioncli.processing.cli.printing import error_out

client = docker.from_env()


class container_status:
    name = ""
    status = ""
    ip_address = ""
    message = ""
    created = ""


def get_container(container_name):
    log.debug("Searching for container named: {0}".format(container_name))
    containers = client.containers.list()
    for container in containers:
        if container.attrs['Name'] == "/{0}".format(container_name):
            return container
    log.debug("Could not find container named: {0}".format(container_name))
    return None


def get_container_ip_address(container_name, network_name='faction_default'):
    log.debug("Getting IP for container named {0} on network {1}".format(container_name, network_name))
    container = get_container(container_name)
    if container:
        return container.attrs["NetworkSettings"]["Networks"][network_name]['IPAddress']
    else:
        return None


def start_container(container):
    if isinstance(container, Container):
        log.debug("Starting container: {0}".format(container.attrs["Name"]))
        if container.status == 'running':
            log.debug("Container {0} is already running. No need to start it".format(container.attrs["Name"]))
        else:
            container.start()
    else:
        error_out("{0} is not a container object".format(container))


def stop_container(container):
    if isinstance(container, Container):
        log.debug("Stopping container: {0}".format(container.attrs["Name"]))
        if container.status == 'running':
            container.stop()
        else:
            log.debug("Container {0} is not running. No need to stop it".format(container.attrs["Name"]))
    else:
        error_out("{0} is not a container object".format(container))


def restart_container(container):
    if isinstance(container, Container):
        log.debug("Restarting container: {0}".format(container.attrs["Name"]))
        if container.status == 'running':
            container.restart()
        else:
            log.debug("Container {0} is not running. No need to restart it".format(container.attrs["Name"]))
    else:
        error_out("{0} is not a container object".format(container))


def remove_container(container):
    if isinstance(container, Container):
        log.debug("Removing container: {0}".format(container.attrs["Name"]))
        # stop the container first if needed, then actually remove it
        if container.status == 'running':
            container.stop()
        container.remove()
    else:
        error_out("{0} is not a container object".format(container))


def execute_container_command(container, command):
    if isinstance(container, Container):
        log.debug("Executing {0} against container: {1}".format(command, container.attrs["Name"]))
        if container.status == 'running':
            return container.exec_run(command)
        error_out("Container {0} is not running. Can not execute commands against it".format(container.attrs["Name"]))
    else:
        error_out("{0} is not a container object".format(container))


def get_container_status(container_name, network_name='faction_default'):
    container = get_container(container_name)
    if container:
        status = container_status()
        container_name = container.attrs["Name"]
        if container_name[0] == "/":
            container_name = container_name[1:]
        status.name = container_name
        status.status = container.status
        status.ip_address = container.attrs["NetworkSettings"]["Networks"][network_name]['IPAddress']
        status.created = container.attrs["Created"]
        return status
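# A minimal usage sketch (the container name "faction-core" is illustrative;
# get_container_status assumes the container sits on the default
# "faction_default" network used throughout this module):
#
#   status = get_container_status("faction-core")
#   if status is not None:
#       print(status.name, status.status, status.ip_address)
#
#   container = get_container("faction-core")
#   if container is not None:
#       restart_container(container)
#       print(execute_container_command(container, "ls /"))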
nilq/baby-python
python
""" The ``zen.nx`` module provides functions for converting graph objects to and from the `NetworkX <http://networkx.lanl.gov/>`_ library. .. autofunction:: to_networkx(G) .. autofunction:: from_networkx(G) """ from graph import Graph from digraph import DiGraph import networkx __all__ = ['to_networkx','from_networkx'] def to_networkx(G): """ Convert a Zen graph object into a NetworkX graph object. In creating the object, the node object and node/edge data will be copied over (a shallow copy). The edge weight will be lost, as there is no separate edge weight attribute in NetworkX graphs. **Returns**: The return type depends on the input type. * :py:class:`networkx.Graph` if the input graph was a :py:class:`zen.Graph`. * :py:class:`networkx.DiGraph` if the input graph was a :py:class:`zen.DiGraph`. """ import networkx if type(G) == Graph: Gdest = networkx.Graph() # copy node objects and data for nobj,ndata in G.nodes_iter(data=True): if ndata is None: Gdest.add_node(nobj) else: Gdest.add_node(nobj,data=ndata) # copy edge objects and data for u,v,edata in G.edges_iter(data=True): if edata is None: Gdest.add_edge(u,v) else: Gdest.add_edge(u,v,data=edata) return Gdest elif type(G) == DiGraph: Gdest = networkx.DiGraph() # copy node objects and data for nobj,ndata in G.nodes_iter(data=True): if ndata is None: Gdest.add_node(nobj) else: Gdest.add_node(nobj,data=ndata) # copy edge objects and data for u,v,edata in G.edges_iter(data=True): if edata is None: Gdest.add_edge(u,v) else: Gdest.add_edge(u,v,data=edata) return Gdest else: raise ZenException, 'Cannot convert objects of type %s to NetworkX graph objects' % str(type(G)) # def to_wrapped_networkx(G): # """ # This function accepts a Zen graph object and returns an object which has the networkx interface. # Note that this object will wrap the graph object passed in, so any changes made to the networkx # object will also be reflected in the underlying graph. The object returned maintains no state, # so changes can be made to the underlying Zen graph without affecting the validity of the # wrapper. # """ # import networkx # # if type(G) == DiGraph: # return DiGraphNXWrapper(G) # else: # raise Exception, 'Unable to convert graph object type %s' % str(type(G)) def from_networkx(G): """ Convert a NetworkX graph into a Zen graph object. In creating the object, the NetworkX node object and node/edge data will be copied over (a shallow copy). **Returns**: The return type depends on the input type. * :py:class:`zen.Graph` if the input graph was a :py:class:`networkx.Graph`. * :py:class:`zen.DiGraph` if the input graph was a :py:class:`networkx.DiGraph`. """ Gdest = None if type(G) == networkx.DiGraph: Gdest = DiGraph() elif type(G) == networkx.Graph: Gdest = Graph() else: raise Exception, 'Unable to convert graph object type %s' % str(type(G)) # add nodes for n,nd in G.nodes_iter(data=True): Gdest.add_node(n,nd) # add edges for x,y,ed in G.edges_iter(data=True): Gdest.add_edge(x,y,ed) return Gdest
nilq/baby-python
python
# encoding: utf-8 # from flask.sessions import SessionInterface as FlaskSessionInterface from mo_dots import Data, wrap, exists, is_data from mo_future import first from mo_json import json2value, value2json from mo_kwargs import override from mo_logs import Log from mo_math import bytes2base64URL, crypto from mo_threads import Till from mo_threads.threads import register_thread, Thread from mo_times import Date from mo_times.dates import parse, RFC1123, unix2Date from pyLibrary.sql import SQL_WHERE, sql_list, SQL_SET, SQL_UPDATE from pyLibrary.sql.sqlite import ( sql_create, sql_eq, quote_column, sql_query, sql_insert, Sqlite, sql_lt, ) DEBUG = False def generate_sid(): """ GENERATE A UNIQUE SESSION ID """ return bytes2base64URL(crypto.bytes(32)) SINGLTON = None class SqliteSessionInterface(FlaskSessionInterface): """STORE SESSION DATA IN SQLITE :param db: Sqlite database :param table: The table name you want to use. :param use_signer: Whether to sign the session id cookie or not. """ @override def __init__(self, flask_app, db, cookie, table="sessions"): global SINGLTON if SINGLTON: Log.error("Can only handle one session manager at a time") SINGLTON = self if is_data(db): self.db = Sqlite(db) else: self.db = db self.table = table self.cookie = cookie self.cookie.max_lifetime = parse(self.cookie.max_lifetime) self.cookie.inactive_lifetime = parse(self.cookie.inactive_lifetime) if not self.db.about(self.table): self.setup() Thread.run("session monitor", self.monitor) def create_session(self, session): session.session_id = generate_sid() session.permanent = True session.expires = (Date.now() + self.cookie.max_lifetime).unix def monitor(self, please_stop): while not please_stop: # Delete expired session try: with self.db.transaction() as t: t.execute( "DELETE FROM " + quote_column(self.table) + SQL_WHERE + sql_lt(expires=Date.now().unix) ) except Exception as e: Log.warning("problem with session expires", cause=e) (please_stop | Till(seconds=60)).wait() def setup(self): with self.db.transaction() as t: t.execute( sql_create( self.table, { "session_id": "TEXT PRIMARY KEY", "data": "TEXT", "last_used": "NUMBER", "expires": "NUMBER", }, ) ) def cookie_data(self, session): return { "session_id": session.session_id, "expires": session.expires, "inactive_lifetime": self.cookie.inactive_lifetime.seconds, } def update_session(self, session_id, props): """ UPDATE GIVEN SESSION WITH PROPERTIES :param session_id: :param props: :return: """ now = Date.now().unix session = self.get_session(session_id) for k, v in props.items(): session[k] = v session.last_used = now record = { "session_id": session_id, "data": value2json(session), "expires": session.expires, "last_used": session.last_used, } with self.db.transaction() as t: t.execute( SQL_UPDATE + quote_column(self.table) + SQL_SET + sql_list(sql_eq(**{k: v}) for k, v in record.items()) + SQL_WHERE + sql_eq(session_id=session_id) ) def get_session(self, session_id): now = Date.now().unix result = self.db.query( sql_query({"from": self.table, "where": {"eq": {"session_id": session_id}}}) ) saved_record = first(Data(zip(result.header, r)) for r in result.data) if not saved_record or saved_record.expires <= now: return Data() session = json2value(saved_record.data) DEBUG and Log.note("record from db {{session}}", session=saved_record) return session @register_thread def open_session(self, app, request): session_id = request.headers.get("Authorization") DEBUG and Log.note("got session_id {{session|quote}}", session=session_id) if not session_id: return Data() 
return self.get_session(session_id) @register_thread def save_session(self, app, session, response): if not session or not session.keys(): return if not session.session_id: session.session_id = generate_sid() session.permanent = True DEBUG and Log.note("save session {{session}}", session=session) now = Date.now().unix session_id = session.session_id result = self.db.query( sql_query({"from": self.table, "where": {"eq": {"session_id": session_id}}}) ) saved_record = first(Data(zip(result.header, r)) for r in result.data) expires = min(session.expires, now + self.cookie.inactive_lifetime.seconds) if saved_record: DEBUG and Log.note("found session {{session}}", session=saved_record) saved_record.data = value2json(session) saved_record.expires = expires saved_record.last_used = now with self.db.transaction() as t: t.execute( "UPDATE " + quote_column(self.table) + SQL_SET + sql_list(sql_eq(**{k: v}) for k, v in saved_record.items()) + SQL_WHERE + sql_eq(session_id=session_id) ) else: new_record = { "session_id": session_id, "data": value2json(session), "expires": expires, "last_used": now, } DEBUG and Log.note("new record for db {{session}}", session=new_record) with self.db.transaction() as t: t.execute(sql_insert(self.table, new_record)) def setup_flask_session(flask_app, session_config): """ SETUP FlASK SESSION MANAGEMENT :param flask_app: USED TO SET THE flask_app.config :param session_config: CONFIGURATION :return: THE SESSION MANAGER """ session_config = wrap(session_config) output = flask_app.session_interface = SqliteSessionInterface( flask_app, kwargs=session_config ) return output
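# A minimal wiring sketch (every value below is illustrative; the exact shape
# of ``db`` and the cookie lifetime strings depend on the Sqlite and
# mo_times ``parse`` conventions this module relies on):
#
#   from flask import Flask
#
#   flask_app = Flask(__name__)
#   session_manager = setup_flask_session(
#       flask_app=flask_app,
#       session_config={
#           "db": {"filename": "sessions.sqlite"},
#           "table": "sessions",
#           "cookie": {
#               "max_lifetime": "month",
#               "inactive_lifetime": "hour",
#           },
#       },
#   )
#
# After this call Flask routes session open/save through
# SqliteSessionInterface, persisting sessions in the "sessions" table.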
nilq/baby-python
python
#!/usr/bin/env pypy from random import * from sys import * n, q = map(int, argv[1:]) print n L = range(0, n) shuffle(L) print ' '.join(map(str, L)) print ' '.join(map(str, (randint(1, i - 1) for i in xrange(2, n + 1)))) print q * 2 for i in xrange(q): print 1, randint(1, n), randint(1, n) print 2
nilq/baby-python
python
job = 'source $HOME/.bashrc ; source activate threshold-devel ; python experiment.py --method {} --thresh {} --max-iters {} --num-burn {} --num-samples {} --num-steps-hyper {} --partial-momentum {} --check-prob 0.01 {} {} 2>/dev/null' with open('joblist.txt', 'w') as f: for nb in [10000]: for ns in [100000]: for thresh in [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10]: for num_steps_hyper in [6]: for partial_momentum in [0.0]: for nm in ['--newton-momentum', '--no-newton-momentum']: nps = ['--no-newton-position'] if nm == '--no-newton-momentum' else ['--newton-position', '--no-newton-position'] for np in nps: f.write(job.format('riemannian', thresh, 100, nb, ns, num_steps_hyper, partial_momentum, nm, np) + '\n') f.write(job.format('euclidean', 0.0, 0, nb, ns, 50, 0.0, '--no-newton-momentum', '--no-newton-position') + '\n')
nilq/baby-python
python
import logging
import os
import random
import time

import cattle
import pytest


@pytest.fixture(scope='session', autouse=bool(os.environ.get('DEBUG')))
def log():
    logging.basicConfig(level=logging.DEBUG)


@pytest.fixture(scope='session')
def api_url():
    return 'http://localhost:1234/v3/schemas'


@pytest.fixture
def client(api_url):
    return cattle.from_env(url=api_url)


def random_str():
    return 'random-{0}-{1}'.format(random_num(), int(time.time()))


def random_num():
    return random.randint(0, 1000000)
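# A minimal test sketch using the fixtures and helpers above (nothing here is
# taken from the repository; it only exercises what this conftest provides):
#
#   def test_client_connects(client):
#       assert client is not None
#
#   def test_random_str_prefix():
#       assert random_str().startswith('random-')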
nilq/baby-python
python
import sys


def is_triangle(a, b, c):
    if a + b > c and a + c > b and b + c > a:
        print("True")
    else:
        print("False")


def read_nonnegative(word):
    num = float(input(word))
    if num < 0:
        print("Invalid value: input must be nonnegative")
    else:
        return num

# a = read_nonnegative("Enter a nonnegative number: ")
# print(a)


a = float(input("Enter 1st line's length: "))
if a < 0:
    print("Invalid value: input must be nonnegative")
    sys.exit()

b = float(input("Enter 2nd line's length: "))
if b < 0:
    print("Invalid value: input must be nonnegative")
    sys.exit()

c = float(input("Enter 3rd line's length: "))
if c < 0:
    print("Invalid value: input must be nonnegative")
    sys.exit()

if a + b > c and a + c > b and b + c > a:
    print("It's a triangle.")
else:
    print("It's not a triangle.")
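# Example runs (the side lengths are illustrative):
#
#   is_triangle(3, 4, 5)   # prints "True"  (3+4 > 5, 3+5 > 4, 4+5 > 3)
#   is_triangle(1, 2, 3)   # prints "False" (1+2 is not greater than 3)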
nilq/baby-python
python
import os import easypost from dotenv import load_dotenv load_dotenv() easypost.api_key = os.getenv('EASYPOST_TEST_API_KEY') try: shipment = easypost.Shipment.retrieve('shp_123...') smartrates = shipment.get_smartrates() print(smartrates) except Exception as error: print(error)
nilq/baby-python
python
from django.template import Library, Node, TemplateSyntaxError from django.utils.encoding import force_unicode from convert.base import MediaFile, EmptyMediaFile, convert_solo from convert.conf import settings register = Library() class ConvertBaseNode(Node): def error(self, context): if settings.CONVERT_DEBUG: raise elif self.as_var: context[self.as_var] = EmptyMediaFile() return '' return EmptyMediaFile().tag def success(self, context, dest): if self.as_var: context[self.as_var] = dest return '' return dest.tag class ThumbnailNode(ConvertBaseNode): def __init__(self, input_file, options, as_var): self.input_file = input_file self.options = options self.as_var = as_var def render(self, context): try: input_file = force_unicode(self.input_file.resolve(context)) options = self.options.resolve(context) source = MediaFile(input_file) dest = source.thumbnail(options) except: return self.error(context) return self.success(context, dest) class ConvertNode(ConvertBaseNode): def __init__(self, input_file, options, ext, as_var): self.input_file = input_file self.options = options self.ext = ext self.as_var = as_var def render(self, context): try: input_file = force_unicode(self.input_file.resolve(context)) options = self.options.resolve(context) ext = self.ext and self.ext.resolve(context) if not input_file: dest = convert_solo(options, ext) else: source = MediaFile(input_file) dest = source.convert(options, ext) except: return self.error(context) return self.success(context, dest) @register.tag def thumbnail(parser, token): args = token.split_contents() invalid_syntax = TemplateSyntaxError('Invalid syntax.\nGot: %s\n' 'Expected: thumbnail "input-file" "options" [as var]' % " ".join(args)) as_var = None if len(args) not in (3, 5): raise invalid_syntax if args[-2] == 'as': as_var = args[-1] args = args[:-2] if len(args) != 3: raise invalid_syntax input_file, options = map(parser.compile_filter, args[1:]) return ThumbnailNode(input_file, options, as_var) @register.tag def convert(parser, token): args = token.split_contents() invalid_syntax = TemplateSyntaxError('Invalid syntax.\nGot: %s.\n' 'Expected: convert "input-file" "options" ["extension"] ' '[as var]' % " ".join(args)) as_var = None ext = None if len(args) < 3: raise invalid_syntax if args[-2] == 'as': as_var = args[-1] args = args[:-2] if len(args) == 4: ext = parser.compile_filter(args.pop(3)) if len(args) != 3: raise invalid_syntax input_file, options = map(parser.compile_filter, args[1:]) return ConvertNode(input_file, options, ext, as_var)
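# Hypothetical template usage of the tags above (the load name, file path and
# option strings are illustrative; the exact option syntax depends on the
# converter backends configured via CONVERT settings):
#
#   {% load convert_tags %}
#   {% thumbnail "photos/cat.jpg" "150x150" %}
#   {% convert "photos/cat.jpg" "-resize 50%" "png" as small_png %}
#   {{ small_png.tag }}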
nilq/baby-python
python
import examples.PEs.alu_basic as alu_basic import examples.PEs.PE_lut as PE_lut from hwtypes import BitVector as BV from peak import family from metamapper import CoreIRContext def test_alu(): CoreIRContext(reset=True) width = 8 ALU_fc = alu_basic.gen_ALU(width) isa_fc = alu_basic.gen_isa(width) isa = isa_fc.Py inst = isa.Inst(op=isa.OP.Add, imm=family.PyFamily().BitVector[8](0)) alu = (ALU_fc.Py)() #check add assert BV[8](10) == alu(inst, a=BV[8](6), b=BV[8](4)) #check if it can compile to magma alu_m = ALU_fc.Magma def test_PE_lut(): CoreIRContext(reset=True) PE_fc = PE_lut.gen_PE(8) PE_fc.Py PE_fc.SMT PE_fc.Magma isa = PE_lut.gen_isa(8).Py inst = isa.Inst( alu_inst=isa.AluInst( op=isa.OP.Add, imm=isa.Data(5) ), lut=isa.LUT_t(3), ) res = PE_fc.Py()(inst, isa.Data(3), isa.Data(1), isa.Bit(1), isa.Bit(0), isa.Bit(1))
nilq/baby-python
python
""" This module contains the WPS inputs and outputs that are reused across multiple WPS processes. """ from dataclasses import fields from pywps import ( FORMATS, ComplexInput, ComplexOutput, Format, LiteralInput, LiteralOutput, ) from pywps.app.Common import Metadata from ravenpy.config.rvs import RVI from ravenpy.models.emulators import GR4JCN, HBVEC, HMETS, MOHYSE from raven import config # ---------------------------------------- # # ---------------- Inputs ---------------- # # ---------------------------------------- # ts = ComplexInput( "ts", "Input time series files", abstract="Files (text or netCDF) storing" "daily liquid precipitation (pr), " "solid precipitation (prsn), " "minimum temperature (tasmin), " "maximum temperature (tasmax), " "potential evapotranspiration (evspsbl) and " "observed streamflow (qobs [m3/s]).", min_occurs=1, max_occurs=100, supported_formats=[FORMATS.NETCDF, FORMATS.DODS, FORMATS.TEXT, FORMATS.SHP], ) # This can only be used with zipped file preserving the name and suffix of the individual files. conf = ComplexInput( "conf", "Zipped Raven/Ostrich configuration files", abstract="Model configuration files, including the primary input file (rvi), the parameter " "input file (rvp), the basin definition file (rvh), the time series input file " "(rvt), the initial conditions file (rvc). For Ostrich, include the Ostrich " "calibration config (txt) and templates (tpl).", min_occurs=1, max_occurs=1, supported_formats=[FORMATS.ZIP], ) rvi = ComplexInput( "rvi", "Primary input file", abstract="The primary input file stores the model simulation options and numerical options.", min_occurs=1, max_occurs=1, supported_formats=[FORMATS.TEXT], ) rvp = ComplexInput( "rvp", "Classed parameter input file", abstract="The classed parameter input file stores a database of soil, vegetation, river, " "aquifer, and land class pro-perties. Not all classes specified in the *.rvp file " "need to be included in the model.", min_occurs=1, max_occurs=1, supported_formats=[FORMATS.TEXT], ) rvh = ComplexInput( "rvh", "HRU / Basin definition file", abstract="The HRU/basin definition file describes the topology of the basin network and the " "class membership of all constituent HRUs.", min_occurs=1, max_occurs=1, supported_formats=[FORMATS.TEXT], ) rvt = ComplexInput( "rvt", "Time series input file", abstract="The time series input file is used to store time series of forcing functions (" "precipitation, temperature, etc.).", min_occurs=1, max_occurs=1, supported_formats=[FORMATS.TEXT], ) rvc = ComplexInput( "rvc", "Initial conditions input file", abstract="The initial conditions input file is used to store the initial conditions for the " "model. By default, the initial conditions for all model state variables is zero, " "and there are no required commands in this file (it could even be completely " "empty).", min_occurs=0, max_occurs=1, supported_formats=[FORMATS.TEXT], ) start_date = LiteralInput( "start_date", "Simulation start date (AAAA-MM-DD)", abstract="Start date of the simulation (AAAA-MM-DD). " "Defaults to the start of the forcing file. ", data_type="dateTime", default="0001-01-01 00:00:00", min_occurs=0, max_occurs=config.max_parallel_processes, ) end_date = LiteralInput( "end_date", "Simulation end date (AAAA-MM-DD)", abstract="End date of the simulation (AAAA-MM-DD). 
" "Defaults to the end of the forcing file.", data_type="dateTime", default="0001-01-01 00:00:00", min_occurs=0, max_occurs=config.max_parallel_processes, ) duration = LiteralInput( "duration", "Simulation duration (days)", abstract="Number of simulated days, defaults to the length of the input forcings.", data_type="nonNegativeInteger", default=0, min_occurs=0, max_occurs=config.max_parallel_processes, ) run_name = LiteralInput( "run_name", "Simulation name", abstract="The name given to the simulation, for example <watershed>_<experiment>", data_type="string", default="raven-gr4j-cemaneige-sim", min_occurs=0, max_occurs=config.max_parallel_processes, ) # Note that this is a newer, alternate interface to the area/latitude/longitude/elevation legacy one for HRUs hrus = ComplexInput( "hrus", "JSON-serialized HRUs", supported_formats=[ FORMATS.JSON, ], min_occurs=0, max_occurs=1, ) area = LiteralInput( "area", "Watershed area (km2)", abstract="Watershed area (km2)", data_type="float", default=0.0, min_occurs=0, max_occurs=config.max_parallel_processes, ) latitude = LiteralInput( "latitude", "Latitude", abstract="Watershed's centroid latitude", data_type="float", min_occurs=0, max_occurs=config.max_parallel_processes, ) longitude = LiteralInput( "longitude", "Longitude", abstract="Watershed's centroid longitude", data_type="float", min_occurs=0, max_occurs=config.max_parallel_processes, ) elevation = LiteralInput( "elevation", "Elevation (m)", abstract="Watershed's mean elevation (m)", data_type="float", min_occurs=0, max_occurs=config.max_parallel_processes, ) model_name = LiteralInput( "model_name", "Hydrological model identifier", abstract="Hydrological model identifier: {HMETS, GR4JCN, MOHYSE}", data_type="string", allowed_values=("HMETS", "GR4JCN", "MOHYSE"), min_occurs=1, max_occurs=config.max_parallel_processes, ) nc_index = LiteralInput( "nc_index", "NetCDF site coordinate index", abstract="The site index for a multi-basin netCDF file. This is ONLY necessary if the " "NetCDF variable is 2-dimensional (time, site).", data_type="integer", min_occurs=0, max_occurs=config.max_parallel_processes, ) suppress_output = LiteralInput( "suppress_output", "Do not write hydrograph to disk", abstract="If True (default), hydrographs are not written to disk and thus not" "returned.", data_type="boolean", default=True, ) rain_snow_fraction = LiteralInput( "rain_snow_fraction", "Rain snow partitioning", abstract="Algorithm used to partition rain and snow from the total precipitions", data_type="string", allowed_values=[e.value for e in RVI.RainSnowFractionOptions], min_occurs=0, ) evaporation = LiteralInput( "evaporation", "Evaporation scheme", abstract="Algorithm used to compute potential evapotranspiration (PET).", data_type="string", allowed_values=[e.value for e in RVI.EvaporationOptions], min_occurs=0, ) ow_evaporation = LiteralInput( "ow_evaporation", "Open-water evaporation scheme", abstract="Algorithm used to compute potential evapotranspiration (PET) over open " "water", data_type="string", allowed_values=[e.value for e in RVI.EvaporationOptions], min_occurs=0, ) nc_spec = LiteralInput( "nc_spec", "NetCDF input file specifications", abstract="Configuration of individual netCDF input files, such as `scale`, `offset`" "and `time_shift`. Should be passed as a dictionary keyed by variable, e.g. 
`tas` " "json-serialized.", data_type="string", min_occurs=0, max_occurs=20, ) forecast_model = LiteralInput( "forecast_model", "ECCC forecast model", abstract="The name of the forecast model run by Environment and Climate Change " "Canada.", data_type="string", allowed_values=("GEPS",), # 'REPS', 'GDPS', 'RDPS'), default="GEPS", min_occurs=1, ) hdate = LiteralInput( "hdate", "Hindcast start date (AAAA-MM-DD)", abstract="Start date of the hindcast (AAAA-MM-DD). " "Defaults to the start of the forcing file. ", data_type="dateTime", min_occurs=1, max_occurs=1, ) hmets = LiteralInput( "hmets", "Comma separated list of HMETS parameters", abstract="Parameters: " + ", ".join(f.name for f in fields(HMETS.Params)), data_type="string", min_occurs=0, ) gr4jcn = LiteralInput( "gr4jcn", "Comma separated list of GR4JCN parameters", abstract="Parameters: " + ", ".join(f.name for f in fields(GR4JCN.Params)), data_type="string", min_occurs=0, ) mohyse = LiteralInput( "mohyse", "Comma separated list of MOHYSE parameters", abstract="Parameters: " + ", ".join(f.name for f in fields(MOHYSE.Params)), data_type="string", min_occurs=0, ) hbvec = LiteralInput( "hbvec", "Comma separated list of HBV-EC parameters", abstract="Parameters: " + ", ".join(f.name for f in fields(HBVEC.Params)), data_type="string", min_occurs=0, ) # --- GIS Inputs --- # region_vector = ComplexInput( "region_vector", "Vector shape file of a region", abstract="An ESRI Shapefile, GML, JSON, GeoJSON, or single layer GeoPackage." " The ESRI Shapefile must be zipped and contain the .shp, .shx, and .dbf.", min_occurs=1, max_occurs=1, supported_formats=[ FORMATS.GEOJSON, FORMATS.GML, FORMATS.JSON, FORMATS.SHP, FORMATS.ZIP, ], ) shape = ComplexInput( "shape", "Vector shape of a region", abstract="An ESRI Shapefile, GML, JSON, GeoJSON, or single layer GeoPackage." " The ESRI Shapefile must be zipped and contain the .shp, .shx, and .dbf.", min_occurs=1, max_occurs=1, supported_formats=[ FORMATS.GEOJSON, FORMATS.GML, FORMATS.JSON, FORMATS.SHP, FORMATS.ZIP, ], ) land_use_raster = ComplexInput( "raster", "Gridded Land Use raster data set", abstract="The Land Use raster to be queried. Default is the CEC NALCMS 2010. Provided " "raster " "must use the UN FAO Land Cover Classification System (19 types).", metadata=[ Metadata( "Commission for Environmental Cooperation North American Land Change Monitoring " "System", "http://www.cec.org/tools-and-resources/map-files/land-cover-2010-landsat-30m", ), Metadata( "Latifovic, R., Homer, C., Ressl, R., Pouliot, D., Hossain, S.N., Colditz, R.R.," "Olthof, I., Giri, C., Victoria, A., (2012). North American land change " "monitoring system. In: Giri, C., (Ed), Remote Sensing of Land Use and Land " "Cover: Principles and Applications, CRC-Press, pp. 303-324" ), ], min_occurs=0, max_occurs=1, supported_formats=[FORMATS.GEOTIFF], ) dem_raster = ComplexInput( "raster", "Gridded raster data set", abstract="The DEM to be queried. Defaults to the EarthEnv-DEM90 product.", metadata=[ Metadata("EarthEnv-DEM90", "https://www.earthenv.org/DEM"), Metadata( "Robinson, Natalie, James Regetz, and Robert P. Guralnick (2014). " "EarthEnv-DEM90: A Nearly-Global, Void-Free, Multi-Scale Smoothed, 90m Digital " "Elevation Model from Fused ASTER and SRTM Data. 
ISPRS Journal of " "Photogrammetry and Remote Sensing 87: 57–67.", "https://doi.org/10.1016/j.isprsjprs.2013.11.002", ), ], min_occurs=0, max_occurs=1, supported_formats=[FORMATS.GEOTIFF], ) simple_categories = LiteralInput( "simple_categories", "Use simplified land classification categories for hydrological " "modeling purposes.", data_type="boolean", default="false", min_occurs=0, max_occurs=1, ) raster_band = LiteralInput( "band", "Raster band", data_type="integer", default=1, abstract="Band of raster examined to perform zonal statistics.", min_occurs=0, max_occurs=1, ) select_all_touching = LiteralInput( "select_all_touching", "Additionally select boundary pixels that are touched by shape.", data_type="boolean", default="false", min_occurs=0, max_occurs=1, ) # --- # rv_config = ComplexOutput( "rv_config", "Raven/Ostrich configuration files", abstract="Model configuration files, including the primary input file (rvi), the parameter " "input file (rvp), the basin definition file (rvh), the time series input file " "(rvt), the initial conditions file (rvc). For Ostrich, include the Ostrich " "calibration config (txt) and templates (tpl).", supported_formats=[FORMATS.ZIP], as_reference=True, ) hydrograph = ComplexOutput( "hydrograph", "Hydrograph time series (m3/s)", supported_formats=[ FORMATS.NETCDF, Format("application/zip", extension=".zip", encoding="base64"), ], abstract="A netCDF file containing the outflow hydrographs (in m3/s) for all subbasins " "specified as `gauged` in the .rvh file. It reports period-ending time-" "averaged flows for the preceding time step, as is consistent with most " "measured stream gauge data (again, the initial flow conditions at the " "start of the first time step are included). If observed hydrographs are " "specified, they will be output adjacent to the corresponding modelled " "hydrograph. ", as_reference=True, ) ensemble = ComplexOutput( "ensemble", "Multiple hydrograph time series (m3/s)", supported_formats=[FORMATS.NETCDF], abstract="A netCDF file containing the outflow hydrographs (in m3/s) for the basin " "on which the regionalization method has been applied. The number of outflow " "hydrographs is equal to the number of donors (ndonors) passed to the method. " "The average of these hydrographs (either using equal or Inverse-Distance Weights) " 'is the hydrograph generated in "hydrograph".', as_reference=True, ) forecast = ComplexOutput( "forecast", "Multiple forecasted hydrograph time series (m3/s)", supported_formats=[FORMATS.NETCDF], abstract="A netCDF file containing the outflow hydrographs (in m3/s) for the basin " "on which the forecasting method has been applied. The number of members " "(hydrographs) is equal to the number of input weather forecast members " "passed to the method. ", as_reference=True, ) storage = ComplexOutput( "storage", "Watershed storage time series (mm)", abstract="A netCDF file describing the total storage of water (in mm) in all water " "storage compartments for each time step of the simulation. Mass balance " "errors, cumulative input (precipitation), and output (channel losses) are " "also included. 
Note that the precipitation rates in this file are "
    "period-ending, i.e., this is the precipitation rate for the time step "
    "preceding the time stamp; all water storage variables represent "
    "instantaneous reports of the storage at the time stamp indicated.",
    supported_formats=[
        FORMATS.NETCDF,
        Format("application/zip", extension=".zip", encoding="base64"),
    ],
    as_reference=True,
)

solution = ComplexOutput(
    "solution",
    "solution.rvc file to restart another simulation with the conditions "
    "at the end of this simulation.",
    supported_formats=[
        FORMATS.TEXT,
        Format("application/zip", extension=".zip", encoding="base64"),
    ],
    as_reference=True,
)

diagnostics = ComplexOutput(
    "diagnostics",
    "Performance diagnostic values",
    abstract="Model diagnostic CSV file.",
    supported_formats=[
        FORMATS.TEXT,
        Format("application/zip", extension=".zip", encoding="base64"),
    ],
    as_reference=True,
)

features = ComplexOutput(
    "features",
    "DEM properties within the region defined by the vector provided.",
    abstract="Category pixel counts using either standard or simplified UNFAO categories",
    supported_formats=[FORMATS.GEOJSON],
)

statistics = ComplexOutput(
    "statistics",
    "DEM properties by feature",
    abstract="Land-use type pixel counts using either standard or simplified UNFAO categories.",
    supported_formats=[FORMATS.JSON],
)

calibparams = LiteralOutput(
    "calibparams",
    "Calibrated parameters",
    abstract="Comma separated list of parameters.",
    data_type="string",
)

# --- OSTRICH --- #

algorithm = LiteralInput(
    "algorithm",
    "OSTRICH Algorithm to use to calibrate model parameters",
    abstract="Optimization algorithm to implement for this calibration run",
    data_type="string",
    default="DDS",
    allowed_values=("DDS", "SCEUA"),
    min_occurs=0,
)

max_iterations = LiteralInput(
    "max_iterations",
    "Maximum number of model evaluations for the calibration run (budget)",
    abstract="Maximum number of times OSTRICH can call the hydrological model during the "
    "model parameter calibration",
    data_type="integer",
    default=50,
    allowed_values=list(range(25001)),
    min_occurs=0,
)

random_seed = LiteralInput(
    "random_seed",
    "Seed for random number generator",
    abstract="Set this value to obtain replicable results. Set to -1 to let it be random.",
    data_type="integer",
    default=-1,
    min_occurs=0,
)

random_numbers = ComplexInput(
    "random_numbers",
    "File containing a list of random numbers (aka. OstRandomNumbers.txt)",
    abstract="These numbers will be used directly by Ostrich for its randomness (the first line must be the size of the list)",
    min_occurs=0,
    max_occurs=1,
    supported_formats=[FORMATS.TEXT],
)

calibration = ComplexOutput(
    "calibration",
    "Ostrich calibration output",
    abstract="Output file from Ostrich calibration run.",
    supported_formats=[FORMATS.TEXT],
    as_reference=True,
)

CalibrationResults = ComplexOutput(
    "CalibrationResults",
    "ObjectiveFunction and calibrated parameters computed by Ostrich",
    abstract="Objective Function value after calibration using user-selected "
    "function, as well as the calibrated parameter set",
    supported_formats=[FORMATS.TEXT],
    as_reference=True,
)

calibrated_params = ComplexOutput(
    "calibrated_params",
    "Calibrated parameters",
    abstract="Model parameters estimated by minimizing the objective function.",
    supported_formats=[FORMATS.TEXT],
    as_reference=False,
)

# TODO: Add configuration files to output
# config = ComplexOutput('config', 'Configuration files',
#                        abstract="Link to configuration files.",
#                        supported_formats=)
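# A minimal sketch of how these shared I/O objects are typically consumed by a
# PyWPS process (the process class, identifier and handler body below are
# illustrative, not part of raven):
#
#   from pywps import Process
#
#   class ExampleRavenProcess(Process):
#       def __init__(self):
#           super().__init__(
#               self._handler,
#               identifier="example-raven",
#               title="Example process reusing the shared I/O objects",
#               version="0.1",
#               inputs=[ts, start_date, end_date, duration, run_name],
#               outputs=[hydrograph, storage, diagnostics],
#           )
#
#       @staticmethod
#       def _handler(request, response):
#           # request.inputs["ts"], request.inputs["start_date"], ... are
#           # populated according to the declarations above.
#           response.outputs["hydrograph"].file = "hydrograph.nc"
#           return response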
nilq/baby-python
python
import os

from deepartransit.utils import data_generator
from deepartransit.utils.config import process_config

config_path = os.path.join('tests', 'deepar_config_test.yml')


def test_data():
    config = process_config(config_path)
    data = data_generator.DataGenerator(config)
    batch_Z, batch_X = next(data.next_batch(config.batch_size))
    assert batch_Z.shape[0] == config.batch_size == batch_X.shape[0]
    assert batch_Z.shape[1] == config.cond_length + config.pred_length == batch_X.shape[1]

    Z_test, X_test = data.get_test_data()
    assert Z_test.shape[1] == X_test.shape[1] == config.test_length + config.cond_length


config_path_2 = os.path.join('tests', 'deepar_config_test_2.yml')


def test_data_config_update():
    config = process_config(config_path_2)
    data = data_generator.DataGenerator(config)
    config = data.update_config()
    assert 'num_cov' in config
    assert 'num_features' in config
    assert 'num_ts' in config
    assert config.batch_size == config.num_ts


config_path_3 = os.path.join('tests', 'deeparsys_config_test_2.yml')


def test_data_deeparsys():
    # exercises the deeparsys config defined just above
    config = process_config(config_path_3)
    data = data_generator.DataGenerator(config)
    batch_Z, batch_X = next(data.next_batch(config.batch_size))
    assert batch_Z.shape[0] == config.batch_size == batch_X.shape[0]
    assert batch_Z.shape[1] == config.cond_length + config.pred_length == batch_X.shape[1]
    assert data.Z.shape[0] == data.X.shape[0]

    # Z_test, X_test = data.get_test_data()
    # assert Z_test.shape[1] == X_test.shape[1] == config.test_length + config.cond_length
nilq/baby-python
python
'''
Helper functions to select and combine data
'''
from __future__ import division

import logging
import re
import os
try:
    from collections.abc import Iterable
except ImportError:  # Python 2 fallback
    from collections import Iterable

import numpy as np
import tables as tb
import numexpr as ne
from tqdm import tqdm

from beam_telescope_analysis.telescope.telescope import Telescope
from beam_telescope_analysis.tools import analysis_utils


def combine_files(input_files, output_file=None, names=None, event_number_offsets=None, chunk_size=1000000):
    ''' Combine tables from different files and merge them into one single table.

    Some use cases:
    - Merging hit tables from different runs for combined analysis
      (under the assumption that telescope geometry has not changed between the runs)
    - Merging of tracks tables from different runs for combined efficiency analysis.
      (telescope geometry has changed between the runs and each run requires a separate alignment)

    Parameters
    ----------
    input_files : list
        Filenames of the input files containing a table.
    output_file : string
        Filename of the output file containing the merged table.
    names : list or string
        List of table names that will be merged. If None, all tables will be merged
    event_number_offsets : list
        Manually set start event number offset for each hit array.
        The event number is increased by the given number.
        If None, the event number will be generated automatically.
        If no "event_number" column is available, this parameter will be ignored.
    chunk_size : int
        Chunk size of the data when reading from the table.

    Returns
    -------
    applied_event_number_offsets : dict
        The dictionary contains the lists of the event number offsets of each table.
    '''
    logging.info('=== Combining %d files ===' % len(input_files))

    if not output_file:
        prefix = os.path.commonprefix(input_files)
        output_file = os.path.splitext(prefix)[0] + '_combined.h5'

    # convert to list
    if names is not None and not isinstance(names, (list, tuple, set)):
        names = [names]

    out_tables = {}
    last_event_numbers = {}
    applied_event_number_offsets = {}
    with tb.open_file(filename=output_file, mode="w") as out_file_h5:
        for file_index, input_file in enumerate(input_files):
            with tb.open_file(filename=input_file, mode='r') as in_file_h5:
                # get all nodes of type 'table'
                in_tables = in_file_h5.list_nodes('/', classname='Table')
                for table in in_tables:
                    if names is not None and table.name not in names:
                        continue
                    if table.name not in out_tables:
                        out_tables[table.name] = out_file_h5.create_table(
                            where=out_file_h5.root,
                            name=table.name,
                            description=table.dtype,
                            title=table.title,
                            filters=tb.Filters(
                                complib='blosc',
                                complevel=5,
                                fletcher32=False))
                        if 'event_number' in table.dtype.names:
                            last_event_numbers[table.name] = -1
                            applied_event_number_offsets[table.name] = []
                        else:
                            last_event_numbers[table.name] = None
                            applied_event_number_offsets[table.name] = None

                    event_number_offset = 0
                    if last_event_numbers[table.name] is not None and event_number_offsets is not None and event_number_offsets[file_index] is not None:
                        event_number_offset = event_number_offsets[file_index]
                    elif last_event_numbers[table.name] is not None:
                        # increase by 1 to avoid duplicate event number
                        event_number_offset += last_event_numbers[table.name] + 1

                    for read_index in range(0, table.nrows, chunk_size):
                        data_chunk = table.read(start=read_index, stop=read_index + chunk_size)
                        if last_event_numbers[table.name] is not None and event_number_offset != 0:
                            data_chunk[:]['event_number'] += event_number_offset
                        out_tables[table.name].append(data_chunk)
                        out_tables[table.name].flush()
                        if last_event_numbers[table.name] is not None:
                            last_event_numbers[table.name] = data_chunk[-1]['event_number']
                    applied_event_number_offsets[table.name].append(event_number_offset)

    return applied_event_number_offsets


def reduce_events(input_file, max_events, output_file=None, chunk_size=1000000):
    ''' Reducing the size of a file to a given number of events.

    Parameters
    ----------
    input_file : string
        Filename of the input file.
    output_file : string
        Filename of the output file.
    max_events : uint
        Maximum number of randomly selected events.
    chunk_size : int
        Chunk size of the data when reading from file.
    '''
    if not output_file:
        output_file = os.path.splitext(input_file)[0] + '_reduced.h5'

    with tb.open_file(input_file, mode='r') as in_file_h5:
        with tb.open_file(output_file, mode="w") as out_file_h5:
            for node in in_file_h5.root:
                logging.info('Reducing events for node %s', node.name)
                total_n_tracks = node.shape[0]
                total_n_tracks_stored = 0
                total_n_events_stored = 0
                progress_bar = tqdm(total=total_n_tracks, ncols=80)

                tracks_table_out = out_file_h5.create_table(
                    where=out_file_h5.root,
                    name=node.name,
                    description=node.dtype,
                    title=node.title,
                    filters=tb.Filters(
                        complib='blosc',
                        complevel=5,
                        fletcher32=False))

                for data_chunk, index_chunk in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size):
                    n_tracks_chunk = data_chunk.shape[0]

                    unique_events = np.unique(data_chunk["event_number"])
                    n_events_chunk = unique_events.shape[0]
                    if total_n_tracks == index_chunk:  # last chunk, adding all remaining events
                        select_n_events = max_events - total_n_events_stored
                    elif total_n_events_stored == 0:  # first chunk
                        select_n_events = int(round(max_events * (n_tracks_chunk / total_n_tracks)))
                    else:
                        # calculate correction of number of selected events
                        correction = (total_n_tracks - index_chunk)/total_n_tracks * 1 / (((total_n_tracks-last_index_chunk)/total_n_tracks)/((max_events-total_n_events_stored_last)/max_events)) \
                            + (index_chunk)/total_n_tracks * 1 / (((last_index_chunk)/total_n_tracks)/((total_n_events_stored_last)/max_events))
                        select_n_events = int(round(max_events * (n_tracks_chunk / total_n_tracks) * correction))
                    # do not store more events than in current chunk
                    select_n_events = min(n_events_chunk, select_n_events)
                    # do not store more events than given by max_events
                    select_n_events = min(select_n_events, max_events - total_n_events_stored)
                    np.random.seed(seed=0)
                    selected_events = np.random.choice(unique_events, size=select_n_events, replace=False)
                    store_n_events = selected_events.shape[0]
                    total_n_events_stored += store_n_events
                    selected_tracks = np.in1d(data_chunk["event_number"], selected_events)
                    store_n_tracks = np.count_nonzero(selected_tracks)
                    total_n_tracks_stored += store_n_tracks
                    data_chunk = data_chunk[selected_tracks]

                    tracks_table_out.append(data_chunk)
                    tracks_table_out.flush()
                    total_n_events_stored_last = total_n_events_stored
                    total_n_tracks_last = total_n_tracks
                    last_index_chunk = index_chunk
                    progress_bar.update(index_chunk)
                progress_bar.close()


def select_tracks(telescope_configuration, input_tracks_file, select_duts, output_tracks_file=None, condition=None, max_events=None, select_hit_duts=None, select_no_hit_duts=None, select_quality_duts=None, select_no_quality_duts=None, chunk_size=1000000):
    ''' Selecting tracks that match the conditions.

    Parameters
    ----------
    telescope_configuration : string
        Filename of the telescope configuration file.
    input_tracks_file : string
        Filename of the input tracks file.
''' telescope = Telescope(telescope_configuration) logging.info('=== Selecting tracks of %d DUTs ===' % len(select_duts)) if not output_tracks_file: output_tracks_file = os.path.splitext(input_tracks_file)[0] + '_selected.h5' # Check select_duts # Check for value errors if not isinstance(select_duts, Iterable): raise ValueError("select_duts is no iterable") elif not select_duts: # empty iterable raise ValueError("select_duts has no items") # Check if only non-iterable in iterable if not all(map(lambda val: isinstance(val, (int,)), select_duts)): raise ValueError("not all items in select_duts are integer") # Create select_hit_duts if select_hit_duts is None: # If None, use no selection select_hit_duts = [[] for _ in select_duts] # Check iterable and length if not isinstance(select_hit_duts, Iterable): raise ValueError("select_hit_duts is no iterable") elif not select_hit_duts: # empty iterable raise ValueError("select_hit_duts has no items") # Check if only non-iterable in iterable if all(map(lambda val: not isinstance(val, Iterable), select_hit_duts)): select_hit_duts = [select_hit_duts[:] for _ in select_duts] # Check if only iterable in iterable if not all(map(lambda val: isinstance(val, Iterable), select_hit_duts)): raise ValueError("not all items in select_hit_duts are iterable") # Finally check length of all arrays if len(select_hit_duts) != len(select_duts): # empty iterable raise ValueError("select_hit_duts has the wrong length") # Create select_no_hit_duts if select_no_hit_duts is None: # If None, use no selection select_no_hit_duts = [[] for _ in select_duts] # Check iterable and length if not isinstance(select_no_hit_duts, Iterable): raise ValueError("select_no_hit_duts is no iterable") elif not select_no_hit_duts: # empty iterable raise ValueError("select_no_hit_duts has no items") # Check if only non-iterable in iterable if all(map(lambda val: not isinstance(val, Iterable), select_no_hit_duts)): select_no_hit_duts = [select_no_hit_duts[:] for _ in select_duts] # Check if only iterable in iterable if not all(map(lambda val: isinstance(val, Iterable), select_no_hit_duts)): raise ValueError("not all items in select_no_hit_duts are iterable") # Finally check length of all arrays if len(select_no_hit_duts) != len(select_duts): # empty iterable raise ValueError("select_no_hit_duts has the wrong length") # Create select_quality_duts if select_quality_duts is None: # If None, use no selection select_quality_duts = [[] for _ in select_duts] # Check iterable and length if not isinstance(select_quality_duts, Iterable): raise ValueError("select_quality_duts is no iterable") elif not select_quality_duts: # empty iterable raise ValueError("select_quality_duts has no items") # Check if only non-iterable in iterable if all(map(lambda val: not isinstance(val, Iterable), select_quality_duts)): select_quality_duts = [select_quality_duts[:] for _ in select_duts] # Check if only iterable in iterable if not all(map(lambda val: isinstance(val, Iterable), select_quality_duts)): raise ValueError("not all items in select_quality_duts are iterable") # Finally check length of all arrays if len(select_quality_duts) != len(select_duts): # empty iterable raise ValueError("select_quality_duts has the wrong length") # Create select_no_quality_duts if select_no_quality_duts is None: # If None, use no selection select_no_quality_duts = [[] for _ in select_duts] # Check iterable and length if not isinstance(select_no_quality_duts, Iterable): raise ValueError("select_no_quality_duts is no iterable") elif not 
select_no_quality_duts: # empty iterable raise ValueError("select_no_quality_duts has no items") # Check if only non-iterable in iterable if all(map(lambda val: not isinstance(val, Iterable), select_no_quality_duts)): select_no_quality_duts = [select_no_quality_duts[:] for _ in select_duts] # Check if only iterable in iterable if not all(map(lambda val: isinstance(val, Iterable), select_no_quality_duts)): raise ValueError("not all items in select_no_quality_duts are iterable") # Finally check length of all arrays if len(select_no_quality_duts) != len(select_duts): # empty iterable raise ValueError("select_no_quality_duts has the wrong length") # Create condition if condition is None: # If None, use empty strings for all DUTs condition = ['' for _ in select_duts] # Check if iterable if isinstance(condition, str): condition = [condition] * len(select_duts) # Check if only strings in iterable if not all(map(lambda val: isinstance(val, str), condition)): raise ValueError("not all items in condition are strings") # Finally check length of all arrays if len(condition) != len(select_duts): # empty iterable raise ValueError("condition has the wrong length") with tb.open_file(input_tracks_file, mode='r') as in_file_h5: with tb.open_file(output_tracks_file, mode="w") as out_file_h5: for index, actual_dut_index in enumerate(select_duts): node = in_file_h5.get_node(in_file_h5.root, 'Tracks_DUT%d' % actual_dut_index) logging.info('== Selecting tracks for %s ==', telescope[actual_dut_index].name) hit_flags = 0 hit_mask = 0 for dut in select_hit_duts[index]: hit_flags |= (1 << dut) hit_mask |= (1 << dut) for dut in select_no_hit_duts[index]: hit_mask |= (1 << dut) quality_flags = 0 quality_mask = 0 for dut in select_quality_duts[index]: quality_flags |= (1 << dut) quality_mask |= (1 << dut) for dut in select_no_quality_duts[index]: quality_mask |= (1 << dut) tracks_table_out = out_file_h5.create_table( where=out_file_h5.root, name=node.name, description=node.dtype, title=node.title, filters=tb.Filters( complib='blosc', complevel=5, fletcher32=False)) total_n_tracks = node.shape[0] total_n_tracks_stored = 0 total_n_events_stored = 0 progress_bar = tqdm(total=total_n_tracks, ncols=80) for tracks, index_chunk in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size): n_tracks_chunk = tracks.shape[0] if hit_mask != 0 or quality_mask != 0: select = np.ones(n_tracks_chunk, dtype=np.bool) if hit_mask != 0: select &= ((tracks['hit_flag'] & hit_mask) == hit_flags) if quality_mask != 0: select &= ((tracks['quality_flag'] & quality_mask) == quality_flags) tracks = tracks[select] if condition[index]: tracks = _select_rows_with_condition(tracks, condition[index]) unique_events = np.unique(tracks["event_number"]) n_events_chunk = unique_events.shape[0] # print "n_events_chunk", n_events_chunk # print "n_tracks_chunk", n_tracks_chunk if max_events: if total_n_tracks == index_chunk: # last chunk, adding all remaining events select_n_events = max_events - total_n_events_stored elif total_n_events_stored == 0: # first chunk select_n_events = int(round(max_events * (n_tracks_chunk / total_n_tracks))) else: # calculate correction of number of selected events correction = (total_n_tracks - index_chunk)/total_n_tracks * 1 / (((total_n_tracks-last_index_chunk)/total_n_tracks)/((max_events-total_n_events_stored_last)/max_events)) \ + (index_chunk)/total_n_tracks * 1 / (((last_index_chunk)/total_n_tracks)/((total_n_events_stored_last)/max_events)) # select_n_events = np.ceil(n_events_chunk * correction) # # calculate 
correction of number of selected events # correction = 1/(((total_n_tracks-last_index_chunk)/total_n_tracks_last)/((max_events-total_n_events_stored_last)/max_events)) select_n_events = int(round(max_events * (n_tracks_chunk / total_n_tracks) * correction)) # print "correction", correction # do not store more events than in current chunk select_n_events = min(n_events_chunk, select_n_events) # do not store more events than given by max_events select_n_events = min(select_n_events, max_events - total_n_events_stored) np.random.seed(seed=0) selected_events = np.random.choice(unique_events, size=select_n_events, replace=False) store_n_events = selected_events.shape[0] total_n_events_stored += store_n_events # print "store_n_events", store_n_events selected_tracks = np.in1d(tracks["event_number"], selected_events) store_n_tracks = np.count_nonzero(selected_tracks) # TODO: total_n_tracks_stored not used... total_n_tracks_stored += store_n_tracks tracks = tracks[selected_tracks] tracks_table_out.append(tracks) tracks_table_out.flush() total_n_events_stored_last = total_n_events_stored total_n_tracks_last = total_n_tracks last_index_chunk = index_chunk progress_bar.update(index_chunk) progress_bar.close() # print "***************" # print "total_n_tracks_stored", total_n_tracks_stored # print "total_n_events_stored", total_n_events_stored def _select_rows_with_condition(rec_array, condition): for variable in set(re.findall(r'(\d*[a-zA-Z_]+\d*)', condition)): exec(variable + ' = rec_array[\'' + variable + '\']') # expose variables; not a copy, this is just a reference return rec_array[ne.evaluate(condition, casting="safe")]
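
# --- Illustrative usage sketch (added; not part of the original module) ---
# A hedged example of calling the two functions above. The file names, DUT
# indices and hit requirements are hypothetical; only keyword arguments from
# the signatures above are used. Wrapped in a function so the sample stays
# importable without side effects.
def _example_track_selection():
    # shrink a tracks file to at most 10000 randomly chosen events
    reduce_events(input_file='Tracks.h5', max_events=10000)  # hypothetical file
    # keep only tracks for DUTs 1 and 2 that have hits in DUTs 0 and 3
    select_tracks(
        telescope_configuration='telescope.yaml',  # hypothetical configuration
        input_tracks_file='Tracks.h5',
        select_duts=[1, 2],
        select_hit_duts=[[0, 3], [0, 3]],
        max_events=10000)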
nilq/baby-python
python
# coding: utf-8

"""
    Gate API v4

    Welcome to Gate.io API  APIv4 provides spot, margin and futures trading operations. There are public APIs to retrieve the real-time market statistics, and private APIs which needs authentication to trade on user's behalf.  # noqa: E501

    Contact: support@mail.gate.io
    Generated by: https://openapi-generator.tech
"""


import pprint
import re  # noqa: F401

import six

from gate_api.configuration import Configuration


class OptionsAccountBook(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {'time': 'float', 'change': 'str', 'balance': 'str', 'type': 'str', 'text': 'str'}

    attribute_map = {'time': 'time', 'change': 'change', 'balance': 'balance', 'type': 'type', 'text': 'text'}

    def __init__(
        self, time=None, change=None, balance=None, type=None, text=None, local_vars_configuration=None
    ):  # noqa: E501
        # type: (float, str, str, str, str, Configuration) -> None
        """OptionsAccountBook - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._time = None
        self._change = None
        self._balance = None
        self._type = None
        self._text = None
        self.discriminator = None

        if time is not None:
            self.time = time
        if change is not None:
            self.change = change
        if balance is not None:
            self.balance = balance
        if type is not None:
            self.type = type
        if text is not None:
            self.text = text

    @property
    def time(self):
        """Gets the time of this OptionsAccountBook.  # noqa: E501

        Change time  # noqa: E501

        :return: The time of this OptionsAccountBook.  # noqa: E501
        :rtype: float
        """
        return self._time

    @time.setter
    def time(self, time):
        """Sets the time of this OptionsAccountBook.

        Change time  # noqa: E501

        :param time: The time of this OptionsAccountBook.  # noqa: E501
        :type: float
        """

        self._time = time

    @property
    def change(self):
        """Gets the change of this OptionsAccountBook.  # noqa: E501

        Amount changed  # noqa: E501

        :return: The change of this OptionsAccountBook.  # noqa: E501
        :rtype: str
        """
        return self._change

    @change.setter
    def change(self, change):
        """Sets the change of this OptionsAccountBook.

        Amount changed  # noqa: E501

        :param change: The change of this OptionsAccountBook.  # noqa: E501
        :type: str
        """

        self._change = change

    @property
    def balance(self):
        """Gets the balance of this OptionsAccountBook.  # noqa: E501

        Account total balance after change  # noqa: E501

        :return: The balance of this OptionsAccountBook.  # noqa: E501
        :rtype: str
        """
        return self._balance

    @balance.setter
    def balance(self, balance):
        """Sets the balance of this OptionsAccountBook.

        Account total balance after change  # noqa: E501

        :param balance: The balance of this OptionsAccountBook.  # noqa: E501
        :type: str
        """

        self._balance = balance

    @property
    def type(self):
        """Gets the type of this OptionsAccountBook.  # noqa: E501

        Changing Type: - dnw: Deposit & Withdraw - prem: Trading premium - fee: Trading fee - refr: Referrer rebate - point_dnw: POINT Deposit & Withdraw - point_fee: POINT Trading fee - point_refr: POINT Referrer rebate  # noqa: E501

        :return: The type of this OptionsAccountBook.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this OptionsAccountBook.

        Changing Type: - dnw: Deposit & Withdraw - prem: Trading premium - fee: Trading fee - refr: Referrer rebate - point_dnw: POINT Deposit & Withdraw - point_fee: POINT Trading fee - point_refr: POINT Referrer rebate  # noqa: E501

        :param type: The type of this OptionsAccountBook.  # noqa: E501
        :type: str
        """

        self._type = type

    @property
    def text(self):
        """Gets the text of this OptionsAccountBook.  # noqa: E501

        custom text  # noqa: E501

        :return: The text of this OptionsAccountBook.  # noqa: E501
        :rtype: str
        """
        return self._text

    @text.setter
    def text(self, text):
        """Sets the text of this OptionsAccountBook.

        custom text  # noqa: E501

        :param text: The text of this OptionsAccountBook.  # noqa: E501
        :type: str
        """

        self._text = text

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item,
                        value.items(),
                    )
                )
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OptionsAccountBook):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, OptionsAccountBook):
            return True

        return self.to_dict() != other.to_dict()
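
# --- Illustrative construction sketch (added; not part of the generated module) ---
# A hedged example of building an entry and serializing it; the field values
# are invented, but the field names, types and the 'fee' entry type come from
# the property docstrings above.
def _example_options_account_book():
    entry = OptionsAccountBook(
        time=1632000000.0,   # change time as a float timestamp
        change="-0.5",
        balance="99.5",
        type="fee",          # trading fee, one of the documented types
        text="order fee")
    # to_dict() maps each attribute through attribute_map, e.g.
    # {'time': 1632000000.0, 'change': '-0.5', 'balance': '99.5', ...}
    return entry.to_dict()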
nilq/baby-python
python
import pathlib
import numpy as np
import matplotlib.pyplot as plt
from os import listdir
from tqdm import tqdm
from visionutils import flow2mag

# workaround for bug https://github.com/tqdm/tqdm/issues/481
tqdm.monitor_interval = 0

font = {'family': 'DejaVu Sans',
        'weight': 'bold',
        'size': 50}
plt.rc('font', **font)


def movie(solution, rundir):
    # directory where to save the movie
    paths = rundir.split('/')
    paths[0] = "movies"
    moviedir = '/'.join(paths)

    # directory where to save the optical flow
    paths[0] = "flows"
    flowdir = '/'.join(paths)

    # create directory if it does not exist
    pathlib.Path(moviedir).mkdir(parents=True, exist_ok=True)
    pathlib.Path(flowdir).mkdir(parents=True, exist_ok=True)

    # setup progress bar
    nimgs = len(listdir(rundir))
    progress = tqdm(total=nimgs)

    # save original and predicted frames
    for t, (imgtrue, imghat) in enumerate(solution.play(rundir)):
        # convert from torch (channels-first) to matplotlib (channels-last) format
        if imgtrue.shape[0] == 3:  # RGB
            imgtrue = imgtrue.transpose([1, 2, 0])
            imghat = imghat.transpose([1, 2, 0])
        elif imgtrue.shape[0] == 2:  # FLOW; elif so RGB frames are not re-sliced below
            flowtrue = np.copy(imgtrue.transpose([1, 2, 0]))
            flowhat = np.copy(imghat.transpose([1, 2, 0]))
            np.save(flowdir + "/{:04}.npy".format(t + 1), flowhat)
            imgtrue = flow2mag(flowtrue)
            imghat = flow2mag(flowhat)
        else:  # single-channel
            imgtrue = imgtrue[0, :, :]
            imghat = imghat[0, :, :]

        fig, ax = plt.subplots(1, 2, figsize=(20, 20))
        plt.subplot(1, 2, 1)
        plt.imshow(imgtrue, cmap="binary_r")
        plt.gca().axes.xaxis.set_ticklabels([])
        plt.gca().axes.yaxis.set_ticklabels([])
        plt.axis("off")
        plt.title("original", fontsize=50)
        plt.subplot(1, 2, 2)
        plt.imshow(imghat, cmap="binary_r")
        plt.axis("off")
        plt.title("neural network", fontsize=50)
        plt.annotate("time {:04}".format(t + 1), xy=(.01, .92), xycoords="figure fraction")
        plt.tight_layout()
        plt.savefig(moviedir + "/{:04}.png".format(t + 1), bbox_inches="tight")
        plt.close()

        progress.update()


def diffplot(solution, rundir):
    # directory name for saving the diff plot
    paths = rundir.split('/')
    paths[0] = "diffplots"
    diffdir = '/'.join(paths)

    # create directory if it does not exist
    pathlib.Path(diffdir).mkdir(parents=True, exist_ok=True)

    trues, fakes = [], []
    for (imgtrue, imghat) in solution.play(rundir):
        trues.append(imgtrue)
        fakes.append(imghat)

    dtrues = np.diff(trues)
    dfakes = np.diff(fakes)

    dtrues = [np.sum(np.abs(d)) for d in dtrues]
    dfakes = [np.sum(np.abs(d)) for d in dfakes]

    X = np.array([dtrues, dfakes]).T
    np.savetxt(diffdir + "/plot.dat", X,
               header="1st column = original, 2nd column = neural network")

    fig = plt.figure(figsize=(20, 20))
    plt.plot(dtrues / dtrues[0], label="original")
    plt.plot(dfakes / dfakes[0], label="neural network")
    plt.xlabel("time step")
    plt.ylabel("normalized difference")
    plt.legend()
    plt.savefig(diffdir + "/plot.png", bbox_inches="tight")
    plt.close()
nilq/baby-python
python
#Declare and initialize the variables
monthlyPayment = 0
loanAmount = 0
interestRate = 0
numberOfPayments = 0
loanDurationInYears = 0

#Ask the user for the values needed to calculate the monthly payments
strLoanAmount = input("How much money will you borrow? ")
strInterestRate = input("What is the interest rate on the loan? ")
strLoanDurationInYears = input("How many years will it take you to pay off the loan? ")

#Convert the strings into floating numbers so we can use them in the formula
loanDurationInYears = float(strLoanDurationInYears)
loanAmount = float(strLoanAmount)
interestRate = float(strInterestRate)

#Since payments are once per month, number of payments is number of years for the loan * 12
numberOfPayments = loanDurationInYears * 12

#Calculate the monthly payment based on the amortization formula:
#P * r * (1 + r)**n / ((1 + r)**n - 1), where r is the per-payment rate
#(note the growth factor is exponentiation, not multiplication)
monthlyPayment = loanAmount * interestRate * (1 + interestRate) ** numberOfPayments \
    / ((1 + interestRate) ** numberOfPayments - 1)

#provide the result to the user
print("Your monthly payment will be " + str(monthlyPayment))

#Extra credit
print("Your monthly payment will be $%.2f" % monthlyPayment)
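
#--- Added sanity check (illustrative, not part of the original exercise) ---
#With a 0.5% monthly rate over 360 payments on a $200,000 loan, the
#amortization formula gives roughly $1199.10/month; this helper reproduces it
#without prompting for input.
def _check_amortization(principal=200000.0, monthly_rate=0.005, n_payments=360):
    growth = (1 + monthly_rate) ** n_payments
    return principal * monthly_rate * growth / (growth - 1)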
nilq/baby-python
python
from difflib import SequenceMatcher

from six import iteritems

from datadog_checks.base.stubs.common import MetricStub, ServiceCheckStub

'''
Build similar message for better test assertion failure message.
'''

MAX_SIMILAR_TO_DISPLAY = 15


def build_similar_elements_msg(expected, submitted_elements):
    """
    Return formatted similar elements (metrics, service checks) received compared to submitted elements
    """

    similar_metrics = _build_similar_elements(expected, submitted_elements)
    similar_metrics_to_print = []

    for score, metric_stub in similar_metrics[:MAX_SIMILAR_TO_DISPLAY]:
        if metric_stub.tags:
            metric_stub.tags.sort()
        similar_metrics_to_print.append("{:.2f}    {}".format(score, metric_stub))

    return (
        "Expected:\n"
        + "        {}\n".format(expected)
        + "Similar submitted:\n"
        + "Score   Most similar\n"
        + "\n".join(similar_metrics_to_print)
    )


def _build_similar_elements(expected_element, submitted_elements):
    """
    Return similar elements (metrics, service checks) received compared to the submitted elements
    """
    if isinstance(expected_element, MetricStub):
        scoring_fn = _get_similarity_score_for_metric
    elif isinstance(expected_element, ServiceCheckStub):
        scoring_fn = _get_similarity_score_for_service_check
    else:
        raise NotImplementedError("Invalid type: {}".format(expected_element))

    similar_elements = []

    for _, metric_stubs in iteritems(submitted_elements):
        for candidate_metric in metric_stubs:
            score = scoring_fn(expected_element, candidate_metric)
            similar_elements.append((score, candidate_metric))
    return sorted(similar_elements, reverse=True)


def _get_similarity_score_for_metric(expected_metric, candidate_metric):
    # Tuple of (score, weight)
    scores = [(_is_similar_text_score(expected_metric.name, candidate_metric.name), 3)]

    if expected_metric.type is not None:
        score = 1 if expected_metric.type == candidate_metric.type else 0
        scores.append((score, 1))

    if expected_metric.tags is not None:
        score = _is_similar_text_score(str(sorted(expected_metric.tags)), str(sorted(candidate_metric.tags)))
        scores.append((score, 1))

    if expected_metric.value is not None:
        score = 1 if expected_metric.value == candidate_metric.value else 0
        scores.append((score, 1))

    if expected_metric.hostname:
        score = _is_similar_text_score(expected_metric.hostname, candidate_metric.hostname)
        scores.append((score, 1))

    return _compute_score(scores)


def _get_similarity_score_for_service_check(expected_service_check, candidate_service_check):
    # Tuple of (score, weight)
    scores = [(_is_similar_text_score(expected_service_check.name, candidate_service_check.name), 3)]

    if expected_service_check.status is not None:
        score = 1 if expected_service_check.status == candidate_service_check.status else 0
        scores.append((score, 1))

    if expected_service_check.tags is not None:
        score = _is_similar_text_score(
            str(sorted(expected_service_check.tags)), str(sorted(candidate_service_check.tags))
        )
        scores.append((score, 1))

    if expected_service_check.hostname:
        score = _is_similar_text_score(expected_service_check.hostname, candidate_service_check.hostname)
        scores.append((score, 1))

    if expected_service_check.message:
        score = _is_similar_text_score(expected_service_check.message, candidate_service_check.message)
        scores.append((score, 1))

    return _compute_score(scores)


def _compute_score(scores):
    score_total = 0
    weight_total = 0
    for score, weight in scores:
        score_total += score * weight
        weight_total += weight
    return score_total / weight_total


def _is_similar_text_score(a, b):
    return SequenceMatcher(None, a, b).ratio()
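
# --- Illustrative demo (added; not part of the original helper) ---
# A hedged example of the text-similarity primitive used above: two nearly
# identical metric names score close to 1 (about 0.97 for this assumed pair,
# since SequenceMatcher.ratio() is 2*matches / total length).
def _demo_similarity_score():
    return _is_similar_text_score('system.cpu.user', 'system.cpu.usr')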
nilq/baby-python
python
from database.mysql import MySQLDatabase
from settings import db_config

"""
Retrieve the settings from the 'db_config' dictionary
to connect to our database so we can instantiate our
MySQLDatabase object
"""
db = MySQLDatabase(db_config.get('db_name'),
                   db_config.get('user'),
                   db_config.get('pass'),
                   db_config.get('host'))

# Get all the available tables for
# our database and print them out
tables = db.get_available_tables()
print(tables)

# Get all the available columns for our
# articles table and print them out
columns = db.get_columns_for_table('articles')
print(columns)

# Get all the records from the people table
all_records = db.select('people')
print("All records: %s" % str(all_records))

# Get all the records from the people table
# but only the `id` and `first_name` columns
column_specific_records = db.select('people', ['id', 'first_name'])
print("Column specific records: %s" % str(column_specific_records))

# Select data using the WHERE clause
where_expression_records = db.select('people', ['first_name'],
                                     where="first_name='John'")
print("Where Records: %s" % str(where_expression_records))

# Select data using the WHERE clause and the JOIN clause
joined_records = db.select('people', ['first_name'],
                           where="people.id=3",
                           join="orders ON people.id=orders.person_id")
print("Joined records: %s" % str(joined_records))

# Test our new DELETE ROW function
# Delete a record from the database
db.delete('orders', id="=3")

# We can also use a multiple WHERE clause/s
db.delete('orders', id=">4", amount=">1")
nilq/baby-python
python
# -*- coding: utf-8 -*-

# ======================================================================================================================
#  Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France.
#  CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory.
# ======================================================================================================================
"""
This module implements the class |Coord|.
"""

__all__ = ['Coord', 'LinearCoord']

import textwrap

from traitlets import Bool, observe, All, Unicode, Integer

from spectrochempy.core.dataset.ndarray import NDArray
from spectrochempy.core.dataset.ndmath import NDMath, _set_operators
from spectrochempy.utils import colored_output, NOMASK
from spectrochempy.units import Quantity, ur


# ======================================================================================================================
# Coord
# ======================================================================================================================
class Coord(NDMath, NDArray):
    _copy = Bool()

    _html_output = False
    _parent_dim = Unicode(allow_none=True)

    # ------------------------------------------------------------------------------------------------------------------
    # initialization
    # ------------------------------------------------------------------------------------------------------------------
    def __init__(self, data=None, **kwargs):
        """
        Explicit coordinates for a dataset along a given axis.

        The coordinates of a |NDDataset| can be created using the |Coord| object.
        This is a single dimension array with either numerical (float) values or labels (str, `Datetime` objects,
        or any other kind of objects) to represent the coordinates. Only a one numerical axis can be defined,
        but labels can be multiple.

        Parameters
        ----------
        data : ndarray, tuple or list
            The actual data array contained in the |Coord| object.
            The given array (with a single dimension) can be a list, a tuple, a |ndarray|, or a |ndarray|-like object.
            If an object is passed that contains labels, or units, these elements will be used to accordingly set
            those of the created object. If possible, the provided data will not be copied for `data` input, but
            will be passed by reference, so you should make a copy of the `data` before passing it in the object
            constructor if that's the desired behavior or set the `copy` argument to True.
        **kwargs
            See other parameters.

        Other Parameters
        ----------------
        dtype : str or dtype, optional, default=np.float64
            If specified, the data will be casted to this dtype, else the type of the data will be used.
        dims : list of chars, optional
            If specified the list must have a length equal to the number of data dimensions (ndim) and the chars
            must be taken among x,y,z,u,v,w or t. If not specified, the dimension names are automatically attributed
            in this order.
        name : str, optional
            A user friendly name for this object. If not given, the automatic `id` given at the object creation
            will be used as a name.
        labels : array of objects, optional
            Labels for the `data`. labels can be used only for 1D-datasets.
            The labels array may have an additional dimension, meaning several series of labels for the same data.
            The given array can be a list, a tuple, a |ndarray|, a ndarray-like, a |NDArray| or any subclass
            of |NDArray|.
        units : |Unit| instance or str, optional
            Units of the data. If data is a |Quantity| then `units` is set to the unit of the `data`;
            if a unit is also explicitly provided an error is raised. Handling of units use the
            `pint <https://pint.readthedocs.org/>`_ package.
        title : str, optional
            The title of the dimension. It will later be used for instance for labelling plots of the data.
            It is optional but recommended to give a title to each ndarray.
        dlabel : str, optional
            Alias of `title`.
        meta : dict-like object, optional
            Additional metadata for this object. Must be dict-like but no further restriction is placed on meta.
        copy : bool, optional
            Perform a copy of the passed object. Default is False.
        linear : bool, optional
            If set to True, the coordinate is considered as a ``LinearCoord`` object.

        See Also
        --------
        NDDataset : Main SpectroChemPy object: an array with masks, units and coordinates.
        LinearCoord : Implicit linear coordinates.

        Examples
        --------
        We first import the object from the api :

        >>> from spectrochempy import Coord

        We then create a numpy |ndarray| and use it as the numerical `data` axis of our new |Coord| object.

        >>> c0 = Coord.arange(1., 12., 2., title='frequency', units='Hz')
        >>> c0
        Coord: [float64] Hz (size: 6)

        We can take a series of str to create a non numerical but labelled axis :

        >>> tarr = list('abcdef')
        >>> tarr
        ['a', 'b', 'c', 'd', 'e', 'f']

        >>> c1 = Coord(labels=tarr, title='mylabels')
        >>> c1
        Coord: [labels] [ a b c d e f] (size: 6)
        """
        super().__init__(data=data, **kwargs)

        if len(self.shape) > 1:
            raise ValueError('Only 1D arrays can be used to define coordinates')

    # ..................................................................................................................
    def implements(self, name=None):
        """
        Utility to check if the current object implement `Coord`.

        Rather than isinstance(obj, Coord) use object.implements('Coord').

        This is useful to check type without importing the module.
        """
        if name is None:
            return 'Coord'
        else:
            return name == 'Coord'

    # ------------------------------------------------------------------------------------------------------------------
    # readonly property
    # ------------------------------------------------------------------------------------------------------------------

    # ..................................................................................................................
    @property
    def reversed(self):
        """bool - Whether the axis is reversed (readonly property)."""
        if self.units in ['1 / centimeter', 'ppm']:
            return True
        return False

        # Return a correct result only if the data are sorted
        # return bool(self.data[0] > self.data[-1])

    @property
    def default(self):
        # this is in case default is called on a coord, while it is a coordset property
        return self

    # ------------------------------------------------------------------------------------------------------------------
    # hidden properties (for the documentation, only - we remove the docstring)
    # some of the property of NDArray has to be hidden because they are not useful for this Coord class
    # ------------------------------------------------------------------------------------------------------------------

    # NDarray methods

    # ..................................................................................................................
    @property
    def is_complex(self):
        return False  # always real

    # ..................................................................................................................
    @property
    def ndim(self):
        ndim = super().ndim
        if ndim > 1:
            raise ValueError("Coordinate's array should be 1-dimensional!")
        return ndim

    # ..................................................................................................................
    @property
    def T(self):  # no transpose
        return self

    # ..................................................................................................................
    # @property
    # def values(self):
    #     return super().values

    # ..................................................................................................................
    @property
    def masked_data(self):
        return super().masked_data

    # ..................................................................................................................
    @property
    def is_masked(self):
        return False

    # ..................................................................................................................
    @property
    def mask(self):
        return super().mask

    # ..................................................................................................................
    @mask.setter
    def mask(self, val):
        # Coordinates cannot be masked. Set mask always to NOMASK
        self._mask = NOMASK

    # NDmath methods

    # ..................................................................................................................
    def cumsum(self, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def mean(self, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def pipe(self, func=None, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def remove_masks(self, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def std(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def sum(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def swapdims(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def swapaxes(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def squeeze(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def random(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def empty(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def empty_like(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def var(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def ones(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def ones_like(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def full(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def diag(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def diagonal(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def full_like(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def identity(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def eye(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def zeros(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def zeros_like(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def coordmin(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def coordmax(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def conjugate(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def conj(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def abs(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def absolute(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def all(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def any(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def argmax(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def argmin(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def asfortranarray(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def average(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def clip(self, *args, **kwargs):
        raise NotImplementedError

    # ..................................................................................................................
    def get_axis(self, *args, **kwargs):
        return super().get_axis(*args, **kwargs)

    # ..................................................................................................................
    @property
    def origin(self, *args, **kwargs):
        return None

    # ..................................................................................................................
    @property
    def author(self):
        return None

    @property
    def descendant(self):
        return (self.data[-1] - self.data[0]) < 0

    # ..................................................................................................................
    @property
    def dims(self):
        return ['x']

    # ..................................................................................................................
    @property
    def is_1d(self):
        return True

    # ..................................................................................................................
    def transpose(self):
        return self

    # ------------------------------------------------------------------------------------------------------------------
    # public methods
    # ------------------------------------------------------------------------------------------------------------------
    def loc2index(self, loc):
        return self._loc2index(loc)

    # ------------------------------------------------------------------------------------------------------------------
    # special methods
    # ------------------------------------------------------------------------------------------------------------------

    # ..................................................................................................................
    def __copy__(self):
        res = self.copy(deep=False)  # we keep name of the coordinate by default
        res.name = self.name
        return res

    # ..................................................................................................................
    def __deepcopy__(self, memo=None):
        res = self.copy(deep=True, memo=memo)
        res.name = self.name
        return res

    # ..................................................................................................................
    def __dir__(self):
        # remove some methods with respect to the full NDArray
        # as they are not useful for Coord.
        return ['data', 'labels', 'units', 'meta', 'title', 'name', 'offset', 'increment', 'linear', 'roi']

    # ..................................................................................................................
    def __getitem__(self, items, return_index=False):
        # we need to keep the names when copying coordinates to avoid later problems
        res = super().__getitem__(items, return_index=return_index)
        res.name = self.name
        return res

    # ..................................................................................................................
    def __str__(self):
        return repr(self)

    # ..................................................................................................................
    def _cstr(self, header=' coordinates: ... \n', print_size=True, **kwargs):

        indent = kwargs.get('indent', 0)

        out = ''
        if not self.is_empty and print_size:
            out += f'{self._str_shape().rstrip()}\n'
        out += f' title: {self.title}\n' if self.title else ''
        if self.has_data:
            out += '{}\n'.format(self._str_value(header=header))
        elif self.is_empty and not self.is_labeled:
            out += header.replace('...', '\0Undefined\0')

        if self.is_labeled:
            header = ' labels: ... \n'
            text = str(self.labels.T).strip()
            if '\n' not in text:  # single line!
                out += header.replace('...', '\0\0{}\0\0'.format(text))
            else:
                out += header
                out += '\0\0{}\0\0'.format(textwrap.indent(text.strip(), ' ' * 9))

        if out[-1] == '\n':
            out = out[:-1]

        if indent:
            out = "{}".format(textwrap.indent(out, ' ' * indent))

        first_indent = kwargs.get("first_indent", 0)
        if first_indent < indent:
            out = out[indent - first_indent:]

        if not self._html_output:
            return colored_output(out)
        else:
            return out

    # ..................................................................................................................
    def __repr__(self):
        out = self._repr_value().rstrip()
        return out

    # ------------------------------------------------------------------------------------------------------------------
    # Events
    # ------------------------------------------------------------------------------------------------------------------

    # ..................................................................................................................
    @observe(All)
    def _anytrait_changed(self, change):
        # ex: change {
        #     'owner': object,  # The HasTraits instance
        #     'new': 6,         # The new value
        #     'old': 5,         # The old value
        #     'name': "foo",    # The name of the changed trait
        #     'type': 'change', # The event type of the notification, usually 'change'
        # }
        if change.name in ['_linear', '_increment', '_offset', '_size']:
            super()._anytrait_changed(change)


class LinearCoord(Coord):
    _use_time = Bool(False)
    _show_datapoints = Bool(True)
    _zpd = Integer

    def __init__(self, *args, offset=0.0, increment=1.0, **kwargs):
        """
        Linear coordinates.

        Such coordinates correspond to an ascending or descending linear sequence of values, fully determined by
        two parameters, i.e., an offset (off) and an increment (inc) :

        .. math::

            \\mathrm{data} = i * \\mathrm{inc} + \\mathrm{off}

        Parameters
        ----------
        data : a 1D array-like object, optional
            When provided, the `size` parameters is adjusted to the size of the array, and a linearization of the
            array is performed (only if it is possible: regular spacing in the 1.e5 relative accuracy).
        offset : float, optional
            If omitted a value of 0.0 is taken for the coordinate offset.
        increment : float, optional
            If omitted a value of 1.0 is taken for the coordinate increment.

        Other Parameters
        ----------------
        dtype : str or dtype, optional, default=np.float64
            If specified, the data will be casted to this dtype, else the type of the data will be used.
        dims : list of chars, optional
            If specified the list must have a length equal to the number of data dimensions (ndim) and the chars
            must be taken among x,y,z,u,v,w or t. If not specified, the dimension names are automatically attributed
            in this order.
        name : str, optional
            A user friendly name for this object. If not given, the automatic `id` given at the object creation
            will be used as a name.
        labels : array of objects, optional
            Labels for the `data`. labels can be used only for 1D-datasets.
            The labels array may have an additional dimension, meaning several series of labels for the same data.
            The given array can be a list, a tuple, a |ndarray|, a ndarray-like, a |NDArray| or any subclass
            of |NDArray|.
        units : |Unit| instance or str, optional
            Units of the data. If data is a |Quantity| then `units` is set to the unit of the `data`;
            if a unit is also explicitly provided an error is raised. Handling of units use the
            `pint <https://pint.readthedocs.org/>`_ package.
        title : str, optional
            The title of the dimension. It will later be used for instance for labelling plots of the data.
            It is optional but recommended to give a title to each ndarray.
        dlabel : str, optional
            Alias of `title`.
        meta : dict-like object, optional
            Additional metadata for this object. Must be dict-like but no further restriction is placed on meta.
        copy : bool, optional
            Perform a copy of the passed object. Default is False.
        fill_missing : bool
            Create a linear coordinate array where missing data are masked.

        See Also
        --------
        NDDataset : Main SpectroChemPy object: an array with masks, units and coordinates.
        Coord : Explicit coordinates.

        Examples
        --------
        >>> from spectrochempy import LinearCoord, Coord

        To create a linear coordinate, we need to specify an offset, an increment and the size of the data

        >>> c1 = LinearCoord(offset=2.0, increment=2.0, size=10)

        Alternatively, linear coordinates can be created using the ``linear`` keyword

        >>> c2 = Coord(linear=True, offset=2.0, increment=2.0, size=10)
        """
        if args and isinstance(args[0], Coord) and not args[0].linear:
            raise ValueError('Only a linear Coord (with attribute linear set to True) can be transformed into '
                             'a LinearCoord class')

        super().__init__(*args, **kwargs)

        # when data is present, we don't need offset and increment, nor size,
        # we just do linear=True and these parameters are ignored
        if self._data is not None:
            self._linear = True

        elif not self.linear:
            # in case it was not already a linear array
            self.offset = offset
            self.increment = increment
            self._linear = True

    # ..................................................................................................................
    def implements(self, name=None):
        """
        Utility to check if the current object implement `LinearCoord`.

        Rather than isinstance(obj, Coord) use object.implements('LinearCoord').

        This is useful to check type without importing the module.
        """
        if name is None:
            return 'LinearCoord'
        else:
            return name == 'LinearCoord'

    # ..................................................................................................................
    @property  # read only
    def linear(self):
        return self._linear

    # ..................................................................................................................
    def geomspace(self):
        raise NotImplementedError

    # ..................................................................................................................
    def logspace(self):
        raise NotImplementedError

    # ..................................................................................................................
    def __dir__(self):
        # remove some methods with respect to the full NDArray
        # as they are not useful for Coord.
        return ['data', 'labels', 'units', 'meta', 'title', 'name', 'offset', 'increment', 'linear', 'size', 'roi',
                'show_datapoints']

    def set_laser_frequency(self, frequency=15798.26 * ur('cm^-1')):

        if not isinstance(frequency, Quantity):
            frequency = frequency * ur('cm^-1')

        frequency.ito('Hz')
        self.meta.laser_frequency = frequency

        if self._use_time:
            spacing = 1. / frequency
            spacing.ito('picoseconds')

            self.increment = spacing.m
            self.offset = 0
            self._units = ur.picoseconds
            self.title = 'time'

        else:
            frequency.ito('cm^-1')
            spacing = 1. / frequency
            spacing.ito('mm')

            self.increment = spacing.m
            self.offset = -self.increment * self._zpd
            self._units = ur.mm
            self.title = 'optical path difference'

    @property
    def _use_time_axis(self):
        # private property
        # True if time scale must be used for interferogram axis. Else it will be set to optical path difference.
        return self._use_time

    @_use_time_axis.setter
    def _use_time_axis(self, val):

        self._use_time = val
        if 'laser_frequency' in self.meta:
            self.set_laser_frequency(self.meta.laser_frequency)

    @property
    def show_datapoints(self):
        """
        Bool : True if axis must discard values and show only datapoints.
        """
        if 'laser_frequency' not in self.meta or self.units.dimensionality not in ['[time]', '[length]']:
            return False

        return self._show_datapoints

    @show_datapoints.setter
    def show_datapoints(self, val):

        self._show_datapoints = val

    @property
    def laser_frequency(self):
        """
        Quantity: Laser frequency (if needed)
        """
        return self.meta.laser_frequency

    @laser_frequency.setter
    def laser_frequency(self, val):
        self.meta.laser_frequency = val


# ======================================================================================================================
# Set the operators
# ======================================================================================================================
_set_operators(Coord, priority=50)

# ======================================================================================================================
if __name__ == '__main__':
    pass
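
# --- Illustrative usage sketch (added; not part of the original module) ---
# A hedged example that only repeats constructors already demonstrated in the
# docstrings above; wrapped in a function so importing this file stays
# side-effect free.
def _coord_usage_sketch():
    c0 = Coord.arange(1., 12., 2., title='frequency', units='Hz')  # explicit axis
    c1 = LinearCoord(offset=2.0, increment=2.0, size=10)           # implicit linear axis
    return c0, c1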
nilq/baby-python
python
t1 = [[1], [1], [1], [1], [1]]
t2 = [[1], [1, [1, 1]], [1]]
t3 = [[1], [1, [1, [1], 1]], [1]]
nilq/baby-python
python
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode

# code starts here
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include='object')
print(categorical_var)
numerical_var = bank.select_dtypes(include='number')
print(numerical_var)
# code ends here


# --------------
# code starts here
banks = bank.drop(['Loan_ID'], axis=1)
# print(banks.isnull().sum())
bank_mode = banks.mode()
# print(bank_mode.iloc[0])
banks = banks.fillna(bank_mode.iloc[0])
print(banks.isnull().sum())
# code ends here


# --------------
# Code starts here
avg_loan_amount = banks.pivot_table(index=['Gender', 'Married', 'Self_Employed'],
                                    values='LoanAmount')
# code ends here


# --------------
# code starts here
# combine both conditions with a single boolean mask (chained boolean
# indexing with masks of different lengths is unreliable in pandas)
loan_approved_se = len(banks[(banks['Self_Employed'] == 'Yes') &
                             (banks['Loan_Status'] == 'Y')])
print(loan_approved_se)

loan_approved_nse = len(banks[(banks['Self_Employed'] == 'No') &
                              (banks['Loan_Status'] == 'Y')])
print(loan_approved_nse)

percentage_se = (loan_approved_se / 614) * 100
percentage_nse = (loan_approved_nse / 614) * 100
# code ends here


# --------------
# code starts here
# print(banks['Loan_Amount_Term'])
loan_term = banks['Loan_Amount_Term'].apply(lambda x: int(x) / 12)
# print(type(loan_term))
big_loan_term = len(loan_term[loan_term >= 25])
print(big_loan_term)
# code ends here


# --------------
# code starts here
loan_groupby = banks.groupby('Loan_Status')[['ApplicantIncome', 'Credit_History']]
print(loan_groupby)
mean_values = loan_groupby.mean()
# code ends here
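
# --------------
# Illustrative alternative (added; not part of the original exercise): the
# same approval percentages with the denominator derived from the data
# instead of the hard-coded 614, in case the CSV ever changes size.
def _approval_percentages(frame):
    n = len(frame)
    se = ((frame['Self_Employed'] == 'Yes') & (frame['Loan_Status'] == 'Y')).sum()
    nse = ((frame['Self_Employed'] == 'No') & (frame['Loan_Status'] == 'Y')).sum()
    return se / n * 100, nse / n * 100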
nilq/baby-python
python
# -----------------------------------------------------------------------------
#     Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens & Font Bureau
#     www.pagebot.io
#
#     P A G E B O T
#
#     Free to use. Licensed under MIT conditions
#     Made for usage in DrawBot, www.drawbot.com
# -----------------------------------------------------------------------------
#
#     Upgrade.py
#
#     Build automatic website for designdesign.space, hosted in github.
#
#     http://upgrade.typetr.com
#     http://localhost:8888/typetr/index.html
#
import os
from pagebot.contexts import HtmlContext
from pagebot.typesetter import Typesetter
from pagebot.composer import Composer
from pagebot.publications.publication import Publication
from pagebot.elements import *
from pagebot.conditions import *

# Path to markdown file, including Python code blocks.
MD_PATH = u"Site.md"
NAME = 'upgrade'
DOMAIN = 'upgrade.typetr.com'

DO_GIT = False
DO_MAMP = not DO_GIT

from website import Website

doc = Website(autoPages=0)
doc.info.cssPath = 'sources/assets/css/main.css'

# Create a Typesetter for this document, then create pages and fill content.
# As no Galley instance is supplied to the Typesetter, it will create one,
# or put the current page/box variables to where the MarkDown file indicates.
t = Typesetter(doc, tryExcept=False, verbose=False)
# Parse the markdown content and execute the embedded Python code blocks.
# The blocks, global defined feedback variables and text content are in the
# typesetter t.galley.
# By default, the typesetter produces a single Galley with content and code
# blocks. In this case it directly writes into the boxes on the Website
# template pages.
t.typesetFile(MD_PATH)

if DO_MAMP:
    # Internal CSS file may be switched off for development.
    view = t.doc.setView('Mamp')
    if not os.path.exists(view.MAMP_PATH):
        print('The local MAMP server application does not exist. Download and install from %s.' % view.MAMP_SHOP_URL)
        os.system(u'open %s' % view.MAMP_SHOP_URL)
    else:
        t.doc.build(path=NAME)
        #t.doc.export('_export/%s.pdf' % NAME, multiPages=True)
        os.system(u'open "%s"' % view.getUrl(NAME))
elif DO_GIT:
    # Make sure outside always has the right generated CSS
    view = t.doc.setView('Git')
    t.doc.build(path=NAME)
    # Open the css file in the default editor of your local system.
    os.system('git pull; git add *;git commit -m "Updating website changes.";git pull; git push')
    os.system(u'open "%s"' % view.getUrl(DOMAIN))
else:
    print('Select DO_MAMP or DO_GIT')
print('Done')
nilq/baby-python
python
# input
while True:
    n = int(input())
    if n == 0:
        break
    tempos = str(input()).split()

    # processing
    tempoTotal = 10
    for i in range(1, len(tempos)):
        if (int(tempos[i]) - int(tempos[i - 1])) < 10:
            tempoTotal += int(tempos[i]) - int(tempos[i - 1])
        else:
            tempoTotal += 10

    # output
    print(tempoTotal)
nilq/baby-python
python
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A class to help start/stop a local apache http server."""

from __future__ import print_function

import logging
import optparse
import os
import subprocess
import sys
import time
import urllib

import google.path_utils
import google.platform_utils


class HttpdNotStarted(Exception):
  pass


def UrlIsAlive(url):
  """Checks to see if we get an http response from |url|.

  We poll the url 5 times with a 1 second delay.  If we don't get a reply
  in that time, we give up and assume the httpd didn't start properly.

  Args:
    url: The URL to check.

  Return:
    True if the url is alive.
  """
  wait_time = 5
  while wait_time > 0:
    try:
      response = urllib.urlopen(url)
      # Server is up and responding.
      return True
    except IOError:
      pass
    wait_time -= 1
    # Wait a second and try again.
    time.sleep(1)

  return False


def ApacheConfigDir(start_dir):
  """Returns a path to the directory holding the Apache config files."""
  return google.path_utils.FindUpward(start_dir, 'tools', 'python',
                                      'google', 'httpd_config')


def GetCygserverPath(start_dir, apache2=False):
  """Returns the path to the directory holding cygserver.exe file."""
  cygserver_path = None
  if apache2:
    cygserver_path = google.path_utils.FindUpward(start_dir, 'third_party',
                                                  'cygwin', 'usr', 'sbin')
  return cygserver_path


def StartServer(document_root=None, output_dir=None, apache2=False):
  """Starts a local server on port 8000 using the basic configuration files.

  Args:
    document_root: If present, specifies the document root for the server;
        otherwise, the filesystem's root (e.g., C:/ or /) will be used.
    output_dir: If present, specifies where to put server logs; otherwise,
        they'll be placed in the system's temp dir (e.g., $TEMP or /tmp).
    apache2: boolean if true will cause this function to configure for
        Apache 2.x as opposed to Apache 1.3.x

  Returns: the ApacheHttpd object that was created
  """
  script_dir = google.path_utils.ScriptDir()
  platform_util = google.platform_utils.PlatformUtility(script_dir)
  if not output_dir:
    output_dir = platform_util.GetTempDirectory()
  if not document_root:
    document_root = platform_util.GetFilesystemRoot()
  apache_config_dir = ApacheConfigDir(script_dir)
  if apache2:
    httpd_conf_path = os.path.join(apache_config_dir, 'httpd2.conf')
  else:
    httpd_conf_path = os.path.join(apache_config_dir, 'httpd.conf')
  mime_types_path = os.path.join(apache_config_dir, 'mime.types')
  start_cmd = platform_util.GetStartHttpdCommand(output_dir,
                                                 httpd_conf_path,
                                                 mime_types_path,
                                                 document_root,
                                                 apache2=apache2)
  stop_cmd = platform_util.GetStopHttpdCommand()
  httpd = ApacheHttpd(start_cmd, stop_cmd, [8000],
                      cygserver_path=GetCygserverPath(script_dir, apache2))
  httpd.StartServer()
  return httpd


def StopServers(apache2=False):
  """Calls the platform's stop command on a newly created server, forcing it
  to stop.

  The details depend on the behavior of the platform stop command. For
  example, it's often implemented to kill all running httpd processes, as
  implied by the name of this function.

  Args:
    apache2: boolean if true will cause this function to configure for
        Apache 2.x as opposed to Apache 1.3.x
  """
  script_dir = google.path_utils.ScriptDir()
  platform_util = google.platform_utils.PlatformUtility(script_dir)
  httpd = ApacheHttpd('', platform_util.GetStopHttpdCommand(), [],
                      cygserver_path=GetCygserverPath(script_dir, apache2))
  httpd.StopServer(force=True)


class ApacheHttpd(object):
  def __init__(self, start_command, stop_command, port_list,
               cygserver_path=None):
    """Args:
        start_command: command list to call to start the httpd
        stop_command: command list to call to stop the httpd if one has been
            started.  May kill all httpd processes running on the machine.
        port_list: list of ports expected to respond on the local machine when
            the server has been successfully started.
        cygserver_path: Path to cygserver.exe. If specified, exe will be
            started with server as well as stopped when server is stopped.
    """
    self._http_server_proc = None
    self._start_command = start_command
    self._stop_command = stop_command
    self._port_list = port_list
    self._cygserver_path = cygserver_path

  def StartServer(self):
    if self._http_server_proc:
      return
    if self._cygserver_path:
      cygserver_exe = os.path.join(self._cygserver_path, "cygserver.exe")
      cygbin = google.path_utils.FindUpward(cygserver_exe, 'third_party',
                                            'cygwin', 'bin')
      env = os.environ
      env['PATH'] += ";" + cygbin
      subprocess.Popen(cygserver_exe, env=env)
    logging.info('Starting http server')
    self._http_server_proc = subprocess.Popen(self._start_command)

    # Ensure that the server is running on all the desired ports.
    for port in self._port_list:
      if not UrlIsAlive('http://127.0.0.1:%s/' % str(port)):
        raise HttpdNotStarted('Failed to start httpd on port %s' % str(port))

  def StopServer(self, force=False):
    """If we started an httpd.exe process, or if force is True, call
    self._stop_command (passed in on init so it can be platform-dependent).
    This will presumably kill it, and may also kill any other httpd.exe
    processes that are running.
    """
    if force or self._http_server_proc:
      logging.info('Stopping http server')
      kill_proc = subprocess.Popen(self._stop_command,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
      logging.info('%s\n%s' % (kill_proc.stdout.read(),
                               kill_proc.stderr.read()))
      self._http_server_proc = None
      if self._cygserver_path:
        subprocess.Popen(["taskkill.exe", "/f", "/im", "cygserver.exe"],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)


def main():
  # Provide some command line params for starting/stopping the http server
  # manually.
  option_parser = optparse.OptionParser()
  option_parser.add_option('-k', '--server',
                           help='Server action (start|stop)')
  option_parser.add_option('-r', '--root',
                           help='Document root (optional)')
  option_parser.add_option('-a', '--apache2', action='store_true',
                           default=False,
                           help='Starts Apache 2 instead of Apache 1.3 '
                                '(default). Ignored on Mac (apache2 is used '
                                'always)')
  options, args = option_parser.parse_args()

  if not options.server:
    print("Usage: %s -k {start|stop} [-r document_root] [--apache2]" %
          sys.argv[0])
    return 1

  document_root = None
  if options.root:
    document_root = options.root

  if 'start' == options.server:
    StartServer(document_root, apache2=options.apache2)
  else:
    StopServers(apache2=options.apache2)


if '__main__' == __name__:
  sys.exit(main())
nilq/baby-python
python
import sys

a = sys.stdin.readline().split()


def main():
    # sort the tokens in descending order so the largest digits come first,
    # then insert '+' before the last token and evaluate the expression;
    # e.g. tokens ['1', '5', '2'] -> '52+1' -> 53
    a.sort(reverse=True)
    a.insert(2, '+')
    return eval(''.join(a))


if __name__ == '__main__':
    ans = main()
    print(ans)
nilq/baby-python
python
from config.config import success, header, proxy_ip_type
from proxyip import proxy_ip


def get_rest_list(sort: int):
    '''
    :param sort: 0 = newest, 1 = lowest price
    :return:
    '''
    api = "https://api-app.ibox.art/nft-mall-web/v1.2/nft/product/getResellList?origin=0&page=1&pageSize=20&sort=%s&type=0" % sort
    while True:
        try:
            if proxy_ip_type:
                http_args = {"url": api, "headers": header(is_login=False), "proxies": proxy_ip()}
            else:
                http_args = {"url": api, "headers": header(is_login=False)}
            req = success.get(**http_args)
            req_json = req.json()
            # sort the listings by price, ascending
            datas = sorted(req_json.get("data").get("list"),
                           key=lambda key: float(key.__getitem__('priceCny')),
                           reverse=False)
            # keep only the six cheapest listings, reduced to the fields we need
            datas = [
                {"gName": item.get("gName"),
                 "albumName": item.get("albumName"),
                 "albumId": item.get("albumId"),
                 "gId": item.get("gId"),
                 "priceCny": item.get("priceCny"),
                 "gNum": item.get("gNum")
                 } for item in datas[0:6]]
            return datas
        except Exception:
            print("Failed to fetch the listing feed, retrying.")
            continue


if __name__ == "__main__":
    test = get_rest_list(sort=0)
    print(test)
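
# --- Illustrative helper (added; not part of the original script) ---
# A hedged example of consuming the result of get_rest_list above: pick the
# cheapest listing, using only keys that the function itself builds.
def cheapest_listing(listings):
    return min(listings, key=lambda item: float(item["priceCny"]))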
nilq/baby-python
python
""" This file contains the necessary to reconstruct the intermediary featuress from a save of the models an inputs Author Hugues """ import torch from pathlib import Path if __name__ == '__main__': import sys sys.path.append("..") from param import data_path file_location = Path(data_path) / Path('models') from models.store_model_SHL import create_filename, Diagnostic_CNN from models.store_model_CIFAR import Diagnostic_ResNet # Diagnostic_ResNet and Diagnostic_CNN will be used for class loading datasets = ["CIFAR_10", "SHL_2018"] sensors = {"CIFAR_10":["CIFAR_10"], "SHL_2018":["Gyr_y", "Acc_norm", "Mag_norm"]} n_trials = 3 *2 #%% def load_data(file_location, dataset, sanity_check=False): """ Loads the data and performs some verificaions on the ordering and performance Parameters ---------- file_location (Path object or str): the absolute or reltive path to the .pickle objects dataset (str): either 'SHL_2018' or 'CIFAR_10' sanity_check (bool): if True, also loads the raw data an makes sure that we can recreate the predictions. Defaults to False Returns ------- data: dict keys = sensor (ex "Acc_norm" or "CIFAR_10") values = dict keys = split ('train' or 'val') values = list of numpy arrays (n_samples, ...) one array per initialization (3*2 = 6 by default) models: dict keys = sensor (ex "Acc_norm" or "CIFAR_10") values = list of PyTorch nn.Module objects ground_truth: dict keys = split ('train' or 'val') values = np array of ints, containing the class between 0 and n-1 """ sensors_list = sensors[dataset] data = {sensor: {split: [] for split in ["train", "val"]} for sensor in sensors_list} models = {sensor: [] for sensor in sensors_list} ground_truth = {split: [] for split in ["train", "val"]} if sanity_check: previous_GT = {"train":None, "val":None} # we will check that # the dataloader does not shuffle the position of the samples # basic sensors for sensor in sensors_list: if sanity_check: train_dataloader, val_dataloader = torch.load(Path(data_path) / Path("models") / Path("dataloaders-"+dataset+"-"+sensor+'.pt')) dataloaders = {'train':train_dataloader, 'val': val_dataloader} for trial_index in range(n_trials): filename = create_filename(dataset, sensor, trial_index) features_filepath = Path(data_path) / Path("models") / Path('features-' + filename) model_filepath = Path(data_path) / Path("models") / Path('model-' + filename) print(f"loading '{features_filepath}'...", end='') features_pred_GT_train, features_pred_GT_val = torch.load(features_filepath) model = torch.load(model_filepath) features_pred_GT = {"train":features_pred_GT_train, "val" :features_pred_GT_val } print(' ... done') for i_split, split in enumerate(["train", "val"]): features, prediction, this_gt = features_pred_GT[split] ground_truth[split] = this_gt # the value is replaced every time, which is not # a problem because all GT should be equal if sanity_check: score_name, score_value = model.validate(dataloaders[split]) print(f" {dataset:5s} {score_name} {100*score_value:.2f} %") if previous_GT[split] is None: previous_GT[split] = this_gt else : assert (previous_GT[split] == this_gt).all(), "the order of the samples changed between runs" data[sensor][split].append(features) model.cpu() # we dont need the model to be on GPU anymore models[sensor].append(model) return data, models, ground_truth #%% if __name__ == "__main__": load_data(file_location, dataset="SHL_2018", sanity_check=True)
nilq/baby-python
python
from discord import DMChannel, User from discord import Message import stummtaube.data.rounds as rounds_management from stummtaube import main from stummtaube.commands import START, JOIN, END from stummtaube.data.game import players from stummtaube.data.round import Round async def handle_message(message: Message) -> None: if not isinstance(message.channel, DMChannel) or message.author == main.client: return if existing_round := rounds_management.get_round_for_reply(message): await handle_reply(existing_round, message) elif message.content == JOIN: join_player(message.author) elif message.content.startswith(START) and message.author in players: await rounds_management.create_round(message) async def handle_reply(existing_round: Round, message: Message) -> None: if message.content == END: await rounds_management.end_round(existing_round) else: await rounds_management.add_new_message(existing_round, message) def join_player(author: User) -> None: players.add(author)
nilq/baby-python
python
# Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from openvino.tools.mo.ops.range import Range from openvino.tools.mo.front.extractor import FrontExtractorOp from openvino.tools.mo.graph.graph import Node class RangeFrontExtractor(FrontExtractorOp): op = 'Range' enabled = True @classmethod def extract(cls, node: Node): # output_type attribute will be deduced during shape infer Range.update_node_stat(node, {}) return cls.enabled
nilq/baby-python
python
from .bslcp import bslcp from .phoenix14 import phoenix14 __all__ = ( "bslcp", "phoenix14", )
nilq/baby-python
python
from __future__ import division, print_function import numpy as np from mlfromscratch.unsupervised_learning import Apriori def main(): # Demo transaction set # Example 2: https://en.wikipedia.org/wiki/Apriori_algorithm transactions = np.array([[1, 2, 3, 4], [1, 2, 4], [1, 2], [2, 3, 4], [2, 3], [3, 4], [2, 4]]) print("+-------------+") print("| Apriori |") print("+-------------+") min_sup = 0.25 min_conf = 0.8 print("Minimum Support: %.2f" % (min_sup)) print("Minimum Confidence: %s" % (min_conf)) print("Transactions:") for transaction in transactions: print("\t%s" % transaction) apriori = Apriori(min_sup=min_sup, min_conf=min_conf) # Get and print the frequent itemsets frequent_itemsets = apriori.find_frequent_itemsets(transactions) print("Frequent Itemsets:\n\t%s" % frequent_itemsets) # Get and print the rules rules = apriori.generate_rules(transactions) print("Rules:") for rule in rules: print("\t%s -> %s (support: %.2f, confidence: %s)" % (rule.antecedent, rule.concequent, rule.support, rule.confidence,)) if __name__ == "__main__": main()
nilq/baby-python
python
from aiogram import types from ..bot import bot, dispatcher @dispatcher.message_handler(commands=["start"]) async def start_handler(message: types.Message): await message.answer(bot.phrases.start_message)
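The handlers above rely on a sibling module (imported as ..bot) that constructs the bot and dispatcher; a plausible sketch of that module for aiogram 2.x, with the token source and the phrases object as assumptions:

import os

from aiogram import Bot, Dispatcher

bot = Bot(token=os.environ['BOT_TOKEN'])  # hypothetical token source
dispatcher = Dispatcher(bot)


class _Phrases:
    start_message = "Hello!"  # placeholder text


bot.phrases = _Phrases()  # the /start handler reads bot.phrases.start_message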
nilq/baby-python
python
from itertools import chain

import matplotlib.pyplot as plt  # used by Polygon.plot(); missing from the original imports

from euclidean.R2.cartesian import P2, V2, cross2
from euclidean.R2.line import LineSegment

from .hull import convex_hull
from .line_sweep import shamos_hoey


class Polygon:
    """

    """

    @classmethod
    def ConvexHull(cls, points):
        return cls(convex_hull(points), is_convex=True)

    def __init__(self, points, is_convex=None):
        self._points = tuple(points)
        if len(self._points) < 3:
            raise ValueError("At least 3 points are required to define a polygon.")
        self._min_index = _min_idx(self._points)
        self.__is_convex = is_convex

    def __len__(self):
        return len(self._points)

    def standard_form(self):
        """Normalize point order to begin traversal from minimum point.

        #todo: also detect if CW -> iterate backwards, ie. CCW?
        #todo: make this the default __iter__ method?

        Returns:

        """
        return self._rolled(self._min_index)

    def _rolled(self, offset):
        return _rolled(self._points, offset)

    def _cross_products(self):
        return map(cross2, self._points, self._rolled(1))

    def area(self):
        """Find the area of this polygon.

        Notes:
            This will return an incorrect value if the polygon is complex.

        Returns:

        """
        return 0.5 * abs(sum(self._cross_products()))

    def centroid(self):
        """Find the centroid of this polygon.

        Notes:
            This will return an incorrect value if the polygon is complex.

        Returns:

        """
        cx, cy, a = 0, 0, 0
        for p1, p2 in zip(self._points, self._rolled(1)):
            cross = cross2(p1, p2)
            cx += (p1.x + p2.x) * cross
            cy += (p1.y + p2.y) * cross
            a += cross
        a *= 3
        return P2(cx / a, cy / a)

    def translate(self, vector):
        return Polygon(p + vector for p in self._points)

    def centered_at(self, new_center_point):
        """Copy this polygon centered at the provided point.

        Returns:
            (Polygon):

        """
        vector = new_center_point - self.centroid()
        return Polygon(p + vector for p in self._points)

    def rotate(self, radians, center_point=None):
        """Rotate the polygon by radians around a center point or the centroid if none is provided.
Args: radians: center_point: Returns: (Polygon) """ center_point = center_point if center_point else self.centroid() return Polygon(p.rotate(radians, center_point) for p in self._points) def points(self): return self._points def xs(self): return (p.x for p in self._points) def ys(self): return (p.y for p in self._points) def __ccws(self): return map(P2.CCW, self._rolled(0), self._rolled(1), self._rolled(2)) def __maybe_convex(self): return all(c <= 0 for c in self.__ccws()) or all(c >= 0 for c in self.__ccws()) def is_convex(self): if self.__is_convex is None: self.__is_convex = len(self._points) < 4 or ( self.__maybe_convex() and self.is_simple() ) return self.__is_convex def is_simple(self): return shamos_hoey(self.edges()) def edges(self): return map(LineSegment, self._points, chain(self._points[1:], self._points[:1])) def contains(self, test_point, atol=1e-6, closed=True): if self.winding_number(test_point) > 0: return True if closed: return self.on_perimeter(test_point, atol) return False def perimeter(self): return sum(edge.length() for edge in self.edges()) def on_perimeter(self, point, atol=1e-6): return any(edge.contains(point, atol) for edge in self.edges()) def winding_number(self, test_point): order = sum(self._cross_products()) wn = 0 for edge in self.edges(): if edge._p1.y <= test_point.y: if edge._p2.y > test_point.y: if order * P2.CCW(edge._p1, edge._p2, test_point) > 0: wn += 1 else: if edge._p2.y <= test_point.y: if order * P2.CCW(edge._p1, edge._p2, test_point) < 0: wn -= 1 return wn def __eq__(self, other): if not isinstance(other, Polygon): return NotImplemented if len(self._points) != len(other._points): return False for p1, p2 in zip(self.standard_form(), other.standard_form()): if p1 != p2: return False return True def __ne__(self, other): return not self == other def plot(self, **kwargs): xs = list(self.xs()) xs.append(xs[0]) ys = list(self.ys()) ys.append(ys[0]) return plt.plot(xs, ys, **kwargs) def _rolled(points, offset): return chain(points[offset:], points[:offset]) def _standard_form(points): return tuple(_rolled(points, _min_idx(points))) def _min_idx(points): min_idx = 0 for idx, point in enumerate(points): if point._coords < points[min_idx]._coords: min_idx = idx return min_idx
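A short usage sketch for the class above; it assumes P2 behaves as a plain 2D point constructor, consistent with the imports at the top of the file.

from euclidean.R2.cartesian import P2

square = Polygon([P2(0, 0), P2(2, 0), P2(2, 2), P2(0, 2)])
print(square.area())              # 4.0
print(square.centroid())          # the point (1.0, 1.0)
print(square.is_convex())         # True
print(square.contains(P2(1, 1)))  # True: non-zero winding number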
nilq/baby-python
python
from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ("auth", "0006_require_contenttypes_0002"), ] operations = [ migrations.CreateModel( name="Booking", fields=[ ( "id", models.AutoField( verbose_name="ID", serialize=False, auto_created=True, primary_key=True, ), ), ("confirmedOn", models.DateTimeField(null=True, blank=True)), ("cancelledOn", models.DateTimeField(null=True, blank=True)), ("datePaid", models.DateTimeField(null=True, blank=True)), ("exempt_of_payment", models.BooleanField(default=False)), ( "cancelledBy", models.ForeignKey( related_name="cancelled_bookings", blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.deletion.SET_NULL, ), ), ], options={"ordering": ["id"]}, ), migrations.CreateModel( name="BookingOption", fields=[ ( "id", models.AutoField( verbose_name="ID", serialize=False, auto_created=True, primary_key=True, ), ), ( "booking", models.ForeignKey( related_name="options", to="oneevent.Booking", on_delete=models.deletion.CASCADE, ), ), ], options={"ordering": ["option__choice__id", "option__id", "id"]}, ), migrations.CreateModel( name="Choice", fields=[ ( "id", models.AutoField( verbose_name="ID", serialize=False, auto_created=True, primary_key=True, ), ), ("title", models.CharField(max_length=64)), ], options={"ordering": ["id"]}, ), migrations.CreateModel( name="Event", fields=[ ( "id", models.AutoField( verbose_name="ID", serialize=False, auto_created=True, primary_key=True, ), ), ("title", models.CharField(unique=True, max_length=64)), ("start", models.DateTimeField(help_text="Local start date and time")), ( "end", models.DateTimeField( help_text="Local end date and time", null=True, blank=True ), ), ( "city", models.CharField( help_text="Timezone of your event", max_length=32, choices=[ ("Boston", "Boston"), ("Erding", "Erding"), ("London", "London"), ("Miami", "Miami"), ("Munich", "Munich"), ("Nice", "Nice"), ("Sydney", "Sydney"), ("Toronto", "Toronto"), ("UTC", "UTC"), ], ), ), ("description", models.TextField(blank=True)), ( "pub_status", models.CharField( default="UNPUB", help_text="Public: Visible and bookable by all; Restricted: " "Visible and Bookable by invited groups; Private: " "Visible by participant, bookable by all; " "Unpublished: Visible by organisers, not bookable; " "Archived: Not visible, not bookable", max_length=8, verbose_name="Publication status", choices=[ ("PUB", "Public"), ("REST", "Restricted"), ("PRIV", "Private"), ("UNPUB", "Unpublished"), ("ARCH", "Archived"), ], ), ), ( "location_name", models.CharField( help_text="Venue of your event", max_length=64, null=True, blank=True, ), ), ("location_address", models.TextField(null=True, blank=True)), ( "booking_close", models.DateTimeField( help_text="Limit date and time for registering", null=True, blank=True, ), ), ( "choices_close", models.DateTimeField( help_text="Limit date and time for changing choices", null=True, blank=True, ), ), ( "max_participant", models.PositiveSmallIntegerField( default=0, help_text="Maximum number of participants to this event (0 = " "no limit)", ), ), ( "price_for_employees", models.DecimalField(default=0, max_digits=6, decimal_places=2), ), ( "price_for_contractors", models.DecimalField(default=0, max_digits=6, decimal_places=2), ), ( "price_currency", models.CharField( max_length=3, null=True, verbose_name="Currency for prices", blank=True, ), ), ( "contractors_groups", models.ManyToManyField( 
related_name="contractors_for_event+", verbose_name="Groups considered as Contractors", to="auth.Group", blank=True, ), ), ( "employees_groups", models.ManyToManyField( related_name="employees_for_event+", verbose_name="Groups considered as Employees", to="auth.Group", blank=True, ), ), ( "organisers", models.ManyToManyField( related_name="events_organised", to=settings.AUTH_USER_MODEL, blank=True, ), ), ( "owner", models.ForeignKey( related_name="events_owned", to=settings.AUTH_USER_MODEL, help_text="Main organiser", on_delete=models.deletion.PROTECT, ), ), ], ), migrations.CreateModel( name="Message", fields=[ ( "id", models.AutoField( verbose_name="ID", serialize=False, auto_created=True, primary_key=True, ), ), ( "category", models.CharField( max_length=8, verbose_name="Reason", choices=[ ("QUERY", "Question"), ("COMMENT", "Comment"), ("BUG", "Bug report"), ("FEAT", "Feature request"), ("ADMIN", "Administration Request"), ], ), ), ("title", models.CharField(max_length=128)), ("text", models.TextField(max_length=2048)), ("created", models.DateTimeField(auto_now_add=True)), ("safe_content", models.BooleanField(default=False)), ( "sender", models.ForeignKey( to=settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE ), ), ( "thread_head", models.ForeignKey( related_name="thread", blank=True, to="oneevent.Message", null=True, on_delete=models.deletion.CASCADE, ), ), ], options={"ordering": ["-created"]}, ), migrations.CreateModel( name="Option", fields=[ ( "id", models.AutoField( verbose_name="ID", serialize=False, auto_created=True, primary_key=True, ), ), ("title", models.CharField(max_length=256)), ("default", models.BooleanField(default=False)), ( "choice", models.ForeignKey( related_name="options", to="oneevent.Choice", on_delete=models.deletion.CASCADE, ), ), ], options={"ordering": ["choice__id", "id"]}, ), migrations.CreateModel( name="Session", fields=[ ( "id", models.AutoField( verbose_name="ID", serialize=False, auto_created=True, primary_key=True, ), ), ("title", models.CharField(unique=True, max_length=64)), ("start", models.DateTimeField(help_text="Local start date and time")), ( "end", models.DateTimeField( help_text="Local end date and time", null=True, blank=True ), ), ( "max_participant", models.PositiveSmallIntegerField( default=0, help_text="Maximum number of participants to this session (0 " "= no limit)", ), ), ( "event", models.ForeignKey( related_name="sessions", to="oneevent.Event", on_delete=models.deletion.CASCADE, ), ), ], options={"ordering": ["event", "title"]}, ), migrations.AddField( model_name="choice", name="event", field=models.ForeignKey( related_name="choices", to="oneevent.Event", on_delete=models.deletion.CASCADE, ), ), migrations.AddField( model_name="bookingoption", name="option", field=models.ForeignKey( blank=True, to="oneevent.Option", null=True, on_delete=models.deletion.CASCADE, ), ), migrations.AddField( model_name="booking", name="event", field=models.ForeignKey( related_name="bookings", to="oneevent.Event", on_delete=models.deletion.CASCADE, ), ), migrations.AddField( model_name="booking", name="paidTo", field=models.ForeignKey( related_name="received_payments", blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.deletion.SET_NULL, ), ), migrations.AddField( model_name="booking", name="person", field=models.ForeignKey( related_name="bookings", to=settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE, ), ), migrations.AddField( model_name="booking", name="session", field=models.ForeignKey( related_name="bookings", 
blank=True, to="oneevent.Session", null=True, on_delete=models.deletion.CASCADE, ), ), migrations.AlterUniqueTogether( name="session", unique_together=set([("event", "title")]), ), migrations.AlterUniqueTogether( name="option", unique_together=set([("choice", "title")]), ), migrations.AlterUniqueTogether( name="choice", unique_together=set([("event", "title")]), ), migrations.AlterUniqueTogether( name="bookingoption", unique_together=set([("booking", "option")]), ), migrations.AlterUniqueTogether( name="booking", unique_together=set([("event", "person")]), ), ]
nilq/baby-python
python
import requests
import time


class PickCourse(object):
    def __init__(self):
        """
        Copy and paste your request headers here.
        Just replace the values below that differ for your own session.
        """
        self.headers = {
            'accept': '*/*',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9',
            'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'cookie': 'your cookie',
            'origin': 'https://jw.ustc.edu.cn',
            'referer': 'your referer',
            'sec-ch-ua': '"Google Chrome";v="93", " Not;A Brand";v="99", "Chromium";v="93"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': "macOS",
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36',
            'x-requested-with': 'XMLHttpRequest'
        }
        self.session = requests.Session()

    def add(self, course_id):
        """
        Send a course-selection (add) request.
        """
        url = 'https://jw.ustc.edu.cn//ws/for-std/course-select/add-request'
        payload = {
            'studentAssoc': 'your studentAssoc',
            'lessonAssoc': course_id,  # id of the course to select
            'courseSelectTurnAssoc': '481',  # fill in the value matching your selection turn
            'scheduleGroupAssoc': '',
            'virtualCost': '0'
        }
        r = self.session.post(url, data=payload, headers=self.headers)
        # print(r.text)
        return r.text

    def add_result(self, id):
        """
        Query whether the course-selection request succeeded.
        """
        url = 'https://jw.ustc.edu.cn/ws/for-std/course-select/add-drop-response'
        payload = {
            'studentId': 'your studentId',
            'requestId': id  # value returned by the add request
        }
        r = self.session.post(url, data=payload, headers=self.headers)
        # print(r.json())
        return r.json()

    def pick(self, course_id):
        while True:
            id = self.add(course_id=course_id)
            result = self.add_result(id=id)
            if result is not None:
                if not result['success']:
                    print("Course " + course_id + " selection failed!")
                    time.sleep(0.5)  # retry the selection request every 0.5 seconds
                else:
                    print("Course " + course_id + " selection succeeded!")
                    break


if __name__ == "__main__":
    pick_course = PickCourse()
    pick_course.pick(course_id='137459')  # 137459 Software Architecture
nilq/baby-python
python
import contextlib

from Qt import QtCore


def _iter_model_rows(
    model, column, include_root=False
):
    """Iterate over all row indices in a model"""
    indices = [QtCore.QModelIndex()]  # start iteration at root

    for index in indices:
        # Add children to the iterations
        child_rows = model.rowCount(index)
        for child_row in range(child_rows):
            child_index = model.index(child_row, column, index)
            indices.append(child_index)

        if not include_root and not index.isValid():
            continue

        yield index


@contextlib.contextmanager
def preserve_states(
    tree_view, column=0, role=None,
    preserve_expanded=True, preserve_selection=True,
    expanded_role=QtCore.Qt.DisplayRole,
    selection_role=QtCore.Qt.DisplayRole
):
    """Preserves row selection in QTreeView by column's data role.

    This function is created to maintain the selection status of
    the model items. When a refresh is triggered the items which are
    expanded will stay expanded and vice versa.

        tree_view (QtWidgets.QTreeView): the tree view nested in the application
        column (int): the column to retrieve the data from
        role (int): the role which dictates what will be returned

    Returns:
        None

    """
    # When `role` is set then override both expanded and selection roles
    if role:
        expanded_role = role
        selection_role = role

    model = tree_view.model()
    selection_model = tree_view.selectionModel()
    flags = selection_model.Select | selection_model.Rows

    expanded = set()

    if preserve_expanded:
        for index in _iter_model_rows(
            model, column=column, include_root=False
        ):
            if tree_view.isExpanded(index):
                value = index.data(expanded_role)
                expanded.add(value)

    selected = None

    if preserve_selection:
        selected_rows = selection_model.selectedRows()
        if selected_rows:
            selected = set(row.data(selection_role) for row in selected_rows)

    try:
        yield
    finally:
        if expanded:
            for index in _iter_model_rows(
                model, column=0, include_root=False
            ):
                value = index.data(expanded_role)
                is_expanded = value in expanded
                # skip if new index was created meanwhile
                if is_expanded is None:
                    continue
                tree_view.setExpanded(index, is_expanded)

        if selected:
            # Go through all indices, select the ones with similar data
            for index in _iter_model_rows(
                model, column=column, include_root=False
            ):
                value = index.data(selection_role)
                state = value in selected
                if state:
                    tree_view.scrollTo(index)  # Ensure item is visible
                    selection_model.select(index, flags)
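A sketch of the intended call pattern: wrap whatever operation rebuilds the model, and expansion plus selection are restored afterwards by matching each index's data for the chosen role (the refresh call below is a placeholder):

with preserve_states(tree_view, column=0, role=QtCore.Qt.UserRole):
    refresh_model()  # hypothetical function that repopulates tree_view's model
# rows whose role data matches the remembered values are re-expanded
# and re-selected once the context manager exits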
nilq/baby-python
python
#!/usr/bin/env python
# Program 5.1 - "while" repetition structure

x = 1
while x <= 3:
    print(x)
    x = x + 1
print(' END ')
nilq/baby-python
python
# 2019-11-24 20:59:47(JST) import sys def main(): n = int(sys.stdin.readline().rstrip()) m = map(int, sys.stdin.read().split()) ab = list(zip(m, m)) graph = [[] for _ in range(n + 1)] for a, b in ab: graph[a].append(b) graph[b].append(a) root = 1 parent = [0] * (n + 1) order = [] stack = [root] while stack: x = stack.pop() order.append(x) for y in graph[x]: if y == parent[x]: continue parent[y] = x stack.append(y) color = [-1] * (n + 1) for x in order: ng = color[x] c = 1 for y in graph[x]: if y == parent[x]: continue if c == ng: c += 1 color[y] = c c += 1 res = [] for a, b in ab: if parent[a] == b: res.append(color[a]) else: res.append(color[b]) print(max(res)) print('\n'.join(map(str, res))) if __name__ == '__main__': main()
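The solution above can be exercised without a judge by faking stdin; if this sketch is pasted below the definitions, the three-vertex path needs two edge colors:

import io
import sys

sys.stdin = io.StringIO('3\n1 2\n2 3\n')
main()
# prints:
# 2   <- number of colors used
# 1   <- color of edge (1, 2)
# 2   <- color of edge (2, 3)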
nilq/baby-python
python
# Copyright (c) 2017 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import sys from kmip.core import enums from kmip.demos import utils from kmip.pie import client from kmip.pie import objects if __name__ == '__main__': logger = utils.build_console_logger(logging.INFO) # Build and parse arguments parser = utils.build_cli_parser(enums.Operation.SIGN) opts, args = parser.parse_args(sys.argv[1:]) config = opts.config # Build the client and connect to the server with client.ProxyKmipClient( config=config, config_file=opts.config_file ) as client: # Create keys to use for derivation try: signing_key_id = client.register( objects.PrivateKey( enums.CryptographicAlgorithm.RSA, 1024, ( b'\x30\x82\x02\x76\x02\x01\x00\x30\x0d\x06\x09\x2a\x86' b'\x48\x86\xf7\x0d\x01\x01\x01\x05\x00\x04\x82\x02\x60' b'\x30\x82\x02\x5c\x02\x01\x00\x02\x81\x81\x00\xc0\x0f' b'\x0b\x0a\xc5\x72\x36\x81\x71\x6c\x59\xd7\x14\x42\x31' b'\x6a\xb9\xb2\x32\xd8\x91\x65\xab\xaa\x54\xab\xf7\x6a' b'\xdc\xe4\x5c\x9c\x91\x8f\x0c\x90\xa7\x48\x9f\x65\x9c' b'\x3f\xc9\x80\xcb\x51\x05\xf9\x11\x9a\xa2\x13\xd9\x15' b'\x39\x8b\x97\xe8\xf5\xf0\x8b\xa0\xf8\xe5\x34\x47\x2a' b'\xea\x87\xdf\x64\x2a\x16\x5f\xd0\x85\xf5\x8a\x60\xed' b'\x97\xcd\x2b\x96\x72\x04\xf5\xd0\x99\x6a\x53\x90\xc2' b'\xd0\xdf\x38\xa8\x9e\x61\xd0\xb7\x46\xe8\x4e\x48\x7d' b'\x37\x85\x2f\xaf\xba\x70\x06\x31\x07\x65\x13\xb7\x78' b'\xd2\x25\x34\x30\x2c\xf7\x4c\x9e\x17\x02\x03\x01\x00' b'\x01\x02\x81\x80\x16\xc6\xb1\xec\x89\x15\xce\x58\xf3' b'\x76\x82\x00\xfb\xaa\x0d\xea\x36\x33\x67\xcc\x3f\x11' b'\xeb\x95\xbb\x8c\xd9\x3e\x97\x0b\x8d\xe0\x13\x72\xff' b'\x6e\x78\x28\x28\x9f\x08\x34\x98\x54\xe9\xc7\xa6\x09' b'\xaf\x88\xc3\x07\xcf\x8a\xb0\xd4\x59\x23\x8b\x67\x07' b'\x68\x03\x9c\x16\x3d\xa1\xc9\x00\xf3\x31\x0a\x38\x0b' b'\x76\x89\x8d\xb1\x86\x03\xaf\x81\xcb\x47\x37\xd0\x9f' b'\x1c\x99\x6e\xb6\xb9\x7f\x1c\x8a\x07\x88\xb2\x9b\x2b' b'\xc3\xb5\x93\xfd\xfc\x23\x6f\x31\xfb\xf0\xc7\xc1\x83' b'\x86\x6a\x05\xc0\x9a\xfa\x79\x7e\xe3\x02\x80\x06\xa6' b'\x3a\x81\x02\x41\x00\xe8\x06\x53\x54\x96\x8d\xa1\x35' b'\xdf\xf8\x1a\x69\xd1\xbf\x53\x52\xd6\x4f\xe3\xd5\xef' b'\x6d\x31\xd1\x51\xee\x89\x09\x62\x9b\xab\x5b\xfc\x87' b'\xeb\xa7\x22\x1f\x99\x90\x00\x18\xe7\xa5\x78\xe9\x90' b'\xae\xd9\xed\xa4\x25\x91\x11\x0f\x0d\xb1\x1c\xd0\xc4' b'\xbf\x7d\x43\xa7\x02\x41\x00\xd3\xe7\x82\xe9\x84\x59' b'\xff\x1e\x9a\x16\x98\xd3\xaa\xbd\x9f\xae\x56\x52\xe5' b'\x2a\x78\x95\xb1\x61\x27\xc0\xd3\x59\x76\xef\x33\xfd' b'\xc8\xdf\x20\xf8\x79\x92\x90\xe6\x11\x88\xf6\x3b\xd6' b'\xd4\xcc\x43\xc4\x0c\x21\xa0\xec\x29\x68\x6f\x29\xc3' b'\xcb\x58\xa2\x0f\xe0\x11\x02\x40\x38\xd5\x5b\xd2\x0b' b'\x72\xb3\xbb\x53\x9a\x1d\x36\x30\x67\x72\x0c\x87\x6c' b'\x58\x3d\x8e\x01\x2c\x43\xbe\x92\xf4\x44\x35\x40\x36' b'\x50\x38\xe2\x3e\x49\xd9\x24\xee\x63\x84\x72\x95\x43' b'\x46\x03\xc8\x29\xdc\x3d\xc6\x88\x61\x29\x51\x8b\xa4' b'\x07\x8f\xe7\xb1\x94\x08\x5f\x02\x41\x00\xb0\x28\x08' b'\x43\x39\xfc\x5a\xc2\x44\xd4\x3e\x2d\xd0\x05\x9d\x06' 
b'\x1f\xca\xff\xa9\x43\xdf\x25\x3b\x20\x02\x03\x70\x9f' b'\x17\x91\x40\x0b\x49\xba\x2d\xf5\x5a\xab\x4c\x27\x0d' b'\x95\xac\xff\x15\x9d\xcd\x43\xdf\xd5\xe0\xe2\x12\x36' b'\x38\x1b\x1f\x22\x1f\x47\x72\x2d\x11\x02\x40\x20\x9b' b'\x55\xb5\x2d\xce\x33\x45\xed\x29\x2a\x95\xa2\x2b\x03' b'\xa4\x2b\xd3\x75\x8d\xe6\xa1\x24\x0d\x5a\xc4\xe2\x96' b'\x80\x90\x74\xc3\x8d\xaf\x17\x69\x4d\x70\x1d\x62\xaf' b'\x79\x94\xfe\x74\xd3\x7b\x40\x0c\x60\x36\xde\x2c\x51' b'\x4a\x66\x66\x73\x10\x9f\xd7\x86\x7f\x70' ), enums.KeyFormatType.PKCS_8, masks=[ enums.CryptographicUsageMask.SIGN ] ) ) logger.info("Successfully created a new signing key.") logger.info("Signing Key ID: {0}".format(signing_key_id)) except Exception as e: logger.error(e) sys.exit(-1) # Activate the signing key. try: client.activate(signing_key_id) logger.info( "Signing key {0} has been activated.".format(signing_key_id) ) except Exception as e: logger.error(e) sys.exit(-1) # Generate a signature. try: result = client.sign( ( b'\xe1\xc0\xf9\x8d\x53\xf8\xf8\xb1\x41\x90\x57\xd5\xb9\xb1' b'\x0b\x07\xfe\xea\xec\x32\xc0\x46\x3a\x4d\x68\x38\x2f\x53' b'\x1b\xa1\xd6\xcf\xe4\xed\x38\xa2\x69\x4a\x34\xb9\xc8\x05' b'\xad\xf0\x72\xff\xbc\xeb\xe2\x1d\x8d\x4b\x5c\x0e\x8c\x33' b'\x45\x2d\xd8\xf9\xc9\xbf\x45\xd1\xe6\x33\x75\x11\x33\x58' b'\x82\x29\xd2\x93\xc6\x49\x6b\x7c\x98\x3c\x2c\x72\xbd\x21' b'\xd3\x39\x27\x2d\x78\x28\xb0\xd0\x9d\x01\x0b\xba\xd3\x18' b'\xd9\x98\xf7\x04\x79\x67\x33\x8a\xce\xfd\x01\xe8\x74\xac' b'\xe5\xf8\x6d\x2a\x60\xf3\xb3\xca\xe1\x3f\xc5\xc6\x65\x08' b'\xcf\xb7\x23\x78\xfd\xd6\xc8\xde\x24\x97\x65\x10\x3c\xe8' b'\xfe\x7c\xd3\x3a\xd0\xef\x16\x86\xfe\xb2\x5e\x6a\x35\xfb' b'\x64\xe0\x96\xa4' ), uid=signing_key_id, cryptographic_parameters={ 'cryptographic_algorithm': enums.CryptographicAlgorithm.RSA, 'hashing_algorithm': enums.HashingAlgorithm.SHA_1, 'padding_method': enums.PaddingMethod.PSS }, ) logger.info("Signature: {0}".format(result)) except Exception as e: logger.error(e)
nilq/baby-python
python
import asyncio import nertivia import nertivia.bot from nertivia import http URL = "https://nertivia.net/api/messages/channels/" URL_MSG = "https://nertivia.net/api/messages/" URL_STA = "https://nertivia.net/api/settings/status" class Message: # __slots__ = ('id', 'content', 'author') def __init__(self, message): self.id: int = message['message']['messageID'] self.content: str = message['message']['message'] self.channel: nertivia.Channel = http.HTTPClient().get_channel(message["message"]["channelID"]) self.server: nertivia.Server = self.channel.server self.author: str = message['message']['creator']['username'] + '@' + message['message']['creator']['tag'] self.http = nertivia.bot.HTTPClient() @property def _id(self): return self.id @property def _content(self): return self.content @property def _author(self): return self.author async def edit(self, channel, content): await self.http.edit_message(self.id, channel, content) async def delete(self): await self.http.delete_message(self.id, self.channel.id)
nilq/baby-python
python
from TestHelperSuperClass import testHelperSuperClass from unittest.mock import patch import passwordmanpro_cli import datetime from python_Testing_Utilities import assertMultiLineStringsEqual from samplePayloadsAndEnvs import envNoKey, envUrlWithSlash, envAPIKEYFILE, env, resourseResponse, resourseResponseRAW, resourseResponseNoResourses, errorResourseResponseRAW, accountsResponse, accountsResponseRAW, passwordResponse, passwordResponseRAW, userNotAllowedToAccessFromThisHost appObj = passwordmanpro_cli.AppObjClass() class test_AppObj(testHelperSuperClass): def test_withEmptyEnv(self): returnedValue = appObj.run({}, []) self.assertEqual(returnedValue, 'ERROR - you must specify PASSMANCLI_URL enviroment variable\n', msg='Incorrect output') def test_URLWithSlashIsRejected(self): returnedValue = appObj.run(envUrlWithSlash, []) self.assertEqual(returnedValue, 'ERROR - PASSMANCLI_URL can not end with a slash\n', msg='Incorrect output') self.assertEqual(appObj.url,envNoKey['PASSMANCLI_URL']) def test_withNoAuthTokenSet(self): returnedValue = appObj.run(envNoKey, []) self.assertEqual(returnedValue, 'ERROR - you must specify PASSMANCLI_AUTHTOKEN or PASSMANCLI_AUTHTOKENFILE enviroment variable\n', msg='Incorrect output') self.assertEqual(appObj.url,envNoKey['PASSMANCLI_URL']) @patch('passwordmanpro_cli.AppObjClass._getAuthTokenFromFile', return_value='abc123') def test_withAuthTokenSetFromFile(self, _getAuthTokenFromFileResult): returnedValue = appObj.run(envAPIKEYFILE, []) self.assertEqual(appObj.url,envNoKey['PASSMANCLI_URL']) self.assertEqual(appObj.authtoken,'abc123') self.assertEqual(returnedValue, 'ERROR - you must specify at least one argument\n', msg='Incorrect output') def test_MissingArguments(self): returnedValue = appObj.run(env, []) self.assertEqual(appObj.url,envNoKey['PASSMANCLI_URL']) self.assertEqual(appObj.authtoken,env['PASSMANCLI_AUTHTOKEN']) self.assertEqual(returnedValue, 'ERROR - you must specify at least one argument\n', msg='Incorrect output') def test_UnknownCommand(self): returnedValue = appObj.run(env, ['passwordmanpro_cli', 'XXX']) expectedOutput = 'ERROR - Unknown command supplied in first argument\n' expectedOutput += ' Supported Commands -\n' expectedOutput += ' GET\n' expectedOutput += ' JAVAPROPS\n' expectedOutput += ' JSONSINGLELINE\n' expectedOutput += ' JSONSINGLELINEESCAPEQUOTES\n' expectedOutput += ' RAWGET\n' assertMultiLineStringsEqual(returnedValue, expectedOutput, self, "returnedValue", "expectedOutput") def test_GetMissingArguments(self): returnedValue = appObj.run(env, ['passwordmanpro_cli', 'get']) self.assertEqual(appObj.url,envNoKey['PASSMANCLI_URL']) self.assertEqual(appObj.authtoken,env['PASSMANCLI_AUTHTOKEN']) self.assertEqual(returnedValue, 'ERROR - get needs arguments "passwordmanpro_cli get **RESOURSE_NAME** **ACCOUNT_NAME**"\n', msg='Incorrect output') def test_GetMissingPassword(self): returnedValue = appObj.run(env, ['passwordmanpro_cli', 'get', 'someResourse']) self.assertEqual(appObj.url,envNoKey['PASSMANCLI_URL']) self.assertEqual(appObj.authtoken,env['PASSMANCLI_AUTHTOKEN']) self.assertEqual(returnedValue, 'ERROR - get needs arguments "passwordmanpro_cli get **RESOURSE_NAME** **ACCOUNT_NAME**"\n', msg='Incorrect output') @patch('passwordmanpro_cli.AppObjClass._callGet') def test_GetNormal(self, getResoursesResponse): getResoursesResponse.side_effect = [ { 'responseCode': 200, 'response': resourseResponseRAW}, { 'responseCode': 200, 'response': accountsResponseRAW}, { 'responseCode': 200, 'response': passwordResponseRAW} ] returnedValue = 
appObj.run(env, ['passwordmanpro_cli', 'get', 'soadevteamserver-konga', 'kongaadmin']) self.assertEqual(appObj.url,envNoKey['PASSMANCLI_URL']) self.assertEqual(appObj.authtoken,env['PASSMANCLI_AUTHTOKEN']) self.assertEqual(appObj.resourseName,'soadevteamserver-konga') self.assertEqual(appObj.accountName,'kongaadmin') #NOTE- no line break when password is supplied self.assertEqual(returnedValue, 'dummyPasswordForTest', msg='Incorrect output') @patch('passwordmanpro_cli.AppObjClass._callGet') def test_GetNormalNOSSL(self, getResoursesResponse): getResoursesResponse.side_effect = [ { 'responseCode': 200, 'response': resourseResponseRAW}, { 'responseCode': 200, 'response': accountsResponseRAW}, { 'responseCode': 200, 'response': passwordResponseRAW} ] returnedValue = appObj.run(env, ['passwordmanpro_cli', 'get', 'soadevteamserver-konga', 'kongaadmin','NOSSLCHECKS']) self.assertEqual(appObj.url,envNoKey['PASSMANCLI_URL']) self.assertEqual(appObj.authtoken,env['PASSMANCLI_AUTHTOKEN']) self.assertEqual(appObj.resourseName,'soadevteamserver-konga') self.assertEqual(appObj.accountName,'kongaadmin') #NOTE- no line break when password is supplied self.assertEqual(returnedValue, 'dummyPasswordForTest', msg='Incorrect output') #Sometimes an error is returned with 200 code @patch('passwordmanpro_cli.AppObjClass._callGet', return_value={ 'responseCode': 200, 'response': errorResourseResponseRAW}) def test_GetErrorResponse(self, getResoursesResponse): with self.assertRaises(Exception) as context: returnedValue = appObj.run(env, ['passwordmanpro_cli', 'get', 'someResourse', 'somePass']) self.checkGotRightException(context,passwordmanpro_cli.passwordProErrorException) @patch('passwordmanpro_cli.AppObjClass._callGet', return_value={ 'responseCode': 400, 'response': errorResourseResponseRAW}) def test_GetErrorResponseWith400(self, getResoursesResponse): with self.assertRaises(Exception) as context: returnedValue = appObj.run(env, ['passwordmanpro_cli', 'get', 'someResourse', 'somePass']) self.checkGotRightException(context,passwordmanpro_cli.webserviceErrorException) def test_GetRawMustStartWithSlash(self): returnedValue = appObj.run(env, ['passwordmanpro_cli', 'rawget', 'restapi/json/v1/resources']) self.assertEqual(returnedValue, 'ERROR - rawget uri must start with a slash\n', msg='Incorrect output') @patch('passwordmanpro_cli.AppObjClass._callGet') def test_GetNormalResourseNotFound(self, getResoursesResponse): getResoursesResponse.side_effect = [ { 'responseCode': 200, 'response': resourseResponseRAW} ] with self.assertRaises(Exception) as context: returnedValue = appObj.run(env, ['passwordmanpro_cli', 'get', 'someResourse', 'somePass']) self.checkGotRightException(context,passwordmanpro_cli.resourseNotFoundException) #Test password not found passwordNotFoundException @patch('passwordmanpro_cli.AppObjClass._callGetResourses') @patch('passwordmanpro_cli.AppObjClass._callGetAccounts') def test_GetNormalPasswordNotFound(self, _callGetAccountsResponse, _callGetResoursesResponse): _callGetResoursesResponse.side_effect = [ { 'responseCode': 200, 'response': resourseResponse} ] _callGetAccountsResponse.side_effect = [ { 'responseCode': 200, 'response': accountsResponse} ] with self.assertRaises(Exception) as context: returnedValue = appObj.run(env, ['passwordmanpro_cli', 'get', 'soadevteamserver-konga', 'somePass']) self.checkGotRightException(context,passwordmanpro_cli.accountNotFoundException) @patch('passwordmanpro_cli.AppObjClass._callGetResourses') def test_GetZeroResoursesShared(self, 
_callGetResoursesResponse): _callGetResoursesResponse.side_effect = [ { 'responseCode': 200, 'response': resourseResponseNoResourses} ] with self.assertRaises(Exception) as context: returnedValue = appObj.run(env, ['passwordmanpro_cli', 'get', 'soadevteamserver-konga', 'somePass']) self.checkGotRightException(context,passwordmanpro_cli.resourseNotFoundException) @patch('passwordmanpro_cli.AppObjClass._callGetResourses') def test_UserNotAllowedToAccess(self, _callGetResoursesResponse): _callGetResoursesResponse.side_effect = [ { 'responseCode': 200, 'response': userNotAllowedToAccessFromThisHost} ] with self.assertRaises(Exception) as context: returnedValue = appObj.run(env, ['passwordmanpro_cli', 'get', 'soadevteamserver-konga', 'somePass']) self.checkGotRightException(context,passwordmanpro_cli.resourseNotFoundException)
nilq/baby-python
python
#!/usr/bin/env python3
import utils, os, random, time, open_color, arcade

utils.check_version((3, 7))

SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Sprites Example"


class MyGame(arcade.Window):
    def __init__(self):
        super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
        file_path = os.path.dirname(os.path.abspath(__file__))
        os.chdir(file_path)
        arcade.set_background_color(open_color.white)
        self.car_list = arcade.SpriteList()

    def setup(self):
        cars = ['bus', 'kart', 'police', 'buggy', 'ambulance',
                'bus_school', 'hotdog', 'scooter', 'station', 'cycle']
        for _ in range(20):
            car = random.choice(cars)
            for _ in range(10):
                # Spread sprites over the whole window; the original reused
                # the loop variable `i` as the x bound, bunching everything
                # at the left edge.
                x = random.randint(0, SCREEN_WIDTH)
                y = random.randint(0, SCREEN_HEIGHT)
                car_sprite = arcade.Sprite("Cars/{car}.png".format(car=car), 2)
                car_sprite.center_x = x
                car_sprite.center_y = y
                self.car_list.append(car_sprite)

    def on_draw(self):
        arcade.start_render()
        self.car_list.draw()

    def update(self, delta_time):
        pass

    def on_mouse_motion(self, x, y, dx, dy):
        # Move every car to the cursor; the original body only ever moved
        # the last sprite created, via a stale self.car_sprite reference.
        for car in self.car_list:
            car.center_x = x
            car.center_y = y


def main():
    """ Main method """
    window = MyGame()
    window.setup()
    arcade.run()


if __name__ == "__main__":
    main()
nilq/baby-python
python
#!/usr/bin/python3 import sys import os from tqdm import tqdm from binascii import b2a_hex import pandas as pd import pickle from pdfminer.pdfparser import PDFParser from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines from pdfminer.pdfpage import PDFPage from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter from pdfminer.converter import PDFPageAggregator from pdfminer.layout import LAParams, LTTextBox, LTTextLine, LTFigure, LTImage, LTChar, LTPage from logging import getLogger, StreamHandler, Formatter, DEBUG, INFO, WARN formatter = Formatter('%(asctime)s %(name)s[%(levelname)s] %(message)s', "%Y-%m-%d %H:%M:%S") logger = getLogger(__name__) logger.setLevel(INFO) handler = StreamHandler() handler.setLevel(logger.getEffectiveLevel()) handler.setFormatter(formatter) logger.addHandler(handler) logger.propagate = False def with_pdf(pdf_doc, fn, pdf_pwd, *args): """Open the pdf document, and apply the function, returning the results""" result = None try: # open the pdf file fp = open(pdf_doc, "rb") # create a parser object associated with the file object parser = PDFParser(fp) # create a PDFDocument object that stores the document structure doc = PDFDocument(parser, pdf_pwd) # connect the parser and document objects parser.set_document(doc) if doc.is_extractable: # apply the function and return the result result = fn(doc, *args) # close the pdf file fp.close() except IOError: # the file doesn't exist or similar problem pass return result # Table of Contents def _parse_toc(doc): """With an open PDFDocument object, get the table of contents (toc) data [this is a higher-order function to be passed to with_pdf()]""" toc = [] try: outlines = doc.get_outlines() for (level, title, dest, a, se) in outlines: toc.append((level, title)) except PDFNoOutlines: pass return toc def get_toc(pdf_doc, pdf_pwd=""): """Return the table of contents (toc), if any, for this pdf file""" return with_pdf(pdf_doc, _parse_toc, pdf_pwd) # Extracting Images def write_file(folder, filename, filedata, flags="w"): """Write the file data to the folder and filename combination (flags: 'w' for write text, 'wb' for write binary, use 'a' instead of 'w' for append)""" if os.path.isdir(folder): file_obj = open(os.path.join(folder, filename), flags) file_obj.write(filedata) file_obj.close() def determine_image_type(stream_first_4_bytes): """Find out the image file type based on the magic number comparison of the first 4 (or 2) bytes""" file_type = None bytes_as_hex = str(b2a_hex(stream_first_4_bytes)) if bytes_as_hex.startswith("ffd8"): file_type = ".jpeg" elif bytes_as_hex == "89504e47": file_type = ".png" elif bytes_as_hex == "47494638": file_type = ".gif" elif bytes_as_hex.startswith("424d"): file_type = ".bmp" return file_type def save_image(lt_image, page_number, images_folder): """Try to save the image data from this LTImage object, and return the file name, if successful""" if not lt_image.stream: raise RuntimeError file_stream = lt_image.stream.get_rawdata() if not file_stream: raise RuntimeError file_ext = determine_image_type(file_stream[0:4]) if not file_ext: raise RuntimeError file_name = "".join([str(page_number), "_", lt_image.name, file_ext]) write_file(images_folder, file_name, file_stream, flags="wb") return file_name # Extracting Text def to_bytestring(s, enc="utf-8"): """Convert the given unicode string to a bytestring, using the standard encoding, unless it's already a bytestring""" if s: if isinstance(s, str): return s else: return s.encode(enc) def update_page_text(df, lt_obj, pct=0.2, 
logger=logger): """ Use the bbox x0,x1 values within pct% to produce lists of associated text within the hash df: cols = [x0, y0, x1, y1, class, objs, str] """ if df is None: df = pd.DataFrame(columns=['x0', 'y0', 'x1', 'y1', 'class', 'objs', 'str']) if isinstance(lt_obj, (LTTextLine, LTTextBox)): store_new_line(df, lt_obj, pct, logger) else: raise NotImplementedError(lt_obj) return df def store_new_line(df, lt_obj, pct, logger=logger): ''' store a new line to df ''' x0, y0, x1, y1 = lt_obj.bbox candidates = df[ (df['class'] == lt_obj.__class__) & (df['x0'] >= x0 * (1 - pct)) & (df['x0'] <= x0 * (1 + pct)) & (df['x1'] >= x1 * (1 - pct)) & (df['x1'] <= x1 * (1 + pct)) & (df['y1'] <= y0) ] if candidates.shape[0] > 0: if candidates.shape[0] > 1: logger.warn('candidates has shape {}'.format(candidates.shape)) target = candidates.iloc[0] df.at[target.name, 'y1'] = y1 df.at[target.name, 'objs'].append(lt_obj) df.at[target.name, 'str'].append(to_bytestring(lt_obj.get_text())) else: df.loc[0 if pd.isnull(df.index.max()) else df.index.max() + 1] = [ *lt_obj.bbox, lt_obj.__class__, [lt_obj], [to_bytestring(lt_obj.get_text())] ] return df def parse_lt_objs( lt_objs, page_number, images_folder, text_content=None, return_df=False, progressbar=False, logger=logger, ): """Iterate through the list of LT* objects and capture the text or image data contained in each""" if text_content is None: text_content = [] if progressbar: generator = tqdm(lt_objs, desc='parse objs') else: generator = lt_objs page_text = None # k=(x0, x1) of the bbox, v=list of text strings within that bbox width (physical column) for lt_obj in generator: if isinstance(lt_obj, (LTTextBox, LTTextLine, LTChar)): # text, so arrange is logically based on its column width page_text = update_page_text(page_text, lt_obj) elif isinstance(lt_obj, LTImage): # an image, so save it to the designated folder, and note its place in the text try: saved_file = save_image(lt_obj, page_number, images_folder) # use html style <img /> tag to mark the position of the image within the text text_content.append( '<img src="' + os.path.join(images_folder, saved_file) + '" />' ) except (IOError, RuntimeError): logger.error("failed to save image on page{} {}".format(page_number, lt_obj)) elif isinstance(lt_obj, LTFigure): # LTFigure objects are containers for other LT* objects, so recurse through the children text_content.append( parse_lt_objs( lt_obj, page_number, images_folder, text_content, return_df=return_df, progressbar=progressbar, ) ) if page_text is None: if return_df: return pd.DataFrame() else: return '' if return_df: text_content.append(page_text) return pd.concat(text_content) else: page_text = page_text.sort_values('y0') page_text = page_text['str'].apply(lambda x: text_content.append(''.join(x))) return "\n".join(text_content) # Processing Pages def _parse_pages(doc, images_folder, return_df=False, progressbar=False): """With an open PDFDocument object, get the pages and parse each one [this is a higher-order function to be passed to with_pdf()]""" rsrcmgr = PDFResourceManager() laparams = LAParams(detect_vertical=True, all_texts=True) # all_texts will enable layout analysis in LTFigure objs device = PDFPageAggregator(rsrcmgr, laparams=laparams) interpreter = PDFPageInterpreter(rsrcmgr, device) if progressbar: generator = tqdm(enumerate(PDFPage.create_pages(doc)), desc='pages') else: generator = enumerate(PDFPage.create_pages(doc)) text_content = [] for i, page in generator: interpreter.process_page(page) # receive the LTPage object for this page 
layout = device.get_result() # layout is an LTPage object which may contain child objects like LTTextBox, LTFigure, LTImage, etc. text_content.append( parse_lt_objs( layout, (i + 1), images_folder, return_df=return_df, progressbar=progressbar, ) ) if return_df: return pd.concat(text_content) else: return text_content def _get_page_size(doc, images_folder): """With an open PDFDocument object, get the pages and parse each one [this is a higher-order function to be passed to with_pdf()]""" rsrcmgr = PDFResourceManager() laparams = LAParams(detect_vertical=True, all_texts=True) # all_texts will enable layout analysis in LTFigure objs device = PDFPageAggregator(rsrcmgr, laparams=laparams) interpreter = PDFPageInterpreter(rsrcmgr, device) sizes = [] for i, page in enumerate(PDFPage.create_pages(doc)): interpreter.process_page(page) # receive the LTPage object for this page layout = device.get_result() # layout is an LTPage object which may contain child objects like LTTextBox, LTFigure, LTImage, etc. sizes.append(layout.cropbox) return sizes def get_pages(pdf_doc, pdf_pwd="", images_folder="/tmp", return_df=False, progressbar=False): """Process each of the pages in this pdf file and return a list of strings representing the text found in each page""" return with_pdf(pdf_doc, _parse_pages, pdf_pwd, images_folder, return_df, progressbar) def get_sizes(pdf_doc, pdf_pwd=""): '''get the sizes of each page''' return with_pdf(pdf_doc, _get_page_size, pdf_pwd)
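A usage sketch for the helpers above; the PDF path is a placeholder:

pages = get_pages('example.pdf', images_folder='/tmp', progressbar=True)
for number, text in enumerate(pages, start=1):
    print('--- page %d ---' % number)
    print(text)

print(get_toc('example.pdf'))    # [(level, title), ...], or [] without an outline
print(get_sizes('example.pdf'))  # one cropbox per page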
nilq/baby-python
python
# ___________________________________________________________________________ # # Prescient # Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC # (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S. # Government retains certain rights in this software. # This software is distributed under the Revised BSD License. # ___________________________________________________________________________ import os import subprocess import sys import unittest import pandas as pd import numpy as np from prescient.downloaders import rts_gmlc from prescient.scripts import runner from tests.simulator_tests import simulator_diff this_file_path = os.path.dirname(os.path.realpath(__file__)) class _SimulatorModRTSGMLC: """Test class for running the simulator.""" # arbitrary comparison threshold COMPARISON_THRESHOLD = .01 def setUp(self): self.this_file_path = this_file_path self.test_cases_path = os.path.join(self.this_file_path, 'test_cases') self._set_names() self._run_simulator() test_results_dir = os.path.join(self.test_cases_path, self.results_dir_name) control_results_dir = os.path.join(self.test_cases_path, self.baseline_dir_name) output_files = ["bus_detail", "daily_summary", "hourly_gen_summary", "hourly_summary", "line_detail", "overall_simulation_output", "renewables_detail", "runtimes", "thermal_detail" ] self.test_results = {} self.baseline_results = {} for f in output_files: self.test_results[f] = pd.read_csv(f"{test_results_dir}/{f}.csv") self.baseline_results[f] = pd.read_csv(f"{control_results_dir}/{f}.csv") def _run_simulator(self): """Runs the simulator for the test data set.""" os.chdir(self.test_cases_path) simulator_config_filename = self.simulator_config_filename script, options = runner.parse_commands(simulator_config_filename) if sys.platform.startswith('win'): subprocess.call([script] + options, shell=True) else: subprocess.call([script] + options) os.chdir(self.this_file_path) def test_simulator(self): #test overall output self._assert_file_equality("overall_simulation_output") #test thermal detail self._assert_column_equality("thermal_detail", "Hour") self._assert_column_equality("thermal_detail", "Dispatch") self._assert_column_equality("thermal_detail", "Headroom") self._assert_column_equality("thermal_detail", "Unit Cost") # test renewables detail self._assert_column_equality("renewables_detail", "Hour") self._assert_column_equality("renewables_detail", "Output") self._assert_column_equality("renewables_detail", "Curtailment") # test hourly summary self._assert_file_equality("hourly_summary") #test hourly gen summary self._assert_column_equality("hourly_gen_summary", "Available reserves") self._assert_column_equality("hourly_gen_summary", "Load shedding") self._assert_column_equality("hourly_gen_summary", "Reserve shortfall") self._assert_column_equality("hourly_gen_summary", "Over generation") #test line detail self._assert_file_equality("line_detail") #assert that the busses are the same self._assert_column_equality("bus_detail", "Bus") #assert that the shortfall is the same self._assert_column_totals("bus_detail", "Shortfall") #assert that the LMP is the same self._assert_column_totals("bus_detail", "LMP") #assert that the Overgeneration is the same self._assert_column_totals("bus_detail", "Overgeneration") def _assert_file_equality(self, filename): columns = list(self.test_results[filename]) for col_name in columns: self._assert_column_equality(filename, col_name) def _assert_column_totals(self, filename, column_name): diff = 
abs(self.test_results[filename][column_name].sum() - self.baseline_results[filename][column_name].sum()) assert diff < self.COMPARISON_THRESHOLD, f"Column: '{column_name}' of file: '{filename}.csv' diverges." def _assert_column_equality(self, filename, column_name): df_a = self.test_results[filename] df_b = self.baseline_results[filename] dtype = df_a.dtypes[column_name] if dtype == 'float' or dtype == 'int': diff = np.allclose(df_a[column_name].to_numpy(dtype=dtype), df_b[column_name].to_numpy(dtype=dtype), atol=self.COMPARISON_THRESHOLD) assert diff, f"Column: '{column_name}' of File: '{filename}.csv' diverges." elif column_name != 'Date' and column_name != 'Hour': diff = df_a[column_name].equals(df_b[column_name]) assert diff, f"Column: '{column_name}' of File: '{filename}.csv' diverges." class TestSimulatorModRTSGMLCNetwork(_SimulatorModRTSGMLC, unittest.TestCase): def _set_names(self): self.simulator_config_filename = 'simulate_with_network_deterministic.txt' self.results_dir_name = 'deterministic_with_network_simulation_output' self.baseline_dir_name = 'deterministic_with_network_simulation_output_baseline' class TestSimulatorModRTSGMLCCopperSheet(_SimulatorModRTSGMLC, unittest.TestCase): def _set_names(self): self.simulator_config_filename = 'simulate_deterministic.txt' self.results_dir_name = 'deterministic_simulation_output' self.baseline_dir_name = 'deterministic_simulation_output_baseline' if __name__ == '__main__': unittest.main()
nilq/baby-python
python
import telegram
from telegram import *
from telegram.ext import *
import os
import responses
from dotenv import load_dotenv

load_dotenv()
TELEBOT_API_KEY = os.environ.get('TELE_BOT_API')

bot = telegram.Bot(token=TELEBOT_API_KEY)
updater = Updater(token=TELEBOT_API_KEY, use_context=True)

# Dispatcher
ud = updater.dispatcher


# /hello
# def hello(update: Update, context: CallbackContext):
#     context.bot.send_message(chat_id=update.effective_chat.id, text=f'{responses.greet()}')


# /start
def start(update, context):
    update.message.reply_text(f'Hello👋, {update.effective_user.first_name}, I am a DeFi Bot. I talk about Blockchain and Decentralized Finance related stuff. Developed by @Pradumna_saraf')


# every message handler
def handleAllUserText(update, context):
    userText = str(update.message.text).lower()
    botResponse = responses.allMessages(userText)
    update.message.reply_text(botResponse)


# /myid
def myid(update: Update, context: CallbackContext):
    update.message.reply_text(f"@{update.effective_user.username}")


# /price
def price(update: Update, context: CallbackContext):
    slugPart = str(update.message.text).split()
    tickerValue = responses.slugValue(slugPart[1].lower())
    update.message.reply_text(tickerValue)


ud.add_handler(CommandHandler('start', start))
ud.add_handler(CommandHandler('myid', myid))
ud.add_handler(CommandHandler('price', price))
ud.add_handler(MessageHandler(Filters.text & (~Filters.command), handleAllUserText))

# For terminal purpose
print("Bot Started")

# Starting the bot
updater.start_polling()

# idle state
updater.idle()
nilq/baby-python
python
#!/usr/bin/python
# coding=utf-8


class BadRequest(Exception):
    def __init__(self):
        self.message = "Bad request"


class DuplicationError(BadRequest):
    def __init__(self, field):
        self.message = "Field {wrong} already exists".format(wrong=field)


class MissingFieldError(BadRequest):
    def __init__(self, field):
        self.message = "Field {field} is required".format(field=field)


class NotFound(Exception):
    def __init__(self, resource):
        self.message = "Could not find the resource: {resource}".format(resource=resource)
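A sketch of how a request handler might raise and surface these exceptions; the handler and field name are illustrative:

def create_user(payload):
    if 'email' not in payload:
        raise MissingFieldError('email')
    return payload

try:
    create_user({})
except BadRequest as err:
    print(err.message)  # "Field email is required"

Catching the BadRequest base class also covers DuplicationError, since both subclass it.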
nilq/baby-python
python
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

# Copyright (c) 2002-2019 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from asyncio import (
    IncompleteReadError,
    Lock,
    StreamReader,
    StreamReaderProtocol,
    StreamWriter,
    get_event_loop,
    wait,
)
from collections import deque
from logging import getLogger
from os import strerror
from random import choice
from ssl import SSLError
from sys import platform, version_info
from time import perf_counter

from neo4j.addressing import Address
from neo4j.aio._collections import WaitingList
from neo4j.aio._mixins import Addressable, Breakable
from neo4j.errors import (
    BoltError,
    BoltConnectionError,
    BoltSecurityError,
    BoltConnectionBroken,
    BoltHandshakeError,
    Neo4jAvailabilityError,
)
from neo4j.api import Version
from neo4j.conf import Config, PoolConfig
from neo4j.meta import version as neo4j_version
from neo4j.routing import RoutingTable


log = getLogger(__name__)


MAGIC = b"\x60\x60\xB0\x17"


class Bolt(Addressable, object):

    #: True if this instance uses secure communication, false
    #: otherwise.
    secure = None

    #: As a class attribute, this denotes the version of Bolt handled
    #: by that subclass. As an instance attribute, this represents the
    #: version of the protocol in use.
    protocol_version = ()

    # Record of the time at which this connection was opened.
    __t_opened = None

    # Handle to the StreamReader object.
    __reader = None

    # Handle to the StreamWriter object, which can be used on close.
    __writer = None

    # Flag to indicate that the connection is closed
    __closed = False

    @classmethod
    def default_user_agent(cls):
        """ Return the default user agent string for a connection.
        """
        template = "neo4j-python/{} Python/{}.{}.{}-{}-{} ({})"
        fields = (neo4j_version,) + tuple(version_info) + (platform,)
        return template.format(*fields)

    @classmethod
    def protocol_handlers(cls, protocol_version=None):
        """ Return a dictionary of available Bolt protocol handlers,
        keyed by version tuple. If an explicit protocol version is
        provided, the dictionary will contain either zero or one items,
        depending on whether that version is supported. If no protocol
        version is provided, all available versions will be returned.

        :param protocol_version: tuple identifying a specific protocol
            version (e.g. (3, 5)) or None
        :return: dictionary of version tuple to handler class for all
            relevant and supported protocol versions
        :raise TypeError: if protocol version is not passed in a tuple
        """

        # Carry out subclass imports locally to avoid circular
        # dependency issues.
        from neo4j.aio.bolt3 import Bolt3

        handlers = {bolt.protocol_version: bolt for bolt in [
            # This list can be updated as protocol
            # versions are added and removed.
            Bolt3,
        ]}

        if protocol_version is None:
            return handlers
        if not isinstance(protocol_version, tuple):
            raise TypeError("Protocol version must be specified as a tuple")
        return {version: handler
                for version, handler in handlers.items()
                if version == protocol_version}

    @classmethod
    def opener(cls, auth=None, **config):
        """ Create and return an opener function for a given set of
        configuration parameters. This is useful when multiple servers share
        the same configuration details, such as within a connection pool.
        """
        async def f(address, *, loop=None):
            return await Bolt.open(address, auth=auth, loop=loop, **config)
        return f

    @classmethod
    async def open(cls, address, *, auth=None, loop=None, **config):
        """ Open a socket connection and perform protocol version
        negotiation, in order to construct and return a Bolt client
        instance for a supported Bolt protocol version.

        :param address: a tuple of host and port, such as
            ("127.0.0.1", 7687)
        :param auth:
        :param loop:
        :param config:
        :return: instance of a Bolt subclass
        :raise BoltConnectionError: if a connection could not be
            established
        :raise BoltConnectionLost: if an I/O error occurs on the
            underlying socket connection
        :raise BoltHandshakeError: if handshake completes without a
            successful negotiation
        :raise TypeError: if any of the arguments provided are passed
            as incompatible types
        :raise ValueError: if any of the arguments provided are passed
            with unsupported values
        """

        # Args
        address = Address(address)
        if loop is None:
            loop = get_event_loop()
        config = PoolConfig.consume(config)

        # Connect
        reader, writer = await cls._connect(address, loop, config)

        try:

            # Handshake
            subclass = await cls._handshake(reader, writer, config.protocol_version)

            # Instantiation
            obj = subclass(reader, writer)
            obj.secure = bool(config.secure)
            assert hasattr(obj, "__ainit__")
            await obj.__ainit__(auth)
            return obj

        except BoltError:
            writer.write_eof()
            writer.close()
            raise

    @classmethod
    async def _connect(cls, address, loop, config):
        """ Attempt to establish a TCP connection to the address
        provided.

        :param address:
        :param loop:
        :param config:
        :return: a 2-tuple of reader and writer for the new connection
        :raise BoltConnectionError: if a connection could not be
            established
        """
        assert isinstance(address, Address)
        assert loop is not None
        assert isinstance(config, Config)
        connection_args = {
            "host": address.host,
            "port": address.port,
            "family": address.family,
            # TODO: other args
        }
        ssl_context = config.get_ssl_context()
        if ssl_context:
            connection_args["ssl"] = ssl_context
            connection_args["server_hostname"] = address.host
        log.debug("[#0000] C: <DIAL> %s", address)
        try:
            reader = BoltStreamReader(loop=loop)
            protocol = StreamReaderProtocol(reader, loop=loop)
            transport, _ = await loop.create_connection(lambda: protocol, **connection_args)
            writer = BoltStreamWriter(transport, protocol, reader, loop)
        except SSLError as err:
            log.debug("[#%04X] S: <REJECT> %s (%d %s)", 0, address,
                      err.errno, strerror(err.errno))
            raise BoltSecurityError("Failed to establish a secure connection", address) from err
        except OSError as err:
            log.debug("[#%04X] S: <REJECT> %s (%d %s)", 0, address,
                      err.errno, strerror(err.errno))
            raise BoltConnectionError("Failed to establish a connection", address) from err
        else:
            local_address = Address(transport.get_extra_info("sockname"))
            remote_address = Address(transport.get_extra_info("peername"))
            log.debug("[#%04X] S: <ACCEPT> %s -> %s",
                      local_address.port_number, local_address, remote_address)
            return reader, writer

    @classmethod
    async def _handshake(cls, reader, writer, protocol_version):
        """ Carry out a Bolt handshake, optionally requesting a
        specific protocol version.

        :param reader:
        :param writer:
        :param protocol_version:
        :return:
        :raise BoltConnectionLost: if an I/O error occurs on the
            underlying socket connection
        :raise BoltHandshakeError: if handshake completes without a
            successful negotiation
        """
        local_address = Address(writer.transport.get_extra_info("sockname"))
        remote_address = Address(writer.transport.get_extra_info("peername"))

        handlers = cls.protocol_handlers(protocol_version)
        if not handlers:
            raise ValueError("No protocol handlers available "
                             "(requested Bolt %r)" % (protocol_version,))
        offered_versions = sorted(handlers.keys(), reverse=True)[:4]

        request_data = MAGIC + b"".join(
            v.to_bytes() for v in offered_versions).ljust(16, b"\x00")
        log.debug("[#%04X] C: <HANDSHAKE> %r", local_address.port_number, request_data)
        writer.write(request_data)
        await writer.drain()
        response_data = await reader.readexactly(4)
        log.debug("[#%04X] S: <HANDSHAKE> %r", local_address.port_number, response_data)
        try:
            agreed_version = Version.from_bytes(response_data)
        except ValueError as err:
            writer.close()
            raise BoltHandshakeError("Unexpected handshake response %r" % response_data,
                                     remote_address, request_data, response_data) from err
        try:
            subclass = handlers[agreed_version]
        except KeyError:
            log.debug("Unsupported Bolt protocol version %s", agreed_version)
            raise BoltHandshakeError("Unsupported Bolt protocol version",
                                     remote_address, request_data, response_data)
        else:
            return subclass

    def __new__(cls, reader, writer):
        obj = super().__new__(cls)
        obj.__t_opened = perf_counter()
        obj.__reader = reader
        obj.__writer = writer
        Addressable.set_transport(obj, writer.transport)
        return obj

    def __repr__(self):
        return "<Bolt address=%r protocol_version=%r>" % (self.remote_address,
                                                          self.protocol_version)

    async def __ainit__(self, auth):
        """ Asynchronous initializer for implementation by subclasses.

        :param auth:
        """

    @property
    def age(self):
        """ The age of this connection in seconds.
        """
        return perf_counter() - self.__t_opened

    @property
    def broken(self):
        """ Flag to indicate whether this connection has been broken
        by the network or remote peer.
        """
        return self.__reader.broken or self.__writer.broken

    @property
    def closed(self):
        """ Flag to indicate whether this connection has been closed
        locally.
        """
        return self.__closed

    async def close(self):
        """ Close the connection.
        """
        if self.closed:
            return
        if not self.broken:
            log.debug("[#%04X] S: <HANGUP>", self.local_address.port_number)
            self.__writer.write_eof()
            self.__writer.close()
            try:
                await self.__writer.wait_closed()
            except BoltConnectionBroken:
                pass
        self.__closed = True

    async def reset(self, force=False):
        """ Reset the connection to a clean state.

        By default, a RESET message will only be sent if required,
        i.e. if the connection is not already in a clean state. If
        forced, this check will be overridden and a RESET will be
        sent regardless.
        """

    async def run(self, cypher, parameters=None, discard=False, readonly=False,
                  bookmarks=None, timeout=None, metadata=None):
        """ Run an auto-commit transaction.

        :param cypher:
        :param parameters:
        :param discard:
        :param readonly:
        :param bookmarks:
        :param timeout:
        :param metadata:
        :raise BoltTransactionError: if a transaction cannot be carried
            out at this time
        """

    async def begin(self, readonly=False, bookmarks=None,
                    timeout=None, metadata=None):
        """ Begin an explicit transaction.

        :param readonly:
        :param bookmarks:
        :param timeout:
        :param metadata:
        :return:
        """

    async def run_tx(self, f, args=None, kwargs=None, readonly=False,
                     bookmarks=None, timeout=None, metadata=None):
        """ Run a transaction function and return the return value from
        that function.
        """

    async def get_routing_table(self, context=None):
        """ Fetch a new routing table.

        :param context: the routing context to use for this call
        :return: a new RoutingTable instance or None if the given router is
            currently unable to provide routing information
        :raise ServiceUnavailable: if no writers are available
        :raise ProtocolError: if the routing information received is unusable
        """


class BoltStreamReader(Addressable, Breakable, StreamReader):
    """ Wrapper for asyncio.streams.StreamReader
    """

    def set_transport(self, transport):
        Addressable.set_transport(self, transport)
        StreamReader.set_transport(self, transport)

    async def readuntil(self, separator=b'\n'):  # pragma: no cover
        assert False  # not used by current implementation

    async def read(self, n=-1):  # pragma: no cover
        assert False  # not used by current implementation

    async def readexactly(self, n):
        try:
            return await super().readexactly(n)
        except IncompleteReadError as err:
            message = ("Network read incomplete (received {} of {} "
                       "bytes)".format(len(err.partial), err.expected))
            log.debug("[#%04X] S: <CLOSE>", self.local_address.port_number)
            Breakable.set_broken(self)
            raise BoltConnectionBroken(message, self.remote_address) from err
        except OSError as err:
            # Include the local port so the log line matches its format string
            # (the original call was missing this argument).
            log.debug("[#%04X] S: <CLOSE> %d %s", self.local_address.port_number,
                      err.errno, strerror(err.errno))
            Breakable.set_broken(self)
            raise BoltConnectionBroken("Network read failed", self.remote_address) from err


class BoltStreamWriter(Addressable, Breakable, StreamWriter):
    """ Wrapper for asyncio.streams.StreamWriter
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        Addressable.set_transport(self, self.transport)

    async def drain(self):
        try:
            await super().drain()
        except OSError as err:
            log.debug("[#%04X] S: <CLOSE> (%s)", self.local_address.port_number, err)
            Breakable.set_broken(self)
            raise BoltConnectionBroken("Network write failed", self.remote_address) from err

    async def wait_closed(self):
        try:
            await super().wait_closed()
        except AttributeError:  # pragma: no cover
            # This is a dirty hack for Python 3.6, which didn't include
            # 'wait_closed'. The code polls waiting for the stream
            # reader inside the protocol to go away which, by the
            # implementation of 3.6, occurs on 'connection_lost'. This
            # hack is likely safe unless the implementation of 3.6
            # changes in a subsequent patch, and can be removed when
            # Python 3.6 support is no longer required.
            #
            from asyncio import sleep
            try:
                while self._protocol._stream_reader is not None:
                    await sleep(0.1)
            except AttributeError:
                pass


class Pool:

    def acquire(self, *, force_reset=False, timeout=None):
        raise NotImplementedError

    def release(self, *connections, force_reset=False):
        raise NotImplementedError

    def close(self, *, force=False):
        raise NotImplementedError


class BoltPool:
    """ A pool of connections to a single address.

    :param opener: a function to which an address can be passed that
        returns an open and ready Bolt connection
    :param address: the remote address for which this pool operates
    :param max_size: the maximum permitted number of simultaneous
        connections that may be owned by this pool, both in-use and
        free
    :param max_age: the maximum permitted age, in seconds, for
        connections to be retained in this pool
    """

    @classmethod
    async def open(cls, address, *, auth=None, loop=None, **config):
        """ Create a new connection pool, with an option to seed one
        or more initial connections.
        """
        pool_config = PoolConfig.consume(config)

        def opener(addr):
            return Bolt.open(addr, auth=auth, loop=loop, **pool_config)

        pool = cls(loop, opener, pool_config, address)
        seeds = [await pool.acquire() for _ in range(pool_config.init_size)]
        for seed in seeds:
            await pool.release(seed)
        return pool

    def __init__(self, loop, opener, config, address):
        if loop is None:
            self._loop = get_event_loop()
        else:
            self._loop = loop
        self._opener = opener
        self._address = Address(address)
        self._max_size = config.max_size
        self._max_age = config.max_age
        self._in_use_list = deque()
        self._free_list = deque()
        self._waiting_list = WaitingList(loop=self._loop)

    def __repr__(self):
        return "<{} addr'{}' [{}{}{}]>".format(
            self.__class__.__name__,
            self.address,
            "|" * len(self._in_use_list),
            "." * len(self._free_list),
            " " * (self.max_size - self.size),
        )

    def __contains__(self, cx):
        return cx in self._in_use_list or cx in self._free_list

    def __len__(self):
        return self.size

    @property
    def address(self):
        """ The remote address for which this pool operates.
        """
        return self._address

    @property
    def max_size(self):
        """ The maximum permitted number of simultaneous connections
        that may be owned by this pool, both in-use and free.
        """
        return self._max_size

    @max_size.setter
    def max_size(self, value):
        old_value = self._max_size
        self._max_size = value
        if value > old_value:
            # The maximum size has grown, so new slots have become
            # available. Notify any waiting acquirers of this extra
            # capacity.
            self._waiting_list.notify()

    @property
    def max_age(self):
        """ The maximum permitted age, in seconds, for connections to
        be retained in this pool.
        """
        return self._max_age

    @property
    def in_use(self):
        """ The number of connections in this pool that are currently
        in use.
        """
        return len(self._in_use_list)

    @property
    def size(self):
        """ The total number of connections (both in-use and free)
        currently owned by this connection pool.
        """
        return len(self._in_use_list) + len(self._free_list)

    async def _sanitize(self, cx, *, force_reset):
        """ Attempt to clean up a connection, such that it can be
        reused.

        If the connection is broken or closed, it can be discarded.
        Otherwise, the age of the connection is checked against the
        maximum age permitted by this pool, consequently closing it
        on expiry.

        Should the connection be neither broken, closed nor expired,
        it will be reset (optionally forcibly so) and the connection
        object will be returned, indicating success.
        """
        if cx.broken or cx.closed:
            return None
        expired = self.max_age is not None and cx.age > self.max_age
        if expired:
            await cx.close()
            return None
        await cx.reset(force=force_reset)
        return cx

    async def acquire(self, *, force_reset=False):
        """ Acquire a connection from the pool.

        In the simplest case, this will return an existing open
        connection, if one is free. If not, and the pool is not full,
        a new connection will be created. If the pool is full and no
        free connections are available, this will block until a
        connection is released, or until the acquire call is cancelled.

        :param force_reset: if true, the connection will be forcibly
            reset before being returned; if false, this will only occur
            if the connection is not already in a clean state
        :return: a Bolt connection object
        """
        log.debug("Acquiring connection from pool %r", self)
        cx = None
        while cx is None or cx.broken or cx.closed:
            try:
                # Plan A: select a free connection from the pool
                cx = self._free_list.popleft()
            except IndexError:
                if self.size < self.max_size:
                    # Plan B: if the pool isn't full, open
                    # a new connection
                    cx = await self._opener(self.address)
                else:
                    # Plan C: wait for more capacity to become
                    # available, then try again
                    log.debug("Joining waiting list")
                    await self._waiting_list.join()
            else:
                cx = await self._sanitize(cx, force_reset=force_reset)
        self._in_use_list.append(cx)
        return cx

    async def release(self, cx, *, force_reset=False):
        """ Release a Bolt connection, putting it back into the pool
        if the connection is healthy and the pool is not already at
        capacity.

        :param cx: the connection to release
        :param force_reset: if true, the connection will be forcibly
            reset before being released back into the pool; if false,
            this will only occur if the connection is not already in a
            clean state
        :raise ValueError: if the connection is not currently in use,
            or if it does not belong to this pool
        """
        log.debug("Releasing connection %r", cx)
        if cx in self._in_use_list:
            self._in_use_list.remove(cx)
            if self.size < self.max_size:
                # If there is spare capacity in the pool, attempt to
                # sanitize the connection and return it to the pool.
                cx = await self._sanitize(cx, force_reset=force_reset)
                if cx:
                    # Carry on only if sanitation succeeded.
                    if self.size < self.max_size:
                        # Check again if there is still capacity.
                        self._free_list.append(cx)
                        self._waiting_list.notify()
                    else:
                        # Otherwise, close the connection.
                        await cx.close()
            else:
                # If the pool is full, simply close the connection.
                await cx.close()
        elif cx in self._free_list:
            raise ValueError("Connection is not in use")
        else:
            raise ValueError("Connection does not belong to this pool")

    async def prune(self):
        """ Close all free connections.
        """
        await self.__close(self._free_list)

    async def close(self):
        """ Close all connections immediately.

        This does not permanently disable the connection pool, it
        merely shuts down all open connections, including those in
        use. Depending on the applications, it may be perfectly
        acceptable to re-acquire connections after pool closure,
        which will have the implicit effect of reopening the pool.

        To close gracefully, allowing work in progress to continue
        until connections are released, use the following sequence
        instead:

            pool.max_size = 0
            pool.prune()

        This will force all future connection acquisitions onto the
        waiting list, and released connections will be closed instead
        of being returned to the pool.
        """
        await self.prune()
        await self.__close(self._in_use_list)

    async def __close(self, connections):
        """ Close all connections in the given list.
        """
        closers = deque()
        while True:
            try:
                cx = connections.popleft()
            except IndexError:
                break
            else:
                closers.append(cx.close())
        if closers:
            await wait(closers, loop=self._loop)


class Neo4jPool:
    """ Connection pool with routing table.
    """

    @classmethod
    async def open(cls, *addresses, auth=None, routing_context=None, loop=None, **config):
        pool_config = PoolConfig.consume(config)

        def opener(addr):
            return Bolt.open(addr, auth=auth, **pool_config)

        obj = cls(loop, opener, config, addresses, routing_context)
        # TODO: get initial routing table and construct
        await obj._ensure_routing_table_is_fresh()
        return obj

    def __init__(self, loop, opener, config, addresses, routing_context):
        if loop is None:
            self._loop = get_event_loop()
        else:
            self._loop = loop
        self._opener = opener
        self._config = config
        self._pools = {}
        self._missing_writer = False
        self._refresh_lock = Lock(loop=self._loop)
        self._routing_context = routing_context
        self._max_size_per_host = config.max_size
        self._initial_routers = addresses
        self._routing_table = RoutingTable(addresses)
        self._activate_new_pools_in(self._routing_table)

    def _activate_new_pools_in(self, routing_table):
        """ Add pools for addresses that exist in the given routing
        table but which don't already have pools.
        """
        for address in routing_table.servers():
            if address not in self._pools:
                self._pools[address] = BoltPool(self._loop, self._opener,
                                                self._config, address)

    async def _deactivate_pools_not_in(self, routing_table):
        """ Deactivate any pools that aren't represented in the given
        routing table.
        """
        # Iterate over a copy of the keys, since _deactivate() mutates
        # self._pools.
        for address in list(self._pools):
            if address not in routing_table:
                await self._deactivate(address)

    async def _get_routing_table_from(self, *routers):
        """ Try to update routing tables with the given routers.

        :return: True if the routing table is successfully updated,
            otherwise False
        """
        log.debug("Attempting to update routing table from "
                  "{}".format(", ".join(map(repr, routers))))
        for router in routers:
            pool = self._pools[router]
            cx = await pool.acquire()
            try:
                new_routing_table = await cx.get_routing_table(self._routing_context)
            except BoltError:
                await self._deactivate(router)
            else:
                num_routers = len(new_routing_table.routers)
                num_readers = len(new_routing_table.readers)
                num_writers = len(new_routing_table.writers)

                # No writers are available. This likely indicates a
                # temporary state, such as leader switching, so we
                # should not signal an error. When no writers are
                # available, we flag that we are reading in the
                # absence of a writer.
                self._missing_writer = (num_writers == 0)

                # No routers
                if num_routers == 0:
                    continue

                # No readers
                if num_readers == 0:
                    continue

                log.debug("Successfully updated routing table from "
                          "{!r} ({!r})".format(router, self._routing_table))
                return new_routing_table
            finally:
                await pool.release(cx)
        return None

    async def _get_routing_table(self):
        """ Update the routing table from the first router able to
        provide valid routing information.
        """
        # copied because it can be modified
        existing_routers = list(self._routing_table.routers)

        has_tried_initial_routers = False
        if self._missing_writer:
            has_tried_initial_routers = True
            rt = await self._get_routing_table_from(self._initial_routers)
            if rt:
                return rt

        rt = await self._get_routing_table_from(*existing_routers)
        if rt:
            return rt

        if not has_tried_initial_routers and self._initial_routers not in existing_routers:
            rt = await self._get_routing_table_from(self._initial_routers)
            if rt:
                return rt

        # None of the routers have been successful, so just fail
        log.error("Unable to retrieve routing information")
        raise Neo4jAvailabilityError("Unable to retrieve routing information")

    async def _ensure_routing_table_is_fresh(self, readonly=False):
        """ Update the routing table if stale.

        This method performs two freshness checks, before and after
        acquiring the refresh lock. If the routing table is already
        fresh on entry, the method exits immediately; otherwise, the
        refresh lock is acquired and the second freshness check that
        follows determines whether an update is still required.
        """
        if self._routing_table.is_fresh(readonly=readonly):
            return
        async with self._refresh_lock:
            if self._routing_table.is_fresh(readonly=readonly):
                if readonly:
                    # if reader is fresh but writers are not, then
                    # we are reading in absence of writer
                    self._missing_writer = not self._routing_table.is_fresh(readonly=False)
            else:
                rt = await self._get_routing_table()
                self._activate_new_pools_in(rt)
                self._routing_table.update(rt)
                await self._deactivate_pools_not_in(rt)

    async def _select_pool(self, readonly=False):
        """ Selects the pool with the fewest in-use connections.
        """
        await self._ensure_routing_table_is_fresh(readonly=readonly)
        if readonly:
            addresses = self._routing_table.readers
        else:
            addresses = self._routing_table.writers
        pools = [pool for address, pool in self._pools.items() if address in addresses]
        pools_by_usage = {}
        for pool in pools:
            pools_by_usage.setdefault(pool.in_use, []).append(pool)
        if not pools_by_usage:
            raise Neo4jAvailabilityError("No {} service currently "
                                         "available".format("read" if readonly else "write"))
        return choice(pools_by_usage[min(pools_by_usage)])

    async def acquire(self, *, readonly=False, force_reset=False):
        """ Acquire a connection to a server that can satisfy a set of
        parameters.

        :param readonly: true if a readonly connection is required,
            otherwise false
        :param force_reset:
        """
        while True:
            pool = await self._select_pool(readonly=readonly)
            try:
                cx = await pool.acquire(force_reset=force_reset)
            except BoltError:
                await self._deactivate(pool.address)
            else:
                if not readonly:
                    # If we're not acquiring a connection as
                    # readonly, then intercept NotALeader and
                    # ForbiddenOnReadOnlyDatabase errors to
                    # invalidate the routing table.
                    from neo4j.errors import (
                        NotALeader,
                        ForbiddenOnReadOnlyDatabase,
                    )

                    def handler(failure):
                        """ Invalidate the routing table before raising
                        the failure.
                        """
                        log.debug("[#0000] C: <ROUTING> Invalidating routing table")
                        self._routing_table.ttl = 0
                        raise failure

                    cx.set_failure_handler(NotALeader, handler)
                    cx.set_failure_handler(ForbiddenOnReadOnlyDatabase, handler)
                return cx

    async def release(self, connection, *, force_reset=False):
        """ Release a connection back into the pool.

        This method is thread safe.
        """
        for pool in self._pools.values():
            try:
                await pool.release(connection, force_reset=force_reset)
            except ValueError:
                pass
            else:
                # Unhook any custom error handling and exit.
                from neo4j.errors import (
                    NotALeader,
                    ForbiddenOnReadOnlyDatabase,
                )
                connection.del_failure_handler(NotALeader)
                connection.del_failure_handler(ForbiddenOnReadOnlyDatabase)
                break
        else:
            raise ValueError("Connection does not belong to this pool")

    async def _deactivate(self, address):
        """ Deactivate an address from the connection pool, if
        present, removing it from the routing table and also closing
        all idle connections to that address.
        """
        log.debug("[#0000] C: <ROUTING> Deactivating address %r", address)
        # We use `discard` instead of `remove` here since the former
        # will not fail if the address has already been removed.
        self._routing_table.routers.discard(address)
        self._routing_table.readers.discard(address)
        self._routing_table.writers.discard(address)
        log.debug("[#0000] C: <ROUTING> table=%r", self._routing_table)
        try:
            pool = self._pools.pop(address)
        except KeyError:
            pass  # assume the address has already been removed
        else:
            pool.max_size = 0
            await pool.prune()

    async def close(self, force=False):
        """ Close all connections and empty the pool.

        If forced, in-use connections will be closed immediately; if
        not, they will remain open until released.
        """
        pools = dict(self._pools)
        self._pools.clear()
        for address, pool in pools.items():
            if force:
                await pool.close()
            else:
                pool.max_size = 0
                await pool.prune()


class Neo4j:

    # The default router address list to use if no addresses are specified.
    default_router_addresses = Address.parse_list(":7687 :17601 :17687")

    # TODO
    # @classmethod
    # async def open(cls, *addresses, auth=None, security=False, protocol_version=None, loop=None):
    #     opener = Bolt.opener(auth=auth, security=security, protocol_version=protocol_version)
    #     router_addresses = Address.parse_list(" ".join(addresses), default_port=7687)
    #     return cls(opener, router_addresses, loop=loop)
    #
    # def __init__(self, opener, router_addresses, loop=None):
    #     self._routers = Neo4jPool(opener, router_addresses or self.default_router_addresses)
    #     self._writers = Neo4jPool(opener)
    #     self._readers = Neo4jPool(opener)
    #     self._routing_table = None
    #
    # @property
    # def routing_table(self):
    #     return self._routing_table
    #
    # async def update_routing_table(self):
    #     cx = await self._routers.acquire()
    #     try:
    #         result = await cx.run("CALL dbms.cluster.routing.getRoutingTable($context)", {"context": {}})
    #         record = await result.single()
    #         self._routing_table = RoutingTable.parse_routing_info([record])  # TODO: handle ValueError?
    #         return self._routing_table
    #     finally:
    #         self._routers.release(cx)


# async def main():
#     from neo4j.debug import watch; watch("neo4j")
#     neo4j = await Neo4j.open(":17601 :17602 :17603", auth=("neo4j", "password"))
#     await neo4j.update_routing_table()
#     print(neo4j.routing_table)
#
#
# if __name__ == "__main__":
#     run(main())
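# Usage sketch (added for illustration, in the style of the commented-out
# main() above): acquiring and releasing a connection through BoltPool. The
# address and credentials are assumptions, and a reachable Neo4j server is
# required for this to do anything.
#
# async def example():
#     pool = await BoltPool.open(("localhost", 7687), auth=("neo4j", "password"))
#     cx = await pool.acquire()
#     try:
#         await cx.run("RETURN 1")
#     finally:
#         await pool.release(cx)
#     await pool.close()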
nilq/baby-python
python
import logging

from device.base.power_source import PowerSource
from device.simulated.battery import Battery
from power_source_item import PowerSourceItem
from simulation_logger import message_formatter


class PowerSourceManager(object):
    def __init__(self):
        self.power_sources = []
        self.logger = logging.getLogger("lpdm")
        self._device_id = "power_source_manager"
        self._load = 0.0
        self._capacity = 0.0
        self._time = 0

    def __repr__(self):
        return "Load->{}, Capacity->{}".format(self._load, self._capacity)

    def build_message(self, message="", tag="", value=""):
        """Build the log message string"""
        return message_formatter.build_message(
            message=message,
            tag=tag,
            value=value,
            time_seconds=self._time,
            device_id=self._device_id
        )

    def set_time(self, new_time):
        self._time = new_time

    def shutdown(self):
        """remove load from all power sources"""
        [p.set_load(0.0) for p in self.power_sources]
        self._load = 0.0

    def count(self):
        """Return the number of power sources connected"""
        return len(self.power_sources)

    def add(self, device_id, DeviceClass, device_instance=None):
        """Register a power source"""
        # make sure the type of object added is a power source
        if not issubclass(DeviceClass, PowerSource):
            raise Exception("The PowerSourceManager can only accept PowerSource devices.")
        # make sure a device with the same id does not exist
        found = filter(lambda d: d.device_id == device_id, self.power_sources)
        if len(found) == 0:
            self.power_sources.append(PowerSourceItem(device_id, DeviceClass, device_instance))
        else:
            raise Exception("The device_id already exists {}".format(device_id))

    def set_capacity(self, device_id, capacity):
        """set the capacity for a power source"""
        if not capacity is None:
            d = self.get(device_id)
            diff = capacity - d.capacity if not d.capacity is None else capacity
            d.set_capacity(capacity)
            self._capacity += diff
            if abs(self._capacity) < 1e-7:
                self._capacity = 0
            self.logger.debug(
                self.build_message(
                    message="set capacity from {}".format(device_id),
                    tag="set_capacity".format(device_id),
                    value=capacity
                )
            )
            self.logger.debug(
                self.build_message(
                    message="total capacity",
                    tag="total_capacity",
                    value=self._capacity
                )
            )

    def set_price(self, device_id, price):
        """set the price of electricity for a power source"""
        d = self.get(device_id)
        d.price = price

    def set_load(self, device_id, load):
        """set the load for a specific power source"""
        d = self.get(device_id)
        if load > 0 and not d.is_available():
            raise Exception("The power source {} has not been configured".format(device_id))

        if load <= d.capacity:
            d.set_load(load)
        else:
            raise Exception(
                "Attempt to set the load for a power source that is greater than capacity "
                "({} > {})".format(load, d.capacity)
            )

    def get(self, device_id=None):
        """Get the info for a power source by its ID"""
        if device_id is None:
            # return all devices
            return self.power_sources
        else:
            found = filter(lambda d: d.device_id == device_id, self.power_sources)
            if len(found) == 1:
                return found[0]
            else:
                return None

    def total_capacity(self):
        """calculate the total capacity for all power sources"""
        # return sum(d.capacity for d in self.power_sources if d.is_available())
        return self._capacity

    def total_load(self):
        """calculate the total load on all the power sources"""
        # return sum(d.load for d in self.power_sources)
        return self._load

    def output_capacity(self):
        """Calculate the output capacity (total_load / total_capacity)"""
        return self._load / self._capacity if self._capacity else None

    def can_handle_load(self, new_load):
        """Is there enough capacity to handle the load?"""
        return (self._load + new_load) <= self.total_capacity()

    def has_available_power_sources(self):
        """Are there power sources configured and available for use?"""
        return True if len([p for p in self.power_sources if p.is_available()]) else False

    def add_load(self, new_load):
        """Add load to the various power sources"""
        self._load += new_load

    def remove_load(self, new_load):
        """Remove load from the system"""
        self.add_load(-1.0 * new_load)

    def update_rechargeable_items(self):
        """Update the status of rechargeable items"""
        for p in self.power_sources:
            if p.DeviceClass is Battery and p.device_instance:
                # update the battery (direct connect)
                p.device_instance.update_status()

    def optimize_load(self):
        """
        Check that the loads are optimally distributed among the power sources.
        Move load from the more expensive power sources to the cheaper ones.
        """
        # update the status of rechargeable items
        self.update_rechargeable_items()
        # get the current total load on the system
        remaining_load = self._load
        starting_load = remaining_load
        # get the power sources and sort by the cheapest price
        power_sources = [p for p in self.power_sources if p.is_configured()]
        power_sources = sorted(power_sources, lambda a, b: cmp(a.price, b.price))
        for ps in power_sources:
            # how much power is available for the device
            if remaining_load == 0:
                # no more load left to distribute, remove power
                ps.set_load(0.0)
            else:
                # there is power available for this device and power left to distribute
                if not ps.is_available():
                    if ps.load > 0:
                        # self.logger.debug(self.build_message(message="set load for {} to {}".format(ps, 0)))
                        ps.set_load(0.0)
                else:
                    if remaining_load > ps.capacity:
                        # can't put all the remaining load on this power source
                        # set to 100% and try the next power source
                        if ps.load != ps.capacity:
                            # self.logger.debug(self.build_message(message="set load for {} to {}".format(ps, ps.capacity)))
                            ps.set_load(ps.capacity)
                        remaining_load -= ps.capacity
                    else:
                        # this power source can handle all of the remaining load
                        # self.logger.debug(self.build_message(message="set load for {} to {}".format(ps, remaining_load)))
                        if ps.load != remaining_load:
                            ps.set_load(remaining_load)
                        remaining_load = 0

        diff = abs(starting_load - self._load)
        if remaining_load > 1e-7:
            self.logger.debug(
                self.build_message(
                    message="Unable to handle the load, total_load = {}, "
                            "total_capacity = {}".format(self.total_load(), self.total_capacity()))
            )
            return False
        elif diff > 1e-7:
            # compare the difference being below some threshold instead of equality
            self.logger.debug(self.build_message(
                message="starting load = {}, total_load = {}, equal ? {}".format(
                    starting_load, self._load, abs(starting_load - self._load))))
            raise Exception("starting/ending loads do not match {} != {}".format(
                starting_load, self._load))

        # self.logger.debug(self.build_message(message="optimize_load (load = {}, cap = P{})".format(self._load, self._capacity), tag="optimize_after"))
        self.logger.debug(
            self.build_message(
                message="total load",
                tag="total_load",
                value=self.total_load()
            )
        )
        # self.logger.debug(
        #     self.build_message(
        #         message="total capacity",
        #         tag="total_capacity",
        #         value=self.total_capacity()
        #     )
        # )
        return True

    def get_available_power_sources(self):
        """get the power sources that have a non-zero capacity"""
        return filter(lambda d: d.is_available(), self.power_sources)

    def get_changed_power_sources(self):
        """return a list of power sources that have been changed"""
        return [p for p in self.power_sources if p.load_changed]

    def reset_changed(self):
        """Reset all the changed flags on all power sources"""
        [p.reset_changed() for p in self.power_sources]
nilq/baby-python
python
import tools
import torch

a = torch.randn(1, 6).cuda()
b = tools.stereographic_project(a)
c = tools.stereographic_unproject(b)

print(tools.normalize_vector(a))
print(tools.normalize_vector(b))
print(tools.normalize_vector(c))
nilq/baby-python
python
from .move import MoveAction  # noqa
from .inspect import InspectAction  # noqa
from .menus import (  # noqa
    ShowMenuAction, ShowInventoryAction, SelectInventoryItemAction,
    BackToGameAction, BackToInventoryMenuAction, ShowCharacterScreenAction)
from .action import NoopAction, WaitAction  # noqa
from .toggle_fullscreen import ToggleFullscreenAction  # noqa
from .exceptions import ShowMenuException  # noqa
from .combat import CycleTargetAction, ShootAction  # noqa
from .items import (  # noqa
    PickupAction, DropItemAction, EquipItemAction, UnequipItemAction)
nilq/baby-python
python
# -*- coding: utf-8 -*-
from __future__ import (nested_scopes, generators, division, absolute_import,
                        with_statement, print_function, unicode_literals)

from grass.pygrass.modules.interface.docstring import docstring_property
from grass.pygrass.modules.interface import read


class Flag(object):
    """The Flag object stores all information about a flag of a module.

    It is possible to set the flags of a command using this object.

    >>> flag = Flag(diz=dict(name='a', description='Flag description',
    ...                      default=True))
    >>> flag.name
    'a'
    >>> flag.special
    False
    >>> flag.description
    'Flag description'
    >>> flag = Flag(diz=dict(name='overwrite'))
    >>> flag.name
    'overwrite'
    >>> flag.special
    True
    """
    def __init__(self, xflag=None, diz=None):
        self.value = False
        diz = read.element2dict(xflag) if xflag is not None else diz
        self.name = diz['name']
        self.special = True if self.name in (
            'verbose', 'overwrite', 'quiet', 'run') else False
        self.description = diz.get('description', None)
        self.default = diz.get('default', None)
        self.guisection = diz.get('guisection', None)
        self.suppress_required = True if 'suppress_required' in diz else False

    def get_bash(self):
        """Return the BASH representation of a flag.

        >>> flag = Flag(diz=dict(name='a', description='Flag description',
        ...                      default=True))
        >>> flag.get_bash()
        ''
        >>> flag.value = True
        >>> flag.get_bash()
        '-a'
        >>> flag = Flag(diz=dict(name='overwrite'))
        >>> flag.get_bash()
        ''
        >>> flag.value = True
        >>> flag.get_bash()
        '--o'
        """
        if self.value:
            if self.special:
                return '--%s' % self.name[0]
            else:
                return '-%s' % self.name
        else:
            return ''

    def get_python(self):
        """Return the python representation of a flag.

        >>> flag = Flag(diz=dict(name='a', description='Flag description',
        ...                      default=True))
        >>> flag.get_python()
        ''
        >>> flag.value = True
        >>> flag.get_python()
        'a'
        >>> flag = Flag(diz=dict(name='overwrite'))
        >>> flag.get_python()
        ''
        >>> flag.value = True
        >>> flag.get_python()
        'overwrite=True'
        """
        if self.value:
            return '%s=True' % self.name if self.special else self.name
        return ''

    def __str__(self):
        """Return the BASH representation of the flag."""
        return self.get_bash()

    def __repr__(self):
        """Return a string with the python representation of the instance."""
        return "Flag <%s> (%s)" % (self.name, self.description)

    def __bool__(self):
        """Return a boolean value"""
        return self.value

    def __nonzero__(self):
        return self.__bool__()

    @docstring_property(__doc__)
    def __doc__(self):
        """Return a documentation string, something like:

        {name}: {default}, suppress required {supress}
        {description}

        >>> flag = Flag(diz=dict(name='a', description='Flag description',
        ...                      default=True))
        >>> print(flag.__doc__)
        a: True
        Flag description

        >>> flag = Flag(diz=dict(name='overwrite'))
        >>> print(flag.__doc__)
        overwrite: None
        None
        """
        return read.DOC['flag'].format(name=self.name,
                                       default=repr(self.default),
                                       description=self.description,
                                       supress=('suppress required'
                                                if self.suppress_required else ''))
nilq/baby-python
python
#!/usr/bin/python3
"""
Script to delete all of the CloudFormation stacks in an account.

This will loop until all of them are deleted, with an exponential backoff.
"""
import boto3
from time import sleep
from colorama import Fore, Style

client = boto3.client("cloudformation")
cloudformation = boto3.resource("cloudformation")

MAX_WAIT_TIME = 45


def get_stacks():
    return client.list_stacks(
        StackStatusFilter=[
            "CREATE_IN_PROGRESS",
            "CREATE_FAILED",
            "CREATE_COMPLETE",
            "ROLLBACK_IN_PROGRESS",
            "ROLLBACK_FAILED",
            "ROLLBACK_COMPLETE",
            "DELETE_IN_PROGRESS",
            "DELETE_FAILED",
            # "DELETE_COMPLETE",
            "UPDATE_IN_PROGRESS",
            "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
            "UPDATE_COMPLETE",
            "UPDATE_FAILED",
            "UPDATE_ROLLBACK_IN_PROGRESS",
            "UPDATE_ROLLBACK_FAILED",
            "UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
            "UPDATE_ROLLBACK_COMPLETE",
            "REVIEW_IN_PROGRESS",
            "IMPORT_IN_PROGRESS",
            "IMPORT_COMPLETE",
            "IMPORT_ROLLBACK_IN_PROGRESS",
            "IMPORT_ROLLBACK_FAILED",
            "IMPORT_ROLLBACK_COMPLETE",
        ],
    )


# for incremental back off: 0, then 2, 4, 8, ... seconds (capped by MAX_WAIT_TIME)
def get_wait_time_exp(retry_count):
    if retry_count == 0:
        return 0
    return pow(2, retry_count)


response = get_stacks()
retry_count = 0
stacks = 1

while stacks > 0:
    wait_time = min(get_wait_time_exp(retry_count), MAX_WAIT_TIME)
    print(Fore.RED + "Deleting:" + Style.RESET_ALL)
    for stack_summary in response["StackSummaries"]:
        stack = cloudformation.Stack(stack_summary["StackName"])
        print(stack.name, end="...")
        stack.delete()
        sleep(wait_time)
        print(Fore.GREEN + "DONE" + Style.RESET_ALL)
    response = get_stacks()
    stacks = len(response["StackSummaries"])
    retry_count = retry_count + 1
nilq/baby-python
python
from database.database import Database
from flask import request
from flask_restful import Resource
import re


class Sources(Resource):
    def post(self):
        body = request.get_json()
        db = Database()
        results = []
        if "domain" in body:
            results += db.find_by_domain(body["domain"])
        if "name" in body:
            name = body["name"].lower()
            # strip a leading "the " before matching; pass IGNORECASE via the
            # flags keyword (as a positional argument it would be read as count)
            stripped = re.sub(r"^the ", "", name, flags=re.IGNORECASE)
            print(stripped)
            results += db.find_by_name(stripped)
        return results
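# Usage sketch (added for illustration): standard flask_restful wiring for
# this resource. The route path is an assumption.
#
# from flask import Flask
# from flask_restful import Api
#
# app = Flask(__name__)
# api = Api(app)
# api.add_resource(Sources, "/sources")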
nilq/baby-python
python
from res_manager import ResultManager
import os


def test_all():
    if os.path.exists('./data.db'):
        os.remove('./data.db')
    rm = ResultManager('.')
    rm.save([1, 2, 3], topic='test saving', name='data1',
            comment='Test saving a list')
    rm.save(65535, topic='test saving',
            comment='Test saving a number without a name')
    rm.save(rm, topic='topic 2', name="object of \"ResultManager\"",
            comment='Saving an object')
    rm.save({0: 1, 1: 'string'}, name="hongshan's dict without topic")
    rm.print_meta_info()
    rm.load(3)
    rm.load(3, version='first')
    rm.delete_by_id(3, version='latest')
    rm.update_meta(2, name='name', topic='topic 5')
    rm.save(12, name='b', topic='topic 5')
    rm.save(12, name='b', topic='topic 5')
    rm.save(14, name='b', topic='topic 5', replace_version='latest')
    rm.save(14, name='name', topic='topic 5', replace_version='latest')
    rm.save(13, name='b', topic='topic 5')
    rm.print_meta_info()
    print(rm.load(5, version='first'))
    print(rm.load(5))
    rm.print_meta_info(topic='topic 5')
    return rm


if __name__ == '__main__':
    rm = test_all()
nilq/baby-python
python
import socket, re, subprocess, os, time, threading, sys, requests

server = "192.186.157.43"
channel = "#channel_to_connect"  # write here the channel you want to connect
botnick = "youtubeBot"

ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ircsock.connect((server, 6667))
ircsock.send("USER " + botnick + " " + botnick + " " + botnick + " " + botnick + "\n")
ircsock.send("NICK " + botnick + "\n")


def ping():
    # respond to server Pings.
    ircsock.send("PONG :pingis\n")


def sendmsg(msg):
    # sends messages to the channel.
    ircsock.send("PRIVMSG " + channel + " :" + msg + "\n")


def youtubeSearch(request):
    r = requests.get('https://www.youtube.com/results',
                     params={'search_query': request})
    v = re.search(r'/watch\?v=([\w-]*)', r.text)
    sendmsg('https://www.youtube.com' + v.group())


def youtubeEmptySearch():
    r = requests.get('https://www.youtube.com/')
    v = re.search(r'/watch\?v=([\w-]*)', r.text)
    sendmsg('https://www.youtube.com' + v.group())


def emptyinput():
    sendmsg("Usage:")
    sendmsg("youtubeBot: do some magic <your search request on YouTube>")
    sendmsg("Random video from \"On trending\" for you:")
    youtubeEmptySearch()


def main():
    ircsock.send("JOIN " + channel + "\n")
    while 1:
        # clear ircmsg value every time
        ircmsg = ""
        # set ircmsg to new data received from server
        ircmsg = ircsock.recv(2048)
        # remove any line breaks
        ircmsg = ircmsg.strip('\n\r')
        # print received message to stdout (mostly for debugging).
        # print(ircmsg)

        # respond to pings so server doesn't think we've disconnected
        if ircmsg.find("PING :") != -1:
            ping()

        # look for PRIVMSG lines as these are messages in the channel
        # or sent to the bot
        if ircmsg.find("PRIVMSG") != -1:
            name = ircmsg.split('!', 1)[0][1:]
            message = ircmsg.split('PRIVMSG', 1)[1].split(':', 1)[1]
            print(name + ": " + message)
            message = ircmsg.split('PRIVMSG', 1)[1].split(':', 1)[1]
            if ircmsg.find("youtubeBot: do some magic") != -1:
                searchWord = message.split('youtubeBot: do some magic', 1)[1][1:]
                if searchWord == '':
                    emptyinput()
                else:
                    youtubeSearch(searchWord)


main()
nilq/baby-python
python
import os
import json
import time
from copy import deepcopy
from datetime import date, datetime
from decimal import Decimal
from random import random, randint, choice

import stdnet
from stdnet.utils import test, zip, to_string, unichr, ispy3k, range
from stdnet.utils import date2timestamp
from stdnet.utils.populate import populate

from examples.models import Statistics, Statistics3, Role


class make_random(object):
    rtype = ['number', 'list', None] + ['dict'] * 3

    def __init__(self):
        self.count = 0

    def make(self, size=5, maxsize=10, nesting=1, level=0):
        keys = populate(size=size)
        if level:
            keys.append('')
        for key in keys:
            t = choice(self.rtype) if level else 'dict'
            if nesting and t == 'dict':
                yield key, dict(self.make(size=randint(0, maxsize),
                                          maxsize=maxsize,
                                          nesting=nesting - 1,
                                          level=level + 1))
            else:
                if t == 'list':
                    v = [random() for i in range(10)]
                elif t == 'number':
                    v = random()
                elif t == 'dict':
                    v = random()
                else:
                    v = t
                yield key, v


class TestJsonField(test.TestCase):
    models = [Statistics, Role]

    def test_default(self):
        models = self.mapper
        a = Statistics(dt=date.today())
        self.assertEqual(a.data, {})
        yield models.add(a)
        self.assertEqual(a.data, {})
        a = yield models.statistics.get(id=a.id)
        self.assertEqual(a.data, {})

    def testMetaData(self):
        field = Statistics._meta.dfields['data']
        self.assertEqual(field.type, 'json object')
        self.assertEqual(field.index, False)
        self.assertEqual(field.as_string, True)

    def testCreate(self):
        models = self.mapper
        mean = Decimal('56.4')
        started = date(2010, 1, 1)
        timestamp = datetime.now()
        a = yield models.statistics.new(dt=date.today(),
                                        data={'mean': mean,
                                              'std': 5.78,
                                              'started': started,
                                              'timestamp': timestamp})
        self.assertEqual(a.data['mean'], mean)
        a = yield models.statistics.get(id=a.id)
        self.assertEqual(len(a.data), 4)
        self.assertEqual(a.data['mean'], mean)
        self.assertEqual(a.data['started'], started)
        self.assertAlmostEqual(date2timestamp(a.data['timestamp']),
                               date2timestamp(timestamp), 5)

    def testCreateFromString(self):
        models = self.mapper
        mean = 'mean'
        timestamp = time.time()
        data = {'mean': mean, 'std': 5.78, 'timestamp': timestamp}
        datas = json.dumps(data)
        a = yield models.statistics.new(dt=date.today(), data=datas)
        a = yield models.statistics.get(id=a.id)
        self.assertEqual(a.data['mean'], mean)
        a = yield models.statistics.get(id=a.id)
        self.assertEqual(len(a.data), 3)
        self.assertEqual(a.data['mean'], mean)
        self.assertAlmostEqual(a.data['timestamp'], timestamp)

    def testValueError(self):
        models = self.mapper
        a = models.statistics(dt=date.today(), data={'mean': self})
        yield self.async.assertRaises(stdnet.FieldValueError,
                                      models.session().add, a)
        self.assertTrue('data' in a._dbdata['errors'])

    def testDefaultValue(self):
        models = self.mapper
        role = models.role(name='test')
        self.assertEqual(role.permissions, [])
        role.permissions.append('ciao')
        role.permissions.append(4)
        yield models.session().add(role)
        self.assertTrue(role.id)
        role = yield models.role.get(id=role.id)
        self.assertEqual(role.permissions, ['ciao', 4])


class TestJsonFieldAsData(test.TestCase):
    '''Test a model with a JSONField which expands as instance fields.
    The `as_string` attribute is set to ``False``.'''
    model = Statistics3
    def_data = {'mean': 1.0,
                'std': 5.78,
                'pv': 3.2,
                'name': 'bla',
                'dt': date.today()}

    def_baddata = {'': 3.2,
                   'ts': {'a': [1, 2, 3, 4, 5, 6, 7], 'b': [10, 11, 12]},
                   'mean': {'1y': 1.0, '2y': 1.1},
                   'std': {'1y': 4.0, '2y': 5.1},
                   'dt': datetime.now()}

    def_data2 = {'pv': {'': 3.2,
                        'ts': {'a': [1, 2, 3, 4, 5, 6, 7], 'b': [10, 11, 12]},
                        'mean': {'1y': 1.0, '2y': 1.1},
                        'std': {'1y': 4.0, '2y': 5.1}},
                 'dt': datetime.now()}

    def make(self, data=None, name=None):
        data = data or self.def_data
        name = name or self.data.random_string()
        return self.model(name=name, data=data)

    def testMeta(self):
        field = self.model._meta.dfields['data']
        self.assertFalse(field.as_string)

    def testMake(self):
        m = self.make()
        self.assertTrue(m.is_valid())
        data = m._dbdata['cleaned_data']
        data.pop('data')
        self.assertEqual(len(data), 6)
        self.assertEqual(float(data['data__mean']), 1.0)
        self.assertEqual(float(data['data__std']), 5.78)
        self.assertEqual(float(data['data__pv']), 3.2)

    def testGet(self):
        models = self.mapper
        session = models.session()
        m = yield session.add(self.make())
        m = yield models.statistics3.get(id=m.id)
        self.assertEqual(m.data['mean'], 1.0)
        self.assertEqual(m.data['std'], 5.78)
        self.assertEqual(m.data['pv'], 3.2)
        self.assertEqual(m.data['dt'], date.today())
        self.assertEqual(m.data['name'], 'bla')

    def testmakeEmptyError(self):
        '''Here we test when we have a key which is empty.'''
        models = self.mapper
        session = models.session()
        m = self.make(self.def_baddata)
        self.assertFalse(m.is_valid())
        yield self.async.assertRaises(stdnet.FieldValueError, session.add, m)

    def testmakeEmpty(self):
        models = self.mapper
        session = models.session()
        m = self.make(self.def_data2)
        self.assertTrue(m.is_valid())
        cdata = m._dbdata['cleaned_data']
        self.assertEqual(len(cdata), 10)
        self.assertTrue('data' in cdata)
        self.assertEqual(cdata['data__pv__mean__1y'], '1.0')
        obj = yield session.add(m)
        obj = yield models.statistics3.get(id=obj.id)
        self.assertEqual(obj.data['dt'].date(), date.today())
        self.assertEqual(obj.data__dt.date(), date.today())
        self.assertEqual(obj.data['pv']['mean']['1y'], 1.0)
        self.assertEqual(obj.data__pv__mean__1y, 1.0)
        self.assertEqual(obj.data__dt.date(), date.today())

    def testmakeEmpty2(self):
        models = self.mapper
        session = models.session()
        m = self.make({'ts': [1, 2, 3, 4]})
        obj = yield models.add(m)
        obj = yield models.statistics3.get(id=obj.id)
        self.assertEqual(obj.data, {'ts': [1, 2, 3, 4]})

    def __testFuzzySmall(self):
        # TODO: This does not pass in pypy
        models = self.mapper
        session = models.session()
        r = make_random()
        data = dict(r.make(nesting=0))
        m = self.make(data)
        self.assertTrue(m.is_valid())
        cdata = m._dbdata['cleaned_data']
        cdata.pop('data')
        for k in cdata:
            if k is not 'name':
                self.assertTrue(k.startswith('data__'))
        obj = yield session.add(m)
        obj = yield models.statistics3.get(id=obj.id)
        self.assertEqualDict(data, obj.data)

    def __testFuzzyMedium(self):
        # TODO: This does not pass in pypy
        models = self.mapper
        session = models.session()
        r = make_random()
        data = dict(r.make(nesting=1))
        m = self.make(data)
        self.assertTrue(m.is_valid())
        cdata = m._dbdata['cleaned_data']
        cdata.pop('data')
        for k in cdata:
            if k is not 'name':
                self.assertTrue(k.startswith('data__'))
        obj = yield session.add(m)
        # obj = self.model.objects.get(id=obj.id)
        # self.assertEqualDict(data, obj.data)

    def __testFuzzy(self):
        # TODO: This does not pass in pypy
        models = self.mapper
        session = models.session()
        r = make_random()
        data = dict(r.make(nesting=3))
        m = self.make(deepcopy(data))
        self.assertTrue(m.is_valid())
        cdata = m._dbdata['cleaned_data']
        cdata.pop('data')
        for k in cdata:
            if k is not 'name':
                self.assertTrue(k.startswith('data__'))
        obj = yield session.add(m)
        # obj = self.model.objects.get(id=obj.id)
        # self.assertEqualDict(data, obj.data)

    def testEmptyDict(self):
        models = self.mapper
        session = models.session()
        r = yield session.add(self.model(name='bla', data={'bla': 'ciao'}))
        self.assertEqual(r.data, {'bla': 'ciao'})
        r.data = None
        yield session.add(r)
        r = yield models.statistics3.get(id=r.id)
        self.assertEqual(r.data, {})

    def testFromEmpty(self):
        '''Test the change of a data jsonfield from empty to populated.'''
        models = self.mapper
        session = models.session()
        r = yield models.statistics3.new(name='bla')
        self.assertEqual(r.data, {})
        r.data = {'bla': 'ciao'}
        yield session.add(r)
        r = yield models.statistics3.get(id=r.id)
        self.assertEqual(r.data, {'bla': 'ciao'})

    def assertEqualDict(self, data1, data2):
        for k in list(data1):
            v1 = data1.pop(k)
            v2 = data2.pop(k, {})
            if isinstance(v1, dict):
                self.assertEqualDict(v1, v2)
            else:
                self.assertAlmostEqual(v1, v2)
        self.assertFalse(data1)
        self.assertFalse(data2)
nilq/baby-python
python
with open('2016/day_03/list.txt', encoding="utf-8") as f:
    lines = f.readlines()

t = []
c = 0
for i in lines:
    w = ''
    for a in i:
        if a != ' ':
            w += str(a)
        if a == ' ' and w != '':
            t.append(int(w))
            w = ''
        if a == '\n':
            t.append(int(w.split('\n')[0]))
            t.sort()
            if t[0] + t[1] > t[2]:
                c += 1
            t = []
print(c)
nilq/baby-python
python
# -*- coding = utf-8 -*-
# @Time:2021/3/1917:36
# @Author:Linyu
# @Software:PyCharm
from datetime import datetime
from web import db
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, Length


class Message(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.String(200))
    name = db.Column(db.String(20))
    timestamp = db.Column(db.DateTime, default=datetime.now, index=True)


# Create the form
class HelloForm(FlaskForm):
    name = StringField('Name', validators=[DataRequired(), Length(1, 20)])
    body = TextAreaField('Message', validators=[DataRequired(), Length(1, 200)])
    submit = SubmitField()
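# Usage sketch (added for illustration): a view that validates HelloForm and
# persists a Message. The route, app object, and template name are assumptions.
#
# @app.route('/', methods=['GET', 'POST'])
# def index():
#     form = HelloForm()
#     if form.validate_on_submit():
#         db.session.add(Message(name=form.name.data, body=form.body.data))
#         db.session.commit()
#     return render_template('index.html', form=form)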
nilq/baby-python
python
import numpy as np
import pytest

from ..simulator import adjacent, Simulator
from ..problem import Problem


def simple_problem():
    return Problem(10, 10, np.ones((3, 3)) * 5)


def test_adjacent():
    assert adjacent((1, 1), (1, 2))
    assert adjacent((1, 1), (2, 1))
    assert adjacent((1, 1), (1, 0))
    assert adjacent((1, 1), (0, 1))
    assert not adjacent((1, 1), (2, 2))
    assert not adjacent((1, 1), (0, 0))


def test_simulator_init():
    robots = [['gavin', [-1, 0]], ['jeremy', [-1, 1]]]
    sim = Simulator(simple_problem(), robots)
    np.testing.assert_equal(sim.contamination, np.ones((3, 3)) * 5)
    assert sim.stations == {(-1, 0), (-1, 1)}
    assert sim.robot_positions == {(-1, 0), (-1, 1)}


def test_simulator_init_failures():
    robots = [['gavin', [-1, 0]], ['jeremy', [-1, 0]]]
    with pytest.raises(ValueError):
        sim = Simulator(simple_problem(), robots)

    robots = [['gavin', [2, 2]]]
    with pytest.raises(ValueError):
        sim = Simulator(simple_problem(), robots)


def test_simulator_move():
    robots = [['gavin', [-1, 0]], ['jeremy', [-1, 1]]]
    sim = Simulator(simple_problem(), robots)
    sim.apply(['gavin', 'move', [0, 0]])
    assert sim.robots['gavin'].pos == (0, 0)
    assert sim.robots['gavin'].fuel == 9
    assert sim.robot_positions == {(0, 0), (-1, 1)}
    assert sim.fuel_expended == 1


def test_simulator_move_failure():
    robots = [['gavin', [-1, 0]], ['jeremy', [-1, 1]]]
    sim = Simulator(simple_problem(), robots)
    sim.apply(['gavin', 'move', [-1, 1]])
    assert sim.robots['gavin'].pos == (-1, 0)
    assert sim.robots['gavin'].fuel == 10
    assert sim.robot_positions == {(-1, 0), (-1, 1)}
    assert sim.fuel_expended == 0


def test_simulator_clean():
    robots = [['gavin', [-1, 0]], ['jeremy', [-1, 1]]]
    sim = Simulator(simple_problem(), robots)
    sim.apply(['gavin', 'move', [0, 0]])
    sim.apply(['gavin', 'clean', 2])
    assert sim.robots['gavin'].fluid == 8
    assert sim.contamination[0, 0] == 3
    sim.apply(['gavin', 'clean', 4])
    assert sim.robots['gavin'].fluid == 4
    assert sim.contamination[0, 0] == 0
    sim.apply(['gavin', 'move', [0, 1]])
    sim.apply(['gavin', 'clean', 5])
    assert sim.robots['gavin'].fluid == 0
    assert sim.contamination[0, 1] == 1


def test_simulator_clean_off_board():
    robots = [['gavin', [-1, 0]], ['jeremy', [-1, 1]]]
    sim = Simulator(simple_problem(), robots)
    sim.apply(['gavin', 'clean', 2])
    assert sim.robots['gavin'].fluid == 8
    np.testing.assert_equal(sim.contamination, np.ones((3, 3)) * 5)


def test_simulator_resupply():
    robots = [['gavin', [-1, 0]], ['jeremy', [-1, 1]]]
    sim = Simulator(simple_problem(), robots)
    sim.apply(['gavin', 'move', [0, 0]])
    sim.apply(['gavin', 'clean', 3])
    assert sim.robots['gavin'].fuel == 9
    assert sim.robots['gavin'].fluid == 7
    sim.apply(['gavin', 'resupply'])
    assert sim.robots['gavin'].fuel == 9
    assert sim.robots['gavin'].fluid == 7
    sim.apply(['gavin', 'move', [-1, 0]])
    sim.apply(['gavin', 'resupply'])
    assert sim.robots['gavin'].fuel == 10
    assert sim.robots['gavin'].fluid == 10
nilq/baby-python
python
from pptx import Presentation

from paragraphs_extractor.file_iterator_interface import FileIteratorInterface


class PPTXIterator(FileIteratorInterface):
    def __init__(self, filename):
        super().__init__()
        self.filename = filename
        prs = Presentation(filename)
        for slide in prs.slides:
            for shape in slide.shapes:
                if hasattr(shape, 'text'):
                    cleaned_text = shape.text.replace('\n', '')
                    if cleaned_text:
                        self.paragraphs.append(cleaned_text)
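# Usage sketch (added for illustration): extracting the cleaned paragraphs
# from a deck. 'deck.pptx' is a placeholder path, and `paragraphs` is assumed
# to be initialised by FileIteratorInterface, as the constructor implies.
#
# it = PPTXIterator('deck.pptx')
# for paragraph in it.paragraphs:
#     print(paragraph)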
nilq/baby-python
python
import turtle


def draw_square(some_turtle, shape, color, side_length, speed):
    some_turtle.shape(shape)
    some_turtle.color(color)
    some_turtle.speed(speed)
    for i in range(1, 5):
        some_turtle.forward(side_length)
        some_turtle.right(90)


def draw_circle(some_turtle, shape, color, radius):
    some_turtle.shape(shape)
    some_turtle.color(color)
    some_turtle.circle(radius)


def draw_art():
    window = turtle.Screen()
    window.bgcolor("red")

    # Create the turtle Brad - Draws a square
    brad = turtle.Turtle()
    for i in range(1, 37):
        draw_square(brad, "turtle", "yellow", 100, 5)
        brad.right(10)

    # Create the turtle Angie - Draws a circle
    angie = turtle.Turtle()
    draw_circle(angie, "arrow", "blue", 100)

    window.exitonclick()


draw_art()
nilq/baby-python
python
from django.apps import AppConfig


class GgConfig(AppConfig):
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'GG'
nilq/baby-python
python
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from website.models import Exam, Problem, Task, Competitor, Score
from django.http import HttpResponse


@login_required
def view_problem(request, exam_id, problem_number):
    user = request.user
    exam = get_object_or_404(Exam, pk=exam_id)
    if not user.can_view_exam(exam):
        raise PermissionDenied(
            "You must be registered for the contest to see the problems")
    problem = get_object_or_404(Problem, exam=exam, problem_number=problem_number)

    # TODO: needs to work for coaches too (except they can't submit)
    if user.is_mathlete:
        mathlete = user.mathlete
        competitor = Competitor.objects.getCompetitor(exam, mathlete)
        score = Score.objects.get(problem=problem, competitor=competitor)

    context = {
        'problem': problem,
        'score': score,
        'exam': exam,
        'aiprob': problem.aiproblem.first(),
    }
    return render(request, 'exam/view_problem.html', context)
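# Usage sketch (added for illustration): typical urls.py wiring for this view.
# The URL pattern and name are assumptions.
#
# from django.urls import path
#
# urlpatterns = [
#     path('exam/<int:exam_id>/problem/<int:problem_number>/',
#          view_problem, name='view_problem'),
# ]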
nilq/baby-python
python
"""Apply high level effects to images such as shadows and convert to black and white.""" from __future__ import annotations from pathlib import Path from blendmodes.blend import BlendType, blendLayers from colourswatch.io import openColourSwatch from layeredimage.layeredimage import LayeredImage from PIL import Image, ImageDraw, ImageFilter, ImageFont from imageedit.io import getPixelDimens, getSortedColours from imageedit.transform import findAndReplace, resize, resizeSquare THISDIR = str(Path(__file__).resolve().parent) # pylint:disable=unbalanced-tuple-unpacking def roundCorners(image: Image.Image, radius: int | str) -> Image.Image: """Round the corners by a number of pixels. May be preferable to use... roundCornersAntiAlias. Use with caution as it modifies the image param. radius can be one of the following: pixel: int, percent: "val%", scale: "valx" Args: image (Image.Image): A PIL Image radius (int,str): One of pixel, percent, scale Returns: Image.Image: A PIL Image """ [radius] = getPixelDimens(image, [radius]) circle = Image.new("RGBA", (radius * 2, radius * 2), "#00000000") draw = ImageDraw.Draw(circle) draw.ellipse((0, 0, radius * 2, radius * 2), "#ffffffff") alpha = Image.new("RGBA", image.size, "#ffffffff") background = Image.new("RGBA", image.size, "#00000000") w, h = image.size alpha.paste(circle.crop((0, 0, radius, radius)), (0, 0)) alpha.paste(circle.crop((0, radius, radius, radius * 2)), (0, h - radius)) alpha.paste(circle.crop((radius, 0, radius * 2, radius)), (w - radius, 0)) alpha.paste(circle.crop((radius, radius, radius * 2, radius * 2)), (w - radius, h - radius)) background.paste(image, (0, 0), alpha.convert("RGBA")) return background def addDropShadowSimple(image: Image.Image, offset: list[int]) -> Image.Image: """Add a simple drop shadow. Args: image (Image.Image): Base image to give a drop shadow offset (list[int, int]): Offset of the shadow as [x,y] Returns: Image.Image: A PIL Image """ border = max(abs(x) for x in offset) return addDropShadowComplex(image, 11, border, offset, "#ffffff00", "#00000055") def addDropShadowComplex( image: Image.Image, iterations: int, border: int, offset: list[int], backgroundColour: str, shadowColour: str, ) -> Image.Image: """From https://en.wikibooks.org/wiki/Python_Imaging_Library/Drop_Shadows. Args: image (Image.Image): Base image to give a drop shadow iterations (int): Number of times to apply the blur filter to the shadow border (int): Border to give the image to leave space for the shadow offset (list[int, int]): Offset of the shadow as [x,y] backgroundColour (str): Colour of the background shadowColour (str): Colour of the drop shadow Returns: Image.Image: A PIL Image """ originalSize = image.size # Calculate the size of the intermediate image fullWidth = image.size[0] + abs(offset[0]) + 2 * border fullHeight = image.size[1] + abs(offset[1]) + 2 * border # Create the shadow's image. Match the parent image's mode. 
background = Image.new("RGBA", (fullWidth, fullHeight), backgroundColour) shadow = Image.new("RGBA", (originalSize[0], originalSize[1]), shadowColour) # Place the shadow, with the required offset shadowLeft = border + max(offset[0], 0) shadowTop = border + max(offset[1], 0) # Paste in the constant colour background.paste(shadow.convert("RGBA"), (shadowLeft, shadowTop), image.convert("RGBA")) # Apply the BLUR filter repeatedly for _ in range(iterations): background = background.filter(ImageFilter.BLUR) # Paste the original image on top of the shadow imgLeft = border - min(offset[0], 0) imgTop = border - min(offset[1], 0) background.paste(image.convert("RGBA"), (imgLeft, imgTop), image.convert("RGBA")) return resize(background, originalSize[0], originalSize[1]) def roundCornersAntiAlias(image: Image.Image, radius: int) -> Image.Image: """Round Corners taking a radius int as an arg and do antialias. Args: image (Image.Image): A PIL Image radius (int): radius in px Returns: Image.Image: Image """ factor = 2 imageTemp = resizeSquare(image, str(factor) + "x") [radius] = getPixelDimens(image, [radius]) imageTemp = roundCorners(imageTemp, radius * factor) return resizeSquare(imageTemp, str(1 / factor) + "x") def convertBlackAndWhite(image: Image.Image, mode: str = "filter-darker"): """Convert a PIL Image to black and white from a colour image. Some implementations use numpy but im not going to include the extra import Args: image (Image.Image): A PIL Image to act on mode (str, optional): Any of ["filter-darker", "filter-lighter", "background", "foreground", "edges"] Specify the mode for the function to use. filter-darker and lighter respectively make pixels darker than the average black and pixels that are lighter than the average black. background sets the most dominant colour to white and foreground sets the second most dominant color to black. edges finds the edges and sets them to black. non edges are white. Defaults to "filter-darker". Returns: Image.Image: The black and white image """ if mode in ["background", "foreground"]: image = doConvertBlackAndWhiteBGFG(image, mode) if mode in ["filter-darker", "filter-lighter"]: image = doConvertBlackAndWhiteFilter(image, mode) if mode == "edges": image = doConvertBlackAndWhiteFilter( image.convert("RGB").filter(ImageFilter.FIND_EDGES), "filter-lighter" ) return image def doConvertBlackAndWhiteFilter(image: Image.Image, mode: str): """Low level function... Convert an image to black and white based on a filter: filter-darker and lighter respectively make pixels darker than the average black and pixels that are lighter than the average black. Args: image (Image.Image): A PIL Image to act on mode (str): filter-darker and lighter respectively make pixels darker than the average black and pixels that are lighter than the average black. Returns: Image.Image: The black and white image """ img = image.convert("L") img.thumbnail((1, 1)) averageColour = img.getpixel((0, 0)) # Default tp "filter-lighter" threshold = lambda pixel: 0 if pixel > averageColour else 255 if mode == "filter-darker": threshold = lambda pixel: 0 if pixel < averageColour else 255 converted = image.convert("L").point(threshold, mode="1") return converted.convert("RGBA") def doConvertBlackAndWhiteBGFG(image, mode): """Low level function... Convert an image to black and white based on the foreground/ background: background sets the most dominant colour to white and foreground sets the second most dominant color to black. 
Args: image (Image.Image): A PIL Image to act on mode (str): background sets the most dominant colour to white and foreground sets the second most dominant color to black. Returns: Image.Image: The black and white image """ if mode == "background": image = findAndReplace( image, getSortedColours(image)[0][1], (255, 255, 255, 255), (0, 0, 0, 255) ) if mode == "foreground": image = findAndReplace( image, getSortedColours(image)[1][1], (0, 0, 0, 255), (255, 255, 255, 255) ) return image def addText(image: Image.Image, text: str) -> Image.Image: """Add text to an image such that the resultant image is in the form... [img]|text. The text is in fira code and has a maximum length of 16 chars (text longer than this is truncated with "...") Args: image (Image.Image): A PIL Image to add text to text (str): A string containing text to add to the image Returns: Image.Image: Image with text """ if len(text) > 15: text = text[:13] + ".." width, height = image.size font = ImageFont.truetype(THISDIR + "/resources/FiraCode-Light.ttf", int(height / 2 * 0.8)) colours = getSortedColours(image) backgroundColour = colours[0][1] foregroundColour = colours[1][1] background = Image.new("RGBA", (width * 5, height), backgroundColour) imageText = ImageDraw.Draw(background) imageText.text( (int(width * 0.9), int(height / 4)), "|" + text, font=font, fill=foregroundColour ) background.paste(image.convert("RGBA"), (0, 0), image.convert("RGBA")) return background def blend( background: Image.Image, foreground: Image.Image, blendType: BlendType, opacity: float = 1 ) -> Image.Image: """Blend layers using numpy array. Args: background (Image.Image): background layer foreground (Image.Image): foreground layer (must be same size as background) blendType (BlendType): The blendtype opacity (float): The opacity of the foreground image Returns: Image: combined image Specify supported blend types NORMAL MULTIPLY ADDITIVE COLOURBURN COLOURDODGE REFLECT GLOW OVERLAY DIFFERENCE NEGATION LIGHTEN DARKEN SCREEN XOR SOFTLIGHT HARDLIGHT GRAINEXTRACT GRAINMERGE DIVIDE HUE SATURATION COLOUR LUMINOSITY PINLIGHT VIVIDLIGHT EXCLUSION DESTIN DESTOUT DESTATOP SRCATOP """ # We are just aliasing the blendLayers function and making the type checker happy del foreground, blendType, opacity return background blend = blendLayers def applySwatch(image, swatchFile): """Apply a swatch to the image using colourswatch. Args: image (Image.Image): The PIL Image swatchFile (string): Path to the swatch file Returns: Image: quantized image """ pal = Image.new("P", (1, 1)) pal.putpalette(openColourSwatch(swatchFile).toPILPalette()) rgbImage = image.convert("RGB").quantize(palette=pal, method=2, dither=0) background = Image.new("RGBA", image.size, "#00000000") background.paste(rgbImage.convert("RGBA"), (0, 0), image.convert("RGBA")) return background def pixelate(image: Image.Image, pixelSize: int = 4): """Apply a pixelate effect to an image. This might be used to create a retro effect. Args: image (Image.Image): A pillow image pixelSize (int, optional): X, Y pixels to merge. E.g. assuming image dimensions of 256x256 and pixelSize of 4, an image with dimensions 256x256 will be returned with the effect of an image with size 64x64. Defaults to 4. 
Returns: Image: pixelated image """ originalSize = image.size width, height = int(image.size[0] / pixelSize), int(image.size[1] / pixelSize) downsize = image.resize((width, height), Image.NEAREST) return downsize.resize(originalSize, Image.NEAREST) def removeBG(image: Image.Image): """Remove the background from an image or a layeredimage. Args: image (Image.Image|layeredimage.layeredimage.LayeredImage): An image or a layered image Returns: Image: image without bg """ if isinstance(image, Image.Image): return findAndReplace(image, getSortedColours(image)[0][1], (0, 0, 0, 0)) return LayeredImage(image.extractLayers()[1:]).getFlattenLayers()
nilq/baby-python
python
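# Usage sketch (not part of the file above): chaining a few of the effects.
# Assumes the module is importable as imageedit.effects and that a test image
# exists at the (hypothetical) path below.
from PIL import Image
from imageedit.effects import addDropShadowSimple, pixelate, roundCornersAntiAlias

img = Image.open("logo.png").convert("RGBA")
img = roundCornersAntiAlias(img, 32)     # round the corners with antialiasing
img = addDropShadowSimple(img, [5, 5])   # shadow offset 5px right and down
pixelate(img, 8).save("logo_retro.png")  # merge 8x8 blocks for a retro look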
'''
Python module for creating synthetic data sets.
'''

import os
import csv
import math
import random
from typing import List, Dict

param_funcs = [
    lambda x: math.factorial(abs(x) ** 0.1 // 1),
    lambda x: math.frexp(x)[0],
    lambda x: math.log(abs(x) + 0.1),
    lambda x: math.log(abs(x) + 0.1, 5),
    lambda x: math.log(abs(x) + 0.1, 10),
    lambda x: math.pow(x, 1),
    lambda x: math.pow(x, 2),
    lambda x: math.pow(x, 3),
    lambda x: math.sqrt(abs(x)),
    lambda x: math.atan(x),
    lambda x: math.cos(x),
    lambda x: math.sin(x),
    lambda x: math.tan(x),
    lambda x: math.erf(x),
    lambda x: math.erfc(x),
    lambda x: math.gamma((abs(x) + 0.1) ** 0.1),
    lambda x: math.lgamma((abs(x) + 0.1) ** 0.1),
    lambda x: x + (random.random() - 0.5) * x,
    lambda x: 1 / x if x != 0 else 1 / 0.00001,
    lambda x: random.random() * 5 * x,
    lambda x: x ** random.random(),
    lambda x: 0.25 * x,
    lambda x: 0.5 * x,
    lambda x: 0.75 * x,
    lambda x: random.random(),
    lambda x: x ** 2 - x
]
# Bind f as a default argument so each lambda negates its own function;
# a bare closure would capture only the last f from the loop.
negative_param_funcs = [lambda x, f=f: -f(x) for f in param_funcs]
param_funcs = param_funcs + negative_param_funcs


def rand_func():
    if random.random() < 0.5:
        return random.choice(param_funcs)
    else:
        # combine two functions to create a new function
        f1 = random.choice(param_funcs)
        f2 = random.choice(param_funcs)
        return lambda x: f2(f1(x)) if not isinstance(f1(x), complex) else f2(f1(x).real)


def fuzzify(x, factor: float = 0.5) -> float:
    '''
    Randomly change given number a bit to add noise/fuzz to data.

    factor is float [0 < factor < 1] that adjusts how much fuzz.
    '''
    if isinstance(x, complex):
        x = x.real
    try:
        return x * (random.random() + 0.5) ** (factor + 0.1)
    except OverflowError:
        if x > 0:
            return 10 ** 10
        else:
            return -10 ** 10


class DataCreator():

    def __init__(self, num_params: int = 10, num_samples: int = 100) -> None:
        self.num_params = num_params
        self.num_samples = num_samples
        self.data = self.create_data(num_params, num_samples)  # [{}, {}, ...]

    def create_data(self, num_params: int = 10, num_samples: int = 100) -> List[Dict[str, float]]:
        '''Creates a new data set.'''
        # create initial data set structure with target values
        target_func = rand_func()
        min_initial_target = -random.random() * 10
        max_initial_target = random.random() * 10
        initial_values = [random.uniform(min_initial_target, max_initial_target)
                          for _ in range(num_samples)]
        target_values = [fuzzify(target_func(x) if not isinstance(target_func(x), complex)
                                 else target_func(x).real)
                         for x in initial_values]
        data = [{'Target': x} for x in target_values]
        # create associated parameters
        for i in range(1, num_params + 1):
            param = f'Param_{i}'
            fuzz_factor = random.random()
            param_func = rand_func()
            for index, d in enumerate(data):
                value = fuzzify(param_func(d['Target']), fuzz_factor)
                if isinstance(value, complex):
                    value = value.real
                d[param] = value
        return data

    def save_data_as_pymodule(self, module_name='newdata_set.py') -> None:
        '''
        Create a string which creates a Python module with self's
        created data as an importable list.
        '''
        s = "'''\nPython module with a synthetic data set created by newdata.\n"
        s += f"Number of Parameters: {self.num_params}\n"
        s += f"Number of Samples: {self.num_samples}\n'''\n\n"
        s += "data = [\n"
        for d in self.data:
            s += f"    {str(d)},\n"
        s += "    ]\n"
        with open(os.path.join('.', module_name), 'w') as f:
            f.write(s)
        print(f'New synthetic data saved to {module_name}!')

    def save_data_as_csv(self, csv_filename='newdata_set.csv') -> None:
        '''
        Output a CSV file with the synthetic data set.
        '''
        with open(csv_filename, 'w', newline='') as f:
            keys = self.data[0].keys()
            dict_writer = csv.DictWriter(f, keys, delimiter=',')
            dict_writer.writeheader()
            dict_writer.writerows(self.data)
nilq/baby-python
python
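# Usage sketch (not part of the file above): generate a small data set and
# write it out. File names below are just the class defaults.
dc = DataCreator(num_params=5, num_samples=50)
print(dc.data[0])  # one sample: {'Target': ..., 'Param_1': ..., ..., 'Param_5': ...}
dc.save_data_as_csv('newdata_set.csv')
dc.save_data_as_pymodule('newdata_set.py')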
#!/usr/bin/env python
from logging import StreamHandler
from typing import Optional
from datetime import datetime


class CLIHandler(StreamHandler):

    def formatException(self, _) -> Optional[str]:
        return None

    def format(self, record) -> str:
        exc_info = record.exc_info
        if record.exc_info is not None:
            record.exc_info = None
        retval = f'{datetime.fromtimestamp(record.created).strftime("%H:%M:%S")} - ' \
                 f'{record.name.split(".")[-1]} - {record.msg}'
        if exc_info:
            retval += " (See log file for stack trace dump)"
        record.exc_info = exc_info
        return retval
nilq/baby-python
python
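# Usage sketch (not part of the file above): pair CLIHandler with a file
# handler so the console stays terse while the log file keeps full tracebacks.
# Logger name and log-file path are illustrative.
import logging

logger = logging.getLogger("myapp.worker")
logger.setLevel(logging.DEBUG)
logger.addHandler(CLIHandler())                      # terse console output
logger.addHandler(logging.FileHandler("myapp.log"))  # full detail, incl. tracebacks

try:
    1 / 0
except ZeroDivisionError:
    # console line ends with "(See log file for stack trace dump)";
    # the file handler records the actual traceback
    logger.exception("division failed")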
from __future__ import division

from ...problem_classes.heat_exchange import *

from pyomo.environ import *
from pyomo.opt import SolverFactory

# Helper for precision issues
epsilon = 0.0000001


def solve_fractional_relaxation(inst, lamda):
    # Local copy of the instance
    n = inst.n
    m = inst.m
    k = inst.k
    QH = list(inst.QH)
    QC = list(inst.QC)

    # Fixing precision errors
    for i in range(inst.n):
        for s in range(inst.k):
            if QH[i][s] < epsilon:
                QH[i][s] = 0
    for j in range(inst.m):
        for t in range(inst.k):
            if QC[j][t] < epsilon:
                QC[j][t] = 0

    # Computation of heat residuals
    R = [sum(QH[i][s] for i in range(n) for s in range(u + 1))
         - sum(QC[j][t] for j in range(m) for t in range(u + 1))
         for u in range(k)]
    for u in range(k):
        if R[u] < 0:
            R[u] = 0

    (A, VH, VC) = valid_quadruples_set(n, m, k, QH, QC, R)

    model = AbstractModel()

    model.n = Param(within=NonNegativeIntegers, initialize=n)  # number of hot streams
    model.m = Param(within=NonNegativeIntegers, initialize=m)  # number of cold streams
    model.k = Param(within=NonNegativeIntegers, initialize=k)  # number of temperature intervals

    model.H = RangeSet(0, model.n - 1)  # set of hot streams
    model.C = RangeSet(0, model.m - 1)  # set of cold streams
    model.T = RangeSet(0, model.k - 1)  # set of temperature intervals
    model.A = Set(within=model.H * model.T * model.C * model.T, initialize=A)  # set of valid quadruples (arcs)
    model.VH = Set(within=model.H * model.T, initialize=VH)  # set of valid hot pairs (vertices)
    model.VC = Set(within=model.C * model.T, initialize=VC)  # set of valid cold pairs (vertices)

    # Parameter: heat load of hot stream i in temperature interval t
    model.QH = Param(model.VH, within=NonNegativeReals,
                     initialize=lambda model, i, s: QH[i][s])

    # Parameter: heat load of cold stream j in temperature interval t
    model.QC = Param(model.VC, within=NonNegativeReals,
                     initialize=lambda model, j, t: QC[j][t])

    # Parameter: fractional cost values
    model.lamda = Param(model.H, model.C, within=NonNegativeReals,
                        initialize=lambda model, i, j: lamda[i][j])

    # Variable: heat transferred from (i,s) to (j,t)
    model.q = Var(model.A, within=NonNegativeReals)

    # Objective: minimization of the cost of the network flow
    def min_cost_flow_objective_rule(model):
        return sum(model.lamda[i, j] * model.q[i, s, j, t] for (i, s, j, t) in model.A)
    model.obj_value = Objective(rule=min_cost_flow_objective_rule, sense=minimize)

    # Constraint: heat conservation of hot streams
    def hot_supply_rule(model, i, s):
        return sum(model.q[temp_i, temp_s, j, t]
                   for (temp_i, temp_s, j, t) in model.A
                   if temp_i == i and temp_s == s) == model.QH[i, s]
    model.hot_supply_constraint = Constraint(model.VH, rule=hot_supply_rule)

    # Constraint: heat conservation of cold streams
    def cold_demand_rule(model, j, t):
        return sum(model.q[i, s, temp_j, temp_t]
                   for (i, s, temp_j, temp_t) in model.A
                   if temp_j == j and temp_t == t) == model.QC[j, t]
    model.cold_demand_constraint = Constraint(model.VC, rule=cold_demand_rule)

    solver = 'cplex'
    opt = SolverFactory(solver)
    opt.options['threads'] = 1
    LP = model.create_instance()
    results = opt.solve(LP)
    elapsed_time = results.solver.time

    # Problem variables
    y = [[0 for j in range(inst.m)] for i in range(inst.n)]
    q = [[[[0 for t in range(inst.k)] for j in range(inst.m)]
          for s in range(inst.k)] for i in range(inst.n)]

    for (i, s, j, t) in A:
        if LP.q[i, s, j, t].value > epsilon:
            q[i][s][j][t] = LP.q[i, s, j, t].value
            y[i][j] = 1

    matches = sum(sum(y[i]) for i in range(inst.n))
    sol = Heat_Exchange('relaxation_rounding', inst.n, inst.m, inst.k, matches, y, q)
    relaxation_value = results.problem.lower_bound
    return (sol, elapsed_time, relaxation_value)


# It computes the set A of valid quadruples which is required for building
# the min cost flow LP model. A set M of matches is passed as parameter.
def valid_quadruples_set(n, m, k, QH, QC, R):
    A = []   # quadruples (i,s,j,t)
    VH = []  # vertices (i,s)
    VC = []  # vertices (j,t)
    for i in range(n):
        for j in range(m):
            for s in range(k):
                for t in range(k):
                    zero_residual = False
                    for u in range(s, t):
                        if R[u] == 0:
                            zero_residual = True
                    if (s <= t and QH[i][s] > epsilon and QC[j][t] > epsilon
                            and not zero_residual):
                        A.append((i, s, j, t))
                        if (i, s) not in VH:
                            VH.append((i, s))
                        if (j, t) not in VC:
                            VC.append((j, t))
    return (A, VH, VC)


def fractional_relaxation_lower_bound(inst):
    # inst is a network
    epsilon = 10 ** (-7)
    lamda = [[0 for j in range(inst.m)] for i in range(inst.n)]
    for i in range(inst.n):
        for j in range(inst.m):
            if inst.U[i][j] > epsilon:
                lamda[i][j] = 1 / inst.U[i][j]
            else:
                lamda[i][j] = float('inf')
    (sol, elapsed_time, relaxation_value) = solve_fractional_relaxation(inst, lamda)
    return relaxation_value
nilq/baby-python
python
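# Usage sketch (not part of the file above). `load_instance` is a stand-in for
# however the surrounding package builds its heat-exchange network instances
# (an object with n, m, k, QH, QC and U attributes); CPLEX must be installed
# for the solve to run.
inst = load_instance("network_01")  # assumed loader, not defined in this file
lb = fractional_relaxation_lower_bound(inst)
print("LP relaxation lower bound:", lb)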
from __future__ import absolute_import, unicode_literals

import itertools

import django
from django import template

from wagtail.wagtailcore import hooks

register = template.Library()


@register.inclusion_tag('wagtailusers/groups/includes/formatted_permissions.html')
def format_permissions(permission_bound_field):
    """
    Given a bound field with a queryset of Permission objects - which must be using
    the CheckboxSelectMultiple widget - construct a list of dictionaries for 'objects':

        'objects': [
            {
                'object': name_of_some_content_object,
                'add': checkbox,
                'change': checkbox,
                'delete': checkbox,
            },
        ]

    and a list of other permissions:

        'others': [
            (any_non_add_change_delete_permission, checkbox),
        ]

    (where 'checkbox' is an object with a tag() method that renders the checkbox
    as HTML; this is an instance of django.forms.widgets.CheckboxChoiceInput on
    Django <1.11, and a BoundWidget on Django >=1.11)

    - and returns a table template formatted with this list.
    """
    permissions = permission_bound_field.field._queryset
    # get a distinct list of the content types that these permissions relate to
    content_type_ids = set(permissions.values_list('content_type_id', flat=True))

    # iterate over permission_bound_field to build a lookup of individual renderable
    # checkbox objects
    if django.VERSION < (1, 11):
        # On Django <1.11, iterating over the BoundField returns a sequence of
        # CheckboxChoiceInput objects, whose ID is available as .choice_value
        checkboxes_by_id = {
            int(checkbox.choice_value): checkbox
            for checkbox in permission_bound_field
        }
    else:
        # On Django >=1.11, iterating over the BoundField returns a sequence of
        # BoundWidget objects, whose ID is available as .data['value']
        checkboxes_by_id = {
            int(checkbox.data['value']): checkbox
            for checkbox in permission_bound_field
        }

    object_perms = []
    other_perms = []

    for content_type_id in content_type_ids:
        content_perms = permissions.filter(content_type_id=content_type_id)
        content_perms_dict = {}
        for perm in content_perms:
            checkbox = checkboxes_by_id[perm.id]
            # identify the three main categories of permission, and assign to
            # the relevant dict key, else bung in the 'other_perms' list
            permission_action = perm.codename.split('_')[0]
            if permission_action in ['add', 'change', 'delete']:
                content_perms_dict['object'] = perm.content_type.name
                content_perms_dict[permission_action] = checkbox
            else:
                other_perms.append((perm, checkbox))
        if content_perms_dict:
            object_perms.append(content_perms_dict)
    return {
        'object_perms': object_perms,
        'other_perms': other_perms,
    }


@register.inclusion_tag("wagtailadmin/pages/listing/_buttons.html", takes_context=True)
def user_listing_buttons(context, user):
    button_hooks = hooks.get_hooks('register_user_listing_buttons')
    buttons = sorted(itertools.chain.from_iterable(
        hook(context, user) for hook in button_hooks))
    return {'user': user, 'buttons': buttons}
nilq/baby-python
python
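# Usage sketch (not part of the file above): user_listing_buttons collects its
# buttons from the 'register_user_listing_buttons' hook, so a hook
# implementation might look like this. The UserListingButton import path is an
# assumption and can differ between Wagtail versions.
from wagtail.wagtailcore import hooks
from wagtail.wagtailusers.widgets import UserListingButton


@hooks.register('register_user_listing_buttons')
def audit_user_button(context, user):
    # the URL here is purely illustrative
    yield UserListingButton('Audit', '/admin/audit/%d/' % user.pk, priority=20)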
import math
import random

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import roc_auc_score
from torchvision import datasets, transforms
from tqdm import tqdm, trange

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
eps = 1e-10


class Gaussian:
    def __init__(self, mu, rho):
        self.mu = mu
        self.rho = rho
        self.normal = torch.distributions.Normal(0, 1)

    @property
    def sigma(self):
        return torch.log1p(torch.exp(self.rho))

    def sample(self):
        epsilon = self.normal.sample(self.rho.size()).to(DEVICE)
        return self.mu + self.sigma * epsilon

    def log_prob(self, input):
        return (
            -math.log(math.sqrt(2 * math.pi))
            - torch.log(self.sigma + eps)
            - ((input - self.mu) ** 2) / (2 * self.sigma ** 2)
        ).sum()


class GaussianPrior:
    def __init__(self, mu, sigma):
        self.mu = mu
        self.sigma = sigma

    def log_prob(self, input):
        return (
            -math.log(math.sqrt(2 * math.pi))
            - torch.log(self.sigma)
            - ((input - self.mu) ** 2) / (2 * self.sigma ** 2)
        ).sum()


class BayesianLinear(nn.Module):
    def __init__(self, n_input, n_output, sigma1, T, lower_bound, upper_bound):
        super().__init__()
        self.n_input = n_input
        self.n_output = n_output
        self.w_mu = nn.Parameter(
            torch.Tensor(3, n_output, n_input).normal_(0, math.sqrt(2 / n_input))
        )
        self.w_rho = nn.Parameter(
            torch.Tensor(3, n_output, n_input).uniform_(lower_bound, upper_bound)
        )
        self.w = Gaussian(self.w_mu, self.w_rho)
        self.b_mu = nn.Parameter(torch.Tensor(3, n_output).normal_(0, math.sqrt(2 / n_input)))
        self.b_rho = nn.Parameter(torch.Tensor(3, n_output).uniform_(lower_bound, upper_bound))
        self.b = Gaussian(self.b_mu, self.b_rho)
        # Prior: Gaussian
        self.w_prior = GaussianPrior(0, sigma1)
        self.b_prior = GaussianPrior(0, sigma1)
        self.log_prior = 0
        self.log_variational_posterior = 0
        # self.KL = 0
        self.sigma_mean = 0
        self.sigma_std = 0

    def forward(self, input, sample=False):
        if self.training or sample:
            w = self.w.sample()
            b = self.b.sample()
            # pick one of the three weight/bias candidates at random
            cc = random.randint(0, 2)
            w = w[cc, :, :]
            b = b[cc, :]
            w_mat = w.repeat(3, 1, 1).to(DEVICE)
            b_mat = b.repeat(3, 1, 1).to(DEVICE)
        else:
            w = self.w_mu
            b = self.b_mu
            w_mat = w
            b_mat = b
        self.log_prior = self.w_prior.log_prob(w_mat) / 3 + self.b_prior.log_prob(b_mat) / 3
        self.log_variational_posterior = self.w.log_prob(w_mat) / 3 + self.b.log_prob(b_mat) / 3
        self.sigma_mean = self.w.sigma.mean()
        self.sigma_std = self.w.sigma.std()
        return F.linear(input, w, b)


class BayesianNetwork(nn.Module):
    def __init__(self, n_units, sigma1, T):
        super().__init__()
        self.l1 = BayesianLinear(28 * 28, n_units, sigma1, T, -5, -4)
        self.l2 = BayesianLinear(n_units, n_units, sigma1, T, -5, -4)
        self.l3 = BayesianLinear(n_units, 10, sigma1, T, -5, -4)

    def forward(self, x, sample=False):
        x = x.view(-1, 28 * 28)
        x = F.relu(self.l1(x, sample), inplace=False)
        x = F.relu(self.l2(x, sample), inplace=False)
        x = F.softmax(self.l3(x, sample))
        return x

    def log_prior(self):
        return self.l1.log_prior + self.l2.log_prior + self.l3.log_prior

    def log_variational_posterior(self):
        return (
            self.l1.log_variational_posterior
            + self.l2.log_variational_posterior
            + self.l3.log_variational_posterior
        )

    def KL_q_p(self):
        return self.l1.KL + self.l2.KL + self.l3.KL

    def free_energy(self, input, target, batch_size, num_batches, n_samples, T):
        outputs = torch.zeros(batch_size, 10).to(DEVICE)
        log_prior = torch.zeros(1).to(DEVICE)
        log_variational_posterior = torch.zeros(1).to(DEVICE)
        negative_log_likelihood = torch.zeros(1).to(DEVICE)
        for i in range(n_samples):
            output = self(input, sample=True)
            outputs += output / n_samples
            log_prior += self.log_prior() / n_samples
            log_variational_posterior += self.log_variational_posterior() / n_samples
            negative_log_likelihood += (
                F.nll_loss(torch.log(output + eps), target, size_average=False) / n_samples
            )
        # new target function, not absorb T into prior
        loss = (
            log_variational_posterior - log_prior / T
        ) + negative_log_likelihood / T * num_batches
        corrects = outputs.argmax(dim=1).eq(target).sum().item()
        return (
            loss,
            log_prior,
            log_variational_posterior,
            negative_log_likelihood,
            corrects,
        )


class BayesianConv2D(nn.Module):
    def __init__(self, in_channels, out_channels, sigma1, kernel_size=3, stride=1, padding=1):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.w_mu = nn.Parameter(
            torch.Tensor(3, out_channels, in_channels, kernel_size, kernel_size).normal_(
                0,
                math.sqrt(2 / (out_channels * in_channels * kernel_size * kernel_size)),
            )
        )
        self.w_rho = nn.Parameter(
            torch.Tensor(3, out_channels, in_channels, kernel_size, kernel_size).uniform_(
                -2.253, -2.252
            )
        )
        self.w = Gaussian(self.w_mu, self.w_rho)
        # prior: Gaussian
        self.w_prior = GaussianPrior(0, sigma1)
        self.log_prior = 0
        self.log_variational_posterior = 0

    def forward(self, input, sample=True):
        if self.training or sample:
            w = self.w.sample()
            cc = random.randint(0, 2)
            w = w[cc, :, :, :, :]
            w_mat = w.repeat(3, 1, 1, 1, 1).to(DEVICE)
        else:
            w = self.w_mu
            w_mat = w
        self.log_prior = self.w_prior.log_prob(w_mat) / 3
        self.log_variational_posterior = self.w.log_prob(w_mat) / 3
        return F.conv2d(input, w, bias=None, stride=self.stride, padding=self.padding)


def BayesianConv3x3(in_channels, out_channels, sigma1, stride=1):
    return BayesianConv2D(
        in_channels, out_channels, sigma1, kernel_size=3, stride=stride, padding=1
    )


# NOTE: ResidualBlock is used below but its definition was missing from this
# file. This is a minimal, assumed implementation consistent with how it is
# called (Bayesian 3x3 convs + BatchNorm, optional downsample, and aggregated
# log_prior / log_variational_posterior values); the downsample path's own
# prior terms are not counted here.
class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels, sigma1, stride=1, downsample=None):
        super().__init__()
        self.conv1 = BayesianConv3x3(in_channels, out_channels, sigma1, stride)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = BayesianConv3x3(out_channels, out_channels, sigma1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.downsample = downsample

    def forward(self, x):
        residual = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if self.downsample is not None:
            residual = self.downsample(x)
        return self.relu(out + residual)

    @property
    def log_prior(self):
        return self.conv1.log_prior + self.conv2.log_prior

    @property
    def log_variational_posterior(self):
        return self.conv1.log_variational_posterior + self.conv2.log_variational_posterior


class BayesianResNet14(nn.Module):
    def __init__(self, block, sigma1, num_class=10):
        super().__init__()
        self.in_channels = 16
        self.conv = BayesianConv3x3(3, 16, sigma1)
        self.frn = nn.BatchNorm2d(16)
        self.tlu = nn.ReLU(inplace=True)
        self.block1 = ResidualBlock(16, 16, sigma1)
        self.block2 = ResidualBlock(16, 16, sigma1)
        downsample1 = nn.Sequential(BayesianConv3x3(16, 32, sigma1, 2), nn.BatchNorm2d(32))
        self.block3 = ResidualBlock(16, 32, sigma1, 2, downsample1)
        self.block4 = ResidualBlock(32, 32, sigma1)
        downsample2 = nn.Sequential(BayesianConv3x3(32, 64, sigma1, 2), nn.BatchNorm2d(64))
        self.block5 = ResidualBlock(32, 64, sigma1, 2, downsample2)
        self.block6 = ResidualBlock(64, 64, sigma1)
        self.avg_pool = nn.AvgPool2d(8)
        # T is unused inside BayesianLinear; 1 is a placeholder here because
        # the original call was missing that argument.
        self.fc = BayesianLinear(64, num_class, sigma1, 1, -2.253, -2.252)

    def forward(self, x, sample=False):
        out = self.conv(x)
        out = self.frn(out)
        out = self.tlu(out)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.block4(out)
        out = self.block5(out)
        out = self.block6(out)
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = F.softmax(self.fc(out, sample))
        return out

    def log_prior(self):
        return (
            self.conv.log_prior
            + self.block1.log_prior
            + self.block2.log_prior
            + self.block3.log_prior
            + self.block4.log_prior
            + self.block5.log_prior
            + self.block6.log_prior
            + self.fc.log_prior
        )

    def log_variational_posterior(self):
        return (
            self.conv.log_variational_posterior
            + self.block1.log_variational_posterior
            + self.block2.log_variational_posterior
            + self.block3.log_variational_posterior
            + self.block4.log_variational_posterior
            + self.block5.log_variational_posterior
            + self.block6.log_variational_posterior
            + self.fc.log_variational_posterior
        )

    def free_energy(self, input, target, batch_size, num_batches, n_samples, T):
        outputs = torch.zeros(batch_size, 10).to(DEVICE)
        log_prior = torch.zeros(1).to(DEVICE)
        log_variational_posterior = torch.zeros(1).to(DEVICE)
        negative_log_likelihood = torch.zeros(1).to(DEVICE)
        loss = 0
        for i in range(n_samples):
            output = self(input, sample=True)
            outputs += output / n_samples
            neg = F.nll_loss(torch.log(output + eps), target, size_average=False)
            negative_log_likelihood += neg / n_samples
            const = (
                self.log_variational_posterior() - self.log_prior() / T + neg / T * num_batches
            ) / n_samples
            loss += const.detach() * self.log_variational_posterior()
            log_prior += self.log_prior() / n_samples
            log_variational_posterior += self.log_variational_posterior() / n_samples
        corrects = outputs.argmax(dim=1).eq(target).sum().item()
        return (
            loss,
            log_prior,
            log_variational_posterior,
            negative_log_likelihood,
            corrects,
        )
nilq/baby-python
python
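# Usage sketch (not part of the file above): a smoke test of BayesianNetwork
# on fake MNIST-shaped data. Hyper-parameters are illustrative, not tuned;
# sigma1 is passed as a tensor because GaussianPrior.log_prob calls torch.log
# on it.
net = BayesianNetwork(n_units=400, sigma1=torch.tensor(0.1), T=1.0).to(DEVICE)
x = torch.randn(32, 1, 28, 28).to(DEVICE)   # fake batch of 32 images
y = torch.randint(0, 10, (32,)).to(DEVICE)  # fake labels
loss, log_p, log_q, nll, correct = net.free_energy(
    x, y, batch_size=32, num_batches=1875, n_samples=2, T=1.0)
loss.backward()
print(loss.item(), correct)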
import Tkinter as tk
import random
import time
import pygame as p
import math

mutation_rate = 10
increase_rate = 0.1
complex = True
pop_size = 200
black = (0, 0, 0)
fps = 60
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
grid = []
size = 20
w = 32
flag = 0
mousepos = []
space = "udlr"
splen = len(space)
length = 400
startx = 0
starty = 0
finishx = 19
finishy = 19
prev_steps = -1


def find_dupes(st):
    # Replace duplicate move strings with freshly generated ones so the
    # population stays diverse.
    seen = []
    for i in range(len(st)):
        if st[i] in seen:
            k = gen_population(1)[0]
            if k not in st:
                st[i] = k
            else:
                st[i] = k[::-1]
        seen.append(st[i])
    return st


def get_numbers(currentx, currenty, steps):
    global prev_steps, mutation_rate, complex
    if complex == False:
        d = abs(finishy - currenty)
        d1 = abs(finishx - currentx)
        td = d + d1  # +steps/((d+d1)*length)
        maxd = abs(finishy - starty) + abs(finishx - startx)
        if steps > prev_steps:
            prev_steps = steps
        return 100 - int((float(td) / maxd) * 100)
    else:
        st = float(steps) / length
        d = abs(finishy - currenty)
        d1 = abs(finishx - currentx)
        td = d + d1  # +steps/((d+d1)*length)
        st = steps
        if steps > prev_steps:
            prev_steps = steps
            # complex = False
        return (float(st) / length) * 100


def clear():
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            if grid[i][j] == "P":
                grid[i][j] = 0
    grid[startx][starty] = "P"


def fitness(player, gri):
    global mutation_rate
    board = gri
    # mutation_rate=10
    steps = 0
    start = [startx, starty]
    currentx = startx
    currenty = starty
    visited = []
    visited = [[currentx, currenty]]
    for i in range(len(player)):
        # print "STEPS",steps
        # print board
        k = player[i]
        steps += 1
        if k == 'u':
            # print "UP"
            try:
                if board[currentx - 1][currenty] == 0 and [currentx - 1, currenty] not in visited and currentx >= 1:
                    # print "NO ERROR"
                    if currentx - 1 < 0:
                        board[500][500] = "p"  # deliberately raise IndexError
                    board[currentx][currenty] = 0
                    board[currentx - 1][currenty] = 'P'
                    currentx = currentx - 1
                    # gridplayer(board)
                    # time.sleep(0.5)
                    visited.append([currentx, currenty])
                    mutation_rate -= increase_rate / (float(2))
                elif board[currentx - 1][currenty] == "F" and currentx >= 1:
                    clear()
                    return 100
                else:
                    clear()
                    mutation_rate += increase_rate * 5
                    return get_numbers(currentx, currenty, steps)
            except:
                clear()
                mutation_rate += increase_rate * 5
                return get_numbers(currentx, currenty, steps)
        if k == 'd':
            # print "DOWN"
            try:
                if board[currentx + 1][currenty] == 0 and [currentx + 1, currenty] not in visited:
                    # print "NO ERROR"
                    board[currentx][currenty] = 0
                    board[currentx + 1][currenty] = 'P'
                    currentx = currentx + 1
                    # gridplayer(board)
                    # time.sleep(0.5)
                    visited.append([currentx, currenty])
                    # print "DOWN DONE"
                    mutation_rate -= increase_rate / (float(2))
                elif board[currentx + 1][currenty] == "F":
                    # print "REACHED FINISH"
                    clear()
                    return 100
                else:
                    # "NO VALID DOWN"
                    mutation_rate += increase_rate * 5
                    clear()
                    return get_numbers(currentx, currenty, steps)
            except Exception as e:
                # print e
                clear()
                mutation_rate += increase_rate * 5
                return get_numbers(currentx, currenty, steps)
        if k == 'l':
            # print "LEFT"
            try:
                if board[currentx][currenty - 1] == 0 and [currentx, currenty - 1] not in visited and currenty >= 1:
                    # print "NO ERROR"
                    if currenty - 1 < 0:
                        board[500][500] = "LOL"  # deliberately raise IndexError
                    board[currentx][currenty] = 0
                    board[currentx][currenty - 1] = 'P'
                    currenty = currenty - 1
                    # gridplayer(board)
                    # time.sleep(0.5)
                    visited.append([currentx, currenty])
                    mutation_rate -= increase_rate / (float(2))
                elif board[currentx][currenty - 1] == "F" and currenty >= 1:
                    clear()
                    return 100
                else:
                    clear()
                    mutation_rate += increase_rate * 5
                    return get_numbers(currentx, currenty, steps)
            except:
                clear()
                mutation_rate += increase_rate * 5
                return get_numbers(currentx, currenty, steps)
        if k == 'r':
            # print "RIGHT"
            try:
                if board[currentx][currenty + 1] == 0 and [currentx, currenty + 1] not in visited:
                    board[currentx][currenty] = 0
                    board[currentx][currenty + 1] = 'P'
                    currenty = currenty + 1
                    # gridplayer(board)
                    # time.sleep(0.5)
                    visited.append([currentx, currenty])
                    mutation_rate -= increase_rate / (float(2))
                elif board[currentx][currenty + 1] == "F":
                    clear()
                    return 100
                else:
                    clear()
                    mutation_rate += increase_rate * 5
                    return get_numbers(currentx, currenty, steps)
            except:
                clear()
                mutation_rate += increase_rate * 5
                return get_numbers(currentx, currenty, steps)
    mutation_rate += increase_rate * 5
    return get_numbers(currentx, currenty, steps)


def create_pairs(pop):
    pai = []
    selected = []
    pop_score = []
    # print len(p),"CreatePairs"
    for i in pop:
        pop_score.append([i, fitness(i, grid)])
    pi = []
    # print len(pop_score),"After pop score"
    l = max(pop_score, key=lambda x: x[1])[1]
    # print len(pop_score),"pop score"
    p = sorted(pop_score, key=lambda x: x[1])
    # print len(p),"After sorting ascending"
    p = p[::-1]
    # print p, len(p)
    # print p[0]
    while (len(pai) * 2) < len(p):
        # print len(pai)
        if len(pi) == 2:
            pai.append(pi)
            pi = []
            continue
        for i in p:
            if len(pi) == 2:
                break
            # if i[0] not in selected:
            #     k = random.randint(0, l)
            #     if k <= i[1]:
            pi.append(i[0])
            selected.append(i[0])
    # print pai
    return pai


def crossover(pai):
    po = []
    global mutation_rate
    for i in pai:
        t = i
        x = t[0]
        y = t[1]
        tl = random.randint(0, len(x) - 1)
        l = len(x) / 2
        t1 = x[:l] + y[l:]
        t2 = x[l:] + y[:l]
        t3 = y[:l] + x[l:]
        t4 = y[l:] + x[:l]
        t5 = x[:tl] + y[tl:]
        t6 = x[tl:] + y[:tl]
        t7 = y[:tl] + x[tl:]
        t8 = y[tl:] + x[:tl]
        t9 = x
        t10 = y
        for j in range(1, len(x), 2):
            t11 = x[:j] + y[j] + x[j + 1:]
            t12 = y[:j] + x[j] + y[j + 1:]
            x = t11
            y = t12
        txf = {}
        txf[t1] = fitness(t1, grid)
        txf[t2] = fitness(t2, grid)
        txf[t3] = fitness(t3, grid)
        txf[t4] = fitness(t4, grid)
        txf[t5] = fitness(t5, grid)
        txf[t6] = fitness(t6, grid)
        txf[t7] = fitness(t7, grid)
        txf[t8] = fitness(t8, grid)
        txf[t9] = fitness(t9, grid)
        txf[t10] = fitness(t10, grid)
        txf[t11] = fitness(t11, grid)
        txf[t12] = fitness(t12, grid)
        for i in range(15 - len(txf)):
            tmp = ""
            tmp = gen_population(1)[0]
            txf[tmp] = fitness(tmp, grid)
        p = sorted(txf, key=txf.get)
        p = p[::-1]
        # print p
        flag = 0
        l = max(txf, key=lambda x: x[1])
        l = txf[l]
        for i in p:
            if flag >= 2:
                break
            po.append(i)
            flag += 1
        # print l
    # print len(po),"Cross"
    po = find_dupes(po)
    return po


def mutations(pop):
    global complex
    global mutation_rate
    po = []
    print complex, "Complex", mutation_rate, prev_steps
    for i in pop:
        t = i
        for j in range(len(t)):
            k = random.randint(0, 100)
            if mutation_rate < 1:
                mutation_rate = 10
                complex = False
            if mutation_rate > 20:
                mutation_rate = 19
            if mutation_rate > 10:
                complex = True
            # print mutation_rate,"MUTE"
            if k <= mutation_rate:
                x = random.randint(0, splen - 1)
                t = t[:j] + space[x] + t[j + 1:]
        po.append(t)
    # print len(po),"Mut"
    mutation_rate = 0
    po = find_dupes(po)
    return po


def gen_population(size):
    pop = []
    while len(pop) < size:
        temp = ""
        for j in range(length):
            k = random.randint(0, splen - 1)
            # print k
            temp += space[k]
        '''
        x=0
        y=0
        if space[k] == "u":
            y+=1
            temp[x][y]="P"
        if space[k] == "d":
            y-=1
            temp[x][y]="P"
        if space[k] == "r":
            x+=1
            temp[x][y]="P"
        if space[k] == "l":
            x-=1
            temp[x][y]="P"'''
        if temp not in pop:
            pop.append(temp)
    return pop


p.init()
Res = (1270, 720)
screen = p.display.set_mode(Res)
clock = p.time.Clock()

for j in range(size):
    a = []
    for i in range(size):
        a.append(0)
    grid.append(a)

grid[finishx][finishy] = "F"
grid[startx][starty] = "P"
# print grid


def gridf(grid):
    x = 64
    y = 64
    for row in grid:
        for col in row:
            box = p.Rect(x, y, w, w)
            p.draw.rect(screen, WHITE, box, 1)
            # screen.blit(screen,box)
            p.draw.rect(screen, RED, (32 * (startx + 2), 32 * (starty + 2), w, w))
            p.draw.rect(screen, GREEN, (32 * (finishx + 2), 32 * (finishy + 2), w, w))
            p.draw.rect(screen, GREEN, (736, 640, w + 64, w))
            x = x + w
        y = y + w
        x = 64
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            if grid[i][j] == 1:
                p.draw.rect(screen, WHITE, (32 * (j + 2), 32 * (i + 2), w, w))


def gridplayer(board):
    for i in range(len(board)):
        for j in range(len(board[i])):
            if board[i][j] == "P":
                # print i,j
                p.draw.rect(screen, BLUE, ((j + 2) * 32, (i + 2) * 32, w, w))
    p.display.flip()


def clearboard(board):
    for i in range(len(board)):
        for j in range(len(board[i])):
            if board[i][j] == "P":
                print i, j
                p.draw.rect(screen, WHITE, ((j + 2) * 32, (i + 2) * 32, w, w), 1)
                p.draw.rect(screen, black, ((j + 2) * 32, (i + 2) * 32, w, w))
    p.draw.rect(screen, BLUE, ((startx + 2) * 32, (starty + 2) * 32, w, w))


def draw_player(moves):
    currentx = startx
    currenty = starty
    # clearboard(grid)
    screen.fill(black)
    gridf(grid)
    for i in range(len(moves)):
        # print grid
        k = moves[i]
        if k == "u":
            if currentx >= 1 and grid[currentx - 1][currenty] == 0:
                grid[currentx - 1][currenty] = "P"
                currentx -= 1
            else:
                gridplayer(grid)
                # clearboard(grid)
                return
        if k == "l":
            if currenty >= 1 and grid[currentx][currenty - 1] == 0:
                grid[currentx][currenty - 1] = "P"
                currenty -= 1
            else:
                gridplayer(grid)
                # clearboard(grid)
                return
        if k == "r":
            if currenty <= size - 2 and grid[currentx][currenty + 1] == 0:
                grid[currentx][currenty + 1] = "P"
                currenty += 1
            else:
                gridplayer(grid)
                # clearboard(grid)
                return
        if k == "d":
            if currentx <= size - 2 and grid[currentx + 1][currenty] == 0:
                grid[currentx + 1][currenty] = "P"
                currentx += 1
            else:
                gridplayer(grid)
                # clearboard(grid)
                return
    gridplayer(grid)
    return


def run_algo():
    # s="rdruruuddddrduluuullrduurrulrurdluulrllllluluudul"
    # print fitness(s)
    count = 0
    for i in grid:
        for j in i:
            if j == 1:
                count += 1
    print count
    gen = 0
    best_fitness = 0
    best_dir = ""
    avg = 0
    players = gen_population(pop_size)
    while best_fitness < 100:
        # print grid
        gen += 1
        pairs = create_pairs(players)
        children = crossover(pairs)
        children = mutations(children)
        for i in children:
            r = fitness(i, grid)
            # print r
            avg += r
            # draw_player(i)
            # print r,i
            if r > best_fitness:
                best_fitness = r
                best_dir = i
        avg = float(avg) / len(children)
        # print best_fitness
        # print best_dir
        print avg
        avg = 0
        draw_player(best_dir)
        # time.sleep(1)
        # print best_dir
        players = children
        # print fitness(player[0])
    draw_player(best_dir)
    # print best_dir
    # print gen


while 1:
    # print grid
    # gridplayer(grid)
    if flag == 1:
        flag = 2
    if flag == 0:
        gridf(grid)
        flag = 1
    # print LOL
    clock.tick(30)
    for event in p.event.get():
        if event.type == p.MOUSEBUTTONDOWN:
            mousepos = p.mouse.get_pos()
            x = mousepos[0] / 32
            y = mousepos[1] / 32
            try:
                if grid[y - 2][x - 2] == 0:
                    grid[y - 2][x - 2] = 1
                    x = x * 32
                    y = y * 32
                    box = p.Rect(x, y, w, w)
                    p.draw.rect(screen, WHITE, box)
                elif grid[y - 2][x - 2] == 1:
                    grid[y - 2][x - 2] = 0
                    x = x * 32
                    y = y * 32
                    box = p.Rect(x, y, w, w)
                    p.draw.rect(screen, black, box)
                    p.draw.rect(screen, WHITE, box, 1)
            except:
                pass
            if mousepos[0] >= 736 and mousepos[0] <= 736 + w + 64 and mousepos[1] >= 640 and mousepos[1] <= 640 + w:
                run_algo()
                # s="rrdddrrrrrdddd"
                # draw_player(s)
                # print "Done drawing"
    # print mousepos,x,y,grid
    p.display.flip()
nilq/baby-python
python
#! /usr/bin/env python
# -*- coding: utf-8 -*-

from glob import glob

#################################
#
#   MAIN
#
#################################
if __name__ == "__main__":

    import argparse
    from pysedm import io, rainbowcam

    parser = argparse.ArgumentParser(
        description="""Build the guider images | to be run on pharos """,
        formatter_class=argparse.RawTextHelpFormatter)

    parser.add_argument('infile', type=str, default=None,
                        help='The date YYYYMMDD')

    parser.add_argument('--contains', type=str, default="*",
                        help='Provide here part of the filename. This will build the guider images of all crr images of the given night having `contains` in their name')

    parser.add_argument('--solvewcs', action="store_true", default=False,
                        help='Shall the wcs solution of the guider be solved (ignored if --noguider). [part of the --build]')

    parser.add_argument('--quite', action="store_true", default=False,
                        help='Set verbose to False')

    # ================ #
    #  END of Option   #
    # ================ #
    args = parser.parse_args()

    # Matplotlib
    # ================= #
    #   The Scripts     #
    # ================= #

    # --------- #
    #  Date     #
    # --------- #
    date = args.infile

    # ---------------- #
    #  Guider loop     #
    # ---------------- #
    files_to_use = io.get_night_files(date, "ccd.crr", args.contains)
    print(" Guider images will be built for :")
    print(", ".join(files_to_use))
    for filename in files_to_use:
        print("** Starting %s **" % filename)
        rainbowcam.build_meta_ifu_guider(filename, solve_wcs=args.solvewcs,
                                         verbose=False if args.quite else True)
nilq/baby-python
python
# oci-utils
#
# Copyright (c) 2018, 2019 Oracle and/or its affiliates. All rights reserved.
# Licensed under the Universal Permissive License v 1.0 as shown
# at http://oss.oracle.com/licenses/upl.

import logging
import os
import os.path
import subprocess

import cache
import oci_utils
from oci_utils import _configuration as OCIUtilsConfiguration
from .oci_api import HAVE_OCI_SDK, OCISession

# TODO: can we move this under 'impl' ?
_logger = logging.getLogger('oci-utils.vnicutils')

_secondary_vnic_all_configure_path = os.path.join(
    os.path.dirname(oci_utils.__file__), 'impl', '.vnic_script.sh')


class VNICUtils(object):
    """Class for managing VNICs
    """
    # file with saved vnic information
    __vnic_info_file = "/var/lib/oci-utils/vnic_info"
    # OBSOLETE: file with VNICs and stuff to exclude from automatic
    # configuration
    __net_exclude_file = "/var/lib/oci-utils/net_exclude"

    def __init__(self):
        """ Class VNICUtils initialisation.
        """
        self.vnic_info = None
        self.vnic_info_ts = 0

    @staticmethod
    def __new_vnic_info():
        """
        Create a new vnic info file

        Returns
        -------
        tuple
            (vnic info timestamp: datetime, vnic info: dict)
        """
        vnic_info = {
            'ns': None,
            'sshd': False,
            'exclude': [],
            'sec_priv_ip': []}
        vnic_info_ts = 0

        # migration from oci-utils 0.5's net_exclude file
        excludes = cache.load_cache(VNICUtils.__net_exclude_file)[1]
        if excludes is not None:
            vnic_info['exclude'] = excludes
            vnic_info_ts = \
                cache.write_cache(cache_content=vnic_info,
                                  cache_fname=VNICUtils.__vnic_info_file)
            try:
                os.remove(VNICUtils.__net_exclude_file)
            except Exception:
                pass

        # can we make API calls?
        oci_sess = None
        if HAVE_OCI_SDK:
            try:
                oci_sess = OCISession()
            except Exception:
                pass
        if oci_sess is not None:
            p_ips = oci_sess.this_instance().all_private_ips(refresh=True)
            sec_priv_ip = \
                [[ip.get_address(), ip.get_vnic().get_ocid()] for ip in p_ips]
            vnic_info['sec_priv_ip'] = sec_priv_ip
            vnic_info_ts = \
                cache.write_cache(cache_content=vnic_info,
                                  cache_fname=VNICUtils.__vnic_info_file)
        return vnic_info_ts, vnic_info

    @staticmethod
    def get_vnic_info_timestamp():
        """
        Get timestamp of vnic info repository, i.e. the last modification
        time of the vnic info file.

        Returns
        -------
        int
            The last modification time since epoch in seconds.
        """
        return cache.get_timestamp(VNICUtils.__vnic_info_file)

    def get_vnic_info(self):
        """
        Load the vnic_info file. If the file is missing, a new one is created.

        Returns
        -------
        tuple (int, dict)
            (vnic info timestamp: datetime, vnic info: dict)
        """
        self.vnic_info_ts, self.vnic_info = \
            cache.load_cache(VNICUtils.__vnic_info_file)
        if self.vnic_info is None:
            self.vnic_info_ts, self.vnic_info = VNICUtils.__new_vnic_info()
        return self.vnic_info_ts, self.vnic_info

    def save_vnic_info(self):
        """
        Save self.vnic_info in the vnic_info file.

        Returns
        -------
        int
            The timestamp of the file or None on failure.
        """
        _logger.debug("Saving vnic_info.")
        vnic_info_ts = cache.write_cache(cache_content=self.vnic_info,
                                         cache_fname=VNICUtils.__vnic_info_file)
        if vnic_info_ts is not None:
            self.vnic_info_ts = vnic_info_ts
        else:
            _logger.warn("Failed to save VNIC info to %s" %
                         VNICUtils.__vnic_info_file)
        return vnic_info_ts

    def _run_sec_vnic_script(self, script_args):
        """
        Run secondary_vnic_all_configure.sh.

        Parameters
        ----------
        script_args: list of string
            Arguments to be passed to the script.

        Returns
        -------
        tuple
            (The exit code of the script, the output of the script)
        """
        true_val = ['true', 'True', 'TRUE']
        vf_net = OCIUtilsConfiguration.get('vnic', 'vf_net') in true_val
        if vf_net and '-s' not in script_args:
            _logger.debug(
                'Skipping execution of the secondary vnic script')
            return 0, 'Info: vf_net is enabled in the oci-utils configuration'
        all_args = [_secondary_vnic_all_configure_path]
        all_args += script_args
        if "-c" in script_args:
            if 'sshd' in self.vnic_info:
                if self.vnic_info['sshd']:
                    all_args += ['-r']
            if 'ns' in self.vnic_info:
                if self.vnic_info['ns'] is not None:
                    all_args += ['-n', self.vnic_info['ns']]
        if "-c" in script_args or "-s" in script_args:
            if 'exclude' in self.vnic_info:
                for exc in self.vnic_info['exclude']:
                    all_args += ['-X', exc]
            if 'sec_priv_ip' in self.vnic_info:
                for ipaddr, vnic_id in self.vnic_info['sec_priv_ip']:
                    all_args += ['-e', ipaddr, vnic_id]

        _logger.debug('Executing "%s"' % " ".join(all_args))
        try:
            output = subprocess.check_output(
                all_args, stderr=subprocess.STDOUT)
        except OSError:
            _logger.debug('failed to execute '
                          '/usr/libexec/secondary_vnic_all_configure.sh')
            return 404, 'failed to execute secondary VNIC script'
        except subprocess.CalledProcessError as e:
            _logger.debug('Error running command "%s":' % ' '.join(all_args))
            _logger.error(e.output)
            return e.returncode, e.output

        return 0, output

    def set_namespace(self, ns):
        """
        Set the 'ns' field of the vnic_info dict to the given value. This
        value is passed to the secondary vnic script with the -n option and
        is used to place the interface in the given namespace. The default
        is no namespace.

        Parameters
        ----------
        ns: str
            The namespace value.
        """
        self.vnic_info['ns'] = ns
        self.save_vnic_info()

    def set_sshd(self, val):
        """
        Set the 'sshd' field of the vnic_info dict to the given value.

        Parameters
        ----------
        val: bool
            When set to True, the secondary vnic script is called with
            the -r option, which, if a namespace is also specified,
            runs sshd in the namespace. The default is False.
        """
        self.vnic_info['sshd'] = val
        self.save_vnic_info()

    def add_private_ip(self, ipaddr, vnic_id):
        """
        Add the given secondary private IP to vnic_info and save vnic_info
        to the vnic_info file.

        Parameters
        ----------
        ipaddr: str
            The secondary IP address.
        vnic_id: int
            The VNIC id.
        """
        if [ipaddr, vnic_id] not in self.vnic_info['sec_priv_ip']:
            self.vnic_info['sec_priv_ip'].append([ipaddr, vnic_id])
        self.save_vnic_info()

    def set_private_ips(self, priv_ips):
        """
        Set the secondary private IP.

        Parameters
        ----------
        priv_ips: str
            The private IP addresses.
        """
        self.vnic_info['sec_priv_ip'] = priv_ips
        self.save_vnic_info()

    def delete_all_private_ips(self, vnic_id):
        """
        Delete all private IPs attached to a given VNIC.

        Parameters
        ----------
        vnic_id: int
            The vnic ID from which we delete private IP's.
        """
        remove_privip = []
        for privip in self.vnic_info['sec_priv_ip']:
            if privip[1] == vnic_id:
                remove_privip.append(privip)
                self.include(privip[0], save=False)
        for pi in remove_privip:
            self.vnic_info['sec_priv_ip'].remove(pi)
        self.save_vnic_info()

    def del_private_ip(self, ipaddr, vnic_id):
        """
        Delete the given secondary private IP from vnic_info and save
        vnic_info to the vnic_info file.

        Parameters
        ----------
        ipaddr: str
            The IP addr to be removed.
        vnic_id: int
            The VNIC ID.

        Returns
        -------
        tuple
            (exit code: int, output from the "sec vnic" script execution).
            # See _run_sec_vnic_script()
        """
        if vnic_id is None:
            for ip in self.vnic_info['sec_priv_ip']:
                if ip[0] == ipaddr:
                    vnic_id = ip[1]
                    break
        if vnic_id is None:
            return 0, 'IP %s is not configured.' % ipaddr

        ret, info = self._run_sec_vnic_script(['-d', '-e', ipaddr, vnic_id])
        if ret == 0:
            if [ipaddr, vnic_id] in self.vnic_info['sec_priv_ip']:
                self.vnic_info['sec_priv_ip'].remove([ipaddr, vnic_id])
            self.include(ipaddr, save=False)
            self.save_vnic_info()
        return ret, info

    def exclude(self, item, save=True):
        """
        Add item to the "exclude" list. IP addresses or interfaces that are
        excluded from automatic configuration.

        Parameters
        ----------
        item: str
            Item (IP or interface) to be excluded.
        save: bool
            If True save to persistent configuration (vnic_info file) (the
            default is True).
        """
        if item not in self.vnic_info['exclude']:
            _logger.debug('Adding %s to "exclude" list' % item)
            self.vnic_info['exclude'].append(item)
            if save:
                self.save_vnic_info()

    def include(self, item, save=True):
        """
        Remove item from the "exclude" list, IP addresses or interfaces that
        are excluded from automatic configuration.

        Parameters
        ----------
        item: str
            Item (IP or interface) to be excluded.
        save: bool
            If True save to persistent configuration (vnic_info file) (the
            default is True).
        """
        if item in self.vnic_info['exclude']:
            _logger.debug('Removing %s from "exclude" list' % item)
            self.vnic_info['exclude'].remove(item)
            if save:
                self.save_vnic_info()

    def auto_config(self, sec_ip, quiet, show):
        """
        Auto configure VNICs. Run the secondary vnic script in automatic
        configuration mode (-c).

        Parameters
        ----------
        sec_ip: str
            secondary IP
        quiet: bool
            Do we run the underlying script silently?
        show: bool
            Do network config should be part of the output?

        Returns
        -------
        tuple
            (exit code: int, output from the "sec vnic" script execution.)
            # See _run_sec_vnic_script()
        """
        args = ['-c']
        if quiet:
            args += ['-q']
        if show:
            args += ['-s']
        if sec_ip:
            for si in sec_ip:
                args += ['-e', si[0], si[1]]
                if [si[0], si[1]] not in self.vnic_info['sec_priv_ip']:
                    self.vnic_info['sec_priv_ip'].append((si[0], si[1]))
                self.include(si[0], save=False)
                self.save_vnic_info()

        return self._run_sec_vnic_script(args)

    def auto_deconfig(self, sec_ip, quiet, show):
        """
        De-configure VNICs. Run the secondary vnic script in automatic
        de-configuration mode (-d).

        Parameters
        ----------
        sec_ip: str
            The secondary IP.
        quiet: bool
            Do we run the underlying script silently?
        show: bool
            Do network config should be part of the output?

        Returns
        -------
        tuple
            (exit code: int, output from the "sec vnic" script execution.)
            # See _run_sec_vnic_script()
        """
        args = ['-d']
        if quiet:
            args += ['-q']
        if show:
            args += ['-s']
        if sec_ip:
            for si in sec_ip:
                args += ['-e', si[0], si[1]]
                if [si[0], si[1]] in self.vnic_info['sec_priv_ip']:
                    self.vnic_info['sec_priv_ip'].remove([si[0], si[1]])
                self.exclude(si[0], save=False)
                self.save_vnic_info()

        return self._run_sec_vnic_script(args)

    def get_network_config(self):
        """
        Get network configuration. Run the secondary vnic script in show
        configuration mode (-s).

        Returns
        -------
        tuple
            (exit code: int, output from the "sec vnic" script execution.)
            # See _run_sec_vnic_script()
        """
        return self._run_sec_vnic_script(['-s'])
nilq/baby-python
python
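# Usage sketch (not part of the file above): driving VNICUtils. This must run
# as root on an OCI instance for the underlying shell script to do anything
# useful; the flow below is illustrative.
vu = VNICUtils()
ts, info = vu.get_vnic_info()
print("cached secondary private IPs:", info['sec_priv_ip'])

ret, out = vu.auto_config(sec_ip=None, quiet=True, show=True)
if ret != 0:
    print("vnic configuration failed:", out)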