| content (stringlengths 0–1.05M) | origin (stringclasses, 2 values) | type (stringclasses, 2 values) |
|---|---|---|
import sys
sys.path.append("/Users/zhouxuerong/projects/autotest/autotest/autotest")
from django.test import TestCase
from apitest.views import Login
from django.http import HttpRequest
class TitlePageTest(TestCase):
    def test_loginPage(self):
        request = HttpRequest()
        response = Login(request)
        print(response.content)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
def main():
s = input()
if s == 'Sunny':
print('Cloudy')
elif s == 'Cloudy':
print('Rainy')
else:
print('Sunny')
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
metadata = MetaData()
Base = declarative_base(metadata=metadata)
from . import Assignment, Driver, DriverAssignment, Location, LocationPair, MergeAddress, RevenueRate, Trip
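# Hedged sketch (not part of the original package): what one of the imported
# model modules, e.g. Driver, might look like using the shared Base/metadata
# defined above. The column names here are hypothetical.
#
# from sqlalchemy import Column, Integer, String
#
# class Driver(Base):
#     __tablename__ = 'drivers'
#     id = Column(Integer, primary_key=True)
#     name = Column(String(64))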
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
import numpy as np
from morphforge.morphology.core import MorphologyArray
from morphforge.morphology.importer.morphologyimporter import MorphologyImporter
from StringIO import StringIO
from morphforge.morphology.errors import MorphologyImportError
from morphforge.morphology.core import MorphologyTree
class NewSWCLoader(object):
@classmethod
def load_swc_single(cls, src, name=None):
dtype = {'names': ('id', 'type', 'x', 'y', 'z', 'r', 'pid'),
'formats': ('int32', 'int32', 'f4', 'f4', 'f4', 'f4', 'int32') }
swc_data_raw = np.loadtxt(src, dtype=dtype)
        if len(np.nonzero(swc_data_raw['pid'] == -1)[0]) != 1:
            assert False, "Unexpected number of ids of -1 in file"
        # We might not necessarily have continuous indices in the
        # SWC file, so let's convert them:
index_to_id = swc_data_raw['id']
id_to_index_dict = dict([(_id, index) for (index, _id) in enumerate(index_to_id)])
if len(id_to_index_dict) != len(index_to_id):
            errstr = "Internal Error Loading SWC: Index and ID map are different lengths."
            errstr += " [ID:%s, Index:%s]" % (len(index_to_id), len(id_to_index_dict))
raise MorphologyImportError(errstr)
# Vertices are easy:
        vertices = np.vstack([swc_data_raw['x'], swc_data_raw['y'], swc_data_raw['z'], swc_data_raw['r']]).T
# Connections need to translate id_to_index:
connection_indices = [(id_to_index_dict[ID], id_to_index_dict[parent_id]) for ID, parent_id in swc_data_raw[['id', 'pid']] if parent_id != -1]
# Types are specified per connection:
section_types = [swctype for ID, swctype, parent_id in swc_data_raw[['id', 'type', 'pid']] if parent_id != -1]
return MorphologyArray(vertices=vertices, connectivity=connection_indices, section_types=section_types, dummy_vertex_index=0, name=name)
@classmethod
def load_swc_set(cls, src):
"""Naive implementation, that doesn't take account of interleaving of nodes"""
lines = [line.strip() for line in src.readlines()]
lines = [line for line in lines if line and line[0] != '#']
# Break into sections where we get a new parent:
splits = [[]]
for line in lines:
if int(line.split()[-1]) == -1:
splits.append([])
splits[-1].append(line)
splits = splits[1:]
data_blocks = ['\n'.join(blk) for blk in splits]
file_objs = [StringIO(blk) for blk in data_blocks]
morphs = [cls.load_swc_single(src=fobj) for fobj in file_objs]
return morphs
# To Array:
MorphologyImporter.register('fromSWC', NewSWCLoader.load_swc_single, as_type=MorphologyArray)
# To Tree:
def _load_swc_single_tree(*args, **kwargs):
return NewSWCLoader.load_swc_single(*args, **kwargs).to_tree()
MorphologyImporter.register('fromSWC', _load_swc_single_tree, as_type=MorphologyTree)
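# Hedged usage sketch (not in the original module): load a single morphology
# from an in-memory SWC string; the two data rows below are illustrative.
if __name__ == '__main__':
    example_swc = StringIO('1 1 0.0 0.0 0.0 1.0 -1\n'
                           '2 3 0.0 0.0 5.0 0.5 1\n')
    example_morph = NewSWCLoader.load_swc_single(src=example_swc, name='example')
    print(example_morph)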
|
nilq/baby-python
|
python
|
import utils
from symbolic.symbolic_types.symbolic_int import SymbolicInteger
from symbolic.symbolic_types.symbolic_type import SymbolicType
from z3 import *
class Z3Expression(object):
def __init__(self):
self.z3_vars = {}
def toZ3(self,solver,asserts,query):
self.z3_vars = {}
solver.assert_exprs([self.predToZ3(p,solver) for p in asserts])
solver.assert_exprs(Not(self.predToZ3(query,solver)))
def predToZ3(self,pred,solver,env=None):
sym_expr = self._astToZ3Expr(pred.symtype,solver,env)
if env == None:
if not is_bool(sym_expr):
sym_expr = sym_expr != self._constant(0,solver)
if not pred.result:
sym_expr = Not(sym_expr)
else:
if not pred.result:
sym_expr = not sym_expr
return sym_expr
def getIntVars(self):
return [ v[1] for v in self.z3_vars.items() if self._isIntVar(v[1]) ]
# ----------- private ---------------
def _isIntVar(self, v):
        raise NotImplementedError
def _getIntegerVariable(self,name,solver):
if name not in self.z3_vars:
self.z3_vars[name] = self._variable(name,solver)
return self.z3_vars[name]
def _variable(self,name,solver):
        raise NotImplementedError
def _constant(self,v,solver):
        raise NotImplementedError
def _wrapIf(self,e,solver,env):
if env == None:
return If(e,self._constant(1,solver),self._constant(0,solver))
else:
return e
# add concrete evaluation to this, to check
def _astToZ3Expr(self,expr,solver,env=None):
if isinstance(expr, list):
op = expr[0]
args = [ self._astToZ3Expr(a,solver,env) for a in expr[1:] ]
z3_l,z3_r = args[0],args[1]
# arithmetical operations
if op == "+":
return self._add(z3_l, z3_r, solver)
elif op == "-":
return self._sub(z3_l, z3_r, solver)
elif op == "*":
return self._mul(z3_l, z3_r, solver)
elif op == "//":
return self._div(z3_l, z3_r, solver)
elif op == "%":
return self._mod(z3_l, z3_r, solver)
# bitwise
elif op == "<<":
return self._lsh(z3_l, z3_r, solver)
elif op == ">>":
return self._rsh(z3_l, z3_r, solver)
elif op == "^":
return self._xor(z3_l, z3_r, solver)
elif op == "|":
return self._or(z3_l, z3_r, solver)
elif op == "&":
return self._and(z3_l, z3_r, solver)
# equality gets coerced to integer
elif op == "==":
return self._wrapIf(z3_l == z3_r,solver,env)
elif op == "!=":
return self._wrapIf(z3_l != z3_r,solver,env)
elif op == "<":
return self._wrapIf(z3_l < z3_r,solver,env)
elif op == ">":
return self._wrapIf(z3_l > z3_r,solver,env)
elif op == "<=":
return self._wrapIf(z3_l <= z3_r,solver,env)
elif op == ">=":
return self._wrapIf(z3_l >= z3_r,solver,env)
else:
utils.crash("Unknown BinOp during conversion from ast to Z3 (expressions): %s" % op)
elif isinstance(expr, SymbolicInteger):
if expr.isVariable():
if env == None:
return self._getIntegerVariable(expr.name,solver)
else:
return env[expr.name]
else:
return self._astToZ3Expr(expr.expr,solver,env)
elif isinstance(expr, SymbolicType):
utils.crash("{} is an unsupported SymbolicType of {}".
format(expr, type(expr)))
elif isinstance(expr, int):
if env == None:
return self._constant(expr,solver)
else:
return expr
else:
utils.crash("Unknown node during conversion from ast to Z3 (expressions): %s" % expr)
def _add(self, l, r, solver):
return l + r
def _sub(self, l, r, solver):
return l - r
def _mul(self, l, r, solver):
return l * r
def _div(self, l, r, solver):
return l / r
def _mod(self, l, r, solver):
return l % r
def _lsh(self, l, r, solver):
return l << r
def _rsh(self, l, r, solver):
return l >> r
def _xor(self, l, r, solver):
return l ^ r
def _or(self, l, r, solver):
return l | r
def _and(self, l, r, solver):
return l & r
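# Hedged sketch (not part of the original file): a minimal concrete subclass
# wiring the abstract hooks above to plain Z3 integer terms. The real project
# may implement these differently; this just shows the expected shape.
class Z3IntegerExpression(Z3Expression):
    def _isIntVar(self, v):
        return is_int(v)
    def _variable(self, name, solver):
        return Int(name)
    def _constant(self, v, solver):
        return IntVal(v)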
|
nilq/baby-python
|
python
|
import xgboost as xgb
# read in data
dtrain = xgb.DMatrix('../../data/data_20170722_01/train_data.txt')
dtest = xgb.DMatrix('../../data/data_20170722_01/test_data.txt')
# specify parameters via a dict; the definitions are the same as in the C++ version
param = {'max_depth':22, 'eta':0.1, 'silent':0, 'objective':'binary:logistic','min_child_weight':3,'gamma':14 }
# specify validations set to watch performance
watchlist = [(dtest,'eval'), (dtrain,'train')]
num_round = 60
bst = xgb.train(param, dtrain, num_round, watchlist)
# this is prediction
preds = bst.predict(dtest)
labels = dtest.get_label()
positive_threshold_list = [0.50, 0.67, 0.80, 0.90, 0.95]
for positive_threshold in positive_threshold_list:
print('positive_threshold: ' + str(positive_threshold))
num_correct = sum(1 for i in range(len(preds)) if int(preds[i]>positive_threshold)==labels[i])
num_pred = len(preds)
num_error = num_pred - num_correct
print ('error=%d/%d=%f' % (num_error, num_pred, num_error /float(num_pred)))
print ('accuracy=%d/%d=%f' % ( num_correct, num_pred, num_correct /float(num_pred)))
num_true_positive = sum(1 for i in range(len(preds)) if int(preds[i]>positive_threshold)==labels[i] and labels[i]==1)
num_positive_pred = sum(1 for i in range(len(preds)) if preds[i]>positive_threshold)
print ('precision=%d/%d=%f' % ( num_true_positive, num_positive_pred, num_true_positive /float(num_positive_pred)))
print('')
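# Hedged extension (not in the original script): recall at the strictest
# threshold, assuming the test set contains at least one positive label.
strictest = positive_threshold_list[-1]
num_tp_strict = sum(1 for i in range(len(preds)) if preds[i] > strictest and labels[i] == 1)
num_actual_positive = sum(1 for i in range(len(preds)) if labels[i] == 1)
print('recall=%d/%d=%f' % (num_tp_strict, num_actual_positive, num_tp_strict / float(num_actual_positive)))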
|
nilq/baby-python
|
python
|
N = int(input())
N = str(N)
if len(N)==1:
print(1)
elif len(N)==2:
print(2)
elif len(N)==3:
print(3)
elif len(N)>3:
print("More than 3 digits")
|
nilq/baby-python
|
python
|
from passlib.context import CryptContext
PWD_CONTEXT = CryptContext(schemes=["bcrypt"], deprecated="auto")
def verify_password(plain_password: str, hashed_password: str) -> bool:
return PWD_CONTEXT.verify(plain_password, hashed_password)
def get_password_hash(password: str) -> str:
return PWD_CONTEXT.hash(password)
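# Hedged usage sketch (requires passlib with bcrypt installed): hashing then
# verifying round-trips; hashes differ per call because of per-hash salts.
if __name__ == "__main__":
    hashed = get_password_hash("s3cret")
    assert verify_password("s3cret", hashed)
    assert not verify_password("wrong", hashed)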
|
nilq/baby-python
|
python
|
from .simple_ga import SimpleGA
from .simple_es import SimpleES
from .cma_es import CMA_ES
from .de import DE
from .pso import PSO
from .open_es import OpenES
from .pgpe import PGPE
from .pbt import PBT
from .persistent_es import PersistentES
from .xnes import xNES
from .ars import ARS
from .sep_cma_es import Sep_CMA_ES
from .bipop_cma_es import BIPOP_CMA_ES
from .ipop_cma_es import IPOP_CMA_ES
from .full_iamalgam import Full_iAMaLGaM
from .indep_iamalgam import Indep_iAMaLGaM
from .ma_es import MA_ES
from .lm_ma_es import LM_MA_ES
from .rm_es import RmES
from .gld import GLD
from .sim_anneal import SimAnneal
__all__ = [
"SimpleGA",
"SimpleES",
"CMA_ES",
"DE",
"PSO",
"OpenES",
"PGPE",
"PBT",
"PersistentES",
"xNES",
"ARS",
"Sep_CMA_ES",
"BIPOP_CMA_ES",
"IPOP_CMA_ES",
"Full_iAMaLGaM",
"Indep_iAMaLGaM",
"MA_ES",
"LM_MA_ES",
"RmES",
"GLD",
"SimAnneal",
]
|
nilq/baby-python
|
python
|
import random
from model import Actor, Critic
from ounoise import OUNoise
import torch
import torch.optim as optim
GAMMA = 0.99 # discount factor
TAU = 0.01 # for soft update of target parameters
LR_ACTOR = 0.001 # learning rate of the actor
LR_CRITIC = 0.001 # learning rate of the critic
class Agent():
def __init__(self, state_size, action_size, num_agents, device, gamma=GAMMA,
tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, random_seed=0):
"""
Initialize an Agent object.
:param state_size: size of state
:param action_size: size of action
:param num_agents: number of agents
:param gamma: discount factor
:param tau: factor for soft update of target parameters
:param lr_actor: Learning rate of actor
:param lr_critic: Learning rate of critic
:param random_seed: Random seed
:param device: cuda or cpu
"""
self.device=device
self.gamma = gamma
self.tau=tau
self.num_agents=num_agents
self.state_size = state_size
self.action_size = action_size
self.full_state_size = state_size * num_agents
self.full_action_size = action_size * num_agents
self.seed = random.seed(random_seed)
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, device, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, device, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=lr_actor)
# Critic Network (w/ Target Network)
self.critic_local = Critic(self.full_state_size, self.full_action_size, device=device, random_seed=random_seed).to(device)
self.critic_target = Critic(self.full_state_size, self.full_action_size, device=device, random_seed=random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=lr_critic, weight_decay=0)
self.noise = OUNoise(action_size, random_seed)
def save_model(self, agent_number):
torch.save(self.actor_local.state_dict(), f'models/checkpoint_actor_{agent_number}.pth')
torch.save(self.critic_local.state_dict(), f'models/checkpoint_critic_{agent_number}.pth')
def load_model(self, agent_number):
checkpoint = torch.load(f'models/checkpoint_actor_{agent_number}.pth', map_location=torch.device('cpu'))
self.actor_local.load_state_dict(checkpoint)
checkpoint = torch.load(f'models/checkpoint_critic_{agent_number}.pth', map_location=torch.device('cpu'))
self.critic_local.load_state_dict(checkpoint)
def act(self, state, noise = 0., train = False):
"""Returns actions for given state as per current policy.
:param state: state as seen from single agent
"""
if train is True:
self.actor_local.train()
else:
self.actor_local.eval()
action = self.actor_local(state)
if noise > 0:
noise = torch.tensor(noise*self.noise.sample(), dtype=state.dtype, device=state.device)
return action + noise
def target_act(self, state, noise = 0.):
# convert to cpu() since noise is in cpu()
self.actor_target.eval()
action = self.actor_target(state).cpu()
if noise > 0.:
noise = torch.tensor(noise*self.noise.sample(), dtype=state.dtype, device=state.device)
return action + noise
def update_critic(self, rewards, dones, all_states, all_actions, all_next_states, all_next_actions):
with torch.no_grad():
Q_targets_next = self.critic_target(all_next_states, all_next_actions)
# Compute Q targets for current states (y_i)
q_targets = rewards + (self.gamma * Q_targets_next * (1 - dones))
# Compute critic loss
q_expected = self.critic_local(all_states, all_actions)
# critic_loss = F.mse_loss(q_expected, q_targets)
critic_loss = ((q_expected - q_targets.detach()) ** 2).mean()
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
def update_actor(self, all_states, all_predicted_actions):
"""Update actor network
:param all_states: all states
:param all_predicted_actions: all predicted actions
"""
actor_loss = -self.critic_local(all_states, all_predicted_actions).mean()
self.actor_optimizer.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optimizer.step()
def update_targets(self):
self.soft_update(self.actor_local, self.actor_target, self.tau)
self.soft_update(self.critic_local, self.critic_target, self.tau)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)
def reset(self):
self.noise.reset()
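# Hedged usage sketch (commented out; sizes are illustrative and depend on the
# environment, e.g. Tennis with 2 agents, 24-dim observations, 2-dim actions):
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# agent = Agent(state_size=24, action_size=2, num_agents=2, device=device)
# agent.update_targets()  # Polyak-average local weights into the target nets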
|
nilq/baby-python
|
python
|
from __future__ import print_function
from sublime import Region, load_settings
from sublime_plugin import TextCommand
try:
    from collections.abc import Iterable  # Python 3.3+
except ImportError:
    from collections import Iterable  # older Pythons
DEBUG = False
def dbg(*msg):
if DEBUG:
print(' '.join(map(str, msg)))
class MyCommand(TextCommand):
def set_cursor_to(self, pos):
""" Sets the cursor to a given position. If multiple
positions are given, a multicursor will be made.
"""
dbg('setting cursor to {0}'.format(pos))
if not isinstance(pos, Iterable):
pos = [pos]
self.view.sel().clear()
for p in pos:
self.view.sel().add(Region(p, p))
def set_selection_to(self, start, end):
dbg("setting selection to {0}".format((start, end)))
self.view.sel().clear()
self.view.sel().add(Region(start, end))
def get_char_at(self, pos):
""" Return the character at a position """
return self.view.substr(Region(pos, pos + 1))
def get_current_line(self):
""" Return the line at the current cursor """
return self.get_line_at(self.get_cursor())
def get_line_at(self, region):
""" Returns the :class:`sublime.Line` at a
:class:`sublime.Region`
"""
return self.view.line(region)
def get_cursor(self):
""" Returns the first current cursor """
return self.view.sel()[0]
class MoveByParagraphCommand(MyCommand):
def run(self,
edit,
extend=False,
forward=False,
ignore_blank_lines=True,
stop_at_paragraph_begin=True,
stop_at_paragraph_end=False):
"""
The cursor will move to beginning of a non-empty line that succeeds
an empty one. Selection is supported when "extend" is True.
"""
if not stop_at_paragraph_begin and not stop_at_paragraph_end:
print('[WARNING] MoveByParagraph: stop_at_paragraph_begin and '
'stop_at_paragraph_end are both False, nothing will happen')
return
cursor = self.get_cursor()
if cursor.a < cursor.b:
start = cursor.end()
else:
start = cursor.begin()
kwargs = dict(ignore_blank_lines=ignore_blank_lines,
stop_at_paragraph_begin=stop_at_paragraph_begin,
stop_at_paragraph_end=stop_at_paragraph_end)
dbg('Starting from', cursor)
if forward:
next_cursor = self._find_paragraph_position_forward(start,
**kwargs)
else:
next_cursor = self._find_paragraph_position_backward(start,
**kwargs)
dbg('Stopping at', next_cursor)
if extend:
dbg('set_selection_to', cursor.a, next_cursor.begin())
self.set_selection_to(cursor.a, next_cursor.begin())
else:
dbg('set_cursor_to', next_cursor.begin())
self.set_cursor_to(next_cursor.begin())
cursor = self.get_cursor()
self.view.show(cursor)
def _find_paragraph_position_forward(self,
start,
ignore_blank_lines=True,
stop_at_paragraph_begin=True,
stop_at_paragraph_end=False):
size = self.view.size()
r = Region(start, size)
# Obtain the lines that intersect the region
lines = self.view.lines(r)
for n, line in enumerate(lines[:-1]):
if (stop_at_paragraph_begin and
self._line_begins_paragraph(lines[n+1],
line,
ignore_blank_lines)):
return Region(lines[n+1].a, lines[n+1].a)
if (line.b != start and
stop_at_paragraph_end and
self._line_ends_paragraph(line,
lines[n+1],
ignore_blank_lines)):
return Region(line.b, line.b)
# Check if the last line is empty or not
# If it is empty, make sure we jump to the end of the file
# If it is not empty, jump to the end of the line
if self._substr(lines[-1], ignore_blank_lines) == '':
return Region(size, size)
end = lines[-1].b
# If the file ends with a single newline, it will be stuck
# before this newline character unless we do this
if end == start:
return Region(end+1, end+1)
return Region(end, end)
def _find_paragraph_position_backward(self,
start,
ignore_blank_lines=True,
stop_at_paragraph_begin=True,
stop_at_paragraph_end=False):
r = Region(0, start)
# Obtain the lines that intersect the region
lines = self.view.lines(r)
lines.reverse()
for n, line in enumerate(lines[:-1]):
if (stop_at_paragraph_begin and
self._line_begins_paragraph(line,
lines[n+1],
ignore_blank_lines)):
return Region(line.a, line.a)
if (stop_at_paragraph_end and
self._line_ends_paragraph(lines[n+1],
line,
ignore_blank_lines)):
return Region(lines[n+1].b, lines[n+1].b)
return lines[-1]
def _line_begins_paragraph(self, line, line_above, ignore_blank_lines):
a = self._substr(line, ignore_blank_lines)
b = self._substr(line_above, ignore_blank_lines)
dbg('line_above', line_above, self.view.substr(line_above))
dbg('line', line, self.view.substr(line))
return a and not b
def _line_ends_paragraph(self, line, line_below, ignore_blank_lines):
a = self._substr(line, ignore_blank_lines)
dbg('line', line, self.view.substr(line))
dbg('line_below', line_below, self.view.substr(line_below))
b = self._substr(line_below, ignore_blank_lines)
return a and not b
def _substr(self, line, ignore_blank_lines):
s = self.view.substr(line)
if ignore_blank_lines:
return s.strip()
return s
|
nilq/baby-python
|
python
|
import sys
sys.path.append('C:\Python27\Lib\site-packages')
import cv2
import numpy as np
import os
import pytesseract
from PIL import Image
from ConnectedAnalysis import ConnectedAnalysis
import post_process as pp
input_folder = r"C:\Users\SRIDHAR\Documents\python\final\seg_new"
output_folder = "temp"
def postProcess(plate):  # keep only uppercase letters and digits from the OCR'd plate string
    res = ""
    for ch in plate:
        if ('A' <= ch <= 'Z') or ('0' <= ch <= '9'):
            res += ch
    return res
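# Hedged alternative sketch (not in the original script): the same A-Z/0-9
# filtering as postProcess, expressed as a single regular expression.
import re
def postProcessRegex(plate):
    return re.sub(r'[^A-Z0-9]', '', plate)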
i=1
ini_res = []
for filename in os.listdir(input_folder):
    img = cv2.imread(os.path.join(input_folder, filename))  #44 #165
    outName = os.path.join(output_folder, filename)
    finalstr = pytesseract.image_to_string(Image.fromarray(cv2.bitwise_not(img)))
finalstr = postProcess(finalstr)
    temp = filename[:-4]  # strip the 4-character extension to recover the numeric image id
    ini_res.append([int(temp), finalstr])
print (temp+" "+finalstr);
i = i+1
#print(i)
pp.result(ini_res)
|
nilq/baby-python
|
python
|
from pymongo import MongoClient
import os
class Mongo:
def __init__(self):
self.__client = MongoClient(os.environ['MONGODB_CONNECTIONSTRING'])
self.__db = self.__client.WebScrapingStocks
def insert_quotes(self, quotes):
dict_quotes = []
for quote in quotes:
dict_quotes.append(quote.__dict__)
self.__db.quotes.insert_many(dict_quotes)
def get_quotes(self):
return self.__db.quotes.find()
def update_quote(self, quote):
query = { "codigo": quote.codigo, "date": quote.date }
new_value = {"$set": {"value" : quote.value}}
self.__db.quotes.update_one(query, new_value)
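# Hedged usage sketch (commented out; assumes MONGODB_CONNECTIONSTRING is set
# and quote objects expose codigo/date/value attributes):
# mongo = Mongo()
# mongo.insert_quotes(scraped_quotes)
# for doc in mongo.get_quotes():
#     print(doc)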
|
nilq/baby-python
|
python
|
class DubboError(RuntimeError):
def __init__(self, status, msg):
self.status = status
self.message = msg
|
nilq/baby-python
|
python
|
test_issue_data = """
#### Advanced Settings Modified? (Yes or No)
## What is your overall Commons Configuration strategy?
{overall_strategy}
### [FORK MY PROPOSAL]() (link)
# Module 1: Token Freeze and Token Thaw
- **Token Freeze** is set to **{token_freeze_period} weeks**, meaning that 100% of TEC tokens minted for Hatchers will remain locked from being sold or transferred for {token_freeze_period} weeks.
- **Token Thaw** is set to **{token_thaw_period} weeks**, meaning that from the end of Token Freeze, over the course of {token_thaw_period} weeks tokens minted for Hatchers gradually become liquid. At the end of {token_thaw_period} weeks 100% of the Hatchers' TEC tokens have become liquid.
- The **Opening Price** is set to **{opening_price} wxDAI**, meaning at the outset of the Commons Upgrade the price to buy TEC on the Augmented Bonding Curve will be {opening_price} wxDAI.
### Strategy:
{token_lockup_strategy}
### Data:

| # of Weeks | % of Tokens Released | Price Floor of Token |
| ---------------------------- | --------------------- | ---------------------- |
| {token_lockup_week[0]} weeks | {tokens_released[0]}% | {price_floor[0]} wxDAI |
| {token_lockup_week[1]} weeks | {tokens_released[1]}% | {price_floor[1]} wxDAI |
| {token_lockup_week[2]} weeks | {tokens_released[2]}% | {price_floor[2]} wxDAI |
| {token_lockup_week[3]} weeks | {tokens_released[3]}% | {price_floor[3]} wxDAI |
| {token_lockup_week[4]} weeks | {tokens_released[4]}% | {price_floor[4]} wxDAI |
| {token_lockup_week[5]} weeks | {tokens_released[5]}% | {price_floor[5]} wxDAI |
# Module 2: Augmented Bonding Curve (ABC)
- **Commons Tribute** is set to **{commons_tribute}%**, which means that {commons_tribute}% of the Hatch funds will go to the Common Pool and {commons_tribute_remainder}% will go to the Reserve Balance.
- **Entry Tribute** is set to **{entry_tribute}%** meaning that from every **BUY** order on the ABC, {entry_tribute}% of the order value in wxDAI is subtracted and sent to the Common Pool.
- **Exit Tribute** is set to **{exit_tribute}%** meaning that from every **SELL** order on the ABC, {exit_tribute}% of the order value in wxDAI is subtracted and sent to the Common Pool.
### Strategy:
{abc_strategy}
### Data:
>We're very bullish on TEC so we only provide the BUY scenario as the standard 3 steps that are used to compare different proposals

| Step # | Current Price | Amount In | Tribute Collected | Amount Out | New Price | Price Slippage |
| ------------------ | ------------------ | -------------- | ---------------------- | --------------- | -------------- | ------------------- |
| **Step {step[0]}** | {current_price[0]} | {amount_in[0]} | {tribute_collected[0]} | {amount_out[0]} | {new_price[0]} | {price_slippage[0]} |
| **Step {step[1]}** | {current_price[1]} | {amount_in[1]} | {tribute_collected[1]} | {amount_out[1]} | {new_price[1]} | {price_slippage[1]} |
| **Step {step[2]}** | {current_price[2]} | {amount_in[2]} | {tribute_collected[2]} | {amount_out[2]} | {new_price[2]} | {price_slippage[2]} |
# Module 3: Tao Voting
- **Support Required** is set to **{support_required}%**, which means {support_required}% of all votes must be in favour of a proposal for it to pass.
- **Minimum Quorum** is set to **{minimum_quorum}%**, meaning that {minimum_quorum}% of all tokens need to have voted on a proposal in order for it to become valid.
- **Vote Duration** is **{vote_duration_days} day(s)**, meaning that eligible voters will have {vote_duration_days} day(s) to vote on a proposal.
- **Delegated Voting Period** is set for **{delegated_voting_days} day(s)**, meaning that Delegates will have {delegated_voting_days} day(s) to use their delegated voting power to vote on a proposal.
- **Quiet Ending Period** is set to **{quiet_ending_days} day(s)**, meaning that {quiet_ending_days} day(s) before the end of the Vote Duration, if the vote outcome changes, the Quiet Ending Extension will be triggered.
- **Quiet Ending Extension** is set to **{quiet_ending_extension_days} day(s)**, meaning that if the vote outcome changes during the Quiet Ending Period, an additional {quiet_ending_extension_days} day(s) will be added for voting.
- **Execution Delay** is set to **{execution_delay_days} day(s)**, meaning that there is a {execution_delay_days}-day delay after the vote is passed before the proposed action is executed.
### Strategy:
{tao_voting_strategy}
### Data:

|# of Quiet Ending Extensions | No Extensions | With 1 Extension | With 2 Extensions |
| ------------------------------------------- | ------------------------- | ------------------------------------- | -------------------------------------- |
| **Total Amount of Time to Complete a Vote** | {vote_duration_days} days | {vote_duration_days_1_extension} days | {vote_duration_days_2_extensions} days |
# Module 4: Conviction Voting Strategy
- **Conviction Growth** is set to **{conviction_growth_days} day(s)**, meaning that Conviction will increase by 50% every {conviction_growth_days} day(s).
- **Minimum Conviction** is set to **{minimum_conviction}%**, which means that passing a funding request, even for an infinitely small amount, will still take a minimum of {minimum_conviction}% of the total TEC currently active in the Conviction Voting application.
- The **Spending Limit** is set to **{relative_spending_limit}%**, which means that no more than {relative_spending_limit}% of the total funds in the Common Pool can be requested by a single proposal.
### Strategy:
{conviction_voting_strategy}
### Data:

| Variables | Scenario 1 | Scenario 2 | Scenario 3 | Scenario 4 | Scenario 5 | Scenario 6 |
| -------------------------------- | ------------------------- | ------------------------- | ------------------------- | ------------------------- | ------------------------- | ------------------------- |
| **Effective Supply** | {effective_supply[0]} | {effective_supply[1]} | {effective_supply[2]} | {effective_supply[3]} | {effective_supply[4]} | {effective_supply[5]} |
| **Requested Amount (wxDAI)** | **{requested_amount[0]}** | **{requested_amount[1]}** | **{requested_amount[2]}** | **{requested_amount[3]}** | **{requested_amount[4]}** | **{requested_amount[5]}** |
| Amount in Common Pool (wxDAI) | {amount_common_pool[0]} | {amount_common_pool[1]} | {amount_common_pool[2]} | {amount_common_pool[3]} | {amount_common_pool[4]} | {amount_common_pool[5]} |
| Minimum Tokens Needed to Pass | {min_tokens_pass[0]} | {min_tokens_pass[1]} | {min_tokens_pass[2]} | {min_tokens_pass[3]} | {min_tokens_pass[4]} | {min_tokens_pass[5]} |
| Tokens Needed To Pass in 2 weeks | {tokens_pass_2_weeks[0]} | {tokens_pass_2_weeks[1]} | {tokens_pass_2_weeks[2]} | {tokens_pass_2_weeks[3]} | {tokens_pass_2_weeks[4]} | {tokens_pass_2_weeks[5]} |
------
### [FORK MY PROPOSAL]() (link)
# Summary
### Module 1: Token Freeze & Token Thaw
| Parameter | Value |
| ------------- | --------------------------- |
| Token Freeze | {token_freeze_period} Weeks |
| Token Thaw | {token_thaw_period} Weeks |
| Opening Price | {opening_price} wxDAI |
### Module 2: Augmented Bonding Curve
| Parameter | Value |
| ---------------- | ------------------ |
| Commons Tribute | {commons_tribute}% |
| Entry Tribute | {entry_tribute}% |
| Exit Tribute | {exit_tribute}% |
| *_Reserve Ratio_ | {reserve_ratio}% |
*Reserve Ratio is an output derived from the Opening Price and Commons Tribute. [Learn more about the Reserve Ratio here](https://forum.tecommons.org/t/augmented-bonding-curve-opening-price-reserve-ratio/516).
### Module 3: Tao Voting
| Parameters | Value |
| ----------------------- | ------------------------------------ |
| Support Required | {support_required}% |
| Minimum Quorum | {minimum_quorum}% |
| Vote Duration | {vote_duration_days} days(s) |
| Delegated Voting Period | {delegated_voting_days} day(s) |
| Quiet Ending Period | {quiet_ending_days} day(s) |
| Quiet Ending Extension | {quiet_ending_extension_days} day(s) |
| Execution Delay | {execution_delay_days} day(s) |
### Module 4: Conviction Voting
| Parameter | Value |
| ------------------ | ------------------------------- |
| Conviction Growth | {conviction_growth_days} day(s) |
| Minimum Conviction | {minimum_conviction}% |
| Spending Limit | {relative_spending_limit}% |
### *Advanced Settings
>This will be empty or non-existent if the user did not change any advanced settings from their defaults. Any settings changed from default will show up here.
| Parameter | Value |
| ----------------------- | --------------------------- |
| Minimum Effective Supply | {minimum_effective_supply}% |
| Hatchers Rage Quit | {hatchers_rage_quit}% |
| Virtual Balance | {virtual_balance} wxDAI |
[*Learn more about Advanced Settings on the TEC forum](https://forum.tecommons.org/c/defi-legos-and-how-they-work-together/adv-ccd-params/27)
### [FORK MY PROPOSAL]() (link)
"""
|
nilq/baby-python
|
python
|
import unittest
from rooms.room import Room
from rooms.position import Position
from rooms.vector import build_vector
from rooms.actor import Actor
from rooms.vision import Vision
from rooms.geography.basic_geography import BasicGeography
class SimpleVisionTest(unittest.TestCase):
def setUp(self):
self.room = Room("game1", "map1.room1", None)
self.room.coords(0, 0, 100, 100)
self.vision = Vision(self.room)
self.room.vision = self.vision
self.room.geography = BasicGeography()
self.actor1 = Actor(self.room, None, None, actor_id="actor1")
self.actor1.position = Position(1, 1)
self.actor1.move_to(Position(5, 5))
self.actor2 = Actor(self.room, None, None, actor_id="actor2")
self.actor2.position = Position(1, 1)
self.actor2.move_to(Position(5, 5))
def testPropagateMessages(self):
self.room.put_actor(self.actor1)
queue = self.room.vision.connect_vision_queue(self.actor1.actor_id)
command = queue.get_nowait()
self.assertEquals("sync", command['command'])
command = queue.get_nowait()
self.assertEquals("actor_update", command['command'])
self.assertTrue(queue.empty())
self.actor1.state.something = "else"
command = queue.get_nowait()
self.assertEquals("actor_update", command['command'])
self.assertTrue(queue.empty())
self.actor1.visible = False
command = queue.get_nowait()
self.assertEquals("actor_update", command['command'])
self.assertTrue(queue.empty())
def testRemoveActor(self):
self.room.put_actor(self.actor1)
self.room.put_actor(self.actor2)
queue = self.room.vision.connect_vision_queue(self.actor1.actor_id)
command = queue.get_nowait()
self.assertEquals("sync", command['command'])
command = queue.get_nowait()
self.assertEquals("actor_update", command['command'])
command = queue.get_nowait()
self.assertEquals("actor_update", command['command'])
self.assertTrue(queue.empty())
self.room._remove_actor(self.actor2)
command = queue.get_nowait()
self.assertEquals("remove_actor", command['command'])
self.assertTrue(queue.empty())
def testActorInvisible(self):
self.room.put_actor(self.actor1)
self.room.put_actor(self.actor2)
queue = self.room.vision.connect_vision_queue(self.actor1.actor_id)
command = queue.get_nowait()
self.assertEquals("sync", command['command'])
command = queue.get_nowait()
self.assertEquals("actor_update", command['command'])
command = queue.get_nowait()
self.assertEquals("actor_update", command['command'])
self.assertTrue(queue.empty())
self.actor2.visible = False
command = queue.get_nowait()
self.assertEquals("remove_actor", command['command'])
self.assertTrue(queue.empty())
def testMultiLayeredDockingVisibility(self):
# test if a is docked with b is docked with c that:
# c is visible to all
# b is invisible to all, but visible to a
        # a is invisible to all, but visible to b
pass
def testSendActorEvent(self):
self.room.put_actor(self.actor1)
self.room.put_actor(self.actor2)
queue1 = self.room.vision.connect_vision_queue(self.actor1.actor_id)
queue2 = self.room.vision.connect_vision_queue(self.actor2.actor_id)
# clear out sync events
queue1.queue.clear()
queue2.queue.clear()
self.actor1.send_message({'type': 'random'})
self.assertEquals(
{'command': 'actor_message', 'actor_id': self.actor1.actor_id,
'data': {'type': 'random'}}, queue1.get_nowait())
self.assertEquals(
{'command': 'actor_message', 'actor_id': self.actor1.actor_id,
'data': {'type': 'random'}}, queue2.get_nowait())
# invisible actors tell no tales
self.actor1.visible = False
# clear out invisible events
queue1.queue.clear()
queue2.queue.clear()
self.actor1.send_message({'type': 'second'})
self.assertEquals(
{'command': 'actor_message', 'actor_id': self.actor1.actor_id,
'data': {'type': 'second'}}, queue1.get_nowait())
self.assertTrue(queue2.empty())
# also docked actors
# also admin queues
def testSendRoomEvent(self):
self.room.put_actor(self.actor1)
self.room.put_actor(self.actor2)
queue1 = self.room.vision.connect_vision_queue(self.actor1.actor_id)
queue2 = self.room.vision.connect_vision_queue(self.actor2.actor_id)
# clear out sync events
queue1.queue.clear()
queue2.queue.clear()
self.room.send_message('test', Position(0, 0), {'type': 'random'})
self.assertEquals(
{'command': 'message',
'data': {'type': 'random'},
'message_type': 'test',
'position': {u'x': 0.0, u'y': 0.0, u'z': 0.0}},
queue1.get_nowait())
self.assertEquals(
{'command': 'message',
'data': {'type': 'random'},
'message_type': 'test',
'position': {u'x': 0.0, u'y': 0.0, u'z': 0.0}},
queue2.get_nowait())
# also admin queues
|
nilq/baby-python
|
python
|
# coding: utf-8
import sys, os
sys.path.append(os.pardir)
import numpy as np
from common.layers import *
from common.gradient import numerical_gradient
from collections import OrderedDict
from dataset.mnist import load_mnist
class SGD:
def __init__(self, lr=0.01):
self.lr = lr
def update(self, params, grads):
for key in params.keys():
params[key] -= self.lr * grads[key]
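# Hedged companion sketch (not in the original file): classic SGD with
# momentum, following the same update(params, grads) interface as SGD above.
class Momentum:
    def __init__(self, lr=0.01, momentum=0.9):
        self.lr = lr
        self.momentum = momentum
        self.v = None
    def update(self, params, grads):
        if self.v is None:
            # lazily allocate one velocity buffer per parameter
            self.v = {key: np.zeros_like(val) for key, val in params.items()}
        for key in params.keys():
            self.v[key] = self.momentum * self.v[key] - self.lr * grads[key]
            params[key] += self.v[key]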
class TwoLayerNet():
def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
self.params = {}
self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
self.params['b1'] = weight_init_std * np.zeros(hidden_size)
self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
self.params['b2'] = weight_init_std * np.zeros(output_size)
self.layers = OrderedDict()
self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
self.layers['Relu1'] = Relu()
self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
self.lastLayer = SoftmaxWithLoss()
def predict(self, x):
for layer in self.layers.values():
x = layer.forward(x)
return x
def loss(self, x, t):
y = self.predict(x)
return self.lastLayer.forward(y, t)
def accuracy(self, x, t):
y = self.predict(x)
y = np.argmax(y, axis=1)
if t.ndim != 1 : t = np.argmax(t, axis=1)
accuracy = np.sum(y == t) / float(x.shape[0])
return accuracy
def numerical_gradient(self, x, t):
loss_W = lambda W: self.loss(x, t)
grads = {}
grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
return grads
def gradient(self, x, t):
# forward
self.loss(x, t)
# backward
dout = 1
dout = self.lastLayer.backward(dout)
layers = list(self.layers.values())
layers.reverse()
for layer in layers:
dout = layer.backward(dout)
grads = {}
grads['W1'] = self.layers['Affine1'].dW
grads['b1'] = self.layers['Affine1'].db
grads['W2'] = self.layers['Affine2'].dW
grads['b2'] = self.layers['Affine2'].db
return grads
def gradient_check():
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
x_batch = x_train[:3]
t_batch = t_train[:3]
grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)
for key in grad_numerical.keys():
diff = np.average( np.abs(grad_backprop[key] - grad_numerical[key]))
print(key + ':' + str(diff))
def train_network():
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
optimizer = SGD()
iters_num = 10000
batch_size = 100
train_size = x_train.shape[0]
iter_per_epoch = max(train_size / batch_size, 1)
for i in range(iters_num):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
grads = network.gradient(x_batch, t_batch)
optimizer.update(network.params, grads)
if i % iter_per_epoch == 0:
loss = network.loss(x_batch, t_batch)
train_acc = network.accuracy(x_train, t_train)
test_acc = network.accuracy(x_test, t_test)
print('-----------------')
print('loss : ' + str(loss))
print('train accuracy : ' + str(train_acc))
print('test accuracy : ' + str(test_acc))
if __name__ == '__main__':
# gradient_check()
train_network()
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.7 on 2020-08-04 09:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contr_clienti', '0010_contractscan_actaditional'),
]
operations = [
migrations.AlterField(
model_name='contractscan',
name='contract',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='contr_clienti.Contract'),
),
]
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import requests, json, fire, os
slack_webhook_url = os.environ['XKCD_SLACK_WEBHOOK_URL']
slack_headers={'Content-Type': 'application/json'}
def slack_post(content):
_slack_post = requests.post(slack_webhook_url, data=json.dumps(content), headers=slack_headers)
    return _slack_post.text
def slack_content_build(title, image, alt):
_output = {"text": "*{0}*\n{1}\n{2}".format(title,alt,image)}
    return _output
# class named Get for cli usability
class Get(object):
def comic_current(self):
_current_comic = requests.get("https://xkcd.com/info.0.json").json()
_title = _current_comic["title"]
_alt = _current_comic["alt"]
_image = _current_comic["img"]
_content = slack_content_build(_title, _image, _alt)
slack_post(_content)
def comic_by_id(self, comic_id):
_comic = requests.get("https://xkcd.com/{0}/info.0.json".format(comic_id)).json()
_title = _comic["title"]
_alt = _comic["alt"]
_image = _comic["img"]
_content = slack_content_build(_title, _image, _alt)
slack_post(_content)
class Pipeline(object):
def __init__(self):
self.get = Get()
if __name__ == '__main__':
fire.Fire(Pipeline)
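# Hedged usage sketch (shell; the script name is whatever this file is saved as):
#   XKCD_SLACK_WEBHOOK_URL=... python xkcd.py get comic_current
#   XKCD_SLACK_WEBHOOK_URL=... python xkcd.py get comic_by_id 353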
|
nilq/baby-python
|
python
|
import pytest
import torch
from nnrl.nn.actor import (
Alpha,
DeterministicPolicy,
MLPContinuousPolicy,
MLPDeterministicPolicy,
)
from nnrl.nn.critic import ActionValueCritic, MLPVValue
from nnrl.nn.model import EnsembleSpec, build_ensemble, build_single
from ray.rllib import SampleBatch
from raylab.utils.debug import fake_batch
@pytest.fixture(scope="module")
def reward_fn():
def func(obs, act, new_obs):
return new_obs[..., 0] - obs[..., 0] - act.norm(dim=-1)
return func
@pytest.fixture(scope="module")
def termination_fn():
def func(obs, *_):
return torch.randn_like(obs[..., 0]) > 0
return func
@pytest.fixture
def batch(obs_space, action_space):
samples = fake_batch(obs_space, action_space, batch_size=256)
return {k: torch.from_numpy(v) for k, v in samples.items()}
@pytest.fixture
def obs(batch):
return batch[SampleBatch.CUR_OBS]
@pytest.fixture
def rew(batch):
return batch[SampleBatch.REWARDS]
@pytest.fixture
def done(batch):
return batch[SampleBatch.DONES]
@pytest.fixture
def new_obs(batch):
return batch[SampleBatch.NEXT_OBS]
@pytest.fixture
def model_spec():
spec = EnsembleSpec()
spec.network.units = (32,)
spec.network.input_dependent_scale = True
spec.residual = True
return spec
@pytest.fixture
def model(obs_space, action_space, model_spec):
return build_single(obs_space, action_space, model_spec)
@pytest.fixture(params=(1, 2, 4), ids=(f"Models({n})" for n in (1, 2, 4)))
def models(request, obs_space, action_space, model_spec):
spec = model_spec
spec.ensemble_size = request.param
spec.parallelize = True
return build_ensemble(obs_space, action_space, spec)
@pytest.fixture(params=(1, 2), ids=(f"Critics({n})" for n in (1, 2)))
def action_critics(request, obs_space, action_space):
config = {
"encoder": {"units": [32]},
"double_q": request.param == 2,
"parallelize": False,
}
spec = ActionValueCritic.spec_cls.from_dict(config)
act_critic = ActionValueCritic(obs_space, action_space, spec)
return act_critic.q_values, act_critic.target_q_values
@pytest.fixture
def state_critics(obs_space):
spec = MLPVValue.spec_cls()
spec.units = (32,)
spec.activation = "ReLU"
spec.layer_norm = False
main, target = MLPVValue(obs_space, spec), MLPVValue(obs_space, spec)
return main, target
@pytest.fixture
def deterministic_policies(obs_space, action_space):
spec = MLPDeterministicPolicy.spec_cls(
units=(32,), activation="ReLU", norm_beta=1.2
)
policy = MLPDeterministicPolicy(obs_space, action_space, spec)
target_policy = DeterministicPolicy.add_gaussian_noise(policy, noise_stddev=0.3)
return policy, target_policy
@pytest.fixture(params=(True, False), ids=(f"PiScaleDep({b})" for b in (True, False)))
def policy_input_scale(request):
return request.param
@pytest.fixture
def stochastic_policy(obs_space, action_space, policy_input_scale):
config = {"encoder": {"units": (32,)}}
mlp_spec = MLPContinuousPolicy.spec_cls.from_dict(config)
return MLPContinuousPolicy(
obs_space, action_space, mlp_spec, input_dependent_scale=policy_input_scale
)
@pytest.fixture
def alpha_module():
return Alpha(1.0)
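# Hedged example (commented out; not in the original fixture module): a test
# consuming the fixtures above. reward_fn reduces away the last observation
# axis, so its output shape matches obs minus the feature dimension.
# def test_reward_shape(reward_fn, obs, batch):
#     act = batch[SampleBatch.ACTIONS]
#     new_obs = batch[SampleBatch.NEXT_OBS]
#     assert reward_fn(obs, act, new_obs).shape == obs.shape[:-1]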
|
nilq/baby-python
|
python
|
_base_ = './fcn_r50-d8_512x512_20k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c',
backbone=dict(depth=101),
decode_head=dict(num_classes=2),
auxiliary_head=dict(num_classes=2)
)
dataset_type = 'PLDUDataset'  # Dataset type; this will be used to define the dataset.
data_root = '../data/pldu/'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type='PLDUDataset',
data_root='../data/pldu/',
img_dir='img_dir/train',
ann_dir='ann_dir/train',
split=None,
),
val=dict(
type='PLDUDataset',
data_root='../data/pldu/',
img_dir='img_dir/val',
ann_dir='ann_dir/val',
split=None,
),
test=dict(
type='PLDUDataset',
data_root='../data/pldu/',
img_dir='img_dir/val',
ann_dir='ann_dir/val',
split=None,
)
)
|
nilq/baby-python
|
python
|
import sys
import os
import argparse
def make_streams_binary():
sys.stdin = sys.stdin.detach()
sys.stdout = sys.stdout.detach()
parser = argparse.ArgumentParser(description='generate random data.')
parser.add_argument('--octets', metavar='N', dest='octets',
                    type=int, nargs='?', default=2048,
                    help='octets length (default: 2048)')
args = parser.parse_args()
octets = args.octets
make_streams_binary()
random_data = os.urandom(octets)
sys.stdout.write(random_data)
|
nilq/baby-python
|
python
|
# pvtrace is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pvtrace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from Geometry import Box, Cylinder, Ray, cmp_points, separation
from external.transformations import translation_matrix, rotation_matrix
import external.transformations as tf
import numpy as np
def transform_point(point, transform):
return np.array(np.dot(transform, np.matrix(np.concatenate((point, [1.]))).transpose()).transpose()[0,0:3]).squeeze()
def transform_direction(direction, transform):
angle, axis, point = tf.rotation_from_matrix(transform)
rotation_transform = tf.rotation_matrix(angle, axis)
return np.array(np.dot(rotation_transform, np.matrix(np.concatenate((direction, [1.]))).transpose()).transpose()[0,0:3]).squeeze()
class CSGadd(object):
"""
Constructive Solid Geometry Boolean Addition
"""
def __init__(self, ADDone, ADDtwo):
super(CSGadd, self).__init__()
self.ADDone = ADDone
self.ADDtwo = ADDtwo
self.reference = 'CSGadd'
self.transform = tf.identity_matrix()
def append_name(self, namestring):
"""
In case a scene contains several CSG objects, this helps
with surface identification (see return value of def surface_identifier(..))
"""
self.reference = namestring
def append_transform(self, new_transform):
self.transform = tf.concatenate_matrices(new_transform, self.transform)
        self.ADDone.transform = tf.concatenate_matrices(new_transform, self.ADDone.transform)
        self.ADDtwo.transform = tf.concatenate_matrices(new_transform, self.ADDtwo.transform)
def contains(self, point):
"""
        Returns True if point is contained by CSGadd, False otherwise
"""
invtransform = tf.inverse_matrix(self.transform)
local_point = transform_point(point, invtransform)
bool1 = self.ADDone.contains(local_point)
bool2 = self.ADDtwo.contains(local_point)
bool3 = self.ADDone.on_surface(local_point)
bool4 = self.ADDtwo.on_surface(local_point)
if bool1 or bool2:
return True
if bool3 and bool4:
return True
return False
def intersection(self, ray):
"""
Returns the intersection points of ray with CSGadd in global frame
"""
        # We will need the invtransform later when we return the results...
invtransform = tf.inverse_matrix(self.transform)
localray = Ray()
localray.position = transform_point(ray.position, invtransform)
localray.direction = transform_direction(ray.direction, invtransform)
ADDone__intersections = self.ADDone.intersection(localray)
ADDtwo__intersections = self.ADDtwo.intersection(localray)
"""
Cover the simpler cases
"""
if ADDone__intersections == None and ADDtwo__intersections == None:
return None
"""
Change ..._intersections into tuples
"""
if ADDone__intersections != None:
for i in range(0,len(ADDone__intersections)):
point = ADDone__intersections[i]
new_point = (point[0], point[1], point[2])
ADDone__intersections[i] = new_point
if ADDtwo__intersections != None:
for i in range(0,len(ADDtwo__intersections)):
point = ADDtwo__intersections[i]
new_point = (point[0],point[1],point[2])
ADDtwo__intersections[i] = new_point
"""
        Only intersection points NOT contained in the respective other structure are relevant
"""
ADDone_intersections = []
ADDtwo_intersections = []
if ADDone__intersections != None:
for i in range(0,len(ADDone__intersections)):
if self.ADDtwo.contains(ADDone__intersections[i]) == False:
ADDone_intersections.append(ADDone__intersections[i])
if ADDtwo__intersections != None:
for j in range(0,len(ADDtwo__intersections)):
if self.ADDone.contains(ADDtwo__intersections[j]) == False:
ADDtwo_intersections.append(ADDtwo__intersections[j])
"""
=> Convert to list
"""
ADDone_set = set(ADDone_intersections[:])
ADDtwo_set = set(ADDtwo_intersections[:])
combined_set = ADDone_set | ADDtwo_set
combined_intersections = list(combined_set)
"""
Just in case...
"""
if len(combined_intersections) == 0:
return None
"""
Sort by separation from ray origin
"""
intersection_separations = []
for point in combined_intersections:
intersection_separations.append(separation(ray.position, point))
"""
Convert into Numpy arrays in order to sort
"""
intersection_separations = np.array(intersection_separations)
sorted_indices = intersection_separations.argsort()
sorted_combined_intersections = []
for index in sorted_indices:
sorted_combined_intersections.append(np.array(combined_intersections[index]))
global_frame_intersections = []
for point in sorted_combined_intersections:
global_frame_intersections.append(transform_point(point, self.transform))
global_frame_intersections_cleared = []
for point in global_frame_intersections:
if self.on_surface(point) == True:
"""
This is only necessary if the two objects have an entire surface region in common,
for example consider two boxes joined at one face.
"""
global_frame_intersections_cleared.append(point)
if len(global_frame_intersections_cleared) == 0:
return None
return global_frame_intersections_cleared
def on_surface(self, point):
"""
Returns True or False dependent on whether point on CSGadd surface or not
"""
if self.contains(point):
return False
invtransform = tf.inverse_matrix(self.transform)
local_point = transform_point(point, invtransform)
bool1 = self.ADDone.on_surface(local_point)
bool2 = self.ADDtwo.on_surface(local_point)
if bool1 == True and self.ADDtwo.contains(local_point) == False:
return True
if bool2 == True and self.ADDone.contains(local_point) == False:
return True
if bool1 == bool2 == True:
return True
else:
return False
def surface_identifier(self, surface_point, assert_on_surface = True):
"""
Returns surface-ID name if surface_point located on CSGadd surface
"""
"""
Ensure surface_point on CSGadd surface
"""
invtransform = tf.inverse_matrix(self.transform)
local_point = transform_point(surface_point, invtransform)
bool1 = self.ADDone.on_surface(local_point)
bool2 = self.ADDtwo.on_surface(local_point)
assertbool = False
if bool1 == True and self.ADDtwo.contains(local_point) == False:
assertbool = True
elif bool2 == True and self.ADDone.contains(local_point) == False:
assertbool = True
elif bool1 == bool2 == True:
assertbool = True
if assert_on_surface == True:
assert assertbool == True
if bool1 == True and self.ADDtwo.contains(local_point) == False:
return self.reference + "_ADDone_" + self.ADDone.surface_identifier(local_point)
if bool2 == True and self.ADDone.contains(local_point) == False:
return self.reference + "_ADDtwo_" + self.ADDtwo.surface_identifier(local_point)
def surface_normal(self, ray, acute=True):
"""
        Returns surface normal at the point where ray hits the CSGadd surface
"""
"""
        Ensure the ray position is on the CSGadd surface
"""
invtransform = tf.inverse_matrix(self.transform)
localray = Ray()
localray.position = transform_point(ray.position, invtransform)
localray.direction = transform_direction(ray.direction, invtransform)
bool1 = self.ADDone.on_surface(localray.position)
bool2 = self.ADDtwo.on_surface(localray.position)
assertbool = False
if bool1 == True and self.ADDtwo.contains(localray.position) == False:
assertbool = True
elif bool2 == True and self.ADDone.contains(localray.position) == False:
assertbool = True
elif bool1 == bool2 == True:
assertbool = True
assert assertbool == True
if bool1 == True and self.ADDtwo.contains(localray.position) == False:
local_normal = self.ADDone.surface_normal(localray, acute)
return transform_direction(local_normal, self.transform)
if bool2 == True and self.ADDone.contains(localray.position) == False:
local_normal = self.ADDtwo.surface_normal(localray, acute)
return transform_direction(local_normal, self.transform)
class CSGsub(object):
"""
Constructive Solid Geometry Boolean Subtraction
"""
def __init__(self, SUBplus, SUBminus):
"""
Definition {CSGsub} := {SUBplus}/{SUBminus}
"""
super(CSGsub, self).__init__()
self.SUBplus = SUBplus
self.SUBminus = SUBminus
self.reference = 'CSGsub'
self.transform = tf.identity_matrix()
def append_name(self, namestring):
"""
In case a scene contains several CSG objects, this helps
with surface identification
"""
self.reference = namestring
def append_transform(self, new_transform):
self.transform = tf.concatenate_matrices(new_transform, self.transform)
def contains(self, point):
"""
        Returns True if point is contained by CSGsub, False otherwise
"""
invtransform = tf.inverse_matrix(self.transform)
local_point = transform_point(point, invtransform)
bool1 = self.SUBplus.contains(local_point)
bool2 = self.SUBminus.contains(local_point)
if bool1 == False:
return False
if bool2 == True:
return False
else:
return True
def intersection(self, ray):
"""
Returns the intersection points of ray with CSGsub in global frame
"""
        # We will need the invtransform later when we return the results...
invtransform = tf.inverse_matrix(self.transform)
localray = Ray()
localray.position = transform_point(ray.position, invtransform)
localray.direction = transform_direction(ray.direction, invtransform)
SUBplus__intersections = self.SUBplus.intersection(localray)
SUBminus__intersections = self.SUBminus.intersection(localray)
"""
Cover the simpler cases
"""
if SUBplus__intersections == None and SUBminus__intersections == None:
return None
"""
Change ..._intersections into tuples
"""
if SUBplus__intersections != None:
for i in range(0,len(SUBplus__intersections)):
point = SUBplus__intersections[i]
new_point = (point[0], point[1], point[2])
SUBplus__intersections[i] = new_point
if SUBminus__intersections != None:
for i in range(0,len(SUBminus__intersections)):
point = SUBminus__intersections[i]
new_point = (point[0], point[1], point[2])
SUBminus__intersections[i] = new_point
"""
Valid intersection points:
SUBplus intersections must lie outside SUBminus
SUBminus intersections must lie inside SUBplus
"""
SUBplus_intersections = []
SUBminus_intersections = []
if SUBplus__intersections != None:
for intersection in SUBplus__intersections:
if not self.SUBminus.contains(intersection):
SUBplus_intersections.append(intersection)
if SUBminus__intersections != None:
for intersection in SUBminus__intersections:
if self.SUBplus.contains(intersection):
SUBminus_intersections.append(intersection)
# SUBplus_set = set(SUBplus_intersections[:])
# SUBminus_set = set(SUBminus_intersections[:])
# combined_set = SUBplus_set ^ SUBminus_set
# combined_intersections = list(combined_set)
combined_intersections = np.array(list(set(SUBplus_intersections+SUBminus_intersections)))
# intersection_separations = combined_intersections[0]**2+combined_intersections[1]**2+combined_intersections[2]**2
"""
Just in case...
"""
if len(combined_intersections) == 0:
return None
transposed_intersections = combined_intersections.transpose()
intersection_vectors = transposed_intersections[0]-ray.position[0], transposed_intersections[1]-ray.position[1], transposed_intersections[2]-ray.position[2]
# intersection_separations= []
# print combined_intersections, point, intersection_vectors
intersection_separations = intersection_vectors[0]**2+intersection_vectors[1]**2+intersection_vectors[2]**2
# for point in combined_intersections:
# intersection_separations.append(separation(ray.position, point))
# for i in range(len(intersection_separations)):
# print intersection_separations[i], intersection_separations2[i]
"""
Sort by distance from ray origin => Use Numpy arrays
"""
# intersection_separations = np.array(intersection_separations)
sorted_combined_intersections = combined_intersections[intersection_separations.argsort()]
# sorted_combined_intersections = []
# for index in sorted_indices:
# sorted_combined_intersections.append(np.array(combined_intersections[index]))
# global_frame_intersections = []
# for point in sorted_combined_intersections:
# global_frame_intersections.append(transform_point(point, self.transform))
global_frame_intersections = [transform_point(point, self.transform) for point in sorted_combined_intersections]
return global_frame_intersections
def on_surface(self, point):
"""
        Returns True if the point is on the outer or inner surface of the CSGsub, and False otherwise.
"""
invtransform = tf.inverse_matrix(self.transform)
local_point = transform_point(point, invtransform)
bool1 = self.SUBplus.on_surface(local_point)
bool2 = self.SUBminus.on_surface(local_point)
        if bool1 and not self.SUBminus.contains(local_point):
            return True
        if bool2 and self.SUBplus.contains(local_point):
            return True
        return False
""" Alternatively:
if bool1 == bool2 == False:
return False
if bool1 == True and bool2 == True or SUBminus.contains(point) == True:
return False
if bool2 == True and bool1 == True or SUBplus.contains(point) == False:
return False
else:
return True
"""
def surface_identifier(self, surface_point, assert_on_surface = True):
"""
Returns a unique identifier for the surface location on the CSGsub.
"""
invtransform = tf.inverse_matrix(self.transform)
local_point = transform_point(surface_point, invtransform)
bool1 = self.SUBplus.on_surface(local_point)
bool2 = self.SUBminus.on_surface(local_point)
        on_plus_surface = bool1 and not self.SUBminus.contains(local_point)
        on_minus_surface = bool2 and self.SUBplus.contains(local_point)
        if assert_on_surface:
            assert on_plus_surface or on_minus_surface
        if on_plus_surface:
            return self.reference + "_SUBplus_" + self.SUBplus.surface_identifier(local_point)
        if on_minus_surface:
            return self.reference + "_SUBminus_" + self.SUBminus.surface_identifier(local_point)
def surface_normal(self, ray, acute=True):
"""
Return the surface normal for a ray arriving on the CSGsub surface.
"""
invtransform = tf.inverse_matrix(self.transform)
localray = Ray()
localray.position = transform_point(ray.position, invtransform)
localray.direction = transform_direction(ray.direction, invtransform)
bool1 = self.SUBplus.on_surface(localray.position)
bool2 = self.SUBminus.on_surface(localray.position)
        on_plus_surface = bool1 and not self.SUBminus.contains(localray.position)
        on_minus_surface = bool2 and self.SUBplus.contains(localray.position)
        assert on_plus_surface or on_minus_surface
        if on_plus_surface:
            return self.SUBplus.surface_normal(ray, acute)
        if on_minus_surface:
            if acute:
                return self.SUBminus.surface_normal(ray, acute)
            else:
                normal = -1 * self.SUBminus.surface_normal(ray, acute=True)
                # Replace signed zeros: -0.0 compares equal to 0.0, so the
                # assignment rewrites it as an unsigned zero
                for i in range(0, 3):
                    if normal[i] == 0.0:
                        normal[i] = 0.0
                return normal
class CSGint(object):
"""
Constructive Solid Geometry Boolean Intersection
"""
def __init__(self, INTone, INTtwo):
super(CSGint, self).__init__()
self.INTone = INTone
self.INTtwo = INTtwo
self.reference = 'CSGint'
self.transform = tf.identity_matrix()
def append_name(self, namestring):
"""
In case a scene contains several CSG objects, this helps
with surface identification
"""
self.reference = namestring
def append_transform(self, new_transform):
self.transform = tf.concatenate_matrices(new_transform, self.transform)
def contains(self, point):
"""
Returns True if ray contained by CSGint, False otherwise
"""
invtransform = tf.inverse_matrix(self.transform)
point = transform_point(point, invtransform)
bool1 = self.INTone.contains(point)
bool2 = self.INTtwo.contains(point)
if bool1 == bool2 == True:
return True
else:
return False
def intersection(self, ray):
"""
Returns the intersection points of ray with CSGint in global frame
"""
        # We will need the invtransform later when we return the results...
invtransform = tf.inverse_matrix(self.transform)
localray = Ray()
localray.position = transform_point(ray.position, invtransform)
localray.direction = transform_direction(ray.direction, invtransform)
INTone__intersections = self.INTone.intersection(localray)
INTtwo__intersections = self.INTtwo.intersection(localray)
"""
Cover the simpler cases
"""
if INTone__intersections == None and INTtwo__intersections == None:
return None
"""
Change ..._intersections into tuples
"""
if INTone__intersections != None:
for i in range(0,len(INTone__intersections)):
point = INTone__intersections[i]
new_point = (point[0], point[1], point[2])
INTone__intersections[i] = new_point
if INTtwo__intersections != None:
for i in range(0,len(INTtwo__intersections)):
point = INTtwo__intersections[i]
new_point = (point[0], point[1], point[2])
INTtwo__intersections[i] = new_point
"""
Only intersection points contained in resp. other structure relevant
"""
        INTone_intersections = []
        INTtwo_intersections = []
        if INTone__intersections is not None:
            for intersection in INTone__intersections:
                if self.INTtwo.contains(intersection):
                    INTone_intersections.append(intersection)
        if INTtwo__intersections is not None:
            for intersection in INTtwo__intersections:
                if self.INTone.contains(intersection):
                    INTtwo_intersections.append(intersection)
"""
=> Convert to list
"""
INTone_set = set(INTone_intersections[:])
INTtwo_set = set(INTtwo_intersections[:])
combined_set = INTone_set | INTtwo_set
combined_intersections = list(combined_set)
"""
Just in case...
"""
if len(combined_intersections) == 0:
return None
"""
Sort by separation from ray origin
"""
intersection_separations = []
for point in combined_intersections:
intersection_separations.append(separation(ray.position, point))
"""
Convert into Numpy arrays in order to sort
"""
intersection_separations = np.array(intersection_separations)
sorted_indices = intersection_separations.argsort()
sorted_combined_intersections = []
for index in sorted_indices:
sorted_combined_intersections.append(np.array(combined_intersections[index]))
global_frame_intersections = []
for point in sorted_combined_intersections:
global_frame_intersections.append(transform_point(point, self.transform))
return global_frame_intersections
def on_surface(self, point):
"""
        Returns True if the point lies on the CSGint surface, False otherwise
"""
invtransform = tf.inverse_matrix(self.transform)
local_point = transform_point(point, invtransform)
bool1 = self.INTone.on_surface(local_point)
bool2 = self.INTtwo.on_surface(local_point)
        if bool1 and bool2:
            return True
        if bool1 and self.INTtwo.contains(local_point):
            return True
        if bool2 and self.INTone.contains(local_point):
            return True
        return False
def surface_identifier(self, surface_point, assert_on_surface = True):
"""
Returns surface-ID name if surface_point located on CSGint surface
"""
"""
Ensure surface_point on CSGint surface
"""
invtransform = tf.inverse_matrix(self.transform)
local_point = transform_point(surface_point, invtransform)
bool1 = self.INTone.on_surface(local_point)
bool2 = self.INTtwo.on_surface(local_point)
        assertbool = (
            (bool1 and self.INTtwo.contains(local_point)) or
            (bool2 and self.INTone.contains(local_point)) or
            (bool1 and bool2)
        )
        if assert_on_surface:
            assert assertbool
        if bool1:
            return self.reference + "_INTone_" + self.INTone.surface_identifier(local_point)
        if bool2:
            return self.reference + "_INTtwo_" + self.INTtwo.surface_identifier(local_point)
def surface_normal(self, ray, acute=True):
"""
        Returns the surface normal at the point where the ray hits the CSGint surface
"""
"""
Ensure surface_point on CSGint surface
"""
invtransform = tf.inverse_matrix(self.transform)
localray = Ray()
localray.position = transform_point(ray.position, invtransform)
localray.direction = transform_direction(ray.direction, invtransform)
bool1 = self.INTone.on_surface(localray.position)
bool2 = self.INTtwo.on_surface(localray.position)
        assertbool = (
            (bool1 and self.INTtwo.contains(localray.position)) or
            (bool2 and self.INTone.contains(localray.position)) or
            (bool1 and bool2)
        )
        assert assertbool
        if bool1:
            return self.INTone.surface_normal(ray, acute)
        else:
            return self.INTtwo.surface_normal(ray, acute)
if __name__ == '__main__':
"""
TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST
"""
"""
# EXAMPLE ZERO
INTone = Box(origin = (-1.,0.,0.), extent = (1,1,1))
INTtwo = Cylinder(1, 1)
#one.append_transform(tf.rotation_matrix(np.pi/4, (0,0,1)))
intersect = CSGint(INTone, INTtwo)
INTthree = Cylinder(0.5,1)
intersect2 = CSGint(intersect, INTthree)
"""
"""
# EXAMPLE ONE
obj1 = Box(origin=(0,0,0), extent=(3,3,5))
obj2 = Box(origin=(1,1,0), extent=(2,2,7))
boxbox = CSGadd(obj2, obj1)
boxbox.append_name('MyBoxBox')
pt = (1,3,1.5)
ray = Ray(position=(1,3,1.5), direction=(0.,-1.,0.))
print "Point: "
print pt
print "Ray position: "
print ray.position
print "Ray direction: "
print ray.direction
print "\n----> test .contains(pt) "
print obj1.contains(pt)
print obj2.contains(pt)
print boxbox.contains(pt)
print "\n----> test .on_surface(pt)"
print obj1.on_surface(pt)
print obj2.on_surface(pt)
print boxbox.on_surface(pt)
print "\n----> test .surface_identifier(pt)"
print boxbox.surface_identifier(pt)
print "\n----> test .intersection(ray)"
print obj1.intersection(ray)
print obj2.intersection(ray)
print boxbox.intersection(ray)
print "\n----> test .surface_normal(ray)"
print boxbox.surface_normal(ray)
# END EXAMPLE ONE
"""
"""
# EXAMPLE TWO: ITERATIVE ADDITION
obj1 = Box(origin=(0,0,0), extent=(1,1,1))
obj2 = Box(origin=(0,0,0), extent=(1,1,1))
#obj2.append_transform(tf.translation_matrix((0,2,0)))
obj2.append_transform(tf.rotation_matrix(np.pi/4, (0,0,1)))
print obj2.transform
boxbox1 = CSGadd(obj2, obj1)
boxbox1.append_name('MyBoxBox1')
boxbox1.append_transform(tf.translation_matrix((0,0,0)))
boxbox2 = CSGadd(obj2, obj1)
boxbox2.append_name('MyBoxBox2')
boxbox2.append_transform(tf.translation_matrix((0,0,2)))
fourbox = CSGadd(boxbox1, boxbox2)
fourbox.append_name('MyFourBox')
print boxbox1.transform
print '\n'
print boxbox2.transform
print '\n'
print fourbox.transform
print '\n'
print obj2.intersection(ray)
ray = Ray(position=(0.5,10,0.5), direction=(0,-1,0))
print fourbox.intersection(ray)
ray = Ray(position=(0.5,10,2.5), direction=(0,-1,0))
print fourbox.intersection(ray)
print '\nSurface_ID for FourBox'
print fourbox.surface_identifier((0.9,3,0.5))
"""
"""
obj1 = Box(origin=(0,0,0), extent=(1,1,1))
obj2 = Box(origin=(0,0,0), extent=(1,1,1))
obj2.append_transform(tf.rotation_matrix(np.pi/4, (0,0,1)))
obj2.append_transform(tf.translation_matrix((0.5,0,0)))
add = CSGadd(obj1, obj2)
ray = Ray(position=(0.50000000001,10,0.5), direction=(0,-1,0))
print add.intersection(ray)
"""
"""
# EXAMPLE THREE
# Illustrates that if for example two boxes are joined at
# one face with CSGadd, then none of the points on this face are
# surface points (as should be for most of these points).
# However, a ray that is contained in that face will
# not return any intersection points with the CSGadd object
# (which should not be for some points).
obj1 = Box(origin=(0,0,0), extent=(1,1,1))
obj2 = Box(origin=(0,1,0), extent=(1,2,1))
add = CSGadd(obj1, obj2)
ray = Ray(position=(0.5,10,0.5), direction=(0,-1,0))
print add.intersection(ray)
print add.on_surface((0.5,1,0.5))
print add.contains((0.5,1.,0.5))
ray = Ray(position=(10,1,0.5), direction=(-1,0,0))
print add.intersection(ray)
"""
"""
# EXAMPLE FOUR: CSG VISUALISER
INTone = Box(origin = (-1.,-1.,-0.), extent = (1,1,7))
INTtwo = Box(origin = (-0.5,-0.5,0), extent = (0.5,0.5,7))
#INTtwo.append_transform(tf.translation_matrix((0,0.5,0)))
INTtwo.append_transform(tf.rotation_matrix(np.pi/4, (0,0,1)))
MyObj = CSGsub(INTone, INTtwo)
MyObj.append_name('myobj')
vis=Visualiser()
vis.VISUALISER_ON = True
vis.addCSG(MyObj,0.03,-1,1,-1,1,0,10,visual.color.green)
#vis.addCSG(MyObj, visual.color.blue)
"""
"""
box1 = Box()
box2 = Box(origin = (0.2,.2,0), extent = (0.8,0.8,1))
csg = CSGsub(box1, box2)
ray = Ray(position = (0.5,0.8,0.5), direction = (0,-1,0))
normal = csg.surface_normal(ray, acute = False)
print normal
normal = csg.surface_normal(ray, acute = False)
print normal
"""
|
nilq/baby-python
|
python
|
import numpy as np
import math
import Graphics
from typing import List
import json
from scipy.optimize import fmin_powell
Vector = List[float]
import time
class Node (object):
"""A object that defines a position"""
def __init__(self, name: str, pos, constraint_x=0, constraint_y=0):
"""Node: has a name, position and constraints. The loads are are added when the distributed weight is placed
on the beam. A optional value is optimize, for each dimension the position of the node can be optimized
t optimise the construction"""
self.name: str = name
self.pos = np.array(pos)
self.load: Vector = np.array([0, 0])
self.load_list = np.array([0])
self.constraint_x = constraint_x
self.constraint_y = constraint_y
self.optimize: List = np.array([0, 0])
def __str__(self):
text: str = self.name
text += ": " + str(self.pos)
return text
class Beam (object):
"""A beam or rod that is positioned between two nodes
A beam knows the two nodes it is placed between and therefore its length,
with other data as density and cross-section area the weight can be determined,
the placed load is divided for the two nodes."""
def __init__(self, name: str, nodes, v_load, a, b):
self.name: str = name
self.length: float = self.absolute(nodes[a].pos-nodes[b].pos)
self.a_node = a
self.b_node = b
self.pos1: Vector = nodes[a].pos
self.pos2: Vector = nodes[b].pos
self.load: Vector = np.array(v_load)
self.load_nodes: Vector = 0.5 * np.array(v_load) * self.length
self.delta_0: Vector = nodes[a].pos-nodes[b].pos
self.delta_1: Vector = nodes[b].pos - nodes[a].pos
self.angle_0: float = math.atan2(self.delta_0[1], self.delta_0[0])
self.angle_1: float = math.atan2(self.delta_1[1], self.delta_1[0])
self.area = 0.10
self.E_modulus = 210 * 1e+9
self.density = 7850
self.yield_strength = 250 * 1e+6
self.internal_force = 0
self.weight = 0.0
        self.connections = np.zeros(2 * len(nodes))  # one x and one y equilibrium entry per node
self.connections[2 * a] = math.cos(self.angle_0)
self.connections[2 * a + 1] = math.sin(self.angle_0)
self.connections[2 * b] = math.cos(self.angle_1)
self.connections[2 * b + 1] = math.sin(self.angle_1)
@staticmethod
def absolute(arr):
"""Return the absolute length of a vector"""
return np.linalg.norm(arr)
def calculate_beam_weight(self, new_force):
"""
calculates weight of a beam using the internal force of the beam and yield strength of the material
:param new_force:
:return: -
"""
self.internal_force = abs(new_force)
if new_force >= 0:
# Force is stretching beam
self.area = self.internal_force / self.yield_strength
else:
# Force is compressing beam
self.area = math.pow(((self.internal_force * (0.5 * self.length) ** 2 / (
math.pi ** 2 * self.E_modulus)) / (math.pi / 4)), 1 / 2) * math.pi
self.weight = self.area * self.length * self.density
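    # Sizing notes (added for clarity): in tension the required area follows
    # directly from A = F / sigma_yield. In compression the beam is sized
    # against Euler buckling, F = pi^2 * E * I / L_eff^2 with L_eff = 0.5 * L,
    # and for a solid circular cross-section I = (pi / 4) * r^4; solving for r
    # and substituting into A = pi * r^2 reproduces the expression above:
    #   I = F * (0.5 * L)^2 / (pi^2 * E)
    #   A = pi * sqrt(I / (pi / 4))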
def __str__(self):
"""
Overwrites str method, prints important data of the beam
:return text:
"""
text: str = "\n"
text += "Beam: " + self.name + "\n"
text += "\tLength: {0:.2f} m\n".format(round(self.length, 2))
text += "\tArea: {0:.2f} mm²\n".format(round(self.area * 1e6, 2))
text += "\tWeight: {0:.3f} kg\n".format(round(self.weight, 3))
return text
def single_line(self):
text: str = self.name
text += ": {0:.2f}m".format(round(self.length, 2))
text += ", {0:.2f}mm²".format(round(self.area * 1e6, 2))
text += ", {0:.3f}kg".format(round(self.weight, 3))
return text
class Construction(object):
def __init__(self, name: str, nodes: List, beam_list: List, load_list: List):
"""
Creates a construction with the given nodes, beam, loads and constraints
:param name:
:param nodes:
:param beam_list:
"""
self.temp_beams = beam_list
self.materials = {}
self.material: str = ""
self.name: str = name
self.window = Graphics.Construction("Bridge 1", 1280, 720)
self.nodes: List = nodes
self.beams: List = []
self.current_loads = 0
self.load_list = load_list
self.beams = []
self.last_iteration = False
self.max_beams = []
self.set_beams()
self.optional_loads: List = []
self.iteration = 0
# Declare later used data
self.matrix = []
self.B = []
self.X = []
self.weight = np.inf
self.get_materials()
self.inter_plot = False
print("Construction created...")
def set_beams(self):
"""
Rebuilds all beams between the nodes with the new values
:return:
"""
self.beams = []
for x in range(0, len(self.temp_beams)):
self.beams.append(Beam(str(self.temp_beams[x][0]),
self.nodes,
self.load_list[self.current_loads][x],
self.temp_beams[x][1],
self.temp_beams[x][2]))
def optimize(self, active=True, inter_plot=True):
"""
Optimize will generate a construction with minimal weight for the load that is given
Optional: active will activate the minimization function to create a highly optimized construction
:param active:
:param inter_plot:
:return:
"""
self.inter_plot = inter_plot
initial_guess = []
for x in range(0, len(self.nodes)):
if not np.any(self.nodes[x].optimize):
continue
for val in range(0, len(self.nodes[x].optimize)):
if self.nodes[x].optimize[val] != 0:
initial_guess.append(self.nodes[x].pos[val])
initial_guess = np.array(initial_guess)
print("Initial Guess", initial_guess)
print("Calculating Construction....")
constructions_weights = []
load_nr_max_weight = []
results = []
self.max_beams = []
for a in range(0, len(self.load_list)):
# Loop through all loads
self.current_loads = a
print("\n\nCalculating construction for load: ", self.current_loads)
# Create optimal for current load
if active:
result = fmin_powell(self.set_and_calculate, initial_guess, xtol=0.01, ftol=0.005)
else:
result = self.set_and_calculate(initial_guess)
self.plot_construction()
constructions_weights.append(self.weight)
load_nr_max_weight.append(a)
results.append(result)
self.max_beams.append(self.beams)
for y in range(0, len(self.load_list)):
# Make construction strong so that current optimal can hold all loads
if a == y:
continue
self.current_loads = y
self.set_and_calculate(result)
for t in range(0, len(self.beams)):
if self.max_beams[a][t].weight < self.beams[t].weight:
self.max_beams[a][t] = self.beams[t]
# Calculate the weight of current strong optimal
self.weight = 0
for t in range(0, len(self.beams)):
self.beams[t] = self.max_beams[a][t]
self.weight += self.beams[t].weight
if self.weight > constructions_weights[a]:
constructions_weights[a] = self.weight
load_nr_max_weight[a] = y
minimum = min(constructions_weights)
load_index = constructions_weights.index(minimum)
self.current_loads = load_nr_max_weight[load_index]
self.set_and_calculate(results[load_index])
self.beams = self.max_beams[load_index]
self.weight = minimum
print("\n\nThe best weight for all loads is:", minimum, "kg")
print("This is bridge is optimized for load nr: ", load_index)
self.plot_construction(finished=True)
while True:
self.window.hold()
def set_and_calculate(self, new_values):
"""
Sets the variable positions, rebuilds all the beams and calculates the weight of the construction
:return:
"""
self.iteration += 1
t = 0
for x in range(0, len(self.nodes)):
if not np.any(self.nodes[x].optimize):
continue
for val in range(0, len(self.nodes[x].optimize)):
if self.nodes[x].optimize[val] != 0:
self.nodes[x].pos[val] = new_values[t]
t += 1
self.set_beams()
self.get_weight()
if self.inter_plot:
            try:
                self.plot_construction()
            except Exception:
                print("\nWarning: plot failed\n")
return self.weight
def get_weight(self):
lightest_weight = np.inf
best_material = {}
for material in self.materials:
self.set_material(self.materials[material])
self.calculate_weight()
if self.weight < lightest_weight:
best_material = material
lightest_weight = self.weight
self.set_material(self.materials[best_material])
self.material = str(best_material)
self.calculate_weight()
def get_max_beams(self):
pass
def calculate_weight(self):
"""
Calculates the weight of each beam and the total weight of the construction using linear algebra
:return:
"""
self.matrix = []
for x in range(0, len(self.beams)):
self.matrix.append(self.beams[x].connections)
self.matrix = np.array(self.matrix)
self.matrix = self.matrix.transpose()
size = np.shape(self.matrix)
missing = size[0] - size[1]
for x in range(0, missing):
zeros = np.array([np.zeros(size[0])])
self.matrix = np.concatenate((self.matrix, zeros.T), axis=1)
t = size[1]
for x in range(0, len(self.nodes)):
if self.nodes[x].constraint_x != 0:
self.matrix[2 * x][t] = self.nodes[x].constraint_x
t += 1
if self.nodes[x].constraint_y != 0:
self.matrix[2 * x + 1][t] = self.nodes[x].constraint_y
t += 1
self.B = np.zeros(np.shape(self.matrix)[0])
for x in range(0, len(self.nodes)):
self.nodes[x].load = np.array([0, 0])
for x in range(0, len(self.beams)):
self.nodes[self.beams[x].a_node].load = \
self.nodes[self.beams[x].a_node].load + self.beams[x].load_nodes
self.nodes[self.beams[x].b_node].load = \
self.nodes[self.beams[x].b_node].load + self.beams[x].load_nodes
for x in range(0, len(self.nodes)):
self.B[2 * x] = self.nodes[x].load[0]
self.B[2 * x + 1] = self.nodes[x].load[1]
self.weight = 0
try:
self.X = np.dot(np.linalg.inv(self.matrix), self.B)
        except np.linalg.LinAlgError:
print("\nWarning linear algebra Error\n")
self.X = np.full(size[0], 1e20)
for x in range(0, len(self.beams)):
self.beams[x].calculate_beam_weight(self.X[x])
self.weight += self.beams[x].weight
return self.weight
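    # Solution notes (added for clarity): this is the method of joints written
    # as a single linear system A.X = B. Each beam contributes a column of unit
    # vectors pointing along the beam at its two end nodes, the extra columns
    # hold the reaction forces of the constrained nodes, and B collects the
    # external load on every node (one x row and one y row per node). X then
    # contains the internal force of every beam followed by the reactions.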
def set_material(self, current_material: dict):
"""Sets the currently selected material"""
for beam in self.beams:
beam.yield_strength = current_material["yield_strength"]
beam.E_modulus = current_material["E_modulus"]
beam.density = current_material["density"]
def get_materials(self):
"""Gets all available materials from the materials.json dictionary"""
with open("materials.json", "r") as read_file:
self.materials = json.load(read_file)
read_file.close()
self.set_material(self.materials[list(self.materials.keys())[0]])
def __str__(self):
"""Overwritten method to print its data in a certain format when using print() or str()"""
text: str = "\n "
text += "\nA =\n" + str(self.matrix)
text += "\n\nB = \n" + str(self.B)
text += "\n\nX = \n" + str(self.X)
text += "\n\n\t "
for x in range(0, len(self.beams)):
text += str(self.beams[x])
text += "\n\nTotal weight bridge: {0:.3f} kg\n".format(round(self.weight, 3))
return text
def plot_construction(self, finished=False):
offset: Vector = (200, 400)
def inv(pos: Vector):
pos: Vector = pos * np.array([1, -1]) # invert y-axis for graphics
pos: Vector = pos * 200 + offset
return pos
for beam in self.beams:
self.window.draw_beam(beam.name,
inv(beam.pos1),
inv(beam.pos2),
beam.internal_force,
size=int((beam.area * 1e6)**0.7))
for node in self.nodes:
self.window.draw_node(node.name, inv(node.pos))
self.window.draw_force(node.name, inv(node.pos), node.load)
if node.constraint_x != 0:
self.window.draw_constraint_x(node.name + "x", inv(node.pos))
if node.constraint_y != 0:
self.window.draw_constraint_y(node.name + "y", inv(node.pos))
if np.any(node.optimize):
self.window.draw_editable(inv(node.pos))
self.window.add_text((50, 50), "Weight: {0:.3f} kg".format(round(self.weight, 3)))
self.window.add_text((50, 70), "Material: " + self.material)
self.window.add_text((50, 90), "Iteration: " + str(self.iteration))
if finished:
self.window.add_text((50, 30), "OPTIMAL SOLUTION FOUND: ")
self.window.add_text((50, 520), "NODES: ")
for x in range(0, len(self.nodes)):
b = 50 + (x // 5) * 150
h = (x % 5) * 30 + 550
self.window.add_text((b, h), str(self.nodes[x]))
self.window.add_text((400, 520), "BEAMS: ")
for x in range(0, len(self.beams)):
b = 400 + (x // 5) * 300
h = (x % 5) * 30 + 550
self.window.add_text((b, h), self.beams[x].single_line())
self.window.show()
if __name__ == "__main__":
np.set_printoptions(precision=2)
scale: float = 1 # meter
load: float = 1000 # Newton
# A list of all the nodes in the construction
o_nodes = [
Node("A", (0.00001, 0.00001), constraint_x=-1, constraint_y=-1),
Node("B", (1.00001 * scale, 0.00001)),
Node("C", (1.99999 * scale, 0.00001)),
Node("D", (3.00001 * scale, 0.00001)),
Node("E", (4.00001 * scale, 0.00001), constraint_y=-1),
Node("F", (3.00002 * scale, 1.00002 * scale)),
Node("G", (2.00001 * scale, 1.000001 * scale)),
Node("H", (1.00003 * scale, 1.00003 * scale))
]
# A list of all the beams or rods that connect to certain nodes
o_beams = [
["AB", 0, 1],
["AH", 0, 7],
["BC", 1, 2],
["BH", 1, 7],
["BG", 1, 6],
["CD", 2, 3],
["CG", 2, 6],
["DE", 3, 4],
["DF", 3, 5],
["DG", 3, 6],
["EF", 4, 5],
["FG", 5, 6],
["GH", 6, 7],
]
# A list of all the different loads placed on the beams
o_loads = [
[
[0, -1 * load],
[0, 0],
[0, -1 * load],
[0, 0],
[0, 0],
[0, -1 * load],
[0, 0],
[0, -1 * load],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]
]
,
[
[0, -2 * load],
[0, 0],
[0, -1 * load],
[0, 0],
[0, 0],
[0, -0.5 * load],
[0, 0],
[0, -1 * load],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]
]
,
[
[0, -3 * load],
[0, 0],
[0, -1 * load],
[0, 0],
[0, 0],
[0, -4 * load],
[0, 0],
[0, -1 * load],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]
]
]
# All dimensions of the nodes that will be optimized are given a 1 value
o_nodes[1].optimize = np.array([1, 0])
o_nodes[2].optimize = np.array([1, 0])
o_nodes[3].optimize = np.array([1, 0])
o_nodes[5].optimize = np.array([1, 1])
o_nodes[6].optimize = np.array([1, 1])
o_nodes[7].optimize = np.array([1, 1])
# Creates a construction with the given nodes and beams
bridge_1 = Construction("Bridge 1", o_nodes, o_beams, o_loads)
# The bridge is calculated for most optimal weight/load ratio
bridge_1.optimize(active=True, inter_plot=True)
print(bridge_1)
|
nilq/baby-python
|
python
|
"""
This commander shell will be an implementation of the PX4 'commander' CLI (https://docs.px4.io/v1.9.0/en/flight_modes/).
Here you can switch modes on the go. Will require root access for safety reasons.
"""
from cmd import Cmd
import logger
import rospy
from mavros_msgs.srv import CommandBool
banner = """
_____ ____ __ __ __ __ _ _ _____ ______ _____
/ ____/ __ \| \/ | \/ | /\ | \ | | __ \| ____| __ \
| | | | | | \ / | \ / | / \ | \| | | | | |__ | |__) |
| | | | | | |\/| | |\/| | / /\ \ | . ` | | | | __| | _ /
| |___| |__| | | | | | | |/ ____ \| |\ | |__| | |____| | \ \
\_____\____/|_| |_|_| |_/_/ \_\_| \_|_____/|______|_| \_\
"""
log = logger.get_logger(__name__)
def parse(arg):
    'Split the argument string into a tuple of whitespace-separated tokens'
return tuple(arg.split())
class CommanderCmd(Cmd):
intro = banner+ "\nType ? to see a list of available commands"
prompt = "Commander > "
#Takeoff Auto [Position fix required (e.g. GPS)] Vehicle initiates the takeoff sequence using either catapult/hand-launch mode or runway takeoff mode (in the current direction).
def do_takeoff(self, inp):
pass
def help_takeoff(self):
pass
#Land Auto [Position fix required (e.g. GPS)] Vehicle initiates the fixed-wing landing sequence.
def do_land(self,inp):
pass
def help_land(self):
pass
#Hold Auto [Position fix required (e.g. GPS)] Vehicle circles around the GPS hold position at the current altitude.
def do_hold(self,inp):
pass
def help_hold(self):
pass
#Return Auto [Position fix required (e.g. GPS)] Vehicle ascends to a safe height and then returns to its home position and circles.
def do_return(self, inp):
pass
def help_return(self):
pass
#Mission Auto [Position fix required (e.g. GPS)] Vehicle executes a predefined mission/flight plan that has been uploaded to the flight controller.
def do_mission(self, inp):
pass
def help_mission(self):
pass
### Commander Shell functionality ##
def do_arm(self,inp):
rospy.wait_for_service("/mavros/cmd/arming")
try:
arming = rospy.ServiceProxy("mavros/cmd/arming", CommandBool)
if inp.lower() == "true":
resp = arming(True)
resp = "Success: " + str(resp.success)
elif inp.lower() == "false":
resp = arming(False)
resp = "Success: " + str(resp.success)
else:
resp = "No value argument (true/false) given"
print(resp)
        except rospy.ServiceException as e:
            print("Service arm call failed: %s" % e)
### WP Shell functionality ##
def do_exit(self,inp):
print()
if input("Do you want to exit commander? Y/[N] ").lower() == "y":
log.info("Exiting the commander")
return True
def help_exit(self):
pass
help_EOF = help_exit
do_EOF = do_exit
def emptyline(self):
pass
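if __name__ == '__main__':
    # Minimal launch sketch (added for illustration): a ROS node is presumably
    # needed before the MAVROS service calls above can resolve; the node name
    # here is hypothetical.
    rospy.init_node('commander_shell')
    CommanderCmd().cmdloop()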
|
nilq/baby-python
|
python
|
from data.scraper import DataScraper
from PIL import Image,ImageFont,ImageDraw
import time
class GenerateTiles:
def __init__(self,FONT,FONT_SIZE,FONT_COLOR,TILE_SIZE,TILE_BG_COLOR):
self.FONT = FONT
self.FONT_COLOR = FONT_COLOR
self.FONT_SIZE = FONT_SIZE
self.TILE_SIZE = TILE_SIZE
self.TILE_BG_COLOR = TILE_BG_COLOR
#for the logo and title
self.LOGO_SIZE = (50,50)
self.TITLE_FONT_SIZE = int(sum(self.TILE_SIZE) / 40)
self.TITLE_FONT_COLOR = (255,255,255,255)
self.LOGO_TITLE_FONT = ImageFont.truetype(font="arial.ttf",size=self.TITLE_FONT_SIZE)
#last updated field
self.LU_FONT_SIZE = 12
self.LU_FONT_COLOR = (255,255,255,255)
self.LU_FONT = ImageFont.truetype(font="arial.ttf",size=self.LU_FONT_SIZE)
#Titles
self.LOCAL_NEWS_TITLE = "My Republica"
self.INT_NEWS_TITLE = "New York Times"
# News parser is needed to make sure that the text doesn't render out of the screen
def news_parser(self,NEWS):
#The "breadth" of the canvas
render_limit = self.TILE_SIZE[1]
#check for each article in the list.
        for article_no, news_articles in enumerate(NEWS):
length_of_article = len(news_articles)
no_of_chars = 0
index = 0
#for each character consider a certain no of pixels are used up. So, if the text is long it takes up more pixels than the render_limit,
#in which case we add a break line in the article
for characters in news_articles:
# The multiplication factor can be changed if needed, but 0.5 seems to work the best; the 0.5 essentially means a line break is added
# after a certain no of characters have been printed
                no_of_chars += 0.50 * self.FONT_SIZE  # this cannot be 1 because different characters seem to take up different amounts of pixels to render
index += 1
if no_of_chars > render_limit:
news_articles = news_articles[:index] + "-\n" + news_articles[index:]
no_of_chars = 0
            NEWS[article_no] = news_articles
return NEWS
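    # Worked example (added for clarity; numbers are illustrative): with
    # FONT_SIZE = 20 each character is budgeted 0.5 * 20 = 10 "pixels", so a
    # tile whose TILE_SIZE[1] is 400 gets a hyphenated line break inserted
    # roughly every 40 characters.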
def generate_localnews_tile(self):
with Image.open("./images/local_logo.png") as logo:
logo = logo.convert(mode="RGBA",colors=(0,0,0,0))
logo = logo.resize(self.LOGO_SIZE)
logo = logo.copy()
# A blank image where the text is rendered;
canvas = Image.new("RGBA",size=self.TILE_SIZE,color=self.TILE_BG_COLOR)
# Rendering the actual text
drawing = ImageDraw.Draw(canvas)
'''Text Rendering Settings'''
'''Starting posn for drawing text; certain % times size of the canvas '''
# Changing the multiplication factor is enough to change the position
__TEXT_POSN_X = 0 * self.TILE_SIZE[0]
__TEXT_POSN_Y = 0.1 * self.TILE_SIZE[1]
# Spacing between each line; changing the multiplication factor is enough
__SPACING_BETN_LINES = int(1.4 * self.FONT_SIZE)
# keeps track of the lines printed on the screen
_lines = 0
# Scrapes the data required
__LOCAL_NEWS = self.news_parser(DataScraper().localnews())
#draw the logo
canvas.paste(im=logo,box=(0,0))
drawing.text(xy=(__TEXT_POSN_X+200,__TEXT_POSN_Y-40),text=self.LOCAL_NEWS_TITLE,font=self.LOGO_TITLE_FONT,fill=self.TITLE_FONT_COLOR)
_lines+=1
#draw updated time
last_updated = time.strftime("Last Updated: %x At %X %p")
drawing.text(xy=(self.TILE_SIZE[0]-225,self.TILE_SIZE[1]-15),text=last_updated,font=self.LU_FONT,fill=self.LU_FONT_COLOR)
for news_article in __LOCAL_NEWS:
drawing.multiline_text(xy=(__TEXT_POSN_X,__TEXT_POSN_Y+(__SPACING_BETN_LINES*_lines)),text=news_article,font=self.FONT,fill=self.FONT_COLOR)
_lines += 1
if "\n" in news_article:
_lines += news_article.count("\n")
return canvas
# canvas.save("local_news.png")
def generate_int_news_tile(self):
with Image.open("./images/int_logo.png") as logo:
logo = logo.convert(mode="RGBA",colors=(0,0,0,0))
logo = logo.resize(self.LOGO_SIZE)
logo = logo.copy()
# A blank image where the text is rendered;
canvas = Image.new("RGBA",size=self.TILE_SIZE,color=self.TILE_BG_COLOR)
# Rendering the actual text
drawing = ImageDraw.Draw(canvas)
'''Text Rendering Settings'''
'''Starting posn for drawing text; certain % times size of the canvas '''
# Changing the multiplication factor is enough to change the position
__TEXT_POSN_X = 0 * self.TILE_SIZE[0]
__TEXT_POSN_Y = 0.1 * self.TILE_SIZE[1]
# Spacing between each line; changing the multiplication factor is enough
__SPACING_BETN_LINES = int(1.4 * self.FONT_SIZE)
# keeps track of the lines printed on the screen
_lines = 0
# Scrapes the data required
__LOCAL_NEWS = self.news_parser(DataScraper().int_news())
#draw the logo
canvas.paste(im=logo,box=(0,0))
drawing.text(xy=(__TEXT_POSN_X+200,__TEXT_POSN_Y-40),text=self.INT_NEWS_TITLE,font=self.LOGO_TITLE_FONT,fill=self.TITLE_FONT_COLOR )
_lines+=1
#draw updated time
last_updated = time.strftime("Last Updated: %x At %X %p")
drawing.text(xy=(self.TILE_SIZE[0]-225,self.TILE_SIZE[1]-15),text=last_updated,font=self.LU_FONT,fill=self.LU_FONT_COLOR)
for news_article in __LOCAL_NEWS:
drawing.multiline_text(xy=(__TEXT_POSN_X,__TEXT_POSN_Y+(__SPACING_BETN_LINES*_lines)),text=news_article,font=self.FONT,fill=self.FONT_COLOR)
_lines += 1
if "\n" in news_article:
_lines += news_article.count("\n")
return canvas
# canvas.save("int_news.png")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 14:56:30 2020
@author: sanja
"""
import numpy as np
from matplotlib import pyplot as plt
import cv2
import binascii
img = cv2.imread('4119.png')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY ) # Converting RGB to gray
#height, width = fi # Find height and width of image
#print(filename.shape)
#with open(filename, 'rb') as f:
# content = f.read()
#print(binascii.hexlify(content))
#print(len(content))
#print(img)
height, width = img.shape # Find height and width of image
#img= str(img)
img1 = ""
#print(img1)
for i in range(width):
for j in range(height):
if int(img[j][i]) < 10:
img1 = img1 + "00" + str(int(img[j][i]))
elif int(img[j][i]) < 100:
img1 = img1 + "0" + str(img[j][i])
else:
img1 = img1 + str(img[j][i])
#print(img1)
#img_String=''
#for i in range(width):
# for j in range(height):
# img_String= img_String + str(img1[j][i])
#img_String.replace("0x","")
#print(img_String)
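# Vectorized alternative (added for illustration): the nested loops above walk
# the image column by column and zero-pad every grayscale value to three
# digits; string formatting can build the same string in one pass:
# img1 = "".join("%03d" % v for v in img.T.flatten())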
|
nilq/baby-python
|
python
|
from ConexionSQL import ConexionSQL
def clean_api_count():
conSql = ConexionSQL()
conn = conSql.getConexion()
cur = conSql.getCursor()
query = """DELETE FROM tokens_count WHERE tiempo < (current_timestamp - interval \'15 minutes\');"""
cur.execute(query)
conn.commit()
    old_isolation_level = conn.isolation_level
    conn.set_isolation_level(0)  # VACUUM cannot run inside a transaction block
    query = 'VACUUM FULL tokens_count'
    cur.execute(query)
    conn.set_isolation_level(old_isolation_level)
    conn.commit()
query = 'SELECT count(id) from tokens_count'
cur.execute(query)
print "quedan %d tokens_count"%cur.fetchone()[0]
if __name__ == '__main__':
clean_api_count()
|
nilq/baby-python
|
python
|
def get_knockout_options(model_class, form):
knockout_options = {
'knockout_exclude': [],
'knockout_fields': [],
'knockout_field_names': [],
'click_checked': True,
}
for item in (model_class, form):
if not item:
continue
has_fields_and_exclude = (
hasattr(item, 'knockout_exclude') and
hasattr(item, 'knockout_fields')
)
if has_fields_and_exclude:
raise Exception(
'Define knockout_exclude or knockout_fields, not both'
)
for option, default in knockout_options.items():
if hasattr(item, option):
value = getattr(item, option)
if callable(value):
knockout_options[option] = value()
else:
knockout_options[option] = value
return knockout_options
def get_knockout_field_options(
field,
knockout_fields,
knockout_exclude,
knockout_field_names
):
exclude = (
(knockout_fields and field.name not in knockout_fields) or
(field.name in knockout_exclude)
)
if field.name in knockout_field_names:
field_name = knockout_field_names[field.name]
else:
field_name = field.name
return exclude, field_name
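# Usage sketch (added for illustration; the model below is hypothetical):
#
# class Task(models.Model):
#     title = models.CharField(max_length=100)
#     secret = models.CharField(max_length=100)
#     knockout_exclude = ['secret']
#
# get_knockout_options(Task, None)['knockout_exclude'] would yield ['secret'],
# and get_knockout_field_options(field, [], ['secret'], {}) marks the field
# named 'secret' as excluded while leaving its display name unchanged.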
|
nilq/baby-python
|
python
|
# from coursesical.course import *
from course import *
def test0():
t = TimeTable([("08:00", "10:10")])
s = Semester("2021-03-01", t)
r = RawCourse(
name="通用魔法理论基础(2)",
group="(下课派:DD23333;疼逊会议)",
teacher="伊蕾娜",
zc="1-16(周)",
classroom="王立瑟雷斯特利亚",
weekday=0,
time=0,
text="""71010223-1
通用魔法理论基础(2)
(下课派:DD23333;疼逊会议)
伊蕾娜
1-16(周)
王立瑟雷斯特利亚
"""
)
c = Course(s, r)
print(c.name, c.class_begin, c.class_over, c.until)
print(new_course(s, r))
def test1():
t = TimeTable([("08:00", "09:40"), ("10:00", "11:40"), ("14:30", "16:10"), ("16:30", "18:10"), ("19:30", "21:10")])
s = Semester("2021-03-01", t)
r = RawCourse(
name="形势与政策(20212)",
group="",
teacher="思政",
zc="12,14-16(周)",
classroom="教三十楼B座709",
weekday=1,
time=2,
text="""71420212-41
形势与政策(20212)
思政
12,14(周)
教三十楼B座709
星期六 第六大节
"""
)
for c in new_course(s, r):
print(c.name, c.class_begin, c.class_over, c.until)
print(new_course(s, r))
if __name__ == "__main__":
print('---0:')
test0()
print('---1:')
test1()
|
nilq/baby-python
|
python
|
from django.conf.urls import url
from bluebottle.funding_flutterwave.views import FlutterwavePaymentList, FlutterwaveWebhookView, \
FlutterwaveBankAccountAccountList, FlutterwaveBankAccountAccountDetail
urlpatterns = [
url(r'^/payments/$',
FlutterwavePaymentList.as_view(),
name='flutterwave-payment-list'),
url(r'^/webhook/$',
FlutterwaveWebhookView.as_view(),
name='flutterwave-payment-webhook'),
url(r'^/bank-accounts/$',
FlutterwaveBankAccountAccountList.as_view(),
name='flutterwave-external-account-list'),
url(r'^/bank-accounts/(?P<pk>[\d]+)$',
FlutterwaveBankAccountAccountDetail.as_view(),
name='flutterwave-external-account-detail'),
]
|
nilq/baby-python
|
python
|
"""
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
DOF control methods example
---------------------------
An example that demonstrates various DOF control methods:
- Load cartpole asset from an urdf
- Get/set DOF properties
- Set DOF position and velocity targets
- Get DOF positions
- Apply DOF efforts
"""
import math
from isaacgym import gymapi
from isaacgym import gymutil
from isaacgym import gymtorch
import torch
import time
def QUEST_Algo():
# Average of quaternions.
pass
# initialize gym
gym = gymapi.acquire_gym()
# parse arguments
args = gymutil.parse_arguments(description="Joint control Methods Example")
# create a simulator
sim_params = gymapi.SimParams()
sim_params.substeps = 2
sim_params.dt = 1.0 / 1000.0
# sim_params.flex.shape_collision_margin = 0.25
# sim_params.flex.num_outer_iterations = 4
# sim_params.flex.num_inner_iterations = 10
# sim_params.flex.solver_type = 2
# sim_params.flex.deterministic_mode = 1
sim_params.physx.solver_type = 1
sim_params.physx.num_position_iterations = 4
sim_params.physx.num_velocity_iterations = 1
sim_params.physx.num_threads = args.num_threads
sim_params.physx.use_gpu = args.use_gpu
sim_params.use_gpu_pipeline = False
# sim_params.gravity = gymapi.Vec3(0.0, 0.0, 0.0)
if args.use_gpu_pipeline:
print("WARNING: Forcing CPU pipeline.")
device = 'cpu'
sim = gym.create_sim(args.compute_device_id, args.graphics_device_id, args.physics_engine, sim_params)
# sim = gym.create_sim(args.compute_device_id, args.graphics_device_id, gymapi.SIM_FLEX, sim_params)
if sim is None:
print("*** Failed to create sim")
quit()
# create viewer using the default camera properties
viewer = gym.create_viewer(sim, gymapi.CameraProperties())
if viewer is None:
raise ValueError('*** Failed to create viewer')
# add ground plane
plane_params = gymapi.PlaneParams()
plane_params.static_friction = 0.0
plane_params.dynamic_friction = 0.0
gym.add_ground(sim, gymapi.PlaneParams())
# set up the env grid
num_envs = 1
spacing = 1.5
env_lower = gymapi.Vec3(-spacing, 0.0, -spacing)
env_upper = gymapi.Vec3(spacing, 0.0, spacing)
collision_group = 0
collision_filter = 0
# add cartpole urdf asset
asset_root = "../../assets"
asset_file = "urdf/RodAssembly/urdf/RodAssembly.urdf"
# Load asset with default control type of position for all joints
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = False
asset_options.angular_damping = 1
asset_options.max_angular_velocity = 100
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS
print("Loading asset '%s' from '%s'" % (asset_file, asset_root))
post_asset = gym.load_asset(sim, asset_root, asset_file, asset_options)
asset_options.fix_base_link = False
sling_asset = gym.load_asset(sim, asset_root, asset_file, asset_options)
# initial root pose for cartpole actors
initial_pose = gymapi.Transform()
# Create environment 0
# Cart held steady using position target mode.
# Pole held at a 45 degree angle using position target mode.
env0 = gym.create_env(sim, env_lower, env_upper, 2)
radius = 0.05
theta = torch.tensor(0*3.1415/180)
initial_pose.p = gymapi.Vec3(radius*torch.cos(theta), 0.25, radius*torch.sin(theta))
initial_pose.r = gymapi.Quat.from_euler_zyx(-3.1415/4, 0, 0)
Sling = gym.create_actor(env0, sling_asset, initial_pose, 'Sling', collision_group, collision_filter)
theta = torch.tensor(120*3.1415/180)
initial_pose.p = gymapi.Vec3(radius*torch.cos(theta), 0.25, radius*torch.sin(theta))
initial_pose.r = gymapi.Quat.from_euler_zyx(-3.1415/4, 3.1415*2/3, 0)
LeftPost = gym.create_actor(env0, post_asset, initial_pose, 'LeftPost', collision_group, collision_filter)
theta = torch.tensor(240*3.1415/180)
initial_pose.p = gymapi.Vec3(radius*torch.cos(theta), 0.25, radius*torch.sin(theta))
initial_pose.r = gymapi.Quat.from_euler_zyx(-3.1415/4, 3.1415*4/3, 0)
RightPost = gym.create_actor(env0, post_asset, initial_pose, 'RightPost', collision_group, collision_filter)
gym.set_rigid_body_color(env0, Sling, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06))
gym.set_rigid_body_color(env0, Sling, 1, gymapi.MESH_VISUAL, gymapi.Vec3(0.06, 0.97, 0.38))
gym.set_rigid_body_color(env0, Sling, 2, gymapi.MESH_VISUAL, gymapi.Vec3(0.38, 0.06, 0.97))
gym.set_rigid_body_color(env0, LeftPost, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06))
gym.set_rigid_body_color(env0, LeftPost, 1, gymapi.MESH_VISUAL, gymapi.Vec3(0.06, 0.97, 0.38))
gym.set_rigid_body_color(env0, LeftPost, 2, gymapi.MESH_VISUAL, gymapi.Vec3(0.38, 0.06, 0.97))
gym.set_rigid_body_color(env0, RightPost, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06))
gym.set_rigid_body_color(env0, RightPost, 1, gymapi.MESH_VISUAL, gymapi.Vec3(0.06, 0.97, 0.38))
gym.set_rigid_body_color(env0, RightPost, 2, gymapi.MESH_VISUAL, gymapi.Vec3(0.38, 0.06, 0.97))
# Look at the first env
cam_pos = gymapi.Vec3(0.5, 0.5, 0)
cam_target = gymapi.Vec3(0, 0, 0)
gym.viewer_camera_look_at(viewer, None, cam_pos, cam_target)
num_actors = gym.get_actor_count(env0)
num_bodies = gym.get_env_rigid_body_count(env0)
# Get state tensors
rb_state_tensor = gym.acquire_rigid_body_state_tensor(sim)
rb_state = gymtorch.wrap_tensor(rb_state_tensor)
print(rb_state.shape)
rb_pos = rb_state.view(num_bodies, 13)[:,0:3] #(num_envs, num_rigid_bodies, 13)[pos,ori,Lin-vel,Ang-vel]
rb_ori = rb_state.view(num_bodies, 13)[:,3:7] #(num_envs, num_rigid_bodies, 13)[pos,ori,Lin-vel,Ang-vel]
rb_lin_vel = rb_state.view(num_bodies, 13)[:,7:10] #(num_envs, num_rigid_bodies, 13)[pos,ori,Lin-vel,Ang-vel]
rb_ang_vel = rb_state.view(num_bodies, 13)[:,10:13] #(num_envs, num_rigid_bodies, 13)[pos,ori,Lin-vel,Ang-vel]
# gym.refresh_dof_state_tensor(sim)
# gym.refresh_actor_root_state_tensor(sim)
gym.refresh_rigid_body_state_tensor(sim)
print('rb_pos')
print(rb_pos)
body_names = [gym.get_asset_rigid_body_name(post_asset, i) for i in range(gym.get_asset_rigid_body_count(post_asset))]
extremity_names = [s for s in body_names if "endpoint" in s]
extremity_indices = [gym.find_asset_rigid_body_index(post_asset, name) for name in extremity_names]
print(body_names)
print(extremity_names)
print(extremity_indices)
# Simulate
spring_coff = 50
damping_coff = 0.999
spring_length = 0.0
frame_count = 0
connection_list = []
# (1,2),(4,5),(7,8)
# Connect All Bottoms
connection_list.append((1, 4, 0.1))
connection_list.append((1, 7, 0.1))
connection_list.append((4, 7, 0.1))
#Connect All Tops
connection_list.append((2, 5, 0.1))
connection_list.append((2, 8, 0.1))
connection_list.append((5, 8, 0.1))
#Top1 to Bottom2
connection_list.append((2, 4, 0.1)) #Body0 top is connected to Body1 bottom
#Top2 to Bottom3
connection_list.append((5, 7, 0.1)) #Body0 top is connected to Body1 bottom
#Top3 to Bottom1
connection_list.append((8, 1, 0.1)) #Body0 top is connected to Body1 bottom
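# The nine cables above tie the three rods into a classic three-strut
# tensegrity prism: bottoms ringed together, tops ringed together, and one
# diagonal cable from each rod's top to the next rod's bottom.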
centerleftright = 1
counter = torch.tensor(0)
while not gym.query_viewer_has_closed(viewer):
# time.sleep(2)
    spring_length_multiplier = torch.cos(counter/100)*0.8 + 1  # scales the specified rest length between 0.2x and 1.8x
counter += 1
gym.refresh_rigid_body_state_tensor(sim)
forces = torch.zeros((num_envs, num_bodies, 3), device=device, dtype=torch.float)
force_positions = rb_pos.clone()
num_lines = len(connection_list)
line_vertices = torch.zeros((num_lines*2,3), device=device, dtype=torch.float)
line_colors = torch.zeros((num_lines,3), device=device, dtype=torch.float)
i = 0
for connection in connection_list:
# print(connection)
P1 = force_positions[connection[0],:]
P2 = force_positions[connection[1],:]
spring_constant = spring_coff
spring_length = connection[2]*spring_length_multiplier
endpoint_distance = torch.norm(P1-P2)
endpoint_normalized_vector = (P1-P2)/endpoint_distance
spring_force = spring_constant*(endpoint_distance-spring_length)
# Set springs to only work for tension and not compression
        spring_force = torch.clamp(spring_force, min=0.0)
        applied_force = endpoint_normalized_vector*spring_force
# R2 = (P2-P1)/N
# F1 = torch.max(torch.tensor(spring_constant*R1*(N-spring_length)), torch.zeros_like(N))
# F1 = torch.min(torch.tensor(spring_constant*R1*(N-spring_length)), torch.tensor(0))
print('Spring {} Tension = {}'.format(i, spring_force))
        forces[0, connection[0], :] -= applied_force
        forces[0, connection[1], :] += applied_force
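        # Damping (added for clarity): project the two endpoint velocities
        # onto the spring axis; the difference of the projections is the rate
        # at which the spring is stretching, and the damping force opposes it
        # along the same axis.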
        endpoint_velocities = torch.zeros((2, 3), device=device, dtype=torch.float)
        endpoint_velocities[0, :] = rb_lin_vel[connection[0], :]
        endpoint_velocities[1, :] = rb_lin_vel[connection[1], :]
        axis_column = torch.unsqueeze(endpoint_normalized_vector, 1)
        difference_row = torch.tensor([[-1, 1]], device=device, dtype=torch.float)
        relative_axial_speed = torch.matmul(difference_row, torch.matmul(endpoint_velocities, axis_column))
        damping_force = torch.squeeze(endpoint_normalized_vector*relative_axial_speed*damping_coff)
        forces[0, connection[0], :] += damping_force
        forces[0, connection[1], :] -= damping_force
line_vertices[i*2,:] = force_positions[connection[0],:]
line_vertices[i*2+1,:] = force_positions[connection[1],:]
line_colors[i,:] = torch.tensor([1.0, 0.0, 0.0])
i += 1
# print('forces')
# print(forces)
# print('force_positions')
# print(force_positions)
# if((frame_count % 1000) == 0):
# forces[0, 0, :] += torch.tensor([0.0, 0.0, 100.0])
gym.apply_rigid_body_force_at_pos_tensors(sim, gymtorch.unwrap_tensor(forces), gymtorch.unwrap_tensor(force_positions), gymapi.ENV_SPACE)
# Draw Lines
# print('line_verts')
# print(line_vertices)
gym.clear_lines(viewer)
gym.add_lines(viewer, env0, num_lines, line_vertices, line_colors)
frame_count += 1
# step the physics
gym.simulate(sim)
gym.fetch_results(sim, True)
# update the viewer
gym.step_graphics(sim)
gym.draw_viewer(viewer, sim, True)
# Wait for dt to elapse in real time.
# This synchronizes the physics simulation with the rendering rate.
gym.sync_frame_time(sim)
print('Done')
gym.destroy_viewer(viewer)
gym.destroy_sim(sim)
|
nilq/baby-python
|
python
|
from django.urls import path
from .views import encuesta
urlpatterns = [
path('', encuesta, name='encuesta'),
]
|
nilq/baby-python
|
python
|
#
# PySNMP MIB module REDSTONE-TC (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/REDSTONE-TC
# Produced by pysmi-0.3.4 at Mon Apr 29 20:46:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
rsMgmt, = mibBuilder.importSymbols("REDSTONE-SMI", "rsMgmt")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter32, MibIdentifier, Gauge32, ObjectIdentity, TimeTicks, NotificationType, ModuleIdentity, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, IpAddress, Unsigned32, iso, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "MibIdentifier", "Gauge32", "ObjectIdentity", "TimeTicks", "NotificationType", "ModuleIdentity", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "IpAddress", "Unsigned32", "iso", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
rsTextualConventions = ModuleIdentity((1, 3, 6, 1, 4, 1, 2773, 2, 1))
rsTextualConventions.setRevisions(('1998-01-01 00:00',))
if mibBuilder.loadTexts: rsTextualConventions.setLastUpdated('9801010000Z')
if mibBuilder.loadTexts: rsTextualConventions.setOrganization('Redstone Communications, Inc.')
class RsEnable(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("disable", 0), ("enable", 1))
class RsName(DisplayString):
status = 'current'
subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(0, 15)
class RsNextIfIndex(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647)
class RsIpAddrLessIf(TextualConvention, IpAddress):
status = 'current'
class RsTimeSlotMap(TextualConvention, OctetString):
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
fixedLength = 4
class RsAcctngAdminType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("disabled", 0), ("enabled", 1))
class RsAcctngOperType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2))
namedValues = NamedValues(("disable", 0), ("enable", 1), ("notSupported", 2))
mibBuilder.exportSymbols("REDSTONE-TC", RsAcctngOperType=RsAcctngOperType, rsTextualConventions=rsTextualConventions, RsAcctngAdminType=RsAcctngAdminType, RsName=RsName, PYSNMP_MODULE_ID=rsTextualConventions, RsEnable=RsEnable, RsIpAddrLessIf=RsIpAddrLessIf, RsNextIfIndex=RsNextIfIndex, RsTimeSlotMap=RsTimeSlotMap)
|
nilq/baby-python
|
python
|
import asyncio
import aiohttp
import time
import sys
from aiohttp.client_exceptions import ClientConnectorError
try:
    from aiohttp import ClientError
except ImportError:
    from aiohttp import ClientProxyConnectionError as ClientError
from proxypool.db import RedisClient
from proxypool.setting import *
class Tester(object):
def __init__(self, redis_key):
self.redis = RedisClient(redis_key)
async def test_single_proxy(self, proxy):
"""
        Test a single proxy
:param proxy:
:return:
"""
conn = aiohttp.TCPConnector(ssl=False)
async with aiohttp.ClientSession(connector=conn) as session:
try:
if isinstance(proxy, bytes):
proxy = proxy.decode('utf-8')
real_proxy = 'http://' + proxy
                print('Testing', proxy)
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en;q=0.9,ja;q=0.8,fr;q=0.7',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0',
# 'Upgrade-Insecure-Requests': 1,
'Connection': 'close',
}
async with session.get(TEST_URL, headers=headers, proxy=real_proxy, timeout=TIMEOUT, allow_redirects=False) as response:
if response.status in VALID_STATUS_CODES:
self.redis.max(proxy)
                        print('Proxy is valid', proxy)
else:
self.redis.decrease(proxy)
                        print('Invalid response status code', response.status, 'IP', proxy)
except (ClientError, ClientConnectorError, asyncio.TimeoutError, AttributeError):
self.redis.decrease(proxy)
                print('Proxy request failed', proxy)
def run(self):
"""
        Main test loop
:return:
"""
        print('Tester is starting')
try:
count = self.redis.count()
            print('Currently', count, 'proxies remaining')
for i in range(0, count, BATCH_TEST_SIZE):
start = i
stop = min(i + BATCH_TEST_SIZE, count)
                print('Testing proxies', start + 1, 'to', stop)
test_proxies = self.redis.batch(start, stop)
loop = asyncio.get_event_loop()
tasks = [self.test_single_proxy(proxy) for proxy in test_proxies]
loop.run_until_complete(asyncio.wait(tasks))
sys.stdout.flush()
time.sleep(5)
except Exception as e:
            print('Tester error', e.args)
if __name__ == '__main__':
    # Tester requires a redis key; 'proxies' here is an illustrative placeholder
    tester = Tester('proxies')
    while True:
        print('Tester is starting')
tester.run()
time.sleep(20)
|
nilq/baby-python
|
python
|
import sys
sys.path.append("..")
import cv2
from CORE.streamServerDependency.camera import Camera
c = Camera()
cv2.namedWindow("test")
while True:
cv2.imshow("test", c.image)
cv2.waitKey(1)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import image_cropping.fields
class Migration(migrations.Migration):
dependencies = [
('artwork', '0006_auto_20151010_2243'),
]
operations = [
migrations.AlterField(
model_name='artwork',
name='thumbnail',
field=image_cropping.fields.ImageRatioField('image', '640x400', hide_image_field=False, adapt_rotation=False, size_warning=True, verbose_name='thumbnail', help_text=None, allow_fullsize=False, free_crop=False),
),
]
|
nilq/baby-python
|
python
|
import image2ascii.boot
import image2ascii.lib
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--filename', required=True, type=str)
parser.add_argument('-W', '--width', type=int)
parser.add_argument('-H', '--height', type=int)
parser.add_argument('-greysave', action='store_true')
parser.add_argument('-colorsave', action='store_true')
args = parser.parse_args()
image2ascii.boot.BootScreen()
image2ascii.lib.Create( filename = args.filename, \
width = args.width, \
height = args.height, \
greySave = args.greysave, \
colorSave = args.colorsave )
if __name__ == '__main__':
main()
print()
|
nilq/baby-python
|
python
|
import itertools
from intcode import Computer
def run(data):
code = [int(c) for c in data.split(',')]
return find_max_thrust(code)[-1], find_max_thrust_feedback(code)[-1]
def find_max_thrust(code):
    max_thrust = 0
    best = None
for phases in itertools.permutations(range(5), 5):
val = 0
for phase in phases:
c = Computer(code)
c.run([phase, val])
val = c.output[0]
if c.output[0] > max_thrust:
max_thrust = c.output[0]
best = phases
return best, max_thrust
def find_max_thrust_feedback(code):
    max_thrust = 0
    best = None
for phases in itertools.permutations(range(5, 10), 5):
amps = [Computer(code, id=i) for i in range(5)]
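        # Wire the amplifiers into a ring: amps[i-1] feeds amps[i], and at
        # i = 0 the last amplifier links back to the first, closing the
        # feedback loop.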
for i, (phase, amp) in enumerate(zip(phases, amps)):
amps[i-1].connect_sink(amp)
amp.send_input([phase])
amps[0].send_input([0])
while any(amp.running for amp in amps):
for amp in amps:
amp.run()
if amps[-1].output[0] > max_thrust:
max_thrust = amps[-1].output[0]
best = phases
return best, max_thrust
if __name__ == '__main__':
from aocd.models import Puzzle
assert find_max_thrust([3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0]) == ((4, 3, 2, 1, 0), 43210)
assert (find_max_thrust([3,23,3,24,1002,24,10,24,1002,23,-1,23,101,5,23,23,1,24,23,23,4,23,99,0,0])
== ((0, 1, 2, 3, 4), 54321))
assert (find_max_thrust([3,31,3,32,1002,32,10,32,1001,31,-2,31,1007,31,0,33,1002,33,7,33,1,33,31,31,1,32,31,31,4,31,99,0,0,0])
== ((1, 0, 4, 3, 2), 65210))
assert (find_max_thrust_feedback([3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,27,4,27,1001,28,-1,28,1005,28,6,99,0,0,5])
== ((9, 8, 7, 6, 5), 139629729))
assert (find_max_thrust_feedback([3,52,1001,52,-5,52,3,53,1,52,56,54,1007,54,5,55,1005,55,26,1001,54,
-5,54,1105,1,12,1,53,54,53,1008,54,0,55,1001,55,1,55,2,53,55,53,4,
53,1001,56,-1,56,1005,56,6,99,0,0,0,0,10])
== ((9, 7, 8, 5, 6), 18216))
puz = Puzzle(2019, 7)
part_a, part_b = run(puz.input_data)
puz.answer_a = part_a
print(f'Part 1: {puz.answer_a}')
puz.answer_b = part_b
print(f'Part 2: {puz.answer_b}')
|
nilq/baby-python
|
python
|
'''
Development Test Module
'''
# import os
import argparse
from dotenv import load_dotenv
#from pyspreader.client import SpreadClient, MSSQLSpreadClient
from pyspreader.worker import SpreadWorker
load_dotenv(verbose=True)
if __name__ == '__main__':
# cli = MSSQLSpreadClient(connection_string=os.environ.get('SPREADER_LIVE_DSN'), debug=True)
# cli.agent_name = 'Test Agent'
# agentid = cli.connect()
# print('Current Agent ID is', agentid)
parser = argparse.ArgumentParser(prefix_chars='/')
parser.add_argument('/id', required=True)
xargs = parser.parse_args()
print('*******************************')
worker = SpreadWorker(debug=True, id=xargs.id)
print('SpreadWorker: ', worker)
print('Starting...')
worker.start()
print('Waiting for Client to close process...')
worker.wait_for_worker_close()
print('Finished')
|
nilq/baby-python
|
python
|
"""
Django settings for ac_mediator project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
import raven
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', 'default_secret_key')
# Debug, allowed hosts and database
if os.getenv('DEPLOY_ENV', 'dev') == 'prod':
if SECRET_KEY == 'default_secret_key':
print("Please configure your secret key by setting DJANGO_SECRET_KEY environment variable")
DEBUG = False
ALLOWED_HOSTS = ['localhost', 'asplab-web1', 'm.audiocommons.org', 'asplab-web1.s.upf.edu', 'docker.sb.upf.edu']
else:
DEBUG = True
DATABASE_URL_ENV_NAME = 'DJANGO_DATABASE_URL'
DATABASES = {'default': dj_database_url.config(
DATABASE_URL_ENV_NAME, default='postgres://postgres:postgres@db/ac_mediator')}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'accounts',
'api',
'rest_framework',
'oauth2_provider',
'developers',
'services',
'docs',
'raven.contrib.django.raven_compat',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ac_mediator.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ac_mediator.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATIC_ROOT = '/static/'
# API settings
ALLOW_UNAUTHENTICATED_API_REQUESTS_ON_DEBUG = True
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': ('rest_framework.renderers.JSONRenderer',),
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.ext.rest_framework.OAuth2Authentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated' if not DEBUG or not ALLOW_UNAUTHENTICATED_API_REQUESTS_ON_DEBUG
else 'rest_framework.permissions.AllowAny',
),
'EXCEPTION_HANDLER': 'api.utils.custom_exception_handler',
'URL_FORMAT_OVERRIDE': None, # disable DRF use of 'format' parameter (we have our own)
}
OAUTH2_PROVIDER_APPLICATION_MODEL = 'api.ApiClient'
OAUTH2_PROVIDER = {
'ACCESS_TOKEN_EXPIRE_SECONDS': 60*60*24, # 1 day
    'REFRESH_TOKEN_EXPIRE_SECONDS': 60*60*24*14,  # 2 weeks
'AUTHORIZATION_CODE_EXPIRE_SECONDS': 10*60, # 10 minutes
'SCOPES': {'read': 'Read scope'},
'OAUTH2_VALIDATOR_CLASS': 'api.utils.ACOAuth2Validator',
}
JSON_LD_FORMAT_KEY = 'jsonld'
JSON_FORMAT_KEY = 'json'
DEFAULT_RESPONSE_FORMAT = JSON_FORMAT_KEY
# Registration
AUTH_USER_MODEL = 'accounts.Account'
LOGIN_URL = '/login/'
LOGOUT_URL = '/'
LOGIN_REDIRECT_URL = '/'
# Site
BASE_URL = os.getenv('DJANGO_BASE_URL', 'http://example.com')
# Documentation
DOCS_ROOT = os.path.join(BASE_DIR, 'docs/_build/html')
DOCS_ACCESS = 'public'
# Redis
REDIS_HOST = 'redis' # Host where redis is running (we use docker alias here)
REDIS_PORT = 6379
# Celery
CELERY_BROKER_URL = "redis://redis"
CELERY_RESULT_BACKEND = "redis://redis"
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = 'Europe/Madrid'
# Set this to False so that, in DEBUG mode, requests are submitted sequentially from the
# webserver instead of in parallel via Celery. This is useful because Celery workers then
# don't need to be restarted when making changes to the code
USE_CELERY_IN_DEBUG_MODE = False
# Shared responses backend and async responses
DELETE_RESPONSES_AFTER_CONSUMED = False
RESPONSE_EXPIRY_TIME = 3600*24 # Response objects are deleted after 24 hours
RAVEN_CONFIG = {
'dsn': os.getenv('SENTRY_DSN', None),
}
# Email configuration
DEFAULT_FROM_EMAIL = 'Audio Commons <audiocommons@upf.edu>'
EMAIL_SUBJECT_PREFIX = '[AudioCommons] '
EMAIL_HOST = 'smtp-rec.upf.edu'
EMAIL_PORT = 25
if DEBUG:
# In development environment, use email file backend
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = os.path.join(BASE_DIR, "mail")
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(levelname)s %(message)s'
},
'simplest': {
'format': '%(message)s'
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'stdout': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
'gelf': {
'class': 'logging.NullHandler', # This will be redefined later if configuration is provided
},
},
'loggers': {
'management': {
'handlers': ['stdout', 'gelf'],
'level': 'INFO',
'propagate': False,
},
},
}
if DEBUG:
# In development we log all requests made into a file
LOGS_BASE_DIR = os.path.join(BASE_DIR, 'logs')
if not os.path.exists(LOGS_BASE_DIR):
os.makedirs(LOGS_BASE_DIR)
LOGGING['handlers'].update({
'logfile_requests': {
'class': 'logging.FileHandler',
'filename': os.path.join(LOGS_BASE_DIR, 'requests.log'),
'formatter': 'simplest'
}
})
LOGGING['loggers'].update({
'requests_sent': {
'handlers': ['logfile_requests'],
'level': 'INFO',
'propagate': False,
}
})
# Read logserver config settings, if present, then update the corresponding handler
GELF_IP_ADDRESS = os.getenv('GELF_IP_ADDRESS', None)
GELF_PORT = int(os.getenv('GELF_PORT', 0))
if GELF_IP_ADDRESS is not None and GELF_PORT != 0:  # int(...) above means GELF_PORT is never None
LOGGING['handlers'].update(
{
'gelf': {
'level': 'INFO',
'class': 'graypy.GELFHandler',
'host': GELF_IP_ADDRESS,
'port': GELF_PORT,
'formatter': 'simple',
},
}
)
|
nilq/baby-python
|
python
|
import time
import aiohttp
import asyncio
import statistics
runs = []
async def fetch(session, url):
async with session.get(url) as response:
return await response.text()
async def main(loop):
for i in range(3):
latencies = []
expected_response = ','.join(['OK']*100)
async def iterate():
nonlocal latencies
start = time.time()
async with aiohttp.ClientSession() as session:
response = await fetch(session, 'http://localhost:1995')
try:
assert response == expected_response
except AssertionError as e:
print(e)
latencies.append(time.time() - start)
coroutines = [asyncio.create_task(iterate()) for _ in range(100)]
await asyncio.gather(*coroutines)
runs.append((statistics.mean(latencies), statistics.stdev(latencies), max(latencies)))
loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
print(f"Mean Latency: {statistics.mean([run[0] for run in runs])}, Standard Deviation: {statistics.mean([run[1] for run in runs])}, Max Latency: {statistics.mean([run[2] for run in runs])}")
|
nilq/baby-python
|
python
|
import logging
import torch.nn
from torch_scatter import scatter
from nequip.data import AtomicDataDict
from nequip.utils import instantiate_from_cls_name
class SimpleLoss:
"""wrapper to compute weighted loss function
if atomic_weight_on is True, the loss function will search for
AtomicDataDict.WEIGHTS_KEY+key in the reference data.
Args:
func_name (str): any loss function defined in torch.nn that
takes "reduction=none" as init argument, uses prediction tensor,
and reference tensor for its call functions, and outputs a vector
with the same shape as pred/ref
params (str): arguments needed to initialize the function above
"""
def __init__(self, func_name: str, params: dict = {}):
func, _ = instantiate_from_cls_name(
torch.nn,
class_name=func_name,
prefix="",
positional_args=dict(reduction="none"),
optional_args=params,
all_args={},
)
self.func = func
def __call__(
self,
pred: dict,
ref: dict,
key: str,
atomic_weight_on: bool = False,
mean: bool = True,
):
loss = self.func(pred[key], ref[key])
weights_key = AtomicDataDict.WEIGHTS_KEY + key
if weights_key in ref and atomic_weight_on:
weights = ref[weights_key]
# TO DO
if mean:
return (loss * weights).mean() / weights.mean()
else:
raise NotImplementedError(
"metrics and running stat needs to be compatible with this"
)
else:
if mean:
return loss.mean()
else:
return loss
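# Example usage (a sketch, not from the source: assumes `pred` and `ref` are dicts of
# tensors sharing a key such as "forces"; any torch.nn loss that accepts
# reduction="none", e.g. "MSELoss", can be named):
#   loss_fn = SimpleLoss("MSELoss")
#   value = loss_fn(pred, ref, key="forces", atomic_weight_on=False, mean=True)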
class PerSpeciesLoss(SimpleLoss):
"""Compute loss for each species and average among the same species
before summing them up.
Args same as SimpleLoss
"""
def __call__(
self,
pred: dict,
ref: dict,
key: str,
atomic_weight_on: bool = False,
mean: bool = True,
):
if not mean:
raise NotImplementedError("cannot handle this yet")
per_atom_loss = self.func(pred[key], ref[key])
per_atom_loss = per_atom_loss.mean(dim=-1, keepdim=True)
# if there is atomic weights
weights_key = AtomicDataDict.WEIGHTS_KEY + key
if weights_key in ref and atomic_weight_on:
weights = ref[weights_key]
per_atom_loss = per_atom_loss * weights
else:
atomic_weight_on = False
species_index = pred[AtomicDataDict.SPECIES_INDEX_KEY]
_, inverse_species_index = torch.unique(species_index, return_inverse=True)
if atomic_weight_on:
# TO DO
per_species_weight = scatter(weights, inverse_species_index, dim=0)
per_species_loss = scatter(per_atom_loss, inverse_species_index, dim=0)
return (per_species_loss / per_species_weight).mean()
else:
return scatter(
per_atom_loss, inverse_species_index, reduce="mean", dim=0
).mean()
def find_loss_function(name: str, params):
wrapper_list = dict(
PerSpecies=PerSpeciesLoss,
)
if isinstance(name, str):
for key in wrapper_list:
if name.startswith(key):
logging.debug(f"create loss instance {wrapper_list[key]}")
return wrapper_list[key](name[len(key) :], params)
return SimpleLoss(name, params)
elif callable(name):
return name
else:
raise NotImplementedError(f"{name} Loss is not implemented")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright 2017 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# Author: Ryu Woon Jung (Leon)
from .robotis_def import *
class GroupSyncRead:
def __init__(self, port, ph, start_address, data_length):
self.port = port
self.ph = ph
self.start_address = start_address
self.data_length = data_length
self.last_result = False
self.is_param_changed = False
self.param = []
self.data_dict = {}
self.clearParam()
def makeParam(self):
if self.ph.getProtocolVersion() == 1.0:
return
if not self.data_dict: # len(self.data_dict.keys()) == 0:
return
self.param = []
for dxl_id in self.data_dict:
self.param.append(dxl_id)
def addParam(self, dxl_id):
if self.ph.getProtocolVersion() == 1.0:
return False
if dxl_id in self.data_dict: # dxl_id already exist
return False
self.data_dict[dxl_id] = [] # [0] * self.data_length
self.is_param_changed = True
return True
def removeParam(self, dxl_id):
if self.ph.getProtocolVersion() == 1.0:
return
if dxl_id not in self.data_dict: # NOT exist
return
del self.data_dict[dxl_id]
self.is_param_changed = True
def clearParam(self):
if self.ph.getProtocolVersion() == 1.0:
return
self.data_dict.clear()
def txPacket(self):
if self.ph.getProtocolVersion() == 1.0 or len(self.data_dict.keys()) == 0:
return COMM_NOT_AVAILABLE
if self.is_param_changed is True or not self.param:
self.makeParam()
return self.ph.syncReadTx(self.port, self.start_address, self.data_length, self.param,
len(self.data_dict.keys()) * 1)
def rxPacket(self):
self.last_result = False
if self.ph.getProtocolVersion() == 1.0:
return COMM_NOT_AVAILABLE
result = COMM_RX_FAIL
if len(self.data_dict.keys()) == 0:
return COMM_NOT_AVAILABLE
for dxl_id in self.data_dict:
self.data_dict[dxl_id], result, _ = self.ph.readRx(self.port, dxl_id, self.data_length)
if result != COMM_SUCCESS:
return result
if result == COMM_SUCCESS:
self.last_result = True
return result
def txRxPacket(self):
if self.ph.getProtocolVersion() == 1.0:
return COMM_NOT_AVAILABLE
result = self.txPacket()
if result != COMM_SUCCESS:
return result
return self.rxPacket()
def isAvailable(self, dxl_id, address, data_length):
if self.ph.getProtocolVersion() == 1.0 or self.last_result is False or dxl_id not in self.data_dict:
return False
if (address < self.start_address) or (self.start_address + self.data_length - data_length < address):
return False
return True
def getData(self, dxl_id, address, data_length):
if not self.isAvailable(dxl_id, address, data_length):
return 0
if data_length == 1:
return self.data_dict[dxl_id][address - self.start_address]
elif data_length == 2:
return DXL_MAKEWORD(self.data_dict[dxl_id][address - self.start_address],
self.data_dict[dxl_id][address - self.start_address + 1])
elif data_length == 4:
return DXL_MAKEDWORD(DXL_MAKEWORD(self.data_dict[dxl_id][address - self.start_address + 0],
self.data_dict[dxl_id][address - self.start_address + 1]),
DXL_MAKEWORD(self.data_dict[dxl_id][address - self.start_address + 2],
self.data_dict[dxl_id][address - self.start_address + 3]))
else:
return 0
|
nilq/baby-python
|
python
|
import pandas as pd
import S3Api
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, ENGLISH_STOP_WORDS
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.decomposition import PCA
import numpy as np
import plotly.express as px
from sklearn import preprocessing
from sklearn.cluster import AgglomerativeClustering
import plotly.figure_factory as ff
from sklearn.cluster import DBSCAN
import os
import glob
import codecs
STORE_DATA = True
class CustomSearchClustering:
def __init__(self, file_storage, s3_api):
""" Create a new instance of the CustomSearchData class
Parameters
----------
:param file_storage: FileStorage, Required
The file storage class used to store raw/processed data
:param s3_api: S3_API, Required
The S3 api wrapper class used to store data in AWS S3
----------
"""
self._file_storage = file_storage
self._s3_api = s3_api
self.__processed_data_location = 'processed_data/search_results/cleaned_search_data.csv'
self.__processed_pdf_data_location = '/Users/sampastoriza/Documents/Programming/DataScienceDevelopment/DataSciencePortfolioCode/PandemicComparison/processed_data/corpus_data/cleaned_corpus_data.csv'
self.__clustered_visualizations_location = 'clustered_data_visualizations/search_results'
self.__clustered_data_location = 'clustered_data/search_results'
self._additional_stop_words = ['title', 'journal', 'volume', 'author', 'scholar', 'article', 'issue']
self._other_k_values = [3, 4, 6, 8, 10]
def cluster_search_data(self):
self.__clean_clustered_visualizations()
processed_df = pd.read_csv(self.__processed_data_location, index_col=False)
processed_pdf_df = pd.read_csv(self.__processed_pdf_data_location, index_col=False)
processed_df = pd.concat([processed_df, processed_pdf_df], ignore_index=True)
processed_df.to_csv('processed_data/search_results/combined_search_data.csv', index=False)
print(processed_df.head())
stop_words = ENGLISH_STOP_WORDS.union(self._additional_stop_words)
print('----------------------------------')
print('Trying count vectorizer...')
print('----------------------------------')
vectorizer = CountVectorizer(stop_words=stop_words)
self.__cluster_using_vectorizer(processed_df, vectorizer, 'count')
print('----------------------------------')
print('Trying td vectorizer...')
print('----------------------------------')
vectorizer = TfidfVectorizer(stop_words=stop_words)
self.__cluster_using_vectorizer(processed_df, vectorizer, 'tfidf')
def __clean_clustered_visualizations(self):
all_files = list(glob.iglob(f'{self.__clustered_visualizations_location}/**/*.html', recursive=True)) + \
list(glob.iglob(f'{self.__clustered_visualizations_location}/**/*.png', recursive=True))
print('Remove all files in the directory', all_files)
for f in all_files:
os.remove(f)
def __cluster_using_vectorizer(self, df, vectorizer, vectorizer_type):
normalized_label = f'normalized_{vectorizer_type}'
not_normalized_label = f'not_{normalized_label}'
v = vectorizer.fit_transform(df['text'])
vocab = vectorizer.get_feature_names()
values = v.toarray()
v_df = pd.DataFrame(values, columns=vocab)
print('----------------------------------')
print('Non normalized data')
print('----------------------------------')
df_not_normalized = pd.DataFrame(v_df)
self.__cluster(df_not_normalized, df, not_normalized_label, 'Not Normalized', vectorizer_type)
pca_analysis_results_nn = self.__run_pca_analysis(df_not_normalized, df)
df['PC0_NN'] = pca_analysis_results_nn['PC0']
df['PC1_NN'] = pca_analysis_results_nn['PC1']
df['PC2_NN'] = pca_analysis_results_nn['PC2']
print('----------------------------------')
print('Normalized data')
print('----------------------------------')
df_normalized = pd.DataFrame(preprocessing.normalize(v_df))
self.__cluster(df_normalized, df, normalized_label, 'Normalized', vectorizer_type)
pca_analysis_results_n = self.__run_pca_analysis(df_normalized, df)
self.__run_density_clustering(df_normalized, df, normalized_label)
df['PC0_N'] = pca_analysis_results_n['PC0']
df['PC1_N'] = pca_analysis_results_n['PC1']
df['PC2_N'] = pca_analysis_results_n['PC2']
print('Plotting clusters using k-means, hierarchical, and density scan')
self.__plot_clusters(df, f'{normalized_label}_calculated_k_means', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using K-Means ({vectorizer_type})')
self.__plot_clusters(df, f'{normalized_label}_3_k_means', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using K-Means (k=3) ({vectorizer_type})')
self.__plot_clusters(df, f'{normalized_label}_4_k_means', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using K-Means (k=4) ({vectorizer_type})')
self.__plot_clusters(df, f'{normalized_label}_6_k_means', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using K-Means (k=6) ({vectorizer_type})')
self.__plot_clusters(df, f'{normalized_label}_8_k_means', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using K-Means (k=8) ({vectorizer_type})')
self.__plot_clusters(df, f'{normalized_label}_10_k_means', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using K-Means (k=10) ({vectorizer_type})')
self.__plot_clusters(df, f'{not_normalized_label}_calculated_k_means', 'PC0_NN', 'PC1_NN', 'PC2_NN', f'Plot of non normalized clusters using K-Means ({vectorizer_type})')
        self.__plot_clusters(df, f'{normalized_label}_3_hierarchical', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using Hierarchical Clustering ({vectorizer_type}) (k=3)')
        self.__plot_clusters(df, f'{normalized_label}_4_hierarchical', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using Hierarchical Clustering ({vectorizer_type}) (k=4)')
        self.__plot_clusters(df, f'{normalized_label}_6_hierarchical', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using Hierarchical Clustering ({vectorizer_type}) (k=6)')
        self.__plot_clusters(df, f'{normalized_label}_8_hierarchical', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using Hierarchical Clustering ({vectorizer_type}) (k=8)')
        self.__plot_clusters(df, f'{normalized_label}_10_hierarchical', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using Hierarchical Clustering ({vectorizer_type}) (k=10)')
self.__plot_clusters(df, f'{normalized_label}_density', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using Density Scan ({vectorizer_type})')
df = df.drop(columns=['text'])
df.to_csv(f'{self.__clustered_data_location}/clustered_search_data.csv', index=False)
def __cluster(self, df, input_df, clustering_type, graph_prefix, vectorizer_type):
list_of_inertias = []
list_of_silhouette_scores = []
k_range = list(range(2, 10))
for k in k_range:
k_means = KMeans(k, max_iter=1000)
k_means.fit_predict(df)
list_of_inertias.append(k_means.inertia_)
score = metrics.silhouette_score(df, k_means.labels_, metric='correlation')
list_of_silhouette_scores.append(score)
self.plot_elbow_method(k_range, list_of_inertias, graph_prefix, vectorizer_type, clustering_type)
self.plot_silhouette_method(k_range, list_of_silhouette_scores, graph_prefix, vectorizer_type, clustering_type)
k_range_np = np.array(k_range)
sil_scores = np.array(list_of_silhouette_scores)
        # Pick the k whose silhouette score is highest
        k_value = k_range_np[sil_scores == np.max(sil_scores)][0]
        print('Best k by silhouette score', k_value)
k_means = KMeans(k_value).fit(df)
k_means_label = f'{clustering_type}_calculated_k_means_label'
input_df[k_means_label] = k_means.labels_
self.__plot_silhouette_clusters(df, k_means, k_value, vectorizer_type, clustering_type)
        print('Analysing 5 other preset k values for comparison purposes', self._other_k_values)
for random_k_value in self._other_k_values:
k_means_r = KMeans(random_k_value).fit(df)
k_means_label_r = f'{clustering_type}_{random_k_value}_k_means_label'
input_df[k_means_label_r] = k_means_r.labels_
self.__plot_silhouette_clusters(df, k_means_r, random_k_value, vectorizer_type, clustering_type)
self.__run_hierarchical_clustering(df, 3, input_df, clustering_type)
self.__run_hierarchical_clustering(df, 4, input_df, clustering_type)
self.__run_hierarchical_clustering(df, 6, input_df, clustering_type)
self.__run_hierarchical_clustering(df, 8, input_df, clustering_type)
self.__run_hierarchical_clustering(df, 10, input_df, clustering_type)
self.__plot_dendrogram(df, input_df, clustering_type, vectorizer_type)
def plot_elbow_method(self, k_range, list_of_inertias, graph_prefix, vectorizer_type, clustering_type):
print('Plotting elbow method')
plt.figure()
plt.plot(k_range, list_of_inertias, 'bx-')
plt.xlabel('k')
plt.ylabel('Inertia')
plt.title(f'Plot of elbow method using Inertia -- {graph_prefix} ({vectorizer_type})')
plt.savefig(f'{self.__clustered_visualizations_location}/elbow_method/elbow_method_{clustering_type}.png')
df = pd.DataFrame(data={'K': k_range, 'Inertia': list_of_inertias})
df.to_csv(f'{self.__clustered_data_location}/elbow_method/elbow_method_{clustering_type}.csv', index=False)
def plot_silhouette_method(self, k_range, list_of_silhouette_scores, graph_prefix, vectorizer_type, clustering_type):
print('Plotting silhouette method')
plt.figure()
plt.plot(k_range, list_of_silhouette_scores, 'bx-')
plt.xlabel('k')
plt.ylabel('Silhouette Score')
plt.title(f'Plot of silhouette method -- {graph_prefix} ({vectorizer_type})')
plt.savefig(f'{self.__clustered_visualizations_location}/silhouette_method/silhouette_method_{clustering_type}.png')
df = pd.DataFrame(data={'K': k_range, 'Silhouette Score': list_of_silhouette_scores})
df.to_csv(f'{self.__clustered_data_location}/silhouette_method/silhouette_method_{clustering_type}.csv', index=False)
def __run_pca_analysis(self, df_normalized, input_df):
print('Running PCA Analysis to reduce dimensionality')
text_pca = PCA(n_components=3)
df_normalized = np.transpose(df_normalized)
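        # Fitting on the transposed (terms x documents) matrix makes
        # text_pca.components_ a (3, n_documents) array, so components_.T below
        # yields one 3-D coordinate per document.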
text_pca.fit(df_normalized)
components = pd.DataFrame(text_pca.components_.T, columns=['PC%s' % _ for _ in range(3)])
components['topic'] = input_df['topic']
return components
def clusterByTopic(self, cluster, topic):
return cluster.value_counts()[topic] if topic in cluster.value_counts() else 0
def __plot_clusters(self, df, clustering_type, x, y, z, title):
k_means_label = f'{clustering_type}_label'
fig = px.scatter(df, x=x, y=y, text="topic", color=k_means_label, hover_data=['topic', 'link'], log_x=True,
size_max=60)
fig.update_traces(textposition='top center')
fig.update_layout(
height=800,
title_text=title
)
output_file = f'{self.__clustered_visualizations_location}/clustered_2d/{clustering_type}.html'
fig.write_html(output_file)
fig3d = px.scatter_3d(df, x=x, y=y, z=z, text="topic", color=k_means_label, hover_data=['topic', 'link'],)
fig3d.update_traces(textposition='top center')
fig3d.update_layout(
height=800,
title_text=title
)
output_file = f'{self.__clustered_visualizations_location}/clustered_3d/{clustering_type}.html'
fig3d.write_html(output_file)
print('Gathering Statistics')
statistics_df = df[['topic', k_means_label]].groupby([k_means_label]).agg(
covid=pd.NamedAgg(column='topic', aggfunc=lambda t: self.clusterByTopic(t, 'covid')),
drought=pd.NamedAgg(column='topic', aggfunc=lambda t: self.clusterByTopic(t, 'drought')),
locusts=pd.NamedAgg(column='topic', aggfunc=lambda t: self.clusterByTopic(t, 'locusts')),
ebola=pd.NamedAgg(column='topic', aggfunc=lambda t: self.clusterByTopic(t, 'ebola'))
)
statistics_df['Cluster'] = [i for i in range(statistics_df.shape[0])]
output_file = f'{self.__clustered_data_location}/clustering_statistics/{clustering_type}.csv'
statistics_df.to_csv(output_file, index=False)
print(statistics_df)
def __plot_silhouette_clusters(self, df, k_means, k_value, vectorizer_type, clustering_type):
print('Plotting silhouette clusters', k_value)
plt.figure()
# get silhouette scores
sil_coe = metrics.silhouette_samples(df, k_means.labels_)
sil_score = metrics.silhouette_score(df, k_means.labels_)
# create subplots and define range
low_range = 0
up_range = 0
# plot bar plot for each cluster
for cluster in set(k_means.labels_):
cluster_coefs = sil_coe[k_means.labels_ == cluster]
cluster_coefs.sort()
up_range += len(cluster_coefs)
plt.barh(range(low_range, up_range), cluster_coefs, height=1)
plt.text(-0.05, (up_range + low_range) / 2, str(cluster))
low_range += len(cluster_coefs)
plt.suptitle("Silhouette Coefficients for k = " + str(k_value) + " -- Vectorizer Type = " + vectorizer_type + "\n Score = " + str(round(sil_score, 2)), y=1)
plt.title("Coefficient Plots")
plt.xlabel("Silhouette Coefficients")
plt.ylabel("Cluster")
plt.yticks([])
plt.axvline(sil_score, color="red", linestyle="--")
plt.savefig(f'{self.__clustered_visualizations_location}/silhouette/silhouette_cluster_{k_value}_{clustering_type}.png')
def __run_hierarchical_clustering(self, df, k_value, input_df, clustering_type):
print('Running hierarchical clustering with k =', k_value)
clustered_data = AgglomerativeClustering(n_clusters=k_value, affinity='euclidean', linkage='ward')
fitted_data = clustered_data.fit(df)
input_df[f'{clustering_type}_{k_value}_hierarchical_label'] = fitted_data.labels_
def __plot_dendrogram(self, df, input_df, clustering_type, vectorizer_type):
print('Plotting dendrogram')
fig = ff.create_dendrogram(df, labels=input_df['topic'].to_list())
fig.update_layout(width=800, height=500, title=f'Hierarchical Clustering Dendrogram with '
f'Vectorizer Type = {vectorizer_type}')
output_file = f'{self.__clustered_visualizations_location}/dendrogram/dendrogram_{clustering_type}.html'
fig.write_html(output_file)
def __run_density_clustering(self, df, input_df, clustering_type):
print('Running density clustering')
max_clusters = 0
associated_labels = []
for i in map(lambda x: x / 10.0, range(2, 20, 2)):
for j in range(5, 40):
set_of_labels = DBSCAN(eps=i, min_samples=j, metric='cosine').fit(df).labels_
if len(set(set_of_labels)) >= max_clusters:
max_clusters = len(set(set_of_labels))
associated_labels = set_of_labels
input_df[f'{clustering_type}_density_label'] = associated_labels
print('Number of clusters for density', len(set(associated_labels)))
def store_clustered_search_data(self):
        print('Storing clustered search data in S3')
html_visualizations = list(glob.iglob(f'{self.__clustered_visualizations_location}/**/*.html', recursive=True))
for file in html_visualizations:
print('Opening file', file)
contents = codecs.open(file, 'r')
print('Uploading', file, 'to S3')
self._s3_api.upload_html(contents.read(), file.replace('clustered_data_visualizations/', ''), S3Api.S3Location.CLUSTERED_DATA_VISUALIZATIONS)
contents.close()
png_visualizations = list(glob.iglob(f'{self.__clustered_visualizations_location}/**/*.png', recursive=True))
for file in png_visualizations:
print('Opening file', file)
png = open(file, "rb")
print('Attempting to upload clustered visualized search data to s3')
self._s3_api.upload_png(png, file.replace('clustered_data_visualizations/', ''), S3Api.S3Location.CLUSTERED_DATA_VISUALIZATIONS)
print('Uploading', file, 'to S3')
print('Successfully uploaded')
png.close()
clustered_csv_data = list(glob.iglob(f'{self.__clustered_data_location}/**/*.csv', recursive=True))
for file in clustered_csv_data:
print('Opening file', file)
df = pd.read_csv(file)
print('Attempting to upload clustered search data to s3')
self._s3_api.upload_df(df, file.replace('clustered_data/', ''), S3Api.S3Location.CLUSTERED_DATA)
print('Uploading', file, 'to S3')
print('Successfully uploaded')
print('Uploaded all files')
if __name__ == '__main__':
from dotenv import load_dotenv
from FileStorage import FileStorage
load_dotenv()
fs = FileStorage()
fs.create_directory_if_not_exists('clustered_data_visualizations/search_results/elbow_method/')
fs.create_directory_if_not_exists('clustered_data_visualizations/search_results/silhouette_method/')
fs.create_directory_if_not_exists('clustered_data_visualizations/search_results/clustered_2d/')
fs.create_directory_if_not_exists('clustered_data_visualizations/search_results/clustered_3d/')
fs.create_directory_if_not_exists('clustered_data_visualizations/search_results/silhouette/')
fs.create_directory_if_not_exists('clustered_data_visualizations/search_results/dendrogram/')
fs.create_directory_if_not_exists('clustered_data/search_results/clustering_statistics/')
fs.create_directory_if_not_exists('clustered_data/search_results/elbow_method/')
fs.create_directory_if_not_exists('clustered_data/search_results/silhouette_method/')
search_clustering = CustomSearchClustering(fs, S3Api.S3Api())
search_clustering.cluster_search_data()
if STORE_DATA:
search_clustering.store_clustered_search_data()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 17:35:51 2018
@author: Dr Kaustav Das (kaustav.das@monash.edu)
"""
# import numpy as np
import copy as cp
from math import sqrt, exp, log
from collections import deque
from scipy.stats import norm
# Computes the usual Black Scholes Put/Call formula (not PutBS) for piecewise-constant
# parameters.
# S0 (float): initial spot.
# sig (float): initial volatility.
# Strk (float): strike value of the contract.
# rd_deque (deque): domestic interest rate, given backward, e.g., rd_deque = deque([rd2, rd1]).
# rf_deque (deque): foreign interest rate, given backward, e.g., rf_deque = deque([rf2, rf1]).
# dt (deque): deque of time increments over which each parameter is 'alive',
# given backward, e.g., dt = deque([dt2, dt1]). Note sum(dt) gives option maturity T.
# option (str): 'Put' or 'Call'.
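# For reference, the loop below accumulates int (rd - rf) dt, e^(-int rd dt) and
# e^(-int rf dt) over the segments; with T = sum(dt) the code then evaluates the
# standard Black-Scholes quantities
#   d+ = (log(S0/Strk) + int (rd - rf) dt) / (sig*sqrt(T)) + 0.5*sig*sqrt(T)
#   d- = d+ - sig*sqrt(T)
#   Call = S0*e^(-int rf dt)*N(d+) - Strk*e^(-int rd dt)*N(d-)
#   Put  = Strk*e^(-int rd dt)*N(-d-) - S0*e^(-int rf dt)*N(-d+)
# where N is the standard normal CDF (norm.cdf).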
def BSform_pw(S0, sig, Strk, _rd_deque, _rf_deque, _dt, option):
# Copy deques
rd_deque = cp.copy(_rd_deque)
rf_deque = cp.copy(_rf_deque)
dt = cp.copy(_dt)
# We now compute discretised versions of int (rd - rf)dt, e^(-int rd dt)
# and e^(-int rf dt), as well as T
rsumdt = 0
expmrd = 1
expmrf = 1
T = 0
    while dt:  # consume segments until the deque is empty
DT = dt.popleft()
RD = rd_deque.popleft()
RF = rf_deque.popleft()
R = RD - RF
rsumdt += R*DT
expmrd *= exp(-DT*RD)
expmrf *= exp(-DT*RF)
T += DT
sqrtT = sqrt(T)
sigsqrtT = sig*sqrtT
lograt = log(S0/Strk)
    dpl = (lograt + rsumdt)/sigsqrtT + 0.5*sigsqrtT
dm = dpl - sigsqrtT
if option == 'Put':
H = Strk*expmrd*norm.cdf(-1.0*dm) - S0*expmrf*norm.cdf(-1.0*dpl)
elif option == 'Call':
H = S0*expmrf*norm.cdf(dpl) - Strk*expmrd*norm.cdf(dm)
return H
# Example code.
if __name__ == '__main__':
S0 = 100
sig = 0.20
Strk = S0*1.01
rd3 = 0.02
rd2 = 0.01
rd1 = 0.01
rf3 = 0.00
rf2 = 0.00
rf1 = 0.00
dt3 = 1/12
dt2 = 1/12
dt1 = 1/12
rd_deque = deque([rd3, rd2, rd1])
rf_deque = deque([rf3, rf2, rf1])
dt = deque([dt3, dt2, dt1])
option = 'Put'
print(BSform_pw(S0, sig, Strk, rd_deque, rf_deque, dt, option))
|
nilq/baby-python
|
python
|
import xml.etree.ElementTree as ET
import traceback
def build_crafting_lookup():
    # TODO: Keep working on this.
"""
Returns a crafting lookup table
:return:
"""
crafting_dict = {}
itemtree = ET.parse('libs/game_data/items.xml')
itemroot = itemtree.getroot()
for item in itemroot.getchildren():
# Check if item is craftable
crafting_requirements = item.findall('craftingrequirements')
print(item.attrib['uniquename'])
# If this is greater than 0, there's items that can craft into this item
        if len(crafting_requirements) > 0:
            recipes = []
            for recipe in crafting_requirements:
                # Collect every craftresource in the recipe, not just the last one
                ingredients = []
                for ingredient in recipe.getchildren():
                    ingredients.append({
                        'uniquename': ingredient.attrib['uniquename'],
                        'count': ingredient.attrib['count'],
                    })
                recipes.append(ingredients)
            crafting_dict[item.attrib['uniquename']] = recipes
            print(recipes)
    return crafting_dict
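# Example usage (a sketch; assumes libs/game_data/items.xml exists and contains a
# craftable item such as T2_2H_BOW, whose recipe in the transcript below lists
# {'count': '32', 'uniquename': 'T2_PLANKS'}):
#   lookup = build_crafting_lookup()
#   print(lookup.get('T2_2H_BOW'))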
def build_item_lookup(localization_dictionary):
"""
Creates a dictionary of items with the localization provided.
:return: dictionary of items
"""
item_xml = ET.parse('libs/game_data/items.xml')
item_root = item_xml.getroot()
items = item_root.getchildren()
"""
Example Item Format: (T2_2H_Bow)
In [29]: ri
Out[29]: <Element 'weapon' at 0x7fd2afa6b688>
In [30]: ri.attrib
Out[30]:
{'abilitypower': '120',
'activespellslots': '3',
'attackdamage': '29',
'attackrange': '11',
'attackspeed': '1',
'attacktype': 'ranged',
'durability': '5647',
'durabilityloss_attack': '1',
'durabilityloss_receivedattack': '1',
'durabilityloss_receivedspell': '1',
'durabilityloss_spelluse': '1',
'focusfireprotectionpeneration': '0',
'fxbonename': 'LeftArm_3',
'fxboneoffset': '0.2 -0.227 0.135',
'hitpointsmax': '0',
'hitpointsregenerationbonus': '0',
'itempower': '300',
'itempowerprogressiontype': 'mainhand',
'magicspelldamagebonus': '0',
'mainhandanimationtype': 'bow',
'maxqualitylevel': '5',
'passivespellslots': '1',
'physicalspelldamagebonus': '0',
'shopcategory': 'ranged',
'shopsubcategory1': 'bow',
'slottype': 'mainhand',
'tier': '2',
'twohanded': 'true',
'uiatlas': 'RefItemAtlas',
'uniquename': 'T2_2H_BOW',
'unlockedtocraft': 'false',
'unlockedtoequip': 'false',
'weight': '3'}
In [31]: ri.getchildren()
Out[31]:
[<Element 'projectile' at 0x7fd2afa6b728>,
<Element 'SocketPreset' at 0x7fd2afa6b818>,
<Element 'craftingrequirements' at 0x7fd2afa6b868>,
<Element 'craftingspelllist' at 0x7fd2afa6b908>,
<Element 'AudioInfo' at 0x7fd2afa6bb88>]
In [32]: ri.get('projectile')
In [33]: ri.find('projectile')
Out[33]: <Element 'projectile' at 0x7fd2afa6b728>
In [34]: ri.find('craftingrequirements')
Out[34]: <Element 'craftingrequirements' at 0x7fd2afa6b868>
In [35]: c = _
In [36]: c
Out[36]: <Element 'craftingrequirements' at 0x7fd2afa6b868>
In [37]: c.getchildren()
Out[37]: [<Element 'craftresource' at 0x7fd2afa6b8b8>]
In [38]: c.getchildren()[0]
Out[38]: <Element 'craftresource' at 0x7fd2afa6b8b8>
In [39]: c.getchildren()[0].attrib
Out[39]: {'count': '32', 'uniquename': 'T2_PLANKS'}
"""
def build_localization_lookup(lang='EN-US'):
"""
Takes the localization XML and builds a lookup dictionary for the language given
:return: dictionary of {itemID:localized name}
"""
loc_dict = {}
loc_tree = ET.parse('libs/game_data/localization.xml')
loc_root = loc_tree.getroot()
# TODO: This [0] reference might cause a bug, find a cleaner way
loc_items = loc_root.getchildren()[0]
for item in loc_items:
try:
# Get the item ID string
item_id = item.attrib['tuid']
# Get the target lang for localization
for loc_str in item:
if loc_str.attrib['{http://www.w3.org/XML/1998/namespace}lang'] == lang:
localized = loc_str.find('seg').text
if localized is not None:
loc_dict[item_id] = localized
else:
loc_dict[item_id] = item_id
break
else:
loc_dict[item_id] = item_id
        except Exception:
print(traceback.format_exc())
return loc_dict
|
nilq/baby-python
|
python
|
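# Project Euler problem 9: find the Pythagorean triple a < b < c with
# a + b + c = 1000 and print the product a*b*c (chkVal tests a^2 + b^2 == c^2
# with c = 1000 - (a + b)).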
def main():
    for a in range(1, int(1000/3) + 1):
        for b in range(a + 1, int(500 - a/2) + 1):  # b < c  <=>  b < 1000 - (a+b)  <=>  b < 500 - a/2
if chkVal(a, b):
print(a * b * (1000-(a+b)))
def chkVal(a, b):
left_term = a**2 + b**2
right_term = (1000 - (a + b))**2
return left_term == right_term
if __name__ == "__main__":
main()
# Answer: 31875000
|
nilq/baby-python
|
python
|
import time
from hyades.inventory.inventory import InventoryManager
inventory = InventoryManager('inventory.yml')
connectors_result = {}
for device in inventory.filter(mode='sync'):
connector = device.connection_manager.registry_name
print(f'\nStart collecting {device.name} with {connector}')
connectors_result[connector] = []
for it in range(10):
start = time.time()
device.connect()
output = device.parse("show version")
print(output)
device.disconnect()
end = time.time()
connectors_result[connector].append(end - start)
print('\n\n')
for connector in connectors_result:
total_time = sum(connectors_result[connector])
mean_time = total_time/len(connectors_result[connector])
min_time = min(connectors_result[connector])
max_time = max(connectors_result[connector])
print(f"Connector: {connector}:\n"
f"Max time: {max_time}\n"
f"Min time: {min_time}\n"
f"Mean time: {mean_time}\n\n")
|
nilq/baby-python
|
python
|
from yourproduct.config import Config
CONFIG: Config = Config()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from ._version import get_versions
__author__ = 'Juan Ortiz'
__email__ = 'ortizub41@gmail.com'
__version__ = get_versions()['version']
del get_versions
def hello_world():
print('Hello, world!')
return True
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import json
import time
try:
    import requests
except ImportError:
    print("Install requests python module. pip install requests")
    exit(1)
GREEN = '\033[92m'
RED = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
def check_file(value):
    try:
        f = open(value)
        return f.read().strip()
    except IOError:
        print("[-] The file '{}' was not found in the current directory.".format(value))
        exit(1)
URL = "http://{}:{}/configs/".format(check_file("ip"), check_file("nodePort"))
try:
f = open("sample_data.json")
except IOError:
print "[-] sample_data.json file is missing"
exit(0)
json_data = json.load(f)
print "[+] JSON Data Loaded"
with open("deleted_list.txt") as f:
stored_list = f.readlines()
print(GREEN + "[+] Trying to fetch deleted items, which should FAIL!" + ENDC)
for config in stored_list:
res = requests.get(url=URL+config.strip())
print "[*] [GET] HTTP Status code for the config {0} is {1}".format(config.strip(), res.status_code)
print " [-] Response Text ->", res.text
time.sleep(1)
with open("available_list.txt") as f:
stored_list = f.readlines()
print(GREEN + "[+] Trying to fetch available items, which should SUCCEED!" + ENDC)
for config in stored_list:
res = requests.get(url=URL+config.strip())
print "[*] [GET] HTTP Status code for the config {0} is {1}".format(config.strip(), res.status_code)
print " [-] Response Text ->", res.text
time.sleep(1)
|
nilq/baby-python
|
python
|
"""Tests for provide_url_scheme function."""
from url_normalize.url_normalize import provide_url_scheme
EXPECTED_DATA = {
"": "",
"-": "-",
"/file/path": "/file/path",
"//site/path": "https://site/path",
"ftp://site/": "ftp://site/",
"site/page": "https://site/page",
}
def test_provide_url_scheme_result_is_expected():
"""Assert we got expected results from the provide_url_scheme function."""
for url, expected in EXPECTED_DATA.items():
result = provide_url_scheme(url)
assert result == expected, url
def test_provide_url_scheme_accept_default_scheme_param():
"""Assert we could provide default_scheme param other than https."""
url = "//site/path"
expected = "http://site/path"
actual = provide_url_scheme(url, default_scheme="http")
assert actual == expected
|
nilq/baby-python
|
python
|
import sys
import os
from datetime import datetime
import unittest
import xlwings as xw
from xlwings.constants import RgbColor
from .common import TestBase, this_dir
# Mac imports
if sys.platform.startswith('darwin'):
from appscript import k as kw
class TestRangeInstantiation(TestBase):
def test_range1(self):
r = self.wb1.sheets[0].range('A1')
self.assertEqual(r.address, '$A$1')
def test_range2(self):
r = self.wb1.sheets[0].range('A1:A1')
self.assertEqual(r.address, '$A$1')
def test_range3(self):
r = self.wb1.sheets[0].range('B2:D5')
self.assertEqual(r.address, '$B$2:$D$5')
def test_range4(self):
r = self.wb1.sheets[0].range((1, 1))
self.assertEqual(r.address, '$A$1')
def test_range5(self):
r = self.wb1.sheets[0].range((1, 1), (1, 1))
self.assertEqual(r.address, '$A$1')
def test_range6(self):
r = self.wb1.sheets[0].range((2, 2), (5, 4))
self.assertEqual(r.address, '$B$2:$D$5')
def test_range7(self):
r = self.wb1.sheets[0].range('A1', (2, 2))
self.assertEqual(r.address, '$A$1:$B$2')
def test_range8(self):
r = self.wb1.sheets[0].range((1, 1), 'B2')
self.assertEqual(r.address, '$A$1:$B$2')
def test_range9(self):
r = self.wb1.sheets[0].range(self.wb1.sheets[0].range('A1'), self.wb1.sheets[0].range('B2'))
self.assertEqual(r.address, '$A$1:$B$2')
def test_range10(self):
with self.assertRaises(ValueError):
r = self.wb1.sheets[0].range(self.wb2.sheets[0].range('A1'), self.wb1.sheets[0].range('B2'))
def test_range11(self):
with self.assertRaises(ValueError):
r = self.wb1.sheets[1].range(self.wb1.sheets[0].range('A1'), self.wb1.sheets[0].range('B2'))
def test_range12(self):
with self.assertRaises(ValueError):
r = self.wb1.sheets[0].range(self.wb1.sheets[1].range('A1'), self.wb1.sheets[0].range('B2'))
def test_range13(self):
with self.assertRaises(ValueError):
r = self.wb1.sheets[0].range(self.wb1.sheets[0].range('A1'), self.wb1.sheets[1].range('B2'))
def test_zero_based_index1(self):
with self.assertRaises(IndexError):
self.wb1.sheets[0].range((0, 1)).value = 123
def test_zero_based_index2(self):
with self.assertRaises(IndexError):
a = self.wb1.sheets[0].range((1, 1), (1, 0)).value
def test_zero_based_index3(self):
with self.assertRaises(IndexError):
xw.Range((1, 0)).value = 123
def test_zero_based_index4(self):
with self.assertRaises(IndexError):
a = xw.Range((1, 0), (1, 0)).value
def test_jagged_array(self):
with self.assertRaises(Exception):
self.wb1.sheets[0].range('A1').value = [[1], [1, 2]]
with self.assertRaises(Exception):
self.wb1.sheets[0].range('A1').value = [[1, 2, 3], [4, 5], [6, 7, 8]]
with self.assertRaises(Exception):
self.wb1.sheets[0].range('A1').value = ((1,), (1, 2))
# the following should not raise an error
self.wb1.sheets[0].range('A1').value = 1
self.wb1.sheets[0].range('A1').value = 's'
self.wb1.sheets[0].range('A1').value = [[1, 2], [1, 2]]
self.wb1.sheets[0].range('A1').value = [1, 2, 3]
self.wb1.sheets[0].range('A1').value = [[1, 2, 3]]
self.wb1.sheets[0].range('A1').value = []
class TestRangeAttributes(TestBase):
def test_iterator(self):
self.wb1.sheets[0].range('A20').value = [[1., 2.], [3., 4.]]
r = self.wb1.sheets[0].range('A20:B21')
self.assertEqual([c.value for c in r], [1., 2., 3., 4.])
# check that reiterating on same range works properly
self.assertEqual([c.value for c in r], [1., 2., 3., 4.])
def test_sheet(self):
self.assertEqual(self.wb1.sheets[1].range('A1').sheet.name, self.wb1.sheets[1].name)
def test_len(self):
self.assertEqual(len(self.wb1.sheets[0].range('A1:C4')), 12)
def test_count(self):
self.assertEqual(len(self.wb1.sheets[0].range('A1:C4')), self.wb1.sheets[0].range('A1:C4').count)
def test_row(self):
self.assertEqual(self.wb1.sheets[0].range('B3:F5').row, 3)
def test_column(self):
self.assertEqual(self.wb1.sheets[0].range('B3:F5').column, 2)
def test_row_count(self):
self.assertEqual(self.wb1.sheets[0].range('B3:F5').rows.count, 3)
def test_column_count(self):
self.assertEqual(self.wb1.sheets[0].range('B3:F5').columns.count, 5)
def raw_value(self):
pass # TODO
def test_clear_content(self):
self.wb1.sheets[0].range('G1').value = 22
self.wb1.sheets[0].range('G1').clear_contents()
self.assertEqual(self.wb1.sheets[0].range('G1').value, None)
def test_clear(self):
self.wb1.sheets[0].range('G1').value = 22
self.wb1.sheets[0].range('G1').clear()
self.assertEqual(self.wb1.sheets[0].range('G1').value, None)
def test_end(self):
self.wb1.sheets[0].range('A1:C5').value = 1.
self.assertEqual(self.wb1.sheets[0].range('A1').end('d'), self.wb1.sheets[0].range('A5'))
self.assertEqual(self.wb1.sheets[0].range('A1').end('down'), self.wb1.sheets[0].range('A5'))
self.assertEqual(self.wb1.sheets[0].range('C5').end('u'), self.wb1.sheets[0].range('C1'))
self.assertEqual(self.wb1.sheets[0].range('C5').end('up'), self.wb1.sheets[0].range('C1'))
self.assertEqual(self.wb1.sheets[0].range('A1').end('right'), self.wb1.sheets[0].range('C1'))
self.assertEqual(self.wb1.sheets[0].range('A1').end('r'), self.wb1.sheets[0].range('C1'))
self.assertEqual(self.wb1.sheets[0].range('C5').end('left'), self.wb1.sheets[0].range('A5'))
self.assertEqual(self.wb1.sheets[0].range('C5').end('l'), self.wb1.sheets[0].range('A5'))
def test_formula(self):
self.wb1.sheets[0].range('A1').formula = '=SUM(A2:A10)'
self.assertEqual(self.wb1.sheets[0].range('A1').formula, '=SUM(A2:A10)')
def test_formula2(self):
self.wb1.sheets[0].range('A1').formula2 = '=UNIQUE(A2:A10)'
self.assertEqual(self.wb1.sheets[0].range('A1').formula2, '=UNIQUE(A2:A10)')
def test_formula_array(self):
self.wb1.sheets[0].range('A1').value = [[1, 4], [2, 5], [3, 6]]
self.wb1.sheets[0].range('D1').formula_array = '=SUM(A1:A3*B1:B3)'
self.assertEqual(self.wb1.sheets[0].range('D1').value, 32.)
def test_column_width(self):
self.wb1.sheets[0].range('A1:B2').column_width = 10.0
result = self.wb1.sheets[0].range('A1').column_width
self.assertEqual(10.0, result)
self.wb1.sheets[0].range('A1:B2').value = 'ensure cells are used'
self.wb1.sheets[0].range('B2').column_width = 20.0
result = self.wb1.sheets[0].range('A1:B2').column_width
if sys.platform.startswith('win'):
self.assertEqual(None, result)
else:
self.assertEqual(kw.missing_value, result)
def test_row_height(self):
self.wb1.sheets[0].range('A1:B2').row_height = 15.0
result = self.wb1.sheets[0].range('A1').row_height
self.assertEqual(15.0, result)
self.wb1.sheets[0].range('A1:B2').value = 'ensure cells are used'
self.wb1.sheets[0].range('B2').row_height = 20.0
result = self.wb1.sheets[0].range('A1:B2').row_height
if sys.platform.startswith('win'):
self.assertEqual(None, result)
else:
self.assertEqual(kw.missing_value, result)
def test_width(self):
"""test_width: Width depends on default style text size, so do not test absolute widths"""
self.wb1.sheets[0].range('A1:D4').column_width = 10.0
result_before = self.wb1.sheets[0].range('A1').width
self.wb1.sheets[0].range('A1:D4').column_width = 12.0
result_after = self.wb1.sheets[0].range('A1').width
self.assertTrue(result_after > result_before)
def test_height(self):
self.wb1.sheets[0].range('A1:D4').row_height = 60.0
result = self.wb1.sheets[0].range('A1:D4').height
self.assertEqual(240.0, result)
def test_left(self):
self.assertEqual(self.wb1.sheets[0].range('A1').left, 0.0)
self.wb1.sheets[0].range('A1').column_width = 20.0
self.assertEqual(self.wb1.sheets[0].range('B1').left, self.wb1.sheets[0].range('A1').width)
def test_top(self):
self.assertEqual(self.wb1.sheets[0].range('A1').top, 0.0)
self.wb1.sheets[0].range('A1').row_height = 20.0
self.assertEqual(self.wb1.sheets[0].range('A2').top, self.wb1.sheets[0].range('A1').height)
def test_number_format_cell(self):
format_string = "mm/dd/yy;@"
self.wb1.sheets[0].range('A1').number_format = format_string
result = self.wb1.sheets[0].range('A1').number_format
self.assertEqual(format_string, result)
def test_number_format_range(self):
format_string = "mm/dd/yy;@"
self.wb1.sheets[0].range('A1:D4').number_format = format_string
result = self.wb1.sheets[0].range('A1:D4').number_format
self.assertEqual(format_string, result)
def test_get_address(self):
wb1 = self.app1.books.open(os.path.join(this_dir, 'test book.xlsx'))
res = wb1.sheets[0].range((1, 1), (3, 3)).get_address()
self.assertEqual(res, '$A$1:$C$3')
res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(False)
self.assertEqual(res, '$A1:$C3')
res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(True, False)
self.assertEqual(res, 'A$1:C$3')
res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(False, False)
self.assertEqual(res, 'A1:C3')
res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(include_sheetname=True)
self.assertEqual(res, "'Sheet1'!$A$1:$C$3")
res = wb1.sheets[1].range((1, 1), (3, 3)).get_address(include_sheetname=True)
self.assertEqual(res, "'Sheet2'!$A$1:$C$3")
res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(external=True)
self.assertEqual(res, "'[test book.xlsx]Sheet1'!$A$1:$C$3")
def test_address(self):
self.assertEqual(self.wb1.sheets[0].range('A1:B2').address, '$A$1:$B$2')
def test_current_region(self):
values = [[1., 2.], [3., 4.]]
self.wb1.sheets[0].range('A20').value = values
self.assertEqual(self.wb1.sheets[0].range('B21').current_region.value, values)
def test_autofit_range(self):
self.wb1.sheets[0].range('A1:D4').value = 'test_string'
self.wb1.sheets[0].range('A1:D4').row_height = 40
self.wb1.sheets[0].range('A1:D4').column_width = 40
self.assertEqual(40, self.wb1.sheets[0].range('A1:D4').row_height)
self.assertEqual(40, self.wb1.sheets[0].range('A1:D4').column_width)
self.wb1.sheets[0].range('A1:D4').autofit()
self.assertTrue(40 != self.wb1.sheets[0].range('A1:D4').column_width)
self.assertTrue(40 != self.wb1.sheets[0].range('A1:D4').row_height)
self.wb1.sheets[0].range('A1:D4').row_height = 40
self.assertEqual(40, self.wb1.sheets[0].range('A1:D4').row_height)
self.wb1.sheets[0].range('A1:D4').rows.autofit()
self.assertTrue(40 != self.wb1.sheets[0].range('A1:D4').row_height)
self.wb1.sheets[0].range('A1:D4').column_width = 40
self.assertEqual(40, self.wb1.sheets[0].range('A1:D4').column_width)
self.wb1.sheets[0].range('A1:D4').columns.autofit()
self.assertTrue(40 != self.wb1.sheets[0].range('A1:D4').column_width)
self.wb1.sheets[0].range('A1:D4').rows.autofit()
self.wb1.sheets[0].range('A1:D4').columns.autofit()
def test_autofit_col(self):
self.wb1.sheets[0].range('A1:D4').value = 'test_string'
self.wb1.sheets[0].range('A:D').column_width = 40
self.assertEqual(40, self.wb1.sheets[0].range('A:D').column_width)
self.wb1.sheets[0].range('A:D').autofit()
self.assertTrue(40 != self.wb1.sheets[0].range('A:D').column_width)
# Just checking if they don't throw an error
self.wb1.sheets[0].range('A:D').rows.autofit()
self.wb1.sheets[0].range('A:D').columns.autofit()
def test_autofit_row(self):
self.wb1.sheets[0].range('A1:D4').value = 'test_string'
self.wb1.sheets[0].range('1:10').row_height = 40
self.assertEqual(40, self.wb1.sheets[0].range('1:10').row_height)
self.wb1.sheets[0].range('1:10').autofit()
self.assertTrue(40 != self.wb1.sheets[0].range('1:10').row_height)
# Just checking if they don't throw an error
self.wb1.sheets[0].range('1:1000000').rows.autofit()
self.wb1.sheets[0].range('1:1000000').columns.autofit()
def test_color(self):
rgb = (30, 100, 200)
self.wb1.sheets[0].range('A1').color = rgb
self.assertEqual(rgb, self.wb1.sheets[0].range('A1').color)
self.wb1.sheets[0].range('A2').color = RgbColor.rgbAqua
self.assertEqual((0, 255, 255), self.wb1.sheets[0].range('A2').color)
self.wb1.sheets[0].range('A2').color = None
self.assertEqual(self.wb1.sheets[0].range('A2').color, None)
self.wb1.sheets[0].range('A1:D4').color = rgb
self.assertEqual(rgb, self.wb1.sheets[0].range('A1:D4').color)
def test_len_rows(self):
self.assertEqual(len(self.wb1.sheets[0].range('A1:C4').rows), 4)
def test_count_rows(self):
self.assertEqual(len(self.wb1.sheets[0].range('A1:C4').rows), self.wb1.sheets[0].range('A1:C4').rows.count)
def test_len_cols(self):
self.assertEqual(len(self.wb1.sheets[0].range('A1:C4').columns), 3)
def test_count_cols(self):
self.assertEqual(len(self.wb1.sheets[0].range('A1:C4').columns), self.wb1.sheets[0].range('A1:C4').columns.count)
def test_shape(self):
self.assertEqual(self.wb1.sheets[0].range('A1:C4').shape, (4, 3))
def test_size(self):
self.assertEqual(self.wb1.sheets[0].range('A1:C4').size, 12)
def test_table(self):
data = [[1, 2.222, 3.333],
['Test1', None, 'éöà'],
[datetime(1962, 11, 3), datetime(2020, 12, 31, 12, 12, 20), 9.999]]
self.wb1.sheets[0].range('A1').value = data
if sys.platform.startswith('win') and self.wb1.app.version == '14.0':
self.wb1.sheets[0].range('A3:B3').number_format = 'dd/mm/yyyy' # Hack for Excel 2010 bug, see GH #43
cells = self.wb1.sheets[0].range('A1').expand('table').value
self.assertEqual(cells, data)
def test_vertical(self):
data = [[1, 2.222, 3.333],
['Test1', None, 'éöà'],
[datetime(1962, 11, 3), datetime(2020, 12, 31, 12, 12, 20), 9.999]]
self.wb1.sheets[0].range('A10').value = data
if sys.platform.startswith('win') and self.wb1.app.version == '14.0':
self.wb1.sheets[0].range('A12:B12').number_format = 'dd/mm/yyyy' # Hack for Excel 2010 bug, see GH #43
cells = self.wb1.sheets[0].range('A10').expand('vertical').value
self.assertEqual(cells, [row[0] for row in data])
cells = self.wb1.sheets[0].range('A10').expand('d').value
self.assertEqual(cells, [row[0] for row in data])
cells = self.wb1.sheets[0].range('A10').expand('down').value
self.assertEqual(cells, [row[0] for row in data])
def test_horizontal(self):
data = [[1, 2.222, 3.333],
['Test1', None, 'éöà'],
[datetime(1962, 11, 3), datetime(2020, 12, 31, 12, 12, 20), 9.999]]
self.wb1.sheets[0].range('A20').value = data
cells = self.wb1.sheets[0].range('A20').expand('horizontal').value
self.assertEqual(cells, data[0])
cells = self.wb1.sheets[0].range('A20').expand('r').value
self.assertEqual(cells, data[0])
cells = self.wb1.sheets[0].range('A20').expand('right').value
self.assertEqual(cells, data[0])
def test_hyperlink(self):
address = 'www.xlwings.org'
# Naked address
self.wb1.sheets[0].range('A1').add_hyperlink(address)
self.assertEqual(self.wb1.sheets[0].range('A1').value, address)
hyperlink = self.wb1.sheets[0].range('A1').hyperlink
if not hyperlink.endswith('/'):
hyperlink += '/'
self.assertEqual(hyperlink, 'http://' + address + '/')
# Address + FriendlyName
self.wb1.sheets[0].range('A2').add_hyperlink(address, 'test_link')
self.assertEqual(self.wb1.sheets[0].range('A2').value, 'test_link')
hyperlink = self.wb1.sheets[0].range('A2').hyperlink
if not hyperlink.endswith('/'):
hyperlink += '/'
self.assertEqual(hyperlink, 'http://' + address + '/')
def test_hyperlink_formula(self):
self.wb1.sheets[0].range('B10').formula = '=HYPERLINK("http://xlwings.org", "xlwings")'
self.assertEqual(self.wb1.sheets[0].range('B10').hyperlink, 'http://xlwings.org')
def test_insert_cell(self):
self.wb1.sheets[0].range('A1:C1').value = 'test'
self.wb1.sheets[0].range('A1').insert()
self.assertIsNone(self.wb1.sheets[0].range('A1').value)
self.assertEqual(self.wb1.sheets[0].range('A2').value, 'test')
def test_insert_row(self):
self.wb1.sheets[0].range('A1:C1').value = 'test'
self.wb1.sheets[0].range('1:1').insert()
self.assertEqual(self.wb1.sheets[0].range('A1:C1').value, [None, None, None])
self.assertEqual(self.wb1.sheets[0].range('A2:C2').value, ['test', 'test', 'test'])
def test_insert_column(self):
self.wb1.sheets[0].range('A1:A3').value = 'test'
self.wb1.sheets[0].range('A:A').insert()
self.assertEqual(self.wb1.sheets[0].range('A1:A3').value, [None, None, None])
self.assertEqual(self.wb1.sheets[0].range('B1:B3').value, ['test', 'test', 'test'])
def test_insert_cell_shift_down(self):
self.wb1.sheets[0].range('A1:C1').value = 'test'
self.wb1.sheets[0].range('A1').insert(shift='down')
self.assertIsNone(self.wb1.sheets[0].range('A1').value)
self.assertEqual(self.wb1.sheets[0].range('A2').value, 'test')
def test_insert_cell_shift_right(self):
self.wb1.sheets[0].range('A1:C1').value = 'test'
self.wb1.sheets[0].range('A1').insert(shift='right')
self.assertIsNone(self.wb1.sheets[0].range('A1').value)
self.assertEqual(self.wb1.sheets[0].range('B1:D1').value, ['test', 'test', 'test'])
def test_delete_cell(self):
self.wb1.sheets[0].range('A1').value = ['one', 'two', 'three']
self.wb1.sheets[0].range('A1').delete()
self.assertIsNone(self.wb1.sheets[0].range('C1').value)
self.assertEqual(self.wb1.sheets[0].range('A1').value, 'two')
def test_delete_row(self):
self.wb1.sheets[0].range('A1:C1').value = 'one'
self.wb1.sheets[0].range('A2:C2').value = 'two'
self.wb1.sheets[0].range('1:1').delete()
self.assertEqual(self.wb1.sheets[0].range('A1:C1').value, ['two', 'two', 'two'])
self.assertEqual(self.wb1.sheets[0].range('A2:C2').value, [None, None, None])
def test_delete_column(self):
self.wb1.sheets[0].range('A1:A1').value = 'one'
self.wb1.sheets[0].range('B1:B2').value = 'two'
self.wb1.sheets[0].range('C1:C2').value = 'two'
self.wb1.sheets[0].range('A:A').delete()
self.assertEqual(self.wb1.sheets[0].range('C1:C2').value, [None, None])
self.assertEqual(self.wb1.sheets[0].range('A1:A2').value, ['two', 'two'])
def test_delete_cell_shift_up(self):
self.wb1.sheets[0].range('A1').value = ['one', 'two', 'three']
self.wb1.sheets[0].range('A1').delete('up')
self.assertIsNone(self.wb1.sheets[0].range('A1').value)
self.assertEqual(self.wb1.sheets[0].range('B1:C1').value, ['two', 'three'])
def test_delete_cell_shift_left(self):
self.wb1.sheets[0].range('A1').value = ['one', 'two', 'three']
self.wb1.sheets[0].range('A1').delete('left')
self.assertIsNone(self.wb1.sheets[0].range('C1').value)
self.assertEqual(self.wb1.sheets[0].range('A1').value, 'two')
def test_copy_destination(self):
sheet = self.wb1.sheets[0]
sheet.range('A1:B1').value = 'test'
sheet.range('A1:B1').copy(destination=sheet.range('A2'))
self.assertEqual(sheet.range('A1:B1').value, sheet.range('A2:B2').value)
def test_copy_clipboard(self):
sheet = self.wb1.sheets[0]
sheet.range('A1:B1').value = 'test'
sheet.range('A1:B1').copy()
def test_paste(self):
sheet = self.wb1.sheets[0]
sheet.range('A1:B1').value = 'test'
sheet.range('A1:B1').color = (34, 34, 34)
sheet.range('A1:B1').copy()
sheet.range('A2').paste()
self.assertEqual(sheet['A1:B1'].value, sheet['A2:B2'].value)
self.assertEqual(sheet['A1:B1'].color, sheet['A2:B2'].color)
def test_paste_values(self):
sheet = self.wb1.sheets[0]
sheet.range('A1:B1').value = 'test'
sheet.range('A1:B1').color = (34, 34, 34)
sheet.range('A1:B1').copy()
sheet.range('A2').paste(paste='values')
self.assertEqual(sheet['A1:B1'].value, sheet['A2:B2'].value)
self.assertNotEqual(sheet['A1:B1'].color, sheet['A2:B2'].color)
def test_resize(self):
r = self.wb1.sheets[0].range('A1').resize(4, 5)
self.assertEqual(r.address, '$A$1:$E$4')
r = self.wb1.sheets[0].range('A1').resize(row_size=4)
self.assertEqual(r.address, '$A$1:$A$4')
r = self.wb1.sheets[0].range('A1:B4').resize(column_size=5)
self.assertEqual(r.address, '$A$1:$E$4')
r = self.wb1.sheets[0].range('A1:B4').resize(row_size=5)
self.assertEqual(r.address, '$A$1:$B$5')
r = self.wb1.sheets[0].range('A1:B4').resize()
self.assertEqual(r.address, '$A$1:$B$4')
r = self.wb1.sheets[0].range('A1:C5').resize(row_size=1)
self.assertEqual(r.address, '$A$1:$C$1')
with self.assertRaises(AssertionError):
self.wb1.sheets[0].range('A1:B4').resize(row_size=0)
with self.assertRaises(AssertionError):
self.wb1.sheets[0].range('A1:B4').resize(column_size=0)
def test_offset(self):
o = self.wb1.sheets[0].range('A1:B3').offset(3, 4)
self.assertEqual(o.address, '$E$4:$F$6')
o = self.wb1.sheets[0].range('A1:B3').offset(row_offset=3)
self.assertEqual(o.address, '$A$4:$B$6')
o = self.wb1.sheets[0].range('A1:B3').offset(column_offset=4)
self.assertEqual(o.address, '$E$1:$F$3')
def test_last_cell(self):
self.assertEqual(self.wb1.sheets[0].range('B3:F5').last_cell.row, 5)
self.assertEqual(self.wb1.sheets[0].range('B3:F5').last_cell.column, 6)
def test_select(self):
self.wb2.sheets[0].range('C10').select()
self.assertEqual(self.app2.selection.address, self.wb2.sheets[0].range('C10').address)
def test_wrap_text(self):
self.assertFalse(self.wb1.sheets[0]['A1'].wrap_text)
self.wb1.sheets[0]['A1'].wrap_text = True
self.assertTrue(self.wb1.sheets[0]['A1'].wrap_text)
class TestRangeIndexing(TestBase):
# 2d Range
def test_index1(self):
r = self.wb1.sheets[0].range('A1:B2')
self.assertEqual(r[0].address, '$A$1')
self.assertEqual(r(1).address, '$A$1')
self.assertEqual(r[0, 0].address, '$A$1')
self.assertEqual(r(1, 1).address, '$A$1')
def test_index2(self):
r = self.wb1.sheets[0].range('A1:B2')
self.assertEqual(r[1].address, '$B$1')
self.assertEqual(r(2).address, '$B$1')
self.assertEqual(r[0, 1].address, '$B$1')
self.assertEqual(r(1, 2).address, '$B$1')
def test_index3(self):
with self.assertRaises(IndexError):
r = self.wb1.sheets[0].range('A1:B2')
a = r[4].address
def test_index4(self):
r = self.wb1.sheets[0].range('A1:B2')
self.assertEqual(r(5).address, '$A$3')
def test_index5(self):
with self.assertRaises(IndexError):
r = self.wb1.sheets[0].range('A1:B2')
a = r[0, 4].address
def test_index6(self):
r = self.wb1.sheets[0].range('A1:B2')
self.assertEqual(r(1, 5).address, '$E$1')
# Row
def test_index1row(self):
r = self.wb1.sheets[0].range('A1:D1')
self.assertEqual(r[0].address, '$A$1')
self.assertEqual(r(1).address, '$A$1')
self.assertEqual(r[0, 0].address, '$A$1')
self.assertEqual(r(1, 1).address, '$A$1')
def test_index2row(self):
r = self.wb1.sheets[0].range('A1:D1')
self.assertEqual(r[1].address, '$B$1')
self.assertEqual(r(2).address, '$B$1')
self.assertEqual(r[0, 1].address, '$B$1')
self.assertEqual(r(1, 2).address, '$B$1')
def test_index3row(self):
with self.assertRaises(IndexError):
r = self.wb1.sheets[0].range('A1:D1')
a = r[4].address
def test_index4row(self):
r = self.wb1.sheets[0].range('A1:D1')
self.assertEqual(r(5).address, '$A$2')
def test_index5row(self):
with self.assertRaises(IndexError):
r = self.wb1.sheets[0].range('A1:D1')
a = r[0, 4].address
def test_index6row(self):
r = self.wb1.sheets[0].range('A1:D1')
self.assertEqual(r(1, 5).address, '$E$1')
# Column
def test_index1col(self):
r = self.wb1.sheets[0].range('A1:A4')
self.assertEqual(r[0].address, '$A$1')
self.assertEqual(r(1).address, '$A$1')
self.assertEqual(r[0, 0].address, '$A$1')
self.assertEqual(r(1, 1).address, '$A$1')
def test_index2col(self):
r = self.wb1.sheets[0].range('A1:A4')
self.assertEqual(r[1].address, '$A$2')
self.assertEqual(r(2).address, '$A$2')
self.assertEqual(r[1, 0].address, '$A$2')
self.assertEqual(r(2, 1).address, '$A$2')
def test_index3col(self):
with self.assertRaises(IndexError):
r = self.wb1.sheets[0].range('A1:A4')
a = r[4].address
def test_index4col(self):
r = self.wb1.sheets[0].range('A1:A4')
self.assertEqual(r(5).address, '$A$5')
def test_index5col(self):
with self.assertRaises(IndexError):
r = self.wb1.sheets[0].range('A1:A4')
a = r[4, 0].address
def test_index6col(self):
r = self.wb1.sheets[0].range('A1:A4')
self.assertEqual(r(5, 1).address, '$A$5')
class TestRangeSlicing(TestBase):
# 2d Range
def test_slice1(self):
r = self.wb1.sheets[0].range('B2:D4')
self.assertEqual(r[0:, 1:].address, '$C$2:$D$4')
def test_slice2(self):
r = self.wb1.sheets[0].range('B2:D4')
self.assertEqual(r[1:2, 1:2].address, '$C$3')
def test_slice3(self):
r = self.wb1.sheets[0].range('B2:D4')
self.assertEqual(r[:1, :2].address, '$B$2:$C$2')
def test_slice4(self):
r = self.wb1.sheets[0].range('B2:D4')
self.assertEqual(r[:, :].address, '$B$2:$D$4')
# Row
def test_slice1row(self):
r = self.wb1.sheets[0].range('B2:D2')
self.assertEqual(r[1:].address, '$C$2:$D$2')
def test_slice2row(self):
r = self.wb1.sheets[0].range('B2:D2')
self.assertEqual(r[1:2].address, '$C$2')
def test_slice3row(self):
r = self.wb1.sheets[0].range('B2:D2')
self.assertEqual(r[:2].address, '$B$2:$C$2')
def test_slice4row(self):
r = self.wb1.sheets[0].range('B2:D2')
self.assertEqual(r[:].address, '$B$2:$D$2')
# Column
def test_slice1col(self):
r = self.wb1.sheets[0].range('B2:B4')
self.assertEqual(r[1:].address, '$B$3:$B$4')
def test_slice2col(self):
r = self.wb1.sheets[0].range('B2:B4')
self.assertEqual(r[1:2].address, '$B$3')
def test_slice3col(self):
r = self.wb1.sheets[0].range('B2:B4')
self.assertEqual(r[:2].address, '$B$2:$B$3')
def test_slice4col(self):
r = self.wb1.sheets[0].range('B2:B4')
self.assertEqual(r[:].address, '$B$2:$B$4')
class TestRangeShortcut(TestBase):
def test_shortcut1(self):
self.assertEqual(self.wb1.sheets[0]['A1'], self.wb1.sheets[0].range('A1'))
def test_shortcut2(self):
self.assertEqual(self.wb1.sheets[0]['A1:B5'], self.wb1.sheets[0].range('A1:B5'))
def test_shortcut3(self):
self.assertEqual(self.wb1.sheets[0][0, 1], self.wb1.sheets[0].range('B1'))
def test_shortcut4(self):
self.assertEqual(self.wb1.sheets[0][:5, :5], self.wb1.sheets[0].range('A1:E5'))
def test_shortcut5(self):
with self.assertRaises(TypeError):
r = self.wb1.sheets[0]['A1', 'B5']
def test_shortcut6(self):
with self.assertRaises(TypeError):
r = self.wb1.sheets[0][self.wb1.sheets[0]['A1'], 'B5']
def test_shortcut7(self):
with self.assertRaises(TypeError):
r = self.wb1.sheets[0]['A1', self.wb1.sheets[0]['B5']]
class TestRangeExpansion(TestBase):
def test_table(self):
sht = self.wb1.sheets[0]
rng = sht[0, 0]
rng.value = [['a'] * 5] * 5
self.assertEqual(rng.options(expand='table').value, [['a'] * 5] * 5)
def test_vertical(self):
sht = self.wb1.sheets[0]
rng = sht[0, 0:3]
sht[0, 0].value = [['a'] * 3] * 5
self.assertEqual(rng.options(expand='down').value, [['a'] * 3] * 5)
def test_horizontal(self):
sht = self.wb1.sheets[0]
rng = sht[0:5, 0]
sht[0, 0].value = [['a'] * 3] * 5
self.assertEqual(rng.options(expand='right').value, [['a'] * 3] * 5)
class TestCellErrors(TestBase):
    def test_cell_errors(self):
wb = xw.Book('cell_errors.xlsx')
sheet = wb.sheets[0]
for i in range(1, 8):
self.assertIsNone(sheet.range((i, 1)).value)
wb.close()
class TestMerging(TestBase):
def test_merge(self):
sheet = self.wb1.sheets[0]
self.assertEqual(sheet['A1'].merge_area, sheet['A1'])
self.assertEqual(sheet['A1'].merge_cells, False)
sheet["A1:A2"].merge()
self.assertEqual(sheet['A1'].merge_area, sheet['A1:A2'])
self.assertEqual(sheet['A1'].merge_cells, True)
sheet["A1:B2"].merge()
self.assertEqual(sheet['A1'].merge_area, sheet['A1:B2'])
sheet["A1:B2"].unmerge()
self.assertEqual(sheet['A1'].merge_area, sheet['A1'])
sheet["A1:B2"].merge(True)
self.assertEqual(sheet['A1'].merge_area, sheet['A1:B1'])
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pylab
import numpy
import Image # PIL
from supreme.lib import pywt
im = Image.open("data/aero.png").convert('L')
arr = numpy.asarray(im, dtype=numpy.uint8)  # PIL image -> (height, width) array
pylab.imshow(arr, interpolation="nearest", cmap=pylab.cm.gray)
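# pywt.swt2 returns one (LL, (LH, HL, HH)) coefficient tuple per decomposition
# level; the stationary transform keeps every array the same shape as the input.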
for LL, (LH, HL, HH) in pywt.swt2(arr, 'bior1.3', level=3, start_level=0):
pylab.figure()
for i,a in enumerate([LL, LH, HL, HH]):
pylab.subplot(2,2,i+1)
pylab.imshow(a, origin='image', interpolation="nearest", cmap=pylab.cm.gray)
pylab.show()
|
nilq/baby-python
|
python
|
def count_digit(p1, p2):
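    # Total number of digits used to print every page number from p1 to p2;
    # e.g. count_digit(1, 12) == 9*1 + 3*2 == 15.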
l1 = len(str(p1))
l2 = len(str(p2))
count = 0
for i in range(l1, l2+1):
if i == l1:
st = p1
else:
st = 10**(i-1)
if i == l2:
ed = p2
else:
ed = 10**i - 1
count += (ed - st + 1) * i
return count
def dichotomic_search(p1, p2, target):
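    # Binary search over pages: find p in [p1, p2] whose cumulative digit
    # count count_digit(p1, p) lands on (or just below) `target`.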
pin = p1
pout = p2
p = p1
while pout - pin > 1:
p = (pin + pout) // 2
count = count_digit(p1, p)
if count == target:
return p
elif count < target:
pin = p
else:
pout = p
p = p - 1
return p
n = int(input())
for i in range(n):
st, ed = [int(j) for j in input().split()]
target_count = count_digit(st, ed) // 2
page = dichotomic_search(st, ed, target_count)
print(page)
|
nilq/baby-python
|
python
|
from utils import pandaman, handyman
from feature_extraction import data_loader
from feature_extraction import feature_preprocessor
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
plt.style.use('ggplot')
if __name__ == '__main__':
train_data, test_data = data_loader.create_flat_intervals_structure()
# print(os.path.join(".", "moving_avg_data.pkl"))
# train_data_mvng_avg = feature_preprocessor.reduce_noise(train_data.copy(), "moving_avg")
# handyman.dump_pickle(train_data_mvng_avg, os.path.join(".", "moving_avg_data.pkl"))
# train_data_butter = feature_preprocessor.reduce_noise(train_data.copy(), "butter")
# handyman.dump_pickle(train_data_butter, os.path.join(".", "butter_data.pkl"))
    # train_data_gaussian = feature_preprocessor.reduce_noise(train_data.copy(), "gaussian")
# handyman.dump_pickle(train_data_gaussian, os.path.join(".", "gaussian_data.pkl"))
train_data_rolling = handyman.load_pickle(os.path.join(".", "rolling_data.pkl"))
train_data_mvng_avg = handyman.load_pickle(os.path.join(".", "moving_avg_data.pkl"))
train_data_butter = handyman.load_pickle(os.path.join(".", "butter_data.pkl"))
train_data_gaussian = handyman.load_pickle(os.path.join(".", "gaussian_data.pkl"))
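    # Overlay the raw accelerometer-X trace for one 2 s interval (200 samples,
    # i.e. a 0.01 s step) with each smoothed version for visual comparison.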
plt.figure()
plt.plot(np.arange(0,2,0.01), train_data.loc[20, "interval_data"].iloc[:, 0],
label='Before smoothing',
color='blue', linestyle=':', linewidth=2)
plt.plot(np.arange(0,2,0.01), train_data_butter.loc[20, "interval_data"].iloc[:, 0],
label='Butterworth',
color='red', linewidth=1)
plt.plot(np.arange(0,2,0.01), train_data_gaussian.loc[20, "interval_data"].iloc[:, 0],
label='Gaussian',
color='yellow', linewidth=1)
plt.plot(np.arange(0,2,0.01), train_data_rolling.loc[20, "interval_data"].iloc[:, 0],
label='Rolling average',
color='green', linewidth=1)
plt.title("Acceloremeter X before and after smoothing")
plt.legend(loc="lower right")
plt.show()
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
import pytest
from dku_timeseries import IntervalRestrictor
from recipe_config_loading import get_interval_restriction_params
@pytest.fixture
def datetime_column():
return "Date"
@pytest.fixture
def df(datetime_column):
co2 = [315.58, 316.39, 316.79, 316.2]
country = ["first", "first", "second", "second"]
time_index = pd.date_range("1-1-1959", periods=4, freq="M")
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "country": country, datetime_column: time_index})
return df
@pytest.fixture
def long_df(datetime_column):
co2 = [315.58, 316.39, 100, 116.2, 345, 234, 201, 100]
country = ["first", "first", "first", "first", "second", "second", "second", "second"]
time_index = pd.date_range("1-1-1959", periods=4, freq="D").append(pd.date_range("1-1-1959", periods=4, freq="D"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "country": country, datetime_column: time_index})
return df
@pytest.fixture
def long_df_2(datetime_column):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 10]
country = ["first", "first", "second", "second", "third", "third"]
country_2 = ["first", "first", "second", "second", "third", "third"]
time_index = pd.date_range("1-1-1959", periods=2, freq="M").append(pd.date_range("1-1-1959", periods=2, freq="M")).append(
pd.date_range("1-1-1959", periods=2, freq="M"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "country": country, "item": country_2, datetime_column: time_index})
return df
@pytest.fixture
def long_df_3(datetime_column):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 319, 250, 300]
country = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
country_2 = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
country_3 = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"]
time_index = pd.date_range("1-1-1959", periods=2, freq="M").append(pd.date_range("1-1-1959", periods=2, freq="M")).append(
pd.date_range("1-1-1959", periods=2, freq="M")).append(pd.date_range("1-1-1959", periods=2, freq="M"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "country": country, "item": country_2, "store": country_3, datetime_column: time_index})
return df
@pytest.fixture
def long_df_4(datetime_column):
co2 = [315.58, 316.39, 316.79, 316.2, 9, 319, 250, 300]
country = ["first", "first", "second", "second", "third", "third", "first", "first"]
country_2 = ["first", "first", "second", "second", "third", "third", "second", "first"]
country_3 = ["first", "first", "second", "second", "third", "third", "third", "fourth"]
time_index = pd.date_range("1-1-2020", periods=2, freq="M").append(pd.date_range("1-1-2020", periods=2, freq="M")).append(
pd.date_range("1-1-2020", periods=2, freq="M")).append(pd.date_range("1-1-2020", periods=2, freq="M"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "country": country, "item": country_2, "store": country_3, datetime_column: time_index})
return df
@pytest.fixture
def long_df_numerical(datetime_column):
co2 = [315.58, 316.39, 100, 116.2, 345, 234, 201, 100]
country = [1, 1, 1, 1, 2, 2, 2, 2]
time_index = pd.date_range("1-1-1959", periods=4, freq="D").append(pd.date_range("1-1-1959", periods=4, freq="D"))
df = pd.DataFrame.from_dict(
{"value1": co2, "value2": co2, "country": country, datetime_column: time_index})
return df
@pytest.fixture
def recipe_config(datetime_column):
config = {u'groupby_columns': [u'country'], u'max_threshold': 320, u'min_threshold': 200, u'datetime_column': u'Date', u'advanced_activated': True,
u'time_unit': u'days', u'min_deviation_duration_value': 0, u'value_column': u'value1', u'min_valid_values_duration_value': 0}
return config
@pytest.fixture
def threshold_dict(recipe_config):
min_threshold = recipe_config.get('min_threshold')
max_threshold = recipe_config.get('max_threshold')
value_column = recipe_config.get('value_column')
threshold_dict = {value_column: (min_threshold, max_threshold)}
return threshold_dict
@pytest.fixture
def params(recipe_config):
return get_interval_restriction_params(recipe_config)
class TestIntervalLongFormat:
def test_long_format(self, long_df, params, recipe_config, threshold_dict, datetime_column):
groupby_columns = ["country"]
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(long_df, datetime_column, threshold_dict, groupby_columns=groupby_columns)
np.testing.assert_array_equal(output_df.Date.values, pd.DatetimeIndex(['1959-01-01T00:00:00.000000000', '1959-01-02T00:00:00.000000000',
'1959-01-02T00:00:00.000000000', '1959-01-03T00:00:00.000000000']))
def test_two_identifiers(self, long_df_2, params, recipe_config, threshold_dict, datetime_column):
groupby_columns = ["country", "item"]
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(long_df_2, datetime_column, threshold_dict, groupby_columns=groupby_columns)
np.testing.assert_array_equal(output_df.Date.values, pd.DatetimeIndex(['1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
'1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000']))
def test_three_identifiers(self, long_df_3, params, recipe_config, threshold_dict, datetime_column):
groupby_columns = ["country", "item", "store"]
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(long_df_3, datetime_column, threshold_dict, groupby_columns=groupby_columns)
np.testing.assert_array_equal(output_df.Date.values, pd.DatetimeIndex(['1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
'1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
'1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000',
'1959-02-28T00:00:00.000000000']))
def test_mix_identifiers(self, long_df_4, params, recipe_config, threshold_dict, datetime_column):
groupby_columns = ["country", "item", "store"]
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(long_df_4, datetime_column, threshold_dict, groupby_columns=groupby_columns)
np.testing.assert_array_equal(output_df.Date.values, pd.DatetimeIndex(['2020-01-31T00:00:00.000000000', '2020-02-29T00:00:00.000000000',
'2020-02-29T00:00:00.000000000', '2020-01-31T00:00:00.000000000',
'2020-01-31T00:00:00.000000000', '2020-02-29T00:00:00.000000000',
'2020-02-29T00:00:00.000000000']))
def test_empty_identifiers(self, df, params, recipe_config, threshold_dict, datetime_column):
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(df, datetime_column, threshold_dict, groupby_columns=[])
assert output_df.shape == (4, 5)
output_df = interval_restrictor.compute(df, datetime_column, threshold_dict)
assert output_df.shape == (4, 5)
output_df = interval_restrictor.compute(df, datetime_column, threshold_dict, groupby_columns=None)
assert output_df.shape == (4, 5)
def test_long_format_numerical(self, long_df_numerical, params, recipe_config, threshold_dict, datetime_column):
groupby_columns = ["country"]
interval_restrictor = IntervalRestrictor(params)
output_df = interval_restrictor.compute(long_df_numerical, datetime_column, threshold_dict, groupby_columns=groupby_columns)
np.testing.assert_array_equal(output_df.Date.values, pd.DatetimeIndex(['1959-01-01T00:00:00.000000000', '1959-01-02T00:00:00.000000000',
'1959-01-02T00:00:00.000000000', '1959-01-03T00:00:00.000000000']))
np.testing.assert_array_equal(output_df.country.values, np.array([1, 1, 2, 2]))
|
nilq/baby-python
|
python
|
# Dan Thayer
# PID control servo motor and distance sensor.. just messing around
from range_sensor import measure_distance
# gains
k_p = 1.0
k_d = 1.0
k_i = 0.001
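# Discrete PID law used below: signal = k_p*e + k_i*sum(e) + k_d*(e - e_prev),
# with the sample period folded into the integral and derivative gains.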
def run(target_dist, debug=False):
"""
Sense distance and drive motor towards a given target
:param target_dist: distance in cm
:return:
"""
sum_err = 0.0
last_err = 0.0
    while True:
dist = measure_distance()
err = target_dist - dist
sum_err += err
signal = k_p*err + k_i*sum_err + k_d*(err-last_err)
control(signal)
last_err = err
if debug:
print("dist={}, err={}, sum={}, signal={}".format(dist, err, sum_err, signal))
def control(signal):
    # Placeholder actuator: a real controller would command the servo here.
    print("control w/ input ", signal)
if __name__ == "__main__":
print("starting control loop...")
run(target_dist=4.0, debug=True)
|
nilq/baby-python
|
python
|
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import multiprocessing
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
from oslo_config import cfg
from oslotest import base as test_base
import six
from oslo_concurrency.fixture import lockutils as fixtures
from oslo_concurrency import lockutils
from oslo_config import fixture as config
if sys.platform == 'win32':
import msvcrt
else:
import fcntl
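# Helpers for taking/releasing a non-blocking exclusive lock on an open file:
# msvcrt.locking with LK_NBLCK and flock with LOCK_EX | LOCK_NB both raise
# IOError immediately if the lock is already held.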
def lock_file(handle):
if sys.platform == 'win32':
msvcrt.locking(handle.fileno(), msvcrt.LK_NBLCK, 1)
else:
fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock_file(handle):
if sys.platform == 'win32':
msvcrt.locking(handle.fileno(), msvcrt.LK_UNLCK, 1)
else:
fcntl.flock(handle, fcntl.LOCK_UN)
def lock_files(handles_dir, out_queue):
with lockutils.lock('external', 'test-', external=True):
# Open some files we can use for locking
handles = []
for n in range(50):
path = os.path.join(handles_dir, ('file-%s' % n))
handles.append(open(path, 'w'))
# Loop over all the handles and try locking the file
# without blocking, keep a count of how many files we
# were able to lock and then unlock. If the lock fails
# we get an IOError and bail out with bad exit code
count = 0
for handle in handles:
try:
lock_file(handle)
count += 1
unlock_file(handle)
except IOError:
os._exit(2)
finally:
handle.close()
return out_queue.put(count)
class LockTestCase(test_base.BaseTestCase):
def setUp(self):
super(LockTestCase, self).setUp()
self.config = self.useFixture(config.Config(lockutils.CONF)).config
def test_synchronized_wrapped_function_metadata(self):
@lockutils.synchronized('whatever', 'test-')
def foo():
"""Bar."""
pass
self.assertEqual('Bar.', foo.__doc__, "Wrapped function's docstring "
"got lost")
self.assertEqual('foo', foo.__name__, "Wrapped function's name "
"got mangled")
def test_lock_internally_different_collections(self):
s1 = lockutils.Semaphores()
s2 = lockutils.Semaphores()
trigger = threading.Event()
who_ran = collections.deque()
def f(name, semaphores, pull_trigger):
with lockutils.internal_lock('testing', semaphores=semaphores):
if pull_trigger:
trigger.set()
else:
trigger.wait()
who_ran.append(name)
threads = [
threading.Thread(target=f, args=(1, s1, True)),
threading.Thread(target=f, args=(2, s2, False)),
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertEqual([1, 2], sorted(who_ran))
def test_lock_internally(self):
"""We can lock across multiple threads."""
saved_sem_num = len(lockutils._semaphores)
seen_threads = list()
def f(_id):
with lockutils.lock('testlock2', 'test-', external=False):
for x in range(10):
seen_threads.append(_id)
threads = []
for i in range(10):
thread = threading.Thread(target=f, args=(i,))
threads.append(thread)
thread.start()
for thread in threads:
thread.join()
self.assertEqual(100, len(seen_threads))
# Looking at the seen threads, split it into chunks of 10, and verify
# that the last 9 match the first in each chunk.
for i in range(10):
for j in range(9):
self.assertEqual(seen_threads[i * 10],
seen_threads[i * 10 + 1 + j])
self.assertEqual(saved_sem_num, len(lockutils._semaphores),
"Semaphore leak detected")
def test_lock_internal_fair(self):
"""Check that we're actually fair."""
def f(_id):
with lockutils.lock('testlock', 'test-',
external=False, fair=True):
lock_holder.append(_id)
lock_holder = []
threads = []
# While holding the fair lock, spawn a bunch of threads that all try
# to acquire the lock. They will all block. Then release the lock
# and see what happens.
with lockutils.lock('testlock', 'test-', external=False, fair=True):
for i in range(10):
thread = threading.Thread(target=f, args=(i,))
threads.append(thread)
thread.start()
# Allow some time for the new thread to get queued onto the
# list of pending writers before continuing. This is gross
# but there's no way around it without using knowledge of
# fasteners internals.
time.sleep(0.5)
# Wait for all threads.
for thread in threads:
thread.join()
self.assertEqual(10, len(lock_holder))
# Check that the threads each got the lock in fair order.
for i in range(10):
self.assertEqual(i, lock_holder[i])
def test_fair_lock_with_semaphore(self):
def do_test():
s = lockutils.Semaphores()
with lockutils.lock('testlock', 'test-', semaphores=s, fair=True):
pass
self.assertRaises(NotImplementedError, do_test)
def test_nested_synchronized_external_works(self):
"""We can nest external syncs."""
tempdir = tempfile.mkdtemp()
try:
self.config(lock_path=tempdir, group='oslo_concurrency')
sentinel = object()
@lockutils.synchronized('testlock1', 'test-', external=True)
def outer_lock():
@lockutils.synchronized('testlock2', 'test-', external=True)
def inner_lock():
return sentinel
return inner_lock()
self.assertEqual(sentinel, outer_lock())
finally:
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
def _do_test_lock_externally(self):
"""We can lock across multiple processes."""
handles_dir = tempfile.mkdtemp()
try:
children = []
for n in range(50):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(
target=lock_files,
args=(handles_dir, queue))
proc.start()
children.append((proc, queue))
for child, queue in children:
child.join()
count = queue.get(block=False)
self.assertEqual(50, count)
finally:
if os.path.exists(handles_dir):
shutil.rmtree(handles_dir, ignore_errors=True)
def test_lock_externally(self):
lock_dir = tempfile.mkdtemp()
self.config(lock_path=lock_dir, group='oslo_concurrency')
try:
self._do_test_lock_externally()
finally:
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
def test_lock_externally_lock_dir_not_exist(self):
lock_dir = tempfile.mkdtemp()
os.rmdir(lock_dir)
self.config(lock_path=lock_dir, group='oslo_concurrency')
try:
self._do_test_lock_externally()
finally:
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
def test_synchronized_with_prefix(self):
lock_name = 'mylock'
lock_pfix = 'mypfix-'
foo = lockutils.synchronized_with_prefix(lock_pfix)
@foo(lock_name, external=True)
def bar(dirpath, pfix, name):
return True
lock_dir = tempfile.mkdtemp()
self.config(lock_path=lock_dir, group='oslo_concurrency')
self.assertTrue(bar(lock_dir, lock_pfix, lock_name))
def test_synchronized_without_prefix(self):
lock_dir = tempfile.mkdtemp()
self.config(lock_path=lock_dir, group='oslo_concurrency')
@lockutils.synchronized('lock', external=True)
def test_without_prefix():
# We can't check much
pass
try:
test_without_prefix()
finally:
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
    def test_synchronized_prefix_without_hyphen(self):
        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir, group='oslo_concurrency')
        @lockutils.synchronized('lock', 'hyphen', True)
        def test_without_hyphen():
            # We can't check much
            pass
        try:
            test_without_hyphen()
finally:
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
def test_contextlock(self):
lock_dir = tempfile.mkdtemp()
self.config(lock_path=lock_dir, group='oslo_concurrency')
try:
# Note(flaper87): Lock is not external, which means
# a semaphore will be yielded
with lockutils.lock("test") as sem:
if six.PY2:
self.assertIsInstance(sem, threading._Semaphore)
else:
self.assertIsInstance(sem, threading.Semaphore)
# NOTE(flaper87): Lock is external so an InterProcessLock
# will be yielded.
with lockutils.lock("test2", external=True) as lock:
self.assertTrue(lock.exists())
with lockutils.lock("test1",
external=True) as lock1:
self.assertIsInstance(lock1,
lockutils.InterProcessLock)
finally:
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
def test_contextlock_unlocks(self):
lock_dir = tempfile.mkdtemp()
self.config(lock_path=lock_dir, group='oslo_concurrency')
sem = None
try:
with lockutils.lock("test") as sem:
if six.PY2:
self.assertIsInstance(sem, threading._Semaphore)
else:
self.assertIsInstance(sem, threading.Semaphore)
with lockutils.lock("test2", external=True) as lock:
self.assertTrue(lock.exists())
# NOTE(flaper87): Lock should be free
with lockutils.lock("test2", external=True) as lock:
self.assertTrue(lock.exists())
# NOTE(flaper87): Lock should be free
# but semaphore should already exist.
with lockutils.lock("test") as sem2:
self.assertEqual(sem, sem2)
finally:
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
def _test_remove_lock_external_file(self, lock_dir, use_external=False):
lock_name = 'mylock'
lock_pfix = 'mypfix-remove-lock-test-'
if use_external:
lock_path = lock_dir
else:
lock_path = None
lockutils.remove_external_lock_file(lock_name, lock_pfix, lock_path)
for ent in os.listdir(lock_dir):
            # str.startswith never raises OSError, so asserting that it does can
            # never pass; the intent is that no prefixed lock files remain.
            self.assertFalse(ent.startswith(lock_pfix))
if os.path.exists(lock_dir):
shutil.rmtree(lock_dir, ignore_errors=True)
def test_remove_lock_external_file(self):
lock_dir = tempfile.mkdtemp()
self.config(lock_path=lock_dir, group='oslo_concurrency')
self._test_remove_lock_external_file(lock_dir)
def test_remove_lock_external_file_lock_path(self):
lock_dir = tempfile.mkdtemp()
self._test_remove_lock_external_file(lock_dir,
use_external=True)
def test_no_slash_in_b64(self):
# base64(sha1(foobar)) has a slash in it
with lockutils.lock("foobar"):
pass
def test_deprecated_names(self):
paths = self.create_tempfiles([['fake.conf', '\n'.join([
'[DEFAULT]',
'lock_path=foo',
'disable_process_locking=True'])
]])
conf = cfg.ConfigOpts()
conf(['--config-file', paths[0]])
conf.register_opts(lockutils._opts, 'oslo_concurrency')
self.assertEqual('foo', conf.oslo_concurrency.lock_path)
self.assertTrue(conf.oslo_concurrency.disable_process_locking)
class FileBasedLockingTestCase(test_base.BaseTestCase):
def setUp(self):
super(FileBasedLockingTestCase, self).setUp()
self.lock_dir = tempfile.mkdtemp()
def test_lock_file_exists(self):
lock_file = os.path.join(self.lock_dir, 'lock-file')
@lockutils.synchronized('lock-file', external=True,
lock_path=self.lock_dir)
def foo():
self.assertTrue(os.path.exists(lock_file))
foo()
def test_interprocess_lock(self):
lock_file = os.path.join(self.lock_dir, 'processlock')
pid = os.fork()
if pid:
# Make sure the child grabs the lock first
start = time.time()
while not os.path.exists(lock_file):
if time.time() - start > 5:
self.fail('Timed out waiting for child to grab lock')
time.sleep(0)
lock1 = lockutils.InterProcessLock('foo')
lock1.lockfile = open(lock_file, 'w')
# NOTE(bnemec): There is a brief window between when the lock file
# is created and when it actually becomes locked. If we happen to
# context switch in that window we may succeed in locking the
# file. Keep retrying until we either get the expected exception
# or timeout waiting.
while time.time() - start < 5:
try:
lock1.trylock()
lock1.unlock()
time.sleep(0)
except IOError:
# This is what we expect to happen
break
else:
self.fail('Never caught expected lock exception')
# We don't need to wait for the full sleep in the child here
os.kill(pid, signal.SIGKILL)
else:
try:
lock2 = lockutils.InterProcessLock('foo')
lock2.lockfile = open(lock_file, 'w')
have_lock = False
while not have_lock:
try:
lock2.trylock()
have_lock = True
except IOError:
pass
finally:
# NOTE(bnemec): This is racy, but I don't want to add any
# synchronization primitives that might mask a problem
# with the one we're trying to test here.
time.sleep(.5)
os._exit(0)
def test_interthread_external_lock(self):
call_list = []
@lockutils.synchronized('foo', external=True, lock_path=self.lock_dir)
def foo(param):
"""Simulate a long-running threaded operation."""
call_list.append(param)
# NOTE(bnemec): This is racy, but I don't want to add any
# synchronization primitives that might mask a problem
# with the one we're trying to test here.
time.sleep(.5)
call_list.append(param)
def other(param):
foo(param)
thread = threading.Thread(target=other, args=('other',))
thread.start()
# Make sure the other thread grabs the lock
# NOTE(bnemec): File locks do not actually work between threads, so
# this test is verifying that the local semaphore is still enforcing
# external locks in that case. This means this test does not have
# the same race problem as the process test above because when the
# file is created the semaphore has already been grabbed.
start = time.time()
while not os.path.exists(os.path.join(self.lock_dir, 'foo')):
if time.time() - start > 5:
self.fail('Timed out waiting for thread to grab lock')
time.sleep(0)
thread1 = threading.Thread(target=other, args=('main',))
thread1.start()
thread1.join()
thread.join()
self.assertEqual(['other', 'other', 'main', 'main'], call_list)
def test_non_destructive(self):
lock_file = os.path.join(self.lock_dir, 'not-destroyed')
with open(lock_file, 'w') as f:
f.write('test')
with lockutils.lock('not-destroyed', external=True,
lock_path=self.lock_dir):
with open(lock_file) as f:
self.assertEqual('test', f.read())
class LockutilsModuleTestCase(test_base.BaseTestCase):
def setUp(self):
super(LockutilsModuleTestCase, self).setUp()
self.old_env = os.environ.get('OSLO_LOCK_PATH')
if self.old_env is not None:
del os.environ['OSLO_LOCK_PATH']
def tearDown(self):
if self.old_env is not None:
os.environ['OSLO_LOCK_PATH'] = self.old_env
super(LockutilsModuleTestCase, self).tearDown()
def test_main(self):
script = '\n'.join([
'import os',
'lock_path = os.environ.get("OSLO_LOCK_PATH")',
'assert lock_path is not None',
'assert os.path.isdir(lock_path)',
])
argv = ['', sys.executable, '-c', script]
retval = lockutils._lock_wrapper(argv)
self.assertEqual(0, retval, "Bad OSLO_LOCK_PATH has been set")
def test_return_value_maintained(self):
script = '\n'.join([
'import sys',
'sys.exit(1)',
])
argv = ['', sys.executable, '-c', script]
retval = lockutils._lock_wrapper(argv)
self.assertEqual(1, retval)
def test_direct_call_explodes(self):
cmd = [sys.executable, '-m', 'oslo_concurrency.lockutils']
with open(os.devnull, 'w') as devnull:
retval = subprocess.call(cmd, stderr=devnull)
self.assertEqual(1, retval)
class TestLockFixture(test_base.BaseTestCase):
def setUp(self):
super(TestLockFixture, self).setUp()
self.config = self.useFixture(config.Config(lockutils.CONF)).config
self.tempdir = tempfile.mkdtemp()
def _check_in_lock(self):
self.assertTrue(self.lock.exists())
def tearDown(self):
self._check_in_lock()
super(TestLockFixture, self).tearDown()
def test_lock_fixture(self):
# Setup lock fixture to test that teardown is inside the lock
self.config(lock_path=self.tempdir, group='oslo_concurrency')
fixture = fixtures.LockFixture('test-lock')
self.useFixture(fixture)
self.lock = fixture.lock
class TestGetLockPath(test_base.BaseTestCase):
def setUp(self):
super(TestGetLockPath, self).setUp()
self.conf = self.useFixture(config.Config(lockutils.CONF)).conf
def test_get_default(self):
lockutils.set_defaults(lock_path='/the/path')
self.assertEqual('/the/path', lockutils.get_lock_path(self.conf))
def test_get_override(self):
lockutils._register_opts(self.conf)
self.conf.set_override('lock_path', '/alternate/path',
group='oslo_concurrency')
self.assertEqual('/alternate/path', lockutils.get_lock_path(self.conf))
|
nilq/baby-python
|
python
|
from urlparse import urlparse, urlunparse
import re
from bs4 import BeautifulSoup
from .base import BaseCrawler
from ...models import Author, AuthorType
class CitizenCrawler(BaseCrawler):
    TL_RE = re.compile(r'(www\.)?citizen\.co\.za')
def offer(self, url):
""" Can this crawler process this URL? """
parts = urlparse(url)
return bool(self.TL_RE.match(parts.netloc))
def canonicalise_url(self, url):
""" Strip anchors, etc. """
url = super(CitizenCrawler, self).canonicalise_url(url)
parts = urlparse(url)
# force http, strip www, enforce trailing slash
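        # e.g. a hypothetical https://www.citizen.co.za/news/story#top
        # canonicalises to http://citizen.co.za/news/story/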
path = parts.path
if not path.endswith('/'):
path = path + '/'
return urlunparse(['http', 'citizen.co.za', path, parts.params, None, None])
def extract(self, doc, raw_html):
""" Extract text and other things from the raw_html for this document. """
super(CitizenCrawler, self).extract(doc, raw_html)
soup = BeautifulSoup(raw_html)
doc.title = self.extract_plaintext(soup.select(".post h1"))
doc.summary = self.extract_plaintext(soup.select(".post .single-excerpt"))
doc.text = doc.summary + "\n\n" + "\n\n".join(p.text for p in soup.select(".post .single-content > p"))
doc.published_at = self.parse_timestamp(self.extract_plaintext(soup.select(".post .single-date")))
author = self.extract_plaintext(soup.select(".post .single-byline"))
if author:
doc.author = Author.get_or_create(author, AuthorType.journalist())
else:
doc.author = Author.unknown()
|
nilq/baby-python
|
python
|
import sys
sys.path.insert(1, "../")
import pickle
from story_environment_neuro import *
from decode_redo_pipeline_top_p_multi import Decoder
import numpy as np
from data_utils import *
import argparse
from memoryGraph_scifi2 import MemoryGraph
import datetime
from semantic_fillIn_class_offloaded_vn34 import FillIn
from aster_utils import *
from BERT_fill_class import *
models = DataHolder(model_name="scifi")
parser = argparse.ArgumentParser()
parser.add_argument(
"--config",
help="path to json config",
required=True
)
args = parser.parse_args()
config_filepath = args.config
config = read_config(config_filepath)
env = storyEnv(config['data']['verbose'])
h = None
c = None
word2id, id2word = read_vocab(config['data']['vocab_file'])
seq2seq_model = Decoder(config_path=config_filepath, top_n=config['data']['top_n'])
src_data = read_bucket_data(word2id, id2word,
src = config['data']['test_src'],
config = None
)
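# Each test line is "<seed event> ||| ..."; keep only the seed-event part.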
seeds = [x.split("|||")[0] for x in open("../data/bucketed_events_test_nameFix.txt", 'r').readlines()]
fillObject = FillIn(models, verbose=config['data']['verbose'], remove_transitive=True)
#test verbs
verbs = ["fill-9.8", "suspect-81", "keep-15.2", "throw-17.1"]
######################################
def cleanBERT(string):
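    """Strip BERT artifacts ([UNK] tokens, wordpiece '#' markers) and tidy spacing and punctuation."""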
while "[UNK]" in string:
string = string.replace("[UNK]","")
while "# " in string:
string = string.replace("# ","#")
while " #" in string:
string = string.replace(" #","#")
while "#" in string:
string = string.replace("#","")
while " ," in string:
string = string.replace(" ,",",")
while " ." in string:
string = string.replace(" .",".")
while " " in string:
string = string.replace(" "," ")
string = string.strip()
return string
def read_vocab(file_path):
vocab = [word for word in pickle.load(open(file_path, 'rb'))]
word2id = {}
id2word = {}
for ind, word in enumerate(vocab):
word2id[word] = ind
id2word[ind] = word
return word2id, id2word
def printPred(pred):
x = ""
if pred[0] == True:
x = "not "
print("<fact>"+x+rep(pred[1])+"("+rep(",".join(pred[2:]))+") </fact>")
def printState(state):
print("<state>")
print("<!--Current story world state-->")
state_keys = list(state.keys())
state_keys.sort()
for entity in state_keys:
if state[entity]:
print("<entity>")
print("<name>"+rep(entity)+"</name>")
print("<facts>")
for fact in state[entity]:
if not type(fact) == str:
printPred(fact)
else:
print("<fact>"+rep(fact)+"</fact>")
print("</facts>")
print("</entity>")
print("</state>")
def prepToPrint(event, memory):
event = swapLastParams(event)
unfilled = rep(str(event))
filled, memory = fillObject.fillEvent(event, memory)
return unfilled, filled, memory
def getAction(config, env, results, memory, history):
if config['model']['causal'] == True:
pruned_candidates = env.validate(results, memory, history, models, config['model']['forced_frame'])
#print(f"NUM AFTER PRUNED (MAX {config['data']['top_n']}): {len(pruned_candidates)}")
print(f"<numValidCandidates> {len(pruned_candidates)} out of {len(results)} possible </numValidCandidates>")
if len(pruned_candidates) == 0:
print("</step>\n</story>")
print("No more candidate events!")
env.reset()
return None, env
action = pruned_candidates[0] #this is a (event, manipulateState object) tuple
#TODO: should env come from pruned_candidates?
next_state = env.step(action)
printState(next_state)
return action, env
else:
if config['model']["original_mode"] == True:
pruned_candidates = env.onlyFillPronouns(results, memory, history)
else:
pruned_candidates = env.nonCausal_validate(results, memory, history)
if len(pruned_candidates) == 0:
print("</step>\n</story>")
print("No more candidate events!")
env.reset()
return None, env
print(f"<numValidCandidates> {len(pruned_candidates)} out of {len(results)} possible </numValidCandidates>")
action = pruned_candidates[0]
return action, env
def getSentence(filled_event, event, sentence_memory):
#clean before BERT
final_event = []
for i, param in enumerate(filled_event):
if i != 1 and isVerbNet(event[i]):
final_event += ["to",param]
elif "EmptyParameter" in param:
continue
else:
final_event += [param]
#E2S
max_masks = 3
sentence = BERT_fill(sentence_memory, final_event, max_masks)
if sentence:
sentence = cleanBERT(" ".join(sentence))
sentence = sentence.strip()
while " " in sentence:
sentence = sentence.replace(" "," ")
print("SENTENCE",sentence)
return sentence
else:
return ""
######################################
print('<?xml version="1.0" encoding="UTF-8" ?>')
print(f"<!--{datetime.date.today()}-->")
print("<!--**Version Information**-->")
print(f"<!--CAUSAL: {config['model']['causal']}-->")
if config['model']['causal']:
print(f"<!--FORCE FIND VERBNET FRAME: {config['model']['forced_frame']}-->")
print(f"<!--VERB RESAMPLING (for DRL): {config['model']['forced_frame']}-->")
else:
print(f"<!--PROPERLY FORMATED EVENTS ONLY: {not config['model']['original_mode']}-->")
print("<!--#########################-->")
print("<!--**About**-->")
print("<!--Log file for ASTER story generator system. Each story has a number of steps. In each step, the system goes through a set of candidate events, determining if each is valid and giving reasons why or why not it is. Out of the valid events, the system selects one.-->")
for j, event in enumerate(seeds):
env.reset()
event = event.split(" ")
print("<story>")
print("<!--A new story-->")
memory = MemoryGraph(models)
print("<step id=\""+str(0)+"\">")
action, env = getAction(config, env, [event], memory, [])
if not action:
print("<error> Start event cannot be properly added to state </error>\n</story>")
continue
if type(action) == tuple:
event = action[0]
else:
event = action
print_event, filled_event, memory = prepToPrint(copy.deepcopy(event), memory)
print("<startingEvent>\n<!--The user-given event to start the story-->\n"+print_event+ "</startingEvent>")
print("<filledEvent>\n<!--An example of the event, randomly filled with real words-->\n"+str(filled_event)+"</filledEvent>")
memory.add_event(event)
history = [event]
print("</step>")
print_history = [filled_event]
sentence = getSentence(filled_event, event, [])
if not sentence:
print("<error> Can't turn event "+str(filled_event)+" into a sentence. </error>\n</story>")
continue
sentence_memory = [sentence]
#####Generate Events#####
for i in range(0,5): #length of story
#run through seq2seq/seq2seq2seq to get next distribution of events
print("<step id=\""+str(i+1)+"\">")
print("<!--Going through candidate events to find the next event in the story-->")
results, h, c = seq2seq_model.pipeline_predict([event], h, c, start=True)
#find a consistent one
action, env = getAction(config, env, results, memory, history)
if not action:
print("</step>\n<final_story>"+str(print_history)+"</final_story>\n</story>")
break
if type(action) == tuple:
event = action[0]
else:
event = action
memory.add_event(event)
history.append(event)
print_event, filled_event, memory = prepToPrint(copy.deepcopy(event), memory)
print("<selectedEvent>"+print_event+ "</selectedEvent>")
print("<filledEvent>\n<!--An example of the event, randomly filled with real words-->\n"+str(filled_event)+"</filledEvent>")
print_history.append(filled_event)
print("<story_so_far>"+str(print_history)+"</story_so_far>")
sentence = getSentence(filled_event, event, sentence_memory)
if not sentence:
print("<error> Can't turn event "+str(filled_event)+" into a sentence. </error>")
break
sentence_memory.append(sentence)
print("</step>")
print("<final_story>"+str(sentence_memory)+"</final_story>")
print("</story>")
|
nilq/baby-python
|
python
|
usrLvl = int(input("What level are you right now (1-50)?"))
usrXP = int(input("What is your XP count right now?"))
usrPrs = int(input("What prestige are you right now (0-10)?"))
usr20 = str(input("Are you a Kamado (write \"y\" or \"n\")?"))
usr10 = str(input("Are you a Tokito or Ubuyashiki (write \"y\" or \"n\")?"))
xpMod = int(input("What is the current XP modifier (1 for default)?"))
# Cumulative XP needed to reach each level; reaching level 50 takes 142058 XP
# in total. NOTE: the original lookup chain has no entry for level 30.
cumulativeXP = {
    1: 0, 2: 132, 3: 271, 4: 421, 5: 587, 6: 773, 7: 985, 8: 1228,
    9: 1508, 10: 1831, 11: 2204, 12: 2633, 13: 3125, 14: 3687, 15: 4327,
    16: 5051, 17: 5868, 18: 6785, 19: 7810, 20: 8951, 21: 10216, 22: 11614,
    23: 13152, 24: 14840, 25: 16686, 26: 18700, 27: 20890, 28: 23265,
    29: 25834, 31: 28607, 32: 31593, 33: 34802, 34: 38243, 35: 41926,
    36: 50057, 37: 54525, 38: 59275, 39: 64317, 40: 69661, 41: 75318,
    42: 81298, 43: 87612, 44: 94270, 45: 101283, 46: 108662, 47: 116418,
    48: 124562, 49: 133105, 50: 142058,
}
if usrLvl > 50:
    baseXP = 0
    print("You are already higher than level 50, you don't need any more experience to prestige.")
elif usrLvl < 1:
    print("You entered an impossible level.")
    baseXP = None
else:
    baseXP = 142058 - cumulativeXP[usrLvl]
if(usr20=="y"):
familyXP = 0.2
elif(usr10=="y"):
familyXP = 0.1
else:
familyXP = 0.0
rqrXP = ((baseXP-usrXP)/((baseXP-usrXP)*(xpMod + familyXP+(usrPrs*0.2+1)))*(baseXP-usrXP))
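# Algebraically, rqrXP reduces to (baseXP - usrXP) / (xpMod + familyXP + usrPrs*0.2 + 1).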
displayXP = int(rqrXP)
print("You need " + str(displayXP) + "EXP in order to get to level 50.")
print("You need to do " + str((rqrXP//21000) + (0 < rqrXP%21000)) + " (" + str(round((rqrXP/21000),2)) + ")" + " infinity castles (assuming you beat all bosses and grip no demons) to get to level 50.")
print("You need to do " + str((rqrXP//6000) + (0 < rqrXP%6000)) + " (" + str(round((rqrXP/6000),2)) + ")" + " Kaigakus in order to get to level 50.")
print("You need to do " + str((rqrXP//300) + (0 < rqrXP%300)) + " (" + str(round((rqrXP/300),2)) + ")" + " Zenitsus in order to get to level 50.")
|
nilq/baby-python
|
python
|
import math
import os
import pytest
import torch
from tests import _PATH_DATA
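# The processed .pt files are assumed to hold lists of 64-sample batches,
# hence the expected length of ceil(n_examples / batch_size) below.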
@pytest.mark.skipif(not os.path.exists(_PATH_DATA), reason="Data files not found")
def test_load_traindata():
dataset = torch.load(f"{_PATH_DATA}/processed/train.pt")
assert len(dataset) == math.ceil(25000 / 64)
@pytest.mark.skipif(not os.path.exists(_PATH_DATA), reason="Data files not found")
def test_load_testdata():
dataset = torch.load(f"{_PATH_DATA}/processed/test.pt")
assert len(dataset) == math.ceil(5000 / 64)
|
nilq/baby-python
|
python
|
"""
====================
Fetching Evaluations
====================
Evaluations contain a concise summary of the results of all runs made. Each evaluation
provides information on the dataset used, the flow applied, the setup used, the metric
evaluated, and the result obtained on the metric, for each such run made. These collections
of results can be used for efficient benchmarking of an algorithm and also allow transparent
reuse of results from previous experiments on similar parameters.
In this example, we shall do the following:
* Retrieve evaluations based on different metrics
* Fetch evaluations pertaining to a specific task
* Sort the obtained results in descending order of the metric
* Plot a cumulative distribution function for the evaluations
* Compare the top 10 performing flows based on the evaluation performance
* Retrieve evaluations with hyperparameter settings
"""
############################################################################
import openml
############################################################################
# Listing evaluations
# *******************
# Evaluations can be retrieved from the database in the chosen output format.
# Required filters can be applied to retrieve results from runs as required.
# We shall retrieve a small set (only 10 entries) to test the listing function for evaluations
openml.evaluations.list_evaluations(function='predictive_accuracy', size=10,
output_format='dataframe')
# Using other evaluation metrics, 'precision' in this case
evals = openml.evaluations.list_evaluations(function='precision', size=10,
output_format='dataframe')
# Querying the returned results for precision above 0.98
print(evals[evals.value > 0.98])
#############################################################################
# Viewing a sample task
# =====================
# Over here we shall briefly take a look at the details of the task.
# We will start by displaying a simple *supervised classification* task:
task_id = 167140 # https://www.openml.org/t/167140
task = openml.tasks.get_task(task_id)
print(task)
#############################################################################
# Obtaining all the evaluations for the task
# ==========================================
# We'll now obtain all the evaluations that were uploaded for the task
# we displayed previously.
# Note that we now filter the evaluations based on another parameter 'task'.
metric = 'predictive_accuracy'
evals = openml.evaluations.list_evaluations(function=metric, task=[task_id],
output_format='dataframe')
# Displaying the first 10 rows
print(evals.head(n=10))
# Sorting the evaluations in decreasing order of the metric chosen
evals = evals.sort_values(by='value', ascending=False)
print("\nDisplaying head of sorted dataframe: ")
print(evals.head())
#############################################################################
# Obtaining CDF of metric for chosen task
# ***************************************
# We shall now analyse how the performance of various flows have been on this task,
# by seeing the likelihood of the accuracy obtained across all runs.
# We shall now plot a cumulative distributive function (CDF) for the accuracies obtained.
from matplotlib import pyplot as plt
def plot_cdf(values, metric='predictive_accuracy'):
max_val = max(values)
n, bins, patches = plt.hist(values, density=True, histtype='step',
cumulative=True, linewidth=3)
patches[0].set_xy(patches[0].get_xy()[:-1])
plt.xlim(max(0, min(values) - 0.1), 1)
plt.title('CDF')
plt.xlabel(metric)
plt.ylabel('Likelihood')
plt.grid(b=True, which='major', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', linestyle='--')
plt.axvline(max_val, linestyle='--', color='gray')
plt.text(max_val, 0, "%.3f" % max_val, fontsize=9)
plt.show()
plot_cdf(evals.value, metric)
# This CDF plot shows that, for the given task, every accuracy value observed
# across the uploaded runs lies above 52%, while the maximum accuracy seen so
# far is 96.5%.
#############################################################################
# Comparing top 10 performing flows
# *********************************
# Let us now try to see which flows generally performed the best for this task.
# For this, we shall compare the top performing flows.
import numpy as np
import pandas as pd
def plot_flow_compare(evaluations, top_n=10, metric='predictive_accuracy'):
# Collecting the top 10 performing unique flow_id
flow_ids = evaluations.flow_id.unique()[:top_n]
df = pd.DataFrame()
# Creating a data frame containing only the metric values of the selected flows
# assuming evaluations is sorted in decreasing order of metric
for i in range(len(flow_ids)):
flow_values = evaluations[evaluations.flow_id == flow_ids[i]].value
df = pd.concat([df, flow_values], ignore_index=True, axis=1)
fig, axs = plt.subplots()
df.boxplot()
axs.set_title('Boxplot comparing ' + metric + ' for different flows')
axs.set_ylabel(metric)
axs.set_xlabel('Flow ID')
axs.set_xticklabels(flow_ids)
axs.grid(which='major', linestyle='-', linewidth='0.5', color='gray', axis='y')
axs.minorticks_on()
axs.grid(which='minor', linestyle='--', linewidth='0.5', color='gray', axis='y')
# Counting the number of entries for each flow in the data frame
# which gives the number of runs for each flow
flow_freq = list(df.count(axis=0, numeric_only=True))
for i in range(len(flow_ids)):
axs.text(i + 1.05, np.nanmin(df.values), str(flow_freq[i]) + '\nrun(s)', fontsize=7)
plt.show()
plot_flow_compare(evals, metric=metric, top_n=10)
# The boxplots show how the flows perform across multiple runs on the chosen
# task. The green horizontal lines represent the median accuracy of all the runs
# for that flow (the number of runs is denoted at the bottom of the boxplots).
# The higher the green line, the better the flow is for the task at hand. The
# flows are ordered in descending order of the highest accuracy value seen
# under that flow.
# Printing the corresponding flow names for the top 10 performing flow IDs
top_n = 10
flow_ids = evals.flow_id.unique()[:top_n]
flow_names = evals.flow_name.unique()[:top_n]
for i in range(top_n):
print((flow_ids[i], flow_names[i]))
#############################################################################
# Obtaining evaluations with hyperparameter settings
# ==================================================
# We'll now obtain the evaluations of a task and a flow together with the
# hyperparameter settings used in each run.
# List evaluations in descending order based on predictive_accuracy with
# hyperparameters
evals_setups = openml.evaluations.list_evaluations_setups(function='predictive_accuracy', task=[31],
size=100, sort_order='desc')
""
print(evals_setups.head())
""
# Return evaluations for flow_id in descending order based on predictive_accuracy
# with hyperparameters. parameters_in_separate_columns returns parameters in
# separate columns
evals_setups = openml.evaluations.list_evaluations_setups(function='predictive_accuracy',
flow=[6767],
size=100,
parameters_in_separate_columns=True)
""
print(evals_setups.head(10))
""
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import argparse
def plot_err_cdf(data1, data2):
"""draw cdf to see the error between without/with freeze"""
err_data = []
for x, y in zip(data1, data2):
err_data.append(abs(x-y))
sorted_err_data = np.sort(err_data)
cdf_err = np.arange(len(sorted_err_data)) / float(len(sorted_err_data))
avg = np.mean(data1)
variance = []
for x in data1:
variance.append(abs(x-avg))
num_bins = 5000
counts, bin_edges = np.histogram(variance, bins=num_bins)
cdf_variance = np.cumsum(counts) / float(len(variance))
xlim_min = min(min(err_data), min(variance))
xlim_max = max(max(err_data), max(variance))
plt.figure()
# plt.xlim(0.99 * xlim_min, 1.02 * xlim_max)
# plt.xlim(0, 5000)
p1 = plt.plot(sorted_err_data, cdf_err, 'b', label=label_err)
p2 = plt.plot(bin_edges[1:], cdf_variance, 'r', label='Variance of GTOD')
plt.legend() #(loc='lower right')
plt.xlabel('Time Error (Milliseconds)', fontsize=20)
plt.ylabel('Cumulative Distribution', fontsize=20)
plt.grid(True)
# plt.show()
plt.savefig('err_%s_cdf.eps' % topic_name, format='eps')
def plot_compare_cdf(data1, data2):
"""draw cdf to compare without/with freeze elapsed time"""
num_bins = 5000
counts1, bin_edges1 = np.histogram(data1, bins=num_bins)
cdf1 = np.cumsum(counts1) / float(len(data1))
counts2, bin_edges2 = np.histogram(data2, bins=num_bins)
cdf2 = np.cumsum(counts2) / float(len(data2))
xlim_min = min(min(data1), min(data2))
xlim_max = max(max(data1), max(data2))
plt.figure()
# plt.xlim(0.98 * xlim_min, 1.02 * xlim_max)
# plt.xlim(65000, 70000)
p1 = plt.plot(bin_edges1[1:], cdf1, 'b', label=label1)
p2 = plt.plot(bin_edges2[1:], cdf2, 'r', label=label2)
plt.legend() #(loc='lower right')
plt.xlabel('PING RTT / Milliseconds', fontsize=20)
plt.ylabel('Cumulative Distribution', fontsize=20)
plt.grid(True)
# plt.show()
plt.savefig('cmp_%s_cdf.eps' % topic_name, format='eps')
def plot_variance_cdf(data):
avg = np.mean(data)
variance = []
for x in data:
variance.append(abs(x-avg))
num_bins = 10
counts, bin_edges = np.histogram(variance, bins=num_bins)
cdf = np.cumsum(counts) / float(len(variance))
xlim_min = min(variance)
xlim_max = max(variance)
plt.figure()
plt.xlim(0.99 * xlim_min, 1.02 * xlim_max)
p = plt.plot(bin_edges[1:], cdf, 'b', label='Variance of GTOD')
plt.legend(loc='lower right')
plt.xlabel('Absolute Variance / Microseconds', fontsize=20)
plt.ylabel('Cumulative Distribution', fontsize=20)
plt.grid(True)
#plt.show()
plt.savefig('var_%s_cdf.eps' % topic_name, format='eps')
def main():
"""draw 2 cdf figures"""
data1 = np.loadtxt(bsl_file)
data2 = np.loadtxt(vir_file)
font = {'size':16}
matplotlib.rc('lines', lw=2)
matplotlib.rc('font', **font)
plot_compare_cdf(data1, data2)
plot_err_cdf(data1, data2)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--bsl_file', action='store')
parser.add_argument('-v', '--vir_file', action='store')
parser.add_argument('-d', '--dilation', action='store', default=1, type=int)
parser.add_argument('--label1', action='store')
parser.add_argument('--label2', action='store')
parser.add_argument('--label_err', action='store', default='Abs Error')
parser.add_argument('--topic_name', action='store')
results = parser.parse_args()
bsl_file = results.bsl_file
vir_file = results.vir_file
dilation = results.dilation
label1 = results.label1
label2 = results.label2
topic_name = results.topic_name
label_err = results.label_err
main()
|
nilq/baby-python
|
python
|
from datetime import timedelta
from django.test import TestCase
from django.core.exceptions import ValidationError
from django.utils import timezone
from url_shortener.links.models import Link
class LinkTest(TestCase):
def create_link(self, expires_at=None, short_url="asdf", full_url="https://google.com"):
return Link.objects.create(short_url=short_url, full_url=full_url, expires_at=expires_at)
def test_creation_of_link(self):
link = self.create_link()
assert isinstance(link, Link)
def test_has_expired(self):
yesterday = timezone.now() - timedelta(days=1)
link = self.create_link(expires_at=yesterday)
assert link.has_expired
tomorrow = timezone.now() + timedelta(days=1)
link_2 = self.create_link(expires_at=tomorrow, short_url="banan")
assert not link_2.has_expired
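# For reference, a minimal Link model satisfying these tests might look like
# the sketch below. This is a hypothetical reconstruction -- the real model
# lives in url_shortener.links.models and may differ.
#
#   class Link(models.Model):
#       short_url = models.CharField(max_length=32, unique=True)
#       full_url = models.URLField()
#       expires_at = models.DateTimeField(null=True, blank=True)
#
#       @property
#       def has_expired(self):
#           if self.expires_at is None:
#               return False  # a link with no expiry never expires
#           return self.expires_at < timezone.now()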
|
nilq/baby-python
|
python
|
import configparser
from datetime import datetime
import os
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, count, countDistinct
from pyspark.sql.types import DateType
def create_spark_session():
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
.getOrCreate()
return spark
def check_music_data(spark, datalake_bucket):
music_df = spark.read.parquet(os.path.join(datalake_bucket, 'music_table/*.parquet'))
if music_df.count() == 0:
raise AssertionError('Music table is empty.')
    if music_df.where(col("track_id").isNull()).count() > 0:
        raise AssertionError('Primary key cannot be null.')
def check_lyrics_data(spark, datalake_bucket):
lyrics_df = spark.read.parquet(os.path.join(datalake_bucket, 'lyrics_table/*.parquet'))
if lyrics_df.count() == 0:
raise AssertionError('Lyrics table is empty.')
    if lyrics_df.select(countDistinct("track_name")).first()[0] != lyrics_df.select(count("track_name")).first()[0]:
        raise AssertionError('Primary key should be unique.')
def check_track_data(spark, datalake_bucket):
track_df = spark.read.parquet(os.path.join(datalake_bucket, 'track_table/*.parquet'))
if track_df.count() == 0:
raise AssertionError('Track table is empty.')
    if dict(track_df.dtypes)['count_words'] != 'int':
        raise AssertionError('Data type mis-match.')
def check_song_data(spark, datalake_bucket):
song_df = spark.read.parquet(os.path.join(datalake_bucket, 'song_table/*.parquet'))
if song_df.count() == 0:
raise AssertionError('Song table is empty.')
def check_artists_data(spark, datalake_bucket):
artists_df = spark.read.parquet(os.path.join(datalake_bucket, 'artists_table/*.parquet'))
if artists_df.count() == 0:
raise AssertionError('Artists table is empty.')
def check_features_data(spark, datalake_bucket):
features_df = spark.read.parquet(os.path.join(datalake_bucket, 'features_table/*.parquet'))
if features_df.count() == 0:
        raise AssertionError('Features table is empty.')
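# The table checks above share one pattern; a generic helper along these
# lines (a hypothetical refactoring, not part of the original pipeline)
# could express the emptiness check once:
def check_table_not_empty(spark, datalake_bucket, table_name):
    """Raise if the given datalake table contains no rows."""
    df = spark.read.parquet(os.path.join(datalake_bucket, table_name + '/*.parquet'))
    if df.count() == 0:
        raise AssertionError(table_name + ' is empty.')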
def main():
if len(sys.argv) == 2:
datalake_bucket = sys.argv[1]
else:
config = configparser.ConfigParser()
config.read('../dl.cfg')
os.environ['AWS_ACCESS_KEY_ID'] = config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS']['AWS_SECRET_ACCESS_KEY']
datalake_bucket = 's3a://' + config['S3']['BIKESHARE_DATALAKE_BUCKET'] + '/'
spark = create_spark_session()
check_music_data(spark, datalake_bucket)
check_lyrics_data(spark, datalake_bucket)
check_track_data(spark, datalake_bucket)
check_song_data(spark, datalake_bucket)
    check_artists_data(spark, datalake_bucket)
    check_features_data(spark, datalake_bucket)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from src.loader.interface import ILoader
from src.loader.impl import DataLoader
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import sys, re
import fabric.docs
import fabric, simplejson, inspect, pprint
from lib import fabfile
action_dir = "./"
def generate_meta(fabfile):
for i in dir(fabfile):
action_meta = {}
fabtask = getattr(fabfile,i)
if isinstance(fabtask,fabric.tasks.WrappedCallableTask):
print "%s is a Fabric Callable Task..." % i
fabparams = getArgs(i,fabfile)
print "\n"
try:
action_meta['name'] = fabtask.wrapped.func_name
action_meta['description'] = fabtask.wrapped.func_doc
            except TypeError, e:
                print e
                continue
action_meta['entry_point'] = "fabaction.py"
action_meta['runner_type'] = "run-local-script"
action_meta['enabled'] = True
parameters = {}
parameters['kwarg_op'] = {"immutable": True, "type": "string", "default": ""}
parameters['user'] = {"immutable": True}
parameters['dir'] = {"immutable": True}
parameters["task"] = { "type": "string",
"description": "task name to be executed",
"immutable": True,
"default": fabtask.wrapped.func_name }
if fabparams:
parameters.update(fabparams)
action_meta['parameters'] = parameters
fname = action_dir + action_meta['name'] + ".json"
            try:
                print "Writing %s..." % fname
                fh = open(fname, 'w')
                fh.write(simplejson.dumps(action_meta, indent=2, sort_keys=True))
                fh.close()
            except:
                print "Could not write file %s" % fname
                continue
print "\n"
def getArgs(task, fabfile):
args = {}
sourcelines = inspect.getsourcelines(fabfile)[0]
for i, line in enumerate(sourcelines):
line = line.rstrip()
pattern = re.compile('def ' + task + '\(')
if pattern.search(line):
filtered = filter(None,re.split('\((.*)\):.*',line))
if len(filtered) < 2:
return None
argstring = filtered[1]
for arg in argstring.split(','):
if re.search('=',arg):
arg,v = arg.split('=')
if v == "''" or v == '""' or v == 'None':
value={"type":"string"}
else:
value={"type":"string","default":v.strip()}
else:
value={"type":"string"}
args[arg.strip()]=value
return args
generate_meta(fabfile)
|
nilq/baby-python
|
python
|
from spike import PrimeHub
hub = PrimeHub()
while True:
if hub.left_button.was_pressed():
print("Left button was Pressed")
elif hub.right_button.was_pressed():
print("Right button was Pressed")
|
nilq/baby-python
|
python
|
from distutils.core import setup
import glob, os
from osg_configure.version import __version__
def get_data_files():
"""
Generates a list of data files for packaging and locations where
they should be placed
"""
# create a list of test files
fileList = []
for root, subFolders, files in os.walk('tests'):
for name in files:
fileList.append(os.path.join(root, name))
temp = filter(lambda x: '.svn' not in x, fileList)
temp = filter(lambda x: not os.path.isdir(x), temp)
temp = map(lambda x: (x.replace('tests', '/usr/share/osg-configure/tests', 1), x),
temp)
file_mappings = {}
for (dest, source) in temp:
dest_dir = os.path.dirname(dest)
if dest_dir in file_mappings:
file_mappings[dest_dir].append(source)
else:
file_mappings[dest_dir] = [source]
data_file_list = []
for key in file_mappings:
data_file_list.append((key, file_mappings[key]))
# generate config file entries
data_file_list.append(('/etc/osg/config.d', glob.glob('config/*.ini')))
# add grid3-locations file
data_file_list.append(('/etc/osg/', ['data_files/grid3-locations.txt']))
return data_file_list
setup(name='osg-configure',
version=__version__,
description='Package for osg-configure and associated scripts',
author='Suchandra Thapa',
maintainer='Matyas Selmeci',
maintainer_email='matyas@cs.wisc.edu',
url='http://www.opensciencegrid.org',
packages=['osg_configure', 'osg_configure.modules', 'osg_configure.configure_modules'],
scripts=['scripts/osg-configure'],
data_files=get_data_files(),
classifiers=[
"Development Status :: 6 - Mature",
"Environment :: Console",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
],
platforms=["Linux"],
license="Apache Software License 2.0"
)
|
nilq/baby-python
|
python
|
import socket
from tkinter import *
# Python socket client by Jeferson Oliveira
# THIS IS THE CLIENT; THIS CODE SHOULD BE ADAPTED INTO THE PROJECT
HOST = 'ip'  # SERVER IP ADDRESS
PORT = 11000
tela = Tk()
def LerComando(comando):
    if comando == "b1":
        botao['text'] = "1"
def de():  # THIS FUNCTION TELLS THE SERVER THE CLIENT IS ONLINE; IT RUNS IN A LOOP
    EnviarMenssagem("on")
    tela.after(100, de)
def EnviarMenssagem(msg):
    CLIENTE = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:  # ERROR HANDLING
        CLIENTE.connect((HOST, PORT))
        CLIENTE.sendall(str.encode(msg))
        data = CLIENTE.recv(1024)
        print("Server response:", data.decode())
        LerComando(data.decode())
    except OSError:
        pass
    finally:
        CLIENTE.close()
# EXAMPLE WITH A BUTTON =====================
def btn1clique():  # CLICK HANDLER FOR THE BUTTON
    botao['text'] = "1"  # THE BUTTON TEXT CHANGES TO "1"
    EnviarMenssagem("b1")  # SEND THIS INFORMATION TO THE SOCKET SERVER
tela.title('Exemplo')  # WINDOW TITLE
tela.geometry('720x500')  # WINDOW SIZE
botao = Button(tela, text=" ", command=lambda: btn1clique())  # CREATE A BUTTON WHOSE CLICK EVENT CALLS btn1clique()
botao.grid()  # DRAW THE BUTTON ON THE WINDOW
botao['width'] = 30  # BUTTON WIDTH
botao['height'] = 20  # BUTTON HEIGHT
tela.after(100, de)
tela.mainloop()  # RUN THE MAIN EVENT LOOP
|
nilq/baby-python
|
python
|
import asyncio
import copy
import logging
import time
from collections import defaultdict
from decimal import Decimal
from typing import Any, Dict, List, Mapping, Optional
from bidict import bidict, ValueDuplicationError
import hummingbot.connector.derivative.binance_perpetual.binance_perpetual_utils as utils
import hummingbot.connector.derivative.binance_perpetual.binance_perpetual_web_utils as web_utils
import hummingbot.connector.derivative.binance_perpetual.constants as CONSTANTS
from hummingbot.connector.derivative.binance_perpetual.binance_perpetual_order_book import BinancePerpetualOrderBook
from hummingbot.connector.time_synchronizer import TimeSynchronizer
from hummingbot.connector.utils import combine_to_hb_trading_pair
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.data_type.funding_info import FundingInfo
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessage
from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource
from hummingbot.core.utils.async_utils import safe_gather
from hummingbot.core.web_assistant.connections.data_types import (
RESTMethod,
WSRequest,
WSResponse,
)
from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory
from hummingbot.core.web_assistant.ws_assistant import WSAssistant
from hummingbot.logger import HummingbotLogger
class BinancePerpetualAPIOrderBookDataSource(OrderBookTrackerDataSource):
_bpobds_logger: Optional[HummingbotLogger] = None
_trading_pair_symbol_map: Dict[str, Mapping[str, str]] = {}
_mapping_initialization_lock = asyncio.Lock()
def __init__(
self,
trading_pairs: List[str] = None,
domain: str = CONSTANTS.DOMAIN,
throttler: Optional[AsyncThrottler] = None,
api_factory: Optional[WebAssistantsFactory] = None,
time_synchronizer: Optional[TimeSynchronizer] = None,
):
super().__init__(trading_pairs)
self._time_synchronizer = time_synchronizer
self._domain = domain
self._throttler = throttler
self._api_factory: WebAssistantsFactory = api_factory or web_utils.build_api_factory(
throttler=self._throttler,
time_synchronizer=self._time_synchronizer,
domain=self._domain,
)
self._order_book_create_function = lambda: OrderBook()
self._funding_info: Dict[str, FundingInfo] = {}
self._message_queue: Dict[int, asyncio.Queue] = defaultdict(asyncio.Queue)
@property
def funding_info(self) -> Dict[str, FundingInfo]:
return copy.deepcopy(self._funding_info)
def is_funding_info_initialized(self) -> bool:
return all(trading_pair in self._funding_info for trading_pair in self._trading_pairs)
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._bpobds_logger is None:
cls._bpobds_logger = logging.getLogger(__name__)
return cls._bpobds_logger
@classmethod
async def get_last_traded_prices(cls,
trading_pairs: List[str],
domain: str = CONSTANTS.DOMAIN) -> Dict[str, float]:
tasks = [cls.get_last_traded_price(t_pair, domain) for t_pair in trading_pairs]
results = await safe_gather(*tasks)
return {t_pair: result for t_pair, result in zip(trading_pairs, results)}
@classmethod
async def get_last_traded_price(cls,
trading_pair: str,
domain: str = CONSTANTS.DOMAIN,
api_factory: Optional[WebAssistantsFactory] = None,
throttler: Optional[AsyncThrottler] = None,
time_synchronizer: Optional[TimeSynchronizer] = None) -> float:
params = {"symbol": await cls.convert_to_exchange_trading_pair(
hb_trading_pair=trading_pair,
domain=domain,
throttler=throttler,
api_factory=api_factory,
time_synchronizer=time_synchronizer)}
response = await web_utils.api_request(
path=CONSTANTS.TICKER_PRICE_CHANGE_URL,
api_factory=api_factory,
throttler=throttler,
time_synchronizer=time_synchronizer,
domain=domain,
params=params,
method=RESTMethod.GET)
return float(response["lastPrice"])
@classmethod
def trading_pair_symbol_map_ready(cls, domain: str = CONSTANTS.DOMAIN):
"""
Checks if the mapping from exchange symbols to client trading pairs has been initialized
:param domain: the domain of the exchange being used
:return: True if the mapping has been initialized, False otherwise
"""
return domain in cls._trading_pair_symbol_map and len(cls._trading_pair_symbol_map[domain]) > 0
@classmethod
async def trading_pair_symbol_map(
cls,
domain: Optional[str] = CONSTANTS.DOMAIN,
throttler: Optional[AsyncThrottler] = None,
api_factory: WebAssistantsFactory = None,
time_synchronizer: Optional[TimeSynchronizer] = None
) -> Mapping[str, str]:
if not cls.trading_pair_symbol_map_ready(domain=domain):
async with cls._mapping_initialization_lock:
# Check condition again (could have been initialized while waiting for the lock to be released)
if not cls.trading_pair_symbol_map_ready(domain=domain):
await cls.init_trading_pair_symbols(domain, throttler, api_factory, time_synchronizer)
return cls._trading_pair_symbol_map[domain]
@classmethod
async def init_trading_pair_symbols(
cls,
domain: str = CONSTANTS.DOMAIN,
throttler: Optional[AsyncThrottler] = None,
api_factory: WebAssistantsFactory = None,
time_synchronizer: Optional[TimeSynchronizer] = None
):
"""Initialize _trading_pair_symbol_map class variable"""
mapping = bidict()
try:
data = await web_utils.api_request(
path=CONSTANTS.EXCHANGE_INFO_URL,
api_factory=api_factory,
throttler=throttler,
time_synchronizer=time_synchronizer,
domain=domain,
method=RESTMethod.GET,
timeout=10)
for symbol_data in filter(utils.is_exchange_information_valid, data["symbols"]):
try:
mapping[symbol_data["pair"]] = combine_to_hb_trading_pair(
symbol_data["baseAsset"],
symbol_data["quoteAsset"])
except ValueDuplicationError:
continue
except Exception as ex:
cls.logger().exception(f"There was an error requesting exchange info ({str(ex)})")
cls._trading_pair_symbol_map[domain] = mapping
@staticmethod
async def fetch_trading_pairs(
domain: str = CONSTANTS.DOMAIN,
throttler: Optional[AsyncThrottler] = None,
api_factory: Optional[WebAssistantsFactory] = None,
time_synchronizer: Optional[TimeSynchronizer] = None,
) -> List[str]:
trading_pair_list: List[str] = []
symbols_map = await BinancePerpetualAPIOrderBookDataSource.trading_pair_symbol_map(
domain=domain,
throttler=throttler,
api_factory=api_factory,
time_synchronizer=time_synchronizer)
trading_pair_list.extend(list(symbols_map.values()))
return trading_pair_list
@classmethod
async def convert_from_exchange_trading_pair(
cls,
exchange_trading_pair: str,
domain: str = CONSTANTS.DOMAIN,
throttler: Optional[AsyncThrottler] = None,
api_factory: Optional[WebAssistantsFactory] = None,
time_synchronizer: Optional[TimeSynchronizer] = None) -> str:
symbol_map = await cls.trading_pair_symbol_map(
domain=domain,
throttler=throttler,
api_factory=api_factory,
time_synchronizer=time_synchronizer)
try:
pair = symbol_map[exchange_trading_pair]
except KeyError:
raise ValueError(f"There is no symbol mapping for exchange trading pair {exchange_trading_pair}")
return pair
@classmethod
async def convert_to_exchange_trading_pair(
cls,
hb_trading_pair: str,
domain=CONSTANTS.DOMAIN,
throttler: Optional[AsyncThrottler] = None,
api_factory: Optional[WebAssistantsFactory] = None,
time_synchronizer: Optional[TimeSynchronizer] = None) -> str:
symbol_map = await cls.trading_pair_symbol_map(
domain=domain,
throttler=throttler,
api_factory=api_factory,
time_synchronizer=time_synchronizer)
try:
symbol = symbol_map.inverse[hb_trading_pair]
except KeyError:
raise ValueError(f"There is no symbol mapping for trading pair {hb_trading_pair}")
return symbol
@staticmethod
async def get_snapshot(
trading_pair: str,
limit: int = 1000,
domain: str = CONSTANTS.DOMAIN,
throttler: Optional[AsyncThrottler] = None,
api_factory: Optional[WebAssistantsFactory] = None,
time_synchronizer: Optional[TimeSynchronizer] = None
) -> Dict[str, Any]:
params = {"symbol": await BinancePerpetualAPIOrderBookDataSource.convert_to_exchange_trading_pair(
hb_trading_pair=trading_pair,
domain=domain,
throttler=throttler,
api_factory=api_factory,
time_synchronizer=time_synchronizer)}
if limit != 0:
params.update({"limit": str(limit)})
data = await web_utils.api_request(
path=CONSTANTS.SNAPSHOT_REST_URL,
api_factory=api_factory,
throttler=throttler,
time_synchronizer=time_synchronizer,
domain=domain,
params=params,
method=RESTMethod.GET)
return data
async def get_new_order_book(self, trading_pair: str) -> OrderBook:
snapshot: Dict[str, Any] = await self.get_snapshot(trading_pair, 1000, self._domain, self._throttler,
self._api_factory)
snapshot_timestamp: float = time.time()
snapshot_msg: OrderBookMessage = BinancePerpetualOrderBook.snapshot_message_from_exchange(
snapshot, snapshot_timestamp, metadata={"trading_pair": trading_pair}
)
order_book = self.order_book_create_function()
order_book.apply_snapshot(snapshot_msg.bids, snapshot_msg.asks, snapshot_msg.update_id)
return order_book
async def _get_funding_info_from_exchange(self, trading_pair: str) -> FundingInfo:
"""
        Fetches the funding information of the given trading pair from the exchange REST API. Parses and returns the
        response as a FundingInfo data object.
:param trading_pair: Trading pair of which its Funding Info is to be fetched
:type trading_pair: str
:return: Funding Information of the given trading pair
:rtype: FundingInfo
"""
params = {"symbol": await self.convert_to_exchange_trading_pair(
hb_trading_pair=trading_pair,
domain=self._domain,
throttler=self._throttler,
api_factory=self._api_factory,
time_synchronizer=self._time_synchronizer)}
try:
data = await web_utils.api_request(
path=CONSTANTS.MARK_PRICE_URL,
api_factory=self._api_factory,
throttler=self._throttler,
time_synchronizer=self._time_synchronizer,
domain=self._domain,
params=params,
method=RESTMethod.GET)
except asyncio.CancelledError:
raise
except Exception as exception:
self.logger().exception(f"There was a problem getting funding info from exchange. Error: {exception}")
return None
funding_info = FundingInfo(
trading_pair=trading_pair,
index_price=Decimal(data["indexPrice"]),
mark_price=Decimal(data["markPrice"]),
next_funding_utc_timestamp=int(data["nextFundingTime"]),
rate=Decimal(data["lastFundingRate"]),
)
return funding_info
async def get_funding_info(self, trading_pair: str) -> FundingInfo:
"""
Returns the FundingInfo of the specified trading pair. If it does not exist, it will query the REST API.
"""
if trading_pair not in self._funding_info:
self._funding_info[trading_pair] = await self._get_funding_info_from_exchange(trading_pair)
return self._funding_info[trading_pair]
async def _subscribe_to_order_book_streams(self) -> WSAssistant:
url = f"{web_utils.wss_url(CONSTANTS.PUBLIC_WS_ENDPOINT, self._domain)}"
ws: WSAssistant = await self._api_factory.get_ws_assistant()
await ws.connect(ws_url=url, ping_timeout=CONSTANTS.HEARTBEAT_TIME_INTERVAL)
stream_id_channel_pairs = [
(CONSTANTS.DIFF_STREAM_ID, "@depth"),
(CONSTANTS.TRADE_STREAM_ID, "@aggTrade"),
(CONSTANTS.FUNDING_INFO_STREAM_ID, "@markPrice"),
]
for stream_id, channel in stream_id_channel_pairs:
params = []
for trading_pair in self._trading_pairs:
symbol = await self.convert_to_exchange_trading_pair(
hb_trading_pair=trading_pair,
domain=self._domain,
throttler=self._throttler,
api_factory=self._api_factory,
time_synchronizer=self._time_synchronizer)
params.append(f"{symbol.lower()}{channel}")
payload = {
"method": "SUBSCRIBE",
"params": params,
"id": stream_id,
}
subscribe_request: WSRequest = WSRequest(payload)
await ws.send(subscribe_request)
return ws
async def listen_for_subscriptions(self):
ws = None
while True:
try:
ws = await self._subscribe_to_order_book_streams()
async for msg in ws.iter_messages():
if "result" in msg.data:
continue
if "@depth" in msg.data["stream"]:
self._message_queue[CONSTANTS.DIFF_STREAM_ID].put_nowait(msg)
elif "@aggTrade" in msg.data["stream"]:
self._message_queue[CONSTANTS.TRADE_STREAM_ID].put_nowait(msg)
elif "@markPrice" in msg.data["stream"]:
self._message_queue[CONSTANTS.FUNDING_INFO_STREAM_ID].put_nowait(msg)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error with Websocket connection. Retrying after 30 seconds...", exc_info=True
)
await self._sleep(30.0)
finally:
ws and await ws.disconnect()
async def listen_for_order_book_diffs(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
while True:
msg = await self._message_queue[CONSTANTS.DIFF_STREAM_ID].get()
timestamp: float = time.time()
msg.data["data"]["s"] = await self.convert_from_exchange_trading_pair(
exchange_trading_pair=msg.data["data"]["s"],
domain=self._domain,
throttler=self._throttler,
api_factory=self._api_factory,
time_synchronizer=self._time_synchronizer)
order_book_message: OrderBookMessage = BinancePerpetualOrderBook.diff_message_from_exchange(
msg.data, timestamp
)
output.put_nowait(order_book_message)
async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
while True:
msg = await self._message_queue[CONSTANTS.TRADE_STREAM_ID].get()
msg.data["data"]["s"] = await self.convert_from_exchange_trading_pair(
exchange_trading_pair=msg.data["data"]["s"],
domain=self._domain,
throttler=self._throttler,
api_factory=self._api_factory,
time_synchronizer=self._time_synchronizer)
trade_message: OrderBookMessage = BinancePerpetualOrderBook.trade_message_from_exchange(msg.data)
output.put_nowait(trade_message)
async def listen_for_order_book_snapshots(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
while True:
try:
for trading_pair in self._trading_pairs:
snapshot: Dict[str, Any] = await self.get_snapshot(
trading_pair, domain=self._domain, throttler=self._throttler, api_factory=self._api_factory
)
snapshot_timestamp: float = time.time()
snapshot_msg: OrderBookMessage = BinancePerpetualOrderBook.snapshot_message_from_exchange(
snapshot, snapshot_timestamp, metadata={"trading_pair": trading_pair}
)
output.put_nowait(snapshot_msg)
self.logger().debug(f"Saved order book snapshot for {trading_pair}")
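                # Sleep until the next top-of-the-hour so that snapshots are
                # taken on aligned hourly boundaries.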
delta = CONSTANTS.ONE_HOUR - time.time() % CONSTANTS.ONE_HOUR
await self._sleep(delta)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error occurred fetching orderbook snapshots. Retrying in 5 seconds...", exc_info=True
)
await self._sleep(5.0)
async def listen_for_funding_info(self):
"""
Listen for funding information events received through the websocket channel to update the respective
FundingInfo for all active trading pairs.
"""
while True:
try:
funding_info_message: WSResponse = await self._message_queue[CONSTANTS.FUNDING_INFO_STREAM_ID].get()
data: Dict[str, Any] = funding_info_message.data["data"]
trading_pair: str = await self.convert_from_exchange_trading_pair(
exchange_trading_pair=data["s"],
domain=self._domain,
throttler=self._throttler,
api_factory=self._api_factory,
time_synchronizer=self._time_synchronizer)
if trading_pair not in self._trading_pairs:
continue
self._funding_info.update(
{
trading_pair: FundingInfo(
trading_pair=trading_pair,
index_price=Decimal(data["i"]),
mark_price=Decimal(data["p"]),
next_funding_utc_timestamp=int(data["T"]),
rate=Decimal(data["r"]),
)
}
)
except asyncio.CancelledError:
raise
except Exception as e:
self.logger().error(
f"Unexpected error occured updating funding information. Retrying in 5 seconds... Error: {str(e)}",
exc_info=True,
)
await self._sleep(5.0)
|
nilq/baby-python
|
python
|
from sys import stdin, stdout
import csv
def locase(s): return s[:1].lower() + s[1:]
reader = csv.DictReader(stdin, delimiter=',')
for item in reader:
itemstr = item.get('item')
itemid = itemstr[itemstr.rfind('/')+1:]
lang = item.get('itemLabel_lang')
str1 = locase(item.get('str1'))
str2 = locase(item.get('str2'))
    if str1 == str2:
print("{}|L{}|\"{}\"".format(itemid, lang, str1),
file=stdout)
|
nilq/baby-python
|
python
|
expected_results = {
"K64F": {
"desc": "error when bootloader not found",
"exception_msg": "not found"
}
}
|
nilq/baby-python
|
python
|
import random
import json
import torch
from model import NeuralNetwork
from nltk_utils import tokenize,extract_stop_words,stem,bag_of_words
import os
import time
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with open('intents.json',encoding='UTF-8') as f:
intents = json.load(f)
DATA_FILE = 'data.pth'
data = torch.load(DATA_FILE)
input_size = data["input_size"]
hidden_size = data["hidden_size"]
output_size = data["output_size"]
all_words = data["all_words"]
tags = data["tags"]
model_state = data["model_state"]
model = NeuralNetwork(input_size,hidden_size,output_size).to(device)
model.load_state_dict(model_state)
model.eval()
bot_name = "NavlunBot"
os.system('cls')
print("Merhaba. Ben NavlunBot. Sana nasıl yardımcı olabilirim ?")
while True:
sentence = input('Me : ')
if sentence == 'quit':
break
sentence = tokenize(sentence)
sentence = extract_stop_words(sentence)
x = bag_of_words(sentence,all_words)
x = x.reshape(1,x.shape[0])
x = torch.from_numpy(x)
out = model(x)
_,pred = torch.max(out,dim=1)
tag = tags[pred.item()]
probs = torch.softmax(out, dim=1)
actual_prob = probs[0][pred.item()]
if actual_prob.item() > 0.75:
for intent in intents['intents']:
if tag == intent['tag']:
print(f"{bot_name} : {random.choice(intent['responses'])}")
if(tag == "shipment-payment"):
print(f"{bot_name} : 5 sn sonra ilgili sayfaya yönlendirileceksiniz.")
time.sleep(5)
os.system("start \"\" https://navlungo.com/ship/searchs")
else:
print(f"{bot_name} : Buna cevap veremiyorum :(")
|
nilq/baby-python
|
python
|
import time, pytest, inspect
from utils import *
from PIL import Image
def test_mixer_from_config(run_brave, create_config_file):
subtest_start_brave_with_mixers(run_brave, create_config_file)
subtest_assert_two_mixers(mixer_0_props={'width': 160, 'height': 90, 'pattern': 6})
subtest_change_mixer_pattern()
subtest_assert_two_mixers(mixer_0_props={'width': 160, 'height': 90, 'pattern': 7})
subtest_change_width_and_height()
subtest_assert_two_mixers(mixer_0_props={'width': 200, 'height': 300, 'pattern': 7})
def subtest_start_brave_with_mixers(run_brave, create_config_file):
MIXER0 = {
'width': 160,
'height': 90,
'pattern': 6
}
MIXER1 = {
'width': 640,
'height': 360
}
config = {'default_mixers': [{'props': MIXER0}, {'props': MIXER1}]}
config_file = create_config_file(config)
run_brave(config_file.name)
check_brave_is_running()
def subtest_assert_two_mixers(mixer_0_props):
assert_mixers([{
'id': 0,
'props': mixer_0_props,
}, {
'id': 1,
'props': {'width': 640, 'height': 360, 'pattern': 0},
}])
def subtest_change_mixer_pattern():
update_mixer(0, {'props': {'pattern': 7}})
def subtest_change_width_and_height():
update_mixer(0, {'props': {'width': 200, 'height': 300}})
|
nilq/baby-python
|
python
|
print('Welcome to the Pig Latin Translator!')
# Start coding here!
original = input('TELL ME a word in ENGLISH: ').lower()
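# A minimal completion of the exercise (an illustrative sketch, assuming the
# classic rule: move the first letter to the end and append 'ay'):
if len(original) > 0 and original.isalpha():
    pyg = 'ay'
    new_word = original[1:] + original[0] + pyg
    print(new_word)
else:
    print('empty or non-alphabetic input')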
|
nilq/baby-python
|
python
|
from behave import given, when, then
import time
import os
@when("you navigate to CSW homepage")
def step(context):
url = os.environ["CSW_URL"]
context.browser.get(url)
@when('you navigate to CSW page "{path}"')
def step(context, path):
url = os.environ["CSW_URL"] + path
print(url)
context.browser.get(url)
@when("you login to CSW")
def step(context):
creds = {
"email": os.environ["CSW_USER"] + "@digital.cabinet-office.gov.uk",
"client": os.environ["CSW_CLIENT"],
"secret": os.environ["CSW_SECRET"],
}
url = os.environ["CSW_URL"]
url = (
url
+ "temp-login?client="
+ creds["client"]
+ "&secret="
+ creds["secret"]
+ "&email="
+ creds["email"]
)
response = context.browser.get(url)
# context.api_session.headers.update({'x-test': 'true'})
print(response)
@then('the content of element with selector "{selector}" equals "{title}"')
def step(context, selector, title):
elem = context.browser.find_element_by_css_selector(selector).text
print(elem)
assert elem == title
@given("the credentials")
def step(context):
context.browser.header_overrides = {
"Client": os.environ["CSW_CLIENT"],
"Secret": os.environ["CSW_SECRET"],
}
print(str(context.browser.header_overrides))
@when('login post to "{url}"')
def step(context, url):
creds = {
"email": os.environ["CSW_USER"] + "@digital.cabinet-office.gov.uk",
"client": os.environ["CSW_CLIENT"],
"secret": os.environ["CSW_SECRET"],
}
url = (
url
+ "?client="
+ creds["client"]
+ "&secret="
+ creds["secret"]
+ "&email="
+ creds["email"]
)
response = context.browser.get(url)
print(response)
@then('wait "{seconds}" seconds')
def step(context, seconds):
time.sleep(int(seconds))
@then("we have a session cookie")
def step(context):
cookie = context.browser.get_cookie("session")
assert cookie is not None
|
nilq/baby-python
|
python
|
from django.contrib import admin
from models import Participant, Exchange
class ParticipantAdmin(admin.ModelAdmin):
pass
class ExchangeAdmin(admin.ModelAdmin):
pass
admin.site.register(Participant, ParticipantAdmin)
admin.site.register(Exchange, ExchangeAdmin)
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class CrowdCounter(nn.Module):
def __init__(self, model_name):
        super(CrowdCounter, self).__init__()
        model_name = model_name.lower()
        if model_name == 'AlexNet'.lower():
from .counters.AlexNet import AlexNet as net
elif model_name == 'VGG'.lower():
from .counters.VGG import VGG as net
elif model_name == 'VGG_DECODER'.lower():
from .counters.VGG_decoder import VGG_decoder as net
elif model_name == 'MCNN'.lower():
from .counters.MCNN import MCNN as net
elif model_name == 'CSRNet'.lower():
from .counters.CSRNet import CSRNet as net
elif model_name == 'SCAR'.lower():
from .counters.SCAR import SCAR as net
elif model_name == 'ResNet50'.lower():
from .counters.Res50 import Res50 as net
elif model_name == 'ResNet101'.lower():
from .counters.Res101 import Res101 as net
elif model_name == 'SFCN+'.lower():
from .counters.Res101_SFCN import Res101_SFCN as net
elif model_name == "SANet".lower():
from .counters.SANet import SANet as net
self.CCN = net()
def test_forward(self, img):
density_map = self.CCN(img)
return density_map
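# Hypothetical usage sketch (tensor shapes and the model name are assumptions;
# running this requires the counters package to be importable):
#
#   model = CrowdCounter('mcnn')
#   density_map = model.test_forward(img)  # img: (N, C, H, W) image tensor
#   estimated_count = density_map.sum()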
|
nilq/baby-python
|
python
|
"""
The file defines the training process.
@Author: Yang Lu
@Github: https://github.com/luyanger1799
@Project: https://github.com/luyanger1799/amazing-semantic-segmentation
"""
from utils.data_generator import ImageDataGenerator
from utils.helpers import get_dataset_info, check_related_path
from utils.callbacks import LearningRateScheduler
from utils.optimizers import *
from utils.losses import *
from utils.learning_rate import *
from utils.metrics import MeanIoU
from utils import utils
from builders import builder
import tensorflow as tf
import argparse
import os
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
parser.add_argument('--model', help='Choose the semantic segmentation methods.', type=str, required=True)
parser.add_argument('--base_model', help='Choose the backbone model.', type=str, default=None)
parser.add_argument('--dataset', help='The path of the dataset.', type=str, default='SD')
parser.add_argument('--loss', help='The loss function for training.', type=str, default=None,
choices=['ce', 'dice_ce', 'focal_loss', 'miou_loss', 'self_balanced_focal_loss', 'ssim_loss','mix_loss'])
parser.add_argument('--num_classes', help='The number of classes to be segmented.', type=int, default=32)
parser.add_argument('--random_crop', help='Whether to randomly crop the image.', type=str2bool, default=False)
parser.add_argument('--crop_height', help='The height to crop the image.', type=int, default=256)
parser.add_argument('--crop_width', help='The width to crop the image.', type=int, default=256)
parser.add_argument('--batch_size', help='The training batch size.', type=int, default=5)
parser.add_argument('--valid_batch_size', help='The validation batch size.', type=int, default=1)
parser.add_argument('--num_epochs', help='The number of epochs to train for.', type=int, default=100)
parser.add_argument('--initial_epoch', help='The initial epoch of training.', type=int, default=0)
parser.add_argument('--h_flip', help='Whether to randomly flip the image horizontally.', type=str2bool, default=False)
parser.add_argument('--v_flip', help='Whether to randomly flip the image vertically.', type=str2bool, default=False)
parser.add_argument('--brightness', help='Randomly change the brightness (list).', type=float, default=None, nargs='+')
parser.add_argument('--rotation', help='The angle to randomly rotate the image.', type=float, default=0.)
parser.add_argument('--zoom_range', help='The times for zooming the image.', type=float, default=0., nargs='+')
parser.add_argument('--channel_shift', help='The channel shift range.', type=float, default=0.)
parser.add_argument('--data_aug_rate', help='The rate of data augmentation.', type=float, default=0.)
parser.add_argument('--checkpoint_freq', help='How often to save a checkpoint.', type=int, default=1)
parser.add_argument('--validation_freq', help='How often to perform validation.', type=int, default=1)
parser.add_argument('--num_valid_images', help='The number of images used for validation.', type=int, default=20)
parser.add_argument('--data_shuffle', help='Whether to shuffle the data.', type=str2bool, default=True)
parser.add_argument('--random_seed', help='The random shuffle seed.', type=int, default=None)
parser.add_argument('--weights', help='The path of weights to be loaded.', type=str, default=None)
parser.add_argument('--steps_per_epoch', help='The training steps of each epoch', type=int, default=None)
parser.add_argument('--lr_scheduler', help='The strategy to schedule learning rate.', type=str, default='cosine_decay',
choices=['step_decay', 'poly_decay', 'cosine_decay'])
parser.add_argument('--lr_warmup', help='Whether to use lr warm up.', type=bool, default=False)
parser.add_argument('--learning_rate', help='The initial learning rate.', type=float, default=3e-4)
parser.add_argument('--optimizer', help='The optimizer for training.', type=str, default='adam',
choices=['sgd', 'adam', 'nadam', 'adamw', 'nadamw', 'sgdw'])
args = parser.parse_args()
# check related paths
paths = check_related_path(os.getcwd())
# get image and label file names for training and validation
train_image_names, train_label_names, valid_image_names, valid_label_names, _, _ = get_dataset_info(args.dataset)
# build the model
net, base_model = builder(args.num_classes, (args.crop_height, args.crop_width), args.model, args.base_model)
# summary
net.summary()
# load weights
if args.weights is not None:
print('Loading the weights...')
net.load_weights(args.weights)
# chose loss
losses = {'ce': categorical_crossentropy_with_logits,
'dice_ce': dice_and_categorical_crossentropy_with_logits,
'focal_loss': focal_loss(),
'miou_loss': miou_loss,
'self_balanced_focal_loss': self_balanced_focal_loss(),
'ssim_loss': ssim_loss,
'mix_loss': mix_loss}
loss = losses[args.loss] if args.loss is not None else categorical_crossentropy_with_logits
# chose optimizer
total_iterations = len(train_image_names) * args.num_epochs // args.batch_size
wd_dict = utils.get_weight_decays(net)
ordered_values = []
weight_decays = utils.fill_dict_in_order(wd_dict, ordered_values)
optimizers = {'adam': tf.keras.optimizers.Adam(learning_rate=args.learning_rate),
'nadam': tf.keras.optimizers.Nadam(learning_rate=args.learning_rate),
'sgd': tf.keras.optimizers.SGD(learning_rate=args.learning_rate, momentum=0.99),
'adamw': AdamW(learning_rate=args.learning_rate, batch_size=args.batch_size,
total_iterations=total_iterations),
'nadamw': NadamW(learning_rate=args.learning_rate, batch_size=args.batch_size,
total_iterations=total_iterations),
'sgdw': SGDW(learning_rate=args.learning_rate, momentum=0.99, batch_size=args.batch_size,
total_iterations=total_iterations)}
# lr schedule strategy
if args.lr_warmup and args.num_epochs - 5 <= 0:
raise ValueError('num_epochs must be larger than 5 if lr warm up is used.')
lr_decays = {'step_decay': step_decay(args.learning_rate, args.num_epochs - 5 if args.lr_warmup else args.num_epochs,
warmup=args.lr_warmup),
'poly_decay': poly_decay(args.learning_rate, args.num_epochs - 5 if args.lr_warmup else args.num_epochs,
warmup=args.lr_warmup),
'cosine_decay': cosine_decay(args.num_epochs - 5 if args.lr_warmup else args.num_epochs,
args.learning_rate, warmup=args.lr_warmup)}
lr_decay = lr_decays[args.lr_scheduler]
# training and validation steps
steps_per_epoch = len(train_image_names) // args.batch_size if not args.steps_per_epoch else args.steps_per_epoch
validation_steps = args.num_valid_images // args.valid_batch_size
# compile the model
if args.model == 'CFNET':
loss = {'re_lu_16':mix_loss,'re_lu_27':mix_loss}
net.compile(optimizer=optimizers[args.optimizer],
loss=loss,
metrics=[MeanIoU(args.num_classes)])
# data generator
# data augmentation setting
train_gen = ImageDataGenerator(random_crop=args.random_crop,
rotation_range=args.rotation,
brightness_range=args.brightness,
zoom_range=args.zoom_range,
channel_shift_range=args.channel_shift,
                               horizontal_flip=args.h_flip,
vertical_flip=args.v_flip)
valid_gen = ImageDataGenerator()
train_generator = train_gen.flow(images_list=train_image_names,
labels_list=train_label_names,
num_classes=args.num_classes,
batch_size=args.batch_size,
target_size=(args.crop_height, args.crop_width),
shuffle=args.data_shuffle,
seed=args.random_seed,
data_aug_rate=args.data_aug_rate)
valid_generator = valid_gen.flow(images_list=valid_image_names,
labels_list=valid_label_names,
num_classes=args.num_classes,
batch_size=args.valid_batch_size,
target_size=(args.crop_height, args.crop_width))
# callbacks setting
# checkpoint setting
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(paths['checkpoints_path'],
'{model}_based_on_{base}_'.format(model=args.model, base=base_model) +
'miou_{val_re_lu_27_mean_io_u:04f}_' + 'ep_{epoch:02d}.h5'),
save_best_only=True, period=args.checkpoint_freq, monitor='val_re_lu_27_mean_io_u', mode='max')
# tensorboard setting
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=paths['logs_path'])
# learning rate scheduler setting
learning_rate_scheduler = LearningRateScheduler(lr_decay, args.learning_rate, args.lr_warmup, steps_per_epoch,
verbose=1)
callbacks = [model_checkpoint, tensorboard, learning_rate_scheduler]
# begin training
print("\n***** Begin training *****")
print("Dataset -->", args.dataset)
print("Num Images -->", len(train_image_names))
print("Model -->", args.model)
print("Base Model -->", base_model)
print("Crop Height -->", args.crop_height)
print("Crop Width -->", args.crop_width)
print("Num Epochs -->", args.num_epochs)
print("Initial Epoch -->", args.initial_epoch)
print("Batch Size -->", args.batch_size)
print("Num Classes -->", args.num_classes)
print("Data Augmentation:")
print("\tData Augmentation Rate -->", args.data_aug_rate)
print("\tVertical Flip -->", args.v_flip)
print("\tHorizontal Flip -->", args.h_flip)
print("\tBrightness Alteration -->", args.brightness)
print("\tRotation -->", args.rotation)
print("\tZoom -->", args.zoom_range)
print("\tChannel Shift -->", args.channel_shift)
print("")
# training...
net.fit_generator(train_generator,
steps_per_epoch=steps_per_epoch,
epochs=args.num_epochs,
callbacks=callbacks,
validation_data=valid_generator,
validation_steps=validation_steps,
validation_freq=args.validation_freq,
max_queue_size=10,
workers=os.cpu_count(),
use_multiprocessing=False,
initial_epoch=args.initial_epoch)
# save weights
net.save(filepath=os.path.join(
paths['weights_path'], '{model}_based_on_{base_model}.h5'.format(model=args.model, base_model=base_model)))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: gxs
@license: (C) Copyright 2016-2019, Light2Cloud (Beijing) Web Service Co., LTD
@contact: dingjianfeng@light2cloud.com
@software: AWS-DJF
@file: delete_s3_upload_data.py
@ide: PyCharm
@time: 2020/4/16 11:18
@desc:
"""
import base64
import csv
import fnmatch
import hashlib
import os
import pathlib
import shutil
import boto3
import logging
from botocore.exceptions import ClientError
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
LOG_DIR = os.path.join(BASE_DIR, 'delete_logs')
LOG_FILE = os.path.join(LOG_DIR, 'upload_to_s3_all.log')
LOG_FILE_ERROR = os.path.join(LOG_DIR, 'upload_to_s3_warning.log')
LOG_Danger = os.path.join(LOG_DIR, 'upload_to_s3_data_danger.log')
if not os.path.isdir(LOG_DIR):
os.makedirs(LOG_DIR)
class DeleteUploadFile:
def __init__(
self, access_key=None, secret_key=None, region=None,
aws_session_token=None, profile=None, topic_arn=None,
bucket=None,
):
self.logger = self._init_logger()
self.accessKey = access_key
self.secretKey = secret_key
self.aws_session_token = aws_session_token
self.profile = profile
self.region = region
self.topic_arn = topic_arn
self.bucket = bucket
@staticmethod
def _init_logger():
_logging = logging.getLogger('l2c.%s' % __name__)
_logging.setLevel(10)
"""写入日志文件, 大等于20的日志被写入"""
fh = logging.FileHandler(LOG_FILE, mode='a', encoding='utf8')
fh.setLevel(20)
formatter_fh = logging.Formatter('%(levelname)-3s\t %(asctime)s [%(module)s, %(process)d:%(thread)d] '
'[message]: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
fh.setFormatter(formatter_fh)
"""写入日志文件, 大等于30的日志被写入"""
fh_error = logging.FileHandler(LOG_FILE_ERROR, mode='a', encoding='utf8')
fh_error.setLevel(30)
formatter_fh_error = logging.Formatter('%(levelname)-3s\t %(asctime)s [%(module)s, %(process)d:%(thread)d] '
'[message]: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
fh_error.setFormatter(formatter_fh_error)
"""写入日志文件, 大等于50的日志被写入"""
fh_critical = logging.FileHandler(LOG_Danger, mode='a', encoding='utf8')
fh_critical.setLevel(50)
formatter_fh_critical = logging.Formatter('%(levelname)s %(asctime)s [%(module)s, %(process)d:%(thread)d] '
'[message]: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
fh_critical.setFormatter(formatter_fh_critical)
"""输出到终端"""
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter_ch = logging.Formatter('%(asctime)s %(name)s: [line:%(lineno)d] '
'%(levelname)s-[message]: %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
ch.setFormatter(formatter_ch)
"""向 _logging 添加handler """
_logging.addHandler(fh)
_logging.addHandler(fh_error)
_logging.addHandler(fh_critical)
_logging.addHandler(ch)
return _logging
def _aws_init_connection(self, service):
try:
s = boto3.Session(
aws_access_key_id='{}'.format(self.accessKey),
aws_secret_access_key='{}'.format(self.secretKey),
region_name='{}'.format(self.region),
)
c = s.client('{}'.format(service))
return c
        except ClientError as e:
            e.response['Error'].update({'operation_name': e.operation_name})
            self.logger.error('Error while connecting with the current AWS credentials: {}'.format(e.response['Error']))
            return []
        except Exception as e:
            self.logger.exception('Unexpected error while connecting with the current AWS credentials: {}'.format(e))
            return []
def _aws_init_connection_token(self, service):
try:
s = boto3.Session(
aws_access_key_id='{}'.format(self.accessKey),
aws_secret_access_key='{}'.format(self.secretKey),
aws_session_token='{}'.format(self.aws_session_token),
region_name='{}'.format(self.region),
)
c = s.client('{}'.format(service))
return c
        except ClientError as e:
            e.response['Error'].update({'operation_name': e.operation_name})
            self.logger.error('Error while connecting with the current AWS session token: {}'.format(e.response['Error']))
            return []
        except Exception as e:
            self.logger.exception('Unexpected error while connecting with the current AWS session token: {}'.format(e))
            return []
def _aws_init_profile(self, service):
"""
A method to initialize an AWS service connection with an AWS profile.
:param service:
:return: (object) the AWS connection object.
"""
try:
s = boto3.Session(
profile_name='{}'.format(self.profile)
)
c = s.client('{}'.format(service))
return c
        except ClientError as e:
            e.response['Error'].update({'operation_name': e.operation_name})
            self.logger.error('Error while connecting with the current AWS profile: {}'.format(e.response['Error']))
            return []
        except Exception as e:
            self.logger.exception('Unexpected error while connecting with the current AWS profile: {}'.format(e))
            return []
def find_zip_file(self):
# file_directory_list = ['d/2eeQ7f/', 'd/1442413150028/', 'd/1442754128155/', 'd/1444316556440/',
# 'd/jieINz/', 'd/yayYVv/']
file_directory_list = self.list_bos_csv()
print(file_directory_list)
zip_file_lists = []
for base_path in file_directory_list:
for f_name in os.listdir(base_path):
if fnmatch.fnmatch(f_name, '__*.zip'):
zip_file_lists.append(os.path.join(base_path, f_name))
return self.delete_file(zip_file_lists)
@staticmethod
def list_bos_csv() -> list:
result = []
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
for csv_file in os.listdir(BASE_DIR):
if fnmatch.fnmatch(csv_file, '?_aws_mig_*.csv'):
print(csv_file)
                with open(csv_file, mode='r', encoding='utf8', newline='') as f:
                    reader = csv.reader(f)
for item in reader:
if reader.line_num == 1 and item[0] == "concat('d/',site,'/',owner,'/',store_uid,'/')":
continue
result.append(item[0])
return result
@staticmethod
def _count_md5(file_name):
buf_size = 8192
with open(file_name, 'rb') as fp:
file_md5 = hashlib.md5()
while True:
bytes_to_read = buf_size
buf = fp.read(bytes_to_read)
if not buf:
break
file_md5.update(buf)
etag = file_md5.hexdigest()
content_md5 = str(base64.standard_b64encode(file_md5.digest()), encoding='utf-8')
return [content_md5, etag]
def _read_zip_md5(self, dir_zip_name: str):
md5 = self._count_md5(dir_zip_name)
return md5
def delete_file(self, zip_file_lists: list):
"""
:param zip_file_lists:
:return:
"""
if self.accessKey is not None and self.aws_session_token is not None:
s3c = self._aws_init_connection_token('s3')
elif self.accessKey is not None:
s3c = self._aws_init_connection('s3')
elif self.profile is not None:
s3c = self._aws_init_profile('s3')
else:
s3c = boto3.client('s3', region_name=self.region)
try:
for file in zip_file_lists:
response = s3c.head_object(
Bucket=self.bucket,
Key=file,
)
                etag_zip_md5 = self._read_zip_md5(file)[1]
                if response['ETag'].replace('"', "") == etag_zip_md5:
                    self.logger.info(f'Verified uploaded archive: {file}; data is intact')
                    self.delete_uploaded_zip_of_path(file)
                else:
                    new_etag = response['ETag'].replace('"', "")
                    self.logger.warning(f"Verified uploaded archive: {file}; data was corrupted during upload..... "
                                        f"original ETag: {etag_zip_md5} uploaded ETag: {new_etag}")
                    self._choose_corrupt_zip_write_to_csv(str(file))
        except ClientError as e:
            e.response['Error'].update({'operation_name': e.operation_name})
            self.logger.error('Error while reading data from the S3 bucket: {}'.format(e.response['Error']))
            return []
def delete_uploaded_zip_of_path(self, zip_file: str):
file_dir = pathlib.Path(zip_file).parent
try:
shutil.rmtree(file_dir)
p = pathlib.Path(file_dir).parent
if not os.listdir(p):
p.rmdir()
            return self.logger.info(f'Archive {zip_file} upload finished; deleted all files under the path: {file_dir}')
        except OSError as e:
            self.logger.error(f'Failed to delete files under the path {file_dir} after archive upload: {e.strerror}')
@staticmethod
def _read_csv_data(csv_file: str):
if not os.path.isfile(csv_file):
with open(csv_file, mode='a', encoding='utf8'):
pass
else:
csv_data_list = []
with open(csv_file, mode='r', encoding='utf8') as f:
csv_read = csv.reader(f)
for line in csv_read:
if line:
csv_data_list.extend(line)
return csv_data_list
def _choose_corrupt_zip_write_to_csv(self, file: str):
file_csv = 'delete_upload_check_failed_data.csv'
csv_file_list = self._read_csv_data(str(file_csv))
        with open(file=file_csv, mode='a', encoding='utf8') as f:
            if not csv_file_list:
                csv_write = csv.writer(f)
                csv_write.writerow([file])
                self.logger.warning(f'Writing data that failed the post-upload re-verification to the csv file: {file}')
            else:
                if file not in set(csv_file_list):
                    csv_write = csv.writer(f)
                    csv_write.writerow([file])
                    self.logger.warning(f'Writing data that failed the post-upload re-verification to the csv file: {file}')
base_path_csv = 'delete_upload_check_failed_data_dir_path.csv'
csv_base_path_list = self._read_csv_data(str(base_path_csv))
file_dir = str(pathlib.Path(file).parent)
        with open(file=str(base_path_csv), mode='a', encoding='utf8') as f:
            if not csv_base_path_list:
                csv_write = csv.writer(f)
                csv_write.writerow([file_dir])
                self.logger.critical(f'Writing the directory of data that failed post-upload verification to the csv file: {file_dir}')
            else:
                if file_dir not in set(csv_base_path_list):
                    csv_write = csv.writer(f)
                    csv_write.writerow([file_dir])
                    self.logger.critical(f'Writing the directory of data that failed post-upload verification to the csv file: {file_dir}')
if __name__ == '__main__':
print("root_dir: ", BASE_DIR)
print("log_file: ", LOG_FILE)
print("log_file_warning: ", LOG_FILE_ERROR)
print("log_file_danger: ", LOG_Danger)
s3 = DeleteUploadFile(
# s3
access_key='',
secret_key='',
region='',
bucket='',
)
s3.find_zip_file()
|
nilq/baby-python
|
python
|
"""
Author: Fritz Alder
Copyright:
Secure Systems Group, Aalto University
https://ssg.aalto.fi/
This code is released under Apache 2.0 license
http://www.apache.org/licenses/LICENSE-2.0
"""
import cppimport
#This will pause for a moment to compile the module
cppimport.set_quiet(False)
m = cppimport.imp("minionn")
#import minionn as m
print("\nSuccessfuly imported c++ code\n")
SLOTS = 4096
PMAX = 101285036033
import numpy as np
import os
from operator import mul
from functools import reduce
def run_test(shape):
"""
Here, we just test if the homomorphic encryption works.
    As such, we only test if Dec(Enc(w)*r - v) = w*r - v (mod p) for every element of w
"""
# Generate w and encrypt
w_np = np.random.randint(10000,None,size=shape,dtype='int64')
w_cpp = m.VectorInt(w_np.flatten().tolist())
    w_cpp = m.VectorInt([i for i in range(0, 100)])  # NOTE: overrides the random w above with a deterministic vector
encW = m.encrypt_w(w_cpp,pkey)
length = reduce(mul, shape, 1)
r_np = np.random.randint(PMAX, None, size=length, dtype='uint64')
r = m.VectorUInt(r_np.flatten().tolist())
v_np = np.random.randint(PMAX,None,size=length, dtype='uint64')
v = m.VectorUInt(v_np.flatten().tolist())
# Do client precomputation
encU = m.client_precomputation(encW, r, v)
# Decrypt w again
decrypted_u = m.VectorInt([])
m.decrypt_w(encU, skey, decrypted_u)
# check if values match with expected value
ww = list(w_cpp)
vv = list(v)
rr = list(r)
dd = list(decrypted_u)[:length]
"""
print("W")
print(ww)
print("R")
print(rr[:length])
print("V")
print(vv[:length])
print("D")
print(dd)
"""
print("Testing for correctness")
for i in range(0,length):
assert dd[i] == m.modulo((ww[i] * rr[i]) - vv[i])
print("Testing done.")
def test_two_vectors(vector, expected_list):
assert len(list(vector)) == len(expected_list), "Length wrong" + str(len(list(vector))) + " instead of " + str(len(expected_list))
assert list(vector) == expected_list, "Wrong result: " + str(list(vector)) + " instead of expected " + str(expected_list)
## Maths tests
print("### Basic maths tests")
a = m.VectorInt([1,2])
b = m.VectorInt([3,4])
c = m.VectorInt([4,6])
d = m.VectorInt([10000000000,20000000000,30000000000,35000000000,-21000000000])
e = m.VectorInt([1,2,-2])
null_matrix = m.VectorInt([0,0,0,0])
null_vector = m.VectorInt([0,0])
print("Testing vector operations")
test_two_vectors(m.vector_add(a,b), [4,6])
test_two_vectors(m.vector_sub(a,b), [-2,-2])
test_two_vectors(m.vector_mul(b,3), [9,12])
test_two_vectors(m.vector_div(c,2), [2,3])
m.vector_floor(d,10000000000)
test_two_vectors(d,[1,2,3,3,-2])
m.vector_raise(e,10000000000)
test_two_vectors(e,[10000000000,20000000000,-20000000000])
w = m.VectorInt([1,2,3,4])
x = m.VectorInt([4,3,2,1])
u = m.VectorInt([2,5,0,7])
b = m.VectorInt([20,10])
y = m.VectorInt([])
print("Testing matrix multiplication")
print("Normal matmul (b broadcasted)")
m.matrixmul(w,b,u,x,2,2,2,y)
test_two_vectors(y, [30,20,40,30])
print("Row wise matmul (b.T broadcasted)")
y = m.VectorInt([])
m.matrixmul_b_columns(w,b,u,x,2,2,2,y)
test_two_vectors(y, [30,30,30,30])
print("Testing extract sum")
dim_m = 10
dim_n = 5
dim_o = 6
a = [i%(dim_m*dim_n) for i in range(0,dim_m*dim_n*dim_o)]
a = sorted(a)
a_vec = m.VectorInt(a)
b_vec = m.VectorInt([])
#Test all
m.extract_sum(a_vec, b_vec, dim_o, dim_n, dim_m, 0)
b_baseline = [dim_o * i for i in range(0,dim_m*dim_n)]
test_two_vectors(b_vec, b_baseline)
#Create subset behind a and test it
new_m = 2
new_n = 2
new_o = 3
a.extend(sorted([i%(new_m*new_n) for i in range(0,new_m*new_n*new_o)]))
b_baseline = [new_o * i for i in range(0,new_m*new_n)]
a_vec = m.VectorInt(a)
b_vec = m.VectorInt([])
m.extract_sum(a_vec, b_vec, new_o, new_n, new_m, dim_m*dim_n*dim_o)
test_two_vectors(b_vec, b_baseline)
## Crypto tests
#crypto operations return a list of bytes
print("### Homomorphic + precomputation tests")
asset_folder = "assets/"
if not os.path.exists(asset_folder):
os.makedirs(asset_folder)
print("Created directory " + asset_folder)
pkey = asset_folder + "s.pkey"
skey = asset_folder + "s.skey"
shape = (10,10)
# Init library and generate keys
m.init(SLOTS)
m.gen_keys(pkey, skey)
print("Running simple encrypt/decrypt example")
sample = m.VectorInt([1,2,3,4,5,6,7,8,7,6,5,4,-12,-14])
encW = m.encrypt_w(sample,pkey)
decrypted = m.VectorInt([])
m.decrypt_w(encW, skey, decrypted)
test_two_vectors(sample, list(decrypted)[:len(list(sample))])
print("Running homomorphic test with random r and v")
run_test(shape)
print("Cleanup")
os.remove(pkey)
os.remove(skey)
try:
    os.rmdir(asset_folder)
except OSError:
    print("Not removing non-empty directory " + asset_folder)
print("### All tests passed")
|
nilq/baby-python
|
python
|
import csv
def savetoCSV(data, filename):
# specifying the fields for csv file
fields = ['Term', 'Poem', 'Part of Speech', 'Definition', 'Tags']
# writing to csv file
    with open(filename, 'w', newline='') as csvfile:
# creating a csv dict writer object
writer = csv.DictWriter(csvfile, fieldnames = fields)
# writing headers (field names)
writer.writeheader()
# writing data rows
writer.writerows(data)
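# NOTE: parseXML() is called in main() below, but its definition was not part
# of this snippet. The sketch here is an assumption about a WordPress export
# (an RSS feed of <item> elements); adjust the element names and the mapping
# to the CSV fields to match the actual data.
import xml.etree.ElementTree as ET
def parseXML(xmlfile):
    """Parse a WordPress export file into a list of dicts keyed by the
    CSV fields used in savetoCSV(). Field mapping is a hypothetical sketch."""
    tree = ET.parse(xmlfile)
    root = tree.getroot()
    newsitems = []
    for item in root.iter('item'):
        newsitems.append({
            'Term': item.findtext('title', default=''),
            'Poem': '',
            'Part of Speech': '',
            'Definition': item.findtext('description', default=''),
            'Tags': ', '.join((c.text or '') for c in item.findall('category')),
        })
    return newsitems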
def main():
# load rss from web to update existing xml file
# loadRSS()
# parse xml file
newsitems = parseXML('loyslunarlexicon.wordpress.2018-11-29.xml')
# store news items in a csv file
savetoCSV(newsitems, 'posts.csv')
if __name__ == "__main__":
# calling main function
main()
|
nilq/baby-python
|
python
|
"""plugins.py contains the main type and base class used by the analyzis plugins.
It also contains the work functions used to load the plugins both from disc and
from the resources."""
from act.scio import plugins
import addict
from importlib import import_module
from importlib.machinery import ModuleSpec
from importlib.util import module_from_spec, spec_from_file_location
from pydantic import BaseModel, StrictStr
from typing import Text, List, Optional
import logging
import os
import pkgutil
module_interface = ["name", "analyze", "info", "version", "dependencies"]
class Result(BaseModel):
"""The result type returned by all analyze methods of the plugins."""
name: StrictStr
version: StrictStr
result: addict.Dict
class BasePlugin:
"""The class that all analyzis plugins inherits. Contains the basis attributes and
interface required by the plugin system."""
def __init__(self: object):
pass
name = "BasePlugin"
info = "This is the empty plugin of a plugin for Scio"
version = "0.1"
dependencies: List[Text] = []
configdir = ""
debug = False
async def analyze(self, nlpdata: addict.Dict) -> Result:
"""Main analyzis method"""
return Result(name=self.name, version=self.version, result=addict.Dict({"test": nlpdata.content}))
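# A minimal conforming plugin module might look like the sketch below
# (hypothetical example, kept as a comment so this module stays unchanged;
# load_plugin() expects each plugin module to expose a class named "Plugin"):
#
#     class Plugin(BasePlugin):
#         name = "wordcount"
#         info = "counts the characters of the incoming text"
#         version = "0.1"
#         dependencies: List[Text] = []
#
#         async def analyze(self, nlpdata: addict.Dict) -> Result:
#             return Result(name=self.name, version=self.version,
#                           result=addict.Dict({"length": len(nlpdata.content)}))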
def load_default_plugins() -> List[BasePlugin]:
"""load_default_plugins scans the package for internal plugins, loading
them dynamically and checking for the presence of the attributes defined in
module_interface"""
myplugins: List[BasePlugin] = []
prefix = plugins.__name__ + "."
for _, modname, _ in pkgutil.iter_modules(plugins.__path__, prefix):
logging.info("loading plugin %s [%s]", modname, plugins.__path__)
p = load_plugin(modname)
if p:
myplugins.append(p)
return myplugins
def load_external_plugins(directory: Text) -> List[BasePlugin]:
"""load_external_plugins scans a directory for .py files, and attempts to
import each file, adding them to the list of modules. The functions will
only add the module to the returned list of modules if it has a dictionary
describing the module_interface list"""
myplugins: List[BasePlugin] = []
for plugin_file_name in os.listdir(directory):
if plugin_file_name == "__init__.py":
continue
plugin_path = os.path.join(directory, plugin_file_name)
if os.path.isfile(plugin_path) and plugin_path.endswith(".py"):
p = load_plugin(plugin_path)
if p:
myplugins.append(p)
return myplugins
def load_plugin(module_name: Text) -> Optional[BasePlugin]:
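    """load_plugin loads a single plugin, either from a .py file path or from
    an importable module name, and verifies that it exposes the attributes
    listed in module_interface. Returns None if loading or validation fails."""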
if module_name.endswith(".py"):
spec: ModuleSpec = spec_from_file_location("plugin_mod", module_name)
module = module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
else:
try:
module = import_module(module_name)
except Exception as e:
logging.warning(e)
return None
conform = True
try:
p: BasePlugin = module.Plugin() # type: ignore
except AttributeError as err:
logging.warning("Could not load plugin from module %s: %s", module_name, err)
return None
for mint in module_interface:
if not hasattr(p, mint):
logging.warning("%s does not have %s attribute", p.name, mint)
conform = False
if not conform:
return None
return p
|
nilq/baby-python
|
python
|
from pathlib import Path
import pytest
from pytest_mock.plugin import MockerFixture
from dotmodules.renderer import ColorAdapter, Colors
@pytest.fixture
def colors() -> Colors:
return Colors()
@pytest.fixture
def color_adapter() -> ColorAdapter:
return ColorAdapter()
class TestColorTagRecognitionCases:
@pytest.mark.parametrize(
"input_string,expected",
[
["<<TAG>>", ""], # Tags should be uppercase only.
["<<Tag>>", "<<Tag>>"], # Tags should be uppercase only.
["<<a1>>", "<<a1>>"], # Tags should be either alphanumeric or numeric.
["<<A1>>", "<<A1>>"], # Tags should be either alphanumeric or numeric.
["<<A-2>>", "<<A-2>>"], # Tags should contain only letters and numbers.
["<<1>>", ""], # Single numbers are allowed.
["<<12>>", ""], # Double numbers are allowed.
["<<123>>", ""], # Maximum three numbers are allowed.
["<<1234>>", "<<1234>>"], # Maximum three numbers are allowed.
["<tag>", "<tag>"], # A tag should be anclosed into double angle brackets.
["<TAG>", "<TAG>"], # A tag should be anclosed into double angle brackets.
["abc", "abc"], # Double angle brackets are necessary.
["ABC", "ABC"], # Double angle brackets are necessary.
["123", "123"], # Double angle brackets are necessary.
["-.,<>:;", "-.,<>:;"], # Double angle brackets are necessary.
["<<>>", "<<>>"], # Empty double angle brackets are not considered as tags.
["<< >>", "<< >>"], # Whitespace is not a tag.
["<<<<TAG>>", "<<"], # Surrounding whitespace in not a requirement.
["<< <<TAG>>", "<< "], # Surrounding whitespace in not a requirement.
["<<TAG>>>>", ">>"], # Surrounding whitespace in not a requirement.
["<<TAG>> >>", " >>"], # Surrounding whitespace in not a requirement.
["<<A>>hello<<B>>", "hello"], # Multiple tags are supported.
],
)
def test__tag_recognition_and_cleaning(
self, input_string: str, expected: str, colors: Colors
) -> None:
result = colors.decolor_string(string=input_string)
assert result == expected
@pytest.mark.parametrize(
"input_string,expected",
[
["<<TAG>>", ("TAG",)], # Tags should be uppercase only.
["<<Tag>>", None], # Tags should be uppercase only.
["<<abc123>>", None], # Tags should be either alphanumeric or numeric.
["<<ABC123>>", None], # Tags should be either alphanumeric or numeric.
["<<A-2>>", None], # Tags should contain only letters and numbers.
["<<1>>", ("1",)], # Single numbers are allowed.
["<<12>>", ("12",)], # Double numbers are allowed.
["<<123>>", ("123",)], # Maximum three numbers are allowed.
["<<1234>>", None], # Maximum three numbers are allowed.
["<tag>", None], # A tag should be anclosed into double angle brackets.
["<TAG>", None], # A tag should be anclosed into double angle brackets.
["abc", None], # Double angle brackets are necessary.
["ABC", None], # Double angle brackets are necessary.
["123", None], # Double angle brackets are necessary.
["-.,<>:;", None], # Double angle brackets are necessary.
["<<>>", None], # Empty double angle brackets are not considered as tags.
["<< >>", None], # Whitespace is not a tag.
["<<<<TAG>>", ("TAG",)], # Surrounding whitespace in not a requirement.
["<< <<TAG>>", ("TAG",)], # Surrounding whitespace in not a requirement.
["<<TAG>>>>", ("TAG",)], # Surrounding whitespace in not a requirement.
["<<TAG>> >>", ("TAG",)], # Surrounding whitespace in not a requirement.
["<<A>>hello<<B>>", ("A", "B")], # Multiple tags are supported.
],
)
def test__tag_list_collection(
self, input_string: str, expected: str, colors: Colors
) -> None:
result = colors._get_tag_list(string=input_string)
assert result == expected
class TestColorCacheHandlingCases:
def test__missing_color_can_be_filled(
self, mocker: MockerFixture, color_adapter: ColorAdapter
) -> None:
dummy_tag = "my_tag"
dummy_color = "my_color"
mock_load_color = mocker.patch(
"dotmodules.renderer.ColorAdapter._load_color_for_tag"
)
mock_load_color.return_value = dummy_color
assert color_adapter._cache == {}
result = color_adapter.resolve_tag(tag=dummy_tag)
assert result == dummy_color
assert color_adapter._cache == {dummy_tag: dummy_color}
mock_load_color.assert_called_with(tag=dummy_tag)
def test__existing_tag_wont_be_resolved(
self, mocker: MockerFixture, color_adapter: ColorAdapter
) -> None:
dummy_tag = "my_tag"
dummy_color = "my_color"
mock_load_color = mocker.patch(
"dotmodules.renderer.ColorAdapter._load_color_for_tag"
)
color_adapter._cache[dummy_tag] = dummy_color
assert color_adapter._cache == {dummy_tag: dummy_color}
result = color_adapter.resolve_tag(tag=dummy_tag)
assert result == dummy_color
assert color_adapter._cache == {dummy_tag: dummy_color}
        mock_load_color.assert_not_called()
class TestColorLoadingCommandAssemblingCases:
def test__mapped_tag__command_should_be_the_mapping(
self, color_adapter: ColorAdapter
) -> None:
dummy_tag = "my_tag"
dummy_mapped_tag = "my_mapped_tag"
color_adapter.TAG_MAPPING[dummy_tag] = dummy_mapped_tag
expected_command = ["utils/color_adapter.sh", dummy_mapped_tag]
result = color_adapter._assemble_color_loading_command(tag=dummy_tag)
assert result == expected_command
def test__unmapped_tag__gets_loaded_with_a_default_template(
self, color_adapter: ColorAdapter
) -> None:
dummy_tag = "123"
expected_command = ["utils/color_adapter.sh", "setaf", dummy_tag]
result = color_adapter._assemble_color_loading_command(tag=dummy_tag)
assert result == expected_command
def test__unmapped_tag__has_to_be_a_number(
self, color_adapter: ColorAdapter
) -> None:
dummy_tag = "my_non_numeric_tag"
with pytest.raises(ValueError) as e:
color_adapter._assemble_color_loading_command(tag=dummy_tag)
expected = "unmapped tag has to be numeric: 'my_non_numeric_tag'"
assert str(e.value) == expected
def test__mapped_tag__multiple_commands_can_be_generated(
self, color_adapter: ColorAdapter
) -> None:
dummy_tag_1 = "my_tag"
dummy_tag_2 = "my_tag"
dummy_mapped_tag_1 = "my_mapped_tag"
dummy_mapped_tag_2 = "my_mapped_tag"
color_adapter.TAG_MAPPING[dummy_tag_1] = dummy_mapped_tag_1
color_adapter.TAG_MAPPING[dummy_tag_2] = dummy_mapped_tag_2
expected_command_1 = ["utils/color_adapter.sh", dummy_mapped_tag_1]
expected_command_2 = ["utils/color_adapter.sh", dummy_mapped_tag_2]
result_1 = color_adapter._assemble_color_loading_command(tag=dummy_tag_1)
result_2 = color_adapter._assemble_color_loading_command(tag=dummy_tag_2)
assert result_1 == expected_command_1
assert result_2 == expected_command_2
class TestColorLoadingCases:
@pytest.fixture()
def dummy_color_adapter(self) -> str:
"""
Dummy loader script that can be called in two modes:
> dummy_color_adapter.sh --success <message>
In this mode the passed <message> will be echoed back.
> dummy_color_adapter.sh --error
In this mode the script will abort with an error.
"""
return str(Path(__file__).parent / "dummy_color_adapter.sh")
def test__colors_can_be_fetched__success(
self,
dummy_color_adapter: str,
mocker: MockerFixture,
color_adapter: ColorAdapter,
) -> None:
dummy_tag = "my_tag"
dummy_command = [dummy_color_adapter, "--success", dummy_tag]
mock_assemble_command = mocker.patch(
"dotmodules.renderer.ColorAdapter._assemble_color_loading_command"
)
mock_assemble_command.return_value = dummy_command
result = color_adapter._load_color_for_tag(tag=dummy_tag)
assert result == dummy_tag
mock_assemble_command.assert_called_with(tag=dummy_tag)
def test__colors_can_be_fetched__error__graceful_handling(
self,
dummy_color_adapter: str,
mocker: MockerFixture,
color_adapter: ColorAdapter,
) -> None:
dummy_tag = "my_tag"
dummy_command = [dummy_color_adapter, "--error"]
mock_assemble_command = mocker.patch(
"dotmodules.renderer.ColorAdapter._assemble_color_loading_command"
)
mock_assemble_command.return_value = dummy_command
result = color_adapter._load_color_for_tag(tag=dummy_tag)
# Cannot resolve tag -> returns no coloring.
assert result == ""
mock_assemble_command.assert_called_with(tag=dummy_tag)
class TestColorizeCases:
def test__no_color_tags(self, colors: Colors) -> None:
dummy_string = "I am a dummy string with no colors"
result = colors.colorize(string=dummy_string)
assert result.colorized_string == dummy_string
assert result.additional_width == 0
def test__color_tags_can_be_resolved(
self, mocker: MockerFixture, colors: Colors
) -> None:
dummy_string = "<<RED>>I am in color<<RESET>>"
mock_load_color_for_tag = mocker.patch(
"dotmodules.renderer.ColorAdapter._load_color_for_tag",
wraps=lambda tag: tag.lower(),
)
result = colors.colorize(string=dummy_string)
# The mocked color loading simply converts the tag names into lowercase.
assert result.colorized_string == "redI am in colorreset"
assert result.additional_width == 8
mock_load_color_for_tag.assert_has_calls(
[
mocker.call(tag="RED"),
mocker.call(tag="RESET"),
]
)
def test__repeated_color_tags_can_be_resolved(
self, mocker: MockerFixture, colors: Colors
) -> None:
dummy_string = "<<RED>>I am in <<RED>>color<<RESET>>"
mock_load_color_for_tag = mocker.patch(
"dotmodules.renderer.ColorAdapter._load_color_for_tag",
wraps=lambda tag: tag.lower(),
)
result = colors.colorize(string=dummy_string)
# The mocked color loading simply converts the tag names into lowercase.
assert result.colorized_string == "redI am in redcolorreset"
assert result.additional_width == 11
# The cache is only updated twice.
mock_load_color_for_tag.assert_has_calls(
[
mocker.call(tag="RED"),
mocker.call(tag="RESET"),
]
)
|
nilq/baby-python
|
python
|
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Driver for Parade PS8742 USB mux.."""
import hw_driver
import i2c_reg
class Ps8742Error(hw_driver.HwDriverError):
"""Error occurred accessing ps8742."""
class ps8742(hw_driver.HwDriver):
"""Object to access drv=ps8742 controls."""
# I2C Addr of typical ps8742.
USB_MUX_ADDR = 0x20
# Control reg offset.
USB_MUX_CTRL = 0
    # USB3 line passthrough enable.
USB_MUX_CTRL_USB3_EN = 0x20
def __init__(self, interface, params):
"""Constructor.
Args:
interface: i2c interface object to handle low-level communication to
control
params: dictionary of params needed to perform operations on this
i2c device. All items are strings initially but should be cast to
types detailed below.
Mandatory Params:
child: integer, 7-bit i2c child address
offset: integer, gpio's bit position from lsb
Optional Params:
"""
super(ps8742, self).__init__(interface, params)
child = self._get_child()
self._i2c_obj = i2c_reg.I2cReg.get_device(
self._interface, child, addr_len=1, reg_len=1, msb_first=True,
no_read=False, use_reg_cache=False)
def _Get_usb3(self):
"""Getter for usb3 enable.
Returns:
0: USB2 only.
1: USB3.
"""
value = self._i2c_obj._read_reg(self.USB_MUX_CTRL)
if self.USB_MUX_CTRL_USB3_EN & value:
return 1
return 0
def _Set_usb3(self, enable):
"""Setter for usb3 enable.
Args:
enable: 0 - USB2 only. 1 - enable USB3.
"""
value = self._i2c_obj._read_reg(self.USB_MUX_CTRL)
if not enable:
value = value & ~(self.USB_MUX_CTRL_USB3_EN)
else:
value = value | self.USB_MUX_CTRL_USB3_EN
self._i2c_obj._write_reg(self.USB_MUX_CTRL, value)
def _get_child(self):
"""Check and return needed params to call driver.
Returns:
child: 7-bit i2c address
"""
if 'child' not in self._params:
raise Ps8742Error('getting child address')
child = int(self._params['child'], 0)
return child
|
nilq/baby-python
|
python
|
import jwt
import datetime
import tornado.testing
import tornado.httpserver
import tornado.httpclient
import tornado.gen
import tornado.websocket
from app import Application
APP = Application()
JWT_TOKEN_EXPIRE = datetime.timedelta(seconds=5)
class ChatAuthHandler(tornado.testing.AsyncTestCase):
def setUp(self):
super(ChatAuthHandler, self).setUp()
server = tornado.httpserver.HTTPServer(APP)
socket, self.port = tornado.testing.bind_unused_port()
server.add_socket(socket)
@tornado.testing.gen_test
def test_auth_no_cookie(self):
connection = yield self._connect(auth=False)
response = yield connection.read_message()
self.assertIn('Not authenticated', response)
@tornado.testing.gen_test
def test_auth_invalid_token(self):
connection = yield self._connect(token='test')
response = yield connection.read_message()
self.assertIn('Not authenticated', response)
@tornado.testing.gen_test
def test_auth_success(self):
token = jwt.encode({
'username': 'tester',
'expires': (datetime.datetime.utcnow() + JWT_TOKEN_EXPIRE).isoformat(),
},
key=APP.settings['jwt_secret'],
algorithm='HS256'
)
connection = yield self._connect(token=token)
response = yield connection.read_message()
self.assertIn('Connected', response)
def _connect(self, auth=True, token=None):
jwt_cookie = 'jwt={}'.format(token or '')
request = tornado.httpclient.HTTPRequest(
url = 'ws://localhost:{}/chat'.format(self.port),
headers={'Cookie': jwt_cookie} if auth else {}
)
return tornado.websocket.websocket_connect(request)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
(베타) PyTorch를 사용한 Channels Last 메모리 형식
*******************************************************
**Author**: `Vitaly Fedyunin <https://github.com/VitalyFedyunin>`_
**번역**: `Choi Yoonjeong <https://github.com/potatochips178>`_
Channels last가 무엇인가요
----------------------------
Channels last 메모리 형식(memory format)은 차원 순서를 유지하면서 메모리 상의 NCHW 텐서(tensor)를 정렬하는 또 다른 방식입니다.
Channels last 텐서는 채널(Channel)이 가장 밀도가 높은(densest) 차원으로 정렬(예. 이미지를 픽셀x픽셀로 저장)됩니다.
예를 들어, (2개의 4 x 4 이미지에 3개의 채널이 존재하는 경우) 전형적인(연속적인) NCHW 텐서의 저장 방식은 다음과 같습니다:
.. figure:: /_static/img/classic_memory_format.png
:alt: classic_memory_format
Channels last 메모리 형식은 데이터를 다르게 정렬합니다:
.. figure:: /_static/img/channels_last_memory_format.png
:alt: channels_last_memory_format
PyTorch는 기존의 스트라이드(strides) 구조를 사용함으로써 메모리 형식을 지원(하며, eager, JIT 및 TorchScript를 포함한
기존의 모델들과 하위 호환성을 제공)합니다. 예를 들어, Channels last 형식에서 10x3x16x16 배치(batch)는 (768, 1, 48, 3)와
같은 폭(strides)을 가지고 있게 됩니다.
"""
######################################################################
# Channels last memory format is implemented for 4D NCHW Tensors only.
#
######################################################################
# Memory Format API
# ---------------------------------
#
# Here is how to convert tensors between the contiguous and channels last memory formats.
######################################################################
# A classic PyTorch contiguous tensor
import torch
N, C, H, W = 10, 3, 32, 32
x = torch.empty(N, C, H, W)
print(x.stride()) # Outputs: (3072, 1024, 32, 1)
######################################################################
# Conversion operator
x = x.to(memory_format=torch.channels_last)
print(x.shape) # Outputs: (10, 3, 32, 32) as dimension order is preserved
print(x.stride()) # Outputs: (3072, 1, 96, 3)
######################################################################
# Back to contiguous format
x = x.to(memory_format=torch.contiguous_format)
print(x.stride()) # Outputs: (3072, 1024, 32, 1)
######################################################################
# Alternative option
x = x.contiguous(memory_format=torch.channels_last)
print(x.stride()) # Outputs: (3072, 1, 96, 3)
######################################################################
# Format checks
print(x.is_contiguous(memory_format=torch.channels_last)) # Outputs: True
######################################################################
# There are minor differences between ``to`` and ``contiguous``. We suggest
# sticking with ``to`` when explicitly converting the memory format of a tensor.
#
# For the general case the two APIs behave the same. However, in the special
# case of a 4D ``NCHW`` tensor where either ``C==1`` or ``H == 1 && W == 1``,
# only ``to`` generates proper strides to represent the channels last memory
# format.
#
# This is because in either of the two cases above the memory format of the
# tensor is ambiguous. For example, a contiguous tensor of size ``N1HW`` is
# both ``contiguous`` and channels last in memory storage. Therefore, it is
# already considered ``is_contiguous`` for the given memory format, so a
# ``contiguous`` call becomes a no-op and does not update the strides. In
# contrast, ``to`` restrides the tensor with meaningful strides on dimensions
# of size 1 in order to properly represent the intended memory format.
special_x = torch.empty(4, 1, 4, 4)
print(special_x.is_contiguous(memory_format=torch.channels_last)) # Outputs: True
print(special_x.is_contiguous(memory_format=torch.contiguous_format)) # Outputs: True
######################################################################
# The same applies to the explicit permutation API ``permute``. In the special
# cases where ambiguity can arise, ``permute`` is not guaranteed to produce
# strides that properly carry the intended memory format. We suggest using
# ``to`` with an explicit memory format to avoid unintended behavior.
#
# As a side note, in the extreme case where all three non-batch dimensions are
# equal to ``1`` (``C==1 && H==1 && W==1``), the current implementation cannot
# mark a tensor as channels last memory format.
######################################################################
# Create as channels last
x = torch.empty(N, C, H, W, memory_format=torch.channels_last)
print(x.stride()) # Outputs: (3072, 1, 96, 3)
######################################################################
# ``clone`` preserves the memory format.
y = x.clone()
print(y.stride()) # Outputs: (3072, 1, 96, 3)
######################################################################
# ``to``, ``cuda``, ``float`` ... also preserve the memory format.
if torch.cuda.is_available():
    y = x.cuda()
    print(y.stride()) # Outputs: (3072, 1, 96, 3)
######################################################################
# ``empty_like`` and ``*_like`` operators preserve the memory format.
y = torch.empty_like(x)
print(y.stride()) # Outputs: (3072, 1, 96, 3)
######################################################################
# Pointwise operators preserve the memory format.
z = x + y
print(z.stride()) # Outputs: (3072, 1, 96, 3)
######################################################################
# Conv and Batchnorm modules support channels last (only works for cuDNN >= 7.6).
# Convolution modules, unlike binary p-wise operators, have channels last as the
# dominating memory format: if all inputs are in contiguous memory format, the
# operator produces output in contiguous memory format; otherwise the output is
# in channels last memory format.
if torch.backends.cudnn.version() >= 7603:
    model = torch.nn.Conv2d(8, 4, 3).cuda().half()
    model = model.to(memory_format=torch.channels_last) # Module parameters need to be converted to channels last
    input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float32, requires_grad=True)
    input = input.to(device="cuda", memory_format=torch.channels_last, dtype=torch.float16)
    out = model(input)
    print(out.is_contiguous(memory_format=torch.channels_last)) # Outputs: True
######################################################################
# When an input tensor reaches an operator without channels last support, a
# permutation is automatically applied in the kernel to restore the input
# tensor to contiguous format. This introduces overhead and stops the channels
# last memory format propagation. Nevertheless, correct output is guaranteed.
######################################################################
# Performance Gains
# -------------------------------------------------------------------------------------------
# The most significant performance gains are observed on Nvidia hardware with
# Tensor Cores support, running at reduced precision (``torch.float16``).
# Using `AMP (Automated Mixed Precision)` training scripts, we observed a 22%+
# performance gain for channels last compared to the contiguous format.
# The AMP supplied by Nvidia was used: https://github.com/NVIDIA/apex
#
# ``python main_amp.py -a resnet50 --b 200 --workers 16 --opt-level O2 ./data``
# opt_level = O2
# keep_batchnorm_fp32 = None <class 'NoneType'>
# loss_scale = None <class 'NoneType'>
# CUDNN VERSION: 7603
# => creating model 'resnet50'
# Selected optimization level O2: FP16 training with FP32 batchnorm and FP32 master weights.
# Defaults for this optimization level are:
# enabled : True
# opt_level : O2
# cast_model_type : torch.float16
# patch_torch_functions : False
# keep_batchnorm_fp32 : True
# master_weights : True
# loss_scale : dynamic
# Processing user overrides (additional kwargs that are not None)...
# After processing overrides, optimization options are:
# enabled : True
# opt_level : O2
# cast_model_type : torch.float16
# patch_torch_functions : False
# keep_batchnorm_fp32 : True
# master_weights : True
# loss_scale : dynamic
# Epoch: [0][10/125] Time 0.866 (0.866) Speed 230.949 (230.949) Loss 0.6735125184 (0.6735) Prec@1 61.000 (61.000) Prec@5 100.000 (100.000)
# Epoch: [0][20/125] Time 0.259 (0.562) Speed 773.481 (355.693) Loss 0.6968704462 (0.6852) Prec@1 55.000 (58.000) Prec@5 100.000 (100.000)
# Epoch: [0][30/125] Time 0.258 (0.461) Speed 775.089 (433.965) Loss 0.7877287269 (0.7194) Prec@1 51.500 (55.833) Prec@5 100.000 (100.000)
# Epoch: [0][40/125] Time 0.259 (0.410) Speed 771.710 (487.281) Loss 0.8285319805 (0.7467) Prec@1 48.500 (54.000) Prec@5 100.000 (100.000)
# Epoch: [0][50/125] Time 0.260 (0.380) Speed 770.090 (525.908) Loss 0.7370464802 (0.7447) Prec@1 56.500 (54.500) Prec@5 100.000 (100.000)
# Epoch: [0][60/125] Time 0.258 (0.360) Speed 775.623 (555.728) Loss 0.7592862844 (0.7472) Prec@1 51.000 (53.917) Prec@5 100.000 (100.000)
# Epoch: [0][70/125] Time 0.258 (0.345) Speed 774.746 (579.115) Loss 1.9698858261 (0.9218) Prec@1 49.500 (53.286) Prec@5 100.000 (100.000)
# Epoch: [0][80/125] Time 0.260 (0.335) Speed 770.324 (597.659) Loss 2.2505953312 (1.0879) Prec@1 50.500 (52.938) Prec@5 100.000 (100.000)
######################################################################
# Passing the ``--channels-last true`` argument runs the model in Channels last format and shows a 22% performance gain.
#
# ``python main_amp.py -a resnet50 --b 200 --workers 16 --opt-level O2 --channels-last true ./data``
# opt_level = O2
# keep_batchnorm_fp32 = None <class 'NoneType'>
# loss_scale = None <class 'NoneType'>
#
# CUDNN VERSION: 7603
#
# => creating model 'resnet50'
# Selected optimization level O2: FP16 training with FP32 batchnorm and FP32 master weights.
#
# Defaults for this optimization level are:
# enabled : True
# opt_level : O2
# cast_model_type : torch.float16
# patch_torch_functions : False
# keep_batchnorm_fp32 : True
# master_weights : True
# loss_scale : dynamic
# Processing user overrides (additional kwargs that are not None)...
# After processing overrides, optimization options are:
# enabled : True
# opt_level : O2
# cast_model_type : torch.float16
# patch_torch_functions : False
# keep_batchnorm_fp32 : True
# master_weights : True
# loss_scale : dynamic
#
# Epoch: [0][10/125] Time 0.767 (0.767) Speed 260.785 (260.785) Loss 0.7579724789 (0.7580) Prec@1 53.500 (53.500) Prec@5 100.000 (100.000)
# Epoch: [0][20/125] Time 0.198 (0.482) Speed 1012.135 (414.716) Loss 0.7007197738 (0.7293) Prec@1 49.000 (51.250) Prec@5 100.000 (100.000)
# Epoch: [0][30/125] Time 0.198 (0.387) Speed 1010.977 (516.198) Loss 0.7113101482 (0.7233) Prec@1 55.500 (52.667) Prec@5 100.000 (100.000)
# Epoch: [0][40/125] Time 0.197 (0.340) Speed 1013.023 (588.333) Loss 0.8943189979 (0.7661) Prec@1 54.000 (53.000) Prec@5 100.000 (100.000)
# Epoch: [0][50/125] Time 0.198 (0.312) Speed 1010.541 (641.977) Loss 1.7113249302 (0.9551) Prec@1 51.000 (52.600) Prec@5 100.000 (100.000)
# Epoch: [0][60/125] Time 0.198 (0.293) Speed 1011.163 (683.574) Loss 5.8537774086 (1.7716) Prec@1 50.500 (52.250) Prec@5 100.000 (100.000)
# Epoch: [0][70/125] Time 0.198 (0.279) Speed 1011.453 (716.767) Loss 5.7595844269 (2.3413) Prec@1 46.500 (51.429) Prec@5 100.000 (100.000)
# Epoch: [0][80/125] Time 0.198 (0.269) Speed 1011.827 (743.883) Loss 2.8196096420 (2.4011) Prec@1 47.500 (50.938) Prec@5 100.000 (100.000)
######################################################################
# The following list of models fully supports Channels last and shows 8%-35% performance gains on Volta devices:
# ``alexnet``, ``mnasnet0_5``, ``mnasnet0_75``, ``mnasnet1_0``, ``mnasnet1_3``, ``mobilenet_v2``, ``resnet101``, ``resnet152``, ``resnet18``, ``resnet34``, ``resnet50``, ``resnext50_32x4d``, ``shufflenet_v2_x0_5``, ``shufflenet_v2_x1_0``, ``shufflenet_v2_x1_5``, ``shufflenet_v2_x2_0``, ``squeezenet1_0``, ``squeezenet1_1``, ``vgg11``, ``vgg11_bn``, ``vgg13``, ``vgg13_bn``, ``vgg16``, ``vgg16_bn``, ``vgg19``, ``vgg19_bn``, ``wide_resnet101_2``, ``wide_resnet50_2``
#
######################################################################
# Converting existing models
# --------------------------
#
# Channels last support is not limited to existing models: any model can be
# converted to channels last, and the format propagates through the (network)
# graph as soon as the input (or certain weights) is formatted correctly.
#
# This needs to be done once, after model initialization (or loading).
model = model.to(memory_format=torch.channels_last) # Replace with your model
# This needs to be done for every input.
input = input.to(memory_format=torch.channels_last) # Replace with your input
output = model(input)
#######################################################################
# However, not all operators have been fully converted to support channels last (they usually return contiguous output instead).
# In the examples above, layers that do not support channels last stop the memory format propagation.
# In spite of that, since we converted the model to channels last format, each convolution layer whose
# 4-dimensional weight is in channels last memory format will restore the channels last format and benefit from faster kernels.
#
# But operators that do not support channels last do introduce overhead through permutation.
# Optionally, if you want to improve the performance of the converted model, you can investigate and identify operators in your model that do not support channels last.
#
# That means you need to check the list of used operators against the supported operators list https://github.com/pytorch/pytorch/wiki/Operators-with-Channels-Last-support,
# or introduce memory format checks into eager execution mode and run the model.
#
# In the code below, operators raise an exception if the output of an operator does not match the memory format of the input.
#
#
def contains_cl(args):
for t in args:
if isinstance(t, torch.Tensor):
if t.is_contiguous(memory_format=torch.channels_last) and not t.is_contiguous():
return True
elif isinstance(t, list) or isinstance(t, tuple):
if contains_cl(list(t)):
return True
return False
def print_inputs(args, indent=""):
for t in args:
if isinstance(t, torch.Tensor):
print(indent, t.stride(), t.shape, t.device, t.dtype)
elif isinstance(t, list) or isinstance(t, tuple):
print(indent, type(t))
print_inputs(list(t), indent=indent + " ")
else:
print(indent, t)
def check_wrapper(fn):
name = fn.__name__
def check_cl(*args, **kwargs):
was_cl = contains_cl(args)
try:
result = fn(*args, **kwargs)
except Exception as e:
print("`{}` inputs are:".format(name))
print_inputs(args)
print("-------------------")
raise e
failed = False
if was_cl:
if isinstance(result, torch.Tensor):
if result.dim() == 4 and not result.is_contiguous(memory_format=torch.channels_last):
print(
"`{}` got channels_last input, but output is not channels_last:".format(name),
result.shape,
result.stride(),
result.device,
result.dtype,
)
failed = True
if failed and True:
print("`{}` inputs are:".format(name))
print_inputs(args)
raise Exception("Operator `{}` lost channels_last property".format(name))
return result
return check_cl
old_attrs = dict()
def attribute(m):
old_attrs[m] = dict()
for i in dir(m):
e = getattr(m, i)
exclude_functions = ["is_cuda", "has_names", "numel", "stride", "Tensor", "is_contiguous", "__class__"]
if i not in exclude_functions and not i.startswith("_") and "__call__" in dir(e):
try:
old_attrs[m][i] = e
setattr(m, i, check_wrapper(e))
except Exception as e:
print(i)
print(e)
attribute(torch.Tensor)
attribute(torch.nn.functional)
attribute(torch)
######################################################################
# If you find an operator that does not support channels last tensors and
# you want to contribute, feel free to use the following developers guide:
# https://github.com/pytorch/pytorch/wiki/Writing-memory-format-aware-operators
#
######################################################################
# The code below restores the attributes of torch.
for (m, attrs) in old_attrs.items():
    for (k, v) in attrs.items():
        setattr(m, k, v)
######################################################################
# Work to do
# ----------
# There are still many things to do, such as:
#
# - Resolving the ambiguity of N1HW and NC11 Tensors;
# - Testing of Distributed Training support;
# - Improving operator coverage.
#
# If you have feedback and/or suggestions for improvement, please let us know by `creating an issue <https://github.com/pytorch/pytorch/issues>`_.
#
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import sys
import codecs
import json
import os.path
from os import path
from youtubeAPICrawler.database import *
#sys.stdout = codecs.getwriter('utf8')(sys.stdout)
db = YTDatabase()
data_dir = '../../../data/'
studio71 = 'network_channel_id_studio71.json'
maker = 'network_channel_id_maker.json'
broadtv = 'network_channel_id_broadtv.json'
channel71 = []
channelMaker = []
channelBroad = []
with open(path.join(data_dir, studio71)) as chfile:
    channel71.extend(json.load(chfile))
print('71:', len(channel71))
with open(path.join(data_dir, maker)) as chfile:
    channelMaker.extend(json.load(chfile))
print('maker:', len(channelMaker))
with open(path.join(data_dir, broadtv)) as chfile:
    channelBroad.extend(json.load(chfile))
print('broad:', len(channelBroad))
i = 0
with db._session_scope(True) as session:
    print('channel#', session.query(Channel).count())  # Number of channels in db
    for ch in session.query(Channel).all():
        if ch.id in channel71:
            db.updateChannelNetwork(ch.id, 'Studio71')
        elif ch.id in channelMaker:
            db.updateChannelNetwork(ch.id, 'Maker_Studios')
        elif ch.id in channelBroad:
            db.updateChannelNetwork(ch.id, 'BroadbandTV')
        else:
            i += 1
print('channel with no matching network:', i)
|
nilq/baby-python
|
python
|
import glob, os
from random import shuffle
import numpy as np
from PIL import Image
import pycuda.driver as cuda
import pycuda.autoinit  # noqa: F401 -- creates the CUDA context needed by cuda.Stream()
import tensorrt as trt
import labels
import calibrator
MEAN = (71.60167789, 82.09696889, 72.30508881)
MODEL_DIR = 'data/fcn8s/'
CITYSCAPES_DIR = '/data/shared/Cityscapes/'
TEST_IMAGE = CITYSCAPES_DIR + 'leftImg8bit/val/lindau/lindau_000042_000019_leftImg8bit.png'
CALIBRATION_DATASET_LOC = CITYSCAPES_DIR + 'leftImg8bit/train/*/*.png'
CLASSES = 19
CHANNEL = 3
HEIGHT = 500
WIDTH = 500
logger = trt.Logger(trt.Logger.ERROR)
def sub_mean_chw(data):
data = data.transpose((1, 2, 0)) # CHW -> HWC
data -= np.array(MEAN) # Broadcast subtract
data = data.transpose((2, 0 ,1)) # HWC -> CHW
return data
def color_map(output):
output = output.reshape(CLASSES, HEIGHT, WIDTH)
out_col = np.zeros(shape = (HEIGHT, WIDTH), dtype = (np.uint8, 3))
for x in range(WIDTH):
for y in range(HEIGHT):
out_col[y, x] = labels.id2label[labels.trainId2label[np.argmax(output[:, y, x])].id].color
return out_col
def create_calibration_dataset():
# Create list of calibration images
# This sample code picks 100 images at random from training set
calibration_files = glob.glob(CALIBRATION_DATASET_LOC)
shuffle(calibration_files)
return calibration_files[:100]
def get_engine(int8_calibrator, engine_file_path=""):
if os.path.exists(engine_file_path):
with open(engine_file_path, 'rb') as f, trt.Runtime(logger) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
print("building engine...")
with trt.Builder(logger) as builder, builder.create_network() as network, trt.CaffeParser() as parser:
builder.max_batch_size=1
builder.max_workspace_size=(256 << 20)
builder.int8_mode=True
builder.int8_calibrator=int8_calibrator
builder.strict_type_constraints = True
if not os.path.exists(MODEL_DIR + 'fcn8s.prototxt'):
print("There is no prototxt at: %s"%(MODEL_DIR + 'fcn8s.prototxt'))
exit(0)
parser.parse(deploy=MODEL_DIR + 'fcn8s.prototxt', model=MODEL_DIR + 'fcn8s.caffemodel', network = network, dtype=trt.float32)
network.mark_output(network.get_layer(network.num_layers - 1).get_output(0))
engine = builder.build_cuda_engine(network)
return engine
def get_engine2(engine_file_path=""):
if os.path.exists(engine_file_path):
with open(engine_file_path, 'rb') as f, trt.Runtime(logger) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
print("building engine...")
with trt.Builder(logger) as builder, builder.create_network() as network, trt.CaffeParser() as parser:
builder.max_batch_size=1
builder.max_workspace_size=(256 << 20)
builder.fp16_mode=False
builder.strict_type_constraints = True
if not os.path.exists(MODEL_DIR + 'fcn8s.prototxt'):
print("There is no prototxt at: %s"%(MODEL_DIR + 'fcn8s.prototxt'))
exit(0)
parser.parse(deploy=MODEL_DIR + 'fcn8s.prototxt', model=MODEL_DIR + 'fcn8s.caffemodel', network = network, dtype=trt.float32)
network.mark_output(network.get_layer(network.num_layers - 1).get_output(0))
engine = builder.build_cuda_engine(network)
return engine
def do_inference(test_data, engine, stream):
h_input = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(0)), dtype=np.float32)
h_output = cuda.pagelocked_empty(trt.volume(engine.get_binding_shape(1)), dtype=np.float32)
h_input = h_input.reshape(3, 500, 500)
h_output = h_output.reshape(19, 500, 500)
d_input = cuda.mem_alloc(h_input.nbytes)
d_output = cuda.mem_alloc(h_output.nbytes)
np.copyto(h_input, test_data)
cuda.memcpy_htod_async(d_input, h_input, stream)
# Run inference.
context = engine.create_execution_context()
context.execute_async(bindings=[int(d_input), int(d_output)], stream_handle=stream.handle)
# Transfer predictions back from the GPU.
cuda.memcpy_dtoh_async(h_output, d_output, stream)
# Synchronize the stream
stream.synchronize()
out = color_map(h_output)
return out
def main():
calibration_files = create_calibration_dataset()
# Process 5 images at a time for calibration
# This batch size can be different from MaxBatchSize (1 in this example)
print("Ready ImageBatchStream...")
batchstream = calibrator.ImageBatchStream(5, calibration_files, sub_mean_chw)
print("Stream ready done!")
print("Ready Entropy Calibration...")
int8_calibrator = calibrator.pyEntropyCalibrator(["data"], batchstream, 'data/calibration_cache.bin')
print("Calibrator ready done!")
# Build engine
engine1 = get_engine(int8_calibrator)
engine2 = get_engine2()
# Predict
test_data = calibrator.ImageBatchStream.read_image(TEST_IMAGE)
test_data = sub_mean_chw(test_data)
stream = cuda.Stream()
out1 = do_inference(test_data, engine1, stream)
out2 = do_inference(test_data, engine2, stream)
test_img = Image.fromarray(out1, 'RGB')
test_img.save("Int8_inference", "jpeg")
test_img = Image.fromarray(out2, 'RGB')
test_img.save("Float_inference", "jpeg")
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import numpy as np
import os, errno
from PyQt4 import QtGui,QtCore
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg
from saltgui import MplCanvas
class FpParallWidget (QtGui.QWidget):
def __init__(self,parent=None):
super(FpParallWidget,self).__init__(parent)
#Load up the data:
self.loadOutparams()
#set up the file range panel
self.rangepanel=QtGui.QWidget()
# add a label:
self.FromLabel = QtGui.QLabel("From file number:")
self.ToLabel = QtGui.QLabel("To file number:")
#add the name of the file
self.FromValueLabel = QtGui.QLineEdit(str(min(self.outparams[:,0])))
self.ToValueLabel = QtGui.QLineEdit(str(max(self.outparams[:,0])))
# and a button to process the new range
self.refreshButton = QtGui.QPushButton('Refresh')
self.refreshButton.clicked.connect(self.plotOutparams)
#set up file range panel layout
rangeLayout=QtGui.QGridLayout(self.rangepanel)
rangeLayout.addWidget(self.FromLabel,0,0,1,1)
rangeLayout.addWidget(self.FromValueLabel,0,1,1,1)
rangeLayout.addWidget(self.refreshButton,0,2,2,1)
rangeLayout.addWidget(self.ToLabel,0,3,1,1)
rangeLayout.addWidget(self.ToValueLabel,0,4,1,1)
#add the radio buttons for the choice of x axis...
self.radioFilenumber= QtGui.QRadioButton("Plot vs Filenumber")
self.radioX= QtGui.QRadioButton("Plot vs etalon X")
self.radioY= QtGui.QRadioButton("Plot vs etalon Y")
        #create a group for them:
self.radioGroupX=QtGui.QButtonGroup()
self.radioGroupX.addButton(self.radioFilenumber)
self.radioGroupX.addButton(self.radioX)
self.radioGroupX.addButton(self.radioY)
#make sure the filenumber is the default
self.radioFilenumber.setChecked(True)
#create radio buttons for the choice of y axis:
self.radioFWHM=QtGui.QRadioButton("Plots vs FWHM")
self.radioAmp=QtGui.QRadioButton("Plots vs Amplitude")
#add a group for the y axis:
self.radioGroupY=QtGui.QButtonGroup()
self.radioGroupY.addButton(self.radioFWHM)
self.radioGroupY.addButton(self.radioAmp)
#add a default:
self.radioFWHM.setChecked(True)
# display best fit in range:
self.fitpanel=QtGui.QWidget()
self.fitLabel = QtGui.QLabel("Lowest FWHM in file range:")
self.cleanOutparams()
self.getBestparams()
fitFileresult="File number: %i" %int(self.bestparams[0])
fitXresult="X: %i" % int(self.bestparams[1])
fitYresult="Y: %i" % int(self.bestparams[2])
fitZresult="Z: %i " % int(self.bestparams[3])
fitRresult="R: %.1f" % float(self.bestparams[4])
fitAmpresult="Amplitude: %.1f" % float(self.bestparams[5])
fitRmsresult="RMS: %.3f" % float(self.bestparams[6])
fitGammaresult="Gamma: %.2f" % float(self.bestparams[7])
fitFWHMresult="FWHM: %.3f" % float(self.bestparams[8])
#add the text to the fit results panel
self.fitFile = QtGui.QLabel(fitFileresult)
self.fitX = QtGui.QLabel(fitXresult)
self.fitY = QtGui.QLabel(fitYresult)
self.fitZ = QtGui.QLabel(fitZresult)
self.fitR = QtGui.QLabel(fitRresult)
self.fitAmp = QtGui.QLabel(fitAmpresult)
self.fitRms = QtGui.QLabel(fitRmsresult)
self.fitGamma = QtGui.QLabel(fitGammaresult)
self.fitFWHM = QtGui.QLabel(fitFWHMresult)
# lay them out nicely...
fitLayout=QtGui.QGridLayout(self.fitpanel)
fitLayout.addWidget(self.fitLabel,0,0,1,4)
fitLayout.addWidget(self.fitFile,3,0,1,1)
fitLayout.addWidget(self.fitX,3,1,1,1)
fitLayout.addWidget(self.fitY,3,2,1,1)
fitLayout.addWidget(self.fitZ,3,3,1,1)
fitLayout.addWidget(self.fitR,3,4,1,1)
fitLayout.addWidget(self.fitAmp,3,5,1,1)
fitLayout.addWidget(self.fitRms,3,6,1,1)
fitLayout.addWidget(self.fitGamma,3,7,1,1)
fitLayout.addWidget(self.fitFWHM,3,8,1,1)
#set up the fwhm plot
self.fwhmplot=MplCanvas()
self.fwhmaxes=self.fwhmplot.figure.add_subplot(111)
#connect mouse clicks
self.fwhmplot.mpl_connect('button_press_event',self.onClick)
#and now we know what the X and Y axis should be, make the fwhm/amp plot
self.plotOutparams()
# and check for radio button event signals!
self.radioGroupX.buttonClicked.connect(self.plotOutparams)
self.radioGroupY.buttonClicked.connect(self.plotOutparams)
#Add the X radio buttons to a horizontal layout
self.radiopanel= QtGui.QWidget()
radioLayout=QtGui.QHBoxLayout(self.radiopanel)
radioLayout.addWidget(self.radioFilenumber)
radioLayout.addWidget(self.radioX)
radioLayout.addWidget(self.radioY)
#Add the Y radio buttons to a vertical layout
self.radioYpanel=QtGui.QWidget()
radioYLayout=QtGui.QVBoxLayout(self.radioYpanel)
radioYLayout.addWidget(self.radioFWHM)
radioYLayout.addWidget(self.radioAmp)
# Set up the main layout
mainLayout = QtGui.QGridLayout()
mainLayout.addWidget(self.rangepanel,0,0,1,9)
mainLayout.addWidget(self.fitpanel,1,0,1,9)
mainLayout.addWidget(self.fwhmplot,2,0,1,4)
mainLayout.addWidget(self.radioYpanel,2,5,1,1)
mainLayout.addWidget(self.radiopanel,3,1,1,1)
self.setLayout(mainLayout)
def loadOutparams(self):
self.outparams=np.genfromtxt('outparams', skip_header=1)
return
def cleanOutparams(self):
minFile=float(self.FromValueLabel.text())
maxFile=float(self.ToValueLabel.text())
# print "reloading from %i to %i" % (minFile, maxFile)
self.cleanarr=[]
mask = (minFile <= self.outparams[:,0]) * (self.outparams[:,0] <= maxFile)
self.cleanarr = self.outparams[mask]
# print self.cleanarr[:,0]
return
def plotOutparams(self):
#set up the plot....
self.cleanOutparams()
self.fwhmaxes.clear()
if self.radioFilenumber.isChecked():
x=self.cleanarr[:,0]
elif self.radioX.isChecked():
x=self.cleanarr[:,1]
elif self.radioY.isChecked():
x=self.cleanarr[:,2]
# Work out the Y axis:
if self.radioFWHM.isChecked():
y=self.cleanarr[:,8]
elif self.radioAmp.isChecked():
y=self.cleanarr[:,5]
self.fwhmaxes.plot(x, y, 'bo')
# self.show()
# don't forget to force a redraw!
self.fwhmplot.draw()
#ummmm we forgot to update the best fit..
self.getBestparams()
self.fitFile.setText("File number: %i" %int(self.bestparams[0]))
self.fitX.setText("X: %i" % int(self.bestparams[1]))
self.fitX.setText("X: %i" % int(self.bestparams[1]))
self.fitY.setText("Y %i:" % int(self.bestparams[2]))
self.fitZ.setText("Z: %i " % int(self.bestparams[3]))
self.fitR.setText("R: %.1f" % float(self.bestparams[4]))
self.fitAmp.setText("Amplitude: %.1f" % float(self.bestparams[5]))
self.fitRms.setText("RMS: %.2f" % float(self.bestparams[6]))
self.fitGamma.setText("Gamma: %.2f" % float(self.bestparams[7]))
self.fitFWHM.setText("FWHM: %.3f" % float(self.bestparams[8]))
# self.fitpanel.show()
return
def onClick(self,event):
# What's on the X axis?
if self.radioFilenumber.isChecked():
mask = (self.cleanarr[:,0]==round(event.xdata))
elif self.radioX.isChecked():
mask = (self.cleanarr[:,1]==round(event.xdata))
elif self.radioY.isChecked():
mask = (self.cleanarr[:,2]==round(event.xdata))
# get from the array the first row that matches the X value)
datapoint = self.cleanarr[mask][0]
#format it ready for the tooltip:
text="FileNumber: %i, \nX: %i, \nY: %i, \nZ:%i, \nAmp: %.2f, \nRMS: %.2f, \nGamma: %.2f, \nFWHM: %.3f" % (int(datapoint[0]), int(datapoint[1]),int(datapoint[2]),int(datapoint[3]),datapoint[4],datapoint[6],datapoint[7],datapoint[8])
#and plonk it on! :)
QtGui.QToolTip.showText(QtCore.QPoint(338,314),text)
return
def getBestparams(self):
if self.radioFWHM.isChecked():
self.fitLabel.setText("Lowest FWHM in file range:")
mask = (self.cleanarr[:,8]==min(self.cleanarr[:,8]))
self.bestparams = self.cleanarr[mask][0]
elif self.radioAmp.isChecked():
self.fitLabel.setText("Highest Amplitude in file range:")
mask = (self.cleanarr[:,5]==max(self.cleanarr[:,5]))
self.bestparams = self.cleanarr[mask][0]
return
|
nilq/baby-python
|
python
|
from logging import NullHandler
from ast import literal_eval
class RequestCountHandler(NullHandler):
    """Logging handler that extracts request counts from dict-formatted log
    messages and pushes them onto a queue."""
    def __init__(self, queue):
        NullHandler.__init__(self)
        self.queue = queue
        self.__request_count = 0
    def handle(self, record):
        # The original guard was `if False and ...`, which disabled this
        # branch entirely; counting "request" records appears to be the intent.
        if "request" in record.msg:
            dct = literal_eval(record.msg)
            self.queue.put(int(dct['requests']), block=True)
            self.__request_count += 1
    def get_queue(self):
        return self.queue
    @property
    def request_count(self):
        return self.__request_count
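# Minimal usage sketch (names here are illustrative, not from the original):
# attach the handler to whatever logger emits the dict-formatted messages,
# then read the counts from the shared queue.
if __name__ == "__main__":
    import logging
    import queue
    q = queue.Queue()
    log = logging.getLogger("request-metrics")
    log.addHandler(RequestCountHandler(q))
    log.warning("{'requests': 3}")
    print(q.get())  # -> 3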
|
nilq/baby-python
|
python
|
from .network import launch_in_thread
from . import ui
import argparse
from ..logs import logger
def main(capture_file=None):
ui.init(launch_in_thread, capture_file)
ui.async_start()
if __name__ == "__main__":
logger.debug("Starting sniffer as __main__")
parser = argparse.ArgumentParser(
description='Start the sniffer either from a file or from live capture.')
parser.add_argument('--capture', '-c', metavar='PATH', type=str,
help='Path to capture file')
parser.add_argument('--debug', '-d', action='store_true',
help='show logger debug messages')
args = parser.parse_args()
    if args.debug:
logger.setLevel("DEBUG")
else:
logger.setLevel("INFO")
if args.capture:
logger.debug("Starting sniffer with capture file")
main(args.capture)
else:
logger.debug("Starting sniffer on live interface")
main()
|
nilq/baby-python
|
python
|
import os
import sqlite3
try:
from psycopg2cffi import compat
compat.register()
except ImportError:
pass
from sqlalchemy import Column, Integer, String, ForeignKey, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
import pytest
@pytest.fixture
def database():
class Database:
Base = declarative_base()
class Language(Base):
__tablename__ = 'languages'
id = Column(Integer, primary_key=True)
name = Column(String(16), nullable=False)
created = Column(Integer)
class Snippet(Base):
__tablename__ = 'snippets'
id = Column(Integer, primary_key=True)
code = Column(String, nullable=False)
languageid = Column(ForeignKey('languages.id'))
language = relationship('Language', backref='snippets')
def __init__(self, back_end, write_engine, read_engine):
self.back_end = back_end
self.write_engine = write_engine
self.read_engine = read_engine
def create_all(self):
self.Base.metadata.create_all(self.write_engine)
def drop_all(self):
self.Base.metadata.drop_all(self.write_engine)
postgres_uri = os.environ.get('FALCON_SQLA_POSTGRESQL_URI')
if postgres_uri:
back_end = 'postgresql'
write_engine = create_engine(postgres_uri, echo=True)
args = {'options': '-c default_transaction_read_only=on'}
read_engine = create_engine(
postgres_uri, echo=True, connect_args=args)
else:
sqlite_path = os.environ.get(
'FALCON_SQLA_TEST_DB', '/tmp/falcon-sqla/test.db')
if not os.path.exists(os.path.dirname(sqlite_path)):
os.makedirs(os.path.dirname(sqlite_path))
# NOTE(vytas): Hack until we only support SQLAlchemy with this
# improvement: https://github.com/sqlalchemy/sqlalchemy/issues/4863
def connect_ro():
uri_ro = 'file:' + sqlite_path + '?mode=ro'
return sqlite3.connect(uri_ro, uri=True)
back_end = 'sqlite'
uri = 'sqlite:///' + sqlite_path
write_engine = create_engine(uri, echo=True)
read_engine = create_engine(
uri + '?mode=ro', creator=connect_ro, echo=True)
db = Database(back_end, write_engine, read_engine)
db.create_all()
yield db
if back_end == 'sqlite':
try:
os.unlink(sqlite_path)
except OSError:
pass
else:
db.drop_all()
|
nilq/baby-python
|
python
|
import time
from django.contrib.auth.models import User
from django.urls import reverse
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import Select
from ..management.commands.createtestdata import create_test_data
class FilterPage:
WAIT_FOR_RELOAD = 1
URL_PATH = reverse('admin:testapp_modela_changelist')
# ul-indexes
MULTISELECT_UL = 3
MULTISELECT_RELATED_UL = 7
def __init__(self, selenium, base_url):
self.base_url = base_url
self.url = base_url + self.URL_PATH
self.selenium = selenium
self.current_url = self.selenium.current_url
def login(self, client):
# login to selenium - using a cookie from the django test client
admin = User.objects.get(username='admin')
client.force_login(admin)
cookie = client.cookies['sessionid']
#selenium will set cookie domain based on current page domain
self.selenium.get(self.base_url + '/admin/')
self.selenium.add_cookie({'name': 'sessionid', 'value': cookie.value, 'secure': False, 'path': '/'})
#need to update page for logged in user
self.selenium.refresh()
def get(self, url_query=str()):
return self.selenium.get(self.url + '?' + url_query)
def wait_for_reload(self):
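        """Refresh the page until the browser URL changes; raise RuntimeError
        if it does not change within WAIT_FOR_RELOAD seconds."""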
now = time.time()
while self.current_url == self.selenium.current_url:
self.selenium.refresh()
            if time.time() - now > self.WAIT_FOR_RELOAD:
msg = "Could not reload live server page. Waited {} sec."
raise RuntimeError(msg.format(self.WAIT_FOR_RELOAD))
else:
self.current_url = self.selenium.current_url
return True
@property
def item_count(self):
return len(self.selenium.find_elements_by_xpath('//*[@id="result_list"]/tbody/tr'))
@property
def url_query(self):
return self.selenium.current_url.split('?')[-1].replace('%2C', ',')
def get_selected_li_count(self, ul):
return len(ul.find_elements_by_css_selector('li.selected'))
def use_dropdown_filter(self, select_id, option):
select = Select(self.selenium.find_element_by_id(select_id))
select.select_by_visible_text(option)
self.wait_for_reload()
return Select(self.selenium.find_element_by_id(select_id))
def use_multiselect_filter(self, ul_num, title):
ul_xpath = '//*[@id="changelist-filter"]/ul[{}]'.format(ul_num)
a_css = 'li a[title="{}"]'.format(title)
ul = self.selenium.find_element_by_xpath(ul_xpath)
ul.find_element_by_css_selector(a_css).click()
self.wait_for_reload()
return self.selenium.find_element_by_xpath(ul_xpath)
def use_multiselect_dropdown_filter(self, field, options):
select = Select(self.selenium.find_element_by_id(field + '_select'))
for value in options:
select.select_by_value(value)
self.selenium.find_element_by_id(field + '_submit').click()
self.wait_for_reload()
return Select(self.selenium.find_element_by_id(field + '_select'))
class LiveFilterTest(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
options = Options()
options.headless = True
cls.selenium = WebDriver(options=options)
cls.url_path = reverse('admin:testapp_modela_changelist')
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super().tearDownClass()
def setUp(self):
create_test_data()
self.page = FilterPage(self.selenium, self.live_server_url)
self.page.login(self.client)
def check_dropdown_filter(self, select_id, query_key, query_value, option, count):
select = self.page.use_dropdown_filter(select_id, option)
self.assertEqual(self.page.item_count, count)
self.assertEqual(select.first_selected_option.text, option)
if option == 'All':
self.assertNotIn(query_key, self.page.url_query)
else:
self.assertIn(query_key + query_value, self.page.url_query)
def test_01_dropdown_filter(self):
self.page.get()
# check simple dropdown filter
select_id, query_key = 'dropdown-gt3_filter_select', 'dropdown_gt3='
self.check_dropdown_filter(select_id, query_key, '2', '2', 9)
self.check_dropdown_filter(select_id, query_key, '', 'All', 36)
# Check choices dropdown filter:
select_id, query_key = 'choices-dropdown_filter_select', 'choices_dropdown__exact='
self.check_dropdown_filter(select_id, query_key, '3', 'three', 4)
self.check_dropdown_filter(select_id, query_key, '', 'All', 36)
# Check related dropdown filter:
select_id, query_key = 'related-dropdown_filter_select', 'related_dropdown__id__exact='
self.check_dropdown_filter(select_id, query_key, '9', 'ModelB 9', 1)
self.check_dropdown_filter(select_id, query_key, '', 'All', 36)
def check_multiselect_filter(self, ul_num, query_key, query_value, option, count, selected):
ul = self.page.use_multiselect_filter(ul_num, option)
self.assertEqual(self.page.item_count, count)
self.assertEqual(self.page.get_selected_li_count(ul), selected)
if option == 'All':
self.assertNotIn(query_key, self.page.url_query)
else:
self.assertIn(query_key + query_value, self.page.url_query)
def test_02_multiselect_filter(self):
# start with an already filtered changelist
self.page.get('dropdown_gt3=2')
# check simple multiselect filter
ul_num, query_key = self.page.MULTISELECT_UL, 'multiselect__in='
self.check_multiselect_filter(ul_num, query_key, '4', '4', 2, 1)
self.check_multiselect_filter(ul_num, query_key, '4,3', '3', 3, 2)
self.check_multiselect_filter(ul_num, query_key, '4,3,2', '2', 5, 3)
self.check_multiselect_filter(ul_num, query_key, '', 'All', 9, 1)
# check the multiselect related filter
ul_num, query_key = self.page.MULTISELECT_RELATED_UL, 'multiselect_related__id__in='
self.check_multiselect_filter(ul_num, query_key, '34', 'ModelB 34', 1, 1)
self.check_multiselect_filter(ul_num, query_key, '34,30', 'ModelB 30', 2, 2)
self.check_multiselect_filter(ul_num, query_key, '34,30,26', 'ModelB 26', 3, 3)
self.check_multiselect_filter(ul_num, query_key, '30,26', 'ModelB 34', 2, 2)
self.check_multiselect_filter(ul_num, query_key, '', 'All', 9, 1)
def check_multiselect_dropdown_filter(self, field, options, query_key, count):
select = self.page.use_multiselect_dropdown_filter(field, options)
self.assertEqual(len(select.all_selected_options), len(options))
self.assertEqual(self.page.item_count, count)
self.assertIn(query_key + ','.join(options), self.page.url_query)
select.deselect_all()
def test_03_multiselect_dropdown_filter(self):
self.page.get()
# check multiselect-dropdown
field, query_key = 'multiselect-dropdown', 'multiselect_dropdown__in='
options = [str(i) for i in range(2, 5)]
self.check_multiselect_dropdown_filter(field, options, query_key, 18)
# check multiselect-related-dropdown
# (multiselect-dropdown filter is still effectual)
field, query_key = 'multiselect-related-dropdown', 'multiselect_related_dropdown__id__in='
options = [str(i) for i in range(1, 9)]
self.check_multiselect_dropdown_filter(field, options, query_key, 4)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import logging
import os.path
import rescale.client
# Remember to set RESCALE_API_KEY env variable to your Rescale API key
# on platform.rescale.com (in Settings->API)
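# For example, in a POSIX shell (the value below is a placeholder; use your
# own account's key):
#   export RESCALE_API_KEY=<your-api-key>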
SHORT_TEST_ARCHIVE = 'inputs/all_short_tests.tar.gz'
LONG_TEST_FORMAT = 'inputs/long_test_{i}.tar.gz'
LONG_TEST_COUNT = 1
BUILD_ARCHIVE = 'inputs/echoware0.1.tar.gz'
POST_COMPARE_SCRIPT = 'inputs/compare_results.sh'
TEST_COMMAND = """
for testcase in $(find . -name "test[0-9]*" -type d); do
./echoware/bin/echo.sh $testcase
done
"""
POST_RUN_COMPARE_COMMAND = """
for testcase in $(find . -name "test[0-9]*" -type d); do
./compare_results.sh $testcase
done
"""
STDOUT_LOG = 'process_output.log'
logging.basicConfig(level=logging.INFO)
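# Reuse the most recently uploaded copy of a file if one already exists on
# the platform; otherwise upload it from the local path.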
def get_or_upload(file_path):
input_file = rescale.client.RescaleFile.get_newest_by_name(os.path.basename(file_path))
if not input_file:
input_file = rescale.client.RescaleFile(file_path=file_path)
return input_file
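# Build and register a custom-analysis job that runs the test command and
# then the comparison script as a post-processing step.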
def create_job(name, build_input, test_input, post_process, core_type, core_count):
input_files = [build_input, test_input]
job_definition = {
'name': name,
'isLowPriority': True,
'jobanalyses': [
{
'analysis': {
'code': 'custom'
},
'hardware': {
'coresPerSlot': 1,
'slots': core_count,
'coreType': {
'code': core_type
}
},
'inputFiles': [{'id': inp.id} for inp in input_files],
'command': TEST_COMMAND,
'postProcessScript': {'id': post_process.id},
'postProcessScriptCommand': POST_RUN_COMPARE_COMMAND
}
],
}
return rescale.client.RescaleJob(json_data=job_definition)
def main():
logging.info('Uploading test job input files')
short_test_bundle = get_or_upload(SHORT_TEST_ARCHIVE)
long_test_inputs = [get_or_upload(LONG_TEST_FORMAT.format(i=i))
for i in range(LONG_TEST_COUNT)]
build_input = rescale.client.RescaleFile(file_path=BUILD_ARCHIVE)
post_process_file = get_or_upload(POST_COMPARE_SCRIPT)
# create all test jobs
short_test_job = create_job('echoware0.1-all-short-tests',
build_input,
short_test_bundle,
post_process_file,
'standard-plus',
1)
long_test_jobs = [create_job('echoware0.1-long-test-{0}'.format(i),
build_input,
long_test,
post_process_file,
'standard-plus',
1)
for i, long_test in enumerate(long_test_inputs)]
    # submit all jobs
    short_test_job.submit()
    for long_test_job in long_test_jobs:
        long_test_job.submit()
    # wait for all jobs to complete
    short_test_job.wait()
    for long_test_job in long_test_jobs:
        long_test_job.wait()
    # download each job's stdout log to <job name>.out
    for job in [short_test_job] + long_test_jobs:
        job.get_file(STDOUT_LOG).download(target='{0}.out'.format(job.name))
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import InfraredSensor
from pybricks.parameters import Port, Button
from pybricks.tools import wait
from pybricks.media.ev3dev import SoundFile, Font
# Create the brick connection.
ev3 = EV3Brick()
# Set the display font.
print_font = Font(size=16, bold=True)
ev3.screen.set_font(print_font)
# Initialize IR sensor.
ir = InfraredSensor(Port.S4)
# Adjust voice.
ev3.speaker.set_speech_options('en', 'f3', 25, 50)
ev3.speaker.set_volume(50)
# Announce each remote button press until the beacon button is pressed.
while True:
buttons = ir.buttons(1)
wait(10)
if Button.BEACON in buttons:
ev3.speaker.say('beacon')
break
    elif Button.LEFT_UP in buttons and Button.RIGHT_UP in buttons:
        ev3.speaker.play_file(SoundFile.UP)
    elif Button.LEFT_DOWN in buttons and Button.RIGHT_DOWN in buttons:
        ev3.speaker.play_file(SoundFile.DOWN)
    elif Button.LEFT_UP in buttons and Button.LEFT_DOWN in buttons:
        ev3.speaker.say('left')
    elif Button.RIGHT_UP in buttons and Button.RIGHT_DOWN in buttons:
        ev3.speaker.say('right')
    elif Button.LEFT_UP in buttons:
        ev3.speaker.play_file(SoundFile.LEFT)
        ev3.speaker.play_file(SoundFile.UP)
    elif Button.LEFT_DOWN in buttons:
        ev3.speaker.play_file(SoundFile.LEFT)
        ev3.speaker.play_file(SoundFile.DOWN)
    elif Button.RIGHT_UP in buttons:
        ev3.speaker.play_file(SoundFile.RIGHT)
        ev3.speaker.play_file(SoundFile.UP)
    elif Button.RIGHT_DOWN in buttons:
        ev3.speaker.play_file(SoundFile.RIGHT)
        ev3.speaker.play_file(SoundFile.DOWN)
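# Guide the user closer using the relative proximity reading (0-100).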
ev3.speaker.say('come to me')
dist = 100
while dist > 30:
wait(10)
dist = ir.distance()
    ev3.screen.print('Distance:', dist, '%')
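# Track the beacon on channel 1 until it is within 10 (relative units).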
ev3.speaker.say('come to me on channel 1')
dist = 100
angle = 0
while dist > 10:
wait(10)
    dist, angle = ir.beacon(1)
    ev3.screen.print('Distance:', dist, '%')
    ev3.screen.print('Angle:', angle, 'deg')
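# Final stage: read the remote keypad, which can report several buttons
# pressed at the same time (channel 1 only).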
ev3.speaker.say('last try, use channel 1')
while True:
buttons = ir.keypad()
wait(10)
    if Button.LEFT_UP in buttons and Button.RIGHT_UP in buttons and Button.RIGHT_DOWN in buttons:
        ev3.speaker.play_file(SoundFile.GOODBYE)
        break
    elif Button.LEFT_UP in buttons and Button.RIGHT_UP in buttons:
        ev3.speaker.play_file(SoundFile.UP)
    elif Button.LEFT_DOWN in buttons and Button.RIGHT_DOWN in buttons:
        ev3.speaker.play_file(SoundFile.DOWN)
    elif Button.LEFT_UP in buttons and Button.LEFT_DOWN in buttons:
        ev3.speaker.play_file(SoundFile.LEFT)
    elif Button.RIGHT_UP in buttons and Button.RIGHT_DOWN in buttons:
        ev3.speaker.play_file(SoundFile.RIGHT)
    elif Button.LEFT_UP in buttons:
        ev3.speaker.play_file(SoundFile.LEFT)
        ev3.speaker.play_file(SoundFile.UP)
    elif Button.LEFT_DOWN in buttons:
        ev3.speaker.play_file(SoundFile.LEFT)
        ev3.speaker.play_file(SoundFile.DOWN)
    elif Button.RIGHT_UP in buttons:
        ev3.speaker.play_file(SoundFile.RIGHT)
        ev3.speaker.play_file(SoundFile.UP)
    elif Button.RIGHT_DOWN in buttons:
        ev3.speaker.play_file(SoundFile.RIGHT)
        ev3.speaker.play_file(SoundFile.DOWN)
|
nilq/baby-python
|
python
|